author    Linus Torvalds <torvalds@ppc970.osdl.org>    2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>    2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/scsi/aacraid
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/scsi/aacraid')
-rw-r--r--  drivers/scsi/aacraid/Makefile   |    8
-rw-r--r--  drivers/scsi/aacraid/README     |   66
-rw-r--r--  drivers/scsi/aacraid/TODO       |    6
-rw-r--r--  drivers/scsi/aacraid/aachba.c   | 2037
-rw-r--r--  drivers/scsi/aacraid/aacraid.h  | 1623
-rw-r--r--  drivers/scsi/aacraid/commctrl.c |  683
-rw-r--r--  drivers/scsi/aacraid/comminit.c |  325
-rw-r--r--  drivers/scsi/aacraid/commsup.c  |  939
-rw-r--r--  drivers/scsi/aacraid/dpcsup.c   |  215
-rw-r--r--  drivers/scsi/aacraid/linit.c    |  749
-rw-r--r--  drivers/scsi/aacraid/rkt.c      |  440
-rw-r--r--  drivers/scsi/aacraid/rx.c       |  441
-rw-r--r--  drivers/scsi/aacraid/sa.c       |  374
13 files changed, 7906 insertions, 0 deletions
diff --git a/drivers/scsi/aacraid/Makefile b/drivers/scsi/aacraid/Makefile
new file mode 100644
index 000000000000..28d133a3094f
--- /dev/null
+++ b/drivers/scsi/aacraid/Makefile
@@ -0,0 +1,8 @@
1# Adaptec aacraid
2
3obj-$(CONFIG_SCSI_AACRAID) := aacraid.o
4
5aacraid-objs := linit.o aachba.o commctrl.o comminit.o commsup.o \
6 dpcsup.o rx.o sa.o rkt.o
7
8EXTRA_CFLAGS := -Idrivers/scsi
diff --git a/drivers/scsi/aacraid/README b/drivers/scsi/aacraid/README
new file mode 100644
index 000000000000..fdb0f45f7336
--- /dev/null
+++ b/drivers/scsi/aacraid/README
@@ -0,0 +1,66 @@
1AACRAID Driver for Linux (take two)
2
3Introduction
4-------------------------
5The aacraid driver adds support for Adaptec (http://www.adaptec.com)
6RAID controllers. This is a major rewrite from the original
7Adaptec supplied driver. It significantly cleans up the code and
8reduces the running binary size (the module is less than half the
9size of the original).
10
11Supported Cards/Chipsets
12-------------------------
13 Adaptec 2020S
14 Adaptec 2025S
15 Adaptec 2120S
16 Adaptec 2200S
17 Adaptec 2230S
18 Adaptec 2240S
19 Adaptec 2410SA
20 Adaptec 2610SA
21 Adaptec 2810SA
22 Adaptec 21610SA
23 Adaptec 3230S
24 Adaptec 3240S
25 Adaptec 4000SAS
26 Adaptec 4005SAS
27 Adaptec 4800SAS
28 Adaptec 4805SAS
29 Adaptec 5400S
30 Dell PERC 2 Quad Channel
31 Dell PERC 2/Si
32 Dell PERC 3/Si
33 Dell PERC 3/Di
34 Dell CERC 2
35 HP NetRAID-4M
36 Legend S220
37 Legend S230
38
39People
40-------------------------
41Alan Cox <alan@redhat.com>
42Christoph Hellwig <hch@infradead.org> (updates for new-style PCI probing and SCSI host registration,
43 small cleanups/fixes)
44Matt Domsch <matt_domsch@dell.com> (revision ioctl, adapter messages)
45Deanna Bonds (non-DASD support, PAE fibs and 64 bit, added new Adaptec controllers,
46 added new ioctls, changed scsi interface to use new error handler,
47 increased the number of fibs and outstanding commands to a container)
48
49 (fixed 64bit and 64G memory model, changed the confusing naming
50 convention so that fibs sent to the hardware are consistently called
51 hw_fibs, distinct from the driver's own fib tracking structure)
52Mark Salyzyn <Mark_Salyzyn@adaptec.com> Fixed panic issues and added some new product IDs for upcoming HBAs.
53
54Original Driver
55-------------------------
56Adaptec Unix OEM Product Group
57
58Mailing List
59-------------------------
60linux-scsi@vger.kernel.org (Interested parties troll here)
61Also note this is very different from Brian's original driver,
62so don't expect him to support it.
63Adaptec does support this driver. Contact either tech support or Mark Salyzyn.
64
65Original by Brian Boerner February 2001
66Rewritten by Alan Cox, November 2001
diff --git a/drivers/scsi/aacraid/TODO b/drivers/scsi/aacraid/TODO
new file mode 100644
index 000000000000..25856a21d982
--- /dev/null
+++ b/drivers/scsi/aacraid/TODO
@@ -0,0 +1,6 @@
1o Testing
2o More testing
3o Feature request: display the firmware/bios/etc revisions in the
4 /proc info
5o Drop irq_mask, basically unused
6o I/O size increase
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
new file mode 100644
index 000000000000..f3fc35386060
--- /dev/null
+++ b/drivers/scsi/aacraid/aachba.c
@@ -0,0 +1,2037 @@
1/*
2 * Adaptec AAC series RAID controller driver
3 * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
4 *
5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux.
7 *
8 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; see the file COPYING. If not, write to
22 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 */
25
26#include <linux/kernel.h>
27#include <linux/init.h>
28#include <linux/types.h>
29#include <linux/sched.h>
30#include <linux/pci.h>
31#include <linux/spinlock.h>
32#include <linux/slab.h>
33#include <linux/completion.h>
34#include <linux/blkdev.h>
35#include <asm/semaphore.h>
36#include <asm/uaccess.h>
37
38#include <scsi/scsi.h>
39#include <scsi/scsi_cmnd.h>
40#include <scsi/scsi_device.h>
41#include <scsi/scsi_host.h>
42
43#include "aacraid.h"
44
45/* values for inqd_pdt: Peripheral device type in plain English */
46#define INQD_PDT_DA 0x00 /* Direct-access (DISK) device */
47#define INQD_PDT_PROC 0x03 /* Processor device */
48#define INQD_PDT_CHNGR 0x08 /* Changer (jukebox, scsi2) */
49#define INQD_PDT_COMM 0x09 /* Communication device (scsi2) */
50#define INQD_PDT_NOLUN2 0x1f /* Unknown Device (scsi2) */
51#define INQD_PDT_NOLUN 0x7f /* Logical Unit Not Present */
52
53#define INQD_PDT_DMASK 0x1F /* Peripheral Device Type Mask */
54#define INQD_PDT_QMASK 0xE0 /* Peripheral Device Qualifier Mask */
55
56#define MAX_FIB_DATA (sizeof(struct hw_fib) - sizeof(FIB_HEADER))
57
58#define MAX_DRIVER_SG_SEGMENT_COUNT 17
59
60/*
61 * Sense codes
62 */
63
64#define SENCODE_NO_SENSE 0x00
65#define SENCODE_END_OF_DATA 0x00
66#define SENCODE_BECOMING_READY 0x04
67#define SENCODE_INIT_CMD_REQUIRED 0x04
68#define SENCODE_PARAM_LIST_LENGTH_ERROR 0x1A
69#define SENCODE_INVALID_COMMAND 0x20
70#define SENCODE_LBA_OUT_OF_RANGE 0x21
71#define SENCODE_INVALID_CDB_FIELD 0x24
72#define SENCODE_LUN_NOT_SUPPORTED 0x25
73#define SENCODE_INVALID_PARAM_FIELD 0x26
74#define SENCODE_PARAM_NOT_SUPPORTED 0x26
75#define SENCODE_PARAM_VALUE_INVALID 0x26
76#define SENCODE_RESET_OCCURRED 0x29
77#define SENCODE_LUN_NOT_SELF_CONFIGURED_YET 0x3E
78#define SENCODE_INQUIRY_DATA_CHANGED 0x3F
79#define SENCODE_SAVING_PARAMS_NOT_SUPPORTED 0x39
80#define SENCODE_DIAGNOSTIC_FAILURE 0x40
81#define SENCODE_INTERNAL_TARGET_FAILURE 0x44
82#define SENCODE_INVALID_MESSAGE_ERROR 0x49
83#define SENCODE_LUN_FAILED_SELF_CONFIG 0x4c
84#define SENCODE_OVERLAPPED_COMMAND 0x4E
85
86/*
87 * Additional sense codes
88 */
89
90#define ASENCODE_NO_SENSE 0x00
91#define ASENCODE_END_OF_DATA 0x05
92#define ASENCODE_BECOMING_READY 0x01
93#define ASENCODE_INIT_CMD_REQUIRED 0x02
94#define ASENCODE_PARAM_LIST_LENGTH_ERROR 0x00
95#define ASENCODE_INVALID_COMMAND 0x00
96#define ASENCODE_LBA_OUT_OF_RANGE 0x00
97#define ASENCODE_INVALID_CDB_FIELD 0x00
98#define ASENCODE_LUN_NOT_SUPPORTED 0x00
99#define ASENCODE_INVALID_PARAM_FIELD 0x00
100#define ASENCODE_PARAM_NOT_SUPPORTED 0x01
101#define ASENCODE_PARAM_VALUE_INVALID 0x02
102#define ASENCODE_RESET_OCCURRED 0x00
103#define ASENCODE_LUN_NOT_SELF_CONFIGURED_YET 0x00
104#define ASENCODE_INQUIRY_DATA_CHANGED 0x03
105#define ASENCODE_SAVING_PARAMS_NOT_SUPPORTED 0x00
106#define ASENCODE_DIAGNOSTIC_FAILURE 0x80
107#define ASENCODE_INTERNAL_TARGET_FAILURE 0x00
108#define ASENCODE_INVALID_MESSAGE_ERROR 0x00
109#define ASENCODE_LUN_FAILED_SELF_CONFIG 0x00
110#define ASENCODE_OVERLAPPED_COMMAND 0x00
111
112#define BYTE0(x) (unsigned char)(x)
113#define BYTE1(x) (unsigned char)((x) >> 8)
114#define BYTE2(x) (unsigned char)((x) >> 16)
115#define BYTE3(x) (unsigned char)((x) >> 24)
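/*
 * For reference, the BYTE* helpers above simply slice a 32-bit value
 * into octets, e.g. for x = 0x00ABCDEF (illustrative value):
 *   BYTE0(x) == 0xEF, BYTE1(x) == 0xCD, BYTE2(x) == 0xAB, BYTE3(x) == 0x00
 */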
116
117/*------------------------------------------------------------------------------
118 * S T R U C T S / T Y P E D E F S
119 *----------------------------------------------------------------------------*/
120/* SCSI inquiry data */
121struct inquiry_data {
122 u8 inqd_pdt; /* Peripheral qualifier | Peripheral Device Type */
123 u8 inqd_dtq; /* RMB | Device Type Qualifier */
124 u8 inqd_ver; /* ISO version | ECMA version | ANSI-approved version */
125 u8 inqd_rdf; /* AENC | TrmIOP | Response data format */
126 u8 inqd_len; /* Additional length (n-4) */
127 u8 inqd_pad1[2];/* Reserved - must be zero */
128 u8 inqd_pad2; /* RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */
129 u8 inqd_vid[8]; /* Vendor ID */
130 u8 inqd_pid[16];/* Product ID */
131 u8 inqd_prl[4]; /* Product Revision Level */
132};
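/*
 * Note: this layout is 36 bytes in total, so the "additional length"
 * byte filled in later (inqd_len = 31) is the full response size minus
 * the 5 header bytes, as SCSI-2 requires.
 */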
133
134/*
135 * M O D U L E G L O B A L S
136 */
137
138static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* sgmap);
139static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg);
140static int aac_send_srb_fib(struct scsi_cmnd* scsicmd);
141#ifdef AAC_DETAILED_STATUS_INFO
142static char *aac_get_status_string(u32 status);
143#endif
144
145/*
146 * Non dasd selection is handled entirely in aachba now
147 */
148
149static int nondasd = -1;
150static int dacmode = -1;
151
152static int commit = -1;
153
154module_param(nondasd, int, 0);
155MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices. 0=off, 1=on");
156module_param(dacmode, int, 0);
157MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC. 0=off, 1=on");
158module_param(commit, int, 0);
159MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the adapter for foreign arrays.\nThis is typically needed in systems that do not have a BIOS. 0=off, 1=on");
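/*
 * Usage sketch (illustrative values, not part of the original source):
 * all three parameters are plain ints set at module load time, e.g.
 *
 *   modprobe aacraid nondasd=1 dacmode=0 commit=1
 *
 * The default of -1 means "decide from the adapter capabilities probed
 * in aac_get_adapter_info() below".
 */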
160
161/**
162 * aac_get_config_status - check the adapter configuration
163 * @dev: adapter to query
164 *
165 * Query config status, and commit the configuration if needed.
166 */
167int aac_get_config_status(struct aac_dev *dev)
168{
169 int status = 0;
170 struct fib * fibptr;
171
172 if (!(fibptr = fib_alloc(dev)))
173 return -ENOMEM;
174
175 fib_init(fibptr);
176 {
177 struct aac_get_config_status *dinfo;
178 dinfo = (struct aac_get_config_status *) fib_data(fibptr);
179
180 dinfo->command = cpu_to_le32(VM_ContainerConfig);
181 dinfo->type = cpu_to_le32(CT_GET_CONFIG_STATUS);
182 dinfo->count = cpu_to_le32(sizeof(((struct aac_get_config_status_resp *)NULL)->data));
183 }
184
185 status = fib_send(ContainerCommand,
186 fibptr,
187 sizeof (struct aac_get_config_status),
188 FsaNormal,
189 1, 1,
190 NULL, NULL);
191 if (status < 0 ) {
192 printk(KERN_WARNING "aac_get_config_status: SendFIB failed.\n");
193 } else {
194 struct aac_get_config_status_resp *reply
195 = (struct aac_get_config_status_resp *) fib_data(fibptr);
196 dprintk((KERN_WARNING
197 "aac_get_config_status: response=%d status=%d action=%d\n",
198 le32_to_cpu(reply->response),
199 le32_to_cpu(reply->status),
200 le32_to_cpu(reply->data.action)));
201 if ((le32_to_cpu(reply->response) != ST_OK) ||
202 (le32_to_cpu(reply->status) != CT_OK) ||
203 (le32_to_cpu(reply->data.action) > CFACT_PAUSE)) {
204 printk(KERN_WARNING "aac_get_config_status: Will not issue the Commit Configuration\n");
205 status = -EINVAL;
206 }
207 }
208 fib_complete(fibptr);
209 /* Send a CT_COMMIT_CONFIG to enable discovery of devices */
210 if (status >= 0) {
211 if (commit == 1) {
212 struct aac_commit_config * dinfo;
213 fib_init(fibptr);
214 dinfo = (struct aac_commit_config *) fib_data(fibptr);
215
216 dinfo->command = cpu_to_le32(VM_ContainerConfig);
217 dinfo->type = cpu_to_le32(CT_COMMIT_CONFIG);
218
219 status = fib_send(ContainerCommand,
220 fibptr,
221 sizeof (struct aac_commit_config),
222 FsaNormal,
223 1, 1,
224 NULL, NULL);
225 fib_complete(fibptr);
226 } else if (commit == 0) {
227 printk(KERN_WARNING
228 "aac_get_config_status: Foreign device configurations are being ignored\n");
229 }
230 }
231 fib_free(fibptr);
232 return status;
233}
234
235/**
236 * aac_get_containers - list containers
237 * @dev: adapter to probe
238 *
239 * Make a list of all containers on this controller
240 */
241int aac_get_containers(struct aac_dev *dev)
242{
243 struct fsa_dev_info *fsa_dev_ptr;
244 u32 index;
245 int status = 0;
246 struct fib * fibptr;
247 unsigned instance;
248 struct aac_get_container_count *dinfo;
249 struct aac_get_container_count_resp *dresp;
250 int maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
251
252 instance = dev->scsi_host_ptr->unique_id;
253
254 if (!(fibptr = fib_alloc(dev)))
255 return -ENOMEM;
256
257 fib_init(fibptr);
258 dinfo = (struct aac_get_container_count *) fib_data(fibptr);
259 dinfo->command = cpu_to_le32(VM_ContainerConfig);
260 dinfo->type = cpu_to_le32(CT_GET_CONTAINER_COUNT);
261
262 status = fib_send(ContainerCommand,
263 fibptr,
264 sizeof (struct aac_get_container_count),
265 FsaNormal,
266 1, 1,
267 NULL, NULL);
268 if (status >= 0) {
269 dresp = (struct aac_get_container_count_resp *)fib_data(fibptr);
270 maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
271 fib_complete(fibptr);
272 }
273
274 if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
275 maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
276
277 fsa_dev_ptr = (struct fsa_dev_info *) kmalloc(
278 sizeof(*fsa_dev_ptr) * maximum_num_containers, GFP_KERNEL);
279 if (!fsa_dev_ptr) {
280 fib_free(fibptr);
281 return -ENOMEM;
282 }
283 memset(fsa_dev_ptr, 0, sizeof(*fsa_dev_ptr) * maximum_num_containers);
284
285 dev->fsa_dev = fsa_dev_ptr;
286 dev->maximum_num_containers = maximum_num_containers;
287
288 for (index = 0; index < dev->maximum_num_containers; index++) {
289 struct aac_query_mount *dinfo;
290 struct aac_mount *dresp;
291
292 fsa_dev_ptr[index].devname[0] = '\0';
293
294 fib_init(fibptr);
295 dinfo = (struct aac_query_mount *) fib_data(fibptr);
296
297 dinfo->command = cpu_to_le32(VM_NameServe);
298 dinfo->count = cpu_to_le32(index);
299 dinfo->type = cpu_to_le32(FT_FILESYS);
300
301 status = fib_send(ContainerCommand,
302 fibptr,
303 sizeof (struct aac_query_mount),
304 FsaNormal,
305 1, 1,
306 NULL, NULL);
307 if (status < 0 ) {
308 printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n");
309 break;
310 }
311 dresp = (struct aac_mount *)fib_data(fibptr);
312
313 dprintk ((KERN_DEBUG
314 "VM_NameServe cid=%d status=%d vol=%d state=%d cap=%u\n",
315 (int)index, (int)le32_to_cpu(dresp->status),
316 (int)le32_to_cpu(dresp->mnt[0].vol),
317 (int)le32_to_cpu(dresp->mnt[0].state),
318 (unsigned)le32_to_cpu(dresp->mnt[0].capacity)));
319 if ((le32_to_cpu(dresp->status) == ST_OK) &&
320 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
321 (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
322 fsa_dev_ptr[index].valid = 1;
323 fsa_dev_ptr[index].type = le32_to_cpu(dresp->mnt[0].vol);
324 fsa_dev_ptr[index].size = le32_to_cpu(dresp->mnt[0].capacity);
325 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
326 fsa_dev_ptr[index].ro = 1;
327 }
328 fib_complete(fibptr);
329 /*
330 * If there are no more containers, then stop asking.
331 */
332 if ((index + 1) >= le32_to_cpu(dresp->count)){
333 break;
334 }
335 }
336 fib_free(fibptr);
337 return status;
338}
339
340static void aac_io_done(struct scsi_cmnd * scsicmd)
341{
342 unsigned long cpu_flags;
343 struct Scsi_Host *host = scsicmd->device->host;
344 spin_lock_irqsave(host->host_lock, cpu_flags);
345 scsicmd->scsi_done(scsicmd);
346 spin_unlock_irqrestore(host->host_lock, cpu_flags);
347}
348
349static void get_container_name_callback(void *context, struct fib * fibptr)
350{
351 struct aac_get_name_resp * get_name_reply;
352 struct scsi_cmnd * scsicmd;
353
354 scsicmd = (struct scsi_cmnd *) context;
355
356 dprintk((KERN_DEBUG "get_container_name_callback[cpu %d]: t = %ld.\n", smp_processor_id(), jiffies));
357 if (fibptr == NULL)
358 BUG();
359
360 get_name_reply = (struct aac_get_name_resp *) fib_data(fibptr);
361 /* Failure is irrelevant, using default value instead */
362 if ((le32_to_cpu(get_name_reply->status) == CT_OK)
363 && (get_name_reply->data[0] != '\0')) {
364 int count;
365 char * dp;
366 char * sp = get_name_reply->data;
367 sp[sizeof(((struct aac_get_name_resp *)NULL)->data)-1] = '\0';
368 while (*sp == ' ')
369 ++sp;
370 count = sizeof(((struct inquiry_data *)NULL)->inqd_pid);
371 dp = ((struct inquiry_data *)scsicmd->request_buffer)->inqd_pid;
372 if (*sp) do {
373 *dp++ = (*sp) ? *sp++ : ' ';
374 } while (--count > 0);
375 }
376 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
377
378 fib_complete(fibptr);
379 fib_free(fibptr);
380 aac_io_done(scsicmd);
381}
382
383/**
384 * aac_get_container_name - get container name, non-blocking.
385 */
386static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
387{
388 int status;
389 struct aac_get_name *dinfo;
390 struct fib * cmd_fibcontext;
391 struct aac_dev * dev;
392
393 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
394
395 if (!(cmd_fibcontext = fib_alloc(dev)))
396 return -ENOMEM;
397
398 fib_init(cmd_fibcontext);
399 dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext);
400
401 dinfo->command = cpu_to_le32(VM_ContainerConfig);
402 dinfo->type = cpu_to_le32(CT_READ_NAME);
403 dinfo->cid = cpu_to_le32(cid);
404 dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data));
405
406 status = fib_send(ContainerCommand,
407 cmd_fibcontext,
408 sizeof (struct aac_get_name),
409 FsaNormal,
410 0, 1,
411 (fib_callback) get_container_name_callback,
412 (void *) scsicmd);
413
414 /*
415 * Check that the command was queued to the controller
416 */
417 if (status == -EINPROGRESS)
418 return 0;
419
420 printk(KERN_WARNING "aac_get_container_name: fib_send failed with status: %d.\n", status);
421 fib_complete(cmd_fibcontext);
422 fib_free(cmd_fibcontext);
423 return -1;
424}
425
426/**
427 * probe_container - query a logical volume
428 * @dev: device to query
429 * @cid: container identifier
430 *
431 * Queries the controller about the given volume. The volume information
432 * is updated in the struct fsa_dev_info structure rather than returned.
433 */
434
435static int probe_container(struct aac_dev *dev, int cid)
436{
437 struct fsa_dev_info *fsa_dev_ptr;
438 int status;
439 struct aac_query_mount *dinfo;
440 struct aac_mount *dresp;
441 struct fib * fibptr;
442 unsigned instance;
443
444 fsa_dev_ptr = dev->fsa_dev;
445 instance = dev->scsi_host_ptr->unique_id;
446
447 if (!(fibptr = fib_alloc(dev)))
448 return -ENOMEM;
449
450 fib_init(fibptr);
451
452 dinfo = (struct aac_query_mount *)fib_data(fibptr);
453
454 dinfo->command = cpu_to_le32(VM_NameServe);
455 dinfo->count = cpu_to_le32(cid);
456 dinfo->type = cpu_to_le32(FT_FILESYS);
457
458 status = fib_send(ContainerCommand,
459 fibptr,
460 sizeof(struct aac_query_mount),
461 FsaNormal,
462 1, 1,
463 NULL, NULL);
464 if (status < 0) {
465 printk(KERN_WARNING "aacraid: probe_containers query failed.\n");
466 goto error;
467 }
468
469 dresp = (struct aac_mount *) fib_data(fibptr);
470
471 if ((le32_to_cpu(dresp->status) == ST_OK) &&
472 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
473 (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
474 fsa_dev_ptr[cid].valid = 1;
475 fsa_dev_ptr[cid].type = le32_to_cpu(dresp->mnt[0].vol);
476 fsa_dev_ptr[cid].size = le32_to_cpu(dresp->mnt[0].capacity);
477 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
478 fsa_dev_ptr[cid].ro = 1;
479 }
480
481error:
482 fib_complete(fibptr);
483 fib_free(fibptr);
484
485 return status;
486}
487
488/* Local Structure to set SCSI inquiry data strings */
489struct scsi_inq {
490 char vid[8]; /* Vendor ID */
491 char pid[16]; /* Product ID */
492 char prl[4]; /* Product Revision Level */
493};
494
495/**
496 * InqStrCopy - string merge
497 * @a: string to copy from
498 * @b: string to copy to
499 *
500 * Copy a String from one location to another
501 * without copying \0
502 */
503
504static void inqstrcpy(char *a, char *b)
505{
506
507 while(*a != (char)0)
508 *b++ = *a++;
509}
510
511static char *container_types[] = {
512 "None",
513 "Volume",
514 "Mirror",
515 "Stripe",
516 "RAID5",
517 "SSRW",
518 "SSRO",
519 "Morph",
520 "Legacy",
521 "RAID4",
522 "RAID10",
523 "RAID00",
524 "V-MIRRORS",
525 "PSEUDO R4",
526 "RAID50",
527 "Unknown"
528};
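/*
 * 16 entries; setinqstr() below only appends a type suffix when the
 * passed tindex falls inside this table, so out-of-range types keep
 * the bare model string.
 */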
529
530
531
532/* Function: setinqstr
533 *
534 * Arguments: device type, pointer to inquiry data block, container type index
535 *
536 * Purpose: Sets SCSI inquiry data strings for vendor, product
537 * and revision level. Allows strings to be set in platform dependent
538 * files instead of in OS dependent driver source.
539 */
540
541static void setinqstr(int devtype, void *data, int tindex)
542{
543 struct scsi_inq *str;
544 struct aac_driver_ident *mp;
545
546 mp = aac_get_driver_ident(devtype);
547
548 str = (struct scsi_inq *)(data); /* cast data to scsi inq block */
549
550 inqstrcpy (mp->vname, str->vid);
551 inqstrcpy (mp->model, str->pid); /* last six chars reserved for vol type */
552
553 if (tindex < (sizeof(container_types)/sizeof(char *))){
554 char *findit = str->pid;
555
556 for ( ; *findit != ' '; findit++); /* walk till we find a space */
557 /* RAID is superfluous in the context of a RAID device */
558 if (memcmp(findit-4, "RAID", 4) == 0)
559 *(findit -= 4) = ' ';
560 inqstrcpy (container_types[tindex], findit + 1);
561 }
562 inqstrcpy ("V1.0", str->prl);
563}
564
565void set_sense(u8 *sense_buf, u8 sense_key, u8 sense_code,
566 u8 a_sense_code, u8 incorrect_length,
567 u8 bit_pointer, u16 field_pointer,
568 u32 residue)
569{
570 sense_buf[0] = 0xF0; /* Sense data valid, err code 70h (current error) */
571 sense_buf[1] = 0; /* Segment number, always zero */
572
573 if (incorrect_length) {
574 sense_buf[2] = sense_key | 0x20;/* Set ILI bit | sense key */
575 sense_buf[3] = BYTE3(residue);
576 sense_buf[4] = BYTE2(residue);
577 sense_buf[5] = BYTE1(residue);
578 sense_buf[6] = BYTE0(residue);
579 } else
580 sense_buf[2] = sense_key; /* Sense key */
581
582 if (sense_key == ILLEGAL_REQUEST)
583 sense_buf[7] = 10; /* Additional sense length */
584 else
585 sense_buf[7] = 6; /* Additional sense length */
586
587 sense_buf[12] = sense_code; /* Additional sense code */
588 sense_buf[13] = a_sense_code; /* Additional sense code qualifier */
589 if (sense_key == ILLEGAL_REQUEST) {
590 sense_buf[15] = 0;
591
592 if (sense_code == SENCODE_INVALID_PARAM_FIELD)
593 sense_buf[15] = 0x80;/* Std sense key specific field */
594 /* Illegal parameter is in the parameter block */
595
596 if (sense_code == SENCODE_INVALID_CDB_FIELD)
597 sense_buf[15] = 0xc0;/* Std sense key specific field */
598 /* Illegal parameter is in the CDB block */
599 sense_buf[15] |= bit_pointer;
600 sense_buf[16] = field_pointer >> 8; /* MSB */
601 sense_buf[17] = field_pointer; /* LSB */
602 }
603}
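/*
 * Worked example (values follow directly from the code above): the
 * ILLEGAL_REQUEST sense built for an unsupported opcode,
 *
 *   set_sense(buf, ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
 *             ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
 *
 * yields buf[0] = 0xF0, buf[2] = 0x05, buf[7] = 10 (additional length),
 * buf[12] = 0x20, buf[13] = 0x00 and buf[15..17] = 0.
 */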
604
605int aac_get_adapter_info(struct aac_dev* dev)
606{
607 struct fib* fibptr;
608 struct aac_adapter_info* info;
609 int rcode;
610 u32 tmp;
611 if (!(fibptr = fib_alloc(dev)))
612 return -ENOMEM;
613
614 fib_init(fibptr);
615 info = (struct aac_adapter_info*) fib_data(fibptr);
616
617 memset(info,0,sizeof(struct aac_adapter_info));
618
619 rcode = fib_send(RequestAdapterInfo,
620 fibptr,
621 sizeof(struct aac_adapter_info),
622 FsaNormal,
623 1, 1,
624 NULL,
625 NULL);
626
627 memcpy(&dev->adapter_info, info, sizeof(struct aac_adapter_info));
628
629 tmp = le32_to_cpu(dev->adapter_info.kernelrev);
630 printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d]\n",
631 dev->name,
632 dev->id,
633 tmp>>24,
634 (tmp>>16)&0xff,
635 tmp&0xff,
636 le32_to_cpu(dev->adapter_info.kernelbuild));
637 tmp = le32_to_cpu(dev->adapter_info.monitorrev);
638 printk(KERN_INFO "%s%d: monitor %d.%d-%d[%d]\n",
639 dev->name, dev->id,
640 tmp>>24,(tmp>>16)&0xff,tmp&0xff,
641 le32_to_cpu(dev->adapter_info.monitorbuild));
642 tmp = le32_to_cpu(dev->adapter_info.biosrev);
643 printk(KERN_INFO "%s%d: bios %d.%d-%d[%d]\n",
644 dev->name, dev->id,
645 tmp>>24,(tmp>>16)&0xff,tmp&0xff,
646 le32_to_cpu(dev->adapter_info.biosbuild));
647 if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0)
648 printk(KERN_INFO "%s%d: serial %x\n",
649 dev->name, dev->id,
650 le32_to_cpu(dev->adapter_info.serial[0]));
651
652 dev->nondasd_support = 0;
653 dev->raid_scsi_mode = 0;
654 if(dev->adapter_info.options & AAC_OPT_NONDASD){
655 dev->nondasd_support = 1;
656 }
657
658 /*
659 * If the firmware supports ROMB RAID/SCSI mode and we are currently
660 * in RAID/SCSI mode, set the flag. For now if in this mode we will
661 * force nondasd support on. If we decide to allow the non-dasd flag
662 * additional changes will have to be made to support
663 * RAID/SCSI. The function aac_scsi_cmd in this module will have to be
664 * changed to support the new dev->raid_scsi_mode flag instead of
665 * leeching off the dev->nondasd_support flag. Also in linit.c the
666 * function aac_detect will have to be modified where it sets up the
667 * max number of channels based on the aac->nondasd_support flag only.
668 */
669 if ((dev->adapter_info.options & AAC_OPT_SCSI_MANAGED) &&
670 (dev->adapter_info.options & AAC_OPT_RAID_SCSI_MODE)) {
671 dev->nondasd_support = 1;
672 dev->raid_scsi_mode = 1;
673 }
674 if (dev->raid_scsi_mode != 0)
675 printk(KERN_INFO "%s%d: ROMB RAID/SCSI mode enabled\n",
676 dev->name, dev->id);
677
678 if(nondasd != -1) {
679 dev->nondasd_support = (nondasd!=0);
680 }
681 if(dev->nondasd_support != 0){
682 printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id);
683 }
684
685 dev->dac_support = 0;
686 if( (sizeof(dma_addr_t) > 4) && (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){
687 printk(KERN_INFO "%s%d: 64bit support enabled.\n", dev->name, dev->id);
688 dev->dac_support = 1;
689 }
690
691 if(dacmode != -1) {
692 dev->dac_support = (dacmode!=0);
693 }
694 if(dev->dac_support != 0) {
695 if (!pci_set_dma_mask(dev->pdev, 0xFFFFFFFFFFFFFFFFULL) &&
696 !pci_set_consistent_dma_mask(dev->pdev, 0xFFFFFFFFFFFFFFFFULL)) {
697 printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n",
698 dev->name, dev->id);
699 } else if (!pci_set_dma_mask(dev->pdev, 0xFFFFFFFFULL) &&
700 !pci_set_consistent_dma_mask(dev->pdev, 0xFFFFFFFFULL)) {
701 printk(KERN_INFO"%s%d: DMA mask set failed, 64 Bit DAC disabled\n",
702 dev->name, dev->id);
703 dev->dac_support = 0;
704 } else {
705 printk(KERN_WARNING"%s%d: No suitable DMA available.\n",
706 dev->name, dev->id);
707 rcode = -ENOMEM;
708 }
709 }
710
711 fib_complete(fibptr);
712 fib_free(fibptr);
713
714 return rcode;
715}
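/*
 * The version words printed above decode as <major>.<minor>-<dash>;
 * e.g. a kernelrev of 0x02040005 (illustrative value) comes out as
 * "kernel 2.4-5[<build>]".
 */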
716
717
718static void read_callback(void *context, struct fib * fibptr)
719{
720 struct aac_dev *dev;
721 struct aac_read_reply *readreply;
722 struct scsi_cmnd *scsicmd;
723 u32 lba;
724 u32 cid;
725
726 scsicmd = (struct scsi_cmnd *) context;
727
728 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
729 cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
730
731 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
732 dprintk((KERN_DEBUG "read_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
733
734 if (fibptr == NULL)
735 BUG();
736
737 if(scsicmd->use_sg)
738 pci_unmap_sg(dev->pdev,
739 (struct scatterlist *)scsicmd->buffer,
740 scsicmd->use_sg,
741 scsicmd->sc_data_direction);
742 else if(scsicmd->request_bufflen)
743 pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle,
744 scsicmd->request_bufflen,
745 scsicmd->sc_data_direction);
746 readreply = (struct aac_read_reply *)fib_data(fibptr);
747 if (le32_to_cpu(readreply->status) == ST_OK)
748 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
749 else {
750 printk(KERN_WARNING "read_callback: read failed, status = %d\n",
751 le32_to_cpu(readreply->status));
752 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
753 set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
754 HARDWARE_ERROR,
755 SENCODE_INTERNAL_TARGET_FAILURE,
756 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
757 0, 0);
758 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
759 (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
760 ? sizeof(scsicmd->sense_buffer)
761 : sizeof(dev->fsa_dev[cid].sense_data));
762 }
763 fib_complete(fibptr);
764 fib_free(fibptr);
765
766 aac_io_done(scsicmd);
767}
768
769static void write_callback(void *context, struct fib * fibptr)
770{
771 struct aac_dev *dev;
772 struct aac_write_reply *writereply;
773 struct scsi_cmnd *scsicmd;
774 u32 lba;
775 u32 cid;
776
777 scsicmd = (struct scsi_cmnd *) context;
778 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
779 cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
780
781 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
782 dprintk((KERN_DEBUG "write_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
783 if (fibptr == NULL)
784 BUG();
785
786 if(scsicmd->use_sg)
787 pci_unmap_sg(dev->pdev,
788 (struct scatterlist *)scsicmd->buffer,
789 scsicmd->use_sg,
790 scsicmd->sc_data_direction);
791 else if(scsicmd->request_bufflen)
792 pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle,
793 scsicmd->request_bufflen,
794 scsicmd->sc_data_direction);
795
796 writereply = (struct aac_write_reply *) fib_data(fibptr);
797 if (le32_to_cpu(writereply->status) == ST_OK)
798 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
799 else {
800 printk(KERN_WARNING "write_callback: write failed, status = %d\n", le32_to_cpu(writereply->status));
801 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
802 set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
803 HARDWARE_ERROR,
804 SENCODE_INTERNAL_TARGET_FAILURE,
805 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
806 0, 0);
807 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
808 sizeof(struct sense_data));
809 }
810
811 fib_complete(fibptr);
812 fib_free(fibptr);
813 aac_io_done(scsicmd);
814}
815
816int aac_read(struct scsi_cmnd * scsicmd, int cid)
817{
818 u32 lba;
819 u32 count;
820 int status;
821
822 u16 fibsize;
823 struct aac_dev *dev;
824 struct fib * cmd_fibcontext;
825
826 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
827 /*
828 * Get block address and transfer length
829 */
830 if (scsicmd->cmnd[0] == READ_6) /* 6 byte command */
831 {
832 dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", cid));
833
834 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
835 count = scsicmd->cmnd[4];
836
837 if (count == 0)
838 count = 256;
839 } else {
840 dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", cid));
841
842 lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
843 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
844 }
845 dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
846 /*
847 * Allocate and initialize a Fib
848 */
849 if (!(cmd_fibcontext = fib_alloc(dev))) {
850 return -1;
851 }
852
853 fib_init(cmd_fibcontext);
854
855 if(dev->dac_support == 1) {
856 struct aac_read64 *readcmd;
857 readcmd = (struct aac_read64 *) fib_data(cmd_fibcontext);
858 readcmd->command = cpu_to_le32(VM_CtHostRead64);
859 readcmd->cid = cpu_to_le16(cid);
860 readcmd->sector_count = cpu_to_le16(count);
861 readcmd->block = cpu_to_le32(lba);
862 readcmd->pad = 0;
863 readcmd->flags = 0;
864
865 aac_build_sg64(scsicmd, &readcmd->sg);
866 fibsize = sizeof(struct aac_read64) +
867 ((le32_to_cpu(readcmd->sg.count) - 1) *
868 sizeof (struct sgentry64));
869 BUG_ON (fibsize > (sizeof(struct hw_fib) -
870 sizeof(struct aac_fibhdr)));
871 /*
872 * Now send the Fib to the adapter
873 */
874 status = fib_send(ContainerCommand64,
875 cmd_fibcontext,
876 fibsize,
877 FsaNormal,
878 0, 1,
879 (fib_callback) read_callback,
880 (void *) scsicmd);
881 } else {
882 struct aac_read *readcmd;
883 readcmd = (struct aac_read *) fib_data(cmd_fibcontext);
884 readcmd->command = cpu_to_le32(VM_CtBlockRead);
885 readcmd->cid = cpu_to_le32(cid);
886 readcmd->block = cpu_to_le32(lba);
887 readcmd->count = cpu_to_le32(count * 512);
888
889 if (count * 512 > (64 * 1024))
890 BUG();
891
892 aac_build_sg(scsicmd, &readcmd->sg);
893 fibsize = sizeof(struct aac_read) +
894 ((le32_to_cpu(readcmd->sg.count) - 1) *
895 sizeof (struct sgentry));
896 BUG_ON (fibsize > (sizeof(struct hw_fib) -
897 sizeof(struct aac_fibhdr)));
898 /*
899 * Now send the Fib to the adapter
900 */
901 status = fib_send(ContainerCommand,
902 cmd_fibcontext,
903 fibsize,
904 FsaNormal,
905 0, 1,
906 (fib_callback) read_callback,
907 (void *) scsicmd);
908 }
909
910
911
912 /*
913 * Check that the command was queued to the controller
914 */
915 if (status == -EINPROGRESS)
916 return 0;
917
918 printk(KERN_WARNING "aac_read: fib_send failed with status: %d.\n", status);
919 /*
920 * For some reason, the Fib didn't queue, return QUEUE_FULL
921 */
922 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
923 aac_io_done(scsicmd);
924 fib_complete(cmd_fibcontext);
925 fib_free(cmd_fibcontext);
926 return 0;
927}
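/*
 * CDB decoding sketch for aac_read() above (illustrative bytes): a
 * READ(10) CDB of 28 00 00 12 34 56 00 00 80 00 yields lba = 0x123456
 * and count = 0x80 = 128 sectors, i.e. exactly the 64KB ceiling guarded
 * by BUG(). A READ(6) CDB carries only a 21-bit lba and a one-byte
 * count, where 0 means 256 sectors.
 */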
928
929static int aac_write(struct scsi_cmnd * scsicmd, int cid)
930{
931 u32 lba;
932 u32 count;
933 int status;
934 u16 fibsize;
935 struct aac_dev *dev;
936 struct fib * cmd_fibcontext;
937
938 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
939 /*
940 * Get block address and transfer length
941 */
942 if (scsicmd->cmnd[0] == WRITE_6) /* 6 byte command */
943 {
944 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
945 count = scsicmd->cmnd[4];
946 if (count == 0)
947 count = 256;
948 } else {
949 dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", cid));
950 lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
951 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
952 }
953 dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %u, t = %ld.\n",
954 smp_processor_id(), (unsigned long long)lba, jiffies));
955 /*
956 * Allocate and initialize a Fib then setup a BlockWrite command
957 */
958 if (!(cmd_fibcontext = fib_alloc(dev))) {
959 scsicmd->result = DID_ERROR << 16;
960 aac_io_done(scsicmd);
961 return 0;
962 }
963 fib_init(cmd_fibcontext);
964
965 if(dev->dac_support == 1) {
966 struct aac_write64 *writecmd;
967 writecmd = (struct aac_write64 *) fib_data(cmd_fibcontext);
968 writecmd->command = cpu_to_le32(VM_CtHostWrite64);
969 writecmd->cid = cpu_to_le16(cid);
970 writecmd->sector_count = cpu_to_le16(count);
971 writecmd->block = cpu_to_le32(lba);
972 writecmd->pad = 0;
973 writecmd->flags = 0;
974
975 aac_build_sg64(scsicmd, &writecmd->sg);
976 fibsize = sizeof(struct aac_write64) +
977 ((le32_to_cpu(writecmd->sg.count) - 1) *
978 sizeof (struct sgentry64));
979 BUG_ON (fibsize > (sizeof(struct hw_fib) -
980 sizeof(struct aac_fibhdr)));
981 /*
982 * Now send the Fib to the adapter
983 */
984 status = fib_send(ContainerCommand64,
985 cmd_fibcontext,
986 fibsize,
987 FsaNormal,
988 0, 1,
989 (fib_callback) write_callback,
990 (void *) scsicmd);
991 } else {
992 struct aac_write *writecmd;
993 writecmd = (struct aac_write *) fib_data(cmd_fibcontext);
994 writecmd->command = cpu_to_le32(VM_CtBlockWrite);
995 writecmd->cid = cpu_to_le32(cid);
996 writecmd->block = cpu_to_le32(lba);
997 writecmd->count = cpu_to_le32(count * 512);
998 writecmd->sg.count = cpu_to_le32(1);
999 /* ->stable is not used - it did mean which type of write */
1000
1001 if (count * 512 > (64 * 1024)) {
1002 BUG();
1003 }
1004
1005 aac_build_sg(scsicmd, &writecmd->sg);
1006 fibsize = sizeof(struct aac_write) +
1007 ((le32_to_cpu(writecmd->sg.count) - 1) *
1008 sizeof (struct sgentry));
1009 BUG_ON (fibsize > (sizeof(struct hw_fib) -
1010 sizeof(struct aac_fibhdr)));
1011 /*
1012 * Now send the Fib to the adapter
1013 */
1014 status = fib_send(ContainerCommand,
1015 cmd_fibcontext,
1016 fibsize,
1017 FsaNormal,
1018 0, 1,
1019 (fib_callback) write_callback,
1020 (void *) scsicmd);
1021 }
1022
1023 /*
1024 * Check that the command was queued to the controller
1025 */
1026 if (status == -EINPROGRESS)
1027 {
1028 dprintk("write queued.\n");
1029 return 0;
1030 }
1031
1032 printk(KERN_WARNING "aac_write: fib_send failed with status: %d\n", status);
1033 /*
1034 * For some reason, the Fib didn't queue, return QUEUE_FULL
1035 */
1036 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
1037 aac_io_done(scsicmd);
1038
1039 fib_complete(cmd_fibcontext);
1040 fib_free(cmd_fibcontext);
1041 return 0;
1042}
1043
1044static void synchronize_callback(void *context, struct fib *fibptr)
1045{
1046 struct aac_synchronize_reply *synchronizereply;
1047 struct scsi_cmnd *cmd;
1048
1049 cmd = context;
1050
1051 dprintk((KERN_DEBUG "synchronize_callback[cpu %d]: t = %ld.\n",
1052 smp_processor_id(), jiffies));
1053 BUG_ON(fibptr == NULL);
1054
1055
1056 synchronizereply = fib_data(fibptr);
1057 if (le32_to_cpu(synchronizereply->status) == CT_OK)
1058 cmd->result = DID_OK << 16 |
1059 COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1060 else {
1061 struct scsi_device *sdev = cmd->device;
1062 struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
1063 u32 cid = ID_LUN_TO_CONTAINER(sdev->id, sdev->lun);
1064 printk(KERN_WARNING
1065 "synchronize_callback: synchronize failed, status = %d\n",
1066 le32_to_cpu(synchronizereply->status));
1067 cmd->result = DID_OK << 16 |
1068 COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
1069 set_sense((u8 *)&dev->fsa_dev[cid].sense_data,
1070 HARDWARE_ERROR,
1071 SENCODE_INTERNAL_TARGET_FAILURE,
1072 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
1073 0, 0);
1074 memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1075 min(sizeof(dev->fsa_dev[cid].sense_data),
1076 sizeof(cmd->sense_buffer)));
1077 }
1078
1079 fib_complete(fibptr);
1080 fib_free(fibptr);
1081 aac_io_done(cmd);
1082}
1083
1084static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
1085{
1086 int status;
1087 struct fib *cmd_fibcontext;
1088 struct aac_synchronize *synchronizecmd;
1089 struct scsi_cmnd *cmd;
1090 struct scsi_device *sdev = scsicmd->device;
1091 int active = 0;
1092 unsigned long flags;
1093
1094 /*
1095 * Wait for all commands to complete to this specific
1096 * target (block).
1097 */
1098 spin_lock_irqsave(&sdev->list_lock, flags);
1099 list_for_each_entry(cmd, &sdev->cmd_list, list)
1100 if (cmd != scsicmd && cmd->serial_number != 0) {
1101 ++active;
1102 break;
1103 }
1104
1105 spin_unlock_irqrestore(&sdev->list_lock, flags);
1106
1107 /*
1108 * Yield the processor (requeue for later)
1109 */
1110 if (active)
1111 return SCSI_MLQUEUE_DEVICE_BUSY;
1112
1113 /*
1114 * Allocate and initialize a Fib
1115 */
1116 if (!(cmd_fibcontext =
1117 fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata)))
1118 return SCSI_MLQUEUE_HOST_BUSY;
1119
1120 fib_init(cmd_fibcontext);
1121
1122 synchronizecmd = fib_data(cmd_fibcontext);
1123 synchronizecmd->command = cpu_to_le32(VM_ContainerConfig);
1124 synchronizecmd->type = cpu_to_le32(CT_FLUSH_CACHE);
1125 synchronizecmd->cid = cpu_to_le32(cid);
1126 synchronizecmd->count =
1127 cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data));
1128
1129 /*
1130 * Now send the Fib to the adapter
1131 */
1132 status = fib_send(ContainerCommand,
1133 cmd_fibcontext,
1134 sizeof(struct aac_synchronize),
1135 FsaNormal,
1136 0, 1,
1137 (fib_callback)synchronize_callback,
1138 (void *)scsicmd);
1139
1140 /*
1141 * Check that the command was queued to the controller
1142 */
1143 if (status == -EINPROGRESS)
1144 return 0;
1145
1146 printk(KERN_WARNING
1147 "aac_synchronize: fib_send failed with status: %d.\n", status);
1148 fib_complete(cmd_fibcontext);
1149 fib_free(cmd_fibcontext);
1150 return SCSI_MLQUEUE_HOST_BUSY;
1151}
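/*
 * The cmd_list scan above means the flush is only issued once the
 * device is otherwise idle; while other commands are outstanding the
 * midlayer is asked to requeue via SCSI_MLQUEUE_DEVICE_BUSY and will
 * retry the SYNCHRONIZE CACHE later.
 */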
1152
1153/**
1154 * aac_scsi_cmd() - Process SCSI command
1155 * @scsicmd: SCSI command block
1156 *
1157 * Emulate a SCSI command and queue the required request for the
1158 * aacraid firmware.
1159 */
1160
1161int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1162{
1163 u32 cid = 0;
1164 struct Scsi_Host *host = scsicmd->device->host;
1165 struct aac_dev *dev = (struct aac_dev *)host->hostdata;
1166 struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev;
1167 int cardtype = dev->cardtype;
1168 int ret;
1169
1170 /*
1171 * If the bus, id or lun is out of range, return fail
1172 * Test does not apply to ID 16, the pseudo id for the controller
1173 * itself.
1174 */
1175 if (scsicmd->device->id != host->this_id) {
1176 if ((scsicmd->device->channel == 0) ){
1177 if( (scsicmd->device->id >= dev->maximum_num_containers) || (scsicmd->device->lun != 0)){
1178 scsicmd->result = DID_NO_CONNECT << 16;
1179 scsicmd->scsi_done(scsicmd);
1180 return 0;
1181 }
1182 cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
1183
1184 /*
1185 * If the target container doesn't exist, it may have
1186 * been newly created
1187 */
1188 if ((fsa_dev_ptr[cid].valid & 1) == 0) {
1189 switch (scsicmd->cmnd[0]) {
1190 case INQUIRY:
1191 case READ_CAPACITY:
1192 case TEST_UNIT_READY:
1193 spin_unlock_irq(host->host_lock);
1194 probe_container(dev, cid);
1195 spin_lock_irq(host->host_lock);
1196 if (fsa_dev_ptr[cid].valid == 0) {
1197 scsicmd->result = DID_NO_CONNECT << 16;
1198 scsicmd->scsi_done(scsicmd);
1199 return 0;
1200 }
1201 default:
1202 break;
1203 }
1204 }
1205 /*
1206 * If the target container still doesn't exist,
1207 * return failure
1208 */
1209 if (fsa_dev_ptr[cid].valid == 0) {
1210 scsicmd->result = DID_BAD_TARGET << 16;
1211 scsicmd->scsi_done(scsicmd);
1212 return 0;
1213 }
1214 } else { /* check for physical non-dasd devices */
1215 if(dev->nondasd_support == 1){
1216 return aac_send_srb_fib(scsicmd);
1217 } else {
1218 scsicmd->result = DID_NO_CONNECT << 16;
1219 scsicmd->scsi_done(scsicmd);
1220 return 0;
1221 }
1222 }
1223 }
1224 /*
1225 * else Command for the controller itself
1226 */
1227 else if ((scsicmd->cmnd[0] != INQUIRY) && /* only INQUIRY & TUR cmnd supported for controller */
1228 (scsicmd->cmnd[0] != TEST_UNIT_READY))
1229 {
1230 dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
1231 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
1232 set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
1233 ILLEGAL_REQUEST,
1234 SENCODE_INVALID_COMMAND,
1235 ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
1236 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1237 (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
1238 ? sizeof(scsicmd->sense_buffer)
1239 : sizeof(dev->fsa_dev[cid].sense_data));
1240 scsicmd->scsi_done(scsicmd);
1241 return 0;
1242 }
1243
1244
1245 /* Handle commands here that don't really require going out to the adapter */
1246 switch (scsicmd->cmnd[0]) {
1247 case INQUIRY:
1248 {
1249 struct inquiry_data *inq_data_ptr;
1250
1251 dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", scsicmd->device->id));
1252 inq_data_ptr = (struct inquiry_data *)scsicmd->request_buffer;
1253 memset(inq_data_ptr, 0, sizeof (struct inquiry_data));
1254
1255 inq_data_ptr->inqd_ver = 2; /* claim compliance to SCSI-2 */
1256 inq_data_ptr->inqd_dtq = 0x80; /* set RMB bit to one indicating that the medium is removable */
1257 inq_data_ptr->inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
1258 inq_data_ptr->inqd_len = 31;
1259 /*Format for "pad2" is RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */
1260 inq_data_ptr->inqd_pad2= 0x32 ; /*WBus16|Sync|CmdQue */
1261 /*
1262 * Set the Vendor, Product, and Revision Level
1263 * see: <vendor>.c i.e. aac.c
1264 */
1265 if (scsicmd->device->id == host->this_id) {
1266 setinqstr(cardtype, (void *) (inq_data_ptr->inqd_vid), (sizeof(container_types)/sizeof(char *)));
1267 inq_data_ptr->inqd_pdt = INQD_PDT_PROC; /* Processor device */
1268 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1269 scsicmd->scsi_done(scsicmd);
1270 return 0;
1271 }
1272 setinqstr(cardtype, (void *) (inq_data_ptr->inqd_vid), fsa_dev_ptr[cid].type);
1273 inq_data_ptr->inqd_pdt = INQD_PDT_DA; /* Direct/random access device */
1274 return aac_get_container_name(scsicmd, cid);
1275 }
1276 case READ_CAPACITY:
1277 {
1278 u32 capacity;
1279 char *cp;
1280
1281 dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
1282 if (fsa_dev_ptr[cid].size <= 0x100000000LL)
1283 capacity = fsa_dev_ptr[cid].size - 1;
1284 else
1285 capacity = (u32)-1;
1286 cp = scsicmd->request_buffer;
1287 cp[0] = (capacity >> 24) & 0xff;
1288 cp[1] = (capacity >> 16) & 0xff;
1289 cp[2] = (capacity >> 8) & 0xff;
1290 cp[3] = (capacity >> 0) & 0xff;
1291 cp[4] = 0;
1292 cp[5] = 0;
1293 cp[6] = 2;
1294 cp[7] = 0;
1295
1296 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1297 scsicmd->scsi_done(scsicmd);
1298
1299 return 0;
1300 }
1301
1302 case MODE_SENSE:
1303 {
1304 char *mode_buf;
1305
1306 dprintk((KERN_DEBUG "MODE SENSE command.\n"));
1307 mode_buf = scsicmd->request_buffer;
1308 mode_buf[0] = 3; /* Mode data length */
1309 mode_buf[1] = 0; /* Medium type - default */
1310 mode_buf[2] = 0; /* Device-specific param, bit 8: 0/1 = write enabled/protected */
1311 mode_buf[3] = 0; /* Block descriptor length */
1312
1313 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1314 scsicmd->scsi_done(scsicmd);
1315
1316 return 0;
1317 }
1318 case MODE_SENSE_10:
1319 {
1320 char *mode_buf;
1321
1322 dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n"));
1323 mode_buf = scsicmd->request_buffer;
1324 mode_buf[0] = 0; /* Mode data length (MSB) */
1325 mode_buf[1] = 6; /* Mode data length (LSB) */
1326 mode_buf[2] = 0; /* Medium type - default */
1327 mode_buf[3] = 0; /* Device-specific param, bit 8: 0/1 = write enabled/protected */
1328 mode_buf[4] = 0; /* reserved */
1329 mode_buf[5] = 0; /* reserved */
1330 mode_buf[6] = 0; /* Block descriptor length (MSB) */
1331 mode_buf[7] = 0; /* Block descriptor length (LSB) */
1332
1333 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1334 scsicmd->scsi_done(scsicmd);
1335
1336 return 0;
1337 }
1338 case REQUEST_SENSE:
1339 dprintk((KERN_DEBUG "REQUEST SENSE command.\n"));
1340 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, sizeof (struct sense_data));
1341 memset(&dev->fsa_dev[cid].sense_data, 0, sizeof (struct sense_data));
1342 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1343 scsicmd->scsi_done(scsicmd);
1344 return 0;
1345
1346 case ALLOW_MEDIUM_REMOVAL:
1347 dprintk((KERN_DEBUG "LOCK command.\n"));
1348 if (scsicmd->cmnd[4])
1349 fsa_dev_ptr[cid].locked = 1;
1350 else
1351 fsa_dev_ptr[cid].locked = 0;
1352
1353 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1354 scsicmd->scsi_done(scsicmd);
1355 return 0;
1356 /*
1357 * These commands are all No-Ops
1358 */
1359 case TEST_UNIT_READY:
1360 case RESERVE:
1361 case RELEASE:
1362 case REZERO_UNIT:
1363 case REASSIGN_BLOCKS:
1364 case SEEK_10:
1365 case START_STOP:
1366 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1367 scsicmd->scsi_done(scsicmd);
1368 return 0;
1369 }
1370
1371 switch (scsicmd->cmnd[0])
1372 {
1373 case READ_6:
1374 case READ_10:
1375 /*
1376 * Hack to keep track of ordinal number of the device that
1377 * corresponds to a container. Needed to convert
1378 * containers to /dev/sd device names
1379 */
1380
1381 spin_unlock_irq(host->host_lock);
1382 if (scsicmd->request->rq_disk)
1383 memcpy(fsa_dev_ptr[cid].devname,
1384 scsicmd->request->rq_disk->disk_name,
1385 8);
1386
1387 ret = aac_read(scsicmd, cid);
1388 spin_lock_irq(host->host_lock);
1389 return ret;
1390
1391 case WRITE_6:
1392 case WRITE_10:
1393 spin_unlock_irq(host->host_lock);
1394 ret = aac_write(scsicmd, cid);
1395 spin_lock_irq(host->host_lock);
1396 return ret;
1397
1398 case SYNCHRONIZE_CACHE:
1399 /* Issue FIB to tell firmware to flush its cache */
1400 return aac_synchronize(scsicmd, cid);
1401
1402 default:
1403 /*
1404 * Unhandled commands
1405 */
1406 printk(KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", scsicmd->cmnd[0]);
1407 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
1408 set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
1409 ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
1410 ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
1411 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1412 (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
1413 ? sizeof(scsicmd->sense_buffer)
1414 : sizeof(dev->fsa_dev[cid].sense_data));
1415 scsicmd->scsi_done(scsicmd);
1416 return 0;
1417 }
1418}
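/*
 * Note on the READ CAPACITY handling above: SCSI expects the address
 * of the *last* block, hence capacity = size - 1, and bytes 4-7 of the
 * response encode a fixed 512-byte block length (cp[6] = 2, i.e.
 * 0x00000200).
 */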
1419
1420static int query_disk(struct aac_dev *dev, void __user *arg)
1421{
1422 struct aac_query_disk qd;
1423 struct fsa_dev_info *fsa_dev_ptr;
1424
1425 fsa_dev_ptr = dev->fsa_dev;
1426 if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
1427 return -EFAULT;
1428 if (qd.cnum == -1)
1429 qd.cnum = ID_LUN_TO_CONTAINER(qd.id, qd.lun);
1430 else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1))
1431 {
1432 if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers)
1433 return -EINVAL;
1434 qd.instance = dev->scsi_host_ptr->host_no;
1435 qd.bus = 0;
1436 qd.id = CONTAINER_TO_ID(qd.cnum);
1437 qd.lun = CONTAINER_TO_LUN(qd.cnum);
1438 }
1439 else return -EINVAL;
1440
1441 qd.valid = fsa_dev_ptr[qd.cnum].valid;
1442 qd.locked = fsa_dev_ptr[qd.cnum].locked;
1443 qd.deleted = fsa_dev_ptr[qd.cnum].deleted;
1444
1445 if (fsa_dev_ptr[qd.cnum].devname[0] == '\0')
1446 qd.unmapped = 1;
1447 else
1448 qd.unmapped = 0;
1449
1450 strlcpy(qd.name, fsa_dev_ptr[qd.cnum].devname,
1451 min(sizeof(qd.name), sizeof(fsa_dev_ptr[qd.cnum].devname) + 1));
1452
1453 if (copy_to_user(arg, &qd, sizeof (struct aac_query_disk)))
1454 return -EFAULT;
1455 return 0;
1456}
1457
1458static int force_delete_disk(struct aac_dev *dev, void __user *arg)
1459{
1460 struct aac_delete_disk dd;
1461 struct fsa_dev_info *fsa_dev_ptr;
1462
1463 fsa_dev_ptr = dev->fsa_dev;
1464
1465 if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
1466 return -EFAULT;
1467
1468 if (dd.cnum >= dev->maximum_num_containers)
1469 return -EINVAL;
1470 /*
1471 * Mark this container as being deleted.
1472 */
1473 fsa_dev_ptr[dd.cnum].deleted = 1;
1474 /*
1475 * Mark the container as no longer valid
1476 */
1477 fsa_dev_ptr[dd.cnum].valid = 0;
1478 return 0;
1479}
1480
1481static int delete_disk(struct aac_dev *dev, void __user *arg)
1482{
1483 struct aac_delete_disk dd;
1484 struct fsa_dev_info *fsa_dev_ptr;
1485
1486 fsa_dev_ptr = dev->fsa_dev;
1487
1488 if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
1489 return -EFAULT;
1490
1491 if (dd.cnum >= dev->maximum_num_containers)
1492 return -EINVAL;
1493 /*
1494 * If the container is locked, it can not be deleted by the API.
1495 */
1496 if (fsa_dev_ptr[dd.cnum].locked)
1497 return -EBUSY;
1498 else {
1499 /*
1500 * Mark the container as no longer being valid.
1501 */
1502 fsa_dev_ptr[dd.cnum].valid = 0;
1503 fsa_dev_ptr[dd.cnum].devname[0] = '\0';
1504 return 0;
1505 }
1506}
1507
1508int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg)
1509{
1510 switch (cmd) {
1511 case FSACTL_QUERY_DISK:
1512 return query_disk(dev, arg);
1513 case FSACTL_DELETE_DISK:
1514 return delete_disk(dev, arg);
1515 case FSACTL_FORCE_DELETE_DISK:
1516 return force_delete_disk(dev, arg);
1517 case FSACTL_GET_CONTAINERS:
1518 return aac_get_containers(dev);
1519 default:
1520 return -ENOTTY;
1521 }
1522}
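/*
 * User-space usage sketch for the ioctls above (hypothetical fd on the
 * adapter's character device; struct and FSACTL numbers as declared in
 * aacraid.h):
 *
 *   struct aac_query_disk qd = { .cnum = 0, .bus = -1, .id = -1, .lun = -1 };
 *   if (ioctl(fd, FSACTL_QUERY_DISK, &qd) == 0)
 *       printf("valid=%d locked=%d name=%s\n", qd.valid, qd.locked, qd.name);
 */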
1523
1524/**
1525 *
1526 * aac_srb_callback
1527 * @context: the context set in the fib - here it is scsi cmd
1528 * @fibptr: pointer to the fib
1529 *
1530 * Handles the completion of a scsi command to a non-DASD device
1531 *
1532 */
1533
1534static void aac_srb_callback(void *context, struct fib * fibptr)
1535{
1536 struct aac_dev *dev;
1537 struct aac_srb_reply *srbreply;
1538 struct scsi_cmnd *scsicmd;
1539
1540 scsicmd = (struct scsi_cmnd *) context;
1541 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
1542
1543 if (fibptr == NULL)
1544 BUG();
1545
1546 srbreply = (struct aac_srb_reply *) fib_data(fibptr);
1547
1548 scsicmd->sense_buffer[0] = '\0'; /* Initialize sense valid flag to false */
1549 /*
1550 * Calculate resid for sg
1551 */
1552
1553 scsicmd->resid = scsicmd->request_bufflen -
1554 le32_to_cpu(srbreply->data_xfer_length);
1555
1556 if(scsicmd->use_sg)
1557 pci_unmap_sg(dev->pdev,
1558 (struct scatterlist *)scsicmd->buffer,
1559 scsicmd->use_sg,
1560 scsicmd->sc_data_direction);
1561 else if(scsicmd->request_bufflen)
1562 pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle, scsicmd->request_bufflen,
1563 scsicmd->sc_data_direction);
1564
1565 /*
1566 * First check the fib status
1567 */
1568
1569 if (le32_to_cpu(srbreply->status) != ST_OK){
1570 int len;
1571 printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", le32_to_cpu(srbreply->status));
1572 len = (le32_to_cpu(srbreply->sense_data_size) >
1573 sizeof(scsicmd->sense_buffer)) ?
1574 sizeof(scsicmd->sense_buffer) :
1575 le32_to_cpu(srbreply->sense_data_size);
1576 scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
1577 memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
1578 }
1579
1580 /*
1581 * Next check the srb status
1582 */
1583 switch( (le32_to_cpu(srbreply->srb_status))&0x3f){
1584 case SRB_STATUS_ERROR_RECOVERY:
1585 case SRB_STATUS_PENDING:
1586 case SRB_STATUS_SUCCESS:
1587 if(scsicmd->cmnd[0] == INQUIRY ){
1588 u8 b;
1589 u8 b1;
1590 /* We can't expose disk devices because we can't tell whether they
1591 * are the raw container drives or stand alone drives. If they have
1592 * the removable bit set then we should expose them though.
1593 */
1594 b = (*(u8*)scsicmd->buffer)&0x1f;
1595 b1 = ((u8*)scsicmd->buffer)[1];
1596 if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER
1597 || (b==TYPE_DISK && (b1&0x80)) ){
1598 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1599 /*
1600 * We will allow disk devices if in RAID/SCSI mode and
1601 * the channel is 2
1602 */
1603 } else if ((dev->raid_scsi_mode) &&
1604 (scsicmd->device->channel == 2)) {
1605 scsicmd->result = DID_OK << 16 |
1606 COMMAND_COMPLETE << 8;
1607 } else {
1608 scsicmd->result = DID_NO_CONNECT << 16 |
1609 COMMAND_COMPLETE << 8;
1610 }
1611 } else {
1612 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1613 }
1614 break;
1615 case SRB_STATUS_DATA_OVERRUN:
1616 switch(scsicmd->cmnd[0]){
1617 case READ_6:
1618 case WRITE_6:
1619 case READ_10:
1620 case WRITE_10:
1621 case READ_12:
1622 case WRITE_12:
1623 if(le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow ) {
1624 printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
1625 } else {
1626 printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n");
1627 }
1628 scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
1629 break;
1630 case INQUIRY: {
1631 u8 b;
1632 u8 b1;
1633 /* We can't expose disk devices because we can't tell whether they
1634 * are the raw container drives or stand alone drives
1635 */
1636 b = (*(u8*)scsicmd->buffer)&0x1f;
1637 b1 = ((u8*)scsicmd->buffer)[1];
1638 if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER
1639 || (b==TYPE_DISK && (b1&0x80)) ){
1640 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1641 /*
1642 * We will allow disk devices if in RAID/SCSI mode and
1643 * the channel is 2
1644 */
1645 } else if ((dev->raid_scsi_mode) &&
1646 (scsicmd->device->channel == 2)) {
1647 scsicmd->result = DID_OK << 16 |
1648 COMMAND_COMPLETE << 8;
1649 } else {
1650 scsicmd->result = DID_NO_CONNECT << 16 |
1651 COMMAND_COMPLETE << 8;
1652 }
1653 break;
1654 }
1655 default:
1656 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1657 break;
1658 }
1659 break;
1660 case SRB_STATUS_ABORTED:
1661 scsicmd->result = DID_ABORT << 16 | ABORT << 8;
1662 break;
1663 case SRB_STATUS_ABORT_FAILED:
1664 // Not sure about this one - but assuming the hba was trying to abort for some reason
1665 scsicmd->result = DID_ERROR << 16 | ABORT << 8;
1666 break;
1667 case SRB_STATUS_PARITY_ERROR:
1668 scsicmd->result = DID_PARITY << 16 | MSG_PARITY_ERROR << 8;
1669 break;
1670 case SRB_STATUS_NO_DEVICE:
1671 case SRB_STATUS_INVALID_PATH_ID:
1672 case SRB_STATUS_INVALID_TARGET_ID:
1673 case SRB_STATUS_INVALID_LUN:
1674 case SRB_STATUS_SELECTION_TIMEOUT:
1675 scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
1676 break;
1677
1678 case SRB_STATUS_COMMAND_TIMEOUT:
1679 case SRB_STATUS_TIMEOUT:
1680 scsicmd->result = DID_TIME_OUT << 16 | COMMAND_COMPLETE << 8;
1681 break;
1682
1683 case SRB_STATUS_BUSY:
1684 scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
1685 break;
1686
1687 case SRB_STATUS_BUS_RESET:
1688 scsicmd->result = DID_RESET << 16 | COMMAND_COMPLETE << 8;
1689 break;
1690
1691 case SRB_STATUS_MESSAGE_REJECTED:
1692 scsicmd->result = DID_ERROR << 16 | MESSAGE_REJECT << 8;
1693 break;
1694 case SRB_STATUS_REQUEST_FLUSHED:
1695 case SRB_STATUS_ERROR:
1696 case SRB_STATUS_INVALID_REQUEST:
1697 case SRB_STATUS_REQUEST_SENSE_FAILED:
1698 case SRB_STATUS_NO_HBA:
1699 case SRB_STATUS_UNEXPECTED_BUS_FREE:
1700 case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
1701 case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
1702 case SRB_STATUS_DELAYED_RETRY:
1703 case SRB_STATUS_BAD_FUNCTION:
1704 case SRB_STATUS_NOT_STARTED:
1705 case SRB_STATUS_NOT_IN_USE:
1706 case SRB_STATUS_FORCE_ABORT:
1707 case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
1708 default:
1709#ifdef AAC_DETAILED_STATUS_INFO
 1710		printk(KERN_ERR "aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n",
1711 le32_to_cpu(srbreply->srb_status) & 0x3F,
1712 aac_get_status_string(
1713 le32_to_cpu(srbreply->srb_status) & 0x3F),
1714 scsicmd->cmnd[0],
1715 le32_to_cpu(srbreply->scsi_status));
1716#endif
1717 scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
1718 break;
1719 }
 1720	if (le32_to_cpu(srbreply->scsi_status) == SAM_STAT_CHECK_CONDITION) {
1721 int len;
1722 scsicmd->result |= SAM_STAT_CHECK_CONDITION;
1723 len = (le32_to_cpu(srbreply->sense_data_size) >
1724 sizeof(scsicmd->sense_buffer)) ?
1725 sizeof(scsicmd->sense_buffer) :
1726 le32_to_cpu(srbreply->sense_data_size);
1727#ifdef AAC_DETAILED_STATUS_INFO
1728 dprintk((KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n",
1729 le32_to_cpu(srbreply->status), len));
1730#endif
1731 memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
1732
1733 }
1734 /*
1735 * OR in the scsi status (already shifted up a bit)
1736 */
1737 scsicmd->result |= le32_to_cpu(srbreply->scsi_status);
1738
1739 fib_complete(fibptr);
1740 fib_free(fibptr);
1741 aac_io_done(scsicmd);
1742}
1743
1744/**
1745 *
 1746 *	aac_send_srb_fib
1747 * @scsicmd: the scsi command block
1748 *
1749 * This routine will form a FIB and fill in the aac_srb from the
1750 * scsicmd passed in.
1751 */
1752
1753static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
1754{
1755 struct fib* cmd_fibcontext;
1756 struct aac_dev* dev;
1757 int status;
1758 struct aac_srb *srbcmd;
1759 u16 fibsize;
1760 u32 flag;
1761 u32 timeout;
1762
1763 if( scsicmd->device->id > 15 || scsicmd->device->lun > 7) {
1764 scsicmd->result = DID_NO_CONNECT << 16;
1765 scsicmd->scsi_done(scsicmd);
1766 return 0;
1767 }
1768
1769 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
1770 switch(scsicmd->sc_data_direction){
1771 case DMA_TO_DEVICE:
1772 flag = SRB_DataOut;
1773 break;
1774 case DMA_BIDIRECTIONAL:
1775 flag = SRB_DataIn | SRB_DataOut;
1776 break;
1777 case DMA_FROM_DEVICE:
1778 flag = SRB_DataIn;
1779 break;
1780 case DMA_NONE:
1781 default: /* shuts up some versions of gcc */
1782 flag = SRB_NoDataXfer;
1783 break;
1784 }
1785
1786
1787 /*
 1788	 *	Allocate and initialize a Fib then setup a SRB command
1789 */
1790 if (!(cmd_fibcontext = fib_alloc(dev))) {
1791 return -1;
1792 }
1793 fib_init(cmd_fibcontext);
1794
1795 srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext);
1796 srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
1797 srbcmd->channel = cpu_to_le32(aac_logical_to_phys(scsicmd->device->channel));
1798 srbcmd->id = cpu_to_le32(scsicmd->device->id);
1799 srbcmd->lun = cpu_to_le32(scsicmd->device->lun);
1800 srbcmd->flags = cpu_to_le32(flag);
1801 timeout = (scsicmd->timeout-jiffies)/HZ;
1802 if(timeout == 0){
1803 timeout = 1;
1804 }
1805 srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds
1806 srbcmd->retry_limit = 0; /* Obsolete parameter */
1807 srbcmd->cdb_size = cpu_to_le32(scsicmd->cmd_len);
1808
1809 if( dev->dac_support == 1 ) {
1810 aac_build_sg64(scsicmd, (struct sgmap64*) &srbcmd->sg);
1811 srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
1812
1813 memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
1814 memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
1815 /*
1816 * Build Scatter/Gather list
1817 */
1818 fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) +
1819 ((le32_to_cpu(srbcmd->sg.count) & 0xff) *
1820 sizeof (struct sgentry64));
1821 BUG_ON (fibsize > (sizeof(struct hw_fib) -
1822 sizeof(struct aac_fibhdr)));
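		/*
		 * Editor's note (worked example, not part of the driver):
		 * with two SG elements, srbcmd->sg.count == 2 and fibsize
		 * works out to
		 *	sizeof(struct aac_srb) - sizeof(struct sgentry)
		 *	+ 2 * sizeof(struct sgentry64)
		 * i.e. the single 32-bit sgentry embedded in aac_srb is
		 * swapped for the array of 64-bit entries just built.
		 */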
1823
1824 /*
1825 * Now send the Fib to the adapter
1826 */
1827 status = fib_send(ScsiPortCommand64, cmd_fibcontext,
1828 fibsize, FsaNormal, 0, 1,
1829 (fib_callback) aac_srb_callback,
1830 (void *) scsicmd);
1831 } else {
1832 aac_build_sg(scsicmd, (struct sgmap*)&srbcmd->sg);
1833 srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
1834
1835 memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
1836 memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
1837 /*
1838 * Build Scatter/Gather list
1839 */
1840 fibsize = sizeof (struct aac_srb) +
1841 (((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) *
1842 sizeof (struct sgentry));
1843 BUG_ON (fibsize > (sizeof(struct hw_fib) -
1844 sizeof(struct aac_fibhdr)));
1845
1846 /*
1847 * Now send the Fib to the adapter
1848 */
1849 status = fib_send(ScsiPortCommand, cmd_fibcontext, fibsize, FsaNormal, 0, 1,
1850 (fib_callback) aac_srb_callback, (void *) scsicmd);
1851 }
1852 /*
1853 * Check that the command queued to the controller
1854 */
1855 if (status == -EINPROGRESS){
1856 return 0;
1857 }
1858
1859 printk(KERN_WARNING "aac_srb: fib_send failed with status: %d\n", status);
1860 fib_complete(cmd_fibcontext);
1861 fib_free(cmd_fibcontext);
1862
1863 return -1;
1864}
1865
1866static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* psg)
1867{
1868 struct aac_dev *dev;
1869 unsigned long byte_count = 0;
1870
1871 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
1872 // Get rid of old data
1873 psg->count = 0;
1874 psg->sg[0].addr = 0;
1875 psg->sg[0].count = 0;
1876 if (scsicmd->use_sg) {
1877 struct scatterlist *sg;
1878 int i;
1879 int sg_count;
1880 sg = (struct scatterlist *) scsicmd->request_buffer;
1881
1882 sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
1883 scsicmd->sc_data_direction);
1884 psg->count = cpu_to_le32(sg_count);
1885
1886 byte_count = 0;
1887
1888 for (i = 0; i < sg_count; i++) {
1889 psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
1890 psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
1891 byte_count += sg_dma_len(sg);
1892 sg++;
1893 }
1894 /* hba wants the size to be exact */
1895 if(byte_count > scsicmd->request_bufflen){
1896 psg->sg[i-1].count -= (byte_count - scsicmd->request_bufflen);
1897 byte_count = scsicmd->request_bufflen;
1898 }
1899 /* Check for command underflow */
1900 if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
1901 printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
1902 byte_count, scsicmd->underflow);
1903 }
1904 }
1905 else if(scsicmd->request_bufflen) {
1906 dma_addr_t addr;
1907 addr = pci_map_single(dev->pdev,
1908 scsicmd->request_buffer,
1909 scsicmd->request_bufflen,
1910 scsicmd->sc_data_direction);
1911 psg->count = cpu_to_le32(1);
1912 psg->sg[0].addr = cpu_to_le32(addr);
1913 psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);
1914 scsicmd->SCp.dma_handle = addr;
1915 byte_count = scsicmd->request_bufflen;
1916 }
1917 return byte_count;
1918}
1919
1920
1921static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg)
1922{
1923 struct aac_dev *dev;
1924 unsigned long byte_count = 0;
1925 u64 le_addr;
1926
1927 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
1928 // Get rid of old data
1929 psg->count = 0;
1930 psg->sg[0].addr[0] = 0;
1931 psg->sg[0].addr[1] = 0;
1932 psg->sg[0].count = 0;
1933 if (scsicmd->use_sg) {
1934 struct scatterlist *sg;
1935 int i;
1936 int sg_count;
1937 sg = (struct scatterlist *) scsicmd->request_buffer;
1938
1939 sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
1940 scsicmd->sc_data_direction);
1941 psg->count = cpu_to_le32(sg_count);
1942
1943 byte_count = 0;
1944
1945 for (i = 0; i < sg_count; i++) {
1946 le_addr = cpu_to_le64(sg_dma_address(sg));
1947 psg->sg[i].addr[1] = (u32)(le_addr>>32);
1948 psg->sg[i].addr[0] = (u32)(le_addr & 0xffffffff);
1949 psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
1950 byte_count += sg_dma_len(sg);
1951 sg++;
1952 }
1953 /* hba wants the size to be exact */
1954 if(byte_count > scsicmd->request_bufflen){
1955 psg->sg[i-1].count -= (byte_count - scsicmd->request_bufflen);
1956 byte_count = scsicmd->request_bufflen;
1957 }
1958 /* Check for command underflow */
1959 if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
1960 printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
1961 byte_count, scsicmd->underflow);
1962 }
1963 }
1964 else if(scsicmd->request_bufflen) {
1965 dma_addr_t addr;
1966 addr = pci_map_single(dev->pdev,
1967 scsicmd->request_buffer,
1968 scsicmd->request_bufflen,
1969 scsicmd->sc_data_direction);
1970 psg->count = cpu_to_le32(1);
1971 le_addr = cpu_to_le64(addr);
1972 psg->sg[0].addr[1] = (u32)(le_addr>>32);
1973 psg->sg[0].addr[0] = (u32)(le_addr & 0xffffffff);
1974 psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);
1975 scsicmd->SCp.dma_handle = addr;
1976 byte_count = scsicmd->request_bufflen;
1977 }
1978 return byte_count;
1979}
1980
1981#ifdef AAC_DETAILED_STATUS_INFO
1982
1983struct aac_srb_status_info {
1984 u32 status;
1985 char *str;
1986};
1987
1988
1989static struct aac_srb_status_info srb_status_info[] = {
1990 { SRB_STATUS_PENDING, "Pending Status"},
1991 { SRB_STATUS_SUCCESS, "Success"},
1992 { SRB_STATUS_ABORTED, "Aborted Command"},
1993 { SRB_STATUS_ABORT_FAILED, "Abort Failed"},
1994 { SRB_STATUS_ERROR, "Error Event"},
1995 { SRB_STATUS_BUSY, "Device Busy"},
1996 { SRB_STATUS_INVALID_REQUEST, "Invalid Request"},
1997 { SRB_STATUS_INVALID_PATH_ID, "Invalid Path ID"},
1998 { SRB_STATUS_NO_DEVICE, "No Device"},
1999 { SRB_STATUS_TIMEOUT, "Timeout"},
2000 { SRB_STATUS_SELECTION_TIMEOUT, "Selection Timeout"},
2001 { SRB_STATUS_COMMAND_TIMEOUT, "Command Timeout"},
2002 { SRB_STATUS_MESSAGE_REJECTED, "Message Rejected"},
2003 { SRB_STATUS_BUS_RESET, "Bus Reset"},
2004 { SRB_STATUS_PARITY_ERROR, "Parity Error"},
2005 { SRB_STATUS_REQUEST_SENSE_FAILED,"Request Sense Failed"},
2006 { SRB_STATUS_NO_HBA, "No HBA"},
2007 { SRB_STATUS_DATA_OVERRUN, "Data Overrun/Data Underrun"},
2008 { SRB_STATUS_UNEXPECTED_BUS_FREE,"Unexpected Bus Free"},
2009 { SRB_STATUS_PHASE_SEQUENCE_FAILURE,"Phase Error"},
2010 { SRB_STATUS_BAD_SRB_BLOCK_LENGTH,"Bad Srb Block Length"},
2011 { SRB_STATUS_REQUEST_FLUSHED, "Request Flushed"},
2012 { SRB_STATUS_DELAYED_RETRY, "Delayed Retry"},
2013 { SRB_STATUS_INVALID_LUN, "Invalid LUN"},
2014 { SRB_STATUS_INVALID_TARGET_ID, "Invalid TARGET ID"},
2015 { SRB_STATUS_BAD_FUNCTION, "Bad Function"},
2016 { SRB_STATUS_ERROR_RECOVERY, "Error Recovery"},
2017 { SRB_STATUS_NOT_STARTED, "Not Started"},
2018 { SRB_STATUS_NOT_IN_USE, "Not In Use"},
2019 { SRB_STATUS_FORCE_ABORT, "Force Abort"},
2020 { SRB_STATUS_DOMAIN_VALIDATION_FAIL,"Domain Validation Failure"},
2021 { 0xff, "Unknown Error"}
2022};
2023
2024char *aac_get_status_string(u32 status)
2025{
2026 int i;
2027
 2028	for (i = 0; i < ARRAY_SIZE(srb_status_info); i++) {
2029 if(srb_status_info[i].status == status){
2030 return srb_status_info[i].str;
2031 }
2032 }
2033
2034 return "Bad Status Code";
2035}
2036
2037#endif
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
new file mode 100644
index 000000000000..700d90331c1c
--- /dev/null
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -0,0 +1,1623 @@
1#if (!defined(dprintk))
2# define dprintk(x)
3#endif
4
5/*------------------------------------------------------------------------------
6 * D E F I N E S
7 *----------------------------------------------------------------------------*/
8
9#define MAXIMUM_NUM_CONTAINERS 32
10
11#define AAC_NUM_FIB (256 + 64)
12#define AAC_NUM_IO_FIB 100
13
14#define AAC_MAX_LUN (8)
15
16#define AAC_MAX_HOSTPHYSMEMPAGES (0xfffff)
17
18/*
19 * These macros convert from physical channels to virtual channels
20 */
21#define CONTAINER_CHANNEL (0)
22#define ID_LUN_TO_CONTAINER(id, lun) (id)
23#define CONTAINER_TO_CHANNEL(cont) (CONTAINER_CHANNEL)
24#define CONTAINER_TO_ID(cont) (cont)
25#define CONTAINER_TO_LUN(cont) (0)
26
27#define aac_phys_to_logical(x) (x+1)
28#define aac_logical_to_phys(x) (x?x-1:0)
29
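/*
 * Editor's note: channel 0 (CONTAINER_CHANNEL) is reserved for the
 * logical containers, so physical channels shift up by one, e.g.:
 *
 *	aac_phys_to_logical(0) == 1
 *	aac_logical_to_phys(1) == 0
 *	aac_logical_to_phys(0) == 0	(container channel)
 */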
30/* #define AAC_DETAILED_STATUS_INFO */
31
32struct diskparm
33{
34 int heads;
35 int sectors;
36 int cylinders;
37};
38
39
40/*
41 * DON'T CHANGE THE ORDER, this is set by the firmware
42 */
43
44#define CT_NONE 0
45#define CT_VOLUME 1
46#define CT_MIRROR 2
47#define CT_STRIPE 3
48#define CT_RAID5 4
49#define CT_SSRW 5
50#define CT_SSRO 6
51#define CT_MORPH 7
52#define CT_PASSTHRU 8
53#define CT_RAID4 9
54#define CT_RAID10 10 /* stripe of mirror */
55#define CT_RAID00 11 /* stripe of stripe */
56#define CT_VOLUME_OF_MIRRORS 12 /* volume of mirror */
57#define CT_PSEUDO_RAID 13 /* really raid4 */
58#define CT_LAST_VOLUME_TYPE 14
59#define CT_OK 218
60
61/*
62 * Types of objects addressable in some fashion by the client.
63 * This is a superset of those objects handled just by the filesystem
64 * and includes "raw" objects that an administrator would use to
65 * configure containers and filesystems.
66 */
67
68#define FT_REG 1 /* regular file */
69#define FT_DIR 2 /* directory */
70#define FT_BLK 3 /* "block" device - reserved */
71#define FT_CHR 4 /* "character special" device - reserved */
72#define FT_LNK 5 /* symbolic link */
73#define FT_SOCK 6 /* socket */
74#define FT_FIFO 7 /* fifo */
75#define FT_FILESYS 8 /* ADAPTEC's "FSA"(tm) filesystem */
76#define FT_DRIVE 9 /* physical disk - addressable in scsi by bus/id/lun */
77#define FT_SLICE 10 /* virtual disk - raw volume - slice */
78#define FT_PARTITION 11 /* FSA partition - carved out of a slice - building block for containers */
79#define FT_VOLUME 12 /* Container - Volume Set */
80#define FT_STRIPE 13 /* Container - Stripe Set */
81#define FT_MIRROR 14 /* Container - Mirror Set */
82#define FT_RAID5 15 /* Container - Raid 5 Set */
83#define FT_DATABASE 16 /* Storage object with "foreign" content manager */
84
85/*
86 * Host side memory scatter gather list
87 * Used by the adapter for read, write, and readdirplus operations
 88 *	We have separate 32 and 64 bit versions because even
89 * on 64 bit systems not all cards support the 64 bit version
90 */
91struct sgentry {
92 u32 addr; /* 32-bit address. */
93 u32 count; /* Length. */
94};
95
96struct sgentry64 {
97 u32 addr[2]; /* 64-bit addr. 2 pieces for data alignment */
98 u32 count; /* Length. */
99};
100
101/*
102 * SGMAP
103 *
104 * This is the SGMAP structure for all commands that use
105 * 32-bit addressing.
106 */
107
108struct sgmap {
109 u32 count;
110 struct sgentry sg[1];
111};
112
113struct sgmap64 {
114 u32 count;
115 struct sgentry64 sg[1];
116};
117
118struct creation_info
119{
120 u8 buildnum; /* e.g., 588 */
121 u8 usec; /* e.g., 588 */
122 u8 via; /* e.g., 1 = FSU,
123 * 2 = API
124 */
125 u8 year; /* e.g., 1997 = 97 */
126 u32 date; /*
127 * unsigned Month :4; // 1 - 12
 128	 *	  unsigned	Day		:6;	// 1 - 31
 129	 *	  unsigned	Hour		:6;	// 0 - 23
 130	 *	  unsigned	Minute		:6;	// 0 - 59
 131	 *	  unsigned	Second		:6;	// 0 - 59
132 */
133 u32 serial[2]; /* e.g., 0x1DEADB0BFAFAF001 */
134};
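/*
 * Editor's sketch (hypothetical helper, not part of the driver):
 * unpacking the packed date word of struct creation_info.  The bit
 * positions below assume Second occupies the low 6 bits and Month the
 * top 4 of the 28 used bits, matching the order listed above; the
 * actual firmware packing is an assumption here.
 */
static inline void aac_creation_date_decode(u32 date, unsigned *month,
		unsigned *day, unsigned *hour, unsigned *minute,
		unsigned *second)
{
	*second = date & 0x3f;		/* bits  5:0  */
	*minute = (date >> 6) & 0x3f;	/* bits 11:6  */
	*hour   = (date >> 12) & 0x3f;	/* bits 17:12 */
	*day    = (date >> 18) & 0x3f;	/* bits 23:18 */
	*month  = (date >> 24) & 0x0f;	/* bits 27:24 */
}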
135
136
137/*
138 * Define all the constants needed for the communication interface
139 */
140
141/*
142 * Define how many queue entries each queue will have and the total
143 * number of entries for the entire communication interface. Also define
144 * how many queues we support.
145 *
146 * This has to match the controller
147 */
148
149#define NUMBER_OF_COMM_QUEUES 8 // 4 command; 4 response
150#define HOST_HIGH_CMD_ENTRIES 4
151#define HOST_NORM_CMD_ENTRIES 8
152#define ADAP_HIGH_CMD_ENTRIES 4
153#define ADAP_NORM_CMD_ENTRIES 512
154#define HOST_HIGH_RESP_ENTRIES 4
155#define HOST_NORM_RESP_ENTRIES 512
156#define ADAP_HIGH_RESP_ENTRIES 4
157#define ADAP_NORM_RESP_ENTRIES 8
158
159#define TOTAL_QUEUE_ENTRIES \
160 (HOST_NORM_CMD_ENTRIES + HOST_HIGH_CMD_ENTRIES + ADAP_NORM_CMD_ENTRIES + ADAP_HIGH_CMD_ENTRIES + \
161 HOST_NORM_RESP_ENTRIES + HOST_HIGH_RESP_ENTRIES + ADAP_NORM_RESP_ENTRIES + ADAP_HIGH_RESP_ENTRIES)
162
163
164/*
165 * Set the queues on a 16 byte alignment
166 */
167
168#define QUEUE_ALIGNMENT 16
169
170/*
171 * The queue headers define the Communication Region queues. These
172 * are physically contiguous and accessible by both the adapter and the
173 * host. Even though all queue headers are in the same contiguous block
174 * they will be represented as individual units in the data structures.
175 */
176
177struct aac_entry {
178 u32 size; /* Size in bytes of Fib which this QE points to */
179 u32 addr; /* Receiver address of the FIB */
180};
181
182/*
183 * The adapter assumes the ProducerIndex and ConsumerIndex are grouped
184 * adjacently and in that order.
185 */
186
187struct aac_qhdr {
188 u64 header_addr; /* Address to hand the adapter to access to this queue head */
189 u32 *producer; /* The producer index for this queue (host address) */
190 u32 *consumer; /* The consumer index for this queue (host address) */
191};
192
193/*
194 * Define all the events which the adapter would like to notify
195 * the host of.
196 */
197
198#define HostNormCmdQue 1 /* Change in host normal priority command queue */
199#define HostHighCmdQue 2 /* Change in host high priority command queue */
200#define HostNormRespQue 3 /* Change in host normal priority response queue */
201#define HostHighRespQue 4 /* Change in host high priority response queue */
202#define AdapNormRespNotFull 5
203#define AdapHighRespNotFull 6
204#define AdapNormCmdNotFull 7
205#define AdapHighCmdNotFull 8
206#define SynchCommandComplete 9
 207#define		AdapInternalError	0xfe	/* The adapter detected an internal error; shutting down */
208
209/*
210 * Define all the events the host wishes to notify the
 211 *	adapter of. The first four values must match the Qid of the
 212 *	corresponding queue.
213 */
214
215#define AdapNormCmdQue 2
216#define AdapHighCmdQue 3
217#define AdapNormRespQue 6
218#define AdapHighRespQue 7
219#define HostShutdown 8
220#define HostPowerFail 9
221#define FatalCommError 10
222#define HostNormRespNotFull 11
223#define HostHighRespNotFull 12
224#define HostNormCmdNotFull 13
225#define HostHighCmdNotFull 14
226#define FastIo 15
227#define AdapPrintfDone 16
228
229/*
230 * Define all the queues that the adapter and host use to communicate
231 * Number them to match the physical queue layout.
232 */
233
234enum aac_queue_types {
235 HostNormCmdQueue = 0, /* Adapter to host normal priority command traffic */
236 HostHighCmdQueue, /* Adapter to host high priority command traffic */
237 AdapNormCmdQueue, /* Host to adapter normal priority command traffic */
238 AdapHighCmdQueue, /* Host to adapter high priority command traffic */
239 HostNormRespQueue, /* Adapter to host normal priority response traffic */
240 HostHighRespQueue, /* Adapter to host high priority response traffic */
241 AdapNormRespQueue, /* Host to adapter normal priority response traffic */
242 AdapHighRespQueue /* Host to adapter high priority response traffic */
243};
244
245/*
246 * Assign type values to the FSA communication data structures
247 */
248
249#define FIB_MAGIC 0x0001
250
251/*
252 * Define the priority levels the FSA communication routines support.
253 */
254
255#define FsaNormal 1
256#define FsaHigh 2
257
258/*
 259 *	Define the FIB. The FIB is where all the requested data and
 260 *	command information is passed to the application on the FSA adapter.
261 */
262
263struct aac_fibhdr {
264 u32 XferState; // Current transfer state for this CCB
265 u16 Command; // Routing information for the destination
266 u8 StructType; // Type FIB
267 u8 Flags; // Flags for FIB
268 u16 Size; // Size of this FIB in bytes
269 u16 SenderSize; // Size of the FIB in the sender (for response sizing)
270 u32 SenderFibAddress; // Host defined data in the FIB
271 u32 ReceiverFibAddress; // Logical address of this FIB for the adapter
272 u32 SenderData; // Place holder for the sender to store data
273 union {
274 struct {
275 u32 _ReceiverTimeStart; // Timestamp for receipt of fib
276 u32 _ReceiverTimeDone; // Timestamp for completion of fib
277 } _s;
278 } _u;
279};
280
281#define FIB_DATA_SIZE_IN_BYTES (512 - sizeof(struct aac_fibhdr))
282
283
284struct hw_fib {
285 struct aac_fibhdr header;
286 u8 data[FIB_DATA_SIZE_IN_BYTES]; // Command specific data
287};
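/*
 * Editor's note: header and data are sized so a hw_fib is always
 * exactly 512 bytes; a compile-time guard for this invariant would be:
 *
 *	BUILD_BUG_ON(sizeof(struct hw_fib) != 512);
 */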
288
289/*
290 * FIB commands
291 */
292
293#define TestCommandResponse 1
294#define TestAdapterCommand 2
295/*
296 * Lowlevel and comm commands
297 */
298#define LastTestCommand 100
299#define ReinitHostNormCommandQueue 101
300#define ReinitHostHighCommandQueue 102
301#define ReinitHostHighRespQueue 103
302#define ReinitHostNormRespQueue 104
303#define ReinitAdapNormCommandQueue 105
304#define ReinitAdapHighCommandQueue 107
305#define ReinitAdapHighRespQueue 108
306#define ReinitAdapNormRespQueue 109
307#define InterfaceShutdown 110
308#define DmaCommandFib 120
309#define StartProfile 121
310#define TermProfile 122
311#define SpeedTest 123
312#define TakeABreakPt 124
313#define RequestPerfData 125
314#define SetInterruptDefTimer 126
315#define SetInterruptDefCount 127
316#define GetInterruptDefStatus 128
317#define LastCommCommand 129
318/*
319 * Filesystem commands
320 */
321#define NuFileSystem 300
322#define UFS 301
323#define HostFileSystem 302
324#define LastFileSystemCommand 303
325/*
326 * Container Commands
327 */
328#define ContainerCommand 500
329#define ContainerCommand64 501
330/*
331 * Cluster Commands
332 */
333#define ClusterCommand 550
334/*
335 * Scsi Port commands (scsi passthrough)
336 */
337#define ScsiPortCommand 600
338#define ScsiPortCommand64 601
339/*
340 * Misc house keeping and generic adapter initiated commands
341 */
342#define AifRequest 700
343#define CheckRevision 701
344#define FsaHostShutdown 702
345#define RequestAdapterInfo 703
346#define IsAdapterPaused 704
347#define SendHostTime 705
348#define LastMiscCommand 706
349
350//
351// Commands that will target the failover level on the FSA adapter
352//
353
354enum fib_xfer_state {
355 HostOwned = (1<<0),
356 AdapterOwned = (1<<1),
357 FibInitialized = (1<<2),
358 FibEmpty = (1<<3),
359 AllocatedFromPool = (1<<4),
360 SentFromHost = (1<<5),
361 SentFromAdapter = (1<<6),
362 ResponseExpected = (1<<7),
363 NoResponseExpected = (1<<8),
364 AdapterProcessed = (1<<9),
365 HostProcessed = (1<<10),
366 HighPriority = (1<<11),
367 NormalPriority = (1<<12),
368 Async = (1<<13),
369 AsyncIo = (1<<13), // rpbfix: remove with new regime
370 PageFileIo = (1<<14), // rpbfix: remove with new regime
371 ShutdownRequest = (1<<15),
372 LazyWrite = (1<<16), // rpbfix: remove with new regime
373 AdapterMicroFib = (1<<17),
374 BIOSFibPath = (1<<18),
375 FastResponseCapable = (1<<19),
376 ApiFib = (1<<20) // Its an API Fib.
377};
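/*
 * Editor's sketch: XferState is a bitmask accumulated over a fib's
 * lifetime.  A host-owned fib sent at normal priority expecting a
 * response might carry (illustrative combination only):
 *
 *	HostOwned | FibInitialized | SentFromHost |
 *	NormalPriority | ResponseExpected
 */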
378
379/*
 380 *	The following define needs to be updated any time there is an
381 * incompatible change made to the aac_init structure.
382 */
383
384#define ADAPTER_INIT_STRUCT_REVISION 3
385
386struct aac_init
387{
388 u32 InitStructRevision;
389 u32 MiniPortRevision;
390 u32 fsrev;
391 u32 CommHeaderAddress;
392 u32 FastIoCommAreaAddress;
393 u32 AdapterFibsPhysicalAddress;
394 u32 AdapterFibsVirtualAddress;
395 u32 AdapterFibsSize;
396 u32 AdapterFibAlign;
397 u32 printfbuf;
398 u32 printfbufsiz;
399 u32 HostPhysMemPages; // number of 4k pages of host physical memory
400 u32 HostElapsedSeconds; // number of seconds since 1970.
401};
402
403enum aac_log_level {
404 LOG_AAC_INIT = 10,
405 LOG_AAC_INFORMATIONAL = 20,
406 LOG_AAC_WARNING = 30,
407 LOG_AAC_LOW_ERROR = 40,
408 LOG_AAC_MEDIUM_ERROR = 50,
409 LOG_AAC_HIGH_ERROR = 60,
410 LOG_AAC_PANIC = 70,
411 LOG_AAC_DEBUG = 80,
412 LOG_AAC_WINDBG_PRINT = 90
413};
414
415#define FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT 0x030b
416#define FSAFS_NTC_FIB_CONTEXT 0x030c
417
418struct aac_dev;
419
420struct adapter_ops
421{
422 void (*adapter_interrupt)(struct aac_dev *dev);
423 void (*adapter_notify)(struct aac_dev *dev, u32 event);
424 int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 *status);
425 int (*adapter_check_health)(struct aac_dev *dev);
426};
427
428/*
429 * Define which interrupt handler needs to be installed
430 */
431
432struct aac_driver_ident
433{
434 int (*init)(struct aac_dev *dev);
435 char * name;
436 char * vname;
437 char * model;
438 u16 channels;
439 int quirks;
440};
441/*
442 * Some adapter firmware needs communication memory
443 * below 2gig. This tells the init function to set the
444 * dma mask such that fib memory will be allocated where the
445 * adapter firmware can get to it.
446 */
447#define AAC_QUIRK_31BIT 0x0001
448
449/*
 450 * Some adapter firmware, when the raid card's cache is turned off, cannot
451 * split up scatter gathers in order to deal with the limits of the
452 * underlying CHIM. This limit is 34 scatter gather elements.
453 */
454#define AAC_QUIRK_34SG 0x0002
455
456/*
457 * This adapter is a slave (no Firmware)
458 */
459#define AAC_QUIRK_SLAVE 0x0004
460
461/*
462 * This adapter is a master.
463 */
464#define AAC_QUIRK_MASTER 0x0008
465
466/*
467 * The adapter interface specs all queues to be located in the same
 468 * physically contiguous block. The host structure that defines the
 469 * communication queues will assume they are each a separate physically
 470 * contiguous memory region that will support them all being one big
 471 * contiguous block.
 472 * There is a command and response queue for each level and direction of
 473 * communication. These regions are accessed by both the host and adapter.
474 */
475
476struct aac_queue {
477 u64 logical; /*address we give the adapter */
478 struct aac_entry *base; /*system virtual address */
479 struct aac_qhdr headers; /*producer,consumer q headers*/
480 u32 entries; /*Number of queue entries */
481 wait_queue_head_t qfull; /*Event to wait on if q full */
482 wait_queue_head_t cmdready; /*Cmd ready from the adapter */
483 /* This is only valid for adapter to host command queues. */
 484	spinlock_t		*lock;		/* Spinlock for this queue; must take this lock before accessing the queue */
485 spinlock_t lockdata; /* Actual lock (used only on one side of the lock) */
486 unsigned long SavedIrql; /* Previous IRQL when the spin lock is taken */
487 u32 padding; /* Padding - FIXME - can remove I believe */
 488	struct list_head 	cmdq;	   	/* A queue of FIBs which need to be processed by the FS thread. This is */
489 /* only valid for command queues which receive entries from the adapter. */
490 struct list_head pendingq; /* A queue of outstanding fib's to the adapter. */
491 u32 numpending; /* Number of entries on outstanding queue. */
492 struct aac_dev * dev; /* Back pointer to adapter structure */
493};
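/*
 * Editor's sketch (hypothetical helper): the producer/consumer indices
 * in aac_qhdr above form a classic ring; one slot is sacrificed so a
 * full queue can be told apart from an empty one.  Endianness and
 * locking are deliberately elided in this illustration.
 */
static inline int aac_queue_is_full(struct aac_queue *q)
{
	u32 next = *(q->headers.producer) + 1;

	if (next == q->entries)		/* wrap around */
		next = 0;
	return next == *(q->headers.consumer);
}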
494
495/*
496 * Message queues. The order here is important, see also the
497 * queue type ordering
498 */
499
500struct aac_queue_block
501{
502 struct aac_queue queue[8];
503};
504
505/*
506 * SaP1 Message Unit Registers
507 */
508
509struct sa_drawbridge_CSR {
510 /* Offset | Name */
511 __le32 reserved[10]; /* 00h-27h | Reserved */
512 u8 LUT_Offset; /* 28h | Lookup Table Offset */
513 u8 reserved1[3]; /* 29h-2bh | Reserved */
 514	__le32	LUT_Data;	/*	2ch	|	Lookup Table Data	*/
515 __le32 reserved2[26]; /* 30h-97h | Reserved */
516 __le16 PRICLEARIRQ; /* 98h | Primary Clear Irq */
517 __le16 SECCLEARIRQ; /* 9ah | Secondary Clear Irq */
518 __le16 PRISETIRQ; /* 9ch | Primary Set Irq */
519 __le16 SECSETIRQ; /* 9eh | Secondary Set Irq */
520 __le16 PRICLEARIRQMASK;/* a0h | Primary Clear Irq Mask */
521 __le16 SECCLEARIRQMASK;/* a2h | Secondary Clear Irq Mask */
522 __le16 PRISETIRQMASK; /* a4h | Primary Set Irq Mask */
523 __le16 SECSETIRQMASK; /* a6h | Secondary Set Irq Mask */
524 __le32 MAILBOX0; /* a8h | Scratchpad 0 */
525 __le32 MAILBOX1; /* ach | Scratchpad 1 */
526 __le32 MAILBOX2; /* b0h | Scratchpad 2 */
527 __le32 MAILBOX3; /* b4h | Scratchpad 3 */
528 __le32 MAILBOX4; /* b8h | Scratchpad 4 */
529 __le32 MAILBOX5; /* bch | Scratchpad 5 */
530 __le32 MAILBOX6; /* c0h | Scratchpad 6 */
531 __le32 MAILBOX7; /* c4h | Scratchpad 7 */
532 __le32 ROM_Setup_Data; /* c8h | Rom Setup and Data */
533 __le32 ROM_Control_Addr;/* cch | Rom Control and Address */
534 __le32 reserved3[12]; /* d0h-ffh | reserved */
535 __le32 LUT[64]; /* 100h-1ffh | Lookup Table Entries */
536};
537
538#define Mailbox0 SaDbCSR.MAILBOX0
539#define Mailbox1 SaDbCSR.MAILBOX1
540#define Mailbox2 SaDbCSR.MAILBOX2
541#define Mailbox3 SaDbCSR.MAILBOX3
542#define Mailbox4 SaDbCSR.MAILBOX4
543#define Mailbox5 SaDbCSR.MAILBOX5
544#define Mailbox7 SaDbCSR.MAILBOX7
545
546#define DoorbellReg_p SaDbCSR.PRISETIRQ
547#define DoorbellReg_s SaDbCSR.SECSETIRQ
548#define DoorbellClrReg_p SaDbCSR.PRICLEARIRQ
549
550
551#define DOORBELL_0 0x0001
552#define DOORBELL_1 0x0002
553#define DOORBELL_2 0x0004
554#define DOORBELL_3 0x0008
555#define DOORBELL_4 0x0010
556#define DOORBELL_5 0x0020
557#define DOORBELL_6 0x0040
558
559
560#define PrintfReady DOORBELL_5
561#define PrintfDone DOORBELL_5
562
563struct sa_registers {
564 struct sa_drawbridge_CSR SaDbCSR; /* 98h - c4h */
565};
566
567
568#define Sa_MINIPORT_REVISION 1
569
570#define sa_readw(AEP, CSR) readl(&((AEP)->regs.sa->CSR))
571#define sa_readl(AEP, CSR) readl(&((AEP)->regs.sa->CSR))
572#define sa_writew(AEP, CSR, value) writew(value, &((AEP)->regs.sa->CSR))
573#define sa_writel(AEP, CSR, value) writel(value, &((AEP)->regs.sa->CSR))
574
575/*
576 * Rx Message Unit Registers
577 */
578
579struct rx_mu_registers {
580 /* Local | PCI*| Name */
581 __le32 ARSR; /* 1300h | 00h | APIC Register Select Register */
582 __le32 reserved0; /* 1304h | 04h | Reserved */
583 __le32 AWR; /* 1308h | 08h | APIC Window Register */
584 __le32 reserved1; /* 130Ch | 0Ch | Reserved */
585 __le32 IMRx[2]; /* 1310h | 10h | Inbound Message Registers */
586 __le32 OMRx[2]; /* 1318h | 18h | Outbound Message Registers */
587 __le32 IDR; /* 1320h | 20h | Inbound Doorbell Register */
588 __le32 IISR; /* 1324h | 24h | Inbound Interrupt
589 Status Register */
590 __le32 IIMR; /* 1328h | 28h | Inbound Interrupt
591 Mask Register */
592 __le32 ODR; /* 132Ch | 2Ch | Outbound Doorbell Register */
593 __le32 OISR; /* 1330h | 30h | Outbound Interrupt
594 Status Register */
595 __le32 OIMR; /* 1334h | 34h | Outbound Interrupt
596 Mask Register */
597 /* * Must access through ATU Inbound
598 Translation Window */
599};
600
601struct rx_inbound {
602 __le32 Mailbox[8];
603};
604
605#define InboundMailbox0 IndexRegs.Mailbox[0]
606#define InboundMailbox1 IndexRegs.Mailbox[1]
607#define InboundMailbox2 IndexRegs.Mailbox[2]
608#define InboundMailbox3 IndexRegs.Mailbox[3]
609#define InboundMailbox4 IndexRegs.Mailbox[4]
610#define InboundMailbox5 IndexRegs.Mailbox[5]
611#define InboundMailbox6 IndexRegs.Mailbox[6]
612
613#define INBOUNDDOORBELL_0 0x00000001
614#define INBOUNDDOORBELL_1 0x00000002
615#define INBOUNDDOORBELL_2 0x00000004
616#define INBOUNDDOORBELL_3 0x00000008
617#define INBOUNDDOORBELL_4 0x00000010
618#define INBOUNDDOORBELL_5 0x00000020
619#define INBOUNDDOORBELL_6 0x00000040
620
621#define OUTBOUNDDOORBELL_0 0x00000001
622#define OUTBOUNDDOORBELL_1 0x00000002
623#define OUTBOUNDDOORBELL_2 0x00000004
624#define OUTBOUNDDOORBELL_3 0x00000008
625#define OUTBOUNDDOORBELL_4 0x00000010
626
627#define InboundDoorbellReg MUnit.IDR
628#define OutboundDoorbellReg MUnit.ODR
629
630struct rx_registers {
631 struct rx_mu_registers MUnit; /* 1300h - 1334h */
632 __le32 reserved1[6]; /* 1338h - 134ch */
633 struct rx_inbound IndexRegs;
634};
635
636#define rx_readb(AEP, CSR) readb(&((AEP)->regs.rx->CSR))
637#define rx_readl(AEP, CSR) readl(&((AEP)->regs.rx->CSR))
638#define rx_writeb(AEP, CSR, value) writeb(value, &((AEP)->regs.rx->CSR))
639#define rx_writel(AEP, CSR, value) writel(value, &((AEP)->regs.rx->CSR))
640
641/*
642 * Rkt Message Unit Registers (same as Rx, except a larger reserve region)
643 */
644
645#define rkt_mu_registers rx_mu_registers
646#define rkt_inbound rx_inbound
647
648struct rkt_registers {
649 struct rkt_mu_registers MUnit; /* 1300h - 1334h */
650 __le32 reserved1[1010]; /* 1338h - 22fch */
651 struct rkt_inbound IndexRegs; /* 2300h - */
652};
653
654#define rkt_readb(AEP, CSR) readb(&((AEP)->regs.rkt->CSR))
655#define rkt_readl(AEP, CSR) readl(&((AEP)->regs.rkt->CSR))
656#define rkt_writeb(AEP, CSR, value) writeb(value, &((AEP)->regs.rkt->CSR))
657#define rkt_writel(AEP, CSR, value) writel(value, &((AEP)->regs.rkt->CSR))
658
659struct fib;
660
661typedef void (*fib_callback)(void *ctxt, struct fib *fibctx);
662
663struct aac_fib_context {
664 s16 type; // used for verification of structure
665 s16 size;
666 u32 unique; // unique value representing this context
667 ulong jiffies; // used for cleanup - dmb changed to ulong
 668	struct list_head next;		// used to link contexts into a linked list
669 struct semaphore wait_sem; // this is used to wait for the next fib to arrive.
670 int wait; // Set to true when thread is in WaitForSingleObject
671 unsigned long count; // total number of FIBs on FibList
 672	struct list_head fib_list;	// this holds fibs and their attached hw_fibs
673};
674
675struct sense_data {
676 u8 error_code; /* 70h (current errors), 71h(deferred errors) */
677 u8 valid:1; /* A valid bit of one indicates that the information */
678 /* field contains valid information as defined in the
679 * SCSI-2 Standard.
680 */
681 u8 segment_number; /* Only used for COPY, COMPARE, or COPY AND VERIFY Commands */
682 u8 sense_key:4; /* Sense Key */
683 u8 reserved:1;
684 u8 ILI:1; /* Incorrect Length Indicator */
685 u8 EOM:1; /* End Of Medium - reserved for random access devices */
686 u8 filemark:1; /* Filemark - reserved for random access devices */
687
688 u8 information[4]; /* for direct-access devices, contains the unsigned
689 * logical block address or residue associated with
690 * the sense key
691 */
692 u8 add_sense_len; /* number of additional sense bytes to follow this field */
693 u8 cmnd_info[4]; /* not used */
694 u8 ASC; /* Additional Sense Code */
695 u8 ASCQ; /* Additional Sense Code Qualifier */
696 u8 FRUC; /* Field Replaceable Unit Code - not used */
697 u8 bit_ptr:3; /* indicates which byte of the CDB or parameter data
698 * was in error
699 */
700 u8 BPV:1; /* bit pointer valid (BPV): 1- indicates that
701 * the bit_ptr field has valid value
702 */
703 u8 reserved2:2;
704 u8 CD:1; /* command data bit: 1- illegal parameter in CDB.
705 * 0- illegal parameter in data.
706 */
707 u8 SKSV:1;
708 u8 field_ptr[2]; /* byte of the CDB or parameter data in error */
709};
710
711struct fsa_dev_info {
712 u64 last;
713 u64 size;
714 u32 type;
715 u16 queue_depth;
716 u8 valid;
717 u8 ro;
718 u8 locked;
719 u8 deleted;
720 char devname[8];
721 struct sense_data sense_data;
722};
723
724struct fib {
725 void *next; /* this is used by the allocator */
726 s16 type;
727 s16 size;
728 /*
729 * The Adapter that this I/O is destined for.
730 */
731 struct aac_dev *dev;
732 /*
733 * This is the event the sendfib routine will wait on if the
734 * caller did not pass one and this is synch io.
735 */
736 struct semaphore event_wait;
737 spinlock_t event_lock;
738
739 u32 done; /* gets set to 1 when fib is complete */
740 fib_callback callback;
741 void *callback_data;
742 u32 flags; // u32 dmb was ulong
743 /*
744 * The following is used to put this fib context onto the
745 * Outstanding I/O queue.
746 */
747 struct list_head queue;
748 /*
749 * And for the internal issue/reply queues (we may be able
750 * to merge these two)
751 */
752 struct list_head fiblink;
753 void *data;
754 struct hw_fib *hw_fib; /* Actual shared object */
755 dma_addr_t hw_fib_pa; /* physical address of hw_fib*/
756};
757
758/*
759 * Adapter Information Block
760 *
761 * This is returned by the RequestAdapterInfo block
762 */
763
764struct aac_adapter_info
765{
766 u32 platform;
767 u32 cpu;
768 u32 subcpu;
769 u32 clock;
770 u32 execmem;
771 u32 buffermem;
772 u32 totalmem;
773 u32 kernelrev;
774 u32 kernelbuild;
775 u32 monitorrev;
776 u32 monitorbuild;
777 u32 hwrev;
778 u32 hwbuild;
779 u32 biosrev;
780 u32 biosbuild;
781 u32 cluster;
782 u32 clusterchannelmask;
783 u32 serial[2];
784 u32 battery;
785 u32 options;
786 u32 OEM;
787};
788
789/*
790 * Battery platforms
791 */
792#define AAC_BAT_REQ_PRESENT (1)
793#define AAC_BAT_REQ_NOTPRESENT (2)
794#define AAC_BAT_OPT_PRESENT (3)
795#define AAC_BAT_OPT_NOTPRESENT (4)
796#define AAC_BAT_NOT_SUPPORTED (5)
797/*
798 * cpu types
799 */
800#define AAC_CPU_SIMULATOR (1)
801#define AAC_CPU_I960 (2)
802#define AAC_CPU_STRONGARM (3)
803
804/*
805 * Supported Options
806 */
807#define AAC_OPT_SNAPSHOT cpu_to_le32(1)
808#define AAC_OPT_CLUSTERS cpu_to_le32(1<<1)
809#define AAC_OPT_WRITE_CACHE cpu_to_le32(1<<2)
810#define AAC_OPT_64BIT_DATA cpu_to_le32(1<<3)
811#define AAC_OPT_HOST_TIME_FIB cpu_to_le32(1<<4)
812#define AAC_OPT_RAID50 cpu_to_le32(1<<5)
813#define AAC_OPT_4GB_WINDOW cpu_to_le32(1<<6)
814#define AAC_OPT_SCSI_UPGRADEABLE cpu_to_le32(1<<7)
815#define AAC_OPT_SOFT_ERR_REPORT cpu_to_le32(1<<8)
816#define AAC_OPT_SUPPORTED_RECONDITION cpu_to_le32(1<<9)
817#define AAC_OPT_SGMAP_HOST64 cpu_to_le32(1<<10)
818#define AAC_OPT_ALARM cpu_to_le32(1<<11)
819#define AAC_OPT_NONDASD cpu_to_le32(1<<12)
820#define AAC_OPT_SCSI_MANAGED cpu_to_le32(1<<13)
821#define AAC_OPT_RAID_SCSI_MODE cpu_to_le32(1<<14)
822#define AAC_OPT_SUPPLEMENT_ADAPTER_INFO cpu_to_le32(1<<16)
823#define AAC_OPT_NEW_COMM cpu_to_le32(1<<17)
824#define AAC_OPT_NEW_COMM_64 cpu_to_le32(1<<18)
825
826struct aac_dev
827{
828 struct list_head entry;
829 const char *name;
830 int id;
831
832 u16 irq_mask;
833 /*
834 * Map for 128 fib objects (64k)
835 */
836 dma_addr_t hw_fib_pa;
837 struct hw_fib *hw_fib_va;
838 struct hw_fib *aif_base_va;
839 /*
840 * Fib Headers
841 */
842 struct fib *fibs;
843
844 struct fib *free_fib;
845 struct fib *timeout_fib;
846 spinlock_t fib_lock;
847
848 struct aac_queue_block *queues;
849 /*
850 * The user API will use an IOCTL to register itself to receive
851 * FIBs from the adapter. The following list is used to keep
852 * track of all the threads that have requested these FIBs. The
853 * mutex is used to synchronize access to all data associated
854 * with the adapter fibs.
855 */
856 struct list_head fib_list;
857
858 struct adapter_ops a_ops;
859 unsigned long fsrev; /* Main driver's revision number */
860
861 struct aac_init *init; /* Holds initialization info to communicate with adapter */
862 dma_addr_t init_pa; /* Holds physical address of the init struct */
863
864 struct pci_dev *pdev; /* Our PCI interface */
865 void * printfbuf; /* pointer to buffer used for printf's from the adapter */
866 void * comm_addr; /* Base address of Comm area */
867 dma_addr_t comm_phys; /* Physical Address of Comm area */
868 size_t comm_size;
869
870 struct Scsi_Host *scsi_host_ptr;
871 int maximum_num_containers;
872 struct fsa_dev_info *fsa_dev;
873 pid_t thread_pid;
874 int cardtype;
875
876 /*
877 * The following is the device specific extension.
878 */
879 union
880 {
881 struct sa_registers __iomem *sa;
882 struct rx_registers __iomem *rx;
883 struct rkt_registers __iomem *rkt;
884 } regs;
885 u32 OIMR; /* Mask Register Cache */
886 /*
887 * AIF thread states
888 */
889 u32 aif_thread;
890 struct completion aif_completion;
891 struct aac_adapter_info adapter_info;
 892	/* These are in adapter info, but they are in the I/O flow so
 893	 * let's break them out so we don't have to do an AND to check them
894 */
895 u8 nondasd_support;
896 u8 dac_support;
897 u8 raid_scsi_mode;
898};
899
900#define aac_adapter_interrupt(dev) \
901 (dev)->a_ops.adapter_interrupt(dev)
902
903#define aac_adapter_notify(dev, event) \
904 (dev)->a_ops.adapter_notify(dev, event)
905
906
907#define aac_adapter_check_health(dev) \
908 (dev)->a_ops.adapter_check_health(dev)
909
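/*
 * Editor's sketch: a card-specific init routine is expected to fill in
 * a_ops so the macros above dispatch to the right hardware layer, e.g.
 * (function names illustrative only):
 *
 *	dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
 *	dev->a_ops.adapter_notify = aac_rx_notify_adapter;
 *	dev->a_ops.adapter_check_health = aac_rx_check_health;
 */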
910
911#define FIB_CONTEXT_FLAG_TIMED_OUT (0x00000001)
912
913/*
914 * Define the command values
915 */
916
917#define Null 0
918#define GetAttributes 1
919#define SetAttributes 2
920#define Lookup 3
921#define ReadLink 4
922#define Read 5
923#define Write 6
924#define Create 7
925#define MakeDirectory 8
926#define SymbolicLink 9
927#define MakeNode 10
928#define Removex 11
929#define RemoveDirectoryx 12
930#define Rename 13
931#define Link 14
932#define ReadDirectory 15
933#define ReadDirectoryPlus 16
934#define FileSystemStatus 17
935#define FileSystemInfo 18
936#define PathConfigure 19
937#define Commit 20
938#define Mount 21
939#define UnMount 22
940#define Newfs 23
941#define FsCheck 24
942#define FsSync 25
943#define SimReadWrite 26
944#define SetFileSystemStatus 27
945#define BlockRead 28
946#define BlockWrite 29
947#define NvramIoctl 30
948#define FsSyncWait 31
949#define ClearArchiveBit 32
950#define SetAcl 33
951#define GetAcl 34
952#define AssignAcl 35
953#define FaultInsertion 36 /* Fault Insertion Command */
954#define CrazyCache 37 /* Crazycache */
955
956#define MAX_FSACOMMAND_NUM 38
957
958
959/*
 960 *	Define the status returns. These are very Unix-like although
961 * most are not in fact used
962 */
963
964#define ST_OK 0
965#define ST_PERM 1
966#define ST_NOENT 2
967#define ST_IO 5
968#define ST_NXIO 6
969#define ST_E2BIG 7
970#define ST_ACCES 13
971#define ST_EXIST 17
972#define ST_XDEV 18
973#define ST_NODEV 19
974#define ST_NOTDIR 20
975#define ST_ISDIR 21
976#define ST_INVAL 22
977#define ST_FBIG 27
978#define ST_NOSPC 28
979#define ST_ROFS 30
980#define ST_MLINK 31
981#define ST_WOULDBLOCK 35
982#define ST_NAMETOOLONG 63
983#define ST_NOTEMPTY 66
984#define ST_DQUOT 69
985#define ST_STALE 70
986#define ST_REMOTE 71
987#define ST_BADHANDLE 10001
988#define ST_NOT_SYNC 10002
989#define ST_BAD_COOKIE 10003
990#define ST_NOTSUPP 10004
991#define ST_TOOSMALL 10005
992#define ST_SERVERFAULT 10006
993#define ST_BADTYPE 10007
994#define ST_JUKEBOX 10008
995#define ST_NOTMOUNTED 10009
996#define ST_MAINTMODE 10010
997#define ST_STALEACL 10011
998
999/*
 1000 *	On writes, how does the client want the data written.
1001 */
1002
1003#define CACHE_CSTABLE 1
1004#define CACHE_UNSTABLE 2
1005
1006/*
 1007 *	Lets the client know at which level the data was committed on
1008 * a write request
1009 */
1010
1011#define CMFILE_SYNCH_NVRAM 1
1012#define CMDATA_SYNCH_NVRAM 2
1013#define CMFILE_SYNCH 3
1014#define CMDATA_SYNCH 4
1015#define CMUNSTABLE 5
1016
1017struct aac_read
1018{
1019 u32 command;
1020 u32 cid;
1021 u32 block;
1022 u32 count;
1023 struct sgmap sg; // Must be last in struct because it is variable
1024};
1025
1026struct aac_read64
1027{
1028 u32 command;
1029 u16 cid;
1030 u16 sector_count;
1031 u32 block;
1032 u16 pad;
1033 u16 flags;
1034 struct sgmap64 sg; // Must be last in struct because it is variable
1035};
1036
1037struct aac_read_reply
1038{
1039 u32 status;
1040 u32 count;
1041};
1042
1043struct aac_write
1044{
1045 u32 command;
1046 u32 cid;
1047 u32 block;
1048 u32 count;
1049 u32 stable; // Not used
1050 struct sgmap sg; // Must be last in struct because it is variable
1051};
1052
1053struct aac_write64
1054{
1055 u32 command;
1056 u16 cid;
1057 u16 sector_count;
1058 u32 block;
1059 u16 pad;
1060 u16 flags;
1061 struct sgmap64 sg; // Must be last in struct because it is variable
1062};
1063struct aac_write_reply
1064{
1065 u32 status;
1066 u32 count;
1067 u32 committed;
1068};
1069
1070#define CT_FLUSH_CACHE 129
1071struct aac_synchronize {
1072 u32 command; /* VM_ContainerConfig */
1073 u32 type; /* CT_FLUSH_CACHE */
1074 u32 cid;
1075 u32 parm1;
1076 u32 parm2;
1077 u32 parm3;
1078 u32 parm4;
1079 u32 count; /* sizeof(((struct aac_synchronize_reply *)NULL)->data) */
1080};
1081
1082struct aac_synchronize_reply {
1083 u32 dummy0;
1084 u32 dummy1;
1085 u32 status; /* CT_OK */
1086 u32 parm1;
1087 u32 parm2;
1088 u32 parm3;
1089 u32 parm4;
1090 u32 parm5;
1091 u8 data[16];
1092};
1093
1094struct aac_srb
1095{
1096 u32 function;
1097 u32 channel;
1098 u32 id;
1099 u32 lun;
1100 u32 timeout;
1101 u32 flags;
1102 u32 count; // Data xfer size
1103 u32 retry_limit;
1104 u32 cdb_size;
1105 u8 cdb[16];
1106 struct sgmap sg;
1107};
1108
1109
1110
1111#define AAC_SENSE_BUFFERSIZE 30
1112
1113struct aac_srb_reply
1114{
1115 u32 status;
1116 u32 srb_status;
1117 u32 scsi_status;
1118 u32 data_xfer_length;
1119 u32 sense_data_size;
1120 u8 sense_data[AAC_SENSE_BUFFERSIZE]; // Can this be SCSI_SENSE_BUFFERSIZE
1121};
1122/*
1123 * SRB Flags
1124 */
1125#define SRB_NoDataXfer 0x0000
1126#define SRB_DisableDisconnect 0x0004
1127#define SRB_DisableSynchTransfer 0x0008
1128#define SRB_BypassFrozenQueue 0x0010
1129#define SRB_DisableAutosense 0x0020
1130#define SRB_DataIn 0x0040
1131#define SRB_DataOut 0x0080
1132
1133/*
1134 * SRB Functions - set in aac_srb->function
1135 */
1136#define SRBF_ExecuteScsi 0x0000
1137#define SRBF_ClaimDevice 0x0001
1138#define SRBF_IO_Control 0x0002
1139#define SRBF_ReceiveEvent 0x0003
1140#define SRBF_ReleaseQueue 0x0004
1141#define SRBF_AttachDevice 0x0005
1142#define SRBF_ReleaseDevice 0x0006
1143#define SRBF_Shutdown 0x0007
1144#define SRBF_Flush 0x0008
1145#define SRBF_AbortCommand 0x0010
1146#define SRBF_ReleaseRecovery 0x0011
1147#define SRBF_ResetBus 0x0012
1148#define SRBF_ResetDevice 0x0013
1149#define SRBF_TerminateIO 0x0014
1150#define SRBF_FlushQueue 0x0015
1151#define SRBF_RemoveDevice 0x0016
1152#define SRBF_DomainValidation 0x0017
1153
1154/*
1155 * SRB SCSI Status - set in aac_srb->scsi_status
1156 */
1157#define SRB_STATUS_PENDING 0x00
1158#define SRB_STATUS_SUCCESS 0x01
1159#define SRB_STATUS_ABORTED 0x02
1160#define SRB_STATUS_ABORT_FAILED 0x03
1161#define SRB_STATUS_ERROR 0x04
1162#define SRB_STATUS_BUSY 0x05
1163#define SRB_STATUS_INVALID_REQUEST 0x06
1164#define SRB_STATUS_INVALID_PATH_ID 0x07
1165#define SRB_STATUS_NO_DEVICE 0x08
1166#define SRB_STATUS_TIMEOUT 0x09
1167#define SRB_STATUS_SELECTION_TIMEOUT 0x0A
1168#define SRB_STATUS_COMMAND_TIMEOUT 0x0B
1169#define SRB_STATUS_MESSAGE_REJECTED 0x0D
1170#define SRB_STATUS_BUS_RESET 0x0E
1171#define SRB_STATUS_PARITY_ERROR 0x0F
1172#define SRB_STATUS_REQUEST_SENSE_FAILED 0x10
1173#define SRB_STATUS_NO_HBA 0x11
1174#define SRB_STATUS_DATA_OVERRUN 0x12
1175#define SRB_STATUS_UNEXPECTED_BUS_FREE 0x13
1176#define SRB_STATUS_PHASE_SEQUENCE_FAILURE 0x14
1177#define SRB_STATUS_BAD_SRB_BLOCK_LENGTH 0x15
1178#define SRB_STATUS_REQUEST_FLUSHED 0x16
1179#define SRB_STATUS_DELAYED_RETRY 0x17
1180#define SRB_STATUS_INVALID_LUN 0x20
1181#define SRB_STATUS_INVALID_TARGET_ID 0x21
1182#define SRB_STATUS_BAD_FUNCTION 0x22
1183#define SRB_STATUS_ERROR_RECOVERY 0x23
1184#define SRB_STATUS_NOT_STARTED 0x24
1185#define SRB_STATUS_NOT_IN_USE 0x30
1186#define SRB_STATUS_FORCE_ABORT 0x31
1187#define SRB_STATUS_DOMAIN_VALIDATION_FAIL 0x32
1188
1189/*
1190 * Object-Server / Volume-Manager Dispatch Classes
1191 */
1192
1193#define VM_Null 0
1194#define VM_NameServe 1
1195#define VM_ContainerConfig 2
1196#define VM_Ioctl 3
1197#define VM_FilesystemIoctl 4
1198#define VM_CloseAll 5
1199#define VM_CtBlockRead 6
1200#define VM_CtBlockWrite 7
1201#define VM_SliceBlockRead 8 /* raw access to configured "storage objects" */
1202#define VM_SliceBlockWrite 9
1203#define VM_DriveBlockRead 10 /* raw access to physical devices */
1204#define VM_DriveBlockWrite 11
1205#define VM_EnclosureMgt 12 /* enclosure management */
1206#define VM_Unused 13 /* used to be diskset management */
1207#define VM_CtBlockVerify 14
1208#define VM_CtPerf 15 /* performance test */
1209#define VM_CtBlockRead64 16
1210#define VM_CtBlockWrite64 17
1211#define VM_CtBlockVerify64 18
1212#define VM_CtHostRead64 19
1213#define VM_CtHostWrite64 20
1214
1215#define MAX_VMCOMMAND_NUM 21 /* used for sizing stats array - leave last */
1216
1217/*
 1218 *	Descriptive information (e.g., vital stats)
1219 * that a content manager might report. The
1220 * FileArray filesystem component is one example
1221 * of a content manager. Raw mode might be
1222 * another.
1223 */
1224
1225struct aac_fsinfo {
1226 u32 fsTotalSize; /* Consumed by fs, incl. metadata */
1227 u32 fsBlockSize;
1228 u32 fsFragSize;
1229 u32 fsMaxExtendSize;
1230 u32 fsSpaceUnits;
1231 u32 fsMaxNumFiles;
1232 u32 fsNumFreeFiles;
1233 u32 fsInodeDensity;
1234}; /* valid iff ObjType == FT_FILESYS && !(ContentState & FSCS_NOTCLEAN) */
1235
1236union aac_contentinfo {
1237 struct aac_fsinfo filesys; /* valid iff ObjType == FT_FILESYS && !(ContentState & FSCS_NOTCLEAN) */
1238};
1239
1240/*
1241 * Query for Container Configuration Status
1242 */
1243
1244#define CT_GET_CONFIG_STATUS 147
1245struct aac_get_config_status {
1246 u32 command; /* VM_ContainerConfig */
1247 u32 type; /* CT_GET_CONFIG_STATUS */
1248 u32 parm1;
1249 u32 parm2;
1250 u32 parm3;
1251 u32 parm4;
1252 u32 parm5;
1253 u32 count; /* sizeof(((struct aac_get_config_status_resp *)NULL)->data) */
1254};
1255
1256#define CFACT_CONTINUE 0
1257#define CFACT_PAUSE 1
1258#define CFACT_ABORT 2
1259struct aac_get_config_status_resp {
1260 u32 response; /* ST_OK */
1261 u32 dummy0;
1262 u32 status; /* CT_OK */
1263 u32 parm1;
1264 u32 parm2;
1265 u32 parm3;
1266 u32 parm4;
1267 u32 parm5;
1268 struct {
1269 u32 action; /* CFACT_CONTINUE, CFACT_PAUSE or CFACT_ABORT */
1270 u16 flags;
1271 s16 count;
1272 } data;
1273};
1274
1275/*
1276 * Accept the configuration as-is
1277 */
1278
1279#define CT_COMMIT_CONFIG 152
1280
1281struct aac_commit_config {
1282 u32 command; /* VM_ContainerConfig */
1283 u32 type; /* CT_COMMIT_CONFIG */
1284};
1285
1286/*
1287 * Query for Container Configuration Count
1288 */
1289
1290#define CT_GET_CONTAINER_COUNT 4
1291struct aac_get_container_count {
1292 u32 command; /* VM_ContainerConfig */
1293 u32 type; /* CT_GET_CONTAINER_COUNT */
1294};
1295
1296struct aac_get_container_count_resp {
1297 u32 response; /* ST_OK */
1298 u32 dummy0;
1299 u32 MaxContainers;
1300 u32 ContainerSwitchEntries;
1301 u32 MaxPartitions;
1302};
1303
1304
1305/*
 1306 * Query for "mountable" objects, i.e., objects that are typically
1307 * associated with a drive letter on the client (host) side.
1308 */
1309
1310struct aac_mntent {
1311 u32 oid;
1312 u8 name[16]; // if applicable
1313 struct creation_info create_info; // if applicable
1314 u32 capacity;
1315 u32 vol; // substrate structure
1316 u32 obj; // FT_FILESYS, FT_DATABASE, etc.
1317 u32 state; // unready for mounting, readonly, etc.
 1318	union aac_contentinfo fileinfo;	// Info specific to content manager (e.g., filesystem)
1319 u32 altoid; // != oid <==> snapshot or broken mirror exists
1320};
1321
 1322#define FSCS_NOTCLEAN	0x0001  /* fsck is necessary before mounting */
1323#define FSCS_READONLY 0x0002 /* possible result of broken mirror */
1324#define FSCS_HIDDEN 0x0004 /* should be ignored - set during a clear */
1325
1326struct aac_query_mount {
1327 u32 command;
1328 u32 type;
1329 u32 count;
1330};
1331
1332struct aac_mount {
1333 u32 status;
1334 u32 type; /* should be same as that requested */
1335 u32 count;
1336 struct aac_mntent mnt[1];
1337};
1338
1339#define CT_READ_NAME 130
1340struct aac_get_name {
1341 u32 command; /* VM_ContainerConfig */
1342 u32 type; /* CT_READ_NAME */
1343 u32 cid;
1344 u32 parm1;
1345 u32 parm2;
1346 u32 parm3;
1347 u32 parm4;
1348 u32 count; /* sizeof(((struct aac_get_name_resp *)NULL)->data) */
1349};
1350
1351#define CT_OK 218
1352struct aac_get_name_resp {
1353 u32 dummy0;
1354 u32 dummy1;
1355 u32 status; /* CT_OK */
1356 u32 parm1;
1357 u32 parm2;
1358 u32 parm3;
1359 u32 parm4;
1360 u32 parm5;
1361 u8 data[16];
1362};
1363
1364/*
1365 * The following command is sent to shut down each container.
1366 */
1367
1368struct aac_close {
1369 u32 command;
1370 u32 cid;
1371};
1372
1373struct aac_query_disk
1374{
1375 s32 cnum;
1376 s32 bus;
1377 s32 id;
1378 s32 lun;
1379 u32 valid;
1380 u32 locked;
1381 u32 deleted;
1382 s32 instance;
1383 s8 name[10];
1384 u32 unmapped;
1385};
1386
1387struct aac_delete_disk {
1388 u32 disknum;
1389 u32 cnum;
1390};
1391
1392struct fib_ioctl
1393{
1394 u32 fibctx;
1395 s32 wait;
1396 char __user *fib;
1397};
1398
1399struct revision
1400{
1401 u32 compat;
1402 u32 version;
1403 u32 build;
1404};
1405
1406/*
 1407 *	Ugly - non-Linux-like ioctl coding, kept for backwards compatibility.
1408 */
1409
1410#define CTL_CODE(function, method) ( \
1411 (4<< 16) | ((function) << 2) | (method) \
1412)
1413
1414/*
1415 * Define the method codes for how buffers are passed for I/O and FS
1416 * controls
1417 */
1418
1419#define METHOD_BUFFERED 0
1420#define METHOD_NEITHER 3
1421
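/*
 * Editor's note (worked example): FSACTL_SENDFIB below is
 * CTL_CODE(2050, METHOD_BUFFERED), which expands to
 * (4 << 16) | (2050 << 2) | 0 == 0x42008.
 */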
1422/*
1423 * Filesystem ioctls
1424 */
1425
1426#define FSACTL_SENDFIB CTL_CODE(2050, METHOD_BUFFERED)
1427#define FSACTL_SEND_RAW_SRB CTL_CODE(2067, METHOD_BUFFERED)
1428#define FSACTL_DELETE_DISK 0x163
1429#define FSACTL_QUERY_DISK 0x173
1430#define FSACTL_OPEN_GET_ADAPTER_FIB CTL_CODE(2100, METHOD_BUFFERED)
1431#define FSACTL_GET_NEXT_ADAPTER_FIB CTL_CODE(2101, METHOD_BUFFERED)
1432#define FSACTL_CLOSE_GET_ADAPTER_FIB CTL_CODE(2102, METHOD_BUFFERED)
1433#define FSACTL_MINIPORT_REV_CHECK CTL_CODE(2107, METHOD_BUFFERED)
1434#define FSACTL_GET_PCI_INFO CTL_CODE(2119, METHOD_BUFFERED)
1435#define FSACTL_FORCE_DELETE_DISK CTL_CODE(2120, METHOD_NEITHER)
1436#define FSACTL_GET_CONTAINERS 2131
1437
1438
1439struct aac_common
1440{
1441 /*
1442 * If this value is set to 1 then interrupt moderation will occur
 1443	 *	in the base communication support.
1444 */
1445 u32 irq_mod;
1446 u32 peak_fibs;
1447 u32 zero_fibs;
1448 u32 fib_timeouts;
1449 /*
1450 * Statistical counters in debug mode
1451 */
1452#ifdef DBG
1453 u32 FibsSent;
1454 u32 FibRecved;
1455 u32 NoResponseSent;
1456 u32 NoResponseRecved;
1457 u32 AsyncSent;
1458 u32 AsyncRecved;
1459 u32 NormalSent;
1460 u32 NormalRecved;
1461#endif
1462};
1463
1464extern struct aac_common aac_config;
1465
1466
1467/*
1468 * The following macro is used when sending and receiving FIBs. It is
1469 * only used for debugging.
1470 */
1471
1472#ifdef DBG
1473#define FIB_COUNTER_INCREMENT(counter) (counter)++
1474#else
1475#define FIB_COUNTER_INCREMENT(counter)
1476#endif
1477
1478/*
1479 * Adapter direct commands
1480 * Monitor/Kernel API
1481 */
1482
1483#define BREAKPOINT_REQUEST 0x00000004
1484#define INIT_STRUCT_BASE_ADDRESS 0x00000005
1485#define READ_PERMANENT_PARAMETERS 0x0000000a
1486#define WRITE_PERMANENT_PARAMETERS 0x0000000b
1487#define HOST_CRASHING 0x0000000d
1488#define SEND_SYNCHRONOUS_FIB 0x0000000c
1489#define COMMAND_POST_RESULTS 0x00000014
1490#define GET_ADAPTER_PROPERTIES 0x00000019
1491#define GET_DRIVER_BUFFER_PROPERTIES 0x00000023
1492#define RCV_TEMP_READINGS 0x00000025
1493#define GET_COMM_PREFERRED_SETTINGS 0x00000026
1494#define IOP_RESET 0x00001000
1495#define RE_INIT_ADAPTER 0x000000ee
1496
1497/*
1498 * Adapter Status Register
1499 *
 1500 *	Phase Status mailbox is 32 bits:
1501 * <31:16> = Phase Status
1502 * <15:0> = Phase
1503 *
 1504 *	The adapter reports its present state through the phase.  Only
 1505 *	a single phase should ever be set.  Each phase can have multiple
1506 * phase status bits to provide more detailed information about the
1507 * state of the board. Care should be taken to ensure that any phase
1508 * status bits that are set when changing the phase are also valid
1509 * for the new phase or be cleared out. Adapter software (monitor,
 1510 *	iflash, kernel) is responsible for properly maintaining the phase
1511 * status mailbox when it is running.
1512 *
1513 * MONKER_API Phases
1514 *
1515 * Phases are bit oriented. It is NOT valid to have multiple bits set
1516 */
1517
1518#define SELF_TEST_FAILED 0x00000004
1519#define MONITOR_PANIC 0x00000020
1520#define KERNEL_UP_AND_RUNNING 0x00000080
1521#define KERNEL_PANIC 0x00000100
1522
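/*
 * Editor's sketch (hypothetical helper): decoding the phase/status
 * mailbox per the layout described above, where <15:0> is the phase.
 */
static inline int aac_phase_is_fatal(u32 phase_status)
{
	u32 phase = phase_status & 0xffff;

	return (phase & (SELF_TEST_FAILED | MONITOR_PANIC |
			 KERNEL_PANIC)) != 0;
}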
1523/*
1524 * Doorbell bit defines
1525 */
1526
1527#define DoorBellSyncCmdAvailable (1<<0) /* Host -> Adapter */
1528#define DoorBellPrintfDone (1<<5) /* Host -> Adapter */
1529#define DoorBellAdapterNormCmdReady (1<<1) /* Adapter -> Host */
1530#define DoorBellAdapterNormRespReady (1<<2) /* Adapter -> Host */
1531#define DoorBellAdapterNormCmdNotFull (1<<3) /* Adapter -> Host */
1532#define DoorBellAdapterNormRespNotFull (1<<4) /* Adapter -> Host */
1533#define DoorBellPrintfReady (1<<5) /* Adapter -> Host */
1534
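/*
 * Hypothetical helper, for illustration only: test a doorbell word
 * read from the adapter for the normal response notification bit.
 */
static inline int aac_doorbell_resp_ready(u32 doorbell)
{
	return (doorbell & DoorBellAdapterNormRespReady) != 0;
}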
1535/*
1536 * For FIB communication, all of the following AIF command and
1537 * event codes may be sent back to the user.
1538 */
1539
1540#define AifCmdEventNotify 1 /* Notify of event */
1541#define AifEnConfigChange 3 /* Adapter configuration change */
1542#define AifEnContainerChange 4 /* Container configuration change */
1543#define AifEnDeviceFailure 5 /* SCSI device failed */
1544#define AifEnAddContainer 15 /* A new array was created */
1545#define AifEnDeleteContainer 16 /* A container was deleted */
1546#define AifEnExpEvent 23 /* Firmware Event Log */
1547#define AifExeFirmwarePanic 3 /* Firmware Event Panic */
1548#define AifHighPriority 3 /* Highest Priority Event */
1549
1550#define AifCmdJobProgress 2 /* Progress report */
1551#define AifJobCtrZero 101 /* Array Zero progress */
1552#define AifJobStsSuccess 1 /* Job completes */
1553#define AifCmdAPIReport 3 /* Report from other user of API */
1554#define AifCmdDriverNotify 4 /* Notify host driver of event */
1555#define AifDenMorphComplete 200 /* A morph operation completed */
1556#define AifDenVolumeExtendComplete 201 /* A volume extend completed */
1557#define AifReqJobList 100 /* Gets back complete job list */
1558#define AifReqJobsForCtr 101 /* Gets back jobs for specific container */
1559#define AifReqJobsForScsi 102 /* Gets back jobs for specific SCSI device */
1560#define AifReqJobReport 103 /* Gets back a specific job report or list of them */
1561#define AifReqTerminateJob 104 /* Terminates job */
1562#define AifReqSuspendJob 105 /* Suspends a job */
1563#define AifReqResumeJob 106 /* Resumes a job */
1564#define AifReqSendAPIReport 107 /* API generic report requests */
1565#define AifReqAPIJobStart 108 /* Start a job from the API */
1566#define AifReqAPIJobUpdate 109 /* Update a job report from the API */
1567#define AifReqAPIJobFinish 110 /* Finish a job from the API */
1568
1569/*
1570 * Adapter Initiated FIB command structures. Start with the adapter
1571 * initiated FIBs that really come from the adapter, and get responded
1572 * to by the host.
1573 */
1574
1575struct aac_aifcmd {
1576 u32 command; /* Tell host what type of notify this is */
1577 u32 seqnum; /* To allow ordering of reports (if necessary) */
1578 u8 data[1]; /* Undefined length (from kernel viewpoint) */
1579};
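/*
 * Decoding sketch: the command field is little endian on the wire, so
 * a dispatcher compares it against cpu_to_le32() encoded constants,
 * as aac_command_thread in commsup.c does. Hypothetical helper:
 */
static inline int aac_aif_is_driver_notify(struct aac_aifcmd *aif)
{
	return aif->command == cpu_to_le32(AifCmdDriverNotify);
}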
1580
1581/**
1582 * cap_to_cyls - convert capacity to cylinders,
1583 * accounting for the fact that capacity could be a 64 bit value
1584 *
1585 */
1586static inline u32 cap_to_cyls(sector_t capacity, u32 divisor)
1587{
1588 sector_div(capacity, divisor);
1589 return (u32)capacity;
1590}
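/*
 * Usage sketch with illustrative values: a 255 head x 63 sector
 * translation yields the cylinder count for a (possibly 64 bit)
 * capacity without needing a 64 bit division on 32 bit hosts.
 */
static inline u32 aac_example_cylinders(sector_t capacity)
{
	return cap_to_cyls(capacity, 255 * 63);	/* hypothetical helper */
}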
1591
1592struct scsi_cmnd;
1593
1594const char *aac_driverinfo(struct Scsi_Host *);
1595struct fib *fib_alloc(struct aac_dev *dev);
1596int fib_setup(struct aac_dev *dev);
1597void fib_map_free(struct aac_dev *dev);
1598void fib_free(struct fib * context);
1599void fib_init(struct fib * context);
1600void fib_dealloc(struct fib * context);
1601void aac_printf(struct aac_dev *dev, u32 val);
1602int fib_send(u16 command, struct fib * context, unsigned long size, int priority, int wait, int reply, fib_callback callback, void *ctxt);
1603int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry);
1604void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum);
1605int fib_complete(struct fib * context);
1606#define fib_data(fibctx) ((void *)(fibctx)->hw_fib->data)
1607struct aac_dev *aac_init_adapter(struct aac_dev *dev);
1608int aac_get_config_status(struct aac_dev *dev);
1609int aac_get_containers(struct aac_dev *dev);
1610int aac_scsi_cmd(struct scsi_cmnd *cmd);
1611int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg);
1612int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg);
1613int aac_rx_init(struct aac_dev *dev);
1614int aac_rkt_init(struct aac_dev *dev);
1615int aac_sa_init(struct aac_dev *dev);
1616unsigned int aac_response_normal(struct aac_queue * q);
1617unsigned int aac_command_normal(struct aac_queue * q);
1618int aac_command_thread(struct aac_dev * dev);
1619int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx);
1620int fib_adapter_complete(struct fib * fibptr, unsigned short size);
1621struct aac_driver_ident* aac_get_driver_ident(int devtype);
1622int aac_get_adapter_info(struct aac_dev* dev);
1623int aac_send_shutdown(struct aac_dev *dev);
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
new file mode 100644
index 000000000000..30dd1f7120f4
--- /dev/null
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -0,0 +1,683 @@
1/*
2 * Adaptec AAC series RAID controller driver
3 * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
4 *
5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux.
7 *
8 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; see the file COPYING. If not, write to
22 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 * Module Name:
25 * commctrl.c
26 *
27 * Abstract: Contains all routines for control of the AFA comm layer
28 *
29 */
30
31#include <linux/kernel.h>
32#include <linux/init.h>
33#include <linux/types.h>
34#include <linux/sched.h>
35#include <linux/pci.h>
36#include <linux/spinlock.h>
37#include <linux/slab.h>
38#include <linux/completion.h>
39#include <linux/dma-mapping.h>
40#include <linux/blkdev.h>
41#include <asm/semaphore.h>
42#include <asm/uaccess.h>
43
44#include "aacraid.h"
45
46/**
47 * ioctl_send_fib - send a FIB from userspace
48 * @dev: adapter being processed
49 * @arg: arguments to the ioctl call
50 *
51 * This routine sends a fib to the adapter on behalf of a user level
52 * program.
53 */
54
55static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
56{
57 struct hw_fib * kfib;
58 struct fib *fibptr;
59
60 fibptr = fib_alloc(dev);
61 if(fibptr == NULL)
62 return -ENOMEM;
63
64 kfib = fibptr->hw_fib;
65 /*
66 * First copy in the header so that we can check the size field.
67 */
68 if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) {
69 fib_free(fibptr);
70 return -EFAULT;
71 }
72 /*
73 * Since we copy based on the fib header size, make sure that we
74 * will not overrun the buffer when we copy the memory. Return
75 * an error if we would.
76 */
77 if (le16_to_cpu(kfib->header.Size) >
78 sizeof(struct hw_fib) - sizeof(struct aac_fibhdr)) {
79 fib_free(fibptr);
80 return -EINVAL;
81 }
82
83 if (copy_from_user(kfib, arg, le16_to_cpu(kfib->header.Size) +
84 sizeof(struct aac_fibhdr))) {
85 fib_free(fibptr);
86 return -EFAULT;
87 }
88
89 if (kfib->header.Command == cpu_to_le32(TakeABreakPt)) {
90 aac_adapter_interrupt(dev);
91 /*
92 * Since we didn't really send a fib, zero out the state to allow
93 * cleanup code not to assert.
94 */
95 kfib->header.XferState = 0;
96 } else {
97 int retval = fib_send(kfib->header.Command, fibptr,
98 le16_to_cpu(kfib->header.Size) , FsaNormal,
99 1, 1, NULL, NULL);
100 if (retval) {
101 fib_free(fibptr);
102 return retval;
103 }
104 if (fib_complete(fibptr) != 0) {
105 fib_free(fibptr);
106 return -EINVAL;
107 }
108 }
109 /*
110 * Make sure that the size returned by the adapter (which includes
111 * the header) is less than or equal to the size of a fib, so we
112 * don't corrupt application data. Then copy that size to the user
113 * buffer. (Don't try to add the header information again, since it
114 * was already included by the adapter.)
115 */
116
117	if (copy_to_user(arg, (void *)kfib, le16_to_cpu(kfib->header.Size))) {
118 fib_free(fibptr);
119 return -EFAULT;
120 }
121 fib_free(fibptr);
122 return 0;
123}
124
125/**
126 * open_getadapter_fib - create an adapter fib context
127 * @dev: adapter to use
128 * @arg: ioctl argument; the unique handle of the newly created
129 * AdapterFibContext is copied back to the user through it.
130 */
131
132static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
133{
134 struct aac_fib_context * fibctx;
135 int status;
136
137 fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL);
138 if (fibctx == NULL) {
139 status = -ENOMEM;
140 } else {
141 unsigned long flags;
142 struct list_head * entry;
143 struct aac_fib_context * context;
144
145 fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
146 fibctx->size = sizeof(struct aac_fib_context);
147 /*
148 * Yes yes, I know this could be an index, but we have a
149 * better guarantee of uniqueness for the locked loop below.
150 * Without the aid of a persistent history, this also helps
151 * reduce the chance that the opaque context would be reused.
152 */
153 fibctx->unique = (u32)((ulong)fibctx & 0xFFFFFFFF);
154 /*
155 * Initialize the mutex used to wait for the next AIF.
156 */
157 init_MUTEX_LOCKED(&fibctx->wait_sem);
158 fibctx->wait = 0;
159 /*
160 * Initialize the fibs and set the count of fibs on
161 * the list to 0.
162 */
163 fibctx->count = 0;
164 INIT_LIST_HEAD(&fibctx->fib_list);
165 fibctx->jiffies = jiffies/HZ;
166 /*
167 * Now add this context onto the adapter's
168 * AdapterFibContext list.
169 */
170 spin_lock_irqsave(&dev->fib_lock, flags);
171 /* Ensure that we have a unique identifier */
172 entry = dev->fib_list.next;
173 while (entry != &dev->fib_list) {
174 context = list_entry(entry, struct aac_fib_context, next);
175 if (context->unique == fibctx->unique) {
176 /* Not unique (32 bits) */
177 fibctx->unique++;
178 entry = dev->fib_list.next;
179 } else {
180 entry = entry->next;
181 }
182 }
183 list_add_tail(&fibctx->next, &dev->fib_list);
184 spin_unlock_irqrestore(&dev->fib_lock, flags);
185 if (copy_to_user(arg, &fibctx->unique,
186 sizeof(fibctx->unique))) {
187 status = -EFAULT;
188 } else {
189 status = 0;
190 }
191 }
192 return status;
193}
194
195/**
196 * next_getadapter_fib - get the next fib
197 * @dev: adapter to use
198 * @arg: ioctl argument
199 *
200 * This routine will get the next Fib, if available, from the AdapterFibContext
201 * passed in from the user.
202 */
203
204static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
205{
206 struct fib_ioctl f;
207 struct fib *fib;
208 struct aac_fib_context *fibctx;
209 int status;
210 struct list_head * entry;
211 unsigned long flags;
212
213 if(copy_from_user((void *)&f, arg, sizeof(struct fib_ioctl)))
214 return -EFAULT;
215 /*
216 * Verify that the HANDLE passed in was a valid AdapterFibContext
217 *
218 * Search the list of AdapterFibContext addresses on the adapter
219 * to be sure this is a valid address
220 */
221 entry = dev->fib_list.next;
222 fibctx = NULL;
223
224 while (entry != &dev->fib_list) {
225 fibctx = list_entry(entry, struct aac_fib_context, next);
226 /*
227 * Extract the AdapterFibContext from the Input parameters.
228 */
229 if (fibctx->unique == f.fibctx) { /* We found a winner */
230 break;
231 }
232 entry = entry->next;
233 fibctx = NULL;
234 }
235 if (!fibctx) {
236 dprintk ((KERN_INFO "Fib Context not found\n"));
237 return -EINVAL;
238 }
239
240 if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
241 (fibctx->size != sizeof(struct aac_fib_context))) {
242 dprintk ((KERN_INFO "Fib Context corrupt?\n"));
243 return -EINVAL;
244 }
245 status = 0;
246 spin_lock_irqsave(&dev->fib_lock, flags);
247 /*
248 * If there are no fibs to send back, then either wait or return
249 * -EAGAIN
250 */
251return_fib:
252 if (!list_empty(&fibctx->fib_list)) {
253 struct list_head * entry;
254 /*
255 * Pull the next fib from the fibs
256 */
257 entry = fibctx->fib_list.next;
258 list_del(entry);
259
260 fib = list_entry(entry, struct fib, fiblink);
261 fibctx->count--;
262 spin_unlock_irqrestore(&dev->fib_lock, flags);
263 if (copy_to_user(f.fib, fib->hw_fib, sizeof(struct hw_fib))) {
264 kfree(fib->hw_fib);
265 kfree(fib);
266 return -EFAULT;
267 }
268 /*
269 * Free the space occupied by this copy of the fib.
270 */
271 kfree(fib->hw_fib);
272 kfree(fib);
273 status = 0;
274 fibctx->jiffies = jiffies/HZ;
275 } else {
276 spin_unlock_irqrestore(&dev->fib_lock, flags);
277 if (f.wait) {
278 if(down_interruptible(&fibctx->wait_sem) < 0) {
279 status = -EINTR;
280 } else {
281 /* Lock again and retry */
282 spin_lock_irqsave(&dev->fib_lock, flags);
283 goto return_fib;
284 }
285 } else {
286 status = -EAGAIN;
287 }
288 }
289 return status;
290}
291
292int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
293{
294 struct fib *fib;
295
296 /*
297 * First free any FIBs that have not been consumed.
298 */
299 while (!list_empty(&fibctx->fib_list)) {
300 struct list_head * entry;
301 /*
302 * Pull the next fib from the fibs
303 */
304 entry = fibctx->fib_list.next;
305 list_del(entry);
306 fib = list_entry(entry, struct fib, fiblink);
307 fibctx->count--;
308 /*
309 * Free the space occupied by this copy of the fib.
310 */
311 kfree(fib->hw_fib);
312 kfree(fib);
313 }
314 /*
315 * Remove the Context from the AdapterFibContext List
316 */
317 list_del(&fibctx->next);
318 /*
319 * Invalidate context
320 */
321 fibctx->type = 0;
322 /*
323 * Free the space occupied by the Context
324 */
325 kfree(fibctx);
326 return 0;
327}
328
329/**
330 * close_getadapter_fib - close down user fib context
331 * @dev: adapter
332 * @arg: ioctl arguments
333 *
334 * This routine will close down the fibctx passed in from the user.
335 */
336
337static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
338{
339 struct aac_fib_context *fibctx;
340 int status;
341 unsigned long flags;
342 struct list_head * entry;
343
344 /*
345 * Verify that the HANDLE passed in was a valid AdapterFibContext
346 *
347 * Search the list of AdapterFibContext addresses on the adapter
348 * to be sure this is a valid address
349 */
350
351 entry = dev->fib_list.next;
352 fibctx = NULL;
353
354 while(entry != &dev->fib_list) {
355 fibctx = list_entry(entry, struct aac_fib_context, next);
356 /*
357 * Extract the fibctx from the input parameters
358 */
359 if (fibctx->unique == (u32)(unsigned long)arg) {
360 /* We found a winner */
361 break;
362 }
363 entry = entry->next;
364 fibctx = NULL;
365 }
366
367 if (!fibctx)
368 return 0; /* Already gone */
369
370 if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
371 (fibctx->size != sizeof(struct aac_fib_context)))
372 return -EINVAL;
373 spin_lock_irqsave(&dev->fib_lock, flags);
374 status = aac_close_fib_context(dev, fibctx);
375 spin_unlock_irqrestore(&dev->fib_lock, flags);
376 return status;
377}
378
379/**
380 * check_revision - report driver revision to user
381 * @dev: adapter
382 * @arg: ioctl arguments
383 *
384 * This routine returns the driver version.
385 * Under Linux, there have been no version incompatibilities, so this is
386 * simple!
387 */
388
389static int check_revision(struct aac_dev *dev, void __user *arg)
390{
391 struct revision response;
392
393 response.compat = 1;
394 response.version = dev->adapter_info.kernelrev;
395 response.build = dev->adapter_info.kernelbuild;
396
397 if (copy_to_user(arg, &response, sizeof(response)))
398 return -EFAULT;
399 return 0;
400}
401
402/**
403 * aac_send_raw_srb - send a raw SRB from userspace
404 * @dev: adapter
405 * @arg: ioctl arguments
406 */
407
408int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
409{
410 struct fib* srbfib;
411 int status;
412 struct aac_srb *srbcmd;
413 struct aac_srb __user *user_srb = arg;
414 struct aac_srb_reply __user *user_reply;
415 struct aac_srb_reply* reply;
416 u32 fibsize = 0;
417 u32 flags = 0;
418 s32 rcode = 0;
419 u32 data_dir;
420 void __user *sg_user[32];
421 void *sg_list[32];
422 u32 sg_indx = 0;
423 u32 byte_count = 0;
424 u32 actual_fibsize = 0;
425 int i;
426
427
428 if (!capable(CAP_SYS_ADMIN)){
429 printk(KERN_DEBUG"aacraid: No permission to send raw srb\n");
430 return -EPERM;
431 }
432 /*
433	 * Allocate and initialize a Fib, then set up the SRB command
434 */
435 if (!(srbfib = fib_alloc(dev))) {
436		return -ENOMEM;
437 }
438 fib_init(srbfib);
439
440 srbcmd = (struct aac_srb*) fib_data(srbfib);
441
442 if(copy_from_user(&fibsize, &user_srb->count,sizeof(u32))){
443 printk(KERN_DEBUG"aacraid: Could not copy data size from user\n");
444 rcode = -EFAULT;
445 goto cleanup;
446 }
447
448 if (fibsize > FIB_DATA_SIZE_IN_BYTES) {
449 rcode = -EINVAL;
450 goto cleanup;
451 }
452
453 if(copy_from_user(srbcmd, user_srb,fibsize)){
454 printk(KERN_DEBUG"aacraid: Could not copy srb from user\n");
455 rcode = -EFAULT;
456 goto cleanup;
457 }
458
459 user_reply = arg+fibsize;
460
461 flags = srbcmd->flags;
462 // Fix up srb for endian and force some values
463 srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this
464 srbcmd->channel = cpu_to_le32(srbcmd->channel);
465 srbcmd->id = cpu_to_le32(srbcmd->id);
466 srbcmd->lun = cpu_to_le32(srbcmd->lun);
467 srbcmd->flags = cpu_to_le32(srbcmd->flags);
468 srbcmd->timeout = cpu_to_le32(srbcmd->timeout);
469 srbcmd->retry_limit =cpu_to_le32(0); // Obsolete parameter
470 srbcmd->cdb_size = cpu_to_le32(srbcmd->cdb_size);
471
472 switch (srbcmd->flags & (SRB_DataIn | SRB_DataOut)) {
473 case SRB_DataOut:
474 data_dir = DMA_TO_DEVICE;
475 break;
476 case (SRB_DataIn | SRB_DataOut):
477 data_dir = DMA_BIDIRECTIONAL;
478 break;
479 case SRB_DataIn:
480 data_dir = DMA_FROM_DEVICE;
481 break;
482 default:
483 data_dir = DMA_NONE;
484 }
485 if (dev->dac_support == 1) {
486 struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg;
487 byte_count = 0;
488
489 /*
490 * This should also catch if user used the 32 bit sgmap
491 */
492 actual_fibsize = sizeof(struct aac_srb) -
493 sizeof(struct sgentry) + ((srbcmd->sg.count & 0xff) *
494 sizeof(struct sgentry64));
495 if(actual_fibsize != fibsize){ // User made a mistake - should not continue
496 printk(KERN_DEBUG"aacraid: Bad Size specified in Raw SRB command\n");
497 rcode = -EINVAL;
498 goto cleanup;
499 }
500 if ((data_dir == DMA_NONE) && psg->count) {
501 printk(KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n");
502 rcode = -EINVAL;
503 goto cleanup;
504 }
505
506 for (i = 0; i < psg->count; i++) {
507 dma_addr_t addr;
508 u64 le_addr;
509 void* p;
510 p = kmalloc(psg->sg[i].count,GFP_KERNEL|__GFP_DMA);
511 if(p == 0) {
512 printk(KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
513 psg->sg[i].count,i,psg->count);
514 rcode = -ENOMEM;
515 goto cleanup;
516 }
517 sg_user[i] = (void __user *)psg->sg[i].addr;
518 sg_list[i] = p; // save so we can clean up later
519 sg_indx = i;
520
521 if( flags & SRB_DataOut ){
522 if(copy_from_user(p,sg_user[i],psg->sg[i].count)){
523 printk(KERN_DEBUG"aacraid: Could not copy sg data from user\n");
524 rcode = -EFAULT;
525 goto cleanup;
526 }
527 }
528 addr = pci_map_single(dev->pdev, p, psg->sg[i].count, data_dir);
529
530 le_addr = cpu_to_le64(addr);
531 psg->sg[i].addr[1] = (u32)(le_addr>>32);
532 psg->sg[i].addr[0] = (u32)(le_addr & 0xffffffff);
533 psg->sg[i].count = cpu_to_le32(psg->sg[i].count);
534 byte_count += psg->sg[i].count;
535 }
536
537 srbcmd->count = cpu_to_le32(byte_count);
538 status = fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL);
539 } else {
540 struct sgmap* psg = &srbcmd->sg;
541 byte_count = 0;
542
543 actual_fibsize = sizeof (struct aac_srb) +
544 (((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) *
545 sizeof (struct sgentry));
546 if(actual_fibsize != fibsize){ // User made a mistake - should not continue
547 printk(KERN_DEBUG"aacraid: Bad Size specified in Raw SRB command\n");
548 rcode = -EINVAL;
549 goto cleanup;
550 }
551 if ((data_dir == DMA_NONE) && psg->count) {
552 printk(KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n");
553 rcode = -EINVAL;
554 goto cleanup;
555 }
556 for (i = 0; i < psg->count; i++) {
557 dma_addr_t addr;
558 void* p;
559 p = kmalloc(psg->sg[i].count,GFP_KERNEL);
560 if(p == 0) {
561 printk(KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
562 psg->sg[i].count,i,psg->count);
563 rcode = -ENOMEM;
564 goto cleanup;
565 }
566 sg_user[i] = (void __user *)(psg->sg[i].addr);
567 sg_list[i] = p; // save so we can clean up later
568 sg_indx = i;
569
570 if( flags & SRB_DataOut ){
571 if(copy_from_user(p,sg_user[i],psg->sg[i].count)){
572 printk(KERN_DEBUG"aacraid: Could not copy sg data from user\n");
573 rcode = -EFAULT;
574 goto cleanup;
575 }
576 }
577 addr = pci_map_single(dev->pdev, p, psg->sg[i].count, data_dir);
578
579 psg->sg[i].addr = cpu_to_le32(addr);
580 psg->sg[i].count = cpu_to_le32(psg->sg[i].count);
581 byte_count += psg->sg[i].count;
582 }
583 srbcmd->count = cpu_to_le32(byte_count);
584 status = fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
585 }
586
587 if (status != 0){
588 printk(KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n");
589 rcode = -1;
590 goto cleanup;
591 }
592
593 if( flags & SRB_DataIn ) {
594 for(i = 0 ; i <= sg_indx; i++){
595 if(copy_to_user(sg_user[i],sg_list[i],le32_to_cpu(srbcmd->sg.sg[i].count))){
596 printk(KERN_DEBUG"aacraid: Could not copy sg data to user\n");
597 rcode = -EFAULT;
598 goto cleanup;
599
600 }
601 }
602 }
603
604 reply = (struct aac_srb_reply *) fib_data(srbfib);
605 if(copy_to_user(user_reply,reply,sizeof(struct aac_srb_reply))){
606 printk(KERN_DEBUG"aacraid: Could not copy reply to user\n");
607 rcode = -EFAULT;
608 goto cleanup;
609 }
610
611cleanup:
612 for(i=0; i <= sg_indx; i++){
613 kfree(sg_list[i]);
614 }
615 fib_complete(srbfib);
616 fib_free(srbfib);
617
618 return rcode;
619}
620
621
622struct aac_pci_info {
623 u32 bus;
624 u32 slot;
625};
626
627
628int aac_get_pci_info(struct aac_dev* dev, void __user *arg)
629{
630 struct aac_pci_info pci_info;
631
632 pci_info.bus = dev->pdev->bus->number;
633 pci_info.slot = PCI_SLOT(dev->pdev->devfn);
634
635 if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
636 printk(KERN_DEBUG "aacraid: Could not copy pci info\n");
637 return -EFAULT;
638 }
639 return 0;
640 }
641
642
643int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
644{
645 int status;
646
647 /*
648 * HBA gets first crack
649 */
650
651 status = aac_dev_ioctl(dev, cmd, arg);
652 if(status != -ENOTTY)
653 return status;
654
655 switch (cmd) {
656 case FSACTL_MINIPORT_REV_CHECK:
657 status = check_revision(dev, arg);
658 break;
659 case FSACTL_SENDFIB:
660 status = ioctl_send_fib(dev, arg);
661 break;
662 case FSACTL_OPEN_GET_ADAPTER_FIB:
663 status = open_getadapter_fib(dev, arg);
664 break;
665 case FSACTL_GET_NEXT_ADAPTER_FIB:
666 status = next_getadapter_fib(dev, arg);
667 break;
668 case FSACTL_CLOSE_GET_ADAPTER_FIB:
669 status = close_getadapter_fib(dev, arg);
670 break;
671 case FSACTL_SEND_RAW_SRB:
672 status = aac_send_raw_srb(dev,arg);
673 break;
674 case FSACTL_GET_PCI_INFO:
675 status = aac_get_pci_info(dev,arg);
676 break;
677 default:
678 status = -ENOTTY;
679 break;
680 }
681 return status;
682}
683
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
new file mode 100644
index 000000000000..6832a55ca907
--- /dev/null
+++ b/drivers/scsi/aacraid/comminit.c
@@ -0,0 +1,325 @@
1/*
2 * Adaptec AAC series RAID controller driver
3 * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
4 *
5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux.
7 *
8 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; see the file COPYING. If not, write to
22 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 * Module Name:
25 * comminit.c
26 *
27 * Abstract: This supports the initialization of the host adapter communication interface.
28 * This is a platform dependent module for the pci cyclone board.
29 *
30 */
31
32#include <linux/kernel.h>
33#include <linux/init.h>
34#include <linux/types.h>
35#include <linux/sched.h>
36#include <linux/pci.h>
37#include <linux/spinlock.h>
38#include <linux/slab.h>
39#include <linux/blkdev.h>
40#include <linux/completion.h>
41#include <linux/mm.h>
42#include <asm/semaphore.h>
43
44#include "aacraid.h"
45
46struct aac_common aac_config;
47
48static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long commsize, unsigned long commalign)
49{
50 unsigned char *base;
51 unsigned long size, align;
52 unsigned long fibsize = 4096;
53 unsigned long printfbufsiz = 256;
54 struct aac_init *init;
55 dma_addr_t phys;
56
57 size = fibsize + sizeof(struct aac_init) + commsize + commalign + printfbufsiz;
58
59
60 base = pci_alloc_consistent(dev->pdev, size, &phys);
61
62 if(base == NULL)
63 {
64 printk(KERN_ERR "aacraid: unable to create mapping.\n");
65 return 0;
66 }
67 dev->comm_addr = (void *)base;
68 dev->comm_phys = phys;
69 dev->comm_size = size;
70
71 dev->init = (struct aac_init *)(base + fibsize);
72 dev->init_pa = phys + fibsize;
73
74 init = dev->init;
75
76 init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION);
77 init->MiniPortRevision = cpu_to_le32(Sa_MINIPORT_REVISION);
78 init->fsrev = cpu_to_le32(dev->fsrev);
79
80 /*
81 * Adapter Fibs are the first thing allocated so that they
82 * start page aligned
83 */
84 dev->aif_base_va = (struct hw_fib *)base;
85
86 init->AdapterFibsVirtualAddress = 0;
87 init->AdapterFibsPhysicalAddress = cpu_to_le32((u32)phys);
88 init->AdapterFibsSize = cpu_to_le32(fibsize);
89 init->AdapterFibAlign = cpu_to_le32(sizeof(struct hw_fib));
90 /*
91 * number of 4k pages of host physical memory. The aacraid fw needs
92 * this number to be less than 4gb worth of pages. num_physpages is in
93 * system page units. New firmware doesn't have any issues with the
94	 * mapping system, but older firmware did, and had *troubles* dealing
95	 * with the math overflowing past 32 bits, thus we must limit this
96 * field.
97 *
98	 * This assumes the memory is mapped zero->n, which isn't
99 * always true on real computers. It also has some slight problems
100 * with the GART on x86-64. I've btw never tried DMA from PCI space
101	 * on this platform but don't be surprised if it's problematic.
102 */
103#ifndef CONFIG_GART_IOMMU
104 if ((num_physpages << (PAGE_SHIFT - 12)) <= AAC_MAX_HOSTPHYSMEMPAGES) {
105 init->HostPhysMemPages =
106 cpu_to_le32(num_physpages << (PAGE_SHIFT-12));
107 } else
108#endif
109 {
110 init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES);
111 }
112
113
114 /*
115 * Increment the base address by the amount already used
116 */
117 base = base + fibsize + sizeof(struct aac_init);
118 phys = (dma_addr_t)((ulong)phys + fibsize + sizeof(struct aac_init));
119 /*
120 * Align the beginning of Headers to commalign
121 */
122 align = (commalign - ((unsigned long)(base) & (commalign - 1)));
123 base = base + align;
124 phys = phys + align;
125 /*
126 * Fill in addresses of the Comm Area Headers and Queues
127 */
128 *commaddr = base;
129 init->CommHeaderAddress = cpu_to_le32((u32)phys);
130 /*
131 * Increment the base address by the size of the CommArea
132 */
133 base = base + commsize;
134 phys = phys + commsize;
135 /*
136 * Place the Printf buffer area after the Fast I/O comm area.
137 */
138 dev->printfbuf = (void *)base;
139 init->printfbuf = cpu_to_le32(phys);
140 init->printfbufsiz = cpu_to_le32(printfbufsiz);
141 memset(base, 0, printfbufsiz);
142 return 1;
143}
144
145static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize)
146{
147 q->numpending = 0;
148 q->dev = dev;
149 INIT_LIST_HEAD(&q->pendingq);
150 init_waitqueue_head(&q->cmdready);
151 INIT_LIST_HEAD(&q->cmdq);
152 init_waitqueue_head(&q->qfull);
153 spin_lock_init(&q->lockdata);
154 q->lock = &q->lockdata;
155 q->headers.producer = mem;
156 q->headers.consumer = mem+1;
157 *(q->headers.producer) = cpu_to_le32(qsize);
158 *(q->headers.consumer) = cpu_to_le32(qsize);
159 q->entries = qsize;
160}
161
162/**
163 * aac_send_shutdown - shutdown an adapter
164 * @dev: Adapter to shutdown
165 *
166 * This routine will send a VM_CloseAll (shutdown) request to the adapter.
167 */
168
169int aac_send_shutdown(struct aac_dev * dev)
170{
171 struct fib * fibctx;
172 struct aac_close *cmd;
173 int status;
174
175 fibctx = fib_alloc(dev);
176 fib_init(fibctx);
177
178 cmd = (struct aac_close *) fib_data(fibctx);
179
180 cmd->command = cpu_to_le32(VM_CloseAll);
181 cmd->cid = cpu_to_le32(0xffffffff);
182
183 status = fib_send(ContainerCommand,
184 fibctx,
185 sizeof(struct aac_close),
186 FsaNormal,
187 1, 1,
188 NULL, NULL);
189
190 if (status == 0)
191 fib_complete(fibctx);
192 fib_free(fibctx);
193 return status;
194}
195
196/**
197 * aac_comm_init - Initialise FSA data structures
198 * @dev: Adapter to initialise
199 *
200 * Initializes the data structures that are required for the FSA communication
201 * interface to operate.
202 * Returns
203 *	0 - if we were able to init the communication interface.
204 *	-ENOMEM - if there were errors initializing. This is a fatal error.
205 */
206
207int aac_comm_init(struct aac_dev * dev)
208{
209 unsigned long hdrsize = (sizeof(u32) * NUMBER_OF_COMM_QUEUES) * 2;
210 unsigned long queuesize = sizeof(struct aac_entry) * TOTAL_QUEUE_ENTRIES;
211 u32 *headers;
212 struct aac_entry * queues;
213 unsigned long size;
214 struct aac_queue_block * comm = dev->queues;
215 /*
216 * Now allocate and initialize the zone structures used as our
217 * pool of FIB context records. The size of the zone is based
218	 * on the system memory size. We also initialize the lock used
219 * to protect the zone.
220 */
221 spin_lock_init(&dev->fib_lock);
222
223 /*
224	 * Allocate the physically contiguous space for the communication
225 * queue headers.
226 */
227
228 size = hdrsize + queuesize;
229
230 if (!aac_alloc_comm(dev, (void * *)&headers, size, QUEUE_ALIGNMENT))
231 return -ENOMEM;
232
233 queues = (struct aac_entry *)(((ulong)headers) + hdrsize);
234
235 /* Adapter to Host normal priority Command queue */
236 comm->queue[HostNormCmdQueue].base = queues;
237 aac_queue_init(dev, &comm->queue[HostNormCmdQueue], headers, HOST_NORM_CMD_ENTRIES);
238 queues += HOST_NORM_CMD_ENTRIES;
239 headers += 2;
240
241 /* Adapter to Host high priority command queue */
242 comm->queue[HostHighCmdQueue].base = queues;
243 aac_queue_init(dev, &comm->queue[HostHighCmdQueue], headers, HOST_HIGH_CMD_ENTRIES);
244
245 queues += HOST_HIGH_CMD_ENTRIES;
246 headers +=2;
247
248 /* Host to adapter normal priority command queue */
249 comm->queue[AdapNormCmdQueue].base = queues;
250 aac_queue_init(dev, &comm->queue[AdapNormCmdQueue], headers, ADAP_NORM_CMD_ENTRIES);
251
252 queues += ADAP_NORM_CMD_ENTRIES;
253 headers += 2;
254
255 /* host to adapter high priority command queue */
256 comm->queue[AdapHighCmdQueue].base = queues;
257 aac_queue_init(dev, &comm->queue[AdapHighCmdQueue], headers, ADAP_HIGH_CMD_ENTRIES);
258
259 queues += ADAP_HIGH_CMD_ENTRIES;
260 headers += 2;
261
262 /* adapter to host normal priority response queue */
263 comm->queue[HostNormRespQueue].base = queues;
264 aac_queue_init(dev, &comm->queue[HostNormRespQueue], headers, HOST_NORM_RESP_ENTRIES);
265 queues += HOST_NORM_RESP_ENTRIES;
266 headers += 2;
267
268 /* adapter to host high priority response queue */
269 comm->queue[HostHighRespQueue].base = queues;
270 aac_queue_init(dev, &comm->queue[HostHighRespQueue], headers, HOST_HIGH_RESP_ENTRIES);
271
272 queues += HOST_HIGH_RESP_ENTRIES;
273 headers += 2;
274
275 /* host to adapter normal priority response queue */
276 comm->queue[AdapNormRespQueue].base = queues;
277 aac_queue_init(dev, &comm->queue[AdapNormRespQueue], headers, ADAP_NORM_RESP_ENTRIES);
278
279 queues += ADAP_NORM_RESP_ENTRIES;
280 headers += 2;
281
282 /* host to adapter high priority response queue */
283 comm->queue[AdapHighRespQueue].base = queues;
284 aac_queue_init(dev, &comm->queue[AdapHighRespQueue], headers, ADAP_HIGH_RESP_ENTRIES);
285
286 comm->queue[AdapNormCmdQueue].lock = comm->queue[HostNormRespQueue].lock;
287 comm->queue[AdapHighCmdQueue].lock = comm->queue[HostHighRespQueue].lock;
288 comm->queue[AdapNormRespQueue].lock = comm->queue[HostNormCmdQueue].lock;
289 comm->queue[AdapHighRespQueue].lock = comm->queue[HostHighCmdQueue].lock;
290
291 return 0;
292}
293
294struct aac_dev *aac_init_adapter(struct aac_dev *dev)
295{
296 /*
297 * Ok now init the communication subsystem
298 */
299
300 dev->queues = (struct aac_queue_block *) kmalloc(sizeof(struct aac_queue_block), GFP_KERNEL);
301 if (dev->queues == NULL) {
302 printk(KERN_ERR "Error could not allocate comm region.\n");
303 return NULL;
304 }
305 memset(dev->queues, 0, sizeof(struct aac_queue_block));
306
307 if (aac_comm_init(dev)<0){
308 kfree(dev->queues);
309 return NULL;
310 }
311 /*
312 * Initialize the list of fibs
313 */
314 if(fib_setup(dev)<0){
315 kfree(dev->queues);
316 return NULL;
317 }
318
319 INIT_LIST_HEAD(&dev->fib_list);
320 init_completion(&dev->aif_completion);
321
322 return dev;
323}
324
325
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
new file mode 100644
index 000000000000..3f36dbaa2bb3
--- /dev/null
+++ b/drivers/scsi/aacraid/commsup.c
@@ -0,0 +1,939 @@
1/*
2 * Adaptec AAC series RAID controller driver
3 * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
4 *
5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux.
7 *
8 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; see the file COPYING. If not, write to
22 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 * Module Name:
25 * commsup.c
26 *
27 * Abstract: Contains all routines that are required for FSA host/adapter
28 * communication.
29 *
30 */
31
32#include <linux/kernel.h>
33#include <linux/init.h>
34#include <linux/types.h>
35#include <linux/sched.h>
36#include <linux/pci.h>
37#include <linux/spinlock.h>
38#include <linux/slab.h>
39#include <linux/completion.h>
40#include <linux/blkdev.h>
41#include <asm/semaphore.h>
42
43#include "aacraid.h"
44
45/**
46 * fib_map_alloc - allocate the fib objects
47 * @dev: Adapter to allocate for
48 *
49 * Allocate and map the shared PCI space for the FIB blocks used to
50 * talk to the Adaptec firmware.
51 */
52
53static int fib_map_alloc(struct aac_dev *dev)
54{
55 if((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, sizeof(struct hw_fib) * AAC_NUM_FIB, &dev->hw_fib_pa))==NULL)
56 return -ENOMEM;
57 return 0;
58}
59
60/**
61 * fib_map_free - free the fib objects
62 * @dev: Adapter to free
63 *
64 * Free the PCI mappings and the memory allocated for FIB blocks
65 * on this adapter.
66 */
67
68void fib_map_free(struct aac_dev *dev)
69{
70 pci_free_consistent(dev->pdev, sizeof(struct hw_fib) * AAC_NUM_FIB, dev->hw_fib_va, dev->hw_fib_pa);
71}
72
73/**
74 * fib_setup - setup the fibs
75 * @dev: Adapter to set up
76 *
77 * Allocate the PCI space for the fibs, map it and then initialise the
78 * fib area, the unmapped fib data and also the free list
79 */
80
81int fib_setup(struct aac_dev * dev)
82{
83 struct fib *fibptr;
84 struct hw_fib *hw_fib_va;
85 dma_addr_t hw_fib_pa;
86 int i;
87
88 if(fib_map_alloc(dev)<0)
89 return -ENOMEM;
90
91 hw_fib_va = dev->hw_fib_va;
92 hw_fib_pa = dev->hw_fib_pa;
93 memset(hw_fib_va, 0, sizeof(struct hw_fib) * AAC_NUM_FIB);
94 /*
95 * Initialise the fibs
96 */
97 for (i = 0, fibptr = &dev->fibs[i]; i < AAC_NUM_FIB; i++, fibptr++)
98 {
99 fibptr->dev = dev;
100 fibptr->hw_fib = hw_fib_va;
101 fibptr->data = (void *) fibptr->hw_fib->data;
102 fibptr->next = fibptr+1; /* Forward chain the fibs */
103 init_MUTEX_LOCKED(&fibptr->event_wait);
104 spin_lock_init(&fibptr->event_lock);
105 hw_fib_va->header.XferState = 0xffffffff;
106 hw_fib_va->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib));
107 fibptr->hw_fib_pa = hw_fib_pa;
108 hw_fib_va = (struct hw_fib *)((unsigned char *)hw_fib_va + sizeof(struct hw_fib));
109 hw_fib_pa = hw_fib_pa + sizeof(struct hw_fib);
110 }
111 /*
112 * Add the fib chain to the free list
113 */
114 dev->fibs[AAC_NUM_FIB-1].next = NULL;
115 /*
116 * Enable this to debug out of queue space
117 */
118 dev->free_fib = &dev->fibs[0];
119 return 0;
120}
121
122/**
123 * fib_alloc - allocate a fib
124 * @dev: Adapter to allocate the fib for
125 *
126 * Allocate a fib from the adapter fib pool. If the pool is empty we
127 * wait for fibs to become free.
128 */
129
130struct fib * fib_alloc(struct aac_dev *dev)
131{
132 struct fib * fibptr;
133 unsigned long flags;
134 spin_lock_irqsave(&dev->fib_lock, flags);
135 fibptr = dev->free_fib;
136 /* Cannot sleep here or you get hangs. Instead we did the
137 maths at compile time. */
138 if(!fibptr)
139 BUG();
140 dev->free_fib = fibptr->next;
141 spin_unlock_irqrestore(&dev->fib_lock, flags);
142 /*
143 * Set the proper node type code and node byte size
144 */
145 fibptr->type = FSAFS_NTC_FIB_CONTEXT;
146 fibptr->size = sizeof(struct fib);
147 /*
148 * Null out fields that depend on being zero at the start of
149 * each I/O
150 */
151 fibptr->hw_fib->header.XferState = 0;
152 fibptr->callback = NULL;
153 fibptr->callback_data = NULL;
154
155 return fibptr;
156}
157
158/**
159 * fib_free - free a fib
160 * @fibptr: fib to free up
161 *
162 * Frees up a fib and places it on the appropriate queue
163 * (either free or timed out)
164 */
165
166void fib_free(struct fib * fibptr)
167{
168 unsigned long flags;
169
170 spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
171 if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) {
172 aac_config.fib_timeouts++;
173 fibptr->next = fibptr->dev->timeout_fib;
174 fibptr->dev->timeout_fib = fibptr;
175 } else {
176 if (fibptr->hw_fib->header.XferState != 0) {
177 printk(KERN_WARNING "fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
178 (void*)fibptr,
179 le32_to_cpu(fibptr->hw_fib->header.XferState));
180 }
181 fibptr->next = fibptr->dev->free_fib;
182 fibptr->dev->free_fib = fibptr;
183 }
184 spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
185}
186
187/**
188 * fib_init - initialise a fib
189 * @fibptr: The fib to initialize
190 *
191 * Set up the generic fib fields ready for use
192 */
193
194void fib_init(struct fib *fibptr)
195{
196 struct hw_fib *hw_fib = fibptr->hw_fib;
197
198 hw_fib->header.StructType = FIB_MAGIC;
199 hw_fib->header.Size = cpu_to_le16(sizeof(struct hw_fib));
200 hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
201 hw_fib->header.SenderFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
202 hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
203 hw_fib->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib));
204}
205
206/**
207 * fib_deallocate - deallocate a fib
208 * @fibptr: fib to deallocate
209 *
210 * Will deallocate and return to the free pool the FIB pointed to by the
211 * caller.
212 */
213
214void fib_dealloc(struct fib * fibptr)
215{
216 struct hw_fib *hw_fib = fibptr->hw_fib;
217 if(hw_fib->header.StructType != FIB_MAGIC)
218 BUG();
219 hw_fib->header.XferState = 0;
220}
221
222/*
223 * Communication primitives define and support the queuing method we use to
224 * support host to adapter communication. All queue accesses happen through
225 * these routines, which are the only routines with knowledge of how
226 * these queues are implemented.
227 */
228
229/**
230 * aac_get_entry - get a queue entry
231 * @dev: Adapter
232 * @qid: Queue Number
233 * @entry: Entry return
234 * @index: Index return
235 * @nonotify: notification control
236 *
237 * With a priority the routine returns a queue entry if the queue has free entries. If the queue
238 * is full (no free entries) then no entry is returned and the function returns 0, otherwise 1 is
239 * returned.
240 */
241
242static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
243{
244 struct aac_queue * q;
245
246 /*
247 * All of the queues wrap when they reach the end, so we check
248 * to see if they have reached the end and if they have we just
249 * set the index back to zero. This is a wrap. You could or off
250 * the high bits in all updates but this is a bit faster I think.
251 */
252
253 q = &dev->queues->queue[qid];
254
255 *index = le32_to_cpu(*(q->headers.producer));
256 if ((*index - 2) == le32_to_cpu(*(q->headers.consumer)))
257 *nonotify = 1;
258
259 if (qid == AdapHighCmdQueue) {
260 if (*index >= ADAP_HIGH_CMD_ENTRIES)
261 *index = 0;
262 } else if (qid == AdapNormCmdQueue) {
263 if (*index >= ADAP_NORM_CMD_ENTRIES)
264 *index = 0; /* Wrap to front of the Producer Queue. */
265 }
266 else if (qid == AdapHighRespQueue)
267 {
268 if (*index >= ADAP_HIGH_RESP_ENTRIES)
269 *index = 0;
270 }
271 else if (qid == AdapNormRespQueue)
272 {
273 if (*index >= ADAP_NORM_RESP_ENTRIES)
274 *index = 0; /* Wrap to front of the Producer Queue. */
275 }
276 else {
277 printk("aacraid: invalid qid\n");
278 BUG();
279 }
280
281 if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
282 printk(KERN_WARNING "Queue %d full, %d outstanding.\n",
283 qid, q->numpending);
284 return 0;
285 } else {
286 *entry = q->base + *index;
287 return 1;
288 }
289}
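/*
 * For reference, the state tests used above reduce to the following
 * (hypothetical helper, sketch only): a queue is empty when the
 * producer and consumer indices match, and full when advancing the
 * producer would collide with the consumer.
 */
static inline int aac_queue_is_empty(struct aac_queue *q)
{
	return le32_to_cpu(*q->headers.producer) ==
		le32_to_cpu(*q->headers.consumer);
}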
290
291/**
292 * aac_queue_get - get the next free QE
293 * @dev: Adapter
294 * @index: Returned index
295 * @priority: Priority of fib
296 * @fib: Fib to associate with the queue entry
297 * @wait: Wait if queue full
298 * @fibptr: Driver fib object to go with fib
299 * @nonotify: Don't notify the adapter
300 *
301 *	Gets the next free QE off the requested priority adapter command
302 * queue and associates the Fib with the QE. The QE represented by
303 * index is ready to insert on the queue when this routine returns
304 * success.
305 */
306
307static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
308{
309 struct aac_entry * entry = NULL;
310 int map = 0;
311 struct aac_queue * q = &dev->queues->queue[qid];
312
313 spin_lock_irqsave(q->lock, q->SavedIrql);
314
315 if (qid == AdapHighCmdQueue || qid == AdapNormCmdQueue)
316 {
317 /* if no entries wait for some if caller wants to */
318 while (!aac_get_entry(dev, qid, &entry, index, nonotify))
319 {
320 printk(KERN_ERR "GetEntries failed\n");
321 }
322 /*
323 * Setup queue entry with a command, status and fib mapped
324 */
325 entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
326 map = 1;
327 }
328 else if (qid == AdapHighRespQueue || qid == AdapNormRespQueue)
329 {
330 while(!aac_get_entry(dev, qid, &entry, index, nonotify))
331 {
332 /* if no entries wait for some if caller wants to */
333 }
334 /*
335 * Setup queue entry with command, status and fib mapped
336 */
337 entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
338 entry->addr = hw_fib->header.SenderFibAddress;
339		/* Restore the adapter's pointer to the FIB */
340		hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
341 map = 0;
342 }
343 /*
344	 * If MapFib is true then we need to map the Fib and put pointers
345 * in the queue entry.
346 */
347 if (map)
348 entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
349 return 0;
350}
351
352
353/**
354 * aac_insert_entry - insert a queue entry
355 * @dev: Adapter
356 * @index: Index of entry to insert
357 * @qid: Queue number
358 * @nonotify: Suppress adapter notification
359 *
360 *	Makes the entry at the given index visible to the adapter by
361 *	advancing the producer index of the requested queue, and then
362 *	notifies the adapter that a new entry is ready, unless nonotify
363 *	suppresses the notification.
364 */
365
366static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned long nonotify)
367{
368 struct aac_queue * q = &dev->queues->queue[qid];
369
370 if(q == NULL)
371 BUG();
372 *(q->headers.producer) = cpu_to_le32(index + 1);
373 spin_unlock_irqrestore(q->lock, q->SavedIrql);
374
375 if (qid == AdapHighCmdQueue ||
376 qid == AdapNormCmdQueue ||
377 qid == AdapHighRespQueue ||
378 qid == AdapNormRespQueue)
379 {
380 if (!nonotify)
381 aac_adapter_notify(dev, qid);
382 }
383 else
384 printk("Suprise insert!\n");
385 return 0;
386}
387
388/*
389 * Define the highest level of host to adapter communication routines.
390 * These routines will support host to adapter FS communication. These
391 * routines have no knowledge of the communication method used. This level
392 * sends and receives FIBs. This level has no knowledge of how these FIBs
393 * get passed back and forth.
394 */
395
396/**
397 * fib_send - send a fib to the adapter
398 * @command: Command to send
399 * @fibptr: The fib
400 * @size: Size of fib data area
401 * @priority: Priority of Fib
402 * @wait: Async/sync select
403 * @reply: True if a reply is wanted
404 * @callback: Called with reply
405 * @callback_data: Passed to callback
406 *
407 * Sends the requested FIB to the adapter and optionally will wait for a
408 *	response FIB. If the caller does not wish to wait for a response then
409 * an event to wait on must be supplied. This event will be set when a
410 * response FIB is received from the adapter.
411 */
412
413int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority, int wait, int reply, fib_callback callback, void * callback_data)
414{
415 u32 index;
416 u32 qid;
417 struct aac_dev * dev = fibptr->dev;
418 unsigned long nointr = 0;
419 struct hw_fib * hw_fib = fibptr->hw_fib;
420 struct aac_queue * q;
421 unsigned long flags = 0;
422 if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
423 return -EBUSY;
424 /*
425	 *	There are 5 cases with the wait and response requested flags.
426 * The only invalid cases are if the caller requests to wait and
427 * does not request a response and if the caller does not want a
428 * response and the Fib is not allocated from pool. If a response
429	 *	is not requested the Fib will just be deallocated by the DPC
430 * routine when the response comes back from the adapter. No
431 * further processing will be done besides deleting the Fib. We
432 * will have a debug mode where the adapter can notify the host
433 * it had a problem and the host can log that fact.
434 */
435 if (wait && !reply) {
436 return -EINVAL;
437 } else if (!wait && reply) {
438 hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
439 FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
440 } else if (!wait && !reply) {
441 hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
442 FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
443 } else if (wait && reply) {
444 hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
445 FIB_COUNTER_INCREMENT(aac_config.NormalSent);
446 }
447 /*
448 * Map the fib into 32bits by using the fib number
449 */
450
451 hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr-dev->fibs)) << 1);
452 hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
453 /*
454 * Set FIB state to indicate where it came from and if we want a
455 * response from the adapter. Also load the command from the
456 * caller.
457 *
458 * Map the hw fib pointer as a 32bit value
459 */
460 hw_fib->header.Command = cpu_to_le16(command);
461 hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
462 fibptr->hw_fib->header.Flags = 0; /* 0 the flags field - internal only*/
463 /*
464 * Set the size of the Fib we want to send to the adapter
465 */
466 hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
467 if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
468 return -EMSGSIZE;
469 }
470 /*
471	 *	Get a queue entry, connect the FIB to it and then notify
472	 *	the adapter that a command is ready.
473 */
474 if (priority == FsaHigh) {
475 hw_fib->header.XferState |= cpu_to_le32(HighPriority);
476 qid = AdapHighCmdQueue;
477 } else {
478 hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
479 qid = AdapNormCmdQueue;
480 }
481 q = &dev->queues->queue[qid];
482
483 if(wait)
484 spin_lock_irqsave(&fibptr->event_lock, flags);
485 if(aac_queue_get( dev, &index, qid, hw_fib, 1, fibptr, &nointr)<0)
486 return -EWOULDBLOCK;
487 dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
488 dprintk((KERN_DEBUG "Fib contents:.\n"));
489 dprintk((KERN_DEBUG " Command = %d.\n", hw_fib->header.Command));
490 dprintk((KERN_DEBUG " XferState = %x.\n", hw_fib->header.XferState));
491 dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib));
492 dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
493 dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
494 /*
495 * Fill in the Callback and CallbackContext if we are not
496 * going to wait.
497 */
498 if (!wait) {
499 fibptr->callback = callback;
500 fibptr->callback_data = callback_data;
501 }
502 FIB_COUNTER_INCREMENT(aac_config.FibsSent);
503 list_add_tail(&fibptr->queue, &q->pendingq);
504 q->numpending++;
505
506 fibptr->done = 0;
507 fibptr->flags = 0;
508
509 if(aac_insert_entry(dev, index, qid, (nointr & aac_config.irq_mod)) < 0)
510 return -EWOULDBLOCK;
511 /*
512 * If the caller wanted us to wait for response wait now.
513 */
514
515 if (wait) {
516 spin_unlock_irqrestore(&fibptr->event_lock, flags);
517 down(&fibptr->event_wait);
518 if(fibptr->done == 0)
519 BUG();
520
521 if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)){
522 return -ETIMEDOUT;
523 } else {
524 return 0;
525 }
526 }
527 /*
528	 * If the user does not want a response then return success, otherwise
529 * return pending
530 */
531 if (reply)
532 return -EINPROGRESS;
533 else
534 return 0;
535}
536
537/**
538 * aac_consumer_get - get the top of the queue
539 * @dev: Adapter
540 * @q: Queue
541 * @entry: Return entry
542 *
543 *	Will return a pointer to the entry at the top of the requested
544 *	queue that we are a consumer of. It does
545 * not change the state of the queue.
546 */
547
548int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
549{
550 u32 index;
551 int status;
552 if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
553 status = 0;
554 } else {
555 /*
556 * The consumer index must be wrapped if we have reached
557 * the end of the queue, else we just use the entry
558 * pointed to by the header index
559 */
560 if (le32_to_cpu(*q->headers.consumer) >= q->entries)
561 index = 0;
562 else
563 index = le32_to_cpu(*q->headers.consumer);
564 *entry = q->base + index;
565 status = 1;
566 }
567 return(status);
568}
569
570/**
571 * aac_consumer_free - free consumer entry
572 * @dev: Adapter
573 * @q: Queue
574 * @qid: Queue ident
575 *
576 * Frees up the current top of the queue we are a consumer of. If the
577 * queue was full notify the producer that the queue is no longer full.
578 */
579
580void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
581{
582 int wasfull = 0;
583 u32 notify;
584
585 if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
586 wasfull = 1;
587
588 if (le32_to_cpu(*q->headers.consumer) >= q->entries)
589 *q->headers.consumer = cpu_to_le32(1);
590 else
591 *q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);
592
593 if (wasfull) {
594 switch (qid) {
595
596 case HostNormCmdQueue:
597 notify = HostNormCmdNotFull;
598 break;
599 case HostHighCmdQueue:
600 notify = HostHighCmdNotFull;
601 break;
602 case HostNormRespQueue:
603 notify = HostNormRespNotFull;
604 break;
605 case HostHighRespQueue:
606 notify = HostHighRespNotFull;
607 break;
608 default:
609 BUG();
610 return;
611 }
612 aac_adapter_notify(dev, notify);
613 }
614}
615
616/**
617 * fib_adapter_complete - complete adapter issued fib
618 * @fibptr: fib to complete
619 * @size: size of fib
620 *
621 * Will do all necessary work to complete a FIB that was sent from
622 * the adapter.
623 */
624
625int fib_adapter_complete(struct fib * fibptr, unsigned short size)
626{
627 struct hw_fib * hw_fib = fibptr->hw_fib;
628 struct aac_dev * dev = fibptr->dev;
629 unsigned long nointr = 0;
630 if (hw_fib->header.XferState == 0)
631 return 0;
632 /*
633 * If we plan to do anything check the structure type first.
634 */
635 if ( hw_fib->header.StructType != FIB_MAGIC ) {
636 return -EINVAL;
637 }
638 /*
639 * This block handles the case where the adapter had sent us a
640 * command and we have finished processing the command. We
641 * call completeFib when we are done processing the command
642 * and want to send a response back to the adapter. This will
643 * send the completed cdb to the adapter.
644 */
645 if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
646 hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
647 if (hw_fib->header.XferState & cpu_to_le32(HighPriority)) {
648 u32 index;
649 if (size)
650 {
651 size += sizeof(struct aac_fibhdr);
652 if (size > le16_to_cpu(hw_fib->header.SenderSize))
653 return -EMSGSIZE;
654 hw_fib->header.Size = cpu_to_le16(size);
655 }
656 if(aac_queue_get(dev, &index, AdapHighRespQueue, hw_fib, 1, NULL, &nointr) < 0) {
657 return -EWOULDBLOCK;
658 }
659 if (aac_insert_entry(dev, index, AdapHighRespQueue, (nointr & (int)aac_config.irq_mod)) != 0) {
660 }
661 }
662 else if (hw_fib->header.XferState & NormalPriority)
663 {
664 u32 index;
665
666 if (size) {
667 size += sizeof(struct aac_fibhdr);
668 if (size > le16_to_cpu(hw_fib->header.SenderSize))
669 return -EMSGSIZE;
670 hw_fib->header.Size = cpu_to_le16(size);
671 }
672 if (aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr) < 0)
673 return -EWOULDBLOCK;
674 if (aac_insert_entry(dev, index, AdapNormRespQueue, (nointr & (int)aac_config.irq_mod)) != 0)
675 {
676 }
677 }
678 }
679 else
680 {
681 printk(KERN_WARNING "fib_adapter_complete: Unknown xferstate detected.\n");
682 BUG();
683 }
684 return 0;
685}
686
687/**
688 * fib_complete - fib completion handler
689 * @fib: FIB to complete
690 *
691 * Will do all necessary work to complete a FIB.
692 */
693
694int fib_complete(struct fib * fibptr)
695{
696 struct hw_fib * hw_fib = fibptr->hw_fib;
697
698 /*
699 * Check for a fib which has already been completed
700 */
701
702 if (hw_fib->header.XferState == 0)
703 return 0;
704 /*
705 * If we plan to do anything check the structure type first.
706 */
707
708 if (hw_fib->header.StructType != FIB_MAGIC)
709 return -EINVAL;
710 /*
711 *	This block completes a cdb which originated on the host and we
712 * just need to deallocate the cdb or reinit it. At this point the
713 * command is complete that we had sent to the adapter and this
714 * cdb could be reused.
715 */
716 if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
717 (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
718 {
719 fib_dealloc(fibptr);
720 }
721 else if(hw_fib->header.XferState & cpu_to_le32(SentFromHost))
722 {
723 /*
724 * This handles the case when the host has aborted the I/O
725 * to the adapter because the adapter is not responding
726 */
727 fib_dealloc(fibptr);
728 } else if(hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
729 fib_dealloc(fibptr);
730 } else {
731 BUG();
732 }
733 return 0;
734}
735
736/**
737 * aac_printf - handle printf from firmware
738 * @dev: Adapter
739 * @val: Message info
740 *
741 * Print a message passed to us by the controller firmware on the
742 * Adaptec board
743 */
744
745void aac_printf(struct aac_dev *dev, u32 val)
746{
747 int length = val & 0xffff;
748 int level = (val >> 16) & 0xffff;
749 char *cp = dev->printfbuf;
750
751 /*
752	 * The size of the printfbuf is set in port.c;
753	 * there is no variable or define for it.
754 */
755 if (length > 255)
756 length = 255;
757 if (cp[length] != 0)
758 cp[length] = 0;
759 if (level == LOG_AAC_HIGH_ERROR)
760 printk(KERN_WARNING "aacraid:%s", cp);
761 else
762 printk(KERN_INFO "aacraid:%s", cp);
763 memset(cp, 0, 256);
764}
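
The 'val' argument is a packed descriptor rather than a pointer: the low 16
bits carry the message length and the high 16 bits the log level, while the
message text itself sits in the shared printfbuf. A standalone sketch of the
unpacking (the packed value below is made up for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t val = (1u << 16) | 12;	/* hypothetical: level 1, 12-byte message */
	int length = val & 0xffff;	/* same masks as aac_printf() above */
	int level = (val >> 16) & 0xffff;

	printf("length=%d level=%d\n", length, level);
	return 0;
}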
765
766/**
767 * aac_command_thread - command processing thread
768 * @dev: Adapter to monitor
769 *
770 *	Waits on the commandready event in its queue. When the event gets set
771 *	it will pull FIBs off its queue. It will continue to pull FIBs off
772 * until the queue is empty. When the queue is empty it will wait for
773 * more FIBs.
774 */
775
776int aac_command_thread(struct aac_dev * dev)
777{
778 struct hw_fib *hw_fib, *hw_newfib;
779 struct fib *fib, *newfib;
780 struct aac_queue_block *queues = dev->queues;
781 struct aac_fib_context *fibctx;
782 unsigned long flags;
783 DECLARE_WAITQUEUE(wait, current);
784
785 /*
786	 *	We can only have one thread per adapter for AIFs.
787 */
788 if (dev->aif_thread)
789 return -EINVAL;
790 /*
791 * Set up the name that will appear in 'ps'
792 * stored in task_struct.comm[16].
793 */
794 daemonize("aacraid");
795 allow_signal(SIGKILL);
796 /*
797 * Let the DPC know it has a place to send the AIF's to.
798 */
799 dev->aif_thread = 1;
800 add_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
801 set_current_state(TASK_INTERRUPTIBLE);
802 while(1)
803 {
804 spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
805 while(!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) {
806 struct list_head *entry;
807 struct aac_aifcmd * aifcmd;
808
809 set_current_state(TASK_RUNNING);
810
811 entry = queues->queue[HostNormCmdQueue].cmdq.next;
812 list_del(entry);
813
814 spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
815 fib = list_entry(entry, struct fib, fiblink);
816 /*
817 * We will process the FIB here or pass it to a
818			 *	worker thread that is TBD. We really can't
819 * do anything at this point since we don't have
820 * anything defined for this thread to do.
821 */
822 hw_fib = fib->hw_fib;
823 memset(fib, 0, sizeof(struct fib));
824 fib->type = FSAFS_NTC_FIB_CONTEXT;
825 fib->size = sizeof( struct fib );
826 fib->hw_fib = hw_fib;
827 fib->data = hw_fib->data;
828 fib->dev = dev;
829 /*
830 * We only handle AifRequest fibs from the adapter.
831 */
832 aifcmd = (struct aac_aifcmd *) hw_fib->data;
833 if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
834 /* Handle Driver Notify Events */
835 *(u32 *)hw_fib->data = cpu_to_le32(ST_OK);
836 fib_adapter_complete(fib, sizeof(u32));
837 } else {
838 struct list_head *entry;
839 /* The u32 here is important and intended. We are using
840				  32-bit wrapping time to fit the adapter field. */
841
842 u32 time_now, time_last;
843 unsigned long flagv;
844
845 time_now = jiffies/HZ;
846
847 spin_lock_irqsave(&dev->fib_lock, flagv);
848 entry = dev->fib_list.next;
849 /*
850 * For each Context that is on the
851 * fibctxList, make a copy of the
852 * fib, and then set the event to wake up the
853 * thread that is waiting for it.
854 */
855 while (entry != &dev->fib_list) {
856 /*
857 * Extract the fibctx
858 */
859 fibctx = list_entry(entry, struct aac_fib_context, next);
860 /*
861 * Check if the queue is getting
862 * backlogged
863 */
864 if (fibctx->count > 20)
865 {
866 /*
867 * It's *not* jiffies folks,
868 * but jiffies / HZ so do not
869 * panic ...
870 */
871 time_last = fibctx->jiffies;
872 /*
873 * Has it been > 2 minutes
874 * since the last read off
875 * the queue?
876 */
877 if ((time_now - time_last) > 120) {
878 entry = entry->next;
879 aac_close_fib_context(dev, fibctx);
880 continue;
881 }
882 }
883 /*
884 * Warning: no sleep allowed while
885 * holding spinlock
886 */
887 hw_newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
888 newfib = kmalloc(sizeof(struct fib), GFP_ATOMIC);
889 if (newfib && hw_newfib) {
890 /*
891 * Make the copy of the FIB
892 */
893 memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
894 memcpy(newfib, fib, sizeof(struct fib));
895 newfib->hw_fib = hw_newfib;
896 /*
897 * Put the FIB onto the
898 * fibctx's fibs
899 */
900 list_add_tail(&newfib->fiblink, &fibctx->fib_list);
901 fibctx->count++;
902 /*
903 * Set the event to wake up the
904						 *	thread that is waiting.
905 */
906 up(&fibctx->wait_sem);
907 } else {
908 printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
909 if(newfib)
910 kfree(newfib);
911 if(hw_newfib)
912 kfree(hw_newfib);
913 }
914 entry = entry->next;
915 }
916 /*
917 * Set the status of this FIB
918 */
919 *(u32 *)hw_fib->data = cpu_to_le32(ST_OK);
920 fib_adapter_complete(fib, sizeof(u32));
921 spin_unlock_irqrestore(&dev->fib_lock, flagv);
922 }
923 spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
924 kfree(fib);
925 }
926 /*
927		 *	There are no more AIFs.
928 */
929 spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
930 schedule();
931
932 if(signal_pending(current))
933 break;
934 set_current_state(TASK_INTERRUPTIBLE);
935 }
936 remove_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
937 dev->aif_thread = 0;
938 complete_and_exit(&dev->aif_completion, 0);
939}
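
The deliberately 32-bit timestamps in the backlog check above are easy to
misread: because both values are u32 and the subtraction is unsigned, the
two-minute staleness test stays correct even when the seconds counter wraps.
A standalone sketch with made-up values straddling a wrap:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t time_last = 0xFFFFFFB0u;	/* last read, just before a 32-bit wrap */
	uint32_t time_now = time_last + 200u;	/* 200 seconds later, counter has wrapped */

	/* unsigned wrap-around makes the difference come out as 200, not negative */
	printf("stale=%d\n", (time_now - time_last) > 120u);	/* prints stale=1 */
	return 0;
}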
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
new file mode 100644
index 000000000000..8480b427a6d9
--- /dev/null
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -0,0 +1,215 @@
1/*
2 * Adaptec AAC series RAID controller driver
3 * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
4 *
5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux.
7 *
8 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; see the file COPYING. If not, write to
22 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 * Module Name:
25 * dpcsup.c
26 *
27 * Abstract: All DPC processing routines for the cyclone board occur here.
28 *
29 *
30 */
31
32#include <linux/kernel.h>
33#include <linux/init.h>
34#include <linux/types.h>
35#include <linux/sched.h>
36#include <linux/pci.h>
37#include <linux/spinlock.h>
38#include <linux/slab.h>
39#include <linux/completion.h>
40#include <linux/blkdev.h>
41#include <asm/semaphore.h>
42
43#include "aacraid.h"
44
45/**
46 * aac_response_normal - Handle command replies
47 * @q: Queue to read from
48 *
49 * This DPC routine will be run when the adapter interrupts us to let us
50 * know there is a response on our normal priority queue. We will pull off
51 * all QE there are and wake up all the waiters before exiting. We will
52 * all the QEs there are and wake up all the waiters before exiting. We will
53 */
54
55unsigned int aac_response_normal(struct aac_queue * q)
56{
57 struct aac_dev * dev = q->dev;
58 struct aac_entry *entry;
59 struct hw_fib * hwfib;
60 struct fib * fib;
61 int consumed = 0;
62 unsigned long flags;
63
64 spin_lock_irqsave(q->lock, flags);
65 /*
66 * Keep pulling response QEs off the response queue and waking
67 * up the waiters until there are no more QEs. We then return
68	 *	to the system. If no response was requested we just
69 * deallocate the Fib here and continue.
70 */
71 while(aac_consumer_get(dev, q, &entry))
72 {
73 int fast;
74 u32 index = le32_to_cpu(entry->addr);
75 fast = index & 0x01;
76 fib = &dev->fibs[index >> 1];
77 hwfib = fib->hw_fib;
78
79 aac_consumer_free(dev, q, HostNormRespQueue);
80 /*
81 * Remove this fib from the Outstanding I/O queue.
82 * But only if it has not already been timed out.
83 *
84 * If the fib has been timed out already, then just
85 * continue. The caller has already been notified that
86 * the fib timed out.
87 */
88 if (!(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
89 list_del(&fib->queue);
90 dev->queues->queue[AdapNormCmdQueue].numpending--;
91 } else {
92 printk(KERN_WARNING "aacraid: FIB timeout (%x).\n", fib->flags);
93			printk(KERN_DEBUG "aacraid: hwfib=%p fib index=%i fib=%p\n", hwfib, hwfib->header.SenderData, fib);
94 continue;
95 }
96 spin_unlock_irqrestore(q->lock, flags);
97
98 if (fast) {
99 /*
100 * Doctor the fib
101 */
102 *(u32 *)hwfib->data = cpu_to_le32(ST_OK);
103 hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
104 }
105
106 FIB_COUNTER_INCREMENT(aac_config.FibRecved);
107
108 if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
109 {
110 u32 *pstatus = (u32 *)hwfib->data;
111 if (*pstatus & cpu_to_le32(0xffff0000))
112 *pstatus = cpu_to_le32(ST_OK);
113 }
114 if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
115 {
116 if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
117 FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
118 else
119 FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
120 /*
121 * NOTE: we cannot touch the fib after this
122 * call, because it may have been deallocated.
123 */
124 fib->callback(fib->callback_data, fib);
125 } else {
126 unsigned long flagv;
127 spin_lock_irqsave(&fib->event_lock, flagv);
128 fib->done = 1;
129 up(&fib->event_wait);
130 spin_unlock_irqrestore(&fib->event_lock, flagv);
131 FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
132 }
133 consumed++;
134 spin_lock_irqsave(q->lock, flags);
135 }
136
137 if (consumed > aac_config.peak_fibs)
138 aac_config.peak_fibs = consumed;
139 if (consumed == 0)
140 aac_config.zero_fibs++;
141
142 spin_unlock_irqrestore(q->lock, flags);
143 return 0;
144}
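
Each response-queue entry packs a fib index and a fast-path flag into a
single word: bit 0 marks a "fast" completion and the remaining bits, shifted
down, index dev->fibs[]. A standalone sketch of the decoding done at the top
of the loop above (the entry value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t addr = (7u << 1) | 1u;	/* hypothetical entry: fib index 7, fast bit set */
	int fast = addr & 0x01;
	uint32_t index = addr >> 1;

	printf("fib index=%u fast=%d\n", (unsigned)index, fast);
	return 0;
}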
145
146
147/**
148 * aac_command_normal - handle commands
149 * @q: queue to process
150 *
151 * This DPC routine will be queued when the adapter interrupts us to
152 * let us know there is a command on our normal priority queue. We will
153 *	pull off all the QEs there are and wake up all the waiters before exiting.
154 * We will take a spinlock out on the queue before operating on it.
155 */
156
157unsigned int aac_command_normal(struct aac_queue *q)
158{
159 struct aac_dev * dev = q->dev;
160 struct aac_entry *entry;
161 unsigned long flags;
162
163 spin_lock_irqsave(q->lock, flags);
164
165 /*
166 * Keep pulling response QEs off the response queue and waking
167 * up the waiters until there are no more QEs. We then return
168	 *	to the system.
169 */
170 while(aac_consumer_get(dev, q, &entry))
171 {
172 struct fib fibctx;
173 struct hw_fib * hw_fib;
174 u32 index;
175 struct fib *fib = &fibctx;
176
177 index = le32_to_cpu(entry->addr) / sizeof(struct hw_fib);
178 hw_fib = &dev->aif_base_va[index];
179
180 /*
181		 *	Allocate a FIB at all costs. For non-queued stuff
182		 *	we can just use the stack so we are happy. We need
183		 *	a fib object in order to manage the linked lists.
184 */
185 if (dev->aif_thread)
186 if((fib = kmalloc(sizeof(struct fib), GFP_ATOMIC)) == NULL)
187 fib = &fibctx;
188
189 memset(fib, 0, sizeof(struct fib));
190 INIT_LIST_HEAD(&fib->fiblink);
191 fib->type = FSAFS_NTC_FIB_CONTEXT;
192 fib->size = sizeof(struct fib);
193 fib->hw_fib = hw_fib;
194 fib->data = hw_fib->data;
195 fib->dev = dev;
196
197
198 if (dev->aif_thread && fib != &fibctx) {
199 list_add_tail(&fib->fiblink, &q->cmdq);
200 aac_consumer_free(dev, q, HostNormCmdQueue);
201 wake_up_interruptible(&q->cmdready);
202 } else {
203 aac_consumer_free(dev, q, HostNormCmdQueue);
204 spin_unlock_irqrestore(q->lock, flags);
205 /*
206 * Set the status of this FIB
207 */
208 *(u32 *)hw_fib->data = cpu_to_le32(ST_OK);
209 fib_adapter_complete(fib, sizeof(u32));
210 spin_lock_irqsave(q->lock, flags);
211 }
212 }
213 spin_unlock_irqrestore(q->lock, flags);
214 return 0;
215}
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
new file mode 100644
index 000000000000..c9b82687ba1a
--- /dev/null
+++ b/drivers/scsi/aacraid/linit.c
@@ -0,0 +1,749 @@
1/*
2 * Adaptec AAC series RAID controller driver
3 * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
4 *
5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux.
7 *
8 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; see the file COPYING. If not, write to
22 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 * Module Name:
25 * linit.c
26 *
27 * Abstract: Linux Driver entry module for Adaptec RAID Array Controller
28 */
29
30#define AAC_DRIVER_VERSION "1.1.2-lk2"
31#define AAC_DRIVER_BUILD_DATE __DATE__
32#define AAC_DRIVERNAME "aacraid"
33
34#include <linux/compat.h>
35#include <linux/blkdev.h>
36#include <linux/completion.h>
37#include <linux/init.h>
38#include <linux/interrupt.h>
39#include <linux/kernel.h>
40#include <linux/module.h>
41#include <linux/moduleparam.h>
42#include <linux/pci.h>
43#include <linux/slab.h>
44#include <linux/spinlock.h>
45#include <linux/syscalls.h>
46#include <linux/ioctl32.h>
47#include <linux/delay.h>
48#include <linux/smp_lock.h>
49#include <asm/semaphore.h>
50
51#include <scsi/scsi.h>
52#include <scsi/scsi_cmnd.h>
53#include <scsi/scsi_device.h>
54#include <scsi/scsi_host.h>
55#include <scsi/scsi_tcq.h>
56#include <scsi/scsicam.h>
57#include <scsi/scsi_eh.h>
58
59#include "aacraid.h"
60
61
62MODULE_AUTHOR("Red Hat Inc and Adaptec");
63MODULE_DESCRIPTION("Dell PERC2, 2/Si, 3/Si, 3/Di, "
64 "Adaptec Advanced Raid Products, "
65 "and HP NetRAID-4M SCSI driver");
66MODULE_LICENSE("GPL");
67MODULE_VERSION(AAC_DRIVER_VERSION);
68
69static LIST_HEAD(aac_devices);
70static int aac_cfg_major = -1;
71
72/*
73 * Because of the way Linux names scsi devices, the order in this table has
74 * become important. Check for on-board Raid first, add-in cards second.
75 *
76 * Note: The last field is used to index into aac_drivers below.
77 */
78static struct pci_device_id aac_pci_tbl[] = {
79 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
80 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
81	{ 0x1028, 0x0003, 0x1028, 0x0003, 0, 0, 2 }, /* PERC 3/Si (SlimFast/PERC3Si) */
82	{ 0x1028, 0x0004, 0x1028, 0x00d0, 0, 0, 3 }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF) */
83 { 0x1028, 0x0002, 0x1028, 0x00d1, 0, 0, 4 }, /* PERC 3/Di (Viper/PERC3DiV) */
84 { 0x1028, 0x0002, 0x1028, 0x00d9, 0, 0, 5 }, /* PERC 3/Di (Lexus/PERC3DiL) */
85 { 0x1028, 0x000a, 0x1028, 0x0106, 0, 0, 6 }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
86 { 0x1028, 0x000a, 0x1028, 0x011b, 0, 0, 7 }, /* PERC 3/Di (Dagger/PERC3DiD) */
87 { 0x1028, 0x000a, 0x1028, 0x0121, 0, 0, 8 }, /* PERC 3/Di (Boxster/PERC3DiB) */
88 { 0x9005, 0x0283, 0x9005, 0x0283, 0, 0, 9 }, /* catapult */
89 { 0x9005, 0x0284, 0x9005, 0x0284, 0, 0, 10 }, /* tomcat */
90 { 0x9005, 0x0285, 0x9005, 0x0286, 0, 0, 11 }, /* Adaptec 2120S (Crusader) */
91 { 0x9005, 0x0285, 0x9005, 0x0285, 0, 0, 12 }, /* Adaptec 2200S (Vulcan) */
92 { 0x9005, 0x0285, 0x9005, 0x0287, 0, 0, 13 }, /* Adaptec 2200S (Vulcan-2m) */
93 { 0x9005, 0x0285, 0x17aa, 0x0286, 0, 0, 14 }, /* Legend S220 (Legend Crusader) */
94 { 0x9005, 0x0285, 0x17aa, 0x0287, 0, 0, 15 }, /* Legend S230 (Legend Vulcan) */
95
96 { 0x9005, 0x0285, 0x9005, 0x0288, 0, 0, 16 }, /* Adaptec 3230S (Harrier) */
97 { 0x9005, 0x0285, 0x9005, 0x0289, 0, 0, 17 }, /* Adaptec 3240S (Tornado) */
98 { 0x9005, 0x0285, 0x9005, 0x028a, 0, 0, 18 }, /* ASR-2020ZCR SCSI PCI-X ZCR (Skyhawk) */
99 { 0x9005, 0x0285, 0x9005, 0x028b, 0, 0, 19 }, /* ASR-2025ZCR SCSI SO-DIMM PCI-X ZCR (Terminator) */
100 { 0x9005, 0x0286, 0x9005, 0x028c, 0, 0, 20 }, /* ASR-2230S + ASR-2230SLP PCI-X (Lancer) */
101 { 0x9005, 0x0286, 0x9005, 0x028d, 0, 0, 21 }, /* ASR-2130S (Lancer) */
102 { 0x9005, 0x0286, 0x9005, 0x029b, 0, 0, 22 }, /* AAR-2820SA (Intruder) */
103 { 0x9005, 0x0286, 0x9005, 0x029c, 0, 0, 23 }, /* AAR-2620SA (Intruder) */
104 { 0x9005, 0x0286, 0x9005, 0x029d, 0, 0, 24 }, /* AAR-2420SA (Intruder) */
105 { 0x9005, 0x0286, 0x9005, 0x0800, 0, 0, 25 }, /* Callisto Jupiter Platform */
106 { 0x9005, 0x0285, 0x9005, 0x028e, 0, 0, 26 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
107 { 0x9005, 0x0285, 0x9005, 0x028f, 0, 0, 27 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */
108 { 0x9005, 0x0285, 0x9005, 0x0290, 0, 0, 28 }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
109 { 0x9005, 0x0285, 0x1028, 0x0291, 0, 0, 29 }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
110 { 0x9005, 0x0285, 0x9005, 0x0292, 0, 0, 30 }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
111 { 0x9005, 0x0285, 0x9005, 0x0293, 0, 0, 31 }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
112 { 0x9005, 0x0285, 0x9005, 0x0294, 0, 0, 32 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
113 { 0x9005, 0x0285, 0x103C, 0x3227, 0, 0, 33 }, /* AAR-2610SA PCI SATA 6ch */
114 { 0x9005, 0x0285, 0x9005, 0x0296, 0, 0, 34 }, /* ASR-2240S (SabreExpress) */
115 { 0x9005, 0x0285, 0x9005, 0x0297, 0, 0, 35 }, /* ASR-4005SAS */
116 { 0x9005, 0x0285, 0x1014, 0x02F2, 0, 0, 36 }, /* IBM 8i (AvonPark) */
117 { 0x9005, 0x0285, 0x9005, 0x0298, 0, 0, 37 }, /* ASR-4000SAS (BlackBird) */
118 { 0x9005, 0x0285, 0x9005, 0x0299, 0, 0, 38 }, /* ASR-4800SAS (Marauder-X) */
119 { 0x9005, 0x0285, 0x9005, 0x029A, 0, 0, 39 }, /* ASR-4805SAS (Marauder-E) */
120
121 { 0x9005, 0x0285, 0x1028, 0x0287, 0, 0, 40 }, /* Perc 320/DC*/
122 { 0x1011, 0x0046, 0x9005, 0x0365, 0, 0, 41 }, /* Adaptec 5400S (Mustang)*/
123 { 0x1011, 0x0046, 0x9005, 0x0364, 0, 0, 42 }, /* Adaptec 5400S (Mustang)*/
124 { 0x1011, 0x0046, 0x9005, 0x1364, 0, 0, 43 }, /* Dell PERC2/QC */
125 { 0x1011, 0x0046, 0x103c, 0x10c2, 0, 0, 44 }, /* HP NetRAID-4M */
126
127 { 0x9005, 0x0285, 0x1028, PCI_ANY_ID, 0, 0, 45 }, /* Dell Catchall */
128 { 0x9005, 0x0285, 0x17aa, PCI_ANY_ID, 0, 0, 46 }, /* Legend Catchall */
129 { 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 47 }, /* Adaptec Catch All */
130 { 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 48 }, /* Adaptec Rocket Catch All */
131 { 0,}
132};
133MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
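
As the comment above notes, the final field of each PCI table entry is not
matching data at all: it is the driver_data value, used as an index into the
aac_drivers[] array that follows, so the two tables must stay in lockstep. A
standalone sketch of the lookup the probe routine performs (table contents
abbreviated and illustrative only):

#include <stdio.h>

struct ident {
	const char *name;	/* stand-in for struct aac_driver_ident */
};

static const struct ident drivers[] = {
	{ "percraid" },	/* index 0, as selected by the first PCI table entry */
	{ "aacraid" },	/* index 1, and so on down both tables in parallel */
};

int main(void)
{
	unsigned long driver_data = 0;	/* what id->driver_data carries after a match */
	printf("matched: %s\n", drivers[driver_data].name);
	return 0;
}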
134
135/*
136 * dmb - For now we add the number of channels to this structure.
137 * In the future we should add a fib that reports the number of channels
138 * for the card. At that time we can remove the channels from here
139 */
140static struct aac_driver_ident aac_drivers[] = {
141 { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 2/Si (Iguana/PERC2Si) */
142 { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Opal/PERC3Di) */
143	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Si (SlimFast/PERC3Si) */
144	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF) */
145 { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Viper/PERC3DiV) */
146 { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Lexus/PERC3DiL) */
147 { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
148 { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Dagger/PERC3DiD) */
149 { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Boxster/PERC3DiB) */
150 { aac_rx_init, "aacraid", "ADAPTEC ", "catapult ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* catapult */
151 { aac_rx_init, "aacraid", "ADAPTEC ", "tomcat ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* tomcat */
152 { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2120S ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2120S (Crusader) */
153 { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2200S (Vulcan) */
154 { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2200S (Vulcan-2m) */
155 { aac_rx_init, "aacraid", "Legend ", "Legend S220 ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Legend S220 (Legend Crusader) */
156 { aac_rx_init, "aacraid", "Legend ", "Legend S230 ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Legend S230 (Legend Vulcan) */
157
158 { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3230S ", 2 }, /* Adaptec 3230S (Harrier) */
159 { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3240S ", 2 }, /* Adaptec 3240S (Tornado) */
160 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2020ZCR ", 2 }, /* ASR-2020ZCR SCSI PCI-X ZCR (Skyhawk) */
161 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2025ZCR ", 2 }, /* ASR-2025ZCR SCSI SO-DIMM PCI-X ZCR (Terminator) */
162 { aac_rkt_init, "aacraid", "ADAPTEC ", "ASR-2230S PCI-X ", 2 }, /* ASR-2230S + ASR-2230SLP PCI-X (Lancer) */
163 { aac_rkt_init, "aacraid", "ADAPTEC ", "ASR-2130S PCI-X ", 1 }, /* ASR-2130S (Lancer) */
164 { aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2820SA ", 1 }, /* AAR-2820SA (Intruder) */
165 { aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2620SA ", 1 }, /* AAR-2620SA (Intruder) */
166 { aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2420SA ", 1 }, /* AAR-2420SA (Intruder) */
167 { aac_rkt_init, "aacraid", "ADAPTEC ", "Callisto ", 2, AAC_QUIRK_MASTER }, /* Jupiter Platform */
168 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2020SA ", 1 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
169 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2025SA ", 1 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */
170 { aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2410SA SATA ", 1 }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
171 { aac_rx_init, "aacraid", "DELL ", "CERC SR2 ", 1 }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
172 { aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2810SA SATA ", 1 }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
173 { aac_rx_init, "aacraid", "ADAPTEC ", "AAR-21610SA SATA", 1 }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
174 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2026ZCR ", 1 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
175 { aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2610SA ", 1 }, /* SATA 6Ch (Bearcat) */
176 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2240S ", 1 }, /* ASR-2240S (SabreExpress) */
177 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4005SAS ", 1 }, /* ASR-4005SAS */
178 { aac_rx_init, "aacraid", "IBM ", "ServeRAID 8i ", 1 }, /* IBM 8i (AvonPark) */
179 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4000SAS ", 1 }, /* ASR-4000SAS (BlackBird & AvonPark) */
180 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4800SAS ", 1 }, /* ASR-4800SAS (Marauder-X) */
181 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4805SAS ", 1 }, /* ASR-4805SAS (Marauder-E) */
182
183 { aac_rx_init, "percraid", "DELL ", "PERC 320/DC ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Perc 320/DC*/
184 { aac_sa_init, "aacraid", "ADAPTEC ", "Adaptec 5400S ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
185 { aac_sa_init, "aacraid", "ADAPTEC ", "AAC-364 ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
186 { aac_sa_init, "percraid", "DELL ", "PERCRAID ", 4, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Dell PERC2/QC */
187 { aac_sa_init, "hpnraid", "HP ", "NetRAID ", 4, AAC_QUIRK_34SG }, /* HP NetRAID-4M */
188
189 { aac_rx_init, "aacraid", "DELL ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Dell Catchall */
190 { aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Legend Catchall */
191 { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec Catch All */
192 { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec Rocket Catch All */
193};
194
195/**
196 * aac_queuecommand - queue a SCSI command
197 * @cmd: SCSI command to queue
198 * @done: Function to call on command completion
199 *
200 * Queues a command for execution by the associated Host Adapter.
201 *
202 * TODO: unify with aac_scsi_cmd().
203 */
204
205static int aac_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
206{
207 cmd->scsi_done = done;
208 return (aac_scsi_cmd(cmd) ? FAILED : 0);
209}
210
211/**
212 * aac_info - Returns the host adapter name
213 * @shost: Scsi host to report on
214 *
215 * Returns a static string describing the device in question
216 */
217
218const char *aac_info(struct Scsi_Host *shost)
219{
220 struct aac_dev *dev = (struct aac_dev *)shost->hostdata;
221 return aac_drivers[dev->cardtype].name;
222}
223
224/**
225 * aac_get_driver_ident
226 * @devtype: index into lookup table
227 *
228 * Returns a pointer to the entry in the driver lookup table.
229 */
230
231struct aac_driver_ident* aac_get_driver_ident(int devtype)
232{
233 return &aac_drivers[devtype];
234}
235
236/**
237 * aac_biosparm - return BIOS parameters for disk
238 * @sdev: The scsi device corresponding to the disk
239 * @bdev: the block device corresponding to the disk
240 * @capacity: the sector capacity of the disk
241 * @geom: geometry block to fill in
242 *
243 * Return the Heads/Sectors/Cylinders BIOS Disk Parameters for Disk.
244 * The default disk geometry is 64 heads, 32 sectors, and the appropriate
245 * number of cylinders so as not to exceed drive capacity. In order for
246 * disks equal to or larger than 1 GB to be addressable by the BIOS
247 * without exceeding the BIOS limitation of 1024 cylinders, Extended
248 * Translation should be enabled. With Extended Translation enabled,
249 * drives between 1 GB inclusive and 2 GB exclusive are given a disk
250 * geometry of 128 heads and 32 sectors, and drives above 2 GB inclusive
251 * are given a disk geometry of 255 heads and 63 sectors. However, if
252 * the BIOS detects that the Extended Translation setting does not match
253 * the geometry in the partition table, then the translation inferred
254 * from the partition table will be used by the BIOS, and a warning may
255 * be displayed.
256 */
257
258static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
259 sector_t capacity, int *geom)
260{
261 struct diskparm *param = (struct diskparm *)geom;
262 unsigned char *buf;
263
264 dprintk((KERN_DEBUG "aac_biosparm.\n"));
265
266 /*
267 * Assuming extended translation is enabled - #REVISIT#
268 */
269 if (capacity >= 2 * 1024 * 1024) { /* 1 GB in 512 byte sectors */
270 if(capacity >= 4 * 1024 * 1024) { /* 2 GB in 512 byte sectors */
271 param->heads = 255;
272 param->sectors = 63;
273 } else {
274 param->heads = 128;
275 param->sectors = 32;
276 }
277 } else {
278 param->heads = 64;
279 param->sectors = 32;
280 }
281
282 param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors);
283
284 /*
285 * Read the first 1024 bytes from the disk device, if the boot
286 * sector partition table is valid, search for a partition table
287 * entry whose end_head matches one of the standard geometry
288 * translations ( 64/32, 128/32, 255/63 ).
289 */
290 buf = scsi_bios_ptable(bdev);
291 if(*(unsigned short *)(buf + 0x40) == cpu_to_le16(0xaa55)) {
292 struct partition *first = (struct partition * )buf;
293 struct partition *entry = first;
294 int saved_cylinders = param->cylinders;
295 int num;
296 unsigned char end_head, end_sec;
297
298 for(num = 0; num < 4; num++) {
299 end_head = entry->end_head;
300 end_sec = entry->end_sector & 0x3f;
301
302 if(end_head == 63) {
303 param->heads = 64;
304 param->sectors = 32;
305 break;
306 } else if(end_head == 127) {
307 param->heads = 128;
308 param->sectors = 32;
309 break;
310 } else if(end_head == 254) {
311 param->heads = 255;
312 param->sectors = 63;
313 break;
314 }
315 entry++;
316 }
317
318 if (num == 4) {
319 end_head = first->end_head;
320 end_sec = first->end_sector & 0x3f;
321 }
322
323 param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors);
324 if (num < 4 && end_sec == param->sectors) {
325 if (param->cylinders != saved_cylinders)
326 dprintk((KERN_DEBUG "Adopting geometry: heads=%d, sectors=%d from partition table %d.\n",
327 param->heads, param->sectors, num));
328 } else if (end_head > 0 || end_sec > 0) {
329 dprintk((KERN_DEBUG "Strange geometry: heads=%d, sectors=%d in partition table %d.\n",
330 end_head + 1, end_sec, num));
331 dprintk((KERN_DEBUG "Using geometry: heads=%d, sectors=%d.\n",
332 param->heads, param->sectors));
333 }
334 }
335 kfree(buf);
336 return 0;
337}
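
A standalone sketch of the extended-translation rules described in the
comment above, applied to a hypothetical 2.5 GB disk; capacity is counted in
512-byte sectors, and plain division stands in for the driver's
cap_to_cyls() helper:

#include <stdio.h>

int main(void)
{
	unsigned long long capacity = 5ULL * 1024 * 1024;	/* 2.5 GB in 512-byte sectors */
	int heads, sectors;

	if (capacity >= 4ULL * 1024 * 1024) {		/* 2 GB and up: 255/63 */
		heads = 255;
		sectors = 63;
	} else if (capacity >= 2ULL * 1024 * 1024) {	/* 1 GB to 2 GB: 128/32 */
		heads = 128;
		sectors = 32;
	} else {					/* under 1 GB: 64/32 */
		heads = 64;
		sectors = 32;
	}
	printf("heads=%d sectors=%d cylinders=%llu\n",
	       heads, sectors, capacity / (unsigned long long)(heads * sectors));
	return 0;
}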
338
339/**
340 * aac_slave_configure - compute queue depths
341 * @sdev: SCSI device we are considering
342 *
343 * Selects queue depths for each target device based on the host adapter's
344 * total capacity and the queue depth supported by the target device.
345 * A queue depth of one automatically disables tagged queueing.
346 */
347
348static int aac_slave_configure(struct scsi_device *sdev)
349{
350 if (sdev->tagged_supported)
351 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, 128);
352 else
353 scsi_adjust_queue_depth(sdev, 0, 1);
354 return 0;
355}
356
357static int aac_ioctl(struct scsi_device *sdev, int cmd, void __user * arg)
358{
359 struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
360 return aac_do_ioctl(dev, cmd, arg);
361}
362
363/*
364 * XXX: does aac really need no error handling??
365 */
366static int aac_eh_abort(struct scsi_cmnd *cmd)
367{
368 return FAILED;
369}
370
371/*
372 * aac_eh_reset - Reset command handling
373 * @cmd: SCSI command block causing the reset
374 *
375 */
376static int aac_eh_reset(struct scsi_cmnd* cmd)
377{
378 struct scsi_device * dev = cmd->device;
379 struct Scsi_Host * host = dev->host;
380 struct scsi_cmnd * command;
381 int count;
382 struct aac_dev * aac;
383 unsigned long flags;
384
385	printk(KERN_ERR "%s: Host adapter reset request. SCSI hang?\n",
386 AAC_DRIVERNAME);
387
388
389 aac = (struct aac_dev *)host->hostdata;
390 if (aac_adapter_check_health(aac)) {
391 printk(KERN_ERR "%s: Host adapter appears dead\n",
392 AAC_DRIVERNAME);
393 return -ENODEV;
394 }
395 /*
396 * Wait for all commands to complete to this specific
397 * target (block maximum 60 seconds).
398 */
399 for (count = 60; count; --count) {
400 int active = 0;
401 __shost_for_each_device(dev, host) {
402 spin_lock_irqsave(&dev->list_lock, flags);
403 list_for_each_entry(command, &dev->cmd_list, list) {
404 if (command->serial_number) {
405 active++;
406 break;
407 }
408 }
409 spin_unlock_irqrestore(&dev->list_lock, flags);
410 if (active)
411 break;
412
413 }
414 /*
415		 * We can exit if all the commands are complete
416 */
417 if (active == 0)
418 return SUCCESS;
419 spin_unlock_irq(host->host_lock);
420 ssleep(1);
421 spin_lock_irq(host->host_lock);
422 }
423 printk(KERN_ERR "%s: SCSI bus appears hung\n", AAC_DRIVERNAME);
424 return -ETIMEDOUT;
425}
426
427/**
428 * aac_cfg_open - open a configuration file
429 * @inode: inode being opened
430 * @file: file handle attached
431 *
432 *	Called when the configuration device is opened. Does the needed
433 *	setup on the handle and then returns.
434 *
435 * Bugs: This needs extending to check a given adapter is present
436 * so we can support hot plugging, and to ref count adapters.
437 */
438
439static int aac_cfg_open(struct inode *inode, struct file *file)
440{
441 struct aac_dev *aac;
442 unsigned minor = iminor(inode);
443 int err = -ENODEV;
444
445 list_for_each_entry(aac, &aac_devices, entry) {
446 if (aac->id == minor) {
447 file->private_data = aac;
448 err = 0;
449 break;
450 }
451 }
452
453	return err;
454}
455
456/**
457 * aac_cfg_ioctl - AAC configuration request
458 * @inode: inode of device
459 * @file: file handle
460 * @cmd: ioctl command code
461 * @arg: argument
462 *
463 * Handles a configuration ioctl. Currently this involves wrapping it
464 *	up and feeding it into the nasty Windows-alike glue layer.
465 *
466 * Bugs: Needs locking against parallel ioctls lower down
467 * Bugs: Needs to handle hot plugging
468 */
469
470static int aac_cfg_ioctl(struct inode *inode, struct file *file,
471 unsigned int cmd, unsigned long arg)
472{
473 return aac_do_ioctl(file->private_data, cmd, (void __user *)arg);
474}
475
476#ifdef CONFIG_COMPAT
477static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long arg)
478{
479 long ret;
480 lock_kernel();
481 switch (cmd) {
482 case FSACTL_MINIPORT_REV_CHECK:
483 case FSACTL_SENDFIB:
484 case FSACTL_OPEN_GET_ADAPTER_FIB:
485 case FSACTL_CLOSE_GET_ADAPTER_FIB:
486 case FSACTL_SEND_RAW_SRB:
487 case FSACTL_GET_PCI_INFO:
488 case FSACTL_QUERY_DISK:
489 case FSACTL_DELETE_DISK:
490 case FSACTL_FORCE_DELETE_DISK:
491 case FSACTL_GET_CONTAINERS:
492 ret = aac_do_ioctl(dev, cmd, (void __user *)arg);
493 break;
494
495 case FSACTL_GET_NEXT_ADAPTER_FIB: {
496 struct fib_ioctl __user *f;
497
498 f = compat_alloc_user_space(sizeof(*f));
499 ret = 0;
500		if (clear_user(f, sizeof(*f)))	/* nonzero means bytes were left uncleared */
501 ret = -EFAULT;
502 if (copy_in_user(f, (void __user *)arg, sizeof(struct fib_ioctl) - sizeof(u32)))
503 ret = -EFAULT;
504 if (!ret)
505 ret = aac_do_ioctl(dev, cmd, (void __user *)arg);
506 break;
507 }
508
509 default:
510 ret = -ENOIOCTLCMD;
511 break;
512 }
513 unlock_kernel();
514 return ret;
515}
516
517static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
518{
519 struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
520 return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg);
521}
522
523static long aac_compat_cfg_ioctl(struct file *file, unsigned cmd, unsigned long arg)
524{
525 return aac_compat_do_ioctl((struct aac_dev *)file->private_data, cmd, arg);
526}
527#endif
528
529static struct file_operations aac_cfg_fops = {
530 .owner = THIS_MODULE,
531 .ioctl = aac_cfg_ioctl,
532#ifdef CONFIG_COMPAT
533 .compat_ioctl = aac_compat_cfg_ioctl,
534#endif
535 .open = aac_cfg_open,
536};
537
538static struct scsi_host_template aac_driver_template = {
539 .module = THIS_MODULE,
540 .name = "AAC",
541 .proc_name = "aacraid",
542 .info = aac_info,
543 .ioctl = aac_ioctl,
544#ifdef CONFIG_COMPAT
545 .compat_ioctl = aac_compat_ioctl,
546#endif
547 .queuecommand = aac_queuecommand,
548 .bios_param = aac_biosparm,
549 .slave_configure = aac_slave_configure,
550 .eh_abort_handler = aac_eh_abort,
551 .eh_host_reset_handler = aac_eh_reset,
552 .can_queue = AAC_NUM_IO_FIB,
553 .this_id = 16,
554 .sg_tablesize = 16,
555 .max_sectors = 128,
556#if (AAC_NUM_IO_FIB > 256)
557 .cmd_per_lun = 256,
558#else
559 .cmd_per_lun = AAC_NUM_IO_FIB,
560#endif
561 .use_clustering = ENABLE_CLUSTERING,
562};
563
564
565static int __devinit aac_probe_one(struct pci_dev *pdev,
566 const struct pci_device_id *id)
567{
568 unsigned index = id->driver_data;
569 struct Scsi_Host *shost;
570 struct aac_dev *aac;
571 struct list_head *insert = &aac_devices;
572 int error = -ENODEV;
573 int unique_id = 0;
574
575 list_for_each_entry(aac, &aac_devices, entry) {
576 if (aac->id > unique_id)
577 break;
578 insert = &aac->entry;
579 unique_id++;
580 }
581
582 if (pci_enable_device(pdev))
583 goto out;
584
585 if (pci_set_dma_mask(pdev, 0xFFFFFFFFULL) ||
586 pci_set_consistent_dma_mask(pdev, 0xFFFFFFFFULL))
587 goto out;
588 /*
589	 * If the quirk31 bit is set, the adapter needs its adapter-to-driver
590	 * communication memory to be allocated below the 2GB boundary.
591 */
592 if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
593 if (pci_set_dma_mask(pdev, 0x7FFFFFFFULL) ||
594 pci_set_consistent_dma_mask(pdev, 0x7FFFFFFFULL))
595 goto out;
596
597 pci_set_master(pdev);
598
599 shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev));
600 if (!shost)
601 goto out_disable_pdev;
602
603 shost->irq = pdev->irq;
604 shost->base = pci_resource_start(pdev, 0);
605 shost->unique_id = unique_id;
606
607 aac = (struct aac_dev *)shost->hostdata;
608 aac->scsi_host_ptr = shost;
609 aac->pdev = pdev;
610 aac->name = aac_driver_template.name;
611 aac->id = shost->unique_id;
612 aac->cardtype = index;
613 INIT_LIST_HEAD(&aac->entry);
614
615 aac->fibs = kmalloc(sizeof(struct fib) * AAC_NUM_FIB, GFP_KERNEL);
616 if (!aac->fibs)
617 goto out_free_host;
618 spin_lock_init(&aac->fib_lock);
619
620 if ((*aac_drivers[index].init)(aac))
621 goto out_free_fibs;
622
623 /*
624	 * If we had set a smaller DMA mask earlier, widen it to 4GB
625	 * now, since the adapter can DMA data anywhere in at least a
626	 * 4GB address space.
627 */
628 if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
629 if (pci_set_dma_mask(pdev, 0xFFFFFFFFULL))
630 goto out_free_fibs;
631
632 aac_get_adapter_info(aac);
633
634 /*
635	 * max_channel is the number of physical channels plus one virtual channel;
636	 * all containers live on virtual channel 0, and physical channels
637	 * are addressed by their actual physical number + 1.
638 */
639 if (aac->nondasd_support == 1)
640 shost->max_channel = aac_drivers[index].channels+1;
641 else
642 shost->max_channel = 1;
643
644 aac_get_config_status(aac);
645 aac_get_containers(aac);
646 list_add(&aac->entry, insert);
647
648 shost->max_id = aac->maximum_num_containers;
649 if (shost->max_id < MAXIMUM_NUM_CONTAINERS)
650 shost->max_id = MAXIMUM_NUM_CONTAINERS;
651 else
652 shost->this_id = shost->max_id;
653
654 /*
655 * dmb - we may need to move the setting of these parms somewhere else once
656 * we get a fib that can report the actual numbers
657 */
658 shost->max_lun = AAC_MAX_LUN;
659
660 pci_set_drvdata(pdev, shost);
661
662 error = scsi_add_host(shost, &pdev->dev);
663 if (error)
664 goto out_deinit;
665 scsi_scan_host(shost);
666
667 return 0;
668
669out_deinit:
670 kill_proc(aac->thread_pid, SIGKILL, 0);
671 wait_for_completion(&aac->aif_completion);
672
673 aac_send_shutdown(aac);
674 fib_map_free(aac);
675 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
676 kfree(aac->queues);
677 free_irq(pdev->irq, aac);
678 iounmap(aac->regs.sa);
679 out_free_fibs:
680 kfree(aac->fibs);
681 kfree(aac->fsa_dev);
682 out_free_host:
683 scsi_host_put(shost);
684 out_disable_pdev:
685 pci_disable_device(pdev);
686 out:
687 return error;
688}
689
690static void __devexit aac_remove_one(struct pci_dev *pdev)
691{
692 struct Scsi_Host *shost = pci_get_drvdata(pdev);
693 struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
694
695 scsi_remove_host(shost);
696
697 kill_proc(aac->thread_pid, SIGKILL, 0);
698 wait_for_completion(&aac->aif_completion);
699
700 aac_send_shutdown(aac);
701 fib_map_free(aac);
702 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr,
703 aac->comm_phys);
704 kfree(aac->queues);
705
706 free_irq(pdev->irq, aac);
707 iounmap(aac->regs.sa);
708
709 kfree(aac->fibs);
710
711 list_del(&aac->entry);
712 scsi_host_put(shost);
713 pci_disable_device(pdev);
714}
715
716static struct pci_driver aac_pci_driver = {
717 .name = AAC_DRIVERNAME,
718 .id_table = aac_pci_tbl,
719 .probe = aac_probe_one,
720 .remove = __devexit_p(aac_remove_one),
721};
722
723static int __init aac_init(void)
724{
725 int error;
726
727 printk(KERN_INFO "Red Hat/Adaptec aacraid driver (%s %s)\n",
728 AAC_DRIVER_VERSION, AAC_DRIVER_BUILD_DATE);
729
730 error = pci_module_init(&aac_pci_driver);
731 if (error)
732 return error;
733
734 aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops);
735 if (aac_cfg_major < 0) {
736 printk(KERN_WARNING
737 "aacraid: unable to register \"aac\" device.\n");
738 }
739 return 0;
740}
741
742static void __exit aac_exit(void)
743{
744 unregister_chrdev(aac_cfg_major, "aac");
745 pci_unregister_driver(&aac_pci_driver);
746}
747
748module_init(aac_init);
749module_exit(aac_exit);
diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c
new file mode 100644
index 000000000000..1b8ed47cfe30
--- /dev/null
+++ b/drivers/scsi/aacraid/rkt.c
@@ -0,0 +1,440 @@
1/*
2 * Adaptec AAC series RAID controller driver
3 * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
4 *
5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux.
7 *
8 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; see the file COPYING. If not, write to
22 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 * Module Name:
25 * rkt.c
26 *
27 * Abstract: Hardware miniport for Rocket (rkt) specific hardware functions.
28 *
29 */
30
31#include <linux/kernel.h>
32#include <linux/init.h>
33#include <linux/types.h>
34#include <linux/sched.h>
35#include <linux/pci.h>
36#include <linux/spinlock.h>
37#include <linux/slab.h>
38#include <linux/blkdev.h>
39#include <linux/delay.h>
40#include <linux/completion.h>
41#include <linux/time.h>
42#include <linux/interrupt.h>
43#include <asm/semaphore.h>
44
45#include <scsi/scsi_host.h>
46
47#include "aacraid.h"
48
49static irqreturn_t aac_rkt_intr(int irq, void *dev_id, struct pt_regs *regs)
50{
51 struct aac_dev *dev = dev_id;
52 unsigned long bellbits;
53 u8 intstat, mask;
54 intstat = rkt_readb(dev, MUnit.OISR);
55 /*
56 * Read mask and invert because drawbridge is reversed.
57 * This allows us to only service interrupts that have
58 * been enabled.
59 */
60 mask = ~(dev->OIMR);
61 /* Check to see if this is our interrupt. If it isn't just return */
62 if (intstat & mask)
63 {
64 bellbits = rkt_readl(dev, OutboundDoorbellReg);
65 if (bellbits & DoorBellPrintfReady) {
66 aac_printf(dev, rkt_readl(dev, IndexRegs.Mailbox[5]));
67 rkt_writel(dev, MUnit.ODR,DoorBellPrintfReady);
68 rkt_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
69 }
70 else if (bellbits & DoorBellAdapterNormCmdReady) {
71 rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
72 aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
73 }
74 else if (bellbits & DoorBellAdapterNormRespReady) {
75 aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
76 rkt_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
77 }
78 else if (bellbits & DoorBellAdapterNormCmdNotFull) {
79 rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
80 }
81 else if (bellbits & DoorBellAdapterNormRespNotFull) {
82 rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
83 rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
84 }
85 return IRQ_HANDLED;
86 }
87 return IRQ_NONE;
88}
89
90/**
91 * rkt_sync_cmd - send a command and wait
92 * @dev: Adapter
93 * @command: Command to execute
94 * @p1: first parameter
95 *	@status: adapter status
96 *
97 * This routine will send a synchronous command to the adapter and wait
98 * for its completion.
99 */
100
101static int rkt_sync_cmd(struct aac_dev *dev, u32 command, u32 p1, u32 *status)
102{
103 unsigned long start;
104 int ok;
105 /*
106 * Write the command into Mailbox 0
107 */
108 rkt_writel(dev, InboundMailbox0, command);
109 /*
110 * Write the parameters into Mailboxes 1 - 4
111 */
112 rkt_writel(dev, InboundMailbox1, p1);
113 rkt_writel(dev, InboundMailbox2, 0);
114 rkt_writel(dev, InboundMailbox3, 0);
115 rkt_writel(dev, InboundMailbox4, 0);
116 /*
117 * Clear the synch command doorbell to start on a clean slate.
118 */
119 rkt_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
120 /*
121 * Disable doorbell interrupts
122 */
123 rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
124 /*
125 * Force the completion of the mask register write before issuing
126 * the interrupt.
127 */
128 rkt_readb (dev, MUnit.OIMR);
129 /*
130 * Signal that there is a new synch command
131 */
132 rkt_writel(dev, InboundDoorbellReg, INBOUNDDOORBELL_0);
133
134 ok = 0;
135 start = jiffies;
136
137 /*
138 * Wait up to 30 seconds
139 */
140 while (time_before(jiffies, start+30*HZ))
141 {
142 udelay(5); /* Delay 5 microseconds to let Mon960 get info. */
143 /*
144 * Mon960 will set doorbell0 bit when it has completed the command.
145 */
146 if (rkt_readl(dev, OutboundDoorbellReg) & OUTBOUNDDOORBELL_0) {
147 /*
148 * Clear the doorbell.
149 */
150 rkt_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
151 ok = 1;
152 break;
153 }
154 /*
155 * Yield the processor in case we are slow
156 */
157 set_current_state(TASK_UNINTERRUPTIBLE);
158 schedule_timeout(1);
159 }
160 if (ok != 1) {
161 /*
162 * Restore interrupt mask even though we timed out
163 */
164 rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
165 return -ETIMEDOUT;
166 }
167 /*
168 * Pull the synch status from Mailbox 0.
169 */
170 if (status)
171 *status = rkt_readl(dev, IndexRegs.Mailbox[0]);
172 /*
173 * Clear the synch command doorbell.
174 */
175 rkt_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
176 /*
177 * Restore interrupt mask
178 */
179 rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
180 return 0;
181
182}
183
184/**
185 * aac_rkt_interrupt_adapter - interrupt adapter
186 * @dev: Adapter
187 *
188 * Send an interrupt to the i960 and breakpoint it.
189 */
190
191static void aac_rkt_interrupt_adapter(struct aac_dev *dev)
192{
193 u32 ret;
194 rkt_sync_cmd(dev, BREAKPOINT_REQUEST, 0, &ret);
195}
196
197/**
198 * aac_rkt_notify_adapter - send an event to the adapter
199 * @dev: Adapter
200 * @event: Event to send
201 *
202 * Notify the i960 that something it probably cares about has
203 * happened.
204 */
205
206static void aac_rkt_notify_adapter(struct aac_dev *dev, u32 event)
207{
208 switch (event) {
209
210 case AdapNormCmdQue:
211 rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_1);
212 break;
213 case HostNormRespNotFull:
214 rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_4);
215 break;
216 case AdapNormRespQue:
217 rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_2);
218 break;
219 case HostNormCmdNotFull:
220 rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_3);
221 break;
222 case HostShutdown:
223// rkt_sync_cmd(dev, HOST_CRASHING, 0, 0, 0, 0, &ret);
224 break;
225 case FastIo:
226 rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_6);
227 break;
228 case AdapPrintfDone:
229 rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_5);
230 break;
231 default:
232 BUG();
233 break;
234 }
235}
236
237/**
238 * aac_rkt_start_adapter - activate adapter
239 * @dev: Adapter
240 *
241 * Start up processing on an i960 based AAC adapter
242 */
243
244static void aac_rkt_start_adapter(struct aac_dev *dev)
245{
246 u32 status;
247 struct aac_init *init;
248
249 init = dev->init;
250 init->HostElapsedSeconds = cpu_to_le32(get_seconds());
251 /*
252 * Tell the adapter we are back and up and running so it will scan
253 * its command queues and enable our interrupts
254 */
255 dev->irq_mask = (DoorBellPrintfReady | OUTBOUNDDOORBELL_1 | OUTBOUNDDOORBELL_2 | OUTBOUNDDOORBELL_3 | OUTBOUNDDOORBELL_4);
256 /*
257	 *	First clear out all interrupts. Then enable the ones that we
258 * can handle.
259 */
260 rkt_writeb(dev, MUnit.OIMR, 0xff);
261 rkt_writel(dev, MUnit.ODR, 0xffffffff);
262// rkt_writeb(dev, MUnit.OIMR, ~(u8)OUTBOUND_DOORBELL_INTERRUPT_MASK);
263 rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
264
265 // We can only use a 32 bit address here
266 rkt_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa, &status);
267}
268
269/**
270 * aac_rkt_check_health
271 * @dev: device to check if healthy
272 *
273 * Will attempt to determine if the specified adapter is alive and
274 * capable of handling requests, returning 0 if alive.
275 */
276static int aac_rkt_check_health(struct aac_dev *dev)
277{
278 u32 status = rkt_readl(dev, MUnit.OMRx[0]);
279
280 /*
281 * Check to see if the board failed any self tests.
282 */
283 if (status & SELF_TEST_FAILED)
284 return -1;
285 /*
286 * Check to see if the board panic'd.
287 */
288 if (status & KERNEL_PANIC) {
289 char * buffer;
290 struct POSTSTATUS {
291 u32 Post_Command;
292 u32 Post_Address;
293 } * post;
294 dma_addr_t paddr, baddr;
295 int ret;
296
297 if ((status & 0xFF000000L) == 0xBC000000L)
298 return (status >> 16) & 0xFF;
299 buffer = pci_alloc_consistent(dev->pdev, 512, &baddr);
300 ret = -2;
301 if (buffer == NULL)
302 return ret;
303 post = pci_alloc_consistent(dev->pdev,
304 sizeof(struct POSTSTATUS), &paddr);
305 if (post == NULL) {
306 pci_free_consistent(dev->pdev, 512, buffer, baddr);
307 return ret;
308 }
309 memset(buffer, 0, 512);
310 post->Post_Command = cpu_to_le32(COMMAND_POST_RESULTS);
311 post->Post_Address = cpu_to_le32(baddr);
312 rkt_writel(dev, MUnit.IMRx[0], paddr);
313 rkt_sync_cmd(dev, COMMAND_POST_RESULTS, baddr, &status);
314 pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS),
315 post, paddr);
316 if ((buffer[0] == '0') && (buffer[1] == 'x')) {
317 ret = (buffer[2] <= '9') ? (buffer[2] - '0') : (buffer[2] - 'A' + 10);
318 ret <<= 4;
319 ret += (buffer[3] <= '9') ? (buffer[3] - '0') : (buffer[3] - 'A' + 10);
320 }
321 pci_free_consistent(dev->pdev, 512, buffer, baddr);
322 return ret;
323 }
324 /*
325 * Wait for the adapter to be up and running.
326 */
327 if (!(status & KERNEL_UP_AND_RUNNING))
328 return -3;
329 /*
330 * Everything is OK
331 */
332 return 0;
333}
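
When the board has panicked, the routine above asks the monitor to post its
results as an ASCII "0xNN" string and folds the two hex digits into a return
code. A standalone sketch of that parse (the POST string is made up; like
the kernel code, it assumes upper-case hex digits):

#include <stdio.h>

int main(void)
{
	const char buffer[] = "0x4A";	/* hypothetical POST result string */
	int ret;

	ret = (buffer[2] <= '9') ? (buffer[2] - '0') : (buffer[2] - 'A' + 10);
	ret <<= 4;
	ret += (buffer[3] <= '9') ? (buffer[3] - '0') : (buffer[3] - 'A' + 10);
	printf("0x%02X\n", ret);	/* prints 0x4A */
	return 0;
}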
334
335/**
336 * aac_rkt_init - initialize an i960 based AAC card
337 * @dev: device to configure
338 *
339 * Allocate and set up resources for the i960 based AAC variants. The
340 * device_interface in the commregion will be allocated and linked
341 * to the comm region.
342 */
343
344int aac_rkt_init(struct aac_dev *dev)
345{
346 unsigned long start;
347 unsigned long status;
348 int instance;
349 const char * name;
350
351 instance = dev->id;
352 name = dev->name;
353
354 /*
355 * Map in the registers from the adapter.
356 */
357 if((dev->regs.rkt = ioremap((unsigned long)dev->scsi_host_ptr->base, 8192))==NULL)
358 {
359 printk(KERN_WARNING "aacraid: unable to map i960.\n" );
360 goto error_iounmap;
361 }
362 /*
363 * Check to see if the board failed any self tests.
364 */
365 if (rkt_readl(dev, MUnit.OMRx[0]) & SELF_TEST_FAILED) {
366 printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
367 goto error_iounmap;
368 }
369 /*
370 * Check to see if the monitor panic'd while booting.
371 */
372 if (rkt_readl(dev, MUnit.OMRx[0]) & MONITOR_PANIC) {
373 printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
374 goto error_iounmap;
375 }
376 /*
377 * Check to see if the board panic'd while booting.
378 */
379 if (rkt_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC) {
380 printk(KERN_ERR "%s%d: adapter kernel panic'd.\n", dev->name, instance);
381 goto error_iounmap;
382 }
383 start = jiffies;
384 /*
385 * Wait for the adapter to be up and running. Wait up to 3 minutes
386 */
387 while (!(rkt_readl(dev, MUnit.OMRx[0]) & KERNEL_UP_AND_RUNNING))
388 {
389 if(time_after(jiffies, start+180*HZ))
390 {
391 status = rkt_readl(dev, MUnit.OMRx[0]);
392 printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
393 dev->name, instance, status);
394 goto error_iounmap;
395 }
396 set_current_state(TASK_UNINTERRUPTIBLE);
397 schedule_timeout(1);
398 }
399 if (request_irq(dev->scsi_host_ptr->irq, aac_rkt_intr, SA_SHIRQ|SA_INTERRUPT, "aacraid", (void *)dev)<0)
400 {
401 printk(KERN_ERR "%s%d: Interrupt unavailable.\n", name, instance);
402 goto error_iounmap;
403 }
404 /*
405 * Fill in the function dispatch table.
406 */
407 dev->a_ops.adapter_interrupt = aac_rkt_interrupt_adapter;
408 dev->a_ops.adapter_notify = aac_rkt_notify_adapter;
409 dev->a_ops.adapter_sync_cmd = rkt_sync_cmd;
410 dev->a_ops.adapter_check_health = aac_rkt_check_health;
411
412 if (aac_init_adapter(dev) == NULL)
413 goto error_irq;
414 /*
415 * Start any kernel threads needed
416 */
417 dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, dev, 0);
418 if(dev->thread_pid < 0)
419 {
420 printk(KERN_ERR "aacraid: Unable to create rkt thread.\n");
421 goto error_kfree;
422 }
423 /*
424 * Tell the adapter that all is configured, and it can start
425 * accepting requests
426 */
427 aac_rkt_start_adapter(dev);
428 return 0;
429
430error_kfree:
431 kfree(dev->queues);
432
433error_irq:
434 free_irq(dev->scsi_host_ptr->irq, (void *)dev);
435
436error_iounmap:
437 iounmap(dev->regs.rkt);
438
439 return -1;
440}
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
new file mode 100644
index 000000000000..630b99e1fe83
--- /dev/null
+++ b/drivers/scsi/aacraid/rx.c
@@ -0,0 +1,441 @@
1/*
2 * Adaptec AAC series RAID controller driver
3 * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
4 *
5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux.
7 *
8 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; see the file COPYING. If not, write to
22 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 * Module Name:
25 * rx.c
26 *
27 * Abstract: Hardware miniport for Drawbridge specific hardware functions.
28 *
29 */
30
31#include <linux/kernel.h>
32#include <linux/init.h>
33#include <linux/types.h>
34#include <linux/sched.h>
35#include <linux/pci.h>
36#include <linux/spinlock.h>
37#include <linux/slab.h>
38#include <linux/blkdev.h>
39#include <linux/delay.h>
40#include <linux/completion.h>
41#include <linux/time.h>
42#include <linux/interrupt.h>
43#include <asm/semaphore.h>
44
45#include <scsi/scsi_host.h>
46
47#include "aacraid.h"
48
49static irqreturn_t aac_rx_intr(int irq, void *dev_id, struct pt_regs *regs)
50{
51 struct aac_dev *dev = dev_id;
52 unsigned long bellbits;
53 u8 intstat, mask;
54 intstat = rx_readb(dev, MUnit.OISR);
55 /*
56 * Read mask and invert because drawbridge is reversed.
57 * This allows us to only service interrupts that have
58 * been enabled.
59 */
60 mask = ~(dev->OIMR);
61 /* Check to see if this is our interrupt. If it isn't just return */
62 if (intstat & mask)
63 {
64 bellbits = rx_readl(dev, OutboundDoorbellReg);
65 if (bellbits & DoorBellPrintfReady) {
66 aac_printf(dev, le32_to_cpu(rx_readl (dev, IndexRegs.Mailbox[5])));
67 rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
68 rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
69 }
70 else if (bellbits & DoorBellAdapterNormCmdReady) {
71 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
72 aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
73 }
74 else if (bellbits & DoorBellAdapterNormRespReady) {
75 aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
76 rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
77 }
78 else if (bellbits & DoorBellAdapterNormCmdNotFull) {
79 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
80 }
81 else if (bellbits & DoorBellAdapterNormRespNotFull) {
82 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
83 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
84 }
85 return IRQ_HANDLED;
86 }
87 return IRQ_NONE;
88}
89
90/**
91 * rx_sync_cmd - send a command and wait
92 * @dev: Adapter
93 * @command: Command to execute
94 * @p1: first parameter
95 *	@status: adapter status
96 *
97 * This routine will send a synchronous command to the adapter and wait
98 * for its completion.
99 */
100
101static int rx_sync_cmd(struct aac_dev *dev, u32 command, u32 p1, u32 *status)
102{
103 unsigned long start;
104 int ok;
105 /*
106 * Write the command into Mailbox 0
107 */
108 rx_writel(dev, InboundMailbox0, command);
109 /*
110 * Write the parameters into Mailboxes 1 - 4
111 */
112 rx_writel(dev, InboundMailbox1, p1);
113 rx_writel(dev, InboundMailbox2, 0);
114 rx_writel(dev, InboundMailbox3, 0);
115 rx_writel(dev, InboundMailbox4, 0);
116 /*
117 * Clear the synch command doorbell to start on a clean slate.
118 */
119 rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
120 /*
121 * Disable doorbell interrupts
122 */
123 rx_writeb(dev, MUnit.OIMR, dev->OIMR |= 0x04);
124 /*
125 * Force the completion of the mask register write before issuing
126 * the interrupt.
127 */
128 rx_readb (dev, MUnit.OIMR);
129 /*
130 * Signal that there is a new synch command
131 */
132 rx_writel(dev, InboundDoorbellReg, INBOUNDDOORBELL_0);
133
134 ok = 0;
135 start = jiffies;
136
137 /*
138 * Wait up to 30 seconds
139 */
140 while (time_before(jiffies, start+30*HZ))
141 {
142 udelay(5); /* Delay 5 microseconds to let Mon960 get info. */
143 /*
144 * Mon960 will set doorbell0 bit when it has completed the command.
145 */
146 if (rx_readl(dev, OutboundDoorbellReg) & OUTBOUNDDOORBELL_0) {
147 /*
148 * Clear the doorbell.
149 */
150 rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
151 ok = 1;
152 break;
153 }
154 /*
155 * Yield the processor in case we are slow
156 */
157 set_current_state(TASK_UNINTERRUPTIBLE);
158 schedule_timeout(1);
159 }
160 if (ok != 1) {
161 /*
162 * Restore interrupt mask even though we timed out
163 */
164 rx_writeb(dev, MUnit.OIMR, dev->OIMR &= 0xfb);
165 return -ETIMEDOUT;
166 }
167 /*
168 * Pull the synch status from Mailbox 0.
169 */
170 if (status)
171 *status = rx_readl(dev, IndexRegs.Mailbox[0]);
172 /*
173 * Clear the synch command doorbell.
174 */
175 rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
176 /*
177 * Restore interrupt mask
178 */
179 rx_writeb(dev, MUnit.OIMR, dev->OIMR &= 0xfb);
180 return 0;
181
182}
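
/*
 * Usage sketch (illustrative only, not part of the driver): a caller
 * issues a synchronous command and, when rx_sync_cmd() returns 0,
 * finds the adapter's status word in the variable it passed in, e.g.
 *
 *	u32 status;
 *	if (rx_sync_cmd(dev, BREAKPOINT_REQUEST, 0, &status))
 *		printk(KERN_ERR "aacraid: breakpoint request timed out\n");
 *
 * aac_rx_interrupt_adapter() below is exactly this pattern.
 */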
183
184/**
185 * aac_rx_interrupt_adapter - interrupt adapter
186 * @dev: Adapter
187 *
188 * Send an interrupt to the i960 and breakpoint it.
189 */
190
191static void aac_rx_interrupt_adapter(struct aac_dev *dev)
192{
193 u32 ret;
194 rx_sync_cmd(dev, BREAKPOINT_REQUEST, 0, &ret);
195}
196
197/**
198 * aac_rx_notify_adapter - send an event to the adapter
199 * @dev: Adapter
200 * @event: Event to send
201 *
202 * Notify the i960 that something it probably cares about has
203 * happened.
204 */
205
206static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event)
207{
208 switch (event) {
209
210 case AdapNormCmdQue:
211 rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_1);
212 break;
213 case HostNormRespNotFull:
214 rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_4);
215 break;
216 case AdapNormRespQue:
217 rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_2);
218 break;
219 case HostNormCmdNotFull:
220 rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_3);
221 break;
222 case HostShutdown:
223//		rx_sync_cmd(dev, HOST_CRASHING, 0, &ret);
224 break;
225 case FastIo:
226 rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_6);
227 break;
228 case AdapPrintfDone:
229 rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_5);
230 break;
231 default:
232 BUG();
233 break;
234 }
235}
236
237/**
238 * aac_rx_start_adapter - activate adapter
239 * @dev: Adapter
240 *
241 * Start up processing on an i960 based AAC adapter
242 */
243
244static void aac_rx_start_adapter(struct aac_dev *dev)
245{
246 u32 status;
247 struct aac_init *init;
248
249 init = dev->init;
250 init->HostElapsedSeconds = cpu_to_le32(get_seconds());
251 /*
252 * Tell the adapter we are back and up and running so it will scan
253 * its command queues and enable our interrupts
254 */
255 dev->irq_mask = (DoorBellPrintfReady | OUTBOUNDDOORBELL_1 | OUTBOUNDDOORBELL_2 | OUTBOUNDDOORBELL_3 | OUTBOUNDDOORBELL_4);
256 /*
257 * First clear out all interrupts. Then enable the ones that we
258 * can handle.
259 */
260 rx_writeb(dev, MUnit.OIMR, 0xff);
261 rx_writel(dev, MUnit.ODR, 0xffffffff);
262// rx_writeb(dev, MUnit.OIMR, ~(u8)OUTBOUND_DOORBELL_INTERRUPT_MASK);
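	/* 0xfb = ~0x04: unmask only the doorbell interrupt that aac_rx_intr() services */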
263 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
264
265	/* We can only use a 32 bit address here */
266 rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa, &status);
267}
268
269/**
270 * aac_rx_check_health
271 * @dev: device to check if healthy
272 *
273 * Will attempt to determine if the specified adapter is alive and
274 * capable of handling requests, returning 0 if alive, -1 if the
 * self-test failed, -3 if the adapter kernel is not up and running,
 * a positive POST/panic code when the adapter panicked and reported
 * one, and -2 when no such code could be recovered.
275 */
276static int aac_rx_check_health(struct aac_dev *dev)
277{
278 u32 status = rx_readl(dev, MUnit.OMRx[0]);
279
280 /*
281 * Check to see if the board failed any self tests.
282 */
283 if (status & SELF_TEST_FAILED)
284 return -1;
285 /*
286 * Check to see if the board panic'd.
287 */
288 if (status & KERNEL_PANIC) {
289 char * buffer;
290 struct POSTSTATUS {
291 u32 Post_Command;
292 u32 Post_Address;
293 } * post;
294 dma_addr_t paddr, baddr;
295 int ret;
296
297 if ((status & 0xFF000000L) == 0xBC000000L)
298 return (status >> 16) & 0xFF;
299 buffer = pci_alloc_consistent(dev->pdev, 512, &baddr);
300 ret = -2;
301 if (buffer == NULL)
302 return ret;
303 post = pci_alloc_consistent(dev->pdev,
304 sizeof(struct POSTSTATUS), &paddr);
305 if (post == NULL) {
306 pci_free_consistent(dev->pdev, 512, buffer, baddr);
307 return ret;
308 }
309 memset(buffer, 0, 512);
310 post->Post_Command = cpu_to_le32(COMMAND_POST_RESULTS);
311 post->Post_Address = cpu_to_le32(baddr);
312 rx_writel(dev, MUnit.IMRx[0], paddr);
313 rx_sync_cmd(dev, COMMAND_POST_RESULTS, baddr, &status);
314 pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS),
315 post, paddr);
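		/*
		 * The POST result comes back as an ASCII hex string: e.g.
		 * "0x2A" decodes to (2 << 4) + 10 = 42. Only upper-case
		 * hex digits are handled below.
		 */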
316 if ((buffer[0] == '0') && (buffer[1] == 'x')) {
317 ret = (buffer[2] <= '9') ? (buffer[2] - '0') : (buffer[2] - 'A' + 10);
318 ret <<= 4;
319 ret += (buffer[3] <= '9') ? (buffer[3] - '0') : (buffer[3] - 'A' + 10);
320 }
321 pci_free_consistent(dev->pdev, 512, buffer, baddr);
322 return ret;
323 }
324 /*
325 * Wait for the adapter to be up and running.
326 */
327 if (!(status & KERNEL_UP_AND_RUNNING))
328 return -3;
329 /*
330 * Everything is OK
331 */
332 return 0;
333}
334
335/**
336 * aac_rx_init - initialize an i960 based AAC card
337 * @dev: device to configure
338 *
339 * Allocate and set up resources for the i960 based AAC variants. The
340 * device_interface in the commregion will be allocated and linked
341 * to the comm region.
342 */
343
344int aac_rx_init(struct aac_dev *dev)
345{
346 unsigned long start;
347 unsigned long status;
348 int instance;
349 const char * name;
350
351 instance = dev->id;
352 name = dev->name;
353
354 /*
355 * Map in the registers from the adapter.
356 */
357 if((dev->regs.rx = ioremap((unsigned long)dev->scsi_host_ptr->base, 8192))==NULL)
358 {
359		printk(KERN_WARNING "aacraid: unable to map i960.\n");
360 return -1;
361 }
362 /*
363 * Check to see if the board failed any self tests.
364 */
365 if (rx_readl(dev, MUnit.OMRx[0]) & SELF_TEST_FAILED) {
366 printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
367 goto error_iounmap;
368 }
369 /*
370 * Check to see if the board panic'd while booting.
371 */
372 if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC) {
373 printk(KERN_ERR "%s%d: adapter kernel panic.\n", dev->name, instance);
374 goto error_iounmap;
375 }
376 /*
377 * Check to see if the monitor panic'd while booting.
378 */
379 if (rx_readl(dev, MUnit.OMRx[0]) & MONITOR_PANIC) {
380 printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
381 goto error_iounmap;
382 }
383 start = jiffies;
384 /*
385 * Wait for the adapter to be up and running. Wait up to 3 minutes
386 */
387 while ((!(rx_readl(dev, IndexRegs.Mailbox[7]) & KERNEL_UP_AND_RUNNING))
388 || (!(rx_readl(dev, MUnit.OMRx[0]) & KERNEL_UP_AND_RUNNING)))
389 {
390 if(time_after(jiffies, start+180*HZ))
391 {
392 status = rx_readl(dev, IndexRegs.Mailbox[7]);
393 printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
394 dev->name, instance, status);
395 goto error_iounmap;
396 }
397 set_current_state(TASK_UNINTERRUPTIBLE);
398 schedule_timeout(1);
399 }
400 if (request_irq(dev->scsi_host_ptr->irq, aac_rx_intr, SA_SHIRQ|SA_INTERRUPT, "aacraid", (void *)dev)<0)
401 {
402 printk(KERN_ERR "%s%d: Interrupt unavailable.\n", name, instance);
403 goto error_iounmap;
404 }
405 /*
406 * Fill in the function dispatch table.
407 */
408 dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
409 dev->a_ops.adapter_notify = aac_rx_notify_adapter;
410 dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
411 dev->a_ops.adapter_check_health = aac_rx_check_health;
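	/*
	 * The rest of the driver reaches this board through the a_ops
	 * hooks; e.g. dev->a_ops.adapter_notify(dev, AdapNormCmdQue)
	 * ends up ringing the i960's inbound doorbell 1 via
	 * aac_rx_notify_adapter() above.
	 */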
412
413 if (aac_init_adapter(dev) == NULL)
414 goto error_irq;
415 /*
416 * Start any kernel threads needed
417 */
418 dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, dev, 0);
419 if(dev->thread_pid < 0)
420 {
421 printk(KERN_ERR "aacraid: Unable to create rx thread.\n");
422 goto error_kfree;
423 }
424 /*
425 * Tell the adapter that all is configured, and it can start
426 * accepting requests
427 */
428 aac_rx_start_adapter(dev);
429 return 0;
430
431error_kfree:
432 kfree(dev->queues);
433
434error_irq:
435 free_irq(dev->scsi_host_ptr->irq, (void *)dev);
436
437error_iounmap:
438 iounmap(dev->regs.rx);
439
440 return -1;
441}
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
new file mode 100644
index 000000000000..bd6c30723fba
--- /dev/null
+++ b/drivers/scsi/aacraid/sa.c
@@ -0,0 +1,374 @@
1/*
2 * Adaptec AAC series RAID controller driver
3 * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
4 *
5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux.
7 *
8 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; see the file COPYING. If not, write to
22 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 * Module Name:
25 * sa.c
26 *
27 * Abstract: Drawbridge specific support functions
28 *
29 */
30
31#include <linux/kernel.h>
32#include <linux/init.h>
33#include <linux/types.h>
34#include <linux/sched.h>
35#include <linux/pci.h>
36#include <linux/spinlock.h>
37#include <linux/slab.h>
38#include <linux/blkdev.h>
39#include <linux/delay.h>
40#include <linux/completion.h>
41#include <linux/time.h>
42#include <linux/interrupt.h>
43#include <asm/semaphore.h>
44
45#include <scsi/scsi_host.h>
46
47#include "aacraid.h"
48
49static irqreturn_t aac_sa_intr(int irq, void *dev_id, struct pt_regs *regs)
50{
51 struct aac_dev *dev = dev_id;
52 unsigned short intstat, mask;
53
54 intstat = sa_readw(dev, DoorbellReg_p);
55 /*
56 * Read mask and invert because drawbridge is reversed.
57 * This allows us to only service interrupts that have been enabled.
58 */
59 mask = ~(sa_readw(dev, SaDbCSR.PRISETIRQMASK));
60
61 /* Check to see if this is our interrupt. If it isn't just return */
62
63 if (intstat & mask) {
64 if (intstat & PrintfReady) {
65 aac_printf(dev, sa_readl(dev, Mailbox5));
66 sa_writew(dev, DoorbellClrReg_p, PrintfReady); /* clear PrintfReady */
67 sa_writew(dev, DoorbellReg_s, PrintfDone);
68 } else if (intstat & DOORBELL_1) { // dev -> Host Normal Command Ready
69 aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
70 sa_writew(dev, DoorbellClrReg_p, DOORBELL_1);
71 } else if (intstat & DOORBELL_2) { // dev -> Host Normal Response Ready
72 aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
73 sa_writew(dev, DoorbellClrReg_p, DOORBELL_2);
74 } else if (intstat & DOORBELL_3) { // dev -> Host Normal Command Not Full
75 sa_writew(dev, DoorbellClrReg_p, DOORBELL_3);
76 } else if (intstat & DOORBELL_4) { // dev -> Host Normal Response Not Full
77 sa_writew(dev, DoorbellClrReg_p, DOORBELL_4);
78 }
79 return IRQ_HANDLED;
80 }
81 return IRQ_NONE;
82}
83
84/**
85 * aac_sa_notify_adapter - handle adapter notification
86 * @dev: Adapter that notification is for
87 * @event: Event to notify
88 *
89 * Notify the adapter of an event
90 */
91
92void aac_sa_notify_adapter(struct aac_dev *dev, u32 event)
93{
94 switch (event) {
95
96 case AdapNormCmdQue:
97 sa_writew(dev, DoorbellReg_s,DOORBELL_1);
98 break;
99 case HostNormRespNotFull:
100 sa_writew(dev, DoorbellReg_s,DOORBELL_4);
101 break;
102 case AdapNormRespQue:
103 sa_writew(dev, DoorbellReg_s,DOORBELL_2);
104 break;
105 case HostNormCmdNotFull:
106 sa_writew(dev, DoorbellReg_s,DOORBELL_3);
107 break;
108 case HostShutdown:
109 //sa_sync_cmd(dev, HOST_CRASHING, 0, &ret);
110 break;
111 case FastIo:
112 sa_writew(dev, DoorbellReg_s,DOORBELL_6);
113 break;
114 case AdapPrintfDone:
115 sa_writew(dev, DoorbellReg_s,DOORBELL_5);
116 break;
117 default:
118 BUG();
119 break;
120 }
121}
122
123
124/**
125 * sa_sync_cmd - send a command and wait
126 * @dev: Adapter
127 * @command: Command to execute
128 * @p1: first parameter
129 * @ret: adapter status
130 *
131 * This routine will send a synchronous command to the adapter and wait
132 * for its completion.
133 */
134
135static int sa_sync_cmd(struct aac_dev *dev, u32 command, u32 p1, u32 *ret)
136{
137 unsigned long start;
138 int ok;
139 /*
140 * Write the Command into Mailbox 0
141 */
142 sa_writel(dev, Mailbox0, command);
143 /*
144 * Write the parameters into Mailboxes 1 - 4
145 */
146 sa_writel(dev, Mailbox1, p1);
147 sa_writel(dev, Mailbox2, 0);
148 sa_writel(dev, Mailbox3, 0);
149 sa_writel(dev, Mailbox4, 0);
150 /*
151 * Clear the synch command doorbell to start on a clean slate.
152 */
153 sa_writew(dev, DoorbellClrReg_p, DOORBELL_0);
154 /*
155 * Signal that there is a new synch command
156 */
157 sa_writew(dev, DoorbellReg_s, DOORBELL_0);
158
159 ok = 0;
160 start = jiffies;
161
162 while(time_before(jiffies, start+30*HZ))
163 {
164 /*
165 * Delay 5uS so that the monitor gets access
166 */
167 udelay(5);
168 /*
169 * Mon110 will set doorbell0 bit when it has
170 * completed the command.
171 */
172 if(sa_readw(dev, DoorbellReg_p) & DOORBELL_0) {
173 ok = 1;
174 break;
175 }
176 set_current_state(TASK_UNINTERRUPTIBLE);
177 schedule_timeout(1);
178 }
179
180 if (ok != 1)
181 return -ETIMEDOUT;
182 /*
183 * Clear the synch command doorbell.
184 */
185 sa_writew(dev, DoorbellClrReg_p, DOORBELL_0);
186 /*
187 * Pull the synch status from Mailbox 0.
188 */
189 if (ret)
190 *ret = sa_readl(dev, Mailbox0);
191 return 0;
192}
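
/*
 * Unlike rx_sync_cmd(), no interrupt masking is done around this
 * handshake: DOORBELL_0 is left masked by aac_sa_start_adapter(), so
 * the poll loop above is the only consumer of the completion doorbell.
 */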
193
194/**
195 * aac_sa_interrupt_adapter - interrupt an adapter
196 * @dev: Which adapter to enable.
197 *
198 * Breakpoint an adapter.
199 */
200
201static void aac_sa_interrupt_adapter (struct aac_dev *dev)
202{
203 u32 ret;
204 sa_sync_cmd(dev, BREAKPOINT_REQUEST, 0, &ret);
205}
206
207/**
208 * aac_sa_start_adapter - activate adapter
209 * @dev: Adapter
210 *
211 * Start up processing on an ARM based AAC adapter
212 */
213
214static void aac_sa_start_adapter(struct aac_dev *dev)
215{
216 u32 ret;
217 struct aac_init *init;
218 /*
219 * Fill in the remaining pieces of the init.
220 */
221 init = dev->init;
222 init->HostElapsedSeconds = cpu_to_le32(get_seconds());
223
224 /*
225 * Tell the adapter we are back and up and running so it will scan its command
226 * queues and enable our interrupts
227 */
228 dev->irq_mask = (PrintfReady | DOORBELL_1 | DOORBELL_2 | DOORBELL_3 | DOORBELL_4);
229 /*
230 * First clear out all interrupts. Then enable the ones that
231 * we can handle.
232 */
233 sa_writew(dev, SaDbCSR.PRISETIRQMASK, cpu_to_le16(0xffff));
234 sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, (PrintfReady | DOORBELL_1 | DOORBELL_2 | DOORBELL_3 | DOORBELL_4));
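	/*
	 * PRISETIRQMASK/PRICLEARIRQMASK act as a set/clear pair: bits
	 * written to the SET register are added to the interrupt mask,
	 * bits written to the CLEAR register are removed from it, so the
	 * two writes above mask everything and then unmask just the
	 * doorbells aac_sa_intr() services.
	 */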
235 /* We can only use a 32 bit address here */
236 sa_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa, &ret);
237}
238
239/**
240 * aac_sa_check_health
241 * @dev: device to check if healthy
242 *
243 * Will attempt to determine if the specified adapter is alive and
244 * capable of handling requests, returning 0 if alive, -1 if the
 * self-test failed, -2 if the adapter kernel panicked and -3 if it
 * is not yet up and running.
245 */
246static int aac_sa_check_health(struct aac_dev *dev)
247{
248 long status = sa_readl(dev, Mailbox7);
249
250 /*
251 * Check to see if the board failed any self tests.
252 */
253 if (status & SELF_TEST_FAILED)
254 return -1;
255 /*
256 * Check to see if the board panic'd while booting.
257 */
258 if (status & KERNEL_PANIC)
259 return -2;
260 /*
261 * Wait for the adapter to be up and running. Wait up to 3 minutes
262 */
263 if (!(status & KERNEL_UP_AND_RUNNING))
264 return -3;
265 /*
266 * Everything is OK
267 */
268 return 0;
269}
270
271/**
272 * aac_sa_init - initialize an ARM based AAC card
273 * @dev: device to configure
274 *
275 * Allocate and set up resources for the ARM based AAC variants. The
276 * device_interface in the commregion will be allocated and linked
277 * to the comm region.
278 */
279
280int aac_sa_init(struct aac_dev *dev)
281{
282 unsigned long start;
283 unsigned long status;
284 int instance;
285 const char *name;
286
287 instance = dev->id;
288 name = dev->name;
289
290 /*
291 * Map in the registers from the adapter.
292 */
293
294 if((dev->regs.sa = ioremap((unsigned long)dev->scsi_host_ptr->base, 8192))==NULL)
295 {
296		printk(KERN_WARNING "aacraid: unable to map ARM.\n");
297		return -1;
298 }
299 /*
300 * Check to see if the board failed any self tests.
301 */
302 if (sa_readl(dev, Mailbox7) & SELF_TEST_FAILED) {
303 printk(KERN_WARNING "%s%d: adapter self-test failed.\n", name, instance);
304 goto error_iounmap;
305 }
306 /*
307 * Check to see if the board panic'd while booting.
308 */
309 if (sa_readl(dev, Mailbox7) & KERNEL_PANIC) {
310 printk(KERN_WARNING "%s%d: adapter kernel panic'd.\n", name, instance);
311 goto error_iounmap;
312 }
313 start = jiffies;
314 /*
315 * Wait for the adapter to be up and running. Wait up to 3 minutes.
316 */
317 while (!(sa_readl(dev, Mailbox7) & KERNEL_UP_AND_RUNNING)) {
318 if (time_after(jiffies, start+180*HZ)) {
319 status = sa_readl(dev, Mailbox7);
320 printk(KERN_WARNING "%s%d: adapter kernel failed to start, init status = %lx.\n",
321 name, instance, status);
322 goto error_iounmap;
323 }
324 set_current_state(TASK_UNINTERRUPTIBLE);
325 schedule_timeout(1);
326 }
327
328 if (request_irq(dev->scsi_host_ptr->irq, aac_sa_intr, SA_SHIRQ|SA_INTERRUPT, "aacraid", (void *)dev ) < 0) {
329 printk(KERN_WARNING "%s%d: Interrupt unavailable.\n", name, instance);
330 goto error_iounmap;
331 }
332
333 /*
334 * Fill in the function dispatch table.
335 */
336
337 dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter;
338 dev->a_ops.adapter_notify = aac_sa_notify_adapter;
339 dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
340 dev->a_ops.adapter_check_health = aac_sa_check_health;
341
342
343 if(aac_init_adapter(dev) == NULL)
344 goto error_irq;
345
346 /*
347 * Start any kernel threads needed
348 */
349 dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, dev, 0);
350 if (dev->thread_pid < 0) {
351 printk(KERN_ERR "aacraid: Unable to create command thread.\n");
352 goto error_kfree;
353 }
354
355 /*
356 * Tell the adapter that all is configured, and it can start
357 * accepting requests
358 */
359 aac_sa_start_adapter(dev);
360 return 0;
361
362
363error_kfree:
364 kfree(dev->queues);
365
366error_irq:
367 free_irq(dev->scsi_host_ptr->irq, (void *)dev);
368
369error_iounmap:
370 iounmap(dev->regs.sa);
371
372 return -1;
373}
374