author	Anil Ravindranath <anil_ravindranath@pmc-sierra.com>	2009-08-25 20:35:18 -0400
committer	James Bottomley <James.Bottomley@suse.de>	2009-09-12 10:35:24 -0400
commit	89a3681041507773dfee1b88c1c90c8a811a79d3 (patch)
tree	df3ed10fee1725722524c66ba2e24cfef5f40ff5 /drivers/scsi
parent	073ed91e245d56d71a85e2a49bf0b3962fe74dc4 (diff)
[SCSI] pmcraid: PMC-Sierra MaxRAID driver to support 6Gb/s SAS RAID controller
Signed-off-by: Anil Ravindranath <anil_ravindranath@pmc-sierra.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Diffstat (limited to 'drivers/scsi')
-rw-r--r--	drivers/scsi/Kconfig	6
-rw-r--r--	drivers/scsi/Makefile	1
-rw-r--r--	drivers/scsi/pmcraid.c	5604
-rw-r--r--	drivers/scsi/pmcraid.h	1029
4 files changed, 6640 insertions, 0 deletions
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 9c23122f755f..82bb3b2d207a 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1811,6 +1811,12 @@ config ZFCP
1811 called zfcp. If you want to compile it as a module, say M here
1812 and read <file:Documentation/kbuild/modules.txt>.
1813
1814config SCSI_PMCRAID
1815 tristate "PMC SIERRA Linux MaxRAID adapter support"
1816 depends on PCI && SCSI
1817 ---help---
1818 This driver supports the PMC SIERRA MaxRAID adapters.
1819
1820config SCSI_SRP
1821 tristate "SCSI RDMA Protocol helper library"
1822 depends on SCSI && PCI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 25429ea63d0a..61a94af3cee7 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -130,6 +130,7 @@ obj-$(CONFIG_SCSI_MVSAS) += mvsas/
130obj-$(CONFIG_PS3_ROM) += ps3rom.o
131obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/
132obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
133obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o
134
135obj-$(CONFIG_ARM) += arm/
136
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
new file mode 100644
index 000000000000..4302f06e4ec9
--- /dev/null
+++ b/drivers/scsi/pmcraid.c
@@ -0,0 +1,5604 @@
1/*
2 * pmcraid.c -- driver for PMC Sierra MaxRAID controller adapters
3 *
4 * Written By: PMC Sierra Corporation
5 *
6 * Copyright (C) 2008, 2009 PMC Sierra Inc
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307,
21 * USA
22 *
23 */
24#include <linux/fs.h>
25#include <linux/init.h>
26#include <linux/types.h>
27#include <linux/errno.h>
28#include <linux/kernel.h>
29#include <linux/ioport.h>
30#include <linux/delay.h>
31#include <linux/pci.h>
32#include <linux/wait.h>
33#include <linux/spinlock.h>
34#include <linux/sched.h>
35#include <linux/interrupt.h>
36#include <linux/blkdev.h>
37#include <linux/firmware.h>
38#include <linux/module.h>
39#include <linux/moduleparam.h>
40#include <linux/hdreg.h>
41#include <linux/version.h>
42#include <linux/io.h>
43#include <asm/irq.h>
44#include <asm/processor.h>
45#include <linux/libata.h>
46#include <linux/mutex.h>
47#include <scsi/scsi.h>
48#include <scsi/scsi_host.h>
49#include <scsi/scsi_tcq.h>
50#include <scsi/scsi_eh.h>
51#include <scsi/scsi_cmnd.h>
52#include <scsi/scsicam.h>
53
54#include "pmcraid.h"
55
56/*
57 * Module configuration parameters
58 */
59static unsigned int pmcraid_debug_log;
60static unsigned int pmcraid_disable_aen;
61static unsigned int pmcraid_log_level = IOASC_LOG_LEVEL_MUST;
62
63/*
64 * Data structures to support multiple adapters by the LLD.
65 * pmcraid_adapter_count - count of configured adapters
66 */
67static atomic_t pmcraid_adapter_count = ATOMIC_INIT(0);
68
69/*
70 * Supporting user-level control interface through IOCTL commands.
71 * pmcraid_major - major number to use
72 * pmcraid_minor - minor number(s) to use
73 */
74static unsigned int pmcraid_major;
75static struct class *pmcraid_class;
76DECLARE_BITMAP(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
77
78/*
79 * Module parameters
80 */
81MODULE_AUTHOR("PMC Sierra Corporation, anil_ravindranath@pmc-sierra.com");
82MODULE_DESCRIPTION("PMC Sierra MaxRAID Controller Driver");
83MODULE_LICENSE("GPL");
84MODULE_VERSION(PMCRAID_DRIVER_VERSION);
85
86module_param_named(log_level, pmcraid_log_level, uint, (S_IRUGO | S_IWUSR));
87MODULE_PARM_DESC(log_level,
88 "Enables firmware error code logging, default :1 high-severity"
89 " errors, 2: all errors including high-severity errors,"
90 " 0: disables logging");
91
92module_param_named(debug, pmcraid_debug_log, uint, (S_IRUGO | S_IWUSR));
93MODULE_PARM_DESC(debug,
94 "Enable driver verbose message logging. Set 1 to enable."
95 "(default: 0)");
96
97module_param_named(disable_aen, pmcraid_disable_aen, uint, (S_IRUGO | S_IWUSR));
98MODULE_PARM_DESC(disable_aen,
99 "Disable driver aen notifications to apps. Set 1 to disable."
100 "(default: 0)");
101
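As a usage sketch for the three parameters above (the values are illustrative; the parameter names come from the module_param_named() declarations that follow):

	modprobe pmcraid log_level=2 debug=1 disable_aen=0
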
 102/* chip specific constants for PMC MaxRAID controllers (same for
 103 * 0x5220 and 0x8010)
 104 */
105static struct pmcraid_chip_details pmcraid_chip_cfg[] = {
106 {
107 .ioastatus = 0x0,
108 .ioarrin = 0x00040,
109 .mailbox = 0x7FC30,
110 .global_intr_mask = 0x00034,
111 .ioa_host_intr = 0x0009C,
112 .ioa_host_intr_clr = 0x000A0,
113 .ioa_host_mask = 0x7FC28,
114 .ioa_host_mask_clr = 0x7FC28,
115 .host_ioa_intr = 0x00020,
116 .host_ioa_intr_clr = 0x00020,
117 .transop_timeout = 300
118 }
119};
120
121/*
122 * PCI device ids supported by pmcraid driver
123 */
124static struct pci_device_id pmcraid_pci_table[] __devinitdata = {
125 { PCI_DEVICE(PCI_VENDOR_ID_PMC, PCI_DEVICE_ID_PMC_MAXRAID),
126 0, 0, (kernel_ulong_t)&pmcraid_chip_cfg[0]
127 },
128 {}
129};
130
131MODULE_DEVICE_TABLE(pci, pmcraid_pci_table);
132
133
134
135/**
136 * pmcraid_slave_alloc - Prepare for commands to a device
137 * @scsi_dev: scsi device struct
138 *
139 * This function is called by mid-layer prior to sending any command to the new
140 * device. Stores resource entry details of the device in scsi_device struct.
141 * Queuecommand uses the resource handle and other details to fill up IOARCB
142 * while sending commands to the device.
143 *
144 * Return value:
145 * 0 on success / -ENXIO if device does not exist
146 */
147static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
148{
149 struct pmcraid_resource_entry *temp, *res = NULL;
150 struct pmcraid_instance *pinstance;
151 u8 target, bus, lun;
152 unsigned long lock_flags;
153 int rc = -ENXIO;
154 pinstance = shost_priv(scsi_dev->host);
155
156 /* Driver exposes VSET and GSCSI resources only; all other device types
157 * are not exposed. Resource list is synchronized using resource lock
158 * so any traversal or modifications to the list should be done inside
159 * this lock
160 */
161 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
162 list_for_each_entry(temp, &pinstance->used_res_q, queue) {
163
164 /* do not expose VSETs with order-ids >= 240 */
165 if (RES_IS_VSET(temp->cfg_entry)) {
166 target = temp->cfg_entry.unique_flags1;
167 if (target >= PMCRAID_MAX_VSET_TARGETS)
168 continue;
169 bus = PMCRAID_VSET_BUS_ID;
170 lun = 0;
171 } else if (RES_IS_GSCSI(temp->cfg_entry)) {
172 target = RES_TARGET(temp->cfg_entry.resource_address);
173 bus = PMCRAID_PHYS_BUS_ID;
174 lun = RES_LUN(temp->cfg_entry.resource_address);
175 } else {
176 continue;
177 }
178
179 if (bus == scsi_dev->channel &&
180 target == scsi_dev->id &&
181 lun == scsi_dev->lun) {
182 res = temp;
183 break;
184 }
185 }
186
187 if (res) {
188 res->scsi_dev = scsi_dev;
189 scsi_dev->hostdata = res;
190 res->change_detected = 0;
191 atomic_set(&res->read_failures, 0);
192 atomic_set(&res->write_failures, 0);
193 rc = 0;
194 }
195 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
196 return rc;
197}
198
199/**
200 * pmcraid_slave_configure - Configures a SCSI device
201 * @scsi_dev: scsi device struct
202 *
 203 * This function is executed by the SCSI mid-layer just after a device is first
 204 * scanned (i.e. it has responded to an INQUIRY). For VSET resources, the
 205 * timeout value (default 30s) will be overridden with a higher value (60s)
 206 * and the max_sectors value will be capped at 512. It also sets queue depth
207 * to host->cmd_per_lun value
208 *
209 * Return value:
210 * 0 on success
211 */
212static int pmcraid_slave_configure(struct scsi_device *scsi_dev)
213{
214 struct pmcraid_resource_entry *res = scsi_dev->hostdata;
215
216 if (!res)
217 return 0;
218
219 /* LLD exposes VSETs and Enclosure devices only */
220 if (RES_IS_GSCSI(res->cfg_entry) &&
221 scsi_dev->type != TYPE_ENCLOSURE)
222 return -ENXIO;
223
224 pmcraid_info("configuring %x:%x:%x:%x\n",
225 scsi_dev->host->unique_id,
226 scsi_dev->channel,
227 scsi_dev->id,
228 scsi_dev->lun);
229
230 if (RES_IS_GSCSI(res->cfg_entry)) {
231 scsi_dev->allow_restart = 1;
232 } else if (RES_IS_VSET(res->cfg_entry)) {
233 scsi_dev->allow_restart = 1;
234 blk_queue_rq_timeout(scsi_dev->request_queue,
235 PMCRAID_VSET_IO_TIMEOUT);
236 blk_queue_max_sectors(scsi_dev->request_queue,
237 PMCRAID_VSET_MAX_SECTORS);
238 }
239
240 if (scsi_dev->tagged_supported &&
241 (RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry))) {
242 scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth);
243 scsi_adjust_queue_depth(scsi_dev, MSG_SIMPLE_TAG,
244 scsi_dev->host->cmd_per_lun);
245 } else {
246 scsi_adjust_queue_depth(scsi_dev, 0,
247 scsi_dev->host->cmd_per_lun);
248 }
249
250 return 0;
251}
252
253/**
254 * pmcraid_slave_destroy - Unconfigure a SCSI device before removing it
255 *
256 * @scsi_dev: scsi device struct
257 *
258 * This is called by mid-layer before removing a device. Pointer assignments
259 * done in pmcraid_slave_alloc will be reset to NULL here.
260 *
261 * Return value
262 * none
263 */
264static void pmcraid_slave_destroy(struct scsi_device *scsi_dev)
265{
266 struct pmcraid_resource_entry *res;
267
268 res = (struct pmcraid_resource_entry *)scsi_dev->hostdata;
269
270 if (res)
271 res->scsi_dev = NULL;
272
273 scsi_dev->hostdata = NULL;
274}
275
276/**
277 * pmcraid_change_queue_depth - Change the device's queue depth
278 * @scsi_dev: scsi device struct
279 * @depth: depth to set
280 *
281 * Return value
282 * actual depth set
283 */
284static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth)
285{
286 if (depth > PMCRAID_MAX_CMD_PER_LUN)
287 depth = PMCRAID_MAX_CMD_PER_LUN;
288
289 scsi_adjust_queue_depth(scsi_dev, scsi_get_tag_type(scsi_dev), depth);
290
291 return scsi_dev->queue_depth;
292}
293
294/**
295 * pmcraid_change_queue_type - Change the device's queue type
296 * @scsi_dev: scsi device struct
297 * @tag: type of tags to use
298 *
299 * Return value:
300 * actual queue type set
301 */
302static int pmcraid_change_queue_type(struct scsi_device *scsi_dev, int tag)
303{
304 struct pmcraid_resource_entry *res;
305
306 res = (struct pmcraid_resource_entry *)scsi_dev->hostdata;
307
308 if ((res) && scsi_dev->tagged_supported &&
309 (RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry))) {
310 scsi_set_tag_type(scsi_dev, tag);
311
312 if (tag)
313 scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth);
314 else
315 scsi_deactivate_tcq(scsi_dev, scsi_dev->queue_depth);
316 } else
317 tag = 0;
318
319 return tag;
320}
321
322
323/**
324 * pmcraid_init_cmdblk - initializes a command block
325 *
326 * @cmd: pointer to struct pmcraid_cmd to be initialized
327 * @index: if >=0 first time initialization; otherwise reinitialization
328 *
329 * Return Value
330 * None
331 */
332void pmcraid_init_cmdblk(struct pmcraid_cmd *cmd, int index)
333{
334 struct pmcraid_ioarcb *ioarcb = &(cmd->ioa_cb->ioarcb);
335 dma_addr_t dma_addr = cmd->ioa_cb_bus_addr;
336
337 if (index >= 0) {
338 /* first time initialization (called from probe) */
339 u32 ioasa_offset =
340 offsetof(struct pmcraid_control_block, ioasa);
341
342 cmd->index = index;
343 ioarcb->response_handle = cpu_to_le32(index << 2);
344 ioarcb->ioarcb_bus_addr = cpu_to_le64(dma_addr);
345 ioarcb->ioasa_bus_addr = cpu_to_le64(dma_addr + ioasa_offset);
346 ioarcb->ioasa_len = cpu_to_le16(sizeof(struct pmcraid_ioasa));
347 } else {
348 /* re-initialization of various lengths, called once command is
349 * processed by IOA
350 */
351 memset(&cmd->ioa_cb->ioarcb.cdb, 0, PMCRAID_MAX_CDB_LEN);
352 ioarcb->request_flags0 = 0;
353 ioarcb->request_flags1 = 0;
354 ioarcb->cmd_timeout = 0;
355 ioarcb->ioarcb_bus_addr &= (~0x1FULL);
356 ioarcb->ioadl_bus_addr = 0;
357 ioarcb->ioadl_length = 0;
358 ioarcb->data_transfer_length = 0;
359 ioarcb->add_cmd_param_length = 0;
360 ioarcb->add_cmd_param_offset = 0;
361 cmd->ioa_cb->ioasa.ioasc = 0;
362 cmd->ioa_cb->ioasa.residual_data_length = 0;
363 cmd->u.time_left = 0;
364 }
365
366 cmd->cmd_done = NULL;
367 cmd->scsi_cmd = NULL;
368 cmd->release = 0;
369 cmd->completion_req = 0;
370 cmd->dma_handle = 0;
371 init_timer(&cmd->timer);
372}
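
To make the two initialization modes concrete, a minimal sketch of a probe-time caller of pmcraid_init_cmdblk() (the cmd_list array and the PMCRAID_MAX_CMD bound are assumptions about the allocation code elsewhere in this file):

	static void pmcraid_init_cmdblks_sketch(struct pmcraid_instance *pinstance)
	{
		int i;

		/* index >= 0: first-time setup of response_handle and the
		 * IOARCB/IOASA bus addresses for every command block
		 */
		for (i = 0; i < PMCRAID_MAX_CMD; i++)
			pmcraid_init_cmdblk(pinstance->cmd_list[i], i);
	}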
373
374/**
375 * pmcraid_reinit_cmdblk - reinitialize a command block
376 *
377 * @cmd: pointer to struct pmcraid_cmd to be reinitialized
378 *
379 * Return Value
380 * None
381 */
382static void pmcraid_reinit_cmdblk(struct pmcraid_cmd *cmd)
383{
384 pmcraid_init_cmdblk(cmd, -1);
385}
386
387/**
388 * pmcraid_get_free_cmd - get a free cmd block from command block pool
389 * @pinstance: adapter instance structure
390 *
391 * Return Value:
392 * returns pointer to cmd block or NULL if no blocks are available
393 */
394static struct pmcraid_cmd *pmcraid_get_free_cmd(
395 struct pmcraid_instance *pinstance
396)
397{
398 struct pmcraid_cmd *cmd = NULL;
399 unsigned long lock_flags;
400
401 /* free cmd block list is protected by free_pool_lock */
402 spin_lock_irqsave(&pinstance->free_pool_lock, lock_flags);
403
404 if (!list_empty(&pinstance->free_cmd_pool)) {
405 cmd = list_entry(pinstance->free_cmd_pool.next,
406 struct pmcraid_cmd, free_list);
407 list_del(&cmd->free_list);
408 }
409 spin_unlock_irqrestore(&pinstance->free_pool_lock, lock_flags);
410
 411 /* Initialize the command block before giving it to the caller */
412 if (cmd != NULL)
413 pmcraid_reinit_cmdblk(cmd);
414 return cmd;
415}
416
417/**
418 * pmcraid_return_cmd - return a completed command block back into free pool
419 * @cmd: pointer to the command block
420 *
421 * Return Value:
422 * nothing
423 */
424void pmcraid_return_cmd(struct pmcraid_cmd *cmd)
425{
426 struct pmcraid_instance *pinstance = cmd->drv_inst;
427 unsigned long lock_flags;
428
429 spin_lock_irqsave(&pinstance->free_pool_lock, lock_flags);
430 list_add_tail(&cmd->free_list, &pinstance->free_cmd_pool);
431 spin_unlock_irqrestore(&pinstance->free_pool_lock, lock_flags);
432}
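
Together the two pool helpers give the usual acquire/release pattern; a hedged sketch of a caller:

	static void pmcraid_pool_usage_sketch(struct pmcraid_instance *pinstance)
	{
		struct pmcraid_cmd *cmd = pmcraid_get_free_cmd(pinstance);

		if (cmd == NULL)
			return;		/* pool exhausted, caller must cope */

		/* ... fill cmd->ioa_cb->ioarcb and fire the command ... */

		pmcraid_return_cmd(cmd);	/* back to free_cmd_pool */
	}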
433
434/**
435 * pmcraid_read_interrupts - reads IOA interrupts
436 *
437 * @pinstance: pointer to adapter instance structure
438 *
439 * Return value
440 * interrupts read from IOA
441 */
442static u32 pmcraid_read_interrupts(struct pmcraid_instance *pinstance)
443{
444 return ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
445}
446
447/**
448 * pmcraid_disable_interrupts - Masks and clears all specified interrupts
449 *
450 * @pinstance: pointer to per adapter instance structure
451 * @intrs: interrupts to disable
452 *
453 * Return Value
454 * None
455 */
456static void pmcraid_disable_interrupts(
457 struct pmcraid_instance *pinstance,
458 u32 intrs
459)
460{
461 u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg);
462 u32 nmask = gmask | GLOBAL_INTERRUPT_MASK;
463
464 iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg);
465 iowrite32(intrs, pinstance->int_regs.ioa_host_interrupt_clr_reg);
466 iowrite32(intrs, pinstance->int_regs.ioa_host_interrupt_mask_reg);
467 ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
468}
469
470/**
471 * pmcraid_enable_interrupts - Enables specified interrupts
472 *
473 * @pinstance: pointer to per adapter instance structure
474 * @intr: interrupts to enable
475 *
476 * Return Value
477 * None
478 */
479static void pmcraid_enable_interrupts(
480 struct pmcraid_instance *pinstance,
481 u32 intrs
482)
483{
484 u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg);
485 u32 nmask = gmask & (~GLOBAL_INTERRUPT_MASK);
486
487 iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg);
488 iowrite32(~intrs, pinstance->int_regs.ioa_host_interrupt_mask_reg);
489 ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
490
491 pmcraid_info("enabled interrupts global mask = %x intr_mask = %x\n",
492 ioread32(pinstance->int_regs.global_interrupt_mask_reg),
493 ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg));
494}
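
The two helpers above are intended to bracket reset-type operations; a sketch of the expected pairing (PMCRAID_PCI_INTERRUPTS is the interrupt set used later in this file; masking with ~0 is an illustrative choice):

	pmcraid_disable_interrupts(pinstance, ~0);	/* quiesce the IOA */
	/* ... hard/soft reset or reconfiguration steps ... */
	pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS);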
495
496/**
497 * pmcraid_reset_type - Determine the required reset type
498 * @pinstance: pointer to adapter instance structure
499 *
 500 * IOA requires hard reset if any of the following conditions is true:
 501 * 1. HRRQ valid interrupt is not masked
 502 * 2. IOA reset alert doorbell is set
 503 * 3. there are any error interrupts
504 */
505static void pmcraid_reset_type(struct pmcraid_instance *pinstance)
506{
507 u32 mask;
508 u32 intrs;
509 u32 alerts;
510
511 mask = ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
512 intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
513 alerts = ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
514
515 if ((mask & INTRS_HRRQ_VALID) == 0 ||
516 (alerts & DOORBELL_IOA_RESET_ALERT) ||
517 (intrs & PMCRAID_ERROR_INTERRUPTS)) {
518 pmcraid_info("IOA requires hard reset\n");
519 pinstance->ioa_hard_reset = 1;
520 }
521
522 /* If unit check is active, trigger the dump */
523 if (intrs & INTRS_IOA_UNIT_CHECK)
524 pinstance->ioa_unit_check = 1;
525}
526
527/**
528 * pmcraid_bist_done - completion function for PCI BIST
529 * @cmd: pointer to reset command
530 * Return Value
531 * none
532 */
533
534static void pmcraid_ioa_reset(struct pmcraid_cmd *);
535
536static void pmcraid_bist_done(struct pmcraid_cmd *cmd)
537{
538 struct pmcraid_instance *pinstance = cmd->drv_inst;
539 unsigned long lock_flags;
540 int rc;
541 u16 pci_reg;
542
543 rc = pci_read_config_word(pinstance->pdev, PCI_COMMAND, &pci_reg);
544
545 /* If PCI config space can't be accessed wait for another two secs */
546 if ((rc != PCIBIOS_SUCCESSFUL || (!(pci_reg & PCI_COMMAND_MEMORY))) &&
547 cmd->u.time_left > 0) {
548 pmcraid_info("BIST not complete, waiting another 2 secs\n");
549 cmd->timer.expires = jiffies + cmd->u.time_left;
550 cmd->u.time_left = 0;
551 cmd->timer.data = (unsigned long)cmd;
552 cmd->timer.function =
553 (void (*)(unsigned long))pmcraid_bist_done;
554 add_timer(&cmd->timer);
555 } else {
556 cmd->u.time_left = 0;
557 pmcraid_info("BIST is complete, proceeding with reset\n");
558 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
559 pmcraid_ioa_reset(cmd);
560 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
561 }
562}
563
564/**
565 * pmcraid_start_bist - starts BIST
566 * @cmd: pointer to reset cmd
567 * Return Value
568 * none
569 */
570static void pmcraid_start_bist(struct pmcraid_cmd *cmd)
571{
572 struct pmcraid_instance *pinstance = cmd->drv_inst;
573 u32 doorbells, intrs;
574
575 /* proceed with bist and wait for 2 seconds */
576 iowrite32(DOORBELL_IOA_START_BIST,
577 pinstance->int_regs.host_ioa_interrupt_reg);
578 doorbells = ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
579 intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
580 pmcraid_info("doorbells after start bist: %x intrs: %x \n",
581 doorbells, intrs);
582
583 cmd->u.time_left = msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
584 cmd->timer.data = (unsigned long)cmd;
585 cmd->timer.expires = jiffies + msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
586 cmd->timer.function = (void (*)(unsigned long))pmcraid_bist_done;
587 add_timer(&cmd->timer);
588}
589
590/**
591 * pmcraid_reset_alert_done - completion routine for reset_alert
592 * @cmd: pointer to command block used in reset sequence
593 * Return value
594 * None
595 */
596static void pmcraid_reset_alert_done(struct pmcraid_cmd *cmd)
597{
598 struct pmcraid_instance *pinstance = cmd->drv_inst;
599 u32 status = ioread32(pinstance->ioa_status);
600 unsigned long lock_flags;
601
 602 /* if the critical operation in progress bit is cleared or the wait
 603 * times out, invoke reset engine to proceed with hard reset. If there
 604 * is some more time to wait, restart the timer
605 */
606 if (((status & INTRS_CRITICAL_OP_IN_PROGRESS) == 0) ||
607 cmd->u.time_left <= 0) {
608 pmcraid_info("critical op is reset proceeding with reset\n");
609 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
610 pmcraid_ioa_reset(cmd);
611 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
612 } else {
613 pmcraid_info("critical op is not yet reset waiting again\n");
614 /* restart timer if some more time is available to wait */
615 cmd->u.time_left -= PMCRAID_CHECK_FOR_RESET_TIMEOUT;
616 cmd->timer.data = (unsigned long)cmd;
617 cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
618 cmd->timer.function =
619 (void (*)(unsigned long))pmcraid_reset_alert_done;
620 add_timer(&cmd->timer);
621 }
622}
623
624/**
625 * pmcraid_reset_alert - alerts IOA for a possible reset
626 * @cmd : command block to be used for reset sequence.
627 *
628 * Return Value
 629 * None. If pci config-space is accessible, RESET_DOORBELL is written
 630 * to IOA and a timer is armed to wait for IOA permission; otherwise
 631 * BIST is started
632 */
633static void pmcraid_reset_alert(struct pmcraid_cmd *cmd)
634{
635 struct pmcraid_instance *pinstance = cmd->drv_inst;
636 u32 doorbells;
637 int rc;
638 u16 pci_reg;
639
640 /* If we are able to access IOA PCI config space, alert IOA that we are
 641 * going to reset it soon. This enables IOA to preserve persistent error
642 * data if any. In case memory space is not accessible, proceed with
643 * BIST or slot_reset
644 */
645 rc = pci_read_config_word(pinstance->pdev, PCI_COMMAND, &pci_reg);
646 if ((rc == PCIBIOS_SUCCESSFUL) && (pci_reg & PCI_COMMAND_MEMORY)) {
647
 648 /* wait for IOA permission, i.e. until the CRITICAL_OPERATION bit is
 649 * reset. IOA doesn't generate any interrupts when the CRITICAL
 650 * OPERATION bit is reset, so a timer is started to wait for this
 651 * bit to be reset.
652 */
653 cmd->u.time_left = PMCRAID_RESET_TIMEOUT;
654 cmd->timer.data = (unsigned long)cmd;
655 cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
656 cmd->timer.function =
657 (void (*)(unsigned long))pmcraid_reset_alert_done;
658 add_timer(&cmd->timer);
659
660 iowrite32(DOORBELL_IOA_RESET_ALERT,
661 pinstance->int_regs.host_ioa_interrupt_reg);
662 doorbells =
663 ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
664 pmcraid_info("doorbells after reset alert: %x\n", doorbells);
665 } else {
666 pmcraid_info("PCI config is not accessible starting BIST\n");
667 pinstance->ioa_state = IOA_STATE_IN_HARD_RESET;
668 pmcraid_start_bist(cmd);
669 }
670}
671
672/**
673 * pmcraid_timeout_handler - Timeout handler for internally generated ops
674 *
 675 * @cmd : pointer to command structure that timed out
676 *
677 * This function blocks host requests and initiates an adapter reset.
678 *
679 * Return value:
680 * None
681 */
682static void pmcraid_timeout_handler(struct pmcraid_cmd *cmd)
683{
684 struct pmcraid_instance *pinstance = cmd->drv_inst;
685 unsigned long lock_flags;
686
687 dev_err(&pinstance->pdev->dev,
688 "Adapter being reset due to command timeout.\n");
689
690 /* Command timeouts result in hard reset sequence. The command that got
691 * timed out may be the one used as part of reset sequence. In this
692 * case restart reset sequence using the same command block even if
693 * reset is in progress. Otherwise fail this command and get a free
694 * command block to restart the reset sequence.
695 */
696 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
697 if (!pinstance->ioa_reset_in_progress) {
698 pinstance->ioa_reset_attempts = 0;
699 cmd = pmcraid_get_free_cmd(pinstance);
700
 701 /* If we are out of command blocks, just return here.
702 * Some other command's timeout handler can do the reset job
703 */
704 if (cmd == NULL) {
705 spin_unlock_irqrestore(pinstance->host->host_lock,
706 lock_flags);
707 pmcraid_err("no free cmnd block for timeout handler\n");
708 return;
709 }
710
711 pinstance->reset_cmd = cmd;
712 pinstance->ioa_reset_in_progress = 1;
713 } else {
714 pmcraid_info("reset is already in progress\n");
715
716 if (pinstance->reset_cmd != cmd) {
717 /* This command should have been given to IOA, this
718 * command will be completed by fail_outstanding_cmds
719 * anyway
720 */
721 pmcraid_err("cmd is pending but reset in progress\n");
722 }
723
724 /* If this command was being used as part of the reset
725 * sequence, set cmd_done pointer to pmcraid_ioa_reset. This
726 * causes fail_outstanding_commands not to return the command
727 * block back to free pool
728 */
729 if (cmd == pinstance->reset_cmd)
730 cmd->cmd_done = pmcraid_ioa_reset;
731
732 }
733
734 pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
735 scsi_block_requests(pinstance->host);
736 pmcraid_reset_alert(cmd);
737 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
738}
739
740/**
741 * pmcraid_internal_done - completion routine for internally generated cmds
742 *
743 * @cmd: command that got response from IOA
744 *
745 * Return Value:
746 * none
747 */
748static void pmcraid_internal_done(struct pmcraid_cmd *cmd)
749{
750 pmcraid_info("response internal cmd CDB[0] = %x ioasc = %x\n",
751 cmd->ioa_cb->ioarcb.cdb[0],
752 le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
753
754 /* Some of the internal commands are sent with callers blocking for the
 755 * response. This is indicated by the cmd->completion_req
756 * field. Response path needs to wake up any waiters waiting for cmd
757 * completion if this flag is set.
758 */
759 if (cmd->completion_req) {
760 cmd->completion_req = 0;
761 complete(&cmd->wait_for_completion);
762 }
763
764 /* most of the internal commands are completed by caller itself, so
765 * no need to return the command block back to free pool until we are
766 * required to do so (e.g once done with initialization).
767 */
768 if (cmd->release) {
769 cmd->release = 0;
770 pmcraid_return_cmd(cmd);
771 }
772}
773
774/**
775 * pmcraid_reinit_cfgtable_done - done function for cfg table reinitialization
776 *
777 * @cmd: command that got response from IOA
778 *
779 * This routine is called after driver re-reads configuration table due to a
780 * lost CCN. It returns the command block back to free pool and schedules
781 * worker thread to add/delete devices into the system.
782 *
783 * Return Value:
784 * none
785 */
786static void pmcraid_reinit_cfgtable_done(struct pmcraid_cmd *cmd)
787{
788 pmcraid_info("response internal cmd CDB[0] = %x ioasc = %x\n",
789 cmd->ioa_cb->ioarcb.cdb[0],
790 le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
791
792 if (cmd->release) {
793 cmd->release = 0;
794 pmcraid_return_cmd(cmd);
795 }
796 pmcraid_info("scheduling worker for config table reinitialization\n");
797 schedule_work(&cmd->drv_inst->worker_q);
798}
799
800/**
801 * pmcraid_erp_done - Process completion of SCSI error response from device
802 * @cmd: pmcraid_command
803 *
804 * This function copies the sense buffer into the scsi_cmd struct and completes
805 * scsi_cmd by calling scsi_done function.
806 *
807 * Return value:
808 * none
809 */
810static void pmcraid_erp_done(struct pmcraid_cmd *cmd)
811{
812 struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
813 struct pmcraid_instance *pinstance = cmd->drv_inst;
814 u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
815
816 if (PMCRAID_IOASC_SENSE_KEY(ioasc) > 0) {
817 scsi_cmd->result |= (DID_ERROR << 16);
818 pmcraid_err("command CDB[0] = %x failed with IOASC: 0x%08X\n",
819 cmd->ioa_cb->ioarcb.cdb[0], ioasc);
820 }
821
822 /* if we had allocated sense buffers for request sense, copy the sense
 823 * buffer into the scsi command and release the buffers
824 */
825 if (cmd->sense_buffer != NULL) {
826 memcpy(scsi_cmd->sense_buffer,
827 cmd->sense_buffer,
828 SCSI_SENSE_BUFFERSIZE);
829 pci_free_consistent(pinstance->pdev,
830 SCSI_SENSE_BUFFERSIZE,
831 cmd->sense_buffer, cmd->sense_buffer_dma);
832 cmd->sense_buffer = NULL;
833 cmd->sense_buffer_dma = 0;
834 }
835
836 scsi_dma_unmap(scsi_cmd);
837 pmcraid_return_cmd(cmd);
838 scsi_cmd->scsi_done(scsi_cmd);
839}
840
841/**
 842 * _pmcraid_fire_command - sends an IOA command to the adapter
843 *
844 * This function adds the given block into pending command list
845 * and returns without waiting
846 *
847 * @cmd : command to be sent to the device
848 *
849 * Return Value
850 * None
851 */
852static void _pmcraid_fire_command(struct pmcraid_cmd *cmd)
853{
854 struct pmcraid_instance *pinstance = cmd->drv_inst;
855 unsigned long lock_flags;
856
 857 /* Add this command block to pending cmd pool. We do this prior to
 858 * writing IOARCB to ioarrin because IOA might complete the command
 859 * by the time we are about to add it to the list. Response handler
 860 * (isr/tasklet) looks for the cmd block in the pending list.
861 */
862 spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags);
863 list_add_tail(&cmd->free_list, &pinstance->pending_cmd_pool);
864 spin_unlock_irqrestore(&pinstance->pending_pool_lock, lock_flags);
865 atomic_inc(&pinstance->outstanding_cmds);
866
867 /* driver writes lower 32-bit value of IOARCB address only */
868 mb();
869 iowrite32(le32_to_cpu(cmd->ioa_cb->ioarcb.ioarcb_bus_addr),
870 pinstance->ioarrin);
871}
872
873/**
874 * pmcraid_send_cmd - fires a command to IOA
875 *
 876 * This function also sets up the command completion function and,
 877 * optionally, a timeout handler
878 *
879 * @cmd: pointer to the command block to be fired to IOA
880 * @cmd_done: command completion function, called once IOA responds
881 * @timeout: timeout to wait for this command completion
882 * @timeout_func: timeout handler
883 *
884 * Return value
885 * none
886 */
887static void pmcraid_send_cmd(
888 struct pmcraid_cmd *cmd,
889 void (*cmd_done) (struct pmcraid_cmd *),
890 unsigned long timeout,
891 void (*timeout_func) (struct pmcraid_cmd *)
892)
893{
894 /* initialize done function */
895 cmd->cmd_done = cmd_done;
896
897 if (timeout_func) {
898 /* setup timeout handler */
899 cmd->timer.data = (unsigned long)cmd;
900 cmd->timer.expires = jiffies + timeout;
901 cmd->timer.function = (void (*)(unsigned long))timeout_func;
902 add_timer(&cmd->timer);
903 }
904
905 /* fire the command to IOA */
906 _pmcraid_fire_command(cmd);
907}
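
A typical invocation, mirroring the shutdown path below (constants as used in this file):

	pmcraid_send_cmd(cmd, pmcraid_internal_done,
			 PMCRAID_INTERNAL_TIMEOUT,
			 pmcraid_timeout_handler);

Passing a NULL timeout_func (as pmcraid_send_hcam_cmd() does) fires the command without arming a timer.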
908
909/**
910 * pmcraid_ioa_shutdown - sends SHUTDOWN command to ioa
911 *
912 * @cmd: pointer to the command block used as part of reset sequence
913 *
914 * Return Value
915 * None
916 */
917static void pmcraid_ioa_shutdown(struct pmcraid_cmd *cmd)
918{
919 pmcraid_info("response for Cancel CCN CDB[0] = %x ioasc = %x\n",
920 cmd->ioa_cb->ioarcb.cdb[0],
921 le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
922
 923 /* Note that commands sent during reset require the next command of the
 924 * sequence to be sent to IOA; hence reinit the done and timeout functions
925 */
926 pmcraid_reinit_cmdblk(cmd);
927 cmd->ioa_cb->ioarcb.request_type = REQ_TYPE_IOACMD;
928 cmd->ioa_cb->ioarcb.resource_handle =
929 cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
930 cmd->ioa_cb->ioarcb.cdb[0] = PMCRAID_IOA_SHUTDOWN;
931 cmd->ioa_cb->ioarcb.cdb[1] = PMCRAID_SHUTDOWN_NORMAL;
932
933 /* fire shutdown command to hardware. */
934 pmcraid_info("firing normal shutdown command (%d) to IOA\n",
935 le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle));
936
937 pmcraid_send_cmd(cmd, pmcraid_ioa_reset,
938 PMCRAID_SHUTDOWN_TIMEOUT,
939 pmcraid_timeout_handler);
940}
941
942/**
943 * pmcraid_identify_hrrq - registers host rrq buffers with IOA
944 * @cmd: pointer to command block to be used for identify hrrq
945 *
946 * Return Value
 947 * none
948 */
949
950static void pmcraid_querycfg(struct pmcraid_cmd *);
951
952static void pmcraid_identify_hrrq(struct pmcraid_cmd *cmd)
953{
954 struct pmcraid_instance *pinstance = cmd->drv_inst;
955 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
956 int index = 0;
957 __be64 hrrq_addr = cpu_to_be64(pinstance->hrrq_start_bus_addr[index]);
958 u32 hrrq_size = cpu_to_be32(sizeof(u32) * PMCRAID_MAX_CMD);
959
960 pmcraid_reinit_cmdblk(cmd);
961
962 /* Initialize ioarcb */
963 ioarcb->request_type = REQ_TYPE_IOACMD;
964 ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
965
966 /* initialize the hrrq number where IOA will respond to this command */
967 ioarcb->hrrq_id = index;
968 ioarcb->cdb[0] = PMCRAID_IDENTIFY_HRRQ;
969 ioarcb->cdb[1] = index;
970
971 /* IOA expects 64-bit pci address to be written in B.E format
 972 * (i.e. cdb[2]=MSByte..cdb[9]=LSByte).
973 */
974 pmcraid_info("HRRQ_IDENTIFY with hrrq:ioarcb => %llx:%llx\n",
975 hrrq_addr, ioarcb->ioarcb_bus_addr);
976
977 memcpy(&(ioarcb->cdb[2]), &hrrq_addr, sizeof(hrrq_addr));
978 memcpy(&(ioarcb->cdb[10]), &hrrq_size, sizeof(hrrq_size));
979
980 /* Subsequent commands require HRRQ identification to be successful.
981 * Note that this gets called even during reset from SCSI mid-layer
982 * or tasklet
983 */
984 pmcraid_send_cmd(cmd, pmcraid_querycfg,
985 PMCRAID_INTERNAL_TIMEOUT,
986 pmcraid_timeout_handler);
987}
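
A worked illustration of the big-endian CDB packing noted above, assuming a host RRQ at bus address 0x0000000123456789:

	/* cpu_to_be64(0x0000000123456789) stores bytes MSB-first, so after
	 * the memcpy the CDB carries:
	 *   cdb[2..9]   = 00 00 00 01 23 45 67 89   (HRRQ address)
	 *   cdb[10..13] = big-endian 32-bit HRRQ size
	 */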
988
989static void pmcraid_process_ccn(struct pmcraid_cmd *cmd);
990static void pmcraid_process_ldn(struct pmcraid_cmd *cmd);
991
992/**
 993 * pmcraid_send_hcam_cmd - send an initialized command block (HCAM) to IOA
994 *
995 * @cmd: initialized command block pointer
996 *
997 * Return Value
998 * none
999 */
1000static void pmcraid_send_hcam_cmd(struct pmcraid_cmd *cmd)
1001{
1002 if (cmd->ioa_cb->ioarcb.cdb[1] == PMCRAID_HCAM_CODE_CONFIG_CHANGE)
1003 atomic_set(&(cmd->drv_inst->ccn.ignore), 0);
1004 else
1005 atomic_set(&(cmd->drv_inst->ldn.ignore), 0);
1006
1007 pmcraid_send_cmd(cmd, cmd->cmd_done, 0, NULL);
1008}
1009
1010/**
 1011 * pmcraid_init_hcam - allocate and initialize a command block for HCAM
1012 *
1013 * @pinstance: pointer to adapter instance structure
1014 * @type: HCAM type
1015 *
1016 * Return Value
1017 * pointer to initialized pmcraid_cmd structure or NULL
1018 */
1019static struct pmcraid_cmd *pmcraid_init_hcam
1020(
1021 struct pmcraid_instance *pinstance,
1022 u8 type
1023)
1024{
1025 struct pmcraid_cmd *cmd;
1026 struct pmcraid_ioarcb *ioarcb;
1027 struct pmcraid_ioadl_desc *ioadl;
1028 struct pmcraid_hostrcb *hcam;
1029 void (*cmd_done) (struct pmcraid_cmd *);
1030 dma_addr_t dma;
1031 int rcb_size;
1032
1033 cmd = pmcraid_get_free_cmd(pinstance);
1034
1035 if (!cmd) {
1036 pmcraid_err("no free command blocks for hcam\n");
1037 return cmd;
1038 }
1039
1040 if (type == PMCRAID_HCAM_CODE_CONFIG_CHANGE) {
1041 rcb_size = sizeof(struct pmcraid_hcam_ccn);
1042 cmd_done = pmcraid_process_ccn;
1043 dma = pinstance->ccn.baddr + PMCRAID_AEN_HDR_SIZE;
1044 hcam = &pinstance->ccn;
1045 } else {
1046 rcb_size = sizeof(struct pmcraid_hcam_ldn);
1047 cmd_done = pmcraid_process_ldn;
1048 dma = pinstance->ldn.baddr + PMCRAID_AEN_HDR_SIZE;
1049 hcam = &pinstance->ldn;
1050 }
1051
1052 /* initialize command pointer used for HCAM registration */
1053 hcam->cmd = cmd;
1054
1055 ioarcb = &cmd->ioa_cb->ioarcb;
1056 ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
1057 offsetof(struct pmcraid_ioarcb,
1058 add_data.u.ioadl[0]));
1059 ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
1060 ioadl = ioarcb->add_data.u.ioadl;
1061
1062 /* Initialize ioarcb */
1063 ioarcb->request_type = REQ_TYPE_HCAM;
1064 ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
1065 ioarcb->cdb[0] = PMCRAID_HOST_CONTROLLED_ASYNC;
1066 ioarcb->cdb[1] = type;
1067 ioarcb->cdb[7] = (rcb_size >> 8) & 0xFF;
1068 ioarcb->cdb[8] = (rcb_size) & 0xFF;
1069
1070 ioarcb->data_transfer_length = cpu_to_le32(rcb_size);
1071
1072 ioadl[0].flags |= cpu_to_le32(IOADL_FLAGS_READ_LAST);
1073 ioadl[0].data_len = cpu_to_le32(rcb_size);
1074 ioadl[0].address = cpu_to_le32(dma);
1075
1076 cmd->cmd_done = cmd_done;
1077 return cmd;
1078}
1079
1080/**
1081 * pmcraid_send_hcam - Send an HCAM to IOA
1082 * @pinstance: ioa config struct
1083 * @type: HCAM type
1084 *
1085 * This function will send a Host Controlled Async command to IOA.
1086 *
1087 * Return value:
1088 * none
1089 */
1090static void pmcraid_send_hcam(struct pmcraid_instance *pinstance, u8 type)
1091{
1092 struct pmcraid_cmd *cmd = pmcraid_init_hcam(pinstance, type);
1093 pmcraid_send_hcam_cmd(cmd);
1094}
1095
1096
1097/**
1098 * pmcraid_prepare_cancel_cmd - prepares a command block to abort another
1099 *
1100 * @cmd: pointer to cmd that is used as cancelling command
1101 * @cmd_to_cancel: pointer to the command that needs to be cancelled
1102 */
1103static void pmcraid_prepare_cancel_cmd(
1104 struct pmcraid_cmd *cmd,
1105 struct pmcraid_cmd *cmd_to_cancel
1106)
1107{
1108 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
1109 __be64 ioarcb_addr = cmd_to_cancel->ioa_cb->ioarcb.ioarcb_bus_addr;
1110
1111 /* Get the resource handle to where the command to be aborted has been
1112 * sent.
1113 */
1114 ioarcb->resource_handle = cmd_to_cancel->ioa_cb->ioarcb.resource_handle;
1115 ioarcb->request_type = REQ_TYPE_IOACMD;
1116 memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
1117 ioarcb->cdb[0] = PMCRAID_ABORT_CMD;
1118
1119 /* IOARCB address of the command to be cancelled is given in
 1120 * cdb[2]..cdb[9] in Big-Endian format. Note that length bits in
1121 * IOARCB address are not masked.
1122 */
1123 ioarcb_addr = cpu_to_be64(ioarcb_addr);
1124 memcpy(&(ioarcb->cdb[2]), &ioarcb_addr, sizeof(ioarcb_addr));
1125}
1126
1127/**
1128 * pmcraid_cancel_hcam - sends ABORT task to abort a given HCAM
1129 *
1130 * @cmd: command to be used as cancelling command
1131 * @type: HCAM type
1132 * @cmd_done: op done function for the cancelling command
1133 */
1134static void pmcraid_cancel_hcam(
1135 struct pmcraid_cmd *cmd,
1136 u8 type,
1137 void (*cmd_done) (struct pmcraid_cmd *)
1138)
1139{
1140 struct pmcraid_instance *pinstance;
1141 struct pmcraid_hostrcb *hcam;
1142
1143 pinstance = cmd->drv_inst;
1144 hcam = (type == PMCRAID_HCAM_CODE_LOG_DATA) ?
1145 &pinstance->ldn : &pinstance->ccn;
1146
 1147 /* prepare for cancelling previous hcam command. Only if the HCAM is
 1148 * currently pending with IOA do we have hcam->cmd as non-null
 1149 */
1150 if (hcam->cmd == NULL)
1151 return;
1152
1153 pmcraid_prepare_cancel_cmd(cmd, hcam->cmd);
1154
1155 /* writing to IOARRIN must be protected by host_lock, as mid-layer
 1156 * may schedule a queuecommand while we are doing this
1157 */
1158 pmcraid_send_cmd(cmd, cmd_done,
1159 PMCRAID_INTERNAL_TIMEOUT,
1160 pmcraid_timeout_handler);
1161}
1162
1163/**
1164 * pmcraid_cancel_ccn - cancel CCN HCAM already registered with IOA
1165 *
1166 * @cmd: command block to be used for cancelling the HCAM
1167 */
1168static void pmcraid_cancel_ccn(struct pmcraid_cmd *cmd)
1169{
1170 pmcraid_info("response for Cancel LDN CDB[0] = %x ioasc = %x\n",
1171 cmd->ioa_cb->ioarcb.cdb[0],
1172 le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
1173
1174 pmcraid_reinit_cmdblk(cmd);
1175
1176 pmcraid_cancel_hcam(cmd,
1177 PMCRAID_HCAM_CODE_CONFIG_CHANGE,
1178 pmcraid_ioa_shutdown);
1179}
1180
1181/**
1182 * pmcraid_cancel_ldn - cancel LDN HCAM already registered with IOA
1183 *
1184 * @cmd: command block to be used for cancelling the HCAM
1185 */
1186static void pmcraid_cancel_ldn(struct pmcraid_cmd *cmd)
1187{
1188 pmcraid_cancel_hcam(cmd,
1189 PMCRAID_HCAM_CODE_LOG_DATA,
1190 pmcraid_cancel_ccn);
1191}
1192
1193/**
1194 * pmcraid_expose_resource - check if the resource can be exposed to OS
1195 *
1196 * @cfgte: pointer to configuration table entry of the resource
1197 *
1198 * Return value:
1199 * true if resource can be added to midlayer, false(0) otherwise
1200 */
1201static int pmcraid_expose_resource(struct pmcraid_config_table_entry *cfgte)
1202{
1203 int retval = 0;
1204
1205 if (cfgte->resource_type == RES_TYPE_VSET)
1206 retval = ((cfgte->unique_flags1 & 0xFF) < 0xFE);
1207 else if (cfgte->resource_type == RES_TYPE_GSCSI)
1208 retval = (RES_BUS(cfgte->resource_address) !=
1209 PMCRAID_VIRTUAL_ENCL_BUS_ID);
1210 return retval;
1211}
1212
1213/* attributes supported by pmcraid_event_family */
1214enum {
1215 PMCRAID_AEN_ATTR_UNSPEC,
1216 PMCRAID_AEN_ATTR_EVENT,
1217 __PMCRAID_AEN_ATTR_MAX,
1218};
1219#define PMCRAID_AEN_ATTR_MAX (__PMCRAID_AEN_ATTR_MAX - 1)
1220
1221/* commands supported by pmcraid_event_family */
1222enum {
1223 PMCRAID_AEN_CMD_UNSPEC,
1224 PMCRAID_AEN_CMD_EVENT,
1225 __PMCRAID_AEN_CMD_MAX,
1226};
1227#define PMCRAID_AEN_CMD_MAX (__PMCRAID_AEN_CMD_MAX - 1)
1228
1229static struct genl_family pmcraid_event_family = {
1230 .id = GENL_ID_GENERATE,
1231 .name = "pmcraid",
1232 .version = 1,
1233 .maxattr = PMCRAID_AEN_ATTR_MAX
1234};
1235
1236/**
1237 * pmcraid_netlink_init - registers pmcraid_event_family
1238 *
1239 * Return value:
1240 * 0 if the pmcraid_event_family is successfully registered
1241 * with netlink generic, non-zero otherwise
1242 */
1243static int pmcraid_netlink_init(void)
1244{
1245 int result;
1246
1247 result = genl_register_family(&pmcraid_event_family);
1248
1249 if (result)
1250 return result;
1251
1252 pmcraid_info("registered NETLINK GENERIC group: %d\n",
1253 pmcraid_event_family.id);
1254
1255 return result;
1256}
1257
1258/**
1259 * pmcraid_netlink_release - unregisters pmcraid_event_family
1260 *
1261 * Return value:
1262 * none
1263 */
1264static void pmcraid_netlink_release(void)
1265{
1266 genl_unregister_family(&pmcraid_event_family);
1267}
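
A sketch of how the two netlink helpers are expected to pair up in module init/exit (the driver's real entry points appear later in the file; the names here are illustrative):

	static int __init pmcraid_init_sketch(void)
	{
		int rc = pmcraid_netlink_init();

		if (rc)
			return rc;
		/* ... chrdev, class and PCI driver registration ... */
		return 0;
	}

	static void __exit pmcraid_exit_sketch(void)
	{
		/* ... unwind in reverse order ... */
		pmcraid_netlink_release();
	}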
1268
1269/**
1270 * pmcraid_notify_aen - sends event msg to user space application
1271 * @pinstance: pointer to adapter instance structure
1272 * @type: HCAM type
1273 *
1274 * Return value:
1275 * 0 if success, error value in case of any failure.
1276 */
1277static int pmcraid_notify_aen(struct pmcraid_instance *pinstance, u8 type)
1278{
1279 struct sk_buff *skb;
1280 struct pmcraid_aen_msg *aen_msg;
1281 void *msg_header;
1282 int data_size, total_size;
1283 int result;
1284
1285
1286 if (type == PMCRAID_HCAM_CODE_LOG_DATA) {
1287 aen_msg = pinstance->ldn.msg;
1288 data_size = pinstance->ldn.hcam->data_len;
1289 } else {
1290 aen_msg = pinstance->ccn.msg;
1291 data_size = pinstance->ccn.hcam->data_len;
1292 }
1293
1294 data_size += sizeof(struct pmcraid_hcam_hdr);
1295 aen_msg->hostno = (pinstance->host->unique_id << 16 |
1296 MINOR(pinstance->cdev.dev));
1297 aen_msg->length = data_size;
1298 data_size += sizeof(*aen_msg);
1299
1300 total_size = nla_total_size(data_size);
1301 skb = genlmsg_new(total_size, GFP_ATOMIC);
1302
1303
1304 if (!skb) {
1305 pmcraid_err("Failed to allocate aen data SKB of size: %x\n",
1306 total_size);
1307 return -ENOMEM;
1308 }
1309
1310 /* add the genetlink message header */
1311 msg_header = genlmsg_put(skb, 0, 0,
1312 &pmcraid_event_family, 0,
1313 PMCRAID_AEN_CMD_EVENT);
1314 if (!msg_header) {
1315 pmcraid_err("failed to copy command details\n");
1316 nlmsg_free(skb);
1317 return -ENOMEM;
1318 }
1319
1320 result = nla_put(skb, PMCRAID_AEN_ATTR_EVENT, data_size, aen_msg);
1321
1322 if (result) {
1323 pmcraid_err("failed to copy AEN attribute data \n");
1324 nlmsg_free(skb);
1325 return -EINVAL;
1326 }
1327
 1328 /* send genetlink multicast message to notify applications */
1329 result = genlmsg_end(skb, msg_header);
1330
1331 if (result < 0) {
1332 pmcraid_err("genlmsg_end failed\n");
1333 nlmsg_free(skb);
1334 return result;
1335 }
1336
1337 result =
1338 genlmsg_multicast(skb, 0, pmcraid_event_family.id, GFP_ATOMIC);
1339
 1340 /* If there are no listeners, genlmsg_multicast may return a non-zero
1341 * value.
1342 */
1343 if (result)
1344 pmcraid_info("failed to send %s event message %x!\n",
1345 type == PMCRAID_HCAM_CODE_LOG_DATA ? "LDN" : "CCN",
1346 result);
1347 return result;
1348}
1349
1350/**
1351 * pmcraid_handle_config_change - Handle a config change from the adapter
1352 * @pinstance: pointer to per adapter instance structure
1353 *
1354 * Return value:
1355 * none
1356 */
1357static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
1358{
1359 struct pmcraid_config_table_entry *cfg_entry;
1360 struct pmcraid_hcam_ccn *ccn_hcam;
1361 struct pmcraid_cmd *cmd;
1362 struct pmcraid_cmd *cfgcmd;
1363 struct pmcraid_resource_entry *res = NULL;
1364 u32 new_entry = 1;
1365 unsigned long lock_flags;
1366 unsigned long host_lock_flags;
1367 int rc;
1368
1369 ccn_hcam = (struct pmcraid_hcam_ccn *)pinstance->ccn.hcam;
1370 cfg_entry = &ccn_hcam->cfg_entry;
1371
1372 pmcraid_info
1373 ("CCN(%x): %x type: %x lost: %x flags: %x res: %x:%x:%x:%x\n",
1374 pinstance->ccn.hcam->ilid,
1375 pinstance->ccn.hcam->op_code,
1376 pinstance->ccn.hcam->notification_type,
1377 pinstance->ccn.hcam->notification_lost,
1378 pinstance->ccn.hcam->flags,
1379 pinstance->host->unique_id,
1380 RES_IS_VSET(*cfg_entry) ? PMCRAID_VSET_BUS_ID :
1381 (RES_IS_GSCSI(*cfg_entry) ? PMCRAID_PHYS_BUS_ID :
1382 RES_BUS(cfg_entry->resource_address)),
1383 RES_IS_VSET(*cfg_entry) ? cfg_entry->unique_flags1 :
1384 RES_TARGET(cfg_entry->resource_address),
1385 RES_LUN(cfg_entry->resource_address));
1386
1387
1388 /* If this HCAM indicates a lost notification, read the config table */
1389 if (pinstance->ccn.hcam->notification_lost) {
1390 cfgcmd = pmcraid_get_free_cmd(pinstance);
1391 if (cfgcmd) {
1392 pmcraid_info("lost CCN, reading config table\b");
1393 pinstance->reinit_cfg_table = 1;
1394 pmcraid_querycfg(cfgcmd);
1395 } else {
1396 pmcraid_err("lost CCN, no free cmd for querycfg\n");
1397 }
1398 goto out_notify_apps;
1399 }
1400
1401 /* If this resource is not going to be added to mid-layer, just notify
1402 * applications and return
1403 */
1404 if (!pmcraid_expose_resource(cfg_entry))
1405 goto out_notify_apps;
1406
1407 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
1408 list_for_each_entry(res, &pinstance->used_res_q, queue) {
1409 rc = memcmp(&res->cfg_entry.resource_address,
1410 &cfg_entry->resource_address,
1411 sizeof(cfg_entry->resource_address));
1412 if (!rc) {
1413 new_entry = 0;
1414 break;
1415 }
1416 }
1417
1418 if (new_entry) {
1419
 1420 /* If there are more resources than the driver can
 1421 * manage, do not notify the applications about the CCN. Just
 1422 * ignore this notification and re-register the same HCAM
1423 */
1424 if (list_empty(&pinstance->free_res_q)) {
1425 spin_unlock_irqrestore(&pinstance->resource_lock,
1426 lock_flags);
1427 pmcraid_err("too many resources attached\n");
1428 spin_lock_irqsave(pinstance->host->host_lock,
1429 host_lock_flags);
1430 pmcraid_send_hcam(pinstance,
1431 PMCRAID_HCAM_CODE_CONFIG_CHANGE);
1432 spin_unlock_irqrestore(pinstance->host->host_lock,
1433 host_lock_flags);
1434 return;
1435 }
1436
1437 res = list_entry(pinstance->free_res_q.next,
1438 struct pmcraid_resource_entry, queue);
1439
1440 list_del(&res->queue);
1441 res->scsi_dev = NULL;
1442 res->reset_progress = 0;
1443 list_add_tail(&res->queue, &pinstance->used_res_q);
1444 }
1445
1446 memcpy(&res->cfg_entry, cfg_entry,
1447 sizeof(struct pmcraid_config_table_entry));
1448
1449 if (pinstance->ccn.hcam->notification_type ==
1450 NOTIFICATION_TYPE_ENTRY_DELETED) {
1451 if (res->scsi_dev) {
1452 res->change_detected = RES_CHANGE_DEL;
1453 res->cfg_entry.resource_handle =
1454 PMCRAID_INVALID_RES_HANDLE;
1455 schedule_work(&pinstance->worker_q);
1456 } else {
1457 /* This may be one of the non-exposed resources */
1458 list_move_tail(&res->queue, &pinstance->free_res_q);
1459 }
1460 } else if (!res->scsi_dev) {
1461 res->change_detected = RES_CHANGE_ADD;
1462 schedule_work(&pinstance->worker_q);
1463 }
1464 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
1465
1466out_notify_apps:
1467
1468 /* Notify configuration changes to registered applications.*/
1469 if (!pmcraid_disable_aen)
1470 pmcraid_notify_aen(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
1471
1472 cmd = pmcraid_init_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
1473 if (cmd)
1474 pmcraid_send_hcam_cmd(cmd);
1475}
1476
1477/**
 1478 * pmcraid_get_error_info - return error table entry for an ioasc
 1479 * @ioasc: ioasc code
 1480 * Return Value
 1481 * pointer to the matching pmcraid_ioasc_error entry, or NULL if none
1482 */
1483static struct pmcraid_ioasc_error *pmcraid_get_error_info(u32 ioasc)
1484{
1485 int i;
1486 for (i = 0; i < ARRAY_SIZE(pmcraid_ioasc_error_table); i++) {
1487 if (pmcraid_ioasc_error_table[i].ioasc_code == ioasc)
1488 return &pmcraid_ioasc_error_table[i];
1489 }
1490 return NULL;
1491}
1492
1493/**
 1494 * pmcraid_ioasc_logger - log IOASC information based on user settings
1495 * @ioasc: ioasc code
1496 * @cmd: pointer to command that resulted in 'ioasc'
1497 */
1498void pmcraid_ioasc_logger(u32 ioasc, struct pmcraid_cmd *cmd)
1499{
1500 struct pmcraid_ioasc_error *error_info = pmcraid_get_error_info(ioasc);
1501
1502 if (error_info == NULL ||
1503 cmd->drv_inst->current_log_level < error_info->log_level)
1504 return;
1505
1506 /* log the error string */
1507 pmcraid_err("cmd [%d] for resource %x failed with %x(%s)\n",
1508 cmd->ioa_cb->ioarcb.cdb[0],
1509 cmd->ioa_cb->ioarcb.resource_handle,
1510 le32_to_cpu(ioasc), error_info->error_string);
1511}
1512
1513/**
1514 * pmcraid_handle_error_log - Handle a config change (error log) from the IOA
1515 *
1516 * @pinstance: pointer to per adapter instance structure
1517 *
1518 * Return value:
1519 * none
1520 */
1521static void pmcraid_handle_error_log(struct pmcraid_instance *pinstance)
1522{
1523 struct pmcraid_hcam_ldn *hcam_ldn;
1524 u32 ioasc;
1525
1526 hcam_ldn = (struct pmcraid_hcam_ldn *)pinstance->ldn.hcam;
1527
1528 pmcraid_info
1529 ("LDN(%x): %x type: %x lost: %x flags: %x overlay id: %x\n",
1530 pinstance->ldn.hcam->ilid,
1531 pinstance->ldn.hcam->op_code,
1532 pinstance->ldn.hcam->notification_type,
1533 pinstance->ldn.hcam->notification_lost,
1534 pinstance->ldn.hcam->flags,
1535 pinstance->ldn.hcam->overlay_id);
1536
1537 /* log only the errors, no need to log informational log entries */
1538 if (pinstance->ldn.hcam->notification_type !=
1539 NOTIFICATION_TYPE_ERROR_LOG)
1540 return;
1541
1542 if (pinstance->ldn.hcam->notification_lost ==
1543 HOSTRCB_NOTIFICATIONS_LOST)
1544 dev_err(&pinstance->pdev->dev, "Error notifications lost\n");
1545
1546 ioasc = le32_to_cpu(hcam_ldn->error_log.fd_ioasc);
1547
1548 if (ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET ||
1549 ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER) {
1550 dev_err(&pinstance->pdev->dev,
1551 "UnitAttention due to IOA Bus Reset\n");
1552 scsi_report_bus_reset(
1553 pinstance->host,
1554 RES_BUS(hcam_ldn->error_log.fd_ra));
1555 }
1556
1557 return;
1558}
1559
1560/**
1561 * pmcraid_process_ccn - Op done function for a CCN.
1562 * @cmd: pointer to command struct
1563 *
1564 * This function is the op done function for a configuration
1565 * change notification
1566 *
1567 * Return value:
1568 * none
1569 */
1570static void pmcraid_process_ccn(struct pmcraid_cmd *cmd)
1571{
1572 struct pmcraid_instance *pinstance = cmd->drv_inst;
1573 u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
1574 unsigned long lock_flags;
1575
1576 pinstance->ccn.cmd = NULL;
1577 pmcraid_return_cmd(cmd);
1578
1579 /* If driver initiated IOA reset happened while this hcam was pending
1580 * with IOA, or IOA bringdown sequence is in progress, no need to
1581 * re-register the hcam
1582 */
1583 if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
1584 atomic_read(&pinstance->ccn.ignore) == 1) {
1585 return;
1586 } else if (ioasc) {
1587 dev_err(&pinstance->pdev->dev,
1588 "Host RCB (CCN) failed with IOASC: 0x%08X\n", ioasc);
1589 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
1590 pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
1591 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
1592 } else {
1593 pmcraid_handle_config_change(pinstance);
1594 }
1595}
1596
1597/**
1598 * pmcraid_process_ldn - op done function for an LDN
1599 * @cmd: pointer to command block
1600 *
1601 * Return value
1602 * none
1603 */
1604static void pmcraid_initiate_reset(struct pmcraid_instance *);
1605
1606static void pmcraid_process_ldn(struct pmcraid_cmd *cmd)
1607{
1608 struct pmcraid_instance *pinstance = cmd->drv_inst;
1609 struct pmcraid_hcam_ldn *ldn_hcam =
1610 (struct pmcraid_hcam_ldn *)pinstance->ldn.hcam;
1611 u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
1612 u32 fd_ioasc = le32_to_cpu(ldn_hcam->error_log.fd_ioasc);
1613 unsigned long lock_flags;
1614
1615 /* return the command block back to freepool */
1616 pinstance->ldn.cmd = NULL;
1617 pmcraid_return_cmd(cmd);
1618
1619 /* If driver initiated IOA reset happened while this hcam was pending
1620 * with IOA, no need to re-register the hcam as reset engine will do it
1621 * once reset sequence is complete
1622 */
1623 if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
1624 atomic_read(&pinstance->ccn.ignore) == 1) {
1625 return;
1626 } else if (!ioasc) {
1627 pmcraid_handle_error_log(pinstance);
1628 if (fd_ioasc == PMCRAID_IOASC_NR_IOA_RESET_REQUIRED) {
1629 spin_lock_irqsave(pinstance->host->host_lock,
1630 lock_flags);
1631 pmcraid_initiate_reset(pinstance);
1632 spin_unlock_irqrestore(pinstance->host->host_lock,
1633 lock_flags);
1634 return;
1635 }
1636 } else {
1637 dev_err(&pinstance->pdev->dev,
1638 "Host RCB(LDN) failed with IOASC: 0x%08X\n", ioasc);
1639 }
1640 /* send netlink message for HCAM notification if enabled */
1641 if (!pmcraid_disable_aen)
1642 pmcraid_notify_aen(pinstance, PMCRAID_HCAM_CODE_LOG_DATA);
1643
1644 cmd = pmcraid_init_hcam(pinstance, PMCRAID_HCAM_CODE_LOG_DATA);
1645 if (cmd)
1646 pmcraid_send_hcam_cmd(cmd);
1647}
1648
1649/**
1650 * pmcraid_register_hcams - register HCAMs for CCN and LDN
1651 *
1652 * @pinstance: pointer per adapter instance structure
1653 *
1654 * Return Value
1655 * none
1656 */
1657static void pmcraid_register_hcams(struct pmcraid_instance *pinstance)
1658{
1659 pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
1660 pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_LOG_DATA);
1661}
1662
1663/**
1664 * pmcraid_unregister_hcams - cancel HCAMs registered already
1665 * @cmd: pointer to command used as part of reset sequence
1666 */
1667static void pmcraid_unregister_hcams(struct pmcraid_cmd *cmd)
1668{
1669 struct pmcraid_instance *pinstance = cmd->drv_inst;
1670
 1671 /* During IOA bringdown, HCAMs may still fire and the tasklet would
 1672 * proceed with handling the responses though it is not necessary. In
 1673 * order to prevent this, set 'ignore', so that bring-down sequence doesn't
1674 * re-send any more hcams
1675 */
1676 atomic_set(&pinstance->ccn.ignore, 1);
1677 atomic_set(&pinstance->ldn.ignore, 1);
1678
1679 /* If adapter reset was forced as part of runtime reset sequence,
1680 * start the reset sequence.
1681 */
1682 if (pinstance->force_ioa_reset && !pinstance->ioa_bringdown) {
1683 pinstance->force_ioa_reset = 0;
1684 pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
1685 pmcraid_reset_alert(cmd);
1686 return;
1687 }
1688
1689 /* Driver tries to cancel HCAMs by sending ABORT TASK for each HCAM
1690 * one after the other. So CCN cancellation will be triggered by
1691 * pmcraid_cancel_ldn itself.
1692 */
1693 pmcraid_cancel_ldn(cmd);
1694}
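
For reference, the completion chain the comment above describes, as wired up through the cmd_done pointers in this file:

	/*
	 * pmcraid_cancel_ldn(cmd)           aborts the LDN HCAM, then
	 *   -> pmcraid_cancel_ccn(cmd)      aborts the CCN HCAM, then
	 *     -> pmcraid_ioa_shutdown(cmd)  sends the normal shutdown, then
	 *       -> pmcraid_ioa_reset(cmd)   continues the reset sequence
	 */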
1695
1696/**
1697 * pmcraid_reset_enable_ioa - re-enable IOA after a hard reset
1698 * @pinstance: pointer to adapter instance structure
1699 * Return Value
1700 * 1 if TRANSITION_TO_OPERATIONAL is active, otherwise 0
1701 */
1702static void pmcraid_reinit_buffers(struct pmcraid_instance *);
1703
1704static int pmcraid_reset_enable_ioa(struct pmcraid_instance *pinstance)
1705{
1706 u32 intrs;
1707
1708 pmcraid_reinit_buffers(pinstance);
1709 intrs = pmcraid_read_interrupts(pinstance);
1710
1711 pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS);
1712
1713 if (intrs & INTRS_TRANSITION_TO_OPERATIONAL) {
1714 iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
1715 pinstance->int_regs.ioa_host_interrupt_mask_reg);
1716 iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
1717 pinstance->int_regs.ioa_host_interrupt_clr_reg);
1718 return 1;
1719 } else {
1720 return 0;
1721 }
1722}
1723
1724/**
1725 * pmcraid_soft_reset - performs a soft reset and makes IOA become ready
1726 * @cmd : pointer to reset command block
1727 *
1728 * Return Value
1729 * none
1730 */
1731static void pmcraid_soft_reset(struct pmcraid_cmd *cmd)
1732{
1733 struct pmcraid_instance *pinstance = cmd->drv_inst;
1734 u32 int_reg;
1735 u32 doorbell;
1736
1737	/* There will be an interrupt when the Transition to Operational bit is
1738	 * set, at which point the tasklet executes the next reset task. If the
1739	 * interrupt never arrives, the timeout handler re-initiates the reset
1740 */
1741 cmd->cmd_done = pmcraid_ioa_reset;
1742 cmd->timer.data = (unsigned long)cmd;
1743 cmd->timer.expires = jiffies +
1744 msecs_to_jiffies(PMCRAID_TRANSOP_TIMEOUT);
1745 cmd->timer.function = (void (*)(unsigned long))pmcraid_timeout_handler;
1746
1747 if (!timer_pending(&cmd->timer))
1748 add_timer(&cmd->timer);
1749
1750 /* Enable destructive diagnostics on IOA if it is not yet in
1751 * operational state
1752 */
1753 doorbell = DOORBELL_RUNTIME_RESET |
1754 DOORBELL_ENABLE_DESTRUCTIVE_DIAGS;
1755
1756 iowrite32(doorbell, pinstance->int_regs.host_ioa_interrupt_reg);
1757 int_reg = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
1758 pmcraid_info("Waiting for IOA to become operational %x:%x\n",
1759 ioread32(pinstance->int_regs.host_ioa_interrupt_reg),
1760 int_reg);
1761}
1762
1763/**
1764 * pmcraid_get_dump - retrieves IOA dump in case of Unit Check interrupt
1765 *
1766 * @pinstance: pointer to adapter instance structure
1767 *
1768 * Return Value
1769 * none
1770 */
1771static void pmcraid_get_dump(struct pmcraid_instance *pinstance)
1772{
1773 pmcraid_info("%s is not yet implemented\n", __func__);
1774}
1775
1776/**
1777 * pmcraid_fail_outstanding_cmds - Fails all outstanding ops.
1778 * @pinstance: pointer to adapter instance structure
1779 *
1780 * This function fails all outstanding ops. If they are submitted to IOA
1781 * already, it sends cancel all messages if IOA is still accepting IOARCBs,
1782 * otherwise just completes the commands and returns the cmd blocks to free
1783 * pool.
1784 *
1785 * Return value:
1786 * none
1787 */
1788static void pmcraid_fail_outstanding_cmds(struct pmcraid_instance *pinstance)
1789{
1790 struct pmcraid_cmd *cmd, *temp;
1791 unsigned long lock_flags;
1792
1793	/* The pending command list is protected by pending_pool_lock; its
1794	 * traversal must be done while holding this lock
1795 */
1796 spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags);
1797 list_for_each_entry_safe(cmd, temp, &pinstance->pending_cmd_pool,
1798 free_list) {
1799 list_del(&cmd->free_list);
1800 spin_unlock_irqrestore(&pinstance->pending_pool_lock,
1801 lock_flags);
1802 cmd->ioa_cb->ioasa.ioasc =
1803 cpu_to_le32(PMCRAID_IOASC_IOA_WAS_RESET);
1804 cmd->ioa_cb->ioasa.ilid =
1805 cpu_to_be32(PMCRAID_DRIVER_ILID);
1806
1807 /* In case the command timer is still running */
1808 del_timer(&cmd->timer);
1809
1810		/* If this is an IO command, complete it by invoking the
1811		 * scsi_done function. If this is an internal command other
1812		 * than pmcraid_ioa_reset or the HCAM commands, invoke cmd_done
1813		 * to complete it
1814 */
1815 if (cmd->scsi_cmd) {
1816
1817 struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
1818 __le32 resp = cmd->ioa_cb->ioarcb.response_handle;
1819
1820 scsi_cmd->result |= DID_ERROR << 16;
1821
1822			scsi_dma_unmap(scsi_cmd);
1823
1824			pmcraid_info("failing(%d) CDB[0] = %x result: %x\n",
1825				     le32_to_cpu(resp) >> 2,
1826				     cmd->ioa_cb->ioarcb.cdb[0],
1827				     scsi_cmd->result);
1828			/* return cmd only after its fields have been read */
1829			pmcraid_return_cmd(cmd);
1830			scsi_cmd->scsi_done(scsi_cmd);
1831 } else if (cmd->cmd_done == pmcraid_internal_done ||
1832 cmd->cmd_done == pmcraid_erp_done) {
1833 cmd->cmd_done(cmd);
1834 } else if (cmd->cmd_done != pmcraid_ioa_reset) {
1835 pmcraid_return_cmd(cmd);
1836 }
1837
1838 atomic_dec(&pinstance->outstanding_cmds);
1839 spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags);
1840 }
1841
1842 spin_unlock_irqrestore(&pinstance->pending_pool_lock, lock_flags);
1843}
1844
1845/**
1846 * pmcraid_ioa_reset - Implementation of IOA reset logic
1847 *
1848 * @cmd: pointer to the cmd block to be used for entire reset process
1849 *
1850 * This function executes most of the steps required for IOA reset. It gets
1851 * called by user threads (modprobe/insmod/rmmod), timers, the tasklet and the
1852 * mid-layer's 'eh_' thread. Access to variables used for controlling the
1853 * reset sequence is synchronized using the host lock. The various functions
1854 * called during the reset process make use of a single command block, a
1855 * pointer to which is also stored in the adapter instance structure.
1856 *
1857 * Return Value
1858 * None
1859 */
1860static void pmcraid_ioa_reset(struct pmcraid_cmd *cmd)
1861{
1862 struct pmcraid_instance *pinstance = cmd->drv_inst;
1863 u8 reset_complete = 0;
1864
1865 pinstance->ioa_reset_in_progress = 1;
1866
1867 if (pinstance->reset_cmd != cmd) {
1868 pmcraid_err("reset is called with different command block\n");
1869 pinstance->reset_cmd = cmd;
1870 }
1871
1872 pmcraid_info("reset_engine: state = %d, command = %p\n",
1873 pinstance->ioa_state, cmd);
1874
1875 switch (pinstance->ioa_state) {
1876
1877 case IOA_STATE_DEAD:
1878		/* If the IOA is offline, whatever the reset reason may be,
1879		 * just return. Callers might be waiting on the reset wait_q,
1880		 * so wake them up
1881 */
1882		pmcraid_err("IOA is offline, no reset is possible\n");
1883 reset_complete = 1;
1884 break;
1885
1886 case IOA_STATE_IN_BRINGDOWN:
1887		/* We enter here once the ioa shutdown command is processed by
1888		 * the IOA. Alert the IOA for a possible reset; if the reset
1889		 * alert fails, the IOA goes through a hard-reset
1890 */
1891 pmcraid_disable_interrupts(pinstance, ~0);
1892 pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
1893 pmcraid_reset_alert(cmd);
1894 break;
1895
1896 case IOA_STATE_UNKNOWN:
1897		/* We may be called during probe or resume. Some pre-processing
1898		 * is required prior to the reset
1899 */
1900 scsi_block_requests(pinstance->host);
1901
1902 /* If asked to reset while IOA was processing responses or
1903 * there are any error responses then IOA may require
1904 * hard-reset.
1905 */
1906 if (pinstance->ioa_hard_reset == 0) {
1907 if (ioread32(pinstance->ioa_status) &
1908 INTRS_TRANSITION_TO_OPERATIONAL) {
1909 pmcraid_info("sticky bit set, bring-up\n");
1910 pinstance->ioa_state = IOA_STATE_IN_BRINGUP;
1911 pmcraid_reinit_cmdblk(cmd);
1912 pmcraid_identify_hrrq(cmd);
1913 } else {
1914 pinstance->ioa_state = IOA_STATE_IN_SOFT_RESET;
1915 pmcraid_soft_reset(cmd);
1916 }
1917 } else {
1918 /* Alert IOA of a possible reset and wait for critical
1919 * operation in progress bit to reset
1920 */
1921 pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
1922 pmcraid_reset_alert(cmd);
1923 }
1924 break;
1925
1926 case IOA_STATE_IN_RESET_ALERT:
1927		/* If the critical operation in progress bit is cleared or the
1928		 * wait timed out, the reset proceeds with starting BIST on the
1929		 * IOA. The IN_HARD_RESET state below keeps a count of reset
1930		 * attempts; once it exceeds the limit, the IOA is marked dead
1931 */
1932 pinstance->ioa_state = IOA_STATE_IN_HARD_RESET;
1933 pmcraid_start_bist(cmd);
1934 break;
1935
1936 case IOA_STATE_IN_HARD_RESET:
1937 pinstance->ioa_reset_attempts++;
1938
1939 /* retry reset if we haven't reached maximum allowed limit */
1940 if (pinstance->ioa_reset_attempts > PMCRAID_RESET_ATTEMPTS) {
1941 pinstance->ioa_reset_attempts = 0;
1942			pmcraid_err("IOA didn't respond, marking it as dead\n");
1943 pinstance->ioa_state = IOA_STATE_DEAD;
1944 reset_complete = 1;
1945 break;
1946 }
1947
1948 /* Once either bist or pci reset is done, restore PCI config
1949 * space. If this fails, proceed with hard reset again
1950 */
1951
1952 if (pci_restore_state(pinstance->pdev)) {
1953 pmcraid_info("config-space error resetting again\n");
1954 pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
1955 pmcraid_reset_alert(cmd);
1956 break;
1957 }
1958
1959 /* fail all pending commands */
1960 pmcraid_fail_outstanding_cmds(pinstance);
1961
1962 /* check if unit check is active, if so extract dump */
1963 if (pinstance->ioa_unit_check) {
1964 pmcraid_info("unit check is active\n");
1965 pinstance->ioa_unit_check = 0;
1966 pmcraid_get_dump(pinstance);
1967 pinstance->ioa_reset_attempts--;
1968 pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
1969 pmcraid_reset_alert(cmd);
1970 break;
1971 }
1972
1973		/* If the reset reason was to bring down the ioa, we are done
1974		 * with the reset now that pci config space is restored, so
1975		 * complete the reset
1976 */
1977 if (pinstance->ioa_bringdown) {
1978 pmcraid_info("bringing down the adapter\n");
1979 pinstance->ioa_shutdown_type = SHUTDOWN_NONE;
1980 pinstance->ioa_bringdown = 0;
1981 pinstance->ioa_state = IOA_STATE_UNKNOWN;
1982 reset_complete = 1;
1983 } else {
1984			/* The target is to bring up the IOA, so proceed with a
1985			 * soft reset. Reinitialize hrrq_buffers and their
1986			 * indices, and enable interrupts after pci_restore_state
1987 */
1988 if (pmcraid_reset_enable_ioa(pinstance)) {
1989 pinstance->ioa_state = IOA_STATE_IN_BRINGUP;
1990 pmcraid_info("bringing up the adapter\n");
1991 pmcraid_reinit_cmdblk(cmd);
1992 pmcraid_identify_hrrq(cmd);
1993 } else {
1994 pinstance->ioa_state = IOA_STATE_IN_SOFT_RESET;
1995 pmcraid_soft_reset(cmd);
1996 }
1997 }
1998 break;
1999
2000 case IOA_STATE_IN_SOFT_RESET:
2001 /* TRANSITION TO OPERATIONAL is on so start initialization
2002 * sequence
2003 */
2004 pmcraid_info("In softreset proceeding with bring-up\n");
2005 pinstance->ioa_state = IOA_STATE_IN_BRINGUP;
2006
2007 /* Initialization commands start with HRRQ identification. From
2008 * now on tasklet completes most of the commands as IOA is up
2009 * and intrs are enabled
2010 */
2011 pmcraid_identify_hrrq(cmd);
2012 break;
2013
2014 case IOA_STATE_IN_BRINGUP:
2015 /* we are done with bringing up of IOA, change the ioa_state to
2016 * operational and wake up any waiters
2017 */
2018 pinstance->ioa_state = IOA_STATE_OPERATIONAL;
2019 reset_complete = 1;
2020 break;
2021
2022 case IOA_STATE_OPERATIONAL:
2023 default:
2024 /* When IOA is operational and a reset is requested, check for
2025 * the reset reason. If reset is to bring down IOA, unregister
2026 * HCAMs and initiate shutdown; if adapter reset is forced then
2027 * restart reset sequence again
2028 */
2029 if (pinstance->ioa_shutdown_type == SHUTDOWN_NONE &&
2030 pinstance->force_ioa_reset == 0) {
2031 reset_complete = 1;
2032 } else {
2033 if (pinstance->ioa_shutdown_type != SHUTDOWN_NONE)
2034 pinstance->ioa_state = IOA_STATE_IN_BRINGDOWN;
2035 pmcraid_reinit_cmdblk(cmd);
2036 pmcraid_unregister_hcams(cmd);
2037 }
2038 break;
2039 }
2040
2041 /* reset will be completed if ioa_state is either DEAD or UNKNOWN or
2042 * OPERATIONAL. Reset all control variables used during reset, wake up
2043 * any waiting threads and let the SCSI mid-layer send commands. Note
2044 * that host_lock must be held before invoking scsi_report_bus_reset.
2045 */
2046 if (reset_complete) {
2047 pinstance->ioa_reset_in_progress = 0;
2048 pinstance->ioa_reset_attempts = 0;
2049 pinstance->reset_cmd = NULL;
2050 pinstance->ioa_shutdown_type = SHUTDOWN_NONE;
2051 pinstance->ioa_bringdown = 0;
2052 pmcraid_return_cmd(cmd);
2053
2054 /* If target state is to bring up the adapter, proceed with
2055 * hcam registration and resource exposure to mid-layer.
2056 */
2057 if (pinstance->ioa_state == IOA_STATE_OPERATIONAL)
2058 pmcraid_register_hcams(pinstance);
2059
2060 wake_up_all(&pinstance->reset_wait_q);
2061 }
2062
2063 return;
2064}
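
/* A rough sketch of the state machine driven above (common paths only;
 * failure paths can re-enter IN_RESET_ALERT, and too many attempts in
 * IN_HARD_RESET lead to DEAD):
 *
 *	OPERATIONAL --shutdown--> IN_BRINGDOWN --> IN_RESET_ALERT
 *	UNKNOWN --probe/resume--> IN_BRINGUP, IN_SOFT_RESET or IN_RESET_ALERT
 *	IN_RESET_ALERT --> IN_HARD_RESET --> IN_SOFT_RESET or IN_BRINGUP
 *	IN_SOFT_RESET --> IN_BRINGUP --> OPERATIONAL
 */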
2065
2066/**
2067 * pmcraid_initiate_reset - initiates the IOA reset sequence
2068 * @pinstance: pointer to adapter instance structure
2069 *
2070 * Called from the ISR/tasklet during error interrupts, including IOA unit
2071 * check. If a reset is already in progress it just returns; otherwise it
2072 * initiates an IOA reset to bring the IOA up to operational state.
2073 *
2074 * Return value
2075 *	 none
2076 */
2077static void pmcraid_initiate_reset(struct pmcraid_instance *pinstance)
2078{
2079 struct pmcraid_cmd *cmd;
2080
2081 /* If the reset is already in progress, just return, otherwise start
2082 * reset sequence and return
2083 */
2084 if (!pinstance->ioa_reset_in_progress) {
2085 scsi_block_requests(pinstance->host);
2086 cmd = pmcraid_get_free_cmd(pinstance);
2087
2088 if (cmd == NULL) {
2089 pmcraid_err("no cmnd blocks for initiate_reset\n");
2090 return;
2091 }
2092
2093 pinstance->ioa_shutdown_type = SHUTDOWN_NONE;
2094 pinstance->reset_cmd = cmd;
2095 pinstance->force_ioa_reset = 1;
2096 pmcraid_ioa_reset(cmd);
2097 }
2098}
2099
2100/**
2101 * pmcraid_reset_reload - utility routine for doing IOA reset either to bringup
2102 * or bringdown IOA
2103 * @pinstance: pointer to adapter instance structure
2104 * @shutdown_type: shutdown type to be used: NONE, NORMAL or ABBREV
2105 * @target_state: expected target state after reset
2106 *
2107 * Note: This function initiates a reset and waits for its completion. Hence it
2108 * must not be called from isr/timer/tasklet functions (timeout handlers,
2109 * error response handlers and interrupt handlers).
2110 *
2111 * Return Value
2112 * 1 in case ioa_state is not target_state, 0 otherwise.
2113 */
2114static int pmcraid_reset_reload(
2115 struct pmcraid_instance *pinstance,
2116 u8 shutdown_type,
2117 u8 target_state
2118)
2119{
2120 struct pmcraid_cmd *reset_cmd = NULL;
2121 unsigned long lock_flags;
2122 int reset = 1;
2123
2124 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
2125
2126 if (pinstance->ioa_reset_in_progress) {
2127 pmcraid_info("reset_reload: reset is already in progress\n");
2128
2129 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
2130
2131 wait_event(pinstance->reset_wait_q,
2132 !pinstance->ioa_reset_in_progress);
2133
2134 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
2135
2136 if (pinstance->ioa_state == IOA_STATE_DEAD) {
2137 spin_unlock_irqrestore(pinstance->host->host_lock,
2138 lock_flags);
2139 pmcraid_info("reset_reload: IOA is dead\n");
2140 return reset;
2141 } else if (pinstance->ioa_state == target_state) {
2142 reset = 0;
2143 }
2144 }
2145
2146 if (reset) {
2147 pmcraid_info("reset_reload: proceeding with reset\n");
2148 scsi_block_requests(pinstance->host);
2149 reset_cmd = pmcraid_get_free_cmd(pinstance);
2150
2151 if (reset_cmd == NULL) {
2152 pmcraid_err("no free cmnd for reset_reload\n");
2153 spin_unlock_irqrestore(pinstance->host->host_lock,
2154 lock_flags);
2155 return reset;
2156 }
2157
2158 if (shutdown_type == SHUTDOWN_NORMAL)
2159 pinstance->ioa_bringdown = 1;
2160
2161 pinstance->ioa_shutdown_type = shutdown_type;
2162 pinstance->reset_cmd = reset_cmd;
2163 pinstance->force_ioa_reset = reset;
2164 pmcraid_info("reset_reload: initiating reset\n");
2165 pmcraid_ioa_reset(reset_cmd);
2166 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
2167 pmcraid_info("reset_reload: waiting for reset to complete\n");
2168 wait_event(pinstance->reset_wait_q,
2169 !pinstance->ioa_reset_in_progress);
2170
2171		pmcraid_info("reset_reload: reset is complete\n");
2172 scsi_unblock_requests(pinstance->host);
2173 if (pinstance->ioa_state == target_state)
2174 reset = 0;
2175 }
2176
2177 return reset;
2178}
2179
2180/**
2181 * pmcraid_reset_bringdown - wrapper over pmcraid_reset_reload to bringdown IOA
2182 *
2183 * @pinstance: pointer to adapter instance structure
2184 *
2185 * Return Value
2186 * whatever is returned from pmcraid_reset_reload
2187 */
2188static int pmcraid_reset_bringdown(struct pmcraid_instance *pinstance)
2189{
2190 return pmcraid_reset_reload(pinstance,
2191 SHUTDOWN_NORMAL,
2192 IOA_STATE_UNKNOWN);
2193}
2194
2195/**
2196 * pmcraid_reset_bringup - wrapper over pmcraid_reset_reload to bring up IOA
2197 *
2198 * @pinstance: pointer to adapter instance structure
2199 *
2200 * Return Value
2201 * whatever is returned from pmcraid_reset_reload
2202 */
2203static int pmcraid_reset_bringup(struct pmcraid_instance *pinstance)
2204{
2205 return pmcraid_reset_reload(pinstance,
2206 SHUTDOWN_NONE,
2207 IOA_STATE_OPERATIONAL);
2208}
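
/* These two wrappers are the blocking entry points used from process
 * context (for example module load/unload and PM paths); per the note
 * above pmcraid_reset_reload, they must not be called from isr/timer/
 * tasklet contexts.
 */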
2209
2210/**
2211 * pmcraid_request_sense - Send request sense to a device
2212 * @cmd: pmcraid command struct
2213 *
2214 * This function sends a request sense to a device as a result of a check
2215 * condition. This method re-uses the same command block that failed earlier.
2216 */
2217static void pmcraid_request_sense(struct pmcraid_cmd *cmd)
2218{
2219 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
2220 struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
2221
2222 /* allocate DMAable memory for sense buffers */
2223 cmd->sense_buffer = pci_alloc_consistent(cmd->drv_inst->pdev,
2224 SCSI_SENSE_BUFFERSIZE,
2225 &cmd->sense_buffer_dma);
2226
2227 if (cmd->sense_buffer == NULL) {
2228		pmcraid_err("couldn't allocate sense buffer for request sense\n");
2230 pmcraid_erp_done(cmd);
2231 return;
2232 }
2233
2234 /* re-use the command block */
2235 memset(&cmd->ioa_cb->ioasa, 0, sizeof(struct pmcraid_ioasa));
2236 memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
2237 ioarcb->request_flags0 = (SYNC_COMPLETE |
2238 NO_LINK_DESCS |
2239 INHIBIT_UL_CHECK);
2240 ioarcb->request_type = REQ_TYPE_SCSI;
2241 ioarcb->cdb[0] = REQUEST_SENSE;
2242 ioarcb->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2243
2244 ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
2245 offsetof(struct pmcraid_ioarcb,
2246 add_data.u.ioadl[0]));
2247 ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
2248
2249 ioarcb->data_transfer_length = cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
2250
2251 ioadl->address = cpu_to_le64(cmd->sense_buffer_dma);
2252 ioadl->data_len = cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
2253 ioadl->flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC);
2254
2255	/* request sense might be called as part of error response processing,
2256	 * which runs in tasklet context. It is possible that the mid-layer
2257	 * might schedule queuecommand during this time, hence writing to
2258	 * IOARRIN must be protected by the host_lock
2259 */
2260 pmcraid_send_cmd(cmd, pmcraid_erp_done,
2261 PMCRAID_REQUEST_SENSE_TIMEOUT,
2262 pmcraid_timeout_handler);
2263}
2264
2265/**
2266 * pmcraid_cancel_all - cancel all outstanding IOARCBs as part of error recovery
2267 * @cmd: command that failed
2268 * @sense: true if sense data is already available, so no request sense is
 *	   needed after the cancel all
2269 *
2270 * This function sends a cancel all to a device to clear the queue.
2271 */
2272static void pmcraid_cancel_all(struct pmcraid_cmd *cmd, u32 sense)
2273{
2274 struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
2275 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
2276 struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata;
2277 void (*cmd_done) (struct pmcraid_cmd *) = sense ? pmcraid_erp_done
2278 : pmcraid_request_sense;
2279
2280 memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
2281 ioarcb->request_flags0 = SYNC_OVERRIDE;
2282 ioarcb->request_type = REQ_TYPE_IOACMD;
2283 ioarcb->cdb[0] = PMCRAID_CANCEL_ALL_REQUESTS;
2284
2285 if (RES_IS_GSCSI(res->cfg_entry))
2286 ioarcb->cdb[1] = PMCRAID_SYNC_COMPLETE_AFTER_CANCEL;
2287
2288 ioarcb->ioadl_bus_addr = 0;
2289 ioarcb->ioadl_length = 0;
2290 ioarcb->data_transfer_length = 0;
2291 ioarcb->ioarcb_bus_addr &= (~0x1FULL);
2292
2293	/* writing to IOARRIN must be protected by host_lock, as the mid-layer
2294	 * may schedule queuecommand while we are doing this
2295 */
2296 pmcraid_send_cmd(cmd, cmd_done,
2297 PMCRAID_REQUEST_SENSE_TIMEOUT,
2298 pmcraid_timeout_handler);
2299}
2300
2301/**
2302 * pmcraid_frame_auto_sense - frame sense information for a failed command
2303 *
2304 * @cmd: pointer to failing command block
2305 *
2306 * Return value
2307 * none
2308 */
2309static void pmcraid_frame_auto_sense(struct pmcraid_cmd *cmd)
2310{
2311 u8 *sense_buf = cmd->scsi_cmd->sense_buffer;
2312 struct pmcraid_resource_entry *res = cmd->scsi_cmd->device->hostdata;
2313 struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa;
2314 u32 ioasc = le32_to_cpu(ioasa->ioasc);
2315 u32 failing_lba = 0;
2316
2317 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
2318 cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
2319
2320 if (RES_IS_VSET(res->cfg_entry) &&
2321 ioasc == PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC &&
2322 ioasa->u.vset.failing_lba_hi != 0) {
2323
2324 sense_buf[0] = 0x72;
2325 sense_buf[1] = PMCRAID_IOASC_SENSE_KEY(ioasc);
2326 sense_buf[2] = PMCRAID_IOASC_SENSE_CODE(ioasc);
2327 sense_buf[3] = PMCRAID_IOASC_SENSE_QUAL(ioasc);
2328
2329 sense_buf[7] = 12;
2330 sense_buf[8] = 0;
2331 sense_buf[9] = 0x0A;
2332 sense_buf[10] = 0x80;
2333
2334 failing_lba = le32_to_cpu(ioasa->u.vset.failing_lba_hi);
2335
2336 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
2337 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
2338 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
2339 sense_buf[15] = failing_lba & 0x000000ff;
2340
2341 failing_lba = le32_to_cpu(ioasa->u.vset.failing_lba_lo);
2342
2343 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
2344 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
2345 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
2346 sense_buf[19] = failing_lba & 0x000000ff;
2347 } else {
2348 sense_buf[0] = 0x70;
2349 sense_buf[2] = PMCRAID_IOASC_SENSE_KEY(ioasc);
2350 sense_buf[12] = PMCRAID_IOASC_SENSE_CODE(ioasc);
2351 sense_buf[13] = PMCRAID_IOASC_SENSE_QUAL(ioasc);
2352
2353 if (ioasc == PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC) {
2354 if (RES_IS_VSET(res->cfg_entry))
2355				failing_lba =
2356				    le32_to_cpu(ioasa->u.vset.failing_lba_lo);
2358 sense_buf[0] |= 0x80;
2359 sense_buf[3] = (failing_lba >> 24) & 0xff;
2360 sense_buf[4] = (failing_lba >> 16) & 0xff;
2361 sense_buf[5] = (failing_lba >> 8) & 0xff;
2362 sense_buf[6] = failing_lba & 0xff;
2363 }
2364
2365 sense_buf[7] = 6; /* additional length */
2366 }
2367}
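
/* For reference, the two sense formats framed above (per SPC):
 * fixed (0x70): byte 0 = 0x70, ORed with 0x80 when the LBA in bytes 3-6
 * is valid; byte 2 = sense key; byte 7 = additional length; bytes 12/13
 * = ASC/ASCQ.
 * descriptor (0x72): byte 1 = sense key; bytes 2/3 = ASC/ASCQ; byte 7 =
 * additional length; followed by an information descriptor carrying the
 * 64-bit failing LBA in bytes 12-19.
 */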
2368
2369/**
2370 * pmcraid_error_handler - Error response handlers for a SCSI op
2371 * @cmd: pointer to pmcraid_cmd that has failed
2372 *
2373 * This function determines whether or not to initiate ERP on the affected
2374 * device. This is called from a tasklet, which doesn't hold any locks.
2375 *
2376 * Return value:
2377 *	 0 if the caller can complete the request; otherwise 1, in which case
2378 *	 the error handler itself completes the request and returns the
2379 *	 command block back to the free pool
2380 */
2381static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
2382{
2383 struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
2384 struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata;
2385 struct pmcraid_instance *pinstance = cmd->drv_inst;
2386 struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa;
2387 u32 ioasc = le32_to_cpu(ioasa->ioasc);
2388 u32 masked_ioasc = ioasc & PMCRAID_IOASC_SENSE_MASK;
2389 u32 sense_copied = 0;
2390
2391 if (!res) {
2392 pmcraid_info("resource pointer is NULL\n");
2393 return 0;
2394 }
2395
2396 /* If this was a SCSI read/write command keep count of errors */
2397 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
2398 atomic_inc(&res->read_failures);
2399 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
2400 atomic_inc(&res->write_failures);
2401
2402 if (!RES_IS_GSCSI(res->cfg_entry) &&
2403 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
2404 pmcraid_frame_auto_sense(cmd);
2405 }
2406
2407 /* Log IOASC/IOASA information based on user settings */
2408 pmcraid_ioasc_logger(ioasc, cmd);
2409
2410 switch (masked_ioasc) {
2411
2412 case PMCRAID_IOASC_AC_TERMINATED_BY_HOST:
2413 scsi_cmd->result |= (DID_ABORT << 16);
2414 break;
2415
2416 case PMCRAID_IOASC_IR_INVALID_RESOURCE_HANDLE:
2417 case PMCRAID_IOASC_HW_CANNOT_COMMUNICATE:
2418 scsi_cmd->result |= (DID_NO_CONNECT << 16);
2419 break;
2420
2421 case PMCRAID_IOASC_NR_SYNC_REQUIRED:
2422 res->sync_reqd = 1;
2423 scsi_cmd->result |= (DID_IMM_RETRY << 16);
2424 break;
2425
2426 case PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC:
2427 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
2428 break;
2429
2430 case PMCRAID_IOASC_UA_BUS_WAS_RESET:
2431 case PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER:
2432 if (!res->reset_progress)
2433 scsi_report_bus_reset(pinstance->host,
2434 scsi_cmd->device->channel);
2435 scsi_cmd->result |= (DID_ERROR << 16);
2436 break;
2437
2438 case PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR:
2439 scsi_cmd->result |= PMCRAID_IOASC_SENSE_STATUS(ioasc);
2440 res->sync_reqd = 1;
2441
2442 /* if check_condition is not active return with error otherwise
2443 * get/frame the sense buffer
2444 */
2445 if (PMCRAID_IOASC_SENSE_STATUS(ioasc) !=
2446 SAM_STAT_CHECK_CONDITION &&
2447 PMCRAID_IOASC_SENSE_STATUS(ioasc) != SAM_STAT_ACA_ACTIVE)
2448 return 0;
2449
2450 /* If we have auto sense data as part of IOASA pass it to
2451 * mid-layer
2452 */
2453 if (ioasa->auto_sense_length != 0) {
2454			short sense_len = le16_to_cpu(ioasa->auto_sense_length);
2455			int data_size = min_t(u16, sense_len,
2456					      SCSI_SENSE_BUFFERSIZE);
2457
2458 memcpy(scsi_cmd->sense_buffer,
2459 ioasa->sense_data,
2460 data_size);
2461 sense_copied = 1;
2462 }
2463
2464 if (RES_IS_GSCSI(res->cfg_entry)) {
2465 pmcraid_cancel_all(cmd, sense_copied);
2466 } else if (sense_copied) {
2467 pmcraid_erp_done(cmd);
2468 return 0;
2469 } else {
2470 pmcraid_request_sense(cmd);
2471 }
2472
2473 return 1;
2474
2475 case PMCRAID_IOASC_NR_INIT_CMD_REQUIRED:
2476 break;
2477
2478 default:
2479 if (PMCRAID_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
2480 scsi_cmd->result |= (DID_ERROR << 16);
2481 break;
2482 }
2483 return 0;
2484}
2485
2486/**
2487 * pmcraid_reset_device - device reset handler functions
2488 *
2489 * @scsi_cmd: scsi command struct
 * @timeout: timeout value to be used for the reset command
2490 * @modifier: reset modifier indicating the reset sequence to be performed
2491 *
2492 * This function issues a device reset to the affected device.
2493 * A LUN reset will be sent to the device first. If that does
2494 * not work, a target reset will be sent.
2495 *
2496 * Return value:
2497 * SUCCESS / FAILED
2498 */
2499static int pmcraid_reset_device(
2500 struct scsi_cmnd *scsi_cmd,
2501 unsigned long timeout,
2502 u8 modifier
2503)
2504{
2505 struct pmcraid_cmd *cmd;
2506 struct pmcraid_instance *pinstance;
2507 struct pmcraid_resource_entry *res;
2508 struct pmcraid_ioarcb *ioarcb;
2509 unsigned long lock_flags;
2510 u32 ioasc;
2511
2512 pinstance =
2513 (struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
2514 res = scsi_cmd->device->hostdata;
2515
2516 if (!res) {
2517 pmcraid_err("reset_device: NULL resource pointer\n");
2518 return FAILED;
2519 }
2520
2521 /* If adapter is currently going through reset/reload, return failed.
2522 * This will force the mid-layer to call _eh_bus/host reset, which
2523 * will then go to sleep and wait for the reset to complete
2524 */
2525 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
2526 if (pinstance->ioa_reset_in_progress ||
2527 pinstance->ioa_state == IOA_STATE_DEAD) {
2528 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
2529 return FAILED;
2530 }
2531
2532 res->reset_progress = 1;
2533 pmcraid_info("Resetting %s resource with addr %x\n",
2534 ((modifier & RESET_DEVICE_LUN) ? "LUN" :
2535 ((modifier & RESET_DEVICE_TARGET) ? "TARGET" : "BUS")),
2536 le32_to_cpu(res->cfg_entry.resource_address));
2537
2538 /* get a free cmd block */
2539 cmd = pmcraid_get_free_cmd(pinstance);
2540
2541 if (cmd == NULL) {
2542 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
2543 pmcraid_err("%s: no cmd blocks are available\n", __func__);
2544 return FAILED;
2545 }
2546
2547 ioarcb = &cmd->ioa_cb->ioarcb;
2548 ioarcb->resource_handle = res->cfg_entry.resource_handle;
2549 ioarcb->request_type = REQ_TYPE_IOACMD;
2550 ioarcb->cdb[0] = PMCRAID_RESET_DEVICE;
2551
2552 /* Initialize reset modifier bits */
2553 if (modifier)
2554 modifier = ENABLE_RESET_MODIFIER | modifier;
2555
2556 ioarcb->cdb[1] = modifier;
2557
2558 init_completion(&cmd->wait_for_completion);
2559 cmd->completion_req = 1;
2560
2561 pmcraid_info("cmd(CDB[0] = %x) for %x with index = %d\n",
2562 cmd->ioa_cb->ioarcb.cdb[0],
2563 le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle),
2564 le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2);
2565
2566 pmcraid_send_cmd(cmd,
2567 pmcraid_internal_done,
2568 timeout,
2569 pmcraid_timeout_handler);
2570
2571 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
2572
2573 /* RESET_DEVICE command completes after all pending IOARCBs are
2574	 * completed. Once this command is completed, pmcraid_internal_done
2575 * will wake up the 'completion' queue.
2576 */
2577 wait_for_completion(&cmd->wait_for_completion);
2578
2579 /* complete the command here itself and return the command block
2580 * to free list
2581 */
2582 pmcraid_return_cmd(cmd);
2583 res->reset_progress = 0;
2584 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
2585
2586 /* set the return value based on the returned ioasc */
2587 return PMCRAID_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
2588}
2589
2590/**
2591 * _pmcraid_io_done - helper for pmcraid_io_done function
2592 *
2593 * @cmd: pointer to pmcraid command struct
2594 * @reslen: residual data length to be set in the ioasa
2595 * @ioasc: ioasc either returned by IOA or set by driver itself.
2596 *
2597 * This function is invoked by pmcraid_io_done to complete mid-layer
2598 * scsi ops.
2599 *
2600 * Return value:
2601 *	 0 if the caller is required to return the command block to the free
2602 *	 pool; 1 if the caller need not worry about freeing it, as the error
2603 *	 handler will take care of that.
2604 */
2605
2606static int _pmcraid_io_done(struct pmcraid_cmd *cmd, int reslen, int ioasc)
2607{
2608 struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
2609 int rc = 0;
2610
2611 scsi_set_resid(scsi_cmd, reslen);
2612
2613 pmcraid_info("response(%d) CDB[0] = %x ioasc:result: %x:%x\n",
2614 le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
2615 cmd->ioa_cb->ioarcb.cdb[0],
2616 ioasc, scsi_cmd->result);
2617
2618 if (PMCRAID_IOASC_SENSE_KEY(ioasc) != 0)
2619 rc = pmcraid_error_handler(cmd);
2620
2621 if (rc == 0) {
2622 scsi_dma_unmap(scsi_cmd);
2623 scsi_cmd->scsi_done(scsi_cmd);
2624 }
2625
2626 return rc;
2627}
2628
2629/**
2630 * pmcraid_io_done - SCSI completion function
2631 *
2632 * @cmd: pointer to pmcraid command struct
2633 *
2634 * This function is invoked by the tasklet/mid-layer error handler to complete
2635 * the SCSI ops sent from the mid-layer.
2636 *
2637 * Return value
2638 * none
2639 */
2640
2641static void pmcraid_io_done(struct pmcraid_cmd *cmd)
2642{
2643 u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
2644 u32 reslen = le32_to_cpu(cmd->ioa_cb->ioasa.residual_data_length);
2645
2646 if (_pmcraid_io_done(cmd, reslen, ioasc) == 0)
2647 pmcraid_return_cmd(cmd);
2648}
2649
2650/**
2651 * pmcraid_abort_cmd - Aborts a single IOARCB already submitted to IOA
2652 *
2653 * @cmd: command block of the command to be aborted
2654 *
2655 * Return Value:
2656 * returns pointer to command structure used as cancelling cmd
2657 */
2658static struct pmcraid_cmd *pmcraid_abort_cmd(struct pmcraid_cmd *cmd)
2659{
2660 struct pmcraid_cmd *cancel_cmd;
2661 struct pmcraid_instance *pinstance;
2662 struct pmcraid_resource_entry *res;
2663
2664 pinstance = (struct pmcraid_instance *)cmd->drv_inst;
2665 res = cmd->scsi_cmd->device->hostdata;
2666
2667 cancel_cmd = pmcraid_get_free_cmd(pinstance);
2668
2669 if (cancel_cmd == NULL) {
2670 pmcraid_err("%s: no cmd blocks are available\n", __func__);
2671 return NULL;
2672 }
2673
2674 pmcraid_prepare_cancel_cmd(cancel_cmd, cmd);
2675
2676	pmcraid_info("aborting command CDB[0]= %x with index = %d\n",
2677		     cmd->ioa_cb->ioarcb.cdb[0],
2678		     le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2);
2679
2680 init_completion(&cancel_cmd->wait_for_completion);
2681 cancel_cmd->completion_req = 1;
2682
2683 pmcraid_info("command (%d) CDB[0] = %x for %x\n",
2684 le32_to_cpu(cancel_cmd->ioa_cb->ioarcb.response_handle) >> 2,
2685 cmd->ioa_cb->ioarcb.cdb[0],
2686 le32_to_cpu(cancel_cmd->ioa_cb->ioarcb.resource_handle));
2687
2688 pmcraid_send_cmd(cancel_cmd,
2689 pmcraid_internal_done,
2690 PMCRAID_INTERNAL_TIMEOUT,
2691 pmcraid_timeout_handler);
2692 return cancel_cmd;
2693}
2694
2695/**
2696 * pmcraid_abort_complete - Waits for ABORT TASK completion
2697 *
2698 * @cancel_cmd: command block use as cancelling command
2699 *
2700 * Return Value:
2701 * returns SUCCESS if ABORT TASK has good completion
2702 * otherwise FAILED
2703 */
2704static int pmcraid_abort_complete(struct pmcraid_cmd *cancel_cmd)
2705{
2706 struct pmcraid_resource_entry *res;
2707 u32 ioasc;
2708
2709 wait_for_completion(&cancel_cmd->wait_for_completion);
2710 res = cancel_cmd->u.res;
2711 cancel_cmd->u.res = NULL;
2712 ioasc = le32_to_cpu(cancel_cmd->ioa_cb->ioasa.ioasc);
2713
2714	/* If the abort task did not time out, we will get a good completion
2715	 * as the sense key; otherwise we may get one of the following
2716	 * responses due to a subsequent bus reset or device reset. If the
2717	 * IOASC is NR_SYNC_REQUIRED, set the sync_reqd flag for the resource
2718 */
2719 if (ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET ||
2720 ioasc == PMCRAID_IOASC_NR_SYNC_REQUIRED) {
2721 if (ioasc == PMCRAID_IOASC_NR_SYNC_REQUIRED)
2722 res->sync_reqd = 1;
2723 ioasc = 0;
2724 }
2725
2726 /* complete the command here itself */
2727 pmcraid_return_cmd(cancel_cmd);
2728 return PMCRAID_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
2729}
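
/* pmcraid_abort_cmd() and pmcraid_abort_complete() are used as a pair:
 * the former fires ABORT TASK and returns the cancelling command block,
 * the latter blocks on its completion and interprets the IOASC. See
 * pmcraid_eh_abort_handler below; the passthrough ioctl timeout path
 * reuses pmcraid_abort_cmd with its own wait_for_completion.
 */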
2730
2731/**
2732 * pmcraid_eh_abort_handler - entry point for aborting a single task on errors
2733 *
2734 * @scsi_cmd: scsi command struct given by the mid-layer. When this is called,
2735 *		the mid-layer ensures that no other commands are queued. This
2736 *		never gets called under interrupt, but from a separate eh thread.
2737 *
2738 * Return value:
2739 * SUCCESS / FAILED
2740 */
2741static int pmcraid_eh_abort_handler(struct scsi_cmnd *scsi_cmd)
2742{
2743 struct pmcraid_instance *pinstance;
2744 struct pmcraid_cmd *cmd;
2745 struct pmcraid_resource_entry *res;
2746 unsigned long host_lock_flags;
2747 unsigned long pending_lock_flags;
2748 struct pmcraid_cmd *cancel_cmd = NULL;
2749 int cmd_found = 0;
2750 int rc = FAILED;
2751
2752 pinstance =
2753 (struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
2754
2755 dev_err(&pinstance->pdev->dev,
2756 "I/O command timed out, aborting it.\n");
2757
2758 res = scsi_cmd->device->hostdata;
2759
2760 if (res == NULL)
2761 return rc;
2762
2763 /* If we are currently going through reset/reload, return failed.
2764 * This will force the mid-layer to eventually call
2765 * pmcraid_eh_host_reset which will then go to sleep and wait for the
2766 * reset to complete
2767 */
2768 spin_lock_irqsave(pinstance->host->host_lock, host_lock_flags);
2769
2770 if (pinstance->ioa_reset_in_progress ||
2771 pinstance->ioa_state == IOA_STATE_DEAD) {
2772 spin_unlock_irqrestore(pinstance->host->host_lock,
2773 host_lock_flags);
2774 return rc;
2775 }
2776
2777	/* Loop over the pending cmd list to find the cmd corresponding to
2778	 * this scsi_cmd. Note that the command might already have completed.
2779	 * Locking: all pending commands are protected with the
2780	 * pending_pool_lock.
2781 */
2782 spin_lock_irqsave(&pinstance->pending_pool_lock, pending_lock_flags);
2783 list_for_each_entry(cmd, &pinstance->pending_cmd_pool, free_list) {
2784
2785 if (cmd->scsi_cmd == scsi_cmd) {
2786 cmd_found = 1;
2787 break;
2788 }
2789 }
2790
2791 spin_unlock_irqrestore(&pinstance->pending_pool_lock,
2792 pending_lock_flags);
2793
2794 /* If the command to be aborted was given to IOA and still pending with
2795 * it, send ABORT_TASK to abort this and wait for its completion
2796 */
2797 if (cmd_found)
2798 cancel_cmd = pmcraid_abort_cmd(cmd);
2799
2800 spin_unlock_irqrestore(pinstance->host->host_lock,
2801 host_lock_flags);
2802
2803 if (cancel_cmd) {
2804 cancel_cmd->u.res = cmd->scsi_cmd->device->hostdata;
2805 rc = pmcraid_abort_complete(cancel_cmd);
2806 }
2807
2808 return cmd_found ? rc : SUCCESS;
2809}
2810
2811/**
2812 * pmcraid_eh_xxxx_reset_handler - bus/target/device reset handler callbacks
2813 *
2814 * @scmd: pointer to scsi_cmd that was sent to the resource to be reset.
2815 *
2816 * All these routines invoke pmcraid_reset_device with appropriate parameters.
2817 * Since they are called from the mid-layer EH thread, no other IO will be
2818 * queued to the resource being reset. However, the control path (IOCTL) may be
2819 * active, so it is necessary to synchronize IOARRIN writes, which
2820 * pmcraid_reset_device takes care of by locking/unlocking the host_lock.
2821 *
2822 * Return value
2823 * SUCCESS or FAILED
2824 */
2825static int pmcraid_eh_device_reset_handler(struct scsi_cmnd *scmd)
2826{
2827 pmcraid_err("Doing device reset due to an I/O command timeout.\n");
2828 return pmcraid_reset_device(scmd,
2829 PMCRAID_INTERNAL_TIMEOUT,
2830 RESET_DEVICE_LUN);
2831}
2832
2833static int pmcraid_eh_bus_reset_handler(struct scsi_cmnd *scmd)
2834{
2835 pmcraid_err("Doing bus reset due to an I/O command timeout.\n");
2836 return pmcraid_reset_device(scmd,
2837 PMCRAID_RESET_BUS_TIMEOUT,
2838 RESET_DEVICE_BUS);
2839}
2840
2841static int pmcraid_eh_target_reset_handler(struct scsi_cmnd *scmd)
2842{
2843 pmcraid_err("Doing target reset due to an I/O command timeout.\n");
2844 return pmcraid_reset_device(scmd,
2845 PMCRAID_INTERNAL_TIMEOUT,
2846 RESET_DEVICE_TARGET);
2847}
2848
2849/**
2850 * pmcraid_eh_host_reset_handler - adapter reset handler callback
2851 *
2852 * @scmd: pointer to scsi_cmd that was sent to a resource of adapter
2853 *
2854 * Initiates adapter reset to bring it up to operational state
2855 *
2856 * Return value
2857 * SUCCESS or FAILED
2858 */
2859static int pmcraid_eh_host_reset_handler(struct scsi_cmnd *scmd)
2860{
2861 unsigned long interval = 10000; /* 10 seconds interval */
2862 int waits = jiffies_to_msecs(PMCRAID_RESET_HOST_TIMEOUT) / interval;
2863 struct pmcraid_instance *pinstance =
2864 (struct pmcraid_instance *)(scmd->device->host->hostdata);
2865
2866
2867	/* Wait for an additional 150 seconds (15 polls at 10-second intervals)
2868	 * in case the firmware comes up and completes all pending commands,
2869	 * excluding the two HCAMs (CCN and LDN).
2870 */
2871 while (waits--) {
2872 if (atomic_read(&pinstance->outstanding_cmds) <=
2873 PMCRAID_MAX_HCAM_CMD)
2874 return SUCCESS;
2875 msleep(interval);
2876 }
2877
2878 dev_err(&pinstance->pdev->dev,
2879 "Adapter being reset due to an I/O command timeout.\n");
2880 return pmcraid_reset_bringup(pinstance) == 0 ? SUCCESS : FAILED;
2881}
2882
2883/**
2884 * pmcraid_task_attributes - Translate SPI Q-Tags to task attributes
2885 * @scsi_cmd: scsi command struct
2886 *
2887 * Return value
2888 *	 task attribute value for the tag type, or 0 if the task is not tagged
2889 */
2890static u8 pmcraid_task_attributes(struct scsi_cmnd *scsi_cmd)
2891{
2892 char tag[2];
2893 u8 rc = 0;
2894
2895 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
2896 switch (tag[0]) {
2897 case MSG_SIMPLE_TAG:
2898 rc = TASK_TAG_SIMPLE;
2899 break;
2900 case MSG_HEAD_TAG:
2901 rc = TASK_TAG_QUEUE_HEAD;
2902 break;
2903 case MSG_ORDERED_TAG:
2904 rc = TASK_TAG_ORDERED;
2905 break;
2906		}
2907 }
2908
2909 return rc;
2910}
2911
2912
2913/**
2914 * pmcraid_init_ioadls - initializes IOADL related fields in IOARCB
2915 * @cmd: pmcraid command struct
2916 * @sgcount: count of scatter-gather elements
2917 *
2918 * Return value
2919 *	 returns a pointer to pmcraid_ioadl_desc, initialized to point to internal
2920 * or external IOADLs
2921 */
2922struct pmcraid_ioadl_desc *
2923pmcraid_init_ioadls(struct pmcraid_cmd *cmd, int sgcount)
2924{
2925 struct pmcraid_ioadl_desc *ioadl;
2926 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
2927 int ioadl_count = 0;
2928
2929 if (ioarcb->add_cmd_param_length)
2930 ioadl_count = DIV_ROUND_UP(ioarcb->add_cmd_param_length, 16);
2931	ioarcb->ioadl_length =
2932		cpu_to_le32(sizeof(struct pmcraid_ioadl_desc) * sgcount);
2933
2934 if ((sgcount + ioadl_count) > (ARRAY_SIZE(ioarcb->add_data.u.ioadl))) {
2935		/* External ioadls start at offset 0x80 from the control_block
2936		 * structure, re-using 24 of the 27 ioadls in the IOARCB. The
2937		 * driver must indicate to the firmware that these ioadls are
2938		 * to be treated as external to the IOARCB.
2939 */
2940 ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
2941 ioarcb->ioadl_bus_addr =
2942 cpu_to_le64((cmd->ioa_cb_bus_addr) +
2943 offsetof(struct pmcraid_ioarcb,
2944 add_data.u.ioadl[3]));
2945 ioadl = &ioarcb->add_data.u.ioadl[3];
2946 } else {
2947 ioarcb->ioadl_bus_addr =
2948 cpu_to_le64((cmd->ioa_cb_bus_addr) +
2949 offsetof(struct pmcraid_ioarcb,
2950 add_data.u.ioadl[ioadl_count]));
2951
2952 ioadl = &ioarcb->add_data.u.ioadl[ioadl_count];
2953 ioarcb->ioarcb_bus_addr |=
2954 DIV_ROUND_CLOSEST(sgcount + ioadl_count, 8);
2955 }
2956
2957 return ioadl;
2958}
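
/* Worked example (values illustrative): with add_cmd_param_length = 32,
 * ioadl_count = DIV_ROUND_UP(32, 16) = 2 internal slots are reserved for
 * additional command parameters, so a request with sgcount = 4 places its
 * descriptors in add_data.u.ioadl[2..5] and ORs
 * DIV_ROUND_CLOSEST(4 + 2, 8) = 1 into the low bits of ioarcb_bus_addr.
 * If sgcount + ioadl_count exceeded the internal array, the descriptors
 * would instead start at add_data.u.ioadl[3] and be flagged as external.
 */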
2959
2960/**
2961 * pmcraid_build_ioadl - Build a scatter/gather list and map the buffer
2962 * @pinstance: pointer to adapter instance structure
2963 * @cmd: pmcraid command struct
2964 *
2965 * This function is invoked by queuecommand entry point while sending a command
2966 * to firmware. This builds ioadl descriptors and sets up ioarcb fields.
2967 *
2968 * Return value:
2969 * 0 on success or -1 on failure
2970 */
2971static int pmcraid_build_ioadl(
2972 struct pmcraid_instance *pinstance,
2973 struct pmcraid_cmd *cmd
2974)
2975{
2976 int i, nseg;
2977 struct scatterlist *sglist;
2978
2979 struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
2980 struct pmcraid_ioarcb *ioarcb = &(cmd->ioa_cb->ioarcb);
2981 struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
2982
2983 u32 length = scsi_bufflen(scsi_cmd);
2984
2985 if (!length)
2986 return 0;
2987
2988 nseg = scsi_dma_map(scsi_cmd);
2989
2990 if (nseg < 0) {
2991		dev_err(&pinstance->pdev->dev, "scsi_dma_map failed!\n");
2992 return -1;
2993 } else if (nseg > PMCRAID_MAX_IOADLS) {
2994 scsi_dma_unmap(scsi_cmd);
2995 dev_err(&pinstance->pdev->dev,
2996 "sg count is (%d) more than allowed!\n", nseg);
2997 return -1;
2998 }
2999
3000 /* Initialize IOARCB data transfer length fields */
3001 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE)
3002 ioarcb->request_flags0 |= TRANSFER_DIR_WRITE;
3003
3004 ioarcb->request_flags0 |= NO_LINK_DESCS;
3005 ioarcb->data_transfer_length = cpu_to_le32(length);
3006 ioadl = pmcraid_init_ioadls(cmd, nseg);
3007
3008 /* Initialize IOADL descriptor addresses */
3009 scsi_for_each_sg(scsi_cmd, sglist, nseg, i) {
3010 ioadl[i].data_len = cpu_to_le32(sg_dma_len(sglist));
3011 ioadl[i].address = cpu_to_le64(sg_dma_address(sglist));
3012 ioadl[i].flags = 0;
3013 }
3014 /* setup last descriptor */
3015 ioadl[i - 1].flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC);
3016
3017 return 0;
3018}
3019
3020/**
3021 * pmcraid_free_sglist - Frees an allocated SG buffer list
3022 * @sglist: scatter/gather list pointer
3023 *
3024 * Frees DMA'able memory previously allocated with pmcraid_alloc_sglist
3025 *
3026 * Return value:
3027 * none
3028 */
3029static void pmcraid_free_sglist(struct pmcraid_sglist *sglist)
3030{
3031 int i;
3032
3033 for (i = 0; i < sglist->num_sg; i++)
3034 __free_pages(sg_page(&(sglist->scatterlist[i])),
3035 sglist->order);
3036
3037 kfree(sglist);
3038}
3039
3040/**
3041 * pmcraid_alloc_sglist - Allocates memory for a SG list
3042 * @buflen: buffer length
3043 *
3044 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3045 * list.
3046 *
3047 * Return value
3048 * pointer to sglist / NULL on failure
3049 */
3050static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen)
3051{
3052 struct pmcraid_sglist *sglist;
3053 struct scatterlist *scatterlist;
3054 struct page *page;
3055 int num_elem, i, j;
3056 int sg_size;
3057 int order;
3058 int bsize_elem;
3059
3060 sg_size = buflen / (PMCRAID_MAX_IOADLS - 1);
3061 order = (sg_size > 0) ? get_order(sg_size) : 0;
3062 bsize_elem = PAGE_SIZE * (1 << order);
3063
3064 /* Determine the actual number of sg entries needed */
3065 if (buflen % bsize_elem)
3066 num_elem = (buflen / bsize_elem) + 1;
3067 else
3068 num_elem = buflen / bsize_elem;
3069
3070 /* Allocate a scatter/gather list for the DMA */
3071 sglist = kzalloc(sizeof(struct pmcraid_sglist) +
3072 (sizeof(struct scatterlist) * (num_elem - 1)),
3073 GFP_KERNEL);
3074
3075 if (sglist == NULL)
3076 return NULL;
3077
3078 scatterlist = sglist->scatterlist;
3079 sg_init_table(scatterlist, num_elem);
3080 sglist->order = order;
3081 sglist->num_sg = num_elem;
3082 sg_size = buflen;
3083
3084 for (i = 0; i < num_elem; i++) {
3085 page = alloc_pages(GFP_KERNEL|GFP_DMA, order);
3086 if (!page) {
3087 for (j = i - 1; j >= 0; j--)
3088 __free_pages(sg_page(&scatterlist[j]), order);
3089 kfree(sglist);
3090 return NULL;
3091 }
3092
3093 sg_set_page(&scatterlist[i], page,
3094 sg_size < bsize_elem ? sg_size : bsize_elem, 0);
3095 sg_size -= bsize_elem;
3096 }
3097
3098 return sglist;
3099}
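
/* Sizing example (4 KB pages assumed; the PMCRAID_MAX_IOADLS value below
 * is hypothetical): with PMCRAID_MAX_IOADLS = 64 and buflen = 1 MB,
 * sg_size = 1 MB / 63 ~= 16.6 KB, so order = get_order(sg_size) = 3,
 * bsize_elem = 32 KB, and num_elem = 32 allocations of 8 contiguous
 * pages each.
 */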
3100
3101/**
3102 * pmcraid_copy_sglist - Copy user buffer to kernel buffer's SG list
3103 * @sglist: scatter/gather list pointer
3104 * @buffer: buffer pointer
3105 * @len: buffer length
3106 * @direction: data transfer direction
3107 *
3108 * Copies data between a user buffer and a buffer allocated by
 * pmcraid_alloc_sglist, in the direction given by @direction
3109 *
3110 * Return value:
3111 * 0 on success / other on failure
3112 */
3113static int pmcraid_copy_sglist(
3114 struct pmcraid_sglist *sglist,
3115 unsigned long buffer,
3116 u32 len,
3117 int direction
3118)
3119{
3120 struct scatterlist *scatterlist;
3121 void *kaddr;
3122 int bsize_elem;
3123 int i;
3124 int rc = 0;
3125
3126 /* Determine the actual number of bytes per element */
3127 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3128
3129 scatterlist = sglist->scatterlist;
3130
3131 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3132 struct page *page = sg_page(&scatterlist[i]);
3133
3134 kaddr = kmap(page);
3135 if (direction == DMA_TO_DEVICE)
3136 rc = __copy_from_user(kaddr,
3137 (void *)buffer,
3138 bsize_elem);
3139 else
3140 rc = __copy_to_user((void *)buffer, kaddr, bsize_elem);
3141
3142 kunmap(page);
3143
3144 if (rc) {
3145 pmcraid_err("failed to copy user data into sg list\n");
3146 return -EFAULT;
3147 }
3148
3149 scatterlist[i].length = bsize_elem;
3150 }
3151
3152 if (len % bsize_elem) {
3153 struct page *page = sg_page(&scatterlist[i]);
3154
3155 kaddr = kmap(page);
3156
3157 if (direction == DMA_TO_DEVICE)
3158 rc = __copy_from_user(kaddr,
3159 (void *)buffer,
3160 len % bsize_elem);
3161 else
3162 rc = __copy_to_user((void *)buffer,
3163 kaddr,
3164 len % bsize_elem);
3165
3166 kunmap(page);
3167
3168 scatterlist[i].length = len % bsize_elem;
3169 }
3170
3171 if (rc) {
3172 pmcraid_err("failed to copy user data into sg list\n");
3173 rc = -EFAULT;
3174 }
3175
3176 return rc;
3177}
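
/* Copy example: with bsize_elem = 32 KB and len = 70 KB, the first loop
 * copies two full 32 KB chunks into the pages behind scatterlist[0] and
 * scatterlist[1], and the trailing branch copies the remaining 6 KB into
 * scatterlist[2].
 */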
3178
3179/**
3180 * pmcraid_queuecommand - Queue a mid-layer request
3181 * @scsi_cmd: scsi command struct
3182 * @done: done function
3183 *
3184 * This function queues a request generated by the mid-layer. Midlayer calls
3185 * this routine within host->lock. Some of the functions called by queuecommand
3186 * would use cmd block queue locks (free_pool_lock and pending_pool_lock)
3187 *
3188 * Return value:
3189 * 0 on success
3190 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
3191 * SCSI_MLQUEUE_HOST_BUSY if host is busy
3192 */
3193static int pmcraid_queuecommand(
3194 struct scsi_cmnd *scsi_cmd,
3195 void (*done) (struct scsi_cmnd *)
3196)
3197{
3198 struct pmcraid_instance *pinstance;
3199 struct pmcraid_resource_entry *res;
3200 struct pmcraid_ioarcb *ioarcb;
3201 struct pmcraid_cmd *cmd;
3202 int rc = 0;
3203
3204 pinstance =
3205 (struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
3206
3207 scsi_cmd->scsi_done = done;
3208 res = scsi_cmd->device->hostdata;
3209 scsi_cmd->result = (DID_OK << 16);
3210
3211	/* if adapter is marked as dead, set result to DID_NO_CONNECT and
3212	 * complete the command
3213 */
3214 if (pinstance->ioa_state == IOA_STATE_DEAD) {
3215 pmcraid_info("IOA is dead, but queuecommand is scheduled\n");
3216 scsi_cmd->result = (DID_NO_CONNECT << 16);
3217 scsi_cmd->scsi_done(scsi_cmd);
3218 return 0;
3219 }
3220
3221 /* If IOA reset is in progress, can't queue the commands */
3222 if (pinstance->ioa_reset_in_progress)
3223 return SCSI_MLQUEUE_HOST_BUSY;
3224
3225 /* initialize the command and IOARCB to be sent to IOA */
3226 cmd = pmcraid_get_free_cmd(pinstance);
3227
3228 if (cmd == NULL) {
3229 pmcraid_err("free command block is not available\n");
3230 return SCSI_MLQUEUE_HOST_BUSY;
3231 }
3232
3233 cmd->scsi_cmd = scsi_cmd;
3234 ioarcb = &(cmd->ioa_cb->ioarcb);
3235 memcpy(ioarcb->cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
3236 ioarcb->resource_handle = res->cfg_entry.resource_handle;
3237 ioarcb->request_type = REQ_TYPE_SCSI;
3238
3239 cmd->cmd_done = pmcraid_io_done;
3240
3241 if (RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry)) {
3242 if (scsi_cmd->underflow == 0)
3243 ioarcb->request_flags0 |= INHIBIT_UL_CHECK;
3244
3245 if (res->sync_reqd) {
3246 ioarcb->request_flags0 |= SYNC_COMPLETE;
3247 res->sync_reqd = 0;
3248 }
3249
3250 ioarcb->request_flags0 |= NO_LINK_DESCS;
3251 ioarcb->request_flags1 |= pmcraid_task_attributes(scsi_cmd);
3252
3253 if (RES_IS_GSCSI(res->cfg_entry))
3254 ioarcb->request_flags1 |= DELAY_AFTER_RESET;
3255 }
3256
3257 rc = pmcraid_build_ioadl(pinstance, cmd);
3258
3259 pmcraid_info("command (%d) CDB[0] = %x for %x:%x:%x:%x\n",
3260 le32_to_cpu(ioarcb->response_handle) >> 2,
3261 scsi_cmd->cmnd[0], pinstance->host->unique_id,
3262 RES_IS_VSET(res->cfg_entry) ? PMCRAID_VSET_BUS_ID :
3263 PMCRAID_PHYS_BUS_ID,
3264 RES_IS_VSET(res->cfg_entry) ?
3265 res->cfg_entry.unique_flags1 :
3266 RES_TARGET(res->cfg_entry.resource_address),
3267 RES_LUN(res->cfg_entry.resource_address));
3268
3269 if (likely(rc == 0)) {
3270 _pmcraid_fire_command(cmd);
3271 } else {
3272 pmcraid_err("queuecommand could not build ioadl\n");
3273 pmcraid_return_cmd(cmd);
3274 rc = SCSI_MLQUEUE_HOST_BUSY;
3275 }
3276
3277 return rc;
3278}
3279
3280/**
3281 * pmcraid_chr_open - char node "open" entry point, allowed only to users with admin access
3282 */
3283static int pmcraid_chr_open(struct inode *inode, struct file *filep)
3284{
3285 struct pmcraid_instance *pinstance;
3286
3287 if (!capable(CAP_SYS_ADMIN))
3288 return -EACCES;
3289
3290	/* Populate the adapter instance pointer for use by ioctl entry points */
3291 pinstance = container_of(inode->i_cdev, struct pmcraid_instance, cdev);
3292 filep->private_data = pinstance;
3293
3294 return 0;
3295}
3296
3297/**
3298 * pmcraid_chr_release - char node "release" entry point
3299 */
3300static int pmcraid_chr_release(struct inode *inode, struct file *filep)
3301{
3302 struct pmcraid_instance *pinstance =
3303 ((struct pmcraid_instance *)filep->private_data);
3304
3305 filep->private_data = NULL;
3306 fasync_helper(-1, filep, 0, &pinstance->aen_queue);
3307
3308 return 0;
3309}
3310
3311/**
3312 * pmcraid_chr_fasync - Async notifier registration from applications
3313 *
3314 * This function adds the calling process to a driver global queue. When an
3315 * event occurs, SIGIO will be sent to all processes in this queue.
3316 */
3317static int pmcraid_chr_fasync(int fd, struct file *filep, int mode)
3318{
3319 struct pmcraid_instance *pinstance;
3320 int rc;
3321
3322 pinstance = (struct pmcraid_instance *)filep->private_data;
3323 mutex_lock(&pinstance->aen_queue_lock);
3324 rc = fasync_helper(fd, filep, mode, &pinstance->aen_queue);
3325 mutex_unlock(&pinstance->aen_queue_lock);
3326
3327 return rc;
3328}
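
/* A user-space consumer would typically arm SIGIO delivery on the char
 * node with standard fcntl calls (illustrative sketch; the device node
 * name is assumed):
 *
 *	int fd = open("/dev/pmcsas0", O_RDONLY);
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);
 *
 * Setting FASYNC ends up calling this handler, which adds the process to
 * pinstance->aen_queue for SIGIO delivery on AEN events.
 */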
3329
3330
3331/**
3332 * pmcraid_build_passthrough_ioadls - builds SG elements for passthrough
3333 * commands sent over IOCTL interface
3334 *
3335 * @cmd : pointer to struct pmcraid_cmd
3336 * @buflen : length of the request buffer
3337 * @direction : data transfer direction
3338 *
3339 * Return value
3340 *	 0 on success, non-zero error code on failure
3341 */
3342static int pmcraid_build_passthrough_ioadls(
3343 struct pmcraid_cmd *cmd,
3344 int buflen,
3345 int direction
3346)
3347{
3348 struct pmcraid_sglist *sglist = NULL;
3349 struct scatterlist *sg = NULL;
3350 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
3351 struct pmcraid_ioadl_desc *ioadl;
3352 int i;
3353
3354 sglist = pmcraid_alloc_sglist(buflen);
3355
3356 if (!sglist) {
3357 pmcraid_err("can't allocate memory for passthrough SGls\n");
3358 return -ENOMEM;
3359 }
3360
3361 sglist->num_dma_sg = pci_map_sg(cmd->drv_inst->pdev,
3362 sglist->scatterlist,
3363 sglist->num_sg, direction);
3364
3365 if (!sglist->num_dma_sg || sglist->num_dma_sg > PMCRAID_MAX_IOADLS) {
3366 dev_err(&cmd->drv_inst->pdev->dev,
3367 "Failed to map passthrough buffer!\n");
3368 pmcraid_free_sglist(sglist);
3369 return -EIO;
3370 }
3371
3372 cmd->sglist = sglist;
3373 ioarcb->request_flags0 |= NO_LINK_DESCS;
3374
3375 ioadl = pmcraid_init_ioadls(cmd, sglist->num_dma_sg);
3376
3377 /* Initialize IOADL descriptor addresses */
3378 for_each_sg(sglist->scatterlist, sg, sglist->num_dma_sg, i) {
3379 ioadl[i].data_len = cpu_to_le32(sg_dma_len(sg));
3380 ioadl[i].address = cpu_to_le64(sg_dma_address(sg));
3381 ioadl[i].flags = 0;
3382 }
3383
3384 /* setup the last descriptor */
3385 ioadl[i - 1].flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC);
3386
3387 return 0;
3388}
3389
3390
3391/**
3392 * pmcraid_release_passthrough_ioadls - release passthrough ioadls
3393 *
3394 * @cmd: pointer to struct pmcraid_cmd for which ioadls were allocated
3395 * @buflen: size of the request buffer
3396 * @direction: data transfer direction
3397 *
3398 * Return value
3399 *	 none
3400 */
3401static void pmcraid_release_passthrough_ioadls(
3402 struct pmcraid_cmd *cmd,
3403 int buflen,
3404 int direction
3405)
3406{
3407 struct pmcraid_sglist *sglist = cmd->sglist;
3408
3409 if (buflen > 0) {
3410 pci_unmap_sg(cmd->drv_inst->pdev,
3411 sglist->scatterlist,
3412 sglist->num_sg,
3413 direction);
3414 pmcraid_free_sglist(sglist);
3415 cmd->sglist = NULL;
3416 }
3417}
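
/* Sketch of a passthrough request from user space (illustrative only;
 * the ioctl command macro and the exact buffer layout are defined in
 * pmcraid.h, and the macro name used below is assumed):
 *
 *	struct pmcraid_passthrough_ioctl_buffer buf = { 0 };
 *	... fill buf.ioarcb: resource_handle, request_type, cdb[],
 *	    data_transfer_length, request_flags0/1, add_cmd_params ...
 *	ioctl(fd, PMCRAID_IOCTL_PASSTHROUGH_COMMAND, &buf);
 *
 * The handler below validates the buffer, builds IOADLs for any data
 * phase, fires the command and copies the IOASA back to user space on
 * failure.
 */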
3418
3419/**
3420 * pmcraid_ioctl_passthrough - handling passthrough IOCTL commands
3421 *
3422 * @pinstance: pointer to adapter instance structure
3423 * @ioctl_cmd: ioctl code
 * @buflen: length of the request buffer
3424 * @arg: pointer to pmcraid_passthrough_buffer user buffer
3425 *
3426 * Return value
3427 *	 0 on success, non-zero error code on failure
3428 */
3429static long pmcraid_ioctl_passthrough(
3430 struct pmcraid_instance *pinstance,
3431 unsigned int ioctl_cmd,
3432 unsigned int buflen,
3433 unsigned long arg
3434)
3435{
3436 struct pmcraid_passthrough_ioctl_buffer *buffer;
3437 struct pmcraid_ioarcb *ioarcb;
3438 struct pmcraid_cmd *cmd;
3439 struct pmcraid_cmd *cancel_cmd;
3440 unsigned long request_buffer;
3441 unsigned long request_offset;
3442 unsigned long lock_flags;
3443 int request_size;
3444 int buffer_size;
3445 u8 access, direction;
3446 int rc = 0;
3447
3448 /* If IOA reset is in progress, wait 10 secs for reset to complete */
3449 if (pinstance->ioa_reset_in_progress) {
3450 rc = wait_event_interruptible_timeout(
3451 pinstance->reset_wait_q,
3452 !pinstance->ioa_reset_in_progress,
3453 msecs_to_jiffies(10000));
3454
3455 if (!rc)
3456 return -ETIMEDOUT;
3457 else if (rc < 0)
3458 return -ERESTARTSYS;
3459 }
3460
3461 /* If adapter is not in operational state, return error */
3462 if (pinstance->ioa_state != IOA_STATE_OPERATIONAL) {
3463 pmcraid_err("IOA is not operational\n");
3464 return -ENOTTY;
3465 }
3466
3467 buffer_size = sizeof(struct pmcraid_passthrough_ioctl_buffer);
3468 buffer = kmalloc(buffer_size, GFP_KERNEL);
3469
3470 if (!buffer) {
3471 pmcraid_err("no memory for passthrough buffer\n");
3472 return -ENOMEM;
3473 }
3474
3475 request_offset =
3476 offsetof(struct pmcraid_passthrough_ioctl_buffer, request_buffer);
3477
3478 request_buffer = arg + request_offset;
3479
3480 rc = __copy_from_user(buffer,
3481 (struct pmcraid_passthrough_ioctl_buffer *) arg,
3482 sizeof(struct pmcraid_passthrough_ioctl_buffer));
3483 if (rc) {
3484 pmcraid_err("ioctl: can't copy passthrough buffer\n");
3485 rc = -EFAULT;
3486 goto out_free_buffer;
3487 }
3488
3489	request_size = le32_to_cpu(buffer->ioarcb.data_transfer_length);
3490
3491 if (buffer->ioarcb.request_flags0 & TRANSFER_DIR_WRITE) {
3492 access = VERIFY_READ;
3493 direction = DMA_TO_DEVICE;
3494 } else {
3495 access = VERIFY_WRITE;
3496 direction = DMA_FROM_DEVICE;
3497 }
3498
3499 if (request_size > 0) {
3500 rc = access_ok(access, arg, request_offset + request_size);
3501
3502 if (!rc) {
3503 rc = -EFAULT;
3504 goto out_free_buffer;
3505 }
3506 }
3507
3508 /* check if we have any additional command parameters */
3509 if (buffer->ioarcb.add_cmd_param_length > PMCRAID_ADD_CMD_PARAM_LEN) {
3510 rc = -EINVAL;
3511 goto out_free_buffer;
3512 }
3513
3514 cmd = pmcraid_get_free_cmd(pinstance);
3515
3516 if (!cmd) {
3517 pmcraid_err("free command block is not available\n");
3518 rc = -ENOMEM;
3519 goto out_free_buffer;
3520 }
3521
3522 cmd->scsi_cmd = NULL;
3523 ioarcb = &(cmd->ioa_cb->ioarcb);
3524
3525 /* Copy the user-provided IOARCB stuff field by field */
3526 ioarcb->resource_handle = buffer->ioarcb.resource_handle;
3527 ioarcb->data_transfer_length = buffer->ioarcb.data_transfer_length;
3528 ioarcb->cmd_timeout = buffer->ioarcb.cmd_timeout;
3529 ioarcb->request_type = buffer->ioarcb.request_type;
3530 ioarcb->request_flags0 = buffer->ioarcb.request_flags0;
3531 ioarcb->request_flags1 = buffer->ioarcb.request_flags1;
3532 memcpy(ioarcb->cdb, buffer->ioarcb.cdb, PMCRAID_MAX_CDB_LEN);
3533
3534 if (buffer->ioarcb.add_cmd_param_length) {
3535 ioarcb->add_cmd_param_length =
3536 buffer->ioarcb.add_cmd_param_length;
3537 ioarcb->add_cmd_param_offset =
3538 buffer->ioarcb.add_cmd_param_offset;
3539 memcpy(ioarcb->add_data.u.add_cmd_params,
3540 buffer->ioarcb.add_data.u.add_cmd_params,
3541 buffer->ioarcb.add_cmd_param_length);
3542 }
3543
3544 if (request_size) {
3545 rc = pmcraid_build_passthrough_ioadls(cmd,
3546 request_size,
3547 direction);
3548 if (rc) {
3549 pmcraid_err("couldn't build passthrough ioadls\n");
3550 goto out_free_buffer;
3551 }
3552 }
3553
3554 /* If data is being written into the device, copy the data from user
3555 * buffers
3556 */
3557 if (direction == DMA_TO_DEVICE && request_size > 0) {
3558 rc = pmcraid_copy_sglist(cmd->sglist,
3559 request_buffer,
3560 request_size,
3561 direction);
3562 if (rc) {
3563 pmcraid_err("failed to copy user buffer\n");
3564 goto out_free_sglist;
3565 }
3566 }
3567
3568 /* passthrough ioctl is a blocking command, so put the caller to sleep
3569 * until completion. Note that a timeout value of 0 means wait forever.
3570 */
3571 cmd->cmd_done = pmcraid_internal_done;
3572 init_completion(&cmd->wait_for_completion);
3573 cmd->completion_req = 1;
3574
3575 pmcraid_info("command(%d) (CDB[0] = %x) for %x\n",
3576 le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
3577 cmd->ioa_cb->ioarcb.cdb[0],
3578 le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle));
3579
3580 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
3581 _pmcraid_fire_command(cmd);
3582 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
3583
3584 /* If a command timeout is specified, wait only until that time;
3585 * otherwise this is a blocking wait. If the command times out, it
3586 * will be aborted.
3587 */
3588 if (buffer->ioarcb.cmd_timeout == 0) {
3589 wait_for_completion(&cmd->wait_for_completion);
3590 } else if (!wait_for_completion_timeout(
3591 &cmd->wait_for_completion,
3592 msecs_to_jiffies(buffer->ioarcb.cmd_timeout * 1000))) {
3593
3594 pmcraid_info("aborting cmd %d (CDB[0] = %x) due to timeout\n",
3595 le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
3596 cmd->ioa_cb->ioarcb.cdb[0]);
3597
3598 rc = -ETIMEDOUT;
3599 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
3600 cancel_cmd = pmcraid_abort_cmd(cmd);
3601 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
3602
3603 if (cancel_cmd) {
3604 wait_for_completion(&cancel_cmd->wait_for_completion);
3605 pmcraid_return_cmd(cancel_cmd);
3606 }
3607
3608 goto out_free_sglist;
3609 }
3610
3611 /* If the command failed for any reason, copy entire IOASA buffer and
3612 * return IOCTL success. If copying IOASA to user-buffer fails, return
3613 * EFAULT
3614 */
3615 if (le32_to_cpu(cmd->ioa_cb->ioasa.ioasc)) {
3616
3617 void __user *ioasa =
3618 (void __user *)(arg +
3619 offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa));
3620
3621 pmcraid_info("command failed with %x\n",
3622 le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
3623 if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa,
3624 sizeof(struct pmcraid_ioasa))) {
3625 pmcraid_err("failed to copy ioasa buffer to user\n");
3626 rc = -EFAULT;
3627 }
3628 }
3629 /* If the data transfer was from device, copy the data onto user
3630 * buffers
3631 */
3632 else if (direction == DMA_FROM_DEVICE && request_size > 0) {
3633 rc = pmcraid_copy_sglist(cmd->sglist,
3634 request_buffer,
3635 request_size,
3636 direction);
3637 if (rc) {
3638 pmcraid_err("failed to copy user buffer\n");
3639 rc = -EFAULT;
3640 }
3641 }
3642
3643out_free_sglist:
3644 pmcraid_release_passthrough_ioadls(cmd, request_size, direction);
3645 pmcraid_return_cmd(cmd);
3646
3647out_free_buffer:
3648 kfree(buffer);
3649
3650 return rc;
3651}
3652
3653
3654
3655
3656/**
3657 * pmcraid_ioctl_driver - ioctl handler for commands handled by driver itself
3658 *
3659 * @pinstance: pointer to adapter instance structure
3660 * @cmd: ioctl command passed in
3661 * @buflen: length of user_buffer
3662 * @user_buffer: user buffer pointer
3663 *
3664 * Return Value
3665 * 0 in case of success, otherwise appropriate error code
3666 */
3667static long pmcraid_ioctl_driver(
3668 struct pmcraid_instance *pinstance,
3669 unsigned int cmd,
3670 unsigned int buflen,
3671 void __user *user_buffer
3672)
3673{
3674 int rc = -ENOSYS;
3675
3676 if (!access_ok(VERIFY_READ, user_buffer, _IOC_SIZE(cmd))) {
3677 pmcraid_err("ioctl_driver: access fault in request buffer\n");
3678 return -EFAULT;
3679 }
3680
3681 switch (cmd) {
3682 case PMCRAID_IOCTL_RESET_ADAPTER:
3683 pmcraid_reset_bringup(pinstance);
3684 rc = 0;
3685 break;
3686
3687 default:
3688 break;
3689 }
3690
3691 return rc;
3692}
3693
3694/**
3695 * pmcraid_check_ioctl_buffer - check for proper access to user buffer
3696 *
3697 * @cmd: ioctl command
3698 * @arg: user buffer
3699 * @hdr: pointer to kernel memory for pmcraid_ioctl_header
3700 *
3701 * Return Value
3702 * negative error code if there are access issues, otherwise zero.
3703 * Upon success, the ioctl header is copied into @hdr.
3704 */
3705
3706static int pmcraid_check_ioctl_buffer(
3707 int cmd,
3708 void __user *arg,
3709 struct pmcraid_ioctl_header *hdr
3710)
3711{
3712 int rc = 0;
3713 int access = VERIFY_READ;
3714
3715 if (copy_from_user(hdr, arg, sizeof(struct pmcraid_ioctl_header))) {
3716 pmcraid_err("couldn't copy ioctl header from user buffer\n");
3717 return -EFAULT;
3718 }
3719
3720 /* check for valid driver signature */
3721 rc = memcmp(hdr->signature,
3722 PMCRAID_IOCTL_SIGNATURE,
3723 sizeof(hdr->signature));
3724 if (rc) {
3725 pmcraid_err("signature verification failed\n");
3726 return -EINVAL;
3727 }
3728
3729 /* buffer length can't be negative */
3730 if (hdr->buffer_length < 0) {
3731 pmcraid_err("ioctl: invalid buffer length specified\n");
3732 return -EINVAL;
3733 }
3734
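/* The ioctl argument is a pmcraid_ioctl_header followed by
 * hdr->buffer_length bytes of payload; the access check below
 * therefore starts just past the header.
 */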
3735 /* check for appropriate buffer access */
3736 if ((_IOC_DIR(cmd) & _IOC_READ) == _IOC_READ)
3737 access = VERIFY_WRITE;
3738
3739 rc = access_ok(access,
3740 (arg + sizeof(struct pmcraid_ioctl_header)),
3741 hdr->buffer_length);
3742 if (!rc) {
3743 pmcraid_err("access failed for user buffer of size %d\n",
3744 hdr->buffer_length);
3745 return -EFAULT;
3746 }
3747
3748 return 0;
3749}
3750
3751/**
3752 * pmcraid_ioctl - char node ioctl entry point
3753 */
3754static long pmcraid_chr_ioctl(
3755 struct file *filep,
3756 unsigned int cmd,
3757 unsigned long arg
3758)
3759{
3760 struct pmcraid_instance *pinstance = NULL;
3761 struct pmcraid_ioctl_header *hdr = NULL;
3762 int retval = -ENOTTY;
3763
3764 hdr = kmalloc(sizeof(struct pmcraid_ioctl_header), GFP_KERNEL);
3765
3766 if (!hdr) {
3767 pmcraid_err("failed to allocate memory for ioctl header\n");
3768 return -ENOMEM;
3769 }
3770
3771 retval = pmcraid_check_ioctl_buffer(cmd, (void __user *)arg, hdr);
3772
3773 if (retval) {
3774 pmcraid_info("chr_ioctl: header check failed\n");
3775 kfree(hdr);
3776 return retval;
3777 }
3778
3779 pinstance = (struct pmcraid_instance *)filep->private_data;
3780
3781 if (!pinstance) {
3782 pmcraid_info("adapter instance is not found\n");
3783 kfree(hdr);
3784 return -ENOTTY;
3785 }
3786
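/* the _IOC_TYPE nibble of the ioctl code selects between commands
 * passed through to the IOA and commands handled by the driver itself
 */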
3787 switch (_IOC_TYPE(cmd)) {
3788
3789 case PMCRAID_PASSTHROUGH_IOCTL:
3790 /* If ioctl code is to download microcode, we need to block
3791 * mid-layer requests.
3792 */
3793 if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
3794 scsi_block_requests(pinstance->host);
3795
3796 retval = pmcraid_ioctl_passthrough(pinstance,
3797 cmd,
3798 hdr->buffer_length,
3799 arg);
3800
3801 if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
3802 scsi_unblock_requests(pinstance->host);
3803 break;
3804
3805 case PMCRAID_DRIVER_IOCTL:
3806 arg += sizeof(struct pmcraid_ioctl_header);
3807 retval = pmcraid_ioctl_driver(pinstance,
3808 cmd,
3809 hdr->buffer_length,
3810 (void __user *)arg);
3811 break;
3812
3813 default:
3814 retval = -ENOTTY;
3815 break;
3816 }
3817
3818 kfree(hdr);
3819
3820 return retval;
3821}
3822
3823/**
3824 * File operations structure for management interface
3825 */
3826static const struct file_operations pmcraid_fops = {
3827 .owner = THIS_MODULE,
3828 .open = pmcraid_chr_open,
3829 .release = pmcraid_chr_release,
3830 .fasync = pmcraid_chr_fasync,
3831 .unlocked_ioctl = pmcraid_chr_ioctl,
3832#ifdef CONFIG_COMPAT
3833 .compat_ioctl = pmcraid_chr_ioctl,
3834#endif
3835};
3836
3837
3838
3839
3840/**
3841 * pmcraid_show_log_level - Display adapter's error logging level
3842 * @dev: class device struct
3843 * @buf: buffer
3844 *
3845 * Return value:
3846 * number of bytes printed to buffer
3847 */
3848static ssize_t pmcraid_show_log_level(
3849 struct device *dev,
3850 struct device_attribute *attr,
3851 char *buf)
3852{
3853 struct Scsi_Host *shost = class_to_shost(dev);
3854 struct pmcraid_instance *pinstance =
3855 (struct pmcraid_instance *)shost->hostdata;
3856 return snprintf(buf, PAGE_SIZE, "%d\n", pinstance->current_log_level);
3857}
3858
3859/**
3860 * pmcraid_store_log_level - Change the adapter's error logging level
3861 * @dev: class device struct
3862 * @buf: buffer
3863 * @count: not used
3864 *
3865 * Return value:
3866 * number of bytes consumed from the buffer
3867 */
3868static ssize_t pmcraid_store_log_level(
3869 struct device *dev,
3870 struct device_attribute *attr,
3871 const char *buf,
3872 size_t count
3873)
3874{
3875 struct Scsi_Host *shost;
3876 struct pmcraid_instance *pinstance;
3877 unsigned long val;
3878
3879 if (strict_strtoul(buf, 10, &val))
3880 return -EINVAL;
3881 /* log-level should be from 0 to 2 */
3882 if (val > 2)
3883 return -EINVAL;
3884
3885 shost = class_to_shost(dev);
3886 pinstance = (struct pmcraid_instance *)shost->hostdata;
3887 pinstance->current_log_level = val;
3888
3889 return strlen(buf);
3890}
3891
3892static struct device_attribute pmcraid_log_level_attr = {
3893 .attr = {
3894 .name = "log_level",
3895 .mode = S_IRUGO | S_IWUSR,
3896 },
3897 .show = pmcraid_show_log_level,
3898 .store = pmcraid_store_log_level,
3899};
3900
3901/**
3902 * pmcraid_show_drv_version - Display driver version
3903 * @dev: class device struct
3904 * @buf: buffer
3905 *
3906 * Return value:
3907 * number of bytes printed to buffer
3908 */
3909static ssize_t pmcraid_show_drv_version(
3910 struct device *dev,
3911 struct device_attribute *attr,
3912 char *buf
3913)
3914{
3915 return snprintf(buf, PAGE_SIZE, "version: %s, build date: %s\n",
3916 PMCRAID_DRIVER_VERSION, PMCRAID_DRIVER_DATE);
3917}
3918
3919static struct device_attribute pmcraid_driver_version_attr = {
3920 .attr = {
3921 .name = "drv_version",
3922 .mode = S_IRUGO,
3923 },
3924 .show = pmcraid_show_drv_version,
3925};
3926
3927/**
3928 * pmcraid_show_adapter_id - Display driver assigned adapter id
3929 * @dev: class device struct
3930 * @buf: buffer
3931 *
3932 * Return value:
3933 * number of bytes printed to buffer
3934 */
3935static ssize_t pmcraid_show_adapter_id(
3936 struct device *dev,
3937 struct device_attribute *attr,
3938 char *buf
3939)
3940{
3941 struct Scsi_Host *shost = class_to_shost(dev);
3942 struct pmcraid_instance *pinstance =
3943 (struct pmcraid_instance *)shost->hostdata;
3944 u32 adapter_id = (pinstance->pdev->bus->number << 8) |
3945 pinstance->pdev->devfn;
3946 u32 aen_group = pmcraid_event_family.id;
3947
3948 return snprintf(buf, PAGE_SIZE,
3949 "adapter id: %d\nminor: %d\naen group: %d\n",
3950 adapter_id, MINOR(pinstance->cdev.dev), aen_group);
3951}
3952
3953static struct device_attribute pmcraid_adapter_id_attr = {
3954 .attr = {
3955 .name = "adapter_id",
3956 .mode = S_IRUGO,
3957 },
3958 .show = pmcraid_show_adapter_id,
3959};
3960
3961static struct device_attribute *pmcraid_host_attrs[] = {
3962 &pmcraid_log_level_attr,
3963 &pmcraid_driver_version_attr,
3964 &pmcraid_adapter_id_attr,
3965 NULL,
3966};
3967
3968
3969/* host template structure for pmcraid driver */
3970static struct scsi_host_template pmcraid_host_template = {
3971 .module = THIS_MODULE,
3972 .name = PMCRAID_DRIVER_NAME,
3973 .queuecommand = pmcraid_queuecommand,
3974 .eh_abort_handler = pmcraid_eh_abort_handler,
3975 .eh_bus_reset_handler = pmcraid_eh_bus_reset_handler,
3976 .eh_target_reset_handler = pmcraid_eh_target_reset_handler,
3977 .eh_device_reset_handler = pmcraid_eh_device_reset_handler,
3978 .eh_host_reset_handler = pmcraid_eh_host_reset_handler,
3979
3980 .slave_alloc = pmcraid_slave_alloc,
3981 .slave_configure = pmcraid_slave_configure,
3982 .slave_destroy = pmcraid_slave_destroy,
3983 .change_queue_depth = pmcraid_change_queue_depth,
3984 .change_queue_type = pmcraid_change_queue_type,
3985 .can_queue = PMCRAID_MAX_IO_CMD,
3986 .this_id = -1,
3987 .sg_tablesize = PMCRAID_MAX_IOADLS,
3988 .max_sectors = PMCRAID_IOA_MAX_SECTORS,
3989 .cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN,
3990 .use_clustering = ENABLE_CLUSTERING,
3991 .shost_attrs = pmcraid_host_attrs,
3992 .proc_name = PMCRAID_DRIVER_NAME
3993};
3994
3995/**
3996 * pmcraid_isr_common - Common interrupt handler routine
3997 *
3998 * @pinstance: pointer to adapter instance
3999 * @intrs: active interrupts (contents of ioa_host_interrupt register)
4000 * @hrrq_id: Host RRQ index
4001 *
4002 * Return Value
4003 * none
4004 */
4005static void pmcraid_isr_common(
4006 struct pmcraid_instance *pinstance,
4007 u32 intrs,
4008 int hrrq_id
4009)
4010{
4011 u32 intrs_clear =
4012 (intrs & INTRS_CRITICAL_OP_IN_PROGRESS) ? intrs
4013 : INTRS_HRRQ_VALID;
4014 iowrite32(intrs_clear,
4015 pinstance->int_regs.ioa_host_interrupt_clr_reg);
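	/* the register is read back, presumably to flush the posted write
	 * to the clear register before sampling the interrupt state
	 */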
4016 intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
4017
4018 /* hrrq valid bit was set, schedule tasklet to handle the response */
4019 if (intrs_clear == INTRS_HRRQ_VALID)
4020 tasklet_schedule(&(pinstance->isr_tasklet[hrrq_id]));
4021}
4022
4023/**
4024 * pmcraid_isr - implements interrupt handling routine
4025 *
4026 * @irq: interrupt vector number
4027 * @dev_id: pointer hrrq_vector
4028 *
4029 * Return Value
4030 * IRQ_HANDLED if interrupt is handled or IRQ_NONE if ignored
4031 */
4032static irqreturn_t pmcraid_isr(int irq, void *dev_id)
4033{
4034 struct pmcraid_isr_param *hrrq_vector;
4035 struct pmcraid_instance *pinstance;
4036 unsigned long lock_flags;
4037 u32 intrs;
4038
4039 /* In legacy interrupt mode the IRQ line may be shared with other
4040 * devices, so the current interrupt may not be from the IOA
4041 */
4042 if (!dev_id) {
4043 printk(KERN_INFO "%s(): NULL host pointer\n", __func__);
4044 return IRQ_NONE;
4045 }
4046
4047 hrrq_vector = (struct pmcraid_isr_param *)dev_id;
4048 pinstance = hrrq_vector->drv_inst;
4049
4050 /* Acquire the lock (currently host_lock) while processing interrupts.
4051 * This interval is small as most of the response processing is done by
4052 * tasklet without the lock.
4053 */
4054 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
4055 intrs = pmcraid_read_interrupts(pinstance);
4056
4057 if (unlikely((intrs & PMCRAID_PCI_INTERRUPTS) == 0)) {
4058 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
4059 return IRQ_NONE;
4060 }
4061
4062 /* On any error interrupt, including unit check, initiate an IOA
4063 * reset. In case of a unit check, tell the reset sequence that the
4064 * IOA unit checked so it can prepare for a dump during the reset.
4065 */
4066 if (intrs & PMCRAID_ERROR_INTERRUPTS) {
4067
4068 if (intrs & INTRS_IOA_UNIT_CHECK)
4069 pinstance->ioa_unit_check = 1;
4070
4071 iowrite32(intrs,
4072 pinstance->int_regs.ioa_host_interrupt_clr_reg);
4073 pmcraid_err("ISR: error interrupts: %x initiating reset\n",
4074 intrs);
4075 intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
4076 pmcraid_initiate_reset(pinstance);
4077 } else {
4078 pmcraid_isr_common(pinstance, intrs, hrrq_vector->hrrq_id);
4079 }
4080
4081 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
4082
4083 return IRQ_HANDLED;
4084}
4085
4086
4087/**
4088 * pmcraid_worker_function - worker thread function
4089 *
4090 * @workp: pointer to the work_struct embedded in the adapter instance
4091 *
4092 * Return Value
4093 * None
4094 */
4095
4096static void pmcraid_worker_function(struct work_struct *workp)
4097{
4098 struct pmcraid_instance *pinstance;
4099 struct pmcraid_resource_entry *res;
4100 struct pmcraid_resource_entry *temp;
4101 struct scsi_device *sdev;
4102 unsigned long lock_flags;
4103 unsigned long host_lock_flags;
4104 u8 bus, target, lun;
4105
4106 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
4107 /* add resources only after host is added into system */
4108 if (!atomic_read(&pinstance->expose_resources))
4109 return;
4110
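	/* scsi_remove_device/scsi_add_device may sleep, so the resource
	 * lock is dropped around those calls below
	 */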
4111 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
4112 list_for_each_entry_safe(res, temp, &pinstance->used_res_q, queue) {
4113
4114 if (res->change_detected == RES_CHANGE_DEL && res->scsi_dev) {
4115 sdev = res->scsi_dev;
4116
4117 /* host_lock must be held before calling
4118 * scsi_device_get
4119 */
4120 spin_lock_irqsave(pinstance->host->host_lock,
4121 host_lock_flags);
4122 if (!scsi_device_get(sdev)) {
4123 spin_unlock_irqrestore(
4124 pinstance->host->host_lock,
4125 host_lock_flags);
4126 pmcraid_info("deleting %x from midlayer\n",
4127 res->cfg_entry.resource_address);
4128 list_move_tail(&res->queue,
4129 &pinstance->free_res_q);
4130 spin_unlock_irqrestore(
4131 &pinstance->resource_lock,
4132 lock_flags);
4133 scsi_remove_device(sdev);
4134 scsi_device_put(sdev);
4135 spin_lock_irqsave(&pinstance->resource_lock,
4136 lock_flags);
4137 res->change_detected = 0;
4138 } else {
4139 spin_unlock_irqrestore(
4140 pinstance->host->host_lock,
4141 host_lock_flags);
4142 }
4143 }
4144 }
4145
4146 list_for_each_entry(res, &pinstance->used_res_q, queue) {
4147
4148 if (res->change_detected == RES_CHANGE_ADD) {
4149
4150 if (!pmcraid_expose_resource(&res->cfg_entry))
4151 continue;
4152
4153 if (RES_IS_VSET(res->cfg_entry)) {
4154 bus = PMCRAID_VSET_BUS_ID;
4155 target = res->cfg_entry.unique_flags1;
4156 lun = PMCRAID_VSET_LUN_ID;
4157 } else {
4158 bus = PMCRAID_PHYS_BUS_ID;
4159 target =
4160 RES_TARGET(
4161 res->cfg_entry.resource_address);
4162 lun = RES_LUN(res->cfg_entry.resource_address);
4163 }
4164
4165 res->change_detected = 0;
4166 spin_unlock_irqrestore(&pinstance->resource_lock,
4167 lock_flags);
4168 scsi_add_device(pinstance->host, bus, target, lun);
4169 spin_lock_irqsave(&pinstance->resource_lock,
4170 lock_flags);
4171 }
4172 }
4173
4174 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
4175}
4176
4177/**
4178 * pmcraid_tasklet_function - Tasklet function
4179 *
4180 * @instance: pointer to the isr param structure, passed as unsigned long
4181 *
4182 * Return Value
4183 * None
4184 */
4185void pmcraid_tasklet_function(unsigned long instance)
4186{
4187 struct pmcraid_isr_param *hrrq_vector;
4188 struct pmcraid_instance *pinstance;
4189 unsigned long hrrq_lock_flags;
4190 unsigned long pending_lock_flags;
4191 unsigned long host_lock_flags;
4192 spinlock_t *lockp; /* hrrq buffer lock */
4193 int id;
4194 u32 intrs;
4195 u32 resp;
4196
4197 hrrq_vector = (struct pmcraid_isr_param *)instance;
4198 pinstance = hrrq_vector->drv_inst;
4199 id = hrrq_vector->hrrq_id;
4200 lockp = &(pinstance->hrrq_lock[id]);
4201 intrs = pmcraid_read_interrupts(pinstance);
4202
4203 /* If the interrupt was raised as part of IOA initialization, clear
4204 * and mask it. Delete the timer and wake up the reset engine to
4205 * proceed with the reset sequence.
4206 */
4207 if (intrs & INTRS_TRANSITION_TO_OPERATIONAL) {
4208 iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
4209 pinstance->int_regs.ioa_host_interrupt_mask_reg);
4210 iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
4211 pinstance->int_regs.ioa_host_interrupt_clr_reg);
4212
4213 if (pinstance->reset_cmd != NULL) {
4214 del_timer(&pinstance->reset_cmd->timer);
4215 spin_lock_irqsave(pinstance->host->host_lock,
4216 host_lock_flags);
4217 pinstance->reset_cmd->cmd_done(pinstance->reset_cmd);
4218 spin_unlock_irqrestore(pinstance->host->host_lock,
4219 host_lock_flags);
4220 }
4221 return;
4222 }
4223
4224 /* loop through each of the commands responded by IOA. Each HRRQ buf is
4225 * protected by its own lock. Traversals must be done within this lock
4226 * as there may be multiple tasklets running on multiple CPUs. Note
4227 * that the lock is held just for picking up the response handle and
4228 * manipulating hrrq_curr/toggle_bit values.
4229 */
4230 spin_lock_irqsave(lockp, hrrq_lock_flags);
4231
4232 resp = le32_to_cpu(*(pinstance->hrrq_curr[id]));
4233
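	/* Each HRRQ entry is assumed to carry the command index in its
	 * upper bits (recovered as resp >> 2) and the producer's toggle bit
	 * in HRRQ_TOGGLE_BIT. Entries are valid while their toggle bit
	 * matches the host's copy, which is flipped on every queue wrap.
	 */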
4234 while ((resp & HRRQ_TOGGLE_BIT) ==
4235 pinstance->host_toggle_bit[id]) {
4236
4237 int cmd_index = resp >> 2;
4238 struct pmcraid_cmd *cmd = NULL;
4239
4240 if (cmd_index < PMCRAID_MAX_CMD) {
4241 cmd = pinstance->cmd_list[cmd_index];
4242 } else {
4243 /* In case of invalid response handle, initiate IOA
4244 * reset sequence.
4245 */
4246 spin_unlock_irqrestore(lockp, hrrq_lock_flags);
4247
4248 pmcraid_err("Invalid response %d initiating reset\n",
4249 cmd_index);
4250
4251 spin_lock_irqsave(pinstance->host->host_lock,
4252 host_lock_flags);
4253 pmcraid_initiate_reset(pinstance);
4254 spin_unlock_irqrestore(pinstance->host->host_lock,
4255 host_lock_flags);
4256
4257 spin_lock_irqsave(lockp, hrrq_lock_flags);
4258 break;
4259 }
4260
4261 if (pinstance->hrrq_curr[id] < pinstance->hrrq_end[id]) {
4262 pinstance->hrrq_curr[id]++;
4263 } else {
4264 pinstance->hrrq_curr[id] = pinstance->hrrq_start[id];
4265 pinstance->host_toggle_bit[id] ^= 1u;
4266 }
4267
4268 spin_unlock_irqrestore(lockp, hrrq_lock_flags);
4269
4270 spin_lock_irqsave(&pinstance->pending_pool_lock,
4271 pending_lock_flags);
4272 list_del(&cmd->free_list);
4273 spin_unlock_irqrestore(&pinstance->pending_pool_lock,
4274 pending_lock_flags);
4275 del_timer(&cmd->timer);
4276 atomic_dec(&pinstance->outstanding_cmds);
4277
4278 if (cmd->cmd_done == pmcraid_ioa_reset) {
4279 spin_lock_irqsave(pinstance->host->host_lock,
4280 host_lock_flags);
4281 cmd->cmd_done(cmd);
4282 spin_unlock_irqrestore(pinstance->host->host_lock,
4283 host_lock_flags);
4284 } else if (cmd->cmd_done != NULL) {
4285 cmd->cmd_done(cmd);
4286 }
4287 /* loop over until we are done with all responses */
4288 spin_lock_irqsave(lockp, hrrq_lock_flags);
4289 resp = le32_to_cpu(*(pinstance->hrrq_curr[id]));
4290 }
4291
4292 spin_unlock_irqrestore(lockp, hrrq_lock_flags);
4293}
4294
4295/**
4296 * pmcraid_unregister_interrupt_handler - de-registers the interrupt handler
4297 * @pinstance: pointer to adapter instance structure
4298 *
4299 * This routine un-registers the registered interrupt handler and
4300 * also frees the irqs/vectors.
4301 *
4302 * Return Value
4303 * None
4304 */
4305static
4306void pmcraid_unregister_interrupt_handler(struct pmcraid_instance *pinstance)
4307{
4308 free_irq(pinstance->pdev->irq, &(pinstance->hrrq_vector[0]));
4309}
4310
4311/**
4312 * pmcraid_register_interrupt_handler - registers interrupt handler
4313 * @pinstance: pointer to per-adapter instance structure
4314 *
4315 * Return Value
4316 * 0 on success, non-zero error code otherwise.
4317 */
4318static int
4319pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance)
4320{
4321 struct pci_dev *pdev = pinstance->pdev;
4322
4323 pinstance->hrrq_vector[0].hrrq_id = 0;
4324 pinstance->hrrq_vector[0].drv_inst = pinstance;
4325 pinstance->hrrq_vector[0].vector = 0;
4326 pinstance->num_hrrq = 1;
4327 return request_irq(pdev->irq, pmcraid_isr, IRQF_SHARED,
4328 PMCRAID_DRIVER_NAME, &pinstance->hrrq_vector[0]);
4329}
4330
4331/**
4332 * pmcraid_release_cmd_blocks - release buffers allocated for command blocks
4333 * @pinstance: per adapter instance structure pointer
4334 * @max_index: number of buffer blocks to release
4335 *
4336 * Return Value
4337 * None
4338 */
4339static void
4340pmcraid_release_cmd_blocks(struct pmcraid_instance *pinstance, int max_index)
4341{
4342 int i;
4343 for (i = 0; i < max_index; i++) {
4344 kmem_cache_free(pinstance->cmd_cachep, pinstance->cmd_list[i]);
4345 pinstance->cmd_list[i] = NULL;
4346 }
4347 kmem_cache_destroy(pinstance->cmd_cachep);
4348 pinstance->cmd_cachep = NULL;
4349}
4350
4351/**
4352 * pmcraid_release_control_blocks - releases buffers allocated for control blocks
4353 * @pinstance: pointer to per adapter instance structure
4354 * @max_index: number of buffers (from 0 onwards) to release
4355 *
4356 * This function assumes that the command blocks for which control blocks are
4357 * linked are not released.
4358 *
4359 * Return Value
4360 * None
4361 */
4362static void
4363pmcraid_release_control_blocks(
4364 struct pmcraid_instance *pinstance,
4365 int max_index
4366)
4367{
4368 int i;
4369
4370 if (pinstance->control_pool == NULL)
4371 return;
4372
4373 for (i = 0; i < max_index; i++) {
4374 pci_pool_free(pinstance->control_pool,
4375 pinstance->cmd_list[i]->ioa_cb,
4376 pinstance->cmd_list[i]->ioa_cb_bus_addr);
4377 pinstance->cmd_list[i]->ioa_cb = NULL;
4378 pinstance->cmd_list[i]->ioa_cb_bus_addr = 0;
4379 }
4380 pci_pool_destroy(pinstance->control_pool);
4381 pinstance->control_pool = NULL;
4382}
4383
4384/**
4385 * pmcraid_allocate_cmd_blocks - allocate memory for cmd block structures
4386 * @pinstance - pointer to per adapter instance structure
4387 *
4388 * Allocates memory for command blocks using kernel slab allocator.
4389 *
4390 * Return Value
4391 * 0 in case of success; -ENOMEM in case of failure
4392 */
4393static int __devinit
4394pmcraid_allocate_cmd_blocks(struct pmcraid_instance *pinstance)
4395{
4396 int i;
4397
4398 sprintf(pinstance->cmd_pool_name, "pmcraid_cmd_pool_%d",
4399 pinstance->host->unique_id);
4400
4401
4402 pinstance->cmd_cachep = kmem_cache_create(
4403 pinstance->cmd_pool_name,
4404 sizeof(struct pmcraid_cmd), 0,
4405 SLAB_HWCACHE_ALIGN, NULL);
4406 if (!pinstance->cmd_cachep)
4407 return -ENOMEM;
4408
4409 for (i = 0; i < PMCRAID_MAX_CMD; i++) {
4410 pinstance->cmd_list[i] =
4411 kmem_cache_alloc(pinstance->cmd_cachep, GFP_KERNEL);
4412 if (!pinstance->cmd_list[i]) {
4413 pmcraid_release_cmd_blocks(pinstance, i);
4414 return -ENOMEM;
4415 }
4416 }
4417 return 0;
4418}
4419
4420/**
4421 * pmcraid_allocate_control_blocks - allocates memory control blocks
4422 * @pinstance : pointer to per adapter instance structure
4423 *
4424 * This function allocates PCI memory for DMAable buffers like IOARCB, IOADLs
4425 * and IOASAs. This is called after command blocks are already allocated.
4426 *
4427 * Return Value
4428 * 0 in case it can allocate all control blocks, otherwise -ENOMEM
4429 */
4430static int __devinit
4431pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance)
4432{
4433 int i;
4434
4435 sprintf(pinstance->ctl_pool_name, "pmcraid_control_pool_%d",
4436 pinstance->host->unique_id);
4437
4438 pinstance->control_pool =
4439 pci_pool_create(pinstance->ctl_pool_name,
4440 pinstance->pdev,
4441 sizeof(struct pmcraid_control_block),
4442 PMCRAID_IOARCB_ALIGNMENT, 0);
4443
4444 if (!pinstance->control_pool)
4445 return -ENOMEM;
4446
4447 for (i = 0; i < PMCRAID_MAX_CMD; i++) {
4448 pinstance->cmd_list[i]->ioa_cb =
4449 pci_pool_alloc(
4450 pinstance->control_pool,
4451 GFP_KERNEL,
4452 &(pinstance->cmd_list[i]->ioa_cb_bus_addr));
4453
4454 if (!pinstance->cmd_list[i]->ioa_cb) {
4455 pmcraid_release_control_blocks(pinstance, i);
4456 return -ENOMEM;
4457 }
4458 memset(pinstance->cmd_list[i]->ioa_cb, 0,
4459 sizeof(struct pmcraid_control_block));
4460 }
4461 return 0;
4462}
4463
4464/**
4465 * pmcraid_release_host_rrqs - release memory allocated for hrrq buffer(s)
4466 * @pinstance: pointer to per adapter instance structure
4467 * @maxindex: size of hrrq buffer pointer array
4468 *
4469 * Return Value
4470 * None
4471 */
4472static void
4473pmcraid_release_host_rrqs(struct pmcraid_instance *pinstance, int maxindex)
4474{
4475 int i;
4476 for (i = 0; i < maxindex; i++) {
4477
4478 pci_free_consistent(pinstance->pdev,
4479 HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD,
4480 pinstance->hrrq_start[i],
4481 pinstance->hrrq_start_bus_addr[i]);
4482
4483 /* reset pointers and toggle bit to zeros */
4484 pinstance->hrrq_start[i] = NULL;
4485 pinstance->hrrq_start_bus_addr[i] = 0;
4486 pinstance->host_toggle_bit[i] = 0;
4487 }
4488}
4489
4490/**
4491 * pmcraid_allocate_host_rrqs - Allocate and initialize host RRQ buffers
4492 * @pinstance: pointer to per adapter instance structure
4493 *
4494 * Return value
4495 * 0 if hrrq buffers are allocated, -ENOMEM otherwise.
4496 */
4497static int __devinit
4498pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance)
4499{
4500 int i;
4501 int buf_count = PMCRAID_MAX_CMD / pinstance->num_hrrq;
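	/* each response queue gets an equal share of the command blocks;
	 * with the single HRRQ set up by pmcraid_register_interrupt_handler
	 * this amounts to PMCRAID_MAX_CMD entries
	 */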
4502
4503 for (i = 0; i < pinstance->num_hrrq; i++) {
4504 int buffer_size = HRRQ_ENTRY_SIZE * buf_count;
4505
4506 pinstance->hrrq_start[i] =
4507 pci_alloc_consistent(
4508 pinstance->pdev,
4509 buffer_size,
4510 &(pinstance->hrrq_start_bus_addr[i]));
4511
4512 if (pinstance->hrrq_start[i] == NULL) {
4513 pmcraid_err("could not allocate host rrq: %d\n", i);
4514 pmcraid_release_host_rrqs(pinstance, i);
4515 return -ENOMEM;
4516 }
4517
4518 memset(pinstance->hrrq_start[i], 0, buffer_size);
4519 pinstance->hrrq_curr[i] = pinstance->hrrq_start[i];
4520 pinstance->hrrq_end[i] =
4521 pinstance->hrrq_start[i] + buf_count - 1;
4522 pinstance->host_toggle_bit[i] = 1;
4523 spin_lock_init(&pinstance->hrrq_lock[i]);
4524 }
4525 return 0;
4526}
4527
4528/**
4529 * pmcraid_release_hcams - release HCAM buffers
4530 *
4531 * @pinstance: pointer to per adapter instance structure
4532 *
4533 * Return value
4534 * none
4535 */
4536static void pmcraid_release_hcams(struct pmcraid_instance *pinstance)
4537{
4538 if (pinstance->ccn.msg != NULL) {
4539 pci_free_consistent(pinstance->pdev,
4540 PMCRAID_AEN_HDR_SIZE +
4541 sizeof(struct pmcraid_hcam_ccn),
4542 pinstance->ccn.msg,
4543 pinstance->ccn.baddr);
4544
4545 pinstance->ccn.msg = NULL;
4546 pinstance->ccn.hcam = NULL;
4547 pinstance->ccn.baddr = 0;
4548 }
4549
4550 if (pinstance->ldn.msg != NULL) {
4551 pci_free_consistent(pinstance->pdev,
4552 PMCRAID_AEN_HDR_SIZE +
4553 sizeof(struct pmcraid_hcam_ldn),
4554 pinstance->ldn.msg,
4555 pinstance->ldn.baddr);
4556
4557 pinstance->ldn.msg = NULL;
4558 pinstance->ldn.hcam = NULL;
4559 pinstance->ldn.baddr = 0;
4560 }
4561}
4562
4563/**
4564 * pmcraid_allocate_hcams - allocates HCAM buffers
4565 * @pinstance : pointer to per adapter instance structure
4566 *
4567 * Return Value:
4568 * 0 in case of successful allocation, non-zero otherwise
4569 */
4570static int pmcraid_allocate_hcams(struct pmcraid_instance *pinstance)
4571{
4572 pinstance->ccn.msg = pci_alloc_consistent(
4573 pinstance->pdev,
4574 PMCRAID_AEN_HDR_SIZE +
4575 sizeof(struct pmcraid_hcam_ccn),
4576 &(pinstance->ccn.baddr));
4577
4578 pinstance->ldn.msg = pci_alloc_consistent(
4579 pinstance->pdev,
4580 PMCRAID_AEN_HDR_SIZE +
4581 sizeof(struct pmcraid_hcam_ldn),
4582 &(pinstance->ldn.baddr));
4583
4584 if (pinstance->ldn.msg == NULL || pinstance->ccn.msg == NULL) {
4585 pmcraid_release_hcams(pinstance);
4586 } else {
4587 pinstance->ccn.hcam =
4588 (void *)pinstance->ccn.msg + PMCRAID_AEN_HDR_SIZE;
4589 pinstance->ldn.hcam =
4590 (void *)pinstance->ldn.msg + PMCRAID_AEN_HDR_SIZE;
4591
4592 atomic_set(&pinstance->ccn.ignore, 0);
4593 atomic_set(&pinstance->ldn.ignore, 0);
4594 }
4595
4596 return (pinstance->ldn.msg == NULL) ? -ENOMEM : 0;
4597}
4598
4599/**
4600 * pmcraid_release_config_buffers - release config.table buffers
4601 * @pinstance: pointer to per adapter instance structure
4602 *
4603 * Return Value
4604 * none
4605 */
4606static void pmcraid_release_config_buffers(struct pmcraid_instance *pinstance)
4607{
4608 if (pinstance->cfg_table != NULL &&
4609 pinstance->cfg_table_bus_addr != 0) {
4610 pci_free_consistent(pinstance->pdev,
4611 sizeof(struct pmcraid_config_table),
4612 pinstance->cfg_table,
4613 pinstance->cfg_table_bus_addr);
4614 pinstance->cfg_table = NULL;
4615 pinstance->cfg_table_bus_addr = 0;
4616 }
4617
4618 if (pinstance->res_entries != NULL) {
4619 int i;
4620
4621 for (i = 0; i < PMCRAID_MAX_RESOURCES; i++)
4622 list_del(&pinstance->res_entries[i].queue);
4623 kfree(pinstance->res_entries);
4624 pinstance->res_entries = NULL;
4625 }
4626
4627 pmcraid_release_hcams(pinstance);
4628}
4629
4630/**
4631 * pmcraid_allocate_config_buffers - allocates DMAable memory for config table
4632 * @pinstance : pointer to per adapter instance structure
4633 *
4634 * Return Value
4635 * 0 for successful allocation, -ENOMEM for any failure
4636 */
4637static int __devinit
4638pmcraid_allocate_config_buffers(struct pmcraid_instance *pinstance)
4639{
4640 int i;
4641
4642 pinstance->res_entries =
4643 kzalloc(sizeof(struct pmcraid_resource_entry) *
4644 PMCRAID_MAX_RESOURCES, GFP_KERNEL);
4645
4646 if (NULL == pinstance->res_entries) {
4647 pmcraid_err("failed to allocate memory for resource table\n");
4648 return -ENOMEM;
4649 }
4650
4651 for (i = 0; i < PMCRAID_MAX_RESOURCES; i++)
4652 list_add_tail(&pinstance->res_entries[i].queue,
4653 &pinstance->free_res_q);
4654
4655 pinstance->cfg_table =
4656 pci_alloc_consistent(pinstance->pdev,
4657 sizeof(struct pmcraid_config_table),
4658 &pinstance->cfg_table_bus_addr);
4659
4660 if (NULL == pinstance->cfg_table) {
4661 pmcraid_err("couldn't alloc DMA memory for config table\n");
4662 pmcraid_release_config_buffers(pinstance);
4663 return -ENOMEM;
4664 }
4665
4666 if (pmcraid_allocate_hcams(pinstance)) {
4667 pmcraid_err("could not alloc DMA memory for HCAMS\n");
4668 pmcraid_release_config_buffers(pinstance);
4669 return -ENOMEM;
4670 }
4671
4672 return 0;
4673}
4674
4675/**
4676 * pmcraid_init_tasklets - registers tasklets for response handling
4677 *
4678 * @pinstance: pointer adapter instance structure
4679 *
4680 * Return value
4681 * none
4682 */
4683static void pmcraid_init_tasklets(struct pmcraid_instance *pinstance)
4684{
4685 int i;
4686 for (i = 0; i < pinstance->num_hrrq; i++)
4687 tasklet_init(&pinstance->isr_tasklet[i],
4688 pmcraid_tasklet_function,
4689 (unsigned long)&pinstance->hrrq_vector[i]);
4690}
4691
4692/**
4693 * pmcraid_kill_tasklets - destroys tasklets registered for response handling
4694 *
4695 * @pinstance: pointer to adapter instance structure
4696 *
4697 * Return value
4698 * none
4699 */
4700static void pmcraid_kill_tasklets(struct pmcraid_instance *pinstance)
4701{
4702 int i;
4703 for (i = 0; i < pinstance->num_hrrq; i++)
4704 tasklet_kill(&pinstance->isr_tasklet[i]);
4705}
4706
4707/**
4708 * pmcraid_init_buffers - allocates memory and initializes various structures
4709 * @pinstance: pointer to per adapter instance structure
4710 *
4711 * This routine pre-allocates memory based on the type of block as below:
4712 * cmdblocks(PMCRAID_MAX_CMD): kernel memory using kernel's slab_allocator,
4713 * IOARCBs(PMCRAID_MAX_CMD) : DMAable memory, using pci pool allocator
4714 * config-table entries : DMAable memory using pci_alloc_consistent
4715 * HostRRQs : DMAable memory, using pci_alloc_consistent
4716 *
4717 * Return Value
4718 * 0 in case all of the blocks are allocated, -ENOMEM otherwise.
4719 */
4720static int __devinit pmcraid_init_buffers(struct pmcraid_instance *pinstance)
4721{
4722 int i;
4723
4724 if (pmcraid_allocate_host_rrqs(pinstance)) {
4725 pmcraid_err("couldn't allocate memory for %d host rrqs\n",
4726 pinstance->num_hrrq);
4727 return -ENOMEM;
4728 }
4729
4730 if (pmcraid_allocate_config_buffers(pinstance)) {
4731 pmcraid_err("couldn't allocate memory for config buffers\n");
4732 pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
4733 return -ENOMEM;
4734 }
4735
4736 if (pmcraid_allocate_cmd_blocks(pinstance)) {
4737 pmcraid_err("couldn't allocate memory for cmd blocks\n");
4738 pmcraid_release_config_buffers(pinstance);
4739 pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
4740 return -ENOMEM;
4741 }
4742
4743 if (pmcraid_allocate_control_blocks(pinstance)) {
4744 pmcraid_err("couldn't allocate memory for control blocks\n");
4745 pmcraid_release_config_buffers(pinstance);
4746 pmcraid_release_cmd_blocks(pinstance, PMCRAID_MAX_CMD);
4747 pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
4748 return -ENOMEM;
4749 }
4750
4751 /* Initialize all the command blocks and add them to free pool. No
4752 * need to lock (free_pool_lock) as this is done in initialization
4753 * itself
4754 */
4755 for (i = 0; i < PMCRAID_MAX_CMD; i++) {
4756 struct pmcraid_cmd *cmdp = pinstance->cmd_list[i];
4757 pmcraid_init_cmdblk(cmdp, i);
4758 cmdp->drv_inst = pinstance;
4759 list_add_tail(&cmdp->free_list, &pinstance->free_cmd_pool);
4760 }
4761
4762 return 0;
4763}
4764
4765/**
4766 * pmcraid_reinit_buffers - resets various buffer pointers
4767 * @pinstance: pointer to adapter instance
4768 * Return value
4769 * none
4770 */
4771static void pmcraid_reinit_buffers(struct pmcraid_instance *pinstance)
4772{
4773 int i;
4774 int buffer_size = HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD;
4775
4776 for (i = 0; i < pinstance->num_hrrq; i++) {
4777 memset(pinstance->hrrq_start[i], 0, buffer_size);
4778 pinstance->hrrq_curr[i] = pinstance->hrrq_start[i];
4779 pinstance->hrrq_end[i] =
4780 pinstance->hrrq_start[i] + PMCRAID_MAX_CMD - 1;
4781 pinstance->host_toggle_bit[i] = 1;
4782 }
4783}
4784
4785/**
4786 * pmcraid_init_instance - initialize per instance data structure
4787 * @pdev: pointer to pci device structure
4788 * @host: pointer to Scsi_Host structure
4789 * @mapped_pci_addr: memory mapped IOA configuration registers
4790 *
4791 * Return Value
4792 * 0 on success, non-zero in case of any failure
4793 */
4794static int __devinit pmcraid_init_instance(
4795 struct pci_dev *pdev,
4796 struct Scsi_Host *host,
4797 void __iomem *mapped_pci_addr
4798)
4799{
4800 struct pmcraid_instance *pinstance =
4801 (struct pmcraid_instance *)host->hostdata;
4802
4803 pinstance->host = host;
4804 pinstance->pdev = pdev;
4805
4806 /* Initialize register addresses */
4807 pinstance->mapped_dma_addr = mapped_pci_addr;
4808
4809 /* Initialize chip-specific details */
4810 {
4811 struct pmcraid_chip_details *chip_cfg = pinstance->chip_cfg;
4812 struct pmcraid_interrupts *pint_regs = &pinstance->int_regs;
4813
4814 pinstance->ioarrin = mapped_pci_addr + chip_cfg->ioarrin;
4815
4816 pint_regs->ioa_host_interrupt_reg =
4817 mapped_pci_addr + chip_cfg->ioa_host_intr;
4818 pint_regs->ioa_host_interrupt_clr_reg =
4819 mapped_pci_addr + chip_cfg->ioa_host_intr_clr;
4820 pint_regs->host_ioa_interrupt_reg =
4821 mapped_pci_addr + chip_cfg->host_ioa_intr;
4822 pint_regs->host_ioa_interrupt_clr_reg =
4823 mapped_pci_addr + chip_cfg->host_ioa_intr_clr;
4824
4825 /* Current version of firmware exposes interrupt mask set
4826 * and mask clr registers through memory mapped bar0.
4827 */
4828 pinstance->mailbox = mapped_pci_addr + chip_cfg->mailbox;
4829 pinstance->ioa_status = mapped_pci_addr + chip_cfg->ioastatus;
4830 pint_regs->ioa_host_interrupt_mask_reg =
4831 mapped_pci_addr + chip_cfg->ioa_host_mask;
4832 pint_regs->ioa_host_interrupt_mask_clr_reg =
4833 mapped_pci_addr + chip_cfg->ioa_host_mask_clr;
4834 pint_regs->global_interrupt_mask_reg =
4835 mapped_pci_addr + chip_cfg->global_intr_mask;
4836 }
4837
4838 pinstance->ioa_reset_attempts = 0;
4839 init_waitqueue_head(&pinstance->reset_wait_q);
4840
4841 atomic_set(&pinstance->outstanding_cmds, 0);
4842 atomic_set(&pinstance->expose_resources, 0);
4843
4844 INIT_LIST_HEAD(&pinstance->free_res_q);
4845 INIT_LIST_HEAD(&pinstance->used_res_q);
4846 INIT_LIST_HEAD(&pinstance->free_cmd_pool);
4847 INIT_LIST_HEAD(&pinstance->pending_cmd_pool);
4848
4849 spin_lock_init(&pinstance->free_pool_lock);
4850 spin_lock_init(&pinstance->pending_pool_lock);
4851 spin_lock_init(&pinstance->resource_lock);
4852 mutex_init(&pinstance->aen_queue_lock);
4853
4854 /* Work-queue (shared) for deferred processing of error handling */
4855 INIT_WORK(&pinstance->worker_q, pmcraid_worker_function);
4856
4857 /* Initialize the default log_level */
4858 pinstance->current_log_level = pmcraid_log_level;
4859
4860 /* Setup variables required for reset engine */
4861 pinstance->ioa_state = IOA_STATE_UNKNOWN;
4862 pinstance->reset_cmd = NULL;
4863 return 0;
4864}
4865
4866/**
4867 * pmcraid_release_buffers - release per-adapter buffers allocated
4868 *
4869 * @pinstance: pointer to adapter soft state
4870 *
4871 * Return Value
4872 * none
4873 */
4874static void pmcraid_release_buffers(struct pmcraid_instance *pinstance)
4875{
4876 pmcraid_release_config_buffers(pinstance);
4877 pmcraid_release_control_blocks(pinstance, PMCRAID_MAX_CMD);
4878 pmcraid_release_cmd_blocks(pinstance, PMCRAID_MAX_CMD);
4879 pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
4880
4881}
4882
4883/**
4884 * pmcraid_shutdown - shutdown adapter controller.
4885 * @pdev: pci device struct
4886 *
4887 * Issues an adapter shutdown to the card and waits for its completion
4888 *
4889 * Return value
4890 * none
4891 */
4892static void pmcraid_shutdown(struct pci_dev *pdev)
4893{
4894 struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
4895 pmcraid_reset_bringdown(pinstance);
4896}
4897
4898
4899/**
4900 * pmcraid_get_minor - returns unused minor number from minor number bitmap
4901 */
4902static unsigned short pmcraid_get_minor(void)
4903{
4904 int minor;
4905
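	/* pmcraid_minor is assumed to be a bitmap of at least
	 * PMCRAID_MAX_ADAPTERS bits; note that find_first_zero_bit takes
	 * its size argument in bits
	 */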
4906 minor = find_first_zero_bit(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
4907 __set_bit(minor, pmcraid_minor);
4908 return minor;
4909}
4910
4911/**
4912 * pmcraid_release_minor - releases given minor back to minor number bitmap
4913 */
4914static void pmcraid_release_minor(unsigned short minor)
4915{
4916 __clear_bit(minor, pmcraid_minor);
4917}
4918
4919/**
4920 * pmcraid_setup_chrdev - allocates a minor number and registers a char device
4921 *
4922 * @pinstance: pointer to adapter instance for which to register device
4923 *
4924 * Return value
4925 * 0 in case of success, otherwise non-zero
4926 */
4927static int pmcraid_setup_chrdev(struct pmcraid_instance *pinstance)
4928{
4929 int minor;
4930 int error;
4931
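	/* allocate a minor number, register the cdev and, on success,
	 * create the corresponding /dev/pmcsas<minor> node
	 */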
4932 minor = pmcraid_get_minor();
4933 cdev_init(&pinstance->cdev, &pmcraid_fops);
4934 pinstance->cdev.owner = THIS_MODULE;
4935
4936 error = cdev_add(&pinstance->cdev, MKDEV(pmcraid_major, minor), 1);
4937
4938 if (error)
4939 pmcraid_release_minor(minor);
4940 else
4941 device_create(pmcraid_class, NULL, MKDEV(pmcraid_major, minor),
4942 NULL, "pmcsas%u", minor);
4943 return error;
4944}
4945
4946/**
4947 * pmcraid_release_chrdev - unregisters per-adapter management interface
4948 *
4949 * @pinstance: pointer to adapter instance structure
4950 *
4951 * Return value
4952 * none
4953 */
4954static void pmcraid_release_chrdev(struct pmcraid_instance *pinstance)
4955{
4956 pmcraid_release_minor(MINOR(pinstance->cdev.dev));
4957 device_destroy(pmcraid_class,
4958 MKDEV(pmcraid_major, MINOR(pinstance->cdev.dev)));
4959 cdev_del(&pinstance->cdev);
4960}
4961
4962/**
4963 * pmcraid_remove - IOA hot plug remove entry point
4964 * @pdev: pci device struct
4965 *
4966 * Return value
4967 * none
4968 */
4969static void __devexit pmcraid_remove(struct pci_dev *pdev)
4970{
4971 struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
4972
4973 /* remove the management interface (/dev file) for this device */
4974 pmcraid_release_chrdev(pinstance);
4975
4976 /* remove host template from scsi midlayer */
4977 scsi_remove_host(pinstance->host);
4978
4979 /* block requests from mid-layer */
4980 scsi_block_requests(pinstance->host);
4981
4982 /* initiate shutdown adapter */
4983 pmcraid_shutdown(pdev);
4984
4985 pmcraid_disable_interrupts(pinstance, ~0);
4986 flush_scheduled_work();
4987
4988 pmcraid_kill_tasklets(pinstance);
4989 pmcraid_unregister_interrupt_handler(pinstance);
4990 pmcraid_release_buffers(pinstance);
4991 iounmap(pinstance->mapped_dma_addr);
4992 pci_release_regions(pdev);
4993 scsi_host_put(pinstance->host);
4994 pci_disable_device(pdev);
4995
4996 return;
4997}
4998
4999#ifdef CONFIG_PM
5000/**
5001 * pmcraid_suspend - driver suspend entry point for power management
5002 * @pdev: PCI device structure
5003 * @state: PCI power state to suspend routine
5004 *
5005 * Return Value - 0 always
5006 */
5007static int pmcraid_suspend(struct pci_dev *pdev, pm_message_t state)
5008{
5009 struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
5010
5011 pmcraid_shutdown(pdev);
5012 pmcraid_disable_interrupts(pinstance, ~0);
5013 pmcraid_kill_tasklets(pinstance);
5014 pci_set_drvdata(pinstance->pdev, pinstance);
5015 pmcraid_unregister_interrupt_handler(pinstance);
5016 pci_save_state(pdev);
5017 pci_disable_device(pdev);
5018 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5019
5020 return 0;
5021}
5022
5023/**
5024 * pmcraid_resume - driver resume entry point PCI power management
5025 * @pdev: PCI device structure
5026 *
5027 * Return Value - 0 in case of success. Error code in case of any failure
5028 */
5029static int pmcraid_resume(struct pci_dev *pdev)
5030{
5031 struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
5032 struct Scsi_Host *host = pinstance->host;
5033 int rc;
5034 int hrrqs;
5035
5036 pci_set_power_state(pdev, PCI_D0);
5037 pci_enable_wake(pdev, PCI_D0, 0);
5038 pci_restore_state(pdev);
5039
5040 rc = pci_enable_device(pdev);
5041
5042 if (rc) {
5043 pmcraid_err("pmcraid: Enable device failed\n");
5044 return rc;
5045 }
5046
5047 pci_set_master(pdev);
5048
5049 if ((sizeof(dma_addr_t) == 4) ||
5050 pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
5051 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5052
5053 if (rc == 0)
5054 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
5055
5056 if (rc != 0) {
5057 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
5058 goto disable_device;
5059 }
5060
5061 atomic_set(&pinstance->outstanding_cmds, 0);
5062 hrrqs = pinstance->num_hrrq;
5063 rc = pmcraid_register_interrupt_handler(pinstance);
5064
5065 if (rc) {
5066 pmcraid_err("resume: couldn't register interrupt handlers\n");
5067 rc = -ENODEV;
5068 goto release_host;
5069 }
5070
5071 pmcraid_init_tasklets(pinstance);
5072 pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS);
5073
5074 /* Start with hard reset sequence which brings up IOA to operational
5075 * state as well as completes the reset sequence.
5076 */
5077 pinstance->ioa_hard_reset = 1;
5078
5079 /* Start IOA firmware initialization and bring card to Operational
5080 * state.
5081 */
5082 if (pmcraid_reset_bringup(pinstance)) {
5083 pmcraid_err("couldn't initialize IOA\n");
5084 rc = -ENODEV;
5085 goto release_tasklets;
5086 }
5087
5088 return 0;
5089
5090release_tasklets:
5091 pmcraid_kill_tasklets(pinstance);
5092 pmcraid_unregister_interrupt_handler(pinstance);
5093
5094release_host:
5095 scsi_host_put(host);
5096
5097disable_device:
5098 pci_disable_device(pdev);
5099
5100 return rc;
5101}
5102
5103#else
5104
5105#define pmcraid_suspend NULL
5106#define pmcraid_resume NULL
5107
5108#endif /* CONFIG_PM */
5109
5110/**
5111 * pmcraid_complete_ioa_reset - Called by either timer or tasklet during
5112 * completion of the ioa reset
5113 * @cmd: pointer to reset command block
5114 */
5115static void pmcraid_complete_ioa_reset(struct pmcraid_cmd *cmd)
5116{
5117 struct pmcraid_instance *pinstance = cmd->drv_inst;
5118 unsigned long flags;
5119
5120 spin_lock_irqsave(pinstance->host->host_lock, flags);
5121 pmcraid_ioa_reset(cmd);
5122 spin_unlock_irqrestore(pinstance->host->host_lock, flags);
5123 scsi_unblock_requests(pinstance->host);
5124 schedule_work(&pinstance->worker_q);
5125}
5126
5127/**
5128 * pmcraid_set_supported_devs - sends SET SUPPORTED DEVICES to IOAFP
5129 *
5130 * @cmd: pointer to pmcraid_cmd structure
5131 *
5132 * Return Value
5133 * none
5134 */
5135static void pmcraid_set_supported_devs(struct pmcraid_cmd *cmd)
5136{
5137 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
5138 void (*cmd_done) (struct pmcraid_cmd *) = pmcraid_complete_ioa_reset;
5139
5140 pmcraid_reinit_cmdblk(cmd);
5141
5142 ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
5143 ioarcb->request_type = REQ_TYPE_IOACMD;
5144 ioarcb->cdb[0] = PMCRAID_SET_SUPPORTED_DEVICES;
5145 ioarcb->cdb[1] = ALL_DEVICES_SUPPORTED;
5146
5147 /* If this was called as part of resource table reinitialization due to
5148 * lost CCN, it is enough to return the command block back to free pool
5149 * as part of set_supported_devs completion function.
5150 */
5151 if (cmd->drv_inst->reinit_cfg_table) {
5152 cmd->drv_inst->reinit_cfg_table = 0;
5153 cmd->release = 1;
5154 cmd_done = pmcraid_reinit_cfgtable_done;
5155 }
5156
5157 /* we will be done with the reset sequence after set supported devices,
5158 * setup the done function to return the command block back to free
5159 * pool
5160 */
5161 pmcraid_send_cmd(cmd,
5162 cmd_done,
5163 PMCRAID_SET_SUP_DEV_TIMEOUT,
5164 pmcraid_timeout_handler);
5165 return;
5166}
5167
5168/**
5169 * pmcraid_init_res_table - Initialize the resource table
5170 * @cmd: pointer to pmcraid command struct
5171 *
5172 * This function looks through the existing resource table, comparing
5173 * it with the config table. This function will take care of old/new
5174 * devices and schedule adding/removing them from the mid-layer
5175 * as appropriate.
5176 *
5177 * Return value
5178 * None
5179 */
5180static void pmcraid_init_res_table(struct pmcraid_cmd *cmd)
5181{
5182 struct pmcraid_instance *pinstance = cmd->drv_inst;
5183 struct pmcraid_resource_entry *res, *temp;
5184 struct pmcraid_config_table_entry *cfgte;
5185 unsigned long lock_flags;
5186 int found, rc, i;
5187 LIST_HEAD(old_res);
5188
5189 if (pinstance->cfg_table->flags & MICROCODE_UPDATE_REQUIRED)
5190 dev_err(&pinstance->pdev->dev, "Microcode download required\n");
5191
5192 /* resource list is protected by pinstance->resource_lock.
5193 * init_res_table can be called from probe (user-thread) or runtime
5194 * reset (timer/tasklet)
5195 */
5196 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
5197
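	/* Rebuild in three steps: move every known resource onto old_res,
	 * match old_res against the new config table (moving matches and
	 * fresh entries back to used_res_q), then mark whatever remains on
	 * old_res for deletion from the mid-layer.
	 */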
5198 list_for_each_entry_safe(res, temp, &pinstance->used_res_q, queue)
5199 list_move_tail(&res->queue, &old_res);
5200
5201 for (i = 0; i < pinstance->cfg_table->num_entries; i++) {
5202 cfgte = &pinstance->cfg_table->entries[i];
5203
5204 if (!pmcraid_expose_resource(cfgte))
5205 continue;
5206
5207 found = 0;
5208
5209 /* If this entry was already detected and initialized */
5210 list_for_each_entry_safe(res, temp, &old_res, queue) {
5211
5212 rc = memcmp(&res->cfg_entry.resource_address,
5213 &cfgte->resource_address,
5214 sizeof(cfgte->resource_address));
5215 if (!rc) {
5216 list_move_tail(&res->queue,
5217 &pinstance->used_res_q);
5218 found = 1;
5219 break;
5220 }
5221 }
5222
5223 /* If this is a new entry, initialize it and add it to the queue */
5224 if (!found) {
5225
5226 if (list_empty(&pinstance->free_res_q)) {
5227 dev_err(&pinstance->pdev->dev,
5228 "Too many devices attached\n");
5229 break;
5230 }
5231
5232 found = 1;
5233 res = list_entry(pinstance->free_res_q.next,
5234 struct pmcraid_resource_entry, queue);
5235
5236 res->scsi_dev = NULL;
5237 res->change_detected = RES_CHANGE_ADD;
5238 res->reset_progress = 0;
5239 list_move_tail(&res->queue, &pinstance->used_res_q);
5240 }
5241
5242 /* copy new configuration table entry details into driver
5243 * maintained resource entry
5244 */
5245 if (found) {
5246 memcpy(&res->cfg_entry, cfgte,
5247 sizeof(struct pmcraid_config_table_entry));
5248 pmcraid_info("New res type:%x, vset:%x, addr:%x:\n",
5249 res->cfg_entry.resource_type,
5250 res->cfg_entry.unique_flags1,
5251 le32_to_cpu(res->cfg_entry.resource_address));
5252 }
5253 }
5254
5255 /* Detect any deleted entries, mark them for deletion from mid-layer */
5256 list_for_each_entry_safe(res, temp, &old_res, queue) {
5257
5258 if (res->scsi_dev) {
5259 res->change_detected = RES_CHANGE_DEL;
5260 res->cfg_entry.resource_handle =
5261 PMCRAID_INVALID_RES_HANDLE;
5262 list_move_tail(&res->queue, &pinstance->used_res_q);
5263 } else {
5264 list_move_tail(&res->queue, &pinstance->free_res_q);
5265 }
5266 }
5267
5268 /* release the resource list lock */
5269 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
5270 pmcraid_set_supported_devs(cmd);
5271}
5272
5273/**
5274 * pmcraid_querycfg - Send a Query IOA Config to the adapter.
5275 * @cmd: pointer pmcraid_cmd struct
5276 *
5277 * This function sends a Query IOA Configuration command to the adapter to
5278 * retrieve the IOA configuration table.
5279 *
5280 * Return value:
5281 * none
5282 */
5283static void pmcraid_querycfg(struct pmcraid_cmd *cmd)
5284{
5285 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
5286 struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
5287 struct pmcraid_instance *pinstance = cmd->drv_inst;
5288 __be32 cfg_table_size = cpu_to_be32(sizeof(struct pmcraid_config_table));
5289
5290 ioarcb->request_type = REQ_TYPE_IOACMD;
5291 ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
5292
5293 ioarcb->cdb[0] = PMCRAID_QUERY_IOA_CONFIG;
5294
5295 /* firmware requires 4-byte length field, specified in B.E format */
5296 memcpy(&(ioarcb->cdb[10]), &cfg_table_size, sizeof(cfg_table_size));
5297
5298 /* Since entire config table can be described by single IOADL, it can
5299 * be part of IOARCB itself
5300 */
5301 ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
5302 offsetof(struct pmcraid_ioarcb,
5303 add_data.u.ioadl[0]));
5304 ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
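	/* keep the IOARCB bus address 32-byte aligned; the low five bits
	 * are presumably reserved for use by the firmware
	 */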
5305 ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
5306
5307 ioarcb->request_flags0 |= NO_LINK_DESCS;
5308 ioarcb->data_transfer_length =
5309 cpu_to_le32(sizeof(struct pmcraid_config_table));
5310
5311 ioadl = &(ioarcb->add_data.u.ioadl[0]);
5312 ioadl->flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC);
5313 ioadl->address = cpu_to_le64(pinstance->cfg_table_bus_addr);
5314 ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_config_table));
5315
5316 pmcraid_send_cmd(cmd, pmcraid_init_res_table,
5317 PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
5318}
5319
5320
5321/**
5322 * pmcraid_probe - PCI probe entry point for PMC MaxRaid controller driver
5323 * @pdev: pointer to pci device structure
5324 * @dev_id: pointer to device ids structure
5325 *
5326 * Return Value
5327 * returns 0 if the device is claimed and successfully configured.
5328 * returns non-zero error code in case of any failure
5329 */
5330static int __devinit pmcraid_probe(
5331 struct pci_dev *pdev,
5332 const struct pci_device_id *dev_id
5333)
5334{
5335 struct pmcraid_instance *pinstance;
5336 struct Scsi_Host *host;
5337 void __iomem *mapped_pci_addr;
5338 int rc = PCIBIOS_SUCCESSFUL;
5339
5340 if (atomic_read(&pmcraid_adapter_count) >= PMCRAID_MAX_ADAPTERS) {
5341 pmcraid_err
5342 ("maximum number(%d) of supported adapters reached\n",
5343 atomic_read(&pmcraid_adapter_count));
5344 return -ENOMEM;
5345 }
5346
5347 atomic_inc(&pmcraid_adapter_count);
5348 rc = pci_enable_device(pdev);
5349
5350 if (rc) {
5351 dev_err(&pdev->dev, "Cannot enable adapter\n");
5352 atomic_dec(&pmcraid_adapter_count);
5353 return rc;
5354 }
5355
5356 dev_info(&pdev->dev,
5357 "Found new IOA(%x:%x), Total IOA count: %d\n",
5358 pdev->vendor, pdev->device,
5359 atomic_read(&pmcraid_adapter_count));
5360
5361 rc = pci_request_regions(pdev, PMCRAID_DRIVER_NAME);
5362
5363 if (rc < 0) {
5364 dev_err(&pdev->dev,
5365 "Couldn't register memory range of registers\n");
5366 goto out_disable_device;
5367 }
5368
5369 mapped_pci_addr = pci_iomap(pdev, 0, 0);
5370
5371 if (!mapped_pci_addr) {
5372 dev_err(&pdev->dev, "Couldn't map PCI registers memory\n");
5373 rc = -ENOMEM;
5374 goto out_release_regions;
5375 }
5376
5377 pci_set_master(pdev);
5378
5379 /* Firmware requires the system bus address of IOARCB to be within
5380 * 32-bit addressable range though it has 64-bit IOARRIN register.
5381 * However, firmware supports 64-bit streaming DMA buffers, whereas
5382 * coherent buffers are to be 32-bit. Since pci_alloc_consistent always
5383 * returns memory within 4GB (if not, change this logic), coherent
5384 * buffers are within firmware acceptable address ranges.
5385 */
5386 if ((sizeof(dma_addr_t) == 4) ||
5387 pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
5388 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5389
5390 /* firmware expects 32-bit DMA addresses for IOARRIN register; set 32
5391 * bit mask for pci_alloc_consistent to return addresses within 4GB
5392 */
5393 if (rc == 0)
5394 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
5395
5396 if (rc != 0) {
5397 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
5398 goto cleanup_nomem;
5399 }
5400
5401 host = scsi_host_alloc(&pmcraid_host_template,
5402 sizeof(struct pmcraid_instance));
5403
5404 if (!host) {
5405 dev_err(&pdev->dev, "scsi_host_alloc failed!\n");
5406 rc = -ENOMEM;
5407 goto cleanup_nomem;
5408 }
5409
5410 host->max_id = PMCRAID_MAX_NUM_TARGETS_PER_BUS;
5411 host->max_lun = PMCRAID_MAX_NUM_LUNS_PER_TARGET;
5412 host->unique_id = host->host_no;
5413 host->max_channel = PMCRAID_MAX_BUS_TO_SCAN;
5414 host->max_cmd_len = PMCRAID_MAX_CDB_LEN;
5415
5416 /* zero out entire instance structure */
5417 pinstance = (struct pmcraid_instance *)host->hostdata;
5418 memset(pinstance, 0, sizeof(*pinstance));
5419
5420 pinstance->chip_cfg =
5421 (struct pmcraid_chip_details *)(dev_id->driver_data);
5422
5423 rc = pmcraid_init_instance(pdev, host, mapped_pci_addr);
5424
5425 if (rc < 0) {
5426 dev_err(&pdev->dev, "failed to initialize adapter instance\n");
5427 goto out_scsi_host_put;
5428 }
5429
5430 pci_set_drvdata(pdev, pinstance);
5431
5432 /* Save PCI config-space for use following the reset */
5433 rc = pci_save_state(pinstance->pdev);
5434
5435 if (rc != 0) {
5436 dev_err(&pdev->dev, "Failed to save PCI config space\n");
5437 goto out_scsi_host_put;
5438 }
5439
5440 pmcraid_disable_interrupts(pinstance, ~0);
5441
5442 rc = pmcraid_register_interrupt_handler(pinstance);
5443
5444 if (rc) {
5445 pmcraid_err("couldn't register interrupt handler\n");
5446 goto out_scsi_host_put;
5447 }
5448
5449 pmcraid_init_tasklets(pinstance);
5450
5451	/* allocate various buffers used by LLD */
5452 rc = pmcraid_init_buffers(pinstance);
5453
5454 if (rc) {
5455 pmcraid_err("couldn't allocate memory blocks\n");
5456 goto out_unregister_isr;
5457 }
5458
5459 /* check the reset type required */
5460 pmcraid_reset_type(pinstance);
5461
5462 pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS);
5463
5464 /* Start IOA firmware initialization and bring card to Operational
5465 * state.
5466 */
5467 pmcraid_info("starting IOA initialization sequence\n");
5468 if (pmcraid_reset_bringup(pinstance)) {
5469		pmcraid_err("couldn't initialize IOA\n");
5470 rc = 1;
5471 goto out_release_bufs;
5472 }
5473
5474 /* Add adapter instance into mid-layer list */
5475 rc = scsi_add_host(pinstance->host, &pdev->dev);
5476 if (rc != 0) {
5477 pmcraid_err("couldn't add host into mid-layer: %d\n", rc);
5478 goto out_release_bufs;
5479 }
5480
5481 scsi_scan_host(pinstance->host);
5482
5483 rc = pmcraid_setup_chrdev(pinstance);
5484
5485 if (rc != 0) {
5486 pmcraid_err("couldn't create mgmt interface, error: %x\n",
5487 rc);
5488 goto out_remove_host;
5489 }
5490
5491	/* Schedule worker thread to handle CCN and take care of adding/removing
5492	 * devices to/from the OS
5493 */
5494 atomic_set(&pinstance->expose_resources, 1);
5495 schedule_work(&pinstance->worker_q);
5496 return rc;
5497
5498out_remove_host:
5499 scsi_remove_host(host);
5500
5501out_release_bufs:
5502 pmcraid_release_buffers(pinstance);
5503
5504out_unregister_isr:
5505 pmcraid_kill_tasklets(pinstance);
5506 pmcraid_unregister_interrupt_handler(pinstance);
5507
5508out_scsi_host_put:
5509 scsi_host_put(host);
5510
5511cleanup_nomem:
5512 iounmap(mapped_pci_addr);
5513
5514out_release_regions:
5515 pci_release_regions(pdev);
5516
5517out_disable_device:
5518 atomic_dec(&pmcraid_adapter_count);
5519 pci_set_drvdata(pdev, NULL);
5520 pci_disable_device(pdev);
5521 return -ENODEV;
5522}
5523
5524/*
5525 * PCI driver structure of pmcraid driver
5526 */
5527static struct pci_driver pmcraid_driver = {
5528 .name = PMCRAID_DRIVER_NAME,
5529 .id_table = pmcraid_pci_table,
5530 .probe = pmcraid_probe,
5531 .remove = pmcraid_remove,
5532 .suspend = pmcraid_suspend,
5533 .resume = pmcraid_resume,
5534 .shutdown = pmcraid_shutdown
5535};
5536
5537
5538/**
5539 * pmcraid_init - module load entry point
5540 */
5541static int __init pmcraid_init(void)
5542{
5543 dev_t dev;
5544 int error;
5545
5546 pmcraid_info("%s Device Driver version: %s %s\n",
5547 PMCRAID_DRIVER_NAME,
5548 PMCRAID_DRIVER_VERSION, PMCRAID_DRIVER_DATE);
5549
5550 error = alloc_chrdev_region(&dev, 0,
5551 PMCRAID_MAX_ADAPTERS,
5552 PMCRAID_DEVFILE);
5553
5554 if (error) {
5555 pmcraid_err("failed to get a major number for adapters\n");
5556 goto out_init;
5557 }
5558
5559 pmcraid_major = MAJOR(dev);
5560 pmcraid_class = class_create(THIS_MODULE, PMCRAID_DEVFILE);
5561
5562 if (IS_ERR(pmcraid_class)) {
5563 error = PTR_ERR(pmcraid_class);
5564		pmcraid_err("failed to register with sysfs, error = %x\n",
5565 error);
5566 goto out_unreg_chrdev;
5567 }
5568
5569
5570 error = pmcraid_netlink_init();
5571
5572 if (error)
5573 goto out_unreg_chrdev;
5574
5575 error = pci_register_driver(&pmcraid_driver);
5576
5577 if (error == 0)
5578 goto out_init;
5579
5580 pmcraid_err("failed to register pmcraid driver, error = %x\n",
5581 error);
5582 class_destroy(pmcraid_class);
5583 pmcraid_netlink_release();
5584
5585out_unreg_chrdev:
5586 unregister_chrdev_region(MKDEV(pmcraid_major, 0), PMCRAID_MAX_ADAPTERS);
5587out_init:
5588 return error;
5589}
5590
5591/**
5592 * pmcraid_exit - module unload entry point
5593 */
5594static void __exit pmcraid_exit(void)
5595{
5596 pmcraid_netlink_release();
5597 class_destroy(pmcraid_class);
5598 unregister_chrdev_region(MKDEV(pmcraid_major, 0),
5599 PMCRAID_MAX_ADAPTERS);
5600 pci_unregister_driver(&pmcraid_driver);
5601}
5602
5603module_init(pmcraid_init);
5604module_exit(pmcraid_exit);
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
new file mode 100644
index 000000000000..614b3a764fed
--- /dev/null
+++ b/drivers/scsi/pmcraid.h
@@ -0,0 +1,1029 @@
1/*
2 * pmcraid.h -- PMC Sierra MaxRAID controller driver header file
3 *
4 * Copyright (C) 2008, 2009 PMC Sierra Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#ifndef _PMCRAID_H
22#define _PMCRAID_H
23
24#include <linux/version.h>
25#include <linux/types.h>
26#include <linux/completion.h>
27#include <linux/list.h>
28#include <scsi/scsi.h>
29#include <linux/kref.h>
30#include <scsi/scsi_cmnd.h>
31#include <linux/cdev.h>
32#include <net/netlink.h>
33#include <net/genetlink.h>
34#include <linux/connector.h>
35/*
36 * Driver name : string representing the driver name
37 * Device file : /dev file to be used for management interfaces
38 * Driver version: version string in major_version.minor_version.patch format
39 * Driver date : date information in "Mon dd yyyy" format
40 */
41#define PMCRAID_DRIVER_NAME "PMC MaxRAID"
42#define PMCRAID_DEVFILE "pmcsas"
43#define PMCRAID_DRIVER_VERSION "1.0.2"
44#define PMCRAID_DRIVER_DATE __DATE__
45
46/* Maximum number of adapters supported by current version of the driver */
47#define PMCRAID_MAX_ADAPTERS 1024
48
49/* Bit definitions as per firmware, bit position [0][1][2].....[31] */
50#define PMC_BIT8(n) (1 << (7-n))
51#define PMC_BIT16(n) (1 << (15-n))
52#define PMC_BIT32(n) (1 << (31-n))
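
/* For example, firmware-style numbering counts from the most significant
 * bit, so PMC_BIT32(0) evaluates to 0x80000000 and PMC_BIT8(7) to 0x01.
 */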
53
54/* PMC PCI vendor ID and device ID values */
55#define PCI_VENDOR_ID_PMC 0x11F8
56#define PCI_DEVICE_ID_PMC_MAXRAID 0x5220
57
58/*
59 * MAX_CMD : maximum commands that can be outstanding with IOA
60 * MAX_IO_CMD : command blocks available for IO commands
61 * MAX_HCAM_CMD : command blocks available for HCAMs
62 * MAX_INTERNAL_CMD : command blocks available for internal commands like reset
63 */
64#define PMCRAID_MAX_CMD 1024
65#define PMCRAID_MAX_IO_CMD 1020
66#define PMCRAID_MAX_HCAM_CMD 2
67#define PMCRAID_MAX_INTERNAL_CMD 2
68
69/* MAX_IOADLS : max number of scatter-gather descriptors supported by IOA
70 * IOADLS_INTERNAL : number of ioadls included as part of IOARCB.
71 * IOADLS_EXTERNAL : number of ioadls allocated external to IOARCB
72 */
73#define PMCRAID_IOADLS_INTERNAL 27
74#define PMCRAID_IOADLS_EXTERNAL 37
75#define PMCRAID_MAX_IOADLS PMCRAID_IOADLS_INTERNAL
76
77/* HRRQ_ENTRY_SIZE : size of a single HRRQ entry
78 * IOARCB_ALIGNMENT : alignment required for IOARCB
79 * IOADL_ALIGNMENT : alignment requirement for IOADLs
80 * MSIX_VECTORS : number of MSIX vectors supported
81 */
82#define HRRQ_ENTRY_SIZE sizeof(__le32)
83#define PMCRAID_IOARCB_ALIGNMENT 32
84#define PMCRAID_IOADL_ALIGNMENT 16
85#define PMCRAID_IOASA_ALIGNMENT 4
86#define PMCRAID_NUM_MSIX_VECTORS 1
87
88/* various other limits */
89#define PMCRAID_VENDOR_ID_LEN 8
90#define PMCRAID_PRODUCT_ID_LEN 16
91#define PMCRAID_SERIAL_NUM_LEN 8
92#define PMCRAID_LUN_LEN 8
93#define PMCRAID_MAX_CDB_LEN 16
94#define PMCRAID_DEVICE_ID_LEN 8
95#define PMCRAID_SENSE_DATA_LEN 256
96#define PMCRAID_ADD_CMD_PARAM_LEN 48
97
98#define PMCRAID_MAX_BUS_TO_SCAN 1
99#define PMCRAID_MAX_NUM_TARGETS_PER_BUS 256
100#define PMCRAID_MAX_NUM_LUNS_PER_TARGET 8
101
102/* IOA bus/target/lun number of IOA resources */
103#define PMCRAID_IOA_BUS_ID 0xfe
104#define PMCRAID_IOA_TARGET_ID 0xff
105#define PMCRAID_IOA_LUN_ID 0xff
106#define PMCRAID_VSET_BUS_ID 0x1
107#define PMCRAID_VSET_LUN_ID 0x0
108#define PMCRAID_PHYS_BUS_ID 0x0
109#define PMCRAID_VIRTUAL_ENCL_BUS_ID 0x8
110#define PMCRAID_MAX_VSET_TARGETS 240
111#define PMCRAID_MAX_VSET_LUNS_PER_TARGET 8
112
113#define PMCRAID_IOA_MAX_SECTORS 32767
114#define PMCRAID_VSET_MAX_SECTORS 512
115#define PMCRAID_MAX_CMD_PER_LUN 254
116
117/* Number of configuration table entries (resources) */
118#define PMCRAID_MAX_NUM_OF_VSETS 240
119
120/* TODO: check max limit for Phase 1 */
121#define PMCRAID_MAX_NUM_OF_PHY_DEVS 256
122
123/* MAX_NUM_OF_DEVS includes 1 FP, 1 Dummy Enclosure device */
124#define PMCRAID_MAX_NUM_OF_DEVS \
125 (PMCRAID_MAX_NUM_OF_VSETS + PMCRAID_MAX_NUM_OF_PHY_DEVS + 2)
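
/* With the limits above this evaluates to 240 + 256 + 2 = 498 entries */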
126
127#define PMCRAID_MAX_RESOURCES PMCRAID_MAX_NUM_OF_DEVS
128
129/* Adapter Commands used by driver */
130#define PMCRAID_QUERY_RESOURCE_STATE 0xC2
131#define PMCRAID_RESET_DEVICE 0xC3
132/* options to select reset target */
133#define ENABLE_RESET_MODIFIER 0x80
134#define RESET_DEVICE_LUN 0x40
135#define RESET_DEVICE_TARGET 0x20
136#define RESET_DEVICE_BUS 0x10
137
138#define PMCRAID_IDENTIFY_HRRQ 0xC4
139#define PMCRAID_QUERY_IOA_CONFIG 0xC5
140#define PMCRAID_QUERY_CMD_STATUS 0xCB
141#define PMCRAID_ABORT_CMD 0xC7
142
143/* CANCEL ALL command, provides option for setting SYNC_COMPLETE
144 * on the target resources for which commands got cancelled
145 */
146#define PMCRAID_CANCEL_ALL_REQUESTS 0xCE
147#define PMCRAID_SYNC_COMPLETE_AFTER_CANCEL PMC_BIT8(0)
148
149/* HCAM command and types of HCAM supported by IOA */
150#define PMCRAID_HOST_CONTROLLED_ASYNC 0xCF
151#define PMCRAID_HCAM_CODE_CONFIG_CHANGE 0x01
152#define PMCRAID_HCAM_CODE_LOG_DATA 0x02
153
154/* IOA shutdown command and various shutdown types */
155#define PMCRAID_IOA_SHUTDOWN 0xF7
156#define PMCRAID_SHUTDOWN_NORMAL 0x00
157#define PMCRAID_SHUTDOWN_PREPARE_FOR_NORMAL 0x40
158#define PMCRAID_SHUTDOWN_NONE 0x100
159#define PMCRAID_SHUTDOWN_ABBREV 0x80
160
161/* SET SUPPORTED DEVICES command and the option to select all the
162 * devices to be supported
163 */
164#define PMCRAID_SET_SUPPORTED_DEVICES 0xFB
165#define ALL_DEVICES_SUPPORTED PMC_BIT8(0)
166
167/* This option is used with SCSI WRITE_BUFFER command */
168#define PMCRAID_WR_BUF_DOWNLOAD_AND_SAVE 0x05
169
170/* IOASC Codes used by driver */
171#define PMCRAID_IOASC_SENSE_MASK 0xFFFFFF00
172#define PMCRAID_IOASC_SENSE_KEY(ioasc) ((ioasc) >> 24)
173#define PMCRAID_IOASC_SENSE_CODE(ioasc) (((ioasc) & 0x00ff0000) >> 16)
174#define PMCRAID_IOASC_SENSE_QUAL(ioasc) (((ioasc) & 0x0000ff00) >> 8)
175#define PMCRAID_IOASC_SENSE_STATUS(ioasc) ((ioasc) & 0x000000ff)
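
/* Worked example: for an ioasc of 0x06290000 (bus was reset),
 * PMCRAID_IOASC_SENSE_KEY gives 0x06 (UNIT ATTENTION), SENSE_CODE gives
 * 0x29 and SENSE_QUAL gives 0x00.
 */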
176
177#define PMCRAID_IOASC_GOOD_COMPLETION 0x00000000
178#define PMCRAID_IOASC_NR_INIT_CMD_REQUIRED 0x02040200
179#define PMCRAID_IOASC_NR_IOA_RESET_REQUIRED 0x02048000
180#define PMCRAID_IOASC_NR_SYNC_REQUIRED 0x023F0000
181#define PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC 0x03110C00
182#define PMCRAID_IOASC_HW_CANNOT_COMMUNICATE 0x04050000
183#define PMCRAID_IOASC_HW_DEVICE_TIMEOUT 0x04080100
184#define PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR 0x04448500
185#define PMCRAID_IOASC_HW_IOA_RESET_REQUIRED 0x04448600
186#define PMCRAID_IOASC_IR_INVALID_RESOURCE_HANDLE 0x05250000
187#define PMCRAID_IOASC_AC_TERMINATED_BY_HOST 0x0B5A0000
188#define PMCRAID_IOASC_UA_BUS_WAS_RESET 0x06290000
189#define PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER 0x06298000
190
191/* Driver defined IOASCs */
192#define PMCRAID_IOASC_IOA_WAS_RESET 0x10000001
193#define PMCRAID_IOASC_PCI_ACCESS_ERROR 0x10000002
194
195/* Various timeout values (in milliseconds) used. If any of these are chip
196 * specific, move them to pmcraid_chip_details structure.
197 */
198#define PMCRAID_PCI_DEASSERT_TIMEOUT 2000
199#define PMCRAID_BIST_TIMEOUT 2000
200#define PMCRAID_AENWAIT_TIMEOUT 5000
201#define PMCRAID_TRANSOP_TIMEOUT 60000
202
203#define PMCRAID_RESET_TIMEOUT (2 * HZ)
204#define PMCRAID_CHECK_FOR_RESET_TIMEOUT ((HZ / 10))
205#define PMCRAID_VSET_IO_TIMEOUT (60 * HZ)
206#define PMCRAID_INTERNAL_TIMEOUT (60 * HZ)
207#define PMCRAID_SHUTDOWN_TIMEOUT (150 * HZ)
208#define PMCRAID_RESET_BUS_TIMEOUT (60 * HZ)
209#define PMCRAID_RESET_HOST_TIMEOUT (150 * HZ)
210#define PMCRAID_REQUEST_SENSE_TIMEOUT (30 * HZ)
211#define PMCRAID_SET_SUP_DEV_TIMEOUT (2 * 60 * HZ)
212
213/* structure to represent a scatter-gather element (IOADL descriptor) */
214struct pmcraid_ioadl_desc {
215 __le64 address;
216 __le32 data_len;
217 __u8 reserved[3];
218 __u8 flags;
219} __attribute__((packed, aligned(PMCRAID_IOADL_ALIGNMENT)));
220
221/* pmcraid_ioadl_desc.flags values */
222#define IOADL_FLAGS_CHAINED PMC_BIT8(0)
223#define IOADL_FLAGS_LAST_DESC PMC_BIT8(1)
224#define IOADL_FLAGS_READ_LAST PMC_BIT8(1)
225#define IOADL_FLAGS_WRITE_LAST PMC_BIT8(1)
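
/* A minimal sketch (illustrative, not used elsewhere in the driver) of how
 * a single-element scatter-gather descriptor would be filled for a buffer
 * already mapped to bus address 'dma' with length 'len'; the helper name
 * is hypothetical.
 */
static inline void pmcraid_fill_single_ioadl(struct pmcraid_ioadl_desc *desc,
					     dma_addr_t dma, u32 len)
{
	desc->address = cpu_to_le64(dma);
	desc->data_len = cpu_to_le32(len);
	desc->flags = IOADL_FLAGS_LAST_DESC;	/* sole, hence last, element */
}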
226
227
228/* additional IOARCB data, which can be a CDB, additional request parameters
229 * or a list of IOADLs. Firmware supports a max of 512 bytes for the IOARCB;
230 * its fixed portion occupies 80 bytes, so internal IOADLs are limited to
231 * (512 - 80) / 16 = 27. More than 27 are used in chained form.
232 */
233struct pmcraid_ioarcb_add_data {
234 union {
235 struct pmcraid_ioadl_desc ioadl[PMCRAID_IOADLS_INTERNAL];
236 __u8 add_cmd_params[PMCRAID_ADD_CMD_PARAM_LEN];
237 } u;
238};
239
240/*
241 * IOA Request Control Block
242 */
243struct pmcraid_ioarcb {
244 __le64 ioarcb_bus_addr;
245 __le32 resource_handle;
246 __le32 response_handle;
247 __le64 ioadl_bus_addr;
248 __le32 ioadl_length;
249 __le32 data_transfer_length;
250 __le64 ioasa_bus_addr;
251 __le16 ioasa_len;
252 __le16 cmd_timeout;
253 __le16 add_cmd_param_offset;
254 __le16 add_cmd_param_length;
255 __le32 reserved1[2];
256 __le32 reserved2;
257 __u8 request_type;
258 __u8 request_flags0;
259 __u8 request_flags1;
260 __u8 hrrq_id;
261 __u8 cdb[PMCRAID_MAX_CDB_LEN];
262 struct pmcraid_ioarcb_add_data add_data;
263} __attribute__((packed, aligned(PMCRAID_IOARCB_ALIGNMENT)));
264
265/* well known resource handle values */
266#define PMCRAID_IOA_RES_HANDLE 0xffffffff
267#define PMCRAID_INVALID_RES_HANDLE 0
268
269/* pmcraid_ioarcb.request_type values */
270#define REQ_TYPE_SCSI 0x00
271#define REQ_TYPE_IOACMD 0x01
272#define REQ_TYPE_HCAM 0x02
273
274/* pmcraid_ioarcb.flags0 values */
275#define TRANSFER_DIR_WRITE PMC_BIT8(0)
276#define INHIBIT_UL_CHECK PMC_BIT8(2)
277#define SYNC_OVERRIDE PMC_BIT8(3)
278#define SYNC_COMPLETE PMC_BIT8(4)
279#define NO_LINK_DESCS PMC_BIT8(5)
280
281/* pmcraid_ioarcb.flags1 values */
282#define DELAY_AFTER_RESET PMC_BIT8(0)
283#define TASK_TAG_SIMPLE 0x10
284#define TASK_TAG_ORDERED 0x20
285#define TASK_TAG_QUEUE_HEAD 0x30
286
287/* toggle bit offset in response handle */
288#define HRRQ_TOGGLE_BIT 0x01
289#define HRRQ_RESPONSE_BIT 0x02
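
/* Sketch (an assumption about entry layout, mirroring how the ISR consumes
 * responses): given an HRRQ entry already converted to CPU byte order, the
 * entry is a valid response when its toggle bit matches the toggle value the
 * host currently expects and the response bit is set. The helper name is
 * hypothetical.
 */
static inline int pmcraid_hrrq_entry_valid(u32 entry, u8 expected_toggle)
{
	return ((entry & HRRQ_TOGGLE_BIT) == expected_toggle) &&
	       ((entry & HRRQ_RESPONSE_BIT) != 0);
}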
290
291/* IOA Status Area */
292struct pmcraid_ioasa_vset {
293 __le32 failing_lba_hi;
294 __le32 failing_lba_lo;
295 __le32 reserved;
296} __attribute__((packed, aligned(4)));
297
298struct pmcraid_ioasa {
299 __le32 ioasc;
300 __le16 returned_status_length;
301 __le16 available_status_length;
302 __le32 residual_data_length;
303 __le32 ilid;
304 __le32 fd_ioasc;
305 __le32 fd_res_address;
306 __le32 fd_res_handle;
307 __le32 reserved;
308
309 /* resource specific sense information */
310 union {
311 struct pmcraid_ioasa_vset vset;
312 } u;
313
314 /* IOA autosense data */
315 __le16 auto_sense_length;
316 __le16 error_data_length;
317 __u8 sense_data[PMCRAID_SENSE_DATA_LEN];
318} __attribute__((packed, aligned(4)));
319
320#define PMCRAID_DRIVER_ILID 0xffffffff
321
322/* Config Table Entry per Resource */
323struct pmcraid_config_table_entry {
324 __u8 resource_type;
325 __u8 bus_protocol;
326 __le16 array_id;
327 __u8 common_flags0;
328 __u8 common_flags1;
329 __u8 unique_flags0;
330 __u8 unique_flags1; /*also used as vset target_id */
331 __le32 resource_handle;
332 __le32 resource_address;
333 __u8 device_id[PMCRAID_DEVICE_ID_LEN];
334 __u8 lun[PMCRAID_LUN_LEN];
335} __attribute__((packed, aligned(4)));
336
337/* resource types (config_table_entry.resource_type values) */
338#define RES_TYPE_AF_DASD 0x00
339#define RES_TYPE_GSCSI 0x01
340#define RES_TYPE_VSET 0x02
341#define RES_TYPE_IOA_FP 0xFF
342
343#define RES_IS_IOA(res) ((res).resource_type == RES_TYPE_IOA_FP)
344#define RES_IS_GSCSI(res) ((res).resource_type == RES_TYPE_GSCSI)
345#define RES_IS_VSET(res) ((res).resource_type == RES_TYPE_VSET)
346#define RES_IS_AFDASD(res) ((res).resource_type == RES_TYPE_AF_DASD)
347
348/* bus_protocol values used by driver */
349#define RES_TYPE_VENCLOSURE 0x8
350
351/* config_table_entry.common_flags0 */
352#define MULTIPATH_RESOURCE PMC_BIT32(0)
353
354/* unique_flags1 */
355#define IMPORT_MODE_MANUAL PMC_BIT8(0)
356
357/* well known resource handle values */
358#define RES_HANDLE_IOA 0xFFFFFFFF
359#define RES_HANDLE_NONE 0x00000000
360
361/* well known resource address values */
362#define RES_ADDRESS_IOAFP 0xFEFFFFFF
363#define RES_ADDRESS_INVALID 0xFFFFFFFF
364
365/* BUS/TARGET/LUN values from resource_address */
366#define RES_BUS(res_addr) (le32_to_cpu(res_addr) & 0xFF)
367#define RES_TARGET(res_addr) ((le32_to_cpu(res_addr) >> 16) & 0xFF)
368#define RES_LUN(res_addr) 0x0
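
/* Example: a resource_address of cpu_to_le32(0x00100001) decodes to
 * RES_BUS == 0x01 and RES_TARGET == 0x10; RES_LUN is always 0.
 */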
369
370/* configuration table structure */
371struct pmcraid_config_table {
372 __le16 num_entries;
373 __u8 table_format;
374 __u8 reserved1;
375 __u8 flags;
376 __u8 reserved2[11];
377 struct pmcraid_config_table_entry entries[PMCRAID_MAX_RESOURCES];
378} __attribute__((packed, aligned(4)));
379
380/* config_table.flags value */
381#define MICROCODE_UPDATE_REQUIRED PMC_BIT32(0)
382
383/*
384 * HCAM format
385 */
386#define PMCRAID_HOSTRCB_LDNSIZE 4056
387
388/* Error log notification format */
389struct pmcraid_hostrcb_error {
390 __le32 fd_ioasc;
391 __le32 fd_ra;
392 __le32 fd_rh;
393 __le32 prc;
394 union {
395 __u8 data[PMCRAID_HOSTRCB_LDNSIZE];
396 } u;
397} __attribute__ ((packed, aligned(4)));
398
399struct pmcraid_hcam_hdr {
400 __u8 op_code;
401 __u8 notification_type;
402 __u8 notification_lost;
403 __u8 flags;
404 __u8 overlay_id;
405 __u8 reserved1[3];
406 __le32 ilid;
407 __le32 timestamp1;
408 __le32 timestamp2;
409 __le32 data_len;
410} __attribute__((packed, aligned(4)));
411
412#define PMCRAID_AEN_GROUP 0x3
413
414struct pmcraid_hcam_ccn {
415 struct pmcraid_hcam_hdr header;
416 struct pmcraid_config_table_entry cfg_entry;
417} __attribute__((packed, aligned(4)));
418
419struct pmcraid_hcam_ldn {
420 struct pmcraid_hcam_hdr header;
421 struct pmcraid_hostrcb_error error_log;
422} __attribute__((packed, aligned(4)));
423
424/* pmcraid_hcam.op_code values */
425#define HOSTRCB_TYPE_CCN 0xE1
426#define HOSTRCB_TYPE_LDN 0xE2
427
428/* pmcraid_hcam.notification_type values */
429#define NOTIFICATION_TYPE_ENTRY_CHANGED 0x0
430#define NOTIFICATION_TYPE_ENTRY_NEW 0x1
431#define NOTIFICATION_TYPE_ENTRY_DELETED 0x2
432#define NOTIFICATION_TYPE_ERROR_LOG 0x10
433#define NOTIFICATION_TYPE_INFORMATION_LOG 0x11
434
435#define HOSTRCB_NOTIFICATIONS_LOST PMC_BIT8(0)
436
437/* pmcraid_hcam.flags values */
438#define HOSTRCB_INTERNAL_OP_ERROR PMC_BIT8(0)
439#define HOSTRCB_ERROR_RESPONSE_SENT PMC_BIT8(1)
440
441/* pmcraid_hcam.overlay_id values */
442#define HOSTRCB_OVERLAY_ID_08 0x08
443#define HOSTRCB_OVERLAY_ID_09 0x09
444#define HOSTRCB_OVERLAY_ID_11 0x11
445#define HOSTRCB_OVERLAY_ID_12 0x12
446#define HOSTRCB_OVERLAY_ID_13 0x13
447#define HOSTRCB_OVERLAY_ID_14 0x14
448#define HOSTRCB_OVERLAY_ID_16 0x16
449#define HOSTRCB_OVERLAY_ID_17 0x17
450#define HOSTRCB_OVERLAY_ID_20 0x20
451#define HOSTRCB_OVERLAY_ID_FF 0xFF
452
453/* Implementation specific card details */
454struct pmcraid_chip_details {
455 /* hardware register offsets */
456 unsigned long ioastatus;
457 unsigned long ioarrin;
458 unsigned long mailbox;
459 unsigned long global_intr_mask;
460 unsigned long ioa_host_intr;
461 unsigned long ioa_host_intr_clr;
462 unsigned long ioa_host_mask;
463 unsigned long ioa_host_mask_clr;
464 unsigned long host_ioa_intr;
465 unsigned long host_ioa_intr_clr;
466
467	/* timeout used during transition to operational state */
468 unsigned long transop_timeout;
469};
470
471/* IOA to HOST doorbells (interrupts) */
472#define INTRS_TRANSITION_TO_OPERATIONAL PMC_BIT32(0)
473#define INTRS_IOARCB_TRANSFER_FAILED PMC_BIT32(3)
474#define INTRS_IOA_UNIT_CHECK PMC_BIT32(4)
475#define INTRS_NO_HRRQ_FOR_CMD_RESPONSE PMC_BIT32(5)
476#define INTRS_CRITICAL_OP_IN_PROGRESS PMC_BIT32(6)
477#define INTRS_IO_DEBUG_ACK PMC_BIT32(7)
478#define INTRS_IOARRIN_LOST PMC_BIT32(27)
479#define INTRS_SYSTEM_BUS_MMIO_ERROR PMC_BIT32(28)
480#define INTRS_IOA_PROCESSOR_ERROR PMC_BIT32(29)
481#define INTRS_HRRQ_VALID PMC_BIT32(30)
482#define INTRS_OPERATIONAL_STATUS PMC_BIT32(0)
483
484/* Host to IOA Doorbells */
485#define DOORBELL_RUNTIME_RESET PMC_BIT32(1)
486#define DOORBELL_IOA_RESET_ALERT PMC_BIT32(7)
487#define DOORBELL_IOA_DEBUG_ALERT PMC_BIT32(9)
488#define DOORBELL_ENABLE_DESTRUCTIVE_DIAGS PMC_BIT32(8)
489#define DOORBELL_IOA_START_BIST PMC_BIT32(23)
490#define DOORBELL_RESET_IOA PMC_BIT32(31)
491
492/* Global interrupt mask register value */
493#define GLOBAL_INTERRUPT_MASK 0x4ULL
494
495#define PMCRAID_ERROR_INTERRUPTS (INTRS_IOARCB_TRANSFER_FAILED | \
496 INTRS_IOA_UNIT_CHECK | \
497 INTRS_NO_HRRQ_FOR_CMD_RESPONSE | \
498 INTRS_IOARRIN_LOST | \
499 INTRS_SYSTEM_BUS_MMIO_ERROR | \
500 INTRS_IOA_PROCESSOR_ERROR)
501
502#define PMCRAID_PCI_INTERRUPTS (PMCRAID_ERROR_INTERRUPTS | \
503 INTRS_HRRQ_VALID | \
504 INTRS_CRITICAL_OP_IN_PROGRESS |\
505 INTRS_TRANSITION_TO_OPERATIONAL)
506
507/* control_block, associated with each command, contains the IOARCB, IOADLs
508 * and memory for the IOASA. An additional 3 * 16 bytes are allocated to hold
509 * additional request parameters (of max size 48 bytes) for any command.
510 */
511struct pmcraid_control_block {
512 struct pmcraid_ioarcb ioarcb;
513 struct pmcraid_ioadl_desc ioadl[PMCRAID_IOADLS_EXTERNAL + 3];
514 struct pmcraid_ioasa ioasa;
515} __attribute__ ((packed, aligned(PMCRAID_IOARCB_ALIGNMENT)));
516
517/* pmcraid_sglist - Scatter-gather list allocated for passthrough ioctls
518 */
519struct pmcraid_sglist {
520 u32 order;
521 u32 num_sg;
522 u32 num_dma_sg;
523 u32 buffer_len;
524 struct scatterlist scatterlist[1];
525};
526
527/* pmcraid_cmd - LLD representation of SCSI command */
528struct pmcraid_cmd {
529
530	/* Ptr and bus address of DMA-able control block for this command */
531 struct pmcraid_control_block *ioa_cb;
532 dma_addr_t ioa_cb_bus_addr;
533
534 /* sense buffer for REQUEST SENSE command if firmware is not sending
535 * auto sense data
536 */
537 dma_addr_t sense_buffer_dma;
538 dma_addr_t dma_handle;
539 u8 *sense_buffer;
540
541 /* pointer to mid layer structure of SCSI commands */
542 struct scsi_cmnd *scsi_cmd;
543
544 struct list_head free_list;
545 struct completion wait_for_completion;
546 struct timer_list timer; /* needed for internal commands */
547 u32 timeout; /* current timeout value */
548 u32 index; /* index into the command list */
549 u8 completion_req; /* for handling internal commands */
550 u8 release; /* for handling completions */
551
552 void (*cmd_done) (struct pmcraid_cmd *);
553 struct pmcraid_instance *drv_inst;
554
555 struct pmcraid_sglist *sglist; /* used for passthrough IOCTLs */
556
557 /* scratch used during reset sequence */
558 union {
559 unsigned long time_left;
560 struct pmcraid_resource_entry *res;
561 } u;
562};
563
564/*
565 * Interrupt registers of IOA
566 */
567struct pmcraid_interrupts {
568 void __iomem *ioa_host_interrupt_reg;
569 void __iomem *ioa_host_interrupt_clr_reg;
570 void __iomem *ioa_host_interrupt_mask_reg;
571 void __iomem *ioa_host_interrupt_mask_clr_reg;
572 void __iomem *global_interrupt_mask_reg;
573 void __iomem *host_ioa_interrupt_reg;
574 void __iomem *host_ioa_interrupt_clr_reg;
575};
576
577/* ISR parameters; LLD allocates one per MSI-X vector (if MSI-X is enabled) */
578struct pmcraid_isr_param {
579 u8 hrrq_id; /* hrrq entry index */
580 u16 vector; /* allocated msi-x vector */
581 struct pmcraid_instance *drv_inst;
582};
583
584/* AEN message header sent as part of event data to applications */
585struct pmcraid_aen_msg {
586 u32 hostno;
587 u32 length;
588 u8 reserved[8];
589 u8 data[0];
590};
591
592struct pmcraid_hostrcb {
593 struct pmcraid_instance *drv_inst;
594 struct pmcraid_aen_msg *msg;
595 struct pmcraid_hcam_hdr *hcam; /* pointer to hcam buffer */
596 struct pmcraid_cmd *cmd; /* pointer to command block used */
597 dma_addr_t baddr; /* system address of hcam buffer */
598 atomic_t ignore; /* process HCAM response ? */
599};
600
601#define PMCRAID_AEN_HDR_SIZE sizeof(struct pmcraid_aen_msg)
602
603
604
605/*
606 * Per adapter structure maintained by LLD
607 */
608struct pmcraid_instance {
609 /* Array of allowed-to-be-exposed resources, initialized from
610	 * Configuration Table, later updated with CCNs
611 */
612 struct pmcraid_resource_entry *res_entries;
613
614 struct list_head free_res_q; /* res_entries lists for easy lookup */
615 struct list_head used_res_q; /* List of to be exposed resources */
616 spinlock_t resource_lock; /* spinlock to protect resource list */
617
618 void __iomem *mapped_dma_addr;
619 void __iomem *ioa_status; /* Iomapped IOA status register */
620 void __iomem *mailbox; /* Iomapped mailbox register */
621	void __iomem *ioarrin;		/* Iomapped IOARRIN register */
622
623 struct pmcraid_interrupts int_regs;
624 struct pmcraid_chip_details *chip_cfg;
625
626 /* HostRCBs needed for HCAM */
627 struct pmcraid_hostrcb ldn;
628 struct pmcraid_hostrcb ccn;
629
630
631 /* Bus address of start of HRRQ */
632 dma_addr_t hrrq_start_bus_addr[PMCRAID_NUM_MSIX_VECTORS];
633
634 /* Pointer to 1st entry of HRRQ */
635 __be32 *hrrq_start[PMCRAID_NUM_MSIX_VECTORS];
636
637 /* Pointer to last entry of HRRQ */
638 __be32 *hrrq_end[PMCRAID_NUM_MSIX_VECTORS];
639
640	/* Pointer to the current entry of HRRQ */
641 __be32 *hrrq_curr[PMCRAID_NUM_MSIX_VECTORS];
642
643 /* Lock for HRRQ access */
644 spinlock_t hrrq_lock[PMCRAID_NUM_MSIX_VECTORS];
645
646 /* Expected toggle bit at host */
647 u8 host_toggle_bit[PMCRAID_NUM_MSIX_VECTORS];
648
649	/* Number of Reset IOA retries. IOA is marked dead if threshold is exceeded */
650 u8 ioa_reset_attempts;
651#define PMCRAID_RESET_ATTEMPTS 3
652
653 /* Wait Q for threads to wait for Reset IOA completion */
654 wait_queue_head_t reset_wait_q;
655 struct pmcraid_cmd *reset_cmd;
656
657 /* structures for supporting SIGIO based AEN. */
658 struct fasync_struct *aen_queue;
659 struct mutex aen_queue_lock; /* lock for aen subscribers list */
660 struct cdev cdev;
661
662 struct Scsi_Host *host; /* mid layer interface structure handle */
663 struct pci_dev *pdev; /* PCI device structure handle */
664
665 u8 current_log_level; /* default level for logging IOASC errors */
666
667 u8 num_hrrq; /* Number of interrupt vectors allocated */
668 dev_t dev; /* Major-Minor numbers for Char device */
669
670 /* Used as ISR handler argument */
671 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
672
673 /* configuration table */
674 struct pmcraid_config_table *cfg_table;
675 dma_addr_t cfg_table_bus_addr;
676
677 /* structures related to command blocks */
678 struct kmem_cache *cmd_cachep; /* cache for cmd blocks */
679 struct pci_pool *control_pool; /* pool for control blocks */
680 char cmd_pool_name[64]; /* name of cmd cache */
681 char ctl_pool_name[64]; /* name of control cache */
682
683 struct pmcraid_cmd *cmd_list[PMCRAID_MAX_CMD];
684
685 struct list_head free_cmd_pool;
686 struct list_head pending_cmd_pool;
687 spinlock_t free_pool_lock; /* free pool lock */
688 spinlock_t pending_pool_lock; /* pending pool lock */
689
690 /* No of IO commands pending with FW */
691 atomic_t outstanding_cmds;
692
693	/* should add/delete resources to mid-layer now? */
694 atomic_t expose_resources;
695
696 /* Tasklet to handle deferred processing */
697 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
698
699 /* Work-queue (Shared) for deferred reset processing */
700 struct work_struct worker_q;
701
702
703 u32 ioa_state:4; /* For IOA Reset sequence FSM */
704#define IOA_STATE_OPERATIONAL 0x0
705#define IOA_STATE_UNKNOWN 0x1
706#define IOA_STATE_DEAD 0x2
707#define IOA_STATE_IN_SOFT_RESET 0x3
708#define IOA_STATE_IN_HARD_RESET 0x4
709#define IOA_STATE_IN_RESET_ALERT 0x5
710#define IOA_STATE_IN_BRINGDOWN 0x6
711#define IOA_STATE_IN_BRINGUP 0x7
712
713 u32 ioa_reset_in_progress:1; /* true if IOA reset is in progress */
714 u32 ioa_hard_reset:1; /* TRUE if Hard Reset is needed */
715 u32 ioa_unit_check:1; /* Indicates Unit Check condition */
716 u32 ioa_bringdown:1; /* whether IOA needs to be brought down */
717 u32 force_ioa_reset:1; /* force adapter reset ? */
718 u32 reinit_cfg_table:1; /* reinit config table due to lost CCN */
719 u32 ioa_shutdown_type:2;/* shutdown type used during reset */
720#define SHUTDOWN_NONE 0x0
721#define SHUTDOWN_NORMAL 0x1
722#define SHUTDOWN_ABBREV 0x2
723
724};
725
726/* LLD maintained resource entry structure */
727struct pmcraid_resource_entry {
728 struct list_head queue; /* link to "to be exposed" resources */
729 struct pmcraid_config_table_entry cfg_entry;
730 struct scsi_device *scsi_dev; /* Link scsi_device structure */
731 atomic_t read_failures; /* count of failed READ commands */
732 atomic_t write_failures; /* count of failed WRITE commands */
733
734 /* To indicate add/delete/modify during CCN */
735 u8 change_detected;
736#define RES_CHANGE_ADD 0x1 /* add this to mid-layer */
737#define RES_CHANGE_DEL 0x2 /* remove this from mid-layer */
738
739 u8 reset_progress; /* Device is resetting */
740
741 /*
742 * When IOA asks for sync (i.e. IOASC = Not Ready, Sync Required), this
743	 * flag will be set and mid layer will be asked to retry. In the next
744 * attempt, this flag will be checked in queuecommand() to set
745 * SYNC_COMPLETE flag in IOARCB (flag_0).
746 */
747 u8 sync_reqd;
748
749 /* target indicates the mapped target_id assigned to this resource if
750	 * this is a VSET resource. For non-VSET resources this is unused or
751	 * zero.
752 */
753 u8 target;
754};
755
756/* Data structures used in IOASC error code logging */
757struct pmcraid_ioasc_error {
758 u32 ioasc_code; /* IOASC code */
759 u8 log_level; /* default log level assignment. */
760 char *error_string;
761};
762
763/* Initial log_level assignments for various IOASCs */
764#define IOASC_LOG_LEVEL_NONE 0x0 /* no logging */
765#define IOASC_LOG_LEVEL_MUST 0x1 /* must log: all high-severity errors */
766#define IOASC_LOG_LEVEL_HARD 0x2 /* optional: low-severity errors */
767
768/* Error information maintained by LLD. LLD initializes the pmcraid_error_table
769 * statically.
770 */
771static struct pmcraid_ioasc_error pmcraid_ioasc_error_table[] = {
772 {0x01180600, IOASC_LOG_LEVEL_MUST,
773 "Recovered Error, soft media error, sector reassignment suggested"},
774 {0x015D0000, IOASC_LOG_LEVEL_MUST,
775	 "Recovered Error, failure prediction threshold exceeded"},
776	{0x015D9200, IOASC_LOG_LEVEL_MUST,
777	 "Recovered Error, soft Cache Card Battery error threshold"},
780 {0x02048000, IOASC_LOG_LEVEL_MUST,
781 "Not Ready, IOA Reset Required"},
782 {0x02408500, IOASC_LOG_LEVEL_MUST,
783 "Not Ready, IOA microcode download required"},
784 {0x03110B00, IOASC_LOG_LEVEL_MUST,
785 "Medium Error, data unreadable, reassignment suggested"},
786 {0x03110C00, IOASC_LOG_LEVEL_MUST,
787 "Medium Error, data unreadable do not reassign"},
788 {0x03310000, IOASC_LOG_LEVEL_MUST,
789 "Medium Error, media corrupted"},
790 {0x04050000, IOASC_LOG_LEVEL_MUST,
791 "Hardware Error, IOA can't communicate with device"},
792 {0x04080000, IOASC_LOG_LEVEL_MUST,
793 "Hardware Error, device bus error"},
794 {0x04080000, IOASC_LOG_LEVEL_MUST,
795 "Hardware Error, device bus is not functioning"},
796 {0x04118000, IOASC_LOG_LEVEL_MUST,
797 "Hardware Error, IOA reserved area data check"},
798 {0x04118100, IOASC_LOG_LEVEL_MUST,
799 "Hardware Error, IOA reserved area invalid data pattern"},
800 {0x04118200, IOASC_LOG_LEVEL_MUST,
801 "Hardware Error, IOA reserved area LRC error"},
802 {0x04320000, IOASC_LOG_LEVEL_MUST,
803 "Hardware Error, reassignment space exhausted"},
804 {0x04330000, IOASC_LOG_LEVEL_MUST,
805 "Hardware Error, data transfer underlength error"},
806 {0x04330000, IOASC_LOG_LEVEL_MUST,
807 "Hardware Error, data transfer overlength error"},
808 {0x04418000, IOASC_LOG_LEVEL_MUST,
809 "Hardware Error, PCI bus error"},
810 {0x04440000, IOASC_LOG_LEVEL_MUST,
811 "Hardware Error, device error"},
812 {0x04448300, IOASC_LOG_LEVEL_MUST,
813 "Hardware Error, undefined device response"},
814 {0x04448400, IOASC_LOG_LEVEL_MUST,
815 "Hardware Error, IOA microcode error"},
816 {0x04448600, IOASC_LOG_LEVEL_MUST,
817 "Hardware Error, IOA reset required"},
818 {0x04449200, IOASC_LOG_LEVEL_MUST,
819	 "Hardware Error, hard Cache Feature Card Battery error"},
820 {0x0444A000, IOASC_LOG_LEVEL_MUST,
821 "Hardware Error, failed device altered"},
822 {0x0444A200, IOASC_LOG_LEVEL_MUST,
823 "Hardware Error, data check after reassignment"},
824 {0x0444A300, IOASC_LOG_LEVEL_MUST,
825 "Hardware Error, LRC error after reassignment"},
826 {0x044A0000, IOASC_LOG_LEVEL_MUST,
827 "Hardware Error, device bus error (msg/cmd phase)"},
828 {0x04670400, IOASC_LOG_LEVEL_MUST,
829 "Hardware Error, new device can't be used"},
830 {0x04678000, IOASC_LOG_LEVEL_MUST,
831 "Hardware Error, invalid multiadapter configuration"},
832 {0x04678100, IOASC_LOG_LEVEL_MUST,
833 "Hardware Error, incorrect connection between enclosures"},
834 {0x04678200, IOASC_LOG_LEVEL_MUST,
835 "Hardware Error, connections exceed IOA design limits"},
836 {0x04678300, IOASC_LOG_LEVEL_MUST,
837 "Hardware Error, incorrect multipath connection"},
838 {0x04679000, IOASC_LOG_LEVEL_MUST,
839 "Hardware Error, command to LUN failed"},
840 {0x064C8000, IOASC_LOG_LEVEL_HARD,
841 "Unit Attention, cache exists for missing/failed device"},
842 {0x06670100, IOASC_LOG_LEVEL_HARD,
843 "Unit Attention, incompatible exposed mode device"},
844 {0x06670600, IOASC_LOG_LEVEL_HARD,
845 "Unit Attention, attachment of logical unit failed"},
846 {0x06678000, IOASC_LOG_LEVEL_MUST,
847 "Unit Attention, cables exceed connective design limit"},
848 {0x06678300, IOASC_LOG_LEVEL_MUST,
849	 "Unit Attention, incomplete multipath connection between " \
850 "IOA and enclosure"},
851 {0x06678400, IOASC_LOG_LEVEL_MUST,
852	 "Unit Attention, incomplete multipath connection between " \
853 "device and enclosure"},
854 {0x06678500, IOASC_LOG_LEVEL_MUST,
855	 "Unit Attention, incomplete multipath connection between " \
856 "IOA and remote IOA"},
857 {0x06678600, IOASC_LOG_LEVEL_HARD,
858 "Unit Attention, missing remote IOA"},
859 {0x06679100, IOASC_LOG_LEVEL_HARD,
860	 "Unit Attention, enclosure doesn't support required multipath " \
861 "function"},
862 {0x06698200, IOASC_LOG_LEVEL_HARD,
863 "Unit Attention, corrupt array parity detected on device"},
864 {0x066B0200, IOASC_LOG_LEVEL_MUST,
865 "Unit Attention, array exposed"},
866 {0x066B8200, IOASC_LOG_LEVEL_HARD,
867 "Unit Attention, exposed array is still protected"},
868 {0x066B9200, IOASC_LOG_LEVEL_MUST,
869 "Unit Attention, Multipath redundancy level got worse"},
870 {0x07270000, IOASC_LOG_LEVEL_HARD,
871 "Data Protect, device is read/write protected by IOA"},
872 {0x07278000, IOASC_LOG_LEVEL_HARD,
873 "Data Protect, IOA doesn't support device attribute"},
874 {0x07278100, IOASC_LOG_LEVEL_HARD,
875 "Data Protect, NVRAM mirroring prohibited"},
876 {0x07278400, IOASC_LOG_LEVEL_MUST,
877 "Data Protect, array is short 2 or more devices"},
878 {0x07278600, IOASC_LOG_LEVEL_MUST,
879 "Data Protect, exposed array is short a required device"},
880 {0x07278700, IOASC_LOG_LEVEL_MUST,
881 "Data Protect, array members not at required addresses"},
882 {0x07278800, IOASC_LOG_LEVEL_MUST,
883 "Data Protect, exposed mode device resource address conflict"},
884 {0x07278900, IOASC_LOG_LEVEL_MUST,
885 "Data Protect, incorrect resource address of exposed mode device"},
886 {0x07278A00, IOASC_LOG_LEVEL_MUST,
887 "Data Protect, Array is missing a device and parity is out of sync"},
888 {0x07278B00, IOASC_LOG_LEVEL_MUST,
889 "Data Protect, maximum number of arrays already exist"},
890 {0x07278C00, IOASC_LOG_LEVEL_HARD,
891 "Data Protect, cannot locate cache data for device"},
892 {0x07278D00, IOASC_LOG_LEVEL_HARD,
893	 "Data Protect, cache data exists for a changed device"},
894 {0x07279100, IOASC_LOG_LEVEL_MUST,
895 "Data Protect, detection of a device requiring format"},
896 {0x07279200, IOASC_LOG_LEVEL_MUST,
897 "Data Protect, IOA exceeds maximum number of devices"},
898 {0x07279600, IOASC_LOG_LEVEL_MUST,
899 "Data Protect, missing array, volume set is not functional"},
900 {0x07279700, IOASC_LOG_LEVEL_MUST,
901 "Data Protect, single device for a volume set"},
902 {0x07279800, IOASC_LOG_LEVEL_MUST,
903 "Data Protect, missing multiple devices for a volume set"},
904 {0x07279900, IOASC_LOG_LEVEL_HARD,
905	 "Data Protect, maximum number of volume sets already exists"},
906 {0x07279A00, IOASC_LOG_LEVEL_MUST,
907 "Data Protect, other volume set problem"},
908};
909
910/* macros to help in debugging */
911#define pmcraid_err(...) \
912 printk(KERN_ERR "MaxRAID: "__VA_ARGS__)
913
914#define pmcraid_info(...) \
915	do { if (pmcraid_debug_log) \
916		printk(KERN_INFO "MaxRAID: "__VA_ARGS__); } while (0)
917
918/* check if given command is a SCSI READ or SCSI WRITE command */
919#define SCSI_READ_CMD 0x1 /* any of SCSI READ commands */
920#define SCSI_WRITE_CMD 0x2 /* any of SCSI WRITE commands */
921#define SCSI_CMD_TYPE(opcode) \
922({ u8 op = opcode; u8 __type = 0;\
923 if (op == READ_6 || op == READ_10 || op == READ_12 || op == READ_16)\
924 __type = SCSI_READ_CMD;\
925 else if (op == WRITE_6 || op == WRITE_10 || op == WRITE_12 || \
926 op == WRITE_16)\
927 __type = SCSI_WRITE_CMD;\
928 __type;\
929})
930
931#define IS_SCSI_READ_WRITE(opcode) \
932({ u8 __type = SCSI_CMD_TYPE(opcode); \
933 (__type == SCSI_READ_CMD || __type == SCSI_WRITE_CMD) ? 1 : 0;\
934})
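
/* Example: SCSI_CMD_TYPE(READ_10) evaluates to SCSI_READ_CMD, hence
 * IS_SCSI_READ_WRITE(READ_10) is 1, while IS_SCSI_READ_WRITE(INQUIRY) is 0.
 */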
935
936
937/*
938 * pmcraid_ioctl_header - definition of header structure that precedes all the
939 * buffers given as ioctl arguments.
940 *
941 * .signature : always ASCII string, "PMCRAID"
942 * .reserved : not used
943 * .buffer_length : length of the buffer following the header
944 */
945struct pmcraid_ioctl_header {
946 u8 signature[8];
947 u32 reserved;
948 u32 buffer_length;
949};
950
951#define PMCRAID_IOCTL_SIGNATURE "PMCRAID"
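
/* Sketch of how a management application might fill the header before an
 * ioctl (illustrative only; 'payload_len' stands for whatever buffer length
 * the caller sends after the header):
 *
 *	struct pmcraid_ioctl_header hdr;
 *
 *	memcpy(hdr.signature, PMCRAID_IOCTL_SIGNATURE, sizeof(hdr.signature));
 *	hdr.reserved = 0;
 *	hdr.buffer_length = payload_len;
 */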
952
953
954/*
955 * pmcraid_event_details - defines AEN details that apps can retrieve from LLD
956 *
957 * .rcb_ccn - complete RCB of CCN
958 * .rcb_ldn - complete RCB of LDN
959 */
960struct pmcraid_event_details {
961 struct pmcraid_hcam_ccn rcb_ccn;
962 struct pmcraid_hcam_ldn rcb_ldn;
963};
964
965/*
966 * pmcraid_driver_ioctl_buffer - structure passed as argument to most of the
967 * PMC driver handled ioctls.
968 */
969struct pmcraid_driver_ioctl_buffer {
970 struct pmcraid_ioctl_header ioctl_header;
971 struct pmcraid_event_details event_details;
972};
973
974/*
975 * pmcraid_passthrough_ioctl_buffer - structure given as argument to
976 * passthrough (or firmware-handled) IOCTL commands. Note that ioarcb requires
977 * 32-byte alignment, so it is necessary to pack this structure to avoid any
978 * holes between ioctl_header and passthrough buffer
979 *
980 * .ioctl_header : ioctl header
981 * .ioarcb : filled-up ioarcb buffer, driver always reads this buffer
982 * .ioasa : buffer for ioasa, driver fills this with IOASA from firmware
983 * .request_buffer: The I/O buffer (flat), driver reads/writes to this based on
984 * the transfer directions passed in ioarcb.flags0. Contents
985 * of this buffer are valid only when ioarcb.data_transfer_len
986 * is not zero.
987 */
988struct pmcraid_passthrough_ioctl_buffer {
989 struct pmcraid_ioctl_header ioctl_header;
990 struct pmcraid_ioarcb ioarcb;
991 struct pmcraid_ioasa ioasa;
992 u8 request_buffer[1];
993} __attribute__ ((packed));
994
995/*
996 * keys to differentiate between driver handled IOCTLs and passthrough
997 * IOCTLs passed to IOA. driver determines the ioctl type using macro
998 * _IOC_TYPE
999 */
1000#define PMCRAID_DRIVER_IOCTL 'D'
1001#define PMCRAID_PASSTHROUGH_IOCTL 'F'
1002
1003#define DRV_IOCTL(n, size) \
1004 _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_DRIVER_IOCTL, (n), (size))
1005
1006#define FMW_IOCTL(n, size) \
1007 _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_PASSTHROUGH_IOCTL, (n), (size))
1008
1009/*
1010 * _ARGSIZE: macro that gives size of the argument type passed to an IOCTL cmd.
1011 * This helps applications avoid unnecessary memory allocations.
1012 * For example, most of driver handled ioctls do not require ioarcb, ioasa.
1013 */
1014#define _ARGSIZE(arg) (sizeof(struct pmcraid_ioctl_header) + sizeof(arg))
1015
1016/* Driver handled IOCTL command definitions */
1017
1018#define PMCRAID_IOCTL_RESET_ADAPTER \
1019 DRV_IOCTL(5, sizeof(struct pmcraid_ioctl_header))
1020
1021/* passthrough/firmware handled commands */
1022#define PMCRAID_IOCTL_PASSTHROUGH_COMMAND \
1023 FMW_IOCTL(1, sizeof(struct pmcraid_passthrough_ioctl_buffer))
1024
1025#define PMCRAID_IOCTL_DOWNLOAD_MICROCODE \
1026 FMW_IOCTL(2, sizeof(struct pmcraid_passthrough_ioctl_buffer))
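
/* Userspace sketch (illustrative; the device node name is an assumption
 * based on PMCRAID_DEVFILE, and error handling is mostly omitted) of
 * requesting an adapter reset through the character device:
 *
 *	struct pmcraid_ioctl_header hdr = { .buffer_length = 0 };
 *	int fd = open("/dev/pmcsas0", O_RDWR);
 *
 *	memcpy(hdr.signature, PMCRAID_IOCTL_SIGNATURE, sizeof(hdr.signature));
 *	if (ioctl(fd, PMCRAID_IOCTL_RESET_ADAPTER, &hdr) < 0)
 *		perror("PMCRAID_IOCTL_RESET_ADAPTER");
 *	close(fd);
 */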
1027
1028
1029#endif /* _PMCRAID_H */