Diffstat (limited to 'drivers/scsi/hpsa.h')
 drivers/scsi/hpsa.h | 61 +++++++++++++++++++++++++++++--------------------------------
 1 file changed, 29 insertions(+), 32 deletions(-)
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 8e06d9e280ec..657713050349 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -32,7 +32,6 @@ struct access_method {
 	void (*submit_command)(struct ctlr_info *h,
 		struct CommandList *c);
 	void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
-	unsigned long (*fifo_full)(struct ctlr_info *h);
 	bool (*intr_pending)(struct ctlr_info *h);
 	unsigned long (*command_completed)(struct ctlr_info *h, u8 q);
 };
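
Note: access_method is the per-controller ops vector; hpsa stores it by value in struct ctlr_info as h->access and dispatches through it rather than branching on controller type. With the fifo_full slot gone, nothing polls a fullness hook at submit time any more. A minimal sketch of the dispatch pattern (the wrapper function is hypothetical; the h->access call style is the driver's own):

	/* Sketch: calling through the controller's ops table. */
	static void start_io_sketch(struct ctlr_info *h, struct CommandList *c)
	{
		h->access.submit_command(h, c);		/* post to hardware */
		if (h->access.intr_pending(h))		/* completion waiting? */
			(void) h->access.command_completed(h, 0 /* reply queue 0 */);
	}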
@@ -47,6 +46,11 @@ struct hpsa_scsi_dev_t {
 	unsigned char model[16];	/* bytes 16-31 of inquiry data */
 	unsigned char raid_level;	/* from inquiry page 0xC1 */
 	unsigned char volume_offline;	/* discovered via TUR or VPD */
+	u16 queue_depth;		/* max queue_depth for this device */
+	atomic_t ioaccel_cmds_out;	/* Only used for physical devices;
+					 * counts commands sent to the physical
+					 * device via the "ioaccel" path.
+					 */
 	u32 ioaccel_handle;
 	int offload_config;		/* I/O accel RAID offload configured */
 	int offload_enabled;		/* I/O accel RAID offload enabled */
@@ -55,6 +59,15 @@ struct hpsa_scsi_dev_t {
 	 */
 	struct raid_map_data raid_map;	/* I/O accelerator RAID map */
 
+	/*
+	 * Pointers from logical drive map indices to the phys drives that
+	 * make up those logical drives.  Note that multiple logical drives
+	 * may share physical drives.  For instance, you can have 5 physical
+	 * drives with 3 logical drives each using those same 5 physical
+	 * disks.  We need these pointers for counting I/Os out to physical
+	 * devices in order to honor physical device queue depth limits.
+	 */
+	struct hpsa_scsi_dev_t *phys_disk[RAID_MAP_MAX_ENTRIES];
 };
 
 struct reply_queue_buffer {
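
The three new fields cooperate: queue_depth caps each physical device, ioaccel_cmds_out counts its in-flight ioaccel commands, and phys_disk[] maps RAID-map entries back to the physical devices. A hedged sketch of the accounting the comment describes (helper name and fallback policy are assumptions, not the driver's code):

	/* Sketch: reserve a slot on the physical drive backing a RAID map
	 * entry before taking the ioaccel path; undo on over-commit.
	 */
	static bool ioaccel_reserve_slot(struct hpsa_scsi_dev_t *logical, int map_index)
	{
		struct hpsa_scsi_dev_t *phys = logical->phys_disk[map_index];

		if (atomic_inc_return(&phys->ioaccel_cmds_out) > phys->queue_depth) {
			atomic_dec(&phys->ioaccel_cmds_out);	/* over the limit */
			return false;	/* caller falls back to the normal RAID path */
		}
		return true;
	}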
@@ -115,9 +128,12 @@ struct ctlr_info {
 	void __iomem *vaddr;
 	unsigned long paddr;
 	int	nr_cmds; /* Number of commands allowed on this controller */
+#define HPSA_CMDS_RESERVED_FOR_ABORTS 2
+#define HPSA_CMDS_RESERVED_FOR_DRIVER 1
 	struct CfgTable __iomem *cfgtable;
 	int	interrupts_enabled;
 	int	max_commands;
+	int	last_allocation;
 	atomic_t commands_outstanding;
 #	define PERF_MODE_INT	0
 #	define DOORBELL_INT	1
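
The two reserve macros set aside command slots out of nr_cmds so that aborts and the driver's own internal commands can always be allocated. Presumably the depth exposed for ordinary I/O shrinks by the same amount; a sketch of that arithmetic (the helper is hypothetical):

	/* Sketch: command slots left for regular I/O after reservations. */
	static int usable_cmds_sketch(struct ctlr_info *h)
	{
		return h->nr_cmds - HPSA_CMDS_RESERVED_FOR_ABORTS
				  - HPSA_CMDS_RESERVED_FOR_DRIVER;
	}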
@@ -131,8 +147,6 @@ struct ctlr_info {
 	char hba_mode_enabled;
 
 	/* queue and queue Info */
-	struct list_head reqQ;
-	struct list_head cmpQ;
 	unsigned int Qdepth;
 	unsigned int maxSG;
 	spinlock_t lock;
@@ -168,9 +182,8 @@ struct ctlr_info {
 	unsigned long transMethod;
 
 	/* cap concurrent passthrus at some reasonable maximum */
-#define HPSA_MAX_CONCURRENT_PASSTHRUS (20)
-	spinlock_t passthru_count_lock; /* protects passthru_count */
-	int passthru_count;
+#define HPSA_MAX_CONCURRENT_PASSTHRUS (10)
+	atomic_t passthru_cmds_avail;
 
 	/*
 	 * Performant mode completion buffers
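
Swapping the lock-protected passthru_count for atomic_t passthru_cmds_avail makes passthru admission lock-free: seed the counter with HPSA_MAX_CONCURRENT_PASSTHRUS, take a ticket on entry, return it on completion. A sketch of the usual shape (the surrounding ioctl handler and the error choice are assumed):

	/* Sketch: lock-free gate on concurrent passthru commands.
	 * atomic_dec_if_positive() refuses to go below zero, so at most
	 * HPSA_MAX_CONCURRENT_PASSTHRUS callers hold a slot at once.
	 */
	if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
		return -EAGAIN;			/* all passthru slots busy */
	/* ... build and issue the passthru command ... */
	atomic_inc(&h->passthru_cmds_avail);	/* release the slot */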
@@ -194,8 +207,8 @@ struct ctlr_info {
 	atomic_t firmware_flash_in_progress;
 	u32 __percpu *lockup_detected;
 	struct delayed_work monitor_ctlr_work;
+	struct delayed_work rescan_ctlr_work;
 	int remove_in_progress;
-	u32 fifo_recently_full;
 	/* Address of h->q[x] is passed to intr handler to know which queue */
 	u8 q[MAX_REPLY_QUEUES];
 	u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
@@ -237,8 +250,9 @@ struct ctlr_info {
 	spinlock_t offline_device_lock;
 	struct list_head offline_device_list;
 	int	acciopath_status;
-	int	drv_req_rescan;	/* flag for driver to request rescan event */
 	int	raid_offload_debug;
+	struct workqueue_struct *resubmit_wq;
+	struct workqueue_struct *rescan_ctlr_wq;
 };
 
 struct offline_device_entry {
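
Together with rescan_ctlr_work above, the dedicated rescan_ctlr_wq (plus a resubmit_wq for retried commands) replaces the removed drv_req_rescan flag: rather than a flag polled by the monitor worker, rescans become self-rearming delayed work on their own queue. A sketch of that shape (worker body and interval are assumptions):

	/* Sketch: periodic rescan as self-rearming delayed work. */
	static void rescan_ctlr_worker_sketch(struct work_struct *work)
	{
		struct ctlr_info *h = container_of(to_delayed_work(work),
						   struct ctlr_info, rescan_ctlr_work);

		/* ... look for topology changes, rescan SCSI targets ... */
		queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
				   HZ * 30 /* assumed interval */);
	}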
@@ -297,6 +311,8 @@ struct offline_device_entry {
  */
 #define SA5_DOORBELL	0x20
 #define SA5_REQUEST_PORT_OFFSET	0x40
+#define SA5_REQUEST_PORT64_LO_OFFSET 0xC0
+#define SA5_REQUEST_PORT64_HI_OFFSET 0xC4
 #define SA5_REPLY_INTR_MASK_OFFSET	0x34
 #define SA5_REPLY_PORT_OFFSET		0x44
 #define SA5_INTR_STATUS		0x30
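
The 0xC0/0xC4 pair describes a 64-bit inbound request port as lo/hi halves; this hunk only defines the offsets and shows no user. Assuming it behaves like the other request ports, a submission would presumably post the low dword first (function, field, and write order are all hypothetical; check the controller spec before relying on them):

	/* Hypothetical sketch: posting a 64-bit command address. */
	static void submit_command64_sketch(struct ctlr_info *h, u64 busaddr64)
	{
		writel(lower_32_bits(busaddr64),
		       h->vaddr + SA5_REQUEST_PORT64_LO_OFFSET);
		writel(upper_32_bits(busaddr64),
		       h->vaddr + SA5_REQUEST_PORT64_HI_OFFSET);
	}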
@@ -353,10 +369,7 @@ static void SA5_submit_command_no_read(struct ctlr_info *h,
 static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
 	struct CommandList *c)
 {
-	if (c->cmd_type == CMD_IOACCEL2)
-		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
-	else
-		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
+	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
 }
 
 /*
@@ -398,19 +411,19 @@ static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
 	unsigned long register_value = FIFO_EMPTY;
 
 	/* msi auto clears the interrupt pending bit. */
-	if (!(h->msi_vector || h->msix_vector)) {
+	if (unlikely(!(h->msi_vector || h->msix_vector))) {
 		/* flush the controller write of the reply queue by reading
 		 * outbound doorbell status register.
 		 */
-		register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
+		(void) readl(h->vaddr + SA5_OUTDB_STATUS);
 		writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
 		/* Do a read in order to flush the write to the controller
 		 * (as per spec.)
 		 */
-		register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
+		(void) readl(h->vaddr + SA5_OUTDB_STATUS);
 	}
 
-	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
+	if ((((u32) rq->head[rq->current_entry]) & 1) == rq->wraparound) {
 		register_value = rq->head[rq->current_entry];
 		rq->current_entry++;
 		atomic_dec(&h->commands_outstanding);
@@ -426,14 +439,6 @@ static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
 }
 
 /*
- * Returns true if fifo is full.
- *
- */
-static unsigned long SA5_fifo_full(struct ctlr_info *h)
-{
-	return atomic_read(&h->commands_outstanding) >= h->max_commands;
-}
-/*
  * returns value read from hardware.
  * returns FIFO_EMPTY if there is nothing to read
  */
@@ -473,9 +478,6 @@ static bool SA5_performant_intr_pending(struct ctlr_info *h)
 	if (!register_value)
 		return false;
 
-	if (h->msi_vector || h->msix_vector)
-		return true;
-
 	/* Read outbound doorbell to flush */
 	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
 	return register_value & SA5_OUTDB_STATUS_PERF_BIT;
@@ -525,7 +527,6 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
 static struct access_method SA5_access = {
 	SA5_submit_command,
 	SA5_intr_mask,
-	SA5_fifo_full,
 	SA5_intr_pending,
 	SA5_completed,
 };
@@ -533,7 +534,6 @@ static struct access_method SA5_access = {
 static struct access_method SA5_ioaccel_mode1_access = {
 	SA5_submit_command,
 	SA5_performant_intr_mask,
-	SA5_fifo_full,
 	SA5_ioaccel_mode1_intr_pending,
 	SA5_ioaccel_mode1_completed,
 };
@@ -541,7 +541,6 @@ static struct access_method SA5_ioaccel_mode1_access = {
 static struct access_method SA5_ioaccel_mode2_access = {
 	SA5_submit_command_ioaccel2,
 	SA5_performant_intr_mask,
-	SA5_fifo_full,
 	SA5_performant_intr_pending,
 	SA5_performant_completed,
 };
@@ -549,7 +548,6 @@ static struct access_method SA5_ioaccel_mode2_access = {
 static struct access_method SA5_performant_access = {
 	SA5_submit_command,
 	SA5_performant_intr_mask,
-	SA5_fifo_full,
 	SA5_performant_intr_pending,
 	SA5_performant_completed,
 };
@@ -557,7 +555,6 @@ static struct access_method SA5_performant_access = {
 static struct access_method SA5_performant_access_no_read = {
 	SA5_submit_command_no_read,
 	SA5_performant_intr_mask,
-	SA5_fifo_full,
 	SA5_performant_intr_pending,
 	SA5_performant_completed,
 };
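
A closing observation on why five tables change in lockstep: the initializers are positional, so removing the fifo_full member shifts every later entry and each table must be edited by hand. Written with designated initializers (a sketch of an alternative, not what the file does), the tables would tolerate reordering, and a leftover .fifo_full line would be a compile error instead of a silent shift:

	/* Sketch: same table, designated-initializer style. */
	static struct access_method SA5_access = {
		.submit_command		= SA5_submit_command,
		.set_intr_mask		= SA5_intr_mask,
		.intr_pending		= SA5_intr_pending,
		.command_completed	= SA5_completed,
	};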