diff options

author:    Matt Gates <matthew.gates@hp.com>  2014-02-18 14:55:17 -0500
committer: James Bottomley <JBottomley@Parallels.com>  2014-03-15 13:19:02 -0400
commit:    e1f7de0cdd68d246d7008241cd9e443a54f880a8 (patch)
tree:      060525e7cdc5f2f86f9fcf4bb5ee3a22861c9e8f /drivers/scsi/hpsa.h
parent:    e1d9cbfa09cdd39c4821777a6980dd643e5493ad (diff)
[SCSI] hpsa: add support for 'fastpath' i/o
For certain i/o's to certain devices (unmasked physical disks) we
can bypass the RAID stack firmware and do the i/o to the device
directly and it will be faster.
Signed-off-by: Matt Gates <matthew.gates@hp.com>
Signed-off-by: Stephen M. Cameron <scameron@beardog.cce.hp.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Diffstat (limited to 'drivers/scsi/hpsa.h')

-rw-r--r--  drivers/scsi/hpsa.h | 51 ++++++++++++++++++++++++++++++++++++++++
1 file changed, 51 insertions(+), 0 deletions(-)
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 01c328349c83..c7865f30ffd1 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -46,6 +46,7 @@ struct hpsa_scsi_dev_t { | |||
46 | unsigned char vendor[8]; /* bytes 8-15 of inquiry data */ | 46 | unsigned char vendor[8]; /* bytes 8-15 of inquiry data */ |
47 | unsigned char model[16]; /* bytes 16-31 of inquiry data */ | 47 | unsigned char model[16]; /* bytes 16-31 of inquiry data */ |
48 | unsigned char raid_level; /* from inquiry page 0xC1 */ | 48 | unsigned char raid_level; /* from inquiry page 0xC1 */ |
49 | u32 ioaccel_handle; | ||
49 | }; | 50 | }; |
50 | 51 | ||
51 | struct reply_pool { | 52 | struct reply_pool { |
@@ -95,6 +96,8 @@ struct ctlr_info { | |||
95 | /* pointers to command and error info pool */ | 96 | /* pointers to command and error info pool */ |
96 | struct CommandList *cmd_pool; | 97 | struct CommandList *cmd_pool; |
97 | dma_addr_t cmd_pool_dhandle; | 98 | dma_addr_t cmd_pool_dhandle; |
99 | struct io_accel1_cmd *ioaccel_cmd_pool; | ||
100 | dma_addr_t ioaccel_cmd_pool_dhandle; | ||
98 | struct ErrorInfo *errinfo_pool; | 101 | struct ErrorInfo *errinfo_pool; |
99 | dma_addr_t errinfo_pool_dhandle; | 102 | dma_addr_t errinfo_pool_dhandle; |
100 | unsigned long *cmd_pool_bits; | 103 | unsigned long *cmd_pool_bits; |
@@ -128,6 +131,7 @@ struct ctlr_info { | |||
128 | u8 nreply_queues; | 131 | u8 nreply_queues; |
129 | dma_addr_t reply_pool_dhandle; | 132 | dma_addr_t reply_pool_dhandle; |
130 | u32 *blockFetchTable; | 133 | u32 *blockFetchTable; |
134 | u32 *ioaccel1_blockFetchTable; | ||
131 | unsigned char *hba_inquiry_data; | 135 | unsigned char *hba_inquiry_data; |
132 | u64 last_intr_timestamp; | 136 | u64 last_intr_timestamp; |
133 | u32 last_heartbeat; | 137 | u32 last_heartbeat; |
@@ -387,6 +391,45 @@ static bool SA5_performant_intr_pending(struct ctlr_info *h) | |||
387 | return register_value & SA5_OUTDB_STATUS_PERF_BIT; | 391 | return register_value & SA5_OUTDB_STATUS_PERF_BIT; |
388 | } | 392 | } |
389 | 393 | ||
394 | #define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT 0x100 | ||
395 | |||
396 | static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h) | ||
397 | { | ||
398 | unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS); | ||
399 | |||
400 | return (register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT) ? | ||
401 | true : false; | ||
402 | } | ||
403 | |||
404 | #define IOACCEL_MODE1_REPLY_QUEUE_INDEX 0x1A0 | ||
405 | #define IOACCEL_MODE1_PRODUCER_INDEX 0x1B8 | ||
406 | #define IOACCEL_MODE1_CONSUMER_INDEX 0x1BC | ||
407 | #define IOACCEL_MODE1_REPLY_UNUSED 0xFFFFFFFFFFFFFFFFULL | ||
408 | |||
409 | static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, | ||
410 | u8 q) | ||
411 | { | ||
412 | u64 register_value; | ||
413 | struct reply_pool *rq = &h->reply_queue[q]; | ||
414 | unsigned long flags; | ||
415 | |||
416 | BUG_ON(q >= h->nreply_queues); | ||
417 | |||
418 | register_value = rq->head[rq->current_entry]; | ||
419 | if (register_value != IOACCEL_MODE1_REPLY_UNUSED) { | ||
420 | rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED; | ||
421 | if (++rq->current_entry == rq->size) | ||
422 | rq->current_entry = 0; | ||
423 | spin_lock_irqsave(&h->lock, flags); | ||
424 | h->commands_outstanding--; | ||
425 | spin_unlock_irqrestore(&h->lock, flags); | ||
426 | } else { | ||
427 | writel((q << 24) | rq->current_entry, | ||
428 | h->vaddr + IOACCEL_MODE1_CONSUMER_INDEX); | ||
429 | } | ||
430 | return (unsigned long) register_value; | ||
431 | } | ||
432 | |||
390 | static struct access_method SA5_access = { | 433 | static struct access_method SA5_access = { |
391 | SA5_submit_command, | 434 | SA5_submit_command, |
392 | SA5_intr_mask, | 435 | SA5_intr_mask, |
@@ -395,6 +438,14 @@ static struct access_method SA5_access = { | |||
395 | SA5_completed, | 438 | SA5_completed, |
396 | }; | 439 | }; |
397 | 440 | ||
441 | static struct access_method SA5_ioaccel_mode1_access = { | ||
442 | SA5_submit_command, | ||
443 | SA5_performant_intr_mask, | ||
444 | SA5_fifo_full, | ||
445 | SA5_ioaccel_mode1_intr_pending, | ||
446 | SA5_ioaccel_mode1_completed, | ||
447 | }; | ||
448 | |||
398 | static struct access_method SA5_performant_access = { | 449 | static struct access_method SA5_performant_access = { |
399 | SA5_submit_command, | 450 | SA5_submit_command, |
400 | SA5_performant_intr_mask, | 451 | SA5_performant_intr_mask, |