author     Stephen M. Cameron <scameron@beardog.cce.hp.com>   2014-05-29 11:53:07 -0400
committer  Christoph Hellwig <hch@lst.de>                     2014-06-02 03:54:55 -0400
commit     072b0518b0f75296443a0df9fe7f25e052e62652
tree       fbfb396427d949c0a05440fe8b8870a2e3e51968 /drivers/scsi/hpsa.c
parent     f89439bc2e12a0eab57f15411e668525dc7e61ec
hpsa: allocate reply queues individually
Now that we can allocate more than 4 reply queues (up to 64), we
shouldn't try to make them share the same allocation but should
allocate them separately.
Signed-off-by: Stephen M. Cameron <scameron@beardog.cce.hp.com>
Reviewed-by: Mike Miller <michael.miller@canonical.com>
Reviewed-by: Scott Teel <scott.teel@hp.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Diffstat (limited to 'drivers/scsi/hpsa.c')
 drivers/scsi/hpsa.c | 53 ++++++++++++++++++++++++++++++++---------------------
 1 file changed, 32 insertions(+), 21 deletions(-)
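
Before diving into the hunks: the patch renames struct reply_pool to
struct reply_queue_buffer and moves from one shared DMA buffer, carved
up by offset, to one DMA-coherent buffer per queue. A minimal sketch of
the data structure as inferred from how the diff below uses it; the
field types are my assumptions, not copied from hpsa.h:

struct reply_queue_buffer {
        u64 *head;              /* CPU address of this queue's ring buffer */
        size_t size;            /* number of entries (h->max_commands) */
        u8 wraparound;          /* spec says init to 1 */
        u32 current_entry;      /* consumer index */
        dma_addr_t busaddr;     /* DMA address programmed into RepQAddr[i] */
};

/* Lives in struct ctlr_info as an array; the bound name is assumed --
 * the commit message only says "up to 64" queues.
 *
 * Old layout: one allocation, queue i starts at reply_pool + max_commands * i.
 * New layout: h->reply_queue[i].head is its own pci_alloc_consistent()
 * allocation of h->reply_queue_size = h->max_commands * sizeof(u64) bytes.
 */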
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 87066954a859..b474dbaaa8d0 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -695,7 +695,7 @@ static inline void addQ(struct list_head *list, struct CommandList *c)
 static inline u32 next_command(struct ctlr_info *h, u8 q)
 {
         u32 a;
-        struct reply_pool *rq = &h->reply_queue[q];
+        struct reply_queue_buffer *rq = &h->reply_queue[q];
         unsigned long flags;
 
         if (h->transMethod & CFGTBL_Trans_io_accel1)
@@ -6707,6 +6707,20 @@ static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
 #endif /* CONFIG_PCI_MSI */
 }
 
+static void hpsa_free_reply_queues(struct ctlr_info *h)
+{
+        int i;
+
+        for (i = 0; i < h->nreply_queues; i++) {
+                if (!h->reply_queue[i].head)
+                        continue;
+                pci_free_consistent(h->pdev, h->reply_queue_size,
+                        h->reply_queue[i].head, h->reply_queue[i].busaddr);
+                h->reply_queue[i].head = NULL;
+                h->reply_queue[i].busaddr = 0;
+        }
+}
+
 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
 {
         hpsa_free_irqs_and_disable_msix(h);
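
An observation on the new helper (mine, not the commit message's):
because it skips NULL heads and clears head/busaddr after freeing,
hpsa_free_reply_queues() is safe on a partially allocated queue set and
safe to call more than once, which is what lets the later hunks replace
three separate free sites with one unconditional call. A hedged sketch
of the allocation/unwind pairing it enables; the function name
alloc_reply_queues_sketch is hypothetical:

static int alloc_reply_queues_sketch(struct ctlr_info *h)
{
        int i;

        for (i = 0; i < h->nreply_queues; i++) {
                h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
                                h->reply_queue_size,
                                &h->reply_queue[i].busaddr);
                if (!h->reply_queue[i].head) {
                        /* queues i..nreply_queues-1 still have NULL heads,
                         * so the helper frees exactly what was allocated */
                        hpsa_free_reply_queues(h);
                        return -ENOMEM;
                }
        }
        return 0;
}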
@@ -6714,8 +6728,7 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
         hpsa_free_cmd_pool(h);
         kfree(h->ioaccel1_blockFetchTable);
         kfree(h->blockFetchTable);
-        pci_free_consistent(h->pdev, h->reply_pool_size,
-                h->reply_pool, h->reply_pool_dhandle);
+        hpsa_free_reply_queues(h);
         if (h->vaddr)
                 iounmap(h->vaddr);
         if (h->transtable)
@@ -7164,8 +7177,7 @@ static void hpsa_remove_one(struct pci_dev *pdev)
         pci_free_consistent(h->pdev,
                 h->nr_cmds * sizeof(struct ErrorInfo),
                 h->errinfo_pool, h->errinfo_pool_dhandle);
-        pci_free_consistent(h->pdev, h->reply_pool_size,
-                h->reply_pool, h->reply_pool_dhandle);
+        hpsa_free_reply_queues(h);
         kfree(h->cmd_pool_bits);
         kfree(h->blockFetchTable);
         kfree(h->ioaccel1_blockFetchTable);
@@ -7278,7 +7290,8 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
          */
 
         /* Controller spec: zero out this buffer. */
-        memset(h->reply_pool, 0, h->reply_pool_size);
+        for (i = 0; i < h->nreply_queues; i++)
+                memset(h->reply_queue[i].head, 0, h->reply_queue_size);
 
         bft[7] = SG_ENTRIES_IN_CMD + 4;
         calc_bucket_map(bft, ARRAY_SIZE(bft),
@@ -7294,8 +7307,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
 
         for (i = 0; i < h->nreply_queues; i++) {
                 writel(0, &h->transtable->RepQAddr[i].upper);
-                writel(h->reply_pool_dhandle +
-                        (h->max_commands * sizeof(u64) * i),
+                writel(h->reply_queue[i].busaddr,
                         &h->transtable->RepQAddr[i].lower);
         }
 
@@ -7343,8 +7355,10 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
                 h->ioaccel1_blockFetchTable);
 
         /* initialize all reply queue entries to unused */
-        memset(h->reply_pool, (u8) IOACCEL_MODE1_REPLY_UNUSED,
-                h->reply_pool_size);
+        for (i = 0; i < h->nreply_queues; i++)
+                memset(h->reply_queue[i].head,
+                        (u8) IOACCEL_MODE1_REPLY_UNUSED,
+                        h->reply_queue_size);
 
         /* set all the constant fields in the accelerator command
          * frames once at init time to save CPU cycles later.
@@ -7500,16 +7514,17 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
                 }
         }
 
-        /* TODO, check that this next line h->nreply_queues is correct */
         h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
         hpsa_get_max_perf_mode_cmds(h);
         /* Performant mode ring buffer and supporting data structures */
-        h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues;
-        h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
-                &(h->reply_pool_dhandle));
+        h->reply_queue_size = h->max_commands * sizeof(u64);
 
         for (i = 0; i < h->nreply_queues; i++) {
-                h->reply_queue[i].head = &h->reply_pool[h->max_commands * i];
+                h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
+                        h->reply_queue_size,
+                        &(h->reply_queue[i].busaddr));
+                if (!h->reply_queue[i].head)
+                        goto clean_up;
                 h->reply_queue[i].size = h->max_commands;
                 h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
                 h->reply_queue[i].current_entry = 0;
@@ -7518,18 +7533,14 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
         /* Need a block fetch table for performant mode */
         h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
                 sizeof(u32)), GFP_KERNEL);
-
-        if ((h->reply_pool == NULL)
-                || (h->blockFetchTable == NULL))
+        if (!h->blockFetchTable)
                 goto clean_up;
 
         hpsa_enter_performant_mode(h, trans_support);
         return;
 
 clean_up:
-        if (h->reply_pool)
-                pci_free_consistent(h->pdev, h->reply_pool_size,
-                        h->reply_pool, h->reply_pool_dhandle);
+        hpsa_free_reply_queues(h);
         kfree(h->blockFetchTable);
 }
 