about summary refs log tree commit diff stats
path: root/drivers/scsi
diff options
context:
space:
mode:
authorDon Brace <don.brace@pmcs.com>2015-01-23 17:43:25 -0500
committerJames Bottomley <JBottomley@Parallels.com>2015-02-02 12:57:40 -0500
commit080ef1cc7fdf5d0800775c8626718da807e7ba99 (patch)
tree0d368443c5783ce08140713cda6300b565761c5b /drivers/scsi
parent574f05d37484038caa989d457fa60c1db4e81683 (diff)
hpsa: use workqueue to resubmit failed ioaccel commands
Instead of kicking the commands all the way back to the mid layer, use a work queue. This enables having a mechanism for the driver to be able to resubmit the commands down the "normal" raid path without turning off the ioaccel feature entirely whenever an error is encountered on the ioaccel path, and prevents excessive rescanning of devices.

Reviewed-by: Scott Teel <scott.teel@pmcs.com>
Signed-off-by: Don Brace <don.brace@pmcs.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Diffstat (limited to 'drivers/scsi')
-rw-r--r--drivers/scsi/hpsa.c72
-rw-r--r--drivers/scsi/hpsa.h1
-rw-r--r--drivers/scsi/hpsa_cmd.h1
3 files changed, 54 insertions, 20 deletions
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index cc3128ff5dab..dcacb29ff589 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -248,6 +248,7 @@ static void hpsa_flush_cache(struct ctlr_info *h);
248static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, 248static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
249 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, 249 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
250 u8 *scsi3addr); 250 u8 *scsi3addr);
251static void hpsa_command_resubmit_worker(struct work_struct *work);
251 252
252static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev) 253static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
253{ 254{
@@ -1619,7 +1620,6 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
1619 struct hpsa_scsi_dev_t *dev) 1620 struct hpsa_scsi_dev_t *dev)
1620{ 1621{
1621 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; 1622 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
1622 int raid_retry = 0;
1623 1623
1624 /* check for good status */ 1624 /* check for good status */
1625 if (likely(c2->error_data.serv_response == 0 && 1625 if (likely(c2->error_data.serv_response == 0 &&
@@ -1636,24 +1636,22 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
1636 if (is_logical_dev_addr_mode(dev->scsi3addr) && 1636 if (is_logical_dev_addr_mode(dev->scsi3addr) &&
1637 c2->error_data.serv_response == 1637 c2->error_data.serv_response ==
1638 IOACCEL2_SERV_RESPONSE_FAILURE) { 1638 IOACCEL2_SERV_RESPONSE_FAILURE) {
1639 dev->offload_enabled = 0; 1639 if (c2->error_data.status ==
1640 cmd->result = DID_SOFT_ERROR << 16; 1640 IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
1641 cmd_free(h, c); 1641 dev->offload_enabled = 0;
1642 cmd->scsi_done(cmd); 1642 goto retry_cmd;
1643 return;
1644 }
1645 raid_retry = handle_ioaccel_mode2_error(h, c, cmd, c2);
1646 /* If error found, disable Smart Path,
1647 * force a retry on the standard path.
1648 */
1649 if (raid_retry) {
1650 dev_warn(&h->pdev->dev, "%s: Retrying on standard path.\n",
1651 "HP SSD Smart Path");
1652 dev->offload_enabled = 0; /* Disable Smart Path */
1653 cmd->result = DID_SOFT_ERROR << 16;
1654 } 1643 }
1644
1645 if (handle_ioaccel_mode2_error(h, c, cmd, c2))
1646 goto retry_cmd;
1647
1655 cmd_free(h, c); 1648 cmd_free(h, c);
1656 cmd->scsi_done(cmd); 1649 cmd->scsi_done(cmd);
1650 return;
1651
1652retry_cmd:
1653 INIT_WORK(&c->work, hpsa_command_resubmit_worker);
1654 queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
1657} 1655}
1658 1656
1659static void complete_scsi_command(struct CommandList *cp) 1657static void complete_scsi_command(struct CommandList *cp)
@@ -1723,9 +1721,9 @@ static void complete_scsi_command(struct CommandList *cp)
1723 if (is_logical_dev_addr_mode(dev->scsi3addr)) { 1721 if (is_logical_dev_addr_mode(dev->scsi3addr)) {
1724 if (ei->CommandStatus == CMD_IOACCEL_DISABLED) 1722 if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
1725 dev->offload_enabled = 0; 1723 dev->offload_enabled = 0;
1726 cmd->result = DID_SOFT_ERROR << 16; 1724 INIT_WORK(&cp->work, hpsa_command_resubmit_worker);
1727 cmd_free(h, cp); 1725 queue_work_on(raw_smp_processor_id(),
1728 cmd->scsi_done(cmd); 1726 h->resubmit_wq, &cp->work);
1729 return; 1727 return;
1730 } 1728 }
1731 } 1729 }
@@ -3873,6 +3871,31 @@ static int hpsa_ciss_submit(struct ctlr_info *h,
3873 return 0; 3871 return 0;
3874} 3872}
3875 3873
3874static void hpsa_command_resubmit_worker(struct work_struct *work)
3875{
3876 struct scsi_cmnd *cmd;
3877 struct hpsa_scsi_dev_t *dev;
3878 struct CommandList *c =
3879 container_of(work, struct CommandList, work);
3880
3881 cmd = c->scsi_cmd;
3882 dev = cmd->device->hostdata;
3883 if (!dev) {
3884 cmd->result = DID_NO_CONNECT << 16;
3885 cmd->scsi_done(cmd);
3886 return;
3887 }
3888 if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
3889 /*
3890 * If we get here, it means dma mapping failed. Try
3891 * again via scsi mid layer, which will then get
3892 * SCSI_MLQUEUE_HOST_BUSY.
3893 */
3894 cmd->result = DID_IMM_RETRY << 16;
3895 cmd->scsi_done(cmd);
3896 }
3897}
3898
3876/* Running in struct Scsi_Host->host_lock less mode */ 3899/* Running in struct Scsi_Host->host_lock less mode */
3877static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) 3900static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
3878{ 3901{
@@ -6396,6 +6419,7 @@ static void fail_all_outstanding_cmds(struct ctlr_info *h)
6396 int i; 6419 int i;
6397 struct CommandList *c = NULL; 6420 struct CommandList *c = NULL;
6398 6421
6422 flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
6399 for (i = 0; i < h->nr_cmds; i++) { 6423 for (i = 0; i < h->nr_cmds; i++) {
6400 if (!test_bit(i & (BITS_PER_LONG - 1), 6424 if (!test_bit(i & (BITS_PER_LONG - 1),
6401 h->cmd_pool_bits + (i / BITS_PER_LONG))) 6425 h->cmd_pool_bits + (i / BITS_PER_LONG)))
@@ -6631,6 +6655,12 @@ reinit_after_soft_reset:
6631 spin_lock_init(&h->scan_lock); 6655 spin_lock_init(&h->scan_lock);
6632 spin_lock_init(&h->passthru_count_lock); 6656 spin_lock_init(&h->passthru_count_lock);
6633 6657
6658 h->resubmit_wq = alloc_workqueue("hpsa", WQ_MEM_RECLAIM, 0);
6659 if (!h->resubmit_wq) {
6660 dev_err(&h->pdev->dev, "Failed to allocate work queue\n");
6661 rc = -ENOMEM;
6662 goto clean1;
6663 }
6634 /* Allocate and clear per-cpu variable lockup_detected */ 6664 /* Allocate and clear per-cpu variable lockup_detected */
6635 h->lockup_detected = alloc_percpu(u32); 6665 h->lockup_detected = alloc_percpu(u32);
6636 if (!h->lockup_detected) { 6666 if (!h->lockup_detected) {
@@ -6763,6 +6793,8 @@ clean2_and_free_irqs:
6763 hpsa_free_irqs(h); 6793 hpsa_free_irqs(h);
6764clean2: 6794clean2:
6765clean1: 6795clean1:
6796 if (h->resubmit_wq)
6797 destroy_workqueue(h->resubmit_wq);
6766 if (h->lockup_detected) 6798 if (h->lockup_detected)
6767 free_percpu(h->lockup_detected); 6799 free_percpu(h->lockup_detected);
6768 kfree(h); 6800 kfree(h);
@@ -6838,9 +6870,9 @@ static void hpsa_remove_one(struct pci_dev *pdev)
6838 h->remove_in_progress = 1; 6870 h->remove_in_progress = 1;
6839 cancel_delayed_work(&h->monitor_ctlr_work); 6871 cancel_delayed_work(&h->monitor_ctlr_work);
6840 spin_unlock_irqrestore(&h->lock, flags); 6872 spin_unlock_irqrestore(&h->lock, flags);
6841
6842 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */ 6873 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */
6843 hpsa_shutdown(pdev); 6874 hpsa_shutdown(pdev);
6875 destroy_workqueue(h->resubmit_wq);
6844 iounmap(h->vaddr); 6876 iounmap(h->vaddr);
6845 iounmap(h->transtable); 6877 iounmap(h->transtable);
6846 iounmap(h->cfgtable); 6878 iounmap(h->cfgtable);
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 06a3e812ec38..a0f4268df457 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -236,6 +236,7 @@ struct ctlr_info {
236 struct list_head offline_device_list; 236 struct list_head offline_device_list;
237 int acciopath_status; 237 int acciopath_status;
238 int raid_offload_debug; 238 int raid_offload_debug;
239 struct workqueue_struct *resubmit_wq;
239}; 240};
240 241
241struct offline_device_entry { 242struct offline_device_entry {
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index d78e66629650..3f2f0af6abb2 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -404,6 +404,7 @@ struct CommandList {
404 long cmdindex; 404 long cmdindex;
405 struct completion *waiting; 405 struct completion *waiting;
406 void *scsi_cmd; 406 void *scsi_cmd;
407 struct work_struct work;
407} __aligned(COMMANDLIST_ALIGNMENT); 408} __aligned(COMMANDLIST_ALIGNMENT);
408 409
409/* Max S/G elements in I/O accelerator command */ 410/* Max S/G elements in I/O accelerator command */