diff options
author | Don Brace <don.brace@microsemi.com> | 2019-05-07 14:32:33 -0400 |
---|---|---|
committer | Martin K. Petersen <martin.petersen@oracle.com> | 2019-06-18 19:46:18 -0400 |
commit | c5dfd106414f3e038fee5c6f0800fd55ed07b41d (patch) | |
tree | 57778cd59a7efe4537d656c7fe0f197ae3386e98 | |
parent | 9e33f0d5788fe4aaa42b1abf6536d046c724a8cd (diff) |
scsi: hpsa: correct device resets
Correct a race condition that occurs between the reset handler and the
completion handler. There are times when the wait_event condition is
never met due to this race condition and the reset never completes.
The reset_pending field is NULL initially.
t Reset Handler Thread Completion Thread
-- -------------------- -----------------
t1 if (c->reset_pending)
t2 c->reset_pending = dev; if (atomic_dec_and_test(counter))
t3 atomic_inc(counter) wake_up_all(event_sync_wait_queue)
t4
t5 wait_event(...counter == 0)
Kernel.org Bugzilla:
https://bugzilla.kernel.org/show_bug.cgi?id=199435
Bug 199435 - HPSA + P420i resetting logical Direct-Access
never complete
Reviewed-by: Justin Lindley <justin.lindley@microsemi.com>
Reviewed-by: David Carroll <david.carroll@microsemi.com>
Reviewed-by: Scott Teel <scott.teel@microsemi.com>
Signed-off-by: Don Brace <don.brace@microsemi.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
-rw-r--r-- | drivers/scsi/hpsa.c | 173 | ||||
-rw-r--r-- | drivers/scsi/hpsa.h | 3 | ||||
-rw-r--r-- | drivers/scsi/hpsa_cmd.h | 2 |
3 files changed, 88 insertions, 90 deletions
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 42d51951b61a..b4df2475ce9c 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c | |||
@@ -346,11 +346,6 @@ static inline bool hpsa_is_cmd_idle(struct CommandList *c) | |||
346 | return c->scsi_cmd == SCSI_CMD_IDLE; | 346 | return c->scsi_cmd == SCSI_CMD_IDLE; |
347 | } | 347 | } |
348 | 348 | ||
349 | static inline bool hpsa_is_pending_event(struct CommandList *c) | ||
350 | { | ||
351 | return c->reset_pending; | ||
352 | } | ||
353 | |||
354 | /* extract sense key, asc, and ascq from sense data. -1 means invalid. */ | 349 | /* extract sense key, asc, and ascq from sense data. -1 means invalid. */ |
355 | static void decode_sense_data(const u8 *sense_data, int sense_data_len, | 350 | static void decode_sense_data(const u8 *sense_data, int sense_data_len, |
356 | u8 *sense_key, u8 *asc, u8 *ascq) | 351 | u8 *sense_key, u8 *asc, u8 *ascq) |
@@ -1146,6 +1141,8 @@ static void __enqueue_cmd_and_start_io(struct ctlr_info *h, | |||
1146 | { | 1141 | { |
1147 | dial_down_lockup_detection_during_fw_flash(h, c); | 1142 | dial_down_lockup_detection_during_fw_flash(h, c); |
1148 | atomic_inc(&h->commands_outstanding); | 1143 | atomic_inc(&h->commands_outstanding); |
1144 | if (c->device) | ||
1145 | atomic_inc(&c->device->commands_outstanding); | ||
1149 | 1146 | ||
1150 | reply_queue = h->reply_map[raw_smp_processor_id()]; | 1147 | reply_queue = h->reply_map[raw_smp_processor_id()]; |
1151 | switch (c->cmd_type) { | 1148 | switch (c->cmd_type) { |
@@ -1169,9 +1166,6 @@ static void __enqueue_cmd_and_start_io(struct ctlr_info *h, | |||
1169 | 1166 | ||
1170 | static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c) | 1167 | static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c) |
1171 | { | 1168 | { |
1172 | if (unlikely(hpsa_is_pending_event(c))) | ||
1173 | return finish_cmd(c); | ||
1174 | |||
1175 | __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE); | 1169 | __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE); |
1176 | } | 1170 | } |
1177 | 1171 | ||
@@ -2434,13 +2428,16 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h, | |||
2434 | break; | 2428 | break; |
2435 | } | 2429 | } |
2436 | 2430 | ||
2431 | if (dev->in_reset) | ||
2432 | retry = 0; | ||
2433 | |||
2437 | return retry; /* retry on raid path? */ | 2434 | return retry; /* retry on raid path? */ |
2438 | } | 2435 | } |
2439 | 2436 | ||
2440 | static void hpsa_cmd_resolve_events(struct ctlr_info *h, | 2437 | static void hpsa_cmd_resolve_events(struct ctlr_info *h, |
2441 | struct CommandList *c) | 2438 | struct CommandList *c) |
2442 | { | 2439 | { |
2443 | bool do_wake = false; | 2440 | struct hpsa_scsi_dev_t *dev = c->device; |
2444 | 2441 | ||
2445 | /* | 2442 | /* |
2446 | * Reset c->scsi_cmd here so that the reset handler will know | 2443 | * Reset c->scsi_cmd here so that the reset handler will know |
@@ -2449,25 +2446,12 @@ static void hpsa_cmd_resolve_events(struct ctlr_info *h, | |||
2449 | */ | 2446 | */ |
2450 | c->scsi_cmd = SCSI_CMD_IDLE; | 2447 | c->scsi_cmd = SCSI_CMD_IDLE; |
2451 | mb(); /* Declare command idle before checking for pending events. */ | 2448 | mb(); /* Declare command idle before checking for pending events. */ |
2452 | if (c->reset_pending) { | 2449 | if (dev) { |
2453 | unsigned long flags; | 2450 | atomic_dec(&dev->commands_outstanding); |
2454 | struct hpsa_scsi_dev_t *dev; | 2451 | if (dev->in_reset && |
2455 | 2452 | atomic_read(&dev->commands_outstanding) <= 0) | |
2456 | /* | 2453 | wake_up_all(&h->event_sync_wait_queue); |
2457 | * There appears to be a reset pending; lock the lock and | ||
2458 | * reconfirm. If so, then decrement the count of outstanding | ||
2459 | * commands and wake the reset command if this is the last one. | ||
2460 | */ | ||
2461 | spin_lock_irqsave(&h->lock, flags); | ||
2462 | dev = c->reset_pending; /* Re-fetch under the lock. */ | ||
2463 | if (dev && atomic_dec_and_test(&dev->reset_cmds_out)) | ||
2464 | do_wake = true; | ||
2465 | c->reset_pending = NULL; | ||
2466 | spin_unlock_irqrestore(&h->lock, flags); | ||
2467 | } | 2454 | } |
2468 | |||
2469 | if (do_wake) | ||
2470 | wake_up_all(&h->event_sync_wait_queue); | ||
2471 | } | 2455 | } |
2472 | 2456 | ||
2473 | static void hpsa_cmd_resolve_and_free(struct ctlr_info *h, | 2457 | static void hpsa_cmd_resolve_and_free(struct ctlr_info *h, |
@@ -2516,6 +2500,11 @@ static void process_ioaccel2_completion(struct ctlr_info *h, | |||
2516 | dev->offload_to_be_enabled = 0; | 2500 | dev->offload_to_be_enabled = 0; |
2517 | } | 2501 | } |
2518 | 2502 | ||
2503 | if (dev->in_reset) { | ||
2504 | cmd->result = DID_RESET << 16; | ||
2505 | return hpsa_cmd_free_and_done(h, c, cmd); | ||
2506 | } | ||
2507 | |||
2519 | return hpsa_retry_cmd(h, c); | 2508 | return hpsa_retry_cmd(h, c); |
2520 | } | 2509 | } |
2521 | 2510 | ||
@@ -2621,10 +2610,6 @@ static void complete_scsi_command(struct CommandList *cp) | |||
2621 | return hpsa_cmd_free_and_done(h, cp, cmd); | 2610 | return hpsa_cmd_free_and_done(h, cp, cmd); |
2622 | } | 2611 | } |
2623 | 2612 | ||
2624 | if ((unlikely(hpsa_is_pending_event(cp)))) | ||
2625 | if (cp->reset_pending) | ||
2626 | return hpsa_cmd_free_and_done(h, cp, cmd); | ||
2627 | |||
2628 | if (cp->cmd_type == CMD_IOACCEL2) | 2613 | if (cp->cmd_type == CMD_IOACCEL2) |
2629 | return process_ioaccel2_completion(h, cp, cmd, dev); | 2614 | return process_ioaccel2_completion(h, cp, cmd, dev); |
2630 | 2615 | ||
@@ -3074,7 +3059,7 @@ out: | |||
3074 | return rc; | 3059 | return rc; |
3075 | } | 3060 | } |
3076 | 3061 | ||
3077 | static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, | 3062 | static int hpsa_send_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev, |
3078 | u8 reset_type, int reply_queue) | 3063 | u8 reset_type, int reply_queue) |
3079 | { | 3064 | { |
3080 | int rc = IO_OK; | 3065 | int rc = IO_OK; |
@@ -3082,11 +3067,10 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, | |||
3082 | struct ErrorInfo *ei; | 3067 | struct ErrorInfo *ei; |
3083 | 3068 | ||
3084 | c = cmd_alloc(h); | 3069 | c = cmd_alloc(h); |
3085 | 3070 | c->device = dev; | |
3086 | 3071 | ||
3087 | /* fill_cmd can't fail here, no data buffer to map. */ | 3072 | /* fill_cmd can't fail here, no data buffer to map. */ |
3088 | (void) fill_cmd(c, reset_type, h, NULL, 0, 0, | 3073 | (void) fill_cmd(c, reset_type, h, NULL, 0, 0, dev->scsi3addr, TYPE_MSG); |
3089 | scsi3addr, TYPE_MSG); | ||
3090 | rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); | 3074 | rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); |
3091 | if (rc) { | 3075 | if (rc) { |
3092 | dev_warn(&h->pdev->dev, "Failed to send reset command\n"); | 3076 | dev_warn(&h->pdev->dev, "Failed to send reset command\n"); |
@@ -3164,9 +3148,8 @@ static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c, | |||
3164 | } | 3148 | } |
3165 | 3149 | ||
3166 | static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev, | 3150 | static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev, |
3167 | unsigned char *scsi3addr, u8 reset_type, int reply_queue) | 3151 | u8 reset_type, int reply_queue) |
3168 | { | 3152 | { |
3169 | int i; | ||
3170 | int rc = 0; | 3153 | int rc = 0; |
3171 | 3154 | ||
3172 | /* We can really only handle one reset at a time */ | 3155 | /* We can really only handle one reset at a time */ |
@@ -3175,38 +3158,14 @@ static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev, | |||
3175 | return -EINTR; | 3158 | return -EINTR; |
3176 | } | 3159 | } |
3177 | 3160 | ||
3178 | BUG_ON(atomic_read(&dev->reset_cmds_out) != 0); | 3161 | rc = hpsa_send_reset(h, dev, reset_type, reply_queue); |
3179 | 3162 | if (!rc) { | |
3180 | for (i = 0; i < h->nr_cmds; i++) { | 3163 | /* incremented by sending the reset request */ |
3181 | struct CommandList *c = h->cmd_pool + i; | 3164 | atomic_dec(&dev->commands_outstanding); |
3182 | int refcount = atomic_inc_return(&c->refcount); | ||
3183 | |||
3184 | if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) { | ||
3185 | unsigned long flags; | ||
3186 | |||
3187 | /* | ||
3188 | * Mark the target command as having a reset pending, | ||
3189 | * then lock a lock so that the command cannot complete | ||
3190 | * while we're considering it. If the command is not | ||
3191 | * idle then count it; otherwise revoke the event. | ||
3192 | */ | ||
3193 | c->reset_pending = dev; | ||
3194 | spin_lock_irqsave(&h->lock, flags); /* Implied MB */ | ||
3195 | if (!hpsa_is_cmd_idle(c)) | ||
3196 | atomic_inc(&dev->reset_cmds_out); | ||
3197 | else | ||
3198 | c->reset_pending = NULL; | ||
3199 | spin_unlock_irqrestore(&h->lock, flags); | ||
3200 | } | ||
3201 | |||
3202 | cmd_free(h, c); | ||
3203 | } | ||
3204 | |||
3205 | rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue); | ||
3206 | if (!rc) | ||
3207 | wait_event(h->event_sync_wait_queue, | 3165 | wait_event(h->event_sync_wait_queue, |
3208 | atomic_read(&dev->reset_cmds_out) == 0 || | 3166 | atomic_read(&dev->commands_outstanding) <= 0 || |
3209 | lockup_detected(h)); | 3167 | lockup_detected(h)); |
3168 | } | ||
3210 | 3169 | ||
3211 | if (unlikely(lockup_detected(h))) { | 3170 | if (unlikely(lockup_detected(h))) { |
3212 | dev_warn(&h->pdev->dev, | 3171 | dev_warn(&h->pdev->dev, |
@@ -3214,10 +3173,8 @@ static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev, | |||
3214 | rc = -ENODEV; | 3173 | rc = -ENODEV; |
3215 | } | 3174 | } |
3216 | 3175 | ||
3217 | if (unlikely(rc)) | 3176 | if (!rc) |
3218 | atomic_set(&dev->reset_cmds_out, 0); | 3177 | rc = wait_for_device_to_become_ready(h, dev->scsi3addr, 0); |
3219 | else | ||
3220 | rc = wait_for_device_to_become_ready(h, scsi3addr, 0); | ||
3221 | 3178 | ||
3222 | mutex_unlock(&h->reset_mutex); | 3179 | mutex_unlock(&h->reset_mutex); |
3223 | return rc; | 3180 | return rc; |
@@ -4846,6 +4803,9 @@ static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h, | |||
4846 | 4803 | ||
4847 | c->phys_disk = dev; | 4804 | c->phys_disk = dev; |
4848 | 4805 | ||
4806 | if (dev->in_reset) | ||
4807 | return -1; | ||
4808 | |||
4849 | return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle, | 4809 | return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle, |
4850 | cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev); | 4810 | cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev); |
4851 | } | 4811 | } |
@@ -5031,6 +4991,11 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, | |||
5031 | } else | 4991 | } else |
5032 | cp->sg_count = (u8) use_sg; | 4992 | cp->sg_count = (u8) use_sg; |
5033 | 4993 | ||
4994 | if (phys_disk->in_reset) { | ||
4995 | cmd->result = DID_RESET << 16; | ||
4996 | return -1; | ||
4997 | } | ||
4998 | |||
5034 | enqueue_cmd_and_start_io(h, c); | 4999 | enqueue_cmd_and_start_io(h, c); |
5035 | return 0; | 5000 | return 0; |
5036 | } | 5001 | } |
@@ -5048,6 +5013,9 @@ static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, | |||
5048 | if (!c->scsi_cmd->device->hostdata) | 5013 | if (!c->scsi_cmd->device->hostdata) |
5049 | return -1; | 5014 | return -1; |
5050 | 5015 | ||
5016 | if (phys_disk->in_reset) | ||
5017 | return -1; | ||
5018 | |||
5051 | /* Try to honor the device's queue depth */ | 5019 | /* Try to honor the device's queue depth */ |
5052 | if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) > | 5020 | if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) > |
5053 | phys_disk->queue_depth) { | 5021 | phys_disk->queue_depth) { |
@@ -5131,6 +5099,9 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, | |||
5131 | if (!dev) | 5099 | if (!dev) |
5132 | return -1; | 5100 | return -1; |
5133 | 5101 | ||
5102 | if (dev->in_reset) | ||
5103 | return -1; | ||
5104 | |||
5134 | /* check for valid opcode, get LBA and block count */ | 5105 | /* check for valid opcode, get LBA and block count */ |
5135 | switch (cmd->cmnd[0]) { | 5106 | switch (cmd->cmnd[0]) { |
5136 | case WRITE_6: | 5107 | case WRITE_6: |
@@ -5435,13 +5406,13 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, | |||
5435 | */ | 5406 | */ |
5436 | static int hpsa_ciss_submit(struct ctlr_info *h, | 5407 | static int hpsa_ciss_submit(struct ctlr_info *h, |
5437 | struct CommandList *c, struct scsi_cmnd *cmd, | 5408 | struct CommandList *c, struct scsi_cmnd *cmd, |
5438 | unsigned char scsi3addr[]) | 5409 | struct hpsa_scsi_dev_t *dev) |
5439 | { | 5410 | { |
5440 | cmd->host_scribble = (unsigned char *) c; | 5411 | cmd->host_scribble = (unsigned char *) c; |
5441 | c->cmd_type = CMD_SCSI; | 5412 | c->cmd_type = CMD_SCSI; |
5442 | c->scsi_cmd = cmd; | 5413 | c->scsi_cmd = cmd; |
5443 | c->Header.ReplyQueue = 0; /* unused in simple mode */ | 5414 | c->Header.ReplyQueue = 0; /* unused in simple mode */ |
5444 | memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); | 5415 | memcpy(&c->Header.LUN.LunAddrBytes[0], &dev->scsi3addr[0], 8); |
5445 | c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT)); | 5416 | c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT)); |
5446 | 5417 | ||
5447 | /* Fill in the request block... */ | 5418 | /* Fill in the request block... */ |
@@ -5492,6 +5463,12 @@ static int hpsa_ciss_submit(struct ctlr_info *h, | |||
5492 | hpsa_cmd_resolve_and_free(h, c); | 5463 | hpsa_cmd_resolve_and_free(h, c); |
5493 | return SCSI_MLQUEUE_HOST_BUSY; | 5464 | return SCSI_MLQUEUE_HOST_BUSY; |
5494 | } | 5465 | } |
5466 | |||
5467 | if (dev->in_reset) { | ||
5468 | hpsa_cmd_resolve_and_free(h, c); | ||
5469 | return SCSI_MLQUEUE_HOST_BUSY; | ||
5470 | } | ||
5471 | |||
5495 | enqueue_cmd_and_start_io(h, c); | 5472 | enqueue_cmd_and_start_io(h, c); |
5496 | /* the cmd'll come back via intr handler in complete_scsi_command() */ | 5473 | /* the cmd'll come back via intr handler in complete_scsi_command() */ |
5497 | return 0; | 5474 | return 0; |
@@ -5543,8 +5520,7 @@ static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index, | |||
5543 | } | 5520 | } |
5544 | 5521 | ||
5545 | static int hpsa_ioaccel_submit(struct ctlr_info *h, | 5522 | static int hpsa_ioaccel_submit(struct ctlr_info *h, |
5546 | struct CommandList *c, struct scsi_cmnd *cmd, | 5523 | struct CommandList *c, struct scsi_cmnd *cmd) |
5547 | unsigned char *scsi3addr) | ||
5548 | { | 5524 | { |
5549 | struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; | 5525 | struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; |
5550 | int rc = IO_ACCEL_INELIGIBLE; | 5526 | int rc = IO_ACCEL_INELIGIBLE; |
@@ -5552,6 +5528,9 @@ static int hpsa_ioaccel_submit(struct ctlr_info *h, | |||
5552 | if (!dev) | 5528 | if (!dev) |
5553 | return SCSI_MLQUEUE_HOST_BUSY; | 5529 | return SCSI_MLQUEUE_HOST_BUSY; |
5554 | 5530 | ||
5531 | if (dev->in_reset) | ||
5532 | return SCSI_MLQUEUE_HOST_BUSY; | ||
5533 | |||
5555 | if (hpsa_simple_mode) | 5534 | if (hpsa_simple_mode) |
5556 | return IO_ACCEL_INELIGIBLE; | 5535 | return IO_ACCEL_INELIGIBLE; |
5557 | 5536 | ||
@@ -5587,8 +5566,12 @@ static void hpsa_command_resubmit_worker(struct work_struct *work) | |||
5587 | cmd->result = DID_NO_CONNECT << 16; | 5566 | cmd->result = DID_NO_CONNECT << 16; |
5588 | return hpsa_cmd_free_and_done(c->h, c, cmd); | 5567 | return hpsa_cmd_free_and_done(c->h, c, cmd); |
5589 | } | 5568 | } |
5590 | if (c->reset_pending) | 5569 | |
5570 | if (dev->in_reset) { | ||
5571 | cmd->result = DID_RESET << 16; | ||
5591 | return hpsa_cmd_free_and_done(c->h, c, cmd); | 5572 | return hpsa_cmd_free_and_done(c->h, c, cmd); |
5573 | } | ||
5574 | |||
5592 | if (c->cmd_type == CMD_IOACCEL2) { | 5575 | if (c->cmd_type == CMD_IOACCEL2) { |
5593 | struct ctlr_info *h = c->h; | 5576 | struct ctlr_info *h = c->h; |
5594 | struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; | 5577 | struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; |
@@ -5596,7 +5579,7 @@ static void hpsa_command_resubmit_worker(struct work_struct *work) | |||
5596 | 5579 | ||
5597 | if (c2->error_data.serv_response == | 5580 | if (c2->error_data.serv_response == |
5598 | IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) { | 5581 | IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) { |
5599 | rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr); | 5582 | rc = hpsa_ioaccel_submit(h, c, cmd); |
5600 | if (rc == 0) | 5583 | if (rc == 0) |
5601 | return; | 5584 | return; |
5602 | if (rc == SCSI_MLQUEUE_HOST_BUSY) { | 5585 | if (rc == SCSI_MLQUEUE_HOST_BUSY) { |
@@ -5612,7 +5595,7 @@ static void hpsa_command_resubmit_worker(struct work_struct *work) | |||
5612 | } | 5595 | } |
5613 | } | 5596 | } |
5614 | hpsa_cmd_partial_init(c->h, c->cmdindex, c); | 5597 | hpsa_cmd_partial_init(c->h, c->cmdindex, c); |
5615 | if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) { | 5598 | if (hpsa_ciss_submit(c->h, c, cmd, dev)) { |
5616 | /* | 5599 | /* |
5617 | * If we get here, it means dma mapping failed. Try | 5600 | * If we get here, it means dma mapping failed. Try |
5618 | * again via scsi mid layer, which will then get | 5601 | * again via scsi mid layer, which will then get |
@@ -5631,7 +5614,6 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) | |||
5631 | { | 5614 | { |
5632 | struct ctlr_info *h; | 5615 | struct ctlr_info *h; |
5633 | struct hpsa_scsi_dev_t *dev; | 5616 | struct hpsa_scsi_dev_t *dev; |
5634 | unsigned char scsi3addr[8]; | ||
5635 | struct CommandList *c; | 5617 | struct CommandList *c; |
5636 | int rc = 0; | 5618 | int rc = 0; |
5637 | 5619 | ||
@@ -5653,13 +5635,15 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) | |||
5653 | return 0; | 5635 | return 0; |
5654 | } | 5636 | } |
5655 | 5637 | ||
5656 | memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); | ||
5657 | |||
5658 | if (unlikely(lockup_detected(h))) { | 5638 | if (unlikely(lockup_detected(h))) { |
5659 | cmd->result = DID_NO_CONNECT << 16; | 5639 | cmd->result = DID_NO_CONNECT << 16; |
5660 | cmd->scsi_done(cmd); | 5640 | cmd->scsi_done(cmd); |
5661 | return 0; | 5641 | return 0; |
5662 | } | 5642 | } |
5643 | |||
5644 | if (dev->in_reset) | ||
5645 | return SCSI_MLQUEUE_DEVICE_BUSY; | ||
5646 | |||
5663 | c = cmd_tagged_alloc(h, cmd); | 5647 | c = cmd_tagged_alloc(h, cmd); |
5664 | if (c == NULL) | 5648 | if (c == NULL) |
5665 | return SCSI_MLQUEUE_DEVICE_BUSY; | 5649 | return SCSI_MLQUEUE_DEVICE_BUSY; |
@@ -5671,7 +5655,7 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) | |||
5671 | if (likely(cmd->retries == 0 && | 5655 | if (likely(cmd->retries == 0 && |
5672 | !blk_rq_is_passthrough(cmd->request) && | 5656 | !blk_rq_is_passthrough(cmd->request) && |
5673 | h->acciopath_status)) { | 5657 | h->acciopath_status)) { |
5674 | rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr); | 5658 | rc = hpsa_ioaccel_submit(h, c, cmd); |
5675 | if (rc == 0) | 5659 | if (rc == 0) |
5676 | return 0; | 5660 | return 0; |
5677 | if (rc == SCSI_MLQUEUE_HOST_BUSY) { | 5661 | if (rc == SCSI_MLQUEUE_HOST_BUSY) { |
@@ -5679,7 +5663,7 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) | |||
5679 | return SCSI_MLQUEUE_HOST_BUSY; | 5663 | return SCSI_MLQUEUE_HOST_BUSY; |
5680 | } | 5664 | } |
5681 | } | 5665 | } |
5682 | return hpsa_ciss_submit(h, c, cmd, scsi3addr); | 5666 | return hpsa_ciss_submit(h, c, cmd, dev); |
5683 | } | 5667 | } |
5684 | 5668 | ||
5685 | static void hpsa_scan_complete(struct ctlr_info *h) | 5669 | static void hpsa_scan_complete(struct ctlr_info *h) |
@@ -5961,6 +5945,7 @@ static int wait_for_device_to_become_ready(struct ctlr_info *h, | |||
5961 | static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) | 5945 | static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) |
5962 | { | 5946 | { |
5963 | int rc = SUCCESS; | 5947 | int rc = SUCCESS; |
5948 | int i; | ||
5964 | struct ctlr_info *h; | 5949 | struct ctlr_info *h; |
5965 | struct hpsa_scsi_dev_t *dev; | 5950 | struct hpsa_scsi_dev_t *dev; |
5966 | u8 reset_type; | 5951 | u8 reset_type; |
@@ -6028,9 +6013,19 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) | |||
6028 | reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical "); | 6013 | reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical "); |
6029 | hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); | 6014 | hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); |
6030 | 6015 | ||
6016 | /* | ||
6017 | * wait to see if any commands will complete before sending reset | ||
6018 | */ | ||
6019 | dev->in_reset = true; /* block any new cmds from OS for this device */ | ||
6020 | for (i = 0; i < 10; i++) { | ||
6021 | if (atomic_read(&dev->commands_outstanding) > 0) | ||
6022 | msleep(1000); | ||
6023 | else | ||
6024 | break; | ||
6025 | } | ||
6026 | |||
6031 | /* send a reset to the SCSI LUN which the command was sent to */ | 6027 | /* send a reset to the SCSI LUN which the command was sent to */ |
6032 | rc = hpsa_do_reset(h, dev, dev->scsi3addr, reset_type, | 6028 | rc = hpsa_do_reset(h, dev, reset_type, DEFAULT_REPLY_QUEUE); |
6033 | DEFAULT_REPLY_QUEUE); | ||
6034 | if (rc == 0) | 6029 | if (rc == 0) |
6035 | rc = SUCCESS; | 6030 | rc = SUCCESS; |
6036 | else | 6031 | else |
@@ -6044,6 +6039,8 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) | |||
6044 | return_reset_status: | 6039 | return_reset_status: |
6045 | spin_lock_irqsave(&h->reset_lock, flags); | 6040 | spin_lock_irqsave(&h->reset_lock, flags); |
6046 | h->reset_in_progress = 0; | 6041 | h->reset_in_progress = 0; |
6042 | if (dev) | ||
6043 | dev->in_reset = false; | ||
6047 | spin_unlock_irqrestore(&h->reset_lock, flags); | 6044 | spin_unlock_irqrestore(&h->reset_lock, flags); |
6048 | return rc; | 6045 | return rc; |
6049 | } | 6046 | } |
@@ -6157,6 +6154,7 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h) | |||
6157 | break; /* it's ours now. */ | 6154 | break; /* it's ours now. */ |
6158 | } | 6155 | } |
6159 | hpsa_cmd_partial_init(h, i, c); | 6156 | hpsa_cmd_partial_init(h, i, c); |
6157 | c->device = NULL; | ||
6160 | return c; | 6158 | return c; |
6161 | } | 6159 | } |
6162 | 6160 | ||
@@ -6610,8 +6608,7 @@ static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd, | |||
6610 | } | 6608 | } |
6611 | } | 6609 | } |
6612 | 6610 | ||
6613 | static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr, | 6611 | static void hpsa_send_host_reset(struct ctlr_info *h, u8 reset_type) |
6614 | u8 reset_type) | ||
6615 | { | 6612 | { |
6616 | struct CommandList *c; | 6613 | struct CommandList *c; |
6617 | 6614 | ||
@@ -8105,7 +8102,7 @@ static int hpsa_request_irqs(struct ctlr_info *h, | |||
8105 | static int hpsa_kdump_soft_reset(struct ctlr_info *h) | 8102 | static int hpsa_kdump_soft_reset(struct ctlr_info *h) |
8106 | { | 8103 | { |
8107 | int rc; | 8104 | int rc; |
8108 | hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER); | 8105 | hpsa_send_host_reset(h, HPSA_RESET_TYPE_CONTROLLER); |
8109 | 8106 | ||
8110 | dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n"); | 8107 | dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n"); |
8111 | rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY); | 8108 | rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY); |
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index a013c16af5f1..f8c88fc7b80a 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h | |||
@@ -76,11 +76,12 @@ struct hpsa_scsi_dev_t { | |||
76 | unsigned char raid_level; /* from inquiry page 0xC1 */ | 76 | unsigned char raid_level; /* from inquiry page 0xC1 */ |
77 | unsigned char volume_offline; /* discovered via TUR or VPD */ | 77 | unsigned char volume_offline; /* discovered via TUR or VPD */ |
78 | u16 queue_depth; /* max queue_depth for this device */ | 78 | u16 queue_depth; /* max queue_depth for this device */ |
79 | atomic_t reset_cmds_out; /* Count of commands to-be affected */ | 79 | atomic_t commands_outstanding; /* track commands sent to device */ |
80 | atomic_t ioaccel_cmds_out; /* Only used for physical devices | 80 | atomic_t ioaccel_cmds_out; /* Only used for physical devices |
81 | * counts commands sent to physical | 81 | * counts commands sent to physical |
82 | * device via "ioaccel" path. | 82 | * device via "ioaccel" path. |
83 | */ | 83 | */ |
84 | bool in_reset; | ||
84 | u32 ioaccel_handle; | 85 | u32 ioaccel_handle; |
85 | u8 active_path_index; | 86 | u8 active_path_index; |
86 | u8 path_map; | 87 | u8 path_map; |
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h index 21a726e2eec6..2daf08f81d80 100644 --- a/drivers/scsi/hpsa_cmd.h +++ b/drivers/scsi/hpsa_cmd.h | |||
@@ -448,7 +448,7 @@ struct CommandList { | |||
448 | struct hpsa_scsi_dev_t *phys_disk; | 448 | struct hpsa_scsi_dev_t *phys_disk; |
449 | 449 | ||
450 | int abort_pending; | 450 | int abort_pending; |
451 | struct hpsa_scsi_dev_t *reset_pending; | 451 | struct hpsa_scsi_dev_t *device; |
452 | atomic_t refcount; /* Must be last to avoid memset in hpsa_cmd_init() */ | 452 | atomic_t refcount; /* Must be last to avoid memset in hpsa_cmd_init() */ |
453 | } __aligned(COMMANDLIST_ALIGNMENT); | 453 | } __aligned(COMMANDLIST_ALIGNMENT); |
454 | 454 | ||