author	Mike Christie <michaelc@cs.wisc.edu>	2012-05-18 00:56:57 -0400
committer	James Bottomley <JBottomley@Parallels.com>	2012-07-20 03:58:22 -0400
commit	5d9fb5cc1b88277bb28a2a54e51b34cacaa123c2 (patch)
tree	9067ba87ee1455850d97984608daa717f381e0d8 /drivers/scsi/scsi_lib.c
parent	1b8d26206134458044b0689f48194af00c96d406 (diff)
[SCSI] core, classes, mpt2sas: have scsi_internal_device_unblock take new state
This has scsi_internal_device_unblock/scsi_target_unblock take the state to set the devices to as an argument, instead of always setting them to running. The patch also converts the users of these functions.

This allows the FC and iSCSI classes to transition devices from blocked to transport-offline, so that when fast_io_fail/replacement_timeout has fired we do not set the devices back to running. Instead, we set them to SDEV_TRANSPORT_OFFLINE.

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
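For orientation, a minimal sketch of how a transport class caller might use the new signature; the example_* helper names and the generic transport_dev pointer are illustrative assumptions, not code from this patch:

/*
 * Illustrative sketch only -- assumes a transport class (FC/iSCSI style)
 * that blocked its targets on connection loss and now chooses the
 * unblock state based on whether fast_io_fail/replacement_timeout fired.
 */
#include <scsi/scsi_device.h>

static void example_unblock_after_timeout(struct device *transport_dev)
{
	/* Timeout fired: leave the devices offline rather than running. */
	scsi_target_unblock(transport_dev, SDEV_TRANSPORT_OFFLINE);
}

static void example_unblock_after_recovery(struct device *transport_dev)
{
	/* Transport recovered in time: resume normal I/O. */
	scsi_target_unblock(transport_dev, SDEV_RUNNING);
}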
Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r--	drivers/scsi/scsi_lib.c	40
1 file changed, 23 insertions(+), 17 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 340c569d4535..36521a0ac54b 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2444,6 +2444,7 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block);
 /**
  * scsi_internal_device_unblock - resume a device after a block request
  * @sdev:	device to resume
+ * @new_state:	state to set devices to after unblocking
  *
  * Called by scsi lld's or the midlayer to restart the device queue
  * for the previously suspended scsi device. Called from interrupt or
@@ -2453,25 +2454,30 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block);
  *
  * Notes:
  *	This routine transitions the device to the SDEV_RUNNING state
- *	(which must be a legal transition) allowing the midlayer to
- *	goose the queue for this device.  This routine assumes the
- *	host_lock is held upon entry.
+ *	or to one of the offline states (which must be a legal transition)
+ *	allowing the midlayer to goose the queue for this device.  This
+ *	routine assumes the host_lock is held upon entry.
  */
 int
-scsi_internal_device_unblock(struct scsi_device *sdev)
+scsi_internal_device_unblock(struct scsi_device *sdev,
+			     enum scsi_device_state new_state)
 {
 	struct request_queue *q = sdev->request_queue;
 	unsigned long flags;
 
 	/*
-	 * Try to transition the scsi device to SDEV_RUNNING
-	 * and goose the device queue if successful.
+	 * Try to transition the scsi device to SDEV_RUNNING or one of the
+	 * offlined states and goose the device queue if successful.
 	 */
 	if (sdev->sdev_state == SDEV_BLOCK)
-		sdev->sdev_state = SDEV_RUNNING;
-	else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
-		sdev->sdev_state = SDEV_CREATED;
-	else if (sdev->sdev_state != SDEV_CANCEL &&
+		sdev->sdev_state = new_state;
+	else if (sdev->sdev_state == SDEV_CREATED_BLOCK) {
+		if (new_state == SDEV_TRANSPORT_OFFLINE ||
+		    new_state == SDEV_OFFLINE)
+			sdev->sdev_state = new_state;
+		else
+			sdev->sdev_state = SDEV_CREATED;
+	} else if (sdev->sdev_state != SDEV_CANCEL &&
 		 sdev->sdev_state != SDEV_OFFLINE)
 		return -EINVAL;
 
@@ -2512,26 +2518,26 @@ EXPORT_SYMBOL_GPL(scsi_target_block);
 static void
 device_unblock(struct scsi_device *sdev, void *data)
 {
-	scsi_internal_device_unblock(sdev);
+	scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
 }
 
 static int
 target_unblock(struct device *dev, void *data)
 {
 	if (scsi_is_target_device(dev))
-		starget_for_each_device(to_scsi_target(dev), NULL,
+		starget_for_each_device(to_scsi_target(dev), data,
 					device_unblock);
 	return 0;
 }
 
 void
-scsi_target_unblock(struct device *dev)
+scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
 {
 	if (scsi_is_target_device(dev))
-		starget_for_each_device(to_scsi_target(dev), NULL,
+		starget_for_each_device(to_scsi_target(dev), &new_state,
 					device_unblock);
 	else
-		device_for_each_child(dev, NULL, target_unblock);
+		device_for_each_child(dev, &new_state, target_unblock);
 }
 EXPORT_SYMBOL_GPL(scsi_target_unblock);
 
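As a usage note, a rough sketch of how an LLDD-side caller such as mpt2sas pairs a block with the new unblock signature after this change; the example_* wrapper names are hypothetical and the header choice is an assumption:

#include <scsi/scsi_device.h>

/* Hypothetical wrappers -- not taken from this diff. */
static void example_quiesce_io(struct scsi_device *sdev)
{
	/* Stop the queue while device/firmware recovery is in progress. */
	scsi_internal_device_block(sdev);
}

static void example_resume_io(struct scsi_device *sdev)
{
	/* Recovery done: the resume state must now be passed explicitly. */
	scsi_internal_device_unblock(sdev, SDEV_RUNNING);
}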