Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r--  drivers/scsi/scsi_lib.c  104
1 file changed, 50 insertions(+), 54 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 6dfb9785d345..b58327758c58 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -109,7 +109,7 @@ static void scsi_unprep_request(struct request *req)
  * for a requeue after completion, which should only occur in this
  * file.
  */
-static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
+static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
 {
 	struct Scsi_Host *host = cmd->device->host;
 	struct scsi_device *device = cmd->device;
@@ -155,15 +155,14 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
 
 	/*
 	 * Requeue this command. It will go before all other commands
-	 * that are already in the queue.
+	 * that are already in the queue. Schedule requeue work under
+	 * lock such that the kblockd_schedule_work() call happens
+	 * before blk_cleanup_queue() finishes.
 	 */
 	spin_lock_irqsave(q->queue_lock, flags);
 	blk_requeue_request(q, cmd->request);
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
 	kblockd_schedule_work(q, &device->requeue_work);
-
-	return 0;
+	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
 /*
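Moving kblockd_schedule_work() inside the locked region closes a shutdown race: blk_cleanup_queue() takes the same queue lock and then drains kblockd, so it can no longer finish between the requeue and the work being scheduled. A schematic timeline of the race being closed, as an illustrative sketch rather than kernel code:

	/*
	 *  requeue path (CPU 0)                  teardown path (CPU 1)
	 *  --------------------                  ---------------------
	 *  spin_lock_irqsave(q->queue_lock)
	 *  blk_requeue_request(q, cmd->request)
	 *  spin_unlock_irqrestore(...)
	 *                                        blk_cleanup_queue(q)
	 *                                          takes queue_lock, marks the
	 *                                          queue dead, drains kblockd,
	 *                                          releases queue resources
	 *  kblockd_schedule_work(q, ...)         <-- too late: the work item
	 *                                            may now run against a
	 *                                            dead queue
	 */

With the call made before the unlock, blk_cleanup_queue() cannot proceed until the work is already queued, so its kblockd drain waits for the requeue work instead of racing with it.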
@@ -185,9 +184,9 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
  * Notes: This could be called either from an interrupt context or a
  * normal process context.
  */
-int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
+void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
 {
-	return __scsi_queue_insert(cmd, reason, 1);
+	__scsi_queue_insert(cmd, reason, 1);
 }
 /**
  * scsi_execute - insert request and wait for the result
@@ -406,10 +405,6 @@ static void scsi_run_queue(struct request_queue *q)
 	LIST_HEAD(starved_list);
 	unsigned long flags;
 
-	/* if the device is dead, sdev will be NULL, so no queue to run */
-	if (!sdev)
-		return;
-
 	shost = sdev->host;
 	if (scsi_target(sdev)->single_lun)
 		scsi_single_lun_run(sdev);
@@ -483,15 +478,26 @@ void scsi_requeue_run_queue(struct work_struct *work)
  */
 static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
 {
+	struct scsi_device *sdev = cmd->device;
 	struct request *req = cmd->request;
 	unsigned long flags;
 
+	/*
+	 * We need to hold a reference on the device to avoid the queue being
+	 * killed after the unlock and before scsi_run_queue is invoked which
+	 * may happen because scsi_unprep_request() puts the command which
+	 * releases its reference on the device.
+	 */
+	get_device(&sdev->sdev_gendev);
+
 	spin_lock_irqsave(q->queue_lock, flags);
 	scsi_unprep_request(req);
 	blk_requeue_request(q, req);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
 	scsi_run_queue(q);
+
+	put_device(&sdev->sdev_gendev);
 }
 
 void scsi_next_command(struct scsi_cmnd *cmd)
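The new get_device()/put_device() pair pins the scsi_device, and through it the request queue, across the window where no lock is held: scsi_unprep_request() puts the command, which can drop the last device reference and let teardown kill the queue before scsi_run_queue(q) gets to run. Reduced to its essentials (an illustrative sketch, not a drop-in function):

	/* Pin-across-unlocked-window pattern used by scsi_requeue_command(). */
	get_device(&sdev->sdev_gendev);          /* our own reference */

	spin_lock_irqsave(q->queue_lock, flags);
	scsi_unprep_request(req);                /* may put the command's
	                                            device reference */
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);                       /* q stays valid: we still
	                                            hold the device */
	put_device(&sdev->sdev_gendev);          /* unpin */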
@@ -1173,6 +1179,7 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
 	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
 		switch (sdev->sdev_state) {
 		case SDEV_OFFLINE:
+		case SDEV_TRANSPORT_OFFLINE:
 			/*
 			 * If the device is offline we refuse to process any
 			 * commands. The device must be brought online
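With SDEV_TRANSPORT_OFFLINE added here, requests against a transport-offlined device are rejected at prepare time exactly like the plain SDEV_OFFLINE case. The tail of the case falls outside this hunk; abridged from the surrounding scsi_prep_state_check() logic, it behaves as sketched below:

	case SDEV_OFFLINE:
	case SDEV_TRANSPORT_OFFLINE:
		/* both offline flavours refuse to process any commands */
		sdev_printk(KERN_ERR, sdev,
			    "rejecting I/O to offline device\n");
		ret = BLKPREP_KILL;   /* fail the request before it
		                         reaches the LLD */
		break;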
@@ -1370,16 +1377,16 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
  * may be changed after request stacking drivers call the function,
  * regardless of taking lock or not.
  *
- * When scsi can't dispatch I/Os anymore and needs to kill I/Os
- * (e.g. !sdev), scsi needs to return 'not busy'.
- * Otherwise, request stacking drivers may hold requests forever.
+ * When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi
+ * needs to return 'not busy'. Otherwise, request stacking drivers
+ * may hold requests forever.
  */
 static int scsi_lld_busy(struct request_queue *q)
 {
 	struct scsi_device *sdev = q->queuedata;
 	struct Scsi_Host *shost;
 
-	if (!sdev)
+	if (blk_queue_dead(q))
 		return 0;
 
 	shost = sdev->host;
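scsi_lld_busy() is reached via blk_lld_busy(), which request stacking drivers such as dm-multipath call to decide whether to hold back queued requests. Testing blk_queue_dead(q) instead of !sdev preserves the contract in the comment: a dying queue reports 'not busy', so held requests are dispatched and failed rather than parked forever. A hedged sketch of the caller side:

	/* Stacking-driver side (illustrative): */
	if (blk_lld_busy(bottom_queue)) {
		/* device genuinely busy: keep the request and retry later */
	} else {
		/* not busy, or the queue is dead: dispatch now; a dead
		 * queue fails the request instead of holding it */
	}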
@@ -1490,12 +1497,6 @@ static void scsi_request_fn(struct request_queue *q)
 	struct scsi_cmnd *cmd;
 	struct request *req;
 
-	if (!sdev) {
-		while ((req = blk_peek_request(q)) != NULL)
-			scsi_kill_request(req, q);
-		return;
-	}
-
 	if(!get_device(&sdev->sdev_gendev))
 		/* We must be tearing the block queue down already */
 		return;
@@ -1697,20 +1698,6 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
 	return q;
 }
 
-void scsi_free_queue(struct request_queue *q)
-{
-	unsigned long flags;
-
-	WARN_ON(q->queuedata);
-
-	/* cause scsi_request_fn() to kill all non-finished requests */
-	spin_lock_irqsave(q->queue_lock, flags);
-	q->request_fn(q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
-	blk_cleanup_queue(q);
-}
-
 /*
  * Function:    scsi_block_requests()
  *
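With scsi_free_queue() deleted, teardown is expected to go through blk_cleanup_queue() directly, whose dead-queue handling now kills outstanding requests; there is no longer a need to run request_fn by hand under the lock. The replacement call sites are outside this diff; as a sketch:

	/* Illustrative replacement at a teardown call site: */
	blk_cleanup_queue(sdev->request_queue);  /* marks the queue dead,
	                                            drains it, and fails any
	                                            remaining requests */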
@@ -2081,6 +2068,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
 		switch (oldstate) {
 		case SDEV_CREATED:
 		case SDEV_OFFLINE:
+		case SDEV_TRANSPORT_OFFLINE:
 		case SDEV_QUIESCE:
 		case SDEV_BLOCK:
 			break;
@@ -2093,6 +2081,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
 		switch (oldstate) {
 		case SDEV_RUNNING:
 		case SDEV_OFFLINE:
+		case SDEV_TRANSPORT_OFFLINE:
 			break;
 		default:
 			goto illegal;
@@ -2100,6 +2089,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
 		break;
 
 	case SDEV_OFFLINE:
+	case SDEV_TRANSPORT_OFFLINE:
 		switch (oldstate) {
 		case SDEV_CREATED:
 		case SDEV_RUNNING:
@@ -2136,6 +2126,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
 		case SDEV_RUNNING:
 		case SDEV_QUIESCE:
 		case SDEV_OFFLINE:
+		case SDEV_TRANSPORT_OFFLINE:
 		case SDEV_BLOCK:
 			break;
 		default:
@@ -2148,6 +2139,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
 		case SDEV_CREATED:
 		case SDEV_RUNNING:
 		case SDEV_OFFLINE:
+		case SDEV_TRANSPORT_OFFLINE:
 		case SDEV_CANCEL:
 			break;
 		default:
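The five hunks above wire SDEV_TRANSPORT_OFFLINE into scsi_device_set_state()'s legality tables, making it enterable from and leavable to the same neighbours as SDEV_OFFLINE. Illustrative transitions under the tables as patched:

	/* Legal: a running device may be transport-offlined and later
	 * brought back. scsi_device_set_state() returns 0 on success. */
	scsi_device_set_state(sdev, SDEV_TRANSPORT_OFFLINE);
	scsi_device_set_state(sdev, SDEV_RUNNING);

	/* Illegal transitions are refused with -EINVAL, e.g. trying to
	 * transport-offline a device already in SDEV_DEL. */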
@@ -2405,7 +2397,6 @@ EXPORT_SYMBOL(scsi_target_resume);
  * (which must be a legal transition). When the device is in this
  * state, all commands are deferred until the scsi lld reenables
  * the device with scsi_device_unblock or device_block_tmo fires.
- * This routine assumes the host_lock is held on entry.
  */
 int
 scsi_internal_device_block(struct scsi_device *sdev)
@@ -2438,6 +2429,7 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block);
 /**
  * scsi_internal_device_unblock - resume a device after a block request
  * @sdev:	device to resume
+ * @new_state:	state to set devices to after unblocking
  *
  * Called by scsi lld's or the midlayer to restart the device queue
  * for the previously suspended scsi device. Called from interrupt or
@@ -2447,25 +2439,29 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block);
  *
  * Notes:
  *	This routine transitions the device to the SDEV_RUNNING state
- *	(which must be a legal transition) allowing the midlayer to
- *	goose the queue for this device. This routine assumes the
- *	host_lock is held upon entry.
+ *	or to one of the offline states (which must be a legal transition)
+ *	allowing the midlayer to goose the queue for this device.
  */
 int
-scsi_internal_device_unblock(struct scsi_device *sdev)
+scsi_internal_device_unblock(struct scsi_device *sdev,
+			     enum scsi_device_state new_state)
 {
 	struct request_queue *q = sdev->request_queue;
 	unsigned long flags;
 
 	/*
-	 * Try to transition the scsi device to SDEV_RUNNING
-	 * and goose the device queue if successful.
+	 * Try to transition the scsi device to SDEV_RUNNING or one of the
+	 * offlined states and goose the device queue if successful.
 	 */
 	if (sdev->sdev_state == SDEV_BLOCK)
-		sdev->sdev_state = SDEV_RUNNING;
-	else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
-		sdev->sdev_state = SDEV_CREATED;
-	else if (sdev->sdev_state != SDEV_CANCEL &&
+		sdev->sdev_state = new_state;
+	else if (sdev->sdev_state == SDEV_CREATED_BLOCK) {
+		if (new_state == SDEV_TRANSPORT_OFFLINE ||
+		    new_state == SDEV_OFFLINE)
+			sdev->sdev_state = new_state;
+		else
+			sdev->sdev_state = SDEV_CREATED;
+	} else if (sdev->sdev_state != SDEV_CANCEL &&
 		 sdev->sdev_state != SDEV_OFFLINE)
 		return -EINVAL;
 
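The new new_state argument lets a caller leave the blocked state directly into an offline state instead of always resuming to SDEV_RUNNING; a transport can use this when a blocked device is declared lost. Illustrative calls:

	/* Resume I/O on a blocked device: */
	scsi_internal_device_unblock(sdev, SDEV_RUNNING);

	/* Give up on the transport: the queue is still goosed below, so
	 * requests queued while blocked are failed instead of stalling. */
	scsi_internal_device_unblock(sdev, SDEV_TRANSPORT_OFFLINE);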
@@ -2506,26 +2502,26 @@ EXPORT_SYMBOL_GPL(scsi_target_block);
 static void
 device_unblock(struct scsi_device *sdev, void *data)
 {
-	scsi_internal_device_unblock(sdev);
+	scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
 }
 
 static int
 target_unblock(struct device *dev, void *data)
 {
 	if (scsi_is_target_device(dev))
-		starget_for_each_device(to_scsi_target(dev), NULL,
+		starget_for_each_device(to_scsi_target(dev), data,
 					device_unblock);
 	return 0;
 }
 
 void
-scsi_target_unblock(struct device *dev)
+scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
 {
 	if (scsi_is_target_device(dev))
-		starget_for_each_device(to_scsi_target(dev), NULL,
+		starget_for_each_device(to_scsi_target(dev), &new_state,
 					device_unblock);
 	else
-		device_for_each_child(dev, NULL, target_unblock);
+		device_for_each_child(dev, &new_state, target_unblock);
 }
 EXPORT_SYMBOL_GPL(scsi_target_unblock);
 
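scsi_target_unblock() now threads new_state through the driver-model iterators as the opaque data pointer, which device_unblock() casts back to an enum scsi_device_state. Caller-side sketch (actual call sites live in the transport classes, outside this diff):

	/* Unblock every device under a target back to normal operation: */
	scsi_target_unblock(&starget->dev, SDEV_RUNNING);

	/* Or mark them all transport-offline, e.g. on a link loss timeout: */
	scsi_target_unblock(&starget->dev, SDEV_TRANSPORT_OFFLINE);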