Diffstat (limited to 'drivers/s390/block/dasd.c')
-rw-r--r--  drivers/s390/block/dasd.c | 172
1 file changed, 154 insertions(+), 18 deletions(-)
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index f1b7fdc58a5f..82758cbb220b 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -246,7 +246,7 @@ static struct dentry *dasd_debugfs_setup(const char *name,
 static int dasd_state_known_to_basic(struct dasd_device *device)
 {
 	struct dasd_block *block = device->block;
-	int rc;
+	int rc = 0;
 
 	/* Allocate and register gendisk structure. */
 	if (block) {
@@ -273,7 +273,8 @@ static int dasd_state_known_to_basic(struct dasd_device *device)
 	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
 
 	device->state = DASD_STATE_BASIC;
-	return 0;
+
+	return rc;
 }
 
 /*
@@ -282,6 +283,7 @@ static int dasd_state_known_to_basic(struct dasd_device *device)
 static int dasd_state_basic_to_known(struct dasd_device *device)
 {
 	int rc;
+
 	if (device->block) {
 		dasd_profile_exit(&device->block->profile);
 		if (device->block->debugfs_dentry)
@@ -332,8 +334,10 @@ static int dasd_state_basic_to_ready(struct dasd_device *device)
 		if (block->base->discipline->do_analysis != NULL)
 			rc = block->base->discipline->do_analysis(block);
 		if (rc) {
-			if (rc != -EAGAIN)
+			if (rc != -EAGAIN) {
 				device->state = DASD_STATE_UNFMT;
+				goto out;
+			}
 			return rc;
 		}
 		dasd_setup_queue(block);
@@ -341,11 +345,16 @@ static int dasd_state_basic_to_ready(struct dasd_device *device)
 			     block->blocks << block->s2b_shift);
 		device->state = DASD_STATE_READY;
 		rc = dasd_scan_partitions(block);
-		if (rc)
+		if (rc) {
 			device->state = DASD_STATE_BASIC;
+			return rc;
+		}
 	} else {
 		device->state = DASD_STATE_READY;
 	}
+out:
+	if (device->discipline->basic_to_ready)
+		rc = device->discipline->basic_to_ready(device);
 	return rc;
 }
 
@@ -368,6 +377,11 @@ static int dasd_state_ready_to_basic(struct dasd_device *device)
 {
 	int rc;
 
+	if (device->discipline->ready_to_basic) {
+		rc = device->discipline->ready_to_basic(device);
+		if (rc)
+			return rc;
+	}
 	device->state = DASD_STATE_BASIC;
 	if (device->block) {
 		struct dasd_block *block = device->block;
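
The two hunks above call new basic_to_ready and ready_to_basic hooks on the discipline. Judging from the call sites they are optional int (*)(struct dasd_device *) members of struct dasd_discipline; the sketch below shows how a discipline might wire them up (the dasd_hypo_* names are illustrative only and not part of this patch):

static int dasd_hypo_basic_to_ready(struct dasd_device *device)
{
	/* discipline-specific work before the device reaches DASD_STATE_READY */
	return 0;
}

static int dasd_hypo_ready_to_basic(struct dasd_device *device)
{
	/* a non-zero return aborts the transition and the device stays READY */
	return 0;
}

static struct dasd_discipline dasd_hypo_discipline = {
	/* ... existing mandatory callbacks (check_device, start_IO, ...) ... */
	.basic_to_ready = dasd_hypo_basic_to_ready,
	.ready_to_basic = dasd_hypo_ready_to_basic,
};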
@@ -402,16 +416,10 @@ static int dasd_state_unfmt_to_basic(struct dasd_device *device)
 static int
 dasd_state_ready_to_online(struct dasd_device * device)
 {
-	int rc;
 	struct gendisk *disk;
 	struct disk_part_iter piter;
 	struct hd_struct *part;
 
-	if (device->discipline->ready_to_online) {
-		rc = device->discipline->ready_to_online(device);
-		if (rc)
-			return rc;
-	}
 	device->state = DASD_STATE_ONLINE;
 	if (device->block) {
 		dasd_schedule_block_bh(device->block);
@@ -444,6 +452,7 @@ static int dasd_state_online_to_ready(struct dasd_device *device)
 		if (rc)
 			return rc;
 	}
+
 	device->state = DASD_STATE_READY;
 	if (device->block && !(device->features & DASD_FEATURE_USERAW)) {
 		disk = device->block->bdev->bd_disk;
@@ -2223,6 +2232,77 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
 	return rc;
 }
 
+static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue)
+{
+	struct dasd_ccw_req *cqr;
+
+	list_for_each_entry(cqr, ccw_queue, blocklist) {
+		if (cqr->callback_data != DASD_SLEEPON_END_TAG)
+			return 0;
+	}
+
+	return 1;
+}
+
+static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible)
+{
+	struct dasd_device *device;
+	int rc;
+	struct dasd_ccw_req *cqr, *n;
+
+retry:
+	list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
+		device = cqr->startdev;
+		if (cqr->status != DASD_CQR_FILLED) /*could be failed*/
+			continue;
+
+		if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
+		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+			cqr->status = DASD_CQR_FAILED;
+			cqr->intrc = -EPERM;
+			continue;
+		}
+		/*Non-temporary stop condition will trigger fail fast*/
+		if (device->stopped & ~DASD_STOPPED_PENDING &&
+		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
+		    !dasd_eer_enabled(device)) {
+			cqr->status = DASD_CQR_FAILED;
+			cqr->intrc = -EAGAIN;
+			continue;
+		}
+
+		/*Don't try to start requests if device is stopped*/
+		if (interruptible) {
+			rc = wait_event_interruptible(
+				generic_waitq, !device->stopped);
+			if (rc == -ERESTARTSYS) {
+				cqr->status = DASD_CQR_FAILED;
+				cqr->intrc = rc;
+				continue;
+			}
+		} else
+			wait_event(generic_waitq, !(device->stopped));
+
+		if (!cqr->callback)
+			cqr->callback = dasd_wakeup_cb;
+		cqr->callback_data = DASD_SLEEPON_START_TAG;
+		dasd_add_request_tail(cqr);
+	}
+
+	wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue));
+
+	rc = 0;
+	list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
+		if (__dasd_sleep_on_erp(cqr))
+			rc = 1;
+	}
+	if (rc)
+		goto retry;
+
+
+	return 0;
+}
+
 /*
  * Queue a request to the tail of the device ccw_queue and wait for
  * it's completion.
@@ -2233,6 +2313,15 @@ int dasd_sleep_on(struct dasd_ccw_req *cqr)
 }
 
 /*
+ * Start requests from a ccw_queue and wait for their completion.
+ */
+int dasd_sleep_on_queue(struct list_head *ccw_queue)
+{
+	return _dasd_sleep_on_queue(ccw_queue, 0);
+}
+EXPORT_SYMBOL(dasd_sleep_on_queue);
+
+/*
  * Queue a request to the tail of the device ccw_queue and wait
  * interruptible for it's completion.
  */
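
The exported dasd_sleep_on_queue() takes a plain list_head of struct dasd_ccw_req entries linked through their blocklist members, the same field _dasd_sleep_on_queue() walks above. A rough caller sketch, assuming a hypothetical build_format_cqr() helper that returns a request already in DASD_CQR_FILLED state:

/* Sketch only: build_format_cqr() is a stand-in for whatever discipline
 * code allocates and fills a struct dasd_ccw_req for this device.
 */
static int submit_cqr_batch(struct dasd_device *device, int count)
{
	struct dasd_ccw_req *cqr, *n;
	LIST_HEAD(ccw_queue);
	int i, rc;

	for (i = 0; i < count; i++) {
		cqr = build_format_cqr(device, i);	/* hypothetical helper */
		if (IS_ERR(cqr))
			break;	/* submit whatever was built so far */
		list_add_tail(&cqr->blocklist, &ccw_queue);
	}

	/* start every request on the list and wait until all have completed */
	rc = dasd_sleep_on_queue(&ccw_queue);

	list_for_each_entry_safe(cqr, n, &ccw_queue, blocklist) {
		if (cqr->status != DASD_CQR_DONE)
			rc = -EIO;
		list_del_init(&cqr->blocklist);
		dasd_sfree_request(cqr, device);	/* assumes dasd_smalloc_request() allocation */
	}

	return rc;
}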
@@ -2663,6 +2752,26 @@ static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
 }
 
 /*
+ * Requeue a request back to the block request queue
+ * only works for block requests
+ */
+static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
+{
+	struct dasd_block *block = cqr->block;
+	struct request *req;
+	unsigned long flags;
+
+	if (!block)
+		return -EINVAL;
+	spin_lock_irqsave(&block->queue_lock, flags);
+	req = (struct request *) cqr->callback_data;
+	blk_requeue_request(block->request_queue, req);
+	spin_unlock_irqrestore(&block->queue_lock, flags);
+
+	return 0;
+}
+
+/*
  * Go through all request on the dasd_block request queue, cancel them
  * on the respective dasd_device, and return them to the generic
  * block layer.
@@ -3380,10 +3489,11 @@ EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
 
 int dasd_generic_pm_freeze(struct ccw_device *cdev)
 {
+	struct dasd_device *device = dasd_device_from_cdev(cdev);
+	struct list_head freeze_queue;
 	struct dasd_ccw_req *cqr, *n;
+	struct dasd_ccw_req *refers;
 	int rc;
-	struct list_head freeze_queue;
-	struct dasd_device *device = dasd_device_from_cdev(cdev);
 
 	if (IS_ERR(device))
 		return PTR_ERR(device);
@@ -3396,7 +3506,8 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
 
 	/* disallow new I/O */
 	dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
-	/* clear active requests */
+
+	/* clear active requests and requeue them to block layer if possible */
 	INIT_LIST_HEAD(&freeze_queue);
 	spin_lock_irq(get_ccwdev_lock(cdev));
 	rc = 0;
@@ -3416,7 +3527,6 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
 		}
 		list_move_tail(&cqr->devlist, &freeze_queue);
 	}
-
 	spin_unlock_irq(get_ccwdev_lock(cdev));
 
 	list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) {
@@ -3424,12 +3534,38 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
 			   (cqr->status != DASD_CQR_CLEAR_PENDING));
 		if (cqr->status == DASD_CQR_CLEARED)
 			cqr->status = DASD_CQR_QUEUED;
+
+		/* requeue requests to blocklayer will only work for
+		   block device requests */
+		if (_dasd_requeue_request(cqr))
+			continue;
+
+		/* remove requests from device and block queue */
+		list_del_init(&cqr->devlist);
+		while (cqr->refers != NULL) {
+			refers = cqr->refers;
+			/* remove the request from the block queue */
+			list_del(&cqr->blocklist);
+			/* free the finished erp request */
+			dasd_free_erp_request(cqr, cqr->memdev);
+			cqr = refers;
+		}
+		if (cqr->block)
+			list_del_init(&cqr->blocklist);
+		cqr->block->base->discipline->free_cp(
+			cqr, (struct request *) cqr->callback_data);
 	}
-	/* move freeze_queue to start of the ccw_queue */
-	spin_lock_irq(get_ccwdev_lock(cdev));
-	list_splice_tail(&freeze_queue, &device->ccw_queue);
-	spin_unlock_irq(get_ccwdev_lock(cdev));
 
+	/*
+	 * if requests remain then they are internal request
+	 * and go back to the device queue
+	 */
+	if (!list_empty(&freeze_queue)) {
+		/* move freeze_queue to start of the ccw_queue */
+		spin_lock_irq(get_ccwdev_lock(cdev));
+		list_splice_tail(&freeze_queue, &device->ccw_queue);
+		spin_unlock_irq(get_ccwdev_lock(cdev));
+	}
 	dasd_put_device(device);
 	return rc;
 }