author    Stefan Haberland <stefan.haberland@de.ibm.com>    2013-04-15 10:41:31 -0400
committer Martin Schwidefsky <schwidefsky@de.ibm.com>    2013-04-17 08:07:34 -0400
commit    c55768765e7b488ff20832c6ba89ea4e017a7b1b (patch)
tree      5be7cd2ec6cf0c34fe05c19a29161702093c4031 /drivers/s390
parent    d42e17129b9f473386d67c6a6549c28bd0e2b52e (diff)
s390/dasd: fix hanging device after resume with internal error 13
If too many ccw requests are pre-built before a suspend/resume cycle, the
device might not get enough memory to do path verification during resume.
Requeue requests to the block device request queue on suspend and free the
pre-built ccw requests.

Signed-off-by: Stefan Haberland <stefan.haberland@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'drivers/s390')
-rw-r--r--   drivers/s390/block/dasd.c   63
1 file changed, 55 insertions(+), 8 deletions(-)
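Before the diff itself, here is a minimal userspace sketch of the idea the patch implements: on suspend, everything on the device queue is drained; requests that originated from the block layer are handed back to it (so their pre-built ccw memory is released), and only internal requests are put back on the device queue. All names and structures in this sketch (mock_cqr, freeze(), and so on) are invented for illustration; the real code operates on struct dasd_ccw_req via blk_requeue_request(), dasd_free_erp_request() and free_cp(), as shown in the diff below.

/* Illustrative userspace model only; not kernel code. */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct mock_cqr {                 /* stands in for a dasd_ccw_req */
        int id;
        bool has_block_request;   /* true: backed by a block-layer request */
        struct mock_cqr *next;
};

/* Push a cqr onto the front of a singly linked list. */
static void push(struct mock_cqr **list, struct mock_cqr *cqr)
{
        cqr->next = *list;
        *list = cqr;
}

/*
 * Freeze: move every queued cqr off the device queue.  Requests that came
 * from the block layer are "requeued" there (modelled here by freeing them
 * after a message), so their memory is available again after resume; purely
 * internal requests go back onto the device queue.
 */
static void freeze(struct mock_cqr **device_queue)
{
        struct mock_cqr *freeze_queue = NULL, *keep = NULL, *cqr;

        /* clear active requests: device queue -> freeze queue */
        while ((cqr = *device_queue) != NULL) {
                *device_queue = cqr->next;
                push(&freeze_queue, cqr);
        }

        while ((cqr = freeze_queue) != NULL) {
                freeze_queue = cqr->next;
                if (cqr->has_block_request) {
                        /* would be requeued to the block layer, ccw memory freed */
                        printf("cqr %d: requeued to block layer, freed\n", cqr->id);
                        free(cqr);
                } else {
                        /* internal request: keep for the device queue */
                        push(&keep, cqr);
                }
        }

        /* remaining internal requests go back to the device queue */
        while ((cqr = keep) != NULL) {
                keep = cqr->next;
                push(device_queue, cqr);
                printf("cqr %d: internal, kept on device queue\n", cqr->id);
        }
}

int main(void)
{
        struct mock_cqr *device_queue = NULL;

        for (int i = 0; i < 4; i++) {
                struct mock_cqr *cqr = malloc(sizeof(*cqr));
                cqr->id = i;
                cqr->has_block_request = (i % 2 == 0);
                push(&device_queue, cqr);
        }
        freeze(&device_queue);

        while (device_queue) {          /* free what the model kept */
                struct mock_cqr *cqr = device_queue;
                device_queue = cqr->next;
                free(cqr);
        }
        return 0;
}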
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 4195cc05efeb..82758cbb220b 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2752,6 +2752,26 @@ static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
 }
 
 /*
+ * Requeue a request back to the block request queue
+ * only works for block requests
+ */
+static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
+{
+        struct dasd_block *block = cqr->block;
+        struct request *req;
+        unsigned long flags;
+
+        if (!block)
+                return -EINVAL;
+        spin_lock_irqsave(&block->queue_lock, flags);
+        req = (struct request *) cqr->callback_data;
+        blk_requeue_request(block->request_queue, req);
+        spin_unlock_irqrestore(&block->queue_lock, flags);
+
+        return 0;
+}
+
+/*
  * Go through all request on the dasd_block request queue, cancel them
  * on the respective dasd_device, and return them to the generic
  * block layer.
@@ -3469,10 +3489,11 @@ EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
 
 int dasd_generic_pm_freeze(struct ccw_device *cdev)
 {
+        struct dasd_device *device = dasd_device_from_cdev(cdev);
+        struct list_head freeze_queue;
         struct dasd_ccw_req *cqr, *n;
+        struct dasd_ccw_req *refers;
         int rc;
-        struct list_head freeze_queue;
-        struct dasd_device *device = dasd_device_from_cdev(cdev);
 
         if (IS_ERR(device))
                 return PTR_ERR(device);
@@ -3485,7 +3506,8 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
 
         /* disallow new I/O  */
         dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
-        /* clear active requests */
+
+        /* clear active requests and requeue them to block layer if possible */
         INIT_LIST_HEAD(&freeze_queue);
         spin_lock_irq(get_ccwdev_lock(cdev));
         rc = 0;
@@ -3505,7 +3527,6 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
                 }
                 list_move_tail(&cqr->devlist, &freeze_queue);
         }
-
         spin_unlock_irq(get_ccwdev_lock(cdev));
 
         list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) {
@@ -3513,12 +3534,38 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
                            (cqr->status != DASD_CQR_CLEAR_PENDING));
                 if (cqr->status == DASD_CQR_CLEARED)
                         cqr->status = DASD_CQR_QUEUED;
+
+                /* requeue requests to blocklayer will only work for
+                   block device requests */
+                if (_dasd_requeue_request(cqr))
+                        continue;
+
+                /* remove requests from device and block queue */
+                list_del_init(&cqr->devlist);
+                while (cqr->refers != NULL) {
+                        refers = cqr->refers;
+                        /* remove the request from the block queue */
+                        list_del(&cqr->blocklist);
+                        /* free the finished erp request */
+                        dasd_free_erp_request(cqr, cqr->memdev);
+                        cqr = refers;
+                }
+                if (cqr->block)
+                        list_del_init(&cqr->blocklist);
+                cqr->block->base->discipline->free_cp(
+                        cqr, (struct request *) cqr->callback_data);
         }
-        /* move freeze_queue to start of the ccw_queue */
-        spin_lock_irq(get_ccwdev_lock(cdev));
-        list_splice_tail(&freeze_queue, &device->ccw_queue);
-        spin_unlock_irq(get_ccwdev_lock(cdev));
 
+        /*
+         * if requests remain then they are internal request
+         * and go back to the device queue
+         */
+        if (!list_empty(&freeze_queue)) {
+                /* move freeze_queue to start of the ccw_queue */
+                spin_lock_irq(get_ccwdev_lock(cdev));
+                list_splice_tail(&freeze_queue, &device->ccw_queue);
+                spin_unlock_irq(get_ccwdev_lock(cdev));
+        }
         dasd_put_device(device);
         return rc;
 }