aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSebastian Ott <sebott@linux.ibm.com>2018-05-16 05:25:21 -0400
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2018-05-25 02:12:50 -0400
commit1bcdb5354aee2c0abcd13d912be35ae39a4144b6 (patch)
tree80c2825c2bb008202e437261492229531f4848de
parenta166c368e7dbc84a5d8f013d9fda99f47f9c9f13 (diff)
s390/dasd: simplify locking in dasd_times_out
Provide __dasd_cancel_req that is called with the ccw device lock held to simplify the locking in dasd_times_out. Also this removes the following sparse warning: context imbalance in 'dasd_times_out' - different lock contexts for basic block Note: with this change dasd_schedule_device_bh is now called (via dasd_cancel_req) with the ccw device lock held. But this is already the case for other codepaths. Signed-off-by: Sebastian Ott <sebott@linux.ibm.com> Reviewed-by: Stefan Haberland <sth@linux.ibm.com> Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
-rw-r--r--drivers/s390/block/dasd.c31
1 file changed, 17 insertions, 14 deletions
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 04143c08bd6e..f401b4c38cfc 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2569,14 +2569,11 @@ EXPORT_SYMBOL(dasd_sleep_on_immediatly);
2569 * Cancellation of a request is an asynchronous operation! The calling 2569 * Cancellation of a request is an asynchronous operation! The calling
2570 * function has to wait until the request is properly returned via callback. 2570 * function has to wait until the request is properly returned via callback.
2571 */ 2571 */
2572int dasd_cancel_req(struct dasd_ccw_req *cqr) 2572static int __dasd_cancel_req(struct dasd_ccw_req *cqr)
2573{ 2573{
2574 struct dasd_device *device = cqr->startdev; 2574 struct dasd_device *device = cqr->startdev;
2575 unsigned long flags; 2575 int rc = 0;
2576 int rc;
2577 2576
2578 rc = 0;
2579 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
2580 switch (cqr->status) { 2577 switch (cqr->status) {
2581 case DASD_CQR_QUEUED: 2578 case DASD_CQR_QUEUED:
2582 /* request was not started - just set to cleared */ 2579 /* request was not started - just set to cleared */
@@ -2596,11 +2593,21 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr)
2596 default: /* already finished or clear pending - do nothing */ 2593 default: /* already finished or clear pending - do nothing */
2597 break; 2594 break;
2598 } 2595 }
2599 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
2600 dasd_schedule_device_bh(device); 2596 dasd_schedule_device_bh(device);
2601 return rc; 2597 return rc;
2602} 2598}
2603EXPORT_SYMBOL(dasd_cancel_req); 2599
2600int dasd_cancel_req(struct dasd_ccw_req *cqr)
2601{
2602 struct dasd_device *device = cqr->startdev;
2603 unsigned long flags;
2604 int rc;
2605
2606 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
2607 rc = __dasd_cancel_req(cqr);
2608 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
2609 return rc;
2610}
2604 2611
2605/* 2612/*
2606 * SECTION: Operations of the dasd_block layer. 2613 * SECTION: Operations of the dasd_block layer.
@@ -3082,12 +3089,10 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
3082 cqr->retries = -1; 3089 cqr->retries = -1;
3083 cqr->intrc = -ETIMEDOUT; 3090 cqr->intrc = -ETIMEDOUT;
3084 if (cqr->status >= DASD_CQR_QUEUED) { 3091 if (cqr->status >= DASD_CQR_QUEUED) {
3085 spin_unlock(get_ccwdev_lock(device->cdev)); 3092 rc = __dasd_cancel_req(cqr);
3086 rc = dasd_cancel_req(cqr);
3087 } else if (cqr->status == DASD_CQR_FILLED || 3093 } else if (cqr->status == DASD_CQR_FILLED ||
3088 cqr->status == DASD_CQR_NEED_ERP) { 3094 cqr->status == DASD_CQR_NEED_ERP) {
3089 cqr->status = DASD_CQR_TERMINATED; 3095 cqr->status = DASD_CQR_TERMINATED;
3090 spin_unlock(get_ccwdev_lock(device->cdev));
3091 } else if (cqr->status == DASD_CQR_IN_ERP) { 3096 } else if (cqr->status == DASD_CQR_IN_ERP) {
3092 struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr; 3097 struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr;
3093 3098
@@ -3102,9 +3107,7 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
3102 searchcqr->retries = -1; 3107 searchcqr->retries = -1;
3103 searchcqr->intrc = -ETIMEDOUT; 3108 searchcqr->intrc = -ETIMEDOUT;
3104 if (searchcqr->status >= DASD_CQR_QUEUED) { 3109 if (searchcqr->status >= DASD_CQR_QUEUED) {
3105 spin_unlock(get_ccwdev_lock(device->cdev)); 3110 rc = __dasd_cancel_req(searchcqr);
3106 rc = dasd_cancel_req(searchcqr);
3107 spin_lock(get_ccwdev_lock(device->cdev));
3108 } else if ((searchcqr->status == DASD_CQR_FILLED) || 3111 } else if ((searchcqr->status == DASD_CQR_FILLED) ||
3109 (searchcqr->status == DASD_CQR_NEED_ERP)) { 3112 (searchcqr->status == DASD_CQR_NEED_ERP)) {
3110 searchcqr->status = DASD_CQR_TERMINATED; 3113 searchcqr->status = DASD_CQR_TERMINATED;
@@ -3118,8 +3121,8 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
3118 } 3121 }
3119 break; 3122 break;
3120 } 3123 }
3121 spin_unlock(get_ccwdev_lock(device->cdev));
3122 } 3124 }
3125 spin_unlock(get_ccwdev_lock(device->cdev));
3123 dasd_schedule_block_bh(block); 3126 dasd_schedule_block_bh(block);
3124 spin_unlock(&block->queue_lock); 3127 spin_unlock(&block->queue_lock);
3125 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3128 spin_unlock_irqrestore(&cqr->dq->lock, flags);