author		Sebastian Ott <sebott@linux.vnet.ibm.com>	2013-02-28 06:07:27 -0500
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2013-03-07 03:52:20 -0500
commit		8360cb5f389ebd36b708978e0f776a285a2deb5a (patch)
tree		b7b9a241facfc1097297154ea1bae4ffc4fe12ad /drivers
parent		f6a70a07079518280022286a1dceb797d12e1edf (diff)
s390/scm_blk: fix request number accounting
If a block device driver cannot fetch all requests from the block layer, it is its responsibility to call the request function again at a later time. Normally this is done after the next interrupt for the underlying device has been handled. However, in situations where there is no outstanding request, we have to schedule the request function ourselves for a later time. This is determined using an internal counter of requests issued to the hardware. In some cases where a request is given back to the block layer unhandled, the number of queued requests was not adjusted. Fix this class of failures by adjusting queued_requests in all functions used to give a request back to the block layer.

Reviewed-by: Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
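For context, the accounting rule the patch enforces can be summarized as a simple invariant on the queued_reqs counter. The following is a minimal sketch only, not the driver source: the structure and helper names other than queued_reqs, atomic_inc() and atomic_dec() are simplified stand-ins for illustration.

```c
/*
 * Sketch of the accounting invariant this patch enforces
 * (simplified stand-in types and helpers, not the driver code).
 */
#include <linux/atomic.h>

struct scm_dev_sketch {
	atomic_t queued_reqs;	/* requests currently issued to the hardware */
};

/* Increment exactly once when a request is handed to the hardware ... */
static void sketch_issue_request(struct scm_dev_sketch *bdev)
{
	atomic_inc(&bdev->queued_reqs);
	/* ... then start the request (blk_start_request() in the driver). */
}

/*
 * ... and decrement on every path that gives the request back to the
 * block layer, whether it is requeued or completed.  If any such path
 * skips the decrement, the counter never reaches zero and the queue
 * restart logic wrongly assumes an interrupt is still outstanding.
 */
static void sketch_give_back_request(struct scm_dev_sketch *bdev)
{
	atomic_dec(&bdev->queued_reqs);
	/* ... blk_requeue_request() or blk_end_request_all() in the driver. */
}
```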
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/s390/block/scm_blk.c	8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 9978ad4433cb..d9c7e940fa35 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -195,14 +195,18 @@ void scm_request_requeue(struct scm_request *scmrq)
 
 	scm_release_cluster(scmrq);
 	blk_requeue_request(bdev->rq, scmrq->request);
+	atomic_dec(&bdev->queued_reqs);
 	scm_request_done(scmrq);
 	scm_ensure_queue_restart(bdev);
 }
 
 void scm_request_finish(struct scm_request *scmrq)
 {
+	struct scm_blk_dev *bdev = scmrq->bdev;
+
 	scm_release_cluster(scmrq);
 	blk_end_request_all(scmrq->request, scmrq->error);
+	atomic_dec(&bdev->queued_reqs);
 	scm_request_done(scmrq);
 }
 
@@ -231,11 +235,13 @@ static void scm_blk_request(struct request_queue *rq)
 			return;
 		}
 		if (scm_need_cluster_request(scmrq)) {
+			atomic_inc(&bdev->queued_reqs);
 			blk_start_request(req);
 			scm_initiate_cluster_request(scmrq);
 			return;
 		}
 		scm_request_prepare(scmrq);
+		atomic_inc(&bdev->queued_reqs);
 		blk_start_request(req);
 
 		ret = scm_start_aob(scmrq->aob);
@@ -244,7 +250,6 @@ static void scm_blk_request(struct request_queue *rq)
 			scm_request_requeue(scmrq);
 			return;
 		}
-		atomic_inc(&bdev->queued_reqs);
 	}
 }
 
@@ -310,7 +315,6 @@ static void scm_blk_tasklet(struct scm_blk_dev *bdev)
 		}
 
 		scm_request_finish(scmrq);
-		atomic_dec(&bdev->queued_reqs);
 		spin_lock_irqsave(&bdev->lock, flags);
 	}
 	spin_unlock_irqrestore(&bdev->lock, flags);