diff options
author | Tejun Heo <tj@kernel.org> | 2009-05-07 22:54:10 -0400 |
---|---|---|
committer | Jens Axboe <jens.axboe@oracle.com> | 2009-05-11 03:52:16 -0400 |
commit | bab2a807a489822ded0c9d4a5344c80bcac10b0a (patch) | |
tree | cf93330ae12f820d1dd53bc9d484fa6cad38962c /drivers/block/xd.c | |
parent | 06b0608e2b46465e8e663214e7db982ddb000346 (diff) |
xd: dequeue in-flight request
xd processes requests one-by-one synchronously and can be easily
converted to dequeueing model. Convert it.
While at it, use rq_cur_bytes instead of rq_bytes when checking for
sector overflow. This is for consistency and better behavior for
merged requests.
[ Impact: dequeue in-flight request ]
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'drivers/block/xd.c')
-rw-r--r-- | drivers/block/xd.c | 29 |
1 files changed, 17 insertions, 12 deletions
diff --git a/drivers/block/xd.c b/drivers/block/xd.c index 4ef88018bcde..d4c4352354b5 100644 --- a/drivers/block/xd.c +++ b/drivers/block/xd.c | |||
@@ -305,26 +305,31 @@ static void do_xd_request (struct request_queue * q) | |||
305 | if (xdc_busy) | 305 | if (xdc_busy) |
306 | return; | 306 | return; |
307 | 307 | ||
308 | while ((req = elv_next_request(q)) != NULL) { | 308 | req = elv_next_request(q); |
309 | if (req) | ||
310 | blkdev_dequeue_request(req); | ||
311 | |||
312 | while (req) { | ||
309 | unsigned block = blk_rq_pos(req); | 313 | unsigned block = blk_rq_pos(req); |
310 | unsigned count = blk_rq_sectors(req); | 314 | unsigned count = blk_rq_cur_sectors(req); |
311 | XD_INFO *disk = req->rq_disk->private_data; | 315 | XD_INFO *disk = req->rq_disk->private_data; |
312 | int res = 0; | 316 | int res = -EIO; |
313 | int retry; | 317 | int retry; |
314 | 318 | ||
315 | if (!blk_fs_request(req)) { | 319 | if (!blk_fs_request(req)) |
316 | __blk_end_request_cur(req, -EIO); | 320 | goto done; |
317 | continue; | 321 | if (block + count > get_capacity(req->rq_disk)) |
318 | } | 322 | goto done; |
319 | if (block + count > get_capacity(req->rq_disk)) { | ||
320 | __blk_end_request_cur(req, -EIO); | ||
321 | continue; | ||
322 | } | ||
323 | for (retry = 0; (retry < XD_RETRIES) && !res; retry++) | 323 | for (retry = 0; (retry < XD_RETRIES) && !res; retry++) |
324 | res = xd_readwrite(rq_data_dir(req), disk, req->buffer, | 324 | res = xd_readwrite(rq_data_dir(req), disk, req->buffer, |
325 | block, count); | 325 | block, count); |
326 | done: | ||
326 | /* wrap up, 0 = success, -errno = fail */ | 327 | /* wrap up, 0 = success, -errno = fail */ |
327 | __blk_end_request_cur(req, res); | 328 | if (!__blk_end_request_cur(req, res)) { |
329 | req = elv_next_request(q); | ||
330 | if (req) | ||
331 | blkdev_dequeue_request(req); | ||
332 | } | ||
328 | } | 333 | } |
329 | } | 334 | } |
330 | 335 | ||