author		Kiyoshi Ueda <k-ueda@ct.jp.nec.com>	2007-12-11 17:41:54 -0500
committer	Jens Axboe <jens.axboe@oracle.com>	2008-01-28 04:35:57 -0500
commit		9e6e39f2c478fff2e9d3430cdfe6730877942ed6
tree		b01b289b331ff899393afcd4651fa75aaec19e1c /block
parent		3b11313a6c2a42425bf06e92528bda6affd58dec
blk_end_request: changing block layer core (take 4)
This patch converts the core parts of the block layer to use the
blk_end_request interfaces. The related 'uptodate' arguments are
converted to 'error'.
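As a hedged illustration (not code from this patch), the
uptodate-to-error convention the conversion relies on can be sketched
as follows: old-style callers pass 1 for success, 0 for a generic
failure, and a negative errno for a specific one, while the new
interfaces take 0 or a negative errno. The helper name below is
hypothetical; its body mirrors the logic this patch puts into
__end_request().

	/* Hypothetical helper: map an old-style 'uptodate' value to the
	 * new-style 'error' value. */
	static inline int uptodate_to_error(int uptodate)
	{
		if (uptodate > 0)
			return 0;			/* success */
		return uptodate ? uptodate : -EIO;	/* 0 -> -EIO, <0 kept */
	}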
The 'dequeue' argument was originally introduced for
end_dequeued_request(), where no attempt should be made to dequeue the
request because it has already been dequeued. However, the argument is
unnecessary: whether a request is still queued can be checked with
list_empty(&rq->queuelist), since a dequeued request has an empty list
node while a queued one does not. The blk_end_request interfaces
already perform this check (see the sketch below).
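A minimal sketch of that check, assuming the standard rq->queuelist
linkage (again, not code lifted from this patch):

	/* Dequeue only when the request is still linked on the queue; a
	 * dequeued request has an empty queuelist node, so no flag is
	 * needed. */
	if (!list_empty(&rq->queuelist))
		blkdev_dequeue_request(rq);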
As a result of this patch, end_queued_request() and
end_dequeued_request() become identical. A future patch will merge
them, rename the result, and update their callers.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block')
-rw-r--r--	block/ll_rw_blk.c	35
1 file changed, 15 insertions, 20 deletions
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 8b2b2509f60e..fb951198c70e 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -347,7 +347,6 @@ unsigned blk_ordered_req_seq(struct request *rq)
 void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
 {
 	struct request *rq;
-	int uptodate;
 
 	if (error && !q->orderr)
 		q->orderr = error;
@@ -361,15 +360,11 @@ void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
 	/*
 	 * Okay, sequence complete.
 	 */
-	uptodate = 1;
-	if (q->orderr)
-		uptodate = q->orderr;
-
 	q->ordseq = 0;
 	rq = q->orig_bar_rq;
 
-	end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
-	end_that_request_last(rq, uptodate);
+	if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
+		BUG();
 }
 
 static void pre_flush_end_io(struct request *rq, int error)
@@ -486,9 +481,9 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
 			 * ORDERED_NONE while this request is on it.
 			 */
 			blkdev_dequeue_request(rq);
-			end_that_request_first(rq, -EOPNOTSUPP,
-					       rq->hard_nr_sectors);
-			end_that_request_last(rq, -EOPNOTSUPP);
+			if (__blk_end_request(rq, -EOPNOTSUPP,
+					      blk_rq_bytes(rq)))
+				BUG();
 			*rqp = NULL;
 			return 0;
 		}
@@ -3713,14 +3708,14 @@ void end_that_request_last(struct request *req, int uptodate)
 EXPORT_SYMBOL(end_that_request_last);
 
 static inline void __end_request(struct request *rq, int uptodate,
-				 unsigned int nr_bytes, int dequeue)
+				 unsigned int nr_bytes)
 {
-	if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
-		if (dequeue)
-			blkdev_dequeue_request(rq);
-		add_disk_randomness(rq->rq_disk);
-		end_that_request_last(rq, uptodate);
-	}
+	int error = 0;
+
+	if (uptodate <= 0)
+		error = uptodate ? uptodate : -EIO;
+
+	__blk_end_request(rq, error, nr_bytes);
 }
 
 /**
@@ -3763,7 +3758,7 @@ EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
  **/
 void end_queued_request(struct request *rq, int uptodate)
 {
-	__end_request(rq, uptodate, blk_rq_bytes(rq), 1);
+	__end_request(rq, uptodate, blk_rq_bytes(rq));
 }
 EXPORT_SYMBOL(end_queued_request);
 
@@ -3780,7 +3775,7 @@ EXPORT_SYMBOL(end_queued_request);
  **/
 void end_dequeued_request(struct request *rq, int uptodate)
 {
-	__end_request(rq, uptodate, blk_rq_bytes(rq), 0);
+	__end_request(rq, uptodate, blk_rq_bytes(rq));
 }
 EXPORT_SYMBOL(end_dequeued_request);
 
@@ -3806,7 +3801,7 @@ EXPORT_SYMBOL(end_dequeued_request);
  **/
 void end_request(struct request *req, int uptodate)
 {
-	__end_request(req, uptodate, req->hard_cur_sectors << 9, 1);
+	__end_request(req, uptodate, req->hard_cur_sectors << 9);
 }
 EXPORT_SYMBOL(end_request);
 
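A hedged sketch of the completion idiom this patch introduces,
contrasting the old two-call sequence with the new single call; 'error'
stands in for any 0-or-negative-errno value and is not a variable from
the patch:

	/* Old style: two calls, and the caller tracks dequeueing itself. */
	end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
	end_that_request_last(rq, uptodate);

	/* New style: one call. __blk_end_request() returns nonzero only
	 * when bytes remain, which cannot happen when the full request
	 * size (blk_rq_bytes(rq)) is passed, hence the BUG() assertion. */
	if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
		BUG();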