path: root/block
author	Tejun Heo <htejun@gmail.com>	2006-01-06 03:49:03 -0500
committer	Jens Axboe <axboe@suse.de>	2006-01-06 03:49:03 -0500
commit	8ffdc6550c47f75ca4e6c9f30a2a89063e035cf2 (patch)
tree	a478b9acef5c66242a964154f7ad3a0ea750ef0f /block
parent	64100099ed22f71cce656c5c2caecf5c9cf255dc (diff)
[BLOCK] add @uptodate to end_that_request_last() and @error to rq_end_io_fn()
Add an @uptodate argument to end_that_request_last() and @error to rq_end_io_fn().

There is no generic way to pass an error code to the request completion function, which makes generic error handling of non-fs requests difficult (rq->errors is driver-specific and each driver uses it differently). This patch adds @uptodate to end_that_request_last() and @error to rq_end_io_fn().

For fs requests, this doesn't really matter, so just using the same uptodate argument used in the last call to end_that_request_first() should suffice. IMHO, this can also help the generic command-carrying request Jens is working on.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jens Axboe <axboe@suse.de>
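For orientation, here is a minimal sketch of the interfaces as they look after this patch. The prototypes are taken from the hunks below; the typedef form for rq_end_io_fn is assumed from its use in blk_execute_rq_nowait() and is not itself shown in this diff.

/*
 * Sketch of the updated interfaces (as implied by the diff below).
 * rq_end_io_fn now receives the error code computed by the block layer;
 * end_that_request_last() gains an @uptodate argument where 1 means
 * success, 0 is treated as -EIO, and a negative value is passed through
 * to rq->end_io() unchanged.
 */
typedef void (rq_end_io_fn)(struct request *, int);

void end_that_request_last(struct request *req, int uptodate);
void blk_end_sync_rq(struct request *rq, int error);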
Diffstat (limited to 'block')
-rw-r--r--	block/elevator.c	2
-rw-r--r--	block/ll_rw_blk.c	22
2 files changed, 16 insertions(+), 8 deletions(-)
diff --git a/block/elevator.c b/block/elevator.c
index 6c3fc8a10bf2..85a11cee7d1c 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -498,7 +498,7 @@ struct request *elv_next_request(request_queue_t *q)
 			blkdev_dequeue_request(rq);
 			rq->flags |= REQ_QUIET;
 			end_that_request_chunk(rq, 0, nr_bytes);
-			end_that_request_last(rq);
+			end_that_request_last(rq, 0);
 		} else {
 			printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
 								ret);
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index e02c88ca8fb5..8b1ae69bc5ac 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -344,7 +344,7 @@ EXPORT_SYMBOL(blk_queue_issue_flush_fn);
 /*
  * Cache flushing for ordered writes handling
  */
-static void blk_pre_flush_end_io(struct request *flush_rq)
+static void blk_pre_flush_end_io(struct request *flush_rq, int error)
 {
 	struct request *rq = flush_rq->end_io_data;
 	request_queue_t *q = rq->q;
@@ -362,7 +362,7 @@ static void blk_pre_flush_end_io(struct request *flush_rq)
 	}
 }
 
-static void blk_post_flush_end_io(struct request *flush_rq)
+static void blk_post_flush_end_io(struct request *flush_rq, int error)
 {
 	struct request *rq = flush_rq->end_io_data;
 	request_queue_t *q = rq->q;
@@ -2317,7 +2317,7 @@ EXPORT_SYMBOL(blk_rq_map_kern);
  */
 void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
 			   struct request *rq, int at_head,
-			   void (*done)(struct request *))
+			   rq_end_io_fn *done)
 {
 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
 
@@ -2521,7 +2521,7 @@ EXPORT_SYMBOL(blk_put_request);
  * blk_end_sync_rq - executes a completion event on a request
  * @rq: request to complete
  */
-void blk_end_sync_rq(struct request *rq)
+void blk_end_sync_rq(struct request *rq, int error)
 {
 	struct completion *waiting = rq->waiting;
 
@@ -3183,9 +3183,17 @@ EXPORT_SYMBOL(end_that_request_chunk);
 /*
  * queue lock must be held
  */
-void end_that_request_last(struct request *req)
+void end_that_request_last(struct request *req, int uptodate)
 {
 	struct gendisk *disk = req->rq_disk;
+	int error;
+
+	/*
+	 * extend uptodate bool to allow < 0 value to be direct io error
+	 */
+	error = 0;
+	if (end_io_error(uptodate))
+		error = !uptodate ? -EIO : uptodate;
 
 	if (unlikely(laptop_mode) && blk_fs_request(req))
 		laptop_io_completion();
@@ -3200,7 +3208,7 @@ void end_that_request_last(struct request *req)
 		disk->in_flight--;
 	}
 	if (req->end_io)
-		req->end_io(req);
+		req->end_io(req, error);
 	else
 		__blk_put_request(req->q, req);
 }
@@ -3212,7 +3220,7 @@ void end_request(struct request *req, int uptodate)
 	if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) {
 		add_disk_randomness(req->rq_disk);
 		blkdev_dequeue_request(req);
-		end_that_request_last(req);
+		end_that_request_last(req, uptodate);
 	}
 }
 
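As a usage illustration (not part of this patch), a driver-side completion hook under the new calling convention might look like the sketch below; the names my_rq_done() and my_complete_special_rq() are hypothetical.

#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>

/* Hypothetical driver callback matching the new rq_end_io_fn signature:
 * the block layer now passes the computed @error directly. */
static void my_rq_done(struct request *rq, int error)
{
	struct completion *waiting = rq->end_io_data;

	if (error)
		printk(KERN_ERR "special request failed: %d\n", error);
	complete(waiting);
}

/* Completing a non-fs request with an explicit error code: a negative
 * @uptodate is propagated as @error to rq->end_io().  The queue lock
 * must be held, as noted above end_that_request_last(). */
static void my_complete_special_rq(struct request *rq, int err)
{
	end_that_request_last(rq, err < 0 ? err : 1);
}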