path: root/block/ll_rw_blk.c
author		Jens Axboe <jens.axboe@oracle.com>	2006-12-19 05:12:46 -0500
committer	Jens Axboe <jens.axboe@oracle.com>	2006-12-19 05:12:46 -0500
commit		8e5cfc45e7527eb5c8a9a22d56a7b9227e7c0913 (patch)
tree		7be7c45168af77518bd6a47601e2f9e31b967dba /block/ll_rw_blk.c
parent		48785bb9fa39415d7553e234946442579dfcf591 (diff)
[PATCH] Fixup blk_rq_unmap_user() API
The blk_rq_unmap_user() API is not very nice. It expects the caller to know that rq->bio has to be reset to the original bio, and it will silently do nothing if that is not done. Instead make it explicit that we need to pass in the first bio, by expecting a bio argument.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
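For illustration only (not part of this patch): a minimal sketch, assuming a synchronous caller that maps a single user buffer. The helper name do_user_io, the NULL gendisk handed to blk_execute_rq(), and the error handling are assumptions made for the example; the point is saving rq->bio right after mapping and passing that saved bio to blk_rq_unmap_user().

	#include <linux/blkdev.h>
	#include <linux/bio.h>

	/* Hypothetical caller sketch: remember the head of the mapped bio
	 * chain right after blk_rq_map_user(), since request completion may
	 * advance rq->bio, then hand that saved bio to blk_rq_unmap_user(). */
	static int do_user_io(request_queue_t *q, struct request *rq,
			      void __user *ubuf, unsigned long len)
	{
		struct bio *bio;
		int ret, uret;

		ret = blk_rq_map_user(q, rq, ubuf, len);
		if (ret)
			return ret;

		bio = rq->bio;				/* original mapping head */

		ret = blk_execute_rq(q, NULL, rq, 0);	/* submit and wait */

		uret = blk_rq_unmap_user(bio);		/* old API took the rq */
		if (uret && !ret)
			ret = uret;

		return ret;
	}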
Diffstat (limited to 'block/ll_rw_blk.c')
-rw-r--r--	block/ll_rw_blk.c	28
1 file changed, 16 insertions(+), 12 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index a66ec30855d8..e07c079e07e6 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -2405,6 +2405,7 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 		    unsigned long len)
 {
 	unsigned long bytes_read = 0;
+	struct bio *bio = NULL;
 	int ret;
 
 	if (len > (q->max_hw_sectors << 9))
@@ -2431,6 +2432,8 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
 		if (ret < 0)
 			goto unmap_rq;
+		if (!bio)
+			bio = rq->bio;
 		bytes_read += ret;
 		ubuf += ret;
 	}
@@ -2438,7 +2441,7 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 	rq->buffer = rq->data = NULL;
 	return 0;
 unmap_rq:
-	blk_rq_unmap_user(rq);
+	blk_rq_unmap_user(bio);
 	return ret;
 }
 
@@ -2495,29 +2498,30 @@ EXPORT_SYMBOL(blk_rq_map_user_iov);
 
 /**
  * blk_rq_unmap_user - unmap a request with user data
- * @rq:		rq to be unmapped
+ * @bio:	start of bio list
  *
  * Description:
- *    Unmap a rq previously mapped by blk_rq_map_user().
- *    rq->bio must be set to the original head of the request.
+ *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
+ *    supply the original rq->bio from the blk_rq_map_user() return, since
+ *    the io completion may have changed rq->bio.
  */
-int blk_rq_unmap_user(struct request *rq)
+int blk_rq_unmap_user(struct bio *bio)
 {
-	struct bio *bio, *mapped_bio;
+	struct bio *mapped_bio;
 	int ret = 0, ret2;
 
-	while ((bio = rq->bio)) {
-		if (bio_flagged(bio, BIO_BOUNCED))
+	while (bio) {
+		mapped_bio = bio;
+		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
 			mapped_bio = bio->bi_private;
-		else
-			mapped_bio = bio;
 
 		ret2 = __blk_rq_unmap_user(mapped_bio);
 		if (ret2 && !ret)
 			ret = ret2;
 
-		rq->bio = bio->bi_next;
-		bio_put(bio);
+		mapped_bio = bio;
+		bio = bio->bi_next;
+		bio_put(mapped_bio);
 	}
 
 	return ret;