path: root/block/blk-map.c
author     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>    2008-09-02 03:20:19 -0400
committer  Jens Axboe <jens.axboe@oracle.com>                 2008-10-09 02:56:11 -0400
commit     818827669d85b84241696ffef2de485db46b0b5e (patch)
tree       694d09728733e65d604bf3e1f13679db73fc1d9a /block/blk-map.c
parent     839e96afba87117befd39cf4e43f156edc8047a7 (diff)
block: make blk_rq_map_user take a NULL user-space buffer
This patch changes blk_rq_map_user to accept a NULL user-space buffer with a READ command if rq_map_data is not NULL. Thus a caller can pass page frames to blk_rq_map_user to just set up a request and bios over those page frames properly. bio_uncopy_user (called via blk_rq_unmap_user) doesn't copy data to user space for such a request.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
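For illustration, here is a minimal caller-side sketch of the new behaviour. It is not part of this patch: the helper name is hypothetical, and the struct rq_map_data field names (pages, page_order, nr_entries) are assumed from the same patch series.

#include <linux/blkdev.h>

/*
 * Hypothetical caller: a driver that already owns page frames (e.g. a
 * reserved pool) builds a READ request over them and passes a NULL
 * user-space buffer.
 */
static int setup_null_mapped_read(struct request_queue *q,
                                  struct page **pages, int nr_pages,
                                  unsigned int len)
{
        struct rq_map_data map_data;
        struct request *rq;
        int ret;

        rq = blk_get_request(q, READ, GFP_KERNEL);
        if (!rq)
                return -ENOMEM;

        map_data.pages = pages;
        map_data.page_order = 0;        /* order-0 pages */
        map_data.nr_entries = nr_pages;

        /*
         * ubuf == NULL is accepted because map_data is set and the
         * request is a READ; the bios are built over map_data's pages
         * and flagged BIO_NULL_MAPPED, so nothing is copied back to
         * user space at unmap time.
         */
        ret = blk_rq_map_user(q, rq, &map_data, NULL, len, GFP_KERNEL);
        if (ret) {
                blk_put_request(rq);
                return ret;
        }

        /* ... submit rq, then blk_rq_unmap_user(rq->bio) and blk_put_request(rq) ... */
        return 0;
}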
Diffstat (limited to 'block/blk-map.c')
-rw-r--r--  block/blk-map.c  16
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/block/blk-map.c b/block/blk-map.c
index 572140cda5ff..4849fa36161e 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -42,7 +42,7 @@ static int __blk_rq_unmap_user(struct bio *bio)
 
 static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
                              struct rq_map_data *map_data, void __user *ubuf,
-                             unsigned int len, gfp_t gfp_mask)
+                             unsigned int len, int null_mapped, gfp_t gfp_mask)
 {
         unsigned long uaddr;
         struct bio *bio, *orig_bio;
@@ -63,6 +63,9 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
         if (IS_ERR(bio))
                 return PTR_ERR(bio);
 
+        if (null_mapped)
+                bio->bi_flags |= (1 << BIO_NULL_MAPPED);
+
         orig_bio = bio;
         blk_queue_bounce(q, &bio);
 
@@ -111,12 +114,17 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 {
         unsigned long bytes_read = 0;
         struct bio *bio = NULL;
-        int ret;
+        int ret, null_mapped = 0;
 
         if (len > (q->max_hw_sectors << 9))
                 return -EINVAL;
-        if (!len || !ubuf)
+        if (!len)
                 return -EINVAL;
+        if (!ubuf) {
+                if (!map_data || rq_data_dir(rq) != READ)
+                        return -EINVAL;
+                null_mapped = 1;
+        }
 
         while (bytes_read != len) {
                 unsigned long map_len, end, start;
@@ -135,7 +143,7 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
                 map_len -= PAGE_SIZE;
 
                 ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
-                                        gfp_mask);
+                                        null_mapped, gfp_mask);
                 if (ret < 0)
                         goto unmap_rq;
                 if (!bio)
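The BIO_NULL_MAPPED flag set in the second hunk is what lets the unmap path skip the copy-back. A rough sketch of how bio_uncopy_user can consume the flag (the copy helper shown is illustrative, not part of this diff):

        /* in bio_uncopy_user(): skip the copy-back for null-mapped bios */
        if (!bio_flagged(bio, BIO_NULL_MAPPED))
                ret = copy_bio_data_back_to_user(bio);  /* illustrative helper */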