author     Christoph Hellwig <hch@lst.de>    2015-01-18 10:16:29 -0500
committer  Jens Axboe <axboe@fb.com>         2015-02-05 11:30:37 -0500
commit     ddad8dd0a162fde61646a627a3017c258601dc8a (patch)
tree       62a0c931c0efb7cd86457e1908ceb1655997c595 /block
parent     42d2683a2704ef4bbbb07fd0b9486ab312dd8c56 (diff)
block: use blk_rq_map_user_iov to implement blk_rq_map_user
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block')
-rw-r--r--  block/bio.c       53
-rw-r--r--  block/blk-map.c  137
2 files changed, 14 insertions, 176 deletions
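
The exported blk_rq_map_user() keeps its existing contract; only its implementation becomes a single-iovec wrapper around blk_rq_map_user_iov(). As a minimal caller sketch (not part of this commit: issue_pc_request() is a hypothetical helper, and command setup, sense buffers, timeouts and rq->errors handling are elided), a REQ_TYPE_BLOCK_PC path would still pair blk_rq_map_user() with blk_rq_unmap_user() like this:

#include <linux/blkdev.h>

static int issue_pc_request(struct request_queue *q, struct gendisk *disk,
			    void __user *ubuf, unsigned long len, int reading)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, reading ? READ : WRITE, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	blk_rq_set_block_pc(rq);		/* mark as REQ_TYPE_BLOCK_PC */

	/* map the user buffer; a bounce buffer is set up if alignment requires it */
	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		goto out;

	blk_execute_rq(q, disk, rq, 0);		/* issue and wait for completion */

	/* copy bounced data back to user space (reads) and free the mapping */
	ret = blk_rq_unmap_user(rq->bio);
out:
	blk_put_request(rq);
	return ret;
}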
diff --git a/block/bio.c b/block/bio.c
index 54da51ed43de..879921e6b049 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1102,7 +1102,7 @@ static int __bio_copy_iov(struct bio *bio, const struct sg_iovec *iov, int iov_c
  * bio_uncopy_user - finish previously mapped bio
  * @bio: bio being terminated
  *
- * Free pages allocated from bio_copy_user() and write back data
+ * Free pages allocated from bio_copy_user_iov() and write back data
  * to user space in case of a read.
  */
 int bio_uncopy_user(struct bio *bio)
@@ -1256,32 +1256,6 @@ out_bmd:
 	return ERR_PTR(ret);
 }
 
-/**
- * bio_copy_user - copy user data to bio
- * @q: destination block queue
- * @map_data: pointer to the rq_map_data holding pages (if necessary)
- * @uaddr: start of user address
- * @len: length in bytes
- * @write_to_vm: bool indicating writing to pages or not
- * @gfp_mask: memory allocation flags
- *
- * Prepares and returns a bio for indirect user io, bouncing data
- * to/from kernel pages as necessary. Must be paired with
- * call bio_uncopy_user() on io completion.
- */
-struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data,
-			  unsigned long uaddr, unsigned int len,
-			  int write_to_vm, gfp_t gfp_mask)
-{
-	struct sg_iovec iov;
-
-	iov.iov_base = (void __user *)uaddr;
-	iov.iov_len = len;
-
-	return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask);
-}
-EXPORT_SYMBOL(bio_copy_user);
-
 static struct bio *__bio_map_user_iov(struct request_queue *q,
 				      struct block_device *bdev,
 				      const struct sg_iovec *iov, int iov_count,
@@ -1395,31 +1369,6 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
 }
 
 /**
- * bio_map_user - map user address into bio
- * @q: the struct request_queue for the bio
- * @bdev: destination block device
- * @uaddr: start of user address
- * @len: length in bytes
- * @write_to_vm: bool indicating writing to pages or not
- * @gfp_mask: memory allocation flags
- *
- * Map the user space address into a bio suitable for io to a block
- * device. Returns an error pointer in case of error.
- */
-struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
-			 unsigned long uaddr, unsigned int len, int write_to_vm,
-			 gfp_t gfp_mask)
-{
-	struct sg_iovec iov;
-
-	iov.iov_base = (void __user *)uaddr;
-	iov.iov_len = len;
-
-	return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask);
-}
-EXPORT_SYMBOL(bio_map_user);
-
-/**
  * bio_map_user_iov - map user sg_iovec table into bio
  * @q: the struct request_queue for the bio
  * @bdev: destination block device
diff --git a/block/blk-map.c b/block/blk-map.c
index f890d4345b0c..152a5fe5d85e 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -39,130 +39,6 @@ static int __blk_rq_unmap_user(struct bio *bio)
 	return ret;
 }
 
-static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
-		struct rq_map_data *map_data, void __user *ubuf,
-		unsigned int len, gfp_t gfp_mask)
-{
-	unsigned long uaddr;
-	struct bio *bio, *orig_bio;
-	int reading, ret;
-
-	reading = rq_data_dir(rq) == READ;
-
-	/*
-	 * if alignment requirement is satisfied, map in user pages for
-	 * direct dma. else, set up kernel bounce buffers
-	 */
-	uaddr = (unsigned long) ubuf;
-	if (blk_rq_aligned(q, uaddr, len) && !map_data)
-		bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
-	else
-		bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
-
-	if (IS_ERR(bio))
-		return PTR_ERR(bio);
-
-	if (map_data && map_data->null_mapped)
-		bio->bi_flags |= (1 << BIO_NULL_MAPPED);
-
-	orig_bio = bio;
-	blk_queue_bounce(q, &bio);
-
-	/*
-	 * We link the bounce buffer in and could have to traverse it
-	 * later so we have to get a ref to prevent it from being freed
-	 */
-	bio_get(bio);
-
-	ret = blk_rq_append_bio(q, rq, bio);
-	if (!ret)
-		return bio->bi_iter.bi_size;
-
-	/* if it was boucned we must call the end io function */
-	bio_endio(bio, 0);
-	__blk_rq_unmap_user(orig_bio);
-	bio_put(bio);
-	return ret;
-}
-
-/**
- * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
- * @q: request queue where request should be inserted
- * @rq: request structure to fill
- * @map_data: pointer to the rq_map_data holding pages (if necessary)
- * @ubuf: the user buffer
- * @len: length of user data
- * @gfp_mask: memory allocation flags
- *
- * Description:
- *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
- *    a kernel bounce buffer is used.
- *
- *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
- *    still in process context.
- *
- *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
- *    before being submitted to the device, as pages mapped may be out of
- *    reach. It's the callers responsibility to make sure this happens. The
- *    original bio must be passed back in to blk_rq_unmap_user() for proper
- *    unmapping.
- */
-int blk_rq_map_user(struct request_queue *q, struct request *rq,
-		    struct rq_map_data *map_data, void __user *ubuf,
-		    unsigned long len, gfp_t gfp_mask)
-{
-	unsigned long bytes_read = 0;
-	struct bio *bio = NULL;
-	int ret;
-
-	if (len > (queue_max_hw_sectors(q) << 9))
-		return -EINVAL;
-	if (!len)
-		return -EINVAL;
-
-	if (!ubuf && (!map_data || !map_data->null_mapped))
-		return -EINVAL;
-
-	while (bytes_read != len) {
-		unsigned long map_len, end, start;
-
-		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
-		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
-								>> PAGE_SHIFT;
-		start = (unsigned long)ubuf >> PAGE_SHIFT;
-
-		/*
-		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
-		 * pages. If this happens we just lower the requested
-		 * mapping len by a page so that we can fit
-		 */
-		if (end - start > BIO_MAX_PAGES)
-			map_len -= PAGE_SIZE;
-
-		ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
-					gfp_mask);
-		if (ret < 0)
-			goto unmap_rq;
-		if (!bio)
-			bio = rq->bio;
-		bytes_read += ret;
-		ubuf += ret;
-
-		if (map_data)
-			map_data->offset += ret;
-	}
-
-	if (!bio_flagged(bio, BIO_USER_MAPPED))
-		rq->cmd_flags |= REQ_COPY_USER;
-
-	return 0;
-unmap_rq:
-	blk_rq_unmap_user(bio);
-	rq->bio = NULL;
-	return ret;
-}
-EXPORT_SYMBOL(blk_rq_map_user);
-
 /**
  * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q: request queue where request should be inserted
@@ -241,6 +117,19 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 }
 EXPORT_SYMBOL(blk_rq_map_user_iov);
 
+int blk_rq_map_user(struct request_queue *q, struct request *rq,
+		    struct rq_map_data *map_data, void __user *ubuf,
+		    unsigned long len, gfp_t gfp_mask)
+{
+	struct sg_iovec iov;
+
+	iov.iov_base = (void __user *)ubuf;
+	iov.iov_len = len;
+
+	return blk_rq_map_user_iov(q, rq, map_data, &iov, 1, len, gfp_mask);
+}
+EXPORT_SYMBOL(blk_rq_map_user);
+
 /**
  * blk_rq_unmap_user - unmap a request with user data
  * @bio: start of bio list