about summary refs log tree commit diff stats
path: root/block/blk-map.c
diff options
context:
space:
mode:
Diffstat (limited to 'block/blk-map.c')
-rw-r--r--  block/blk-map.c  25
1 files changed, 16 insertions, 9 deletions
diff --git a/block/blk-map.c b/block/blk-map.c
index f103729b462f..9083cf0180cc 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -20,11 +20,10 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
20 rq->biotail->bi_next = bio; 20 rq->biotail->bi_next = bio;
21 rq->biotail = bio; 21 rq->biotail = bio;
22 22
23 rq->data_len += bio->bi_size; 23 rq->__data_len += bio->bi_size;
24 } 24 }
25 return 0; 25 return 0;
26} 26}
27EXPORT_SYMBOL(blk_rq_append_bio);
28 27
29static int __blk_rq_unmap_user(struct bio *bio) 28static int __blk_rq_unmap_user(struct bio *bio)
30{ 29{
@@ -116,7 +115,7 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
116 struct bio *bio = NULL; 115 struct bio *bio = NULL;
117 int ret; 116 int ret;
118 117
119 if (len > (q->max_hw_sectors << 9)) 118 if (len > (queue_max_hw_sectors(q) << 9))
120 return -EINVAL; 119 return -EINVAL;
121 if (!len) 120 if (!len)
122 return -EINVAL; 121 return -EINVAL;
@@ -156,7 +155,7 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
156 if (!bio_flagged(bio, BIO_USER_MAPPED)) 155 if (!bio_flagged(bio, BIO_USER_MAPPED))
157 rq->cmd_flags |= REQ_COPY_USER; 156 rq->cmd_flags |= REQ_COPY_USER;
158 157
159 rq->buffer = rq->data = NULL; 158 rq->buffer = NULL;
160 return 0; 159 return 0;
161unmap_rq: 160unmap_rq:
162 blk_rq_unmap_user(bio); 161 blk_rq_unmap_user(bio);
@@ -235,7 +234,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
235 blk_queue_bounce(q, &bio); 234 blk_queue_bounce(q, &bio);
236 bio_get(bio); 235 bio_get(bio);
237 blk_rq_bio_prep(q, rq, bio); 236 blk_rq_bio_prep(q, rq, bio);
238 rq->buffer = rq->data = NULL; 237 rq->buffer = NULL;
239 return 0; 238 return 0;
240} 239}
241EXPORT_SYMBOL(blk_rq_map_user_iov); 240EXPORT_SYMBOL(blk_rq_map_user_iov);
@@ -282,7 +281,8 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
282 * 281 *
283 * Description: 282 * Description:
284 * Data will be mapped directly if possible. Otherwise a bounce 283 * Data will be mapped directly if possible. Otherwise a bounce
285 * buffer is used. Can be called multiple times to append multiple
285 * buffers.
286 */ 286 */
287int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, 287int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
288 unsigned int len, gfp_t gfp_mask) 288 unsigned int len, gfp_t gfp_mask)
@@ -290,8 +290,9 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
290 int reading = rq_data_dir(rq) == READ; 290 int reading = rq_data_dir(rq) == READ;
291 int do_copy = 0; 291 int do_copy = 0;
292 struct bio *bio; 292 struct bio *bio;
293 int ret;
293 294
294 if (len > (q->max_hw_sectors << 9)) 295 if (len > (queue_max_hw_sectors(q) << 9))
295 return -EINVAL; 296 return -EINVAL;
296 if (!len || !kbuf) 297 if (!len || !kbuf)
297 return -EINVAL; 298 return -EINVAL;
@@ -311,9 +312,15 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
311 if (do_copy) 312 if (do_copy)
312 rq->cmd_flags |= REQ_COPY_USER; 313 rq->cmd_flags |= REQ_COPY_USER;
313 314
314 blk_rq_bio_prep(q, rq, bio); 315 ret = blk_rq_append_bio(q, rq, bio);
316 if (unlikely(ret)) {
317 /* request is too big */
318 bio_put(bio);
319 return ret;
320 }
321
315 blk_queue_bounce(q, &rq->bio); 322 blk_queue_bounce(q, &rq->bio);
316 rq->buffer = rq->data = NULL; 323 rq->buffer = NULL;
317 return 0; 324 return 0;
318} 325}
319EXPORT_SYMBOL(blk_rq_map_kern); 326EXPORT_SYMBOL(blk_rq_map_kern);