 block/blk-map.c             | 26
 block/bsg.c                 |  7
 block/scsi_ioctl.c          |  4
 drivers/cdrom/cdrom.c       |  2
 drivers/scsi/scsi_tgt_lib.c |  2
 fs/bio.c                    | 58
 include/linux/bio.h         |  8
 include/linux/blkdev.h      | 12
 8 files changed, 80 insertions(+), 39 deletions(-)
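
The patch threads a new struct rq_map_data through blk_rq_map_user() and blk_rq_map_user_iov(): a non-NULL map_data forces the copy path, and bio_copy_user_iov() then draws pages from the caller's array instead of calling alloc_page(). All existing callers pass NULL and keep the old behaviour. A minimal caller-side sketch (the helper and its page pool are hypothetical illustrations, not part of the patch):

#include <linux/blkdev.h>

/* Hypothetical driver helper: map a user buffer into rq, copying
 * through pages the driver already owns. Only struct rq_map_data and
 * the extra blk_rq_map_user() argument come from this patch; the
 * helper name and the pages[] pool are illustrative assumptions. */
static int example_map_through_pool(struct request_queue *q,
                                    struct request *rq,
                                    void __user *ubuf, unsigned long len,
                                    struct page **pages, int nr_pages)
{
        struct rq_map_data map_data = {
                .pages      = pages,    /* caller-owned page array */
                .page_order = 0,        /* each entry covers PAGE_SIZE bytes */
                .nr_entries = nr_pages,
        };

        /* With map_data non-NULL the data is always bounced through
         * these pages; the block layer records is_our_pages == 0 and
         * never frees them. */
        return blk_rq_map_user(q, rq, &map_data, ubuf, len, GFP_KERNEL);
}
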
diff --git a/block/blk-map.c b/block/blk-map.c
index ac21b7397e15..dad6a2907835 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -41,8 +41,8 @@ static int __blk_rq_unmap_user(struct bio *bio)
 }
 
 static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
-			     void __user *ubuf, unsigned int len,
-			     gfp_t gfp_mask)
+			     struct rq_map_data *map_data, void __user *ubuf,
+			     unsigned int len, gfp_t gfp_mask)
 {
 	unsigned long uaddr;
 	unsigned int alignment;
@@ -57,10 +57,10 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 	 */
 	uaddr = (unsigned long) ubuf;
 	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
-	if (!(uaddr & alignment) && !(len & alignment))
+	if (!(uaddr & alignment) && !(len & alignment) && !map_data)
 		bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
 	else
-		bio = bio_copy_user(q, uaddr, len, reading, gfp_mask);
+		bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
 
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
@@ -89,6 +89,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
  * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q: request queue where request should be inserted
  * @rq: request structure to fill
+ * @map_data: pointer to the rq_map_data holding pages (if necessary)
  * @ubuf: the user buffer
  * @len: length of user data
  * @gfp_mask: memory allocation flags
@@ -107,7 +108,8 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
  *    unmapping.
  */
 int blk_rq_map_user(struct request_queue *q, struct request *rq,
-		    void __user *ubuf, unsigned long len, gfp_t gfp_mask)
+		    struct rq_map_data *map_data, void __user *ubuf,
+		    unsigned long len, gfp_t gfp_mask)
 {
 	unsigned long bytes_read = 0;
 	struct bio *bio = NULL;
@@ -134,7 +136,8 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 		if (end - start > BIO_MAX_PAGES)
 			map_len -= PAGE_SIZE;
 
-		ret = __blk_rq_map_user(q, rq, ubuf, map_len, gfp_mask);
+		ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
+					gfp_mask);
 		if (ret < 0)
 			goto unmap_rq;
 		if (!bio)
@@ -159,6 +162,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
  * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q: request queue where request should be inserted
  * @rq: request to map data to
+ * @map_data: pointer to the rq_map_data holding pages (if necessary)
  * @iov: pointer to the iovec
 * @iov_count: number of elements in the iovec
 * @len: I/O byte count
@@ -178,8 +182,8 @@ EXPORT_SYMBOL(blk_rq_map_user);
  *    unmapping.
  */
 int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
-			struct sg_iovec *iov, int iov_count, unsigned int len,
-			gfp_t gfp_mask)
+			struct rq_map_data *map_data, struct sg_iovec *iov,
+			int iov_count, unsigned int len, gfp_t gfp_mask)
 {
 	struct bio *bio;
 	int i, read = rq_data_dir(rq) == READ;
@@ -197,8 +201,9 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 		}
 	}
 
-	if (unaligned || (q->dma_pad_mask & len))
-		bio = bio_copy_user_iov(q, iov, iov_count, read, gfp_mask);
+	if (unaligned || (q->dma_pad_mask & len) || map_data)
+		bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
+					gfp_mask);
 	else
 		bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);
 
@@ -220,6 +225,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	rq->buffer = rq->data = NULL;
 	return 0;
 }
+EXPORT_SYMBOL(blk_rq_map_user_iov);
 
 /**
  * blk_rq_unmap_user - unmap a request with user data
diff --git a/block/bsg.c b/block/bsg.c
index e7a142e9916c..56cb343c76d8 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -283,8 +283,8 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, int has_write_perm)
 		next_rq->cmd_type = rq->cmd_type;
 
 		dxferp = (void*)(unsigned long)hdr->din_xferp;
-		ret = blk_rq_map_user(q, next_rq, dxferp, hdr->din_xfer_len,
-				      GFP_KERNEL);
+		ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
+				      hdr->din_xfer_len, GFP_KERNEL);
 		if (ret)
 			goto out;
 	}
@@ -299,7 +299,8 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, int has_write_perm)
 		dxfer_len = 0;
 
 	if (dxfer_len) {
-		ret = blk_rq_map_user(q, rq, dxferp, dxfer_len, GFP_KERNEL);
+		ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len,
+				      GFP_KERNEL);
 		if (ret)
 			goto out;
 	}
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index f49d6a11a69e..c34272a348fe 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -314,11 +314,11 @@ static int sg_io(struct file *file, struct request_queue *q,
 			goto out;
 		}
 
-		ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count,
+		ret = blk_rq_map_user_iov(q, rq, NULL, iov, hdr->iovec_count,
 					  hdr->dxfer_len, GFP_KERNEL);
 		kfree(iov);
 	} else if (hdr->dxfer_len)
-		ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len,
+		ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
 				      GFP_KERNEL);
 
 	if (ret)
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index e861d24a6d32..d47f2f80accd 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2097,7 +2097,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 
 		len = nr * CD_FRAMESIZE_RAW;
 
-		ret = blk_rq_map_user(q, rq, ubuf, len, GFP_KERNEL);
+		ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 		if (ret)
 			break;
 
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 2a4fd820d616..3117bb106b5d 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
 	int err;
 
 	dprintk("%lx %u\n", uaddr, len);
-	err = blk_rq_map_user(q, rq, (void *)uaddr, len, GFP_KERNEL);
+	err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
 	if (err) {
 		/*
 		 * TODO: need to fixup sg_tablesize, max_segment_size,
diff --git a/fs/bio.c b/fs/bio.c
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -439,16 +439,19 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 
 struct bio_map_data {
 	struct bio_vec *iovecs;
-	int nr_sgvecs;
 	struct sg_iovec *sgvecs;
+	int nr_sgvecs;
+	int is_our_pages;
 };
 
 static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
-			     struct sg_iovec *iov, int iov_count)
+			     struct sg_iovec *iov, int iov_count,
+			     int is_our_pages)
 {
 	memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
 	memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
 	bmd->nr_sgvecs = iov_count;
+	bmd->is_our_pages = is_our_pages;
 	bio->bi_private = bmd;
 }
 
@@ -483,7 +486,8 @@ static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
 }
 
 static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
-			  struct sg_iovec *iov, int iov_count, int uncopy)
+			  struct sg_iovec *iov, int iov_count, int uncopy,
+			  int do_free_page)
 {
 	int ret = 0, i;
 	struct bio_vec *bvec;
@@ -526,7 +530,7 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
 			}
 		}
 
-		if (uncopy)
+		if (do_free_page)
 			__free_page(bvec->bv_page);
 	}
 
@@ -545,7 +549,8 @@ int bio_uncopy_user(struct bio *bio)
 	struct bio_map_data *bmd = bio->bi_private;
 	int ret;
 
-	ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs, bmd->nr_sgvecs, 1);
+	ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs, bmd->nr_sgvecs, 1,
+			     bmd->is_our_pages);
 
 	bio_free_map_data(bmd);
 	bio_put(bio);
@@ -555,6 +560,7 @@ int bio_uncopy_user(struct bio *bio)
 /**
  * bio_copy_user_iov - copy user data to bio
  * @q: destination block queue
+ * @map_data: pointer to the rq_map_data holding pages (if necessary)
  * @iov: the iovec.
  * @iov_count: number of elements in the iovec
  * @write_to_vm: bool indicating writing to pages or not
@@ -564,8 +570,10 @@ int bio_uncopy_user(struct bio *bio)
  * to/from kernel pages as necessary. Must be paired with
  * call bio_uncopy_user() on io completion.
  */
-struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
-			      int iov_count, int write_to_vm, gfp_t gfp_mask)
+struct bio *bio_copy_user_iov(struct request_queue *q,
+			      struct rq_map_data *map_data,
+			      struct sg_iovec *iov, int iov_count,
+			      int write_to_vm, gfp_t gfp_mask)
 {
 	struct bio_map_data *bmd;
 	struct bio_vec *bvec;
@@ -600,13 +608,26 @@ struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
 	bio->bi_rw |= (!write_to_vm << BIO_RW);
 
 	ret = 0;
+	i = 0;
 	while (len) {
-		unsigned int bytes = PAGE_SIZE;
+		unsigned int bytes;
+
+		if (map_data)
+			bytes = 1U << (PAGE_SHIFT + map_data->page_order);
+		else
+			bytes = PAGE_SIZE;
 
 		if (bytes > len)
 			bytes = len;
 
-		page = alloc_page(q->bounce_gfp | gfp_mask);
+		if (map_data) {
+			if (i == map_data->nr_entries) {
+				ret = -ENOMEM;
+				break;
+			}
+			page = map_data->pages[i++];
+		} else
+			page = alloc_page(q->bounce_gfp | gfp_mask);
 		if (!page) {
 			ret = -ENOMEM;
 			break;
@@ -625,16 +646,17 @@ struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
 	 * success
 	 */
 	if (!write_to_vm) {
-		ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0);
+		ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 0);
 		if (ret)
 			goto cleanup;
 	}
 
-	bio_set_map_data(bmd, bio, iov, iov_count);
+	bio_set_map_data(bmd, bio, iov, iov_count, map_data ? 0 : 1);
 	return bio;
 cleanup:
-	bio_for_each_segment(bvec, bio, i)
-		__free_page(bvec->bv_page);
+	if (!map_data)
+		bio_for_each_segment(bvec, bio, i)
+			__free_page(bvec->bv_page);
 
 	bio_put(bio);
 out_bmd:
@@ -645,6 +667,7 @@ out_bmd:
 /**
  * bio_copy_user - copy user data to bio
  * @q: destination block queue
+ * @map_data: pointer to the rq_map_data holding pages (if necessary)
  * @uaddr: start of user address
  * @len: length in bytes
 * @write_to_vm: bool indicating writing to pages or not
@@ -654,15 +677,16 @@ out_bmd:
  * to/from kernel pages as necessary. Must be paired with
  * call bio_uncopy_user() on io completion.
  */
-struct bio *bio_copy_user(struct request_queue *q, unsigned long uaddr,
-			  unsigned int len, int write_to_vm, gfp_t gfp_mask)
+struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data,
+			  unsigned long uaddr, unsigned int len,
+			  int write_to_vm, gfp_t gfp_mask)
 {
 	struct sg_iovec iov;
 
 	iov.iov_base = (void __user *)uaddr;
 	iov.iov_len = len;
 
-	return bio_copy_user_iov(q, &iov, 1, write_to_vm, gfp_mask);
+	return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask);
 }
 
 static struct bio *__bio_map_user_iov(struct request_queue *q,
@@ -1028,7 +1052,7 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
 	bio->bi_private = bmd;
 	bio->bi_end_io = bio_copy_kern_endio;
 
-	bio_set_map_data(bmd, bio, &iov, 1);
+	bio_set_map_data(bmd, bio, &iov, 1, 1);
 	return bio;
 cleanup:
 	bio_for_each_segment(bvec, bio, i)
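
Note the chunk sizing in bio_copy_user_iov(): with map_data supplied, each entry of map_data->pages[] is treated as an allocation of order page_order, i.e. 1U << (PAGE_SHIFT + page_order) bytes, and -ENOMEM is returned once nr_entries entries are exhausted. A sketch of the implied capacity arithmetic (the helper name is invented for illustration):

#include <linux/kernel.h>
#include <linux/mm.h>

/* Illustrative only: how many pre-allocated entries a transfer of
 * `len` bytes consumes, given the per-entry chunk size used above. */
static inline int example_entries_needed(unsigned int len, int page_order)
{
        unsigned int chunk = 1U << (PAGE_SHIFT + page_order);

        return DIV_ROUND_UP(len, chunk);
}
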
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 200b185c3e83..bc386cd5e996 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -327,6 +327,7 @@ extern int bio_get_nr_vecs(struct block_device *);
 extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
 				unsigned long, unsigned int, int, gfp_t);
 struct sg_iovec;
+struct rq_map_data;
 extern struct bio *bio_map_user_iov(struct request_queue *,
 				    struct block_device *,
 				    struct sg_iovec *, int, int, gfp_t);
@@ -337,9 +338,10 @@ extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
 				 gfp_t, int);
 extern void bio_set_pages_dirty(struct bio *bio);
 extern void bio_check_pages_dirty(struct bio *bio);
-extern struct bio *bio_copy_user(struct request_queue *, unsigned long,
-				 unsigned int, int, gfp_t);
-extern struct bio *bio_copy_user_iov(struct request_queue *, struct sg_iovec *,
+extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
+				 unsigned long, unsigned int, int, gfp_t);
+extern struct bio *bio_copy_user_iov(struct request_queue *,
+				     struct rq_map_data *, struct sg_iovec *,
 				     int, int, gfp_t);
 extern int bio_uncopy_user(struct bio *);
 void zero_fill_bio(struct bio *bio);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 00e388d0e221..358ac423ed2f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -642,6 +642,12 @@ static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
 }
 #endif /* CONFIG_MMU */
 
+struct rq_map_data {
+	struct page **pages;
+	int page_order;
+	int nr_entries;
+};
+
 struct req_iterator {
 	int i;
 	struct bio *bio;
@@ -711,11 +717,13 @@ extern void __blk_run_queue(struct request_queue *);
 extern void blk_run_queue(struct request_queue *);
 extern void blk_start_queueing(struct request_queue *);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
-			   void __user *, unsigned long, gfp_t);
+			   struct rq_map_data *, void __user *, unsigned long,
+			   gfp_t);
 extern int blk_rq_unmap_user(struct bio *);
 extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
-			       struct sg_iovec *, int, unsigned int, gfp_t);
+			       struct rq_map_data *, struct sg_iovec *, int,
+			       unsigned int, gfp_t);
 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 			  struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
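
Since rq_map_data only carries the page array, allocating the reserved pool is left to the caller, and the caller also remains responsible for freeing it: bio_uncopy_user() skips __free_page() when is_our_pages == 0. One plausible setup sketch, assuming alloc_pages() with the same order that gets recorded in page_order (all names are hypothetical):

#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/slab.h>

/* Hypothetical pool setup for the pages handed to blk_rq_map_user()
 * via rq_map_data; nothing here beyond the struct fields is from the
 * patch itself. */
static int example_alloc_map_pages(struct rq_map_data *md, int nr, int order)
{
        int i;

        md->pages = kcalloc(nr, sizeof(struct page *), GFP_KERNEL);
        if (!md->pages)
                return -ENOMEM;

        for (i = 0; i < nr; i++) {
                md->pages[i] = alloc_pages(GFP_KERNEL, order);
                if (!md->pages[i])
                        goto err_free;
        }

        md->page_order = order;
        md->nr_entries = nr;
        return 0;

err_free:
        while (i--)
                __free_pages(md->pages[i], order);
        kfree(md->pages);
        return -ENOMEM;
}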