 block/blk-map.c             | 20
 block/bsg.c                 |  5
 block/scsi_ioctl.c          |  5
 drivers/cdrom/cdrom.c       |  2
 drivers/scsi/scsi_tgt_lib.c |  2
 fs/bio.c                    | 33
 include/linux/bio.h         |  9
 include/linux/blkdev.h      |  5
 8 files changed, 47 insertions(+), 34 deletions(-)
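In short, the patch threads an explicit gfp_t through the user-memory
mapping helpers (blk_rq_map_user, blk_rq_map_user_iov, bio_map_user*,
bio_copy_user*) instead of hard-coding GFP_KERNEL inside them; every
existing call site below passes GFP_KERNEL, so behaviour is unchanged.
A minimal caller sketch of the new interface follows; the function name
and the GFP_NOIO remark are illustrative assumptions, not part of this
patch:

#include <linux/blkdev.h>

/* Hypothetical caller: map a user buffer into rq with explicit flags.
 * GFP_KERNEL reproduces the pre-patch behaviour; a caller running in
 * the I/O path could now pass GFP_NOIO instead (assumption). */
static int example_map(struct request_queue *q, struct request *rq,
                       void __user *ubuf, unsigned long len)
{
        int ret = blk_rq_map_user(q, rq, ubuf, len, GFP_KERNEL);

        if (ret)
                return ret;     /* negative errno on failure */
        /* ... submit rq, then blk_rq_unmap_user() on the bio saved
         * from rq->bio ... */
        return 0;
}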
diff --git a/block/blk-map.c b/block/blk-map.c
index ea1bf53929e4..ac21b7397e15 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -41,7 +41,8 @@ static int __blk_rq_unmap_user(struct bio *bio)
 }
 
 static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
-                             void __user *ubuf, unsigned int len)
+                             void __user *ubuf, unsigned int len,
+                             gfp_t gfp_mask)
 {
         unsigned long uaddr;
         unsigned int alignment;
@@ -57,9 +58,9 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
         uaddr = (unsigned long) ubuf;
         alignment = queue_dma_alignment(q) | q->dma_pad_mask;
         if (!(uaddr & alignment) && !(len & alignment))
-                bio = bio_map_user(q, NULL, uaddr, len, reading);
+                bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
         else
-                bio = bio_copy_user(q, uaddr, len, reading);
+                bio = bio_copy_user(q, uaddr, len, reading, gfp_mask);
 
         if (IS_ERR(bio))
                 return PTR_ERR(bio);
@@ -90,6 +91,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
  * @rq: request structure to fill
  * @ubuf: the user buffer
  * @len: length of user data
+ * @gfp_mask: memory allocation flags
  *
  * Description:
  * Data will be mapped directly for zero copy I/O, if possible. Otherwise
@@ -105,7 +107,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
  * unmapping.
  */
 int blk_rq_map_user(struct request_queue *q, struct request *rq,
-                    void __user *ubuf, unsigned long len)
+                    void __user *ubuf, unsigned long len, gfp_t gfp_mask)
 {
         unsigned long bytes_read = 0;
         struct bio *bio = NULL;
@@ -132,7 +134,7 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
                 if (end - start > BIO_MAX_PAGES)
                         map_len -= PAGE_SIZE;
 
-                ret = __blk_rq_map_user(q, rq, ubuf, map_len);
+                ret = __blk_rq_map_user(q, rq, ubuf, map_len, gfp_mask);
                 if (ret < 0)
                         goto unmap_rq;
                 if (!bio)
@@ -160,6 +162,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
  * @iov: pointer to the iovec
  * @iov_count: number of elements in the iovec
  * @len: I/O byte count
+ * @gfp_mask: memory allocation flags
  *
  * Description:
  * Data will be mapped directly for zero copy I/O, if possible. Otherwise
@@ -175,7 +178,8 @@ EXPORT_SYMBOL(blk_rq_map_user);
  * unmapping.
  */
 int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
-                        struct sg_iovec *iov, int iov_count, unsigned int len)
+                        struct sg_iovec *iov, int iov_count, unsigned int len,
+                        gfp_t gfp_mask)
 {
         struct bio *bio;
         int i, read = rq_data_dir(rq) == READ;
@@ -194,9 +198,9 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
         }
 
         if (unaligned || (q->dma_pad_mask & len))
-                bio = bio_copy_user_iov(q, iov, iov_count, read);
+                bio = bio_copy_user_iov(q, iov, iov_count, read, gfp_mask);
         else
-                bio = bio_map_user_iov(q, NULL, iov, iov_count, read);
+                bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);
 
         if (IS_ERR(bio))
                 return PTR_ERR(bio);
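The hunks above leave the mapping-strategy choice untouched: zero-copy
via bio_map_user() only when both the user address and the length are
clean against the queue's combined alignment mask, otherwise a bounce
copy via bio_copy_user(). A standalone illustration of that test; the
mask and buffer values are made up for the demo:

#include <stdio.h>

int main(void)
{
        /* queue_dma_alignment(q) | q->dma_pad_mask; 511 assumes
         * 512-byte DMA alignment, an example value only */
        unsigned long alignment = 511;
        unsigned long uaddr = 0x10000200;       /* 512-byte aligned */
        unsigned int len = 4097;                /* odd length */

        if (!(uaddr & alignment) && !(len & alignment))
                printf("aligned: zero-copy bio_map_user() path\n");
        else
                printf("unaligned: bounce via bio_copy_user()\n");
        return 0;
}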
diff --git a/block/bsg.c b/block/bsg.c
index 0aae8d7ba99c..e7a142e9916c 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -283,7 +283,8 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, int has_write_perm)
                 next_rq->cmd_type = rq->cmd_type;
 
                 dxferp = (void*)(unsigned long)hdr->din_xferp;
-                ret = blk_rq_map_user(q, next_rq, dxferp, hdr->din_xfer_len);
+                ret = blk_rq_map_user(q, next_rq, dxferp, hdr->din_xfer_len,
+                                      GFP_KERNEL);
                 if (ret)
                         goto out;
         }
@@ -298,7 +299,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, int has_write_perm)
                 dxfer_len = 0;
 
         if (dxfer_len) {
-                ret = blk_rq_map_user(q, rq, dxferp, dxfer_len);
+                ret = blk_rq_map_user(q, rq, dxferp, dxfer_len, GFP_KERNEL);
                 if (ret)
                         goto out;
         }
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 3aab80a4c484..f49d6a11a69e 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -315,10 +315,11 @@ static int sg_io(struct file *file, struct request_queue *q,
                 }
 
                 ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count,
-                                          hdr->dxfer_len);
+                                          hdr->dxfer_len, GFP_KERNEL);
                 kfree(iov);
         } else if (hdr->dxfer_len)
-                ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
+                ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len,
+                                      GFP_KERNEL);
 
         if (ret)
                 goto out;
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 74031de517e6..e861d24a6d32 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2097,7 +2097,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 
                 len = nr * CD_FRAMESIZE_RAW;
 
-                ret = blk_rq_map_user(q, rq, ubuf, len);
+                ret = blk_rq_map_user(q, rq, ubuf, len, GFP_KERNEL);
                 if (ret)
                         break;
 
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 257e097c39af..2a4fd820d616 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
         int err;
 
         dprintk("%lx %u\n", uaddr, len);
-        err = blk_rq_map_user(q, rq, (void *)uaddr, len);
+        err = blk_rq_map_user(q, rq, (void *)uaddr, len, GFP_KERNEL);
         if (err) {
                 /*
                  * TODO: need to fixup sg_tablesize, max_segment_size,
diff --git a/fs/bio.c b/fs/bio.c
index 6a637b5c24b5..3d2e9ad24728 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -558,13 +558,14 @@ int bio_uncopy_user(struct bio *bio)
  * @iov: the iovec.
  * @iov_count: number of elements in the iovec
  * @write_to_vm: bool indicating writing to pages or not
+ * @gfp_mask: memory allocation flags
  *
  * Prepares and returns a bio for indirect user io, bouncing data
  * to/from kernel pages as necessary. Must be paired with
  * call bio_uncopy_user() on io completion.
  */
 struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
-                              int iov_count, int write_to_vm)
+                              int iov_count, int write_to_vm, gfp_t gfp_mask)
 {
         struct bio_map_data *bmd;
         struct bio_vec *bvec;
@@ -587,12 +588,12 @@ struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
                 len += iov[i].iov_len;
         }
 
-        bmd = bio_alloc_map_data(nr_pages, iov_count, GFP_KERNEL);
+        bmd = bio_alloc_map_data(nr_pages, iov_count, gfp_mask);
         if (!bmd)
                 return ERR_PTR(-ENOMEM);
 
         ret = -ENOMEM;
-        bio = bio_alloc(GFP_KERNEL, nr_pages);
+        bio = bio_alloc(gfp_mask, nr_pages);
         if (!bio)
                 goto out_bmd;
 
@@ -605,7 +606,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
                 if (bytes > len)
                         bytes = len;
 
-                page = alloc_page(q->bounce_gfp | GFP_KERNEL);
+                page = alloc_page(q->bounce_gfp | gfp_mask);
                 if (!page) {
                         ret = -ENOMEM;
                         break;
@@ -647,26 +648,27 @@ out_bmd:
  * @uaddr: start of user address
  * @len: length in bytes
  * @write_to_vm: bool indicating writing to pages or not
+ * @gfp_mask: memory allocation flags
  *
  * Prepares and returns a bio for indirect user io, bouncing data
  * to/from kernel pages as necessary. Must be paired with
  * call bio_uncopy_user() on io completion.
  */
 struct bio *bio_copy_user(struct request_queue *q, unsigned long uaddr,
-                          unsigned int len, int write_to_vm)
+                          unsigned int len, int write_to_vm, gfp_t gfp_mask)
 {
         struct sg_iovec iov;
 
         iov.iov_base = (void __user *)uaddr;
         iov.iov_len = len;
 
-        return bio_copy_user_iov(q, &iov, 1, write_to_vm);
+        return bio_copy_user_iov(q, &iov, 1, write_to_vm, gfp_mask);
 }
 
 static struct bio *__bio_map_user_iov(struct request_queue *q,
                                       struct block_device *bdev,
                                       struct sg_iovec *iov, int iov_count,
-                                      int write_to_vm)
+                                      int write_to_vm, gfp_t gfp_mask)
 {
         int i, j;
         int nr_pages = 0;
@@ -692,12 +694,12 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
         if (!nr_pages)
                 return ERR_PTR(-EINVAL);
 
-        bio = bio_alloc(GFP_KERNEL, nr_pages);
+        bio = bio_alloc(gfp_mask, nr_pages);
         if (!bio)
                 return ERR_PTR(-ENOMEM);
 
         ret = -ENOMEM;
-        pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
+        pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
         if (!pages)
                 goto out;
 
@@ -776,19 +778,21 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
  * @uaddr: start of user address
  * @len: length in bytes
  * @write_to_vm: bool indicating writing to pages or not
+ * @gfp_mask: memory allocation flags
  *
  * Map the user space address into a bio suitable for io to a block
  * device. Returns an error pointer in case of error.
  */
 struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
-                         unsigned long uaddr, unsigned int len, int write_to_vm)
+                         unsigned long uaddr, unsigned int len, int write_to_vm,
+                         gfp_t gfp_mask)
 {
         struct sg_iovec iov;
 
         iov.iov_base = (void __user *)uaddr;
         iov.iov_len = len;
 
-        return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm);
+        return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask);
 }
 
 /**
@@ -798,18 +802,19 @@ struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
  * @iov: the iovec.
  * @iov_count: number of elements in the iovec
  * @write_to_vm: bool indicating writing to pages or not
+ * @gfp_mask: memory allocation flags
  *
  * Map the user space address into a bio suitable for io to a block
  * device. Returns an error pointer in case of error.
  */
 struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
                              struct sg_iovec *iov, int iov_count,
-                             int write_to_vm)
+                             int write_to_vm, gfp_t gfp_mask)
 {
         struct bio *bio;
 
-        bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);
-
+        bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm,
+                                 gfp_mask);
         if (IS_ERR(bio))
                 return bio;
 
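Note the ERR_PTR() convention these helpers keep after the change:
allocation failures under the caller-supplied gfp_mask surface as an
error pointer, never NULL. A hedged caller sketch, assuming a
hypothetical function name:

#include <linux/bio.h>
#include <linux/err.h>

static struct bio *example_copy(struct request_queue *q,
                                unsigned long uaddr, unsigned int len)
{
        /* write_to_vm = 1: data flows toward user memory (a read) */
        struct bio *bio = bio_copy_user(q, uaddr, len, 1, GFP_KERNEL);

        if (IS_ERR(bio))        /* e.g. -ENOMEM from bio_alloc() */
                return bio;     /* propagate the ERR_PTR() code */
        /* ... submit; pair with bio_uncopy_user() on completion ... */
        return bio;
}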
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 13aba20edb2d..200b185c3e83 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -325,11 +325,11 @@ extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
                            unsigned int, unsigned int);
 extern int bio_get_nr_vecs(struct block_device *);
 extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
-                                unsigned long, unsigned int, int);
+                                unsigned long, unsigned int, int, gfp_t);
 struct sg_iovec;
 extern struct bio *bio_map_user_iov(struct request_queue *,
                                     struct block_device *,
-                                    struct sg_iovec *, int, int);
+                                    struct sg_iovec *, int, int, gfp_t);
 extern void bio_unmap_user(struct bio *);
 extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
                                 gfp_t);
@@ -337,9 +337,10 @@ extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
                                  gfp_t, int);
 extern void bio_set_pages_dirty(struct bio *bio);
 extern void bio_check_pages_dirty(struct bio *bio);
-extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int);
+extern struct bio *bio_copy_user(struct request_queue *, unsigned long,
+                                 unsigned int, int, gfp_t);
 extern struct bio *bio_copy_user_iov(struct request_queue *, struct sg_iovec *,
-                                     int, int);
+                                     int, int, gfp_t);
 extern int bio_uncopy_user(struct bio *);
 void zero_fill_bio(struct bio *bio);
 extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 12df8efeef19..00e388d0e221 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -710,11 +710,12 @@ extern void __blk_stop_queue(struct request_queue *q);
 extern void __blk_run_queue(struct request_queue *);
 extern void blk_run_queue(struct request_queue *);
 extern void blk_start_queueing(struct request_queue *);
-extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long);
+extern int blk_rq_map_user(struct request_queue *, struct request *,
+                           void __user *, unsigned long, gfp_t);
 extern int blk_rq_unmap_user(struct bio *);
 extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
-                               struct sg_iovec *, int, unsigned int);
+                               struct sg_iovec *, int, unsigned int, gfp_t);
 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
                           struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
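Taken together, the new prototypes make the allocation policy a
per-call decision. A sketch mirroring the sg_io() call site above,
with explicit flags; the function name is illustrative and error
handling beyond the map call is elided:

#include <linux/blkdev.h>

static int example_map_iov(struct request_queue *q, struct request *rq,
                           struct sg_iovec *iov, int iov_count,
                           unsigned int len)
{
        int ret = blk_rq_map_user_iov(q, rq, iov, iov_count, len,
                                      GFP_KERNEL);

        if (ret)
                return ret;
        /* ... blk_execute_rq(), then blk_rq_unmap_user() on the bio
         * saved from rq->bio before execution ... */
        return 0;
}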