author		FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2008-08-28 03:17:05 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2008-10-09 02:56:10 -0400
commit		a3bce90edd8f6cafe3f63b1a943800792e830178 (patch)
tree		c07a2962987e57997b4ff6f6c63febe1feaa0a9c /fs
parent		45333d5a31296d0af886d94f1d08f128231cab8e (diff)
block: add gfp_mask argument to blk_rq_map_user and blk_rq_map_user_iov
Currently, blk_rq_map_user and blk_rq_map_user_iov always do
GFP_KERNEL allocation.

This adds a gfp_mask argument to blk_rq_map_user and blk_rq_map_user_iov
so that sg can use it (sg always does GFP_ATOMIC allocation).
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Douglas Gilbert <dougg@torque.net>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
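For illustration, a minimal caller-side sketch (not part of this patch). It assumes the post-patch blk_rq_map_user() signature with the gfp_t parameter appended, and the helper name is hypothetical; it shows how a driver running in atomic context, such as sg, can now ask for GFP_ATOMIC allocations instead of the previously hardwired GFP_KERNEL:

#include <linux/blkdev.h>
#include <linux/gfp.h>

/* Hypothetical helper: map a user buffer into rq without sleeping.
 * Before this patch the allocation mask inside the mapping path was
 * fixed at GFP_KERNEL; now the caller chooses it. */
static int map_user_buf_atomic(struct request_queue *q, struct request *rq,
			       void __user *ubuf, unsigned long len)
{
	return blk_rq_map_user(q, rq, ubuf, len, GFP_ATOMIC);
}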
Diffstat (limited to 'fs')
-rw-r--r--	fs/bio.c	33
1 file changed, 19 insertions(+), 14 deletions(-)
diff --git a/fs/bio.c b/fs/bio.c
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -558,13 +558,14 @@ int bio_uncopy_user(struct bio *bio)
  * @iov: the iovec.
  * @iov_count: number of elements in the iovec
  * @write_to_vm: bool indicating writing to pages or not
+ * @gfp_mask: memory allocation flags
  *
  * Prepares and returns a bio for indirect user io, bouncing data
  * to/from kernel pages as necessary. Must be paired with
  * call bio_uncopy_user() on io completion.
  */
 struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
-                              int iov_count, int write_to_vm)
+                              int iov_count, int write_to_vm, gfp_t gfp_mask)
 {
         struct bio_map_data *bmd;
         struct bio_vec *bvec;
@@ -587,12 +588,12 @@ struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
                 len += iov[i].iov_len;
         }
 
-        bmd = bio_alloc_map_data(nr_pages, iov_count, GFP_KERNEL);
+        bmd = bio_alloc_map_data(nr_pages, iov_count, gfp_mask);
         if (!bmd)
                 return ERR_PTR(-ENOMEM);
 
         ret = -ENOMEM;
-        bio = bio_alloc(GFP_KERNEL, nr_pages);
+        bio = bio_alloc(gfp_mask, nr_pages);
         if (!bio)
                 goto out_bmd;
 
@@ -605,7 +606,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
                 if (bytes > len)
                         bytes = len;
 
-                page = alloc_page(q->bounce_gfp | GFP_KERNEL);
+                page = alloc_page(q->bounce_gfp | gfp_mask);
                 if (!page) {
                         ret = -ENOMEM;
                         break;
@@ -647,26 +648,27 @@ out_bmd:
  * @uaddr: start of user address
  * @len: length in bytes
  * @write_to_vm: bool indicating writing to pages or not
+ * @gfp_mask: memory allocation flags
  *
  * Prepares and returns a bio for indirect user io, bouncing data
  * to/from kernel pages as necessary. Must be paired with
  * call bio_uncopy_user() on io completion.
  */
 struct bio *bio_copy_user(struct request_queue *q, unsigned long uaddr,
-                          unsigned int len, int write_to_vm)
+                          unsigned int len, int write_to_vm, gfp_t gfp_mask)
 {
         struct sg_iovec iov;
 
         iov.iov_base = (void __user *)uaddr;
         iov.iov_len = len;
 
-        return bio_copy_user_iov(q, &iov, 1, write_to_vm);
+        return bio_copy_user_iov(q, &iov, 1, write_to_vm, gfp_mask);
 }
 
 static struct bio *__bio_map_user_iov(struct request_queue *q,
                                       struct block_device *bdev,
                                       struct sg_iovec *iov, int iov_count,
-                                      int write_to_vm)
+                                      int write_to_vm, gfp_t gfp_mask)
 {
         int i, j;
         int nr_pages = 0;
@@ -692,12 +694,12 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
         if (!nr_pages)
                 return ERR_PTR(-EINVAL);
 
-        bio = bio_alloc(GFP_KERNEL, nr_pages);
+        bio = bio_alloc(gfp_mask, nr_pages);
         if (!bio)
                 return ERR_PTR(-ENOMEM);
 
         ret = -ENOMEM;
-        pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
+        pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
         if (!pages)
                 goto out;
 
@@ -776,19 +778,21 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
  * @uaddr: start of user address
  * @len: length in bytes
  * @write_to_vm: bool indicating writing to pages or not
+ * @gfp_mask: memory allocation flags
  *
  * Map the user space address into a bio suitable for io to a block
  * device. Returns an error pointer in case of error.
  */
 struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
-                         unsigned long uaddr, unsigned int len, int write_to_vm)
+                         unsigned long uaddr, unsigned int len, int write_to_vm,
+                         gfp_t gfp_mask)
 {
         struct sg_iovec iov;
 
         iov.iov_base = (void __user *)uaddr;
         iov.iov_len = len;
 
-        return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm);
+        return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask);
 }
 
 /**
@@ -798,18 +802,19 @@ struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
  * @iov: the iovec.
  * @iov_count: number of elements in the iovec
  * @write_to_vm: bool indicating writing to pages or not
+ * @gfp_mask: memory allocation flags
  *
  * Map the user space address into a bio suitable for io to a block
  * device. Returns an error pointer in case of error.
  */
 struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
                              struct sg_iovec *iov, int iov_count,
-                             int write_to_vm)
+                             int write_to_vm, gfp_t gfp_mask)
 {
         struct bio *bio;
 
-        bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);
-
+        bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm,
+                                 gfp_mask);
         if (IS_ERR(bio))
                 return bio;
 
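Design note: threading the mask through lets existing call sites preserve the old behaviour simply by passing GFP_KERNEL explicitly. A hedged sketch of such a behaviour-preserving wrapper (the wrapper name is hypothetical; the bio_copy_user() signature is the post-patch one from the hunk above):

#include <linux/bio.h>
#include <linux/gfp.h>

/* Hypothetical wrapper: passing GFP_KERNEL reproduces the pre-patch
 * allocation behaviour of bio_copy_user() exactly, so only callers
 * that opt into another mask (e.g. GFP_ATOMIC in sg) see a change. */
static struct bio *bio_copy_user_kernel(struct request_queue *q,
					unsigned long uaddr, unsigned int len,
					int write_to_vm)
{
	return bio_copy_user(q, uaddr, len, write_to_vm, GFP_KERNEL);
}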