Diffstat (limited to 'fs/bio.c')

 fs/bio.c | 297 +++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 179 insertions(+), 118 deletions(-)
diff --git a/fs/bio.c b/fs/bio.c
index 3cba7ae34d75..77a55bcceedb 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -30,7 +30,7 @@
 
 static struct kmem_cache *bio_slab __read_mostly;
 
-mempool_t *bio_split_pool __read_mostly;
+static mempool_t *bio_split_pool __read_mostly;
 
 /*
  * if you change this list, also change bvec_alloc or things will
@@ -60,25 +60,46 @@ struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct
 	struct bio_vec *bvl;
 
 	/*
-	 * see comment near bvec_array define!
+	 * If 'bs' is given, lookup the pool and do the mempool alloc.
+	 * If not, this is a bio_kmalloc() allocation and just do a
+	 * kzalloc() for the exact number of vecs right away.
 	 */
-	switch (nr) {
-		case   1        : *idx = 0; break;
-		case   2 ...   4: *idx = 1; break;
-		case   5 ...  16: *idx = 2; break;
-		case  17 ...  64: *idx = 3; break;
-		case  65 ... 128: *idx = 4; break;
-		case 129 ... BIO_MAX_PAGES: *idx = 5; break;
+	if (bs) {
+		/*
+		 * see comment near bvec_array define!
+		 */
+		switch (nr) {
+		case 1:
+			*idx = 0;
+			break;
+		case 2 ... 4:
+			*idx = 1;
+			break;
+		case 5 ... 16:
+			*idx = 2;
+			break;
+		case 17 ... 64:
+			*idx = 3;
+			break;
+		case 65 ... 128:
+			*idx = 4;
+			break;
+		case 129 ... BIO_MAX_PAGES:
+			*idx = 5;
+			break;
 		default:
 			return NULL;
 		}
-	/*
-	 * idx now points to the pool we want to allocate from
-	 */
 
-	bvl = mempool_alloc(bs->bvec_pools[*idx], gfp_mask);
-	if (bvl)
-		memset(bvl, 0, bvec_nr_vecs(*idx) * sizeof(struct bio_vec));
+		/*
+		 * idx now points to the pool we want to allocate from
+		 */
+		bvl = mempool_alloc(bs->bvec_pools[*idx], gfp_mask);
+		if (bvl)
+			memset(bvl, 0,
+				bvec_nr_vecs(*idx) * sizeof(struct bio_vec));
+	} else
+		bvl = kzalloc(nr * sizeof(struct bio_vec), gfp_mask);
 
 	return bvl;
 }
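The bucket logic above is the heart of the change: with a bio_set, the vec count is rounded up to one of six fixed mempool sizes; without one, bio_kmalloc() allocations get exactly the requested count via kzalloc(). A minimal userspace sketch of the pool-index mapping, assuming BIO_MAX_PAGES is 256 as in kernels of this era (hypothetical helper, illustration only):

#include <stdio.h>

#define BIO_MAX_PAGES	256

/* Mirrors the switch in bvec_alloc_bs(); uses GCC case ranges like the kernel. */
static int bvec_pool_idx(int nr)
{
	switch (nr) {
	case 1:			return 0;
	case 2 ... 4:		return 1;
	case 5 ... 16:		return 2;
	case 17 ... 64:		return 3;
	case 65 ... 128:	return 4;
	case 129 ... BIO_MAX_PAGES:
				return 5;
	default:		return -1;	/* larger than any pool */
	}
}

int main(void)
{
	int nr;

	for (nr = 1; nr <= BIO_MAX_PAGES; nr <<= 1)
		printf("nr = %3d -> pool %d\n", nr, bvec_pool_idx(nr));
	return 0;
}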
@@ -107,10 +128,17 @@ static void bio_fs_destructor(struct bio *bio)
 	bio_free(bio, fs_bio_set);
 }
 
+static void bio_kmalloc_destructor(struct bio *bio)
+{
+	kfree(bio->bi_io_vec);
+	kfree(bio);
+}
+
 void bio_init(struct bio *bio)
 {
 	memset(bio, 0, sizeof(*bio));
 	bio->bi_flags = 1 << BIO_UPTODATE;
+	bio->bi_comp_cpu = -1;
 	atomic_set(&bio->bi_cnt, 1);
 }
 
@@ -118,19 +146,25 @@ void bio_init(struct bio *bio)
  * bio_alloc_bioset - allocate a bio for I/O
  * @gfp_mask:   the GFP_ mask given to the slab allocator
  * @nr_iovecs:	number of iovecs to pre-allocate
- * @bs:		the bio_set to allocate from
+ * @bs:		the bio_set to allocate from. If %NULL, just use kmalloc
  *
  * Description:
- *   bio_alloc_bioset will first try it's on mempool to satisfy the allocation.
+ *   bio_alloc_bioset will first try its own mempool to satisfy the allocation.
  *   If %__GFP_WAIT is set then we will block on the internal pool waiting
- *   for a &struct bio to become free.
+ *   for a &struct bio to become free. If a %NULL @bs is passed in, we will
+ *   fall back to just using @kmalloc to allocate the required memory.
  *
  *   allocate bio and iovecs from the memory pools specified by the
- *   bio_set structure.
+ *   bio_set structure, or @kmalloc if none given.
  **/
 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 {
-	struct bio *bio = mempool_alloc(bs->bio_pool, gfp_mask);
+	struct bio *bio;
+
+	if (bs)
+		bio = mempool_alloc(bs->bio_pool, gfp_mask);
+	else
+		bio = kmalloc(sizeof(*bio), gfp_mask);
 
 	if (likely(bio)) {
 		struct bio_vec *bvl = NULL;
@@ -141,7 +175,10 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 
 		bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
 		if (unlikely(!bvl)) {
-			mempool_free(bio, bs->bio_pool);
+			if (bs)
+				mempool_free(bio, bs->bio_pool);
+			else
+				kfree(bio);
 			bio = NULL;
 			goto out;
 		}
@@ -164,6 +201,23 @@ struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
 	return bio;
 }
 
+/*
+ * Like bio_alloc(), but doesn't use a mempool backing. This means that
+ * it CAN fail, but while bio_alloc() can only be used for allocations
+ * that have a short (finite) life span, bio_kmalloc() should be used
+ * for more permanent bio allocations (like allocating some bio's for
+ * initialization or setup purposes).
+ */
+struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
+{
+	struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
+
+	if (bio)
+		bio->bi_destructor = bio_kmalloc_destructor;
+
+	return bio;
+}
+
 void zero_fill_bio(struct bio *bio)
 {
 	unsigned long flags;
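A hedged usage sketch of the new allocator: because bio_kmalloc() has no mempool backing it can fail, so the result must be checked; dropping the last reference with bio_put() runs bio_kmalloc_destructor(), which kfree()s the iovec array and the bio. Illustrative kernel-style code, not part of the patch (the helper names are hypothetical):

#include <linux/bio.h>

static struct bio *setup_long_lived_bio(int nr_vecs)
{
	struct bio *bio = bio_kmalloc(GFP_KERNEL, nr_vecs);

	if (!bio)
		return NULL;	/* unlike bio_alloc(), this CAN fail */

	/* populate with bio_add_page(), set bi_end_io, etc. */
	return bio;
}

static void teardown_long_lived_bio(struct bio *bio)
{
	bio_put(bio);	/* last ref: bio_kmalloc_destructor() frees everything */
}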
@@ -208,14 +262,6 @@ inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
 	return bio->bi_phys_segments;
 }
 
-inline int bio_hw_segments(struct request_queue *q, struct bio *bio)
-{
-	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
-		blk_recount_segments(q, bio);
-
-	return bio->bi_hw_segments;
-}
-
 /**
  *	__bio_clone	-	clone a bio
  *	@bio: destination bio
@@ -350,8 +396,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 	 */
 
 	while (bio->bi_phys_segments >= q->max_phys_segments
-	       || bio->bi_hw_segments >= q->max_hw_segments
-	       || BIOVEC_VIRT_OVERSIZE(bio->bi_size)) {
+	       || bio->bi_phys_segments >= q->max_hw_segments) {
 
 		if (retried_segments)
 			return 0;
@@ -395,13 +440,11 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 	}
 
 	/* If we may be able to merge these biovecs, force a recount */
-	if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec) ||
-	    BIOVEC_VIRT_MERGEABLE(bvec-1, bvec)))
+	if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
 		bio->bi_flags &= ~(1 << BIO_SEG_VALID);
 
 	bio->bi_vcnt++;
 	bio->bi_phys_segments++;
-	bio->bi_hw_segments++;
  done:
 	bio->bi_size += len;
 	return len;
@@ -449,16 +492,19 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 
 struct bio_map_data {
 	struct bio_vec *iovecs;
-	int nr_sgvecs;
 	struct sg_iovec *sgvecs;
+	int nr_sgvecs;
+	int is_our_pages;
 };
 
 static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
-			     struct sg_iovec *iov, int iov_count)
+			     struct sg_iovec *iov, int iov_count,
+			     int is_our_pages)
 {
 	memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
 	memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
 	bmd->nr_sgvecs = iov_count;
+	bmd->is_our_pages = is_our_pages;
 	bio->bi_private = bmd;
 }
 
@@ -493,7 +539,8 @@ static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
 }
 
 static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
-			  struct sg_iovec *iov, int iov_count, int uncopy)
+			  struct sg_iovec *iov, int iov_count, int uncopy,
+			  int do_free_page)
 {
 	int ret = 0, i;
 	struct bio_vec *bvec;
@@ -536,7 +583,7 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
 			}
 		}
 
-		if (uncopy)
+		if (do_free_page)
 			__free_page(bvec->bv_page);
 	}
 
@@ -553,10 +600,11 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
 int bio_uncopy_user(struct bio *bio)
 {
 	struct bio_map_data *bmd = bio->bi_private;
-	int ret;
-
-	ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs, bmd->nr_sgvecs, 1);
+	int ret = 0;
 
+	if (!bio_flagged(bio, BIO_NULL_MAPPED))
+		ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
+				     bmd->nr_sgvecs, 1, bmd->is_our_pages);
 	bio_free_map_data(bmd);
 	bio_put(bio);
 	return ret;
@@ -565,16 +613,20 @@ int bio_uncopy_user(struct bio *bio)
 /**
  *	bio_copy_user_iov	-	copy user data to bio
  *	@q: destination block queue
+ *	@map_data: pointer to the rq_map_data holding pages (if necessary)
  *	@iov:	the iovec.
  *	@iov_count: number of elements in the iovec
  *	@write_to_vm: bool indicating writing to pages or not
+ *	@gfp_mask: memory allocation flags
  *
  *	Prepares and returns a bio for indirect user io, bouncing data
  *	to/from kernel pages as necessary. Must be paired with
  *	call bio_uncopy_user() on io completion.
  */
-struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
-			      int iov_count, int write_to_vm)
+struct bio *bio_copy_user_iov(struct request_queue *q,
+			      struct rq_map_data *map_data,
+			      struct sg_iovec *iov, int iov_count,
+			      int write_to_vm, gfp_t gfp_mask)
 {
 	struct bio_map_data *bmd;
 	struct bio_vec *bvec;
@@ -597,25 +649,38 @@ struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
 		len += iov[i].iov_len;
 	}
 
-	bmd = bio_alloc_map_data(nr_pages, iov_count, GFP_KERNEL);
+	bmd = bio_alloc_map_data(nr_pages, iov_count, gfp_mask);
 	if (!bmd)
 		return ERR_PTR(-ENOMEM);
 
 	ret = -ENOMEM;
-	bio = bio_alloc(GFP_KERNEL, nr_pages);
+	bio = bio_alloc(gfp_mask, nr_pages);
 	if (!bio)
 		goto out_bmd;
 
 	bio->bi_rw |= (!write_to_vm << BIO_RW);
 
 	ret = 0;
+	i = 0;
 	while (len) {
-		unsigned int bytes = PAGE_SIZE;
+		unsigned int bytes;
+
+		if (map_data)
+			bytes = 1U << (PAGE_SHIFT + map_data->page_order);
+		else
+			bytes = PAGE_SIZE;
 
 		if (bytes > len)
 			bytes = len;
 
-		page = alloc_page(q->bounce_gfp | GFP_KERNEL);
+		if (map_data) {
+			if (i == map_data->nr_entries) {
+				ret = -ENOMEM;
+				break;
+			}
+			page = map_data->pages[i++];
+		} else
+			page = alloc_page(q->bounce_gfp | gfp_mask);
 		if (!page) {
 			ret = -ENOMEM;
 			break;
@@ -634,16 +699,17 @@ struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
 	 * success
 	 */
 	if (!write_to_vm) {
-		ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0);
+		ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 0);
 		if (ret)
 			goto cleanup;
 	}
 
-	bio_set_map_data(bmd, bio, iov, iov_count);
+	bio_set_map_data(bmd, bio, iov, iov_count, map_data ? 0 : 1);
 	return bio;
 cleanup:
-	bio_for_each_segment(bvec, bio, i)
-		__free_page(bvec->bv_page);
+	if (!map_data)
+		bio_for_each_segment(bvec, bio, i)
+			__free_page(bvec->bv_page);
 
 	bio_put(bio);
 out_bmd:
@@ -654,29 +720,32 @@ out_bmd:
 /**
  *	bio_copy_user	-	copy user data to bio
  *	@q: destination block queue
+ *	@map_data: pointer to the rq_map_data holding pages (if necessary)
  *	@uaddr: start of user address
  *	@len: length in bytes
  *	@write_to_vm: bool indicating writing to pages or not
+ *	@gfp_mask: memory allocation flags
  *
  *	Prepares and returns a bio for indirect user io, bouncing data
  *	to/from kernel pages as necessary. Must be paired with
  *	call bio_uncopy_user() on io completion.
  */
-struct bio *bio_copy_user(struct request_queue *q, unsigned long uaddr,
-			  unsigned int len, int write_to_vm)
+struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data,
+			  unsigned long uaddr, unsigned int len,
+			  int write_to_vm, gfp_t gfp_mask)
 {
 	struct sg_iovec iov;
 
 	iov.iov_base = (void __user *)uaddr;
 	iov.iov_len = len;
 
-	return bio_copy_user_iov(q, &iov, 1, write_to_vm);
+	return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask);
 }
 
 static struct bio *__bio_map_user_iov(struct request_queue *q,
 				      struct block_device *bdev,
 				      struct sg_iovec *iov, int iov_count,
-				      int write_to_vm)
+				      int write_to_vm, gfp_t gfp_mask)
 {
 	int i, j;
 	int nr_pages = 0;
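The preceding hunks work together: a caller may now hand bio_copy_user() a set of pre-allocated bounce pages through the new rq_map_data argument, in which case is_our_pages stays 0 and bio_uncopy_user() leaves the pages alone. A sketch under those assumptions, using only the rq_map_data fields the loop above touches (pages, page_order, nr_entries); the helper is hypothetical:

#include <linux/bio.h>
#include <linux/blkdev.h>

static struct bio *copy_user_with_own_pages(struct request_queue *q,
					    unsigned long uaddr,
					    unsigned int len,
					    struct page **pages, int nr)
{
	struct rq_map_data map_data = {
		.pages		= pages,	/* caller-owned bounce pages */
		.page_order	= 0,		/* order-0, i.e. PAGE_SIZE each */
		.nr_entries	= nr,
	};

	/* write_to_vm == 0: user data is copied into the bio (a WRITE) */
	return bio_copy_user(q, &map_data, uaddr, len, 0, GFP_KERNEL);
}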
@@ -702,12 +771,12 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
 	if (!nr_pages)
 		return ERR_PTR(-EINVAL);
 
-	bio = bio_alloc(GFP_KERNEL, nr_pages);
+	bio = bio_alloc(gfp_mask, nr_pages);
 	if (!bio)
 		return ERR_PTR(-ENOMEM);
 
 	ret = -ENOMEM;
-	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
+	pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
 	if (!pages)
 		goto out;
 
@@ -786,19 +855,21 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
  *	@uaddr: start of user address
  *	@len: length in bytes
  *	@write_to_vm: bool indicating writing to pages or not
+ *	@gfp_mask: memory allocation flags
  *
  *	Map the user space address into a bio suitable for io to a block
  *	device. Returns an error pointer in case of error.
  */
 struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
-			 unsigned long uaddr, unsigned int len, int write_to_vm)
+			 unsigned long uaddr, unsigned int len, int write_to_vm,
+			 gfp_t gfp_mask)
 {
 	struct sg_iovec iov;
 
 	iov.iov_base = (void __user *)uaddr;
 	iov.iov_len = len;
 
-	return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm);
+	return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask);
 }
 
 /**
@@ -808,18 +879,19 @@ struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
  *	@iov:	the iovec.
  *	@iov_count: number of elements in the iovec
  *	@write_to_vm: bool indicating writing to pages or not
+ *	@gfp_mask: memory allocation flags
  *
  *	Map the user space address into a bio suitable for io to a block
  *	device. Returns an error pointer in case of error.
  */
 struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
 			     struct sg_iovec *iov, int iov_count,
-			     int write_to_vm)
+			     int write_to_vm, gfp_t gfp_mask)
 {
 	struct bio *bio;
 
-	bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);
-
+	bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm,
+				 gfp_mask);
 	if (IS_ERR(bio))
 		return bio;
 
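Both user-mapping entry points now thread the caller's allocation flags through instead of hard-coding GFP_KERNEL. A minimal sketch of the updated calling convention (hypothetical wrapper, illustration only):

#include <linux/bio.h>

static struct bio *map_user_buffer(struct request_queue *q,
				   struct block_device *bdev,
				   unsigned long uaddr, unsigned int len)
{
	/* write_to_vm == 1: the device writes into the user pages (a read) */
	return bio_map_user(q, bdev, uaddr, len, 1, GFP_KERNEL);
}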
@@ -976,48 +1048,13 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
 struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
 			  gfp_t gfp_mask, int reading)
 {
-	unsigned long kaddr = (unsigned long)data;
-	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	unsigned long start = kaddr >> PAGE_SHIFT;
-	const int nr_pages = end - start;
 	struct bio *bio;
 	struct bio_vec *bvec;
-	struct bio_map_data *bmd;
-	int i, ret;
-	struct sg_iovec iov;
-
-	iov.iov_base = data;
-	iov.iov_len = len;
-
-	bmd = bio_alloc_map_data(nr_pages, 1, gfp_mask);
-	if (!bmd)
-		return ERR_PTR(-ENOMEM);
-
-	ret = -ENOMEM;
-	bio = bio_alloc(gfp_mask, nr_pages);
-	if (!bio)
-		goto out_bmd;
-
-	while (len) {
-		struct page *page;
-		unsigned int bytes = PAGE_SIZE;
-
-		if (bytes > len)
-			bytes = len;
-
-		page = alloc_page(q->bounce_gfp | gfp_mask);
-		if (!page) {
-			ret = -ENOMEM;
-			goto cleanup;
-		}
-
-		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
-			ret = -EINVAL;
-			goto cleanup;
-		}
+	int i;
 
-		len -= bytes;
-	}
+	bio = bio_copy_user(q, NULL, (unsigned long)data, len, 1, gfp_mask);
+	if (IS_ERR(bio))
+		return bio;
 
 	if (!reading) {
 		void *p = data;
@@ -1030,20 +1067,9 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
 		}
 	}
 
-	bio->bi_private = bmd;
 	bio->bi_end_io = bio_copy_kern_endio;
 
-	bio_set_map_data(bmd, bio, &iov, 1);
 	return bio;
-cleanup:
-	bio_for_each_segment(bvec, bio, i)
-		__free_page(bvec->bv_page);
-
-	bio_put(bio);
-out_bmd:
-	bio_free_map_data(bmd);
-
-	return ERR_PTR(ret);
 }
 
 /*
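After the rewrite, bio_copy_kern() is a thin wrapper: it bounces the kernel buffer through bio_copy_user() with a NULL map_data (passing write_to_vm = 1 so no copy_from_user() is attempted on a kernel address) and copies the data in by hand for writes. A minimal caller sketch (hypothetical helper):

#include <linux/bio.h>

static struct bio *kernel_buf_to_bio(struct request_queue *q,
				     void *buf, unsigned int len)
{
	/* reading == 0: buf is copied into the bio for a device write */
	return bio_copy_kern(q, buf, len, GFP_KERNEL, 0);
}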
@@ -1230,9 +1256,9 @@ static void bio_pair_end_2(struct bio *bi, int err)
  * split a bio - only worry about a bio with a single page
  * in it's iovec
  */
-struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
+struct bio_pair *bio_split(struct bio *bi, int first_sectors)
 {
-	struct bio_pair *bp = mempool_alloc(pool, GFP_NOIO);
+	struct bio_pair *bp = mempool_alloc(bio_split_pool, GFP_NOIO);
 
 	if (!bp)
 		return bp;
@@ -1266,7 +1292,7 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
 	bp->bio2.bi_end_io = bio_pair_end_2;
 
 	bp->bio1.bi_private = bi;
-	bp->bio2.bi_private = pool;
+	bp->bio2.bi_private = bio_split_pool;
 
 	if (bio_integrity(bi))
 		bio_integrity_split(bi, bp, first_sectors);
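With the pool argument gone, bio_split() callers no longer need to know about bio_split_pool, which is now static to fs/bio.c. A sketch of the simplified calling convention (hypothetical caller; submission details elided):

#include <linux/bio.h>

static void split_and_release(struct bio *bio, int first_sectors)
{
	struct bio_pair *bp = bio_split(bio, first_sectors);

	if (!bp)
		return;		/* bio_split() itself checks for NULL, so do the same */

	/* submit bp->bio1 and bp->bio2 here ... */

	bio_pair_release(bp);	/* drop the pair reference when done */
}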
@@ -1274,6 +1300,42 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
 	return bp;
 }
 
+/**
+ * bio_sector_offset - Find hardware sector offset in bio
+ * @bio:        bio to inspect
+ * @index:      bio_vec index
+ * @offset:     offset in bv_page
+ *
+ * Return the number of hardware sectors between beginning of bio
+ * and an end point indicated by a bio_vec index and an offset
+ * within that vector's page.
+ */
+sector_t bio_sector_offset(struct bio *bio, unsigned short index,
+			   unsigned int offset)
+{
+	unsigned int sector_sz = queue_hardsect_size(bio->bi_bdev->bd_disk->queue);
+	struct bio_vec *bv;
+	sector_t sectors;
+	int i;
+
+	sectors = 0;
+
+	if (index >= bio->bi_idx)
+		index = bio->bi_vcnt - 1;
+
+	__bio_for_each_segment(bv, bio, i, 0) {
+		if (i == index) {
+			if (offset > bv->bv_offset)
+				sectors += (offset - bv->bv_offset) / sector_sz;
+			break;
+		}
+
+		sectors += bv->bv_len / sector_sz;
+	}
+
+	return sectors;
+}
+EXPORT_SYMBOL(bio_sector_offset);
 
 /*
  * create memory pools for biovec's in a bio_set.
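The new helper gives bio_split() users (stacking drivers such as md/dm) a way to translate a bio_vec index plus a byte offset within that vector's page into a sector count from the start of the bio. A hypothetical caller, assuming 512-byte hardware sectors on the underlying queue:

#include <linux/bio.h>

static sector_t sectors_into_bio(struct bio *bio, unsigned short idx,
				 unsigned int offset_in_page)
{
	/* e.g. idx 0, offset 1024 -> 2 with 512-byte hardware sectors */
	return bio_sector_offset(bio, idx, offset_in_page);
}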
@@ -1376,6 +1438,7 @@ static int __init init_bio(void)
 subsys_initcall(init_bio);
 
 EXPORT_SYMBOL(bio_alloc);
+EXPORT_SYMBOL(bio_kmalloc);
 EXPORT_SYMBOL(bio_put);
 EXPORT_SYMBOL(bio_free);
 EXPORT_SYMBOL(bio_endio);
@@ -1383,7 +1446,6 @@ EXPORT_SYMBOL(bio_init);
 EXPORT_SYMBOL(__bio_clone);
 EXPORT_SYMBOL(bio_clone);
 EXPORT_SYMBOL(bio_phys_segments);
-EXPORT_SYMBOL(bio_hw_segments);
 EXPORT_SYMBOL(bio_add_page);
 EXPORT_SYMBOL(bio_add_pc_page);
 EXPORT_SYMBOL(bio_get_nr_vecs);
@@ -1393,7 +1455,6 @@ EXPORT_SYMBOL(bio_map_kern);
 EXPORT_SYMBOL(bio_copy_kern);
 EXPORT_SYMBOL(bio_pair_release);
 EXPORT_SYMBOL(bio_split);
-EXPORT_SYMBOL(bio_split_pool);
 EXPORT_SYMBOL(bio_copy_user);
 EXPORT_SYMBOL(bio_uncopy_user);
 EXPORT_SYMBOL(bioset_create);