author    FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>  2008-08-28 03:17:06 -0400
committer Jens Axboe <jens.axboe@oracle.com>  2008-10-09 02:56:10 -0400
commit    152e283fdfea0cd11e297d982378b55937842dde (patch)
tree      a97a57108353f167a1e2911e8ee09c527ef42d3e /fs
parent    a3bce90edd8f6cafe3f63b1a943800792e830178 (diff)
block: introduce struct rq_map_data to use reserved pages
This patch introduces struct rq_map_data to enable bio_copy_user_iov() to use reserved pages.

Currently, bio_copy_user_iov() allocates bounce pages itself, but drivers/scsi/sg.c wants to allocate pages on its own and use them. struct rq_map_data can be used to pass such pre-allocated pages to bio_copy_user_iov(). The current users of bio_copy_user_iov() simply pass NULL (they don't want to use pre-allocated pages).

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Douglas Gilbert <dougg@torque.net>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'fs')
-rw-r--r--  fs/bio.c  58
1 file changed, 41 insertions(+), 17 deletions(-)
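The hunks below dereference map_data->pages, map_data->page_order and map_data->nr_entries, but the structure itself is defined in the block-layer half of the patch, which this fs-only diffstat does not show. Reconstructed from those uses, the definition is roughly the following sketch (the authoritative version lives in the companion include/linux/blkdev.h change):

	struct rq_map_data {
		struct page **pages;	/* caller-allocated pages to copy to/from */
		int page_order;		/* allocation order of each pages[] entry */
		int nr_entries;		/* number of entries in pages[] */
	};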
diff --git a/fs/bio.c b/fs/bio.c
index 3d2e9ad24728..a2f072647cdf 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -439,16 +439,19 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 
 struct bio_map_data {
 	struct bio_vec *iovecs;
-	int nr_sgvecs;
 	struct sg_iovec *sgvecs;
+	int nr_sgvecs;
+	int is_our_pages;
 };
 
 static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
-			     struct sg_iovec *iov, int iov_count)
+			     struct sg_iovec *iov, int iov_count,
+			     int is_our_pages)
 {
 	memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
 	memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
 	bmd->nr_sgvecs = iov_count;
+	bmd->is_our_pages = is_our_pages;
 	bio->bi_private = bmd;
 }
 
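The new is_our_pages flag records whether the bio layer allocated the bounce pages itself, so that completion frees only pages it owns. The rule, pieced together from the call sites in the hunks below (a summary in comment form, not code from the patch):

	/*
	 * bio_copy_user_iov():  bio_set_map_data(..., map_data ? 0 : 1);
	 *                       caller-supplied pages are not ours to free
	 * bio_copy_kern():      bio_set_map_data(..., 1);
	 *                       internally allocated bounce pages are ours
	 *
	 * bio_uncopy_user() forwards bmd->is_our_pages as the do_free_page
	 * argument of __bio_copy_iov(), so __free_page() is only called on
	 * pages the bio layer allocated itself.
	 */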
@@ -483,7 +486,8 @@ static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
 }
 
 static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
-			  struct sg_iovec *iov, int iov_count, int uncopy)
+			  struct sg_iovec *iov, int iov_count, int uncopy,
+			  int do_free_page)
 {
 	int ret = 0, i;
 	struct bio_vec *bvec;
@@ -526,7 +530,7 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
 			}
 		}
 
-		if (uncopy)
+		if (do_free_page)
 			__free_page(bvec->bv_page);
 	}
 
@@ -545,7 +549,8 @@ int bio_uncopy_user(struct bio *bio)
 	struct bio_map_data *bmd = bio->bi_private;
 	int ret;
 
-	ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs, bmd->nr_sgvecs, 1);
+	ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs, bmd->nr_sgvecs, 1,
+			     bmd->is_our_pages);
 
 	bio_free_map_data(bmd);
 	bio_put(bio);
@@ -555,6 +560,7 @@ int bio_uncopy_user(struct bio *bio)
 /**
  * bio_copy_user_iov - copy user data to bio
  * @q: destination block queue
+ * @map_data: pointer to the rq_map_data holding pages (if necessary)
  * @iov: the iovec.
  * @iov_count: number of elements in the iovec
  * @write_to_vm: bool indicating writing to pages or not
@@ -564,8 +570,10 @@ int bio_uncopy_user(struct bio *bio)
  * to/from kernel pages as necessary. Must be paired with
  * call bio_uncopy_user() on io completion.
  */
-struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
-			      int iov_count, int write_to_vm, gfp_t gfp_mask)
+struct bio *bio_copy_user_iov(struct request_queue *q,
+			      struct rq_map_data *map_data,
+			      struct sg_iovec *iov, int iov_count,
+			      int write_to_vm, gfp_t gfp_mask)
 {
 	struct bio_map_data *bmd;
 	struct bio_vec *bvec;
@@ -600,13 +608,26 @@ struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
 	bio->bi_rw |= (!write_to_vm << BIO_RW);
 
 	ret = 0;
+	i = 0;
 	while (len) {
-		unsigned int bytes = PAGE_SIZE;
+		unsigned int bytes;
+
+		if (map_data)
+			bytes = 1U << (PAGE_SHIFT + map_data->page_order);
+		else
+			bytes = PAGE_SIZE;
 
 		if (bytes > len)
 			bytes = len;
 
-		page = alloc_page(q->bounce_gfp | gfp_mask);
+		if (map_data) {
+			if (i == map_data->nr_entries) {
+				ret = -ENOMEM;
+				break;
+			}
+			page = map_data->pages[i++];
+		} else
+			page = alloc_page(q->bounce_gfp | gfp_mask);
 		if (!page) {
 			ret = -ENOMEM;
 			break;
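With map_data set, each pages[] entry is a higher-order allocation, so the copy loop advances in chunks of 2^page_order pages instead of PAGE_SIZE, and -ENOMEM is returned if the caller supplied too few entries. A worked example, assuming 4 KiB pages (PAGE_SHIFT == 12; the numbers are hypothetical):

	/*
	 * page_order == 0:  bytes = 1U << 12 = 4096   (one page per entry)
	 * page_order == 2:  bytes = 1U << 14 = 16384  (four pages per entry)
	 *
	 * A request of len == 20000 with page_order == 2 consumes pages[0]
	 * for the first 16384 bytes and pages[1] for the remaining 3616,
	 * and fails with -ENOMEM if nr_entries < 2.
	 */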
@@ -625,16 +646,17 @@ struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
 	 * success
 	 */
 	if (!write_to_vm) {
-		ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0);
+		ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 0);
 		if (ret)
 			goto cleanup;
 	}
 
-	bio_set_map_data(bmd, bio, iov, iov_count);
+	bio_set_map_data(bmd, bio, iov, iov_count, map_data ? 0 : 1);
 	return bio;
 cleanup:
-	bio_for_each_segment(bvec, bio, i)
-		__free_page(bvec->bv_page);
+	if (!map_data)
+		bio_for_each_segment(bvec, bio, i)
+			__free_page(bvec->bv_page);
 
 	bio_put(bio);
 out_bmd:
@@ -645,6 +667,7 @@ out_bmd:
 /**
  * bio_copy_user - copy user data to bio
  * @q: destination block queue
+ * @map_data: pointer to the rq_map_data holding pages (if necessary)
  * @uaddr: start of user address
  * @len: length in bytes
  * @write_to_vm: bool indicating writing to pages or not
@@ -654,15 +677,16 @@ out_bmd:
  * to/from kernel pages as necessary. Must be paired with
  * call bio_uncopy_user() on io completion.
  */
-struct bio *bio_copy_user(struct request_queue *q, unsigned long uaddr,
-			  unsigned int len, int write_to_vm, gfp_t gfp_mask)
+struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data,
+			  unsigned long uaddr, unsigned int len,
+			  int write_to_vm, gfp_t gfp_mask)
 {
 	struct sg_iovec iov;
 
 	iov.iov_base = (void __user *)uaddr;
 	iov.iov_len = len;
 
-	return bio_copy_user_iov(q, &iov, 1, write_to_vm, gfp_mask);
+	return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask);
 }
 
 static struct bio *__bio_map_user_iov(struct request_queue *q,
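Existing callers keep the old behaviour by passing NULL for map_data; a driver such as sg that reserves its own pages fills in an rq_map_data first. A hypothetical caller-side sketch (reserved_pages, nr and the surrounding error handling are illustrative, not sg.c's actual code):

	struct rq_map_data md = {
		.pages      = reserved_pages,	/* struct page *reserved_pages[nr] */
		.page_order = 2,		/* each entry is an order-2 (4-page) allocation */
		.nr_entries = nr,
	};
	struct bio *bio;

	/* use the reserved pages instead of alloc_page() per chunk */
	bio = bio_copy_user(q, &md, uaddr, len, write_to_vm, GFP_KERNEL);

	/* passing NULL preserves the old allocate-internally behaviour */
	bio = bio_copy_user(q, NULL, uaddr, len, write_to_vm, GFP_KERNEL);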
@@ -1028,7 +1052,7 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
 	bio->bi_private = bmd;
 	bio->bi_end_io = bio_copy_kern_endio;
 
-	bio_set_map_data(bmd, bio, &iov, 1);
+	bio_set_map_data(bmd, bio, &iov, 1, 1);
 	return bio;
 cleanup:
 	bio_for_each_segment(bvec, bio, i)