author		James Bottomley <jejb@titanic.(none)>	2005-08-28 11:43:07 -0400
committer	James Bottomley <jejb@titanic.(none)>	2005-08-28 11:43:07 -0400
commit		31151ba2cef171344beac254e65bd7e00138bb0d
tree		a8f9cd3c0395656d974125c0ca7ed635aacddeee /fs/bio.c
parent		3d52acb34247816c453f94596e6c7fc4499b76dc
parent		73747aed04d3b3fb694961d025f81863b99c6898
fix mismerge in ll_rw_blk.c
Diffstat (limited to 'fs/bio.c')
-rw-r--r--	fs/bio.c	227
1 file changed, 182 insertions(+), 45 deletions(-)
@@ -25,6 +25,7 @@
 #include <linux/module.h>
 #include <linux/mempool.h>
 #include <linux/workqueue.h>
+#include <scsi/sg.h>		/* for struct sg_iovec */
 
 #define BIO_POOL_SIZE 256
 
@@ -546,22 +547,34 @@ out_bmd:
 	return ERR_PTR(ret);
 }
 
-static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev,
-				  unsigned long uaddr, unsigned int len,
-				  int write_to_vm)
+static struct bio *__bio_map_user_iov(request_queue_t *q,
+				      struct block_device *bdev,
+				      struct sg_iovec *iov, int iov_count,
+				      int write_to_vm)
 {
-	unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	unsigned long start = uaddr >> PAGE_SHIFT;
-	const int nr_pages = end - start;
-	int ret, offset, i;
+	int i, j;
+	int nr_pages = 0;
 	struct page **pages;
 	struct bio *bio;
+	int cur_page = 0;
+	int ret, offset;
 
-	/*
-	 * transfer and buffer must be aligned to at least hardsector
-	 * size for now, in the future we can relax this restriction
-	 */
-	if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
+	for (i = 0; i < iov_count; i++) {
+		unsigned long uaddr = (unsigned long)iov[i].iov_base;
+		unsigned long len = iov[i].iov_len;
+		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		unsigned long start = uaddr >> PAGE_SHIFT;
+
+		nr_pages += end - start;
+		/*
+		 * transfer and buffer must be aligned to at least hardsector
+		 * size for now, in the future we can relax this restriction
+		 */
+		if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
+			return ERR_PTR(-EINVAL);
+	}
+
+	if (!nr_pages)
 		return ERR_PTR(-EINVAL);
 
 	bio = bio_alloc(GFP_KERNEL, nr_pages);
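
The page accounting in the hunk above rounds each iovec out to page boundaries before summing. A standalone sketch of the same arithmetic (plain C, assuming 4 KiB pages; not kernel code) shows why even a 16-byte buffer can need two pinned pages when it straddles a boundary:

    #include <stdio.h>

    #define PAGE_SIZE  4096UL	/* assumed page size */
    #define PAGE_SHIFT 12

    int main(void)
    {
        /* a 16-byte buffer starting 4 bytes before a page boundary */
        unsigned long uaddr = 0x1ffc;
        unsigned long len = 16;

        /* the same rounding __bio_map_user_iov() applies per iovec */
        unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned long start = uaddr >> PAGE_SHIFT;

        /* the buffer spans pages 1 and 2, so two pages are counted */
        printf("nr_pages += %lu\n", end - start);	/* prints 2 */
        return 0;
    }
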
@@ -573,42 +586,54 @@ static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev,
 	if (!pages)
 		goto out;
 
-	down_read(&current->mm->mmap_sem);
-	ret = get_user_pages(current, current->mm, uaddr, nr_pages,
-			write_to_vm, 0, pages, NULL);
-	up_read(&current->mm->mmap_sem);
-
-	if (ret < nr_pages)
-		goto out;
-
-	bio->bi_bdev = bdev;
-
-	offset = uaddr & ~PAGE_MASK;
-	for (i = 0; i < nr_pages; i++) {
-		unsigned int bytes = PAGE_SIZE - offset;
-
-		if (len <= 0)
-			break;
-
-		if (bytes > len)
-			bytes = len;
+	memset(pages, 0, nr_pages * sizeof(struct page *));
+
+	for (i = 0; i < iov_count; i++) {
+		unsigned long uaddr = (unsigned long)iov[i].iov_base;
+		unsigned long len = iov[i].iov_len;
+		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		unsigned long start = uaddr >> PAGE_SHIFT;
+		const int local_nr_pages = end - start;
+		const int page_limit = cur_page + local_nr_pages;
+
+		down_read(&current->mm->mmap_sem);
+		ret = get_user_pages(current, current->mm, uaddr,
+				     local_nr_pages,
+				     write_to_vm, 0, &pages[cur_page], NULL);
+		up_read(&current->mm->mmap_sem);
+
+		if (ret < local_nr_pages)
+			goto out_unmap;
+
+
+		offset = uaddr & ~PAGE_MASK;
+		for (j = cur_page; j < page_limit; j++) {
+			unsigned int bytes = PAGE_SIZE - offset;
+
+			if (len <= 0)
+				break;
+
+			if (bytes > len)
+				bytes = len;
+
+			/*
+			 * sorry...
+			 */
+			if (__bio_add_page(q, bio, pages[j], bytes, offset) < bytes)
+				break;
+
+			len -= bytes;
+			offset = 0;
+		}
 
+		cur_page = j;
 		/*
-		 * sorry...
+		 * release the pages we didn't map into the bio, if any
 		 */
-		if (__bio_add_page(q, bio, pages[i], bytes, offset) < bytes)
-			break;
-
-		len -= bytes;
-		offset = 0;
+		while (j < page_limit)
+			page_cache_release(pages[j++]);
 	}
 
-	/*
-	 * release the pages we didn't map into the bio, if any
-	 */
-	while (i < nr_pages)
-		page_cache_release(pages[i++]);
-
 	kfree(pages);
 
 	/*
@@ -617,9 +642,17 @@ static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev,
 	if (!write_to_vm)
 		bio->bi_rw |= (1 << BIO_RW);
 
+	bio->bi_bdev = bdev;
 	bio->bi_flags |= (1 << BIO_USER_MAPPED);
 	return bio;
-out:
+
+ out_unmap:
+	for (i = 0; i < nr_pages; i++) {
+		if(!pages[i])
+			break;
+		page_cache_release(pages[i]);
+	}
+ out:
 	kfree(pages);
 	bio_put(bio);
 	return ERR_PTR(ret);
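
The out_unmap path just added works because the pages[] table is zeroed with memset() before any pinning: after a short get_user_pages() return, releasing entries until the first NULL slot undoes exactly the pins that succeeded. A minimal, self-contained sketch of that unwind idiom (plain C; release() is a hypothetical stand-in for page_cache_release()):

    #include <stdio.h>

    /* hypothetical stand-in for page_cache_release() */
    static void release(void *page)
    {
        printf("releasing %p\n", page);
    }

    /*
     * Undo a partially filled table: because every slot was zeroed
     * up front, the first NULL marks where pinning stopped.
     */
    static void unwind(void **pages, int nr_pages)
    {
        int i;

        for (i = 0; i < nr_pages; i++) {
            if (!pages[i])
                break;
            release(pages[i]);
        }
    }

    int main(void)
    {
        int dummy1, dummy2;
        void *pages[4] = { &dummy1, &dummy2, NULL, NULL };	/* 2 of 4 pinned */

        unwind(pages, 4);	/* releases only the two non-NULL entries */
        return 0;
    }
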
@@ -639,9 +672,33 @@ out:
 struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
 			 unsigned long uaddr, unsigned int len, int write_to_vm)
 {
+	struct sg_iovec iov;
+
+	iov.iov_base = (__user void *)uaddr;
+	iov.iov_len = len;
+
+	return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm);
+}
+
+/**
+ * bio_map_user_iov - map user sg_iovec table into bio
+ * @q: the request_queue_t for the bio
+ * @bdev: destination block device
+ * @iov: the iovec.
+ * @iov_count: number of elements in the iovec
+ * @write_to_vm: bool indicating writing to pages or not
+ *
+ * Map the user space address into a bio suitable for io to a block
+ * device. Returns an error pointer in case of error.
+ */
+struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
+			     struct sg_iovec *iov, int iov_count,
+			     int write_to_vm)
+{
 	struct bio *bio;
+	int len = 0, i;
 
-	bio = __bio_map_user(q, bdev, uaddr, len, write_to_vm);
+	bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);
 
 	if (IS_ERR(bio))
 		return bio;
@@ -654,6 +711,9 @@ struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
 	 */
 	bio_get(bio);
 
+	for (i = 0; i < iov_count; i++)
+		len += iov[i].iov_len;
+
 	if (bio->bi_size == len)
 		return bio;
 
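
For a sense of how the new entry point is meant to be called, here is a hypothetical caller that maps two user buffers into a single bio for a device-to-memory transfer. The function name, queue, device, and buffers are placeholders, and the IS_ERR() check a real caller needs on the result is omitted for brevity:

    #include <linux/bio.h>
    #include <linux/blkdev.h>
    #include <scsi/sg.h>		/* for struct sg_iovec */

    /* hypothetical: map two user buffers into one bio for a read */
    static struct bio *map_two_buffers(request_queue_t *q,
                                       struct block_device *bdev,
                                       void __user *buf1, unsigned int len1,
                                       void __user *buf2, unsigned int len2)
    {
        struct sg_iovec iov[2];

        iov[0].iov_base = buf1;
        iov[0].iov_len = len1;
        iov[1].iov_base = buf2;
        iov[1].iov_len = len2;

        /* write_to_vm == 1: the device writes into the user pages */
        return bio_map_user_iov(q, bdev, iov, 2, 1);
    }
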
@@ -698,6 +758,82 @@ void bio_unmap_user(struct bio *bio)
 	bio_put(bio);
 }
 
+static int bio_map_kern_endio(struct bio *bio, unsigned int bytes_done, int err)
+{
+	if (bio->bi_size)
+		return 1;
+
+	bio_put(bio);
+	return 0;
+}
+
+
+static struct bio *__bio_map_kern(request_queue_t *q, void *data,
+				  unsigned int len, unsigned int gfp_mask)
+{
+	unsigned long kaddr = (unsigned long)data;
+	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	unsigned long start = kaddr >> PAGE_SHIFT;
+	const int nr_pages = end - start;
+	int offset, i;
+	struct bio *bio;
+
+	bio = bio_alloc(gfp_mask, nr_pages);
+	if (!bio)
+		return ERR_PTR(-ENOMEM);
+
+	offset = offset_in_page(kaddr);
+	for (i = 0; i < nr_pages; i++) {
+		unsigned int bytes = PAGE_SIZE - offset;
+
+		if (len <= 0)
+			break;
+
+		if (bytes > len)
+			bytes = len;
+
+		if (__bio_add_page(q, bio, virt_to_page(data), bytes,
+				   offset) < bytes)
+			break;
+
+		data += bytes;
+		len -= bytes;
+		offset = 0;
+	}
+
+	bio->bi_end_io = bio_map_kern_endio;
+	return bio;
+}
+
+/**
+ * bio_map_kern - map kernel address into bio
+ * @q: the request_queue_t for the bio
+ * @data: pointer to buffer to map
+ * @len: length in bytes
+ * @gfp_mask: allocation flags for bio allocation
+ *
+ * Map the kernel address into a bio suitable for io to a block
+ * device. Returns an error pointer in case of error.
+ */
+struct bio *bio_map_kern(request_queue_t *q, void *data, unsigned int len,
+			 unsigned int gfp_mask)
+{
+	struct bio *bio;
+
+	bio = __bio_map_kern(q, data, len, gfp_mask);
+	if (IS_ERR(bio))
+		return bio;
+
+	if (bio->bi_size == len)
+		return bio;
+
+	/*
+	 * Don't support partial mappings.
+	 */
+	bio_put(bio);
+	return ERR_PTR(-EINVAL);
+}
+
 /*
  * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
  * for performing direct-IO in BIOs.
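
Similarly, a hypothetical user of the new bio_map_kern() might wrap a freshly allocated kernel buffer; everything here except the bio_map_kern() call itself (the function name, the allocation, the queue) is illustrative. Note that partial mappings were already rejected with -EINVAL inside bio_map_kern(), so on success the bio covers the whole buffer:

    #include <linux/bio.h>
    #include <linux/blkdev.h>
    #include <linux/err.h>
    #include <linux/slab.h>

    /* hypothetical: wrap a kmalloc'ed buffer in a bio */
    static struct bio *map_kernel_buffer(request_queue_t *q, unsigned int len)
    {
        void *buf = kmalloc(len, GFP_KERNEL);
        struct bio *bio;

        if (!buf)
            return ERR_PTR(-ENOMEM);

        bio = bio_map_kern(q, buf, len, GFP_KERNEL);
        if (IS_ERR(bio))
            kfree(buf);		/* mapping failed, drop the buffer */

        return bio;
    }
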
@@ -1085,6 +1221,7 @@ EXPORT_SYMBOL(bio_add_page);
 EXPORT_SYMBOL(bio_get_nr_vecs);
 EXPORT_SYMBOL(bio_map_user);
 EXPORT_SYMBOL(bio_unmap_user);
+EXPORT_SYMBOL(bio_map_kern);
 EXPORT_SYMBOL(bio_pair_release);
 EXPORT_SYMBOL(bio_split);
 EXPORT_SYMBOL(bio_split_pool);