path: root/fs
author	Linus Torvalds <torvalds@g5.osdl.org>	2005-09-07 20:31:27 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-09-07 20:31:27 -0400
commit	0481990b758628e12f4b0a9e15094e70cefc7cd1 (patch)
tree	67a4b4b7acc6a688b87ef2a2d3ec0e296e6e480c /fs
parent	db400b3c4ee89d384d9163836a55577abdae772d (diff)
parent	17fa53da1239b8712c5cebbd72a74c713b6c2db9 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-for-linus-2.6
Diffstat (limited to 'fs')
-rw-r--r--	fs/bio.c	227
1 file changed, 182 insertions(+), 45 deletions(-)
diff --git a/fs/bio.c b/fs/bio.c
index bf3ec9d2b54c..a7d4fd3a3299 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -25,6 +25,7 @@
 #include <linux/module.h>
 #include <linux/mempool.h>
 #include <linux/workqueue.h>
+#include <scsi/sg.h>		/* for struct sg_iovec */
 
 #define BIO_POOL_SIZE 256
 
@@ -555,22 +556,34 @@ out_bmd:
 	return ERR_PTR(ret);
 }
 
-static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev,
-				  unsigned long uaddr, unsigned int len,
-				  int write_to_vm)
+static struct bio *__bio_map_user_iov(request_queue_t *q,
+				      struct block_device *bdev,
+				      struct sg_iovec *iov, int iov_count,
+				      int write_to_vm)
 {
-	unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	unsigned long start = uaddr >> PAGE_SHIFT;
-	const int nr_pages = end - start;
-	int ret, offset, i;
+	int i, j;
+	int nr_pages = 0;
 	struct page **pages;
 	struct bio *bio;
+	int cur_page = 0;
+	int ret, offset;
 
-	/*
-	 * transfer and buffer must be aligned to at least hardsector
-	 * size for now, in the future we can relax this restriction
-	 */
-	if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
+	for (i = 0; i < iov_count; i++) {
+		unsigned long uaddr = (unsigned long)iov[i].iov_base;
+		unsigned long len = iov[i].iov_len;
+		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		unsigned long start = uaddr >> PAGE_SHIFT;
+
+		nr_pages += end - start;
+		/*
+		 * transfer and buffer must be aligned to at least hardsector
+		 * size for now, in the future we can relax this restriction
+		 */
+		if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
+			return ERR_PTR(-EINVAL);
+	}
+
+	if (!nr_pages)
 		return ERR_PTR(-EINVAL);
 
 	bio = bio_alloc(GFP_KERNEL, nr_pages);
@@ -582,42 +595,54 @@ static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev,
 	if (!pages)
 		goto out;
 
-	down_read(&current->mm->mmap_sem);
-	ret = get_user_pages(current, current->mm, uaddr, nr_pages,
-				write_to_vm, 0, pages, NULL);
-	up_read(&current->mm->mmap_sem);
-
-	if (ret < nr_pages)
-		goto out;
-
-	bio->bi_bdev = bdev;
-
-	offset = uaddr & ~PAGE_MASK;
-	for (i = 0; i < nr_pages; i++) {
-		unsigned int bytes = PAGE_SIZE - offset;
-
-		if (len <= 0)
-			break;
-
-		if (bytes > len)
-			bytes = len;
+	memset(pages, 0, nr_pages * sizeof(struct page *));
+
+	for (i = 0; i < iov_count; i++) {
+		unsigned long uaddr = (unsigned long)iov[i].iov_base;
+		unsigned long len = iov[i].iov_len;
+		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		unsigned long start = uaddr >> PAGE_SHIFT;
+		const int local_nr_pages = end - start;
+		const int page_limit = cur_page + local_nr_pages;
+
+		down_read(&current->mm->mmap_sem);
+		ret = get_user_pages(current, current->mm, uaddr,
+				     local_nr_pages,
+				     write_to_vm, 0, &pages[cur_page], NULL);
+		up_read(&current->mm->mmap_sem);
+
+		if (ret < local_nr_pages)
+			goto out_unmap;
+
+
+		offset = uaddr & ~PAGE_MASK;
+		for (j = cur_page; j < page_limit; j++) {
+			unsigned int bytes = PAGE_SIZE - offset;
+
+			if (len <= 0)
+				break;
+
+			if (bytes > len)
+				bytes = len;
+
+			/*
+			 * sorry...
+			 */
+			if (__bio_add_page(q, bio, pages[j], bytes, offset) < bytes)
+				break;
+
+			len -= bytes;
+			offset = 0;
+		}
 
+		cur_page = j;
 		/*
-		 * sorry...
+		 * release the pages we didn't map into the bio, if any
 		 */
-		if (__bio_add_page(q, bio, pages[i], bytes, offset) < bytes)
-			break;
-
-		len -= bytes;
-		offset = 0;
+		while (j < page_limit)
+			page_cache_release(pages[j++]);
 	}
 
-	/*
-	 * release the pages we didn't map into the bio, if any
-	 */
-	while (i < nr_pages)
-		page_cache_release(pages[i++]);
-
 	kfree(pages);
 
 	/*
@@ -626,9 +651,17 @@ static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev,
 	if (!write_to_vm)
 		bio->bi_rw |= (1 << BIO_RW);
 
+	bio->bi_bdev = bdev;
 	bio->bi_flags |= (1 << BIO_USER_MAPPED);
 	return bio;
-out:
+
+ out_unmap:
+	for (i = 0; i < nr_pages; i++) {
+		if(!pages[i])
+			break;
+		page_cache_release(pages[i]);
+	}
+ out:
 	kfree(pages);
 	bio_put(bio);
 	return ERR_PTR(ret);
@@ -648,9 +681,33 @@ out:
 struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
 			 unsigned long uaddr, unsigned int len, int write_to_vm)
 {
+	struct sg_iovec iov;
+
+	iov.iov_base = (__user void *)uaddr;
+	iov.iov_len = len;
+
+	return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm);
+}
+
+/**
+ * bio_map_user_iov - map user sg_iovec table into bio
+ * @q: the request_queue_t for the bio
+ * @bdev: destination block device
+ * @iov: the iovec.
+ * @iov_count: number of elements in the iovec
+ * @write_to_vm: bool indicating writing to pages or not
+ *
+ * Map the user space address into a bio suitable for io to a block
+ * device. Returns an error pointer in case of error.
+ */
+struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
+			     struct sg_iovec *iov, int iov_count,
+			     int write_to_vm)
+{
 	struct bio *bio;
+	int len = 0, i;
 
-	bio = __bio_map_user(q, bdev, uaddr, len, write_to_vm);
+	bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);
 
 	if (IS_ERR(bio))
 		return bio;
@@ -663,6 +720,9 @@ struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
  */
 	bio_get(bio);
 
+	for (i = 0; i < iov_count; i++)
+		len += iov[i].iov_len;
+
 	if (bio->bi_size == len)
 		return bio;
 
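
For orientation, here is a minimal caller sketch for the bio_map_user_iov() interface added above. It is illustrative only and not part of this commit; the helper name, buffers, and error handling are hypothetical, and only the bio_map_user_iov()/bio_unmap_user() signatures are taken from the patch itself.

/* Hypothetical usage: map two user buffers into one bio for a READ. */
static int example_map_user_buffers(request_queue_t *q, struct block_device *bdev,
				    void __user *buf1, unsigned int len1,
				    void __user *buf2, unsigned int len2)
{
	struct sg_iovec iov[2];
	struct bio *bio;

	/* each base/len must satisfy queue_dma_alignment(q), per the check above */
	iov[0].iov_base = buf1;
	iov[0].iov_len = len1;
	iov[1].iov_base = buf2;
	iov[1].iov_len = len2;

	/* write_to_vm = 1: the device writes into the user pages (a READ) */
	bio = bio_map_user_iov(q, bdev, iov, 2, 1);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* ... submit the bio and wait for completion here ... */

	bio_unmap_user(bio);
	return 0;
}
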
@@ -707,6 +767,82 @@ void bio_unmap_user(struct bio *bio)
 	bio_put(bio);
 }
 
+static int bio_map_kern_endio(struct bio *bio, unsigned int bytes_done, int err)
+{
+	if (bio->bi_size)
+		return 1;
+
+	bio_put(bio);
+	return 0;
+}
+
+
+static struct bio *__bio_map_kern(request_queue_t *q, void *data,
+				  unsigned int len, unsigned int gfp_mask)
+{
+	unsigned long kaddr = (unsigned long)data;
+	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	unsigned long start = kaddr >> PAGE_SHIFT;
+	const int nr_pages = end - start;
+	int offset, i;
+	struct bio *bio;
+
+	bio = bio_alloc(gfp_mask, nr_pages);
+	if (!bio)
+		return ERR_PTR(-ENOMEM);
+
+	offset = offset_in_page(kaddr);
+	for (i = 0; i < nr_pages; i++) {
+		unsigned int bytes = PAGE_SIZE - offset;
+
+		if (len <= 0)
+			break;
+
+		if (bytes > len)
+			bytes = len;
+
+		if (__bio_add_page(q, bio, virt_to_page(data), bytes,
+				   offset) < bytes)
+			break;
+
+		data += bytes;
+		len -= bytes;
+		offset = 0;
+	}
+
+	bio->bi_end_io = bio_map_kern_endio;
+	return bio;
+}
+
+/**
+ * bio_map_kern - map kernel address into bio
+ * @q: the request_queue_t for the bio
+ * @data: pointer to buffer to map
+ * @len: length in bytes
+ * @gfp_mask: allocation flags for bio allocation
+ *
+ * Map the kernel address into a bio suitable for io to a block
+ * device. Returns an error pointer in case of error.
+ */
+struct bio *bio_map_kern(request_queue_t *q, void *data, unsigned int len,
+			 unsigned int gfp_mask)
+{
+	struct bio *bio;
+
+	bio = __bio_map_kern(q, data, len, gfp_mask);
+	if (IS_ERR(bio))
+		return bio;
+
+	if (bio->bi_size == len)
+		return bio;
+
+	/*
+	 * Don't support partial mappings.
+	 */
+	bio_put(bio);
+	return ERR_PTR(-EINVAL);
+}
+
 /*
  * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
  * for performing direct-IO in BIOs.
@@ -1095,6 +1231,7 @@ EXPORT_SYMBOL(bio_add_page);
 EXPORT_SYMBOL(bio_get_nr_vecs);
 EXPORT_SYMBOL(bio_map_user);
 EXPORT_SYMBOL(bio_unmap_user);
+EXPORT_SYMBOL(bio_map_kern);
 EXPORT_SYMBOL(bio_pair_release);
 EXPORT_SYMBOL(bio_split);
 EXPORT_SYMBOL(bio_split_pool);
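
A similarly minimal, hypothetical sketch for the bio_map_kern() interface introduced and exported above; only the bio_map_kern() signature comes from the patch, the surrounding caller is illustrative.

/* Hypothetical usage: wrap a kernel (lowmem) buffer in a bio. */
static struct bio *example_map_kernel_buffer(request_queue_t *q, void *buf,
					     unsigned int len)
{
	struct bio *bio;

	/*
	 * The buffer must live in the kernel direct mapping:
	 * __bio_map_kern() resolves pages with virt_to_page(),
	 * so vmalloc()ed memory would not work here.
	 */
	bio = bio_map_kern(q, buf, len, GFP_KERNEL);
	if (IS_ERR(bio))
		return bio;

	/* The caller submits the bio; bio_map_kern_endio() puts it on completion. */
	return bio;
}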