Diffstat (limited to 'fs/bio.c')
 -rw-r--r--   fs/bio.c   35
 1 file changed, 10 insertions, 25 deletions

@@ -30,7 +30,7 @@
 
 #define BIO_POOL_SIZE 256
 
-static kmem_cache_t *bio_slab __read_mostly;
+static struct kmem_cache *bio_slab __read_mostly;
 
 #define BIOVEC_NR_POOLS 6
 
@@ -44,7 +44,7 @@ mempool_t *bio_split_pool __read_mostly;
 struct biovec_slab {
 	int nr_vecs;
 	char *name;
-	kmem_cache_t *slab;
+	struct kmem_cache *slab;
 };
 
 /*
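
Both hunks above belong to the tree-wide removal of the kmem_cache_t typedef in favour of spelling out struct kmem_cache. As a minimal sketch of the pattern (the cache name and helpers below are illustrative, not taken from this file), declaring and using such a cache looks like this:

#include <linux/cache.h>
#include <linux/slab.h>

/* illustrative cache, declared with the struct spelled out */
static struct kmem_cache *example_cache __read_mostly;

static void *example_alloc_obj(void)
{
	/* kmem_cache_alloc()/kmem_cache_free() are unchanged by the rename */
	return kmem_cache_alloc(example_cache, GFP_KERNEL);
}

static void example_free_obj(void *obj)
{
	kmem_cache_free(example_cache, obj);
}
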
@@ -560,10 +560,8 @@ struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
 			break;
 		}
 
-		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
-			ret = -EINVAL;
+		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
 			break;
-		}
 
 		len -= bytes;
 	}
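
With this hunk, a short return from bio_add_pc_page() (fewer bytes accepted than requested, typically because the queue's size or segment limits were reached) no longer fails the whole copy with -EINVAL; the loop simply stops adding pages. A rough sketch of that loop shape follows; the helper name and the page array outside the hunk are hypothetical:

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

/* sketch only: add pages until the request is covered or the queue
 * refuses a full-sized add, in which case we stop rather than fail */
static void example_fill_bio(struct request_queue *q, struct bio *bio,
			     struct page **pages, unsigned int len)
{
	unsigned int i = 0;

	while (len) {
		unsigned int bytes = min_t(unsigned int, len, PAGE_SIZE);

		if (bio_add_pc_page(q, bio, pages[i++], bytes, 0) < bytes)
			break;

		len -= bytes;
	}
}
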
@@ -622,10 +620,9 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
 
 		nr_pages += end - start;
 		/*
-		 * transfer and buffer must be aligned to at least hardsector
-		 * size for now, in the future we can relax this restriction
+		 * buffer must be aligned to at least hardsector size for now
 		 */
-		if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
+		if (uaddr & queue_dma_alignment(q))
 			return ERR_PTR(-EINVAL);
 	}
 
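
The check kept here only rejects a buffer whose start address violates the queue's DMA alignment; the test on the transfer length is dropped. queue_dma_alignment() returns an alignment mask, which is why a plain bitwise AND is sufficient. A small sketch of such a test, with a made-up helper name:

#include <linux/blkdev.h>

/* returns non-zero if uaddr satisfies the queue's DMA alignment mask */
static int example_addr_is_aligned(struct request_queue *q, unsigned long uaddr)
{
	return (uaddr & queue_dma_alignment(q)) == 0;
}
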
@@ -751,7 +748,6 @@ struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
 			 int write_to_vm)
 {
 	struct bio *bio;
-	int len = 0, i;
 
 	bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);
 
@@ -766,18 +762,7 @@ struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
 	 */
 	bio_get(bio);
 
-	for (i = 0; i < iov_count; i++)
-		len += iov[i].iov_len;
-
-	if (bio->bi_size == len)
-		return bio;
-
-	/*
-	 * don't support partial mappings
-	 */
-	bio_endio(bio, bio->bi_size, 0);
-	bio_unmap_user(bio);
-	return ERR_PTR(-EINVAL);
+	return bio;
 }
 
 static void __bio_unmap_user(struct bio *bio)
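
Together with the local variables dropped in the previous hunk, this removes the "don't support partial mappings" policy from bio_map_user_iov(): the function now returns whatever was mapped, and callers that still need an all-or-nothing mapping must check for themselves. A hedged sketch of such a caller-side check, modelled on the removed lines (the surrounding function is hypothetical):

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/err.h>
#include <scsi/sg.h>

static int example_map_all_or_nothing(struct request_queue *q,
				      struct block_device *bdev,
				      struct sg_iovec *iov, int iov_count,
				      int write_to_vm)
{
	struct bio *bio;
	unsigned int len = 0;
	int i;

	for (i = 0; i < iov_count; i++)
		len += iov[i].iov_len;

	bio = bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_size != len) {
		/* partial mapping: undo it and report the error ourselves */
		bio_endio(bio, bio->bi_size, 0);
		bio_unmap_user(bio);
		return -EINVAL;
	}

	/* ... use the fully mapped bio ... */
	return 0;
}
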
@@ -931,7 +916,7 @@ void bio_set_pages_dirty(struct bio *bio)
 	}
 }
 
-static void bio_release_pages(struct bio *bio)
+void bio_release_pages(struct bio *bio)
 {
 	struct bio_vec *bvec = bio->bi_io_vec;
 	int i;
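
Dropping static here makes bio_release_pages() visible outside fs/bio.c, presumably paired with a declaration added in a shared header that this excerpt does not show. A hedged sketch of an external user, assuming such a prototype exists:

#include <linux/bio.h>

/* assumed to be declared in a shared header once the symbol is non-static */
void bio_release_pages(struct bio *bio);

/* hypothetical caller that drops the page references on a user-mapped
 * bio and then releases its own reference to the bio */
static void example_release(struct bio *bio)
{
	bio_release_pages(bio);
	bio_put(bio);
}
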
@@ -955,16 +940,16 @@ static void bio_release_pages(struct bio *bio)
  * run one bio_put() against the BIO.
  */
 
-static void bio_dirty_fn(void *data);
+static void bio_dirty_fn(struct work_struct *work);
 
-static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL);
+static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
 static DEFINE_SPINLOCK(bio_dirty_lock);
 static struct bio *bio_dirty_list;
 
 /*
  * This runs in process context
  */
-static void bio_dirty_fn(void *data)
+static void bio_dirty_fn(struct work_struct *work)
 {
 	unsigned long flags;
 	struct bio *bio;
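
This hunk follows the workqueue API change that removed the opaque void *data argument: work functions now receive the struct work_struct itself, and DECLARE_WORK() takes only the work item and the handler. bio_dirty_fn() keeps its state in the static bio_dirty_list, so it can ignore its argument; handlers that do need per-instance data typically embed the work_struct and recover the enclosing object with container_of(), roughly as in the sketch below (all names are illustrative):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_ctx {
	struct work_struct work;
	int payload;
};

/* new-style handler: the work item is the argument, per-instance data
 * is recovered from the containing structure */
static void example_work_fn(struct work_struct *work)
{
	struct example_ctx *ctx = container_of(work, struct example_ctx, work);

	printk(KERN_INFO "example payload %d\n", ctx->payload);
}

static void example_submit(struct example_ctx *ctx)
{
	INIT_WORK(&ctx->work, example_work_fn);
	schedule_work(&ctx->work);
}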