diff options
author | Mike Christie <mchristi@redhat.com> | 2016-06-05 15:31:54 -0400 |
---|---|---|
committer | Jens Axboe <axboe@fb.com> | 2016-06-07 15:41:38 -0400 |
commit | 81a75f6781deb7a3b5274b4c683e327e5cb5b883 (patch) | |
tree | c1e479b4a6621782209966d0402884ff1c82a85a /fs/btrfs/disk-io.c | |
parent | b3d3fa51990599732571bf1a6b1509f7ee887865 (diff) |
btrfs: use bio fields for op and flags
The bio REQ_OP and bi_rw rq_flag_bits are now always set up, so there is
no need to pass around the rq_flag_bits bits too. btrfs users should
access the bio instead.
Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'fs/btrfs/disk-io.c')
-rw-r--r-- | fs/btrfs/disk-io.c | 30 |
1 file changed, 12 insertions, 18 deletions
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 93278c2f6af2..e80ef6eb17e6 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -124,7 +124,6 @@ struct async_submit_bio { | |||
124 | struct list_head list; | 124 | struct list_head list; |
125 | extent_submit_bio_hook_t *submit_bio_start; | 125 | extent_submit_bio_hook_t *submit_bio_start; |
126 | extent_submit_bio_hook_t *submit_bio_done; | 126 | extent_submit_bio_hook_t *submit_bio_done; |
127 | int rw; | ||
128 | int mirror_num; | 127 | int mirror_num; |
129 | unsigned long bio_flags; | 128 | unsigned long bio_flags; |
130 | /* | 129 | /* |
@@ -797,7 +796,7 @@ static void run_one_async_start(struct btrfs_work *work) | |||
797 | int ret; | 796 | int ret; |
798 | 797 | ||
799 | async = container_of(work, struct async_submit_bio, work); | 798 | async = container_of(work, struct async_submit_bio, work); |
800 | ret = async->submit_bio_start(async->inode, async->rw, async->bio, | 799 | ret = async->submit_bio_start(async->inode, async->bio, |
801 | async->mirror_num, async->bio_flags, | 800 | async->mirror_num, async->bio_flags, |
802 | async->bio_offset); | 801 | async->bio_offset); |
803 | if (ret) | 802 | if (ret) |
@@ -830,9 +829,8 @@ static void run_one_async_done(struct btrfs_work *work) | |||
830 | return; | 829 | return; |
831 | } | 830 | } |
832 | 831 | ||
833 | async->submit_bio_done(async->inode, async->rw, async->bio, | 832 | async->submit_bio_done(async->inode, async->bio, async->mirror_num, |
834 | async->mirror_num, async->bio_flags, | 833 | async->bio_flags, async->bio_offset); |
835 | async->bio_offset); | ||
836 | } | 834 | } |
837 | 835 | ||
838 | static void run_one_async_free(struct btrfs_work *work) | 836 | static void run_one_async_free(struct btrfs_work *work) |
@@ -844,7 +842,7 @@ static void run_one_async_free(struct btrfs_work *work) | |||
844 | } | 842 | } |
845 | 843 | ||
846 | int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, | 844 | int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, |
847 | int rw, struct bio *bio, int mirror_num, | 845 | struct bio *bio, int mirror_num, |
848 | unsigned long bio_flags, | 846 | unsigned long bio_flags, |
849 | u64 bio_offset, | 847 | u64 bio_offset, |
850 | extent_submit_bio_hook_t *submit_bio_start, | 848 | extent_submit_bio_hook_t *submit_bio_start, |
@@ -857,7 +855,6 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, | |||
857 | return -ENOMEM; | 855 | return -ENOMEM; |
858 | 856 | ||
859 | async->inode = inode; | 857 | async->inode = inode; |
860 | async->rw = rw; | ||
861 | async->bio = bio; | 858 | async->bio = bio; |
862 | async->mirror_num = mirror_num; | 859 | async->mirror_num = mirror_num; |
863 | async->submit_bio_start = submit_bio_start; | 860 | async->submit_bio_start = submit_bio_start; |
@@ -903,9 +900,8 @@ static int btree_csum_one_bio(struct bio *bio) | |||
903 | return ret; | 900 | return ret; |
904 | } | 901 | } |
905 | 902 | ||
906 | static int __btree_submit_bio_start(struct inode *inode, int rw, | 903 | static int __btree_submit_bio_start(struct inode *inode, struct bio *bio, |
907 | struct bio *bio, int mirror_num, | 904 | int mirror_num, unsigned long bio_flags, |
908 | unsigned long bio_flags, | ||
909 | u64 bio_offset) | 905 | u64 bio_offset) |
910 | { | 906 | { |
911 | /* | 907 | /* |
@@ -915,7 +911,7 @@ static int __btree_submit_bio_start(struct inode *inode, int rw, | |||
915 | return btree_csum_one_bio(bio); | 911 | return btree_csum_one_bio(bio); |
916 | } | 912 | } |
917 | 913 | ||
918 | static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio, | 914 | static int __btree_submit_bio_done(struct inode *inode, struct bio *bio, |
919 | int mirror_num, unsigned long bio_flags, | 915 | int mirror_num, unsigned long bio_flags, |
920 | u64 bio_offset) | 916 | u64 bio_offset) |
921 | { | 917 | { |
@@ -925,7 +921,7 @@ static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio, | |||
925 | * when we're called for a write, we're already in the async | 921 | * when we're called for a write, we're already in the async |
926 | * submission context. Just jump into btrfs_map_bio | 922 | * submission context. Just jump into btrfs_map_bio |
927 | */ | 923 | */ |
928 | ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1); | 924 | ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 1); |
929 | if (ret) { | 925 | if (ret) { |
930 | bio->bi_error = ret; | 926 | bio->bi_error = ret; |
931 | bio_endio(bio); | 927 | bio_endio(bio); |
@@ -944,7 +940,7 @@ static int check_async_write(struct inode *inode, unsigned long bio_flags) | |||
944 | return 1; | 940 | return 1; |
945 | } | 941 | } |
946 | 942 | ||
947 | static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, | 943 | static int btree_submit_bio_hook(struct inode *inode, struct bio *bio, |
948 | int mirror_num, unsigned long bio_flags, | 944 | int mirror_num, unsigned long bio_flags, |
949 | u64 bio_offset) | 945 | u64 bio_offset) |
950 | { | 946 | { |
@@ -960,21 +956,19 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, | |||
960 | bio, BTRFS_WQ_ENDIO_METADATA); | 956 | bio, BTRFS_WQ_ENDIO_METADATA); |
961 | if (ret) | 957 | if (ret) |
962 | goto out_w_error; | 958 | goto out_w_error; |
963 | ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, | 959 | ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 0); |
964 | mirror_num, 0); | ||
965 | } else if (!async) { | 960 | } else if (!async) { |
966 | ret = btree_csum_one_bio(bio); | 961 | ret = btree_csum_one_bio(bio); |
967 | if (ret) | 962 | if (ret) |
968 | goto out_w_error; | 963 | goto out_w_error; |
969 | ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, | 964 | ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 0); |
970 | mirror_num, 0); | ||
971 | } else { | 965 | } else { |
972 | /* | 966 | /* |
973 | * kthread helpers are used to submit writes so that | 967 | * kthread helpers are used to submit writes so that |
974 | * checksumming can happen in parallel across all CPUs | 968 | * checksumming can happen in parallel across all CPUs |
975 | */ | 969 | */ |
976 | ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, | 970 | ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, |
977 | inode, rw, bio, mirror_num, 0, | 971 | inode, bio, mirror_num, 0, |
978 | bio_offset, | 972 | bio_offset, |
979 | __btree_submit_bio_start, | 973 | __btree_submit_bio_start, |
980 | __btree_submit_bio_done); | 974 | __btree_submit_bio_done); |