diff options
author | Zhao Lei <zhaolei@cn.fujitsu.com> | 2015-01-20 02:11:33 -0500 |
---|---|---|
committer | Chris Mason <clm@fb.com> | 2015-01-21 21:06:47 -0500 |
commit | 8e5cfb55d3f7dc764cd7f4c966d4c2687eaf7569 (patch) | |
tree | a5df5cec020c57973b914fd0ef3fa5891b81b9b4 | |
parent | cc7539edea6dd02536d56f0a3405b8bb7ae24168 (diff) |
Btrfs: Make raid_map array be inlined in btrfs_bio structure
It makes the code simpler and clearer; we no longer need to take care
to free bbio and raid_map together.
Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
Signed-off-by: Zhao Lei <zhaolei@cn.fujitsu.com>
Signed-off-by: Chris Mason <clm@fb.com>
-rw-r--r-- | fs/btrfs/raid56.c | 77 | ||||
-rw-r--r-- | fs/btrfs/raid56.h | 11 | ||||
-rw-r--r-- | fs/btrfs/scrub.c | 31 | ||||
-rw-r--r-- | fs/btrfs/volumes.c | 100 | ||||
-rw-r--r-- | fs/btrfs/volumes.h | 11 |
5 files changed, 105 insertions, 125 deletions
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index 8ab2a17bbba8..e301d3302edf 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c | |||
@@ -79,13 +79,6 @@ struct btrfs_raid_bio { | |||
79 | struct btrfs_fs_info *fs_info; | 79 | struct btrfs_fs_info *fs_info; |
80 | struct btrfs_bio *bbio; | 80 | struct btrfs_bio *bbio; |
81 | 81 | ||
82 | /* | ||
83 | * logical block numbers for the start of each stripe | ||
84 | * The last one or two are p/q. These are sorted, | ||
85 | * so raid_map[0] is the start of our full stripe | ||
86 | */ | ||
87 | u64 *raid_map; | ||
88 | |||
89 | /* while we're doing rmw on a stripe | 82 | /* while we're doing rmw on a stripe |
90 | * we put it into a hash table so we can | 83 | * we put it into a hash table so we can |
91 | * lock the stripe and merge more rbios | 84 | * lock the stripe and merge more rbios |
@@ -303,7 +296,7 @@ static void cache_rbio_pages(struct btrfs_raid_bio *rbio) | |||
303 | */ | 296 | */ |
304 | static int rbio_bucket(struct btrfs_raid_bio *rbio) | 297 | static int rbio_bucket(struct btrfs_raid_bio *rbio) |
305 | { | 298 | { |
306 | u64 num = rbio->raid_map[0]; | 299 | u64 num = rbio->bbio->raid_map[0]; |
307 | 300 | ||
308 | /* | 301 | /* |
309 | * we shift down quite a bit. We're using byte | 302 | * we shift down quite a bit. We're using byte |
@@ -606,8 +599,8 @@ static int rbio_can_merge(struct btrfs_raid_bio *last, | |||
606 | test_bit(RBIO_CACHE_BIT, &cur->flags)) | 599 | test_bit(RBIO_CACHE_BIT, &cur->flags)) |
607 | return 0; | 600 | return 0; |
608 | 601 | ||
609 | if (last->raid_map[0] != | 602 | if (last->bbio->raid_map[0] != |
610 | cur->raid_map[0]) | 603 | cur->bbio->raid_map[0]) |
611 | return 0; | 604 | return 0; |
612 | 605 | ||
613 | /* we can't merge with different operations */ | 606 | /* we can't merge with different operations */ |
@@ -689,7 +682,7 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio) | |||
689 | spin_lock_irqsave(&h->lock, flags); | 682 | spin_lock_irqsave(&h->lock, flags); |
690 | list_for_each_entry(cur, &h->hash_list, hash_list) { | 683 | list_for_each_entry(cur, &h->hash_list, hash_list) { |
691 | walk++; | 684 | walk++; |
692 | if (cur->raid_map[0] == rbio->raid_map[0]) { | 685 | if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) { |
693 | spin_lock(&cur->bio_list_lock); | 686 | spin_lock(&cur->bio_list_lock); |
694 | 687 | ||
695 | /* can we steal this cached rbio's pages? */ | 688 | /* can we steal this cached rbio's pages? */ |
@@ -842,18 +835,16 @@ done_nolock: | |||
842 | } | 835 | } |
843 | 836 | ||
844 | static inline void | 837 | static inline void |
845 | __free_bbio_and_raid_map(struct btrfs_bio *bbio, u64 *raid_map, int need) | 838 | __free_bbio(struct btrfs_bio *bbio, int need) |
846 | { | 839 | { |
847 | if (need) { | 840 | if (need) |
848 | kfree(raid_map); | ||
849 | kfree(bbio); | 841 | kfree(bbio); |
850 | } | ||
851 | } | 842 | } |
852 | 843 | ||
853 | static inline void free_bbio_and_raid_map(struct btrfs_raid_bio *rbio) | 844 | static inline void free_bbio(struct btrfs_raid_bio *rbio) |
854 | { | 845 | { |
855 | __free_bbio_and_raid_map(rbio->bbio, rbio->raid_map, | 846 | __free_bbio(rbio->bbio, |
856 | !test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags)); | 847 | !test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags)); |
857 | } | 848 | } |
858 | 849 | ||
859 | static void __free_raid_bio(struct btrfs_raid_bio *rbio) | 850 | static void __free_raid_bio(struct btrfs_raid_bio *rbio) |
@@ -875,7 +866,7 @@ static void __free_raid_bio(struct btrfs_raid_bio *rbio) | |||
875 | } | 866 | } |
876 | } | 867 | } |
877 | 868 | ||
878 | free_bbio_and_raid_map(rbio); | 869 | free_bbio(rbio); |
879 | 870 | ||
880 | kfree(rbio); | 871 | kfree(rbio); |
881 | } | 872 | } |
@@ -985,8 +976,7 @@ static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes) | |||
985 | * this does not allocate any pages for rbio->pages. | 976 | * this does not allocate any pages for rbio->pages. |
986 | */ | 977 | */ |
987 | static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root, | 978 | static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root, |
988 | struct btrfs_bio *bbio, u64 *raid_map, | 979 | struct btrfs_bio *bbio, u64 stripe_len) |
989 | u64 stripe_len) | ||
990 | { | 980 | { |
991 | struct btrfs_raid_bio *rbio; | 981 | struct btrfs_raid_bio *rbio; |
992 | int nr_data = 0; | 982 | int nr_data = 0; |
@@ -1007,7 +997,6 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root, | |||
1007 | INIT_LIST_HEAD(&rbio->stripe_cache); | 997 | INIT_LIST_HEAD(&rbio->stripe_cache); |
1008 | INIT_LIST_HEAD(&rbio->hash_list); | 998 | INIT_LIST_HEAD(&rbio->hash_list); |
1009 | rbio->bbio = bbio; | 999 | rbio->bbio = bbio; |
1010 | rbio->raid_map = raid_map; | ||
1011 | rbio->fs_info = root->fs_info; | 1000 | rbio->fs_info = root->fs_info; |
1012 | rbio->stripe_len = stripe_len; | 1001 | rbio->stripe_len = stripe_len; |
1013 | rbio->nr_pages = num_pages; | 1002 | rbio->nr_pages = num_pages; |
@@ -1028,7 +1017,7 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root, | |||
1028 | rbio->bio_pages = p + sizeof(struct page *) * num_pages; | 1017 | rbio->bio_pages = p + sizeof(struct page *) * num_pages; |
1029 | rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2; | 1018 | rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2; |
1030 | 1019 | ||
1031 | if (raid_map[real_stripes - 1] == RAID6_Q_STRIPE) | 1020 | if (bbio->raid_map[real_stripes - 1] == RAID6_Q_STRIPE) |
1032 | nr_data = real_stripes - 2; | 1021 | nr_data = real_stripes - 2; |
1033 | else | 1022 | else |
1034 | nr_data = real_stripes - 1; | 1023 | nr_data = real_stripes - 1; |
@@ -1182,7 +1171,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio) | |||
1182 | spin_lock_irq(&rbio->bio_list_lock); | 1171 | spin_lock_irq(&rbio->bio_list_lock); |
1183 | bio_list_for_each(bio, &rbio->bio_list) { | 1172 | bio_list_for_each(bio, &rbio->bio_list) { |
1184 | start = (u64)bio->bi_iter.bi_sector << 9; | 1173 | start = (u64)bio->bi_iter.bi_sector << 9; |
1185 | stripe_offset = start - rbio->raid_map[0]; | 1174 | stripe_offset = start - rbio->bbio->raid_map[0]; |
1186 | page_index = stripe_offset >> PAGE_CACHE_SHIFT; | 1175 | page_index = stripe_offset >> PAGE_CACHE_SHIFT; |
1187 | 1176 | ||
1188 | for (i = 0; i < bio->bi_vcnt; i++) { | 1177 | for (i = 0; i < bio->bi_vcnt; i++) { |
@@ -1402,7 +1391,7 @@ static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio, | |||
1402 | logical <<= 9; | 1391 | logical <<= 9; |
1403 | 1392 | ||
1404 | for (i = 0; i < rbio->nr_data; i++) { | 1393 | for (i = 0; i < rbio->nr_data; i++) { |
1405 | stripe_start = rbio->raid_map[i]; | 1394 | stripe_start = rbio->bbio->raid_map[i]; |
1406 | if (logical >= stripe_start && | 1395 | if (logical >= stripe_start && |
1407 | logical < stripe_start + rbio->stripe_len) { | 1396 | logical < stripe_start + rbio->stripe_len) { |
1408 | return i; | 1397 | return i; |
@@ -1776,17 +1765,16 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule) | |||
1776 | * our main entry point for writes from the rest of the FS. | 1765 | * our main entry point for writes from the rest of the FS. |
1777 | */ | 1766 | */ |
1778 | int raid56_parity_write(struct btrfs_root *root, struct bio *bio, | 1767 | int raid56_parity_write(struct btrfs_root *root, struct bio *bio, |
1779 | struct btrfs_bio *bbio, u64 *raid_map, | 1768 | struct btrfs_bio *bbio, u64 stripe_len) |
1780 | u64 stripe_len) | ||
1781 | { | 1769 | { |
1782 | struct btrfs_raid_bio *rbio; | 1770 | struct btrfs_raid_bio *rbio; |
1783 | struct btrfs_plug_cb *plug = NULL; | 1771 | struct btrfs_plug_cb *plug = NULL; |
1784 | struct blk_plug_cb *cb; | 1772 | struct blk_plug_cb *cb; |
1785 | int ret; | 1773 | int ret; |
1786 | 1774 | ||
1787 | rbio = alloc_rbio(root, bbio, raid_map, stripe_len); | 1775 | rbio = alloc_rbio(root, bbio, stripe_len); |
1788 | if (IS_ERR(rbio)) { | 1776 | if (IS_ERR(rbio)) { |
1789 | __free_bbio_and_raid_map(bbio, raid_map, 1); | 1777 | __free_bbio(bbio, 1); |
1790 | return PTR_ERR(rbio); | 1778 | return PTR_ERR(rbio); |
1791 | } | 1779 | } |
1792 | bio_list_add(&rbio->bio_list, bio); | 1780 | bio_list_add(&rbio->bio_list, bio); |
@@ -1885,7 +1873,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) | |||
1885 | } | 1873 | } |
1886 | 1874 | ||
1887 | /* all raid6 handling here */ | 1875 | /* all raid6 handling here */ |
1888 | if (rbio->raid_map[rbio->real_stripes - 1] == | 1876 | if (rbio->bbio->raid_map[rbio->real_stripes - 1] == |
1889 | RAID6_Q_STRIPE) { | 1877 | RAID6_Q_STRIPE) { |
1890 | 1878 | ||
1891 | /* | 1879 | /* |
@@ -1922,8 +1910,9 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) | |||
1922 | * here due to a crc mismatch and we can't give them the | 1910 | * here due to a crc mismatch and we can't give them the |
1923 | * data they want | 1911 | * data they want |
1924 | */ | 1912 | */ |
1925 | if (rbio->raid_map[failb] == RAID6_Q_STRIPE) { | 1913 | if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) { |
1926 | if (rbio->raid_map[faila] == RAID5_P_STRIPE) { | 1914 | if (rbio->bbio->raid_map[faila] == |
1915 | RAID5_P_STRIPE) { | ||
1927 | err = -EIO; | 1916 | err = -EIO; |
1928 | goto cleanup; | 1917 | goto cleanup; |
1929 | } | 1918 | } |
@@ -1934,7 +1923,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) | |||
1934 | goto pstripe; | 1923 | goto pstripe; |
1935 | } | 1924 | } |
1936 | 1925 | ||
1937 | if (rbio->raid_map[failb] == RAID5_P_STRIPE) { | 1926 | if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) { |
1938 | raid6_datap_recov(rbio->real_stripes, | 1927 | raid6_datap_recov(rbio->real_stripes, |
1939 | PAGE_SIZE, faila, pointers); | 1928 | PAGE_SIZE, faila, pointers); |
1940 | } else { | 1929 | } else { |
@@ -2156,15 +2145,15 @@ cleanup: | |||
2156 | * of the drive. | 2145 | * of the drive. |
2157 | */ | 2146 | */ |
2158 | int raid56_parity_recover(struct btrfs_root *root, struct bio *bio, | 2147 | int raid56_parity_recover(struct btrfs_root *root, struct bio *bio, |
2159 | struct btrfs_bio *bbio, u64 *raid_map, | 2148 | struct btrfs_bio *bbio, u64 stripe_len, |
2160 | u64 stripe_len, int mirror_num, int generic_io) | 2149 | int mirror_num, int generic_io) |
2161 | { | 2150 | { |
2162 | struct btrfs_raid_bio *rbio; | 2151 | struct btrfs_raid_bio *rbio; |
2163 | int ret; | 2152 | int ret; |
2164 | 2153 | ||
2165 | rbio = alloc_rbio(root, bbio, raid_map, stripe_len); | 2154 | rbio = alloc_rbio(root, bbio, stripe_len); |
2166 | if (IS_ERR(rbio)) { | 2155 | if (IS_ERR(rbio)) { |
2167 | __free_bbio_and_raid_map(bbio, raid_map, generic_io); | 2156 | __free_bbio(bbio, generic_io); |
2168 | return PTR_ERR(rbio); | 2157 | return PTR_ERR(rbio); |
2169 | } | 2158 | } |
2170 | 2159 | ||
@@ -2175,7 +2164,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio, | |||
2175 | rbio->faila = find_logical_bio_stripe(rbio, bio); | 2164 | rbio->faila = find_logical_bio_stripe(rbio, bio); |
2176 | if (rbio->faila == -1) { | 2165 | if (rbio->faila == -1) { |
2177 | BUG(); | 2166 | BUG(); |
2178 | __free_bbio_and_raid_map(bbio, raid_map, generic_io); | 2167 | __free_bbio(bbio, generic_io); |
2179 | kfree(rbio); | 2168 | kfree(rbio); |
2180 | return -EIO; | 2169 | return -EIO; |
2181 | } | 2170 | } |
@@ -2240,14 +2229,14 @@ static void read_rebuild_work(struct btrfs_work *work) | |||
2240 | 2229 | ||
2241 | struct btrfs_raid_bio * | 2230 | struct btrfs_raid_bio * |
2242 | raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio, | 2231 | raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio, |
2243 | struct btrfs_bio *bbio, u64 *raid_map, | 2232 | struct btrfs_bio *bbio, u64 stripe_len, |
2244 | u64 stripe_len, struct btrfs_device *scrub_dev, | 2233 | struct btrfs_device *scrub_dev, |
2245 | unsigned long *dbitmap, int stripe_nsectors) | 2234 | unsigned long *dbitmap, int stripe_nsectors) |
2246 | { | 2235 | { |
2247 | struct btrfs_raid_bio *rbio; | 2236 | struct btrfs_raid_bio *rbio; |
2248 | int i; | 2237 | int i; |
2249 | 2238 | ||
2250 | rbio = alloc_rbio(root, bbio, raid_map, stripe_len); | 2239 | rbio = alloc_rbio(root, bbio, stripe_len); |
2251 | if (IS_ERR(rbio)) | 2240 | if (IS_ERR(rbio)) |
2252 | return NULL; | 2241 | return NULL; |
2253 | bio_list_add(&rbio->bio_list, bio); | 2242 | bio_list_add(&rbio->bio_list, bio); |
@@ -2279,10 +2268,10 @@ void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio, | |||
2279 | int stripe_offset; | 2268 | int stripe_offset; |
2280 | int index; | 2269 | int index; |
2281 | 2270 | ||
2282 | ASSERT(logical >= rbio->raid_map[0]); | 2271 | ASSERT(logical >= rbio->bbio->raid_map[0]); |
2283 | ASSERT(logical + PAGE_SIZE <= rbio->raid_map[0] + | 2272 | ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] + |
2284 | rbio->stripe_len * rbio->nr_data); | 2273 | rbio->stripe_len * rbio->nr_data); |
2285 | stripe_offset = (int)(logical - rbio->raid_map[0]); | 2274 | stripe_offset = (int)(logical - rbio->bbio->raid_map[0]); |
2286 | index = stripe_offset >> PAGE_CACHE_SHIFT; | 2275 | index = stripe_offset >> PAGE_CACHE_SHIFT; |
2287 | rbio->bio_pages[index] = page; | 2276 | rbio->bio_pages[index] = page; |
2288 | } | 2277 | } |
diff --git a/fs/btrfs/raid56.h b/fs/btrfs/raid56.h index 31d4a157b5e3..2b5d7977d83b 100644 --- a/fs/btrfs/raid56.h +++ b/fs/btrfs/raid56.h | |||
@@ -43,16 +43,15 @@ struct btrfs_raid_bio; | |||
43 | struct btrfs_device; | 43 | struct btrfs_device; |
44 | 44 | ||
45 | int raid56_parity_recover(struct btrfs_root *root, struct bio *bio, | 45 | int raid56_parity_recover(struct btrfs_root *root, struct bio *bio, |
46 | struct btrfs_bio *bbio, u64 *raid_map, | 46 | struct btrfs_bio *bbio, u64 stripe_len, |
47 | u64 stripe_len, int mirror_num, int generic_io); | 47 | int mirror_num, int generic_io); |
48 | int raid56_parity_write(struct btrfs_root *root, struct bio *bio, | 48 | int raid56_parity_write(struct btrfs_root *root, struct bio *bio, |
49 | struct btrfs_bio *bbio, u64 *raid_map, | 49 | struct btrfs_bio *bbio, u64 stripe_len); |
50 | u64 stripe_len); | ||
51 | 50 | ||
52 | struct btrfs_raid_bio * | 51 | struct btrfs_raid_bio * |
53 | raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio, | 52 | raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio, |
54 | struct btrfs_bio *bbio, u64 *raid_map, | 53 | struct btrfs_bio *bbio, u64 stripe_len, |
55 | u64 stripe_len, struct btrfs_device *scrub_dev, | 54 | struct btrfs_device *scrub_dev, |
56 | unsigned long *dbitmap, int stripe_nsectors); | 55 | unsigned long *dbitmap, int stripe_nsectors); |
57 | void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio, | 56 | void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio, |
58 | struct page *page, u64 logical); | 57 | struct page *page, u64 logical); |
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 673e32be88fa..9d07c981ec82 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c | |||
@@ -66,7 +66,6 @@ struct scrub_ctx; | |||
66 | struct scrub_recover { | 66 | struct scrub_recover { |
67 | atomic_t refs; | 67 | atomic_t refs; |
68 | struct btrfs_bio *bbio; | 68 | struct btrfs_bio *bbio; |
69 | u64 *raid_map; | ||
70 | u64 map_length; | 69 | u64 map_length; |
71 | }; | 70 | }; |
72 | 71 | ||
@@ -857,7 +856,6 @@ static inline void scrub_put_recover(struct scrub_recover *recover) | |||
857 | { | 856 | { |
858 | if (atomic_dec_and_test(&recover->refs)) { | 857 | if (atomic_dec_and_test(&recover->refs)) { |
859 | kfree(recover->bbio); | 858 | kfree(recover->bbio); |
860 | kfree(recover->raid_map); | ||
861 | kfree(recover); | 859 | kfree(recover); |
862 | } | 860 | } |
863 | } | 861 | } |
@@ -1296,12 +1294,12 @@ out: | |||
1296 | return 0; | 1294 | return 0; |
1297 | } | 1295 | } |
1298 | 1296 | ||
1299 | static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio, u64 *raid_map) | 1297 | static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio) |
1300 | { | 1298 | { |
1301 | if (raid_map) { | 1299 | if (bbio->raid_map) { |
1302 | int real_stripes = bbio->num_stripes - bbio->num_tgtdevs; | 1300 | int real_stripes = bbio->num_stripes - bbio->num_tgtdevs; |
1303 | 1301 | ||
1304 | if (raid_map[real_stripes - 1] == RAID6_Q_STRIPE) | 1302 | if (bbio->raid_map[real_stripes - 1] == RAID6_Q_STRIPE) |
1305 | return 3; | 1303 | return 3; |
1306 | else | 1304 | else |
1307 | return 2; | 1305 | return 2; |
@@ -1347,7 +1345,6 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx, | |||
1347 | { | 1345 | { |
1348 | struct scrub_recover *recover; | 1346 | struct scrub_recover *recover; |
1349 | struct btrfs_bio *bbio; | 1347 | struct btrfs_bio *bbio; |
1350 | u64 *raid_map; | ||
1351 | u64 sublen; | 1348 | u64 sublen; |
1352 | u64 mapped_length; | 1349 | u64 mapped_length; |
1353 | u64 stripe_offset; | 1350 | u64 stripe_offset; |
@@ -1368,35 +1365,31 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx, | |||
1368 | sublen = min_t(u64, length, PAGE_SIZE); | 1365 | sublen = min_t(u64, length, PAGE_SIZE); |
1369 | mapped_length = sublen; | 1366 | mapped_length = sublen; |
1370 | bbio = NULL; | 1367 | bbio = NULL; |
1371 | raid_map = NULL; | ||
1372 | 1368 | ||
1373 | /* | 1369 | /* |
1374 | * with a length of PAGE_SIZE, each returned stripe | 1370 | * with a length of PAGE_SIZE, each returned stripe |
1375 | * represents one mirror | 1371 | * represents one mirror |
1376 | */ | 1372 | */ |
1377 | ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical, | 1373 | ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical, |
1378 | &mapped_length, &bbio, 0, &raid_map); | 1374 | &mapped_length, &bbio, 0, 1); |
1379 | if (ret || !bbio || mapped_length < sublen) { | 1375 | if (ret || !bbio || mapped_length < sublen) { |
1380 | kfree(bbio); | 1376 | kfree(bbio); |
1381 | kfree(raid_map); | ||
1382 | return -EIO; | 1377 | return -EIO; |
1383 | } | 1378 | } |
1384 | 1379 | ||
1385 | recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS); | 1380 | recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS); |
1386 | if (!recover) { | 1381 | if (!recover) { |
1387 | kfree(bbio); | 1382 | kfree(bbio); |
1388 | kfree(raid_map); | ||
1389 | return -ENOMEM; | 1383 | return -ENOMEM; |
1390 | } | 1384 | } |
1391 | 1385 | ||
1392 | atomic_set(&recover->refs, 1); | 1386 | atomic_set(&recover->refs, 1); |
1393 | recover->bbio = bbio; | 1387 | recover->bbio = bbio; |
1394 | recover->raid_map = raid_map; | ||
1395 | recover->map_length = mapped_length; | 1388 | recover->map_length = mapped_length; |
1396 | 1389 | ||
1397 | BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO); | 1390 | BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO); |
1398 | 1391 | ||
1399 | nmirrors = scrub_nr_raid_mirrors(bbio, raid_map); | 1392 | nmirrors = scrub_nr_raid_mirrors(bbio); |
1400 | for (mirror_index = 0; mirror_index < nmirrors; | 1393 | for (mirror_index = 0; mirror_index < nmirrors; |
1401 | mirror_index++) { | 1394 | mirror_index++) { |
1402 | struct scrub_block *sblock; | 1395 | struct scrub_block *sblock; |
@@ -1420,7 +1413,7 @@ leave_nomem: | |||
1420 | sblock->pagev[page_index] = page; | 1413 | sblock->pagev[page_index] = page; |
1421 | page->logical = logical; | 1414 | page->logical = logical; |
1422 | 1415 | ||
1423 | scrub_stripe_index_and_offset(logical, raid_map, | 1416 | scrub_stripe_index_and_offset(logical, bbio->raid_map, |
1424 | mapped_length, | 1417 | mapped_length, |
1425 | bbio->num_stripes - | 1418 | bbio->num_stripes - |
1426 | bbio->num_tgtdevs, | 1419 | bbio->num_tgtdevs, |
@@ -1469,7 +1462,7 @@ static void scrub_bio_wait_endio(struct bio *bio, int error) | |||
1469 | 1462 | ||
1470 | static inline int scrub_is_page_on_raid56(struct scrub_page *page) | 1463 | static inline int scrub_is_page_on_raid56(struct scrub_page *page) |
1471 | { | 1464 | { |
1472 | return page->recover && page->recover->raid_map; | 1465 | return page->recover && page->recover->bbio->raid_map; |
1473 | } | 1466 | } |
1474 | 1467 | ||
1475 | static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info, | 1468 | static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info, |
@@ -1486,7 +1479,6 @@ static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info, | |||
1486 | bio->bi_end_io = scrub_bio_wait_endio; | 1479 | bio->bi_end_io = scrub_bio_wait_endio; |
1487 | 1480 | ||
1488 | ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio, | 1481 | ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio, |
1489 | page->recover->raid_map, | ||
1490 | page->recover->map_length, | 1482 | page->recover->map_length, |
1491 | page->mirror_num, 0); | 1483 | page->mirror_num, 0); |
1492 | if (ret) | 1484 | if (ret) |
@@ -2716,7 +2708,6 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity) | |||
2716 | struct btrfs_raid_bio *rbio; | 2708 | struct btrfs_raid_bio *rbio; |
2717 | struct scrub_page *spage; | 2709 | struct scrub_page *spage; |
2718 | struct btrfs_bio *bbio = NULL; | 2710 | struct btrfs_bio *bbio = NULL; |
2719 | u64 *raid_map = NULL; | ||
2720 | u64 length; | 2711 | u64 length; |
2721 | int ret; | 2712 | int ret; |
2722 | 2713 | ||
@@ -2727,8 +2718,8 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity) | |||
2727 | length = sparity->logic_end - sparity->logic_start + 1; | 2718 | length = sparity->logic_end - sparity->logic_start + 1; |
2728 | ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE, | 2719 | ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE, |
2729 | sparity->logic_start, | 2720 | sparity->logic_start, |
2730 | &length, &bbio, 0, &raid_map); | 2721 | &length, &bbio, 0, 1); |
2731 | if (ret || !bbio || !raid_map) | 2722 | if (ret || !bbio || !bbio->raid_map) |
2732 | goto bbio_out; | 2723 | goto bbio_out; |
2733 | 2724 | ||
2734 | bio = btrfs_io_bio_alloc(GFP_NOFS, 0); | 2725 | bio = btrfs_io_bio_alloc(GFP_NOFS, 0); |
@@ -2740,8 +2731,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity) | |||
2740 | bio->bi_end_io = scrub_parity_bio_endio; | 2731 | bio->bi_end_io = scrub_parity_bio_endio; |
2741 | 2732 | ||
2742 | rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio, | 2733 | rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio, |
2743 | raid_map, length, | 2734 | length, sparity->scrub_dev, |
2744 | sparity->scrub_dev, | ||
2745 | sparity->dbitmap, | 2735 | sparity->dbitmap, |
2746 | sparity->nsectors); | 2736 | sparity->nsectors); |
2747 | if (!rbio) | 2737 | if (!rbio) |
@@ -2759,7 +2749,6 @@ rbio_out: | |||
2759 | bio_put(bio); | 2749 | bio_put(bio); |
2760 | bbio_out: | 2750 | bbio_out: |
2761 | kfree(bbio); | 2751 | kfree(bbio); |
2762 | kfree(raid_map); | ||
2763 | bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap, | 2752 | bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap, |
2764 | sparity->nsectors); | 2753 | sparity->nsectors); |
2765 | spin_lock(&sctx->stat_lock); | 2754 | spin_lock(&sctx->stat_lock); |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 711ce38543a1..c0f1d524c371 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
@@ -4876,8 +4876,7 @@ static inline int parity_smaller(u64 a, u64 b) | |||
4876 | } | 4876 | } |
4877 | 4877 | ||
4878 | /* Bubble-sort the stripe set to put the parity/syndrome stripes last */ | 4878 | /* Bubble-sort the stripe set to put the parity/syndrome stripes last */ |
4879 | static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map, | 4879 | static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes) |
4880 | int num_stripes) | ||
4881 | { | 4880 | { |
4882 | struct btrfs_bio_stripe s; | 4881 | struct btrfs_bio_stripe s; |
4883 | int i; | 4882 | int i; |
@@ -4887,13 +4886,14 @@ static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map, | |||
4887 | while (again) { | 4886 | while (again) { |
4888 | again = 0; | 4887 | again = 0; |
4889 | for (i = 0; i < num_stripes - 1; i++) { | 4888 | for (i = 0; i < num_stripes - 1; i++) { |
4890 | if (parity_smaller(raid_map[i], raid_map[i+1])) { | 4889 | if (parity_smaller(bbio->raid_map[i], |
4890 | bbio->raid_map[i+1])) { | ||
4891 | s = bbio->stripes[i]; | 4891 | s = bbio->stripes[i]; |
4892 | l = raid_map[i]; | 4892 | l = bbio->raid_map[i]; |
4893 | bbio->stripes[i] = bbio->stripes[i+1]; | 4893 | bbio->stripes[i] = bbio->stripes[i+1]; |
4894 | raid_map[i] = raid_map[i+1]; | 4894 | bbio->raid_map[i] = bbio->raid_map[i+1]; |
4895 | bbio->stripes[i+1] = s; | 4895 | bbio->stripes[i+1] = s; |
4896 | raid_map[i+1] = l; | 4896 | bbio->raid_map[i+1] = l; |
4897 | 4897 | ||
4898 | again = 1; | 4898 | again = 1; |
4899 | } | 4899 | } |
@@ -4904,7 +4904,7 @@ static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map, | |||
4904 | static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, | 4904 | static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, |
4905 | u64 logical, u64 *length, | 4905 | u64 logical, u64 *length, |
4906 | struct btrfs_bio **bbio_ret, | 4906 | struct btrfs_bio **bbio_ret, |
4907 | int mirror_num, u64 **raid_map_ret) | 4907 | int mirror_num, int need_raid_map) |
4908 | { | 4908 | { |
4909 | struct extent_map *em; | 4909 | struct extent_map *em; |
4910 | struct map_lookup *map; | 4910 | struct map_lookup *map; |
@@ -4917,7 +4917,6 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, | |||
4917 | u64 stripe_nr_orig; | 4917 | u64 stripe_nr_orig; |
4918 | u64 stripe_nr_end; | 4918 | u64 stripe_nr_end; |
4919 | u64 stripe_len; | 4919 | u64 stripe_len; |
4920 | u64 *raid_map = NULL; | ||
4921 | int stripe_index; | 4920 | int stripe_index; |
4922 | int i; | 4921 | int i; |
4923 | int ret = 0; | 4922 | int ret = 0; |
@@ -5039,7 +5038,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, | |||
5039 | u64 physical_of_found = 0; | 5038 | u64 physical_of_found = 0; |
5040 | 5039 | ||
5041 | ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, | 5040 | ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, |
5042 | logical, &tmp_length, &tmp_bbio, 0, NULL); | 5041 | logical, &tmp_length, &tmp_bbio, 0, 0); |
5043 | if (ret) { | 5042 | if (ret) { |
5044 | WARN_ON(tmp_bbio != NULL); | 5043 | WARN_ON(tmp_bbio != NULL); |
5045 | goto out; | 5044 | goto out; |
@@ -5160,13 +5159,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, | |||
5160 | 5159 | ||
5161 | } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | | 5160 | } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | |
5162 | BTRFS_BLOCK_GROUP_RAID6)) { | 5161 | BTRFS_BLOCK_GROUP_RAID6)) { |
5163 | u64 tmp; | 5162 | if (need_raid_map && |
5164 | |||
5165 | if (raid_map_ret && | ||
5166 | ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) || | 5163 | ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) || |
5167 | mirror_num > 1)) { | 5164 | mirror_num > 1)) { |
5168 | int i, rot; | ||
5169 | |||
5170 | /* push stripe_nr back to the start of the full stripe */ | 5165 | /* push stripe_nr back to the start of the full stripe */ |
5171 | stripe_nr = raid56_full_stripe_start; | 5166 | stripe_nr = raid56_full_stripe_start; |
5172 | do_div(stripe_nr, stripe_len * nr_data_stripes(map)); | 5167 | do_div(stripe_nr, stripe_len * nr_data_stripes(map)); |
@@ -5175,32 +5170,12 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, | |||
5175 | num_stripes = map->num_stripes; | 5170 | num_stripes = map->num_stripes; |
5176 | max_errors = nr_parity_stripes(map); | 5171 | max_errors = nr_parity_stripes(map); |
5177 | 5172 | ||
5178 | raid_map = kmalloc_array(num_stripes, sizeof(u64), | ||
5179 | GFP_NOFS); | ||
5180 | if (!raid_map) { | ||
5181 | ret = -ENOMEM; | ||
5182 | goto out; | ||
5183 | } | ||
5184 | |||
5185 | /* Work out the disk rotation on this stripe-set */ | ||
5186 | tmp = stripe_nr; | ||
5187 | rot = do_div(tmp, num_stripes); | ||
5188 | |||
5189 | /* Fill in the logical address of each stripe */ | ||
5190 | tmp = stripe_nr * nr_data_stripes(map); | ||
5191 | for (i = 0; i < nr_data_stripes(map); i++) | ||
5192 | raid_map[(i+rot) % num_stripes] = | ||
5193 | em->start + (tmp + i) * map->stripe_len; | ||
5194 | |||
5195 | raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE; | ||
5196 | if (map->type & BTRFS_BLOCK_GROUP_RAID6) | ||
5197 | raid_map[(i+rot+1) % num_stripes] = | ||
5198 | RAID6_Q_STRIPE; | ||
5199 | |||
5200 | *length = map->stripe_len; | 5173 | *length = map->stripe_len; |
5201 | stripe_index = 0; | 5174 | stripe_index = 0; |
5202 | stripe_offset = 0; | 5175 | stripe_offset = 0; |
5203 | } else { | 5176 | } else { |
5177 | u64 tmp; | ||
5178 | |||
5204 | /* | 5179 | /* |
5205 | * Mirror #0 or #1 means the original data block. | 5180 | * Mirror #0 or #1 means the original data block. |
5206 | * Mirror #2 is RAID5 parity block. | 5181 | * Mirror #2 is RAID5 parity block. |
@@ -5241,7 +5216,6 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, | |||
5241 | bbio = kzalloc(btrfs_bio_size(num_alloc_stripes, tgtdev_indexes), | 5216 | bbio = kzalloc(btrfs_bio_size(num_alloc_stripes, tgtdev_indexes), |
5242 | GFP_NOFS); | 5217 | GFP_NOFS); |
5243 | if (!bbio) { | 5218 | if (!bbio) { |
5244 | kfree(raid_map); | ||
5245 | ret = -ENOMEM; | 5219 | ret = -ENOMEM; |
5246 | goto out; | 5220 | goto out; |
5247 | } | 5221 | } |
@@ -5249,6 +5223,34 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, | |||
5249 | if (dev_replace_is_ongoing) | 5223 | if (dev_replace_is_ongoing) |
5250 | bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes); | 5224 | bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes); |
5251 | 5225 | ||
5226 | /* build raid_map */ | ||
5227 | if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) && | ||
5228 | need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) || | ||
5229 | mirror_num > 1)) { | ||
5230 | u64 tmp; | ||
5231 | int i, rot; | ||
5232 | |||
5233 | bbio->raid_map = (u64 *)((void *)bbio->stripes + | ||
5234 | sizeof(struct btrfs_bio_stripe) * | ||
5235 | num_alloc_stripes + | ||
5236 | sizeof(int) * tgtdev_indexes); | ||
5237 | |||
5238 | /* Work out the disk rotation on this stripe-set */ | ||
5239 | tmp = stripe_nr; | ||
5240 | rot = do_div(tmp, num_stripes); | ||
5241 | |||
5242 | /* Fill in the logical address of each stripe */ | ||
5243 | tmp = stripe_nr * nr_data_stripes(map); | ||
5244 | for (i = 0; i < nr_data_stripes(map); i++) | ||
5245 | bbio->raid_map[(i+rot) % num_stripes] = | ||
5246 | em->start + (tmp + i) * map->stripe_len; | ||
5247 | |||
5248 | bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE; | ||
5249 | if (map->type & BTRFS_BLOCK_GROUP_RAID6) | ||
5250 | bbio->raid_map[(i+rot+1) % num_stripes] = | ||
5251 | RAID6_Q_STRIPE; | ||
5252 | } | ||
5253 | |||
5252 | if (rw & REQ_DISCARD) { | 5254 | if (rw & REQ_DISCARD) { |
5253 | int factor = 0; | 5255 | int factor = 0; |
5254 | int sub_stripes = 0; | 5256 | int sub_stripes = 0; |
@@ -5332,8 +5334,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, | |||
5332 | if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) | 5334 | if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) |
5333 | max_errors = btrfs_chunk_max_errors(map); | 5335 | max_errors = btrfs_chunk_max_errors(map); |
5334 | 5336 | ||
5335 | if (raid_map) | 5337 | if (bbio->raid_map) |
5336 | sort_parity_stripes(bbio, raid_map, num_stripes); | 5338 | sort_parity_stripes(bbio, num_stripes); |
5337 | 5339 | ||
5338 | tgtdev_indexes = 0; | 5340 | tgtdev_indexes = 0; |
5339 | if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) && | 5341 | if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) && |
@@ -5438,9 +5440,6 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, | |||
5438 | bbio->stripes[0].physical = physical_to_patch_in_first_stripe; | 5440 | bbio->stripes[0].physical = physical_to_patch_in_first_stripe; |
5439 | bbio->mirror_num = map->num_stripes + 1; | 5441 | bbio->mirror_num = map->num_stripes + 1; |
5440 | } | 5442 | } |
5441 | |||
5442 | if (raid_map_ret) | ||
5443 | *raid_map_ret = raid_map; | ||
5444 | out: | 5443 | out: |
5445 | if (dev_replace_is_ongoing) | 5444 | if (dev_replace_is_ongoing) |
5446 | btrfs_dev_replace_unlock(dev_replace); | 5445 | btrfs_dev_replace_unlock(dev_replace); |
@@ -5453,17 +5452,17 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, | |||
5453 | struct btrfs_bio **bbio_ret, int mirror_num) | 5452 | struct btrfs_bio **bbio_ret, int mirror_num) |
5454 | { | 5453 | { |
5455 | return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret, | 5454 | return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret, |
5456 | mirror_num, NULL); | 5455 | mirror_num, 0); |
5457 | } | 5456 | } |
5458 | 5457 | ||
5459 | /* For Scrub/replace */ | 5458 | /* For Scrub/replace */ |
5460 | int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw, | 5459 | int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw, |
5461 | u64 logical, u64 *length, | 5460 | u64 logical, u64 *length, |
5462 | struct btrfs_bio **bbio_ret, int mirror_num, | 5461 | struct btrfs_bio **bbio_ret, int mirror_num, |
5463 | u64 **raid_map_ret) | 5462 | int need_raid_map) |
5464 | { | 5463 | { |
5465 | return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret, | 5464 | return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret, |
5466 | mirror_num, raid_map_ret); | 5465 | mirror_num, need_raid_map); |
5467 | } | 5466 | } |
5468 | 5467 | ||
5469 | int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, | 5468 | int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, |
@@ -5802,7 +5801,6 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, | |||
5802 | u64 logical = (u64)bio->bi_iter.bi_sector << 9; | 5801 | u64 logical = (u64)bio->bi_iter.bi_sector << 9; |
5803 | u64 length = 0; | 5802 | u64 length = 0; |
5804 | u64 map_length; | 5803 | u64 map_length; |
5805 | u64 *raid_map = NULL; | ||
5806 | int ret; | 5804 | int ret; |
5807 | int dev_nr = 0; | 5805 | int dev_nr = 0; |
5808 | int total_devs = 1; | 5806 | int total_devs = 1; |
@@ -5813,7 +5811,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, | |||
5813 | 5811 | ||
5814 | btrfs_bio_counter_inc_blocked(root->fs_info); | 5812 | btrfs_bio_counter_inc_blocked(root->fs_info); |
5815 | ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio, | 5813 | ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio, |
5816 | mirror_num, &raid_map); | 5814 | mirror_num, 1); |
5817 | if (ret) { | 5815 | if (ret) { |
5818 | btrfs_bio_counter_dec(root->fs_info); | 5816 | btrfs_bio_counter_dec(root->fs_info); |
5819 | return ret; | 5817 | return ret; |
@@ -5826,15 +5824,13 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, | |||
5826 | bbio->fs_info = root->fs_info; | 5824 | bbio->fs_info = root->fs_info; |
5827 | atomic_set(&bbio->stripes_pending, bbio->num_stripes); | 5825 | atomic_set(&bbio->stripes_pending, bbio->num_stripes); |
5828 | 5826 | ||
5829 | if (raid_map) { | 5827 | if (bbio->raid_map) { |
5830 | /* In this case, map_length has been set to the length of | 5828 | /* In this case, map_length has been set to the length of |
5831 | a single stripe; not the whole write */ | 5829 | a single stripe; not the whole write */ |
5832 | if (rw & WRITE) { | 5830 | if (rw & WRITE) { |
5833 | ret = raid56_parity_write(root, bio, bbio, | 5831 | ret = raid56_parity_write(root, bio, bbio, map_length); |
5834 | raid_map, map_length); | ||
5835 | } else { | 5832 | } else { |
5836 | ret = raid56_parity_recover(root, bio, bbio, | 5833 | ret = raid56_parity_recover(root, bio, bbio, map_length, |
5837 | raid_map, map_length, | ||
5838 | mirror_num, 1); | 5834 | mirror_num, 1); |
5839 | } | 5835 | } |
5840 | 5836 | ||
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index d6fe73c0f4a2..fb0e8c3f296e 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h | |||
@@ -307,6 +307,12 @@ struct btrfs_bio { | |||
307 | int mirror_num; | 307 | int mirror_num; |
308 | int num_tgtdevs; | 308 | int num_tgtdevs; |
309 | int *tgtdev_map; | 309 | int *tgtdev_map; |
310 | /* | ||
311 | * logical block numbers for the start of each stripe | ||
312 | * The last one or two are p/q. These are sorted, | ||
313 | * so raid_map[0] is the start of our full stripe | ||
314 | */ | ||
315 | u64 *raid_map; | ||
310 | struct btrfs_bio_stripe stripes[]; | 316 | struct btrfs_bio_stripe stripes[]; |
311 | }; | 317 | }; |
312 | 318 | ||
@@ -392,7 +398,8 @@ int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start, | |||
392 | #define btrfs_bio_size(total_stripes, real_stripes) \ | 398 | #define btrfs_bio_size(total_stripes, real_stripes) \ |
393 | (sizeof(struct btrfs_bio) + \ | 399 | (sizeof(struct btrfs_bio) + \ |
394 | (sizeof(struct btrfs_bio_stripe) * (total_stripes)) + \ | 400 | (sizeof(struct btrfs_bio_stripe) * (total_stripes)) + \ |
395 | (sizeof(int) * (real_stripes))) | 401 | (sizeof(int) * (real_stripes)) + \ |
402 | (sizeof(u64) * (real_stripes))) | ||
396 | 403 | ||
397 | int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, | 404 | int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, |
398 | u64 logical, u64 *length, | 405 | u64 logical, u64 *length, |
@@ -400,7 +407,7 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, | |||
400 | int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw, | 407 | int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw, |
401 | u64 logical, u64 *length, | 408 | u64 logical, u64 *length, |
402 | struct btrfs_bio **bbio_ret, int mirror_num, | 409 | struct btrfs_bio **bbio_ret, int mirror_num, |
403 | u64 **raid_map_ret); | 410 | int need_raid_map); |
404 | int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, | 411 | int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, |
405 | u64 chunk_start, u64 physical, u64 devid, | 412 | u64 chunk_start, u64 physical, u64 devid, |
406 | u64 **logical, int *naddrs, int *stripe_len); | 413 | u64 **logical, int *naddrs, int *stripe_len); |