author	Miao Xie <miaox@cn.fujitsu.com>	2014-11-14 03:06:25 -0500
committer	Miao Xie <miaox@cn.fujitsu.com>	2014-12-02 21:18:46 -0500
commit	2c8cdd6ee4e7f637b0486c6798117e7859dee586 (patch)
tree	ae10c7af5f98e4b60d55676b8d59b8b543980ae8
parent	5a6ac9eacb49143cbad3bbfda72263101cb1f3df (diff)
Btrfs, replace: write dirty pages into the replace target device
The implementation is simple:

- In order to avoid changing the code logic of btrfs_map_bio and RAID56, we add the stripes of the replace target devices at the end of the stripe array in the btrfs bio, and we sort those target device stripes in the array. We also keep the number of the target device stripes in the btrfs bio.
- Except for the write operation on RAID56, none of the other operations take the target device stripes into account.
- When we do a write operation, we read the data from the common devices and calculate the parity. Then we write out the dirty data and the new parity; at that point we look up the corresponding replace target stripes and write the same data to them.

Note: The function that copies the old data on the source device to the target device was implemented in the past; it is similar to the other RAID types.

Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
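For illustration only (not part of the patch): a minimal sketch of the state this mapping produces, assuming a 3-device RAID5 full-stripe write while the device holding stripe 1 is being replaced. All concrete values below belong to that assumed example.

	/* stripes[0..2] are the source devices; stripes[3] is the extra copy
	 * destined for the replace target, appended at the end of the array. */
	bbio->num_stripes = 4;
	bbio->num_tgtdevs = 1;

	/* tgtdev_map[] lives in the same allocation, right behind stripes[]
	 * (see btrfs_bio_size()); a zero entry means "no target stripe for
	 * this device". Writes for stripe 1 are also sent to stripe 3. */
	bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);
	bbio->tgtdev_map[1] = 3;

	/* The RAID56 code sizes its parity math by the source stripes only,
	 * so the appended target stripe never enters the P/Q calculation. */
	real_stripes = bbio->num_stripes - bbio->num_tgtdevs;	/* == 3 */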
-rw-r--r--	fs/btrfs/raid56.c	104
-rw-r--r--	fs/btrfs/volumes.c	26
-rw-r--r--	fs/btrfs/volumes.h	10
3 files changed, 97 insertions(+), 43 deletions(-)
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index b85d68f721b8..89a8486c34b3 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -131,6 +131,8 @@ struct btrfs_raid_bio {
 	/* number of data stripes (no p/q) */
 	int nr_data;
 
+	int real_stripes;
+
 	int stripe_npages;
 	/*
 	 * set if we're doing a parity rebuild
@@ -638,7 +640,7 @@ static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
  */
 static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
 {
-	if (rbio->nr_data + 1 == rbio->bbio->num_stripes)
+	if (rbio->nr_data + 1 == rbio->real_stripes)
 		return NULL;
 
 	index += ((rbio->nr_data + 1) * rbio->stripe_len) >>
@@ -981,7 +983,8 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
 {
 	struct btrfs_raid_bio *rbio;
 	int nr_data = 0;
-	int num_pages = rbio_nr_pages(stripe_len, bbio->num_stripes);
+	int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
+	int num_pages = rbio_nr_pages(stripe_len, real_stripes);
 	int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
 	void *p;
 
@@ -1001,6 +1004,7 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
 	rbio->fs_info = root->fs_info;
 	rbio->stripe_len = stripe_len;
 	rbio->nr_pages = num_pages;
+	rbio->real_stripes = real_stripes;
 	rbio->stripe_npages = stripe_npages;
 	rbio->faila = -1;
 	rbio->failb = -1;
@@ -1017,10 +1021,10 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
 	rbio->bio_pages = p + sizeof(struct page *) * num_pages;
 	rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;
 
-	if (raid_map[bbio->num_stripes - 1] == RAID6_Q_STRIPE)
-		nr_data = bbio->num_stripes - 2;
+	if (raid_map[real_stripes - 1] == RAID6_Q_STRIPE)
+		nr_data = real_stripes - 2;
 	else
-		nr_data = bbio->num_stripes - 1;
+		nr_data = real_stripes - 1;
 
 	rbio->nr_data = nr_data;
 	return rbio;
@@ -1132,7 +1136,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
 static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
 {
 	if (rbio->faila >= 0 || rbio->failb >= 0) {
-		BUG_ON(rbio->faila == rbio->bbio->num_stripes - 1);
+		BUG_ON(rbio->faila == rbio->real_stripes - 1);
 		__raid56_parity_recover(rbio);
 	} else {
 		finish_rmw(rbio);
@@ -1193,7 +1197,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio)
 static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
 {
 	struct btrfs_bio *bbio = rbio->bbio;
-	void *pointers[bbio->num_stripes];
+	void *pointers[rbio->real_stripes];
 	int stripe_len = rbio->stripe_len;
 	int nr_data = rbio->nr_data;
 	int stripe;
@@ -1207,11 +1211,11 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
 
 	bio_list_init(&bio_list);
 
-	if (bbio->num_stripes - rbio->nr_data == 1) {
-		p_stripe = bbio->num_stripes - 1;
-	} else if (bbio->num_stripes - rbio->nr_data == 2) {
-		p_stripe = bbio->num_stripes - 2;
-		q_stripe = bbio->num_stripes - 1;
+	if (rbio->real_stripes - rbio->nr_data == 1) {
+		p_stripe = rbio->real_stripes - 1;
+	} else if (rbio->real_stripes - rbio->nr_data == 2) {
+		p_stripe = rbio->real_stripes - 2;
+		q_stripe = rbio->real_stripes - 1;
 	} else {
 		BUG();
 	}
@@ -1268,7 +1272,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
 			SetPageUptodate(p);
 			pointers[stripe++] = kmap(p);
 
-			raid6_call.gen_syndrome(bbio->num_stripes, PAGE_SIZE,
+			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
 						pointers);
 		} else {
 			/* raid5 */
@@ -1277,7 +1281,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
 		}
 
 
-		for (stripe = 0; stripe < bbio->num_stripes; stripe++)
+		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
 			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
 	}
 
@@ -1286,7 +1290,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
 	 * higher layers (the bio_list in our rbio) and our p/q. Ignore
 	 * everything else.
 	 */
-	for (stripe = 0; stripe < bbio->num_stripes; stripe++) {
+	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
 		for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
 			struct page *page;
 			if (stripe < rbio->nr_data) {
@@ -1304,6 +1308,32 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
 		}
 	}
 
+	if (likely(!bbio->num_tgtdevs))
+		goto write_data;
+
+	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
+		if (!bbio->tgtdev_map[stripe])
+			continue;
+
+		for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
+			struct page *page;
+			if (stripe < rbio->nr_data) {
+				page = page_in_rbio(rbio, stripe, pagenr, 1);
+				if (!page)
+					continue;
+			} else {
+				page = rbio_stripe_page(rbio, stripe, pagenr);
+			}
+
+			ret = rbio_add_io_page(rbio, &bio_list, page,
+					       rbio->bbio->tgtdev_map[stripe],
+					       pagenr, rbio->stripe_len);
+			if (ret)
+				goto cleanup;
+		}
+	}
+
+write_data:
 	atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
 	BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
 
@@ -1342,7 +1372,8 @@ static int find_bio_stripe(struct btrfs_raid_bio *rbio,
 		stripe = &rbio->bbio->stripes[i];
 		stripe_start = stripe->physical;
 		if (physical >= stripe_start &&
-		    physical < stripe_start + rbio->stripe_len) {
+		    physical < stripe_start + rbio->stripe_len &&
+		    bio->bi_bdev == stripe->dev->bdev) {
 			return i;
 		}
 	}
@@ -1791,7 +1822,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
 	int err;
 	int i;
 
-	pointers = kzalloc(rbio->bbio->num_stripes * sizeof(void *),
+	pointers = kzalloc(rbio->real_stripes * sizeof(void *),
 			   GFP_NOFS);
 	if (!pointers) {
 		err = -ENOMEM;
@@ -1821,7 +1852,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
 		/* setup our array of pointers with pages
 		 * from each stripe
 		 */
-		for (stripe = 0; stripe < rbio->bbio->num_stripes; stripe++) {
+		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
 			/*
 			 * if we're rebuilding a read, we have to use
 			 * pages from the bio list
@@ -1836,7 +1867,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
 		}
 
 		/* all raid6 handling here */
-		if (rbio->raid_map[rbio->bbio->num_stripes - 1] ==
+		if (rbio->raid_map[rbio->real_stripes - 1] ==
 		    RAID6_Q_STRIPE) {
 
 			/*
@@ -1886,10 +1917,10 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
 			}
 
 			if (rbio->raid_map[failb] == RAID5_P_STRIPE) {
-				raid6_datap_recov(rbio->bbio->num_stripes,
+				raid6_datap_recov(rbio->real_stripes,
 						  PAGE_SIZE, faila, pointers);
 			} else {
-				raid6_2data_recov(rbio->bbio->num_stripes,
+				raid6_2data_recov(rbio->real_stripes,
 						  PAGE_SIZE, faila, failb,
 						  pointers);
 			}
@@ -1931,7 +1962,7 @@ pstripe:
 				}
 			}
 		}
-		for (stripe = 0; stripe < rbio->bbio->num_stripes; stripe++) {
+		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
 			/*
 			 * if we're rebuilding a read, we have to use
 			 * pages from the bio list
@@ -2012,7 +2043,6 @@ static void raid_recover_end_io(struct bio *bio, int err)
 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
 {
 	int bios_to_read = 0;
-	struct btrfs_bio *bbio = rbio->bbio;
 	struct bio_list bio_list;
 	int ret;
 	int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
@@ -2033,7 +2063,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
 	 * stripe cache, it is possible that some or all of these
 	 * pages are going to be uptodate.
 	 */
-	for (stripe = 0; stripe < bbio->num_stripes; stripe++) {
+	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
 		if (rbio->faila == stripe || rbio->failb == stripe) {
 			atomic_inc(&rbio->error);
 			continue;
@@ -2139,7 +2169,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
 	 * asking for mirror 3
 	 */
 	if (mirror_num == 3)
-		rbio->failb = bbio->num_stripes - 2;
+		rbio->failb = rbio->real_stripes - 2;
 
 	ret = lock_stripe_add(rbio);
 
@@ -2205,7 +2235,7 @@ raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
 	ASSERT(!bio->bi_iter.bi_size);
 	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
 
-	for (i = 0; i < bbio->num_stripes; i++) {
+	for (i = 0; i < rbio->real_stripes; i++) {
 		if (bbio->stripes[i].dev == scrub_dev) {
 			rbio->scrubp = i;
 			break;
@@ -2246,7 +2276,7 @@ static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
 	struct page *page;
 
 	for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
-		for (i = 0; i < rbio->bbio->num_stripes; i++) {
+		for (i = 0; i < rbio->real_stripes; i++) {
 			index = i * rbio->stripe_npages + bit;
 			if (rbio->stripe_pages[index])
 				continue;
@@ -2288,8 +2318,7 @@ static void raid_write_parity_end_io(struct bio *bio, int err)
 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
 					 int need_check)
 {
-	struct btrfs_bio *bbio = rbio->bbio;
-	void *pointers[bbio->num_stripes];
+	void *pointers[rbio->real_stripes];
 	int nr_data = rbio->nr_data;
 	int stripe;
 	int pagenr;
@@ -2303,11 +2332,11 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
 
 	bio_list_init(&bio_list);
 
-	if (bbio->num_stripes - rbio->nr_data == 1) {
-		p_stripe = bbio->num_stripes - 1;
-	} else if (bbio->num_stripes - rbio->nr_data == 2) {
-		p_stripe = bbio->num_stripes - 2;
-		q_stripe = bbio->num_stripes - 1;
+	if (rbio->real_stripes - rbio->nr_data == 1) {
+		p_stripe = rbio->real_stripes - 1;
+	} else if (rbio->real_stripes - rbio->nr_data == 2) {
+		p_stripe = rbio->real_stripes - 2;
+		q_stripe = rbio->real_stripes - 1;
 	} else {
 		BUG();
 	}
@@ -2358,7 +2387,7 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
 		 */
 		pointers[stripe++] = kmap(q_page);
 
-		raid6_call.gen_syndrome(bbio->num_stripes, PAGE_SIZE,
+		raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
 					pointers);
 	} else {
 		/* raid5 */
@@ -2376,7 +2405,7 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
 			bitmap_clear(rbio->dbitmap, pagenr, 1);
 		kunmap(p);
 
-		for (stripe = 0; stripe < bbio->num_stripes; stripe++)
+		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
 			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
 	}
 
@@ -2526,7 +2555,6 @@ static void raid56_parity_scrub_end_io(struct bio *bio, int err)
 static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
 {
 	int bios_to_read = 0;
-	struct btrfs_bio *bbio = rbio->bbio;
 	struct bio_list bio_list;
 	int ret;
 	int pagenr;
@@ -2544,7 +2572,7 @@ static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
 	 * build a list of bios to read all the missing parts of this
 	 * stripe
 	 */
-	for (stripe = 0; stripe < bbio->num_stripes; stripe++) {
+	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
 		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
 			struct page *page;
 			/*
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 217c42ea90b0..6d8a5e8d8c39 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -4881,13 +4881,15 @@ static inline int parity_smaller(u64 a, u64 b)
 static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map)
 {
 	struct btrfs_bio_stripe s;
+	int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
 	int i;
 	u64 l;
 	int again = 1;
+	int m;
 
 	while (again) {
 		again = 0;
-		for (i = 0; i < bbio->num_stripes - 1; i++) {
+		for (i = 0; i < real_stripes - 1; i++) {
 			if (parity_smaller(raid_map[i], raid_map[i+1])) {
 				s = bbio->stripes[i];
 				l = raid_map[i];
@@ -4895,6 +4897,14 @@ static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map)
 				raid_map[i] = raid_map[i+1];
 				bbio->stripes[i+1] = s;
 				raid_map[i+1] = l;
+
+				if (bbio->tgtdev_map) {
+					m = bbio->tgtdev_map[i];
+					bbio->tgtdev_map[i] =
+						bbio->tgtdev_map[i + 1];
+					bbio->tgtdev_map[i + 1] = m;
+				}
+
 				again = 1;
 			}
 		}
@@ -4923,6 +4933,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 	int ret = 0;
 	int num_stripes;
 	int max_errors = 0;
+	int tgtdev_indexes = 0;
 	struct btrfs_bio *bbio = NULL;
 	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
 	int dev_replace_is_ongoing = 0;
@@ -5234,14 +5245,19 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 		num_alloc_stripes <<= 1;
 		if (rw & REQ_GET_READ_MIRRORS)
 			num_alloc_stripes++;
+		tgtdev_indexes = num_stripes;
 	}
-	bbio = kzalloc(btrfs_bio_size(num_alloc_stripes), GFP_NOFS);
+
+	bbio = kzalloc(btrfs_bio_size(num_alloc_stripes, tgtdev_indexes),
+		       GFP_NOFS);
 	if (!bbio) {
 		kfree(raid_map);
 		ret = -ENOMEM;
 		goto out;
 	}
 	atomic_set(&bbio->error, 0);
+	if (dev_replace_is_ongoing)
+		bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);
 
 	if (rw & REQ_DISCARD) {
 		int factor = 0;
@@ -5326,6 +5342,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 	if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
 		max_errors = btrfs_chunk_max_errors(map);
 
+	tgtdev_indexes = 0;
 	if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
 	    dev_replace->tgtdev != NULL) {
 		int index_where_to_add;
@@ -5354,8 +5371,10 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 				new->physical = old->physical;
 				new->length = old->length;
 				new->dev = dev_replace->tgtdev;
+				bbio->tgtdev_map[i] = index_where_to_add;
 				index_where_to_add++;
 				max_errors++;
+				tgtdev_indexes++;
 			}
 		}
 		num_stripes = index_where_to_add;
@@ -5401,7 +5420,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 			tgtdev_stripe->length =
 				bbio->stripes[index_srcdev].length;
 			tgtdev_stripe->dev = dev_replace->tgtdev;
+			bbio->tgtdev_map[index_srcdev] = num_stripes;
 
+			tgtdev_indexes++;
 			num_stripes++;
 		}
 	}
@@ -5411,6 +5432,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 	bbio->num_stripes = num_stripes;
 	bbio->max_errors = max_errors;
 	bbio->mirror_num = mirror_num;
+	bbio->num_tgtdevs = tgtdev_indexes;
 
 	/*
 	 * this is the case that REQ_READ && dev_replace_is_ongoing &&
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 01094bb804c7..70be2571cedf 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -292,7 +292,7 @@ struct btrfs_bio_stripe {
 struct btrfs_bio;
 typedef void (btrfs_bio_end_io_t) (struct btrfs_bio *bio, int err);
 
-#define BTRFS_BIO_ORIG_BIO_SUBMITTED	0x1
+#define BTRFS_BIO_ORIG_BIO_SUBMITTED	(1 << 0)
 
 struct btrfs_bio {
 	atomic_t stripes_pending;
@@ -305,6 +305,8 @@ struct btrfs_bio {
 	int max_errors;
 	int num_stripes;
 	int mirror_num;
+	int num_tgtdevs;
+	int *tgtdev_map;
 	struct btrfs_bio_stripe stripes[];
 };
 
@@ -387,8 +389,10 @@ struct btrfs_balance_control {
 int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
 				   u64 end, u64 *length);
 
-#define btrfs_bio_size(n) (sizeof(struct btrfs_bio) + \
-			    (sizeof(struct btrfs_bio_stripe) * (n)))
+#define btrfs_bio_size(total_stripes, real_stripes)		\
+	(sizeof(struct btrfs_bio) +				\
+	 (sizeof(struct btrfs_bio_stripe) * (total_stripes)) +	\
+	 (sizeof(int) * (real_stripes)))
 
 int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 		    u64 logical, u64 *length,