 fs/btrfs/raid56.c  |  52
 fs/btrfs/raid56.h  |   2
 fs/btrfs/scrub.c   | 194
 fs/btrfs/volumes.c |  16
 fs/btrfs/volumes.h |   4
 5 files changed, 235 insertions(+), 33 deletions(-)
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index c54b0e64c590..95053a903474 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -58,6 +58,15 @@
  */
 #define RBIO_CACHE_READY_BIT	3
 
+/*
+ * bbio and raid_map are managed by the caller, so we shouldn't free
+ * them here. Besides that, rbios with this flag must not be cached,
+ * because we need the raid_map to check whether two rbios cover the
+ * same stripe, and it is very likely that the caller has already
+ * freed the raid_map, so don't cache those rbios.
+ */
+#define RBIO_HOLD_BBIO_MAP_BIT	4
+
 #define RBIO_CACHE_SIZE 1024
 
 struct btrfs_raid_bio {
@@ -799,6 +808,21 @@ done_nolock:
 	remove_rbio_from_cache(rbio);
 }
 
+static inline void
+__free_bbio_and_raid_map(struct btrfs_bio *bbio, u64 *raid_map, int need)
+{
+	if (need) {
+		kfree(raid_map);
+		kfree(bbio);
+	}
+}
+
+static inline void free_bbio_and_raid_map(struct btrfs_raid_bio *rbio)
+{
+	__free_bbio_and_raid_map(rbio->bbio, rbio->raid_map,
+			!test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags));
+}
+
 static void __free_raid_bio(struct btrfs_raid_bio *rbio)
 {
 	int i;
@@ -817,8 +841,9 @@ static void __free_raid_bio(struct btrfs_raid_bio *rbio)
 			rbio->stripe_pages[i] = NULL;
 		}
 	}
-	kfree(rbio->raid_map);
-	kfree(rbio->bbio);
+
+	free_bbio_and_raid_map(rbio);
+
 	kfree(rbio);
 }
 
@@ -933,11 +958,8 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
 
 	rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2,
 			GFP_NOFS);
-	if (!rbio) {
-		kfree(raid_map);
-		kfree(bbio);
+	if (!rbio)
 		return ERR_PTR(-ENOMEM);
-	}
 
 	bio_list_init(&rbio->bio_list);
 	INIT_LIST_HEAD(&rbio->plug_list);
@@ -1692,8 +1714,10 @@ int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
 	struct blk_plug_cb *cb;
 
 	rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
-	if (IS_ERR(rbio))
+	if (IS_ERR(rbio)) {
+		__free_bbio_and_raid_map(bbio, raid_map, 1);
 		return PTR_ERR(rbio);
+	}
 	bio_list_add(&rbio->bio_list, bio);
 	rbio->bio_list_bytes = bio->bi_iter.bi_size;
 
@@ -1888,7 +1912,8 @@ cleanup:
 cleanup_io:
 
 	if (rbio->read_rebuild) {
-		if (err == 0)
+		if (err == 0 &&
+		    !test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags))
 			cache_rbio_pages(rbio);
 		else
 			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
@@ -2038,15 +2063,19 @@ cleanup:
  */
 int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
 			  struct btrfs_bio *bbio, u64 *raid_map,
-			  u64 stripe_len, int mirror_num)
+			  u64 stripe_len, int mirror_num, int hold_bbio)
 {
 	struct btrfs_raid_bio *rbio;
 	int ret;
 
 	rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
-	if (IS_ERR(rbio))
+	if (IS_ERR(rbio)) {
+		__free_bbio_and_raid_map(bbio, raid_map, !hold_bbio);
 		return PTR_ERR(rbio);
+	}
 
+	if (hold_bbio)
+		set_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags);
 	rbio->read_rebuild = 1;
 	bio_list_add(&rbio->bio_list, bio);
 	rbio->bio_list_bytes = bio->bi_iter.bi_size;
@@ -2054,8 +2083,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
 	rbio->faila = find_logical_bio_stripe(rbio, bio);
 	if (rbio->faila == -1) {
 		BUG();
-		kfree(raid_map);
-		kfree(bbio);
+		__free_bbio_and_raid_map(bbio, raid_map, !hold_bbio);
 		kfree(rbio);
 		return -EIO;
 	}
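Taken together, the raid56.c changes define a simple ownership rule: with hold_bbio == 0 the rbio owns bbio and raid_map and frees them on every path (including the new allocation-failure paths), while with hold_bbio == 1 the caller keeps ownership and the rbio neither frees nor caches them. A minimal caller-side sketch of the hold_bbio == 1 contract — recover_with_held_map and the elided completion wait are hypothetical illustrations, not part of this patch:

/*
 * Sketch only: a caller that keeps ownership of bbio/raid_map
 * across a RAID5/6 recovery, much as scrub does via scrub_recover.
 */
static int recover_with_held_map(struct btrfs_root *root, struct bio *bio,
				 struct btrfs_bio *bbio, u64 *raid_map,
				 u64 map_length, int mirror_num)
{
	int ret;

	/* hold_bbio == 1: the rbio will neither free nor cache the map */
	ret = raid56_parity_recover(root, bio, bbio, raid_map,
				    map_length, mirror_num, 1);

	/* ... on success, wait for the bio to complete ... */

	/* ownership stayed with the caller on every path, so free here */
	kfree(bbio);
	kfree(raid_map);
	return ret;
}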
diff --git a/fs/btrfs/raid56.h b/fs/btrfs/raid56.h
index ea5d73bfdfbe..b310e8c830d1 100644
--- a/fs/btrfs/raid56.h
+++ b/fs/btrfs/raid56.h
@@ -41,7 +41,7 @@ static inline int nr_data_stripes(struct map_lookup *map)
 
 int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
 			  struct btrfs_bio *bbio, u64 *raid_map,
-			  u64 stripe_len, int mirror_num);
+			  u64 stripe_len, int mirror_num, int hold_bbio);
 int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
 			struct btrfs_bio *bbio, u64 *raid_map,
 			u64 stripe_len);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index efa083113827..ca4b9eb8b5da 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -63,6 +63,13 @@ struct scrub_ctx;
  */
 #define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */
 
+struct scrub_recover {
+	atomic_t		refs;
+	struct btrfs_bio	*bbio;
+	u64			*raid_map;
+	u64			map_length;
+};
+
 struct scrub_page {
 	struct scrub_block	*sblock;
 	struct page		*page;
@@ -79,6 +86,8 @@ struct scrub_page {
 		unsigned int	io_error:1;
 	};
 	u8			csum[BTRFS_CSUM_SIZE];
+
+	struct scrub_recover	*recover;
 };
 
 struct scrub_bio {
@@ -196,7 +205,7 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
 				struct scrub_block *sblock, int is_metadata,
 				int have_csum, u8 *csum, u64 generation,
-				u16 csum_size);
+				u16 csum_size, int retry_failed_mirror);
 static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
 					 struct scrub_block *sblock,
 					 int is_metadata, int have_csum,
@@ -790,6 +799,20 @@ out:
 	scrub_pending_trans_workers_dec(sctx);
 }
 
+static inline void scrub_get_recover(struct scrub_recover *recover)
+{
+	atomic_inc(&recover->refs);
+}
+
+static inline void scrub_put_recover(struct scrub_recover *recover)
+{
+	if (atomic_dec_and_test(&recover->refs)) {
+		kfree(recover->bbio);
+		kfree(recover->raid_map);
+		kfree(recover);
+	}
+}
+
 /*
  * scrub_handle_errored_block gets called when either verification of the
  * pages failed or the bio failed to read, e.g. with EIO. In the latter
@@ -906,7 +929,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 
 	/* build and submit the bios for the failed mirror, check checksums */
 	scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
-			    csum, generation, sctx->csum_size);
+			    csum, generation, sctx->csum_size, 1);
 
 	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
 	    sblock_bad->no_io_error_seen) {
@@ -1019,7 +1042,7 @@ nodatasum_case:
 		/* build and submit the bios, check checksums */
 		scrub_recheck_block(fs_info, sblock_other, is_metadata,
 				    have_csum, csum, generation,
-				    sctx->csum_size);
+				    sctx->csum_size, 0);
 
 		if (!sblock_other->header_error &&
 		    !sblock_other->checksum_error &&
@@ -1169,7 +1192,7 @@ nodatasum_case:
 		 */
 		scrub_recheck_block(fs_info, sblock_bad,
 				    is_metadata, have_csum, csum,
-				    generation, sctx->csum_size);
+				    generation, sctx->csum_size, 1);
 		if (!sblock_bad->header_error &&
 		    !sblock_bad->checksum_error &&
 		    sblock_bad->no_io_error_seen)
@@ -1201,11 +1224,18 @@ out:
 	     mirror_index++) {
 		struct scrub_block *sblock = sblocks_for_recheck +
 					     mirror_index;
+		struct scrub_recover *recover;
 		int page_index;
 
 		for (page_index = 0; page_index < sblock->page_count;
 		     page_index++) {
 			sblock->pagev[page_index]->sblock = NULL;
+			recover = sblock->pagev[page_index]->recover;
+			if (recover) {
+				scrub_put_recover(recover);
+				sblock->pagev[page_index]->recover =
+							NULL;
+			}
 			scrub_page_put(sblock->pagev[page_index]);
 		}
 	}
@@ -1215,14 +1245,63 @@ out:
 	return 0;
 }
 
+static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio, u64 *raid_map)
+{
+	if (raid_map) {
+		if (raid_map[bbio->num_stripes - 1] == RAID6_Q_STRIPE)
+			return 3;
+		else
+			return 2;
+	} else {
+		return (int)bbio->num_stripes;
+	}
+}
+
+static inline void scrub_stripe_index_and_offset(u64 logical, u64 *raid_map,
+						 u64 mapped_length,
+						 int nstripes, int mirror,
+						 int *stripe_index,
+						 u64 *stripe_offset)
+{
+	int i;
+
+	if (raid_map) {
+		/* RAID5/6 */
+		for (i = 0; i < nstripes; i++) {
+			if (raid_map[i] == RAID6_Q_STRIPE ||
+			    raid_map[i] == RAID5_P_STRIPE)
+				continue;
+
+			if (logical >= raid_map[i] &&
+			    logical < raid_map[i] + mapped_length)
+				break;
+		}
+
+		*stripe_index = i;
+		*stripe_offset = logical - raid_map[i];
+	} else {
+		/* The other RAID type */
+		*stripe_index = mirror;
+		*stripe_offset = 0;
+	}
+}
+
 static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
 				     struct btrfs_fs_info *fs_info,
 				     struct scrub_block *original_sblock,
 				     u64 length, u64 logical,
 				     struct scrub_block *sblocks_for_recheck)
 {
+	struct scrub_recover *recover;
+	struct btrfs_bio *bbio;
+	u64 *raid_map;
+	u64 sublen;
+	u64 mapped_length;
+	u64 stripe_offset;
+	int stripe_index;
 	int page_index;
 	int mirror_index;
+	int nmirrors;
 	int ret;
 
 	/*
@@ -1233,23 +1312,39 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
 
 	page_index = 0;
 	while (length > 0) {
-		u64 sublen = min_t(u64, length, PAGE_SIZE);
-		u64 mapped_length = sublen;
-		struct btrfs_bio *bbio = NULL;
+		sublen = min_t(u64, length, PAGE_SIZE);
+		mapped_length = sublen;
+		bbio = NULL;
+		raid_map = NULL;
 
 		/*
 		 * with a length of PAGE_SIZE, each returned stripe
 		 * represents one mirror
 		 */
-		ret = btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, logical,
-				      &mapped_length, &bbio, 0);
+		ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical,
+				       &mapped_length, &bbio, 0, &raid_map);
 		if (ret || !bbio || mapped_length < sublen) {
 			kfree(bbio);
+			kfree(raid_map);
 			return -EIO;
 		}
 
+		recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
+		if (!recover) {
+			kfree(bbio);
+			kfree(raid_map);
+			return -ENOMEM;
+		}
+
+		atomic_set(&recover->refs, 1);
+		recover->bbio = bbio;
+		recover->raid_map = raid_map;
+		recover->map_length = mapped_length;
+
 		BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);
-		for (mirror_index = 0; mirror_index < (int)bbio->num_stripes;
+
+		nmirrors = scrub_nr_raid_mirrors(bbio, raid_map);
+		for (mirror_index = 0; mirror_index < nmirrors;
 		     mirror_index++) {
 			struct scrub_block *sblock;
 			struct scrub_page *page;
@@ -1265,26 +1360,38 @@ leave_nomem:
 				spin_lock(&sctx->stat_lock);
 				sctx->stat.malloc_errors++;
 				spin_unlock(&sctx->stat_lock);
-				kfree(bbio);
+				scrub_put_recover(recover);
 				return -ENOMEM;
 			}
 			scrub_page_get(page);
 			sblock->pagev[page_index] = page;
 			page->logical = logical;
-			page->physical = bbio->stripes[mirror_index].physical;
+
+			scrub_stripe_index_and_offset(logical, raid_map,
+						      mapped_length,
+						      bbio->num_stripes,
+						      mirror_index,
+						      &stripe_index,
+						      &stripe_offset);
+			page->physical = bbio->stripes[stripe_index].physical +
+					 stripe_offset;
+			page->dev = bbio->stripes[stripe_index].dev;
+
 			BUG_ON(page_index >= original_sblock->page_count);
 			page->physical_for_dev_replace =
 				original_sblock->pagev[page_index]->
 				physical_for_dev_replace;
 			/* for missing devices, dev->bdev is NULL */
-			page->dev = bbio->stripes[mirror_index].dev;
 			page->mirror_num = mirror_index + 1;
 			sblock->page_count++;
 			page->page = alloc_page(GFP_NOFS);
 			if (!page->page)
 				goto leave_nomem;
+
+			scrub_get_recover(recover);
+			page->recover = recover;
 		}
-		kfree(bbio);
+		scrub_put_recover(recover);
 		length -= sublen;
 		logical += sublen;
 		page_index++;
@@ -1293,6 +1400,51 @@ leave_nomem:
 	return 0;
 }
 
+struct scrub_bio_ret {
+	struct completion event;
+	int error;
+};
+
+static void scrub_bio_wait_endio(struct bio *bio, int error)
+{
+	struct scrub_bio_ret *ret = bio->bi_private;
+
+	ret->error = error;
+	complete(&ret->event);
+}
+
+static inline int scrub_is_page_on_raid56(struct scrub_page *page)
+{
+	return page->recover && page->recover->raid_map;
+}
+
+static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
+					struct bio *bio,
+					struct scrub_page *page)
+{
+	struct scrub_bio_ret done;
+	int ret;
+
+	init_completion(&done.event);
+	done.error = 0;
+	bio->bi_iter.bi_sector = page->logical >> 9;
+	bio->bi_private = &done;
+	bio->bi_end_io = scrub_bio_wait_endio;
+
+	ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio,
+				    page->recover->raid_map,
+				    page->recover->map_length,
+				    page->mirror_num, 1);
+	if (ret)
+		return ret;
+
+	wait_for_completion(&done.event);
+	if (done.error)
+		return -EIO;
+
+	return 0;
+}
+
 /*
  * this function will check the on disk data for checksum errors, header
  * errors and read I/O errors. If any I/O errors happen, the exact pages
@@ -1303,7 +1455,7 @@ leave_nomem:
 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
 				struct scrub_block *sblock, int is_metadata,
 				int have_csum, u8 *csum, u64 generation,
-				u16 csum_size)
+				u16 csum_size, int retry_failed_mirror)
 {
 	int page_num;
 
@@ -1329,11 +1481,17 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
 			continue;
 		}
 		bio->bi_bdev = page->dev->bdev;
-		bio->bi_iter.bi_sector = page->physical >> 9;
 
 		bio_add_page(bio, page->page, PAGE_SIZE, 0);
-		if (btrfsic_submit_bio_wait(READ, bio))
-			sblock->no_io_error_seen = 0;
+		if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) {
+			if (scrub_submit_raid56_bio_wait(fs_info, bio, page))
+				sblock->no_io_error_seen = 0;
+		} else {
+			bio->bi_iter.bi_sector = page->physical >> 9;
+
+			if (btrfsic_submit_bio_wait(READ, bio))
+				sblock->no_io_error_seen = 0;
+		}
 
 		bio_put(bio);
 	}
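The stripe lookup that scrub_stripe_index_and_offset() performs is easy to check in isolation: skip the P/Q sentinel entries and find the data stripe whose logical range covers the address. Below is a small user-space approximation that compiles on its own — the sentinel values mirror the kernel's RAID5_P_STRIPE/RAID6_Q_STRIPE, and the addresses are made up for the example:

#include <stdint.h>
#include <stdio.h>

/* user-space stand-ins for the kernel sentinels */
#define RAID5_P_STRIPE ((uint64_t)-2)
#define RAID6_Q_STRIPE ((uint64_t)-1)

/* same walk as the patch: which data stripe covers 'logical'? */
static void stripe_index_and_offset(uint64_t logical, const uint64_t *raid_map,
				    uint64_t mapped_length, int nstripes,
				    int *stripe_index, uint64_t *stripe_offset)
{
	int i;

	for (i = 0; i < nstripes; i++) {
		if (raid_map[i] == RAID6_Q_STRIPE ||
		    raid_map[i] == RAID5_P_STRIPE)
			continue;
		if (logical >= raid_map[i] &&
		    logical < raid_map[i] + mapped_length)
			break;
	}
	*stripe_index = i;
	*stripe_offset = logical - raid_map[i];
}

int main(void)
{
	/* two data stripes at 0 and 64K, then P and Q (RAID6 layout) */
	uint64_t raid_map[] = { 0, 65536, RAID5_P_STRIPE, RAID6_Q_STRIPE };
	int idx;
	uint64_t off;

	/* 67584 = 65536 + 2048, with a PAGE_SIZE-sized mapping */
	stripe_index_and_offset(67584, raid_map, 4096, 4, &idx, &off);
	printf("stripe %d, offset %llu\n", idx, (unsigned long long)off);
	/* prints: stripe 1, offset 2048 */
	return 0;
}

This is why page->physical becomes bbio->stripes[stripe_index].physical + stripe_offset: on RAID5/6 the mirror index no longer maps one-to-one onto stripes, so the physical location must be computed from the raid_map.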
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 6f5b302a08cf..217c42ea90b0 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -5161,7 +5161,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 		      BTRFS_BLOCK_GROUP_RAID6)) {
 		u64 tmp;
 
-		if (raid_map_ret && ((rw & REQ_WRITE) || mirror_num > 1)) {
+		if (raid_map_ret &&
+		    ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
+		     mirror_num > 1)) {
 			int i, rot;
 
 			/* push stripe_nr back to the start of the full stripe */
@@ -5440,6 +5442,16 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 			      mirror_num, NULL);
 }
 
+/* For Scrub/replace */
+int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw,
+		     u64 logical, u64 *length,
+		     struct btrfs_bio **bbio_ret, int mirror_num,
+		     u64 **raid_map_ret)
+{
+	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
+				 mirror_num, raid_map_ret);
+}
+
 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
 		     u64 chunk_start, u64 physical, u64 devid,
 		     u64 **logical, int *naddrs, int *stripe_len)
@@ -5809,7 +5821,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
 	} else {
 		ret = raid56_parity_recover(root, bio, bbio,
 					    raid_map, map_length,
 					    mirror_num, 0);
 	}
 	/*
 	 * FIXME, replace dosen't support raid56 yet, please fix
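btrfs_map_sblock() is a thin wrapper that exposes __btrfs_map_block()'s raid_map output without touching btrfs_map_block()'s signature for existing callers. A hedged sketch of the calling convention — probe_raid56 is a hypothetical helper, mirroring the scrub_setup_recheck_block() usage above:

/* Sketch: does 'logical' sit on a RAID5/6 chunk? */
static int probe_raid56(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct btrfs_bio *bbio = NULL;
	u64 *raid_map = NULL;
	u64 mapped_length = len;
	int on_raid56;

	if (btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical,
			     &mapped_length, &bbio, 0, &raid_map))
		return -EIO;

	/* a non-NULL raid_map means a full-stripe RAID5/6 mapping came back */
	on_raid56 = raid_map != NULL;

	/* both allocations belong to the caller; kfree() is NULL-safe */
	kfree(bbio);
	kfree(raid_map);
	return on_raid56;
}

Note the widened condition in __btrfs_map_block(): REQ_GET_READ_MIRRORS now also triggers the full-stripe mapping, which is what lets scrub receive a raid_map for read-only rechecks.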
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 08980fa23039..01094bb804c7 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -393,6 +393,10 @@ int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
 int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 		    u64 logical, u64 *length,
 		    struct btrfs_bio **bbio_ret, int mirror_num);
+int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw,
+		     u64 logical, u64 *length,
+		     struct btrfs_bio **bbio_ret, int mirror_num,
+		     u64 **raid_map_ret);
 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
 		     u64 chunk_start, u64 physical, u64 devid,
 		     u64 **logical, int *naddrs, int *stripe_len);
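Finally, the scrub side ties these pieces together through scrub_recover's reference count: scrub_setup_recheck_block() creates each recover with one reference, every scrub_page that points at it takes another, and whichever put drops the count to zero frees bbio and raid_map exactly once. A compressed view of that lifetime — for_each_mirror_page() is pseudocode standing in for the nested loops in the scrub.c hunks above:

	recover = kzalloc(sizeof(*recover), GFP_NOFS);
	atomic_set(&recover->refs, 1);		/* setup's own reference */

	for_each_mirror_page(page) {
		scrub_get_recover(recover);	/* +1 per referencing page */
		page->recover = recover;
	}
	scrub_put_recover(recover);		/* setup drops its reference */

	/* ... later, when the recheck blocks are torn down ... */
	scrub_put_recover(page->recover);	/* last put kfree()s bbio,
						   raid_map and recover */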