author    Zhao Lei <zhaolei@cn.fujitsu.com>  2015-01-20 02:11:33 -0500
committer Chris Mason <clm@fb.com>           2015-01-21 21:06:47 -0500
commit    8e5cfb55d3f7dc764cd7f4c966d4c2687eaf7569 (patch)
tree      a5df5cec020c57973b914fd0ef3fa5891b81b9b4 /fs/btrfs/scrub.c
parent    cc7539edea6dd02536d56f0a3405b8bb7ae24168 (diff)
Btrfs: Make raid_map array be inlined in btrfs_bio structure
This makes the code simpler and clearer: we no longer need to take care of freeing bbio and raid_map together.

Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
Signed-off-by: Zhao Lei <zhaolei@cn.fujitsu.com>
Signed-off-by: Chris Mason <clm@fb.com>
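For illustration, a minimal sketch of the layout this change moves to. This page shows only the scrub.c side, so the struct below is an assumption inferred from the commit title, not the verbatim kernel definition; the point is that raid_map now points into the bbio's own allocation:

/* hypothetical sketch of the inlined layout (u64 is the kernel typedef) */
struct btrfs_bio_stripe;

struct btrfs_bio {
	int num_stripes;
	int num_tgtdevs;
	int *tgtdev_map;			/* points into this same allocation */
	u64 *raid_map;				/* ditto: logical start of each stripe */
	struct btrfs_bio_stripe stripes[];	/* trailing flexible array */
};

With this layout a single kfree(bbio) releases the raid_map as well, which is why every separate kfree(raid_map) in the diff below becomes dead code.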
Diffstat (limited to 'fs/btrfs/scrub.c')
-rw-r--r--  fs/btrfs/scrub.c  31
1 file changed, 10 insertions(+), 21 deletions(-)
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 673e32be88fa..9d07c981ec82 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -66,7 +66,6 @@ struct scrub_ctx;
 struct scrub_recover {
 	atomic_t		refs;
 	struct btrfs_bio	*bbio;
-	u64			*raid_map;
 	u64			map_length;
 };
 
@@ -857,7 +856,6 @@ static inline void scrub_put_recover(struct scrub_recover *recover)
 {
 	if (atomic_dec_and_test(&recover->refs)) {
 		kfree(recover->bbio);
-		kfree(recover->raid_map);
 		kfree(recover);
 	}
 }
@@ -1296,12 +1294,12 @@ out:
 	return 0;
 }
 
-static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio, u64 *raid_map)
+static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
 {
-	if (raid_map) {
+	if (bbio->raid_map) {
 		int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
 
-		if (raid_map[real_stripes - 1] == RAID6_Q_STRIPE)
+		if (bbio->raid_map[real_stripes - 1] == RAID6_Q_STRIPE)
 			return 3;
 		else
 			return 2;
@@ -1347,7 +1345,6 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
 {
 	struct scrub_recover *recover;
 	struct btrfs_bio *bbio;
-	u64 *raid_map;
 	u64 sublen;
 	u64 mapped_length;
 	u64 stripe_offset;
@@ -1368,35 +1365,31 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
 		sublen = min_t(u64, length, PAGE_SIZE);
 		mapped_length = sublen;
 		bbio = NULL;
-		raid_map = NULL;
 
 		/*
 		 * with a length of PAGE_SIZE, each returned stripe
 		 * represents one mirror
 		 */
 		ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical,
-				       &mapped_length, &bbio, 0, &raid_map);
+				       &mapped_length, &bbio, 0, 1);
 		if (ret || !bbio || mapped_length < sublen) {
 			kfree(bbio);
-			kfree(raid_map);
 			return -EIO;
 		}
 
 		recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
 		if (!recover) {
 			kfree(bbio);
-			kfree(raid_map);
 			return -ENOMEM;
 		}
 
 		atomic_set(&recover->refs, 1);
 		recover->bbio = bbio;
-		recover->raid_map = raid_map;
 		recover->map_length = mapped_length;
 
 		BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);
 
-		nmirrors = scrub_nr_raid_mirrors(bbio, raid_map);
+		nmirrors = scrub_nr_raid_mirrors(bbio);
 		for (mirror_index = 0; mirror_index < nmirrors;
 		     mirror_index++) {
 			struct scrub_block *sblock;
@@ -1420,7 +1413,7 @@ leave_nomem:
 			sblock->pagev[page_index] = page;
 			page->logical = logical;
 
-			scrub_stripe_index_and_offset(logical, raid_map,
+			scrub_stripe_index_and_offset(logical, bbio->raid_map,
 						      mapped_length,
 						      bbio->num_stripes -
 						      bbio->num_tgtdevs,
@@ -1469,7 +1462,7 @@ static void scrub_bio_wait_endio(struct bio *bio, int error)
 
 static inline int scrub_is_page_on_raid56(struct scrub_page *page)
 {
-	return page->recover && page->recover->raid_map;
+	return page->recover && page->recover->bbio->raid_map;
 }
 
 static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
@@ -1486,7 +1479,6 @@ static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
 	bio->bi_end_io = scrub_bio_wait_endio;
 
 	ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio,
-				    page->recover->raid_map,
 				    page->recover->map_length,
 				    page->mirror_num, 0);
 	if (ret)
@@ -2716,7 +2708,6 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
 	struct btrfs_raid_bio *rbio;
 	struct scrub_page *spage;
 	struct btrfs_bio *bbio = NULL;
-	u64 *raid_map = NULL;
 	u64 length;
 	int ret;
 
@@ -2727,8 +2718,8 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
 	length = sparity->logic_end - sparity->logic_start + 1;
 	ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE,
 			       sparity->logic_start,
-			       &length, &bbio, 0, &raid_map);
-	if (ret || !bbio || !raid_map)
+			       &length, &bbio, 0, 1);
+	if (ret || !bbio || !bbio->raid_map)
 		goto bbio_out;
 
 	bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
@@ -2740,8 +2731,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
 	bio->bi_end_io = scrub_parity_bio_endio;
 
 	rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio,
-					      raid_map, length,
-					      sparity->scrub_dev,
+					      length, sparity->scrub_dev,
 					      sparity->dbitmap,
 					      sparity->nsectors);
 	if (!rbio)
@@ -2759,7 +2749,6 @@ rbio_out:
 	bio_put(bio);
 bbio_out:
 	kfree(bbio);
-	kfree(raid_map);
 	bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
 		  sparity->nsectors);
 	spin_lock(&sctx->stat_lock);
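
The allocation side of this scheme lives in fs/btrfs/volumes.c and is outside this diffstat. As a hedged sketch (names follow the kernel, but treat the details here as an assumption), one buffer can carry the bbio, its stripes, the tgtdev map and the raid_map, so the error paths above need only the single kfree(bbio):

/* sketch: one allocation carries everything, one kfree() frees it all */
static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
{
	struct btrfs_bio *bbio;

	bbio = kzalloc(sizeof(struct btrfs_bio) +
		       sizeof(struct btrfs_bio_stripe) * total_stripes +
		       sizeof(int) * real_stripes +	/* tgtdev_map */
		       sizeof(u64) * real_stripes,	/* raid_map */
		       GFP_NOFS);
	if (!bbio)
		return NULL;

	/* aim the convenience pointers into the trailing storage */
	bbio->tgtdev_map = (int *)(bbio->stripes + total_stripes);
	bbio->raid_map = (u64 *)(bbio->tgtdev_map + real_stripes);
	return bbio;
}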