aboutsummaryrefslogtreecommitdiffstats
path: root/fs
diff options
context:
space:
mode:
authorStefan Behrens <sbehrens@giantdisaster.de>2012-11-02 08:26:57 -0400
committerJosef Bacik <jbacik@fusionio.com>2012-12-12 17:15:30 -0500
commita36cf8b8933e4a7a7f2f2cbc3c70b097e97f7fd1 (patch)
treed4948095c8c777d669503048213e3fe3bfdf62b5 /fs
parentd9d181c1ba7aa09a6d2698e8c7e75b515524d504 (diff)
Btrfs: remove the block device pointer from the scrub context struct
The block device is removed from the scrub context state structure. The scrub code as it is used for the device replace procedure reads the source data from wherever it is optimal. The source device might even be gone (disconnected, for instance due to a hardware failure). Or the drive can be so faulty that the device replace procedure tries to avoid access to the faulty source drive as much as possible, and only if all other mirrors are damaged, as a last resort, the source disk is accessed. The modified scrub code operates as if it would handle the source drive and thereby generates an exact copy of the source disk on the target disk, even if the source disk is not present at all. Therefore the block device pointer to the source disk is removed in the scrub context struct and moved into the lower level scope of scrub_bio, fixup and page structures where the block device context is known. Signed-off-by: Stefan Behrens <sbehrens@giantdisaster.de> Signed-off-by: Chris Mason <chris.mason@fusionio.com>
Diffstat (limited to 'fs')
-rw-r--r--fs/btrfs/scrub.c133
1 file changed, 73 insertions, 60 deletions
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 29c8aac5bda7..822c08a420c2 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -67,6 +67,7 @@ struct scrub_page {
67struct scrub_bio { 67struct scrub_bio {
68 int index; 68 int index;
69 struct scrub_ctx *sctx; 69 struct scrub_ctx *sctx;
70 struct btrfs_device *dev;
70 struct bio *bio; 71 struct bio *bio;
71 int err; 72 int err;
72 u64 logical; 73 u64 logical;
@@ -93,7 +94,7 @@ struct scrub_block {
93 94
94struct scrub_ctx { 95struct scrub_ctx {
95 struct scrub_bio *bios[SCRUB_BIOS_PER_CTX]; 96 struct scrub_bio *bios[SCRUB_BIOS_PER_CTX];
96 struct btrfs_device *dev; 97 struct btrfs_root *dev_root;
97 int first_free; 98 int first_free;
98 int curr; 99 int curr;
99 atomic_t in_flight; 100 atomic_t in_flight;
@@ -117,6 +118,7 @@ struct scrub_ctx {
117 118
118struct scrub_fixup_nodatasum { 119struct scrub_fixup_nodatasum {
119 struct scrub_ctx *sctx; 120 struct scrub_ctx *sctx;
121 struct btrfs_device *dev;
120 u64 logical; 122 u64 logical;
121 struct btrfs_root *root; 123 struct btrfs_root *root;
122 struct btrfs_work work; 124 struct btrfs_work work;
@@ -166,8 +168,8 @@ static void scrub_block_put(struct scrub_block *sblock);
166static int scrub_add_page_to_bio(struct scrub_ctx *sctx, 168static int scrub_add_page_to_bio(struct scrub_ctx *sctx,
167 struct scrub_page *spage); 169 struct scrub_page *spage);
168static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len, 170static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
169 u64 physical, u64 flags, u64 gen, int mirror_num, 171 u64 physical, struct btrfs_device *dev, u64 flags,
170 u8 *csum, int force); 172 u64 gen, int mirror_num, u8 *csum, int force);
171static void scrub_bio_end_io(struct bio *bio, int err); 173static void scrub_bio_end_io(struct bio *bio, int err);
172static void scrub_bio_end_io_worker(struct btrfs_work *work); 174static void scrub_bio_end_io_worker(struct btrfs_work *work);
173static void scrub_block_complete(struct scrub_block *sblock); 175static void scrub_block_complete(struct scrub_block *sblock);
@@ -228,9 +230,9 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev)
228 sctx = kzalloc(sizeof(*sctx), GFP_NOFS); 230 sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
229 if (!sctx) 231 if (!sctx)
230 goto nomem; 232 goto nomem;
231 sctx->dev = dev;
232 sctx->pages_per_bio = pages_per_bio; 233 sctx->pages_per_bio = pages_per_bio;
233 sctx->curr = -1; 234 sctx->curr = -1;
235 sctx->dev_root = dev->dev_root;
234 for (i = 0; i < SCRUB_BIOS_PER_CTX; ++i) { 236 for (i = 0; i < SCRUB_BIOS_PER_CTX; ++i) {
235 struct scrub_bio *sbio; 237 struct scrub_bio *sbio;
236 238
@@ -345,8 +347,8 @@ err:
345 347
346static void scrub_print_warning(const char *errstr, struct scrub_block *sblock) 348static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
347{ 349{
348 struct btrfs_device *dev = sblock->sctx->dev; 350 struct btrfs_device *dev;
349 struct btrfs_fs_info *fs_info = dev->dev_root->fs_info; 351 struct btrfs_fs_info *fs_info;
350 struct btrfs_path *path; 352 struct btrfs_path *path;
351 struct btrfs_key found_key; 353 struct btrfs_key found_key;
352 struct extent_buffer *eb; 354 struct extent_buffer *eb;
@@ -361,15 +363,18 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
361 const int bufsize = 4096; 363 const int bufsize = 4096;
362 int ret; 364 int ret;
363 365
366 WARN_ON(sblock->page_count < 1);
367 dev = sblock->pagev[0].dev;
368 fs_info = sblock->sctx->dev_root->fs_info;
369
364 path = btrfs_alloc_path(); 370 path = btrfs_alloc_path();
365 371
366 swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS); 372 swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
367 swarn.msg_buf = kmalloc(bufsize, GFP_NOFS); 373 swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
368 BUG_ON(sblock->page_count < 1);
369 swarn.sector = (sblock->pagev[0].physical) >> 9; 374 swarn.sector = (sblock->pagev[0].physical) >> 9;
370 swarn.logical = sblock->pagev[0].logical; 375 swarn.logical = sblock->pagev[0].logical;
371 swarn.errstr = errstr; 376 swarn.errstr = errstr;
372 swarn.dev = dev; 377 swarn.dev = NULL;
373 swarn.msg_bufsize = bufsize; 378 swarn.msg_bufsize = bufsize;
374 swarn.scratch_bufsize = bufsize; 379 swarn.scratch_bufsize = bufsize;
375 380
@@ -405,6 +410,7 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
405 } while (ret != 1); 410 } while (ret != 1);
406 } else { 411 } else {
407 swarn.path = path; 412 swarn.path = path;
413 swarn.dev = dev;
408 iterate_extent_inodes(fs_info, found_key.objectid, 414 iterate_extent_inodes(fs_info, found_key.objectid,
409 extent_item_pos, 1, 415 extent_item_pos, 1,
410 scrub_print_warning_inode, &swarn); 416 scrub_print_warning_inode, &swarn);
@@ -588,7 +594,7 @@ out:
588 printk_ratelimited_in_rcu(KERN_ERR 594 printk_ratelimited_in_rcu(KERN_ERR
589 "btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n", 595 "btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n",
590 (unsigned long long)fixup->logical, 596 (unsigned long long)fixup->logical,
591 rcu_str_deref(sctx->dev->name)); 597 rcu_str_deref(fixup->dev->name));
592 } 598 }
593 599
594 btrfs_free_path(path); 600 btrfs_free_path(path);
@@ -615,6 +621,7 @@ out:
615static int scrub_handle_errored_block(struct scrub_block *sblock_to_check) 621static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
616{ 622{
617 struct scrub_ctx *sctx = sblock_to_check->sctx; 623 struct scrub_ctx *sctx = sblock_to_check->sctx;
624 struct btrfs_device *dev;
618 struct btrfs_fs_info *fs_info; 625 struct btrfs_fs_info *fs_info;
619 u64 length; 626 u64 length;
620 u64 logical; 627 u64 logical;
@@ -633,7 +640,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
633 DEFAULT_RATELIMIT_BURST); 640 DEFAULT_RATELIMIT_BURST);
634 641
635 BUG_ON(sblock_to_check->page_count < 1); 642 BUG_ON(sblock_to_check->page_count < 1);
636 fs_info = sctx->dev->dev_root->fs_info; 643 fs_info = sctx->dev_root->fs_info;
637 length = sblock_to_check->page_count * PAGE_SIZE; 644 length = sblock_to_check->page_count * PAGE_SIZE;
638 logical = sblock_to_check->pagev[0].logical; 645 logical = sblock_to_check->pagev[0].logical;
639 generation = sblock_to_check->pagev[0].generation; 646 generation = sblock_to_check->pagev[0].generation;
@@ -643,6 +650,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
643 BTRFS_EXTENT_FLAG_DATA); 650 BTRFS_EXTENT_FLAG_DATA);
644 have_csum = sblock_to_check->pagev[0].have_csum; 651 have_csum = sblock_to_check->pagev[0].have_csum;
645 csum = sblock_to_check->pagev[0].csum; 652 csum = sblock_to_check->pagev[0].csum;
653 dev = sblock_to_check->pagev[0].dev;
646 654
647 /* 655 /*
648 * read all mirrors one after the other. This includes to 656 * read all mirrors one after the other. This includes to
@@ -682,8 +690,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
682 sctx->stat.read_errors++; 690 sctx->stat.read_errors++;
683 sctx->stat.uncorrectable_errors++; 691 sctx->stat.uncorrectable_errors++;
684 spin_unlock(&sctx->stat_lock); 692 spin_unlock(&sctx->stat_lock);
685 btrfs_dev_stat_inc_and_print(sctx->dev, 693 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
686 BTRFS_DEV_STAT_READ_ERRS);
687 goto out; 694 goto out;
688 } 695 }
689 696
@@ -695,8 +702,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
695 sctx->stat.read_errors++; 702 sctx->stat.read_errors++;
696 sctx->stat.uncorrectable_errors++; 703 sctx->stat.uncorrectable_errors++;
697 spin_unlock(&sctx->stat_lock); 704 spin_unlock(&sctx->stat_lock);
698 btrfs_dev_stat_inc_and_print(sctx->dev, 705 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
699 BTRFS_DEV_STAT_READ_ERRS);
700 goto out; 706 goto out;
701 } 707 }
702 BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS); 708 BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
@@ -710,8 +716,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
710 sctx->stat.read_errors++; 716 sctx->stat.read_errors++;
711 sctx->stat.uncorrectable_errors++; 717 sctx->stat.uncorrectable_errors++;
712 spin_unlock(&sctx->stat_lock); 718 spin_unlock(&sctx->stat_lock);
713 btrfs_dev_stat_inc_and_print(sctx->dev, 719 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
714 BTRFS_DEV_STAT_READ_ERRS);
715 goto out; 720 goto out;
716 } 721 }
717 722
@@ -738,15 +743,14 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
738 spin_unlock(&sctx->stat_lock); 743 spin_unlock(&sctx->stat_lock);
739 if (__ratelimit(&_rs)) 744 if (__ratelimit(&_rs))
740 scrub_print_warning("i/o error", sblock_to_check); 745 scrub_print_warning("i/o error", sblock_to_check);
741 btrfs_dev_stat_inc_and_print(sctx->dev, 746 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
742 BTRFS_DEV_STAT_READ_ERRS);
743 } else if (sblock_bad->checksum_error) { 747 } else if (sblock_bad->checksum_error) {
744 spin_lock(&sctx->stat_lock); 748 spin_lock(&sctx->stat_lock);
745 sctx->stat.csum_errors++; 749 sctx->stat.csum_errors++;
746 spin_unlock(&sctx->stat_lock); 750 spin_unlock(&sctx->stat_lock);
747 if (__ratelimit(&_rs)) 751 if (__ratelimit(&_rs))
748 scrub_print_warning("checksum error", sblock_to_check); 752 scrub_print_warning("checksum error", sblock_to_check);
749 btrfs_dev_stat_inc_and_print(sctx->dev, 753 btrfs_dev_stat_inc_and_print(dev,
750 BTRFS_DEV_STAT_CORRUPTION_ERRS); 754 BTRFS_DEV_STAT_CORRUPTION_ERRS);
751 } else if (sblock_bad->header_error) { 755 } else if (sblock_bad->header_error) {
752 spin_lock(&sctx->stat_lock); 756 spin_lock(&sctx->stat_lock);
@@ -756,10 +760,10 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
756 scrub_print_warning("checksum/header error", 760 scrub_print_warning("checksum/header error",
757 sblock_to_check); 761 sblock_to_check);
758 if (sblock_bad->generation_error) 762 if (sblock_bad->generation_error)
759 btrfs_dev_stat_inc_and_print(sctx->dev, 763 btrfs_dev_stat_inc_and_print(dev,
760 BTRFS_DEV_STAT_GENERATION_ERRS); 764 BTRFS_DEV_STAT_GENERATION_ERRS);
761 else 765 else
762 btrfs_dev_stat_inc_and_print(sctx->dev, 766 btrfs_dev_stat_inc_and_print(dev,
763 BTRFS_DEV_STAT_CORRUPTION_ERRS); 767 BTRFS_DEV_STAT_CORRUPTION_ERRS);
764 } 768 }
765 769
@@ -780,6 +784,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
780 if (!fixup_nodatasum) 784 if (!fixup_nodatasum)
781 goto did_not_correct_error; 785 goto did_not_correct_error;
782 fixup_nodatasum->sctx = sctx; 786 fixup_nodatasum->sctx = sctx;
787 fixup_nodatasum->dev = dev;
783 fixup_nodatasum->logical = logical; 788 fixup_nodatasum->logical = logical;
784 fixup_nodatasum->root = fs_info->extent_root; 789 fixup_nodatasum->root = fs_info->extent_root;
785 fixup_nodatasum->mirror_num = failed_mirror_index + 1; 790 fixup_nodatasum->mirror_num = failed_mirror_index + 1;
@@ -945,7 +950,7 @@ corrected_error:
945 printk_ratelimited_in_rcu(KERN_ERR 950 printk_ratelimited_in_rcu(KERN_ERR
946 "btrfs: fixed up error at logical %llu on dev %s\n", 951 "btrfs: fixed up error at logical %llu on dev %s\n",
947 (unsigned long long)logical, 952 (unsigned long long)logical,
948 rcu_str_deref(sctx->dev->name)); 953 rcu_str_deref(dev->name));
949 } 954 }
950 } else { 955 } else {
951did_not_correct_error: 956did_not_correct_error:
@@ -955,7 +960,7 @@ did_not_correct_error:
955 printk_ratelimited_in_rcu(KERN_ERR 960 printk_ratelimited_in_rcu(KERN_ERR
956 "btrfs: unable to fixup (regular) error at logical %llu on dev %s\n", 961 "btrfs: unable to fixup (regular) error at logical %llu on dev %s\n",
957 (unsigned long long)logical, 962 (unsigned long long)logical,
958 rcu_str_deref(sctx->dev->name)); 963 rcu_str_deref(dev->name));
959 } 964 }
960 965
961out: 966out:
@@ -1266,7 +1271,7 @@ static int scrub_checksum_data(struct scrub_block *sblock)
1266 void *buffer; 1271 void *buffer;
1267 u32 crc = ~(u32)0; 1272 u32 crc = ~(u32)0;
1268 int fail = 0; 1273 int fail = 0;
1269 struct btrfs_root *root = sctx->dev->dev_root; 1274 struct btrfs_root *root = sctx->dev_root;
1270 u64 len; 1275 u64 len;
1271 int index; 1276 int index;
1272 1277
@@ -1306,7 +1311,7 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock)
1306{ 1311{
1307 struct scrub_ctx *sctx = sblock->sctx; 1312 struct scrub_ctx *sctx = sblock->sctx;
1308 struct btrfs_header *h; 1313 struct btrfs_header *h;
1309 struct btrfs_root *root = sctx->dev->dev_root; 1314 struct btrfs_root *root = sctx->dev_root;
1310 struct btrfs_fs_info *fs_info = root->fs_info; 1315 struct btrfs_fs_info *fs_info = root->fs_info;
1311 u8 calculated_csum[BTRFS_CSUM_SIZE]; 1316 u8 calculated_csum[BTRFS_CSUM_SIZE];
1312 u8 on_disk_csum[BTRFS_CSUM_SIZE]; 1317 u8 on_disk_csum[BTRFS_CSUM_SIZE];
@@ -1378,7 +1383,7 @@ static int scrub_checksum_super(struct scrub_block *sblock)
1378{ 1383{
1379 struct btrfs_super_block *s; 1384 struct btrfs_super_block *s;
1380 struct scrub_ctx *sctx = sblock->sctx; 1385 struct scrub_ctx *sctx = sblock->sctx;
1381 struct btrfs_root *root = sctx->dev->dev_root; 1386 struct btrfs_root *root = sctx->dev_root;
1382 struct btrfs_fs_info *fs_info = root->fs_info; 1387 struct btrfs_fs_info *fs_info = root->fs_info;
1383 u8 calculated_csum[BTRFS_CSUM_SIZE]; 1388 u8 calculated_csum[BTRFS_CSUM_SIZE];
1384 u8 on_disk_csum[BTRFS_CSUM_SIZE]; 1389 u8 on_disk_csum[BTRFS_CSUM_SIZE];
@@ -1442,10 +1447,10 @@ static int scrub_checksum_super(struct scrub_block *sblock)
1442 ++sctx->stat.super_errors; 1447 ++sctx->stat.super_errors;
1443 spin_unlock(&sctx->stat_lock); 1448 spin_unlock(&sctx->stat_lock);
1444 if (fail_cor) 1449 if (fail_cor)
1445 btrfs_dev_stat_inc_and_print(sctx->dev, 1450 btrfs_dev_stat_inc_and_print(sblock->pagev[0].dev,
1446 BTRFS_DEV_STAT_CORRUPTION_ERRS); 1451 BTRFS_DEV_STAT_CORRUPTION_ERRS);
1447 else 1452 else
1448 btrfs_dev_stat_inc_and_print(sctx->dev, 1453 btrfs_dev_stat_inc_and_print(sblock->pagev[0].dev,
1449 BTRFS_DEV_STAT_GENERATION_ERRS); 1454 BTRFS_DEV_STAT_GENERATION_ERRS);
1450 } 1455 }
1451 1456
@@ -1513,6 +1518,7 @@ again:
1513 1518
1514 sbio->physical = spage->physical; 1519 sbio->physical = spage->physical;
1515 sbio->logical = spage->logical; 1520 sbio->logical = spage->logical;
1521 sbio->dev = spage->dev;
1516 bio = sbio->bio; 1522 bio = sbio->bio;
1517 if (!bio) { 1523 if (!bio) {
1518 bio = bio_alloc(GFP_NOFS, sctx->pages_per_bio); 1524 bio = bio_alloc(GFP_NOFS, sctx->pages_per_bio);
@@ -1523,13 +1529,14 @@ again:
1523 1529
1524 bio->bi_private = sbio; 1530 bio->bi_private = sbio;
1525 bio->bi_end_io = scrub_bio_end_io; 1531 bio->bi_end_io = scrub_bio_end_io;
1526 bio->bi_bdev = sctx->dev->bdev; 1532 bio->bi_bdev = sbio->dev->bdev;
1527 bio->bi_sector = spage->physical >> 9; 1533 bio->bi_sector = sbio->physical >> 9;
1528 sbio->err = 0; 1534 sbio->err = 0;
1529 } else if (sbio->physical + sbio->page_count * PAGE_SIZE != 1535 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1530 spage->physical || 1536 spage->physical ||
1531 sbio->logical + sbio->page_count * PAGE_SIZE != 1537 sbio->logical + sbio->page_count * PAGE_SIZE !=
1532 spage->logical) { 1538 spage->logical ||
1539 sbio->dev != spage->dev) {
1533 scrub_submit(sctx); 1540 scrub_submit(sctx);
1534 goto again; 1541 goto again;
1535 } 1542 }
@@ -1556,8 +1563,8 @@ again:
1556} 1563}
1557 1564
1558static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len, 1565static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
1559 u64 physical, u64 flags, u64 gen, int mirror_num, 1566 u64 physical, struct btrfs_device *dev, u64 flags,
1560 u8 *csum, int force) 1567 u64 gen, int mirror_num, u8 *csum, int force)
1561{ 1568{
1562 struct scrub_block *sblock; 1569 struct scrub_block *sblock;
1563 int index; 1570 int index;
@@ -1593,7 +1600,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
1593 return -ENOMEM; 1600 return -ENOMEM;
1594 } 1601 }
1595 spage->sblock = sblock; 1602 spage->sblock = sblock;
1596 spage->dev = sctx->dev; 1603 spage->dev = dev;
1597 spage->flags = flags; 1604 spage->flags = flags;
1598 spage->generation = gen; 1605 spage->generation = gen;
1599 spage->logical = logical; 1606 spage->logical = logical;
@@ -1634,8 +1641,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
1634static void scrub_bio_end_io(struct bio *bio, int err) 1641static void scrub_bio_end_io(struct bio *bio, int err)
1635{ 1642{
1636 struct scrub_bio *sbio = bio->bi_private; 1643 struct scrub_bio *sbio = bio->bi_private;
1637 struct scrub_ctx *sctx = sbio->sctx; 1644 struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
1638 struct btrfs_fs_info *fs_info = sctx->dev->dev_root->fs_info;
1639 1645
1640 sbio->err = err; 1646 sbio->err = err;
1641 sbio->bio = bio; 1647 sbio->bio = bio;
@@ -1728,7 +1734,8 @@ static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
1728 1734
1729/* scrub extent tries to collect up to 64 kB for each bio */ 1735/* scrub extent tries to collect up to 64 kB for each bio */
1730static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len, 1736static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
1731 u64 physical, u64 flags, u64 gen, int mirror_num) 1737 u64 physical, struct btrfs_device *dev, u64 flags,
1738 u64 gen, int mirror_num)
1732{ 1739{
1733 int ret; 1740 int ret;
1734 u8 csum[BTRFS_CSUM_SIZE]; 1741 u8 csum[BTRFS_CSUM_SIZE];
@@ -1762,7 +1769,7 @@ static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
1762 if (have_csum == 0) 1769 if (have_csum == 0)
1763 ++sctx->stat.no_csum; 1770 ++sctx->stat.no_csum;
1764 } 1771 }
1765 ret = scrub_pages(sctx, logical, l, physical, flags, gen, 1772 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
1766 mirror_num, have_csum ? csum : NULL, 0); 1773 mirror_num, have_csum ? csum : NULL, 0);
1767 if (ret) 1774 if (ret)
1768 return ret; 1775 return ret;
@@ -1774,10 +1781,12 @@ static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
1774} 1781}
1775 1782
1776static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, 1783static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
1777 struct map_lookup *map, int num, u64 base, u64 length) 1784 struct map_lookup *map,
1785 struct btrfs_device *scrub_dev,
1786 int num, u64 base, u64 length)
1778{ 1787{
1779 struct btrfs_path *path; 1788 struct btrfs_path *path;
1780 struct btrfs_fs_info *fs_info = sctx->dev->dev_root->fs_info; 1789 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
1781 struct btrfs_root *root = fs_info->extent_root; 1790 struct btrfs_root *root = fs_info->extent_root;
1782 struct btrfs_root *csum_root = fs_info->csum_root; 1791 struct btrfs_root *csum_root = fs_info->csum_root;
1783 struct btrfs_extent_item *extent; 1792 struct btrfs_extent_item *extent;
@@ -1797,7 +1806,6 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
1797 struct reada_control *reada2; 1806 struct reada_control *reada2;
1798 struct btrfs_key key_start; 1807 struct btrfs_key key_start;
1799 struct btrfs_key key_end; 1808 struct btrfs_key key_end;
1800
1801 u64 increment = map->stripe_len; 1809 u64 increment = map->stripe_len;
1802 u64 offset; 1810 u64 offset;
1803 1811
@@ -2006,7 +2014,8 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2006 2014
2007 ret = scrub_extent(sctx, key.objectid, key.offset, 2015 ret = scrub_extent(sctx, key.objectid, key.offset,
2008 key.objectid - logical + physical, 2016 key.objectid - logical + physical,
2009 flags, generation, mirror_num); 2017 scrub_dev, flags, generation,
2018 mirror_num);
2010 if (ret) 2019 if (ret)
2011 goto out; 2020 goto out;
2012 2021
@@ -2030,11 +2039,13 @@ out:
2030} 2039}
2031 2040
2032static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx, 2041static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
2033 u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 length, 2042 struct btrfs_device *scrub_dev,
2034 u64 dev_offset) 2043 u64 chunk_tree, u64 chunk_objectid,
2044 u64 chunk_offset, u64 length,
2045 u64 dev_offset)
2035{ 2046{
2036 struct btrfs_mapping_tree *map_tree = 2047 struct btrfs_mapping_tree *map_tree =
2037 &sctx->dev->dev_root->fs_info->mapping_tree; 2048 &sctx->dev_root->fs_info->mapping_tree;
2038 struct map_lookup *map; 2049 struct map_lookup *map;
2039 struct extent_map *em; 2050 struct extent_map *em;
2040 int i; 2051 int i;
@@ -2055,9 +2066,10 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
2055 goto out; 2066 goto out;
2056 2067
2057 for (i = 0; i < map->num_stripes; ++i) { 2068 for (i = 0; i < map->num_stripes; ++i) {
2058 if (map->stripes[i].dev == sctx->dev && 2069 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
2059 map->stripes[i].physical == dev_offset) { 2070 map->stripes[i].physical == dev_offset) {
2060 ret = scrub_stripe(sctx, map, i, chunk_offset, length); 2071 ret = scrub_stripe(sctx, map, scrub_dev, i,
2072 chunk_offset, length);
2061 if (ret) 2073 if (ret)
2062 goto out; 2074 goto out;
2063 } 2075 }
@@ -2069,11 +2081,12 @@ out:
2069} 2081}
2070 2082
2071static noinline_for_stack 2083static noinline_for_stack
2072int scrub_enumerate_chunks(struct scrub_ctx *sctx, u64 start, u64 end) 2084int scrub_enumerate_chunks(struct scrub_ctx *sctx,
2085 struct btrfs_device *scrub_dev, u64 start, u64 end)
2073{ 2086{
2074 struct btrfs_dev_extent *dev_extent = NULL; 2087 struct btrfs_dev_extent *dev_extent = NULL;
2075 struct btrfs_path *path; 2088 struct btrfs_path *path;
2076 struct btrfs_root *root = sctx->dev->dev_root; 2089 struct btrfs_root *root = sctx->dev_root;
2077 struct btrfs_fs_info *fs_info = root->fs_info; 2090 struct btrfs_fs_info *fs_info = root->fs_info;
2078 u64 length; 2091 u64 length;
2079 u64 chunk_tree; 2092 u64 chunk_tree;
@@ -2094,11 +2107,10 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, u64 start, u64 end)
2094 path->search_commit_root = 1; 2107 path->search_commit_root = 1;
2095 path->skip_locking = 1; 2108 path->skip_locking = 1;
2096 2109
2097 key.objectid = sctx->dev->devid; 2110 key.objectid = scrub_dev->devid;
2098 key.offset = 0ull; 2111 key.offset = 0ull;
2099 key.type = BTRFS_DEV_EXTENT_KEY; 2112 key.type = BTRFS_DEV_EXTENT_KEY;
2100 2113
2101
2102 while (1) { 2114 while (1) {
2103 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2115 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2104 if (ret < 0) 2116 if (ret < 0)
@@ -2117,7 +2129,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, u64 start, u64 end)
2117 2129
2118 btrfs_item_key_to_cpu(l, &found_key, slot); 2130 btrfs_item_key_to_cpu(l, &found_key, slot);
2119 2131
2120 if (found_key.objectid != sctx->dev->devid) 2132 if (found_key.objectid != scrub_dev->devid)
2121 break; 2133 break;
2122 2134
2123 if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY) 2135 if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
@@ -2151,7 +2163,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, u64 start, u64 end)
2151 ret = -ENOENT; 2163 ret = -ENOENT;
2152 break; 2164 break;
2153 } 2165 }
2154 ret = scrub_chunk(sctx, chunk_tree, chunk_objectid, 2166 ret = scrub_chunk(sctx, scrub_dev, chunk_tree, chunk_objectid,
2155 chunk_offset, length, found_key.offset); 2167 chunk_offset, length, found_key.offset);
2156 btrfs_put_block_group(cache); 2168 btrfs_put_block_group(cache);
2157 if (ret) 2169 if (ret)
@@ -2170,14 +2182,14 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, u64 start, u64 end)
2170 return ret < 0 ? ret : 0; 2182 return ret < 0 ? ret : 0;
2171} 2183}
2172 2184
2173static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx) 2185static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
2186 struct btrfs_device *scrub_dev)
2174{ 2187{
2175 int i; 2188 int i;
2176 u64 bytenr; 2189 u64 bytenr;
2177 u64 gen; 2190 u64 gen;
2178 int ret; 2191 int ret;
2179 struct btrfs_device *device = sctx->dev; 2192 struct btrfs_root *root = sctx->dev_root;
2180 struct btrfs_root *root = device->dev_root;
2181 2193
2182 if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) 2194 if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
2183 return -EIO; 2195 return -EIO;
@@ -2186,11 +2198,12 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx)
2186 2198
2187 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { 2199 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
2188 bytenr = btrfs_sb_offset(i); 2200 bytenr = btrfs_sb_offset(i);
2189 if (bytenr + BTRFS_SUPER_INFO_SIZE > device->total_bytes) 2201 if (bytenr + BTRFS_SUPER_INFO_SIZE > scrub_dev->total_bytes)
2190 break; 2202 break;
2191 2203
2192 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr, 2204 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
2193 BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1); 2205 scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
2206 NULL, 1);
2194 if (ret) 2207 if (ret)
2195 return ret; 2208 return ret;
2196 } 2209 }
@@ -2317,11 +2330,11 @@ int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
2317 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); 2330 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2318 2331
2319 down_read(&fs_info->scrub_super_lock); 2332 down_read(&fs_info->scrub_super_lock);
2320 ret = scrub_supers(sctx); 2333 ret = scrub_supers(sctx, dev);
2321 up_read(&fs_info->scrub_super_lock); 2334 up_read(&fs_info->scrub_super_lock);
2322 2335
2323 if (!ret) 2336 if (!ret)
2324 ret = scrub_enumerate_chunks(sctx, start, end); 2337 ret = scrub_enumerate_chunks(sctx, dev, start, end);
2325 2338
2326 wait_event(sctx->list_wait, atomic_read(&sctx->in_flight) == 0); 2339 wait_event(sctx->list_wait, atomic_read(&sctx->in_flight) == 0);
2327 atomic_dec(&fs_info->scrubs_running); 2340 atomic_dec(&fs_info->scrubs_running);