author     Stefan Behrens <sbehrens@giantdisaster.de>  2012-11-02 04:58:09 -0400
committer  Josef Bacik <jbacik@fusionio.com>  2012-12-12 17:15:29 -0500
commit     d9d181c1ba7aa09a6d2698e8c7e75b515524d504 (patch)
tree       a235f7b7f23708b0d7bd29de49b819ea64c262fc /fs
parent     d25628bdd66aedd6e07729d8dc6c8ee846d66d72 (diff)
Btrfs: rename the scrub context structure
The device replace procedure makes use of the scrub code. The scrub code is the most efficient way to read the allocated data of a disk: it reads sequentially in order to avoid disk head movements, it skips unallocated blocks, it uses read-ahead mechanisms, and it contains all the code to detect and repair defects.

This commit is a first preparation step to adapt the scrub code to be shareable for the device replace procedure. The block device in the scrub context state structure used to be the source block device; it will be removed in a later step. The scrub code as it is used for the device replace procedure reads the source data from wherever it is optimal. The source device might even be gone (disconnected, for instance due to a hardware failure), or the drive can be so faulty that the device replace procedure tries to avoid access to the faulty source drive as much as possible, and only if all other mirrors are damaged, as a last resort, the source disk is accessed. The modified scrub code operates as if it were handling the source drive and thereby generates an exact copy of the source disk on the target disk, even if the source disk is not present at all. Therefore the block device pointer to the source disk is removed in a later patch, and the context structure is renamed (this is the goal of the current patch) to reflect that no source block device scope is there anymore.

Summary: this first preparation step consists of a textual substitution of the term "dev" to the term "ctx" wherever the scrub context is used.

Signed-off-by: Stefan Behrens <sbehrens@giantdisaster.de>
Signed-off-by: Chris Mason <chris.mason@fusionio.com>
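For orientation while reading the hunks below, this is roughly what the renamed context structure looks like after the patch. The sketch is reconstructed only from the fields that are actually visible in this diff (the hunk at the struct definition elides most of the body), and the field types are inferred from how the fields are used in scrub_setup_ctx(); treat the exact types and member order as assumptions, not as the verbatim kernel definition.

    /* Hedged sketch of the renamed structure, pieced together from the
     * hunks below. Types are inferred from usage (atomic_set(),
     * spin_lock_init(), init_waitqueue_head(), ...); members not touched
     * by this patch are omitted.
     */
    struct scrub_ctx {
            struct scrub_bio        *bios[SCRUB_BIOS_PER_CTX];
            struct btrfs_device     *dev;           /* still the source device;
                                                       removed in a later patch */
            int                     first_free;     /* head of the free-bio list */
            int                     curr;           /* bio being filled, -1 if none */
            int                     pages_per_bio;
            u32                     nodesize;
            u32                     leafsize;
            u32                     sectorsize;
            atomic_t                in_flight;
            atomic_t                fixup_cnt;
            atomic_t                cancel_req;
            int                     readonly;
            u16                     csum_size;
            struct list_head        csum_list;
            spinlock_t              list_lock;
            wait_queue_head_t       list_wait;
            struct btrfs_scrub_progress stat;       /* scrub statistics */
            spinlock_t              stat_lock;
    };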
Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/scrub.c   | 504
-rw-r--r--  fs/btrfs/volumes.h |   2
2 files changed, 253 insertions(+), 253 deletions(-)
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 27892f67e69b..29c8aac5bda7 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -42,10 +42,10 @@
  */

 struct scrub_block;
-struct scrub_dev;
+struct scrub_ctx;

 #define SCRUB_PAGES_PER_BIO 16 /* 64k per bio */
-#define SCRUB_BIOS_PER_DEV 16 /* 1 MB per device in flight */
+#define SCRUB_BIOS_PER_CTX 16 /* 1 MB per device in flight */
 #define SCRUB_MAX_PAGES_PER_BLOCK 16 /* 64k per node/leaf/sector */

 struct scrub_page {
@@ -66,7 +66,7 @@ struct scrub_page {

 struct scrub_bio {
         int index;
-        struct scrub_dev *sdev;
+        struct scrub_ctx *sctx;
         struct bio *bio;
         int err;
         u64 logical;
@@ -82,7 +82,7 @@ struct scrub_block {
         int page_count;
         atomic_t outstanding_pages;
         atomic_t ref_count; /* free mem on transition to zero */
-        struct scrub_dev *sdev;
+        struct scrub_ctx *sctx;
         struct {
                 unsigned int header_error:1;
                 unsigned int checksum_error:1;
@@ -91,8 +91,8 @@ struct scrub_block {
         };
 };

-struct scrub_dev {
-        struct scrub_bio *bios[SCRUB_BIOS_PER_DEV];
+struct scrub_ctx {
+        struct scrub_bio *bios[SCRUB_BIOS_PER_CTX];
         struct btrfs_device *dev;
         int first_free;
         int curr;
@@ -116,7 +116,7 @@ struct scrub_dev {
 };

 struct scrub_fixup_nodatasum {
-        struct scrub_dev *sdev;
+        struct scrub_ctx *sctx;
         u64 logical;
         struct btrfs_root *root;
         struct btrfs_work work;
@@ -138,7 +138,7 @@ struct scrub_warning {


 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
-static int scrub_setup_recheck_block(struct scrub_dev *sdev,
+static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
                                      struct btrfs_mapping_tree *map_tree,
                                      u64 length, u64 logical,
                                      struct scrub_block *sblock);
@@ -163,9 +163,9 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock);
 static int scrub_checksum_super(struct scrub_block *sblock);
 static void scrub_block_get(struct scrub_block *sblock);
 static void scrub_block_put(struct scrub_block *sblock);
-static int scrub_add_page_to_bio(struct scrub_dev *sdev,
+static int scrub_add_page_to_bio(struct scrub_ctx *sctx,
                                  struct scrub_page *spage);
-static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
+static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
                        u64 physical, u64 flags, u64 gen, int mirror_num,
                        u8 *csum, int force);
 static void scrub_bio_end_io(struct bio *bio, int err);
@@ -173,27 +173,27 @@ static void scrub_bio_end_io_worker(struct btrfs_work *work);
 static void scrub_block_complete(struct scrub_block *sblock);


-static void scrub_free_csums(struct scrub_dev *sdev)
+static void scrub_free_csums(struct scrub_ctx *sctx)
 {
-        while (!list_empty(&sdev->csum_list)) {
+        while (!list_empty(&sctx->csum_list)) {
                 struct btrfs_ordered_sum *sum;
-                sum = list_first_entry(&sdev->csum_list,
+                sum = list_first_entry(&sctx->csum_list,
                                        struct btrfs_ordered_sum, list);
                 list_del(&sum->list);
                 kfree(sum);
         }
 }

-static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev)
+static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
 {
         int i;

-        if (!sdev)
+        if (!sctx)
                 return;

         /* this can happen when scrub is cancelled */
-        if (sdev->curr != -1) {
-                struct scrub_bio *sbio = sdev->bios[sdev->curr];
+        if (sctx->curr != -1) {
+                struct scrub_bio *sbio = sctx->bios[sctx->curr];

                 for (i = 0; i < sbio->page_count; i++) {
                         BUG_ON(!sbio->pagev[i]);
@@ -203,69 +203,69 @@ static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev)
                 bio_put(sbio->bio);
         }

-        for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
-                struct scrub_bio *sbio = sdev->bios[i];
+        for (i = 0; i < SCRUB_BIOS_PER_CTX; ++i) {
+                struct scrub_bio *sbio = sctx->bios[i];

                 if (!sbio)
                         break;
                 kfree(sbio);
         }

-        scrub_free_csums(sdev);
-        kfree(sdev);
+        scrub_free_csums(sctx);
+        kfree(sctx);
 }

 static noinline_for_stack
-struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
+struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev)
 {
-        struct scrub_dev *sdev;
+        struct scrub_ctx *sctx;
         int i;
         struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
         int pages_per_bio;

         pages_per_bio = min_t(int, SCRUB_PAGES_PER_BIO,
                               bio_get_nr_vecs(dev->bdev));
-        sdev = kzalloc(sizeof(*sdev), GFP_NOFS);
-        if (!sdev)
+        sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
+        if (!sctx)
                 goto nomem;
-        sdev->dev = dev;
-        sdev->pages_per_bio = pages_per_bio;
-        sdev->curr = -1;
-        for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
+        sctx->dev = dev;
+        sctx->pages_per_bio = pages_per_bio;
+        sctx->curr = -1;
+        for (i = 0; i < SCRUB_BIOS_PER_CTX; ++i) {
                 struct scrub_bio *sbio;

                 sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
                 if (!sbio)
                         goto nomem;
-                sdev->bios[i] = sbio;
+                sctx->bios[i] = sbio;

                 sbio->index = i;
-                sbio->sdev = sdev;
+                sbio->sctx = sctx;
                 sbio->page_count = 0;
                 sbio->work.func = scrub_bio_end_io_worker;

-                if (i != SCRUB_BIOS_PER_DEV-1)
-                        sdev->bios[i]->next_free = i + 1;
+                if (i != SCRUB_BIOS_PER_CTX - 1)
+                        sctx->bios[i]->next_free = i + 1;
                 else
-                        sdev->bios[i]->next_free = -1;
+                        sctx->bios[i]->next_free = -1;
         }
-        sdev->first_free = 0;
-        sdev->nodesize = dev->dev_root->nodesize;
-        sdev->leafsize = dev->dev_root->leafsize;
-        sdev->sectorsize = dev->dev_root->sectorsize;
-        atomic_set(&sdev->in_flight, 0);
-        atomic_set(&sdev->fixup_cnt, 0);
-        atomic_set(&sdev->cancel_req, 0);
-        sdev->csum_size = btrfs_super_csum_size(fs_info->super_copy);
-        INIT_LIST_HEAD(&sdev->csum_list);
+        sctx->first_free = 0;
+        sctx->nodesize = dev->dev_root->nodesize;
+        sctx->leafsize = dev->dev_root->leafsize;
+        sctx->sectorsize = dev->dev_root->sectorsize;
+        atomic_set(&sctx->in_flight, 0);
+        atomic_set(&sctx->fixup_cnt, 0);
+        atomic_set(&sctx->cancel_req, 0);
+        sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
+        INIT_LIST_HEAD(&sctx->csum_list);

-        spin_lock_init(&sdev->list_lock);
-        spin_lock_init(&sdev->stat_lock);
-        init_waitqueue_head(&sdev->list_wait);
-        return sdev;
+        spin_lock_init(&sctx->list_lock);
+        spin_lock_init(&sctx->stat_lock);
+        init_waitqueue_head(&sctx->list_wait);
+        return sctx;

 nomem:
-        scrub_free_dev(sdev);
+        scrub_free_ctx(sctx);
         return ERR_PTR(-ENOMEM);
 }

@@ -345,7 +345,7 @@ err:

 static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
 {
-        struct btrfs_device *dev = sblock->sdev->dev;
+        struct btrfs_device *dev = sblock->sctx->dev;
         struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
         struct btrfs_path *path;
         struct btrfs_key found_key;
@@ -530,21 +530,21 @@ static void scrub_fixup_nodatasum(struct btrfs_work *work)
 {
         int ret;
         struct scrub_fixup_nodatasum *fixup;
-        struct scrub_dev *sdev;
+        struct scrub_ctx *sctx;
         struct btrfs_trans_handle *trans = NULL;
         struct btrfs_fs_info *fs_info;
         struct btrfs_path *path;
         int uncorrectable = 0;

         fixup = container_of(work, struct scrub_fixup_nodatasum, work);
-        sdev = fixup->sdev;
+        sctx = fixup->sctx;
         fs_info = fixup->root->fs_info;

         path = btrfs_alloc_path();
         if (!path) {
-                spin_lock(&sdev->stat_lock);
-                ++sdev->stat.malloc_errors;
-                spin_unlock(&sdev->stat_lock);
+                spin_lock(&sctx->stat_lock);
+                ++sctx->stat.malloc_errors;
+                spin_unlock(&sctx->stat_lock);
                 uncorrectable = 1;
                 goto out;
         }
@@ -573,22 +573,22 @@ static void scrub_fixup_nodatasum(struct btrfs_work *work)
         }
         WARN_ON(ret != 1);

-        spin_lock(&sdev->stat_lock);
-        ++sdev->stat.corrected_errors;
-        spin_unlock(&sdev->stat_lock);
+        spin_lock(&sctx->stat_lock);
+        ++sctx->stat.corrected_errors;
+        spin_unlock(&sctx->stat_lock);

 out:
         if (trans && !IS_ERR(trans))
                 btrfs_end_transaction(trans, fixup->root);
         if (uncorrectable) {
-                spin_lock(&sdev->stat_lock);
-                ++sdev->stat.uncorrectable_errors;
-                spin_unlock(&sdev->stat_lock);
+                spin_lock(&sctx->stat_lock);
+                ++sctx->stat.uncorrectable_errors;
+                spin_unlock(&sctx->stat_lock);

                 printk_ratelimited_in_rcu(KERN_ERR
                         "btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n",
                         (unsigned long long)fixup->logical,
-                        rcu_str_deref(sdev->dev->name));
+                        rcu_str_deref(sctx->dev->name));
         }

         btrfs_free_path(path);
@@ -599,9 +599,9 @@ out:
         atomic_dec(&fs_info->scrubs_running);
         atomic_dec(&fs_info->scrubs_paused);
         mutex_unlock(&fs_info->scrub_lock);
-        atomic_dec(&sdev->fixup_cnt);
+        atomic_dec(&sctx->fixup_cnt);
         wake_up(&fs_info->scrub_pause_wait);
-        wake_up(&sdev->list_wait);
+        wake_up(&sctx->list_wait);
 }

 /*
@@ -614,7 +614,7 @@ out:
  */
 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 {
-        struct scrub_dev *sdev = sblock_to_check->sdev;
+        struct scrub_ctx *sctx = sblock_to_check->sctx;
         struct btrfs_fs_info *fs_info;
         u64 length;
         u64 logical;
@@ -633,7 +633,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
                                       DEFAULT_RATELIMIT_BURST);

         BUG_ON(sblock_to_check->page_count < 1);
-        fs_info = sdev->dev->dev_root->fs_info;
+        fs_info = sctx->dev->dev_root->fs_info;
         length = sblock_to_check->page_count * PAGE_SIZE;
         logical = sblock_to_check->pagev[0].logical;
         generation = sblock_to_check->pagev[0].generation;
@@ -677,25 +677,25 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
                                       sizeof(*sblocks_for_recheck),
                                       GFP_NOFS);
         if (!sblocks_for_recheck) {
-                spin_lock(&sdev->stat_lock);
-                sdev->stat.malloc_errors++;
-                sdev->stat.read_errors++;
-                sdev->stat.uncorrectable_errors++;
-                spin_unlock(&sdev->stat_lock);
-                btrfs_dev_stat_inc_and_print(sdev->dev,
+                spin_lock(&sctx->stat_lock);
+                sctx->stat.malloc_errors++;
+                sctx->stat.read_errors++;
+                sctx->stat.uncorrectable_errors++;
+                spin_unlock(&sctx->stat_lock);
+                btrfs_dev_stat_inc_and_print(sctx->dev,
                                              BTRFS_DEV_STAT_READ_ERRS);
                 goto out;
         }

         /* setup the context, map the logical blocks and alloc the pages */
-        ret = scrub_setup_recheck_block(sdev, &fs_info->mapping_tree, length,
+        ret = scrub_setup_recheck_block(sctx, &fs_info->mapping_tree, length,
                                         logical, sblocks_for_recheck);
         if (ret) {
-                spin_lock(&sdev->stat_lock);
-                sdev->stat.read_errors++;
-                sdev->stat.uncorrectable_errors++;
-                spin_unlock(&sdev->stat_lock);
-                btrfs_dev_stat_inc_and_print(sdev->dev,
+                spin_lock(&sctx->stat_lock);
+                sctx->stat.read_errors++;
+                sctx->stat.uncorrectable_errors++;
+                spin_unlock(&sctx->stat_lock);
+                btrfs_dev_stat_inc_and_print(sctx->dev,
                                              BTRFS_DEV_STAT_READ_ERRS);
                 goto out;
         }
@@ -704,13 +704,13 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)

         /* build and submit the bios for the failed mirror, check checksums */
         ret = scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
-                                  csum, generation, sdev->csum_size);
+                                  csum, generation, sctx->csum_size);
         if (ret) {
-                spin_lock(&sdev->stat_lock);
-                sdev->stat.read_errors++;
-                sdev->stat.uncorrectable_errors++;
-                spin_unlock(&sdev->stat_lock);
-                btrfs_dev_stat_inc_and_print(sdev->dev,
+                spin_lock(&sctx->stat_lock);
+                sctx->stat.read_errors++;
+                sctx->stat.uncorrectable_errors++;
+                spin_unlock(&sctx->stat_lock);
+                btrfs_dev_stat_inc_and_print(sctx->dev,
                                              BTRFS_DEV_STAT_READ_ERRS);
                 goto out;
         }
@@ -725,45 +725,45 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
                  * different bio (usually one of the two latter cases is
                  * the cause)
                  */
-                spin_lock(&sdev->stat_lock);
-                sdev->stat.unverified_errors++;
-                spin_unlock(&sdev->stat_lock);
+                spin_lock(&sctx->stat_lock);
+                sctx->stat.unverified_errors++;
+                spin_unlock(&sctx->stat_lock);

                 goto out;
         }

         if (!sblock_bad->no_io_error_seen) {
-                spin_lock(&sdev->stat_lock);
-                sdev->stat.read_errors++;
-                spin_unlock(&sdev->stat_lock);
+                spin_lock(&sctx->stat_lock);
+                sctx->stat.read_errors++;
+                spin_unlock(&sctx->stat_lock);
                 if (__ratelimit(&_rs))
                         scrub_print_warning("i/o error", sblock_to_check);
-                btrfs_dev_stat_inc_and_print(sdev->dev,
+                btrfs_dev_stat_inc_and_print(sctx->dev,
                                              BTRFS_DEV_STAT_READ_ERRS);
         } else if (sblock_bad->checksum_error) {
-                spin_lock(&sdev->stat_lock);
-                sdev->stat.csum_errors++;
-                spin_unlock(&sdev->stat_lock);
+                spin_lock(&sctx->stat_lock);
+                sctx->stat.csum_errors++;
+                spin_unlock(&sctx->stat_lock);
                 if (__ratelimit(&_rs))
                         scrub_print_warning("checksum error", sblock_to_check);
-                btrfs_dev_stat_inc_and_print(sdev->dev,
+                btrfs_dev_stat_inc_and_print(sctx->dev,
                                              BTRFS_DEV_STAT_CORRUPTION_ERRS);
         } else if (sblock_bad->header_error) {
-                spin_lock(&sdev->stat_lock);
-                sdev->stat.verify_errors++;
-                spin_unlock(&sdev->stat_lock);
+                spin_lock(&sctx->stat_lock);
+                sctx->stat.verify_errors++;
+                spin_unlock(&sctx->stat_lock);
                 if (__ratelimit(&_rs))
                         scrub_print_warning("checksum/header error",
                                             sblock_to_check);
                 if (sblock_bad->generation_error)
-                        btrfs_dev_stat_inc_and_print(sdev->dev,
+                        btrfs_dev_stat_inc_and_print(sctx->dev,
                                 BTRFS_DEV_STAT_GENERATION_ERRS);
                 else
-                        btrfs_dev_stat_inc_and_print(sdev->dev,
+                        btrfs_dev_stat_inc_and_print(sctx->dev,
                                 BTRFS_DEV_STAT_CORRUPTION_ERRS);
         }

-        if (sdev->readonly)
+        if (sctx->readonly)
                 goto did_not_correct_error;

         if (!is_metadata && !have_csum) {
@@ -779,7 +779,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
         fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
         if (!fixup_nodatasum)
                 goto did_not_correct_error;
-        fixup_nodatasum->sdev = sdev;
+        fixup_nodatasum->sctx = sctx;
         fixup_nodatasum->logical = logical;
         fixup_nodatasum->root = fs_info->extent_root;
         fixup_nodatasum->mirror_num = failed_mirror_index + 1;
@@ -796,7 +796,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
         atomic_inc(&fs_info->scrubs_running);
         atomic_inc(&fs_info->scrubs_paused);
         mutex_unlock(&fs_info->scrub_lock);
-        atomic_inc(&sdev->fixup_cnt);
+        atomic_inc(&sctx->fixup_cnt);
         fixup_nodatasum->work.func = scrub_fixup_nodatasum;
         btrfs_queue_worker(&fs_info->scrub_workers,
                            &fixup_nodatasum->work);
@@ -818,7 +818,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
                 ret = scrub_recheck_block(fs_info,
                                           sblocks_for_recheck + mirror_index,
                                           is_metadata, have_csum, csum,
-                                          generation, sdev->csum_size);
+                                          generation, sctx->csum_size);
                 if (ret)
                         goto did_not_correct_error;
         }
@@ -930,7 +930,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
                  */
                 ret = scrub_recheck_block(fs_info, sblock_bad,
                                           is_metadata, have_csum, csum,
-                                          generation, sdev->csum_size);
+                                          generation, sctx->csum_size);
                 if (!ret && !sblock_bad->header_error &&
                     !sblock_bad->checksum_error &&
                     sblock_bad->no_io_error_seen)
@@ -939,23 +939,23 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
                         goto did_not_correct_error;
         } else {
 corrected_error:
-                spin_lock(&sdev->stat_lock);
-                sdev->stat.corrected_errors++;
-                spin_unlock(&sdev->stat_lock);
+                spin_lock(&sctx->stat_lock);
+                sctx->stat.corrected_errors++;
+                spin_unlock(&sctx->stat_lock);
                 printk_ratelimited_in_rcu(KERN_ERR
                         "btrfs: fixed up error at logical %llu on dev %s\n",
                         (unsigned long long)logical,
-                        rcu_str_deref(sdev->dev->name));
+                        rcu_str_deref(sctx->dev->name));
         }
 } else {
 did_not_correct_error:
-        spin_lock(&sdev->stat_lock);
-        sdev->stat.uncorrectable_errors++;
-        spin_unlock(&sdev->stat_lock);
+        spin_lock(&sctx->stat_lock);
+        sctx->stat.uncorrectable_errors++;
+        spin_unlock(&sctx->stat_lock);
         printk_ratelimited_in_rcu(KERN_ERR
                 "btrfs: unable to fixup (regular) error at logical %llu on dev %s\n",
                 (unsigned long long)logical,
-                rcu_str_deref(sdev->dev->name));
+                rcu_str_deref(sctx->dev->name));
 }

 out:
@@ -978,7 +978,7 @@ out:
         return 0;
 }

-static int scrub_setup_recheck_block(struct scrub_dev *sdev,
+static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
                                      struct btrfs_mapping_tree *map_tree,
                                      u64 length, u64 logical,
                                      struct scrub_block *sblocks_for_recheck)
@@ -988,7 +988,7 @@ static int scrub_setup_recheck_block(struct scrub_dev *sdev,
         int ret;

         /*
-         * note: the three members sdev, ref_count and outstanding_pages
+         * note: the three members sctx, ref_count and outstanding_pages
          * are not used (and not set) in the blocks that are used for
          * the recheck procedure
          */
@@ -1028,9 +1028,9 @@ static int scrub_setup_recheck_block(struct scrub_dev *sdev,
                         page->mirror_num = mirror_index + 1;
                         page->page = alloc_page(GFP_NOFS);
                         if (!page->page) {
-                                spin_lock(&sdev->stat_lock);
-                                sdev->stat.malloc_errors++;
-                                spin_unlock(&sdev->stat_lock);
+                                spin_lock(&sctx->stat_lock);
+                                sctx->stat.malloc_errors++;
+                                spin_unlock(&sctx->stat_lock);
                                 kfree(bbio);
                                 return -ENOMEM;
                         }
@@ -1259,14 +1259,14 @@ static void scrub_checksum(struct scrub_block *sblock)

 static int scrub_checksum_data(struct scrub_block *sblock)
 {
-        struct scrub_dev *sdev = sblock->sdev;
+        struct scrub_ctx *sctx = sblock->sctx;
         u8 csum[BTRFS_CSUM_SIZE];
         u8 *on_disk_csum;
         struct page *page;
         void *buffer;
         u32 crc = ~(u32)0;
         int fail = 0;
-        struct btrfs_root *root = sdev->dev->dev_root;
+        struct btrfs_root *root = sctx->dev->dev_root;
         u64 len;
         int index;

@@ -1278,7 +1278,7 @@ static int scrub_checksum_data(struct scrub_block *sblock)
         page = sblock->pagev[0].page;
         buffer = kmap_atomic(page);

-        len = sdev->sectorsize;
+        len = sctx->sectorsize;
         index = 0;
         for (;;) {
                 u64 l = min_t(u64, len, PAGE_SIZE);
@@ -1296,7 +1296,7 @@ static int scrub_checksum_data(struct scrub_block *sblock)
         }

         btrfs_csum_final(crc, csum);
-        if (memcmp(csum, on_disk_csum, sdev->csum_size))
+        if (memcmp(csum, on_disk_csum, sctx->csum_size))
                 fail = 1;

         return fail;
@@ -1304,9 +1304,9 @@ static int scrub_checksum_data(struct scrub_block *sblock)

 static int scrub_checksum_tree_block(struct scrub_block *sblock)
 {
-        struct scrub_dev *sdev = sblock->sdev;
+        struct scrub_ctx *sctx = sblock->sctx;
         struct btrfs_header *h;
-        struct btrfs_root *root = sdev->dev->dev_root;
+        struct btrfs_root *root = sctx->dev->dev_root;
         struct btrfs_fs_info *fs_info = root->fs_info;
         u8 calculated_csum[BTRFS_CSUM_SIZE];
         u8 on_disk_csum[BTRFS_CSUM_SIZE];
@@ -1324,7 +1324,7 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock)
         page = sblock->pagev[0].page;
         mapped_buffer = kmap_atomic(page);
         h = (struct btrfs_header *)mapped_buffer;
-        memcpy(on_disk_csum, h->csum, sdev->csum_size);
+        memcpy(on_disk_csum, h->csum, sctx->csum_size);

         /*
          * we don't use the getter functions here, as we
@@ -1345,8 +1345,8 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock)
                    BTRFS_UUID_SIZE))
                 ++fail;

-        BUG_ON(sdev->nodesize != sdev->leafsize);
-        len = sdev->nodesize - BTRFS_CSUM_SIZE;
+        BUG_ON(sctx->nodesize != sctx->leafsize);
+        len = sctx->nodesize - BTRFS_CSUM_SIZE;
         mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
         p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
         index = 0;
@@ -1368,7 +1368,7 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock)
         }

         btrfs_csum_final(crc, calculated_csum);
-        if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
+        if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
                 ++crc_fail;

         return fail || crc_fail;
@@ -1377,8 +1377,8 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock)
 static int scrub_checksum_super(struct scrub_block *sblock)
 {
         struct btrfs_super_block *s;
-        struct scrub_dev *sdev = sblock->sdev;
-        struct btrfs_root *root = sdev->dev->dev_root;
+        struct scrub_ctx *sctx = sblock->sctx;
+        struct btrfs_root *root = sctx->dev->dev_root;
         struct btrfs_fs_info *fs_info = root->fs_info;
         u8 calculated_csum[BTRFS_CSUM_SIZE];
         u8 on_disk_csum[BTRFS_CSUM_SIZE];
@@ -1396,7 +1396,7 @@ static int scrub_checksum_super(struct scrub_block *sblock)
         page = sblock->pagev[0].page;
         mapped_buffer = kmap_atomic(page);
         s = (struct btrfs_super_block *)mapped_buffer;
-        memcpy(on_disk_csum, s->csum, sdev->csum_size);
+        memcpy(on_disk_csum, s->csum, sctx->csum_size);

         if (sblock->pagev[0].logical != le64_to_cpu(s->bytenr))
                 ++fail_cor;
@@ -1429,7 +1429,7 @@ static int scrub_checksum_super(struct scrub_block *sblock)
         }

         btrfs_csum_final(crc, calculated_csum);
-        if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
+        if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
                 ++fail_cor;

         if (fail_cor + fail_gen) {
@@ -1438,14 +1438,14 @@ static int scrub_checksum_super(struct scrub_block *sblock)
                  * They will get written with the next transaction commit
                  * anyway
                  */
-                spin_lock(&sdev->stat_lock);
-                ++sdev->stat.super_errors;
-                spin_unlock(&sdev->stat_lock);
+                spin_lock(&sctx->stat_lock);
+                ++sctx->stat.super_errors;
+                spin_unlock(&sctx->stat_lock);
                 if (fail_cor)
-                        btrfs_dev_stat_inc_and_print(sdev->dev,
+                        btrfs_dev_stat_inc_and_print(sctx->dev,
                                 BTRFS_DEV_STAT_CORRUPTION_ERRS);
                 else
-                        btrfs_dev_stat_inc_and_print(sdev->dev,
+                        btrfs_dev_stat_inc_and_print(sctx->dev,
                                 BTRFS_DEV_STAT_GENERATION_ERRS);
         }

@@ -1469,21 +1469,21 @@ static void scrub_block_put(struct scrub_block *sblock)
         }
 }

-static void scrub_submit(struct scrub_dev *sdev)
+static void scrub_submit(struct scrub_ctx *sctx)
 {
         struct scrub_bio *sbio;

-        if (sdev->curr == -1)
+        if (sctx->curr == -1)
                 return;

-        sbio = sdev->bios[sdev->curr];
-        sdev->curr = -1;
-        atomic_inc(&sdev->in_flight);
+        sbio = sctx->bios[sctx->curr];
+        sctx->curr = -1;
+        atomic_inc(&sctx->in_flight);

         btrfsic_submit_bio(READ, sbio->bio);
 }

-static int scrub_add_page_to_bio(struct scrub_dev *sdev,
+static int scrub_add_page_to_bio(struct scrub_ctx *sctx,
                                  struct scrub_page *spage)
 {
         struct scrub_block *sblock = spage->sblock;
@@ -1494,20 +1494,20 @@ again:
         /*
          * grab a fresh bio or wait for one to become available
          */
-        while (sdev->curr == -1) {
-                spin_lock(&sdev->list_lock);
-                sdev->curr = sdev->first_free;
-                if (sdev->curr != -1) {
-                        sdev->first_free = sdev->bios[sdev->curr]->next_free;
-                        sdev->bios[sdev->curr]->next_free = -1;
-                        sdev->bios[sdev->curr]->page_count = 0;
-                        spin_unlock(&sdev->list_lock);
+        while (sctx->curr == -1) {
+                spin_lock(&sctx->list_lock);
+                sctx->curr = sctx->first_free;
+                if (sctx->curr != -1) {
+                        sctx->first_free = sctx->bios[sctx->curr]->next_free;
+                        sctx->bios[sctx->curr]->next_free = -1;
+                        sctx->bios[sctx->curr]->page_count = 0;
+                        spin_unlock(&sctx->list_lock);
                 } else {
-                        spin_unlock(&sdev->list_lock);
-                        wait_event(sdev->list_wait, sdev->first_free != -1);
+                        spin_unlock(&sctx->list_lock);
+                        wait_event(sctx->list_wait, sctx->first_free != -1);
                 }
         }
-        sbio = sdev->bios[sdev->curr];
+        sbio = sctx->bios[sctx->curr];
         if (sbio->page_count == 0) {
                 struct bio *bio;

@@ -1515,7 +1515,7 @@ again:
                 sbio->logical = spage->logical;
                 bio = sbio->bio;
                 if (!bio) {
-                        bio = bio_alloc(GFP_NOFS, sdev->pages_per_bio);
+                        bio = bio_alloc(GFP_NOFS, sctx->pages_per_bio);
                         if (!bio)
                                 return -ENOMEM;
                         sbio->bio = bio;
@@ -1523,14 +1523,14 @@ again:

                 bio->bi_private = sbio;
                 bio->bi_end_io = scrub_bio_end_io;
-                bio->bi_bdev = sdev->dev->bdev;
+                bio->bi_bdev = sctx->dev->bdev;
                 bio->bi_sector = spage->physical >> 9;
                 sbio->err = 0;
         } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
                    spage->physical ||
                    sbio->logical + sbio->page_count * PAGE_SIZE !=
                    spage->logical) {
-                scrub_submit(sdev);
+                scrub_submit(sctx);
                 goto again;
         }

@@ -1542,20 +1542,20 @@ again:
                         sbio->bio = NULL;
                         return -EIO;
                 }
-                scrub_submit(sdev);
+                scrub_submit(sctx);
                 goto again;
         }

         scrub_block_get(sblock); /* one for the added page */
         atomic_inc(&sblock->outstanding_pages);
         sbio->page_count++;
-        if (sbio->page_count == sdev->pages_per_bio)
-                scrub_submit(sdev);
+        if (sbio->page_count == sctx->pages_per_bio)
+                scrub_submit(sctx);

         return 0;
 }

-static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
+static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
                        u64 physical, u64 flags, u64 gen, int mirror_num,
                        u8 *csum, int force)
 {
@@ -1564,15 +1564,15 @@ static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,

         sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
         if (!sblock) {
-                spin_lock(&sdev->stat_lock);
-                sdev->stat.malloc_errors++;
-                spin_unlock(&sdev->stat_lock);
+                spin_lock(&sctx->stat_lock);
+                sctx->stat.malloc_errors++;
+                spin_unlock(&sctx->stat_lock);
                 return -ENOMEM;
         }

         /* one ref inside this function, plus one for each page later on */
         atomic_set(&sblock->ref_count, 1);
-        sblock->sdev = sdev;
+        sblock->sctx = sctx;
         sblock->no_io_error_seen = 1;

         for (index = 0; len > 0; index++) {
@@ -1582,9 +1582,9 @@ static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
                 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
                 spage->page = alloc_page(GFP_NOFS);
                 if (!spage->page) {
-                        spin_lock(&sdev->stat_lock);
-                        sdev->stat.malloc_errors++;
-                        spin_unlock(&sdev->stat_lock);
+                        spin_lock(&sctx->stat_lock);
+                        sctx->stat.malloc_errors++;
+                        spin_unlock(&sctx->stat_lock);
                         while (index > 0) {
                                 index--;
                                 __free_page(sblock->pagev[index].page);
@@ -1593,7 +1593,7 @@ static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
                         return -ENOMEM;
                 }
                 spage->sblock = sblock;
-                spage->dev = sdev->dev;
+                spage->dev = sctx->dev;
                 spage->flags = flags;
                 spage->generation = gen;
                 spage->logical = logical;
@@ -1601,7 +1601,7 @@ static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
                 spage->mirror_num = mirror_num;
                 if (csum) {
                         spage->have_csum = 1;
-                        memcpy(spage->csum, csum, sdev->csum_size);
+                        memcpy(spage->csum, csum, sctx->csum_size);
                 } else {
                         spage->have_csum = 0;
                 }
@@ -1616,7 +1616,7 @@ static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
                 struct scrub_page *spage = sblock->pagev + index;
                 int ret;

-                ret = scrub_add_page_to_bio(sdev, spage);
+                ret = scrub_add_page_to_bio(sctx, spage);
                 if (ret) {
                         scrub_block_put(sblock);
                         return ret;
@@ -1624,7 +1624,7 @@ static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
         }

         if (force)
-                scrub_submit(sdev);
+                scrub_submit(sctx);

         /* last one frees, either here or in bio completion for last page */
         scrub_block_put(sblock);
@@ -1634,8 +1634,8 @@ static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
 static void scrub_bio_end_io(struct bio *bio, int err)
 {
         struct scrub_bio *sbio = bio->bi_private;
-        struct scrub_dev *sdev = sbio->sdev;
-        struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
+        struct scrub_ctx *sctx = sbio->sctx;
+        struct btrfs_fs_info *fs_info = sctx->dev->dev_root->fs_info;

         sbio->err = err;
         sbio->bio = bio;
@@ -1646,7 +1646,7 @@ static void scrub_bio_end_io(struct bio *bio, int err)
 static void scrub_bio_end_io_worker(struct btrfs_work *work)
 {
         struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
-        struct scrub_dev *sdev = sbio->sdev;
+        struct scrub_ctx *sctx = sbio->sctx;
         int i;

         BUG_ON(sbio->page_count > SCRUB_PAGES_PER_BIO);
@@ -1671,12 +1671,12 @@ static void scrub_bio_end_io_worker(struct btrfs_work *work)

         bio_put(sbio->bio);
         sbio->bio = NULL;
-        spin_lock(&sdev->list_lock);
-        sbio->next_free = sdev->first_free;
-        sdev->first_free = sbio->index;
-        spin_unlock(&sdev->list_lock);
-        atomic_dec(&sdev->in_flight);
-        wake_up(&sdev->list_wait);
+        spin_lock(&sctx->list_lock);
+        sbio->next_free = sctx->first_free;
+        sctx->first_free = sbio->index;
+        spin_unlock(&sctx->list_lock);
+        atomic_dec(&sctx->in_flight);
+        wake_up(&sctx->list_wait);
 }

 static void scrub_block_complete(struct scrub_block *sblock)
@@ -1687,7 +1687,7 @@ static void scrub_block_complete(struct scrub_block *sblock)
                 scrub_checksum(sblock);
 }

-static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len,
+static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
                            u8 *csum)
 {
         struct btrfs_ordered_sum *sum = NULL;
@@ -1695,15 +1695,15 @@ static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len,
         unsigned long i;
         unsigned long num_sectors;

-        while (!list_empty(&sdev->csum_list)) {
-                sum = list_first_entry(&sdev->csum_list,
+        while (!list_empty(&sctx->csum_list)) {
+                sum = list_first_entry(&sctx->csum_list,
                                        struct btrfs_ordered_sum, list);
                 if (sum->bytenr > logical)
                         return 0;
                 if (sum->bytenr + sum->len > logical)
                         break;

-                ++sdev->stat.csum_discards;
+                ++sctx->stat.csum_discards;
                 list_del(&sum->list);
                 kfree(sum);
                 sum = NULL;
@@ -1711,10 +1711,10 @@ static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len,
         if (!sum)
                 return 0;

-        num_sectors = sum->len / sdev->sectorsize;
+        num_sectors = sum->len / sctx->sectorsize;
         for (i = 0; i < num_sectors; ++i) {
                 if (sum->sums[i].bytenr == logical) {
-                        memcpy(csum, &sum->sums[i].sum, sdev->csum_size);
+                        memcpy(csum, &sum->sums[i].sum, sctx->csum_size);
                         ret = 1;
                         break;
                 }
@@ -1727,7 +1727,7 @@ static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len,
 }

 /* scrub extent tries to collect up to 64 kB for each bio */
-static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len,
+static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
                         u64 physical, u64 flags, u64 gen, int mirror_num)
 {
         int ret;
@@ -1735,20 +1735,20 @@ static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len,
         u32 blocksize;

         if (flags & BTRFS_EXTENT_FLAG_DATA) {
-                blocksize = sdev->sectorsize;
-                spin_lock(&sdev->stat_lock);
-                sdev->stat.data_extents_scrubbed++;
-                sdev->stat.data_bytes_scrubbed += len;
-                spin_unlock(&sdev->stat_lock);
+                blocksize = sctx->sectorsize;
+                spin_lock(&sctx->stat_lock);
+                sctx->stat.data_extents_scrubbed++;
+                sctx->stat.data_bytes_scrubbed += len;
+                spin_unlock(&sctx->stat_lock);
         } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
-                BUG_ON(sdev->nodesize != sdev->leafsize);
-                blocksize = sdev->nodesize;
-                spin_lock(&sdev->stat_lock);
-                sdev->stat.tree_extents_scrubbed++;
-                sdev->stat.tree_bytes_scrubbed += len;
-                spin_unlock(&sdev->stat_lock);
+                BUG_ON(sctx->nodesize != sctx->leafsize);
+                blocksize = sctx->nodesize;
+                spin_lock(&sctx->stat_lock);
+                sctx->stat.tree_extents_scrubbed++;
+                sctx->stat.tree_bytes_scrubbed += len;
+                spin_unlock(&sctx->stat_lock);
         } else {
-                blocksize = sdev->sectorsize;
+                blocksize = sctx->sectorsize;
                 BUG_ON(1);
         }

@@ -1758,11 +1758,11 @@ static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len,

                 if (flags & BTRFS_EXTENT_FLAG_DATA) {
                         /* push csums to sbio */
-                        have_csum = scrub_find_csum(sdev, logical, l, csum);
+                        have_csum = scrub_find_csum(sctx, logical, l, csum);
                         if (have_csum == 0)
-                                ++sdev->stat.no_csum;
+                                ++sctx->stat.no_csum;
                 }
-                ret = scrub_pages(sdev, logical, l, physical, flags, gen,
+                ret = scrub_pages(sctx, logical, l, physical, flags, gen,
                                   mirror_num, have_csum ? csum : NULL, 0);
                 if (ret)
                         return ret;
@@ -1773,11 +1773,11 @@ static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len,
         return 0;
 }

-static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
+static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
                 struct map_lookup *map, int num, u64 base, u64 length)
 {
         struct btrfs_path *path;
-        struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
+        struct btrfs_fs_info *fs_info = sctx->dev->dev_root->fs_info;
         struct btrfs_root *root = fs_info->extent_root;
         struct btrfs_root *csum_root = fs_info->csum_root;
         struct btrfs_extent_item *extent;
@@ -1843,8 +1843,8 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
          */
         logical = base + offset;

-        wait_event(sdev->list_wait,
-                   atomic_read(&sdev->in_flight) == 0);
+        wait_event(sctx->list_wait,
+                   atomic_read(&sctx->in_flight) == 0);
         atomic_inc(&fs_info->scrubs_paused);
         wake_up(&fs_info->scrub_pause_wait);

@@ -1898,7 +1898,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
                  * canceled?
                  */
                 if (atomic_read(&fs_info->scrub_cancel_req) ||
-                    atomic_read(&sdev->cancel_req)) {
+                    atomic_read(&sctx->cancel_req)) {
                         ret = -ECANCELED;
                         goto out;
                 }
@@ -1907,9 +1907,9 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
                  */
                 if (atomic_read(&fs_info->scrub_pause_req)) {
                         /* push queued extents */
-                        scrub_submit(sdev);
-                        wait_event(sdev->list_wait,
-                                   atomic_read(&sdev->in_flight) == 0);
+                        scrub_submit(sctx);
+                        wait_event(sctx->list_wait,
+                                   atomic_read(&sctx->in_flight) == 0);
                         atomic_inc(&fs_info->scrubs_paused);
                         wake_up(&fs_info->scrub_pause_wait);
                         mutex_lock(&fs_info->scrub_lock);
@@ -1926,7 +1926,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,

                 ret = btrfs_lookup_csums_range(csum_root, logical,
                                                logical + map->stripe_len - 1,
-                                               &sdev->csum_list, 1);
+                                               &sctx->csum_list, 1);
                 if (ret)
                         goto out;

@@ -2004,7 +2004,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
                                                key.objectid;
                         }

-                        ret = scrub_extent(sdev, key.objectid, key.offset,
+                        ret = scrub_extent(sctx, key.objectid, key.offset,
                                            key.objectid - logical + physical,
                                            flags, generation, mirror_num);
                         if (ret)
@@ -2016,12 +2016,12 @@ next:
                 btrfs_release_path(path);
                 logical += increment;
                 physical += map->stripe_len;
-                spin_lock(&sdev->stat_lock);
-                sdev->stat.last_physical = physical;
-                spin_unlock(&sdev->stat_lock);
+                spin_lock(&sctx->stat_lock);
+                sctx->stat.last_physical = physical;
+                spin_unlock(&sctx->stat_lock);
         }
         /* push queued extents */
-        scrub_submit(sdev);
+        scrub_submit(sctx);

 out:
         blk_finish_plug(&plug);
@@ -2029,12 +2029,12 @@ out:
         return ret < 0 ? ret : 0;
 }

-static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev,
+static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
         u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 length,
         u64 dev_offset)
 {
         struct btrfs_mapping_tree *map_tree =
-                &sdev->dev->dev_root->fs_info->mapping_tree;
+                &sctx->dev->dev_root->fs_info->mapping_tree;
         struct map_lookup *map;
         struct extent_map *em;
         int i;
@@ -2055,9 +2055,9 @@ static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev,
                 goto out;

         for (i = 0; i < map->num_stripes; ++i) {
-                if (map->stripes[i].dev == sdev->dev &&
+                if (map->stripes[i].dev == sctx->dev &&
                     map->stripes[i].physical == dev_offset) {
-                        ret = scrub_stripe(sdev, map, i, chunk_offset, length);
+                        ret = scrub_stripe(sctx, map, i, chunk_offset, length);
                         if (ret)
                                 goto out;
                 }
@@ -2069,11 +2069,11 @@ out:
 }

 static noinline_for_stack
-int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
+int scrub_enumerate_chunks(struct scrub_ctx *sctx, u64 start, u64 end)
 {
         struct btrfs_dev_extent *dev_extent = NULL;
         struct btrfs_path *path;
-        struct btrfs_root *root = sdev->dev->dev_root;
+        struct btrfs_root *root = sctx->dev->dev_root;
         struct btrfs_fs_info *fs_info = root->fs_info;
         u64 length;
         u64 chunk_tree;
@@ -2094,7 +2094,7 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
         path->search_commit_root = 1;
         path->skip_locking = 1;

-        key.objectid = sdev->dev->devid;
+        key.objectid = sctx->dev->devid;
         key.offset = 0ull;
         key.type = BTRFS_DEV_EXTENT_KEY;

@@ -2117,7 +2117,7 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)

                 btrfs_item_key_to_cpu(l, &found_key, slot);

-                if (found_key.objectid != sdev->dev->devid)
+                if (found_key.objectid != sctx->dev->devid)
                         break;

                 if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
@@ -2151,7 +2151,7 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
                         ret = -ENOENT;
                         break;
                 }
-                ret = scrub_chunk(sdev, chunk_tree, chunk_objectid,
+                ret = scrub_chunk(sctx, chunk_tree, chunk_objectid,
                                   chunk_offset, length, found_key.offset);
                 btrfs_put_block_group(cache);
                 if (ret)
@@ -2170,13 +2170,13 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
         return ret < 0 ? ret : 0;
 }

-static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
+static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx)
 {
         int i;
         u64 bytenr;
         u64 gen;
         int ret;
-        struct btrfs_device *device = sdev->dev;
+        struct btrfs_device *device = sctx->dev;
         struct btrfs_root *root = device->dev_root;

         if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
@@ -2189,12 +2189,12 @@ static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
                 if (bytenr + BTRFS_SUPER_INFO_SIZE > device->total_bytes)
                         break;

-                ret = scrub_pages(sdev, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
+                ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
                                   BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1);
                 if (ret)
                         return ret;
         }
-        wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);
+        wait_event(sctx->list_wait, atomic_read(&sctx->in_flight) == 0);

         return 0;
 }
@@ -2238,7 +2238,7 @@ static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)
 int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
                     struct btrfs_scrub_progress *progress, int readonly)
 {
-        struct scrub_dev *sdev;
+        struct scrub_ctx *sctx;
         struct btrfs_fs_info *fs_info = root->fs_info;
         int ret;
         struct btrfs_device *dev;
@@ -2302,41 +2302,41 @@ int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
                 scrub_workers_put(root);
                 return -EINPROGRESS;
         }
-        sdev = scrub_setup_dev(dev);
-        if (IS_ERR(sdev)) {
+        sctx = scrub_setup_ctx(dev);
+        if (IS_ERR(sctx)) {
                 mutex_unlock(&fs_info->scrub_lock);
                 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
                 scrub_workers_put(root);
-                return PTR_ERR(sdev);
+                return PTR_ERR(sctx);
         }
-        sdev->readonly = readonly;
-        dev->scrub_device = sdev;
+        sctx->readonly = readonly;
+        dev->scrub_device = sctx;

         atomic_inc(&fs_info->scrubs_running);
         mutex_unlock(&fs_info->scrub_lock);
         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

         down_read(&fs_info->scrub_super_lock);
-        ret = scrub_supers(sdev);
+        ret = scrub_supers(sctx);
         up_read(&fs_info->scrub_super_lock);

         if (!ret)
-                ret = scrub_enumerate_chunks(sdev, start, end);
+                ret = scrub_enumerate_chunks(sctx, start, end);

-        wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);
+        wait_event(sctx->list_wait, atomic_read(&sctx->in_flight) == 0);
         atomic_dec(&fs_info->scrubs_running);
         wake_up(&fs_info->scrub_pause_wait);

-        wait_event(sdev->list_wait, atomic_read(&sdev->fixup_cnt) == 0);
+        wait_event(sctx->list_wait, atomic_read(&sctx->fixup_cnt) == 0);

         if (progress)
-                memcpy(progress, &sdev->stat, sizeof(*progress));
+                memcpy(progress, &sctx->stat, sizeof(*progress));

         mutex_lock(&fs_info->scrub_lock);
         dev->scrub_device = NULL;
         mutex_unlock(&fs_info->scrub_lock);

-        scrub_free_dev(sdev);
+        scrub_free_ctx(sctx);
         scrub_workers_put(root);

         return ret;
@@ -2407,15 +2407,15 @@ int btrfs_scrub_cancel(struct btrfs_root *root)
 int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev)
 {
         struct btrfs_fs_info *fs_info = root->fs_info;
-        struct scrub_dev *sdev;
+        struct scrub_ctx *sctx;

         mutex_lock(&fs_info->scrub_lock);
-        sdev = dev->scrub_device;
-        if (!sdev) {
+        sctx = dev->scrub_device;
+        if (!sctx) {
                 mutex_unlock(&fs_info->scrub_lock);
                 return -ENOTCONN;
         }
-        atomic_inc(&sdev->cancel_req);
+        atomic_inc(&sctx->cancel_req);
         while (dev->scrub_device) {
                 mutex_unlock(&fs_info->scrub_lock);
                 wait_event(fs_info->scrub_pause_wait,
@@ -2453,15 +2453,15 @@ int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
                          struct btrfs_scrub_progress *progress)
 {
         struct btrfs_device *dev;
-        struct scrub_dev *sdev = NULL;
+        struct scrub_ctx *sctx = NULL;

         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
         dev = btrfs_find_device(root, devid, NULL, NULL);
         if (dev)
-                sdev = dev->scrub_device;
-        if (sdev)
-                memcpy(progress, &sdev->stat, sizeof(*progress));
+                sctx = dev->scrub_device;
+        if (sctx)
+                memcpy(progress, &sctx->stat, sizeof(*progress));
         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

-        return dev ? (sdev ? 0 : -ENOTCONN) : -ENODEV;
+        return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
 }
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 53c06af92e8d..1789cda57efb 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -88,7 +88,7 @@ struct btrfs_device {
         u8 uuid[BTRFS_UUID_SIZE];

         /* per-device scrub information */
-        struct scrub_dev *scrub_device;
+        struct scrub_ctx *scrub_device;

         struct btrfs_work work;
         struct rcu_head rcu;