author     Arne Jansen <sensille@gmx.net>        2011-05-28 15:57:55 -0400
committer  Chris Mason <chris.mason@oracle.com>  2011-06-04 08:03:17 -0400
commit     1bc8779349d6278e2713a1ff94418c2a6746a791 (patch)
tree       2caf24db7d669825538c15efe17473159da57593 /fs/btrfs
parent     ff5714cca971848963b87d6b477c16ca8abbaa54 (diff)
btrfs: scrub: don't reuse bios and pages
The current scrub implementation reuses bios and pages as often as possible, allocating them only on start and releasing them when finished. This leads to more problems with the block layer than it's worth. The elevator gets confused when there are more pages added to the bio than bi_size suggests. This patch completely rips out the reuse of bios and pages and allocates them freshly for each submit.

Signed-off-by: Arne Jansen <sensille@gmx.net>
Signed-off-by: Chris Mason <chris.mason@oracle.com>
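As a condensed sketch of the new allocation pattern (not the literal patch code: the example_submit() name is only illustrative, error paths are trimmed, and the block-layer calls are the 2011-era bio_alloc()/alloc_page()/bio_add_page()/submit_bio() interfaces the patch itself uses), each submit now builds a fresh bio backed by freshly allocated pages, and scrub_free_bio() releases everything once the checksum worker is done:

static int example_submit(struct scrub_dev *sdev, struct scrub_bio *sbio)
{
        struct bio *bio;
        int i;

        /* allocate a fresh bio for this submit instead of reusing one */
        bio = bio_alloc(GFP_NOFS, sbio->count);
        if (!bio)
                return -ENOMEM;

        bio->bi_private = sbio;
        bio->bi_end_io = scrub_bio_end_io;
        bio->bi_bdev = sdev->dev->bdev;
        bio->bi_sector = sbio->physical >> 9;

        /* back the bio with freshly allocated pages as well */
        for (i = 0; i < sbio->count; ++i) {
                struct page *page = alloc_page(GFP_NOFS);

                if (!page || !bio_add_page(bio, page, PAGE_SIZE, 0)) {
                        if (page)
                                __free_page(page);
                        scrub_free_bio(bio);    /* drops pages added so far */
                        return -ENOMEM;
                }
        }

        submit_bio(READ, bio);  /* freed via scrub_free_bio() after checksumming */
        return 0;
}

Because nothing is cached between submits, the bio's size and page count always describe exactly what was just attached to it, which is the mismatch the commit message blames for confusing the elevator.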
Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/scrub.c  114
1 files changed, 65 insertions, 49 deletions
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 6dfed0c27ac3..2d1f8909a8e1 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -117,33 +117,37 @@ static void scrub_free_csums(struct scrub_dev *sdev)
         }
 }

+static void scrub_free_bio(struct bio *bio)
+{
+        int i;
+        struct page *last_page = NULL;
+
+        if (!bio)
+                return;
+
+        for (i = 0; i < bio->bi_vcnt; ++i) {
+                if (bio->bi_io_vec[i].bv_page == last_page)
+                        continue;
+                last_page = bio->bi_io_vec[i].bv_page;
+                __free_page(last_page);
+        }
+        bio_put(bio);
+}
+
 static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev)
 {
         int i;
-        int j;
-        struct page *last_page;

         if (!sdev)
                 return;

         for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
                 struct scrub_bio *sbio = sdev->bios[i];
-                struct bio *bio;

                 if (!sbio)
                         break;

-                bio = sbio->bio;
-                if (bio) {
-                        last_page = NULL;
-                        for (j = 0; j < bio->bi_vcnt; ++j) {
-                                if (bio->bi_io_vec[j].bv_page == last_page)
-                                        continue;
-                                last_page = bio->bi_io_vec[j].bv_page;
-                                __free_page(last_page);
-                        }
-                        bio_put(bio);
-                }
+                scrub_free_bio(sbio->bio);
                 kfree(sbio);
         }

@@ -156,8 +160,6 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
 {
         struct scrub_dev *sdev;
         int i;
-        int j;
-        int ret;
         struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;

         sdev = kzalloc(sizeof(*sdev), GFP_NOFS);
@@ -165,7 +167,6 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
                 goto nomem;
         sdev->dev = dev;
         for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
-                struct bio *bio;
                 struct scrub_bio *sbio;

                 sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
@@ -173,32 +174,10 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
                         goto nomem;
                 sdev->bios[i] = sbio;

-                bio = bio_kmalloc(GFP_NOFS, SCRUB_PAGES_PER_BIO);
-                if (!bio)
-                        goto nomem;
-
                 sbio->index = i;
                 sbio->sdev = sdev;
-                sbio->bio = bio;
                 sbio->count = 0;
                 sbio->work.func = scrub_checksum;
-                bio->bi_private = sdev->bios[i];
-                bio->bi_end_io = scrub_bio_end_io;
-                bio->bi_sector = 0;
-                bio->bi_bdev = dev->bdev;
-                bio->bi_size = 0;
-
-                for (j = 0; j < SCRUB_PAGES_PER_BIO; ++j) {
-                        struct page *page;
-                        page = alloc_page(GFP_NOFS);
-                        if (!page)
-                                goto nomem;
-
-                        ret = bio_add_page(bio, page, PAGE_SIZE, 0);
-                        if (!ret)
-                                goto nomem;
-                }
-                WARN_ON(bio->bi_vcnt != SCRUB_PAGES_PER_BIO);

                 if (i != SCRUB_BIOS_PER_DEV-1)
                         sdev->bios[i]->next_free = i + 1;
@@ -394,6 +373,7 @@ static void scrub_bio_end_io(struct bio *bio, int err)
         struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;

         sbio->err = err;
+        sbio->bio = bio;

         btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
 }
@@ -453,6 +433,8 @@ static void scrub_checksum(struct btrfs_work *work)
         }

 out:
+        scrub_free_bio(sbio->bio);
+        sbio->bio = NULL;
         spin_lock(&sdev->list_lock);
         sbio->next_free = sdev->first_free;
         sdev->first_free = sbio->index;
@@ -583,25 +565,50 @@ static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer)
 static int scrub_submit(struct scrub_dev *sdev)
 {
         struct scrub_bio *sbio;
+        struct bio *bio;
+        int i;

         if (sdev->curr == -1)
                 return 0;

         sbio = sdev->bios[sdev->curr];

-        sbio->bio->bi_sector = sbio->physical >> 9;
-        sbio->bio->bi_size = sbio->count * PAGE_SIZE;
-        sbio->bio->bi_next = NULL;
-        sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
-        sbio->bio->bi_comp_cpu = -1;
-        sbio->bio->bi_bdev = sdev->dev->bdev;
+        bio = bio_alloc(GFP_NOFS, sbio->count);
+        if (!bio)
+                goto nomem;
+
+        bio->bi_private = sbio;
+        bio->bi_end_io = scrub_bio_end_io;
+        bio->bi_bdev = sdev->dev->bdev;
+        bio->bi_sector = sbio->physical >> 9;
+
+        for (i = 0; i < sbio->count; ++i) {
+                struct page *page;
+                int ret;
+
+                page = alloc_page(GFP_NOFS);
+                if (!page)
+                        goto nomem;
+
+                ret = bio_add_page(bio, page, PAGE_SIZE, 0);
+                if (!ret) {
+                        __free_page(page);
+                        goto nomem;
+                }
+        }
+
         sbio->err = 0;
         sdev->curr = -1;
         atomic_inc(&sdev->in_flight);

-        submit_bio(0, sbio->bio);
+        submit_bio(READ, bio);

         return 0;
+
+nomem:
+        scrub_free_bio(bio);
+
+        return -ENOMEM;
 }

 static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
@@ -633,7 +640,11 @@ again:
                 sbio->logical = logical;
         } else if (sbio->physical + sbio->count * PAGE_SIZE != physical ||
                    sbio->logical + sbio->count * PAGE_SIZE != logical) {
-                scrub_submit(sdev);
+                int ret;
+
+                ret = scrub_submit(sdev);
+                if (ret)
+                        return ret;
                 goto again;
         }
         sbio->spag[sbio->count].flags = flags;
@@ -645,8 +656,13 @@ again:
                 memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size);
         }
         ++sbio->count;
-        if (sbio->count == SCRUB_PAGES_PER_BIO || force)
-                scrub_submit(sdev);
+        if (sbio->count == SCRUB_PAGES_PER_BIO || force) {
+                int ret;
+
+                ret = scrub_submit(sdev);
+                if (ret)
+                        return ret;
+        }

         return 0;
 }