author		Jan Schmidt <list.btrfs@jan-o-sch.net>	2011-06-13 13:56:13 -0400
committer	Jan Schmidt <list.btrfs@jan-o-sch.net>	2011-09-29 06:54:27 -0400
commit		13db62b7a1e8c64763a93c155091620f85ff8920 (patch)
tree		fdc56259e5675f6fe70c555138d6d3b0ebbbefb5 /fs/btrfs/scrub.c
parent		a542ad1bafc7df9fc16de8a6894b350a4df75572 (diff)
btrfs scrub: added unverified_errors
In normal operation, scrub reads data sequentially in large portions. In case
of an i/o error, we try to find the corrupted area(s) by issuing page sized
read requests. With this commit we increment the unverified_errors counter if
all of the small size requests succeed. Userland patches that report such
conspicuous events to the administrator should already be around.

Signed-off-by: Jan Schmidt <list.btrfs@jan-o-sch.net>
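The control flow is compact but easy to misread in diff form. The following
stand-alone userspace sketch mirrors it under stated assumptions:
read_page(), verify_page(), struct scrub_stats and PAGE_SIZE here are
hypothetical stand-ins for scrub_fixup_io(), scrub_fixup_check() and
sdev->stat; the kernel additionally tries to repair still-bad pages via
scrub_fixup() and guards the counters with stat_lock rather than running
single-threaded.

#include <stdbool.h>

#define PAGE_SIZE 4096UL

struct scrub_stats {			/* stand-in for sdev->stat */
	unsigned long read_errors;
	unsigned long unverified_errors;
};

/* Stubs for single-page I/O and checksum verification. */
static bool read_page(unsigned long long sector) { (void)sector; return true; }
static bool verify_page(int ix) { (void)ix; return true; }

/* Like the new scrub_recheck_error(): re-read one page of a failed bio.
 * Returns 0 if the retry succeeded and verified, 1 if the page is bad. */
static int recheck_error(struct scrub_stats *st, unsigned long long physical,
			 int ix)
{
	unsigned long long sector = (physical + ix * PAGE_SIZE) >> 9;

	if (read_page(sector) && verify_page(ix))
		return 0;

	++st->read_errors;		/* genuinely unreadable or corrupt */
	return 1;
}

/* Like the reworked error path in scrub_checksum(): if no page of the
 * failed bio is individually bad, the error could not be verified. */
static void recheck_failed_bio(struct scrub_stats *st,
			       unsigned long long physical, int count)
{
	int ret = 0, i;

	for (i = 0; i < count; ++i)
		ret |= recheck_error(st, physical, i);
	if (!ret)			/* every per-page retry succeeded */
		++st->unverified_errors;
}

int main(void)
{
	struct scrub_stats st = { 0, 0 };

	recheck_failed_bio(&st, 0, 16);	/* pretend a 16-page bio failed */
	return (int)st.read_errors;	/* 0 here: the stubs always succeed */
}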
Diffstat (limited to 'fs/btrfs/scrub.c')
-rw-r--r--  fs/btrfs/scrub.c  37
1 file changed, 26 insertions(+), 11 deletions(-)
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index a8d03d5efb5..35099fa97d5 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -201,18 +201,25 @@ nomem:
  * recheck_error gets called for every page in the bio, even though only
  * one may be bad
  */
-static void scrub_recheck_error(struct scrub_bio *sbio, int ix)
+static int scrub_recheck_error(struct scrub_bio *sbio, int ix)
 {
+	struct scrub_dev *sdev = sbio->sdev;
+	u64 sector = (sbio->physical + ix * PAGE_SIZE) >> 9;
+
 	if (sbio->err) {
-		if (scrub_fixup_io(READ, sbio->sdev->dev->bdev,
-				   (sbio->physical + ix * PAGE_SIZE) >> 9,
+		if (scrub_fixup_io(READ, sbio->sdev->dev->bdev, sector,
 				   sbio->bio->bi_io_vec[ix].bv_page) == 0) {
 			if (scrub_fixup_check(sbio, ix) == 0)
-				return;
+				return 0;
 		}
 	}
 
+	spin_lock(&sdev->stat_lock);
+	++sdev->stat.read_errors;
+	spin_unlock(&sdev->stat_lock);
+
 	scrub_fixup(sbio, ix);
+	return 1;
 }
 
 static int scrub_fixup_check(struct scrub_bio *sbio, int ix)
@@ -382,8 +389,14 @@ static void scrub_checksum(struct btrfs_work *work)
 	int ret;
 
 	if (sbio->err) {
+		ret = 0;
 		for (i = 0; i < sbio->count; ++i)
-			scrub_recheck_error(sbio, i);
+			ret |= scrub_recheck_error(sbio, i);
+		if (!ret) {
+			spin_lock(&sdev->stat_lock);
+			++sdev->stat.unverified_errors;
+			spin_unlock(&sdev->stat_lock);
+		}
 
 		sbio->bio->bi_flags &= ~(BIO_POOL_MASK - 1);
 		sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
@@ -396,10 +409,6 @@ static void scrub_checksum(struct btrfs_work *work)
 			bi->bv_offset = 0;
 			bi->bv_len = PAGE_SIZE;
 		}
-
-		spin_lock(&sdev->stat_lock);
-		++sdev->stat.read_errors;
-		spin_unlock(&sdev->stat_lock);
 		goto out;
 	}
 	for (i = 0; i < sbio->count; ++i) {
@@ -420,8 +429,14 @@ static void scrub_checksum(struct btrfs_work *work)
 			WARN_ON(1);
 		}
 		kunmap_atomic(buffer, KM_USER0);
-		if (ret)
-			scrub_recheck_error(sbio, i);
+		if (ret) {
+			ret = scrub_recheck_error(sbio, i);
+			if (!ret) {
+				spin_lock(&sdev->stat_lock);
+				++sdev->stat.unverified_errors;
+				spin_unlock(&sdev->stat_lock);
+			}
+		}
 	}
 
 out:
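For completeness, a hedged sketch of how userland can observe the new counter:
the blocking scrub ioctl copies the accumulated statistics, including
unverified_errors, back to the caller in struct btrfs_ioctl_scrub_args.progress.
This assumes the uapi header <linux/btrfs.h> of later kernels (btrfs-progs of
this era carried its own copy of these definitions) and a filesystem whose
first device has devid 1; it is not part of this commit.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

int main(int argc, char **argv)
{
	struct btrfs_ioctl_scrub_args args;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <mountpoint>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&args, 0, sizeof(args));
	args.devid = 1;			/* assumed: first/only device */
	args.start = 0;
	args.end = (__u64)-1;		/* scrub the whole device */

	/* Blocking call: returns when the scrub is done, stats filled in. */
	if (ioctl(fd, BTRFS_IOC_SCRUB, &args) < 0) {
		perror("BTRFS_IOC_SCRUB");
		close(fd);
		return 1;
	}
	printf("read_errors:       %llu\n",
	       (unsigned long long)args.progress.read_errors);
	printf("unverified_errors: %llu\n",
	       (unsigned long long)args.progress.unverified_errors);
	close(fd);
	return 0;
}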