author     Stefan Behrens <sbehrens@giantdisaster.de>    2012-05-25 10:06:08 -0400
committer  Josef Bacik <josef@redhat.com>                2012-05-30 10:23:39 -0400
commit     442a4f6308e694e0fa6025708bd5e4e424bbf51c (patch)
tree       e782db1bcbec25283048d77871e0bed7ad04567c /fs
parent     d07eb9117050c9ed3f78296ebcc06128b52693be (diff)
Btrfs: add device counters for detected IO and checksum errors
The goal is to detect when drives start to show an increased error rate,
i.e. when they should be replaced soon. Therefore statistic counters are
added that count IO errors (read, write and flush). Additionally,
software-detected errors such as checksum errors and corrupted blocks
are counted.
Signed-off-by: Stefan Behrens <sbehrens@giantdisaster.de>
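
As an aside, a minimal user-space model of the counter scheme this patch introduces: one atomic counter per error class on each device, plus a dirty flag so the values can later be persisted. The enum values and the inc/dirty pattern mirror the patch below; the demo_device struct and main() are illustrative stand-ins only, not kernel code.

/* Illustrative user-space model of the per-device error counters. */
#include <stdatomic.h>
#include <stdio.h>

enum btrfs_dev_stat_values {
	BTRFS_DEV_STAT_WRITE_ERRS,	/* EIO/EREMOTEIO from lower layers */
	BTRFS_DEV_STAT_READ_ERRS,
	BTRFS_DEV_STAT_FLUSH_ERRS,
	BTRFS_DEV_STAT_CORRUPTION_ERRS,	/* checksum or content errors */
	BTRFS_DEV_STAT_GENERATION_ERRS,	/* blocks that were never written */
	BTRFS_DEV_STAT_VALUES_MAX
};

struct demo_device {
	const char *name;
	int dev_stats_dirty;	/* counters need to be written to disk */
	atomic_int dev_stat_values[BTRFS_DEV_STAT_VALUES_MAX];
};

/* same shape as btrfs_dev_stat_inc(): bump one counter, mark it dirty */
static void dev_stat_inc(struct demo_device *dev, int index)
{
	atomic_fetch_add(&dev->dev_stat_values[index], 1);
	dev->dev_stats_dirty = 1;
}

int main(void)
{
	struct demo_device dev = { .name = "sdb" };

	dev_stat_inc(&dev, BTRFS_DEV_STAT_WRITE_ERRS);	/* a failed write */
	printf("bdev %s errs: wr %d\n", dev.name,
	       atomic_load(&dev.dev_stat_values[BTRFS_DEV_STAT_WRITE_ERRS]));
	return 0;
}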
Diffstat (limited to 'fs')
 -rw-r--r--  fs/btrfs/disk-io.c   |  13
 -rw-r--r--  fs/btrfs/extent_io.c |  18
 -rw-r--r--  fs/btrfs/ioctl.h     |  19
 -rw-r--r--  fs/btrfs/scrub.c     |  65
 -rw-r--r--  fs/btrfs/volumes.c   |  94
 -rw-r--r--  fs/btrfs/volumes.h   |  45
6 files changed, 230 insertions, 24 deletions
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 0f788c059063..46d474e74aa4 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2557,18 +2557,19 @@ recovery_tree_root:
 
 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
 {
-	char b[BDEVNAME_SIZE];
-
 	if (uptodate) {
 		set_buffer_uptodate(bh);
 	} else {
+		struct btrfs_device *device = (struct btrfs_device *)
+			bh->b_private;
+
 		printk_ratelimited(KERN_WARNING "lost page write due to "
-				   "I/O error on %s\n",
-				   bdevname(bh->b_bdev, b));
+				   "I/O error on %s\n", device->name);
 		/* note, we dont' set_buffer_write_io_error because we have
 		 * our own ways of dealing with the IO errors
 		 */
 		clear_buffer_uptodate(bh);
+		btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
 	}
 	unlock_buffer(bh);
 	put_bh(bh);
@@ -2683,6 +2684,7 @@ static int write_dev_supers(struct btrfs_device *device,
 			set_buffer_uptodate(bh);
 			lock_buffer(bh);
 			bh->b_end_io = btrfs_end_buffer_write_sync;
+			bh->b_private = device;
 		}
 
 		/*
@@ -2741,6 +2743,9 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
 	}
 	if (!bio_flagged(bio, BIO_UPTODATE)) {
 		ret = -EIO;
+		if (!bio_flagged(bio, BIO_EOPNOTSUPP))
+			btrfs_dev_stat_inc_and_print(device,
+				BTRFS_DEV_STAT_FLUSH_ERRS);
 	}
 
 	/* drop the reference from the wait == 0 run */
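
The disk-io.c change above relies on a small pattern: the submitter stows the btrfs_device pointer in bh->b_private so the write-completion callback can attribute a failure to the right device. A stand-alone sketch of that pattern; the demo_* types are illustrative stand-ins, not the kernel structures.

#include <stdio.h>

struct demo_device {
	const char *name;
	int write_errs;
};

struct demo_buffer {
	void *b_private;	/* set by the submitter */
	void (*b_end_io)(struct demo_buffer *bh, int uptodate);
};

static void end_buffer_write_sync(struct demo_buffer *bh, int uptodate)
{
	if (!uptodate) {
		/* the private pointer tells us which device failed */
		struct demo_device *device = bh->b_private;

		device->write_errs++;
		fprintf(stderr, "lost page write due to I/O error on %s\n",
			device->name);
	}
}

int main(void)
{
	struct demo_device dev = { .name = "sdb", .write_errs = 0 };
	struct demo_buffer bh = { .b_private = &dev,
				  .b_end_io = end_buffer_write_sync };

	bh.b_end_io(&bh, 0);	/* simulate a failed write */
	printf("write_errs = %d\n", dev.write_errs);
	return 0;
}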
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 69a527c7a0b3..b3692c1373aa 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1913,6 +1913,7 @@ int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
 	if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
 		/* try to remap that extent elsewhere? */
 		bio_put(bio);
+		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
 		return -EIO;
 	}
 
@@ -2327,10 +2328,23 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
 			ret = tree->ops->readpage_end_io_hook(page, start, end,
 							      state, mirror);
-			if (ret)
+			if (ret) {
+				/* no IO indicated but software detected errors
+				 * in the block, either checksum errors or
+				 * issues with the contents */
+				struct btrfs_root *root =
+					BTRFS_I(page->mapping->host)->root;
+				struct btrfs_device *device;
+
 				uptodate = 0;
-			else
+				device = btrfs_find_device_for_logical(
+						root, start, mirror);
+				if (device)
+					btrfs_dev_stat_inc_and_print(device,
+						BTRFS_DEV_STAT_CORRUPTION_ERRS);
+			} else {
 				clean_io_failure(start, page);
+			}
 		}
 
 		if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) {
diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h
index 086e6bdae1c4..5bf05e28b829 100644
--- a/fs/btrfs/ioctl.h
+++ b/fs/btrfs/ioctl.h
@@ -266,6 +266,25 @@ struct btrfs_ioctl_logical_ino_args {
 	__u64				inodes;
 };
 
+enum btrfs_dev_stat_values {
+	/* disk I/O failure stats */
+	BTRFS_DEV_STAT_WRITE_ERRS, /* EIO or EREMOTEIO from lower layers */
+	BTRFS_DEV_STAT_READ_ERRS, /* EIO or EREMOTEIO from lower layers */
+	BTRFS_DEV_STAT_FLUSH_ERRS, /* EIO or EREMOTEIO from lower layers */
+
+	/* stats for indirect indications for I/O failures */
+	BTRFS_DEV_STAT_CORRUPTION_ERRS, /* checksum error, bytenr error or
+					 * contents is illegal: this is an
+					 * indication that the block was damaged
+					 * during read or write, or written to
+					 * wrong location or read from wrong
+					 * location */
+	BTRFS_DEV_STAT_GENERATION_ERRS, /* an indication that blocks have not
+					 * been written */
+
+	BTRFS_DEV_STAT_VALUES_MAX
+};
+
 #define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \
 				   struct btrfs_ioctl_vol_args)
 #define BTRFS_IOC_DEFRAG _IOW(BTRFS_IOCTL_MAGIC, 2, \
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 2f3d6f917fb3..a38cfa4f251e 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -50,7 +50,7 @@ struct scrub_dev;
 struct scrub_page {
 	struct scrub_block	*sblock;
 	struct page		*page;
-	struct block_device	*bdev;
+	struct btrfs_device	*dev;
 	u64			flags;  /* extent flags */
 	u64			generation;
 	u64			logical;
@@ -86,6 +86,7 @@ struct scrub_block {
 		unsigned int	header_error:1;
 		unsigned int	checksum_error:1;
 		unsigned int	no_io_error_seen:1;
+		unsigned int	generation_error:1; /* also sets header_error */
 	};
 };
 
@@ -675,6 +676,8 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 		sdev->stat.read_errors++;
 		sdev->stat.uncorrectable_errors++;
 		spin_unlock(&sdev->stat_lock);
+		btrfs_dev_stat_inc_and_print(sdev->dev,
+					     BTRFS_DEV_STAT_READ_ERRS);
 		goto out;
 	}
 
@@ -686,6 +689,8 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 		sdev->stat.read_errors++;
 		sdev->stat.uncorrectable_errors++;
 		spin_unlock(&sdev->stat_lock);
+		btrfs_dev_stat_inc_and_print(sdev->dev,
+					     BTRFS_DEV_STAT_READ_ERRS);
 		goto out;
 	}
 	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
@@ -699,6 +704,8 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 		sdev->stat.read_errors++;
 		sdev->stat.uncorrectable_errors++;
 		spin_unlock(&sdev->stat_lock);
+		btrfs_dev_stat_inc_and_print(sdev->dev,
+					     BTRFS_DEV_STAT_READ_ERRS);
 		goto out;
 	}
 
@@ -725,12 +732,16 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 		spin_unlock(&sdev->stat_lock);
 		if (__ratelimit(&_rs))
 			scrub_print_warning("i/o error", sblock_to_check);
+		btrfs_dev_stat_inc_and_print(sdev->dev,
+					     BTRFS_DEV_STAT_READ_ERRS);
 	} else if (sblock_bad->checksum_error) {
 		spin_lock(&sdev->stat_lock);
 		sdev->stat.csum_errors++;
 		spin_unlock(&sdev->stat_lock);
 		if (__ratelimit(&_rs))
 			scrub_print_warning("checksum error", sblock_to_check);
+		btrfs_dev_stat_inc_and_print(sdev->dev,
+					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
 	} else if (sblock_bad->header_error) {
 		spin_lock(&sdev->stat_lock);
 		sdev->stat.verify_errors++;
@@ -738,6 +749,12 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 		if (__ratelimit(&_rs))
 			scrub_print_warning("checksum/header error",
 					    sblock_to_check);
+		if (sblock_bad->generation_error)
+			btrfs_dev_stat_inc_and_print(sdev->dev,
+				BTRFS_DEV_STAT_GENERATION_ERRS);
+		else
+			btrfs_dev_stat_inc_and_print(sdev->dev,
+				BTRFS_DEV_STAT_CORRUPTION_ERRS);
 	}
 
 	if (sdev->readonly)
@@ -998,8 +1015,8 @@ static int scrub_setup_recheck_block(struct scrub_dev *sdev,
 			page = sblock->pagev + page_index;
 			page->logical = logical;
 			page->physical = bbio->stripes[mirror_index].physical;
-			/* for missing devices, bdev is NULL */
-			page->bdev = bbio->stripes[mirror_index].dev->bdev;
+			/* for missing devices, dev->bdev is NULL */
+			page->dev = bbio->stripes[mirror_index].dev;
 			page->mirror_num = mirror_index + 1;
 			page->page = alloc_page(GFP_NOFS);
 			if (!page->page) {
@@ -1043,7 +1060,7 @@ static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
 		struct scrub_page *page = sblock->pagev + page_num;
 		DECLARE_COMPLETION_ONSTACK(complete);
 
-		if (page->bdev == NULL) {
+		if (page->dev->bdev == NULL) {
 			page->io_error = 1;
 			sblock->no_io_error_seen = 0;
 			continue;
@@ -1053,7 +1070,7 @@ static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
 		bio = bio_alloc(GFP_NOFS, 1);
 		if (!bio)
 			return -EIO;
-		bio->bi_bdev = page->bdev;
+		bio->bi_bdev = page->dev->bdev;
 		bio->bi_sector = page->physical >> 9;
 		bio->bi_end_io = scrub_complete_bio_end_io;
 		bio->bi_private = &complete;
@@ -1102,11 +1119,14 @@ static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
 		h = (struct btrfs_header *)mapped_buffer;
 
 		if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr) ||
-		    generation != le64_to_cpu(h->generation) ||
 		    memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) ||
 		    memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
-			   BTRFS_UUID_SIZE))
+			   BTRFS_UUID_SIZE)) {
 			sblock->header_error = 1;
+		} else if (generation != le64_to_cpu(h->generation)) {
+			sblock->header_error = 1;
+			sblock->generation_error = 1;
+		}
 		csum = h->csum;
 	} else {
 		if (!have_csum)
@@ -1182,7 +1202,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
 		bio = bio_alloc(GFP_NOFS, 1);
 		if (!bio)
 			return -EIO;
-		bio->bi_bdev = page_bad->bdev;
+		bio->bi_bdev = page_bad->dev->bdev;
 		bio->bi_sector = page_bad->physical >> 9;
 		bio->bi_end_io = scrub_complete_bio_end_io;
 		bio->bi_private = &complete;
@@ -1196,6 +1216,12 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
 
 		/* this will also unplug the queue */
 		wait_for_completion(&complete);
+		if (!bio_flagged(bio, BIO_UPTODATE)) {
+			btrfs_dev_stat_inc_and_print(page_bad->dev,
+				BTRFS_DEV_STAT_WRITE_ERRS);
+			bio_put(bio);
+			return -EIO;
+		}
 		bio_put(bio);
 	}
 
@@ -1352,7 +1378,8 @@ static int scrub_checksum_super(struct scrub_block *sblock)
 	u64 mapped_size;
 	void *p;
 	u32 crc = ~(u32)0;
-	int fail = 0;
+	int fail_gen = 0;
+	int fail_cor = 0;
 	u64 len;
 	int index;
 
@@ -1363,13 +1390,13 @@ static int scrub_checksum_super(struct scrub_block *sblock)
 	memcpy(on_disk_csum, s->csum, sdev->csum_size);
 
 	if (sblock->pagev[0].logical != le64_to_cpu(s->bytenr))
-		++fail;
+		++fail_cor;
 
 	if (sblock->pagev[0].generation != le64_to_cpu(s->generation))
-		++fail;
+		++fail_gen;
 
 	if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
-		++fail;
+		++fail_cor;
 
 	len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
 	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
@@ -1394,9 +1421,9 @@ static int scrub_checksum_super(struct scrub_block *sblock)
 
 	btrfs_csum_final(crc, calculated_csum);
 	if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
-		++fail;
+		++fail_cor;
 
-	if (fail) {
+	if (fail_cor + fail_gen) {
 		/*
 		 * if we find an error in a super block, we just report it.
 		 * They will get written with the next transaction commit
@@ -1405,9 +1432,15 @@ static int scrub_checksum_super(struct scrub_block *sblock)
 		spin_lock(&sdev->stat_lock);
 		++sdev->stat.super_errors;
 		spin_unlock(&sdev->stat_lock);
+		if (fail_cor)
+			btrfs_dev_stat_inc_and_print(sdev->dev,
+				BTRFS_DEV_STAT_CORRUPTION_ERRS);
+		else
+			btrfs_dev_stat_inc_and_print(sdev->dev,
+				BTRFS_DEV_STAT_GENERATION_ERRS);
 	}
 
-	return fail;
+	return fail_cor + fail_gen;
 }
 
 static void scrub_block_get(struct scrub_block *sblock)
@@ -1551,7 +1584,7 @@ static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
 			return -ENOMEM;
 		}
 		spage->sblock = sblock;
-		spage->bdev = sdev->dev->bdev;
+		spage->dev = sdev->dev;
 		spage->flags = flags;
 		spage->generation = gen;
 		spage->logical = logical;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 48a06d1fc067..2915521f44ee 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -23,6 +23,7 @@
 #include <linux/random.h>
 #include <linux/iocontext.h>
 #include <linux/capability.h>
+#include <linux/ratelimit.h>
 #include <linux/kthread.h>
 #include <asm/div64.h>
 #include "compat.h"
@@ -4001,13 +4002,58 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
 	return 0;
 }
 
+static void *merge_stripe_index_into_bio_private(void *bi_private,
+						 unsigned int stripe_index)
+{
+	/*
+	 * with single, dup, RAID0, RAID1 and RAID10, stripe_index is
+	 * at most 1.
+	 * The alternative solution (instead of stealing bits from the
+	 * pointer) would be to allocate an intermediate structure
+	 * that contains the old private pointer plus the stripe_index.
+	 */
+	BUG_ON((((uintptr_t)bi_private) & 3) != 0);
+	BUG_ON(stripe_index > 3);
+	return (void *)(((uintptr_t)bi_private) | stripe_index);
+}
+
+static struct btrfs_bio *extract_bbio_from_bio_private(void *bi_private)
+{
+	return (struct btrfs_bio *)(((uintptr_t)bi_private) & ~((uintptr_t)3));
+}
+
+static unsigned int extract_stripe_index_from_bio_private(void *bi_private)
+{
+	return (unsigned int)((uintptr_t)bi_private) & 3;
+}
+
 static void btrfs_end_bio(struct bio *bio, int err)
 {
-	struct btrfs_bio *bbio = bio->bi_private;
+	struct btrfs_bio *bbio = extract_bbio_from_bio_private(bio->bi_private);
 	int is_orig_bio = 0;
 
-	if (err)
+	if (err) {
 		atomic_inc(&bbio->error);
+		if (err == -EIO || err == -EREMOTEIO) {
+			unsigned int stripe_index =
+				extract_stripe_index_from_bio_private(
+					bio->bi_private);
+			struct btrfs_device *dev;
+
+			BUG_ON(stripe_index >= bbio->num_stripes);
+			dev = bbio->stripes[stripe_index].dev;
+			if (bio->bi_rw & WRITE)
+				btrfs_dev_stat_inc(dev,
+					BTRFS_DEV_STAT_WRITE_ERRS);
+			else
+				btrfs_dev_stat_inc(dev,
					BTRFS_DEV_STAT_READ_ERRS);
+			if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
+				btrfs_dev_stat_inc(dev,
+					BTRFS_DEV_STAT_FLUSH_ERRS);
+			btrfs_dev_stat_print_on_error(dev);
+		}
+	}
 
 	if (bio == bbio->orig_bio)
 		is_orig_bio = 1;
@@ -4149,6 +4195,8 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
 			bio = first_bio;
 		}
 		bio->bi_private = bbio;
+		bio->bi_private = merge_stripe_index_into_bio_private(
+				bio->bi_private, (unsigned int)dev_nr);
 		bio->bi_end_io = btrfs_end_bio;
 		bio->bi_sector = bbio->stripes[dev_nr].physical >> 9;
 		dev = bbio->stripes[dev_nr].dev;
@@ -4509,6 +4557,28 @@ int btrfs_read_sys_array(struct btrfs_root *root)
 	return ret;
 }
 
+struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root,
+						   u64 logical, int mirror_num)
+{
+	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
+	int ret;
+	u64 map_length = 0;
+	struct btrfs_bio *bbio = NULL;
+	struct btrfs_device *device;
+
+	BUG_ON(mirror_num == 0);
+	ret = btrfs_map_block(map_tree, WRITE, logical, &map_length, &bbio,
+			      mirror_num);
+	if (ret) {
+		BUG_ON(bbio != NULL);
+		return NULL;
+	}
+	BUG_ON(mirror_num != bbio->mirror_num);
+	device = bbio->stripes[mirror_num - 1].dev;
+	kfree(bbio);
+	return device;
+}
+
 int btrfs_read_chunk_tree(struct btrfs_root *root)
 {
 	struct btrfs_path *path;
@@ -4583,3 +4653,23 @@ error:
 	btrfs_free_path(path);
 	return ret;
 }
+
+void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
+{
+	btrfs_dev_stat_inc(dev, index);
+	btrfs_dev_stat_print_on_error(dev);
+}
+
+void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
+{
+	printk_ratelimited(KERN_ERR
+		"btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
+		dev->name,
+		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
+		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
+		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
+		btrfs_dev_stat_read(dev,
+				    BTRFS_DEV_STAT_CORRUPTION_ERRS),
+		btrfs_dev_stat_read(dev,
+				    BTRFS_DEV_STAT_GENERATION_ERRS));
+}
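
merge_stripe_index_into_bio_private() above stores a small stripe index in the two low bits of an aligned pointer instead of allocating a wrapper structure. A stand-alone sketch of that low-bit tagging trick follows; the function names echo the patch, while main() and the malloc'ed payload are illustrative only.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void *merge_index(void *ptr, unsigned int index)
{
	assert(((uintptr_t)ptr & 3) == 0);	/* low two bits must be free */
	assert(index <= 3);
	return (void *)((uintptr_t)ptr | index);
}

static void *extract_ptr(void *tagged)
{
	return (void *)((uintptr_t)tagged & ~(uintptr_t)3);
}

static unsigned int extract_index(void *tagged)
{
	return (unsigned int)((uintptr_t)tagged & 3);
}

int main(void)
{
	int *payload = malloc(sizeof(*payload));	/* at least 4-byte aligned */
	void *tagged = merge_index(payload, 2);

	printf("pointer intact: %d, index: %u\n",
	       extract_ptr(tagged) == (void *)payload, extract_index(tagged));
	free(extract_ptr(tagged));
	return 0;
}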
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index bb6b03f97aaa..193b2835e6ae 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -22,6 +22,7 @@
 #include <linux/bio.h>
 #include <linux/sort.h>
 #include "async-thread.h"
+#include "ioctl.h"
 
 #define BTRFS_STRIPE_LEN	(64 * 1024)
 
@@ -106,6 +107,10 @@ struct btrfs_device {
 	struct completion flush_wait;
 	int nobarriers;
 
+	/* disk I/O failure stats. For detailed description refer to
+	 * enum btrfs_dev_stat_values in ioctl.h */
+	int dev_stats_dirty; /* counters need to be written to disk */
+	atomic_t dev_stat_values[BTRFS_DEV_STAT_VALUES_MAX];
 };
 
 struct btrfs_fs_devices {
@@ -281,4 +286,44 @@ int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
 int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
 			 u64 *start, u64 *max_avail);
+struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root,
+						   u64 logical, int mirror_num);
+void btrfs_dev_stat_print_on_error(struct btrfs_device *device);
+void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
+
+static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
+				      int index)
+{
+	atomic_inc(dev->dev_stat_values + index);
+	dev->dev_stats_dirty = 1;
+}
+
+static inline int btrfs_dev_stat_read(struct btrfs_device *dev,
+				      int index)
+{
+	return atomic_read(dev->dev_stat_values + index);
+}
+
+static inline int btrfs_dev_stat_read_and_reset(struct btrfs_device *dev,
+						int index)
+{
+	int ret;
+
+	ret = atomic_xchg(dev->dev_stat_values + index, 0);
+	dev->dev_stats_dirty = 1;
+	return ret;
+}
+
+static inline void btrfs_dev_stat_set(struct btrfs_device *dev,
+				      int index, unsigned long val)
+{
+	atomic_set(dev->dev_stat_values + index, val);
+	dev->dev_stats_dirty = 1;
+}
+
+static inline void btrfs_dev_stat_reset(struct btrfs_device *dev,
+					int index)
+{
+	btrfs_dev_stat_set(dev, index, 0);
+}
 #endif
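
One detail worth noting in the volumes.h helpers: btrfs_dev_stat_read_and_reset() uses atomic_xchg(), so the counter is read and cleared in a single atomic step and a concurrent increment cannot slip in between the read and the reset. A user-space sketch of the same semantics with C11 atomics standing in for the kernel's atomic_t (illustrative, not the kernel API):

#include <stdatomic.h>
#include <stdio.h>

static int read_and_reset(atomic_int *counter)
{
	return atomic_exchange(counter, 0);	/* old value out, zero in */
}

int main(void)
{
	atomic_int write_errs = 5;

	printf("drained %d errors\n", read_and_reset(&write_errs));	/* 5 */
	printf("counter is now %d\n", atomic_load(&write_errs));	/* 0 */
	return 0;
}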