author		Arne Jansen <sensille@gmx.net>		2011-11-11 08:17:10 -0500
committer	Chris Mason <chris.mason@oracle.com>	2011-11-11 08:17:10 -0500
commit		69f4cb526bd02ae5af35846f9a710c099eec3347 (patch)
tree		0fc400c20ab6293603b8b615467893a13426c82c /fs/btrfs/scrub.c
parent		62f30c5462374b991e7e3f42d49ce2265c1b82f1 (diff)
Btrfs: handle bio_add_page failure gracefully in scrub
Currently scrub fails with ENOMEM when bio_add_page fails. Unfortunately, dm-based targets accept only one page per bio, thus making scrub always fail. This patch just submits the current bio when an error is encountered and starts a new one.

Signed-off-by: Arne Jansen <sensille@gmx.net>
Signed-off-by: Chris Mason <chris.mason@oracle.com>
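The core of the change is a simple pattern: allocate the bio when the first page is queued, and when bio_add_page() refuses another page, submit what has been collected so far and retry with a fresh bio instead of bailing out with ENOMEM. Below is a minimal userspace sketch of that pattern; struct fake_bio and the fake_* helpers are illustrative stand-ins, not the kernel API or the actual scrub code.

/*
 * Minimal userspace model of the retry pattern (not the kernel API):
 * when adding a page to the current bio fails -- e.g. because a dm
 * target accepts only one page per bio -- submit what has been
 * collected so far and start a new bio instead of returning ENOMEM.
 */
#include <stdio.h>

#define PAGES_TO_SCRUB 5

struct fake_bio {
	int count;	/* pages currently queued in this bio */
	int max;	/* pages the target will accept per bio */
};

/* Stand-in for bio_add_page(): returns 0 on failure, nonzero on success. */
static int fake_add_page(struct fake_bio *bio)
{
	if (bio->count >= bio->max)
		return 0;
	bio->count++;
	return 1;
}

/* Stand-in for scrub_submit(): hand off the current bio and reset it. */
static void fake_submit(struct fake_bio *bio)
{
	printf("submitting bio with %d page(s)\n", bio->count);
	bio->count = 0;
}

int main(void)
{
	/* Model a dm-style target that accepts only one page per bio. */
	struct fake_bio bio = { .count = 0, .max = 1 };
	int i;

	for (i = 0; i < PAGES_TO_SCRUB; i++) {
again:
		if (!fake_add_page(&bio)) {
			/* Old behaviour: return -ENOMEM. New behaviour: */
			fake_submit(&bio);	/* flush the full bio ... */
			goto again;		/* ... and retry the page */
		}
	}
	if (bio.count)
		fake_submit(&bio);	/* flush the final partial bio */
	return 0;
}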
Diffstat (limited to 'fs/btrfs/scrub.c')
-rw-r--r--	fs/btrfs/scrub.c	64
1 file changed, 29 insertions(+), 35 deletions(-)
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index ed11d3866afd..f4190f22edfb 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -944,50 +944,18 @@ static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer)
 static int scrub_submit(struct scrub_dev *sdev)
 {
 	struct scrub_bio *sbio;
-	struct bio *bio;
-	int i;
 
 	if (sdev->curr == -1)
 		return 0;
 
 	sbio = sdev->bios[sdev->curr];
-
-	bio = bio_alloc(GFP_NOFS, sbio->count);
-	if (!bio)
-		goto nomem;
-
-	bio->bi_private = sbio;
-	bio->bi_end_io = scrub_bio_end_io;
-	bio->bi_bdev = sdev->dev->bdev;
-	bio->bi_sector = sbio->physical >> 9;
-
-	for (i = 0; i < sbio->count; ++i) {
-		struct page *page;
-		int ret;
-
-		page = alloc_page(GFP_NOFS);
-		if (!page)
-			goto nomem;
-
-		ret = bio_add_page(bio, page, PAGE_SIZE, 0);
-		if (!ret) {
-			__free_page(page);
-			goto nomem;
-		}
-	}
-
 	sbio->err = 0;
 	sdev->curr = -1;
 	atomic_inc(&sdev->in_flight);
 
-	submit_bio(READ, bio);
+	submit_bio(READ, sbio->bio);
 
 	return 0;
-
-nomem:
-	scrub_free_bio(bio);
-
-	return -ENOMEM;
 }
 
 static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
@@ -995,6 +963,8 @@ static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
 		      u8 *csum, int force)
 {
 	struct scrub_bio *sbio;
+	struct page *page;
+	int ret;
 
 again:
 	/*
@@ -1015,12 +985,22 @@ again:
 	}
 	sbio = sdev->bios[sdev->curr];
 	if (sbio->count == 0) {
+		struct bio *bio;
+
 		sbio->physical = physical;
 		sbio->logical = logical;
+		bio = bio_alloc(GFP_NOFS, SCRUB_PAGES_PER_BIO);
+		if (!bio)
+			return -ENOMEM;
+
+		bio->bi_private = sbio;
+		bio->bi_end_io = scrub_bio_end_io;
+		bio->bi_bdev = sdev->dev->bdev;
+		bio->bi_sector = sbio->physical >> 9;
+		sbio->err = 0;
+		sbio->bio = bio;
 	} else if (sbio->physical + sbio->count * PAGE_SIZE != physical ||
 		   sbio->logical + sbio->count * PAGE_SIZE != logical) {
-		int ret;
-
 		ret = scrub_submit(sdev);
 		if (ret)
 			return ret;
@@ -1030,6 +1010,20 @@ again:
 	sbio->spag[sbio->count].generation = gen;
 	sbio->spag[sbio->count].have_csum = 0;
 	sbio->spag[sbio->count].mirror_num = mirror_num;
+
+	page = alloc_page(GFP_NOFS);
+	if (!page)
+		return -ENOMEM;
+
+	ret = bio_add_page(sbio->bio, page, PAGE_SIZE, 0);
+	if (!ret) {
+		__free_page(page);
+		ret = scrub_submit(sdev);
+		if (ret)
+			return ret;
+		goto again;
+	}
+
 	if (csum) {
 		sbio->spag[sbio->count].have_csum = 1;
 		memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size);