aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMiao Xie <miaox@cn.fujitsu.com>2014-10-14 23:18:44 -0400
committerMiao Xie <miaox@cn.fujitsu.com>2014-12-02 21:18:44 -0500
commitb89e1b012c7f81123344058d5f245b844464d30c (patch)
treecffad28619632e95a9a59aa7ec50d774f012222c
parent6de65650758e819d3dfdc621010dcd6117e8d186 (diff)
Btrfs, raid56: don't change bbio and raid_map
Because we will reuse bbio and raid_map during the scrub later, it is better that we don't change any members of bbio and don't free it at the end of the IO request. So we introduced similar members into the raid bio, and don't access those bbio members any more. Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
-rw-r--r--fs/btrfs/raid56.c42
1 file changed, 23 insertions, 19 deletions
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 6a41631cb959..c54b0e64c590 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -58,7 +58,6 @@
58 */ 58 */
59#define RBIO_CACHE_READY_BIT 3 59#define RBIO_CACHE_READY_BIT 3
60 60
61
62#define RBIO_CACHE_SIZE 1024 61#define RBIO_CACHE_SIZE 1024
63 62
64struct btrfs_raid_bio { 63struct btrfs_raid_bio {
@@ -146,6 +145,10 @@ struct btrfs_raid_bio {
146 145
147 atomic_t refs; 146 atomic_t refs;
148 147
148
149 atomic_t stripes_pending;
150
151 atomic_t error;
149 /* 152 /*
150 * these are two arrays of pointers. We allocate the 153 * these are two arrays of pointers. We allocate the
151 * rbio big enough to hold them both and setup their 154 * rbio big enough to hold them both and setup their
@@ -858,13 +861,13 @@ static void raid_write_end_io(struct bio *bio, int err)
858 861
859 bio_put(bio); 862 bio_put(bio);
860 863
861 if (!atomic_dec_and_test(&rbio->bbio->stripes_pending)) 864 if (!atomic_dec_and_test(&rbio->stripes_pending))
862 return; 865 return;
863 866
864 err = 0; 867 err = 0;
865 868
866 /* OK, we have read all the stripes we need to. */ 869 /* OK, we have read all the stripes we need to. */
867 if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors) 870 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
868 err = -EIO; 871 err = -EIO;
869 872
870 rbio_orig_end_io(rbio, err, 0); 873 rbio_orig_end_io(rbio, err, 0);
@@ -949,6 +952,8 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
949 rbio->faila = -1; 952 rbio->faila = -1;
950 rbio->failb = -1; 953 rbio->failb = -1;
951 atomic_set(&rbio->refs, 1); 954 atomic_set(&rbio->refs, 1);
955 atomic_set(&rbio->error, 0);
956 atomic_set(&rbio->stripes_pending, 0);
952 957
953 /* 958 /*
954 * the stripe_pages and bio_pages array point to the extra 959 * the stripe_pages and bio_pages array point to the extra
@@ -1169,7 +1174,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1169 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); 1174 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1170 spin_unlock_irq(&rbio->bio_list_lock); 1175 spin_unlock_irq(&rbio->bio_list_lock);
1171 1176
1172 atomic_set(&rbio->bbio->error, 0); 1177 atomic_set(&rbio->error, 0);
1173 1178
1174 /* 1179 /*
1175 * now that we've set rmw_locked, run through the 1180 * now that we've set rmw_locked, run through the
@@ -1245,8 +1250,8 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1245 } 1250 }
1246 } 1251 }
1247 1252
1248 atomic_set(&bbio->stripes_pending, bio_list_size(&bio_list)); 1253 atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
1249 BUG_ON(atomic_read(&bbio->stripes_pending) == 0); 1254 BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
1250 1255
1251 while (1) { 1256 while (1) {
1252 bio = bio_list_pop(&bio_list); 1257 bio = bio_list_pop(&bio_list);
@@ -1331,11 +1336,11 @@ static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
1331 if (rbio->faila == -1) { 1336 if (rbio->faila == -1) {
1332 /* first failure on this rbio */ 1337 /* first failure on this rbio */
1333 rbio->faila = failed; 1338 rbio->faila = failed;
1334 atomic_inc(&rbio->bbio->error); 1339 atomic_inc(&rbio->error);
1335 } else if (rbio->failb == -1) { 1340 } else if (rbio->failb == -1) {
1336 /* second failure on this rbio */ 1341 /* second failure on this rbio */
1337 rbio->failb = failed; 1342 rbio->failb = failed;
1338 atomic_inc(&rbio->bbio->error); 1343 atomic_inc(&rbio->error);
1339 } else { 1344 } else {
1340 ret = -EIO; 1345 ret = -EIO;
1341 } 1346 }
@@ -1394,11 +1399,11 @@ static void raid_rmw_end_io(struct bio *bio, int err)
1394 1399
1395 bio_put(bio); 1400 bio_put(bio);
1396 1401
1397 if (!atomic_dec_and_test(&rbio->bbio->stripes_pending)) 1402 if (!atomic_dec_and_test(&rbio->stripes_pending))
1398 return; 1403 return;
1399 1404
1400 err = 0; 1405 err = 0;
1401 if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors) 1406 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
1402 goto cleanup; 1407 goto cleanup;
1403 1408
1404 /* 1409 /*
@@ -1439,7 +1444,6 @@ static void async_read_rebuild(struct btrfs_raid_bio *rbio)
1439static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio) 1444static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
1440{ 1445{
1441 int bios_to_read = 0; 1446 int bios_to_read = 0;
1442 struct btrfs_bio *bbio = rbio->bbio;
1443 struct bio_list bio_list; 1447 struct bio_list bio_list;
1444 int ret; 1448 int ret;
1445 int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE); 1449 int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
@@ -1455,7 +1459,7 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
1455 1459
1456 index_rbio_pages(rbio); 1460 index_rbio_pages(rbio);
1457 1461
1458 atomic_set(&rbio->bbio->error, 0); 1462 atomic_set(&rbio->error, 0);
1459 /* 1463 /*
1460 * build a list of bios to read all the missing parts of this 1464 * build a list of bios to read all the missing parts of this
1461 * stripe 1465 * stripe
@@ -1503,7 +1507,7 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
1503 * the bbio may be freed once we submit the last bio. Make sure 1507 * the bbio may be freed once we submit the last bio. Make sure
1504 * not to touch it after that 1508 * not to touch it after that
1505 */ 1509 */
1506 atomic_set(&bbio->stripes_pending, bios_to_read); 1510 atomic_set(&rbio->stripes_pending, bios_to_read);
1507 while (1) { 1511 while (1) {
1508 bio = bio_list_pop(&bio_list); 1512 bio = bio_list_pop(&bio_list);
1509 if (!bio) 1513 if (!bio)
@@ -1917,10 +1921,10 @@ static void raid_recover_end_io(struct bio *bio, int err)
1917 set_bio_pages_uptodate(bio); 1921 set_bio_pages_uptodate(bio);
1918 bio_put(bio); 1922 bio_put(bio);
1919 1923
1920 if (!atomic_dec_and_test(&rbio->bbio->stripes_pending)) 1924 if (!atomic_dec_and_test(&rbio->stripes_pending))
1921 return; 1925 return;
1922 1926
1923 if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors) 1927 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
1924 rbio_orig_end_io(rbio, -EIO, 0); 1928 rbio_orig_end_io(rbio, -EIO, 0);
1925 else 1929 else
1926 __raid_recover_end_io(rbio); 1930 __raid_recover_end_io(rbio);
@@ -1951,7 +1955,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
1951 if (ret) 1955 if (ret)
1952 goto cleanup; 1956 goto cleanup;
1953 1957
1954 atomic_set(&rbio->bbio->error, 0); 1958 atomic_set(&rbio->error, 0);
1955 1959
1956 /* 1960 /*
1957 * read everything that hasn't failed. Thanks to the 1961 * read everything that hasn't failed. Thanks to the
@@ -1960,7 +1964,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
1960 */ 1964 */
1961 for (stripe = 0; stripe < bbio->num_stripes; stripe++) { 1965 for (stripe = 0; stripe < bbio->num_stripes; stripe++) {
1962 if (rbio->faila == stripe || rbio->failb == stripe) { 1966 if (rbio->faila == stripe || rbio->failb == stripe) {
1963 atomic_inc(&rbio->bbio->error); 1967 atomic_inc(&rbio->error);
1964 continue; 1968 continue;
1965 } 1969 }
1966 1970
@@ -1990,7 +1994,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
1990 * were up to date, or we might have no bios to read because 1994 * were up to date, or we might have no bios to read because
1991 * the devices were gone. 1995 * the devices were gone.
1992 */ 1996 */
1993 if (atomic_read(&rbio->bbio->error) <= rbio->bbio->max_errors) { 1997 if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
1994 __raid_recover_end_io(rbio); 1998 __raid_recover_end_io(rbio);
1995 goto out; 1999 goto out;
1996 } else { 2000 } else {
@@ -2002,7 +2006,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
2002 * the bbio may be freed once we submit the last bio. Make sure 2006 * the bbio may be freed once we submit the last bio. Make sure
2003 * not to touch it after that 2007 * not to touch it after that
2004 */ 2008 */
2005 atomic_set(&bbio->stripes_pending, bios_to_read); 2009 atomic_set(&rbio->stripes_pending, bios_to_read);
2006 while (1) { 2010 while (1) {
2007 bio = bio_list_pop(&bio_list); 2011 bio = bio_list_pop(&bio_list);
2008 if (!bio) 2012 if (!bio)