author    NeilBrown <neilb@suse.de>    2009-03-31 00:19:07 -0400
committer NeilBrown <neilb@suse.de>    2009-03-31 00:19:07 -0400
commit    784052ecc6ade6b6acf4f67e4ada8e5f2e6df446 (patch)
tree      f0b1060215922ba30a4d676a00dbdad8ddb724c9 /drivers/md/raid5.c
parent    86b42c713be3e5f6807aa14b4cbdb005d35c64d5 (diff)
md/raid5: prepare for allowing reshape to change chunksize.

Add "prev_chunk" to raid5_conf_t, similar to "previous_raid_disks", to
remember what the chunk size was before the reshape that is currently
underway.

This seems like duplication with "chunk_size" and "new_chunk" in mddev_t,
and to some extent it is, but there are differences.  The values in
mddev_t are always defined and often the same.  The prev* values are
only defined if a reshape is underway.

Also (and more significantly) the raid5_conf_t values will be changed at
the same time (inside an appropriate lock) that the reshape is started by
setting reshape_position.  In contrast, the new_chunk value is set when
the sysfs file is written which could be well before the reshape starts.

Signed-off-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--    drivers/md/raid5.c    43
1 file changed, 27 insertions(+), 16 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 73cdf43a6479..7638cc31e7e8 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -299,7 +299,7 @@ static int grow_buffers(struct stripe_head *sh, int num)
 	return 0;
 }
 
-static void raid5_build_block(struct stripe_head *sh, int i);
+static void raid5_build_block(struct stripe_head *sh, int i, int previous);
 static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
 			   struct stripe_head *sh);
 
@@ -337,7 +337,7 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
 			BUG();
 		}
 		dev->flags = 0;
-		raid5_build_block(sh, i);
+		raid5_build_block(sh, i, previous);
 	}
 	insert_hash(conf, sh);
 }
@@ -1212,9 +1212,9 @@ static void raid5_end_write_request(struct bio *bi, int error)
 }
 
 
-static sector_t compute_blocknr(struct stripe_head *sh, int i);
+static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
 
-static void raid5_build_block(struct stripe_head *sh, int i)
+static void raid5_build_block(struct stripe_head *sh, int i, int previous)
 {
 	struct r5dev *dev = &sh->dev[i];
 
@@ -1230,7 +1230,7 @@ static void raid5_build_block(struct stripe_head *sh, int i)
 	dev->req.bi_private = sh;
 
 	dev->flags = 0;
-	dev->sector = compute_blocknr(sh, i);
+	dev->sector = compute_blocknr(sh, i, previous);
 }
 
 static void error(mddev_t *mddev, mdk_rdev_t *rdev)
@@ -1273,7 +1273,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
 	int pd_idx, qd_idx;
 	int ddf_layout = 0;
 	sector_t new_sector;
-	int sectors_per_chunk = conf->chunk_size >> 9;
+	int sectors_per_chunk = previous ? (conf->prev_chunk >> 9)
+					 : (conf->chunk_size >> 9);
 	int raid_disks = previous ? conf->previous_raid_disks
 				  : conf->raid_disks;
 	int data_disks = raid_disks - conf->max_degraded;
@@ -1472,13 +1473,14 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
 }
 
 
-static sector_t compute_blocknr(struct stripe_head *sh, int i)
+static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
 {
 	raid5_conf_t *conf = sh->raid_conf;
 	int raid_disks = sh->disks;
 	int data_disks = raid_disks - conf->max_degraded;
 	sector_t new_sector = sh->sector, check;
-	int sectors_per_chunk = conf->chunk_size >> 9;
+	int sectors_per_chunk = previous ? (conf->prev_chunk >> 9)
+					 : (conf->chunk_size >> 9);
 	sector_t stripe;
 	int chunk_offset;
 	int chunk_number, dummy1, dd_idx = i;
@@ -1579,8 +1581,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i)
 	r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;
 
 	check = raid5_compute_sector(conf, r_sector,
-				     (raid_disks != conf->raid_disks),
-				     &dummy1, &sh2);
+				     previous, &dummy1, &sh2);
 	if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
 	    || sh2.qd_idx != sh->qd_idx) {
 		printk(KERN_ERR "compute_blocknr: map not correct\n");
@@ -1992,7 +1993,9 @@ static int page_is_zero(struct page *p)
 static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
 			    struct stripe_head *sh)
 {
-	int sectors_per_chunk = conf->chunk_size >> 9;
+	int sectors_per_chunk =
+		previous ? (conf->prev_chunk >> 9)
+			 : (conf->chunk_size >> 9);
 	int dd_idx;
 	int chunk_offset = sector_div(stripe, sectors_per_chunk);
 	int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
@@ -2662,7 +2665,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
 	int dd_idx, j;
 	struct stripe_head *sh2;
 
-	sector_t bn = compute_blocknr(sh, i);
+	sector_t bn = compute_blocknr(sh, i, 1);
 	sector_t s = raid5_compute_sector(conf, bn, 0,
 					  &dd_idx, NULL);
 	sh2 = get_active_stripe(conf, s, 0, 1);
@@ -3318,6 +3321,8 @@ static int raid5_mergeable_bvec(struct request_queue *q,
 	if ((bvm->bi_rw & 1) == WRITE)
 		return biovec->bv_len; /* always allow writes to be mergeable */
 
+	if (mddev->new_chunk < mddev->chunk_size)
+		chunk_sectors = mddev->new_chunk >> 9;
 	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
 	if (max < 0) max = 0;
 	if (max <= biovec->bv_len && bio_sectors == 0)
@@ -3333,6 +3338,8 @@ static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
 	unsigned int chunk_sectors = mddev->chunk_size >> 9;
 	unsigned int bio_sectors = bio->bi_size >> 9;
 
+	if (mddev->new_chunk < mddev->chunk_size)
+		chunk_sectors = mddev->new_chunk >> 9;
 	return chunk_sectors >=
 		((sector & (chunk_sectors - 1)) + bio_sectors);
 }
@@ -3788,7 +3795,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
 		BUG_ON(conf->reshape_progress == 0);
 		stripe_addr = writepos;
 		BUG_ON((mddev->dev_sectors &
-			~((sector_t)mddev->chunk_size / 512 - 1))
+			~((sector_t)conf->chunk_size / 512 - 1))
 		       - (conf->chunk_size / 512) - stripe_addr
 		       != sector_nr);
 	} else {
@@ -3811,7 +3818,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
 			if (conf->level == 6 &&
 			    j == sh->qd_idx)
 				continue;
-			s = compute_blocknr(sh, j);
+			s = compute_blocknr(sh, j, 0);
 			if (s < raid5_size(mddev, 0, 0)) {
 				skipped = 1;
 				continue;
@@ -4217,6 +4224,7 @@ raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
 	}
 
 	sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
+	sectors &= ~((sector_t)mddev->new_chunk/512 - 1);
 	return sectors * (raid_disks - conf->max_degraded);
 }
 
@@ -4322,6 +4330,8 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
 	conf->algorithm = mddev->new_layout;
 	conf->max_nr_stripes = NR_STRIPES;
 	conf->reshape_progress = mddev->reshape_position;
+	if (conf->reshape_progress != MaxSector)
+		conf->prev_chunk = mddev->chunk_size;
 
 	memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
 		 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
@@ -4385,7 +4395,7 @@ static int run(mddev_t *mddev)
 	 * geometry.
 	 */
 	here_new = mddev->reshape_position;
-	if (sector_div(here_new, (mddev->chunk_size>>9)*
+	if (sector_div(here_new, (mddev->new_chunk>>9)*
 		       (mddev->raid_disks - max_degraded))) {
 		printk(KERN_ERR "raid5: reshape_position not "
 		       "on a stripe boundary\n");
@@ -4789,7 +4799,8 @@ static int raid5_check_reshape(mddev_t *mddev)
 	if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes ||
 	    (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) {
 		printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n",
-		       (mddev->chunk_size / STRIPE_SIZE)*4);
+		       (max(mddev->chunk_size, mddev->new_chunk)
+			/ STRIPE_SIZE)*4);
 		return -ENOSPC;
 	}
 