aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorNeilBrown <neilb@suse.de>2009-03-30 23:39:38 -0400
committerNeilBrown <neilb@suse.de>2009-03-30 23:39:38 -0400
commit112bf8970dbdfc00bd4667da5996e57c2ce58066 (patch)
tree504afc2dec2ef28d27ef9248eb80b6165cdf95de /drivers
parentb5663ba405fe3e51176ddb6c91a5e186590c26b5 (diff)
md/raid5: change raid5_compute_sector and stripe_to_pdidx to take a 'previous' argument
This is similar to the recent change to get_active_stripe. There is no functional change, just some rearrangement to make future patches cleaner. Signed-off-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/md/raid5.c78
1 files changed, 34 insertions, 44 deletions
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index c38310be0d95..c33073fe7426 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -274,7 +274,7 @@ static int grow_buffers(struct stripe_head *sh, int num)
274} 274}
275 275
276static void raid5_build_block(struct stripe_head *sh, int i); 276static void raid5_build_block(struct stripe_head *sh, int i);
277static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks); 277static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int previous);
278 278
279static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) 279static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
280{ 280{
@@ -293,7 +293,7 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
293 293
294 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; 294 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
295 sh->sector = sector; 295 sh->sector = sector;
296 sh->pd_idx = stripe_to_pdidx(sector, conf, sh->disks); 296 sh->pd_idx = stripe_to_pdidx(sector, conf, previous);
297 sh->state = 0; 297 sh->state = 0;
298 298
299 299
@@ -1233,15 +1233,18 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
1233 * Input: a 'big' sector number, 1233 * Input: a 'big' sector number,
1234 * Output: index of the data and parity disk, and the sector # in them. 1234 * Output: index of the data and parity disk, and the sector # in them.
1235 */ 1235 */
1236static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks, 1236static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1237 unsigned int data_disks, unsigned int * dd_idx, 1237 int previous,
1238 unsigned int * pd_idx, raid5_conf_t *conf) 1238 int *dd_idx, int *pd_idx)
1239{ 1239{
1240 long stripe; 1240 long stripe;
1241 unsigned long chunk_number; 1241 unsigned long chunk_number;
1242 unsigned int chunk_offset; 1242 unsigned int chunk_offset;
1243 sector_t new_sector; 1243 sector_t new_sector;
1244 int sectors_per_chunk = conf->chunk_size >> 9; 1244 int sectors_per_chunk = conf->chunk_size >> 9;
1245 int raid_disks = previous ? conf->previous_raid_disks
1246 : conf->raid_disks;
1247 int data_disks = raid_disks - conf->max_degraded;
1245 1248
1246 /* First compute the information on this sector */ 1249 /* First compute the information on this sector */
1247 1250
@@ -1406,7 +1409,9 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i)
1406 chunk_number = stripe * data_disks + i; 1409 chunk_number = stripe * data_disks + i;
1407 r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset; 1410 r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;
1408 1411
1409 check = raid5_compute_sector(r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf); 1412 check = raid5_compute_sector(conf, r_sector,
1413 (raid_disks != conf->raid_disks),
1414 &dummy1, &dummy2);
1410 if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) { 1415 if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) {
1411 printk(KERN_ERR "compute_blocknr: map not correct\n"); 1416 printk(KERN_ERR "compute_blocknr: map not correct\n");
1412 return 0; 1417 return 0;
@@ -1806,16 +1811,18 @@ static int page_is_zero(struct page *p)
1806 memcmp(a, a+4, STRIPE_SIZE-4)==0); 1811 memcmp(a, a+4, STRIPE_SIZE-4)==0);
1807} 1812}
1808 1813
1809static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks) 1814static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int previous)
1810{ 1815{
1811 int sectors_per_chunk = conf->chunk_size >> 9; 1816 int sectors_per_chunk = conf->chunk_size >> 9;
1812 int pd_idx, dd_idx; 1817 int pd_idx, dd_idx;
1813 int chunk_offset = sector_div(stripe, sectors_per_chunk); 1818 int chunk_offset = sector_div(stripe, sectors_per_chunk);
1819 int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
1814 1820
1815 raid5_compute_sector(stripe * (disks - conf->max_degraded) 1821 raid5_compute_sector(conf,
1822 stripe * (disks - conf->max_degraded)
1816 *sectors_per_chunk + chunk_offset, 1823 *sectors_per_chunk + chunk_offset,
1817 disks, disks - conf->max_degraded, 1824 previous,
1818 &dd_idx, &pd_idx, conf); 1825 &dd_idx, &pd_idx);
1819 return pd_idx; 1826 return pd_idx;
1820} 1827}
1821 1828
@@ -2478,10 +2485,8 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
2478 struct stripe_head *sh2; 2485 struct stripe_head *sh2;
2479 2486
2480 sector_t bn = compute_blocknr(sh, i); 2487 sector_t bn = compute_blocknr(sh, i);
2481 sector_t s = raid5_compute_sector(bn, conf->raid_disks, 2488 sector_t s = raid5_compute_sector(conf, bn, 0,
2482 conf->raid_disks - 2489 &dd_idx, &pd_idx);
2483 conf->max_degraded, &dd_idx,
2484 &pd_idx, conf);
2485 sh2 = get_active_stripe(conf, s, 0, 1); 2490 sh2 = get_active_stripe(conf, s, 0, 1);
2486 if (sh2 == NULL) 2491 if (sh2 == NULL)
2487 /* so far only the early blocks of this stripe 2492 /* so far only the early blocks of this stripe
@@ -2768,8 +2773,7 @@ static bool handle_stripe5(struct stripe_head *sh)
2768 !sh->reconstruct_state) { 2773 !sh->reconstruct_state) {
2769 /* Need to write out all blocks after computing parity */ 2774 /* Need to write out all blocks after computing parity */
2770 sh->disks = conf->raid_disks; 2775 sh->disks = conf->raid_disks;
2771 sh->pd_idx = stripe_to_pdidx(sh->sector, conf, 2776 sh->pd_idx = stripe_to_pdidx(sh->sector, conf, 0);
2772 conf->raid_disks);
2773 schedule_reconstruction5(sh, &s, 1, 1); 2777 schedule_reconstruction5(sh, &s, 1, 1);
2774 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { 2778 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
2775 clear_bit(STRIPE_EXPAND_READY, &sh->state); 2779 clear_bit(STRIPE_EXPAND_READY, &sh->state);
@@ -2987,8 +2991,7 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
2987 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) { 2991 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
2988 /* Need to write out all blocks after computing P&Q */ 2992 /* Need to write out all blocks after computing P&Q */
2989 sh->disks = conf->raid_disks; 2993 sh->disks = conf->raid_disks;
2990 sh->pd_idx = stripe_to_pdidx(sh->sector, conf, 2994 sh->pd_idx = stripe_to_pdidx(sh->sector, conf, 0);
2991 conf->raid_disks);
2992 compute_parity6(sh, RECONSTRUCT_WRITE); 2995 compute_parity6(sh, RECONSTRUCT_WRITE);
2993 for (i = conf->raid_disks ; i-- ; ) { 2996 for (i = conf->raid_disks ; i-- ; ) {
2994 set_bit(R5_LOCKED, &sh->dev[i].flags); 2997 set_bit(R5_LOCKED, &sh->dev[i].flags);
@@ -3260,8 +3263,6 @@ static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
3260{ 3263{
3261 mddev_t *mddev = q->queuedata; 3264 mddev_t *mddev = q->queuedata;
3262 raid5_conf_t *conf = mddev_to_conf(mddev); 3265 raid5_conf_t *conf = mddev_to_conf(mddev);
3263 const unsigned int raid_disks = conf->raid_disks;
3264 const unsigned int data_disks = raid_disks - conf->max_degraded;
3265 unsigned int dd_idx, pd_idx; 3266 unsigned int dd_idx, pd_idx;
3266 struct bio* align_bi; 3267 struct bio* align_bi;
3267 mdk_rdev_t *rdev; 3268 mdk_rdev_t *rdev;
@@ -3285,12 +3286,9 @@ static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
3285 /* 3286 /*
3286 * compute position 3287 * compute position
3287 */ 3288 */
3288 align_bi->bi_sector = raid5_compute_sector(raid_bio->bi_sector, 3289 align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector,
3289 raid_disks, 3290 0,
3290 data_disks, 3291 &dd_idx, &pd_idx);
3291 &dd_idx,
3292 &pd_idx,
3293 conf);
3294 3292
3295 rcu_read_lock(); 3293 rcu_read_lock();
3296 rdev = rcu_dereference(conf->disks[dd_idx].rdev); 3294 rdev = rcu_dereference(conf->disks[dd_idx].rdev);
@@ -3447,8 +3445,9 @@ static int make_request(struct request_queue *q, struct bio * bi)
3447 } 3445 }
3448 data_disks = disks - conf->max_degraded; 3446 data_disks = disks - conf->max_degraded;
3449 3447
3450 new_sector = raid5_compute_sector(logical_sector, disks, data_disks, 3448 new_sector = raid5_compute_sector(conf, logical_sector,
3451 &dd_idx, &pd_idx, conf); 3449 previous,
3450 &dd_idx, &pd_idx);
3452 pr_debug("raid5: make_request, sector %llu logical %llu\n", 3451 pr_debug("raid5: make_request, sector %llu logical %llu\n",
3453 (unsigned long long)new_sector, 3452 (unsigned long long)new_sector,
3454 (unsigned long long)logical_sector); 3453 (unsigned long long)logical_sector);
@@ -3625,14 +3624,12 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
3625 * block on the destination stripes. 3624 * block on the destination stripes.
3626 */ 3625 */
3627 first_sector = 3626 first_sector =
3628 raid5_compute_sector(sector_nr*(new_data_disks), 3627 raid5_compute_sector(conf, sector_nr*(new_data_disks),
3629 raid_disks, data_disks, 3628 1, &dd_idx, &pd_idx);
3630 &dd_idx, &pd_idx, conf);
3631 last_sector = 3629 last_sector =
3632 raid5_compute_sector((sector_nr+conf->chunk_size/512) 3630 raid5_compute_sector(conf, ((sector_nr+conf->chunk_size/512)
3633 *(new_data_disks) -1, 3631 *(new_data_disks) - 1),
3634 raid_disks, data_disks, 3632 1, &dd_idx, &pd_idx);
3635 &dd_idx, &pd_idx, conf);
3636 if (last_sector >= mddev->dev_sectors) 3633 if (last_sector >= mddev->dev_sectors)
3637 last_sector = mddev->dev_sectors - 1; 3634 last_sector = mddev->dev_sectors - 1;
3638 while (first_sector <= last_sector) { 3635 while (first_sector <= last_sector) {
@@ -3669,8 +3666,6 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
3669{ 3666{
3670 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 3667 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
3671 struct stripe_head *sh; 3668 struct stripe_head *sh;
3672 int pd_idx;
3673 int raid_disks = conf->raid_disks;
3674 sector_t max_sector = mddev->dev_sectors; 3669 sector_t max_sector = mddev->dev_sectors;
3675 int sync_blocks; 3670 int sync_blocks;
3676 int still_degraded = 0; 3671 int still_degraded = 0;
@@ -3725,7 +3720,6 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
3725 3720
3726 bitmap_cond_end_sync(mddev->bitmap, sector_nr); 3721 bitmap_cond_end_sync(mddev->bitmap, sector_nr);
3727 3722
3728 pd_idx = stripe_to_pdidx(sector_nr, conf, raid_disks);
3729 sh = get_active_stripe(conf, sector_nr, 0, 1); 3723 sh = get_active_stripe(conf, sector_nr, 0, 1);
3730 if (sh == NULL) { 3724 if (sh == NULL) {
3731 sh = get_active_stripe(conf, sector_nr, 0, 0); 3725 sh = get_active_stripe(conf, sector_nr, 0, 0);
@@ -3777,12 +3771,8 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
3777 int handled = 0; 3771 int handled = 0;
3778 3772
3779 logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 3773 logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
3780 sector = raid5_compute_sector( logical_sector, 3774 sector = raid5_compute_sector(conf, logical_sector,
3781 conf->raid_disks, 3775 0, &dd_idx, &pd_idx);
3782 conf->raid_disks - conf->max_degraded,
3783 &dd_idx,
3784 &pd_idx,
3785 conf);
3786 last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9); 3776 last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);
3787 3777
3788 for (; logical_sector < last_sector; 3778 for (; logical_sector < last_sector;