author     NeilBrown <neilb@suse.de>  2009-03-30 23:39:38 -0400
committer  NeilBrown <neilb@suse.de>  2009-03-30 23:39:38 -0400
commit     911d4ee8536d89ea8a6cd3e96b1c95a3ebc5ea66 (patch)
tree       222c79b83b7cdcd0b73501c40ed11092b9af10cf
parent     d0dabf7e577411c2bf6b616c751544dc241213d4 (diff)
md/raid5: simplify raid5_compute_sector interface
Rather than passing 'pd_idx' and 'qd_idx' to be filled in, pass a
'struct stripe_head *' and fill in the relevant fields.  This is more
extensible.

Signed-off-by: NeilBrown <neilb@suse.de>
-rw-r--r--  drivers/md/raid5.c  118
1 file changed, 58 insertions(+), 60 deletions(-)
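To make the shape of the change concrete before reading the diff, here is a
minimal standalone sketch of the before/after calling convention. The names
and types (sector_t stand-in, stripe_head_sketch, compute_sector) are
simplified placeholders, not the kernel's actual declarations, and the
layout arithmetic is elided:

/* Minimal sketch of the interface change; simplified stand-in types. */
typedef unsigned long long sector_t;

struct stripe_head_sketch {
        int pd_idx;     /* parity disk index */
        int qd_idx;     /* RAID6 'Q' disk index; ~0 when absent */
};

/* Old shape: one out-pointer per index, required from every caller:
 *   static sector_t compute_sector(sector_t r_sector, int *dd_idx,
 *                                  int *pd_idx, int *qd_idx);
 * New shape: the indices land in the stripe_head; callers that only
 * need dd_idx and the return value pass sh == NULL. */
static sector_t compute_sector(sector_t r_sector, int *dd_idx,
                               struct stripe_head_sketch *sh)
{
        int pd_idx = 0, qd_idx = ~0;    /* the real code derives these from
                                         * conf->level and conf->algorithm */
        *dd_idx = 0;                    /* likewise layout-dependent */

        if (sh) {                       /* fill in only when asked */
                sh->pd_idx = pd_idx;
                sh->qd_idx = qd_idx;
        }
        return r_sector;                /* real code maps to a device sector */
}

Adding another per-stripe output later means adding a field to the struct
rather than another pointer argument at every call site, which is the
extensibility the commit message refers to.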
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index cb3e157b52d3..2e2e64f6ef71 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -299,14 +299,13 @@ static int grow_buffers(struct stripe_head *sh, int num)
 }
 
 static void raid5_build_block(struct stripe_head *sh, int i);
-static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int previous,
-                           int *qd_idx);
+static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
+                           struct stripe_head *sh);
 
 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
 {
         raid5_conf_t *conf = sh->raid_conf;
         int i;
-        int qd_idx;
 
         BUG_ON(atomic_read(&sh->count) != 0);
         BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
@@ -320,8 +319,7 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
 
         sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
         sh->sector = sector;
-        sh->pd_idx = stripe_to_pdidx(sector, conf, previous, &qd_idx);
-        sh->qd_idx = qd_idx;
+        stripe_set_idx(sector, conf, previous, sh);
         sh->state = 0;
 
 
@@ -1262,12 +1260,13 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
  * Output: index of the data and parity disk, and the sector # in them.
  */
 static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
-                                     int previous,
-                                     int *dd_idx, int *pd_idx, int *qd_idx)
+                                     int previous, int *dd_idx,
+                                     struct stripe_head *sh)
 {
         long stripe;
         unsigned long chunk_number;
         unsigned int chunk_offset;
+        int pd_idx, qd_idx;
         sector_t new_sector;
         int sectors_per_chunk = conf->chunk_size >> 9;
         int raid_disks = previous ? conf->previous_raid_disks
@@ -1296,30 +1295,30 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
         /*
          * Select the parity disk based on the user selected algorithm.
          */
-        *qd_idx = ~0;
+        pd_idx = qd_idx = ~0;
         switch(conf->level) {
         case 4:
-                *pd_idx = data_disks;
+                pd_idx = data_disks;
                 break;
         case 5:
                 switch (conf->algorithm) {
                 case ALGORITHM_LEFT_ASYMMETRIC:
-                        *pd_idx = data_disks - stripe % raid_disks;
-                        if (*dd_idx >= *pd_idx)
+                        pd_idx = data_disks - stripe % raid_disks;
+                        if (*dd_idx >= pd_idx)
                                 (*dd_idx)++;
                         break;
                 case ALGORITHM_RIGHT_ASYMMETRIC:
-                        *pd_idx = stripe % raid_disks;
-                        if (*dd_idx >= *pd_idx)
+                        pd_idx = stripe % raid_disks;
+                        if (*dd_idx >= pd_idx)
                                 (*dd_idx)++;
                         break;
                 case ALGORITHM_LEFT_SYMMETRIC:
-                        *pd_idx = data_disks - stripe % raid_disks;
-                        *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
+                        pd_idx = data_disks - stripe % raid_disks;
+                        *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
                         break;
                 case ALGORITHM_RIGHT_SYMMETRIC:
-                        *pd_idx = stripe % raid_disks;
-                        *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
+                        pd_idx = stripe % raid_disks;
+                        *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
                         break;
                 default:
                         printk(KERN_ERR "raid5: unsupported algorithm %d\n",
@@ -1331,32 +1330,32 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
                 /**** FIX THIS ****/
                 switch (conf->algorithm) {
                 case ALGORITHM_LEFT_ASYMMETRIC:
-                        *pd_idx = raid_disks - 1 - (stripe % raid_disks);
-                        *qd_idx = *pd_idx + 1;
-                        if (*pd_idx == raid_disks-1) {
+                        pd_idx = raid_disks - 1 - (stripe % raid_disks);
+                        qd_idx = pd_idx + 1;
+                        if (pd_idx == raid_disks-1) {
                                 (*dd_idx)++;    /* Q D D D P */
-                                *qd_idx = 0;
-                        } else if (*dd_idx >= *pd_idx)
+                                qd_idx = 0;
+                        } else if (*dd_idx >= pd_idx)
                                 (*dd_idx) += 2; /* D D P Q D */
                         break;
                 case ALGORITHM_RIGHT_ASYMMETRIC:
-                        *pd_idx = stripe % raid_disks;
-                        *qd_idx = *pd_idx + 1;
-                        if (*pd_idx == raid_disks-1) {
+                        pd_idx = stripe % raid_disks;
+                        qd_idx = pd_idx + 1;
+                        if (pd_idx == raid_disks-1) {
                                 (*dd_idx)++;    /* Q D D D P */
-                                *qd_idx = 0;
-                        } else if (*dd_idx >= *pd_idx)
+                                qd_idx = 0;
+                        } else if (*dd_idx >= pd_idx)
                                 (*dd_idx) += 2; /* D D P Q D */
                         break;
                 case ALGORITHM_LEFT_SYMMETRIC:
-                        *pd_idx = raid_disks - 1 - (stripe % raid_disks);
-                        *qd_idx = (*pd_idx + 1) % raid_disks;
-                        *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
+                        pd_idx = raid_disks - 1 - (stripe % raid_disks);
+                        qd_idx = (pd_idx + 1) % raid_disks;
+                        *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
                         break;
                 case ALGORITHM_RIGHT_SYMMETRIC:
-                        *pd_idx = stripe % raid_disks;
-                        *qd_idx = (*pd_idx + 1) % raid_disks;
-                        *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
+                        pd_idx = stripe % raid_disks;
+                        qd_idx = (pd_idx + 1) % raid_disks;
+                        *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
                         break;
                 default:
                         printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
@@ -1365,6 +1364,10 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
                 break;
         }
 
+        if (sh) {
+                sh->pd_idx = pd_idx;
+                sh->qd_idx = qd_idx;
+        }
         /*
          * Finally, compute the new sector number
          */
@@ -1382,8 +1385,9 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i)
         int sectors_per_chunk = conf->chunk_size >> 9;
         sector_t stripe;
         int chunk_offset;
-        int chunk_number, dummy1, dummy2, dummy3, dd_idx = i;
+        int chunk_number, dummy1, dd_idx = i;
         sector_t r_sector;
+        struct stripe_head sh2;
 
 
         chunk_offset = sector_div(new_sector, sectors_per_chunk);
@@ -1446,8 +1450,9 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i)
 
         check = raid5_compute_sector(conf, r_sector,
                                      (raid_disks != conf->raid_disks),
-                                     &dummy1, &dummy2, &dummy3);
-        if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) {
+                                     &dummy1, &sh2);
+        if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
+            || sh2.qd_idx != sh->qd_idx) {
                 printk(KERN_ERR "compute_blocknr: map not correct\n");
                 return 0;
         }
@@ -1843,11 +1848,11 @@ static int page_is_zero(struct page *p)
                    memcmp(a, a+4, STRIPE_SIZE-4)==0);
 }
 
-static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int previous,
-                           int *qd_idxp)
+static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
+                           struct stripe_head *sh)
 {
         int sectors_per_chunk = conf->chunk_size >> 9;
-        int pd_idx, dd_idx;
+        int dd_idx;
         int chunk_offset = sector_div(stripe, sectors_per_chunk);
         int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
 
@@ -1855,8 +1860,7 @@ static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int previous,
                              stripe * (disks - conf->max_degraded)
                              *sectors_per_chunk + chunk_offset,
                              previous,
-                             &dd_idx, &pd_idx, qd_idxp);
-        return pd_idx;
+                             &dd_idx, sh);
 }
 
 static void
@@ -2514,13 +2518,12 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
         clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
         for (i = 0; i < sh->disks; i++)
                 if (i != sh->pd_idx && (!r6s || i != r6s->qd_idx)) {
-                        int dd_idx, pd_idx, qd_idx, j;
+                        int dd_idx, j;
                         struct stripe_head *sh2;
 
                         sector_t bn = compute_blocknr(sh, i);
-                        sector_t s =
-                                raid5_compute_sector(conf, bn, 0,
-                                                     &dd_idx, &pd_idx, &qd_idx);
+                        sector_t s = raid5_compute_sector(conf, bn, 0,
+                                                          &dd_idx, NULL);
                         sh2 = get_active_stripe(conf, s, 0, 1);
                         if (sh2 == NULL)
                                 /* so far only the early blocks of this stripe
@@ -2804,11 +2807,9 @@ static bool handle_stripe5(struct stripe_head *sh)
 
         if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
             !sh->reconstruct_state) {
-                int qd_idx;
                 /* Need to write out all blocks after computing parity */
                 sh->disks = conf->raid_disks;
-                sh->pd_idx = stripe_to_pdidx(sh->sector, conf, 0, &qd_idx);
-                sh->qd_idx = qd_idx;
+                stripe_set_idx(sh->sector, conf, 0, sh);
                 schedule_reconstruction5(sh, &s, 1, 1);
         } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
                 clear_bit(STRIPE_EXPAND_READY, &sh->state);
@@ -3025,10 +3026,8 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
 
         if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
                 /* Need to write out all blocks after computing P&Q */
-                int qd_idx;
                 sh->disks = conf->raid_disks;
-                sh->pd_idx = stripe_to_pdidx(sh->sector, conf, 0, &qd_idx);
-                sh->qd_idx = qd_idx;
+                stripe_set_idx(sh->sector, conf, 0, sh);
                 compute_parity6(sh, RECONSTRUCT_WRITE);
                 for (i = conf->raid_disks ; i-- ; ) {
                         set_bit(R5_LOCKED, &sh->dev[i].flags);
@@ -3300,7 +3299,7 @@ static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
 {
         mddev_t *mddev = q->queuedata;
         raid5_conf_t *conf = mddev_to_conf(mddev);
-        unsigned int dd_idx, pd_idx, qd_idx;
+        unsigned int dd_idx;
         struct bio* align_bi;
         mdk_rdev_t *rdev;
 
@@ -3325,7 +3324,7 @@ static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
          */
         align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector,
                                                    0,
-                                                   &dd_idx, &pd_idx, &qd_idx);
+                                                   &dd_idx, NULL);
 
         rcu_read_lock();
         rdev = rcu_dereference(conf->disks[dd_idx].rdev);
@@ -3417,7 +3416,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
 {
         mddev_t *mddev = q->queuedata;
         raid5_conf_t *conf = mddev_to_conf(mddev);
-        int dd_idx, pd_idx, qd_idx;
+        int dd_idx;
         sector_t new_sector;
         sector_t logical_sector, last_sector;
         struct stripe_head *sh;
@@ -3484,7 +3483,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
 
                 new_sector = raid5_compute_sector(conf, logical_sector,
                                                   previous,
-                                                  &dd_idx, &pd_idx, &qd_idx);
+                                                  &dd_idx, NULL);
                 pr_debug("raid5: make_request, sector %llu logical %llu\n",
                          (unsigned long long)new_sector,
                          (unsigned long long)logical_sector);
@@ -3572,7 +3571,6 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
          */
         raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
         struct stripe_head *sh;
-        int pd_idx, qd_idx;
         sector_t first_sector, last_sector;
         int raid_disks = conf->previous_raid_disks;
         int data_disks = raid_disks - conf->max_degraded;
@@ -3662,11 +3660,11 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
          */
         first_sector =
                 raid5_compute_sector(conf, sector_nr*(new_data_disks),
-                                     1, &dd_idx, &pd_idx, &qd_idx);
+                                     1, &dd_idx, NULL);
         last_sector =
                 raid5_compute_sector(conf, ((sector_nr+conf->chunk_size/512)
                                             *(new_data_disks) - 1),
-                                     1, &dd_idx, &pd_idx, &qd_idx);
+                                     1, &dd_idx, NULL);
         if (last_sector >= mddev->dev_sectors)
                 last_sector = mddev->dev_sectors - 1;
         while (first_sector <= last_sector) {
@@ -3801,7 +3799,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
          * it will be only one 'dd_idx' and only need one call to raid5_compute_sector.
          */
         struct stripe_head *sh;
-        int dd_idx, pd_idx, qd_idx;
+        int dd_idx;
         sector_t sector, logical_sector, last_sector;
         int scnt = 0;
         int remaining;
@@ -3809,7 +3807,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
 
         logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
         sector = raid5_compute_sector(conf, logical_sector,
-                                      0, &dd_idx, &pd_idx, &qd_idx);
+                                      0, &dd_idx, NULL);
         last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);
 
         for (; logical_sector < last_sector;