author		NeilBrown <neilb@suse.de>	2012-05-20 19:28:20 -0400
committer	NeilBrown <neilb@suse.de>	2012-05-20 19:28:20 -0400
commit		5cf00fcd3c98d2eafb58ac7a649bbdb9dbc4902b (patch)
tree		0a96aa84d276e5e627603c7ad4740e1d607ebbad /drivers
parent		b5254dd5fdd9abcacadb5101beb35df9ae8cc564 (diff)
md/raid10: collect some geometry fields into a dedicated structure.
We will shortly be adding reshape support for RAID10, which will
require it to have two concurrent geometries (before and after).
To make that easier, collect most geometry fields into 'struct geom'
and access them from there. Then we will more easily be able to add
a second set of fields.
Note that 'copies' is not in this struct and so cannot be changed.
There is little need to change this number, and doing so is much
more difficult as it requires reallocating more things.
So leave it out for now.
Signed-off-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/md/raid10.c	200
-rw-r--r--	drivers/md/raid10.h	 23
2 files changed, 115 insertions(+), 108 deletions(-)
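
The net effect on the data structure is that the per-array geometry fields move into a small embedded struct. As a rough at-a-glance sketch, paraphrasing the raid10.h hunk at the end of this patch ('copies' deliberately stays outside the struct, since changing it would mean reallocating other state):

	struct geom {
		int		raid_disks;
		int		near_copies;	/* copies laid out raid0 style */
		int		far_copies;	/* copies laid out at large strides
						 * across drives */
		int		far_offset;	/* far copies are offset by 1 stripe
						 * instead of many */
		sector_t	stride;		/* distance between far copies:
						 * size / far_copies unless far_offset,
						 * in which case it is 1 stripe */
		int		chunk_shift;	/* shift from chunks to sectors */
		sector_t	chunk_mask;
	} geo;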
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 832fb4d56657..36f445f9e11d 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -511,42 +511,43 @@ static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
 	sector_t chunk;
 	sector_t stripe;
 	int dev;
+	struct geom *geo = &conf->geo;
 
 	int slot = 0;
 
 	/* now calculate first sector/dev */
-	chunk = r10bio->sector >> conf->chunk_shift;
-	sector = r10bio->sector & conf->chunk_mask;
+	chunk = r10bio->sector >> geo->chunk_shift;
+	sector = r10bio->sector & geo->chunk_mask;
 
-	chunk *= conf->near_copies;
+	chunk *= geo->near_copies;
 	stripe = chunk;
-	dev = sector_div(stripe, conf->raid_disks);
-	if (conf->far_offset)
-		stripe *= conf->far_copies;
+	dev = sector_div(stripe, geo->raid_disks);
+	if (geo->far_offset)
+		stripe *= geo->far_copies;
 
-	sector += stripe << conf->chunk_shift;
+	sector += stripe << geo->chunk_shift;
 
 	/* and calculate all the others */
-	for (n=0; n < conf->near_copies; n++) {
+	for (n = 0; n < geo->near_copies; n++) {
 		int d = dev;
 		sector_t s = sector;
 		r10bio->devs[slot].addr = sector;
 		r10bio->devs[slot].devnum = d;
 		slot++;
 
-		for (f = 1; f < conf->far_copies; f++) {
-			d += conf->near_copies;
-			if (d >= conf->raid_disks)
-				d -= conf->raid_disks;
-			s += conf->stride;
+		for (f = 1; f < geo->far_copies; f++) {
+			d += geo->near_copies;
+			if (d >= geo->raid_disks)
+				d -= geo->raid_disks;
+			s += geo->stride;
 			r10bio->devs[slot].devnum = d;
 			r10bio->devs[slot].addr = s;
 			slot++;
 		}
 		dev++;
-		if (dev >= conf->raid_disks) {
+		if (dev >= geo->raid_disks) {
 			dev = 0;
-			sector += (conf->chunk_mask + 1);
+			sector += (geo->chunk_mask + 1);
 		}
 	}
 	BUG_ON(slot != conf->copies);
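
To see what this mapping does with concrete numbers, here is a standalone userspace sketch of the same arithmetic (an illustration only, not kernel code), assuming a hypothetical 4-disk array with near_copies = 2, far_copies = 1 and 64-sector chunks:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		int raid_disks = 4, near_copies = 2, far_copies = 1, far_offset = 0;
		int chunk_shift = 6;			/* 64-sector chunks */
		uint64_t chunk_mask = 63, stride = 0;	/* stride unused when far_copies == 1 */
		uint64_t logical = 200;			/* array sector to map */

		uint64_t chunk  = logical >> chunk_shift;	/* 3 */
		uint64_t sector = logical & chunk_mask;		/* 8 */

		chunk *= near_copies;				/* 6 */
		uint64_t stripe = chunk / raid_disks;		/* 1 */
		int dev = chunk % raid_disks;			/* 2 */
		if (far_offset)
			stripe *= far_copies;
		sector += stripe << chunk_shift;		/* 72 */

		/* one near copy per iteration, plus its far copies (none here) */
		for (int n = 0; n < near_copies; n++) {
			int d = dev;
			uint64_t s = sector;
			printf("copy %d -> disk %d, device sector %llu\n",
			       n, d, (unsigned long long)s);
			for (int f = 1; f < far_copies; f++) {
				d = (d + near_copies) % raid_disks;
				s += stride;
				printf("   far copy -> disk %d, device sector %llu\n",
				       d, (unsigned long long)s);
			}
			dev++;
			if (dev >= raid_disks) {
				dev = 0;
				sector += chunk_mask + 1;
			}
		}
		return 0;
	}

With these numbers both copies of array sector 200 land at device sector 72, on disks 2 and 3.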
@@ -555,28 +556,29 @@ static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
 static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
 {
 	sector_t offset, chunk, vchunk;
+	struct geom *geo = &conf->geo;
 
-	offset = sector & conf->chunk_mask;
-	if (conf->far_offset) {
+	offset = sector & geo->chunk_mask;
+	if (geo->far_offset) {
 		int fc;
-		chunk = sector >> conf->chunk_shift;
-		fc = sector_div(chunk, conf->far_copies);
-		dev -= fc * conf->near_copies;
+		chunk = sector >> geo->chunk_shift;
+		fc = sector_div(chunk, geo->far_copies);
+		dev -= fc * geo->near_copies;
 		if (dev < 0)
-			dev += conf->raid_disks;
+			dev += geo->raid_disks;
 	} else {
-		while (sector >= conf->stride) {
-			sector -= conf->stride;
-			if (dev < conf->near_copies)
-				dev += conf->raid_disks - conf->near_copies;
+		while (sector >= geo->stride) {
+			sector -= geo->stride;
+			if (dev < geo->near_copies)
+				dev += geo->raid_disks - geo->near_copies;
 			else
-				dev -= conf->near_copies;
+				dev -= geo->near_copies;
 		}
-		chunk = sector >> conf->chunk_shift;
+		chunk = sector >> geo->chunk_shift;
 	}
-	vchunk = chunk * conf->raid_disks + dev;
-	sector_div(vchunk, conf->near_copies);
-	return (vchunk << conf->chunk_shift) + offset;
+	vchunk = chunk * geo->raid_disks + dev;
+	sector_div(vchunk, geo->near_copies);
+	return (vchunk << geo->chunk_shift) + offset;
 }
 
 /**
@@ -599,8 +601,9 @@ static int raid10_mergeable_bvec(struct request_queue *q,
 	int max;
 	unsigned int chunk_sectors = mddev->chunk_sectors;
 	unsigned int bio_sectors = bvm->bi_size >> 9;
+	struct geom *geo = &conf->geo;
 
-	if (conf->near_copies < conf->raid_disks) {
+	if (geo->near_copies < geo->raid_disks) {
 		max = (chunk_sectors - ((sector & (chunk_sectors - 1))
 					+ bio_sectors)) << 9;
 		if (max < 0)
@@ -681,6 +684,7 @@ static struct md_rdev *read_balance(struct r10conf *conf,
 	struct md_rdev *rdev, *best_rdev;
 	int do_balance;
 	int best_slot;
+	struct geom *geo = &conf->geo;
 
 	raid10_find_phys(conf, r10_bio);
 	rcu_read_lock();
@@ -761,11 +765,11 @@ retry:
 		 * sequential read speed for 'far copies' arrays. So only
 		 * keep it for 'near' arrays, and review those later.
 		 */
-		if (conf->near_copies > 1 && !atomic_read(&rdev->nr_pending))
+		if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending))
 			break;
 
 		/* for far > 1 always use the lowest address */
-		if (conf->far_copies > 1)
+		if (geo->far_copies > 1)
 			new_distance = r10_bio->devs[slot].addr;
 		else
 			new_distance = abs(r10_bio->devs[slot].addr -
@@ -812,7 +816,7 @@ static int raid10_congested(void *data, int bits)
 	if (mddev_congested(mddev, bits))
 		return 1;
 	rcu_read_lock();
-	for (i = 0; i < conf->raid_disks && ret == 0; i++) {
+	for (i = 0; i < conf->geo.raid_disks && ret == 0; i++) {
 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
 			struct request_queue *q = bdev_get_queue(rdev->bdev);
@@ -979,7 +983,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	struct r10bio *r10_bio;
 	struct bio *read_bio;
 	int i;
-	int chunk_sects = conf->chunk_mask + 1;
+	sector_t chunk_mask = conf->geo.chunk_mask;
+	int chunk_sects = chunk_mask + 1;
 	const int rw = bio_data_dir(bio);
 	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
 	const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
@@ -997,9 +1002,9 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	/* If this request crosses a chunk boundary, we need to
 	 * split it. This will only happen for 1 PAGE (or less) requests.
 	 */
-	if (unlikely( (bio->bi_sector & conf->chunk_mask) + (bio->bi_size >> 9)
-		      > chunk_sects &&
-		      conf->near_copies < conf->raid_disks)) {
+	if (unlikely((bio->bi_sector & chunk_mask) + (bio->bi_size >> 9)
+		     > chunk_sects
+		     && conf->geo.near_copies < conf->geo.raid_disks)) {
 		struct bio_pair *bp;
 		/* Sanity check -- queue functions should prevent this happening */
 		if (bio->bi_vcnt != 1 ||
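
For intuition, the split test above is plain chunk arithmetic; a toy standalone check with assumed numbers (64-sector chunks, an 8-sector bio starting at sector 60 -- not values from the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long chunk_mask = 63;	/* chunk_sects - 1 */
		unsigned long long chunk_sects = chunk_mask + 1;
		unsigned long long bi_sector = 60, bio_sectors = 8;

		/* (60 & 63) + 8 = 68 > 64: the bio spills into the next chunk
		 * and must be split before it can be mapped. */
		if ((bi_sector & chunk_mask) + bio_sectors > chunk_sects)
			printf("bio crosses a chunk boundary, split it\n");
		return 0;
	}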
@@ -1368,19 +1373,19 @@ static void status(struct seq_file *seq, struct mddev *mddev)
 	struct r10conf *conf = mddev->private;
 	int i;
 
-	if (conf->near_copies < conf->raid_disks)
+	if (conf->geo.near_copies < conf->geo.raid_disks)
 		seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
-	if (conf->near_copies > 1)
-		seq_printf(seq, " %d near-copies", conf->near_copies);
-	if (conf->far_copies > 1) {
-		if (conf->far_offset)
-			seq_printf(seq, " %d offset-copies", conf->far_copies);
+	if (conf->geo.near_copies > 1)
+		seq_printf(seq, " %d near-copies", conf->geo.near_copies);
+	if (conf->geo.far_copies > 1) {
+		if (conf->geo.far_offset)
+			seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
 		else
-			seq_printf(seq, " %d far-copies", conf->far_copies);
+			seq_printf(seq, " %d far-copies", conf->geo.far_copies);
 	}
-	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
-					conf->raid_disks - mddev->degraded);
-	for (i = 0; i < conf->raid_disks; i++)
+	seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
+					conf->geo.raid_disks - mddev->degraded);
+	for (i = 0; i < conf->geo.raid_disks; i++)
 		seq_printf(seq, "%s",
 			   conf->mirrors[i].rdev &&
 			   test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
@@ -1403,7 +1408,7 @@ static int enough(struct r10conf *conf, int ignore)
 		if (conf->mirrors[first].rdev &&
 		    first != ignore)
 			cnt++;
-		first = (first+1) % conf->raid_disks;
+		first = (first+1) % conf->geo.raid_disks;
 	}
 	if (cnt == 0)
 		return 0;
@@ -1445,7 +1450,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
 	       "md/raid10:%s: Disk failure on %s, disabling device.\n"
 	       "md/raid10:%s: Operation continuing on %d devices.\n",
 	       mdname(mddev), bdevname(rdev->bdev, b),
-	       mdname(mddev), conf->raid_disks - mddev->degraded);
+	       mdname(mddev), conf->geo.raid_disks - mddev->degraded);
 }
 
 static void print_conf(struct r10conf *conf)
@@ -1458,10 +1463,10 @@ static void print_conf(struct r10conf *conf)
 		printk(KERN_DEBUG "(!conf)\n");
 		return;
 	}
-	printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
-		conf->raid_disks);
+	printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
+		conf->geo.raid_disks);
 
-	for (i = 0; i < conf->raid_disks; i++) {
+	for (i = 0; i < conf->geo.raid_disks; i++) {
 		char b[BDEVNAME_SIZE];
 		tmp = conf->mirrors + i;
 		if (tmp->rdev)
@@ -1493,7 +1498,7 @@ static int raid10_spare_active(struct mddev *mddev)
 	 * Find all non-in_sync disks within the RAID10 configuration
 	 * and mark them in_sync
 	 */
-	for (i = 0; i < conf->raid_disks; i++) {
+	for (i = 0; i < conf->geo.raid_disks; i++) {
 		tmp = conf->mirrors + i;
 		if (tmp->replacement
 		    && tmp->replacement->recovery_offset == MaxSector
@@ -1535,7 +1540,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 	int err = -EEXIST;
 	int mirror;
 	int first = 0;
-	int last = conf->raid_disks - 1;
+	int last = conf->geo.raid_disks - 1;
 	struct request_queue *q = bdev_get_queue(rdev->bdev);
 
 	if (mddev->recovery_cp < MaxSector)
@@ -2603,7 +2608,7 @@ static int init_resync(struct r10conf *conf)
 	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
 	BUG_ON(conf->r10buf_pool);
 	conf->have_replacement = 0;
-	for (i = 0; i < conf->raid_disks; i++)
+	for (i = 0; i < conf->geo.raid_disks; i++)
 		if (conf->mirrors[i].replacement)
 			conf->have_replacement = 1;
 	conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
@@ -2657,6 +2662,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 	sector_t sync_blocks;
 	sector_t sectors_skipped = 0;
 	int chunks_skipped = 0;
+	sector_t chunk_mask = conf->geo.chunk_mask;
 
 	if (!conf->r10buf_pool)
 		if (init_resync(conf))
@@ -2680,7 +2686,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
 			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
 					&sync_blocks, 1);
-		else for (i=0; i<conf->raid_disks; i++) {
+		else for (i = 0; i < conf->geo.raid_disks; i++) {
 			sector_t sect =
 				raid10_find_virt(conf, mddev->curr_resync, i);
 			bitmap_end_sync(mddev->bitmap, sect,
@@ -2694,7 +2700,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 			/* Completed a full sync so the replacements
 			 * are now fully recovered.
 			 */
-			for (i = 0; i < conf->raid_disks; i++)
+			for (i = 0; i < conf->geo.raid_disks; i++)
 				if (conf->mirrors[i].replacement)
 					conf->mirrors[i].replacement
 						->recovery_offset
@@ -2707,7 +2713,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 		*skipped = 1;
 		return sectors_skipped;
 	}
-	if (chunks_skipped >= conf->raid_disks) {
+	if (chunks_skipped >= conf->geo.raid_disks) {
 		/* if there has been nothing to do on any drive,
 		 * then there is nothing to do at all..
 		 */
@@ -2721,9 +2727,9 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 	/* make sure whole request will fit in a chunk - if chunks
 	 * are meaningful
 	 */
-	if (conf->near_copies < conf->raid_disks &&
-	    max_sector > (sector_nr | conf->chunk_mask))
-		max_sector = (sector_nr | conf->chunk_mask) + 1;
+	if (conf->geo.near_copies < conf->geo.raid_disks &&
+	    max_sector > (sector_nr | chunk_mask))
+		max_sector = (sector_nr | chunk_mask) + 1;
 	/*
 	 * If there is non-resync activity waiting for us then
 	 * put in a delay to throttle resync.
@@ -2752,7 +2758,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 		int j;
 		r10_bio = NULL;
 
-		for (i=0 ; i<conf->raid_disks; i++) {
+		for (i = 0 ; i < conf->geo.raid_disks; i++) {
 			int still_degraded;
 			struct r10bio *rb2;
 			sector_t sect;
@@ -2806,7 +2812,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 			/* Need to check if the array will still be
 			 * degraded
 			 */
-			for (j=0; j<conf->raid_disks; j++)
+			for (j = 0; j < conf->geo.raid_disks; j++)
 				if (conf->mirrors[j].rdev == NULL ||
 				    test_bit(Faulty, &conf->mirrors[j].rdev->flags)) {
 					still_degraded = 1;
@@ -2984,9 +2990,9 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 		r10_bio->sector = sector_nr;
 		set_bit(R10BIO_IsSync, &r10_bio->state);
 		raid10_find_phys(conf, r10_bio);
-		r10_bio->sectors = (sector_nr | conf->chunk_mask) - sector_nr +1;
+		r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
 
-		for (i=0; i<conf->copies; i++) {
+		for (i = 0; i < conf->copies; i++) {
 			int d = r10_bio->devs[i].devnum;
 			sector_t first_bad, sector;
 			int bad_sectors;
@@ -3152,16 +3158,16 @@ raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
 	struct r10conf *conf = mddev->private;
 
 	if (!raid_disks)
-		raid_disks = conf->raid_disks;
+		raid_disks = conf->geo.raid_disks;
 	if (!sectors)
 		sectors = conf->dev_sectors;
 
-	size = sectors >> conf->chunk_shift;
-	sector_div(size, conf->far_copies);
+	size = sectors >> conf->geo.chunk_shift;
+	sector_div(size, conf->geo.far_copies);
 	size = size * raid_disks;
-	sector_div(size, conf->near_copies);
+	sector_div(size, conf->geo.near_copies);
 
-	return size << conf->chunk_shift;
+	return size << conf->geo.chunk_shift;
 }
 
 static void calc_sectors(struct r10conf *conf, sector_t size)
@@ -3171,10 +3177,10 @@ static void calc_sectors(struct r10conf *conf, sector_t size)
 	 * conf->stride
 	 */
 
-	size = size >> conf->chunk_shift;
-	sector_div(size, conf->far_copies);
-	size = size * conf->raid_disks;
-	sector_div(size, conf->near_copies);
+	size = size >> conf->geo.chunk_shift;
+	sector_div(size, conf->geo.far_copies);
+	size = size * conf->geo.raid_disks;
+	sector_div(size, conf->geo.near_copies);
 	/* 'size' is now the number of chunks in the array */
 	/* calculate "used chunks per device" */
 	size = size * conf->copies;
@@ -3182,15 +3188,15 @@ static void calc_sectors(struct r10conf *conf, sector_t size)
 	/* We need to round up when dividing by raid_disks to
 	 * get the stride size.
 	 */
-	size = DIV_ROUND_UP_SECTOR_T(size, conf->raid_disks);
+	size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks);
 
-	conf->dev_sectors = size << conf->chunk_shift;
+	conf->dev_sectors = size << conf->geo.chunk_shift;
 
-	if (conf->far_offset)
-		conf->stride = 1 << conf->chunk_shift;
+	if (conf->geo.far_offset)
+		conf->geo.stride = 1 << conf->geo.chunk_shift;
 	else {
-		sector_div(size, conf->far_copies);
-		conf->stride = size << conf->chunk_shift;
+		sector_div(size, conf->geo.far_copies);
+		conf->geo.stride = size << conf->geo.chunk_shift;
 	}
 }
 
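
Worked through with illustrative numbers (not taken from the patch), and assuming the incoming 'size' is the usable per-device sector count: 4 disks, near_copies = 2, far_copies = 2 (so copies = 4), no far_offset, 64-sector chunks (chunk_shift = 6), 65536 sectors per device:

	/*
	 *   size  = 65536 >> 6            = 1024   chunks per device
	 *   size /= far_copies  (2)       =  512
	 *   size *= raid_disks  (4)       = 2048
	 *   size /= near_copies (2)       = 1024   data chunks in the array
	 *   size *= copies      (4)       = 4096   used chunks, all copies
	 *   size  = DIV_ROUND_UP(4096, 4) = 1024   used chunks per device
	 *
	 *   dev_sectors = 1024 << 6       = 65536  sectors
	 *   stride      = (1024 / 2) << 6 = 32768  sectors
	 *
	 * i.e. each device is split into far_copies regions of 'stride'
	 * sectors, and the far copies of a chunk sit one 'stride' apart.
	 */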
@@ -3234,13 +3240,13 @@ static struct r10conf *setup_conf(struct mddev *mddev)
 		goto out;
 
 
-	conf->raid_disks = mddev->raid_disks;
-	conf->near_copies = nc;
-	conf->far_copies = fc;
+	conf->geo.raid_disks = mddev->raid_disks;
+	conf->geo.near_copies = nc;
+	conf->geo.far_copies = fc;
 	conf->copies = nc*fc;
-	conf->far_offset = fo;
-	conf->chunk_mask = mddev->new_chunk_sectors - 1;
-	conf->chunk_shift = ffz(~mddev->new_chunk_sectors);
+	conf->geo.far_offset = fo;
+	conf->geo.chunk_mask = mddev->new_chunk_sectors - 1;
+	conf->geo.chunk_shift = ffz(~mddev->new_chunk_sectors);
 
 	conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
 					   r10bio_pool_free, conf);
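
The chunk_shift initialisation relies on the chunk size being a power of two: ffz(~x) is the index of the first zero bit of ~x, i.e. the first set bit of x, which for a power-of-two chunk size is log2 of the chunk size in sectors. A userspace sketch of the equivalent computation, with an assumed 128-sector (64KiB) chunk:

	#include <stdio.h>

	int main(void)
	{
		unsigned int chunk_sectors = 128;		/* assumed chunk size in sectors */
		unsigned int chunk_mask = chunk_sectors - 1;	/* 0x7f */
		int chunk_shift = __builtin_ctz(chunk_sectors);	/* 7, same as ffz(~128) */

		printf("chunk_shift=%d chunk_mask=%#x\n", chunk_shift, chunk_mask);
		return 0;
	}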
@@ -3304,16 +3310,16 @@ static int run(struct mddev *mddev)
 
 	chunk_size = mddev->chunk_sectors << 9;
 	blk_queue_io_min(mddev->queue, chunk_size);
-	if (conf->raid_disks % conf->near_copies)
-		blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks);
+	if (conf->geo.raid_disks % conf->geo.near_copies)
+		blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
 	else
 		blk_queue_io_opt(mddev->queue, chunk_size *
-				 (conf->raid_disks / conf->near_copies));
+				 (conf->geo.raid_disks / conf->geo.near_copies));
 
 	rdev_for_each(rdev, mddev) {
 
 		disk_idx = rdev->raid_disk;
-		if (disk_idx >= conf->raid_disks
+		if (disk_idx >= conf->geo.raid_disks
 		    || disk_idx < 0)
 			continue;
 		disk = conf->mirrors + disk_idx;
@@ -3341,7 +3347,7 @@ static int run(struct mddev *mddev)
 	}
 
 	mddev->degraded = 0;
-	for (i = 0; i < conf->raid_disks; i++) {
+	for (i = 0; i < conf->geo.raid_disks; i++) {
 
 		disk = conf->mirrors + i;
 
@@ -3368,8 +3374,8 @@ static int run(struct mddev *mddev)
 		mdname(mddev));
 	printk(KERN_INFO
 		"md/raid10:%s: active with %d out of %d devices\n",
-		mdname(mddev), conf->raid_disks - mddev->degraded,
-		conf->raid_disks);
+		mdname(mddev), conf->geo.raid_disks - mddev->degraded,
+		conf->geo.raid_disks);
 	/*
 	 * Ok, everything is just fine now
 	 */
@@ -3386,9 +3392,9 @@ static int run(struct mddev *mddev)
 	 * maybe...
 	 */
 	{
-		int stripe = conf->raid_disks *
+		int stripe = conf->geo.raid_disks *
 			((mddev->chunk_sectors << 9) / PAGE_SIZE);
-		stripe /= conf->near_copies;
+		stripe /= conf->geo.near_copies;
 		if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
 			mddev->queue->backing_dev_info.ra_pages = 2* stripe;
 	}
@@ -3460,7 +3466,7 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
 	struct r10conf *conf = mddev->private;
 	sector_t oldsize, size;
 
-	if (conf->far_copies > 1 && !conf->far_offset)
+	if (conf->geo.far_copies > 1 && !conf->geo.far_offset)
 		return -EINVAL;
 
 	oldsize = raid10_size(mddev, 0, 0);
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 7c615613c381..4c4942ac46fc 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -14,33 +14,34 @@ struct mirror_info {
 struct r10conf {
 	struct mddev		*mddev;
 	struct mirror_info	*mirrors;
-	int			raid_disks;
 	spinlock_t		device_lock;
 
 	/* geometry */
-	int			near_copies;  /* number of copies laid out
+	struct geom {
+		int		raid_disks;
+		int		near_copies;  /* number of copies laid out
 					       * raid0 style */
-	int			far_copies;   /* number of copies laid out
+		int		far_copies;   /* number of copies laid out
 					       * at large strides across drives
 					       */
-	int			far_offset;   /* far_copies are offset by 1
+		int		far_offset;   /* far_copies are offset by 1
 					       * stripe instead of many
 					       */
-	int			copies;	      /* near_copies * far_copies.
-					       * must be <= raid_disks
-					       */
-	sector_t		stride;	      /* distance between far copies.
+		sector_t	stride;	      /* distance between far copies.
 					       * This is size / far_copies unless
 					       * far_offset, in which case it is
 					       * 1 stripe.
 					       */
+		int		chunk_shift;  /* shift from chunks to sectors */
+		sector_t	chunk_mask;
+	} geo;
+	int			copies;	      /* near_copies * far_copies.
+					       * must be <= raid_disks
+					       */
 
 	sector_t		dev_sectors;  /* temp copy of
 					       * mddev->dev_sectors */
 
-	int			chunk_shift;  /* shift from chunks to sectors */
-	sector_t		chunk_mask;
-
 	struct list_head	retry_list;
 	/* queue pending writes and submit them on unplug */
 	struct bio_list		pending_bio_list;