aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/md/raid5.c
diff options
context:
space:
mode:
authorAndre Noll <maan@systemlinux.org>2009-03-30 23:33:13 -0400
committerNeilBrown <neilb@suse.de>2009-03-30 23:33:13 -0400
commit58c0fed400603a802968b23ddf78f029c5a84e41 (patch)
tree474fcb9775bb07f39ebb7802fb9b51d69222dcbb /drivers/md/raid5.c
parent575a80fa4f623141e9791e41879d87800fb6d862 (diff)
md: Make mddev->size sector-based.
This patch renames the "size" field of struct mddev_s to "dev_sectors" and stores the number of 512-byte sectors instead of the number of 1K-blocks in it. All users of that field, including raid levels 1,4-6,10, are adjusted accordingly. This simplifies the code a bit because it allows to get rid of a couple of divisions/multiplications by two. In order to make checkpatch happy, some minor coding style issues have also been addressed. In particular, size_store() now uses strict_strtoull() instead of simple_strtoull(). Signed-off-by: Andre Noll <maan@systemlinux.org> Signed-off-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--drivers/md/raid5.c24
1 file changed, 12 insertions, 12 deletions
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 849478e9afdc..4d7142376e58 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3629,8 +3629,8 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
3629 *(new_data_disks) -1, 3629 *(new_data_disks) -1,
3630 raid_disks, data_disks, 3630 raid_disks, data_disks,
3631 &dd_idx, &pd_idx, conf); 3631 &dd_idx, &pd_idx, conf);
3632 if (last_sector >= (mddev->size<<1)) 3632 if (last_sector >= mddev->dev_sectors)
3633 last_sector = (mddev->size<<1)-1; 3633 last_sector = mddev->dev_sectors - 1;
3634 while (first_sector <= last_sector) { 3634 while (first_sector <= last_sector) {
3635 pd_idx = stripe_to_pdidx(first_sector, conf, 3635 pd_idx = stripe_to_pdidx(first_sector, conf,
3636 conf->previous_raid_disks); 3636 conf->previous_raid_disks);
@@ -3670,7 +3670,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
3670 struct stripe_head *sh; 3670 struct stripe_head *sh;
3671 int pd_idx; 3671 int pd_idx;
3672 int raid_disks = conf->raid_disks; 3672 int raid_disks = conf->raid_disks;
3673 sector_t max_sector = mddev->size << 1; 3673 sector_t max_sector = mddev->dev_sectors;
3674 int sync_blocks; 3674 int sync_blocks;
3675 int still_degraded = 0; 3675 int still_degraded = 0;
3676 int i; 3676 int i;
@@ -3708,7 +3708,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
3708 */ 3708 */
3709 if (mddev->degraded >= conf->max_degraded && 3709 if (mddev->degraded >= conf->max_degraded &&
3710 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 3710 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3711 sector_t rv = (mddev->size << 1) - sector_nr; 3711 sector_t rv = mddev->dev_sectors - sector_nr;
3712 *skipped = 1; 3712 *skipped = 1;
3713 return rv; 3713 return rv;
3714 } 3714 }
@@ -4146,8 +4146,8 @@ static int run(mddev_t *mddev)
4146 conf->expand_progress = mddev->reshape_position; 4146 conf->expand_progress = mddev->reshape_position;
4147 4147
4148 /* device size must be a multiple of chunk size */ 4148 /* device size must be a multiple of chunk size */
4149 mddev->size &= ~(mddev->chunk_size/1024 -1); 4149 mddev->dev_sectors &= ~(mddev->chunk_size / 512 - 1);
4150 mddev->resync_max_sectors = mddev->size << 1; 4150 mddev->resync_max_sectors = mddev->dev_sectors;
4151 4151
4152 if (conf->level == 6 && conf->raid_disks < 4) { 4152 if (conf->level == 6 && conf->raid_disks < 4) {
4153 printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n", 4153 printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n",
@@ -4254,8 +4254,8 @@ static int run(mddev_t *mddev)
4254 mddev->queue->backing_dev_info.congested_data = mddev; 4254 mddev->queue->backing_dev_info.congested_data = mddev;
4255 mddev->queue->backing_dev_info.congested_fn = raid5_congested; 4255 mddev->queue->backing_dev_info.congested_fn = raid5_congested;
4256 4256
4257 mddev->array_sectors = 2 * mddev->size * (conf->previous_raid_disks - 4257 mddev->array_sectors = mddev->dev_sectors *
4258 conf->max_degraded); 4258 (conf->previous_raid_disks - conf->max_degraded);
4259 4259
4260 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec); 4260 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
4261 4261
@@ -4482,11 +4482,11 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
4482 - conf->max_degraded); 4482 - conf->max_degraded);
4483 set_capacity(mddev->gendisk, mddev->array_sectors); 4483 set_capacity(mddev->gendisk, mddev->array_sectors);
4484 mddev->changed = 1; 4484 mddev->changed = 1;
4485 if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) { 4485 if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) {
4486 mddev->recovery_cp = mddev->size << 1; 4486 mddev->recovery_cp = mddev->dev_sectors;
4487 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4487 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4488 } 4488 }
4489 mddev->size = sectors /2; 4489 mddev->dev_sectors = sectors;
4490 mddev->resync_max_sectors = sectors; 4490 mddev->resync_max_sectors = sectors;
4491 return 0; 4491 return 0;
4492} 4492}
@@ -4615,7 +4615,7 @@ static void end_reshape(raid5_conf_t *conf)
4615 struct block_device *bdev; 4615 struct block_device *bdev;
4616 4616
4617 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { 4617 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
4618 conf->mddev->array_sectors = 2 * conf->mddev->size * 4618 conf->mddev->array_sectors = conf->mddev->dev_sectors *
4619 (conf->raid_disks - conf->max_degraded); 4619 (conf->raid_disks - conf->max_degraded);
4620 set_capacity(conf->mddev->gendisk, conf->mddev->array_sectors); 4620 set_capacity(conf->mddev->gendisk, conf->mddev->array_sectors);
4621 conf->mddev->changed = 1; 4621 conf->mddev->changed = 1;