diff options
author | Andre Noll <maan@systemlinux.org> | 2009-06-17 18:45:01 -0400 |
---|---|---|
committer | NeilBrown <neilb@suse.de> | 2009-06-17 18:45:01 -0400 |
commit | 9d8f0363623b3da12c43007cf77f5e1a4e8a5964 (patch) | |
tree | 0fee53971a397ade209dd36c4f1ed50db6450faf /drivers/md/raid5.c | |
parent | fbb704efb784e2c8418e34dc3013af76bdd58101 (diff) |
md: Make mddev->chunk_size sector-based.
This patch renames the chunk_size field to chunk_sectors with the
implied change of semantics. Since
is_power_of_2(chunk_size) = is_power_of_2(chunk_sectors << 9)
= is_power_of_2(chunk_sectors)
these bits don't need an adjustment for the shift.
Signed-off-by: Andre Noll <maan@systemlinux.org>
Signed-off-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r-- | drivers/md/raid5.c | 41 |
1 file changed, 22 insertions, 19 deletions
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index be4e62f611bc..1e4fd5e8bfdd 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -3352,13 +3352,13 @@ static int raid5_mergeable_bvec(struct request_queue *q, | |||
3352 | mddev_t *mddev = q->queuedata; | 3352 | mddev_t *mddev = q->queuedata; |
3353 | sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); | 3353 | sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); |
3354 | int max; | 3354 | int max; |
3355 | unsigned int chunk_sectors = mddev->chunk_size >> 9; | 3355 | unsigned int chunk_sectors = mddev->chunk_sectors; |
3356 | unsigned int bio_sectors = bvm->bi_size >> 9; | 3356 | unsigned int bio_sectors = bvm->bi_size >> 9; |
3357 | 3357 | ||
3358 | if ((bvm->bi_rw & 1) == WRITE) | 3358 | if ((bvm->bi_rw & 1) == WRITE) |
3359 | return biovec->bv_len; /* always allow writes to be mergeable */ | 3359 | return biovec->bv_len; /* always allow writes to be mergeable */ |
3360 | 3360 | ||
3361 | if (mddev->new_chunk < mddev->chunk_size) | 3361 | if (mddev->new_chunk < mddev->chunk_sectors << 9) |
3362 | chunk_sectors = mddev->new_chunk >> 9; | 3362 | chunk_sectors = mddev->new_chunk >> 9; |
3363 | max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; | 3363 | max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; |
3364 | if (max < 0) max = 0; | 3364 | if (max < 0) max = 0; |
@@ -3372,10 +3372,10 @@ static int raid5_mergeable_bvec(struct request_queue *q, | |||
3372 | static int in_chunk_boundary(mddev_t *mddev, struct bio *bio) | 3372 | static int in_chunk_boundary(mddev_t *mddev, struct bio *bio) |
3373 | { | 3373 | { |
3374 | sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); | 3374 | sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); |
3375 | unsigned int chunk_sectors = mddev->chunk_size >> 9; | 3375 | unsigned int chunk_sectors = mddev->chunk_sectors; |
3376 | unsigned int bio_sectors = bio->bi_size >> 9; | 3376 | unsigned int bio_sectors = bio->bi_size >> 9; |
3377 | 3377 | ||
3378 | if (mddev->new_chunk < mddev->chunk_size) | 3378 | if (mddev->new_chunk < mddev->chunk_sectors << 9) |
3379 | chunk_sectors = mddev->new_chunk >> 9; | 3379 | chunk_sectors = mddev->new_chunk >> 9; |
3380 | return chunk_sectors >= | 3380 | return chunk_sectors >= |
3381 | ((sector & (chunk_sectors - 1)) + bio_sectors); | 3381 | ((sector & (chunk_sectors - 1)) + bio_sectors); |
@@ -3791,10 +3791,10 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped | |||
3791 | * If old and new chunk sizes differ, we need to process the | 3791 | * If old and new chunk sizes differ, we need to process the |
3792 | * largest of these | 3792 | * largest of these |
3793 | */ | 3793 | */ |
3794 | if (mddev->new_chunk > mddev->chunk_size) | 3794 | if (mddev->new_chunk > mddev->chunk_sectors << 9) |
3795 | reshape_sectors = mddev->new_chunk / 512; | 3795 | reshape_sectors = mddev->new_chunk / 512; |
3796 | else | 3796 | else |
3797 | reshape_sectors = mddev->chunk_size / 512; | 3797 | reshape_sectors = mddev->chunk_sectors; |
3798 | 3798 | ||
3799 | /* we update the metadata when there is more than 3Meg | 3799 | /* we update the metadata when there is more than 3Meg |
3800 | * in the block range (that is rather arbitrary, should | 3800 | * in the block range (that is rather arbitrary, should |
@@ -4303,7 +4303,7 @@ raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks) | |||
4303 | raid_disks = conf->previous_raid_disks; | 4303 | raid_disks = conf->previous_raid_disks; |
4304 | } | 4304 | } |
4305 | 4305 | ||
4306 | sectors &= ~((sector_t)mddev->chunk_size/512 - 1); | 4306 | sectors &= ~((sector_t)mddev->chunk_sectors - 1); |
4307 | sectors &= ~((sector_t)mddev->new_chunk/512 - 1); | 4307 | sectors &= ~((sector_t)mddev->new_chunk/512 - 1); |
4308 | return sectors * (raid_disks - conf->max_degraded); | 4308 | return sectors * (raid_disks - conf->max_degraded); |
4309 | } | 4309 | } |
@@ -4412,7 +4412,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) | |||
4412 | conf->max_nr_stripes = NR_STRIPES; | 4412 | conf->max_nr_stripes = NR_STRIPES; |
4413 | conf->reshape_progress = mddev->reshape_position; | 4413 | conf->reshape_progress = mddev->reshape_position; |
4414 | if (conf->reshape_progress != MaxSector) { | 4414 | if (conf->reshape_progress != MaxSector) { |
4415 | conf->prev_chunk = mddev->chunk_size; | 4415 | conf->prev_chunk = mddev->chunk_sectors << 9; |
4416 | conf->prev_algo = mddev->layout; | 4416 | conf->prev_algo = mddev->layout; |
4417 | } | 4417 | } |
4418 | 4418 | ||
@@ -4484,7 +4484,7 @@ static int run(mddev_t *mddev) | |||
4484 | } | 4484 | } |
4485 | /* here_new is the stripe we will write to */ | 4485 | /* here_new is the stripe we will write to */ |
4486 | here_old = mddev->reshape_position; | 4486 | here_old = mddev->reshape_position; |
4487 | sector_div(here_old, (mddev->chunk_size>>9)* | 4487 | sector_div(here_old, mddev->chunk_sectors * |
4488 | (old_disks-max_degraded)); | 4488 | (old_disks-max_degraded)); |
4489 | /* here_old is the first stripe that we might need to read | 4489 | /* here_old is the first stripe that we might need to read |
4490 | * from */ | 4490 | * from */ |
@@ -4499,7 +4499,7 @@ static int run(mddev_t *mddev) | |||
4499 | } else { | 4499 | } else { |
4500 | BUG_ON(mddev->level != mddev->new_level); | 4500 | BUG_ON(mddev->level != mddev->new_level); |
4501 | BUG_ON(mddev->layout != mddev->new_layout); | 4501 | BUG_ON(mddev->layout != mddev->new_layout); |
4502 | BUG_ON(mddev->chunk_size != mddev->new_chunk); | 4502 | BUG_ON(mddev->chunk_sectors << 9 != mddev->new_chunk); |
4503 | BUG_ON(mddev->delta_disks != 0); | 4503 | BUG_ON(mddev->delta_disks != 0); |
4504 | } | 4504 | } |
4505 | 4505 | ||
@@ -4533,7 +4533,7 @@ static int run(mddev_t *mddev) | |||
4533 | } | 4533 | } |
4534 | 4534 | ||
4535 | /* device size must be a multiple of chunk size */ | 4535 | /* device size must be a multiple of chunk size */ |
4536 | mddev->dev_sectors &= ~(mddev->chunk_size / 512 - 1); | 4536 | mddev->dev_sectors &= ~(mddev->chunk_sectors - 1); |
4537 | mddev->resync_max_sectors = mddev->dev_sectors; | 4537 | mddev->resync_max_sectors = mddev->dev_sectors; |
4538 | 4538 | ||
4539 | if (mddev->degraded > 0 && | 4539 | if (mddev->degraded > 0 && |
@@ -4582,7 +4582,7 @@ static int run(mddev_t *mddev) | |||
4582 | { | 4582 | { |
4583 | int data_disks = conf->previous_raid_disks - conf->max_degraded; | 4583 | int data_disks = conf->previous_raid_disks - conf->max_degraded; |
4584 | int stripe = data_disks * | 4584 | int stripe = data_disks * |
4585 | (mddev->chunk_size / PAGE_SIZE); | 4585 | ((mddev->chunk_sectors << 9) / PAGE_SIZE); |
4586 | if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) | 4586 | if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) |
4587 | mddev->queue->backing_dev_info.ra_pages = 2 * stripe; | 4587 | mddev->queue->backing_dev_info.ra_pages = 2 * stripe; |
4588 | } | 4588 | } |
@@ -4679,7 +4679,8 @@ static void status(struct seq_file *seq, mddev_t *mddev) | |||
4679 | raid5_conf_t *conf = (raid5_conf_t *) mddev->private; | 4679 | raid5_conf_t *conf = (raid5_conf_t *) mddev->private; |
4680 | int i; | 4680 | int i; |
4681 | 4681 | ||
4682 | seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout); | 4682 | seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level, |
4683 | mddev->chunk_sectors / 2, mddev->layout); | ||
4683 | seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); | 4684 | seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); |
4684 | for (i = 0; i < conf->raid_disks; i++) | 4685 | for (i = 0; i < conf->raid_disks; i++) |
4685 | seq_printf (seq, "%s", | 4686 | seq_printf (seq, "%s", |
@@ -4827,7 +4828,7 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors) | |||
4827 | * any io in the removed space completes, but it hardly seems | 4828 | * any io in the removed space completes, but it hardly seems |
4828 | * worth it. | 4829 | * worth it. |
4829 | */ | 4830 | */ |
4830 | sectors &= ~((sector_t)mddev->chunk_size/512 - 1); | 4831 | sectors &= ~((sector_t)mddev->chunk_sectors - 1); |
4831 | md_set_array_sectors(mddev, raid5_size(mddev, sectors, | 4832 | md_set_array_sectors(mddev, raid5_size(mddev, sectors, |
4832 | mddev->raid_disks)); | 4833 | mddev->raid_disks)); |
4833 | if (mddev->array_sectors > | 4834 | if (mddev->array_sectors > |
@@ -4850,7 +4851,7 @@ static int raid5_check_reshape(mddev_t *mddev) | |||
4850 | 4851 | ||
4851 | if (mddev->delta_disks == 0 && | 4852 | if (mddev->delta_disks == 0 && |
4852 | mddev->new_layout == mddev->layout && | 4853 | mddev->new_layout == mddev->layout && |
4853 | mddev->new_chunk == mddev->chunk_size) | 4854 | mddev->new_chunk == mddev->chunk_sectors << 9) |
4854 | return -EINVAL; /* nothing to do */ | 4855 | return -EINVAL; /* nothing to do */ |
4855 | if (mddev->bitmap) | 4856 | if (mddev->bitmap) |
4856 | /* Cannot grow a bitmap yet */ | 4857 | /* Cannot grow a bitmap yet */ |
@@ -4878,10 +4879,11 @@ static int raid5_check_reshape(mddev_t *mddev) | |||
4878 | * If the chunk size is greater, user-space should request more | 4879 | * If the chunk size is greater, user-space should request more |
4879 | * stripe_heads first. | 4880 | * stripe_heads first. |
4880 | */ | 4881 | */ |
4881 | if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes || | 4882 | if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4 |
4883 | > conf->max_nr_stripes || | ||
4882 | (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) { | 4884 | (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) { |
4883 | printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n", | 4885 | printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n", |
4884 | (max(mddev->chunk_size, mddev->new_chunk) | 4886 | (max(mddev->chunk_sectors << 9, mddev->new_chunk) |
4885 | / STRIPE_SIZE)*4); | 4887 | / STRIPE_SIZE)*4); |
4886 | return -ENOSPC; | 4888 | return -ENOSPC; |
4887 | } | 4889 | } |
@@ -5054,7 +5056,7 @@ static void raid5_finish_reshape(mddev_t *mddev) | |||
5054 | raid5_remove_disk(mddev, d); | 5056 | raid5_remove_disk(mddev, d); |
5055 | } | 5057 | } |
5056 | mddev->layout = conf->algorithm; | 5058 | mddev->layout = conf->algorithm; |
5057 | mddev->chunk_size = conf->chunk_size; | 5059 | mddev->chunk_sectors = conf->chunk_size >> 9; |
5058 | mddev->reshape_position = MaxSector; | 5060 | mddev->reshape_position = MaxSector; |
5059 | mddev->delta_disks = 0; | 5061 | mddev->delta_disks = 0; |
5060 | } | 5062 | } |
@@ -5183,7 +5185,8 @@ static int raid5_reconfig(mddev_t *mddev, int new_layout, int new_chunk) | |||
5183 | } | 5185 | } |
5184 | if (new_chunk > 0) { | 5186 | if (new_chunk > 0) { |
5185 | conf->chunk_size = new_chunk; | 5187 | conf->chunk_size = new_chunk; |
5186 | mddev->chunk_size = mddev->new_chunk = new_chunk; | 5188 | mddev->new_chunk = new_chunk; |
5189 | mddev->chunk_sectors = new_chunk >> 9; | ||
5187 | } | 5190 | } |
5188 | set_bit(MD_CHANGE_DEVS, &mddev->flags); | 5191 | set_bit(MD_CHANGE_DEVS, &mddev->flags); |
5189 | md_wakeup_thread(mddev->thread); | 5192 | md_wakeup_thread(mddev->thread); |