about summary refs log tree commit diff stats
path: root/drivers/md/raid5.c
diff options
context:
space:
mode:
author: Andre Noll <maan@systemlinux.org> 2009-06-17 18:45:27 -0400
committer: NeilBrown <neilb@suse.de> 2009-06-17 18:45:27 -0400
commit: 664e7c413f1e90eceb0b2596dd73a0832faec058 (patch)
tree: 72ea827a0f2a493766d3ea1fd14909c756aa4496 /drivers/md/raid5.c
parent: 9d8f0363623b3da12c43007cf77f5e1a4e8a5964 (diff)
md: Convert mddev->new_chunk to sectors.
A straight-forward conversion which gets rid of some multiplications/divisions/shifts. The patch also introduces a couple of new ones, most of which are due to conf->chunk_size still being represented in bytes. This will be cleaned up in subsequent patches. Signed-off-by: Andre Noll <maan@systemlinux.org> Signed-off-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r-- drivers/md/raid5.c | 45
1 file changed, 24 insertions(+), 21 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 1e4fd5e8bfdd..bc3564cfbba0 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3358,8 +3358,8 @@ static int raid5_mergeable_bvec(struct request_queue *q,
3358 if ((bvm->bi_rw & 1) == WRITE) 3358 if ((bvm->bi_rw & 1) == WRITE)
3359 return biovec->bv_len; /* always allow writes to be mergeable */ 3359 return biovec->bv_len; /* always allow writes to be mergeable */
3360 3360
3361 if (mddev->new_chunk < mddev->chunk_sectors << 9) 3361 if (mddev->new_chunk_sectors < mddev->chunk_sectors)
3362 chunk_sectors = mddev->new_chunk >> 9; 3362 chunk_sectors = mddev->new_chunk_sectors;
3363 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; 3363 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
3364 if (max < 0) max = 0; 3364 if (max < 0) max = 0;
3365 if (max <= biovec->bv_len && bio_sectors == 0) 3365 if (max <= biovec->bv_len && bio_sectors == 0)
@@ -3375,8 +3375,8 @@ static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
3375 unsigned int chunk_sectors = mddev->chunk_sectors; 3375 unsigned int chunk_sectors = mddev->chunk_sectors;
3376 unsigned int bio_sectors = bio->bi_size >> 9; 3376 unsigned int bio_sectors = bio->bi_size >> 9;
3377 3377
3378 if (mddev->new_chunk < mddev->chunk_sectors << 9) 3378 if (mddev->new_chunk_sectors < mddev->chunk_sectors)
3379 chunk_sectors = mddev->new_chunk >> 9; 3379 chunk_sectors = mddev->new_chunk_sectors;
3380 return chunk_sectors >= 3380 return chunk_sectors >=
3381 ((sector & (chunk_sectors - 1)) + bio_sectors); 3381 ((sector & (chunk_sectors - 1)) + bio_sectors);
3382} 3382}
@@ -3791,8 +3791,8 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
3791 * If old and new chunk sizes differ, we need to process the 3791 * If old and new chunk sizes differ, we need to process the
3792 * largest of these 3792 * largest of these
3793 */ 3793 */
3794 if (mddev->new_chunk > mddev->chunk_sectors << 9) 3794 if (mddev->new_chunk_sectors > mddev->chunk_sectors)
3795 reshape_sectors = mddev->new_chunk / 512; 3795 reshape_sectors = mddev->new_chunk_sectors;
3796 else 3796 else
3797 reshape_sectors = mddev->chunk_sectors; 3797 reshape_sectors = mddev->chunk_sectors;
3798 3798
@@ -4304,7 +4304,7 @@ raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
4304 } 4304 }
4305 4305
4306 sectors &= ~((sector_t)mddev->chunk_sectors - 1); 4306 sectors &= ~((sector_t)mddev->chunk_sectors - 1);
4307 sectors &= ~((sector_t)mddev->new_chunk/512 - 1); 4307 sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
4308 return sectors * (raid_disks - conf->max_degraded); 4308 return sectors * (raid_disks - conf->max_degraded);
4309} 4309}
4310 4310
@@ -4336,10 +4336,11 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
4336 return ERR_PTR(-EINVAL); 4336 return ERR_PTR(-EINVAL);
4337 } 4337 }
4338 4338
4339 if (!mddev->new_chunk || mddev->new_chunk % PAGE_SIZE || 4339 if (!mddev->new_chunk_sectors ||
4340 !is_power_of_2(mddev->new_chunk)) { 4340 (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
4341 !is_power_of_2(mddev->new_chunk_sectors)) {
4341 printk(KERN_ERR "raid5: invalid chunk size %d for %s\n", 4342 printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
4342 mddev->new_chunk, mdname(mddev)); 4343 mddev->new_chunk_sectors << 9, mdname(mddev));
4343 return ERR_PTR(-EINVAL); 4344 return ERR_PTR(-EINVAL);
4344 } 4345 }
4345 4346
@@ -4402,7 +4403,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
4402 conf->fullsync = 1; 4403 conf->fullsync = 1;
4403 } 4404 }
4404 4405
4405 conf->chunk_size = mddev->new_chunk; 4406 conf->chunk_size = mddev->new_chunk_sectors << 9;
4406 conf->level = mddev->new_level; 4407 conf->level = mddev->new_level;
4407 if (conf->level == 6) 4408 if (conf->level == 6)
4408 conf->max_degraded = 2; 4409 conf->max_degraded = 2;
@@ -4476,7 +4477,7 @@ static int run(mddev_t *mddev)
4476 * geometry. 4477 * geometry.
4477 */ 4478 */
4478 here_new = mddev->reshape_position; 4479 here_new = mddev->reshape_position;
4479 if (sector_div(here_new, (mddev->new_chunk>>9)* 4480 if (sector_div(here_new, mddev->new_chunk_sectors *
4480 (mddev->raid_disks - max_degraded))) { 4481 (mddev->raid_disks - max_degraded))) {
4481 printk(KERN_ERR "raid5: reshape_position not " 4482 printk(KERN_ERR "raid5: reshape_position not "
4482 "on a stripe boundary\n"); 4483 "on a stripe boundary\n");
@@ -4499,7 +4500,7 @@ static int run(mddev_t *mddev)
4499 } else { 4500 } else {
4500 BUG_ON(mddev->level != mddev->new_level); 4501 BUG_ON(mddev->level != mddev->new_level);
4501 BUG_ON(mddev->layout != mddev->new_layout); 4502 BUG_ON(mddev->layout != mddev->new_layout);
4502 BUG_ON(mddev->chunk_sectors << 9 != mddev->new_chunk); 4503 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
4503 BUG_ON(mddev->delta_disks != 0); 4504 BUG_ON(mddev->delta_disks != 0);
4504 } 4505 }
4505 4506
@@ -4851,7 +4852,7 @@ static int raid5_check_reshape(mddev_t *mddev)
4851 4852
4852 if (mddev->delta_disks == 0 && 4853 if (mddev->delta_disks == 0 &&
4853 mddev->new_layout == mddev->layout && 4854 mddev->new_layout == mddev->layout &&
4854 mddev->new_chunk == mddev->chunk_sectors << 9) 4855 mddev->new_chunk_sectors == mddev->chunk_sectors)
4855 return -EINVAL; /* nothing to do */ 4856 return -EINVAL; /* nothing to do */
4856 if (mddev->bitmap) 4857 if (mddev->bitmap)
4857 /* Cannot grow a bitmap yet */ 4858 /* Cannot grow a bitmap yet */
@@ -4881,9 +4882,11 @@ static int raid5_check_reshape(mddev_t *mddev)
4881 */ 4882 */
4882 if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4 4883 if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
4883 > conf->max_nr_stripes || 4884 > conf->max_nr_stripes ||
4884 (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) { 4885 ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
4886 > conf->max_nr_stripes) {
4885 printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n", 4887 printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n",
4886 (max(mddev->chunk_sectors << 9, mddev->new_chunk) 4888 (max(mddev->chunk_sectors << 9,
4889 mddev->new_chunk_sectors << 9)
4887 / STRIPE_SIZE)*4); 4890 / STRIPE_SIZE)*4);
4888 return -ENOSPC; 4891 return -ENOSPC;
4889 } 4892 }
@@ -4929,7 +4932,7 @@ static int raid5_start_reshape(mddev_t *mddev)
4929 conf->previous_raid_disks = conf->raid_disks; 4932 conf->previous_raid_disks = conf->raid_disks;
4930 conf->raid_disks += mddev->delta_disks; 4933 conf->raid_disks += mddev->delta_disks;
4931 conf->prev_chunk = conf->chunk_size; 4934 conf->prev_chunk = conf->chunk_size;
4932 conf->chunk_size = mddev->new_chunk; 4935 conf->chunk_size = mddev->new_chunk_sectors << 9;
4933 conf->prev_algo = conf->algorithm; 4936 conf->prev_algo = conf->algorithm;
4934 conf->algorithm = mddev->new_layout; 4937 conf->algorithm = mddev->new_layout;
4935 if (mddev->delta_disks < 0) 4938 if (mddev->delta_disks < 0)
@@ -5114,7 +5117,7 @@ static void *raid5_takeover_raid1(mddev_t *mddev)
5114 5117
5115 mddev->new_level = 5; 5118 mddev->new_level = 5;
5116 mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC; 5119 mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
5117 mddev->new_chunk = chunksect << 9; 5120 mddev->new_chunk_sectors = chunksect;
5118 5121
5119 return setup_conf(mddev); 5122 return setup_conf(mddev);
5120} 5123}
@@ -5185,7 +5188,7 @@ static int raid5_reconfig(mddev_t *mddev, int new_layout, int new_chunk)
5185 } 5188 }
5186 if (new_chunk > 0) { 5189 if (new_chunk > 0) {
5187 conf->chunk_size = new_chunk; 5190 conf->chunk_size = new_chunk;
5188 mddev->new_chunk = new_chunk; 5191 mddev->new_chunk_sectors = new_chunk >> 9;
5189 mddev->chunk_sectors = new_chunk >> 9; 5192 mddev->chunk_sectors = new_chunk >> 9;
5190 } 5193 }
5191 set_bit(MD_CHANGE_DEVS, &mddev->flags); 5194 set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -5194,7 +5197,7 @@ static int raid5_reconfig(mddev_t *mddev, int new_layout, int new_chunk)
5194 if (new_layout >= 0) 5197 if (new_layout >= 0)
5195 mddev->new_layout = new_layout; 5198 mddev->new_layout = new_layout;
5196 if (new_chunk > 0) 5199 if (new_chunk > 0)
5197 mddev->new_chunk = new_chunk; 5200 mddev->new_chunk_sectors = new_chunk >> 9;
5198 } 5201 }
5199 return 0; 5202 return 0;
5200} 5203}
@@ -5219,7 +5222,7 @@ static int raid6_reconfig(mddev_t *mddev, int new_layout, int new_chunk)
5219 if (new_layout >= 0) 5222 if (new_layout >= 0)
5220 mddev->new_layout = new_layout; 5223 mddev->new_layout = new_layout;
5221 if (new_chunk > 0) 5224 if (new_chunk > 0)
5222 mddev->new_chunk = new_chunk; 5225 mddev->new_chunk_sectors = new_chunk >> 9;
5223 5226
5224 return 0; 5227 return 0;
5225} 5228}