aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/md
diff options
context:
space:
mode:
authorAndre Noll <maan@systemlinux.org>2009-06-17 18:45:01 -0400
committerNeilBrown <neilb@suse.de>2009-06-17 18:45:01 -0400
commit9d8f0363623b3da12c43007cf77f5e1a4e8a5964 (patch)
tree0fee53971a397ade209dd36c4f1ed50db6450faf /drivers/md
parentfbb704efb784e2c8418e34dc3013af76bdd58101 (diff)
md: Make mddev->chunk_size sector-based.
This patch renames the chunk_size field to chunk_sectors with the implied change of semantics.

Since
	is_power_of_2(chunk_size) = is_power_of_2(chunk_sectors << 9) = is_power_of_2(chunk_sectors)
these bits don't need an adjustment for the shift.

Signed-off-by: Andre Noll <maan@systemlinux.org>
Signed-off-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers/md')
-rw-r--r--drivers/md/linear.c2
-rw-r--r--drivers/md/md.c51
-rw-r--r--drivers/md/md.h2
-rw-r--r--drivers/md/raid0.c27
-rw-r--r--drivers/md/raid1.c4
-rw-r--r--drivers/md/raid10.c15
-rw-r--r--drivers/md/raid5.c41
7 files changed, 74 insertions(+), 68 deletions(-)
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 9b02a73fbc6b..9f7cec42dd8e 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -305,7 +305,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
305static void linear_status (struct seq_file *seq, mddev_t *mddev) 305static void linear_status (struct seq_file *seq, mddev_t *mddev)
306{ 306{
307 307
308 seq_printf(seq, " %dk rounding", mddev->chunk_size/1024); 308 seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
309} 309}
310 310
311 311
diff --git a/drivers/md/md.c b/drivers/md/md.c
index a02bde70874b..abcc0fef30e3 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -869,7 +869,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
869 mddev->minor_version = sb->minor_version; 869 mddev->minor_version = sb->minor_version;
870 mddev->patch_version = sb->patch_version; 870 mddev->patch_version = sb->patch_version;
871 mddev->external = 0; 871 mddev->external = 0;
872 mddev->chunk_size = sb->chunk_size; 872 mddev->chunk_sectors = sb->chunk_size >> 9;
873 mddev->ctime = sb->ctime; 873 mddev->ctime = sb->ctime;
874 mddev->utime = sb->utime; 874 mddev->utime = sb->utime;
875 mddev->level = sb->level; 875 mddev->level = sb->level;
@@ -892,7 +892,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
892 mddev->delta_disks = 0; 892 mddev->delta_disks = 0;
893 mddev->new_level = mddev->level; 893 mddev->new_level = mddev->level;
894 mddev->new_layout = mddev->layout; 894 mddev->new_layout = mddev->layout;
895 mddev->new_chunk = mddev->chunk_size; 895 mddev->new_chunk = mddev->chunk_sectors << 9;
896 } 896 }
897 897
898 if (sb->state & (1<<MD_SB_CLEAN)) 898 if (sb->state & (1<<MD_SB_CLEAN))
@@ -1021,7 +1021,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1021 sb->recovery_cp = 0; 1021 sb->recovery_cp = 0;
1022 1022
1023 sb->layout = mddev->layout; 1023 sb->layout = mddev->layout;
1024 sb->chunk_size = mddev->chunk_size; 1024 sb->chunk_size = mddev->chunk_sectors << 9;
1025 1025
1026 if (mddev->bitmap && mddev->bitmap_file == NULL) 1026 if (mddev->bitmap && mddev->bitmap_file == NULL)
1027 sb->state |= (1<<MD_SB_BITMAP_PRESENT); 1027 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
@@ -1278,7 +1278,7 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1278 mddev->major_version = 1; 1278 mddev->major_version = 1;
1279 mddev->patch_version = 0; 1279 mddev->patch_version = 0;
1280 mddev->external = 0; 1280 mddev->external = 0;
1281 mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9; 1281 mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
1282 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1); 1282 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
1283 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1); 1283 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
1284 mddev->level = le32_to_cpu(sb->level); 1284 mddev->level = le32_to_cpu(sb->level);
@@ -1310,7 +1310,7 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1310 mddev->delta_disks = 0; 1310 mddev->delta_disks = 0;
1311 mddev->new_level = mddev->level; 1311 mddev->new_level = mddev->level;
1312 mddev->new_layout = mddev->layout; 1312 mddev->new_layout = mddev->layout;
1313 mddev->new_chunk = mddev->chunk_size; 1313 mddev->new_chunk = mddev->chunk_sectors << 9;
1314 } 1314 }
1315 1315
1316 } else if (mddev->pers == NULL) { 1316 } else if (mddev->pers == NULL) {
@@ -1382,7 +1382,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1382 1382
1383 sb->raid_disks = cpu_to_le32(mddev->raid_disks); 1383 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1384 sb->size = cpu_to_le64(mddev->dev_sectors); 1384 sb->size = cpu_to_le64(mddev->dev_sectors);
1385 sb->chunksize = cpu_to_le32(mddev->chunk_size >> 9); 1385 sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
1386 sb->level = cpu_to_le32(mddev->level); 1386 sb->level = cpu_to_le32(mddev->level);
1387 sb->layout = cpu_to_le32(mddev->layout); 1387 sb->layout = cpu_to_le32(mddev->layout);
1388 1388
@@ -2753,7 +2753,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
2753 if (IS_ERR(priv)) { 2753 if (IS_ERR(priv)) {
2754 mddev->new_level = mddev->level; 2754 mddev->new_level = mddev->level;
2755 mddev->new_layout = mddev->layout; 2755 mddev->new_layout = mddev->layout;
2756 mddev->new_chunk = mddev->chunk_size; 2756 mddev->new_chunk = mddev->chunk_sectors << 9;
2757 mddev->raid_disks -= mddev->delta_disks; 2757 mddev->raid_disks -= mddev->delta_disks;
2758 mddev->delta_disks = 0; 2758 mddev->delta_disks = 0;
2759 module_put(pers->owner); 2759 module_put(pers->owner);
@@ -2771,7 +2771,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
2771 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 2771 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
2772 mddev->level = mddev->new_level; 2772 mddev->level = mddev->new_level;
2773 mddev->layout = mddev->new_layout; 2773 mddev->layout = mddev->new_layout;
2774 mddev->chunk_size = mddev->new_chunk; 2774 mddev->chunk_sectors = mddev->new_chunk >> 9;
2775 mddev->delta_disks = 0; 2775 mddev->delta_disks = 0;
2776 pers->run(mddev); 2776 pers->run(mddev);
2777 mddev_resume(mddev); 2777 mddev_resume(mddev);
@@ -2864,10 +2864,10 @@ static ssize_t
2864chunk_size_show(mddev_t *mddev, char *page) 2864chunk_size_show(mddev_t *mddev, char *page)
2865{ 2865{
2866 if (mddev->reshape_position != MaxSector && 2866 if (mddev->reshape_position != MaxSector &&
2867 mddev->chunk_size != mddev->new_chunk) 2867 mddev->chunk_sectors << 9 != mddev->new_chunk)
2868 return sprintf(page, "%d (%d)\n", mddev->new_chunk, 2868 return sprintf(page, "%d (%d)\n", mddev->new_chunk,
2869 mddev->chunk_size); 2869 mddev->chunk_sectors << 9);
2870 return sprintf(page, "%d\n", mddev->chunk_size); 2870 return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
2871} 2871}
2872 2872
2873static ssize_t 2873static ssize_t
@@ -2889,7 +2889,7 @@ chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
2889 } else { 2889 } else {
2890 mddev->new_chunk = n; 2890 mddev->new_chunk = n;
2891 if (mddev->reshape_position == MaxSector) 2891 if (mddev->reshape_position == MaxSector)
2892 mddev->chunk_size = n; 2892 mddev->chunk_sectors = n >> 9;
2893 } 2893 }
2894 return len; 2894 return len;
2895} 2895}
@@ -3534,9 +3534,9 @@ min_sync_store(mddev_t *mddev, const char *buf, size_t len)
3534 return -EBUSY; 3534 return -EBUSY;
3535 3535
3536 /* Must be a multiple of chunk_size */ 3536 /* Must be a multiple of chunk_size */
3537 if (mddev->chunk_size) { 3537 if (mddev->chunk_sectors) {
3538 sector_t temp = min; 3538 sector_t temp = min;
3539 if (sector_div(temp, (mddev->chunk_size>>9))) 3539 if (sector_div(temp, mddev->chunk_sectors))
3540 return -EINVAL; 3540 return -EINVAL;
3541 } 3541 }
3542 mddev->resync_min = min; 3542 mddev->resync_min = min;
@@ -3572,9 +3572,9 @@ max_sync_store(mddev_t *mddev, const char *buf, size_t len)
3572 return -EBUSY; 3572 return -EBUSY;
3573 3573
3574 /* Must be a multiple of chunk_size */ 3574 /* Must be a multiple of chunk_size */
3575 if (mddev->chunk_size) { 3575 if (mddev->chunk_sectors) {
3576 sector_t temp = max; 3576 sector_t temp = max;
3577 if (sector_div(temp, (mddev->chunk_size>>9))) 3577 if (sector_div(temp, mddev->chunk_sectors))
3578 return -EINVAL; 3578 return -EINVAL;
3579 } 3579 }
3580 mddev->resync_max = max; 3580 mddev->resync_max = max;
@@ -3665,7 +3665,7 @@ reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
3665 mddev->delta_disks = 0; 3665 mddev->delta_disks = 0;
3666 mddev->new_level = mddev->level; 3666 mddev->new_level = mddev->level;
3667 mddev->new_layout = mddev->layout; 3667 mddev->new_layout = mddev->layout;
3668 mddev->new_chunk = mddev->chunk_size; 3668 mddev->new_chunk = mddev->chunk_sectors << 9;
3669 return len; 3669 return len;
3670} 3670}
3671 3671
@@ -4007,7 +4007,7 @@ static int do_md_run(mddev_t * mddev)
4007 analyze_sbs(mddev); 4007 analyze_sbs(mddev);
4008 } 4008 }
4009 4009
4010 chunk_size = mddev->chunk_size; 4010 chunk_size = mddev->chunk_sectors << 9;
4011 4011
4012 if (chunk_size) { 4012 if (chunk_size) {
4013 if (chunk_size > MAX_CHUNK_SIZE) { 4013 if (chunk_size > MAX_CHUNK_SIZE) {
@@ -4406,7 +4406,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
4406 mddev->flags = 0; 4406 mddev->flags = 0;
4407 mddev->ro = 0; 4407 mddev->ro = 0;
4408 mddev->metadata_type[0] = 0; 4408 mddev->metadata_type[0] = 0;
4409 mddev->chunk_size = 0; 4409 mddev->chunk_sectors = 0;
4410 mddev->ctime = mddev->utime = 0; 4410 mddev->ctime = mddev->utime = 0;
4411 mddev->layout = 0; 4411 mddev->layout = 0;
4412 mddev->max_disks = 0; 4412 mddev->max_disks = 0;
@@ -4619,7 +4619,7 @@ static int get_array_info(mddev_t * mddev, void __user * arg)
4619 info.spare_disks = spare; 4619 info.spare_disks = spare;
4620 4620
4621 info.layout = mddev->layout; 4621 info.layout = mddev->layout;
4622 info.chunk_size = mddev->chunk_size; 4622 info.chunk_size = mddev->chunk_sectors << 9;
4623 4623
4624 if (copy_to_user(arg, &info, sizeof(info))) 4624 if (copy_to_user(arg, &info, sizeof(info)))
4625 return -EFAULT; 4625 return -EFAULT;
@@ -4844,7 +4844,8 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
4844 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512; 4844 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
4845 } else 4845 } else
4846 rdev->sb_start = calc_dev_sboffset(rdev->bdev); 4846 rdev->sb_start = calc_dev_sboffset(rdev->bdev);
4847 rdev->sectors = calc_num_sectors(rdev, mddev->chunk_size); 4847 rdev->sectors = calc_num_sectors(rdev,
4848 mddev->chunk_sectors << 9);
4848 4849
4849 err = bind_rdev_to_array(rdev, mddev); 4850 err = bind_rdev_to_array(rdev, mddev);
4850 if (err) { 4851 if (err) {
@@ -4914,7 +4915,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
4914 else 4915 else
4915 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512; 4916 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
4916 4917
4917 rdev->sectors = calc_num_sectors(rdev, mddev->chunk_size); 4918 rdev->sectors = calc_num_sectors(rdev, mddev->chunk_sectors << 9);
4918 4919
4919 if (test_bit(Faulty, &rdev->flags)) { 4920 if (test_bit(Faulty, &rdev->flags)) {
4920 printk(KERN_WARNING 4921 printk(KERN_WARNING
@@ -5063,7 +5064,7 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
5063 mddev->external = 0; 5064 mddev->external = 0;
5064 5065
5065 mddev->layout = info->layout; 5066 mddev->layout = info->layout;
5066 mddev->chunk_size = info->chunk_size; 5067 mddev->chunk_sectors = info->chunk_size >> 9;
5067 5068
5068 mddev->max_disks = MD_SB_DISKS; 5069 mddev->max_disks = MD_SB_DISKS;
5069 5070
@@ -5082,7 +5083,7 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
5082 get_random_bytes(mddev->uuid, 16); 5083 get_random_bytes(mddev->uuid, 16);
5083 5084
5084 mddev->new_level = mddev->level; 5085 mddev->new_level = mddev->level;
5085 mddev->new_chunk = mddev->chunk_size; 5086 mddev->new_chunk = mddev->chunk_sectors << 9;
5086 mddev->new_layout = mddev->layout; 5087 mddev->new_layout = mddev->layout;
5087 mddev->delta_disks = 0; 5088 mddev->delta_disks = 0;
5088 5089
@@ -5192,7 +5193,7 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
5192 mddev->level != info->level || 5193 mddev->level != info->level ||
5193/* mddev->layout != info->layout || */ 5194/* mddev->layout != info->layout || */
5194 !mddev->persistent != info->not_persistent|| 5195 !mddev->persistent != info->not_persistent||
5195 mddev->chunk_size != info->chunk_size || 5196 mddev->chunk_sectors != info->chunk_size >> 9 ||
5196 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ 5197 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
5197 ((state^info->state) & 0xfffffe00) 5198 ((state^info->state) & 0xfffffe00)
5198 ) 5199 )
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 8227ab909d44..5d78830043d0 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -145,7 +145,7 @@ struct mddev_s
145 int external; /* metadata is 145 int external; /* metadata is
146 * managed externally */ 146 * managed externally */
147 char metadata_type[17]; /* externally set*/ 147 char metadata_type[17]; /* externally set*/
148 int chunk_size; 148 int chunk_sectors;
149 time_t ctime, utime; 149 time_t ctime, utime;
150 int level, layout; 150 int level, layout;
151 char clevel[16]; 151 char clevel[16];
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 7cd2671cc794..f20b18ff7969 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -238,10 +238,10 @@ static int create_strip_zones(mddev_t *mddev)
238 * now since we have the hard sector sizes, we can make sure 238 * now since we have the hard sector sizes, we can make sure
239 * chunk size is a multiple of that sector size 239 * chunk size is a multiple of that sector size
240 */ 240 */
241 if (mddev->chunk_size % queue_logical_block_size(mddev->queue)) { 241 if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
242 printk(KERN_ERR "%s chunk_size of %d not valid\n", 242 printk(KERN_ERR "%s chunk_size of %d not valid\n",
243 mdname(mddev), 243 mdname(mddev),
244 mddev->chunk_size); 244 mddev->chunk_sectors << 9);
245 goto abort; 245 goto abort;
246 } 246 }
247 printk(KERN_INFO "raid0: done.\n"); 247 printk(KERN_INFO "raid0: done.\n");
@@ -270,10 +270,10 @@ static int raid0_mergeable_bvec(struct request_queue *q,
270 mddev_t *mddev = q->queuedata; 270 mddev_t *mddev = q->queuedata;
271 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); 271 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
272 int max; 272 int max;
273 unsigned int chunk_sectors = mddev->chunk_size >> 9; 273 unsigned int chunk_sectors = mddev->chunk_sectors;
274 unsigned int bio_sectors = bvm->bi_size >> 9; 274 unsigned int bio_sectors = bvm->bi_size >> 9;
275 275
276 if (is_power_of_2(mddev->chunk_size)) 276 if (is_power_of_2(mddev->chunk_sectors))
277 max = (chunk_sectors - ((sector & (chunk_sectors-1)) 277 max = (chunk_sectors - ((sector & (chunk_sectors-1))
278 + bio_sectors)) << 9; 278 + bio_sectors)) << 9;
279 else 279 else
@@ -304,11 +304,11 @@ static int raid0_run(mddev_t *mddev)
304{ 304{
305 int ret; 305 int ret;
306 306
307 if (mddev->chunk_size == 0) { 307 if (mddev->chunk_sectors == 0) {
308 printk(KERN_ERR "md/raid0: chunk size must be set.\n"); 308 printk(KERN_ERR "md/raid0: chunk size must be set.\n");
309 return -EINVAL; 309 return -EINVAL;
310 } 310 }
311 blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9); 311 blk_queue_max_sectors(mddev->queue, mddev->chunk_sectors);
312 mddev->queue->queue_lock = &mddev->queue->__queue_lock; 312 mddev->queue->queue_lock = &mddev->queue->__queue_lock;
313 313
314 ret = create_strip_zones(mddev); 314 ret = create_strip_zones(mddev);
@@ -330,7 +330,8 @@ static int raid0_run(mddev_t *mddev)
330 * chunksize should be used in that case. 330 * chunksize should be used in that case.
331 */ 331 */
332 { 332 {
333 int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE; 333 int stripe = mddev->raid_disks *
334 (mddev->chunk_sectors << 9) / PAGE_SIZE;
334 if (mddev->queue->backing_dev_info.ra_pages < 2* stripe) 335 if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
335 mddev->queue->backing_dev_info.ra_pages = 2* stripe; 336 mddev->queue->backing_dev_info.ra_pages = 2* stripe;
336 } 337 }
@@ -381,9 +382,9 @@ static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
381 unsigned int sect_in_chunk; 382 unsigned int sect_in_chunk;
382 sector_t chunk; 383 sector_t chunk;
383 raid0_conf_t *conf = mddev->private; 384 raid0_conf_t *conf = mddev->private;
384 unsigned int chunk_sects = mddev->chunk_size >> 9; 385 unsigned int chunk_sects = mddev->chunk_sectors;
385 386
386 if (is_power_of_2(mddev->chunk_size)) { 387 if (is_power_of_2(mddev->chunk_sectors)) {
387 int chunksect_bits = ffz(~chunk_sects); 388 int chunksect_bits = ffz(~chunk_sects);
388 /* find the sector offset inside the chunk */ 389 /* find the sector offset inside the chunk */
389 sect_in_chunk = sector & (chunk_sects - 1); 390 sect_in_chunk = sector & (chunk_sects - 1);
@@ -413,7 +414,7 @@ static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
413static inline int is_io_in_chunk_boundary(mddev_t *mddev, 414static inline int is_io_in_chunk_boundary(mddev_t *mddev,
414 unsigned int chunk_sects, struct bio *bio) 415 unsigned int chunk_sects, struct bio *bio)
415{ 416{
416 if (likely(is_power_of_2(mddev->chunk_size))) { 417 if (likely(is_power_of_2(mddev->chunk_sectors))) {
417 return chunk_sects >= ((bio->bi_sector & (chunk_sects-1)) 418 return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
418 + (bio->bi_size >> 9)); 419 + (bio->bi_size >> 9));
419 } else{ 420 } else{
@@ -444,7 +445,7 @@ static int raid0_make_request(struct request_queue *q, struct bio *bio)
444 bio_sectors(bio)); 445 bio_sectors(bio));
445 part_stat_unlock(); 446 part_stat_unlock();
446 447
447 chunk_sects = mddev->chunk_size >> 9; 448 chunk_sects = mddev->chunk_sectors;
448 if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) { 449 if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
449 sector_t sector = bio->bi_sector; 450 sector_t sector = bio->bi_sector;
450 struct bio_pair *bp; 451 struct bio_pair *bp;
@@ -455,7 +456,7 @@ static int raid0_make_request(struct request_queue *q, struct bio *bio)
455 /* This is a one page bio that upper layers 456 /* This is a one page bio that upper layers
456 * refuse to split for us, so we need to split it. 457 * refuse to split for us, so we need to split it.
457 */ 458 */
458 if (likely(is_power_of_2(mddev->chunk_size))) 459 if (likely(is_power_of_2(mddev->chunk_sectors)))
459 bp = bio_split(bio, chunk_sects - (sector & 460 bp = bio_split(bio, chunk_sects - (sector &
460 (chunk_sects-1))); 461 (chunk_sects-1)));
461 else 462 else
@@ -519,7 +520,7 @@ static void raid0_status(struct seq_file *seq, mddev_t *mddev)
519 zone_start = conf->strip_zone[j].zone_end; 520 zone_start = conf->strip_zone[j].zone_end;
520 } 521 }
521#endif 522#endif
522 seq_printf(seq, " %dk chunks", mddev->chunk_size/1024); 523 seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
523 return; 524 return;
524} 525}
525 526
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 5ea5bca53a5e..388635735ae5 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2161,10 +2161,10 @@ static int raid1_reshape(mddev_t *mddev)
2161 int d, d2, err; 2161 int d, d2, err;
2162 2162
2163 /* Cannot change chunk_size, layout, or level */ 2163 /* Cannot change chunk_size, layout, or level */
2164 if (mddev->chunk_size != mddev->new_chunk || 2164 if (mddev->chunk_sectors << 9 != mddev->new_chunk ||
2165 mddev->layout != mddev->new_layout || 2165 mddev->layout != mddev->new_layout ||
2166 mddev->level != mddev->new_level) { 2166 mddev->level != mddev->new_level) {
2167 mddev->new_chunk = mddev->chunk_size; 2167 mddev->new_chunk = mddev->chunk_sectors << 9;
2168 mddev->new_layout = mddev->layout; 2168 mddev->new_layout = mddev->layout;
2169 mddev->new_level = mddev->level; 2169 mddev->new_level = mddev->level;
2170 return -EINVAL; 2170 return -EINVAL;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 06bef686f91b..30029a312cf5 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -461,7 +461,7 @@ static int raid10_mergeable_bvec(struct request_queue *q,
461 mddev_t *mddev = q->queuedata; 461 mddev_t *mddev = q->queuedata;
462 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); 462 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
463 int max; 463 int max;
464 unsigned int chunk_sectors = mddev->chunk_size >> 9; 464 unsigned int chunk_sectors = mddev->chunk_sectors;
465 unsigned int bio_sectors = bvm->bi_size >> 9; 465 unsigned int bio_sectors = bvm->bi_size >> 9;
466 466
467 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; 467 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
@@ -985,7 +985,7 @@ static void status(struct seq_file *seq, mddev_t *mddev)
985 int i; 985 int i;
986 986
987 if (conf->near_copies < conf->raid_disks) 987 if (conf->near_copies < conf->raid_disks)
988 seq_printf(seq, " %dK chunks", mddev->chunk_size/1024); 988 seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
989 if (conf->near_copies > 1) 989 if (conf->near_copies > 1)
990 seq_printf(seq, " %d near-copies", conf->near_copies); 990 seq_printf(seq, " %d near-copies", conf->near_copies);
991 if (conf->far_copies > 1) { 991 if (conf->far_copies > 1) {
@@ -2050,8 +2050,8 @@ static int run(mddev_t *mddev)
2050 int nc, fc, fo; 2050 int nc, fc, fo;
2051 sector_t stride, size; 2051 sector_t stride, size;
2052 2052
2053 if (mddev->chunk_size < PAGE_SIZE || 2053 if (mddev->chunk_sectors < (PAGE_SIZE >> 9) ||
2054 !is_power_of_2(mddev->chunk_size)) { 2054 !is_power_of_2(mddev->chunk_sectors)) {
2055 printk(KERN_ERR "md/raid10: chunk size must be " 2055 printk(KERN_ERR "md/raid10: chunk size must be "
2056 "at least PAGE_SIZE(%ld) and be a power of 2.\n", PAGE_SIZE); 2056 "at least PAGE_SIZE(%ld) and be a power of 2.\n", PAGE_SIZE);
2057 return -EINVAL; 2057 return -EINVAL;
@@ -2096,8 +2096,8 @@ static int run(mddev_t *mddev)
2096 conf->far_copies = fc; 2096 conf->far_copies = fc;
2097 conf->copies = nc*fc; 2097 conf->copies = nc*fc;
2098 conf->far_offset = fo; 2098 conf->far_offset = fo;
2099 conf->chunk_mask = (sector_t)(mddev->chunk_size>>9)-1; 2099 conf->chunk_mask = mddev->chunk_sectors - 1;
2100 conf->chunk_shift = ffz(~mddev->chunk_size) - 9; 2100 conf->chunk_shift = ffz(~mddev->chunk_sectors);
2101 size = mddev->dev_sectors >> conf->chunk_shift; 2101 size = mddev->dev_sectors >> conf->chunk_shift;
2102 sector_div(size, fc); 2102 sector_div(size, fc);
2103 size = size * conf->raid_disks; 2103 size = size * conf->raid_disks;
@@ -2205,7 +2205,8 @@ static int run(mddev_t *mddev)
2205 * maybe... 2205 * maybe...
2206 */ 2206 */
2207 { 2207 {
2208 int stripe = conf->raid_disks * (mddev->chunk_size / PAGE_SIZE); 2208 int stripe = conf->raid_disks *
2209 ((mddev->chunk_sectors << 9) / PAGE_SIZE);
2209 stripe /= conf->near_copies; 2210 stripe /= conf->near_copies;
2210 if (mddev->queue->backing_dev_info.ra_pages < 2* stripe) 2211 if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
2211 mddev->queue->backing_dev_info.ra_pages = 2* stripe; 2212 mddev->queue->backing_dev_info.ra_pages = 2* stripe;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index be4e62f611bc..1e4fd5e8bfdd 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3352,13 +3352,13 @@ static int raid5_mergeable_bvec(struct request_queue *q,
3352 mddev_t *mddev = q->queuedata; 3352 mddev_t *mddev = q->queuedata;
3353 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); 3353 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
3354 int max; 3354 int max;
3355 unsigned int chunk_sectors = mddev->chunk_size >> 9; 3355 unsigned int chunk_sectors = mddev->chunk_sectors;
3356 unsigned int bio_sectors = bvm->bi_size >> 9; 3356 unsigned int bio_sectors = bvm->bi_size >> 9;
3357 3357
3358 if ((bvm->bi_rw & 1) == WRITE) 3358 if ((bvm->bi_rw & 1) == WRITE)
3359 return biovec->bv_len; /* always allow writes to be mergeable */ 3359 return biovec->bv_len; /* always allow writes to be mergeable */
3360 3360
3361 if (mddev->new_chunk < mddev->chunk_size) 3361 if (mddev->new_chunk < mddev->chunk_sectors << 9)
3362 chunk_sectors = mddev->new_chunk >> 9; 3362 chunk_sectors = mddev->new_chunk >> 9;
3363 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; 3363 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
3364 if (max < 0) max = 0; 3364 if (max < 0) max = 0;
@@ -3372,10 +3372,10 @@ static int raid5_mergeable_bvec(struct request_queue *q,
3372static int in_chunk_boundary(mddev_t *mddev, struct bio *bio) 3372static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
3373{ 3373{
3374 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); 3374 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
3375 unsigned int chunk_sectors = mddev->chunk_size >> 9; 3375 unsigned int chunk_sectors = mddev->chunk_sectors;
3376 unsigned int bio_sectors = bio->bi_size >> 9; 3376 unsigned int bio_sectors = bio->bi_size >> 9;
3377 3377
3378 if (mddev->new_chunk < mddev->chunk_size) 3378 if (mddev->new_chunk < mddev->chunk_sectors << 9)
3379 chunk_sectors = mddev->new_chunk >> 9; 3379 chunk_sectors = mddev->new_chunk >> 9;
3380 return chunk_sectors >= 3380 return chunk_sectors >=
3381 ((sector & (chunk_sectors - 1)) + bio_sectors); 3381 ((sector & (chunk_sectors - 1)) + bio_sectors);
@@ -3791,10 +3791,10 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
3791 * If old and new chunk sizes differ, we need to process the 3791 * If old and new chunk sizes differ, we need to process the
3792 * largest of these 3792 * largest of these
3793 */ 3793 */
3794 if (mddev->new_chunk > mddev->chunk_size) 3794 if (mddev->new_chunk > mddev->chunk_sectors << 9)
3795 reshape_sectors = mddev->new_chunk / 512; 3795 reshape_sectors = mddev->new_chunk / 512;
3796 else 3796 else
3797 reshape_sectors = mddev->chunk_size / 512; 3797 reshape_sectors = mddev->chunk_sectors;
3798 3798
3799 /* we update the metadata when there is more than 3Meg 3799 /* we update the metadata when there is more than 3Meg
3800 * in the block range (that is rather arbitrary, should 3800 * in the block range (that is rather arbitrary, should
@@ -4303,7 +4303,7 @@ raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
4303 raid_disks = conf->previous_raid_disks; 4303 raid_disks = conf->previous_raid_disks;
4304 } 4304 }
4305 4305
4306 sectors &= ~((sector_t)mddev->chunk_size/512 - 1); 4306 sectors &= ~((sector_t)mddev->chunk_sectors - 1);
4307 sectors &= ~((sector_t)mddev->new_chunk/512 - 1); 4307 sectors &= ~((sector_t)mddev->new_chunk/512 - 1);
4308 return sectors * (raid_disks - conf->max_degraded); 4308 return sectors * (raid_disks - conf->max_degraded);
4309} 4309}
@@ -4412,7 +4412,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
4412 conf->max_nr_stripes = NR_STRIPES; 4412 conf->max_nr_stripes = NR_STRIPES;
4413 conf->reshape_progress = mddev->reshape_position; 4413 conf->reshape_progress = mddev->reshape_position;
4414 if (conf->reshape_progress != MaxSector) { 4414 if (conf->reshape_progress != MaxSector) {
4415 conf->prev_chunk = mddev->chunk_size; 4415 conf->prev_chunk = mddev->chunk_sectors << 9;
4416 conf->prev_algo = mddev->layout; 4416 conf->prev_algo = mddev->layout;
4417 } 4417 }
4418 4418
@@ -4484,7 +4484,7 @@ static int run(mddev_t *mddev)
4484 } 4484 }
4485 /* here_new is the stripe we will write to */ 4485 /* here_new is the stripe we will write to */
4486 here_old = mddev->reshape_position; 4486 here_old = mddev->reshape_position;
4487 sector_div(here_old, (mddev->chunk_size>>9)* 4487 sector_div(here_old, mddev->chunk_sectors *
4488 (old_disks-max_degraded)); 4488 (old_disks-max_degraded));
4489 /* here_old is the first stripe that we might need to read 4489 /* here_old is the first stripe that we might need to read
4490 * from */ 4490 * from */
@@ -4499,7 +4499,7 @@ static int run(mddev_t *mddev)
4499 } else { 4499 } else {
4500 BUG_ON(mddev->level != mddev->new_level); 4500 BUG_ON(mddev->level != mddev->new_level);
4501 BUG_ON(mddev->layout != mddev->new_layout); 4501 BUG_ON(mddev->layout != mddev->new_layout);
4502 BUG_ON(mddev->chunk_size != mddev->new_chunk); 4502 BUG_ON(mddev->chunk_sectors << 9 != mddev->new_chunk);
4503 BUG_ON(mddev->delta_disks != 0); 4503 BUG_ON(mddev->delta_disks != 0);
4504 } 4504 }
4505 4505
@@ -4533,7 +4533,7 @@ static int run(mddev_t *mddev)
4533 } 4533 }
4534 4534
4535 /* device size must be a multiple of chunk size */ 4535 /* device size must be a multiple of chunk size */
4536 mddev->dev_sectors &= ~(mddev->chunk_size / 512 - 1); 4536 mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
4537 mddev->resync_max_sectors = mddev->dev_sectors; 4537 mddev->resync_max_sectors = mddev->dev_sectors;
4538 4538
4539 if (mddev->degraded > 0 && 4539 if (mddev->degraded > 0 &&
@@ -4582,7 +4582,7 @@ static int run(mddev_t *mddev)
4582 { 4582 {
4583 int data_disks = conf->previous_raid_disks - conf->max_degraded; 4583 int data_disks = conf->previous_raid_disks - conf->max_degraded;
4584 int stripe = data_disks * 4584 int stripe = data_disks *
4585 (mddev->chunk_size / PAGE_SIZE); 4585 ((mddev->chunk_sectors << 9) / PAGE_SIZE);
4586 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 4586 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
4587 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 4587 mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
4588 } 4588 }
@@ -4679,7 +4679,8 @@ static void status(struct seq_file *seq, mddev_t *mddev)
4679 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 4679 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
4680 int i; 4680 int i;
4681 4681
4682 seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout); 4682 seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
4683 mddev->chunk_sectors / 2, mddev->layout);
4683 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); 4684 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
4684 for (i = 0; i < conf->raid_disks; i++) 4685 for (i = 0; i < conf->raid_disks; i++)
4685 seq_printf (seq, "%s", 4686 seq_printf (seq, "%s",
@@ -4827,7 +4828,7 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
4827 * any io in the removed space completes, but it hardly seems 4828 * any io in the removed space completes, but it hardly seems
4828 * worth it. 4829 * worth it.
4829 */ 4830 */
4830 sectors &= ~((sector_t)mddev->chunk_size/512 - 1); 4831 sectors &= ~((sector_t)mddev->chunk_sectors - 1);
4831 md_set_array_sectors(mddev, raid5_size(mddev, sectors, 4832 md_set_array_sectors(mddev, raid5_size(mddev, sectors,
4832 mddev->raid_disks)); 4833 mddev->raid_disks));
4833 if (mddev->array_sectors > 4834 if (mddev->array_sectors >
@@ -4850,7 +4851,7 @@ static int raid5_check_reshape(mddev_t *mddev)
4850 4851
4851 if (mddev->delta_disks == 0 && 4852 if (mddev->delta_disks == 0 &&
4852 mddev->new_layout == mddev->layout && 4853 mddev->new_layout == mddev->layout &&
4853 mddev->new_chunk == mddev->chunk_size) 4854 mddev->new_chunk == mddev->chunk_sectors << 9)
4854 return -EINVAL; /* nothing to do */ 4855 return -EINVAL; /* nothing to do */
4855 if (mddev->bitmap) 4856 if (mddev->bitmap)
4856 /* Cannot grow a bitmap yet */ 4857 /* Cannot grow a bitmap yet */
@@ -4878,10 +4879,11 @@ static int raid5_check_reshape(mddev_t *mddev)
4878 * If the chunk size is greater, user-space should request more 4879 * If the chunk size is greater, user-space should request more
4879 * stripe_heads first. 4880 * stripe_heads first.
4880 */ 4881 */
4881 if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes || 4882 if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
4883 > conf->max_nr_stripes ||
4882 (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) { 4884 (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) {
4883 printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n", 4885 printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n",
4884 (max(mddev->chunk_size, mddev->new_chunk) 4886 (max(mddev->chunk_sectors << 9, mddev->new_chunk)
4885 / STRIPE_SIZE)*4); 4887 / STRIPE_SIZE)*4);
4886 return -ENOSPC; 4888 return -ENOSPC;
4887 } 4889 }
@@ -5054,7 +5056,7 @@ static void raid5_finish_reshape(mddev_t *mddev)
5054 raid5_remove_disk(mddev, d); 5056 raid5_remove_disk(mddev, d);
5055 } 5057 }
5056 mddev->layout = conf->algorithm; 5058 mddev->layout = conf->algorithm;
5057 mddev->chunk_size = conf->chunk_size; 5059 mddev->chunk_sectors = conf->chunk_size >> 9;
5058 mddev->reshape_position = MaxSector; 5060 mddev->reshape_position = MaxSector;
5059 mddev->delta_disks = 0; 5061 mddev->delta_disks = 0;
5060 } 5062 }
@@ -5183,7 +5185,8 @@ static int raid5_reconfig(mddev_t *mddev, int new_layout, int new_chunk)
5183 } 5185 }
5184 if (new_chunk > 0) { 5186 if (new_chunk > 0) {
5185 conf->chunk_size = new_chunk; 5187 conf->chunk_size = new_chunk;
5186 mddev->chunk_size = mddev->new_chunk = new_chunk; 5188 mddev->new_chunk = new_chunk;
5189 mddev->chunk_sectors = new_chunk >> 9;
5187 } 5190 }
5188 set_bit(MD_CHANGE_DEVS, &mddev->flags); 5191 set_bit(MD_CHANGE_DEVS, &mddev->flags);
5189 md_wakeup_thread(mddev->thread); 5192 md_wakeup_thread(mddev->thread);