author     Andre Noll <maan@systemlinux.org>   2009-03-30 23:33:13 -0400
committer  NeilBrown <neilb@suse.de>           2009-03-30 23:33:13 -0400
commit     58c0fed400603a802968b23ddf78f029c5a84e41
tree       474fcb9775bb07f39ebb7802fb9b51d69222dcbb
parent     575a80fa4f623141e9791e41879d87800fb6d862
md: Make mddev->size sector-based.
This patch renames the "size" field of struct mddev_s to "dev_sectors" and
stores the number of 512-byte sectors instead of the number of 1K blocks in
it.

All users of that field, including raid levels 1, 4-6 and 10, are adjusted
accordingly. This simplifies the code a bit because it allows us to get rid
of a couple of divisions/multiplications by two.

In order to make checkpatch happy, some minor coding style issues have also
been addressed. In particular, size_store() now uses strict_strtoull()
instead of simple_strtoull().

Signed-off-by: Andre Noll <maan@systemlinux.org>
Signed-off-by: NeilBrown <neilb@suse.de>
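For reference, the unit change boils down to a factor of two: mddev->size
used to count 1 KiB blocks, while mddev->dev_sectors counts 512-byte
sectors. A minimal sketch of that relation (illustrative helpers only, not
part of this patch; assumes sector_t from <linux/types.h>):

	/* Not in the tree -- only illustrates the 1K-block <-> sector relation. */
	static inline sector_t kib_blocks_to_sectors(unsigned long long blocks)
	{
		return (sector_t)blocks * 2;	/* 1 KiB == 2 * 512-byte sectors */
	}

	static inline unsigned long long sectors_to_kib_blocks(sector_t sectors)
	{
		return (unsigned long long)sectors / 2;	/* user-visible "size" stays in KiB */
	}

This is why user-visible interfaces such as size_show() and get_array_info()
now report dev_sectors / 2, while v1 superblocks, which already store a
sector count, copy dev_sectors through unchanged.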
-rw-r--r--  drivers/md/bitmap.c      2
-rw-r--r--  drivers/md/faulty.c      2
-rw-r--r--  drivers/md/md.c         97
-rw-r--r--  drivers/md/md.h          3
-rw-r--r--  drivers/md/multipath.c   2
-rw-r--r--  drivers/md/raid1.c      10
-rw-r--r--  drivers/md/raid10.c      6
-rw-r--r--  drivers/md/raid5.c      24
8 files changed, 78 insertions(+), 68 deletions(-)
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 5d64da990804..f8a9f7ab2cb8 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -298,7 +298,7 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
 			    + size/512 > 0)
 				/* bitmap runs in to metadata */
 				goto bad_alignment;
-			if (rdev->data_offset + mddev->size*2
+			if (rdev->data_offset + mddev->dev_sectors
 			    > rdev->sb_start + bitmap->offset)
 				/* data runs in to bitmap */
 				goto bad_alignment;
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 7b66b9fca29d..18793c137278 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -301,7 +301,7 @@ static int run(mddev_t *mddev)
 	list_for_each_entry(rdev, &mddev->disks, same_set)
 		conf->rdev = rdev;
 
-	mddev->array_sectors = mddev->size * 2;
+	mddev->array_sectors = mddev->dev_sectors;
 	mddev->private = conf;
 
 	reconfig(mddev, mddev->layout, -1);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index b2c00ce602b1..be4a131e8c01 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -816,7 +816,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
 		mddev->clevel[0] = 0;
 		mddev->layout = sb->layout;
 		mddev->raid_disks = sb->raid_disks;
-		mddev->size = sb->size;
+		mddev->dev_sectors = sb->size * 2;
 		mddev->events = ev1;
 		mddev->bitmap_offset = 0;
 		mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
@@ -930,7 +930,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
 
 	sb->ctime = mddev->ctime;
 	sb->level = mddev->level;
-	sb->size = mddev->size;
+	sb->size = mddev->dev_sectors / 2;
 	sb->raid_disks = mddev->raid_disks;
 	sb->md_minor = mddev->md_minor;
 	sb->not_persistent = 0;
@@ -1028,7 +1028,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
 static unsigned long long
 super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
 {
-	if (num_sectors && num_sectors < rdev->mddev->size * 2)
+	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
 		return 0; /* component must fit device */
 	if (rdev->mddev->bitmap_offset)
 		return 0; /* can't move bitmap */
@@ -1220,7 +1220,7 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
 		mddev->clevel[0] = 0;
 		mddev->layout = le32_to_cpu(sb->layout);
 		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
-		mddev->size = le64_to_cpu(sb->size)/2;
+		mddev->dev_sectors = le64_to_cpu(sb->size);
 		mddev->events = ev1;
 		mddev->bitmap_offset = 0;
 		mddev->default_bitmap_offset = 1024 >> 9;
@@ -1316,7 +1316,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
 	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
 
 	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
-	sb->size = cpu_to_le64(mddev->size<<1);
+	sb->size = cpu_to_le64(mddev->dev_sectors);
 
 	if (mddev->bitmap && mddev->bitmap_file == NULL) {
 		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
@@ -1374,7 +1374,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
 {
 	struct mdp_superblock_1 *sb;
 	sector_t max_sectors;
-	if (num_sectors && num_sectors < rdev->mddev->size * 2)
+	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
 		return 0; /* component must fit device */
 	if (rdev->sb_start < rdev->data_offset) {
 		/* minor versions 1 and 2; superblock before data */
@@ -1490,8 +1490,9 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
 	if (find_rdev(mddev, rdev->bdev->bd_dev))
 		return -EEXIST;
 
-	/* make sure rdev->size exceeds mddev->size */
-	if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
+	/* make sure rdev->size exceeds mddev->dev_sectors / 2 */
+	if (rdev->size && (mddev->dev_sectors == 0 ||
+			   rdev->size < mddev->dev_sectors / 2)) {
 		if (mddev->pers) {
 			/* Cannot change size, so fail
 			 * If mddev->level <= 0, then we don't care
@@ -1500,7 +1501,7 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
 			if (mddev->level > 0)
 				return -ENOSPC;
 		} else
-			mddev->size = rdev->size;
+			mddev->dev_sectors = rdev->size * 2;
 	}
 
 	/* Verify rdev->desc_nr is unique.
@@ -2243,7 +2244,7 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 			size -= rdev->data_offset/2;
 		}
 	}
-	if (size < my_mddev->size)
+	if (size < my_mddev->dev_sectors / 2)
 		return -EINVAL; /* component must fit device */
 
 	rdev->size = size;
@@ -2809,7 +2810,7 @@ array_state_show(mddev_t *mddev, char *page)
 	else {
 		if (list_empty(&mddev->disks) &&
 		    mddev->raid_disks == 0 &&
-		    mddev->size == 0)
+		    mddev->dev_sectors == 0)
 			st = clear;
 		else
 			st = inactive;
@@ -3016,7 +3017,8 @@ __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
 static ssize_t
 size_show(mddev_t *mddev, char *page)
 {
-	return sprintf(page, "%llu\n", (unsigned long long)mddev->size);
+	return sprintf(page, "%llu\n",
+		(unsigned long long)mddev->dev_sectors / 2);
 }
 
 static int update_size(mddev_t *mddev, sector_t num_sectors);
@@ -3028,20 +3030,19 @@ size_store(mddev_t *mddev, const char *buf, size_t len)
 	 * not increase it (except from 0).
 	 * If array is active, we can try an on-line resize
 	 */
-	char *e;
-	int err = 0;
-	unsigned long long size = simple_strtoull(buf, &e, 10);
-	if (!*buf || *buf == '\n' ||
-	    (*e && *e != '\n'))
-		return -EINVAL;
+	unsigned long long sectors;
+	int err = strict_strtoull(buf, 10, &sectors);
 
+	if (err < 0)
+		return err;
+	sectors *= 2;
 	if (mddev->pers) {
-		err = update_size(mddev, size * 2);
+		err = update_size(mddev, sectors);
 		md_update_sb(mddev, 1);
 	} else {
-		if (mddev->size == 0 ||
-		    mddev->size > size)
-			mddev->size = size;
+		if (mddev->dev_sectors == 0 ||
+		    mddev->dev_sectors > sectors)
+			mddev->dev_sectors = sectors;
 		else
 			err = -ENOSPC;
 	}
@@ -3306,15 +3307,15 @@ static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
 static ssize_t
 sync_completed_show(mddev_t *mddev, char *page)
 {
-	unsigned long max_blocks, resync;
+	unsigned long max_sectors, resync;
 
 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
-		max_blocks = mddev->resync_max_sectors;
+		max_sectors = mddev->resync_max_sectors;
 	else
-		max_blocks = mddev->size << 1;
+		max_sectors = mddev->dev_sectors;
 
 	resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
-	return sprintf(page, "%lu / %lu\n", resync, max_blocks);
+	return sprintf(page, "%lu / %lu\n", resync, max_sectors);
 }
 
 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
@@ -3789,11 +3790,11 @@ static int do_md_run(mddev_t * mddev)
 
 		/* perform some consistency tests on the device.
 		 * We don't want the data to overlap the metadata,
-		 * Internal Bitmap issues has handled elsewhere.
+		 * Internal Bitmap issues have been handled elsewhere.
 		 */
 		if (rdev->data_offset < rdev->sb_start) {
-			if (mddev->size &&
-			    rdev->data_offset + mddev->size*2
+			if (mddev->dev_sectors &&
+			    rdev->data_offset + mddev->dev_sectors
 			    > rdev->sb_start) {
 				printk("md: %s: data overlaps metadata\n",
 				       mdname(mddev));
@@ -3875,7 +3876,9 @@ static int do_md_run(mddev_t * mddev)
 	}
 
 	mddev->recovery = 0;
-	mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
+	/* may be over-ridden by personality */
+	mddev->resync_max_sectors = mddev->dev_sectors;
+
 	mddev->barriers_work = 1;
 	mddev->ok_start_degraded = start_dirty_degraded;
 
@@ -4131,7 +4134,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
 		export_array(mddev);
 
 		mddev->array_sectors = 0;
-		mddev->size = 0;
+		mddev->dev_sectors = 0;
 		mddev->raid_disks = 0;
 		mddev->recovery_cp = 0;
 		mddev->resync_min = 0;
@@ -4337,8 +4340,8 @@ static int get_array_info(mddev_t * mddev, void __user * arg)
 	info.patch_version = MD_PATCHLEVEL_VERSION;
 	info.ctime         = mddev->ctime;
 	info.level         = mddev->level;
-	info.size          = mddev->size;
-	if (info.size != mddev->size) /* overflow */
+	info.size          = mddev->dev_sectors / 2;
+	if (info.size != mddev->dev_sectors / 2) /* overflow */
 		info.size = -1;
 	info.nr_disks      = nr;
 	info.raid_disks    = mddev->raid_disks;
@@ -4788,7 +4791,7 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
 
 	mddev->level         = info->level;
 	mddev->clevel[0]     = 0;
-	mddev->size          = info->size;
+	mddev->dev_sectors   = 2 * (sector_t)info->size;
 	mddev->raid_disks    = info->raid_disks;
 	/* don't set md_minor, it is determined by which /dev/md* was
 	 * openned
@@ -4926,12 +4929,18 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
 		)
 		return -EINVAL;
 	/* Check there is only one change */
-	if (info->size >= 0 && mddev->size != info->size) cnt++;
-	if (mddev->raid_disks != info->raid_disks) cnt++;
-	if (mddev->layout != info->layout) cnt++;
-	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++;
-	if (cnt == 0) return 0;
-	if (cnt > 1) return -EINVAL;
+	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
+		cnt++;
+	if (mddev->raid_disks != info->raid_disks)
+		cnt++;
+	if (mddev->layout != info->layout)
+		cnt++;
+	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
+		cnt++;
+	if (cnt == 0)
+		return 0;
+	if (cnt > 1)
+		return -EINVAL;
 
 	if (mddev->layout != info->layout) {
 		/* Change layout
@@ -4943,7 +4952,7 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
 		else
 			return mddev->pers->reconfig(mddev, info->layout, -1);
 	}
-	if (info->size >= 0 && mddev->size != info->size)
+	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
 		rv = update_size(mddev, (sector_t)info->size * 2);
 
 	if (mddev->raid_disks != info->raid_disks)
@@ -5443,7 +5452,7 @@ static void status_resync(struct seq_file *seq, mddev_t * mddev)
 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
 		max_blocks = mddev->resync_max_sectors >> 1;
 	else
-		max_blocks = mddev->size;
+		max_blocks = mddev->dev_sectors / 2;
 
 	/*
 	 * Should not happen.
@@ -6019,10 +6028,10 @@ void md_do_sync(mddev_t *mddev)
 			j = mddev->recovery_cp;
 
 	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
-		max_sectors = mddev->size << 1;
+		max_sectors = mddev->dev_sectors;
 	else {
 		/* recovery follows the physical size of devices */
-		max_sectors = mddev->size << 1;
+		max_sectors = mddev->dev_sectors;
 		j = MaxSector;
 		list_for_each_entry(rdev, &mddev->disks, same_set)
 			if (rdev->raid_disk >= 0 &&
diff --git a/drivers/md/md.h b/drivers/md/md.h
index bede26c9d4a9..946121236235 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -142,7 +142,8 @@ struct mddev_s
 	char				clevel[16];
 	int				raid_disks;
 	int				max_disks;
-	sector_t			size; /* used size of component devices */
+	sector_t			dev_sectors; /* used size of
+						      * component devices */
 	sector_t			array_sectors; /* exported array size */
 	__u64				events;
 
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 0ed1005afb58..87accf74e4b4 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -502,7 +502,7 @@ static int multipath_run (mddev_t *mddev)
 	/*
 	 * Ok, everything is just fine now
 	 */
-	mddev->array_sectors = mddev->size * 2;
+	mddev->array_sectors = mddev->dev_sectors;
 
 	mddev->queue->unplug_fn = multipath_unplug;
 	mddev->queue->backing_dev_info.congested_fn = multipath_congested;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 051ecfa61514..779958705abf 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1726,7 +1726,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 		return 0;
 	}
 
-	max_sector = mddev->size << 1;
+	max_sector = mddev->dev_sectors;
 	if (sector_nr >= max_sector) {
 		/* If we aborted, we need to abort the
 		 * sync on the 'current' bitmap chunk (there will
@@ -2051,7 +2051,7 @@ static int run(mddev_t *mddev)
 	/*
 	 * Ok, everything is just fine now
 	 */
-	mddev->array_sectors = mddev->size * 2;
+	mddev->array_sectors = mddev->dev_sectors;
 
 	mddev->queue->unplug_fn = raid1_unplug;
 	mddev->queue->backing_dev_info.congested_fn = raid1_congested;
@@ -2116,12 +2116,12 @@ static int raid1_resize(mddev_t *mddev, sector_t sectors)
 	mddev->array_sectors = sectors;
 	set_capacity(mddev->gendisk, mddev->array_sectors);
 	mddev->changed = 1;
-	if (mddev->array_sectors / 2 > mddev->size &&
+	if (mddev->array_sectors > mddev->dev_sectors &&
 	    mddev->recovery_cp == MaxSector) {
-		mddev->recovery_cp = mddev->size << 1;
+		mddev->recovery_cp = mddev->dev_sectors;
 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 	}
-	mddev->size = mddev->array_sectors / 2;
+	mddev->dev_sectors = mddev->array_sectors;
 	mddev->resync_max_sectors = sectors;
 	return 0;
 }
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index fea61e3dcd95..d56cb2ae515f 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1698,7 +1698,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 		return 0;
 
  skipped:
-	max_sector = mddev->size << 1;
+	max_sector = mddev->dev_sectors;
 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
 		max_sector = mddev->resync_max_sectors;
 	if (sector_nr >= max_sector) {
@@ -2079,7 +2079,7 @@ static int run(mddev_t *mddev)
 	conf->far_offset = fo;
 	conf->chunk_mask = (sector_t)(mddev->chunk_size>>9)-1;
 	conf->chunk_shift = ffz(~mddev->chunk_size) - 9;
-	size = mddev->size >> (conf->chunk_shift-1);
+	size = mddev->dev_sectors >> conf->chunk_shift;
 	sector_div(size, fc);
 	size = size * conf->raid_disks;
 	sector_div(size, nc);
@@ -2092,7 +2092,7 @@ static int run(mddev_t *mddev)
 	 */
 	stride += conf->raid_disks - 1;
 	sector_div(stride, conf->raid_disks);
-	mddev->size = stride << (conf->chunk_shift-1);
+	mddev->dev_sectors = stride << conf->chunk_shift;
 
 	if (fo)
 		stride = 1;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 849478e9afdc..4d7142376e58 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3629,8 +3629,8 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
 					     *(new_data_disks) -1,
 					     raid_disks, data_disks,
 					     &dd_idx, &pd_idx, conf);
-	if (last_sector >= (mddev->size<<1))
-		last_sector = (mddev->size<<1)-1;
+	if (last_sector >= mddev->dev_sectors)
+		last_sector = mddev->dev_sectors - 1;
 	while (first_sector <= last_sector) {
 		pd_idx = stripe_to_pdidx(first_sector, conf,
 					 conf->previous_raid_disks);
@@ -3670,7 +3670,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
 	struct stripe_head *sh;
 	int pd_idx;
 	int raid_disks = conf->raid_disks;
-	sector_t max_sector = mddev->size << 1;
+	sector_t max_sector = mddev->dev_sectors;
 	int sync_blocks;
 	int still_degraded = 0;
 	int i;
@@ -3708,7 +3708,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
 	 */
 	if (mddev->degraded >= conf->max_degraded &&
 	    test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
-		sector_t rv = (mddev->size << 1) - sector_nr;
+		sector_t rv = mddev->dev_sectors - sector_nr;
 		*skipped = 1;
 		return rv;
 	}
@@ -4146,8 +4146,8 @@ static int run(mddev_t *mddev)
 		conf->expand_progress = mddev->reshape_position;
 
 	/* device size must be a multiple of chunk size */
-	mddev->size &= ~(mddev->chunk_size/1024 -1);
-	mddev->resync_max_sectors = mddev->size << 1;
+	mddev->dev_sectors &= ~(mddev->chunk_size / 512 - 1);
+	mddev->resync_max_sectors = mddev->dev_sectors;
 
 	if (conf->level == 6 && conf->raid_disks < 4) {
 		printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n",
@@ -4254,8 +4254,8 @@ static int run(mddev_t *mddev)
 	mddev->queue->backing_dev_info.congested_data = mddev;
 	mddev->queue->backing_dev_info.congested_fn = raid5_congested;
 
-	mddev->array_sectors = 2 * mddev->size * (conf->previous_raid_disks -
-						  conf->max_degraded);
+	mddev->array_sectors = mddev->dev_sectors *
+		(conf->previous_raid_disks - conf->max_degraded);
 
 	blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
 
@@ -4482,11 +4482,11 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
 					  - conf->max_degraded);
 	set_capacity(mddev->gendisk, mddev->array_sectors);
 	mddev->changed = 1;
-	if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) {
-		mddev->recovery_cp = mddev->size << 1;
+	if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) {
+		mddev->recovery_cp = mddev->dev_sectors;
 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 	}
-	mddev->size = sectors /2;
+	mddev->dev_sectors = sectors;
 	mddev->resync_max_sectors = sectors;
 	return 0;
 }
@@ -4615,7 +4615,7 @@ static void end_reshape(raid5_conf_t *conf)
 	struct block_device *bdev;
 
 	if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
-		conf->mddev->array_sectors = 2 * conf->mddev->size *
+		conf->mddev->array_sectors = conf->mddev->dev_sectors *
 			(conf->raid_disks - conf->max_degraded);
 		set_capacity(conf->mddev->gendisk, conf->mddev->array_sectors);
 		conf->mddev->changed = 1;