about summary refs log tree commit diff stats
path: root/drivers/md/raid10.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/md/raid10.c')
-rw-r--r-- drivers/md/raid10.c | 23
1 file changed, 17 insertions, 6 deletions
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index ae12ceafe10c..3d9020cf6f6e 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1151,8 +1151,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1151 for ( ; mirror <= last ; mirror++) 1151 for ( ; mirror <= last ; mirror++)
1152 if ( !(p=conf->mirrors+mirror)->rdev) { 1152 if ( !(p=conf->mirrors+mirror)->rdev) {
1153 1153
1154 blk_queue_stack_limits(mddev->queue, 1154 disk_stack_limits(mddev->gendisk, rdev->bdev,
1155 rdev->bdev->bd_disk->queue); 1155 rdev->data_offset << 9);
1156 /* as we don't honour merge_bvec_fn, we must never risk 1156 /* as we don't honour merge_bvec_fn, we must never risk
1157 * violating it, so limit ->max_sector to one PAGE, as 1157 * violating it, so limit ->max_sector to one PAGE, as
1158 * a one page request is never in violation. 1158 * a one page request is never in violation.
@@ -1170,6 +1170,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1170 break; 1170 break;
1171 } 1171 }
1172 1172
1173 md_integrity_add_rdev(rdev, mddev);
1173 print_conf(conf); 1174 print_conf(conf);
1174 return err; 1175 return err;
1175} 1176}
@@ -1203,7 +1204,9 @@ static int raid10_remove_disk(mddev_t *mddev, int number)
1203 /* lost the race, try later */ 1204 /* lost the race, try later */
1204 err = -EBUSY; 1205 err = -EBUSY;
1205 p->rdev = rdev; 1206 p->rdev = rdev;
1207 goto abort;
1206 } 1208 }
1209 md_integrity_register(mddev);
1207 } 1210 }
1208abort: 1211abort:
1209 1212
@@ -2044,7 +2047,7 @@ raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
2044static int run(mddev_t *mddev) 2047static int run(mddev_t *mddev)
2045{ 2048{
2046 conf_t *conf; 2049 conf_t *conf;
2047 int i, disk_idx; 2050 int i, disk_idx, chunk_size;
2048 mirror_info_t *disk; 2051 mirror_info_t *disk;
2049 mdk_rdev_t *rdev; 2052 mdk_rdev_t *rdev;
2050 int nc, fc, fo; 2053 int nc, fc, fo;
@@ -2130,6 +2133,14 @@ static int run(mddev_t *mddev)
2130 spin_lock_init(&conf->device_lock); 2133 spin_lock_init(&conf->device_lock);
2131 mddev->queue->queue_lock = &conf->device_lock; 2134 mddev->queue->queue_lock = &conf->device_lock;
2132 2135
2136 chunk_size = mddev->chunk_sectors << 9;
2137 blk_queue_io_min(mddev->queue, chunk_size);
2138 if (conf->raid_disks % conf->near_copies)
2139 blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks);
2140 else
2141 blk_queue_io_opt(mddev->queue, chunk_size *
2142 (conf->raid_disks / conf->near_copies));
2143
2133 list_for_each_entry(rdev, &mddev->disks, same_set) { 2144 list_for_each_entry(rdev, &mddev->disks, same_set) {
2134 disk_idx = rdev->raid_disk; 2145 disk_idx = rdev->raid_disk;
2135 if (disk_idx >= mddev->raid_disks 2146 if (disk_idx >= mddev->raid_disks
@@ -2138,9 +2149,8 @@ static int run(mddev_t *mddev)
2138 disk = conf->mirrors + disk_idx; 2149 disk = conf->mirrors + disk_idx;
2139 2150
2140 disk->rdev = rdev; 2151 disk->rdev = rdev;
2141 2152 disk_stack_limits(mddev->gendisk, rdev->bdev,
2142 blk_queue_stack_limits(mddev->queue, 2153 rdev->data_offset << 9);
2143 rdev->bdev->bd_disk->queue);
2144 /* as we don't honour merge_bvec_fn, we must never risk 2154 /* as we don't honour merge_bvec_fn, we must never risk
2145 * violating it, so limit ->max_sector to one PAGE, as 2155 * violating it, so limit ->max_sector to one PAGE, as
2146 * a one page request is never in violation. 2156 * a one page request is never in violation.
@@ -2218,6 +2228,7 @@ static int run(mddev_t *mddev)
2218 2228
2219 if (conf->near_copies < mddev->raid_disks) 2229 if (conf->near_copies < mddev->raid_disks)
2220 blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec); 2230 blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
2231 md_integrity_register(mddev);
2221 return 0; 2232 return 0;
2222 2233
2223out_free_conf: 2234out_free_conf: