Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/dm-crypt.c	2
-rw-r--r--	drivers/md/dm-exception-store.c	9
-rw-r--r--	drivers/md/dm-table.c	2
-rw-r--r--	drivers/md/dm.c	4
-rw-r--r--	drivers/md/linear.c	4
-rw-r--r--	drivers/md/md.c	74
-rw-r--r--	drivers/md/multipath.c	7
-rw-r--r--	drivers/md/raid0.c	9
-rw-r--r--	drivers/md/raid1.c	9
-rw-r--r--	drivers/md/raid10.c	19
-rw-r--r--	drivers/md/raid5.c	28
11 files changed, 104 insertions(+), 63 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 9933eb861c71..529e2ba505c3 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -776,7 +776,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 		 * But don't wait if split was due to the io size restriction
 		 */
 		if (unlikely(out_of_pages))
-			congestion_wait(WRITE, HZ/100);
+			congestion_wait(BLK_RW_ASYNC, HZ/100);
 
 		/*
 		 * With async crypto it is unsafe to share the crypto context
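
The one-line change above tracks the block layer's switch from read/write to sync/async congestion accounting: a writer that ran out of pages should throttle against the async-writeback bucket rather than the old WRITE constant. A standalone C sketch of the call shape, with stand-in definitions (the real enum and a genuinely sleeping congestion_wait() live in the block layer):

#include <stdio.h>

/* Stand-ins for the kernel's congestion buckets (illustrative only). */
enum { BLK_RW_SYNC = 0, BLK_RW_ASYNC = 1 };
#define HZ 100

/* Toy congestion_wait(): the kernel version sleeps until the chosen
 * bucket clears congestion or the timeout (in ticks) expires. */
static long congestion_wait(int bucket, long timeout)
{
	printf("throttle on %s congestion, up to %ld tick(s)\n",
	       bucket == BLK_RW_ASYNC ? "async" : "sync", timeout);
	return timeout;
}

int main(void)
{
	int out_of_pages = 1;

	if (out_of_pages)			/* mirrors the hunk above */
		congestion_wait(BLK_RW_ASYNC, HZ/100);
	return 0;
}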
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index c3ae51584b12..3710ff88fc10 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -195,7 +195,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
 			       struct dm_exception_store **store)
 {
 	int r = 0;
-	struct dm_exception_store_type *type;
+	struct dm_exception_store_type *type = NULL;
 	struct dm_exception_store *tmp_store;
 	char persistent;
 
@@ -211,12 +211,15 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
 	}
 
 	persistent = toupper(*argv[1]);
-	if (persistent != 'P' && persistent != 'N') {
+	if (persistent == 'P')
+		type = get_type("P");
+	else if (persistent == 'N')
+		type = get_type("N");
+	else {
 		ti->error = "Persistent flag is not P or N";
 		return -EINVAL;
 	}
 
-	type = get_type(&persistent);
 	if (!type) {
 		ti->error = "Exception store type not recognised";
 		r = -EINVAL;
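
Two things happen in this hunk. The old code handed get_type() the address of a single char ('&persistent'), which is not a NUL-terminated string, so the lookup could read past it; the new code passes the literals "P"/"N" instead. Initialising 'type' to NULL keeps the shared 'if (!type)' check well-defined on the error path. A userspace sketch of the difference (the get_type() stand-in here is hypothetical):

#include <stdio.h>
#include <string.h>

/* Stand-in for get_type(): expects a proper NUL-terminated name. */
static const char *get_type(const char *name)
{
	if (strcmp(name, "P") == 0)
		return "persistent";
	if (strcmp(name, "N") == 0)
		return "transient";
	return NULL;
}

int main(void)
{
	char persistent = 'P';
	const char *type = NULL;

	/* Old shape (buggy): &persistent points at ONE char with no
	 * terminating NUL, so strcmp() in the lookup reads past it:
	 *	type = get_type(&persistent);
	 */

	/* New shape: pass a string literal per recognised flag value. */
	if (persistent == 'P')
		type = get_type("P");
	else if (persistent == 'N')
		type = get_type("N");

	printf("type = %s\n", type ? type : "(unrecognised)");
	return 0;
}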
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 4899ebe767c8..2cba557d9e61 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -495,7 +495,7 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
 		return 0;
 	}
 
-	if (blk_stack_limits(limits, &q->limits, start) < 0)
+	if (blk_stack_limits(limits, &q->limits, start << 9) < 0)
 		DMWARN("%s: target device %s is misaligned",
 		       dm_device_name(ti->table->md), bdevname(bdev, b));
 
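
The '<< 9' converts the target's 'start' from 512-byte sectors to the byte offset that blk_stack_limits() expects; without it, alignment was checked against a value 512 times too small. A quick standalone check of the conversion (the numbers are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t start_sectors = 63;		/* classic DOS partition offset */
	uint64_t start_bytes = start_sectors << 9;

	printf("%llu sectors -> %llu bytes\n",
	       (unsigned long long)start_sectors,
	       (unsigned long long)start_bytes);	/* 63 -> 32256 */
	return 0;
}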
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3c6d4ee8921d..9acd54a5cffb 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1017,7 +1017,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
 	clone->bi_flags |= 1 << BIO_CLONED;
 
 	if (bio_integrity(bio)) {
-		bio_integrity_clone(clone, bio, GFP_NOIO);
+		bio_integrity_clone(clone, bio, GFP_NOIO, bs);
 		bio_integrity_trim(clone,
 				   bio_sector_offset(bio, idx, offset), len);
 	}
@@ -1045,7 +1045,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
 	clone->bi_flags &= ~(1 << BIO_SEG_VALID);
 
 	if (bio_integrity(bio)) {
-		bio_integrity_clone(clone, bio, GFP_NOIO);
+		bio_integrity_clone(clone, bio, GFP_NOIO, bs);
 
 		if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
 			bio_integrity_trim(clone,
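
Both dm.c hunks add a fourth argument: after the matching block-layer change, bio_integrity_clone() allocates the clone's integrity payload from a caller-supplied bio_set rather than a global pool, so dm passes down the bio_set ('bs') the clone itself came from. A generic userspace sketch of threading an allocation pool through a clone helper (all names here are hypothetical stand-ins):

#include <stdlib.h>
#include <string.h>

struct pool { size_t obj_size; };	/* stands in for bio_set */
struct payload { char data[64]; };	/* stands in for the integrity payload */

static void *pool_alloc(struct pool *p)
{
	return malloc(p->obj_size);	/* kernel side: mempool-backed */
}

/* The clone helper takes the pool explicitly, so the copy is carved
 * from the same reserve as its parent object. */
static struct payload *payload_clone(const struct payload *src, struct pool *bs)
{
	struct payload *dst = pool_alloc(bs);

	if (dst)
		memcpy(dst, src, sizeof(*dst));
	return dst;
}

int main(void)
{
	struct pool bs = { sizeof(struct payload) };
	struct payload orig = { "integrity metadata" };

	free(payload_clone(&orig, &bs));
	return 0;
}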
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 15c8b7b25a9b..5810fa906af0 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -166,8 +166,8 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
 			rdev->sectors = sectors * mddev->chunk_sectors;
 		}
 
-		blk_queue_stack_limits(mddev->queue,
-				       rdev->bdev->bd_disk->queue);
+		disk_stack_limits(mddev->gendisk, rdev->bdev,
+				  rdev->data_offset << 9);
 		/* as we don't honour merge_bvec_fn, we must never risk
 		 * violating it, so limit ->max_sector to one PAGE, as
 		 * a one page request is never in violation.
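
This substitution, repeated below for multipath, raid0, raid1 and raid10, replaces raw queue-limit stacking with disk_stack_limits(), which also feeds in the member's data offset (sectors shifted to bytes with '<< 9') so alignment is computed from where the md data actually starts, not from sector 0 of the member device. A toy model of why the offset matters (numbers are hypothetical):

#include <stdio.h>
#include <stdint.h>

/* An offset is misaligned if it isn't a multiple of the device's
 * preferred I/O granularity; disk_stack_limits() performs this kind of
 * check using the member's data_offset in bytes. */
static int misaligned(uint64_t offset_bytes, uint32_t io_min_bytes)
{
	return offset_bytes % io_min_bytes != 0;
}

int main(void)
{
	uint64_t data_offset_sectors = 264;	/* example superblock offset */
	uint64_t offset_bytes = data_offset_sectors << 9;

	printf("offset %llu bytes vs 64 KiB io_min: %s\n",
	       (unsigned long long)offset_bytes,
	       misaligned(offset_bytes, 65536) ? "misaligned" : "aligned");
	return 0;
}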
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 09be637d52cb..d4351ff0849f 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1756,9 +1756,10 @@ static void print_sb_1(struct mdp_superblock_1 *sb)
 	__u8 *uuid;
 
 	uuid = sb->set_uuid;
-	printk(KERN_INFO "md: SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x"
-	       ":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n"
-	       KERN_INFO "md: Name: \"%s\" CT:%llu\n",
+	printk(KERN_INFO
+	       "md: SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x"
+	       ":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n"
+	       "md: Name: \"%s\" CT:%llu\n",
 		le32_to_cpu(sb->major_version),
 		le32_to_cpu(sb->feature_map),
 		uuid[0], uuid[1], uuid[2], uuid[3],
@@ -1770,12 +1771,13 @@ static void print_sb_1(struct mdp_superblock_1 *sb)
 		& MD_SUPERBLOCK_1_TIME_SEC_MASK);
 
 	uuid = sb->device_uuid;
-	printk(KERN_INFO "md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
+	printk(KERN_INFO
+	       "md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
 	       " RO:%llu\n"
-	       KERN_INFO "md: Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x"
+	       "md: Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x"
 	       ":%02x%02x%02x%02x%02x%02x\n"
-	       KERN_INFO "md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
-	       KERN_INFO "md: (MaxDev:%u) \n",
+	       "md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
+	       "md: (MaxDev:%u) \n",
 		le32_to_cpu(sb->level),
 		(unsigned long long)le64_to_cpu(sb->size),
 		le32_to_cpu(sb->raid_disks),
@@ -3573,7 +3575,8 @@ suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
 	char *e;
 	unsigned long long new = simple_strtoull(buf, &e, 10);
 
-	if (mddev->pers->quiesce == NULL)
+	if (mddev->pers == NULL ||
+	    mddev->pers->quiesce == NULL)
 		return -EINVAL;
 	if (buf == e || (*e && *e != '\n'))
 		return -EINVAL;
@@ -3601,7 +3604,8 @@ suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
 	char *e;
 	unsigned long long new = simple_strtoull(buf, &e, 10);
 
-	if (mddev->pers->quiesce == NULL)
+	if (mddev->pers == NULL ||
+	    mddev->pers->quiesce == NULL)
 		return -EINVAL;
 	if (buf == e || (*e && *e != '\n'))
 		return -EINVAL;
@@ -3844,11 +3848,9 @@ static int md_alloc(dev_t dev, char *name)
 	flush_scheduled_work();
 
 	mutex_lock(&disks_mutex);
-	if (mddev->gendisk) {
-		mutex_unlock(&disks_mutex);
-		mddev_put(mddev);
-		return -EEXIST;
-	}
+	error = -EEXIST;
+	if (mddev->gendisk)
+		goto abort;
 
 	if (name) {
 		/* Need to ensure that 'name' is not a duplicate.
@@ -3860,17 +3862,15 @@ static int md_alloc(dev_t dev, char *name)
 			if (mddev2->gendisk &&
 			    strcmp(mddev2->gendisk->disk_name, name) == 0) {
 				spin_unlock(&all_mddevs_lock);
-				return -EEXIST;
+				goto abort;
 			}
 		spin_unlock(&all_mddevs_lock);
 	}
 
+	error = -ENOMEM;
 	mddev->queue = blk_alloc_queue(GFP_KERNEL);
-	if (!mddev->queue) {
-		mutex_unlock(&disks_mutex);
-		mddev_put(mddev);
-		return -ENOMEM;
-	}
+	if (!mddev->queue)
+		goto abort;
 	mddev->queue->queuedata = mddev;
 
 	/* Can be unlocked because the queue is new: no concurrency */
@@ -3880,11 +3880,9 @@ static int md_alloc(dev_t dev, char *name)
 
 	disk = alloc_disk(1 << shift);
 	if (!disk) {
-		mutex_unlock(&disks_mutex);
 		blk_cleanup_queue(mddev->queue);
 		mddev->queue = NULL;
-		mddev_put(mddev);
-		return -ENOMEM;
+		goto abort;
 	}
 	disk->major = MAJOR(mddev->unit);
 	disk->first_minor = unit << shift;
@@ -3906,16 +3904,22 @@ static int md_alloc(dev_t dev, char *name)
 	mddev->gendisk = disk;
 	error = kobject_init_and_add(&mddev->kobj, &md_ktype,
 				     &disk_to_dev(disk)->kobj, "%s", "md");
-	mutex_unlock(&disks_mutex);
-	if (error)
+	if (error) {
+		/* This isn't possible, but as kobject_init_and_add is marked
+		 * __must_check, we must do something with the result
+		 */
 		printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
 		       disk->disk_name);
-	else {
+		error = 0;
+	}
+ abort:
+	mutex_unlock(&disks_mutex);
+	if (!error) {
 		kobject_uevent(&mddev->kobj, KOBJ_ADD);
 		mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state");
 	}
 	mddev_put(mddev);
-	return 0;
+	return error;
 }
 
 static struct kobject *md_probe(dev_t dev, int *part, void *data)
@@ -6334,10 +6338,16 @@ void md_do_sync(mddev_t *mddev)
 			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
 		}
 
-		if (j >= mddev->resync_max)
-			wait_event(mddev->recovery_wait,
-				   mddev->resync_max > j
-				   || kthread_should_stop());
+		while (j >= mddev->resync_max && !kthread_should_stop()) {
+			/* As this condition is controlled by user-space,
+			 * we can block indefinitely, so use '_interruptible'
+			 * to avoid triggering warnings.
+			 */
+			flush_signals(current); /* just in case */
+			wait_event_interruptible(mddev->recovery_wait,
+						 mddev->resync_max > j
+						 || kthread_should_stop());
+		}
 
 		if (kthread_should_stop())
 			goto interrupted;
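
The md_do_sync() hunk swaps a single uninterruptible wait_event() for a loop around wait_event_interruptible(): resync_max is set from sysfs, so the thread may legitimately block forever, and an uninterruptible sleep of that kind trips hung-task warnings. The loop re-checks the condition after every wakeup. A userspace analogue of the pattern using a condition variable:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t recovery_wait = PTHREAD_COND_INITIALIZER;
static unsigned long long resync_max;	/* driven by "user space" */
static int stop_requested;

/* Block until progress past j is allowed or a stop is requested,
 * re-evaluating the predicate on every wakeup, as the loop above does. */
static void wait_until_allowed(unsigned long long j)
{
	pthread_mutex_lock(&lock);
	while (j >= resync_max && !stop_requested)
		pthread_cond_wait(&recovery_wait, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	stop_requested = 1;	/* with resync_max == 0, only a stop unblocks us */
	wait_until_allowed(42);
	printf("proceeded, stop=%d\n", stop_requested);
	return 0;
}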
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index cbe368fa6598..237fe3fd235c 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -294,7 +294,8 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 	for (path = first; path <= last; path++)
 		if ((p=conf->multipaths+path)->rdev == NULL) {
 			q = rdev->bdev->bd_disk->queue;
-			blk_queue_stack_limits(mddev->queue, q);
+			disk_stack_limits(mddev->gendisk, rdev->bdev,
+					  rdev->data_offset << 9);
 
 		/* as we don't honour merge_bvec_fn, we must never risk
 		 * violating it, so limit ->max_sector to one PAGE, as
@@ -463,9 +464,9 @@ static int multipath_run (mddev_t *mddev)
 
 		disk = conf->multipaths + disk_idx;
 		disk->rdev = rdev;
+		disk_stack_limits(mddev->gendisk, rdev->bdev,
+				  rdev->data_offset << 9);
 
-		blk_queue_stack_limits(mddev->queue,
-				       rdev->bdev->bd_disk->queue);
 		/* as we don't honour merge_bvec_fn, we must never risk
 		 * violating it, not that we ever expect a device with
 		 * a merge_bvec_fn to be involved in multipath */
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index ab4a489d8695..335f490dcad6 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -170,8 +170,8 @@ static int create_strip_zones(mddev_t *mddev)
 		}
 		dev[j] = rdev1;
 
-		blk_queue_stack_limits(mddev->queue,
-				       rdev1->bdev->bd_disk->queue);
+		disk_stack_limits(mddev->gendisk, rdev1->bdev,
+				  rdev1->data_offset << 9);
 		/* as we don't honour merge_bvec_fn, we must never risk
 		 * violating it, so limit ->max_sector to one PAGE, as
 		 * a one page request is never in violation.
@@ -250,6 +250,11 @@ static int create_strip_zones(mddev_t *mddev)
 		       mddev->chunk_sectors << 9);
 		goto abort;
 	}
+
+	blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+	blk_queue_io_opt(mddev->queue,
+			 (mddev->chunk_sectors << 9) * mddev->raid_disks);
+
 	printk(KERN_INFO "raid0: done.\n");
 	mddev->private = conf;
 	return 0;
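
The new raid0 hunk publishes I/O hints: io_min is one chunk (the smallest request that avoids splitting across members) and io_opt is a full stripe (chunk times the number of disks), so well-behaved callers can size and align requests to touch every member exactly once. The arithmetic, checked standalone with made-up geometry:

#include <stdio.h>

int main(void)
{
	unsigned int chunk_sectors = 128;	/* 64 KiB chunk */
	int raid_disks = 4;
	unsigned int io_min = chunk_sectors << 9;
	unsigned int io_opt = (chunk_sectors << 9) * raid_disks;

	printf("io_min = %u bytes, io_opt = %u bytes\n", io_min, io_opt);
	/* 65536 and 262144: a 256 KiB write covers one full stripe. */
	return 0;
}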
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 89939a7aef57..0569efba0c02 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1123,8 +1123,8 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 	for (mirror = first; mirror <= last; mirror++)
 		if ( !(p=conf->mirrors+mirror)->rdev) {
 
-			blk_queue_stack_limits(mddev->queue,
-					       rdev->bdev->bd_disk->queue);
+			disk_stack_limits(mddev->gendisk, rdev->bdev,
+					  rdev->data_offset << 9);
 			/* as we don't honour merge_bvec_fn, we must never risk
 			 * violating it, so limit ->max_sector to one PAGE, as
 			 * a one page request is never in violation.
@@ -1988,9 +1988,8 @@ static int run(mddev_t *mddev)
 		disk = conf->mirrors + disk_idx;
 
 		disk->rdev = rdev;
-
-		blk_queue_stack_limits(mddev->queue,
-				       rdev->bdev->bd_disk->queue);
+		disk_stack_limits(mddev->gendisk, rdev->bdev,
+				  rdev->data_offset << 9);
 		/* as we don't honour merge_bvec_fn, we must never risk
 		 * violating it, so limit ->max_sector to one PAGE, as
 		 * a one page request is never in violation.
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index ae12ceafe10c..7298a5e5a183 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1151,8 +1151,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 	for ( ; mirror <= last ; mirror++)
 		if ( !(p=conf->mirrors+mirror)->rdev) {
 
-			blk_queue_stack_limits(mddev->queue,
-					       rdev->bdev->bd_disk->queue);
+			disk_stack_limits(mddev->gendisk, rdev->bdev,
+					  rdev->data_offset << 9);
 			/* as we don't honour merge_bvec_fn, we must never risk
 			 * violating it, so limit ->max_sector to one PAGE, as
 			 * a one page request is never in violation.
@@ -2044,7 +2044,7 @@ raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
 static int run(mddev_t *mddev)
 {
 	conf_t *conf;
-	int i, disk_idx;
+	int i, disk_idx, chunk_size;
 	mirror_info_t *disk;
 	mdk_rdev_t *rdev;
 	int nc, fc, fo;
@@ -2130,6 +2130,14 @@ static int run(mddev_t *mddev)
 	spin_lock_init(&conf->device_lock);
 	mddev->queue->queue_lock = &conf->device_lock;
 
+	chunk_size = mddev->chunk_sectors << 9;
+	blk_queue_io_min(mddev->queue, chunk_size);
+	if (conf->raid_disks % conf->near_copies)
+		blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks);
+	else
+		blk_queue_io_opt(mddev->queue, chunk_size *
+				 (conf->raid_disks / conf->near_copies));
+
 	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		disk_idx = rdev->raid_disk;
 		if (disk_idx >= mddev->raid_disks
@@ -2138,9 +2146,8 @@ static int run(mddev_t *mddev)
 		disk = conf->mirrors + disk_idx;
 
 		disk->rdev = rdev;
-
-		blk_queue_stack_limits(mddev->queue,
-				       rdev->bdev->bd_disk->queue);
+		disk_stack_limits(mddev->gendisk, rdev->bdev,
+				  rdev->data_offset << 9);
 		/* as we don't honour merge_bvec_fn, we must never risk
 		 * violating it, so limit ->max_sector to one PAGE, as
 		 * a one page request is never in violation.
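
raid10's io_opt choice above is layout-dependent: with 'near' copies and raid_disks divisible by near_copies, the data layout repeats after raid_disks/near_copies distinct chunks, so that is the optimal width; when it doesn't divide evenly, the pattern only repeats after raid_disks chunks. A standalone restatement of that branch (illustrative numbers):

#include <stdio.h>

static unsigned int io_opt(unsigned int chunk_bytes, int raid_disks,
			   int near_copies)
{
	if (raid_disks % near_copies)
		return chunk_bytes * raid_disks;
	return chunk_bytes * (raid_disks / near_copies);
}

int main(void)
{
	printf("4 disks, near=2: %u\n", io_opt(65536, 4, 2));	/* 131072 */
	printf("5 disks, near=2: %u\n", io_opt(65536, 5, 2));	/* 327680 */
	return 0;
}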
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f9f991e6e138..37835538b58e 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3699,13 +3699,21 @@ static int make_request(struct request_queue *q, struct bio * bi)
 				goto retry;
 			}
 		}
-		/* FIXME what if we get a false positive because these
-		 * are being updated.
-		 */
-		if (logical_sector >= mddev->suspend_lo &&
+
+		if (bio_data_dir(bi) == WRITE &&
+		    logical_sector >= mddev->suspend_lo &&
 		    logical_sector < mddev->suspend_hi) {
 			release_stripe(sh);
-			schedule();
+			/* As the suspend_* range is controlled by
+			 * userspace, we want an interruptible
+			 * wait.
+			 */
+			flush_signals(current);
+			prepare_to_wait(&conf->wait_for_overlap,
+					&w, TASK_INTERRUPTIBLE);
+			if (logical_sector >= mddev->suspend_lo &&
+			    logical_sector < mddev->suspend_hi)
+				schedule();
 			goto retry;
 		}
 
@@ -4452,7 +4460,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
 static int run(mddev_t *mddev)
 {
 	raid5_conf_t *conf;
-	int working_disks = 0;
+	int working_disks = 0, chunk_size;
 	mdk_rdev_t *rdev;
 
 	if (mddev->recovery_cp != MaxSector)
@@ -4607,6 +4615,14 @@ static int run(mddev_t *mddev)
 	md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
 
 	blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
+	chunk_size = mddev->chunk_sectors << 9;
+	blk_queue_io_min(mddev->queue, chunk_size);
+	blk_queue_io_opt(mddev->queue, chunk_size *
+			 (conf->raid_disks - conf->max_degraded));
+
+	list_for_each_entry(rdev, &mddev->disks, same_set)
+		disk_stack_limits(mddev->gendisk, rdev->bdev,
+				  rdev->data_offset << 9);
 
 	return 0;
 abort:
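
For raid5 the optimal I/O size advertised above is one full data stripe: chunk size times the data-bearing disks (raid_disks - max_degraded, i.e. excluding parity). Writes of that size and alignment can compute parity without the read-modify-write cycle. Checked standalone with made-up geometry:

#include <stdio.h>

int main(void)
{
	unsigned int chunk = 64 * 1024;		/* 64 KiB chunk */
	int raid_disks = 6, max_degraded = 1;	/* raid5: one parity disk */
	unsigned int io_opt = chunk * (raid_disks - max_degraded);

	printf("io_opt = %u bytes\n", io_opt);	/* 327680: one full data stripe */
	return 0;
}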