Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/linear.c     |  1
-rw-r--r--  drivers/md/md.c         | 78
-rw-r--r--  drivers/md/md.h         |  4
-rw-r--r--  drivers/md/multipath.c  |  1
-rw-r--r--  drivers/md/raid0.c      | 42
-rw-r--r--  drivers/md/raid1.c      |  6
-rw-r--r--  drivers/md/raid10.c     | 13
-rw-r--r--  drivers/md/raid5.c      | 61
8 files changed, 144 insertions(+), 62 deletions(-)
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 8a2f767f26d8..0ed7f6bc2a7f 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -216,7 +216,6 @@ static int linear_run (mddev_t *mddev)
 
 	if (md_check_no_bitmap(mddev))
 		return -EINVAL;
-	mddev->queue->queue_lock = &mddev->queue->__queue_lock;
 	conf = linear_conf(mddev, mddev->raid_disks);
 
 	if (!conf)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index b76cfc89e1b5..818313e277e7 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -287,6 +287,7 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
 	mddev_t *mddev = q->queuedata;
 	int rv;
 	int cpu;
+	unsigned int sectors;
 
 	if (mddev == NULL || mddev->pers == NULL
 	    || !mddev->ready) {
@@ -311,12 +312,16 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
 	atomic_inc(&mddev->active_io);
 	rcu_read_unlock();
 
+	/*
+	 * save the sectors now since our bio can
+	 * go away inside make_request
+	 */
+	sectors = bio_sectors(bio);
 	rv = mddev->pers->make_request(mddev, bio);
 
 	cpu = part_stat_lock();
 	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
-	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
-		      bio_sectors(bio));
+	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
 	part_stat_unlock();
 
 	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
@@ -548,6 +553,9 @@ static mddev_t * mddev_find(dev_t unit)
 {
 	mddev_t *mddev, *new = NULL;
 
+	if (unit && MAJOR(unit) != MD_MAJOR)
+		unit &= ~((1<<MdpMinorShift)-1);
+
  retry:
 	spin_lock(&all_mddevs_lock);
 
@@ -1947,8 +1955,6 @@ static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
 			__bdevname(dev, b));
 		return PTR_ERR(bdev);
 	}
-	if (!shared)
-		set_bit(AllReserved, &rdev->flags);
 	rdev->bdev = bdev;
 	return err;
 }
@@ -2465,6 +2471,9 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 		if (rdev->raid_disk != -1)
 			return -EBUSY;
 
+		if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
+			return -EBUSY;
+
 		if (rdev->mddev->pers->hot_add_disk == NULL)
 			return -EINVAL;
 
@@ -2610,12 +2619,11 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 
 			mddev_lock(mddev);
 			list_for_each_entry(rdev2, &mddev->disks, same_set)
-				if (test_bit(AllReserved, &rdev2->flags) ||
-				    (rdev->bdev == rdev2->bdev &&
-				     rdev != rdev2 &&
-				     overlaps(rdev->data_offset, rdev->sectors,
-					      rdev2->data_offset,
-					      rdev2->sectors))) {
+				if (rdev->bdev == rdev2->bdev &&
+				    rdev != rdev2 &&
+				    overlaps(rdev->data_offset, rdev->sectors,
+					     rdev2->data_offset,
+					     rdev2->sectors)) {
 					overlap = 1;
 					break;
 				}
@@ -4133,10 +4141,10 @@ array_size_store(mddev_t *mddev, const char *buf, size_t len)
 	}
 
 	mddev->array_sectors = sectors;
-	set_capacity(mddev->gendisk, mddev->array_sectors);
-	if (mddev->pers)
+	if (mddev->pers) {
+		set_capacity(mddev->gendisk, mddev->array_sectors);
 		revalidate_disk(mddev->gendisk);
-
+	}
 	return len;
 }
 
@@ -4619,6 +4627,7 @@ static int do_md_run(mddev_t *mddev)
 	}
 	set_capacity(mddev->gendisk, mddev->array_sectors);
 	revalidate_disk(mddev->gendisk);
+	mddev->changed = 1;
 	kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
 out:
 	return err;
@@ -4707,6 +4716,7 @@ static void md_clean(mddev_t *mddev)
 	mddev->sync_speed_min = mddev->sync_speed_max = 0;
 	mddev->recovery = 0;
 	mddev->in_sync = 0;
+	mddev->changed = 0;
 	mddev->degraded = 0;
 	mddev->safemode = 0;
 	mddev->bitmap_info.offset = 0;
@@ -4822,6 +4832,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
 
 		set_capacity(disk, 0);
 		mutex_unlock(&mddev->open_mutex);
+		mddev->changed = 1;
 		revalidate_disk(disk);
 
 		if (mddev->ro)
@@ -5578,6 +5589,8 @@ static int update_raid_disks(mddev_t *mddev, int raid_disks)
 	mddev->delta_disks = raid_disks - mddev->raid_disks;
 
 	rv = mddev->pers->check_reshape(mddev);
+	if (rv < 0)
+		mddev->delta_disks = 0;
 	return rv;
 }
 
@@ -6004,7 +6017,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
 	atomic_inc(&mddev->openers);
 	mutex_unlock(&mddev->open_mutex);
 
-	check_disk_size_change(mddev->gendisk, bdev);
+	check_disk_change(bdev);
  out:
 	return err;
 }
@@ -6019,6 +6032,21 @@ static int md_release(struct gendisk *disk, fmode_t mode)
 
 	return 0;
 }
+
+static int md_media_changed(struct gendisk *disk)
+{
+	mddev_t *mddev = disk->private_data;
+
+	return mddev->changed;
+}
+
+static int md_revalidate(struct gendisk *disk)
+{
+	mddev_t *mddev = disk->private_data;
+
+	mddev->changed = 0;
+	return 0;
+}
 static const struct block_device_operations md_fops =
 {
 	.owner		= THIS_MODULE,
@@ -6029,6 +6057,8 @@ static const struct block_device_operations md_fops =
 	.compat_ioctl	= md_compat_ioctl,
 #endif
 	.getgeo		= md_getgeo,
+	.media_changed	= md_media_changed,
+	.revalidate_disk= md_revalidate,
 };
 
 static int md_thread(void * arg)
@@ -6985,9 +7015,6 @@ void md_do_sync(mddev_t *mddev)
 	} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
 		mddev->resync_min = mddev->curr_resync_completed;
 	mddev->curr_resync = 0;
-	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
-		mddev->curr_resync_completed = 0;
-	sysfs_notify(&mddev->kobj, NULL, "sync_completed");
 	wake_up(&resync_wait);
 	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
 	md_wakeup_thread(mddev->thread);
@@ -7028,7 +7055,7 @@ static int remove_and_add_spares(mddev_t *mddev)
 		}
 	}
 
-	if (mddev->degraded && ! mddev->ro && !mddev->recovery_disabled) {
+	if (mddev->degraded && !mddev->recovery_disabled) {
 		list_for_each_entry(rdev, &mddev->disks, same_set) {
 			if (rdev->raid_disk >= 0 &&
 			    !test_bit(In_sync, &rdev->flags) &&
@@ -7151,7 +7178,20 @@ void md_check_recovery(mddev_t *mddev)
 			/* Only thing we do on a ro array is remove
 			 * failed devices.
 			 */
-			remove_and_add_spares(mddev);
+			mdk_rdev_t *rdev;
+			list_for_each_entry(rdev, &mddev->disks, same_set)
+				if (rdev->raid_disk >= 0 &&
+				    !test_bit(Blocked, &rdev->flags) &&
+				    test_bit(Faulty, &rdev->flags) &&
+				    atomic_read(&rdev->nr_pending)==0) {
+					if (mddev->pers->hot_remove_disk(
+						    mddev, rdev->raid_disk)==0) {
+						char nm[20];
+						sprintf(nm,"rd%d", rdev->raid_disk);
+						sysfs_remove_link(&mddev->kobj, nm);
+						rdev->raid_disk = -1;
+					}
+				}
 			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 			goto unlock;
 		}
diff --git a/drivers/md/md.h b/drivers/md/md.h
index eec517ced31a..12215d437fcc 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -93,8 +93,6 @@ struct mdk_rdev_s
93#define Faulty 1 /* device is known to have a fault */ 93#define Faulty 1 /* device is known to have a fault */
94#define In_sync 2 /* device is in_sync with rest of array */ 94#define In_sync 2 /* device is in_sync with rest of array */
95#define WriteMostly 4 /* Avoid reading if at all possible */ 95#define WriteMostly 4 /* Avoid reading if at all possible */
96#define AllReserved 6 /* If whole device is reserved for
97 * one array */
98#define AutoDetected 7 /* added by auto-detect */ 96#define AutoDetected 7 /* added by auto-detect */
99#define Blocked 8 /* An error occured on an externally 97#define Blocked 8 /* An error occured on an externally
100 * managed array, don't allow writes 98 * managed array, don't allow writes
@@ -276,6 +274,8 @@ struct mddev_s
 	atomic_t		active;		/* general refcount */
 	atomic_t		openers;	/* number of active opens */
 
+	int			changed;	/* True if we might need to
+						 * reread partition info */
 	int			degraded;	/* whether md should consider
 						 * adding a spare
 						 */
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 6d7ddf32ef2e..3a62d440e27b 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -435,7 +435,6 @@ static int multipath_run (mddev_t *mddev)
 	 * bookkeeping area. [whatever we allocate in multipath_run(),
 	 * should be freed in multipath_stop()]
 	 */
-	mddev->queue->queue_lock = &mddev->queue->__queue_lock;
 
 	conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
 	mddev->private = conf;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index a39f4c355e55..c0ac457f1218 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -179,6 +179,14 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
 			rdev1->new_raid_disk = j;
 		}
 
+		if (mddev->level == 1) {
+			/* taking over a raid1 array -
+			 * we have only one active disk
+			 */
+			j = 0;
+			rdev1->new_raid_disk = j;
+		}
+
 		if (j < 0 || j >= mddev->raid_disks) {
 			printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
 				"aborting!\n", mdname(mddev), j);
@@ -353,7 +361,6 @@ static int raid0_run(mddev_t *mddev)
 	if (md_check_no_bitmap(mddev))
 		return -EINVAL;
 	blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
-	mddev->queue->queue_lock = &mddev->queue->__queue_lock;
 
 	/* if private is not null, we are here after takeover */
 	if (mddev->private == NULL) {
@@ -644,12 +651,39 @@ static void *raid0_takeover_raid10(mddev_t *mddev)
 	return priv_conf;
 }
 
+static void *raid0_takeover_raid1(mddev_t *mddev)
+{
+	raid0_conf_t *priv_conf;
+
+	/* Check layout:
+	 *  - (N - 1) mirror drives must be already faulty
+	 */
+	if ((mddev->raid_disks - 1) != mddev->degraded) {
+		printk(KERN_ERR "md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
+		       mdname(mddev));
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Set new parameters */
+	mddev->new_level = 0;
+	mddev->new_layout = 0;
+	mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */
+	mddev->delta_disks = 1 - mddev->raid_disks;
+	mddev->raid_disks = 1;
+	/* make sure it will not be marked as dirty */
+	mddev->recovery_cp = MaxSector;
+
+	create_strip_zones(mddev, &priv_conf);
+	return priv_conf;
+}
+
 static void *raid0_takeover(mddev_t *mddev)
 {
 	/* raid0 can take over:
 	 *  raid4 - if all data disks are active.
 	 *  raid5 - providing it is Raid4 layout and one disk is faulty
 	 *  raid10 - assuming we have all necessary active disks
+	 *  raid1 - with (N - 1) mirror drives faulty
 	 */
 	if (mddev->level == 4)
 		return raid0_takeover_raid45(mddev);
@@ -665,6 +699,12 @@ static void *raid0_takeover(mddev_t *mddev)
 	if (mddev->level == 10)
 		return raid0_takeover_raid10(mddev);
 
+	if (mddev->level == 1)
+		return raid0_takeover_raid1(mddev);
+
+	printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n",
+	       mddev->level);
+
 	return ERR_PTR(-EINVAL);
 }
 
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a23ffa397ba9..06cd712807d0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -593,7 +593,10 @@ static int flush_pending_writes(conf_t *conf)
593 if (conf->pending_bio_list.head) { 593 if (conf->pending_bio_list.head) {
594 struct bio *bio; 594 struct bio *bio;
595 bio = bio_list_get(&conf->pending_bio_list); 595 bio = bio_list_get(&conf->pending_bio_list);
596 /* Only take the spinlock to quiet a warning */
597 spin_lock(conf->mddev->queue->queue_lock);
596 blk_remove_plug(conf->mddev->queue); 598 blk_remove_plug(conf->mddev->queue);
599 spin_unlock(conf->mddev->queue->queue_lock);
597 spin_unlock_irq(&conf->device_lock); 600 spin_unlock_irq(&conf->device_lock);
598 /* flush any pending bitmap writes to 601 /* flush any pending bitmap writes to
599 * disk before proceeding w/ I/O */ 602 * disk before proceeding w/ I/O */
@@ -959,7 +962,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		atomic_inc(&r1_bio->remaining);
 		spin_lock_irqsave(&conf->device_lock, flags);
 		bio_list_add(&conf->pending_bio_list, mbio);
-		blk_plug_device(mddev->queue);
+		blk_plug_device_unlocked(mddev->queue);
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 	}
 	r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
@@ -2021,7 +2024,6 @@ static int run(mddev_t *mddev)
 	if (IS_ERR(conf))
 		return PTR_ERR(conf);
 
-	mddev->queue->queue_lock = &conf->device_lock;
 	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		disk_stack_limits(mddev->gendisk, rdev->bdev,
 				  rdev->data_offset << 9);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 69b659544390..747d061d8e05 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -662,7 +662,10 @@ static int flush_pending_writes(conf_t *conf)
 	if (conf->pending_bio_list.head) {
 		struct bio *bio;
 		bio = bio_list_get(&conf->pending_bio_list);
+		/* Spinlock only taken to quiet a warning */
+		spin_lock(conf->mddev->queue->queue_lock);
 		blk_remove_plug(conf->mddev->queue);
+		spin_unlock(conf->mddev->queue->queue_lock);
 		spin_unlock_irq(&conf->device_lock);
 		/* flush any pending bitmap writes to disk
 		 * before proceeding w/ I/O */
@@ -971,7 +974,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		atomic_inc(&r10_bio->remaining);
 		spin_lock_irqsave(&conf->device_lock, flags);
 		bio_list_add(&conf->pending_bio_list, mbio);
-		blk_plug_device(mddev->queue);
+		blk_plug_device_unlocked(mddev->queue);
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 	}
 
@@ -2304,8 +2307,6 @@ static int run(mddev_t *mddev)
 	if (!conf)
 		goto out;
 
-	mddev->queue->queue_lock = &conf->device_lock;
-
 	mddev->thread = conf->thread;
 	conf->thread = NULL;
 
@@ -2463,11 +2464,13 @@ static void *raid10_takeover_raid0(mddev_t *mddev)
 	mddev->recovery_cp = MaxSector;
 
 	conf = setup_conf(mddev);
-	if (!IS_ERR(conf))
+	if (!IS_ERR(conf)) {
 		list_for_each_entry(rdev, &mddev->disks, same_set)
 			if (rdev->raid_disk >= 0)
 				rdev->new_raid_disk = rdev->raid_disk * 2;
-
+		conf->barrier = 1;
+	}
+
 	return conf;
 }
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 5044babfcda0..78536fdbd87f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5204,7 +5204,6 @@ static int run(mddev_t *mddev)
 
 		mddev->queue->backing_dev_info.congested_data = mddev;
 		mddev->queue->backing_dev_info.congested_fn = raid5_congested;
-		mddev->queue->queue_lock = &conf->device_lock;
 		mddev->queue->unplug_fn = raid5_unplug_queue;
 
 		chunk_size = mddev->chunk_sectors << 9;
@@ -5517,7 +5516,6 @@ static int raid5_start_reshape(mddev_t *mddev)
 	raid5_conf_t *conf = mddev->private;
 	mdk_rdev_t *rdev;
 	int spares = 0;
-	int added_devices = 0;
 	unsigned long flags;
 
 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
@@ -5527,8 +5525,8 @@ static int raid5_start_reshape(mddev_t *mddev)
 		return -ENOSPC;
 
 	list_for_each_entry(rdev, &mddev->disks, same_set)
-		if ((rdev->raid_disk < 0 || rdev->raid_disk >= conf->raid_disks)
-		     && !test_bit(Faulty, &rdev->flags))
+		if (!test_bit(In_sync, &rdev->flags)
+		    && !test_bit(Faulty, &rdev->flags))
 			spares++;
 
 	if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
@@ -5571,34 +5569,35 @@ static int raid5_start_reshape(mddev_t *mddev)
 	 * to correctly record the "partially reconstructed" state of
 	 * such devices during the reshape and confusion could result.
 	 */
-	if (mddev->delta_disks >= 0)
-		list_for_each_entry(rdev, &mddev->disks, same_set)
-			if (rdev->raid_disk < 0 &&
-			    !test_bit(Faulty, &rdev->flags)) {
-				if (raid5_add_disk(mddev, rdev) == 0) {
-					char nm[20];
-					if (rdev->raid_disk >= conf->previous_raid_disks) {
-						set_bit(In_sync, &rdev->flags);
-						added_devices++;
-					} else
-						rdev->recovery_offset = 0;
-					sprintf(nm, "rd%d", rdev->raid_disk);
-					if (sysfs_create_link(&mddev->kobj,
-							      &rdev->kobj, nm))
-						/* Failure here is OK */;
-				} else
-					break;
-			} else if (rdev->raid_disk >= conf->previous_raid_disks
-				   && !test_bit(Faulty, &rdev->flags)) {
-				/* This is a spare that was manually added */
-				set_bit(In_sync, &rdev->flags);
-				added_devices++;
-			}
-
-	/* When a reshape changes the number of devices, ->degraded
-	 * is measured against the larger of the pre and post number of
-	 * devices.*/
-	if (mddev->delta_disks > 0) {
+	if (mddev->delta_disks >= 0) {
+		int added_devices = 0;
+		list_for_each_entry(rdev, &mddev->disks, same_set)
+			if (rdev->raid_disk < 0 &&
+			    !test_bit(Faulty, &rdev->flags)) {
+				if (raid5_add_disk(mddev, rdev) == 0) {
+					char nm[20];
+					if (rdev->raid_disk
+					    >= conf->previous_raid_disks) {
+						set_bit(In_sync, &rdev->flags);
+						added_devices++;
+					} else
+						rdev->recovery_offset = 0;
+					sprintf(nm, "rd%d", rdev->raid_disk);
+					if (sysfs_create_link(&mddev->kobj,
+							      &rdev->kobj, nm))
+						/* Failure here is OK */;
+				}
+			} else if (rdev->raid_disk >= conf->previous_raid_disks
+				   && !test_bit(Faulty, &rdev->flags)) {
+				/* This is a spare that was manually added */
+				set_bit(In_sync, &rdev->flags);
+				added_devices++;
+			}
+
+		/* When a reshape changes the number of devices,
+		 * ->degraded is measured against the larger of the
+		 * pre and post number of devices.
+		 */
 		spin_lock_irqsave(&conf->device_lock, flags);
 		mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
 			- added_devices;