Diffstat (limited to 'drivers/md/md.c')
-rw-r--r--	drivers/md/md.c	78
1 file changed, 59 insertions(+), 19 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index b76cfc89e1b5..818313e277e7 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -287,6 +287,7 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
 	mddev_t *mddev = q->queuedata;
 	int rv;
 	int cpu;
+	unsigned int sectors;
 
 	if (mddev == NULL || mddev->pers == NULL
 	    || !mddev->ready) {
@@ -311,12 +312,16 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
 	atomic_inc(&mddev->active_io);
 	rcu_read_unlock();
 
+	/*
+	 * save the sectors now since our bio can
+	 * go away inside make_request
+	 */
+	sectors = bio_sectors(bio);
 	rv = mddev->pers->make_request(mddev, bio);
 
 	cpu = part_stat_lock();
 	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
-	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
-		      bio_sectors(bio));
+	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
 	part_stat_unlock();
 
 	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
@@ -548,6 +553,9 @@ static mddev_t * mddev_find(dev_t unit)
 {
 	mddev_t *mddev, *new = NULL;
 
+	if (unit && MAJOR(unit) != MD_MAJOR)
+		unit &= ~((1<<MdpMinorShift)-1);
+
  retry:
 	spin_lock(&all_mddevs_lock);
 
@@ -1947,8 +1955,6 @@ static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
 			__bdevname(dev, b));
 		return PTR_ERR(bdev);
 	}
-	if (!shared)
-		set_bit(AllReserved, &rdev->flags);
 	rdev->bdev = bdev;
 	return err;
 }
@@ -2465,6 +2471,9 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 		if (rdev->raid_disk != -1)
 			return -EBUSY;
 
+		if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
+			return -EBUSY;
+
 		if (rdev->mddev->pers->hot_add_disk == NULL)
 			return -EINVAL;
 
@@ -2610,12 +2619,11 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 
 		mddev_lock(mddev);
 		list_for_each_entry(rdev2, &mddev->disks, same_set)
-			if (test_bit(AllReserved, &rdev2->flags) ||
-			    (rdev->bdev == rdev2->bdev &&
-			     rdev != rdev2 &&
-			     overlaps(rdev->data_offset, rdev->sectors,
-				      rdev2->data_offset,
-				      rdev2->sectors))) {
+			if (rdev->bdev == rdev2->bdev &&
+			    rdev != rdev2 &&
+			    overlaps(rdev->data_offset, rdev->sectors,
+				     rdev2->data_offset,
+				     rdev2->sectors)) {
 				overlap = 1;
 				break;
 			}
@@ -4133,10 +4141,10 @@ array_size_store(mddev_t *mddev, const char *buf, size_t len)
 	}
 
 	mddev->array_sectors = sectors;
-	set_capacity(mddev->gendisk, mddev->array_sectors);
-	if (mddev->pers)
+	if (mddev->pers) {
+		set_capacity(mddev->gendisk, mddev->array_sectors);
 		revalidate_disk(mddev->gendisk);
-
+	}
 	return len;
 }
 
@@ -4619,6 +4627,7 @@ static int do_md_run(mddev_t *mddev)
 	}
 	set_capacity(mddev->gendisk, mddev->array_sectors);
 	revalidate_disk(mddev->gendisk);
+	mddev->changed = 1;
 	kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
 out:
 	return err;
@@ -4707,6 +4716,7 @@ static void md_clean(mddev_t *mddev)
 	mddev->sync_speed_min = mddev->sync_speed_max = 0;
 	mddev->recovery = 0;
 	mddev->in_sync = 0;
+	mddev->changed = 0;
 	mddev->degraded = 0;
 	mddev->safemode = 0;
 	mddev->bitmap_info.offset = 0;
@@ -4822,6 +4832,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
 
 		set_capacity(disk, 0);
 		mutex_unlock(&mddev->open_mutex);
+		mddev->changed = 1;
 		revalidate_disk(disk);
 
 		if (mddev->ro)
@@ -5578,6 +5589,8 @@ static int update_raid_disks(mddev_t *mddev, int raid_disks)
 	mddev->delta_disks = raid_disks - mddev->raid_disks;
 
 	rv = mddev->pers->check_reshape(mddev);
+	if (rv < 0)
+		mddev->delta_disks = 0;
 	return rv;
 }
 
@@ -6004,7 +6017,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
 	atomic_inc(&mddev->openers);
 	mutex_unlock(&mddev->open_mutex);
 
-	check_disk_size_change(mddev->gendisk, bdev);
+	check_disk_change(bdev);
  out:
 	return err;
 }
@@ -6019,6 +6032,21 @@ static int md_release(struct gendisk *disk, fmode_t mode)
 
 	return 0;
 }
+
+static int md_media_changed(struct gendisk *disk)
+{
+	mddev_t *mddev = disk->private_data;
+
+	return mddev->changed;
+}
+
+static int md_revalidate(struct gendisk *disk)
+{
+	mddev_t *mddev = disk->private_data;
+
+	mddev->changed = 0;
+	return 0;
+}
 static const struct block_device_operations md_fops =
 {
 	.owner		= THIS_MODULE,
@@ -6029,6 +6057,8 @@ static const struct block_device_operations md_fops =
 	.compat_ioctl	= md_compat_ioctl,
 #endif
 	.getgeo		= md_getgeo,
+	.media_changed  = md_media_changed,
+	.revalidate_disk= md_revalidate,
 };
 
 static int md_thread(void * arg)
@@ -6985,9 +7015,6 @@ void md_do_sync(mddev_t *mddev)
 	} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
 		mddev->resync_min = mddev->curr_resync_completed;
 	mddev->curr_resync = 0;
-	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
-		mddev->curr_resync_completed = 0;
-	sysfs_notify(&mddev->kobj, NULL, "sync_completed");
 	wake_up(&resync_wait);
 	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
 	md_wakeup_thread(mddev->thread);
@@ -7028,7 +7055,7 @@ static int remove_and_add_spares(mddev_t *mddev)
 		}
 	}
 
-	if (mddev->degraded && ! mddev->ro && !mddev->recovery_disabled) {
+	if (mddev->degraded && !mddev->recovery_disabled) {
 		list_for_each_entry(rdev, &mddev->disks, same_set) {
 			if (rdev->raid_disk >= 0 &&
 			    !test_bit(In_sync, &rdev->flags) &&
@@ -7151,7 +7178,20 @@ void md_check_recovery(mddev_t *mddev)
 		/* Only thing we do on a ro array is remove
 		 * failed devices.
 		 */
-		remove_and_add_spares(mddev);
+		mdk_rdev_t *rdev;
+		list_for_each_entry(rdev, &mddev->disks, same_set)
+			if (rdev->raid_disk >= 0 &&
+			    !test_bit(Blocked, &rdev->flags) &&
+			    test_bit(Faulty, &rdev->flags) &&
+			    atomic_read(&rdev->nr_pending)==0) {
+				if (mddev->pers->hot_remove_disk(
+					    mddev, rdev->raid_disk)==0) {
+					char nm[20];
+					sprintf(nm,"rd%d", rdev->raid_disk);
+					sysfs_remove_link(&mddev->kobj, nm);
+					rdev->raid_disk = -1;
+				}
+			}
 		clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 		goto unlock;
 	}