Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-mpath.c  |  8
-rw-r--r--  drivers/md/dm-raid1.c  |  4
-rw-r--r--  drivers/md/dm-snap.c   | 32
-rw-r--r--  drivers/md/dm-snap.h   |  2
-rw-r--r--  drivers/md/dm-stripe.c |  4
-rw-r--r--  drivers/md/dm-table.c  |  2
-rw-r--r--  drivers/md/dm.c        | 34
-rw-r--r--  drivers/md/linear.c    |  2
-rw-r--r--  drivers/md/md.c        | 57
-rw-r--r--  drivers/md/raid10.c    |  2
10 files changed, 97 insertions, 50 deletions
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 4840733cd903..3d7f4923cd13 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -441,13 +441,13 @@ static void process_queued_ios(struct work_struct *work)
 	__choose_pgpath(m);
 
 	pgpath = m->current_pgpath;
-	m->pgpath_to_activate = m->current_pgpath;
 
 	if ((pgpath && !m->queue_io) ||
 	    (!pgpath && !m->queue_if_no_path))
 		must_queue = 0;
 
-	if (m->pg_init_required && !m->pg_init_in_progress) {
+	if (m->pg_init_required && !m->pg_init_in_progress && pgpath) {
+		m->pgpath_to_activate = pgpath;
 		m->pg_init_count++;
 		m->pg_init_required = 0;
 		m->pg_init_in_progress = 1;
@@ -708,6 +708,10 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m)
 		m->hw_handler_name = NULL;
 		return -EINVAL;
 	}
+
+	if (hw_argc > 1)
+		DMWARN("Ignoring user-specified arguments for "
+		       "hardware handler \"%s\"", m->hw_handler_name);
 	consume(as, hw_argc - 1);
 
 	return 0;
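
Two independent fixes above. The process_queued_ios() hunk stops recording a path for activation unconditionally: m->pgpath_to_activate is now set only at the point a pg_init is actually committed, and only when the chosen pgpath is non-NULL, so the activation worker can no longer be handed a NULL path. The parse_hw_handler() hunk is diagnostic only; extra handler arguments were already discarded by consume(), and the new DMWARN says so. A minimal userspace sketch of the commit-point pattern, with hypothetical stand-in names (mpath_state, commit_pg_init) and a pthread mutex in place of the driver's spinlock:

#include <pthread.h>

struct path;

struct mpath_state {
	pthread_mutex_t lock;
	struct path *current_path;	/* may be NULL */
	struct path *path_to_activate;
	int pg_init_required;
	int pg_init_in_progress;
};

/* Returns 1 if a pg_init was committed and a worker should be queued. */
static int commit_pg_init(struct mpath_state *m)
{
	int queued = 0;

	pthread_mutex_lock(&m->lock);
	/* Pre-fix shape: path_to_activate was assigned unconditionally,
	 * before this test, so a NULL or stale path could reach the
	 * activation worker.  Post-fix: record it only here, under the
	 * same conditions that commit the pg_init. */
	if (m->pg_init_required && !m->pg_init_in_progress && m->current_path) {
		m->path_to_activate = m->current_path;
		m->pg_init_required = 0;
		m->pg_init_in_progress = 1;
		queued = 1;
	}
	pthread_mutex_unlock(&m->lock);

	return queued;
}
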
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 92dcc06832a4..ec43f9fa4b2a 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -656,9 +656,10 @@ static void do_failures(struct mirror_set *ms, struct bio_list *failures)
 		return;
 
 	if (!ms->log_failure) {
-		while ((bio = bio_list_pop(failures)))
+		while ((bio = bio_list_pop(failures))) {
 			ms->in_sync = 0;
 			dm_rh_mark_nosync(ms->rh, bio, bio->bi_size, 0);
+		}
 		return;
 	}
 
@@ -1031,6 +1032,7 @@ static void mirror_dtr(struct dm_target *ti)
 
 	del_timer_sync(&ms->timer);
 	flush_workqueue(ms->kmirrord_wq);
+	flush_scheduled_work();
 	dm_kcopyd_client_destroy(ms->kcopyd_client);
 	destroy_workqueue(ms->kmirrord_wq);
 	free_context(ms, ti, ms->nr_mirrors);
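
The do_failures() hunk fixes a missing-braces bug: without the braces, only ms->in_sync = 0 was the loop body, and dm_rh_mark_nosync() ran once, after bio_list_pop() had already returned NULL. The mirror_dtr() hunk additionally flushes globally scheduled work before the kcopyd client is torn down, so nothing left on the shared workqueue can touch freed state. A standalone illustration of the brace bug's shape (hypothetical names, not the driver's code):

#include <stdio.h>

int main(void)
{
	int remaining = 3;
	int last_popped = -1;

	/* Buggy shape: the misleading indentation hides that only the
	 * first statement is the loop body; the printf runs once. */
	while (remaining > 0)
		last_popped = remaining--;
		printf("after the loop: last_popped=%d\n", last_popped);

	/* Fixed shape: braces make both statements the body. */
	remaining = 3;
	while (remaining > 0) {
		last_popped = remaining--;
		printf("each iteration: last_popped=%d\n", last_popped);
	}
	return 0;
}
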
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index b2d9d1ac28ad..6c96db26b87c 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -229,19 +229,21 @@ static void __insert_origin(struct origin *o)
  */
 static int register_snapshot(struct dm_snapshot *snap)
 {
-	struct origin *o;
+	struct origin *o, *new_o;
 	struct block_device *bdev = snap->origin->bdev;
 
+	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
+	if (!new_o)
+		return -ENOMEM;
+
 	down_write(&_origins_lock);
 	o = __lookup_origin(bdev);
 
-	if (!o) {
+	if (o)
+		kfree(new_o);
+	else {
 		/* New origin */
-		o = kmalloc(sizeof(*o), GFP_KERNEL);
-		if (!o) {
-			up_write(&_origins_lock);
-			return -ENOMEM;
-		}
+		o = new_o;
 
 		/* Initialise the struct */
 		INIT_LIST_HEAD(&o->snapshots);
@@ -368,6 +370,7 @@ static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snaps
 	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
 							     GFP_NOIO);
 
+	atomic_inc(&s->pending_exceptions_count);
 	pe->snap = s;
 
 	return pe;
@@ -375,7 +378,11 @@ static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snaps
 
 static void free_pending_exception(struct dm_snap_pending_exception *pe)
 {
-	mempool_free(pe, pe->snap->pending_pool);
+	struct dm_snapshot *s = pe->snap;
+
+	mempool_free(pe, s->pending_pool);
+	smp_mb__before_atomic_dec();
+	atomic_dec(&s->pending_exceptions_count);
 }
 
 static void insert_completed_exception(struct dm_snapshot *s,
@@ -600,6 +607,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 	s->valid = 1;
 	s->active = 0;
+	atomic_set(&s->pending_exceptions_count, 0);
 	init_rwsem(&s->lock);
 	spin_lock_init(&s->pe_lock);
 	s->ti = ti;
@@ -726,6 +734,14 @@ static void snapshot_dtr(struct dm_target *ti)
 	/* After this returns there can be no new kcopyd jobs. */
 	unregister_snapshot(s);
 
+	while (atomic_read(&s->pending_exceptions_count))
+		yield();
+	/*
+	 * Ensure instructions in mempool_destroy aren't reordered
+	 * before atomic_read.
+	 */
+	smp_mb();
+
 #ifdef CONFIG_DM_DEBUG
 	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
 		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
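
Two patterns above. register_snapshot() now allocates its candidate origin before taking _origins_lock and simply frees it when the lookup finds an existing one, eliminating the error path inside the write-locked region. snapshot_dtr() gains a counted drain: alloc_pending_exception() increments pending_exceptions_count, free_pending_exception() decrements it behind a barrier, and the destructor spins until the count hits zero before the mempool behind the exceptions is destroyed. A compilable userspace sketch of the drain, using C11 seq_cst atomics where the kernel needs explicit smp_mb*() calls:

#include <sched.h>
#include <stdatomic.h>

static atomic_int pending_exceptions_count;

static void alloc_pending(void)
{
	atomic_fetch_add(&pending_exceptions_count, 1);
	/* ... hand the exception to asynchronous completion ... */
}

static void free_pending(void)
{
	/* ... return the exception to its pool first ... */
	atomic_fetch_sub(&pending_exceptions_count, 1);
}

static void destroy(void)
{
	/* No new exceptions can be created past this point; wait for
	 * every in-flight one to be freed before tearing down the
	 * pool they came from. */
	while (atomic_load(&pending_exceptions_count))
		sched_yield();
	/* ... now safe to destroy the mempool analogue ... */
}
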
diff --git a/drivers/md/dm-snap.h b/drivers/md/dm-snap.h
index f07315fe2362..99c0106ede2d 100644
--- a/drivers/md/dm-snap.h
+++ b/drivers/md/dm-snap.h
@@ -160,6 +160,8 @@ struct dm_snapshot {
 
 	mempool_t *pending_pool;
 
+	atomic_t pending_exceptions_count;
+
 	struct exception_table pending;
 	struct exception_table complete;
 
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index a2d068dbe9e2..9e4ef88d421e 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -320,8 +320,10 @@ int __init dm_stripe_init(void)
 	int r;
 
 	r = dm_register_target(&stripe_target);
-	if (r < 0)
+	if (r < 0) {
 		DMWARN("target registration failed");
+		return r;
+	}
 
 	kstriped = create_singlethread_workqueue("kstriped");
 	if (!kstriped) {
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index a63161aec487..04e5fd742c2c 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -668,7 +668,7 @@ static void check_for_valid_limits(struct io_restrictions *rs)
 	if (!rs->max_segment_size)
 		rs->max_segment_size = MAX_SEGMENT_SIZE;
 	if (!rs->seg_boundary_mask)
-		rs->seg_boundary_mask = -1;
+		rs->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
 	if (!rs->bounce_pfn)
 		rs->bounce_pfn = -1;
 }
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6963ad148408..c99e4728ff41 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -375,7 +375,7 @@ static void start_io_acct(struct dm_io *io)
 	dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
 }
 
-static int end_io_acct(struct dm_io *io)
+static void end_io_acct(struct dm_io *io)
 {
 	struct mapped_device *md = io->md;
 	struct bio *bio = io->bio;
@@ -391,7 +391,9 @@ static int end_io_acct(struct dm_io *io)
 	dm_disk(md)->part0.in_flight = pending =
 		atomic_dec_return(&md->pending);
 
-	return !pending;
+	/* nudge anyone waiting on suspend queue */
+	if (!pending)
+		wake_up(&md->wait);
 }
 
 /*
@@ -499,9 +501,7 @@ static void dec_pending(struct dm_io *io, int error)
 		spin_unlock_irqrestore(&io->md->pushback_lock, flags);
 	}
 
-	if (end_io_acct(io))
-		/* nudge anyone waiting on suspend queue */
-		wake_up(&io->md->wait);
+	end_io_acct(io);
 
 	if (io->error != DM_ENDIO_REQUEUE) {
 		blk_add_trace_bio(io->md->queue, io->bio,
@@ -937,16 +937,24 @@ static void dm_unplug_all(struct request_queue *q)
 
 static int dm_any_congested(void *congested_data, int bdi_bits)
 {
-	int r;
-	struct mapped_device *md = (struct mapped_device *) congested_data;
-	struct dm_table *map = dm_get_table(md);
+	int r = bdi_bits;
+	struct mapped_device *md = congested_data;
+	struct dm_table *map;
 
-	if (!map || test_bit(DMF_BLOCK_IO, &md->flags))
-		r = bdi_bits;
-	else
-		r = dm_table_any_congested(map, bdi_bits);
+	atomic_inc(&md->pending);
+
+	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
+		map = dm_get_table(md);
+		if (map) {
+			r = dm_table_any_congested(map, bdi_bits);
+			dm_table_put(map);
+		}
+	}
+
+	if (!atomic_dec_return(&md->pending))
+		/* nudge anyone waiting on suspend queue */
+		wake_up(&md->wait);
 
-	dm_table_put(map);
 	return r;
 }
 
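
Both dm.c changes serve suspend. end_io_acct() now performs the wake-up itself instead of returning a flag for dec_pending() to act on, and dm_any_congested() brackets its table lookup with the same md->pending counter used for in-flight I/O, so a concurrent dm_suspend() (which waits for pending to drain) cannot release the table mid-query. A self-contained userspace sketch of that bracket, with stand-in names and C11 atomics replacing the kernel's atomic_t:

#include <stdatomic.h>

struct table { int congested_bits; };

static atomic_int pending;		/* in-flight ops, watched by suspend */
static int blocked;			/* stand-in for DMF_BLOCK_IO */
static struct table the_table;		/* stand-in for the live dm_table */

static struct table *get_table(void) { return &the_table; }
static void put_table(struct table *t) { (void)t; }
static void wake_suspend_waiters(void) { /* wake_up(&md->wait) analogue */ }

static int any_congested(int bdi_bits)
{
	int r = bdi_bits;		/* report "congested" while blocked */
	struct table *map;

	atomic_fetch_add(&pending, 1);

	if (!blocked) {
		map = get_table();
		if (map) {
			r = map->congested_bits & bdi_bits;
			put_table(map);
		}
	}

	/* Mirror I/O completion: the last reference out wakes suspend. */
	if (atomic_fetch_sub(&pending, 1) == 1)
		wake_suspend_waiters();

	return r;
}
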
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 190147c79e79..3b90c5c924ec 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -148,6 +148,8 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
 
 	min_sectors = conf->array_sectors;
 	sector_div(min_sectors, PAGE_SIZE/sizeof(struct dev_info *));
+	if (min_sectors == 0)
+		min_sectors = 1;
 
 	/* min_sectors is the minimum spacing that will fit the hash
 	 * table in one PAGE. This may be much smaller than needed.
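
The guard matters for arrays smaller than one hash stride: sector_div() rounds down, so a sufficiently small array leaves min_sectors at zero, and the spacing derived from it would later be used as a divisor. The clamp in isolation, with plain integers standing in for sector_t:

#include <assert.h>

int main(void)
{
	unsigned long long array_sectors = 100;	 /* tiny array */
	unsigned long long ptrs_per_page = 512;	 /* PAGE_SIZE / sizeof(ptr) */

	unsigned long long min_spacing = array_sectors / ptrs_per_page;
	if (min_spacing == 0)			 /* the added guard */
		min_spacing = 1;

	assert(array_sectors / min_spacing > 0); /* no division by zero */
	return 0;
}
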
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c1a837ca193c..1b1d32694f6f 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -222,6 +222,9 @@ static void mddev_put(mddev_t *mddev)
 		list_del(&mddev->all_mddevs);
 		spin_unlock(&all_mddevs_lock);
 		blk_cleanup_queue(mddev->queue);
+		if (mddev->sysfs_state)
+			sysfs_put(mddev->sysfs_state);
+		mddev->sysfs_state = NULL;
 		kobject_put(&mddev->kobj);
 	} else
 		spin_unlock(&all_mddevs_lock);
@@ -1459,6 +1462,8 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
 		kobject_del(&rdev->kobj);
 		goto fail;
 	}
+	rdev->sysfs_state = sysfs_get_dirent(rdev->kobj.sd, "state");
+
 	list_add_rcu(&rdev->same_set, &mddev->disks);
 	bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);
 	return 0;
@@ -1488,7 +1493,8 @@ static void unbind_rdev_from_array(mdk_rdev_t * rdev)
 	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
 	rdev->mddev = NULL;
 	sysfs_remove_link(&rdev->kobj, "block");
-
+	sysfs_put(rdev->sysfs_state);
+	rdev->sysfs_state = NULL;
 	/* We need to delay this, otherwise we can deadlock when
 	 * writing to 'remove' to "dev/state". We also need
 	 * to delay it due to rcu usage.
@@ -1923,8 +1929,8 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 
 		err = 0;
 	}
-	if (!err)
-		sysfs_notify(&rdev->kobj, NULL, "state");
+	if (!err && rdev->sysfs_state)
+		sysfs_notify_dirent(rdev->sysfs_state);
 	return err ? err : len;
 }
 static struct rdev_sysfs_entry rdev_state =
@@ -2019,7 +2025,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 		rdev->raid_disk = -1;
 		return err;
 	} else
-		sysfs_notify(&rdev->kobj, NULL, "state");
+		sysfs_notify_dirent(rdev->sysfs_state);
 	sprintf(nm, "rd%d", rdev->raid_disk);
 	if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
 		printk(KERN_WARNING
@@ -2036,7 +2042,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 		clear_bit(Faulty, &rdev->flags);
 		clear_bit(WriteMostly, &rdev->flags);
 		set_bit(In_sync, &rdev->flags);
-		sysfs_notify(&rdev->kobj, NULL, "state");
+		sysfs_notify_dirent(rdev->sysfs_state);
 	}
 	return len;
 }
@@ -2770,7 +2776,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
 	if (err)
 		return err;
 	else {
-		sysfs_notify(&mddev->kobj, NULL, "array_state");
+		sysfs_notify_dirent(mddev->sysfs_state);
 		return len;
 	}
 }
@@ -3457,6 +3463,11 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
 	disk->fops = &md_fops;
 	disk->private_data = mddev;
 	disk->queue = mddev->queue;
+	/* Allow extended partitions.  This makes the
+	 * 'mdp' device redundant, but we can't really
+	 * remove it now.
+	 */
+	disk->flags |= GENHD_FL_EXT_DEVT;
 	add_disk(disk);
 	mddev->gendisk = disk;
 	error = kobject_init_and_add(&mddev->kobj, &md_ktype,
@@ -3465,8 +3476,10 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
 	if (error)
 		printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
 		       disk->disk_name);
-	else
+	else {
 		kobject_uevent(&mddev->kobj, KOBJ_ADD);
+		mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state");
+	}
 	return NULL;
 }
 
@@ -3477,7 +3490,7 @@ static void md_safemode_timeout(unsigned long data)
 	if (!atomic_read(&mddev->writes_pending)) {
 		mddev->safemode = 1;
 		if (mddev->external)
-			set_bit(MD_NOTIFY_ARRAY_STATE, &mddev->flags);
+			sysfs_notify_dirent(mddev->sysfs_state);
 	}
 	md_wakeup_thread(mddev->thread);
 }
@@ -3578,7 +3591,7 @@ static int do_md_run(mddev_t * mddev)
 				return -EINVAL;
 			}
 		}
-		sysfs_notify(&rdev->kobj, NULL, "state");
+		sysfs_notify_dirent(rdev->sysfs_state);
 	}
 
 	md_probe(mddev->unit, NULL, NULL);
@@ -3740,7 +3753,7 @@ static int do_md_run(mddev_t * mddev)
 
 	mddev->changed = 1;
 	md_new_event(mddev);
-	sysfs_notify(&mddev->kobj, NULL, "array_state");
+	sysfs_notify_dirent(mddev->sysfs_state);
 	sysfs_notify(&mddev->kobj, NULL, "sync_action");
 	sysfs_notify(&mddev->kobj, NULL, "degraded");
 	kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
@@ -3767,7 +3780,7 @@ static int restart_array(mddev_t *mddev)
 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 	md_wakeup_thread(mddev->thread);
 	md_wakeup_thread(mddev->sync_thread);
-	sysfs_notify(&mddev->kobj, NULL, "array_state");
+	sysfs_notify_dirent(mddev->sysfs_state);
 	return 0;
 }
 
@@ -3847,7 +3860,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
 		module_put(mddev->pers->owner);
 		mddev->pers = NULL;
 		/* tell userspace to handle 'inactive' */
-		sysfs_notify(&mddev->kobj, NULL, "array_state");
+		sysfs_notify_dirent(mddev->sysfs_state);
 
 		set_capacity(disk, 0);
 		mddev->changed = 1;
@@ -3927,13 +3940,14 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
 		mddev->degraded = 0;
 		mddev->barriers_work = 0;
 		mddev->safemode = 0;
+		kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
 
 	} else if (mddev->pers)
 		printk(KERN_INFO "md: %s switched to read-only mode.\n",
 			mdname(mddev));
 	err = 0;
 	md_new_event(mddev);
-	sysfs_notify(&mddev->kobj, NULL, "array_state");
+	sysfs_notify_dirent(mddev->sysfs_state);
 out:
 	return err;
 }
@@ -4297,7 +4311,7 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
 		if (err)
 			export_rdev(rdev);
 		else
-			sysfs_notify(&rdev->kobj, NULL, "state");
+			sysfs_notify_dirent(rdev->sysfs_state);
 
 		md_update_sb(mddev, 1);
 		if (mddev->degraded)
@@ -4938,7 +4952,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
 	if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
 		if (mddev->ro == 2) {
 			mddev->ro = 0;
-			sysfs_notify(&mddev->kobj, NULL, "array_state");
+			sysfs_notify_dirent(mddev->sysfs_state);
 			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 			md_wakeup_thread(mddev->thread);
 		} else {
@@ -5612,7 +5626,7 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
 		spin_unlock_irq(&mddev->write_lock);
 	}
 	if (did_change)
-		sysfs_notify(&mddev->kobj, NULL, "array_state");
+		sysfs_notify_dirent(mddev->sysfs_state);
 	wait_event(mddev->sb_wait,
 		   !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
 		   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
@@ -5655,7 +5669,7 @@ int md_allow_write(mddev_t *mddev)
 		mddev->safemode = 1;
 		spin_unlock_irq(&mddev->write_lock);
 		md_update_sb(mddev, 0);
-		sysfs_notify(&mddev->kobj, NULL, "array_state");
+		sysfs_notify_dirent(mddev->sysfs_state);
 	} else
 		spin_unlock_irq(&mddev->write_lock);
 
@@ -6048,9 +6062,6 @@ void md_check_recovery(mddev_t *mddev)
 	if (mddev->bitmap)
 		bitmap_daemon_work(mddev->bitmap);
 
-	if (test_and_clear_bit(MD_NOTIFY_ARRAY_STATE, &mddev->flags))
-		sysfs_notify(&mddev->kobj, NULL, "array_state");
-
 	if (mddev->ro)
 		return;
 
@@ -6103,7 +6114,7 @@ void md_check_recovery(mddev_t *mddev)
 			mddev->safemode = 0;
 		spin_unlock_irq(&mddev->write_lock);
 		if (did_change)
-			sysfs_notify(&mddev->kobj, NULL, "array_state");
+			sysfs_notify_dirent(mddev->sysfs_state);
 	}
 
 	if (mddev->flags)
@@ -6111,7 +6122,7 @@ void md_check_recovery(mddev_t *mddev)
 
 	rdev_for_each(rdev, rtmp, mddev)
 		if (test_and_clear_bit(StateChanged, &rdev->flags))
-			sysfs_notify(&rdev->kobj, NULL, "state");
+			sysfs_notify_dirent(rdev->sysfs_state);
 
 
 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
@@ -6221,7 +6232,7 @@ void md_check_recovery(mddev_t *mddev)
 
 void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
 {
-	sysfs_notify(&rdev->kobj, NULL, "state");
+	sysfs_notify_dirent(rdev->sysfs_state);
 	wait_event_timeout(rdev->blocked_wait,
 			   !test_bit(Blocked, &rdev->flags),
 			   msecs_to_jiffies(5000));
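
Nearly every md.c hunk is one refactor: sysfs_notify(&kobj, NULL, "state") resolves the attribute by name on each event, so the dirent is now resolved once (sysfs_get_dirent() at bind/probe time), cached in ->sysfs_state, fired with sysfs_notify_dirent(), and dropped with sysfs_put() on teardown. Because the cached variant is safe from atomic context, md_safemode_timeout() can notify directly from the timer, and the MD_NOTIFY_ARRAY_STATE deferral in md_check_recovery() goes away. The GENHD_FL_EXT_DEVT and KOBJ_CHANGE hunks are independent of this refactor. A generic sketch of the cache-the-handle idea (hypothetical names, not the sysfs API):

#include <string.h>

struct attr { const char *name; int events; };

static struct attr attrs[] = { { "state", 0 }, { "array_state", 0 } };

/* Slow path: resolve by name on every notification (old sysfs_notify). */
static struct attr *lookup(const char *name)
{
	for (unsigned i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)
		if (strcmp(attrs[i].name, name) == 0)
			return &attrs[i];
	return 0;
}

/* Fast path: resolve once at registration (the sysfs_get_dirent
 * analogue), then fire events through the cached handle (the
 * sysfs_notify_dirent analogue), with no lookup or locking. */
static struct attr *cached_state;

static void bind(void)
{
	cached_state = lookup("state");
}

static void notify_state(void)
{
	if (cached_state)
		cached_state->events++;
}
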
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index da5129a24b18..970a96ef9b18 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1137,7 +1137,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 	if (!enough(conf))
 		return -EINVAL;
 
-	if (rdev->raid_disk)
+	if (rdev->raid_disk >= 0)
 		first = last = rdev->raid_disk;
 
 	if (rdev->saved_raid_disk >= 0 &&
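
The raid10 change fixes a sentinel test: raid_disk uses -1 for "unassigned", so the plain truth test treated a device whose preferred slot was 0 as having no slot at all. A two-assert demonstration:

#include <assert.h>

int main(void)
{
	int raid_disk = 0;		/* device belongs in slot 0 */

	assert(!raid_disk);		/* old test: slot 0 looks "unset" */
	assert(raid_disk >= 0);		/* fixed test: -1 is the sentinel */
	return 0;
}
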