about summary refs log tree commit diff stats
path: root/drivers/md
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/md')
-rw-r--r--drivers/md/dm-crypt.c19
-rw-r--r--drivers/md/dm-emc.c6
-rw-r--r--drivers/md/dm-hw-handler.c2
-rw-r--r--drivers/md/dm-mpath.c32
-rw-r--r--drivers/md/dm-path-selector.c2
-rw-r--r--drivers/md/dm-table.c2
-rw-r--r--drivers/md/dm-zero.c4
-rw-r--r--drivers/md/dm.c96
-rw-r--r--drivers/md/md.c14
-rw-r--r--drivers/md/multipath.c5
-rw-r--r--drivers/md/raid1.c2
-rw-r--r--drivers/md/raid10.c2
-rw-r--r--drivers/md/raid5.c2
-rw-r--r--drivers/md/raid6main.c2
14 files changed, 105 insertions, 85 deletions
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 77619a56e2bf..0dd6c2b5391b 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -331,25 +331,19 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
331 struct bio *bio; 331 struct bio *bio;
332 unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 332 unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
333 int gfp_mask = GFP_NOIO | __GFP_HIGHMEM; 333 int gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
334 unsigned long flags = current->flags;
335 unsigned int i; 334 unsigned int i;
336 335
337 /* 336 /*
338 * Tell VM to act less aggressively and fail earlier. 337 * Use __GFP_NOMEMALLOC to tell the VM to act less aggressively and
339 * This is not necessary but increases throughput. 338 * to fail earlier. This is not necessary but increases throughput.
340 * FIXME: Is this really intelligent? 339 * FIXME: Is this really intelligent?
341 */ 340 */
342 current->flags &= ~PF_MEMALLOC;
343
344 if (base_bio) 341 if (base_bio)
345 bio = bio_clone(base_bio, GFP_NOIO); 342 bio = bio_clone(base_bio, GFP_NOIO|__GFP_NOMEMALLOC);
346 else 343 else
347 bio = bio_alloc(GFP_NOIO, nr_iovecs); 344 bio = bio_alloc(GFP_NOIO|__GFP_NOMEMALLOC, nr_iovecs);
348 if (!bio) { 345 if (!bio)
349 if (flags & PF_MEMALLOC)
350 current->flags |= PF_MEMALLOC;
351 return NULL; 346 return NULL;
352 }
353 347
354 /* if the last bio was not complete, continue where that one ended */ 348 /* if the last bio was not complete, continue where that one ended */
355 bio->bi_idx = *bio_vec_idx; 349 bio->bi_idx = *bio_vec_idx;
@@ -386,9 +380,6 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
386 size -= bv->bv_len; 380 size -= bv->bv_len;
387 } 381 }
388 382
389 if (flags & PF_MEMALLOC)
390 current->flags |= PF_MEMALLOC;
391
392 if (!bio->bi_size) { 383 if (!bio->bi_size) {
393 bio_put(bio); 384 bio_put(bio);
394 return NULL; 385 return NULL;
diff --git a/drivers/md/dm-emc.c b/drivers/md/dm-emc.c
index 700658664594..c7067674dcb7 100644
--- a/drivers/md/dm-emc.c
+++ b/drivers/md/dm-emc.c
@@ -223,8 +223,10 @@ static struct emc_handler *alloc_emc_handler(void)
223{ 223{
224 struct emc_handler *h = kmalloc(sizeof(*h), GFP_KERNEL); 224 struct emc_handler *h = kmalloc(sizeof(*h), GFP_KERNEL);
225 225
226 if (h) 226 if (h) {
227 memset(h, 0, sizeof(*h));
227 spin_lock_init(&h->lock); 228 spin_lock_init(&h->lock);
229 }
228 230
229 return h; 231 return h;
230} 232}
@@ -259,8 +261,6 @@ static int emc_create(struct hw_handler *hwh, unsigned argc, char **argv)
259 if (!h) 261 if (!h)
260 return -ENOMEM; 262 return -ENOMEM;
261 263
262 memset(h, 0, sizeof(*h));
263
264 hwh->context = h; 264 hwh->context = h;
265 265
266 if ((h->short_trespass = short_trespass)) 266 if ((h->short_trespass = short_trespass))
diff --git a/drivers/md/dm-hw-handler.c b/drivers/md/dm-hw-handler.c
index ae63772e44c9..4cc0010e0156 100644
--- a/drivers/md/dm-hw-handler.c
+++ b/drivers/md/dm-hw-handler.c
@@ -23,7 +23,7 @@ struct hwh_internal {
23static LIST_HEAD(_hw_handlers); 23static LIST_HEAD(_hw_handlers);
24static DECLARE_RWSEM(_hwh_lock); 24static DECLARE_RWSEM(_hwh_lock);
25 25
26struct hwh_internal *__find_hw_handler_type(const char *name) 26static struct hwh_internal *__find_hw_handler_type(const char *name)
27{ 27{
28 struct hwh_internal *hwhi; 28 struct hwh_internal *hwhi;
29 29
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 43763a0bd096..1e97b3c12bd5 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -101,6 +101,7 @@ typedef int (*action_fn) (struct pgpath *pgpath);
101 101
102static kmem_cache_t *_mpio_cache; 102static kmem_cache_t *_mpio_cache;
103 103
104struct workqueue_struct *kmultipathd;
104static void process_queued_ios(void *data); 105static void process_queued_ios(void *data);
105static void trigger_event(void *data); 106static void trigger_event(void *data);
106 107
@@ -308,7 +309,7 @@ static int map_io(struct multipath *m, struct bio *bio, struct mpath_io *mpio,
308 bio_list_add(&m->queued_ios, bio); 309 bio_list_add(&m->queued_ios, bio);
309 m->queue_size++; 310 m->queue_size++;
310 if (m->pg_init_required || !m->queue_io) 311 if (m->pg_init_required || !m->queue_io)
311 schedule_work(&m->process_queued_ios); 312 queue_work(kmultipathd, &m->process_queued_ios);
312 pgpath = NULL; 313 pgpath = NULL;
313 r = 0; 314 r = 0;
314 } else if (!pgpath) 315 } else if (!pgpath)
@@ -334,7 +335,7 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path)
334 335
335 m->queue_if_no_path = queue_if_no_path; 336 m->queue_if_no_path = queue_if_no_path;
336 if (!m->queue_if_no_path) 337 if (!m->queue_if_no_path)
337 schedule_work(&m->process_queued_ios); 338 queue_work(kmultipathd, &m->process_queued_ios);
338 339
339 spin_unlock_irqrestore(&m->lock, flags); 340 spin_unlock_irqrestore(&m->lock, flags);
340 341
@@ -800,7 +801,7 @@ static int fail_path(struct pgpath *pgpath)
800 if (pgpath == m->current_pgpath) 801 if (pgpath == m->current_pgpath)
801 m->current_pgpath = NULL; 802 m->current_pgpath = NULL;
802 803
803 schedule_work(&m->trigger_event); 804 queue_work(kmultipathd, &m->trigger_event);
804 805
805out: 806out:
806 spin_unlock_irqrestore(&m->lock, flags); 807 spin_unlock_irqrestore(&m->lock, flags);
@@ -837,9 +838,9 @@ static int reinstate_path(struct pgpath *pgpath)
837 838
838 m->current_pgpath = NULL; 839 m->current_pgpath = NULL;
839 if (!m->nr_valid_paths++) 840 if (!m->nr_valid_paths++)
840 schedule_work(&m->process_queued_ios); 841 queue_work(kmultipathd, &m->process_queued_ios);
841 842
842 schedule_work(&m->trigger_event); 843 queue_work(kmultipathd, &m->trigger_event);
843 844
844out: 845out:
845 spin_unlock_irqrestore(&m->lock, flags); 846 spin_unlock_irqrestore(&m->lock, flags);
@@ -883,7 +884,7 @@ static void bypass_pg(struct multipath *m, struct priority_group *pg,
883 884
884 spin_unlock_irqrestore(&m->lock, flags); 885 spin_unlock_irqrestore(&m->lock, flags);
885 886
886 schedule_work(&m->trigger_event); 887 queue_work(kmultipathd, &m->trigger_event);
887} 888}
888 889
889/* 890/*
@@ -913,7 +914,7 @@ static int switch_pg_num(struct multipath *m, const char *pgstr)
913 } 914 }
914 spin_unlock_irqrestore(&m->lock, flags); 915 spin_unlock_irqrestore(&m->lock, flags);
915 916
916 schedule_work(&m->trigger_event); 917 queue_work(kmultipathd, &m->trigger_event);
917 return 0; 918 return 0;
918} 919}
919 920
@@ -968,7 +969,7 @@ void dm_pg_init_complete(struct path *path, unsigned err_flags)
968 m->current_pgpath = NULL; 969 m->current_pgpath = NULL;
969 m->current_pg = NULL; 970 m->current_pg = NULL;
970 } 971 }
971 schedule_work(&m->process_queued_ios); 972 queue_work(kmultipathd, &m->process_queued_ios);
972 spin_unlock_irqrestore(&m->lock, flags); 973 spin_unlock_irqrestore(&m->lock, flags);
973} 974}
974 975
@@ -1018,7 +1019,7 @@ static int do_end_io(struct multipath *m, struct bio *bio,
1018 bio_list_add(&m->queued_ios, bio); 1019 bio_list_add(&m->queued_ios, bio);
1019 m->queue_size++; 1020 m->queue_size++;
1020 if (!m->queue_io) 1021 if (!m->queue_io)
1021 schedule_work(&m->process_queued_ios); 1022 queue_work(kmultipathd, &m->process_queued_ios);
1022 spin_unlock(&m->lock); 1023 spin_unlock(&m->lock);
1023 1024
1024 return 1; /* io not complete */ 1025 return 1; /* io not complete */
@@ -1057,7 +1058,7 @@ static void multipath_presuspend(struct dm_target *ti)
1057 spin_lock_irqsave(&m->lock, flags); 1058 spin_lock_irqsave(&m->lock, flags);
1058 m->suspended = 1; 1059 m->suspended = 1;
1059 if (m->queue_if_no_path) 1060 if (m->queue_if_no_path)
1060 schedule_work(&m->process_queued_ios); 1061 queue_work(kmultipathd, &m->process_queued_ios);
1061 spin_unlock_irqrestore(&m->lock, flags); 1062 spin_unlock_irqrestore(&m->lock, flags);
1062} 1063}
1063 1064
@@ -1274,6 +1275,15 @@ static int __init dm_multipath_init(void)
1274 return -EINVAL; 1275 return -EINVAL;
1275 } 1276 }
1276 1277
1278 kmultipathd = create_workqueue("kmpathd");
1279 if (!kmultipathd) {
1280 DMERR("%s: failed to create workqueue kmpathd",
1281 multipath_target.name);
1282 dm_unregister_target(&multipath_target);
1283 kmem_cache_destroy(_mpio_cache);
1284 return -ENOMEM;
1285 }
1286
1277 DMINFO("dm-multipath version %u.%u.%u loaded", 1287 DMINFO("dm-multipath version %u.%u.%u loaded",
1278 multipath_target.version[0], multipath_target.version[1], 1288 multipath_target.version[0], multipath_target.version[1],
1279 multipath_target.version[2]); 1289 multipath_target.version[2]);
@@ -1285,6 +1295,8 @@ static void __exit dm_multipath_exit(void)
1285{ 1295{
1286 int r; 1296 int r;
1287 1297
1298 destroy_workqueue(kmultipathd);
1299
1288 r = dm_unregister_target(&multipath_target); 1300 r = dm_unregister_target(&multipath_target);
1289 if (r < 0) 1301 if (r < 0)
1290 DMERR("%s: target unregister failed %d", 1302 DMERR("%s: target unregister failed %d",
diff --git a/drivers/md/dm-path-selector.c b/drivers/md/dm-path-selector.c
index ac5c4bbec6c1..a28c1c2b4ef5 100644
--- a/drivers/md/dm-path-selector.c
+++ b/drivers/md/dm-path-selector.c
@@ -26,7 +26,7 @@ struct ps_internal {
26static LIST_HEAD(_path_selectors); 26static LIST_HEAD(_path_selectors);
27static DECLARE_RWSEM(_ps_lock); 27static DECLARE_RWSEM(_ps_lock);
28 28
29struct ps_internal *__find_path_selector_type(const char *name) 29static struct ps_internal *__find_path_selector_type(const char *name)
30{ 30{
31 struct ps_internal *psi; 31 struct ps_internal *psi;
32 32
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index ee175d4906c4..18e9b9953fcd 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -242,7 +242,7 @@ static void free_devices(struct list_head *devices)
242 } 242 }
243} 243}
244 244
245void table_destroy(struct dm_table *t) 245static void table_destroy(struct dm_table *t)
246{ 246{
247 unsigned int i; 247 unsigned int i;
248 248
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index 7febc2cac73d..51c0639b2487 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -55,7 +55,7 @@ static struct target_type zero_target = {
55 .map = zero_map, 55 .map = zero_map,
56}; 56};
57 57
58int __init dm_zero_init(void) 58static int __init dm_zero_init(void)
59{ 59{
60 int r = dm_register_target(&zero_target); 60 int r = dm_register_target(&zero_target);
61 61
@@ -65,7 +65,7 @@ int __init dm_zero_init(void)
65 return r; 65 return r;
66} 66}
67 67
68void __exit dm_zero_exit(void) 68static void __exit dm_zero_exit(void)
69{ 69{
70 int r = dm_unregister_target(&zero_target); 70 int r = dm_unregister_target(&zero_target);
71 71
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 243ff6884e83..f6b03957efc7 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -97,6 +97,7 @@ struct mapped_device {
97 * freeze/thaw support require holding onto a super block 97 * freeze/thaw support require holding onto a super block
98 */ 98 */
99 struct super_block *frozen_sb; 99 struct super_block *frozen_sb;
100 struct block_device *frozen_bdev;
100}; 101};
101 102
102#define MIN_IOS 256 103#define MIN_IOS 256
@@ -990,44 +991,50 @@ int dm_swap_table(struct mapped_device *md, struct dm_table *table)
990 */ 991 */
991static int __lock_fs(struct mapped_device *md) 992static int __lock_fs(struct mapped_device *md)
992{ 993{
993 struct block_device *bdev; 994 int error = -ENOMEM;
994 995
995 if (test_and_set_bit(DMF_FS_LOCKED, &md->flags)) 996 if (test_and_set_bit(DMF_FS_LOCKED, &md->flags))
996 return 0; 997 return 0;
997 998
998 bdev = bdget_disk(md->disk, 0); 999 md->frozen_bdev = bdget_disk(md->disk, 0);
999 if (!bdev) { 1000 if (!md->frozen_bdev) {
1000 DMWARN("bdget failed in __lock_fs"); 1001 DMWARN("bdget failed in __lock_fs");
1001 return -ENOMEM; 1002 goto out;
1002 } 1003 }
1003 1004
1004 WARN_ON(md->frozen_sb); 1005 WARN_ON(md->frozen_sb);
1005 md->frozen_sb = freeze_bdev(bdev); 1006
1007 md->frozen_sb = freeze_bdev(md->frozen_bdev);
1008 if (IS_ERR(md->frozen_sb)) {
1009 error = PTR_ERR(md->frozen_sb);
1010 goto out_bdput;
1011 }
1012
1006 /* don't bdput right now, we don't want the bdev 1013 /* don't bdput right now, we don't want the bdev
1007 * to go away while it is locked. We'll bdput 1014 * to go away while it is locked. We'll bdput
1008 * in __unlock_fs 1015 * in __unlock_fs
1009 */ 1016 */
1010 return 0; 1017 return 0;
1018
1019out_bdput:
1020 bdput(md->frozen_bdev);
1021 md->frozen_sb = NULL;
1022 md->frozen_bdev = NULL;
1023out:
1024 clear_bit(DMF_FS_LOCKED, &md->flags);
1025 return error;
1011} 1026}
1012 1027
1013static int __unlock_fs(struct mapped_device *md) 1028static void __unlock_fs(struct mapped_device *md)
1014{ 1029{
1015 struct block_device *bdev;
1016
1017 if (!test_and_clear_bit(DMF_FS_LOCKED, &md->flags)) 1030 if (!test_and_clear_bit(DMF_FS_LOCKED, &md->flags))
1018 return 0; 1031 return;
1019 1032
1020 bdev = bdget_disk(md->disk, 0); 1033 thaw_bdev(md->frozen_bdev, md->frozen_sb);
1021 if (!bdev) { 1034 bdput(md->frozen_bdev);
1022 DMWARN("bdget failed in __unlock_fs");
1023 return -ENOMEM;
1024 }
1025 1035
1026 thaw_bdev(bdev, md->frozen_sb);
1027 md->frozen_sb = NULL; 1036 md->frozen_sb = NULL;
1028 bdput(bdev); 1037 md->frozen_bdev = NULL;
1029 bdput(bdev);
1030 return 0;
1031} 1038}
1032 1039
1033/* 1040/*
@@ -1041,37 +1048,37 @@ int dm_suspend(struct mapped_device *md)
1041{ 1048{
1042 struct dm_table *map; 1049 struct dm_table *map;
1043 DECLARE_WAITQUEUE(wait, current); 1050 DECLARE_WAITQUEUE(wait, current);
1051 int error = -EINVAL;
1044 1052
1045 /* Flush I/O to the device. */ 1053 /* Flush I/O to the device. */
1046 down_read(&md->lock); 1054 down_read(&md->lock);
1047 if (test_bit(DMF_BLOCK_IO, &md->flags)) { 1055 if (test_bit(DMF_BLOCK_IO, &md->flags))
1048 up_read(&md->lock); 1056 goto out_read_unlock;
1049 return -EINVAL; 1057
1050 } 1058 error = __lock_fs(md);
1059 if (error)
1060 goto out_read_unlock;
1051 1061
1052 map = dm_get_table(md); 1062 map = dm_get_table(md);
1053 if (map) 1063 if (map)
1054 dm_table_presuspend_targets(map); 1064 dm_table_presuspend_targets(map);
1055 __lock_fs(md);
1056 1065
1057 up_read(&md->lock); 1066 up_read(&md->lock);
1058 1067
1059 /* 1068 /*
1060 * First we set the BLOCK_IO flag so no more ios will be 1069 * First we set the BLOCK_IO flag so no more ios will be mapped.
1061 * mapped. 1070 *
1071 * If the flag is already set we know another thread is trying to
1072 * suspend as well, so we leave the fs locked for this thread.
1062 */ 1073 */
1074 error = -EINVAL;
1063 down_write(&md->lock); 1075 down_write(&md->lock);
1064 if (test_bit(DMF_BLOCK_IO, &md->flags)) { 1076 if (test_and_set_bit(DMF_BLOCK_IO, &md->flags)) {
1065 /* 1077 if (map)
1066 * If we get here we know another thread is 1078 dm_table_put(map);
1067 * trying to suspend as well, so we leave the fs 1079 goto out_write_unlock;
1068 * locked for this thread.
1069 */
1070 up_write(&md->lock);
1071 return -EINVAL;
1072 } 1080 }
1073 1081
1074 set_bit(DMF_BLOCK_IO, &md->flags);
1075 add_wait_queue(&md->wait, &wait); 1082 add_wait_queue(&md->wait, &wait);
1076 up_write(&md->lock); 1083 up_write(&md->lock);
1077 1084
@@ -1099,12 +1106,9 @@ int dm_suspend(struct mapped_device *md)
1099 remove_wait_queue(&md->wait, &wait); 1106 remove_wait_queue(&md->wait, &wait);
1100 1107
1101 /* were we interrupted ? */ 1108 /* were we interrupted ? */
1102 if (atomic_read(&md->pending)) { 1109 error = -EINTR;
1103 __unlock_fs(md); 1110 if (atomic_read(&md->pending))
1104 clear_bit(DMF_BLOCK_IO, &md->flags); 1111 goto out_unfreeze;
1105 up_write(&md->lock);
1106 return -EINTR;
1107 }
1108 1112
1109 set_bit(DMF_SUSPENDED, &md->flags); 1113 set_bit(DMF_SUSPENDED, &md->flags);
1110 1114
@@ -1115,6 +1119,18 @@ int dm_suspend(struct mapped_device *md)
1115 up_write(&md->lock); 1119 up_write(&md->lock);
1116 1120
1117 return 0; 1121 return 0;
1122
1123out_unfreeze:
1124 /* FIXME Undo dm_table_presuspend_targets */
1125 __unlock_fs(md);
1126 clear_bit(DMF_BLOCK_IO, &md->flags);
1127out_write_unlock:
1128 up_write(&md->lock);
1129 return error;
1130
1131out_read_unlock:
1132 up_read(&md->lock);
1133 return error;
1118} 1134}
1119 1135
1120int dm_resume(struct mapped_device *md) 1136int dm_resume(struct mapped_device *md)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 97af857d8a88..d899204d3743 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -957,7 +957,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
957} 957}
958 958
959 959
960struct super_type super_types[] = { 960static struct super_type super_types[] = {
961 [0] = { 961 [0] = {
962 .name = "0.90.0", 962 .name = "0.90.0",
963 .owner = THIS_MODULE, 963 .owner = THIS_MODULE,
@@ -2740,7 +2740,7 @@ static struct block_device_operations md_fops =
2740 .revalidate_disk= md_revalidate, 2740 .revalidate_disk= md_revalidate,
2741}; 2741};
2742 2742
2743int md_thread(void * arg) 2743static int md_thread(void * arg)
2744{ 2744{
2745 mdk_thread_t *thread = arg; 2745 mdk_thread_t *thread = arg;
2746 2746
@@ -3232,7 +3232,7 @@ void md_handle_safemode(mddev_t *mddev)
3232} 3232}
3233 3233
3234 3234
3235DECLARE_WAIT_QUEUE_HEAD(resync_wait); 3235static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
3236 3236
3237#define SYNC_MARKS 10 3237#define SYNC_MARKS 10
3238#define SYNC_MARK_STEP (3*HZ) 3238#define SYNC_MARK_STEP (3*HZ)
@@ -3575,8 +3575,8 @@ void md_check_recovery(mddev_t *mddev)
3575 } 3575 }
3576} 3576}
3577 3577
3578int md_notify_reboot(struct notifier_block *this, 3578static int md_notify_reboot(struct notifier_block *this,
3579 unsigned long code, void *x) 3579 unsigned long code, void *x)
3580{ 3580{
3581 struct list_head *tmp; 3581 struct list_head *tmp;
3582 mddev_t *mddev; 3582 mddev_t *mddev;
@@ -3599,7 +3599,7 @@ int md_notify_reboot(struct notifier_block *this,
3599 return NOTIFY_DONE; 3599 return NOTIFY_DONE;
3600} 3600}
3601 3601
3602struct notifier_block md_notifier = { 3602static struct notifier_block md_notifier = {
3603 .notifier_call = md_notify_reboot, 3603 .notifier_call = md_notify_reboot,
3604 .next = NULL, 3604 .next = NULL,
3605 .priority = INT_MAX, /* before any real devices */ 3605 .priority = INT_MAX, /* before any real devices */
@@ -3616,7 +3616,7 @@ static void md_geninit(void)
3616 p->proc_fops = &md_seq_fops; 3616 p->proc_fops = &md_seq_fops;
3617} 3617}
3618 3618
3619int __init md_init(void) 3619static int __init md_init(void)
3620{ 3620{
3621 int minor; 3621 int minor;
3622 3622
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index c9b134cd1532..4e4bfde3db5d 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -103,7 +103,8 @@ static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
103 mempool_free(mp_bh, conf->pool); 103 mempool_free(mp_bh, conf->pool);
104} 104}
105 105
106int multipath_end_request(struct bio *bio, unsigned int bytes_done, int error) 106static int multipath_end_request(struct bio *bio, unsigned int bytes_done,
107 int error)
107{ 108{
108 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 109 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
109 struct multipath_bh * mp_bh = (struct multipath_bh *)(bio->bi_private); 110 struct multipath_bh * mp_bh = (struct multipath_bh *)(bio->bi_private);
@@ -355,7 +356,7 @@ static int multipath_remove_disk(mddev_t *mddev, int number)
355 goto abort; 356 goto abort;
356 } 357 }
357 p->rdev = NULL; 358 p->rdev = NULL;
358 synchronize_kernel(); 359 synchronize_rcu();
359 if (atomic_read(&rdev->nr_pending)) { 360 if (atomic_read(&rdev->nr_pending)) {
360 /* lost the race, try later */ 361 /* lost the race, try later */
361 err = -EBUSY; 362 err = -EBUSY;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a389394b52f6..83380b5d6593 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -797,7 +797,7 @@ static int raid1_remove_disk(mddev_t *mddev, int number)
797 goto abort; 797 goto abort;
798 } 798 }
799 p->rdev = NULL; 799 p->rdev = NULL;
800 synchronize_kernel(); 800 synchronize_rcu();
801 if (atomic_read(&rdev->nr_pending)) { 801 if (atomic_read(&rdev->nr_pending)) {
802 /* lost the race, try later */ 802 /* lost the race, try later */
803 err = -EBUSY; 803 err = -EBUSY;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index b100bfe4fdca..e9dc2876a626 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -977,7 +977,7 @@ static int raid10_remove_disk(mddev_t *mddev, int number)
977 goto abort; 977 goto abort;
978 } 978 }
979 p->rdev = NULL; 979 p->rdev = NULL;
980 synchronize_kernel(); 980 synchronize_rcu();
981 if (atomic_read(&rdev->nr_pending)) { 981 if (atomic_read(&rdev->nr_pending)) {
982 /* lost the race, try later */ 982 /* lost the race, try later */
983 err = -EBUSY; 983 err = -EBUSY;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 52c3a81c4aa7..e96e2a10a9c9 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1873,7 +1873,7 @@ static int raid5_remove_disk(mddev_t *mddev, int number)
1873 goto abort; 1873 goto abort;
1874 } 1874 }
1875 p->rdev = NULL; 1875 p->rdev = NULL;
1876 synchronize_kernel(); 1876 synchronize_rcu();
1877 if (atomic_read(&rdev->nr_pending)) { 1877 if (atomic_read(&rdev->nr_pending)) {
1878 /* lost the race, try later */ 1878 /* lost the race, try later */
1879 err = -EBUSY; 1879 err = -EBUSY;
diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c
index 7e30ab29691a..8a33f351e092 100644
--- a/drivers/md/raid6main.c
+++ b/drivers/md/raid6main.c
@@ -2038,7 +2038,7 @@ static int raid6_remove_disk(mddev_t *mddev, int number)
2038 goto abort; 2038 goto abort;
2039 } 2039 }
2040 p->rdev = NULL; 2040 p->rdev = NULL;
2041 synchronize_kernel(); 2041 synchronize_rcu();
2042 if (atomic_read(&rdev->nr_pending)) { 2042 if (atomic_read(&rdev->nr_pending)) {
2043 /* lost the race, try later */ 2043 /* lost the race, try later */
2044 err = -EBUSY; 2044 err = -EBUSY;