diff options
author | Alasdair G Kergon <agk@redhat.com> | 2005-05-05 19:16:07 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-05-05 19:36:46 -0400 |
commit | c557308e1f4b6f7d6144a06e8f98c467814f3aed (patch) | |
tree | fe786dde1aea83e435fd86e75b2ee81238a8ae14 /drivers | |
parent | b84b0287a8ba618568a8bc9ac8847ac332abe90d (diff) |
[PATCH] device-mapper multipath: Use private workqueue
dm-mpath.c needs to use a private workqueue (like other dm targets already do)
to avoid interfering with users of the default workqueue.
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Acked-by: Jens Axboe <axboe@suse.de>
Signed-off-by: Lars Marowsky-Bree <lmb@suse.de>
Signed-off-by: <mikenc@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/md/dm-mpath.c | 32 |
1 file changed, 22 insertions, 10 deletions
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 43763a0bd096..1e97b3c12bd5 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c | |||
@@ -101,6 +101,7 @@ typedef int (*action_fn) (struct pgpath *pgpath); | |||
101 | 101 | ||
102 | static kmem_cache_t *_mpio_cache; | 102 | static kmem_cache_t *_mpio_cache; |
103 | 103 | ||
104 | struct workqueue_struct *kmultipathd; | ||
104 | static void process_queued_ios(void *data); | 105 | static void process_queued_ios(void *data); |
105 | static void trigger_event(void *data); | 106 | static void trigger_event(void *data); |
106 | 107 | ||
@@ -308,7 +309,7 @@ static int map_io(struct multipath *m, struct bio *bio, struct mpath_io *mpio, | |||
308 | bio_list_add(&m->queued_ios, bio); | 309 | bio_list_add(&m->queued_ios, bio); |
309 | m->queue_size++; | 310 | m->queue_size++; |
310 | if (m->pg_init_required || !m->queue_io) | 311 | if (m->pg_init_required || !m->queue_io) |
311 | schedule_work(&m->process_queued_ios); | 312 | queue_work(kmultipathd, &m->process_queued_ios); |
312 | pgpath = NULL; | 313 | pgpath = NULL; |
313 | r = 0; | 314 | r = 0; |
314 | } else if (!pgpath) | 315 | } else if (!pgpath) |
@@ -334,7 +335,7 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path) | |||
334 | 335 | ||
335 | m->queue_if_no_path = queue_if_no_path; | 336 | m->queue_if_no_path = queue_if_no_path; |
336 | if (!m->queue_if_no_path) | 337 | if (!m->queue_if_no_path) |
337 | schedule_work(&m->process_queued_ios); | 338 | queue_work(kmultipathd, &m->process_queued_ios); |
338 | 339 | ||
339 | spin_unlock_irqrestore(&m->lock, flags); | 340 | spin_unlock_irqrestore(&m->lock, flags); |
340 | 341 | ||
@@ -800,7 +801,7 @@ static int fail_path(struct pgpath *pgpath) | |||
800 | if (pgpath == m->current_pgpath) | 801 | if (pgpath == m->current_pgpath) |
801 | m->current_pgpath = NULL; | 802 | m->current_pgpath = NULL; |
802 | 803 | ||
803 | schedule_work(&m->trigger_event); | 804 | queue_work(kmultipathd, &m->trigger_event); |
804 | 805 | ||
805 | out: | 806 | out: |
806 | spin_unlock_irqrestore(&m->lock, flags); | 807 | spin_unlock_irqrestore(&m->lock, flags); |
@@ -837,9 +838,9 @@ static int reinstate_path(struct pgpath *pgpath) | |||
837 | 838 | ||
838 | m->current_pgpath = NULL; | 839 | m->current_pgpath = NULL; |
839 | if (!m->nr_valid_paths++) | 840 | if (!m->nr_valid_paths++) |
840 | schedule_work(&m->process_queued_ios); | 841 | queue_work(kmultipathd, &m->process_queued_ios); |
841 | 842 | ||
842 | schedule_work(&m->trigger_event); | 843 | queue_work(kmultipathd, &m->trigger_event); |
843 | 844 | ||
844 | out: | 845 | out: |
845 | spin_unlock_irqrestore(&m->lock, flags); | 846 | spin_unlock_irqrestore(&m->lock, flags); |
@@ -883,7 +884,7 @@ static void bypass_pg(struct multipath *m, struct priority_group *pg, | |||
883 | 884 | ||
884 | spin_unlock_irqrestore(&m->lock, flags); | 885 | spin_unlock_irqrestore(&m->lock, flags); |
885 | 886 | ||
886 | schedule_work(&m->trigger_event); | 887 | queue_work(kmultipathd, &m->trigger_event); |
887 | } | 888 | } |
888 | 889 | ||
889 | /* | 890 | /* |
@@ -913,7 +914,7 @@ static int switch_pg_num(struct multipath *m, const char *pgstr) | |||
913 | } | 914 | } |
914 | spin_unlock_irqrestore(&m->lock, flags); | 915 | spin_unlock_irqrestore(&m->lock, flags); |
915 | 916 | ||
916 | schedule_work(&m->trigger_event); | 917 | queue_work(kmultipathd, &m->trigger_event); |
917 | return 0; | 918 | return 0; |
918 | } | 919 | } |
919 | 920 | ||
@@ -968,7 +969,7 @@ void dm_pg_init_complete(struct path *path, unsigned err_flags) | |||
968 | m->current_pgpath = NULL; | 969 | m->current_pgpath = NULL; |
969 | m->current_pg = NULL; | 970 | m->current_pg = NULL; |
970 | } | 971 | } |
971 | schedule_work(&m->process_queued_ios); | 972 | queue_work(kmultipathd, &m->process_queued_ios); |
972 | spin_unlock_irqrestore(&m->lock, flags); | 973 | spin_unlock_irqrestore(&m->lock, flags); |
973 | } | 974 | } |
974 | 975 | ||
@@ -1018,7 +1019,7 @@ static int do_end_io(struct multipath *m, struct bio *bio, | |||
1018 | bio_list_add(&m->queued_ios, bio); | 1019 | bio_list_add(&m->queued_ios, bio); |
1019 | m->queue_size++; | 1020 | m->queue_size++; |
1020 | if (!m->queue_io) | 1021 | if (!m->queue_io) |
1021 | schedule_work(&m->process_queued_ios); | 1022 | queue_work(kmultipathd, &m->process_queued_ios); |
1022 | spin_unlock(&m->lock); | 1023 | spin_unlock(&m->lock); |
1023 | 1024 | ||
1024 | return 1; /* io not complete */ | 1025 | return 1; /* io not complete */ |
@@ -1057,7 +1058,7 @@ static void multipath_presuspend(struct dm_target *ti) | |||
1057 | spin_lock_irqsave(&m->lock, flags); | 1058 | spin_lock_irqsave(&m->lock, flags); |
1058 | m->suspended = 1; | 1059 | m->suspended = 1; |
1059 | if (m->queue_if_no_path) | 1060 | if (m->queue_if_no_path) |
1060 | schedule_work(&m->process_queued_ios); | 1061 | queue_work(kmultipathd, &m->process_queued_ios); |
1061 | spin_unlock_irqrestore(&m->lock, flags); | 1062 | spin_unlock_irqrestore(&m->lock, flags); |
1062 | } | 1063 | } |
1063 | 1064 | ||
@@ -1274,6 +1275,15 @@ static int __init dm_multipath_init(void) | |||
1274 | return -EINVAL; | 1275 | return -EINVAL; |
1275 | } | 1276 | } |
1276 | 1277 | ||
1278 | kmultipathd = create_workqueue("kmpathd"); | ||
1279 | if (!kmultipathd) { | ||
1280 | DMERR("%s: failed to create workqueue kmpathd", | ||
1281 | multipath_target.name); | ||
1282 | dm_unregister_target(&multipath_target); | ||
1283 | kmem_cache_destroy(_mpio_cache); | ||
1284 | return -ENOMEM; | ||
1285 | } | ||
1286 | |||
1277 | DMINFO("dm-multipath version %u.%u.%u loaded", | 1287 | DMINFO("dm-multipath version %u.%u.%u loaded", |
1278 | multipath_target.version[0], multipath_target.version[1], | 1288 | multipath_target.version[0], multipath_target.version[1], |
1279 | multipath_target.version[2]); | 1289 | multipath_target.version[2]); |
@@ -1285,6 +1295,8 @@ static void __exit dm_multipath_exit(void) | |||
1285 | { | 1295 | { |
1286 | int r; | 1296 | int r; |
1287 | 1297 | ||
1298 | destroy_workqueue(kmultipathd); | ||
1299 | |||
1288 | r = dm_unregister_target(&multipath_target); | 1300 | r = dm_unregister_target(&multipath_target); |
1289 | if (r < 0) | 1301 | if (r < 0) |
1290 | DMERR("%s: target unregister failed %d", | 1302 | DMERR("%s: target unregister failed %d", |