| author | Alasdair G Kergon <agk@redhat.com> | 2009-01-05 22:05:13 -0500 |
|---|---|---|
| committer | Alasdair G Kergon <agk@redhat.com> | 2009-01-05 22:05:13 -0500 |
| commit | fe9cf30eb8186ef267d1868dc9f12f2d0f40835a (patch) | |
| tree | 357db984073d7362b6c31dd431f77768c65800bf /drivers/md/dm-mpath.c | |
| parent | 784aae735d9b0bba3f8b9faef4c8b30df3bf0128 (diff) | |
dm mpath: move trigger_event to system workqueue
The same workqueue is used both for sending uevents and for processing queued I/O.
A deadlock has been reported in RHEL5 where sending a uevent blocked waiting
for the queued I/O to be processed. Use schedule_work(), which runs on the
shared system workqueue, for the asynchronous uevents instead.
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
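
To illustrate the pattern, here is a minimal sketch (not the dm-mpath code itself; `my_wq`, `struct my_dev`, and `my_fail_path()` are hypothetical names). queue_work() targets a driver-private workqueue, while schedule_work() targets the kernel's shared system workqueue, so moving the uevent work there takes it out from behind the driver's own queued I/O:

```c
/*
 * Minimal sketch of the workqueue split this patch makes.  The names
 * below (my_wq, struct my_dev, my_fail_path) are hypothetical; only
 * queue_work()/schedule_work() are the real kernel APIs involved.
 */
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;	/* driver-private, like kmultipathd */

struct my_dev {
	struct work_struct process_queued_io;	/* stand-in for the queued-I/O work */
	struct work_struct trigger_event;	/* the asynchronous uevent work */
};

static void my_fail_path(struct my_dev *d)
{
	/* Queued I/O stays on the private workqueue. */
	queue_work(my_wq, &d->process_queued_io);

	/*
	 * Before the patch this was queue_work(my_wq, &d->trigger_event),
	 * which could deadlock: the uevent blocked waiting for queued I/O
	 * that sat behind it on the same workqueue.
	 *
	 * After the patch the uevent runs on the shared system workqueue.
	 */
	schedule_work(&d->trigger_event);
}
```

The trade-off is that work items on the system workqueue should not block for long, which suits a uevent but not the I/O processing, so only trigger_event moves off the private queue.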
Diffstat (limited to 'drivers/md/dm-mpath.c')
 drivers/md/dm-mpath.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 345a26047ae0..095f77bf9681 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -889,7 +889,7 @@ static int fail_path(struct pgpath *pgpath)
 	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
 		      pgpath->path.dev->name, m->nr_valid_paths);
 
-	queue_work(kmultipathd, &m->trigger_event);
+	schedule_work(&m->trigger_event);
 	queue_work(kmultipathd, &pgpath->deactivate_path);
 
 out:
@@ -932,7 +932,7 @@ static int reinstate_path(struct pgpath *pgpath)
 	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
 		      pgpath->path.dev->name, m->nr_valid_paths);
 
-	queue_work(kmultipathd, &m->trigger_event);
+	schedule_work(&m->trigger_event);
 
 out:
 	spin_unlock_irqrestore(&m->lock, flags);
@@ -976,7 +976,7 @@ static void bypass_pg(struct multipath *m, struct priority_group *pg,
 
 	spin_unlock_irqrestore(&m->lock, flags);
 
-	queue_work(kmultipathd, &m->trigger_event);
+	schedule_work(&m->trigger_event);
 }
 
 /*
@@ -1006,7 +1006,7 @@ static int switch_pg_num(struct multipath *m, const char *pgstr)
 	}
 	spin_unlock_irqrestore(&m->lock, flags);
 
-	queue_work(kmultipathd, &m->trigger_event);
+	schedule_work(&m->trigger_event);
 	return 0;
 }
 