author		Alasdair G Kergon <agk@redhat.com>	2005-07-12 18:53:03 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-07-12 19:19:10 -0400
commit		436d41087d047b61f8ab0604dc74fff3240a8933 (patch)
tree		46bab3a0cf02a7514b4faa5470a76326e6f8bc72 /drivers
parent		a044d016896d2717694003f00d31a98194077511 (diff)
[PATCH] device-mapper multipath: Avoid possible suspension deadlock
To avoid deadlock when suspending a multipath device after all its paths have
failed, stop queueing any I/O that is about to fail *before* calling
freeze_bdev instead of after.
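
Concretely, dm_suspend() now calls the targets' presuspend hooks (where multipath
turns queueing off) before __lock_fs(), which the message above implies is where
freeze_bdev is called. A condensed view of the dm.c hunk below, with the rest of
the function omitted:

	map = dm_get_table(md);
	if (map)
		/* This does not get reverted if there's an error later. */
		dm_table_presuspend_targets(map);

	error = __lock_fs(md);		/* the freeze_bdev step */
	if (error) {
		dm_table_put(map);
		goto out_read_unlock;
	}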
Instead of setting a multipath 'suspended' flag, which would have to be reset
if an error occurred during the process, save the previous queueing state and
leave it to userspace to restore it if it wishes.
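
The resulting presuspend/resume pair is small; this is a condensed view of the
dm-mpath.c changes below (queue_if_no_path() is the existing helper, which after
this patch also records the old value in saved_queue_if_no_path before
overwriting it):

	static void multipath_presuspend(struct dm_target *ti)
	{
		struct multipath *m = (struct multipath *) ti->private;

		/* saves the current queue_if_no_path, then disables queueing */
		queue_if_no_path(m, 0);
	}

	static void multipath_resume(struct dm_target *ti)
	{
		struct multipath *m = (struct multipath *) ti->private;
		unsigned long flags;

		spin_lock_irqsave(&m->lock, flags);
		m->queue_if_no_path = m->saved_queue_if_no_path;
		spin_unlock_irqrestore(&m->lock, flags);
	}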
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/md/dm-mpath.c	25
-rw-r--r--	drivers/md/dm.c		12
2 files changed, 20 insertions, 17 deletions
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index fa72f0153206..98da8eee2d2c 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -72,7 +72,7 @@ struct multipath {
 
 	unsigned queue_io;		/* Must we queue all I/O? */
 	unsigned queue_if_no_path;	/* Queue I/O if last path fails? */
-	unsigned suspended;		/* Has dm core suspended our I/O? */
+	unsigned saved_queue_if_no_path;/* Saved state during suspension */
 
 	struct work_struct process_queued_ios;
 	struct bio_list queued_ios;
@@ -304,7 +304,7 @@ static int map_io(struct multipath *m, struct bio *bio, struct mpath_io *mpio,
 		m->queue_size--;
 
 	if ((pgpath && m->queue_io) ||
-	    (!pgpath && m->queue_if_no_path && !m->suspended)) {
+	    (!pgpath && m->queue_if_no_path)) {
 		/* Queue for the daemon to resubmit */
 		bio_list_add(&m->queued_ios, bio);
 		m->queue_size++;
@@ -333,6 +333,7 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path)
 
 	spin_lock_irqsave(&m->lock, flags);
 
+	m->saved_queue_if_no_path = m->queue_if_no_path;
 	m->queue_if_no_path = queue_if_no_path;
 	if (!m->queue_if_no_path)
 		queue_work(kmultipathd, &m->process_queued_ios);
@@ -391,7 +392,7 @@ static void process_queued_ios(void *data)
 	pgpath = m->current_pgpath;
 
 	if ((pgpath && m->queue_io) ||
-	    (!pgpath && m->queue_if_no_path && !m->suspended))
+	    (!pgpath && m->queue_if_no_path))
 		must_queue = 1;
 
 	init_required = m->pg_init_required;
@@ -998,7 +999,7 @@ static int do_end_io(struct multipath *m, struct bio *bio,
 
 	spin_lock(&m->lock);
 	if (!m->nr_valid_paths) {
-		if (!m->queue_if_no_path || m->suspended) {
+		if (!m->queue_if_no_path) {
 			spin_unlock(&m->lock);
 			return -EIO;
 		} else {
@@ -1059,27 +1060,27 @@ static int multipath_end_io(struct dm_target *ti, struct bio *bio,
 
 /*
  * Suspend can't complete until all the I/O is processed so if
- * the last path failed we will now error any queued I/O.
+ * the last path fails we must error any remaining I/O.
+ * Note that if the freeze_bdev fails while suspending, the
+ * queue_if_no_path state is lost - userspace should reset it.
  */
 static void multipath_presuspend(struct dm_target *ti)
 {
 	struct multipath *m = (struct multipath *) ti->private;
-	unsigned long flags;
 
-	spin_lock_irqsave(&m->lock, flags);
-	m->suspended = 1;
-	if (m->queue_if_no_path)
-		queue_work(kmultipathd, &m->process_queued_ios);
-	spin_unlock_irqrestore(&m->lock, flags);
+	queue_if_no_path(m, 0);
 }
 
+/*
+ * Restore the queue_if_no_path setting.
+ */
 static void multipath_resume(struct dm_target *ti)
 {
 	struct multipath *m = (struct multipath *) ti->private;
 	unsigned long flags;
 
 	spin_lock_irqsave(&m->lock, flags);
-	m->suspended = 0;
+	m->queue_if_no_path = m->saved_queue_if_no_path;
 	spin_unlock_irqrestore(&m->lock, flags);
 }
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 5d40555b42ba..bb3ad79c14d7 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1055,14 +1055,17 @@ int dm_suspend(struct mapped_device *md)
 	if (test_bit(DMF_BLOCK_IO, &md->flags))
 		goto out_read_unlock;
 
-	error = __lock_fs(md);
-	if (error)
-		goto out_read_unlock;
-
 	map = dm_get_table(md);
 	if (map)
+		/* This does not get reverted if there's an error later. */
 		dm_table_presuspend_targets(map);
 
+	error = __lock_fs(md);
+	if (error) {
+		dm_table_put(map);
+		goto out_read_unlock;
+	}
+
 	up_read(&md->lock);
 
 	/*
@@ -1121,7 +1124,6 @@ int dm_suspend(struct mapped_device *md)
 	return 0;
 
 out_unfreeze:
-	/* FIXME Undo dm_table_presuspend_targets */
 	__unlock_fs(md);
 	clear_bit(DMF_BLOCK_IO, &md->flags);
 out_write_unlock: