Diffstat (limited to 'drivers/md/dm-mpath.c')
 drivers/md/dm-mpath.c | 68 +++++++++++++++++++++++++++++++++++++++++++++-------------------------
 1 file changed, 43 insertions(+), 25 deletions(-)

diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 0c1b8520ef86..785806bdb248 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -63,6 +63,7 @@ struct multipath {
 	unsigned nr_priority_groups;
 	struct list_head priority_groups;
 	unsigned pg_init_required;	/* pg_init needs calling? */
+	unsigned pg_init_in_progress;	/* Only one pg_init allowed at once */
 
 	unsigned nr_valid_paths;	/* Total number of usable paths */
 	struct pgpath *current_pgpath;
@@ -72,7 +73,7 @@ struct multipath {
 
 	unsigned queue_io;		/* Must we queue all I/O? */
 	unsigned queue_if_no_path;	/* Queue I/O if last path fails? */
-	unsigned suspended;		/* Has dm core suspended our I/O? */
+	unsigned saved_queue_if_no_path;/* Saved state during suspension */
 
 	struct work_struct process_queued_ios;
 	struct bio_list queued_ios;
@@ -304,11 +305,12 @@ static int map_io(struct multipath *m, struct bio *bio, struct mpath_io *mpio,
 		m->queue_size--;
 
 	if ((pgpath && m->queue_io) ||
-	    (!pgpath && m->queue_if_no_path && !m->suspended)) {
+	    (!pgpath && m->queue_if_no_path)) {
 		/* Queue for the daemon to resubmit */
 		bio_list_add(&m->queued_ios, bio);
 		m->queue_size++;
-		if (m->pg_init_required || !m->queue_io)
+		if ((m->pg_init_required && !m->pg_init_in_progress) ||
+		    !m->queue_io)
 			queue_work(kmultipathd, &m->process_queued_ios);
 		pgpath = NULL;
 		r = 0;
@@ -333,8 +335,9 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path)
 
 	spin_lock_irqsave(&m->lock, flags);
 
+	m->saved_queue_if_no_path = m->queue_if_no_path;
 	m->queue_if_no_path = queue_if_no_path;
-	if (!m->queue_if_no_path)
+	if (!m->queue_if_no_path && m->queue_size)
 		queue_work(kmultipathd, &m->process_queued_ios);
 
 	spin_unlock_irqrestore(&m->lock, flags);
@@ -379,25 +382,31 @@ static void process_queued_ios(void *data)
 {
 	struct multipath *m = (struct multipath *) data;
 	struct hw_handler *hwh = &m->hw_handler;
-	struct pgpath *pgpath;
-	unsigned init_required, must_queue = 0;
+	struct pgpath *pgpath = NULL;
+	unsigned init_required = 0, must_queue = 1;
 	unsigned long flags;
 
 	spin_lock_irqsave(&m->lock, flags);
 
+	if (!m->queue_size)
+		goto out;
+
 	if (!m->current_pgpath)
 		__choose_pgpath(m);
 
 	pgpath = m->current_pgpath;
 
-	if ((pgpath && m->queue_io) ||
-	    (!pgpath && m->queue_if_no_path && !m->suspended))
-		must_queue = 1;
+	if ((pgpath && !m->queue_io) ||
+	    (!pgpath && !m->queue_if_no_path))
+		must_queue = 0;
 
-	init_required = m->pg_init_required;
-	if (init_required)
+	if (m->pg_init_required && !m->pg_init_in_progress) {
 		m->pg_init_required = 0;
+		m->pg_init_in_progress = 1;
+		init_required = 1;
+	}
 
+out:
 	spin_unlock_irqrestore(&m->lock, flags);
 
 	if (init_required)
@@ -752,6 +761,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
 static void multipath_dtr(struct dm_target *ti)
 {
 	struct multipath *m = (struct multipath *) ti->private;
+
+	flush_workqueue(kmultipathd);
 	free_multipath(m);
 }
 
@@ -765,6 +776,9 @@ static int multipath_map(struct dm_target *ti, struct bio *bio,
 	struct mpath_io *mpio;
 	struct multipath *m = (struct multipath *) ti->private;
 
+	if (bio_barrier(bio))
+		return -EOPNOTSUPP;
+
 	mpio = mempool_alloc(m->mpio_pool, GFP_NOIO);
 	dm_bio_record(&mpio->details, bio);
 
@@ -837,7 +851,7 @@ static int reinstate_path(struct pgpath *pgpath)
 	pgpath->path.is_active = 1;
 
 	m->current_pgpath = NULL;
-	if (!m->nr_valid_paths++)
+	if (!m->nr_valid_paths++ && m->queue_size)
 		queue_work(kmultipathd, &m->process_queued_ios);
 
 	queue_work(kmultipathd, &m->trigger_event);
@@ -963,12 +977,13 @@ void dm_pg_init_complete(struct path *path, unsigned err_flags)
 		bypass_pg(m, pg, 1);
 
 	spin_lock_irqsave(&m->lock, flags);
-	if (!err_flags)
-		m->queue_io = 0;
-	else {
+	if (err_flags) {
 		m->current_pgpath = NULL;
 		m->current_pg = NULL;
-	}
+	} else if (!m->pg_init_required)
+		m->queue_io = 0;
+
+	m->pg_init_in_progress = 0;
 	queue_work(kmultipathd, &m->process_queued_ios);
 	spin_unlock_irqrestore(&m->lock, flags);
 }
@@ -988,9 +1003,12 @@ static int do_end_io(struct multipath *m, struct bio *bio,
 	if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio))
 		return error;
 
+	if (error == -EOPNOTSUPP)
+		return error;
+
 	spin_lock(&m->lock);
 	if (!m->nr_valid_paths) {
-		if (!m->queue_if_no_path || m->suspended) {
+		if (!m->queue_if_no_path) {
 			spin_unlock(&m->lock);
 			return -EIO;
 		} else {
@@ -1051,27 +1069,27 @@ static int multipath_end_io(struct dm_target *ti, struct bio *bio,
 
 /*
  * Suspend can't complete until all the I/O is processed so if
- * the last path failed we will now error any queued I/O.
+ * the last path fails we must error any remaining I/O.
+ * Note that if the freeze_bdev fails while suspending, the
+ * queue_if_no_path state is lost - userspace should reset it.
  */
 static void multipath_presuspend(struct dm_target *ti)
 {
 	struct multipath *m = (struct multipath *) ti->private;
-	unsigned long flags;
 
-	spin_lock_irqsave(&m->lock, flags);
-	m->suspended = 1;
-	if (m->queue_if_no_path)
-		queue_work(kmultipathd, &m->process_queued_ios);
-	spin_unlock_irqrestore(&m->lock, flags);
+	queue_if_no_path(m, 0);
 }
 
+/*
+ * Restore the queue_if_no_path setting.
+ */
 static void multipath_resume(struct dm_target *ti)
 {
 	struct multipath *m = (struct multipath *) ti->private;
 	unsigned long flags;
 
 	spin_lock_irqsave(&m->lock, flags);
-	m->suspended = 0;
+	m->queue_if_no_path = m->saved_queue_if_no_path;
 	spin_unlock_irqrestore(&m->lock, flags);
 }
 
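For readers tracing the behavioural change above: the patch drops the old 'suspended' flag and instead saves the queue_if_no_path policy before suspension (multipath_presuspend now calls queue_if_no_path(m, 0)) and restores it in multipath_resume. The stand-alone C sketch below models just that save/restore pattern; the struct and function names are illustrative stand-ins, not the kernel code, and the spinlock, workqueue and device-mapper plumbing are deliberately omitted.

/*
 * Illustrative sketch only -- NOT the kernel code. It models the
 * save/restore of queue_if_no_path that the patch above introduces.
 */
#include <stdio.h>

struct multipath_state {
	unsigned queue_if_no_path;	 /* queue I/O if last path fails? */
	unsigned saved_queue_if_no_path; /* policy saved across suspension */
};

/* Mirrors queue_if_no_path(): change the policy, remembering the old one. */
static void set_queue_if_no_path(struct multipath_state *m, unsigned enable)
{
	m->saved_queue_if_no_path = m->queue_if_no_path;
	m->queue_if_no_path = enable;
}

/* Mirrors multipath_presuspend(): disable queueing so queued I/O drains. */
static void presuspend(struct multipath_state *m)
{
	set_queue_if_no_path(m, 0);
}

/* Mirrors multipath_resume(): put the user's saved policy back. */
static void resume(struct multipath_state *m)
{
	m->queue_if_no_path = m->saved_queue_if_no_path;
}

int main(void)
{
	struct multipath_state m = { .queue_if_no_path = 1 };

	presuspend(&m);
	printf("suspended: queue_if_no_path=%u\n", m.queue_if_no_path); /* 0 */
	resume(&m);
	printf("resumed:   queue_if_no_path=%u\n", m.queue_if_no_path); /* 1 */
	return 0;
}

The saved copy is what lets suspend drain queued I/O with queueing disabled while still preserving the user-visible policy across the cycle - except when freeze_bdev fails mid-suspend, which is exactly the caveat the updated comment documents.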