Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/dm-mpath.c	63
1 file changed, 26 insertions, 37 deletions
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 545abcc25c42..838f01b1dd30 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -35,6 +35,7 @@ struct pgpath {
 
 	struct dm_path path;
 	struct work_struct deactivate_path;
+	struct work_struct activate_path;
 };
 
 #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
@@ -64,8 +65,6 @@ struct multipath {
 	spinlock_t lock;
 
 	const char *hw_handler_name;
-	struct work_struct activate_path;
-	struct pgpath *pgpath_to_activate;
 	unsigned nr_priority_groups;
 	struct list_head priority_groups;
 	unsigned pg_init_required;	/* pg_init needs calling? */
@@ -128,6 +127,7 @@ static struct pgpath *alloc_pgpath(void)
 	if (pgpath) {
 		pgpath->is_active = 1;
 		INIT_WORK(&pgpath->deactivate_path, deactivate_path);
+		INIT_WORK(&pgpath->activate_path, activate_path);
 	}
 
 	return pgpath;
@@ -160,7 +160,6 @@ static struct priority_group *alloc_priority_group(void)
 
 static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
 {
-	unsigned long flags;
 	struct pgpath *pgpath, *tmp;
 	struct multipath *m = ti->private;
 
@@ -169,10 +168,6 @@ static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
 		if (m->hw_handler_name)
 			scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
 		dm_put_device(ti, pgpath->path.dev);
-		spin_lock_irqsave(&m->lock, flags);
-		if (m->pgpath_to_activate == pgpath)
-			m->pgpath_to_activate = NULL;
-		spin_unlock_irqrestore(&m->lock, flags);
 		free_pgpath(pgpath);
 	}
 }
@@ -202,7 +197,6 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
 		m->queue_io = 1;
 		INIT_WORK(&m->process_queued_ios, process_queued_ios);
 		INIT_WORK(&m->trigger_event, trigger_event);
-		INIT_WORK(&m->activate_path, activate_path);
 		m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
 		if (!m->mpio_pool) {
 			kfree(m);
@@ -427,8 +421,8 @@ static void process_queued_ios(struct work_struct *work)
 {
 	struct multipath *m =
 		container_of(work, struct multipath, process_queued_ios);
-	struct pgpath *pgpath = NULL;
-	unsigned init_required = 0, must_queue = 1;
+	struct pgpath *pgpath = NULL, *tmp;
+	unsigned must_queue = 1;
 	unsigned long flags;
 
 	spin_lock_irqsave(&m->lock, flags);
@@ -446,19 +440,15 @@ static void process_queued_ios(struct work_struct *work)
 		must_queue = 0;
 
 	if (m->pg_init_required && !m->pg_init_in_progress && pgpath) {
-		m->pgpath_to_activate = pgpath;
 		m->pg_init_count++;
 		m->pg_init_required = 0;
-		m->pg_init_in_progress = 1;
-		init_required = 1;
+		list_for_each_entry(tmp, &pgpath->pg->pgpaths, list) {
+			if (queue_work(kmpath_handlerd, &tmp->activate_path))
+				m->pg_init_in_progress++;
+		}
 	}
-
 out:
 	spin_unlock_irqrestore(&m->lock, flags);
-
-	if (init_required)
-		queue_work(kmpath_handlerd, &m->activate_path);
-
 	if (!must_queue)
 		dispatch_queued_ios(m);
 }
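
Note (not part of this patch): the hunk above replaces the single pgpath_to_activate handoff with a fan-out, queueing one activate_path work item per path in the selected priority group and bumping pg_init_in_progress only for items queue_work() actually accepted. A minimal standalone C model of that accounting, with a stub queue_work() that mimics the kernel contract of returning nonzero only when the item was not already pending:

#include <stdio.h>

#define NR_PATHS 3

struct work_struct { int pending; };

/* stub: like the kernel's queue_work(), returns nonzero only if
 * the item was not already queued */
static int queue_work(struct work_struct *w)
{
	if (w->pending)
		return 0;
	w->pending = 1;
	return 1;
}

int main(void)
{
	struct work_struct activate_path[NR_PATHS] = { { 0 } };
	int pg_init_in_progress = 0;

	/* fan-out, as in process_queued_ios(): one activation per path */
	for (int i = 0; i < NR_PATHS; i++)
		if (queue_work(&activate_path[i]))
			pg_init_in_progress++;

	/* join, as in pg_init_done(): each completion decrements the
	 * counter and only the last one re-kicks the queued I/O */
	for (int i = 0; i < NR_PATHS; i++) {
		activate_path[i].pending = 0;
		if (!--pg_init_in_progress)
			printf("last pg_init done: queue process_queued_ios\n");
	}
	return 0;
}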
@@ -946,9 +936,13 @@ static int reinstate_path(struct pgpath *pgpath)
 
 	pgpath->is_active = 1;
 
-	m->current_pgpath = NULL;
-	if (!m->nr_valid_paths++ && m->queue_size)
+	if (!m->nr_valid_paths++ && m->queue_size) {
+		m->current_pgpath = NULL;
 		queue_work(kmultipathd, &m->process_queued_ios);
+	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
+		if (queue_work(kmpath_handlerd, &pgpath->activate_path))
+			m->pg_init_in_progress++;
+	}
 
 	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
 		       pgpath->path.dev->name, m->nr_valid_paths);
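
Note (not part of this patch): a path reinstated into the priority group that is currently carrying I/O cannot be used until its own pg_init completes, so reinstate_path() now queues the per-path activation directly instead of waiting for process_queued_ios(). Because queue_work() returns nonzero only when the work item was not already pending, the guarded increment cannot double-count an activation that is already queued.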
@@ -1124,35 +1118,30 @@ static void pg_init_done(struct dm_path *path, int errors)
 
 	spin_lock_irqsave(&m->lock, flags);
 	if (errors) {
-		DMERR("Could not failover device. Error %d.", errors);
-		m->current_pgpath = NULL;
-		m->current_pg = NULL;
+		if (pgpath == m->current_pgpath) {
+			DMERR("Could not failover device. Error %d.", errors);
+			m->current_pgpath = NULL;
+			m->current_pg = NULL;
+		}
 	} else if (!m->pg_init_required) {
 		m->queue_io = 0;
 		pg->bypassed = 0;
 	}
 
-	m->pg_init_in_progress = 0;
-	queue_work(kmultipathd, &m->process_queued_ios);
+	m->pg_init_in_progress--;
+	if (!m->pg_init_in_progress)
+		queue_work(kmultipathd, &m->process_queued_ios);
 	spin_unlock_irqrestore(&m->lock, flags);
 }
 
 static void activate_path(struct work_struct *work)
 {
 	int ret;
-	struct multipath *m =
-		container_of(work, struct multipath, activate_path);
-	struct dm_path *path;
-	unsigned long flags;
+	struct pgpath *pgpath =
+		container_of(work, struct pgpath, activate_path);
 
-	spin_lock_irqsave(&m->lock, flags);
-	path = &m->pgpath_to_activate->path;
-	m->pgpath_to_activate = NULL;
-	spin_unlock_irqrestore(&m->lock, flags);
-	if (!path)
-		return;
-	ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
-	pg_init_done(path, ret);
+	ret = scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev));
+	pg_init_done(&pgpath->path, ret);
 }
 
 /*
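
Note (not part of this patch): embedding the work item in struct pgpath is what lets activate_path() recover its path with container_of() alone, which removes the pgpath_to_activate pointer and the lock round-trip that protected it. A standalone plain-C sketch of that recovery pattern, using a simplified container_of that performs the same pointer arithmetic as the kernel macro:

#include <stdio.h>
#include <stddef.h>

/* simplified: same pointer arithmetic as the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };

struct pgpath {
	const char *name;	/* stands in for struct dm_path */
	struct work_struct activate_path;
};

static void activate_path(struct work_struct *work)
{
	struct pgpath *pgpath =
		container_of(work, struct pgpath, activate_path);

	printf("activating path %s\n", pgpath->name);
}

int main(void)
{
	struct pgpath p = { .name = "8:16", .activate_path = { 0 } };

	/* the workqueue hands the handler only the embedded work item */
	activate_path(&p.activate_path);
	return 0;
}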