diff options
author | Hannes Reinecke <hare@suse.de> | 2014-02-28 09:33:45 -0500 |
---|---|---|
committer | Mike Snitzer <snitzer@redhat.com> | 2014-03-27 16:56:24 -0400 |
commit | 3e9f1be1b4079bfe689ef6be5174f3177b3fd2aa (patch) | |
tree | 7f7279aa21963b65f95a60ca45aec93190526172 /drivers/md/dm-mpath.c | |
parent | e809917735ebf1b9a56c24e877ce0d320baee2ec (diff) |
dm mpath: remove process_queued_ios()
process_queued_ios() has served 3 functions:
1) select pg and pgpath if none is selected
2) start pg_init if requested
3) dispatch queued IOs when pg is ready
Basically, a call to queue_work(process_queued_ios) can be replaced by
dm_table_run_md_queue_async(), which runs request queue and ends up
calling map_io(), which does 1), 2) and 3).
Exception is when !pg_ready() (which means either pg_init is running or
requested), then multipath_busy() prevents map_io() being called from
request_fn.
If pg_init is running, it should be ok as long as pg_init_done() does
the right thing when pg_init is completed, i.e.: restart pg_init if
!pg_ready() or call dm_table_run_md_queue_async() to kick map_io().
If pg_init is requested, we have to make sure the request is detected
and pg_init will be started. pg_init is requested in 3 places:
a) __choose_pgpath() in map_io()
b) __choose_pgpath() in multipath_ioctl()
c) pg_init retry in pg_init_done()
a) is ok because map_io() calls __pg_init_all_paths(), which does 2).
b) needs a call to __pg_init_all_paths(), which does 2).
c) needs a call to __pg_init_all_paths(), which does 2).
So this patch removes process_queued_ios() and ensures that
__pg_init_all_paths() is called at the appropriate locations.
Signed-off-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Reviewed-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Diffstat (limited to 'drivers/md/dm-mpath.c')
-rw-r--r-- | drivers/md/dm-mpath.c | 69 |
1 files changed, 27 insertions, 42 deletions
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index e1c3ed31c9df..1c6a3f8da24d 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c | |||
@@ -93,8 +93,6 @@ struct multipath { | |||
93 | unsigned pg_init_count; /* Number of times pg_init called */ | 93 | unsigned pg_init_count; /* Number of times pg_init called */ |
94 | unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */ | 94 | unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */ |
95 | 95 | ||
96 | struct work_struct process_queued_ios; | ||
97 | |||
98 | struct work_struct trigger_event; | 96 | struct work_struct trigger_event; |
99 | 97 | ||
100 | /* | 98 | /* |
@@ -119,7 +117,6 @@ typedef int (*action_fn) (struct pgpath *pgpath); | |||
119 | static struct kmem_cache *_mpio_cache; | 117 | static struct kmem_cache *_mpio_cache; |
120 | 118 | ||
121 | static struct workqueue_struct *kmultipathd, *kmpath_handlerd; | 119 | static struct workqueue_struct *kmultipathd, *kmpath_handlerd; |
122 | static void process_queued_ios(struct work_struct *work); | ||
123 | static void trigger_event(struct work_struct *work); | 120 | static void trigger_event(struct work_struct *work); |
124 | static void activate_path(struct work_struct *work); | 121 | static void activate_path(struct work_struct *work); |
125 | static int __pgpath_busy(struct pgpath *pgpath); | 122 | static int __pgpath_busy(struct pgpath *pgpath); |
@@ -197,7 +194,6 @@ static struct multipath *alloc_multipath(struct dm_target *ti) | |||
197 | spin_lock_init(&m->lock); | 194 | spin_lock_init(&m->lock); |
198 | m->queue_io = 1; | 195 | m->queue_io = 1; |
199 | m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT; | 196 | m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT; |
200 | INIT_WORK(&m->process_queued_ios, process_queued_ios); | ||
201 | INIT_WORK(&m->trigger_event, trigger_event); | 197 | INIT_WORK(&m->trigger_event, trigger_event); |
202 | init_waitqueue_head(&m->pg_init_wait); | 198 | init_waitqueue_head(&m->pg_init_wait); |
203 | mutex_init(&m->work_mutex); | 199 | mutex_init(&m->work_mutex); |
@@ -254,16 +250,21 @@ static void clear_mapinfo(struct multipath *m, union map_info *info) | |||
254 | * Path selection | 250 | * Path selection |
255 | *-----------------------------------------------*/ | 251 | *-----------------------------------------------*/ |
256 | 252 | ||
257 | static void __pg_init_all_paths(struct multipath *m) | 253 | static int __pg_init_all_paths(struct multipath *m) |
258 | { | 254 | { |
259 | struct pgpath *pgpath; | 255 | struct pgpath *pgpath; |
260 | unsigned long pg_init_delay = 0; | 256 | unsigned long pg_init_delay = 0; |
261 | 257 | ||
262 | if (m->pg_init_in_progress || m->pg_init_disabled) | 258 | if (m->pg_init_in_progress || m->pg_init_disabled) |
263 | return; | 259 | return 0; |
264 | 260 | ||
265 | m->pg_init_count++; | 261 | m->pg_init_count++; |
266 | m->pg_init_required = 0; | 262 | m->pg_init_required = 0; |
263 | |||
264 | /* Check here to reset pg_init_required */ | ||
265 | if (!m->current_pg) | ||
266 | return 0; | ||
267 | |||
267 | if (m->pg_init_delay_retry) | 268 | if (m->pg_init_delay_retry) |
268 | pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ? | 269 | pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ? |
269 | m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS); | 270 | m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS); |
@@ -275,6 +276,7 @@ static void __pg_init_all_paths(struct multipath *m) | |||
275 | pg_init_delay)) | 276 | pg_init_delay)) |
276 | m->pg_init_in_progress++; | 277 | m->pg_init_in_progress++; |
277 | } | 278 | } |
279 | return m->pg_init_in_progress; | ||
278 | } | 280 | } |
279 | 281 | ||
280 | static void __switch_pg(struct multipath *m, struct pgpath *pgpath) | 282 | static void __switch_pg(struct multipath *m, struct pgpath *pgpath) |
@@ -436,40 +438,13 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path, | |||
436 | m->saved_queue_if_no_path = queue_if_no_path; | 438 | m->saved_queue_if_no_path = queue_if_no_path; |
437 | m->queue_if_no_path = queue_if_no_path; | 439 | m->queue_if_no_path = queue_if_no_path; |
438 | if (!m->queue_if_no_path) | 440 | if (!m->queue_if_no_path) |
439 | queue_work(kmultipathd, &m->process_queued_ios); | 441 | dm_table_run_md_queue_async(m->ti->table); |
440 | 442 | ||
441 | spin_unlock_irqrestore(&m->lock, flags); | 443 | spin_unlock_irqrestore(&m->lock, flags); |
442 | 444 | ||
443 | return 0; | 445 | return 0; |
444 | } | 446 | } |
445 | 447 | ||
446 | static void process_queued_ios(struct work_struct *work) | ||
447 | { | ||
448 | struct multipath *m = | ||
449 | container_of(work, struct multipath, process_queued_ios); | ||
450 | struct pgpath *pgpath = NULL; | ||
451 | unsigned must_queue = 1; | ||
452 | unsigned long flags; | ||
453 | |||
454 | spin_lock_irqsave(&m->lock, flags); | ||
455 | |||
456 | if (!m->current_pgpath) | ||
457 | __choose_pgpath(m, 0); | ||
458 | |||
459 | pgpath = m->current_pgpath; | ||
460 | |||
461 | if ((pgpath && !m->queue_io) || | ||
462 | (!pgpath && !m->queue_if_no_path)) | ||
463 | must_queue = 0; | ||
464 | |||
465 | if (pgpath && m->pg_init_required) | ||
466 | __pg_init_all_paths(m); | ||
467 | |||
468 | spin_unlock_irqrestore(&m->lock, flags); | ||
469 | if (!must_queue) | ||
470 | dm_table_run_md_queue_async(m->ti->table); | ||
471 | } | ||
472 | |||
473 | /* | 448 | /* |
474 | * An event is triggered whenever a path is taken out of use. | 449 | * An event is triggered whenever a path is taken out of use. |
475 | * Includes path failure and PG bypass. | 450 | * Includes path failure and PG bypass. |
@@ -1016,7 +991,7 @@ static int reinstate_path(struct pgpath *pgpath) | |||
1016 | 991 | ||
1017 | if (!m->nr_valid_paths++) { | 992 | if (!m->nr_valid_paths++) { |
1018 | m->current_pgpath = NULL; | 993 | m->current_pgpath = NULL; |
1019 | queue_work(kmultipathd, &m->process_queued_ios); | 994 | dm_table_run_md_queue_async(m->ti->table); |
1020 | } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) { | 995 | } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) { |
1021 | if (queue_work(kmpath_handlerd, &pgpath->activate_path.work)) | 996 | if (queue_work(kmpath_handlerd, &pgpath->activate_path.work)) |
1022 | m->pg_init_in_progress++; | 997 | m->pg_init_in_progress++; |
@@ -1212,11 +1187,12 @@ static void pg_init_done(void *data, int errors) | |||
1212 | /* Activations of other paths are still on going */ | 1187 | /* Activations of other paths are still on going */ |
1213 | goto out; | 1188 | goto out; |
1214 | 1189 | ||
1215 | if (!m->pg_init_required) | 1190 | if (m->pg_init_required) { |
1216 | m->queue_io = 0; | 1191 | m->pg_init_delay_retry = delay_retry; |
1217 | 1192 | if (__pg_init_all_paths(m)) | |
1218 | m->pg_init_delay_retry = delay_retry; | 1193 | goto out; |
1219 | queue_work(kmultipathd, &m->process_queued_ios); | 1194 | } |
1195 | m->queue_io = 0; | ||
1220 | 1196 | ||
1221 | /* | 1197 | /* |
1222 | * Wake up any thread waiting to suspend. | 1198 | * Wake up any thread waiting to suspend. |
@@ -1592,8 +1568,17 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd, | |||
1592 | r = err; | 1568 | r = err; |
1593 | } | 1569 | } |
1594 | 1570 | ||
1595 | if (r == -ENOTCONN && !fatal_signal_pending(current)) | 1571 | if (r == -ENOTCONN && !fatal_signal_pending(current)) { |
1596 | queue_work(kmultipathd, &m->process_queued_ios); | 1572 | spin_lock_irqsave(&m->lock, flags); |
1573 | if (!m->current_pg) { | ||
1574 | /* Path status changed, redo selection */ | ||
1575 | __choose_pgpath(m, 0); | ||
1576 | } | ||
1577 | if (m->pg_init_required) | ||
1578 | __pg_init_all_paths(m); | ||
1579 | spin_unlock_irqrestore(&m->lock, flags); | ||
1580 | dm_table_run_md_queue_async(m->ti->table); | ||
1581 | } | ||
1597 | 1582 | ||
1598 | return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg); | 1583 | return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg); |
1599 | } | 1584 | } |