diff options
author    Hannes Reinecke <hare@suse.de>    2013-10-01 05:49:56 -0400
committer Mike Snitzer <snitzer@redhat.com> 2013-11-05 11:20:34 -0500
commit    b63349a7a53d34ffde70cb4feec48ea9e6f5e97b (patch)
tree      d0e3b95ed6e2d8dbb219fce7fa8008351b8c1b8b /drivers/md/dm-mpath.c
parent    954a73d5d3073df2231820c718fdd2f18b0fe4c9 (diff)
dm mpath: requeue I/O during pg_init
When pg_init is running no I/O can be submitted to the underlying
devices, as the path priority etc might change. When using queue_io for
this, requests will be piling up within multipath as the block I/O
scheduler just sees a _very fast_ device. All of this queued I/O has to
be resubmitted from within multipathing once pg_init is done.
This approach has the problem that it's virtually impossible to
abort I/O when pg_init is running, and we're adding heavy load
to the devices after pg_init since all of the queued I/O needs to be
resubmitted _before_ any requests can be pulled off of the request queue
and normal operation continues.
This patch will requeue the I/O that triggers the pg_init call, and
return 'busy' when pg_init is in progress. With these changes the block
I/O scheduler will stop submitting I/O during pg_init, resulting in a
quicker path switch and less I/O pressure (and memory consumption) after
pg_init.
Signed-off-by: Hannes Reinecke <hare@suse.de>
[patch header edited for clarity and typos by Mike Snitzer]
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Diffstat (limited to 'drivers/md/dm-mpath.c')
-rw-r--r--    drivers/md/dm-mpath.c    16
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 799e479db93b..6eb9dc9ef8f3 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -391,13 +391,16 @@ static int map_io(struct multipath *m, struct request *clone,
 	if (was_queued)
 		m->queue_size--;
 
-	if ((pgpath && m->queue_io) ||
-	    (!pgpath && m->queue_if_no_path)) {
+	if (m->pg_init_required) {
+		if (!m->pg_init_in_progress)
+			queue_work(kmultipathd, &m->process_queued_ios);
+		r = DM_MAPIO_REQUEUE;
+	} else if ((pgpath && m->queue_io) ||
+		   (!pgpath && m->queue_if_no_path)) {
 		/* Queue for the daemon to resubmit */
 		list_add_tail(&clone->queuelist, &m->queued_ios);
 		m->queue_size++;
-		if ((m->pg_init_required && !m->pg_init_in_progress) ||
-		    !m->queue_io)
+		if (!m->queue_io)
 			queue_work(kmultipathd, &m->process_queued_ios);
 		pgpath = NULL;
 		r = DM_MAPIO_SUBMITTED;
@@ -1677,6 +1680,11 @@ static int multipath_busy(struct dm_target *ti)
 
 	spin_lock_irqsave(&m->lock, flags);
 
+	/* pg_init in progress, requeue until done */
+	if (m->pg_init_in_progress) {
+		busy = 1;
+		goto out;
+	}
+
 	/* Guess which priority_group will be used at next mapping time */
 	if (unlikely(!m->current_pgpath && m->next_pg))
 		pg = m->next_pg;