author		Kiyoshi Ueda <k-ueda@ct.jp.nec.com>	2010-02-16 13:43:01 -0500
committer	Alasdair G Kergon <agk@redhat.com>	2010-02-16 13:43:01 -0500
commit		9eef87da2a8ea4920e0d913ff977cac064b68ee0 (patch)
tree		811eb358ce2fa66517b0cf59501522c17dfbe6bc /drivers/md
parent		558569aa9d83e016295bac77d900342908d7fd85 (diff)
dm mpath: fix stall when requeueing io
This patch fixes a problem where the system may stall if the target's ->map_rq
returns DM_MAPIO_REQUEUE in map_request().
For example, the stall happens on a 1-CPU box when a dm-mpath device with
queue_if_no_path bounces between all-paths-down and paths-up under I/O load.
When the target's ->map_rq returns DM_MAPIO_REQUEUE, map_request() requeues
the request and returns to dm_request_fn(). dm_request_fn() then does not
exit the I/O dispatching loop and immediately starts processing
the requeued request again.
This map-and-requeue loop can run with interrupts disabled,
so a 1-CPU system can stall when this situation occurs.
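To make the loop concrete, here is a minimal user-space sketch, not kernel
code: the request queue is reduced to a counter, and map_rq(), map_request()
and dm_request_fn() are simplified stand-ins for the functions shown in the
diff below; only the DM_MAPIO_* return values are taken from the real
interface. Because the pre-patch map_request() gives its caller no indication
that the request was requeued, the dispatch loop spins forever once all paths
are down:

/*
 * Stand-alone illustration of the stall (not kernel code): the queue is
 * modelled as a counter and the dispatch loop never makes progress
 * because the requeued request is picked up again immediately.
 */
#include <stdio.h>
#include <stdbool.h>

#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REQUEUE	2

static bool all_paths_down = true;	/* queue_if_no_path, no usable path */
static int pending = 1;			/* one request sitting on the queue */

/* Models the target's ->map_rq: requeue while no path is available. */
static int map_rq(void)
{
	return all_paths_down ? DM_MAPIO_REQUEUE : DM_MAPIO_SUBMITTED;
}

/* Pre-patch map_request(): the requeue is invisible to the caller. */
static void map_request(void)
{
	if (map_rq() == DM_MAPIO_REQUEUE)
		pending++;	/* request goes straight back on the queue */
}

/* Pre-patch dm_request_fn(): keeps dispatching while requests remain. */
static void dm_request_fn(void)
{
	unsigned long spins = 0;

	while (pending > 0) {
		pending--;	/* take the request off the queue */
		map_request();	/* ...which may silently put it back */
		if (++spins % 100000000UL == 0)
			printf("still spinning after %lu iterations\n", spins);
	}
}

int main(void)
{
	dm_request_fn();	/* never returns while the paths stay down */
	return 0;
}

In the real driver the same pattern runs inside dm_request_fn()'s dispatching
loop with the queue lock and interrupts in play, which is why a 1-CPU machine
gets no chance to run anything else.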
For example, the commands below can stall my 1-CPU box within a minute or so:
# dmsetup table mp
mp: 0 2097152 multipath 1 queue_if_no_path 0 1 1 service-time 0 1 2 8:144 1 1
# while true; do dd if=/dev/mapper/mp of=/dev/null bs=1M count=100; done &
# while true; do
> dmsetup message mp 0 "fail_path 8:144"
> dmsetup suspend --noflush mp
> dmsetup resume mp
> dmsetup message mp 0 "reinstate_path 8:144"
> done
To fix the problem above, this patch changes dm_request_fn() to exit
the I/O dispatching loop as soon as a request has been requeued in map_request().
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Cc: stable@kernel.org
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/dm.c	21
1 file changed, 17 insertions(+), 4 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3167480b532c..aa4e2aa86d49 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1595,10 +1595,15 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq)
 	return BLKPREP_OK;
 }
 
-static void map_request(struct dm_target *ti, struct request *clone,
-			struct mapped_device *md)
+/*
+ * Returns:
+ * 0  : the request has been processed (not requeued)
+ * !0 : the request has been requeued
+ */
+static int map_request(struct dm_target *ti, struct request *clone,
+		       struct mapped_device *md)
 {
-	int r;
+	int r, requeued = 0;
 	struct dm_rq_target_io *tio = clone->end_io_data;
 
 	/*
@@ -1625,6 +1630,7 @@ static void map_request(struct dm_target *ti, struct request *clone,
 	case DM_MAPIO_REQUEUE:
 		/* The target wants to requeue the I/O */
 		dm_requeue_unmapped_request(clone);
+		requeued = 1;
 		break;
 	default:
 		if (r > 0) {
@@ -1636,6 +1642,8 @@ static void map_request(struct dm_target *ti, struct request *clone,
 		dm_kill_unmapped_request(clone, r);
 		break;
 	}
+
+	return requeued;
 }
 
 /*
@@ -1677,12 +1685,17 @@ static void dm_request_fn(struct request_queue *q)
 		atomic_inc(&md->pending[rq_data_dir(clone)]);
 
 		spin_unlock(q->queue_lock);
-		map_request(ti, clone, md);
+		if (map_request(ti, clone, md))
+			goto requeued;
+
 		spin_lock_irq(q->queue_lock);
 	}
 
 	goto out;
 
+requeued:
+	spin_lock_irq(q->queue_lock);
+
 plug_and_out:
 	if (!elv_queue_empty(q))
 		/* Some requests still remain, retry later */