 drivers/md/dm.c | 56 ++++++++++++++++++++++++++++++++++++++------------------
 1 file changed, 38 insertions(+), 18 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4e09b6ff5b49..6748e0c4df1f 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -865,10 +865,14 @@ static void dm_done(struct request *clone, int error, bool mapped)
 {
 	int r = error;
 	struct dm_rq_target_io *tio = clone->end_io_data;
-	dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
+	dm_request_endio_fn rq_end_io = NULL;
 
-	if (mapped && rq_end_io)
-		r = rq_end_io(tio->ti, clone, error, &tio->info);
+	if (tio->ti) {
+		rq_end_io = tio->ti->type->rq_end_io;
+
+		if (mapped && rq_end_io)
+			r = rq_end_io(tio->ti, clone, error, &tio->info);
+	}
 
 	if (r <= 0)
 		/* The target wants to complete the I/O */
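
The hunk above makes dm_done() safe when tio->ti is NULL, which can now happen once requests that map to no valid target are killed before ever being mapped (see the dm_request_fn hunk below). A minimal userspace sketch of the same NULL-guarded callback dispatch; the names here are illustrative, not the kernel's types:

#include <stdio.h>
#include <stddef.h>

typedef int (*endio_fn)(int error);

struct target {
	endio_fn rq_end_io;	/* optional per-target completion hook */
};

static int done(struct target *ti, int error, int mapped)
{
	int r = error;
	endio_fn rq_end_io = NULL;

	/* ti may be NULL when the request was killed before being
	 * mapped, so every dereference sits behind this guard. */
	if (ti) {
		rq_end_io = ti->rq_end_io;
		if (mapped && rq_end_io)
			r = rq_end_io(error);
	}
	return r;
}

static int my_end_io(int error) { return error ? error : 1; }

int main(void)
{
	struct target t = { .rq_end_io = my_end_io };

	printf("%d\n", done(&t, 0, 1));    /* mapped: hook runs, prints 1 */
	printf("%d\n", done(NULL, -5, 1)); /* unmapped: no deref, prints -5 */
	return 0;
}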
@@ -1588,15 +1592,6 @@ static int map_request(struct dm_target *ti, struct request *clone,
 	int r, requeued = 0;
 	struct dm_rq_target_io *tio = clone->end_io_data;
 
-	/*
-	 * Hold the md reference here for the in-flight I/O.
-	 * We can't rely on the reference count by device opener,
-	 * because the device may be closed during the request completion
-	 * when all bios are completed.
-	 * See the comment in rq_completed() too.
-	 */
-	dm_get(md);
-
 	tio->ti = ti;
 	r = ti->type->map_rq(ti, clone, &tio->info);
 	switch (r) {
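
The dm_get() reference removed here is not dropped; it moves into the new dm_start_request() helper added below, so the md reference is taken in one place for every started request, including ones that fail before map_request() runs.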
@@ -1628,6 +1623,26 @@ static int map_request(struct dm_target *ti, struct request *clone,
 	return requeued;
 }
 
+static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
+{
+	struct request *clone;
+
+	blk_start_request(orig);
+	clone = orig->special;
+	atomic_inc(&md->pending[rq_data_dir(clone)]);
+
+	/*
+	 * Hold the md reference here for the in-flight I/O.
+	 * We can't rely on the reference count by device opener,
+	 * because the device may be closed during the request completion
+	 * when all bios are completed.
+	 * See the comment in rq_completed() too.
+	 */
+	dm_get(md);
+
+	return clone;
+}
+
 /*
  * q->request_fn for request-based dm.
  * Called with the queue lock held.
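
dm_start_request() gathers the three steps that must happen before the queue lock is dropped: starting the request, bumping the per-direction pending counter, and taking the md reference that keeps the device alive while the I/O is in flight. A rough userspace sketch of that start/complete pairing; the refcount type and all names are illustrative, not kernel APIs:

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

struct device {
	atomic_int refs;	/* lifetime reference, cf. dm_get()/dm_put() */
	atomic_int pending;	/* in-flight request count */
};

/* Start side: account the request and pin the device before any
 * completion path can run, mirroring dm_start_request(). */
static void start_request(struct device *d)
{
	atomic_fetch_add(&d->pending, 1);
	atomic_fetch_add(&d->refs, 1);
}

/* Completion side: drop exactly what start_request() took, mirroring
 * the pairing described in the rq_completed() comment. */
static void complete_request(struct device *d)
{
	atomic_fetch_sub(&d->pending, 1);
	atomic_fetch_sub(&d->refs, 1);	/* cf. dm_put() */
}

int main(void)
{
	struct device d = { .refs = 1, .pending = 0 };

	start_request(&d);
	assert(atomic_load(&d.refs) == 2);	/* opener ref + in-flight ref */
	complete_request(&d);
	assert(atomic_load(&d.refs) == 1);
	printf("ok\n");
	return 0;
}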
@@ -1657,14 +1672,21 @@ static void dm_request_fn(struct request_queue *q)
 		pos = blk_rq_pos(rq);
 
 		ti = dm_table_find_target(map, pos);
-		BUG_ON(!dm_target_is_valid(ti));
+		if (!dm_target_is_valid(ti)) {
+			/*
+			 * Must perform setup, that dm_done() requires,
+			 * before calling dm_kill_unmapped_request
+			 */
+			DMERR_LIMIT("request attempted access beyond the end of device");
+			clone = dm_start_request(md, rq);
+			dm_kill_unmapped_request(clone, -EIO);
+			continue;
+		}
 
 		if (ti->type->busy && ti->type->busy(ti))
 			goto delay_and_out;
 
-		blk_start_request(rq);
-		clone = rq->special;
-		atomic_inc(&md->pending[rq_data_dir(clone)]);
+		clone = dm_start_request(md, rq);
 
 		spin_unlock(q->queue_lock);
 		if (map_request(ti, clone, md))
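
Replacing the BUG_ON() with DMERR_LIMIT() plus dm_kill_unmapped_request() turns a request beyond the end of the device from a kernel crash into a rate-limited error message and a failed I/O. A small userspace sketch of the same reject-don't-crash pattern; the names and device size are illustrative:

#include <errno.h>
#include <stdio.h>

#define DEV_SECTORS 1024	/* illustrative device size */

/* Fail out-of-range requests with -EIO instead of asserting, mirroring
 * the switch from BUG_ON() to dm_kill_unmapped_request(..., -EIO). */
static int submit(long pos)
{
	if (pos < 0 || pos >= DEV_SECTORS) {
		fprintf(stderr,
			"request attempted access beyond the end of device\n");
		return -EIO;
	}
	return 0;	/* the request would be mapped and dispatched here */
}

int main(void)
{
	printf("in range: %d\n", submit(10));   /* 0 */
	printf("past end: %d\n", submit(4096)); /* -EIO */
	return 0;
}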
@@ -1684,8 +1706,6 @@ delay_and_out:
 	blk_delay_queue(q, HZ / 10);
 out:
 	dm_table_put(map);
-
-	return;
 }
 
 int dm_underlying_device_busy(struct request_queue *q)
