Diffstat (limited to 'drivers/md/dm.c')
 -rw-r--r--  drivers/md/dm.c | 40
 1 file changed, 24 insertions(+), 16 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index a930b72314ac..2caf492890d6 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1082,13 +1082,11 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 	dm_put(md);
 }
 
-static void free_rq_clone(struct request *clone, bool must_be_mapped)
+static void free_rq_clone(struct request *clone)
 {
 	struct dm_rq_target_io *tio = clone->end_io_data;
 	struct mapped_device *md = tio->md;
 
-	WARN_ON_ONCE(must_be_mapped && !clone->q);
-
 	blk_rq_unprep_clone(clone);
 
 	if (md->type == DM_TYPE_MQ_REQUEST_BASED)
@@ -1132,7 +1130,7 @@ static void dm_end_request(struct request *clone, int error)
 			rq->sense_len = clone->sense_len;
 	}
 
-	free_rq_clone(clone, true);
+	free_rq_clone(clone);
 	if (!rq->q->mq_ops)
 		blk_end_request_all(rq, error);
 	else
@@ -1151,7 +1149,7 @@ static void dm_unprep_request(struct request *rq)
 	}
 
 	if (clone)
-		free_rq_clone(clone, false);
+		free_rq_clone(clone);
 }
 
 /*
@@ -1164,6 +1162,7 @@ static void old_requeue_request(struct request *rq)
 
 	spin_lock_irqsave(q->queue_lock, flags);
 	blk_requeue_request(q, rq);
+	blk_run_queue_async(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
@@ -1724,8 +1723,7 @@ static int dm_merge_bvec(struct request_queue *q,
 	struct mapped_device *md = q->queuedata;
 	struct dm_table *map = dm_get_live_table_fast(md);
 	struct dm_target *ti;
-	sector_t max_sectors;
-	int max_size = 0;
+	sector_t max_sectors, max_size = 0;
 
 	if (unlikely(!map))
 		goto out;
@@ -1740,8 +1738,16 @@ static int dm_merge_bvec(struct request_queue *q,
 	max_sectors = min(max_io_len(bvm->bi_sector, ti),
 			  (sector_t) queue_max_sectors(q));
 	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
-	if (unlikely(max_size < 0)) /* this shouldn't _ever_ happen */
-		max_size = 0;
+
+	/*
+	 * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t
+	 * to the targets' merge function since it holds sectors not bytes).
+	 * Just doing this as an interim fix for stable@ because the more
+	 * comprehensive cleanup of switching to sector_t will impact every
+	 * DM target that implements a ->merge hook.
+	 */
+	if (max_size > INT_MAX)
+		max_size = INT_MAX;
 
 	/*
 	 * merge_bvec_fn() returns number of bytes
@@ -1749,7 +1755,7 @@ static int dm_merge_bvec(struct request_queue *q,
 	 * max is precomputed maximal io size
 	 */
 	if (max_size && ti->type->merge)
-		max_size = ti->type->merge(ti, bvm, biovec, max_size);
+		max_size = ti->type->merge(ti, bvm, biovec, (int) max_size);
 	/*
 	 * If the target doesn't support merge method and some of the devices
 	 * provided their merge_bvec method (we know this by looking for the
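
Aside on the INT_MAX clamp in the two hunks above: with max_size widened to sector_t, the byte budget (max_sectors << SECTOR_SHIFT) can legitimately exceed what the int parameter of ->merge can hold, and the old "int max_size" would simply have wrapped to a negative value that the "max_size < 0" check then papered over. The standalone userspace sketch below (not kernel code; the example sizes are made up) illustrates the wrap and the clamp:

#include <stdint.h>
#include <stdio.h>
#include <limits.h>

#define SECTOR_SHIFT 9	/* 512-byte sectors, as in the kernel */

int main(void)
{
	/* made-up example: a target willing to accept 4 GiB worth of sectors */
	uint64_t max_sectors = 8 * 1024 * 1024;	/* 8M sectors * 512 B = 4 GiB */
	uint32_t bi_size = 4096;		/* bytes already queued in the bio */

	uint64_t max_size = (max_sectors << SECTOR_SHIFT) - bi_size;
	printf("byte budget as a 64-bit value: %llu\n",
	       (unsigned long long)max_size);	/* 4294963200 */

	/* what the old 'int max_size' would have ended up holding: on common
	 * platforms the out-of-range conversion wraps to a negative number */
	printf("same value forced into an int:  %d\n", (int)max_size);

	/* the interim fix: clamp before handing the value to ->merge(..., int) */
	if (max_size > INT_MAX)
		max_size = INT_MAX;
	printf("after the INT_MAX clamp:        %llu\n",
	       (unsigned long long)max_size);

	return 0;
}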
@@ -1971,8 +1977,8 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
 			dm_kill_unmapped_request(rq, r);
 			return r;
 		}
-		if (IS_ERR(clone))
-			return DM_MAPIO_REQUEUE;
+		if (r != DM_MAPIO_REMAPPED)
+			return r;
 		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
 			/* -ENOMEM */
 			ti->type->release_clone_rq(clone);
@@ -2753,13 +2759,15 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (dm_table_get_type(map) == DM_TYPE_REQUEST_BASED) {
 		/* clone request is allocated at the end of the pdu */
 		tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io);
-		if (!clone_rq(rq, md, tio, GFP_ATOMIC))
-			return BLK_MQ_RQ_QUEUE_BUSY;
+		(void) clone_rq(rq, md, tio, GFP_ATOMIC);
 		queue_kthread_work(&md->kworker, &tio->work);
 	} else {
 		/* Direct call is fine since .queue_rq allows allocations */
-		if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
-			dm_requeue_unmapped_original_request(md, rq);
+		if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
+			/* Undo dm_start_request() before requeuing */
+			rq_completed(md, rq_data_dir(rq), false);
+			return BLK_MQ_RQ_QUEUE_BUSY;
+		}
 	}
 
 	return BLK_MQ_RQ_QUEUE_OK;
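
Aside on the last hunk: in the blk-mq path a driver that cannot dispatch a request is expected to undo any per-request setup and return BLK_MQ_RQ_QUEUE_BUSY, letting the block layer re-dispatch the request itself, instead of calling the old request_fn-path requeue helper. A minimal sketch of that convention under this kernel era's .queue_rq signature (the foo_* helpers are hypothetical stand-ins, not part of dm.c):

#include <linux/blk-mq.h>

/* hypothetical per-driver helpers, standing in for dm_start_request()
 * and rq_completed(..., false) in the hunk above */
static void foo_start(struct request *rq);
static void foo_undo_start(struct request *rq);
static bool foo_try_dispatch(struct request *rq);

static int foo_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	foo_start(rq);				/* cf. dm_start_request() */

	if (!foo_try_dispatch(rq)) {
		foo_undo_start(rq);		/* cf. rq_completed(md, rw, false) */
		return BLK_MQ_RQ_QUEUE_BUSY;	/* blk-mq retries the request later */
	}

	return BLK_MQ_RQ_QUEUE_OK;
}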