 drivers/md/dm-mpath.c |  4 +++-
 drivers/md/dm-table.c | 16 +++++++++-------
 drivers/md/dm.c       | 40 ++++++++++++++++++++++------------------
 3 files changed, 36 insertions(+), 24 deletions(-)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 63953477a07c..eff7bdd7731d 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -429,9 +429,11 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
 		/* blk-mq request-based interface */
 		*__clone = blk_get_request(bdev_get_queue(bdev),
 					   rq_data_dir(rq), GFP_ATOMIC);
-		if (IS_ERR(*__clone))
+		if (IS_ERR(*__clone)) {
 			/* ENOMEM, requeue */
+			clear_mapinfo(m, map_context);
 			return r;
+		}
 		(*__clone)->bio = (*__clone)->biotail = NULL;
 		(*__clone)->rq_disk = bdev->bd_disk;
 		(*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT;
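
Note on the hunk above: when blk_get_request() fails, __multipath_map() used to return straight away, leaking the per-request map context (the struct dm_mpath_io set up earlier in the function) on every ENOMEM requeue; clear_mapinfo() now releases it before the early return. A minimal userspace sketch of the same unwind-on-error discipline; every name here is an illustrative stand-in, not the kernel API:

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct map_context { void *per_io; };     /* stands in for dm_mpath_io */

	static void clear_context(struct map_context *mc)
	{
		free(mc->per_io);
		mc->per_io = NULL;
	}

	static int map_one(struct map_context *mc, void **clone)
	{
		mc->per_io = malloc(32);           /* set_mapinfo() analogue */
		if (!mc->per_io)
			return -ENOMEM;

		*clone = malloc(64);               /* blk_get_request() analogue */
		if (!*clone) {
			clear_context(mc);         /* the fix: undo before requeueing */
			return -ENOMEM;
		}
		return 0;
	}

	int main(void)
	{
		struct map_context mc = { 0 };
		void *clone = NULL;

		if (map_one(&mc, &clone) == 0)
			puts("mapped");
		free(clone);
		clear_context(&mc);
		return 0;
	}
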
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index d9b00b8565c6..16ba55ad7089 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -820,6 +820,12 @@ void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
 }
 EXPORT_SYMBOL(dm_consume_args);
 
+static bool __table_type_request_based(unsigned table_type)
+{
+	return (table_type == DM_TYPE_REQUEST_BASED ||
+		table_type == DM_TYPE_MQ_REQUEST_BASED);
+}
+
 static int dm_table_set_type(struct dm_table *t)
 {
 	unsigned i;
@@ -852,8 +858,7 @@ static int dm_table_set_type(struct dm_table *t)
 	 * Determine the type from the live device.
 	 * Default to bio-based if device is new.
 	 */
-	if (live_md_type == DM_TYPE_REQUEST_BASED ||
-	    live_md_type == DM_TYPE_MQ_REQUEST_BASED)
+	if (__table_type_request_based(live_md_type))
 		request_based = 1;
 	else
 		bio_based = 1;
@@ -903,7 +908,7 @@ static int dm_table_set_type(struct dm_table *t)
 		}
 		t->type = DM_TYPE_MQ_REQUEST_BASED;
 
-	} else if (hybrid && list_empty(devices) && live_md_type != DM_TYPE_NONE) {
+	} else if (list_empty(devices) && __table_type_request_based(live_md_type)) {
 		/* inherit live MD type */
 		t->type = live_md_type;
 
@@ -925,10 +930,7 @@ struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
 
 bool dm_table_request_based(struct dm_table *t)
 {
-	unsigned table_type = dm_table_get_type(t);
-
-	return (table_type == DM_TYPE_REQUEST_BASED ||
-		table_type == DM_TYPE_MQ_REQUEST_BASED);
+	return __table_type_request_based(dm_table_get_type(t));
 }
 
 bool dm_table_mq_request_based(struct dm_table *t)
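
Note on the dm-table.c hunks: the "is this table request-based?" test existed as an open-coded pair of comparisons in more than one place, and the inherit-live-type branch in dm_table_set_type() keyed off the target being hybrid and the live type merely being set (!= DM_TYPE_NONE). Both callers now share __table_type_request_based(), and the branch inherits only when the live type is actually request-based. A compilable sketch of the consolidated predicate; the enum values mirror the kernel's ordering but the code is illustrative:

	#include <stdbool.h>
	#include <stdio.h>

	enum dm_type {
		DM_TYPE_NONE,
		DM_TYPE_BIO_BASED,
		DM_TYPE_REQUEST_BASED,
		DM_TYPE_MQ_REQUEST_BASED,
	};

	/* one predicate instead of repeated two-constant comparisons */
	static bool table_type_request_based(enum dm_type t)
	{
		return t == DM_TYPE_REQUEST_BASED || t == DM_TYPE_MQ_REQUEST_BASED;
	}

	int main(void)
	{
		printf("bio-based qualifies? %d\n",
		       table_type_request_based(DM_TYPE_BIO_BASED));         /* 0 */
		printf("mq request-based qualifies? %d\n",
		       table_type_request_based(DM_TYPE_MQ_REQUEST_BASED));  /* 1 */
		return 0;
	}

The design point is the usual one: with the check in a single helper, adding or renaming a request-based table type later touches one function rather than every call site.
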
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index a930b72314ac..2caf492890d6 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1082,13 +1082,11 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 	dm_put(md);
 }
 
-static void free_rq_clone(struct request *clone, bool must_be_mapped)
+static void free_rq_clone(struct request *clone)
 {
 	struct dm_rq_target_io *tio = clone->end_io_data;
 	struct mapped_device *md = tio->md;
 
-	WARN_ON_ONCE(must_be_mapped && !clone->q);
-
 	blk_rq_unprep_clone(clone);
 
 	if (md->type == DM_TYPE_MQ_REQUEST_BASED)
@@ -1132,7 +1130,7 @@ static void dm_end_request(struct request *clone, int error)
 		rq->sense_len = clone->sense_len;
 	}
 
-	free_rq_clone(clone, true);
+	free_rq_clone(clone);
 	if (!rq->q->mq_ops)
 		blk_end_request_all(rq, error);
 	else
@@ -1151,7 +1149,7 @@ static void dm_unprep_request(struct request *rq)
 	}
 
 	if (clone)
-		free_rq_clone(clone, false);
+		free_rq_clone(clone);
 }
 
 /*
@@ -1164,6 +1162,7 @@ static void old_requeue_request(struct request *rq)
 
 	spin_lock_irqsave(q->queue_lock, flags);
 	blk_requeue_request(q, rq);
+	blk_run_queue_async(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
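Note on old_requeue_request(): blk_requeue_request() only puts the request back on the queue; nothing guarantees the queue is run again promptly, so a requeued request could sit until unrelated I/O kicked dispatch. blk_run_queue_async() schedules a queue run without sleeping, which is what this context needs since queue_lock is held with interrupts off. A userspace analogue of the requeue-then-wake pairing; illustrative only, compile with -lpthread:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  kick = PTHREAD_COND_INITIALIZER;
	static int pending;

	/* blk_requeue_request() + blk_run_queue_async() analogue */
	static void requeue(void)
	{
		pthread_mutex_lock(&lock);
		pending++;                    /* put the request back... */
		pthread_cond_signal(&kick);   /* ...and make sure dispatch runs */
		pthread_mutex_unlock(&lock);
	}

	static void *dispatcher(void *arg)
	{
		(void)arg;
		pthread_mutex_lock(&lock);
		while (!pending)
			pthread_cond_wait(&kick, &lock);
		pending--;
		pthread_mutex_unlock(&lock);
		puts("dispatched requeued request");
		return NULL;
	}

	int main(void)
	{
		pthread_t t;
		pthread_create(&t, NULL, dispatcher, NULL);
		requeue();
		pthread_join(t, NULL);
		return 0;
	}
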
@@ -1724,8 +1723,7 @@ static int dm_merge_bvec(struct request_queue *q,
 	struct mapped_device *md = q->queuedata;
 	struct dm_table *map = dm_get_live_table_fast(md);
 	struct dm_target *ti;
-	sector_t max_sectors;
-	int max_size = 0;
+	sector_t max_sectors, max_size = 0;
 
 	if (unlikely(!map))
 		goto out;
@@ -1740,8 +1738,16 @@ static int dm_merge_bvec(struct request_queue *q,
 	max_sectors = min(max_io_len(bvm->bi_sector, ti),
 			  (sector_t) queue_max_sectors(q));
 	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
-	if (unlikely(max_size < 0)) /* this shouldn't _ever_ happen */
-		max_size = 0;
+
+	/*
+	 * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t
+	 * to the targets' merge function since it holds sectors not bytes).
+	 * Just doing this as an interim fix for stable@ because the more
+	 * comprehensive cleanup of switching to sector_t will impact every
+	 * DM target that implements a ->merge hook.
+	 */
+	if (max_size > INT_MAX)
+		max_size = INT_MAX;
 
 	/*
 	 * merge_bvec_fn() returns number of bytes
@@ -1749,7 +1755,7 @@ static int dm_merge_bvec(struct request_queue *q,
 	 * max is precomputed maximal io size
 	 */
 	if (max_size && ti->type->merge)
-		max_size = ti->type->merge(ti, bvm, biovec, max_size);
+		max_size = ti->type->merge(ti, bvm, biovec, (int) max_size);
 	/*
 	 * If the target doesn't support merge method and some of the devices
 	 * provided their merge_bvec method (we know this by looking for the
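
Note on the two dm_merge_bvec() hunks: (max_sectors << SECTOR_SHIFT) is a byte count computed in sector_t (64-bit with CONFIG_LBDAF), but the old code stored it in an int, so a perfectly valid large limit could be truncated, trip the max_size < 0 check, and be clamped to 0, needlessly refusing merges. The interim fix keeps the value in sector_t and clamps it to INT_MAX before the (int) cast at the ->merge call. A small program showing the truncation on a typical two's-complement target; SECTOR_SHIFT and sector_t mimic the kernel definitions:

	#include <limits.h>
	#include <stdint.h>
	#include <stdio.h>

	#define SECTOR_SHIFT 9
	typedef uint64_t sector_t;

	int main(void)
	{
		sector_t max_sectors = (sector_t)1 << 40;  /* large but legitimate */
		uint32_t bi_size = 4096;

		/* old: squeeze the 64-bit byte count into an int */
		int truncated = (int)((max_sectors << SECTOR_SHIFT) - bi_size);

		/* new: compute in sector_t, clamp, then cast at the call site */
		sector_t max_size = (max_sectors << SECTOR_SHIFT) - bi_size;
		if (max_size > INT_MAX)
			max_size = INT_MAX;

		printf("truncated int: %d\n", truncated);      /* -4096: bogus */
		printf("clamped int:   %d\n", (int)max_size);  /* INT_MAX */
		return 0;
	}
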
@@ -1971,8 +1977,8 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
 		dm_kill_unmapped_request(rq, r);
 		return r;
 	}
-	if (IS_ERR(clone))
-		return DM_MAPIO_REQUEUE;
+	if (r != DM_MAPIO_REMAPPED)
+		return r;
 	if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
 		/* -ENOMEM */
 		ti->type->release_clone_rq(clone);
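
Note on map_request(): the old test keyed off IS_ERR(clone) and collapsed everything into DM_MAPIO_REQUEUE. Checking the return code directly means only DM_MAPIO_REMAPPED proceeds to setup_clone(), and any other verdict from the target (DM_MAPIO_SUBMITTED, DM_MAPIO_REQUEUE) is propagated to the caller unchanged, which matters now that dm_mq_queue_rq() below acts on DM_MAPIO_REQUEUE itself. A sketch of the dispatch; the enum values mirror the kernel's ordering, and the function body is illustrative:

	#include <stdio.h>

	enum mapio { DM_MAPIO_SUBMITTED, DM_MAPIO_REMAPPED, DM_MAPIO_REQUEUE };

	static enum mapio map_request(enum mapio r)
	{
		if (r != DM_MAPIO_REMAPPED)
			return r;   /* SUBMITTED or REQUEUE: no clone to set up */
		/* ... setup_clone() and dispatch of the remapped clone ... */
		return DM_MAPIO_REMAPPED;
	}

	int main(void)
	{
		printf("requeue propagated: %d\n", map_request(DM_MAPIO_REQUEUE));
		return 0;
	}
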
@@ -2753,13 +2759,15 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (dm_table_get_type(map) == DM_TYPE_REQUEST_BASED) {
 		/* clone request is allocated at the end of the pdu */
 		tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io);
-		if (!clone_rq(rq, md, tio, GFP_ATOMIC))
-			return BLK_MQ_RQ_QUEUE_BUSY;
+		(void) clone_rq(rq, md, tio, GFP_ATOMIC);
 		queue_kthread_work(&md->kworker, &tio->work);
 	} else {
 		/* Direct call is fine since .queue_rq allows allocations */
-		if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
-			dm_requeue_unmapped_original_request(md, rq);
+		if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
+			/* Undo dm_start_request() before requeuing */
+			rq_completed(md, rq_data_dir(rq), false);
+			return BLK_MQ_RQ_QUEUE_BUSY;
+		}
 	}
 
 	return BLK_MQ_RQ_QUEUE_OK;
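
Note on dm_mq_queue_rq(): two changes here. First, clone_rq()'s result is no longer checked on the DM_TYPE_REQUEST_BASED path; the request is handed to the kworker regardless, leaving any failure to be dealt with on the worker-thread mapping path rather than by returning BUSY from .queue_rq (that reading is inferred from the diff, not stated in it). Second, when map_request() reports DM_MAPIO_REQUEUE, .queue_rq now unwinds the accounting taken by dm_start_request() via rq_completed() and returns BLK_MQ_RQ_QUEUE_BUSY, letting blk-mq itself retry the request instead of calling DM's requeue helper from inside .queue_rq. A compilable sketch of the unwind-before-busy pattern; all names are illustrative stand-ins:

	#include <stdio.h>

	enum { QUEUE_OK, QUEUE_BUSY };

	static int in_flight;                        /* md_in_flight analogue */

	static void start_request(void)     { in_flight++; }  /* dm_start_request */
	static void request_completed(void) { in_flight--; }  /* rq_completed */

	static int map(void) { return 1; }           /* 1 == "please requeue" */

	static int queue_rq(void)
	{
		start_request();
		if (map()) {
			/* undo the in-flight accounting before reporting BUSY,
			 * otherwise the counter leaks and never drains */
			request_completed();
			return QUEUE_BUSY;
		}
		return QUEUE_OK;
	}

	int main(void)
	{
		printf("rc=%d in_flight=%d\n", queue_rq(), in_flight);
		return 0;
	}
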