Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r--  drivers/md/dm.c | 19 ++++++++++++-------
1 file changed, 12 insertions(+), 7 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index f8c7ca3e8947..a930b72314ac 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1082,18 +1082,26 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 	dm_put(md);
 }
 
-static void free_rq_clone(struct request *clone)
+static void free_rq_clone(struct request *clone, bool must_be_mapped)
 {
 	struct dm_rq_target_io *tio = clone->end_io_data;
 	struct mapped_device *md = tio->md;
 
+	WARN_ON_ONCE(must_be_mapped && !clone->q);
+
 	blk_rq_unprep_clone(clone);
 
-	if (clone->q->mq_ops)
+	if (md->type == DM_TYPE_MQ_REQUEST_BASED)
+		/* stacked on blk-mq queue(s) */
 		tio->ti->type->release_clone_rq(clone);
 	else if (!md->queue->mq_ops)
 		/* request_fn queue stacked on request_fn queue(s) */
 		free_clone_request(md, clone);
+	/*
+	 * NOTE: for the blk-mq queue stacked on request_fn queue(s) case:
+	 * no need to call free_clone_request() because we leverage blk-mq by
+	 * allocating the clone at the end of the blk-mq pdu (see: clone_rq)
+	 */
 
 	if (!md->queue->mq_ops)
 		free_rq_tio(tio);
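
[Editor's note, not part of the patch] For readability, this is how free_rq_clone() reads once the hunk above is applied; only the lines visible in the hunk are reproduced, and the "gloss" comments are interpretive additions. The net effect appears to be that an unmapped clone (clone->q == NULL) is no longer dereferenced in the branch test: the old code tested clone->q->mq_ops, while the new code keys off md->type and merely warns when a supposedly mapped clone lacks a queue.

static void free_rq_clone(struct request *clone, bool must_be_mapped)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;

	/* gloss: a clone that was actually mapped is expected to carry a queue */
	WARN_ON_ONCE(must_be_mapped && !clone->q);

	blk_rq_unprep_clone(clone);

	if (md->type == DM_TYPE_MQ_REQUEST_BASED)
		/* stacked on blk-mq queue(s) */
		tio->ti->type->release_clone_rq(clone);
	else if (!md->queue->mq_ops)
		/* request_fn queue stacked on request_fn queue(s) */
		free_clone_request(md, clone);
	/*
	 * NOTE: for the blk-mq queue stacked on request_fn queue(s) case:
	 * no need to call free_clone_request() because we leverage blk-mq by
	 * allocating the clone at the end of the blk-mq pdu (see: clone_rq)
	 */

	if (!md->queue->mq_ops)
		free_rq_tio(tio);
	/* gloss: lines past this point lie outside the hunk and are not shown */
}
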
@@ -1124,7 +1132,7 @@ static void dm_end_request(struct request *clone, int error)
 			rq->sense_len = clone->sense_len;
 	}
 
-	free_rq_clone(clone);
+	free_rq_clone(clone, true);
 	if (!rq->q->mq_ops)
 		blk_end_request_all(rq, error);
 	else
@@ -1143,7 +1151,7 @@ static void dm_unprep_request(struct request *rq)
 	}
 
 	if (clone)
-		free_rq_clone(clone);
+		free_rq_clone(clone, false);
 }
 
 /*
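
[Editor's note, not part of the patch] The two call sites above give must_be_mapped its meaning: the completion path in dm_end_request() passes true, since a request that completed must have been mapped and its clone should therefore have a queue, while the unprep/requeue path in dm_unprep_request() passes false, since a requeued request may never have been mapped. A minimal sketch of the convention (surrounding code elided; comments are interpretive, not from the patch):

	/* dm_end_request(): completion implies the clone was mapped */
	free_rq_clone(clone, true);

	/* dm_unprep_request(): a requeued clone may never have been mapped */
	free_rq_clone(clone, false);
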
@@ -2662,9 +2670,6 @@ static int dm_init_request_based_queue(struct mapped_device *md)
 {
 	struct request_queue *q = NULL;
 
-	if (md->queue->elevator)
-		return 0;
-
 	/* Fully initialize the queue */
 	q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
 	if (!q)
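
[Editor's note, not part of the patch] The last hunk drops the early return that skipped initialization whenever md->queue already had an elevator attached; the removed guard was:

	if (md->queue->elevator)
		return 0;

so, within the lines shown, dm_init_request_based_queue() now always proceeds to blk_init_allocated_queue() on md->queue.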