 drivers/md/dm-ioctl.c | 17 +++++++++--------
 drivers/md/dm.c       | 19 ++++++++++++-------
 2 files changed, 21 insertions(+), 15 deletions(-)
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index c8a18e4ee9dc..720ceeb7fa9b 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1298,21 +1298,22 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
 		goto err_unlock_md_type;
 	}
 
-	if (dm_get_md_type(md) == DM_TYPE_NONE)
+	if (dm_get_md_type(md) == DM_TYPE_NONE) {
 		/* Initial table load: acquire type of table. */
 		dm_set_md_type(md, dm_table_get_type(t));
-	else if (dm_get_md_type(md) != dm_table_get_type(t)) {
+
+		/* setup md->queue to reflect md's type (may block) */
+		r = dm_setup_md_queue(md);
+		if (r) {
+			DMWARN("unable to set up device queue for new table.");
+			goto err_unlock_md_type;
+		}
+	} else if (dm_get_md_type(md) != dm_table_get_type(t)) {
 		DMWARN("can't change device type after initial table load.");
 		r = -EINVAL;
 		goto err_unlock_md_type;
 	}
 
-	/* setup md->queue to reflect md's type (may block) */
-	r = dm_setup_md_queue(md);
-	if (r) {
-		DMWARN("unable to set up device queue for new table.");
-		goto err_unlock_md_type;
-	}
 	dm_unlock_md_type(md);
 
 	/* stage inactive table */
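
Consolidated, the post-patch branch in table_load() reads as below (a sketch assembled from the hunk above, not a verbatim tree excerpt). The net effect is that dm_setup_md_queue() now runs only on the initial table load, while md's type is still DM_TYPE_NONE, instead of on every table load:

	if (dm_get_md_type(md) == DM_TYPE_NONE) {
		/* Initial table load: acquire type of table. */
		dm_set_md_type(md, dm_table_get_type(t));

		/* setup md->queue to reflect md's type (may block) */
		r = dm_setup_md_queue(md);
		if (r) {
			DMWARN("unable to set up device queue for new table.");
			goto err_unlock_md_type;
		}
	} else if (dm_get_md_type(md) != dm_table_get_type(t)) {
		/* Later loads may not change the device type. */
		DMWARN("can't change device type after initial table load.");
		r = -EINVAL;
		goto err_unlock_md_type;
	}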
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index f8c7ca3e8947..a930b72314ac 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1082,18 +1082,26 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 	dm_put(md);
 }
 
-static void free_rq_clone(struct request *clone)
+static void free_rq_clone(struct request *clone, bool must_be_mapped)
 {
 	struct dm_rq_target_io *tio = clone->end_io_data;
 	struct mapped_device *md = tio->md;
 
+	WARN_ON_ONCE(must_be_mapped && !clone->q);
+
 	blk_rq_unprep_clone(clone);
 
-	if (clone->q->mq_ops)
+	if (md->type == DM_TYPE_MQ_REQUEST_BASED)
+		/* stacked on blk-mq queue(s) */
 		tio->ti->type->release_clone_rq(clone);
 	else if (!md->queue->mq_ops)
 		/* request_fn queue stacked on request_fn queue(s) */
 		free_clone_request(md, clone);
+	/*
+	 * NOTE: for the blk-mq queue stacked on request_fn queue(s) case:
+	 * no need to call free_clone_request() because we leverage blk-mq by
+	 * allocating the clone at the end of the blk-mq pdu (see: clone_rq)
+	 */
 
 	if (!md->queue->mq_ops)
 		free_rq_tio(tio);
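
The substantive fix here is the dispatch test: the old code read clone->q->mq_ops, which dereferences clone->q and therefore crashes when an unmapped clone (clone->q == NULL) reaches this path; keying off md->type avoids touching clone->q at all. The branch now distinguishes three stacking cases, restated below for reference (an illustrative summary of the hunk's own comments, not a tree excerpt):

	/*
	 * Clone cleanup by stacking combination:
	 *
	 *  1. DM_TYPE_MQ_REQUEST_BASED (stacked on blk-mq queue(s)):
	 *     the target allocated the clone, so hand it back via
	 *     release_clone_rq().
	 *  2. request_fn queue stacked on request_fn queue(s):
	 *     DM allocated the clone from its own pool, so call
	 *     free_clone_request().
	 *  3. blk-mq queue stacked on request_fn queue(s):
	 *     the clone lives at the end of the blk-mq pdu (see clone_rq),
	 *     so there is nothing separate to free.
	 */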
@@ -1124,7 +1132,7 @@ static void dm_end_request(struct request *clone, int error)
 		rq->sense_len = clone->sense_len;
 	}
 
-	free_rq_clone(clone);
+	free_rq_clone(clone, true);
 	if (!rq->q->mq_ops)
 		blk_end_request_all(rq, error);
 	else
@@ -1143,7 +1151,7 @@ static void dm_unprep_request(struct request *rq)
 	}
 
 	if (clone)
-		free_rq_clone(clone);
+		free_rq_clone(clone, false);
 }
 
 /*
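
Taken together, the two call sites define the contract for the new must_be_mapped flag (comment-form summary, illustrative only):

	/*
	 * must_be_mapped at the call sites:
	 *
	 *   dm_end_request()    -> free_rq_clone(clone, true)
	 *     completion path: the clone was necessarily mapped, so
	 *     WARN_ON_ONCE(must_be_mapped && !clone->q) flags a NULL
	 *     clone->q as a bug.
	 *
	 *   dm_unprep_request() -> free_rq_clone(clone, false)
	 *     requeue path: the request may never have been mapped, so a
	 *     NULL clone->q is legitimate and must not warn (or, as with
	 *     the old clone->q->mq_ops test, crash).
	 */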
@@ -2662,9 +2670,6 @@ static int dm_init_request_based_queue(struct mapped_device *md)
 {
 	struct request_queue *q = NULL;
 
-	if (md->queue->elevator)
-		return 0;
-
 	/* Fully initialize the queue */
 	q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
 	if (!q)
