commit 7d7e0f90b70f6c5367c2d1c9a7e87dd228bd0816
tree   6105df4466a36e85a24ead3b67a35d31f9f37011
parent bdd17e75cd97c5c39feee409890a91d0396640fe
author    Christoph Hellwig <hch@lst.de>  2016-09-14 10:18:54 -0400
committer Jens Axboe <axboe@fb.com>       2016-09-15 10:42:03 -0400

blk-mq: remove ->map_queue
All drivers use the default, so provide an inline version of it. If we ever
need another queue mapping we can add an optional method back, although
supporting it will also require major changes to the queue setup code.

This provides better code generation, and better debuggability as well.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
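In short, a minimal sketch of the change (taken from the block/blk-mq.h and
call-site hunks below; it introduces nothing beyond what the patch itself adds):

	/* The software-to-hardware queue mapping is a plain per-CPU table
	 * lookup, so the indirect ->map_queue call becomes an inline helper
	 * in block/blk-mq.h (mq_map[] is filled in during queue setup):
	 */
	static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
			int cpu)
	{
		return q->queue_hw_ctx[q->mq_map[cpu]];
	}

	/* Call sites change from the indirect call ... */
	hctx = q->mq_ops->map_queue(q, ctx->cpu);
	/* ... to the direct inline lookup: */
	hctx = blk_mq_map_queue(q, ctx->cpu);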
 block/blk-flush.c                 |  6
 block/blk-mq-tag.c                |  5
 block/blk-mq.c                    | 40
 block/blk-mq.h                    |  6
 block/blk.h                       | 11
 drivers/block/loop.c              |  1
 drivers/block/mtip32xx/mtip32xx.c |  1
 drivers/block/null_blk.c          |  1
 drivers/block/rbd.c               |  1
 drivers/block/virtio_blk.c        |  1
 drivers/block/xen-blkfront.c      |  1
 drivers/md/dm-rq.c                |  1
 drivers/mtd/ubi/block.c           |  1
 drivers/nvme/host/pci.c           |  2
 drivers/nvme/host/rdma.c          |  2
 drivers/nvme/target/loop.c        |  2
 drivers/scsi/scsi_lib.c           |  1
 include/linux/blk-mq.h            |  7
 18 files changed, 25 insertions(+), 65 deletions(-)
diff --git a/block/blk-flush.c b/block/blk-flush.c
index d308def812db..6a14b68b9135 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -232,7 +232,7 @@ static void flush_end_io(struct request *flush_rq, int error)
 
 		/* release the tag's ownership to the req cloned from */
 		spin_lock_irqsave(&fq->mq_flush_lock, flags);
-		hctx = q->mq_ops->map_queue(q, flush_rq->mq_ctx->cpu);
+		hctx = blk_mq_map_queue(q, flush_rq->mq_ctx->cpu);
 		blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
 		flush_rq->tag = -1;
 	}
@@ -325,7 +325,7 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
 		flush_rq->tag = first_rq->tag;
 		fq->orig_rq = first_rq;
 
-		hctx = q->mq_ops->map_queue(q, first_rq->mq_ctx->cpu);
+		hctx = blk_mq_map_queue(q, first_rq->mq_ctx->cpu);
 		blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
 	}
 
@@ -358,7 +358,7 @@ static void mq_flush_data_end_io(struct request *rq, int error)
 	unsigned long flags;
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);
 
-	hctx = q->mq_ops->map_queue(q, ctx->cpu);
+	hctx = blk_mq_map_queue(q, ctx->cpu);
 
 	/*
 	 * After populating an empty queue, kick it to avoid stall. Read
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 729bac3a673b..16028130289f 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -301,8 +301,7 @@ static int bt_get(struct blk_mq_alloc_data *data,
 		io_schedule();
 
 		data->ctx = blk_mq_get_ctx(data->q);
-		data->hctx = data->q->mq_ops->map_queue(data->q,
-				data->ctx->cpu);
+		data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
 		if (data->flags & BLK_MQ_REQ_RESERVED) {
 			bt = &data->hctx->tags->breserved_tags;
 		} else {
@@ -726,7 +725,7 @@ u32 blk_mq_unique_tag(struct request *rq)
 	int hwq = 0;
 
 	if (q->mq_ops) {
-		hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
+		hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
 		hwq = hctx->queue_num;
 	}
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index c9499f118ef6..6e077a9d61a8 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -245,7 +245,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 		return ERR_PTR(ret);
 
 	ctx = blk_mq_get_ctx(q);
-	hctx = q->mq_ops->map_queue(q, ctx->cpu);
+	hctx = blk_mq_map_queue(q, ctx->cpu);
 	blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
 
 	rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
@@ -254,7 +254,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 		blk_mq_put_ctx(ctx);
 
 		ctx = blk_mq_get_ctx(q);
-		hctx = q->mq_ops->map_queue(q, ctx->cpu);
+		hctx = blk_mq_map_queue(q, ctx->cpu);
 		blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
 		rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
 		ctx = alloc_data.ctx;
@@ -338,11 +338,7 @@ EXPORT_SYMBOL_GPL(blk_mq_free_hctx_request);
 
 void blk_mq_free_request(struct request *rq)
 {
-	struct blk_mq_hw_ctx *hctx;
-	struct request_queue *q = rq->q;
-
-	hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
-	blk_mq_free_hctx_request(hctx, rq);
+	blk_mq_free_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
 }
 EXPORT_SYMBOL_GPL(blk_mq_free_request);
 
@@ -1074,9 +1070,7 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
 {
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
 	struct request_queue *q = rq->q;
-	struct blk_mq_hw_ctx *hctx;
-
-	hctx = q->mq_ops->map_queue(q, ctx->cpu);
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 
 	spin_lock(&ctx->lock);
 	__blk_mq_insert_request(hctx, rq, at_head);
@@ -1093,12 +1087,10 @@ static void blk_mq_insert_requests(struct request_queue *q,
 				     bool from_schedule)
 
 {
-	struct blk_mq_hw_ctx *hctx;
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 
 	trace_block_unplug(q, depth, !from_schedule);
 
-	hctx = q->mq_ops->map_queue(q, ctx->cpu);
-
 	/*
 	 * preemption doesn't flush plug list, so it's possible ctx->cpu is
 	 * offline now
@@ -1232,7 +1224,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 
 	blk_queue_enter_live(q);
 	ctx = blk_mq_get_ctx(q);
-	hctx = q->mq_ops->map_queue(q, ctx->cpu);
+	hctx = blk_mq_map_queue(q, ctx->cpu);
 
 	if (rw_is_sync(bio_op(bio), bio->bi_opf))
 		op_flags |= REQ_SYNC;
@@ -1246,7 +1238,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 		trace_block_sleeprq(q, bio, op);
 
 		ctx = blk_mq_get_ctx(q);
-		hctx = q->mq_ops->map_queue(q, ctx->cpu);
+		hctx = blk_mq_map_queue(q, ctx->cpu);
 		blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
 		rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
 		ctx = alloc_data.ctx;
@@ -1263,8 +1255,7 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
 {
 	int ret;
 	struct request_queue *q = rq->q;
-	struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q,
-			rq->mq_ctx->cpu);
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
 	struct blk_mq_queue_data bd = {
 		.rq = rq,
 		.list = NULL,
@@ -1468,15 +1459,6 @@ run_queue:
 	return cookie;
 }
 
-/*
- * Default mapping to a software queue, since we use one per CPU.
- */
-struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
-{
-	return q->queue_hw_ctx[q->mq_map[cpu]];
-}
-EXPORT_SYMBOL(blk_mq_map_queue);
-
 static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
 		struct blk_mq_tags *tags, unsigned int hctx_idx)
 {
@@ -1810,7 +1792,7 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
 		if (!cpu_online(i))
 			continue;
 
-		hctx = q->mq_ops->map_queue(q, i);
+		hctx = blk_mq_map_queue(q, i);
 
 		/*
 		 * Set local node, IFF we have more than one hw queue. If
@@ -1848,7 +1830,7 @@ static void blk_mq_map_swqueue(struct request_queue *q,
 			continue;
 
 		ctx = per_cpu_ptr(q->queue_ctx, i);
-		hctx = q->mq_ops->map_queue(q, i);
+		hctx = blk_mq_map_queue(q, i);
 
 		cpumask_set_cpu(i, hctx->cpumask);
 		ctx->index_hw = hctx->nr_ctx;
@@ -2313,7 +2295,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
 		return -EINVAL;
 
-	if (!set->ops->queue_rq || !set->ops->map_queue)
+	if (!set->ops->queue_rq)
 		return -EINVAL;
 
 	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 9087b11037b7..ec774bf4aea2 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -52,6 +52,12 @@ extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
 		const struct cpumask *online_mask);
 extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
 
+static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
+		int cpu)
+{
+	return q->queue_hw_ctx[q->mq_map[cpu]];
+}
+
 /*
  * sysfs helpers
  */
diff --git a/block/blk.h b/block/blk.h
index c37492f5edaa..74444c49078f 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -39,14 +39,9 @@ extern struct ida blk_queue_ida;
 static inline struct blk_flush_queue *blk_get_flush_queue(
 	struct request_queue *q, struct blk_mq_ctx *ctx)
 {
-	struct blk_mq_hw_ctx *hctx;
-
-	if (!q->mq_ops)
-		return q->fq;
-
-	hctx = q->mq_ops->map_queue(q, ctx->cpu);
-
-	return hctx->fq;
+	if (q->mq_ops)
+		return blk_mq_map_queue(q, ctx->cpu)->fq;
+	return q->fq;
 }
 
 static inline void __blk_get_queue(struct request_queue *q)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c9f2107f7095..cbdb3b162718 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1703,7 +1703,6 @@ static int loop_init_request(void *data, struct request *rq,
 
 static struct blk_mq_ops loop_mq_ops = {
 	.queue_rq	= loop_queue_rq,
-	.map_queue	= blk_mq_map_queue,
 	.init_request	= loop_init_request,
 };
 
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 88c46853dbb5..3cfd879267b2 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3895,7 +3895,6 @@ exit_handler:
 
 static struct blk_mq_ops mtip_mq_ops = {
 	.queue_rq	= mtip_queue_rq,
-	.map_queue	= blk_mq_map_queue,
 	.init_request	= mtip_init_cmd,
 	.exit_request	= mtip_free_cmd,
 	.complete	= mtip_softirq_done_fn,
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 75a7f88d6717..7d3b7d6e5149 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -393,7 +393,6 @@ static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 
 static struct blk_mq_ops null_mq_ops = {
 	.queue_rq	= null_queue_rq,
-	.map_queue	= blk_mq_map_queue,
 	.init_hctx	= null_init_hctx,
 	.complete	= null_softirq_done_fn,
 };
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 6c6519f6492a..c1f84df7838b 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3621,7 +3621,6 @@ static int rbd_init_request(void *data, struct request *rq,
 
 static struct blk_mq_ops rbd_mq_ops = {
 	.queue_rq	= rbd_queue_rq,
-	.map_queue	= blk_mq_map_queue,
 	.init_request	= rbd_init_request,
 };
 
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 93b1aaa5ba3b..2dc5c96c186a 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -542,7 +542,6 @@ static int virtblk_init_request(void *data, struct request *rq,
 
 static struct blk_mq_ops virtio_mq_ops = {
 	.queue_rq	= virtio_queue_rq,
-	.map_queue	= blk_mq_map_queue,
 	.complete	= virtblk_request_done,
 	.init_request	= virtblk_init_request,
 };
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 88ef6d4729b4..9908597c5209 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -909,7 +909,6 @@ out_busy:
 
 static struct blk_mq_ops blkfront_mq_ops = {
 	.queue_rq	= blkif_queue_rq,
-	.map_queue	= blk_mq_map_queue,
 };
 
 static void blkif_set_queue_limits(struct blkfront_info *info)
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 1ca7463e8bb2..d1c3645d5ce1 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -908,7 +908,6 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 static struct blk_mq_ops dm_mq_ops = {
 	.queue_rq	= dm_mq_queue_rq,
-	.map_queue	= blk_mq_map_queue,
 	.complete	= dm_softirq_done,
 	.init_request	= dm_mq_init_request,
 };
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index ebf46ad2d513..d1e6931c132f 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -351,7 +351,6 @@ static int ubiblock_init_request(void *data, struct request *req,
 static struct blk_mq_ops ubiblock_mq_ops = {
 	.queue_rq	= ubiblock_queue_rq,
 	.init_request	= ubiblock_init_request,
-	.map_queue	= blk_mq_map_queue,
 };
 
 static DEFINE_IDR(ubiblock_minor_idr);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 8dcf5a960951..086fd7e45119 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1131,7 +1131,6 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
 static struct blk_mq_ops nvme_mq_admin_ops = {
 	.queue_rq	= nvme_queue_rq,
 	.complete	= nvme_complete_rq,
-	.map_queue	= blk_mq_map_queue,
 	.init_hctx	= nvme_admin_init_hctx,
 	.exit_hctx	= nvme_admin_exit_hctx,
 	.init_request	= nvme_admin_init_request,
@@ -1141,7 +1140,6 @@ static struct blk_mq_ops nvme_mq_admin_ops = {
 static struct blk_mq_ops nvme_mq_ops = {
 	.queue_rq	= nvme_queue_rq,
 	.complete	= nvme_complete_rq,
-	.map_queue	= blk_mq_map_queue,
 	.init_hctx	= nvme_init_hctx,
 	.init_request	= nvme_init_request,
 	.timeout	= nvme_timeout,
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index ab545fb347a0..9bbd8866363b 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1531,7 +1531,6 @@ static void nvme_rdma_complete_rq(struct request *rq)
 static struct blk_mq_ops nvme_rdma_mq_ops = {
 	.queue_rq	= nvme_rdma_queue_rq,
 	.complete	= nvme_rdma_complete_rq,
-	.map_queue	= blk_mq_map_queue,
 	.init_request	= nvme_rdma_init_request,
 	.exit_request	= nvme_rdma_exit_request,
 	.reinit_request	= nvme_rdma_reinit_request,
@@ -1543,7 +1542,6 @@ static struct blk_mq_ops nvme_rdma_mq_ops = {
 static struct blk_mq_ops nvme_rdma_admin_mq_ops = {
 	.queue_rq	= nvme_rdma_queue_rq,
 	.complete	= nvme_rdma_complete_rq,
-	.map_queue	= blk_mq_map_queue,
 	.init_request	= nvme_rdma_init_admin_request,
 	.exit_request	= nvme_rdma_exit_admin_request,
 	.reinit_request	= nvme_rdma_reinit_request,
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 395e60dad835..d5df77d686b2 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -273,7 +273,6 @@ static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 static struct blk_mq_ops nvme_loop_mq_ops = {
 	.queue_rq	= nvme_loop_queue_rq,
 	.complete	= nvme_loop_complete_rq,
-	.map_queue	= blk_mq_map_queue,
 	.init_request	= nvme_loop_init_request,
 	.init_hctx	= nvme_loop_init_hctx,
 	.timeout	= nvme_loop_timeout,
@@ -282,7 +281,6 @@ static struct blk_mq_ops nvme_loop_mq_ops = {
 static struct blk_mq_ops nvme_loop_admin_mq_ops = {
 	.queue_rq	= nvme_loop_queue_rq,
 	.complete	= nvme_loop_complete_rq,
-	.map_queue	= blk_mq_map_queue,
 	.init_request	= nvme_loop_init_admin_request,
 	.init_hctx	= nvme_loop_init_admin_hctx,
 	.timeout	= nvme_loop_timeout,
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index c71344aebdbb..2cca9cffc63f 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2077,7 +2077,6 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
 }
 
 static struct blk_mq_ops scsi_mq_ops = {
-	.map_queue	= blk_mq_map_queue,
 	.queue_rq	= scsi_queue_rq,
 	.complete	= scsi_softirq_done,
 	.timeout	= scsi_timeout,
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index deda16a9bde4..f01379f2b0ac 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -91,7 +91,6 @@ struct blk_mq_queue_data {
 };
 
 typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
-typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
 typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
 typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
 typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
@@ -114,11 +113,6 @@ struct blk_mq_ops {
 	queue_rq_fn		*queue_rq;
 
 	/*
-	 * Map to specific hardware queue
-	 */
-	map_queue_fn		*map_queue;
-
-	/*
 	 * Called on request timeout
 	 */
 	timeout_fn		*timeout;
@@ -223,7 +217,6 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
 	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
 }
 
-struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
 
 int blk_mq_request_started(struct request *rq);
 void blk_mq_start_request(struct request *rq);