author    Jens Axboe <axboe@kernel.dk>    2018-10-29 15:11:38 -0400
committer Jens Axboe <axboe@kernel.dk>    2018-11-07 15:44:59 -0500
commit    f9afca4d367b8c915f28d29fcaba7460640403ff (patch)
tree      452e46a75ae18332d1d795b32554943de47db4c2
parent    ff2c56609d9b1f0739ae3a3bfdb78191d01e4192 (diff)
blk-mq: pass in request/bio flags to queue mapping
Prep patch for being able to place requests based not just on CPU location, but also on the type of request.

Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--  block/blk-flush.c        7
-rw-r--r--  block/blk-mq-debugfs.c   4
-rw-r--r--  block/blk-mq-sched.c    16
-rw-r--r--  block/blk-mq-tag.c       5
-rw-r--r--  block/blk-mq.c          50
-rw-r--r--  block/blk-mq.h           6
-rw-r--r--  block/blk.h              6
7 files changed, 57 insertions, 37 deletions
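To summarize the interface change that the hunks below repeat at each call site, here is an illustrative before/after sketch distilled from this diff; it is not additional code from the patch. Callers now hand the command flags to blk_mq_map_queue(); since this is a prep patch, the flags are only threaded through here, and selecting among different queue maps by request type is left to follow-up changes.

/* Before: the hardware queue was derived from the software queue's CPU only. */
hctx = blk_mq_map_queue(q, ctx->cpu);

/*
 * After: callers also pass the command flags (rq->cmd_flags for a request,
 * bio->bi_opf for a bio), so later patches can place a request based on its
 * type, not just on CPU locality.
 */
hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);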
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 248fe78c2b9b..77e9f5b2ee05 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -215,7 +215,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 
         /* release the tag's ownership to the req cloned from */
         spin_lock_irqsave(&fq->mq_flush_lock, flags);
-        hctx = blk_mq_map_queue(q, flush_rq->mq_ctx->cpu);
+        hctx = blk_mq_map_queue(q, flush_rq->cmd_flags, flush_rq->mq_ctx->cpu);
         if (!q->elevator) {
                 blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
                 flush_rq->tag = -1;
@@ -301,7 +301,8 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
         if (!q->elevator) {
                 fq->orig_rq = first_rq;
                 flush_rq->tag = first_rq->tag;
-                hctx = blk_mq_map_queue(q, first_rq->mq_ctx->cpu);
+                hctx = blk_mq_map_queue(q, first_rq->cmd_flags,
+                                        first_rq->mq_ctx->cpu);
                 blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
         } else {
                 flush_rq->internal_tag = first_rq->internal_tag;
@@ -324,7 +325,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
         unsigned long flags;
         struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);
 
-        hctx = blk_mq_map_queue(q, ctx->cpu);
+        hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
 
         if (q->elevator) {
                 WARN_ON(rq->tag < 0);
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 9ed43a7c70b5..fac70c81b7de 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -427,8 +427,10 @@ struct show_busy_params {
 static void hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
 {
         const struct show_busy_params *params = data;
+        struct blk_mq_hw_ctx *hctx;
 
-        if (blk_mq_map_queue(rq->q, rq->mq_ctx->cpu) == params->hctx)
+        hctx = blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu);
+        if (hctx == params->hctx)
                 __blk_mq_debugfs_rq_show(params->m,
                                 list_entry_rq(&rq->queuelist));
 }
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 0feefd6c6aaa..68087bf71a61 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -310,7 +310,7 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
 {
         struct elevator_queue *e = q->elevator;
         struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
-        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx->cpu);
         bool ret = false;
 
         if (e && e->type->ops.bio_merge) {
@@ -366,7 +366,9 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
         struct request_queue *q = rq->q;
         struct elevator_queue *e = q->elevator;
         struct blk_mq_ctx *ctx = rq->mq_ctx;
-        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+        struct blk_mq_hw_ctx *hctx;
+
+        hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
 
         /* flush rq in flush machinery need to be dispatched directly */
         if (!(rq->rq_flags & RQF_FLUSH_SEQ) && op_is_flush(rq->cmd_flags)) {
@@ -399,9 +401,15 @@ void blk_mq_sched_insert_requests(struct request_queue *q,
                                   struct blk_mq_ctx *ctx,
                                   struct list_head *list, bool run_queue_async)
 {
-        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
-        struct elevator_queue *e = hctx->queue->elevator;
+        struct blk_mq_hw_ctx *hctx;
+        struct elevator_queue *e;
+        struct request *rq;
+
+        /* For list inserts, requests better be on the same hw queue */
+        rq = list_first_entry(list, struct request, queuelist);
+        hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
 
+        e = hctx->queue->elevator;
         if (e && e->type->ops.insert_requests)
                 e->type->ops.insert_requests(hctx, list, false);
         else {
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 4254e74c1446..478a959357f5 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -168,7 +168,8 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
                 io_schedule();
 
                 data->ctx = blk_mq_get_ctx(data->q);
-                data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
+                data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
+                                              data->ctx->cpu);
                 tags = blk_mq_tags_from_data(data);
                 if (data->flags & BLK_MQ_REQ_RESERVED)
                         bt = &tags->breserved_tags;
@@ -530,7 +531,7 @@ u32 blk_mq_unique_tag(struct request *rq)
         struct blk_mq_hw_ctx *hctx;
         int hwq = 0;
 
-        hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
+        hctx = blk_mq_map_queue(q, rq->cmd_flags, rq->mq_ctx->cpu);
         hwq = hctx->queue_num;
 
         return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
diff --git a/block/blk-mq.c b/block/blk-mq.c
index fac88d16988b..67dec64440dd 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -331,8 +331,8 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 }
 
 static struct request *blk_mq_get_request(struct request_queue *q,
-                                          struct bio *bio, unsigned int op,
+                                          struct bio *bio,
                                           struct blk_mq_alloc_data *data)
 {
         struct elevator_queue *e = q->elevator;
         struct request *rq;
@@ -346,8 +346,9 @@ static struct request *blk_mq_get_request(struct request_queue *q,
                 put_ctx_on_error = true;
         }
         if (likely(!data->hctx))
-                data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
-        if (op & REQ_NOWAIT)
+                data->hctx = blk_mq_map_queue(q, data->cmd_flags,
+                                              data->ctx->cpu);
+        if (data->cmd_flags & REQ_NOWAIT)
                 data->flags |= BLK_MQ_REQ_NOWAIT;
 
         if (e) {
@@ -358,9 +359,10 @@ static struct request *blk_mq_get_request(struct request_queue *q,
                  * dispatch list. Don't include reserved tags in the
                  * limiting, as it isn't useful.
                  */
-                if (!op_is_flush(op) && e->type->ops.limit_depth &&
+                if (!op_is_flush(data->cmd_flags) &&
+                    e->type->ops.limit_depth &&
                     !(data->flags & BLK_MQ_REQ_RESERVED))
-                        e->type->ops.limit_depth(op, data);
+                        e->type->ops.limit_depth(data->cmd_flags, data);
         } else {
                 blk_mq_tag_busy(data->hctx);
         }
@@ -375,8 +377,8 @@ static struct request *blk_mq_get_request(struct request_queue *q,
                 return NULL;
         }
 
-        rq = blk_mq_rq_ctx_init(data, tag, op);
-        if (!op_is_flush(op)) {
+        rq = blk_mq_rq_ctx_init(data, tag, data->cmd_flags);
+        if (!op_is_flush(data->cmd_flags)) {
                 rq->elv.icq = NULL;
                 if (e && e->type->ops.prepare_request) {
                         if (e->type->icq_cache && rq_ioc(bio))
@@ -393,7 +395,7 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
                 blk_mq_req_flags_t flags)
 {
-        struct blk_mq_alloc_data alloc_data = { .flags = flags };
+        struct blk_mq_alloc_data alloc_data = { .flags = flags, .cmd_flags = op };
         struct request *rq;
         int ret;
 
@@ -401,7 +403,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
         if (ret)
                 return ERR_PTR(ret);
 
-        rq = blk_mq_get_request(q, NULL, op, &alloc_data);
+        rq = blk_mq_get_request(q, NULL, &alloc_data);
         blk_queue_exit(q);
 
         if (!rq)
@@ -419,7 +421,7 @@ EXPORT_SYMBOL(blk_mq_alloc_request);
 struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
         unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
 {
-        struct blk_mq_alloc_data alloc_data = { .flags = flags };
+        struct blk_mq_alloc_data alloc_data = { .flags = flags, .cmd_flags = op };
         struct request *rq;
         unsigned int cpu;
         int ret;
@@ -452,7 +454,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
         cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask);
         alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
 
-        rq = blk_mq_get_request(q, NULL, op, &alloc_data);
+        rq = blk_mq_get_request(q, NULL, &alloc_data);
         blk_queue_exit(q);
 
         if (!rq)
@@ -466,7 +468,7 @@ static void __blk_mq_free_request(struct request *rq)
 {
         struct request_queue *q = rq->q;
         struct blk_mq_ctx *ctx = rq->mq_ctx;
-        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
         const int sched_tag = rq->internal_tag;
 
         blk_pm_mark_last_busy(rq);
@@ -483,7 +485,7 @@ void blk_mq_free_request(struct request *rq)
         struct request_queue *q = rq->q;
         struct elevator_queue *e = q->elevator;
         struct blk_mq_ctx *ctx = rq->mq_ctx;
-        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
 
         if (rq->rq_flags & RQF_ELVPRIV) {
                 if (e && e->type->ops.finish_request)
@@ -977,8 +979,9 @@ bool blk_mq_get_driver_tag(struct request *rq)
 {
         struct blk_mq_alloc_data data = {
                 .q = rq->q,
-                .hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
+                .hctx = blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu),
                 .flags = BLK_MQ_REQ_NOWAIT,
+                .cmd_flags = rq->cmd_flags,
         };
         bool shared;
 
@@ -1142,7 +1145,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 
                 rq = list_first_entry(list, struct request, queuelist);
 
-                hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
+                hctx = blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu);
                 if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
                         break;
 
@@ -1573,7 +1576,8 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
 {
         struct blk_mq_ctx *ctx = rq->mq_ctx;
-        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
+        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, rq->cmd_flags,
+                                                      ctx->cpu);
 
         spin_lock(&hctx->lock);
         list_add_tail(&rq->queuelist, &hctx->dispatch);
@@ -1783,7 +1787,8 @@ blk_status_t blk_mq_request_issue_directly(struct request *rq)
         int srcu_idx;
         blk_qc_t unused_cookie;
         struct blk_mq_ctx *ctx = rq->mq_ctx;
-        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
+        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, rq->cmd_flags,
+                                                      ctx->cpu);
 
         hctx_lock(hctx, &srcu_idx);
         ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true);
@@ -1817,7 +1822,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
         const int is_sync = op_is_sync(bio->bi_opf);
         const int is_flush_fua = op_is_flush(bio->bi_opf);
-        struct blk_mq_alloc_data data = { .flags = 0 };
+        struct blk_mq_alloc_data data = { .flags = 0, .cmd_flags = bio->bi_opf };
         struct request *rq;
         unsigned int request_count = 0;
         struct blk_plug *plug;
@@ -1840,7 +1845,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
         rq_qos_throttle(q, bio, NULL);
 
-        rq = blk_mq_get_request(q, bio, bio->bi_opf, &data);
+        rq = blk_mq_get_request(q, bio, &data);
         if (unlikely(!rq)) {
                 rq_qos_cleanup(q, bio);
                 if (bio->bi_opf & REQ_NOWAIT)
@@ -1909,6 +1914,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
                 if (same_queue_rq) {
                         data.hctx = blk_mq_map_queue(q,
+                                        same_queue_rq->cmd_flags,
                                         same_queue_rq->mq_ctx->cpu);
                         blk_mq_try_issue_directly(data.hctx, same_queue_rq,
                                         &cookie);
@@ -2263,7 +2269,7 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
                  * Set local node, IFF we have more than one hw queue. If
                  * not, we remain on the home node of the device
                  */
-                hctx = blk_mq_map_queue(q, i);
+                hctx = blk_mq_map_queue_type(q, 0, i);
                 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
                         hctx->numa_node = local_memory_node(cpu_to_node(i));
         }
@@ -2336,7 +2342,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
                 }
 
                 ctx = per_cpu_ptr(q->queue_ctx, i);
-                hctx = blk_mq_map_queue(q, i);
+                hctx = blk_mq_map_queue_type(q, 0, i);
 
                 cpumask_set_cpu(i, hctx->cpumask);
                 ctx->index_hw = hctx->nr_ctx;
diff --git a/block/blk-mq.h b/block/blk-mq.h
index d9facfb9ca51..6a8f8b60d8ba 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -73,6 +73,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);
 
 static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
+                                                     unsigned int flags,
                                                      unsigned int cpu)
 {
         struct blk_mq_tag_set *set = q->tag_set;
@@ -84,7 +85,7 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *
                                                           unsigned int hctx_type,
                                                           unsigned int cpu)
 {
-        return blk_mq_map_queue(q, cpu);
+        return blk_mq_map_queue(q, hctx_type, cpu);
 }
 
 /*
@@ -135,6 +136,7 @@ struct blk_mq_alloc_data {
         struct request_queue *q;
         blk_mq_req_flags_t flags;
         unsigned int shallow_depth;
+        unsigned int cmd_flags;
 
         /* input & output parameter */
         struct blk_mq_ctx *ctx;
@@ -209,7 +211,7 @@ static inline void blk_mq_put_driver_tag(struct request *rq)
         if (rq->tag == -1 || rq->internal_tag == -1)
                 return;
 
-        hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
+        hctx = blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu);
         __blk_mq_put_driver_tag(hctx, rq);
 }
 
diff --git a/block/blk.h b/block/blk.h
index 2bf1cfeeb9c0..78ae94886acf 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -104,10 +104,10 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
         __clear_bit(flag, &q->queue_flags);
 }
 
-static inline struct blk_flush_queue *blk_get_flush_queue(
-                struct request_queue *q, struct blk_mq_ctx *ctx)
+static inline struct blk_flush_queue *
+blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
 {
-        return blk_mq_map_queue(q, ctx->cpu)->fq;
+        return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx->cpu)->fq;
 }
 
 static inline void __blk_get_queue(struct request_queue *q)