author     Jens Axboe <axboe@fb.com>  2017-01-27 03:00:47 -0500
committer  Jens Axboe <axboe@fb.com>  2017-01-27 11:03:14 -0500
commit     bd6737f1ae92e2f1c6e8362efe96dbe7f18fa07d (patch)
tree       ffed03cc3bd01143a8e43d6daca2288836a4a9e3
parent     f73f44eb00cb136990cfb7d40e436c13d7669ec8 (diff)
blk-mq-sched: add flush insertion into blk_mq_sched_insert_request()
Instead of letting the caller check this and handle the details of inserting a flush request, put the logic in the scheduler insertion function. This fixes direct flush insertion outside of the usual make_request_fn calls, like from dm via blk_insert_cloned_request().

Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r--   block/blk-core.c       2
-rw-r--r--   block/blk-exec.c       2
-rw-r--r--   block/blk-flush.c      2
-rw-r--r--   block/blk-mq-sched.c  58
-rw-r--r--   block/blk-mq-sched.h  45
-rw-r--r--   block/blk-mq-tag.c     8
-rw-r--r--   block/blk-mq.c        25
-rw-r--r--   block/blk-mq.h         2
8 files changed, 89 insertions, 55 deletions
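
To make the reworked calling convention concrete, here is a short caller-side sketch. It is illustrative only and not part of the patch: the function name example_insert_direct is hypothetical, and the body simply restates what the block/blk-core.c hunk below does. After this change a direct-insertion caller passes the new can_block argument and leaves all flush/FUA handling to blk_mq_sched_insert_request(), which either tags the request and calls blk_insert_flush() or punts it to the requeue list when blocking is not allowed.

/*
 * Hypothetical caller modeled on blk_insert_cloned_request() after this
 * patch. The flush/FUA special case is no longer the caller's concern.
 */
static int example_insert_direct(struct request_queue *q, struct request *rq)
{
        if (q->mq_ops) {
                if (blk_queue_io_stat(q))
                        blk_account_io_start(rq, true);
                /*
                 * at_head=false, run_queue=true, async=false, can_block=false:
                 * this path may run from a context that must not sleep waiting
                 * for a driver tag, so an untagged flush is requeued instead.
                 */
                blk_mq_sched_insert_request(rq, false, true, false, false);
                return 0;
        }

        /* Legacy (non-mq) insertion path omitted from this sketch. */
        return 0;
}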
diff --git a/block/blk-core.c b/block/blk-core.c
index b830e14117dd..4bfd8674afd0 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2129,7 +2129,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
         if (q->mq_ops) {
                 if (blk_queue_io_stat(q))
                         blk_account_io_start(rq, true);
-                blk_mq_sched_insert_request(rq, false, true, false);
+                blk_mq_sched_insert_request(rq, false, true, false, false);
                 return 0;
         }
 
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 86656fdfa637..ed1f10165268 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -66,7 +66,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
          * be reused after dying flag is set
          */
         if (q->mq_ops) {
-                blk_mq_sched_insert_request(rq, at_head, true, false);
+                blk_mq_sched_insert_request(rq, at_head, true, false, false);
                 return;
         }
 
diff --git a/block/blk-flush.c b/block/blk-flush.c
index d7de34ee39c2..4427896641ac 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -456,7 +456,7 @@ void blk_insert_flush(struct request *rq)
         if ((policy & REQ_FSEQ_DATA) &&
             !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
                 if (q->mq_ops)
-                        blk_mq_sched_insert_request(rq, false, true, false);
+                        blk_mq_sched_insert_request(rq, false, true, false, false);
                 else
                         list_add_tail(&rq->queuelist, &q->queue_head);
                 return;
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 4139b07ab33b..1112752f888d 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -335,6 +335,64 @@ void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx)
         }
 }
 
+/*
+ * Add flush/fua to the queue. If we fail getting a driver tag, then
+ * punt to the requeue list. Requeue will re-invoke us from a context
+ * that's safe to block from.
+ */
+static void blk_mq_sched_insert_flush(struct blk_mq_hw_ctx *hctx,
+                                      struct request *rq, bool can_block)
+{
+        if (blk_mq_get_driver_tag(rq, &hctx, can_block)) {
+                blk_insert_flush(rq);
+                blk_mq_run_hw_queue(hctx, true);
+        } else
+                blk_mq_add_to_requeue_list(rq, true, true);
+}
+
+void blk_mq_sched_insert_request(struct request *rq, bool at_head,
+                                 bool run_queue, bool async, bool can_block)
+{
+        struct request_queue *q = rq->q;
+        struct elevator_queue *e = q->elevator;
+        struct blk_mq_ctx *ctx = rq->mq_ctx;
+        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+
+        if (rq->tag == -1 && (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))) {
+                blk_mq_sched_insert_flush(hctx, rq, can_block);
+                return;
+        }
+
+        if (e && e->type->ops.mq.insert_requests) {
+                LIST_HEAD(list);
+
+                list_add(&rq->queuelist, &list);
+                e->type->ops.mq.insert_requests(hctx, &list, at_head);
+        } else {
+                spin_lock(&ctx->lock);
+                __blk_mq_insert_request(hctx, rq, at_head);
+                spin_unlock(&ctx->lock);
+        }
+
+        if (run_queue)
+                blk_mq_run_hw_queue(hctx, async);
+}
+
+void blk_mq_sched_insert_requests(struct request_queue *q,
+                                  struct blk_mq_ctx *ctx,
+                                  struct list_head *list, bool run_queue_async)
+{
+        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+        struct elevator_queue *e = hctx->queue->elevator;
+
+        if (e && e->type->ops.mq.insert_requests)
+                e->type->ops.mq.insert_requests(hctx, list, false);
+        else
+                blk_mq_insert_requests(hctx, ctx, list);
+
+        blk_mq_run_hw_queue(hctx, run_queue_async);
+}
+
 static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
                                    struct blk_mq_hw_ctx *hctx,
                                    unsigned int hctx_idx)
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index becbc7840364..9478aaeb48c5 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -21,6 +21,12 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
 bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
 void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx);
 
+void blk_mq_sched_insert_request(struct request *rq, bool at_head,
+                                 bool run_queue, bool async, bool can_block);
+void blk_mq_sched_insert_requests(struct request_queue *q,
+                                  struct blk_mq_ctx *ctx,
+                                  struct list_head *list, bool run_queue_async);
+
 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
 void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
                                    struct list_head *rq_list,
@@ -62,45 +68,6 @@ static inline void blk_mq_sched_put_rq_priv(struct request_queue *q,
                 e->type->ops.mq.put_rq_priv(q, rq);
 }
 
-static inline void
-blk_mq_sched_insert_request(struct request *rq, bool at_head, bool run_queue,
-                            bool async)
-{
-        struct request_queue *q = rq->q;
-        struct elevator_queue *e = q->elevator;
-        struct blk_mq_ctx *ctx = rq->mq_ctx;
-        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
-
-        if (e && e->type->ops.mq.insert_requests) {
-                LIST_HEAD(list);
-
-                list_add(&rq->queuelist, &list);
-                e->type->ops.mq.insert_requests(hctx, &list, at_head);
-        } else {
-                spin_lock(&ctx->lock);
-                __blk_mq_insert_request(hctx, rq, at_head);
-                spin_unlock(&ctx->lock);
-        }
-
-        if (run_queue)
-                blk_mq_run_hw_queue(hctx, async);
-}
-
-static inline void
-blk_mq_sched_insert_requests(struct request_queue *q, struct blk_mq_ctx *ctx,
-                             struct list_head *list, bool run_queue_async)
-{
-        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
-        struct elevator_queue *e = hctx->queue->elevator;
-
-        if (e && e->type->ops.mq.insert_requests)
-                e->type->ops.mq.insert_requests(hctx, list, false);
-        else
-                blk_mq_insert_requests(hctx, ctx, list);
-
-        blk_mq_run_hw_queue(hctx, run_queue_async);
-}
-
 static inline bool
 blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
                          struct bio *bio)
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index f8de2dbbb29f..54c84363c1b2 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -106,6 +106,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
         struct sbq_wait_state *ws;
         DEFINE_WAIT(wait);
         unsigned int tag_offset;
+        bool drop_ctx;
         int tag;
 
         if (data->flags & BLK_MQ_REQ_RESERVED) {
@@ -128,6 +129,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
                 return BLK_MQ_TAG_FAIL;
 
         ws = bt_wait_ptr(bt, data->hctx);
+        drop_ctx = data->ctx == NULL;
         do {
                 prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);
 
@@ -150,7 +152,8 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
                 if (tag != -1)
                         break;
 
-                blk_mq_put_ctx(data->ctx);
+                if (data->ctx)
+                        blk_mq_put_ctx(data->ctx);
 
                 io_schedule();
 
@@ -166,6 +169,9 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
                 ws = bt_wait_ptr(bt, data->hctx);
         } while (1);
 
+        if (drop_ctx && data->ctx)
+                blk_mq_put_ctx(data->ctx);
+
         finish_wait(&ws->wait, &wait);
 
 found_tag:
diff --git a/block/blk-mq.c b/block/blk-mq.c
index da2123dd681e..60dac10228fe 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -568,13 +568,13 @@ static void blk_mq_requeue_work(struct work_struct *work)
 
                 rq->rq_flags &= ~RQF_SOFTBARRIER;
                 list_del_init(&rq->queuelist);
-                blk_mq_sched_insert_request(rq, true, false, false);
+                blk_mq_sched_insert_request(rq, true, false, false, true);
         }
 
         while (!list_empty(&rq_list)) {
                 rq = list_entry(rq_list.next, struct request, queuelist);
                 list_del_init(&rq->queuelist);
-                blk_mq_sched_insert_request(rq, false, false, false);
+                blk_mq_sched_insert_request(rq, false, false, false, true);
         }
 
         blk_mq_run_hw_queues(q, false);
@@ -847,12 +847,11 @@ static inline unsigned int queued_to_index(unsigned int queued)
         return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
 }
 
-static bool blk_mq_get_driver_tag(struct request *rq,
-                                  struct blk_mq_hw_ctx **hctx, bool wait)
+bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
+                           bool wait)
 {
         struct blk_mq_alloc_data data = {
                 .q = rq->q,
-                .ctx = rq->mq_ctx,
                 .hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
                 .flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
         };
@@ -1395,7 +1394,7 @@ static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
         }
 
 insert:
-        blk_mq_sched_insert_request(rq, false, true, true);
+        blk_mq_sched_insert_request(rq, false, true, true, false);
 }
 
 /*
@@ -1446,10 +1445,12 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
         cookie = request_to_qc_t(data.hctx, rq);
 
         if (unlikely(is_flush_fua)) {
+                blk_mq_put_ctx(data.ctx);
                 blk_mq_bio_to_request(rq, bio);
                 blk_mq_get_driver_tag(rq, NULL, true);
                 blk_insert_flush(rq);
-                goto run_queue;
+                blk_mq_run_hw_queue(data.hctx, true);
+                goto done;
         }
 
         plug = current->plug;
@@ -1502,7 +1503,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                 blk_mq_put_ctx(data.ctx);
                 blk_mq_bio_to_request(rq, bio);
                 blk_mq_sched_insert_request(rq, false, true,
-                                            !is_sync || is_flush_fua);
+                                            !is_sync || is_flush_fua, true);
                 goto done;
         }
         if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
@@ -1512,7 +1513,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                  * latter allows for merging opportunities and more efficient
                  * dispatching.
                  */
-run_queue:
                 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
         }
         blk_mq_put_ctx(data.ctx);
@@ -1568,10 +1568,12 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
         cookie = request_to_qc_t(data.hctx, rq);
 
         if (unlikely(is_flush_fua)) {
+                blk_mq_put_ctx(data.ctx);
                 blk_mq_bio_to_request(rq, bio);
                 blk_mq_get_driver_tag(rq, NULL, true);
                 blk_insert_flush(rq);
-                goto run_queue;
+                blk_mq_run_hw_queue(data.hctx, true);
+                goto done;
         }
 
         /*
@@ -1612,7 +1614,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
                 blk_mq_put_ctx(data.ctx);
                 blk_mq_bio_to_request(rq, bio);
                 blk_mq_sched_insert_request(rq, false, true,
-                                            !is_sync || is_flush_fua);
+                                            !is_sync || is_flush_fua, true);
                 goto done;
         }
         if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
@@ -1622,7 +1624,6 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
                  * latter allows for merging opportunities and more efficient
                  * dispatching.
                  */
-run_queue:
                 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
         }
 
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 077a4003f1fd..57cdbf6c0cee 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -34,6 +34,8 @@ void blk_mq_wake_waiters(struct request_queue *q);
 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *, struct list_head *);
 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
 bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);
+bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
+                           bool wait);
 
 /*
  * Internal helpers for allocating/freeing the request map