author		Ming Lei <ming.lei@redhat.com>		2017-11-02 11:24:38 -0400
committer	Jens Axboe <axboe@kernel.dk>		2017-11-04 14:40:13 -0400
commit		923218f6166a84688973acdc39094f3bee1e9ad4
tree		1013b8c39764532780292633f7e4214c99480aaf /block/blk-mq.c
parent		244c65a3ccaa06fd15cc940315606674d3108b2f
blk-mq: don't allocate driver tag upfront for flush rq
The idea behind it is simple:

1) For the "none" scheduler, the driver tag has to be borrowed for the
   flush rq; otherwise we may run out of tags, and that causes an IO
   hang. And get/put of the driver tag is actually a noop for "none",
   so reordering tags isn't necessary at all.

2) For a real I/O scheduler, we need not allocate a driver tag upfront
   for the flush rq. It works just fine to follow the same approach as
   for normal requests: allocate the driver tag for each rq just before
   calling ->queue_rq().

One driver-visible change is that the driver tag isn't shared across the
flush request sequence. That won't be a problem, since this is how the
legacy path has always behaved.

The flush rq then need not be treated specially wrt. get/put of the
driver tag. This cleans up the code - for instance, reorder_tags_to_front()
can be removed, and we no longer need to worry about request ordering in
the dispatch list to avoid an I/O deadlock.

Also, we have to put the driver tag before requeueing.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
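To make point 1) concrete, here is a minimal userspace model of a fixed
driver-tag pool. This is not the kernel's tag allocator; the pool size and
all names below are invented for illustration. Once every tag is held by an
in-flight request, a flush that insists on a fresh tag can never make
progress, while borrowing the tag of the request that triggered the flush
always succeeds:

#include <stdbool.h>
#include <stdio.h>

#define NR_TAGS 4	/* hypothetical driver-tag pool size */

static bool tag_in_use[NR_TAGS];

/* Take any free tag; -1 means the pool is exhausted. */
static int get_tag(void)
{
	for (int i = 0; i < NR_TAGS; i++) {
		if (!tag_in_use[i]) {
			tag_in_use[i] = true;
			return i;
		}
	}
	return -1;
}

int main(void)
{
	int tags[NR_TAGS];

	/* Every tag is held by an in-flight data request. */
	for (int i = 0; i < NR_TAGS; i++)
		tags[i] = get_tag();

	/* A flush rq that needs a fresh tag is stuck: nothing frees a
	 * tag until the flush itself completes, i.e. an IO hang. */
	printf("fresh tag for flush: %d\n", get_tag());		/* -1 */

	/* Borrowing the tag of the rq that triggered the flush always
	 * works: that tag already belongs to this very I/O. */
	printf("borrowed tag for flush: %d\n", tags[0]);	/* 0 */
	return 0;
}

In the real allocator a waiter blocks on a tag wait queue rather than
polling, but the exhaustion argument is the same.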
Diffstat (limited to 'block/blk-mq.c')
 block/blk-mq.c | 41 ++++++-----------------------------------
 1 file changed, 6 insertions(+), 35 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 14f6886fbec8..c501cbd0de93 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -653,6 +653,8 @@ static void __blk_mq_requeue_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
 
+	blk_mq_put_driver_tag(rq);
+
 	trace_block_rq_requeue(q, rq);
 	wbt_requeue(q->rq_wb, &rq->issue_stat);
 	blk_mq_sched_requeue_request(rq);
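The hunk above centralizes the release: every requeue now puts the driver
tag. A sketch of why the unconditional call is safe, under the simplifying
assumption (mirroring, but not reproducing, the kernel's
blk_mq_put_driver_tag()) that a request without a driver tag carries
tag == -1 and the put degenerates to a noop:

#include <stdio.h>

struct request {
	int tag;	/* -1 means "no driver tag held" */
};

/* Safe to call on every requeue: without a tag it is a noop. */
static void put_driver_tag(struct request *rq)
{
	if (rq->tag == -1)
		return;			/* nothing to release */
	printf("released tag %d\n", rq->tag);
	rq->tag = -1;
}

static void requeue_request(struct request *rq)
{
	put_driver_tag(rq);	/* always drop the tag first */
	/* ... re-insert rq; a later dispatch re-allocates the tag
	 * just before ->queue_rq() ... */
}

int main(void)
{
	struct request tagged = { .tag = 3 };
	struct request untagged = { .tag = -1 };

	requeue_request(&tagged);	/* prints "released tag 3" */
	requeue_request(&untagged);	/* noop */
	return 0;
}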
@@ -996,30 +998,6 @@ done:
 	return rq->tag != -1;
 }
 
-/*
- * If we fail getting a driver tag because all the driver tags are already
- * assigned and on the dispatch list, BUT the first entry does not have a
- * tag, then we could deadlock. For that case, move entries with assigned
- * driver tags to the front, leaving the set of tagged requests in the
- * same order, and the untagged set in the same order.
- */
-static bool reorder_tags_to_front(struct list_head *list)
-{
-	struct request *rq, *tmp, *first = NULL;
-
-	list_for_each_entry_safe_reverse(rq, tmp, list, queuelist) {
-		if (rq == first)
-			break;
-		if (rq->tag != -1) {
-			list_move(&rq->queuelist, list);
-			if (!first)
-				first = rq;
-		}
-	}
-
-	return first != NULL;
-}
-
 static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
 				void *key)
 {
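For reference, the helper removed above relied on a small invariant:
walking the list in reverse and moving every tagged entry to the head
preserves the relative order of the tagged entries and of the untagged
ones. Below is a self-contained userspace re-implementation of that walk;
the kernel's list.h primitives are open-coded and the names are mine, not
the kernel's:

#include <stdio.h>

/* Minimal circular doubly linked list with a sentinel head node. */
struct node {
	int tag;		/* -1 = no driver tag */
	struct node *prev, *next;
};

/* Unlink n and re-insert it right after head (i.e. at the front). */
static void move_to_front(struct node *head, struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

int main(void)
{
	struct node head = { 0, &head, &head };
	struct node nodes[] = { { -1 }, { 7 }, { -1 }, { 3 } };

	/* Build the list [-1, 7, -1, 3] by appending at the tail. */
	for (int i = 0; i < 4; i++) {
		nodes[i].prev = head.prev;
		nodes[i].next = &head;
		head.prev->next = &nodes[i];
		head.prev = &nodes[i];
	}

	/* Reverse walk, moving tagged entries to the front; "tmp" keeps
	 * the walk safe across moves, "first" stops a second pass over
	 * already-moved entries, as in the removed kernel helper. */
	struct node *first = NULL;
	struct node *n = head.prev, *tmp = n->prev;

	while (n != &head && n != first) {
		if (n->tag != -1) {
			move_to_front(&head, n);
			if (!first)
				first = n;
		}
		n = tmp;
		tmp = n->prev;
	}

	for (n = head.next; n != &head; n = n->next)
		printf("%d ", n->tag);	/* prints: 7 3 -1 -1 */
	printf("\n");
	return 0;
}

Both tagged entries (7 before 3) and untagged entries keep their original
relative order, exactly the property the old comment promised.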
@@ -1080,9 +1058,6 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 
 		rq = list_first_entry(list, struct request, queuelist);
 		if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
-			if (!queued && reorder_tags_to_front(list))
-				continue;
-
 			/*
 			 * The initial allocation attempt failed, so we need to
 			 * rerun the hardware queue when a tag is freed.
@@ -1133,7 +1108,6 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 				nxt = list_first_entry(list, struct request, queuelist);
 				blk_mq_put_driver_tag(nxt);
 			}
-			blk_mq_put_driver_tag_hctx(hctx, rq);
 			list_add(&rq->queuelist, list);
 			__blk_mq_requeue_request(rq);
 			break;
@@ -1698,13 +1672,10 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	if (unlikely(is_flush_fua)) {
 		blk_mq_put_ctx(data.ctx);
 		blk_mq_bio_to_request(rq, bio);
-		if (q->elevator) {
-			blk_mq_sched_insert_request(rq, false, true, true,
-					true);
-		} else {
-			blk_insert_flush(rq);
-			blk_mq_run_hw_queue(data.hctx, true);
-		}
+
+		/* bypass scheduler for flush rq */
+		blk_insert_flush(rq);
+		blk_mq_run_hw_queue(data.hctx, true);
 	} else if (plug && q->nr_hw_queues == 1) {
 		struct request *last = NULL;
 
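Taken together, the flush rq now flows through dispatch like any other
request: the driver tag is taken just before ->queue_rq() and given back on
completion or requeue. A toy model of this just-in-time scheme (invented
names and a trivial pool, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

#define NR_TAGS 2

static bool tag_in_use[NR_TAGS];

static int get_tag(void)
{
	for (int i = 0; i < NR_TAGS; i++)
		if (!tag_in_use[i]) {
			tag_in_use[i] = true;
			return i;
		}
	return -1;
}

static void put_tag(int tag)
{
	tag_in_use[tag] = false;
}

struct request {
	const char *name;
	int tag;
};

/* Allocate the driver tag just before "queue_rq", for flush and normal
 * requests alike; on exhaustion, stop and retry once a tag is freed
 * (the kernel rearms the hw queue via a tag wait queue instead). */
static int dispatch(struct request *rqs, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		rqs[i].tag = get_tag();
		if (rqs[i].tag == -1)
			break;	/* re-run when a tag frees up */
		printf("queue_rq(%s) with tag %d\n", rqs[i].name, rqs[i].tag);
	}
	return i;	/* number of requests issued */
}

int main(void)
{
	struct request rqs[] = { {"write", -1}, {"flush", -1}, {"read", -1} };
	int done = dispatch(rqs, 3);	/* issues "write" and "flush" */

	put_tag(rqs[0].tag);		/* first request completes */
	dispatch(rqs + done, 3 - done);	/* "read" now gets the freed tag */
	return 0;
}

No request, flush included, ever holds a tag while parked on a list, which
is why the dispatch-order workaround above could be deleted.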