aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorOmar Sandoval <osandov@fb.com>2017-02-22 13:58:30 -0500
committerJens Axboe <axboe@fb.com>2017-02-23 13:55:47 -0500
commitd38d35155514ceef5efb79f6d5b4f0f1638da5b4 (patch)
treeea9eb2e3a10f286ec6813026d6a52a4483602cd4
parentda55f2cc78418dee88400aafbbaed19d7ac8188e (diff)
blk-mq-sched: separate mark hctx and queue restart operations
In blk_mq_sched_dispatch_requests(), we call blk_mq_sched_mark_restart() after we dispatch requests left over on our hardware queue dispatch list. This is so we'll go back and dispatch requests from the scheduler. In this case, it's only necessary to restart the hardware queue that we are running; there's no reason to run other hardware queues just because we are using shared tags. So, split out blk_mq_sched_mark_restart() into two operations, one for just the hardware queue and one for the whole request queue. The core code only needs the hctx variant, but I/O schedulers will want to use both. This also requires adjusting blk_mq_sched_restart_queues() to always check the queue restart flag, not just when using shared tags. Signed-off-by: Omar Sandoval <osandov@fb.com> Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r--block/blk-mq-sched.c20
-rw-r--r--block/blk-mq-sched.h26
2 files changed, 26 insertions, 20 deletions
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index c14f92308244..98c7b061781e 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -205,7 +205,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 	 * needing a restart in that case.
 	 */
 	if (!list_empty(&rq_list)) {
-		blk_mq_sched_mark_restart(hctx);
+		blk_mq_sched_mark_restart_hctx(hctx);
 		did_work = blk_mq_dispatch_rq_list(hctx, &rq_list);
 	} else if (!has_sched_dispatch) {
 		blk_mq_flush_busy_ctxs(hctx, &rq_list);
@@ -331,20 +331,16 @@ static void blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
 
 void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx)
 {
+	struct request_queue *q = hctx->queue;
 	unsigned int i;
 
-	if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
+	if (test_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) {
+		if (test_and_clear_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) {
+			queue_for_each_hw_ctx(q, hctx, i)
+				blk_mq_sched_restart_hctx(hctx);
+		}
+	} else {
 		blk_mq_sched_restart_hctx(hctx);
-	else {
-		struct request_queue *q = hctx->queue;
-
-		if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags))
-			return;
-
-		clear_bit(QUEUE_FLAG_RESTART, &q->queue_flags);
-
-		queue_for_each_hw_ctx(q, hctx, i)
-			blk_mq_sched_restart_hctx(hctx);
 	}
 }
 
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 7b5f3b95c78e..a75b16b123f7 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -122,17 +122,27 @@ static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
 	return false;
 }
 
-static inline void blk_mq_sched_mark_restart(struct blk_mq_hw_ctx *hctx)
+/*
+ * Mark a hardware queue as needing a restart.
+ */
+static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
 {
-	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
+	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
 		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
-		if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
-			struct request_queue *q = hctx->queue;
+}
+
+/*
+ * Mark a hardware queue and the request queue it belongs to as needing a
+ * restart.
+ */
+static inline void blk_mq_sched_mark_restart_queue(struct blk_mq_hw_ctx *hctx)
+{
+	struct request_queue *q = hctx->queue;
 
-			if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags))
-				set_bit(QUEUE_FLAG_RESTART, &q->queue_flags);
-		}
-	}
+	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+	if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags))
+		set_bit(QUEUE_FLAG_RESTART, &q->queue_flags);
 }
 
 static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)