aboutsummaryrefslogtreecommitdiffstats
path: root/block/blk-mq-sched.c
diff options
context:
space:
mode:
authorJens Axboe <axboe@fb.com>2017-01-26 16:42:34 -0500
committerJens Axboe <axboe@fb.com>2017-01-27 10:20:34 -0500
commit50e1dab86aa2c10cbca2f754aae9542169403141 (patch)
tree2e17cd0d604bf63d82e4f8838b57ec579690ca52 /block/blk-mq-sched.c
parent99cf1dc580f0766825395aae4f60ec1d8438f011 (diff)
blk-mq-sched: fix starvation for multiple hardware queues and shared tags
If we have both multiple hardware queues and shared tag map between devices, we need to ensure that we propagate the hardware queue restart bit higher up. This is because we can get into a situation where we don't have any IO pending on a hardware queue, yet we fail getting a tag to start new IO. If that happens, it's not enough to mark the hardware queue as needing a restart, we need to bubble that up to the higher level queue as well. Signed-off-by: Jens Axboe <axboe@fb.com> Reviewed-by: Omar Sandoval <osandov@fb.com> Tested-by: Hannes Reinecke <hare@suse.com>
Diffstat (limited to 'block/blk-mq-sched.c')
-rw-r--r--block/blk-mq-sched.c28
1 file changed, 28 insertions, 0 deletions
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 4cee060a292d..fcc0e893d687 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -301,6 +301,34 @@ bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, struct request *rq)
301} 301}
302EXPORT_SYMBOL_GPL(blk_mq_sched_bypass_insert); 302EXPORT_SYMBOL_GPL(blk_mq_sched_bypass_insert);
303 303
304static void blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
305{
306 if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
307 clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
308 if (blk_mq_hctx_has_pending(hctx))
309 blk_mq_run_hw_queue(hctx, true);
310 }
311}
312
313void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx)
314{
315 unsigned int i;
316
317 if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
318 blk_mq_sched_restart_hctx(hctx);
319 else {
320 struct request_queue *q = hctx->queue;
321
322 if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags))
323 return;
324
325 clear_bit(QUEUE_FLAG_RESTART, &q->queue_flags);
326
327 queue_for_each_hw_ctx(q, hctx, i)
328 blk_mq_sched_restart_hctx(hctx);
329 }
330}
331
304static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set, 332static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
305 struct blk_mq_hw_ctx *hctx, 333 struct blk_mq_hw_ctx *hctx,
306 unsigned int hctx_idx) 334 unsigned int hctx_idx)