author	Ming Lei <ming.lei@canonical.com>	2014-12-03 06:38:04 -0500
committer	Jens Axboe <axboe@fb.com>	2014-12-08 23:37:08 -0500
commit	19c66e59ce57e7b181625cbb408d48eb10837763 (patch)
tree	4eb5617cb983d279305e7b9f7e65cf61f0175d2a /block/blk-mq.c
parent	080ff3511450fd73948697fef34a3cc382675b59 (diff)
blk-mq: prevent unmapped hw queue from being scheduled
When a hardware queue has no mapped software queues, it shouldn't be scheduled; otherwise a WARNING or OOPS can be triggered. The blk_mq_hw_queue_mapped() helper is introduced to fix the problem.

Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--	block/blk-mq.c	8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4854e709aa34..b21a3b6f7b65 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -589,7 +589,7 @@ static void blk_mq_rq_timer(unsigned long priv)
 		 * If not software queues are currently mapped to this
 		 * hardware queue, there's nothing to check
 		 */
-		if (!hctx->nr_ctx || !hctx->tags)
+		if (!blk_mq_hw_queue_mapped(hctx))
 			continue;
 
 		blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data);
@@ -809,7 +809,8 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
 
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 {
-	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
+	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state) ||
+	    !blk_mq_hw_queue_mapped(hctx)))
 		return;
 
 	if (!async) {
@@ -916,6 +917,9 @@ static void blk_mq_delay_work_fn(struct work_struct *work)
 
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
 {
+	if (unlikely(!blk_mq_hw_queue_mapped(hctx)))
+		return;
+
 	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
 			&hctx->delay_work, msecs_to_jiffies(msecs));
 }
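
The blk_mq_hw_queue_mapped() helper itself lands in block/blk-mq.h and therefore does not appear in this view, which is limited to block/blk-mq.c. Judging from the condition it replaces in blk_mq_rq_timer() above (!hctx->nr_ctx || !hctx->tags), a minimal sketch of the helper would be:

	/*
	 * Sketch only, inferred from the condition this commit replaces in
	 * blk_mq_rq_timer(): a hardware queue counts as mapped when it has
	 * at least one software (ctx) queue and an allocated tag set. The
	 * real definition lives in block/blk-mq.h and relies on
	 * struct blk_mq_hw_ctx from include/linux/blk-mq.h.
	 */
	static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
	{
		return hctx->nr_ctx && hctx->tags;
	}

With this check in place, blk_mq_run_hw_queue() and blk_mq_delay_queue() return early for an unmapped hctx instead of handing it to kblockd, and the timeout path skips its tag iteration, avoiding the WARNING/OOPS described in the commit message.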