aboutsummaryrefslogtreecommitdiffstats
path: root/block/blk-mq.c
diff options
context:
space:
mode:
authorJens Axboe <axboe@fb.com>2014-04-04 23:34:48 -0400
committerJens Axboe <axboe@fb.com>2014-04-07 10:17:18 -0400
commitbccb5f7c8bdfe460d95f986c6edf2e75d8052897 (patch)
tree7c745e10cab2c6cd91dbe2d8664601dda6d89a68 /block/blk-mq.c
parent60b0ea120c80cba1cf5fe5ae82a35b1179263de3 (diff)
blk-mq: fix potential stall during CPU unplug with IO pending
When a CPU is unplugged, we move the blk_mq_ctx request entries to the current queue. The current code forgets to remap the blk_mq_hw_ctx before marking the software context pending, which breaks if old-cpu and new-cpu don't map to the same hardware queue. Additionally, if we mark entries as pending in the new hardware queue, then make sure we schedule it for running. Otherwise requests could be sitting there until someone else queues IO for that hardware queue. Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--	block/blk-mq.c	| 8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b1bcc619d0ea..1d2a9bdbee57 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -956,6 +956,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
 			       unsigned int cpu)
 {
 	struct blk_mq_hw_ctx *hctx = data;
+	struct request_queue *q = hctx->queue;
 	struct blk_mq_ctx *ctx;
 	LIST_HEAD(tmp);
 
@@ -965,7 +966,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
 	/*
 	 * Move ctx entries to new CPU, if this one is going away.
 	 */
-	ctx = __blk_mq_get_ctx(hctx->queue, cpu);
+	ctx = __blk_mq_get_ctx(q, cpu);
 
 	spin_lock(&ctx->lock);
 	if (!list_empty(&ctx->rq_list)) {
@@ -977,7 +978,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
 	if (list_empty(&tmp))
 		return;
 
-	ctx = blk_mq_get_ctx(hctx->queue);
+	ctx = blk_mq_get_ctx(q);
 	spin_lock(&ctx->lock);
 
 	while (!list_empty(&tmp)) {
@@ -988,10 +989,13 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
 		list_move_tail(&rq->queuelist, &ctx->rq_list);
 	}
 
+	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 	blk_mq_hctx_mark_pending(hctx, ctx);
 
 	spin_unlock(&ctx->lock);
 	blk_mq_put_ctx(ctx);
+
+	blk_mq_run_hw_queue(hctx, true);
 }
 
 static int blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx,