aboutsummaryrefslogtreecommitdiffstats
path: root/block
diff options
context:
space:
mode:
authorGabriel Krisman Bertazi <krisman@linux.vnet.ibm.com>2016-09-27 23:24:24 -0400
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2017-01-19 14:18:07 -0500
commitd9c19f90f3a50eb4ff8d17616492338c0b2f9434 (patch)
tree3a4c6783db9c7970a0c4af0b36564d5a4482a4aa /block
parentcb50d45c3d42633830ca60b34bf46c14941cdde4 (diff)
blk-mq: Always schedule hctx->next_cpu
commit c02ebfdddbafa9a6a0f52fbd715e6bfa229af9d3 upstream.

Commit 0e87e58bf60e ("blk-mq: improve warning for running a queue on the
wrong CPU") attempts to avoid triggering the WARN_ON in
__blk_mq_run_hw_queue when the expected CPU is dead. Problem is, in the
last batch execution before round robin, blk_mq_hctx_next_cpu can
schedule a dead CPU and also update next_cpu to the next alive CPU in
the mask, which will trigger the WARN_ON despite the previous
workaround.

The following patch fixes this scenario by always scheduling the value
in hctx->next_cpu. This changes the moment when we round-robin the CPU
running the hctx, but it really doesn't matter, since it still executes
BLK_MQ_CPU_WORK_BATCH times in a row before switching to another CPU.

Fixes: 0e87e58bf60e ("blk-mq: improve warning for running a queue on the wrong CPU")
Signed-off-by: Gabriel Krisman Bertazi <krisman@linux.vnet.ibm.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'block')
-rw-r--r--block/blk-mq.c4
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ad459e4e8071..81caceb96c3c 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -895,7 +895,7 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
 		return WORK_CPU_UNBOUND;
 
 	if (--hctx->next_cpu_batch <= 0) {
-		int cpu = hctx->next_cpu, next_cpu;
+		int next_cpu;
 
 		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
 		if (next_cpu >= nr_cpu_ids)
@@ -903,8 +903,6 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
 
 		hctx->next_cpu = next_cpu;
 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
-
-		return cpu;
 	}
 
 	return hctx->next_cpu;