 block/blk-mq.c   | 27 ++++++++++++++++++++++++++-
 block/elevator.c |  2 ++
 2 files changed, 28 insertions(+), 1 deletion(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 11097477eeab..1c66c319325c 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1285,7 +1285,30 @@ EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
 
 bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 {
-	if (blk_mq_hctx_has_pending(hctx)) {
+	int srcu_idx;
+	bool need_run;
+
+	/*
+	 * When queue is quiesced, we may be switching io scheduler, or
+	 * updating nr_hw_queues, or other things, and we can't run queue
+	 * any more, even __blk_mq_hctx_has_pending() can't be called safely.
+	 *
+	 * And queue will be rerun in blk_mq_unquiesce_queue() if it is
+	 * quiesced.
+	 */
+	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
+		rcu_read_lock();
+		need_run = !blk_queue_quiesced(hctx->queue) &&
+			blk_mq_hctx_has_pending(hctx);
+		rcu_read_unlock();
+	} else {
+		srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
+		need_run = !blk_queue_quiesced(hctx->queue) &&
+			blk_mq_hctx_has_pending(hctx);
+		srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
+	}
+
+	if (need_run) {
 		__blk_mq_delay_run_hw_queue(hctx, async, 0);
 		return true;
 	}
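Note on the hunk above: the quiesced check is only meaningful because it sits inside an RCU read-side section (or an SRCU one for BLK_MQ_F_BLOCKING hctxs). The update side pairs with it by setting the quiesced flag and then waiting out all such readers, so any blk_mq_run_hw_queue() that already saw "not quiesced" finishes before quiescing is considered complete. The sketch below is a simplified paraphrase of what blk_mq_quiesce_queue() does in this era of the tree, not a verbatim copy; example_quiesce_queue() is a stand-in name.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/rcupdate.h>
#include <linux/srcu.h>

/* Simplified sketch of the quiesce update side (illustrative only). */
static void example_quiesce_queue(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;
	bool rcu = false;

	/* Mark the queue quiesced; this is what blk_queue_quiesced() tests. */
	blk_mq_quiesce_queue_nowait(q);

	/* Wait for every reader that may still see the old flag value. */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->flags & BLK_MQ_F_BLOCKING)
			synchronize_srcu(hctx->queue_rq_srcu);
		else
			rcu = true;
	}
	if (rcu)
		synchronize_rcu();
}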
@@ -2710,6 +2733,7 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 		return -EINVAL;
 
 	blk_mq_freeze_queue(q);
+	blk_mq_quiesce_queue(q);
 
 	ret = 0;
 	queue_for_each_hw_ctx(q, hctx, i) {
@@ -2733,6 +2757,7 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 	if (!ret)
 		q->nr_requests = nr;
 
+	blk_mq_unquiesce_queue(q);
 	blk_mq_unfreeze_queue(q);
 
 	return ret;
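Taken together, the two hunks above bracket the nr_requests update with quiesce/unquiesce inside the existing freeze/unfreeze pair: freeze drains requests already in flight, quiesce fences off new dispatch (including async runs via blk_mq_run_hw_queue()), and unquiesce reruns the queues afterwards, per the comment in the first hunk. Below is a hedged sketch of that ordering; only the blk_mq_* helpers are real, and example_update_depth() is a hypothetical placeholder for the per-hctx tag/scheduler updates.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Hypothetical placeholder for the per-hctx depth updates. */
static int example_update_depth(struct request_queue *q, unsigned int nr);

static int example_update_nr_requests(struct request_queue *q, unsigned int nr)
{
	int ret;

	blk_mq_freeze_queue(q);		/* drain requests already in flight */
	blk_mq_quiesce_queue(q);	/* stop new dispatch and queue runs */

	ret = example_update_depth(q, nr);

	blk_mq_unquiesce_queue(q);	/* reruns queues that were held off */
	blk_mq_unfreeze_queue(q);
	return ret;
}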
diff --git a/block/elevator.c b/block/elevator.c
index 7bda083d5968..138faeb08a7c 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -968,6 +968,7 @@ static int elevator_switch_mq(struct request_queue *q,
 	int ret;
 
 	blk_mq_freeze_queue(q);
+	blk_mq_quiesce_queue(q);
 
 	if (q->elevator) {
 		if (q->elevator->registered)
@@ -994,6 +995,7 @@ static int elevator_switch_mq(struct request_queue *q,
 		blk_add_trace_msg(q, "elv switch: none");
 
 out:
+	blk_mq_unquiesce_queue(q);
 	blk_mq_unfreeze_queue(q);
 	return ret;
 }
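The elevator.c hunks apply the same freeze-plus-quiesce bracket to the scheduler switch. Quiescing matters here because the pending check on the run-queue path consults the scheduler's callbacks, which are exactly what elevator_switch_mq() tears down and replaces. The snippet below is paraphrased from the 4.13-era blk_mq_sched_has_work() helper in block/blk-mq-sched.h; treat it as an illustration of the race being closed, not a verbatim quote.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>

/* Approximate shape of the scheduler "has work" check (illustrative). */
static bool example_sched_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct elevator_queue *e = hctx->queue->elevator;

	/* Without quiescing, 'e' and its ops could be mid-switch here. */
	if (e && e->type->ops.mq.has_work)
		return e->type->ops.mq.has_work(hctx);

	return false;
}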