Diffstat (limited to 'block/blk-core.c')
 block/blk-core.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 6f8dba161bfe..c359d72e9d76 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -438,14 +438,17 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all)
  */
 void blk_queue_bypass_start(struct request_queue *q)
 {
-	bool drain;
-
 	spin_lock_irq(q->queue_lock);
-	drain = !q->bypass_depth++;
+	q->bypass_depth++;
 	queue_flag_set(QUEUE_FLAG_BYPASS, q);
 	spin_unlock_irq(q->queue_lock);
 
-	if (drain) {
+	/*
+	 * Queues start drained. Skip actual draining till init is
+	 * complete. This avoids lenghty delays during queue init which
+	 * can happen many times during boot.
+	 */
+	if (blk_queue_init_done(q)) {
 		spin_lock_irq(q->queue_lock);
 		__blk_drain_queue(q, false);
 		spin_unlock_irq(q->queue_lock);
@@ -511,7 +514,7 @@ void blk_cleanup_queue(struct request_queue *q)
 	 * prevent that q->request_fn() gets invoked after draining finished.
 	 */
 	if (q->mq_ops) {
-		blk_mq_drain_queue(q);
+		blk_mq_freeze_queue(q);
 		spin_lock_irq(lock);
 	} else {
 		spin_lock_irq(lock);
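
For context, the first hunk changes blk_queue_bypass_start() so that bypass_depth is bumped unconditionally and the expensive drain is deferred until the queue has been marked init-done (as reported by blk_queue_init_done()). The following is a minimal user-space sketch of that gating, not kernel code; the toy_queue type and helper names are illustrative assumptions, not the kernel's API.

/* Toy model of the new gating in blk_queue_bypass_start():
 * the depth counter always increases, but draining is skipped
 * until the queue is flagged as having finished initialization. */
#include <stdbool.h>
#include <stdio.h>

struct toy_queue {
	int bypass_depth;
	bool init_done;		/* stands in for blk_queue_init_done(q) */
};

static void drain_queue(struct toy_queue *q)
{
	printf("draining queue (bypass_depth=%d)\n", q->bypass_depth);
}

static void queue_bypass_start(struct toy_queue *q)
{
	q->bypass_depth++;	/* unconditional, as in the new code */

	/* Queues start drained; skip the drain until init is complete. */
	if (q->init_done)
		drain_queue(q);
	else
		printf("init not done, skipping drain (bypass_depth=%d)\n",
		       q->bypass_depth);
}

int main(void)
{
	struct toy_queue q = { 0 };

	queue_bypass_start(&q);	/* during init: drain skipped */
	q.init_done = true;	/* e.g. once the queue is registered */
	queue_bypass_start(&q);	/* after init: drain runs */
	return 0;
}

Compiled stand-alone, the first call skips the drain and the second one performs it, which mirrors why repeated bypass starts during boot-time queue setup no longer incur the draining delay.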