author		Tejun Heo <tj@kernel.org>	2014-07-01 12:29:17 -0400
committer	Jens Axboe <axboe@fb.com>	2014-07-01 12:29:17 -0400
commit		776687bce42bb22cce48b5da950e48ebbb9a948f (patch)
tree		68901461bfd070246574f1e2440ba1ef2ae93ec0
parent		531ed6261e7466907418b1a9971a5c71d7d250e4 (diff)
block, blk-mq: draining can't be skipped even if bypass_depth was non-zero
Currently, both blk_queue_bypass_start() and blk_mq_freeze_queue()
skip queue draining if bypass_depth was already above zero. The
assumption is that the caller which bumped bypass_depth has already
performed the draining; however, nothing prevents a new instance of
bypassing/freezing from starting before the previous one finishes
draining. As a result, a later bypassing/freezing instance may complete
while there are still in-flight requests which haven't finished
draining.
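For illustration, a minimal sketch of the pre-patch pattern and the race
it permits (simplified from the kernel code; the two-task interleaving in
the comment is an assumed scenario, not a captured trace):

	static void freeze_queue(struct request_queue *q)
	{
		bool drain;

		spin_lock_irq(q->queue_lock);
		drain = !q->bypass_depth++;	/* only the first caller drains */
		queue_flag_set(QUEUE_FLAG_BYPASS, q);
		spin_unlock_irq(q->queue_lock);

		/*
		 * Task A: bypass_depth 0 -> 1, drain == true, starts draining.
		 * Task B: bypass_depth 1 -> 2, drain == false, returns at once
		 * and treats the queue as frozen even though task A's drain
		 * is still running and requests are still in flight.
		 */
		if (drain)
			blk_mq_drain_queue(q);
	}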
Fix it by draining regardless of bypass_depth. We still skip draining
from blk_queue_bypass_start() while the queue is initializing to avoid
introducing excessive delays during boot. The setting of INIT_DONE is
moved above the initial blk_queue_bypass_end() so that bypassing
attempts can't slip in between.
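The ordering half of the fix can be seen in isolation (a sketch of the
relevant two lines of blk_register_queue(), not the full function; the
comment describes the window the reordering closes):

	/* With the old order, a blk_queue_bypass_start() running between
	 * these two lines saw blk_queue_init_done(q) == false and skipped
	 * draining even though the initial bypass had already ended. */
	queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);	/* now first */
	blk_queue_bypass_end(q);				/* then end the initial bypass */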
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Nicholas A. Bellinger <nab@linux-iscsi.org>
Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r--	block/blk-core.c	| 11 +++++++----
-rw-r--r--	block/blk-mq.c		|  7 ++-----
-rw-r--r--	block/blk-sysfs.c	|  2 +-
3 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 6f8dba161bfe..0d0bdd65b2d7 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -438,14 +438,17 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all)
  */
 void blk_queue_bypass_start(struct request_queue *q)
 {
-	bool drain;
-
 	spin_lock_irq(q->queue_lock);
-	drain = !q->bypass_depth++;
+	q->bypass_depth++;
 	queue_flag_set(QUEUE_FLAG_BYPASS, q);
 	spin_unlock_irq(q->queue_lock);
 
-	if (drain) {
+	/*
+	 * Queues start drained.  Skip actual draining till init is
+	 * complete.  This avoids lengthy delays during queue init which
+	 * can happen many times during boot.
+	 */
+	if (blk_queue_init_done(q)) {
 		spin_lock_irq(q->queue_lock);
 		__blk_drain_queue(q, false);
 		spin_unlock_irq(q->queue_lock);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9541f5111ba6..f4bdddd7ed99 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -131,15 +131,12 @@ void blk_mq_drain_queue(struct request_queue *q)
  */
 static void blk_mq_freeze_queue(struct request_queue *q)
 {
-	bool drain;
-
 	spin_lock_irq(q->queue_lock);
-	drain = !q->bypass_depth++;
+	q->bypass_depth++;
 	queue_flag_set(QUEUE_FLAG_BYPASS, q);
 	spin_unlock_irq(q->queue_lock);
 
-	if (drain)
-		blk_mq_drain_queue(q);
+	blk_mq_drain_queue(q);
 }
 
 static void blk_mq_unfreeze_queue(struct request_queue *q)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 23321fbab293..4db5abf96b9e 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -554,8 +554,8 @@ int blk_register_queue(struct gendisk *disk)
 	 * Initialization must be complete by now.  Finish the initial
 	 * bypass from queue allocation.
 	 */
-	blk_queue_bypass_end(q);
 	queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
+	blk_queue_bypass_end(q);
 
 	ret = blk_trace_init_sysfs(dev);
 	if (ret)