author     Tejun Heo <tj@kernel.org>      2012-04-13 16:11:31 -0400
committer  Jens Axboe <axboe@kernel.dk>   2012-04-20 04:06:06 -0400
commit     b82d4b197c782ced82a8b7b76664125d2d3c156c
tree       2dafdbbdd9ca909e6a633c2d92818995ed58f8dd /block
parent     80fd99792b0b9f162abdf3da12fb10eb9eb5f321
blkcg: make request_queue bypassing on allocation
With the previous change to guarantee bypass visibility for RCU read lock
regions, entering bypass mode involves non-trivial overhead, and future
changes are scheduled to make use of bypass mode during the init path.
Combined, this may end up adding noticeable delay during boot.

This patch makes a request_queue start its life in bypass mode, which is
ended once queue init completes at the end of blk_init_allocated_queue(),
and updates blk_queue_bypass_start() so that draining and RCU
synchronization are performed only when the queue actually enters bypass
mode. This avoids unnecessarily switching in and out of bypass mode during
init, avoiding both the overhead and any nasty surprises that may stem
from leaving bypass mode on half-initialized queues.

The boot time overhead was pointed out by Vivek.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
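For illustration, the depth accounting the patch relies on can be modelled in
a few lines of plain userspace C (toy_queue, toy_bypass_start() and friends
are made-up names, not the kernel code): only the 0 -> 1 transition of
bypass_depth pays for draining and RCU synchronization, so a queue that is
allocated with bypass_depth == 1 can have bypass started and ended during
init without ever draining.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for the queue state touched by the patch. */
struct toy_queue {
	int  bypass_depth;
	bool bypass_flag;
};

/* Mirrors the patched blk_queue_bypass_start(): only the 0 -> 1
 * transition of bypass_depth pays for draining and RCU sync. */
static void toy_bypass_start(struct toy_queue *q)
{
	bool drain = !q->bypass_depth++;

	q->bypass_flag = true;
	if (drain)
		printf("entering bypass: drain queue + synchronize_rcu()\n");
	else
		printf("already bypassing: just bump the depth\n");
}

static void toy_bypass_end(struct toy_queue *q)
{
	if (--q->bypass_depth == 0)
		q->bypass_flag = false;
}

int main(void)
{
	/* Allocation now leaves the queue in bypass mode (depth 1). */
	struct toy_queue q = { .bypass_depth = 1, .bypass_flag = true };

	toy_bypass_start(&q);	/* nested during init: no drain, no RCU sync */
	toy_bypass_end(&q);
	toy_bypass_end(&q);	/* end of init: depth drops to 0 */

	printf("bypassing after init: %s\n", q.bypass_flag ? "yes" : "no");
	return 0;
}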
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c  37
1 file changed, 25 insertions(+), 12 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index f2db628aa509..3b02ba351f8c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -421,14 +421,18 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
  */
 void blk_queue_bypass_start(struct request_queue *q)
 {
+	bool drain;
+
 	spin_lock_irq(q->queue_lock);
-	q->bypass_depth++;
+	drain = !q->bypass_depth++;
 	queue_flag_set(QUEUE_FLAG_BYPASS, q);
 	spin_unlock_irq(q->queue_lock);
 
-	blk_drain_queue(q, false);
-	/* ensure blk_queue_bypass() is %true inside RCU read lock */
-	synchronize_rcu();
+	if (drain) {
+		blk_drain_queue(q, false);
+		/* ensure blk_queue_bypass() is %true inside RCU read lock */
+		synchronize_rcu();
+	}
 }
 EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
 
@@ -577,6 +581,15 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	 */
 	q->queue_lock = &q->__queue_lock;
 
+	/*
+	 * A queue starts its life with bypass turned on to avoid
+	 * unnecessary bypass on/off overhead and nasty surprises during
+	 * init.  The initial bypass will be finished at the end of
+	 * blk_init_allocated_queue().
+	 */
+	q->bypass_depth = 1;
+	__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
+
 	if (blkcg_init_queue(q))
 		goto fail_id;
 
@@ -672,15 +685,15 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
 
 	q->sg_reserved_size = INT_MAX;
 
-	/*
-	 * all done
-	 */
-	if (!elevator_init(q, NULL)) {
-		blk_queue_congestion_threshold(q);
-		return q;
-	}
+	/* init elevator */
+	if (elevator_init(q, NULL))
+		return NULL;
 
-	return NULL;
+	blk_queue_congestion_threshold(q);
+
+	/* all done, end the initial bypass */
+	blk_queue_bypass_end(q);
+	return q;
 }
 EXPORT_SYMBOL(blk_init_allocated_queue);
 
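For context, a rough sketch of how a conventional request_fn-based driver of
this era ends up with a fully initialized, non-bypassing queue (the my_*
identifiers are hypothetical, not taken from the patch): blk_init_queue()
allocates the queue, which now starts out in bypass mode, and then calls
blk_init_allocated_queue(), which ends the initial bypass once the elevator
is set up, so the queue returned to the driver is no longer bypassing.

#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>

static struct request_queue *my_queue;	/* hypothetical example driver */
static DEFINE_SPINLOCK(my_queue_lock);

static void my_request_fn(struct request_queue *q)
{
	/* dispatch requests here */
}

static int __init my_driver_init(void)
{
	/*
	 * blk_init_queue() allocates the queue (which, after this patch,
	 * is born in bypass mode) and runs blk_init_allocated_queue(),
	 * which ends the initial bypass once the elevator is set up.
	 * The queue returned here is therefore no longer bypassing.
	 */
	my_queue = blk_init_queue(my_request_fn, &my_queue_lock);
	if (!my_queue)
		return -ENOMEM;
	return 0;
}

static void __exit my_driver_exit(void)
{
	blk_cleanup_queue(my_queue);
}

module_init(my_driver_init);
module_exit(my_driver_exit);
MODULE_LICENSE("GPL");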