aboutsummaryrefslogtreecommitdiffstats
path: root/block
diff options
context:
space:
mode:
authorJens Axboe <jaxboe@fusionio.com>2011-04-15 09:49:07 -0400
committerJens Axboe <jaxboe@fusionio.com>2011-04-15 09:49:07 -0400
commitf6603783f9f099bf7a83b3f6c689bbbf74f0e96e (patch)
tree450065f77e95b6cd0eee13c9d8f721016be79839 /block
parent88b996cd0652280cc9b9fc70008fda15f14175e1 (diff)
block: only force kblockd unplugging from the schedule() path
For the explicit unplugging, we'd prefer to kick things off immediately and not pay the penalty of the latency to switch to kblockd. So let blk_finish_plug() do the run inline, while the implicit-on-schedule-out unplug will punt to kblockd. Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'block')
-rw-r--r--block/blk-core.c13
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index b598fa7720d4..3c8121072507 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2662,16 +2662,17 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
 	return !(rqa->q <= rqb->q);
 }
 
-static void queue_unplugged(struct request_queue *q, unsigned int depth)
+static void queue_unplugged(struct request_queue *q, unsigned int depth,
+			    bool force_kblockd)
 {
 	trace_block_unplug_io(q, depth);
-	__blk_run_queue(q, true);
+	__blk_run_queue(q, force_kblockd);
 
 	if (q->unplugged_fn)
 		q->unplugged_fn(q);
 }
 
-void blk_flush_plug_list(struct blk_plug *plug)
+void blk_flush_plug_list(struct blk_plug *plug, bool force_kblockd)
 {
 	struct request_queue *q;
 	unsigned long flags;
@@ -2706,7 +2707,7 @@ void blk_flush_plug_list(struct blk_plug *plug)
 		BUG_ON(!rq->q);
 		if (rq->q != q) {
 			if (q) {
-				queue_unplugged(q, depth);
+				queue_unplugged(q, depth, force_kblockd);
 				spin_unlock(q->queue_lock);
 			}
 			q = rq->q;
@@ -2727,7 +2728,7 @@ void blk_flush_plug_list(struct blk_plug *plug)
 	}
 
 	if (q) {
-		queue_unplugged(q, depth);
+		queue_unplugged(q, depth, force_kblockd);
 		spin_unlock(q->queue_lock);
 	}
 
@@ -2737,7 +2738,7 @@ EXPORT_SYMBOL(blk_flush_plug_list);
 
 void blk_finish_plug(struct blk_plug *plug)
 {
-	blk_flush_plug_list(plug);
+	blk_flush_plug_list(plug, false);
 
 	if (plug == current->plug)
 		current->plug = NULL;