author		Jens Axboe <jaxboe@fusionio.com>	2011-04-16 07:27:55 -0400
committer	Jens Axboe <jaxboe@fusionio.com>	2011-04-16 07:27:55 -0400
commit		a237c1c5bc5dc5c76a21be922dca4826f3eca8ca (patch)
tree		a216c9a6d9e870b84424938e9e0b4722dc8634cd /kernel/sched.c
parent		5853b4f06f7b9b56f37f457d7923f7b96496074e (diff)
block: let io_schedule() flush the plug inline
Linus correctly observes that the most important dispatch cases are now done from kblockd, and this isn't ideal for latency reasons. The original reason for switching dispatches out-of-line was to avoid too deep a stack, so by _only_ letting the "accidental" flush directly in schedule() be guarded by offload to kblockd, we should be able to get the best of both worlds.

So add a blk_schedule_flush_plug() that offloads to kblockd, and only use that from the schedule() path.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
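For context (this helper lives outside the kernel/sched.c diff shown below): blk_schedule_flush_plug() is a thin inline wrapper that mirrors blk_flush_plug(), differing only in asking blk_flush_plug_list() to defer dispatch to kblockd. A minimal sketch of the pair, assuming the from_schedule boolean argument on blk_flush_plug_list() that this commit introduces:

/*
 * Sketch of the helper pair (include/linux/blkdev.h); assumes the
 * from_schedule flag on blk_flush_plug_list() added by this commit.
 */
static inline void blk_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	/* Flush inline: dispatch plugged requests from this context. */
	if (plug)
		blk_flush_plug_list(plug, false);
}

static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	/*
	 * from_schedule == true: punt the dispatch to kblockd, so the
	 * "accidental" flush triggered from schedule() does not stack
	 * block-layer dispatch frames on an already deep call chain.
	 */
	if (plug)
		blk_flush_plug_list(plug, true);
}

The schedule() call site below simply switches from the inline variant to the offloading one; explicit flush points elsewhere keep the low-latency inline dispatch.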
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index a187c3fe027b..312f8b95c2d4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4118,7 +4118,7 @@ need_resched:
 			 */
 			if (blk_needs_flush_plug(prev)) {
 				raw_spin_unlock(&rq->lock);
-				blk_flush_plug(prev);
+				blk_schedule_flush_plug(prev);
 				raw_spin_lock(&rq->lock);
 			}
 		}