aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched.c
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2009-03-13 07:21:26 -0400
committerIngo Molnar <mingo@elte.hu>2009-04-20 14:49:53 -0400
commitff743345bf7685a207868048a70e23164c4785e5 (patch)
tree7cdb917ad5ac3aa7798b0358a246ddc46a363cdc /kernel/sched.c
parent13318a7186d8e0ae08c996ea4111a945e7789772 (diff)
sched: remove extra call overhead for schedule()
Lai Jiangshan's patch reminded me that I promised Nick to remove that extra call overhead in schedule(). Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> LKML-Reference: <20090313112300.927414207@chello.nl> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--kernel/sched.c12
1 file changed, 4 insertions, 8 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 7601ceebf7ce..797f6fdabadf 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5131,13 +5131,15 @@ pick_next_task(struct rq *rq)
 /*
  * schedule() is the main scheduler function.
  */
-asmlinkage void __sched __schedule(void)
+asmlinkage void __sched schedule(void)
 {
 	struct task_struct *prev, *next;
 	unsigned long *switch_count;
 	struct rq *rq;
 	int cpu;
 
+need_resched:
+	preempt_disable();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
 	rcu_qsctr_inc(cpu);
@@ -5194,15 +5196,9 @@ need_resched_nonpreemptible:
 
 	if (unlikely(reacquire_kernel_lock(current) < 0))
 		goto need_resched_nonpreemptible;
-}
 
-asmlinkage void __sched schedule(void)
-{
-need_resched:
-	preempt_disable();
-	__schedule();
 	preempt_enable_no_resched();
-	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
+	if (need_resched())
 		goto need_resched;
 }
 EXPORT_SYMBOL(schedule);