author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-03-13 07:21:26 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-04-20 14:49:53 -0400
commit	ff743345bf7685a207868048a70e23164c4785e5 (patch)
tree	7cdb917ad5ac3aa7798b0358a246ddc46a363cdc /kernel
parent	13318a7186d8e0ae08c996ea4111a945e7789772 (diff)
sched: remove extra call overhead for schedule()
Lai Jiangshan's patch reminded me that I promised Nick to remove that
extra call overhead in schedule().

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20090313112300.927414207@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/mutex.c	4
-rw-r--r--	kernel/sched.c	12
2 files changed, 7 insertions(+), 9 deletions(-)
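For context on the change itself: before this patch, schedule() was a thin
exported wrapper that ran the preempt-disable/need-resched loop around an
out-of-line __schedule(), so every invocation paid an extra function call.
The patch folds __schedule()'s body back into schedule(). A simplified
before/after sketch, condensed from the diff below (pseudo-kernel C, not
the actual source):

/* Before: two functions, an extra call on every schedule(). */
asmlinkage void __sched __schedule(void)
{
	/* ... pick next task, context switch ... */
}

asmlinkage void __sched schedule(void)
{
need_resched:
	preempt_disable();
	__schedule();			/* the extra call */
	preempt_enable_no_resched();
	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
		goto need_resched;
}

/* After: one function, the loop wraps the body directly. */
asmlinkage void __sched schedule(void)
{
need_resched:
	preempt_disable();
	/* ... pick next task, context switch ... */
	preempt_enable_no_resched();
	if (need_resched())
		goto need_resched;
}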
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 5d79781394a3..e1fb73510409 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -248,7 +248,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 
 		/* didnt get the lock, go to sleep: */
 		spin_unlock_mutex(&lock->wait_lock, flags);
-		__schedule();
+		preempt_enable_no_resched();
+		schedule();
+		preempt_disable();
 		spin_lock_mutex(&lock->wait_lock, flags);
 	}
 
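The two extra lines in the mutex slow path follow from the merge:
__mutex_lock_common sleeps from a region that already holds a preempt
count and previously called __schedule() directly, which assumed
preemption was off. Now that schedule() does its own preempt_disable(),
the caller must drop its count first and restore it afterwards. An
annotated sketch of the pattern (the comments are mine, not from the
source):

	/* about to block anyway, so skip the reschedule check: */
	preempt_enable_no_resched();
	schedule();		/* takes and releases its own preempt count */
	preempt_disable();	/* restore the count this function expects */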
diff --git a/kernel/sched.c b/kernel/sched.c
index 7601ceebf7ce..797f6fdabadf 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5131,13 +5131,15 @@ pick_next_task(struct rq *rq)
 /*
  * schedule() is the main scheduler function.
  */
-asmlinkage void __sched __schedule(void)
+asmlinkage void __sched schedule(void)
 {
 	struct task_struct *prev, *next;
 	unsigned long *switch_count;
 	struct rq *rq;
 	int cpu;
 
+need_resched:
+	preempt_disable();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
 	rcu_qsctr_inc(cpu);
@@ -5194,15 +5196,9 @@ need_resched_nonpreemptible:
 
 	if (unlikely(reacquire_kernel_lock(current) < 0))
 		goto need_resched_nonpreemptible;
-}
 
-asmlinkage void __sched schedule(void)
-{
-need_resched:
-	preempt_disable();
-	__schedule();
 	preempt_enable_no_resched();
-	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
+	if (need_resched())
 		goto need_resched;
 }
 EXPORT_SYMBOL(schedule);
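One small cleanup rides along in the hunk above: the open-coded
test_thread_flag(TIF_NEED_RESCHED) check becomes need_resched(). This
should be behavior-neutral; at the time the helper was defined along
these lines in include/linux/sched.h (quoted from memory, so treat as
approximate):

static inline int need_resched(void)
{
	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
}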