author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-01-14 09:36:26 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-14 12:09:00 -0500
commit		41719b03091911028116155deddc5eedf8c45e37
tree		20a699807d78bc0af86b19443dc751415c0cc6f7 /kernel/sched.c
parent		93d81d1aca26e64a75d06a85f7e128b5f49053e7
mutex: preemption fixes
The problem is that dropping the spinlock right before schedule() is a
voluntary preemption point and can cause a schedule, right after which
we schedule again.

Fix this inefficiency by keeping preemption disabled until we schedule:
explicitly disable preemption and provide a schedule() variant that
assumes preemption is already disabled.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
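For illustration, a minimal sketch of the caller-side pattern this change
enables, assuming a hypothetical sleep slowpath: the names my_wait_lock
and my_sleep_slowpath_*() are invented for this sketch, not real kernel
symbols, and the intended real caller is the mutex slowpath in
kernel/mutex.c.

#include <linux/preempt.h>
#include <linux/sched.h>	/* schedule(); __schedule() per this series */
#include <linux/spinlock.h>

/* Invented lock, standing in for the mutex wait_lock. */
static DEFINE_SPINLOCK(my_wait_lock);

/* Before the patch: on CONFIG_PREEMPT, spin_unlock() re-enables
 * preemption and is itself a preemption point, so the task can be
 * scheduled out here and then schedule again in the explicit
 * schedule() call right below. */
static void my_sleep_slowpath_before(void)
{
	spin_lock(&my_wait_lock);
	/* ... enqueue ourselves, set task state ... */
	spin_unlock(&my_wait_lock);
	schedule();
}

/* After the patch: preemption stays disabled across the unlock
 * (spin_unlock() only drops the preempt count from 2 to 1), and the
 * task enters the scheduler exactly once, via the variant that
 * assumes preemption is already disabled. */
static void my_sleep_slowpath_after(void)
{
	preempt_disable();
	spin_lock(&my_wait_lock);
	/* ... enqueue ourselves, set task state ... */
	spin_unlock(&my_wait_lock);
	__schedule();
	preempt_enable();
}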
Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 8be2c13b50d0..b001c133c359 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4538,15 +4538,13 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
 /*
  * schedule() is the main scheduler function.
  */
-asmlinkage void __sched schedule(void)
+asmlinkage void __sched __schedule(void)
 {
 	struct task_struct *prev, *next;
 	unsigned long *switch_count;
 	struct rq *rq;
 	int cpu;
 
-need_resched:
-	preempt_disable();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
 	rcu_qsctr_inc(cpu);
@@ -4603,7 +4601,13 @@ need_resched_nonpreemptible:
 
 	if (unlikely(reacquire_kernel_lock(current) < 0))
 		goto need_resched_nonpreemptible;
+}
 
+asmlinkage void __sched schedule(void)
+{
+need_resched:
+	preempt_disable();
+	__schedule();
 	preempt_enable_no_resched();
 	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
 		goto need_resched;