Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  19
1 files changed, 3 insertions, 16 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 22712b2e058a..629614ad0358 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3955,10 +3955,9 @@ EXPORT_SYMBOL(schedule);
 asmlinkage void __sched preempt_schedule(void)
 {
 	struct thread_info *ti = current_thread_info();
-#ifdef CONFIG_PREEMPT_BKL
 	struct task_struct *task = current;
 	int saved_lock_depth;
-#endif
+
 	/*
 	 * If there is a non-zero preempt_count or interrupts are disabled,
 	 * we do not want to preempt the current task. Just return..
@@ -3974,14 +3973,10 @@ asmlinkage void __sched preempt_schedule(void)
 	 * clear ->lock_depth so that schedule() doesnt
 	 * auto-release the semaphore:
 	 */
-#ifdef CONFIG_PREEMPT_BKL
 	saved_lock_depth = task->lock_depth;
 	task->lock_depth = -1;
-#endif
 	schedule();
-#ifdef CONFIG_PREEMPT_BKL
 	task->lock_depth = saved_lock_depth;
-#endif
 	sub_preempt_count(PREEMPT_ACTIVE);
 
 	/*
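
Taken together, the two hunks above make the ->lock_depth save/restore in preempt_schedule() unconditional. For orientation, a sketch of the full function as it reads after the patch: the declarations and the lock_depth handling are reconstructed from the new-side context in the hunks, while the do/while recheck loop is assumed from kernels of this era and is not itself part of the diff.

asmlinkage void __sched preempt_schedule(void)
{
	struct thread_info *ti = current_thread_info();
	struct task_struct *task = current;
	int saved_lock_depth;

	/*
	 * If there is a non-zero preempt_count or interrupts are disabled,
	 * we do not want to preempt the current task. Just return..
	 */
	if (likely(ti->preempt_count || irqs_disabled()))
		return;

	do {
		add_preempt_count(PREEMPT_ACTIVE);

		/*
		 * Now unconditional: clear ->lock_depth so that schedule()
		 * does not auto-release the BKL, then restore it afterwards.
		 */
		saved_lock_depth = task->lock_depth;
		task->lock_depth = -1;
		schedule();
		task->lock_depth = saved_lock_depth;

		sub_preempt_count(PREEMPT_ACTIVE);

		/* Assumed era-typical recheck for a missed preemption. */
		barrier();
	} while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
}
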
@@ -4002,10 +3997,9 @@ EXPORT_SYMBOL(preempt_schedule);
 asmlinkage void __sched preempt_schedule_irq(void)
 {
 	struct thread_info *ti = current_thread_info();
-#ifdef CONFIG_PREEMPT_BKL
 	struct task_struct *task = current;
 	int saved_lock_depth;
-#endif
+
 	/* Catch callers which need to be fixed */
 	BUG_ON(ti->preempt_count || !irqs_disabled());
 
@@ -4017,16 +4011,12 @@ asmlinkage void __sched preempt_schedule_irq(void)
 	 * clear ->lock_depth so that schedule() doesnt
 	 * auto-release the semaphore:
 	 */
-#ifdef CONFIG_PREEMPT_BKL
 	saved_lock_depth = task->lock_depth;
 	task->lock_depth = -1;
-#endif
 	local_irq_enable();
 	schedule();
 	local_irq_disable();
-#ifdef CONFIG_PREEMPT_BKL
 	task->lock_depth = saved_lock_depth;
-#endif
 	sub_preempt_count(PREEMPT_ACTIVE);
 
 	/*
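
The same simplification applies to preempt_schedule_irq(), which now saves and restores ->lock_depth around the interrupts-enabled schedule() on every invocation. A sketch of the resulting function, again reconstructed from the new-side context with the loop structure assumed rather than taken from the diff:

asmlinkage void __sched preempt_schedule_irq(void)
{
	struct thread_info *ti = current_thread_info();
	struct task_struct *task = current;
	int saved_lock_depth;

	/* Catch callers which need to be fixed */
	BUG_ON(ti->preempt_count || !irqs_disabled());

	do {
		add_preempt_count(PREEMPT_ACTIVE);

		/*
		 * Unconditional now: clear ->lock_depth so that schedule()
		 * does not auto-release the semaphore, and call schedule()
		 * with interrupts enabled.
		 */
		saved_lock_depth = task->lock_depth;
		task->lock_depth = -1;
		local_irq_enable();
		schedule();
		local_irq_disable();
		task->lock_depth = saved_lock_depth;

		sub_preempt_count(PREEMPT_ACTIVE);

		/* Assumed era-typical recheck for a missed preemption. */
		barrier();
	} while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
}
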
@@ -5241,11 +5231,8 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	spin_unlock_irqrestore(&rq->lock, flags);
 
 	/* Set the preempt count _outside_ the spinlocks! */
-#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
-	task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
-#else
 	task_thread_info(idle)->preempt_count = 0;
-#endif
+
 	/*
 	 * The idle tasks have their own, simple scheduling class:
 	 */
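
The init_idle() hunk deserves a short gloss: before this change, a PREEMPT && !PREEMPT_BKL kernel started the idle task with a nonzero preempt count whenever it still held the BKL, keeping preemption disabled until the lock was dropped. With the !PREEMPT_BKL variant removed, the unconditional zero is correct for every configuration. A minimal before/after view of the single assignment, taken directly from the hunk above:

/* Before, under PREEMPT && !PREEMPT_BKL: keep preemption disabled while
 * the idle task still holds the BKL (lock_depth >= 0 means held). */
task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);

/* After: PREEMPT_BKL semantics apply everywhere, so the idle task always
 * starts with a zero preempt count. */
task_thread_info(idle)->preempt_count = 0;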