Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	31
1 file changed, 5 insertions(+), 26 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 58fb8af1577..cfa222a9153 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4567,8 +4567,6 @@ EXPORT_SYMBOL(schedule);
 asmlinkage void __sched preempt_schedule(void)
 {
 	struct thread_info *ti = current_thread_info();
-	struct task_struct *task = current;
-	int saved_lock_depth;
 
 	/*
 	 * If there is a non-zero preempt_count or interrupts are disabled,
@@ -4579,16 +4577,7 @@ asmlinkage void __sched preempt_schedule(void)
 
 	do {
 		add_preempt_count(PREEMPT_ACTIVE);
-
-		/*
-		 * We keep the big kernel semaphore locked, but we
-		 * clear ->lock_depth so that schedule() doesnt
-		 * auto-release the semaphore:
-		 */
-		saved_lock_depth = task->lock_depth;
-		task->lock_depth = -1;
 		schedule();
-		task->lock_depth = saved_lock_depth;
 		sub_preempt_count(PREEMPT_ACTIVE);
 
 		/*
@@ -4609,26 +4598,15 @@ EXPORT_SYMBOL(preempt_schedule);
 asmlinkage void __sched preempt_schedule_irq(void)
 {
 	struct thread_info *ti = current_thread_info();
-	struct task_struct *task = current;
-	int saved_lock_depth;
 
 	/* Catch callers which need to be fixed */
 	BUG_ON(ti->preempt_count || !irqs_disabled());
 
 	do {
 		add_preempt_count(PREEMPT_ACTIVE);
-
-		/*
-		 * We keep the big kernel semaphore locked, but we
-		 * clear ->lock_depth so that schedule() doesnt
-		 * auto-release the semaphore:
-		 */
-		saved_lock_depth = task->lock_depth;
-		task->lock_depth = -1;
 		local_irq_enable();
 		schedule();
 		local_irq_disable();
-		task->lock_depth = saved_lock_depth;
 		sub_preempt_count(PREEMPT_ACTIVE);
 
 		/*
@@ -5547,7 +5525,6 @@ static void __cond_resched(void)
 	} while (need_resched());
 }
 
-#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY)
 int __sched _cond_resched(void)
 {
 	if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) &&
@@ -5558,7 +5535,6 @@ int __sched _cond_resched(void)
 	return 0;
 }
 EXPORT_SYMBOL(_cond_resched);
-#endif
 
 /*
  * cond_resched_lock() - if a reschedule is pending, drop the given lock,
@@ -5853,8 +5829,11 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	spin_unlock_irqrestore(&rq->lock, flags);
 
 	/* Set the preempt count _outside_ the spinlocks! */
+#if defined(CONFIG_PREEMPT)
+	task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
+#else
 	task_thread_info(idle)->preempt_count = 0;
-
+#endif
 	/*
 	 * The idle tasks have their own, simple scheduling class:
 	 */
@@ -9007,7 +8986,7 @@ static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
 #endif
 
 #ifdef CONFIG_RT_GROUP_SCHED
-static ssize_t cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
+static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
 		s64 val)
 {
 	return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
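
For reference, a rough sketch of preempt_schedule() as it reads after this change, reconstructed from the new (right-hand) side of the hunks above. The loop body comes straight from the diff; the early-return test and the loop condition are not shown in the hunk context and are filled in from the kernel source of that era, so treat those parts as an assumption rather than an exact copy.

/*
 * Sketch only: the BKL ->lock_depth save/restore is gone, so the
 * preemption loop merely brackets schedule() with PREEMPT_ACTIVE.
 */
asmlinkage void __sched preempt_schedule(void)
{
	struct thread_info *ti = current_thread_info();

	/*
	 * If there is a non-zero preempt_count or interrupts are disabled,
	 * we do not want to preempt the current task: just return.
	 * (Filled in from surrounding code, not part of this diff.)
	 */
	if (likely(ti->preempt_count || irqs_disabled()))
		return;

	do {
		add_preempt_count(PREEMPT_ACTIVE);
		schedule();
		sub_preempt_count(PREEMPT_ACTIVE);

		/*
		 * Check again in case we missed a preemption opportunity
		 * between schedule and now.
		 */
		barrier();
	} while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
}

As the removed lines show, the big-kernel-lock ->lock_depth bookkeeping drops out of both preempt_schedule() and preempt_schedule_irq(), leaving only the PREEMPT_ACTIVE bracketing around schedule().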