aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--kernel/sched.c21
1 file changed, 7 insertions, 14 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 5cd833bc2173..3df33da0dafc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1567,6 +1567,7 @@ int fastcall wake_up_state(struct task_struct *p, unsigned int state)
1567 return try_to_wake_up(p, state, 0); 1567 return try_to_wake_up(p, state, 0);
1568} 1568}
1569 1569
1570static void task_running_tick(struct rq *rq, struct task_struct *p);
1570/* 1571/*
1571 * Perform scheduler related setup for a newly forked process p. 1572 * Perform scheduler related setup for a newly forked process p.
1572 * p is forked by current. 1573 * p is forked by current.
@@ -1627,7 +1628,7 @@ void fastcall sched_fork(struct task_struct *p, int clone_flags)
1627 * runqueue lock is not a problem. 1628 * runqueue lock is not a problem.
1628 */ 1629 */
1629 current->time_slice = 1; 1630 current->time_slice = 1;
1630 scheduler_tick(); 1631 task_running_tick(cpu_rq(cpu), current);
1631 } 1632 }
1632 local_irq_enable(); 1633 local_irq_enable();
1633 put_cpu(); 1634 put_cpu();
@@ -4616,15 +4617,6 @@ asmlinkage long sys_sched_yield(void)
4616 return 0; 4617 return 0;
4617} 4618}
4618 4619
4619static inline int __resched_legal(int expected_preempt_count)
4620{
4621 if (unlikely(preempt_count() != expected_preempt_count))
4622 return 0;
4623 if (unlikely(system_state != SYSTEM_RUNNING))
4624 return 0;
4625 return 1;
4626}
4627
4628static void __cond_resched(void) 4620static void __cond_resched(void)
4629{ 4621{
4630#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP 4622#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
@@ -4644,7 +4636,8 @@ static void __cond_resched(void)
4644 4636
4645int __sched cond_resched(void) 4637int __sched cond_resched(void)
4646{ 4638{
4647 if (need_resched() && __resched_legal(0)) { 4639 if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) &&
4640 system_state == SYSTEM_RUNNING) {
4648 __cond_resched(); 4641 __cond_resched();
4649 return 1; 4642 return 1;
4650 } 4643 }
@@ -4670,7 +4663,7 @@ int cond_resched_lock(spinlock_t *lock)
4670 ret = 1; 4663 ret = 1;
4671 spin_lock(lock); 4664 spin_lock(lock);
4672 } 4665 }
4673 if (need_resched() && __resched_legal(1)) { 4666 if (need_resched() && system_state == SYSTEM_RUNNING) {
4674 spin_release(&lock->dep_map, 1, _THIS_IP_); 4667 spin_release(&lock->dep_map, 1, _THIS_IP_);
4675 _raw_spin_unlock(lock); 4668 _raw_spin_unlock(lock);
4676 preempt_enable_no_resched(); 4669 preempt_enable_no_resched();
@@ -4686,7 +4679,7 @@ int __sched cond_resched_softirq(void)
4686{ 4679{
4687 BUG_ON(!in_softirq()); 4680 BUG_ON(!in_softirq());
4688 4681
4689 if (need_resched() && __resched_legal(0)) { 4682 if (need_resched() && system_state == SYSTEM_RUNNING) {
4690 raw_local_irq_disable(); 4683 raw_local_irq_disable();
4691 _local_bh_enable(); 4684 _local_bh_enable();
4692 raw_local_irq_enable(); 4685 raw_local_irq_enable();
@@ -5607,7 +5600,7 @@ static void cpu_attach_domain(struct sched_domain *sd, int cpu)
5607} 5600}
5608 5601
5609/* cpus with isolated domains */ 5602/* cpus with isolated domains */
5610static cpumask_t __cpuinitdata cpu_isolated_map = CPU_MASK_NONE; 5603static cpumask_t cpu_isolated_map = CPU_MASK_NONE;
5611 5604
5612/* Setup the mask of cpus configured for isolated domains */ 5605/* Setup the mask of cpus configured for isolated domains */
5613static int __init isolated_cpu_setup(char *str) 5606static int __init isolated_cpu_setup(char *str)