Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 21
1 file changed, 14 insertions(+), 7 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index f28f19e65b59..f06950c8a6ce 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -668,6 +668,8 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
  */
 unsigned int sysctl_sched_rt_period = 1000000;
 
+static __read_mostly int scheduler_running;
+
 /*
  * part of the period that we allow rt tasks to run in us.
  * default: 0.95s
@@ -689,14 +691,16 @@ unsigned long long cpu_clock(int cpu)
 	unsigned long flags;
 	struct rq *rq;
 
-	local_irq_save(flags);
-	rq = cpu_rq(cpu);
 	/*
 	 * Only call sched_clock() if the scheduler has already been
 	 * initialized (some code might call cpu_clock() very early):
 	 */
-	if (rq->idle)
-		update_rq_clock(rq);
+	if (unlikely(!scheduler_running))
+		return 0;
+
+	local_irq_save(flags);
+	rq = cpu_rq(cpu);
+	update_rq_clock(rq);
 	now = rq->clock;
 	local_irq_restore(flags);
 
@@ -1831,6 +1835,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 	long old_state;
 	struct rq *rq;
 
+	smp_wmb();
 	rq = task_rq_lock(p, &flags);
 	old_state = p->state;
 	if (!(old_state & state))
@@ -3766,7 +3771,7 @@ void scheduler_tick(void)
 
 #if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
 
-void add_preempt_count(int val)
+void __kprobes add_preempt_count(int val)
 {
 	/*
 	 * Underflow?
@@ -3782,7 +3787,7 @@ void add_preempt_count(int val)
 }
 EXPORT_SYMBOL(add_preempt_count);
 
-void sub_preempt_count(int val)
+void __kprobes sub_preempt_count(int val)
 {
 	/*
 	 * Underflow?
@@ -3884,7 +3889,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
 asmlinkage void __sched schedule(void)
 {
 	struct task_struct *prev, *next;
-	long *switch_count;
+	unsigned long *switch_count;
 	struct rq *rq;
 	int cpu;
 
@@ -7283,6 +7288,8 @@ void __init sched_init(void)
 	 * During early bootup we pretend to be a normal task:
 	 */
 	current->sched_class = &fair_sched_class;
+
+	scheduler_running = 1;
 }
 
 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
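
Note on the cpu_clock() hunks above: the old code keyed off `rq->idle` to decide whether the runqueue was usable, while the patched code checks an explicit `scheduler_running` flag that sched_init() sets only once the runqueues are initialized, so very early callers of cpu_clock() simply get 0 instead of touching an uninitialized runqueue. The stand-alone C sketch below models that "initialized flag" guard pattern outside the kernel; `clock_initialized`, `init_clock()` and `read_clock()` are hypothetical names used for illustration, not kernel APIs, and the sketch covers only the control flow, not SMP memory ordering.

	#include <stdio.h>

	/* Hypothetical model of the guard used in cpu_clock():
	 * readers return a safe default until setup has completed. */
	static int clock_initialized;          /* plays the role of scheduler_running */
	static unsigned long long clock_value;

	static void init_clock(void)
	{
		clock_value = 1000;            /* pretend the real clock source is set up */
		clock_initialized = 1;         /* only now may readers trust clock_value */
	}

	static unsigned long long read_clock(void)
	{
		/* Mirrors: if (unlikely(!scheduler_running)) return 0; */
		if (!clock_initialized)
			return 0;
		return clock_value;
	}

	int main(void)
	{
		printf("before init: %llu\n", read_clock());   /* prints 0 */
		init_clock();
		printf("after init:  %llu\n", read_clock());   /* prints 1000 */
		return 0;
	}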