Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	16
1 file changed, 11 insertions, 5 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index b387a8de26a5..f06950c8a6ce 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -668,6 +668,8 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
  */
 unsigned int sysctl_sched_rt_period = 1000000;
 
+static __read_mostly int scheduler_running;
+
 /*
  * part of the period that we allow rt tasks to run in us.
  * default: 0.95s
@@ -689,14 +691,16 @@ unsigned long long cpu_clock(int cpu)
 	unsigned long flags;
 	struct rq *rq;
 
-	local_irq_save(flags);
-	rq = cpu_rq(cpu);
 	/*
 	 * Only call sched_clock() if the scheduler has already been
 	 * initialized (some code might call cpu_clock() very early):
 	 */
-	if (rq->idle)
-		update_rq_clock(rq);
+	if (unlikely(!scheduler_running))
+		return 0;
+
+	local_irq_save(flags);
+	rq = cpu_rq(cpu);
+	update_rq_clock(rq);
 	now = rq->clock;
 	local_irq_restore(flags);
 
@@ -3885,7 +3889,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
 asmlinkage void __sched schedule(void)
 {
 	struct task_struct *prev, *next;
-	long *switch_count;
+	unsigned long *switch_count;
 	struct rq *rq;
 	int cpu;
 
@@ -7284,6 +7288,8 @@ void __init sched_init(void)
 	 * During early bootup we pretend to be a normal task:
 	 */
 	current->sched_class = &fair_sched_class;
+
+	scheduler_running = 1;
 }
 
 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
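
The change in a minimal userspace sketch (hypothetical stand-in code, not the kernel implementation): cpu_clock() bails out with 0 until sched_init() has set the scheduler_running flag, so callers that run very early in boot never touch per-CPU runqueue state that is not initialized yet. The rq_clock array below is only a stand-in for rq->clock.

/*
 * Sketch of the guard pattern the patch introduces; names cpu_clock,
 * sched_init and scheduler_running come from the diff above, the rest
 * is illustrative only.
 */
#include <stdio.h>

static int scheduler_running;              /* stays 0 until init finishes */
static unsigned long long rq_clock[1];     /* stand-in for rq->clock      */

static unsigned long long cpu_clock(int cpu)
{
	if (!scheduler_running)            /* mirrors unlikely(!scheduler_running) */
		return 0;                  /* too early: report 0, touch nothing   */
	return rq_clock[cpu];              /* normal path: read the runqueue clock */
}

static void sched_init(void)
{
	rq_clock[0] = 1000000ULL;          /* pretend the clock is now valid */
	scheduler_running = 1;             /* set last, as in the patch      */
}

int main(void)
{
	printf("early: %llu\n", cpu_clock(0));   /* prints 0       */
	sched_init();
	printf("later: %llu\n", cpu_clock(0));   /* prints 1000000 */
	return 0;
}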