path: root/kernel/sched.c
author    Ingo Molnar <mingo@elte.hu>    2008-02-13 08:02:36 -0500
committer Ingo Molnar <mingo@elte.hu>    2008-02-25 10:34:16 -0500
commit  6892b75e60557a48c01d57ba320419a9e2ce9846 (patch)
tree    2ba5c725d4723385e88b0a54e8bb4be5b9e38384 /kernel/sched.c
parent  bfa274e2436fc7ef72ef51c878083647f1cfd429 (diff)
sched: make early bootup sched_clock() use safer
do not call sched_clock() too early. Not only might rq->idle not be
set up - but pure per-cpu data might not be accessible either.

this solves an ia64 early bootup hang with CONFIG_PRINTK_TIME=y.

Tested-by: Tony Luck <tony.luck@gmail.com>
Acked-by: Tony Luck <tony.luck@gmail.com>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
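A rough, self-contained C analogue of the pattern this patch introduces (hypothetical names, not kernel code): the fast-path accessor returns a safe default until an explicit "ready" flag is set at the end of initialization, so very early callers never touch state that is not set up yet.

#include <stdio.h>

/* analogue of scheduler_running: stays 0 until setup has completed */
static int clock_ready;
static unsigned long long fake_rq_clock;

static unsigned long long demo_cpu_clock(void)
{
        if (!clock_ready)       /* analogue of unlikely(!scheduler_running) */
                return 0;       /* safe value for very early callers */
        return fake_rq_clock;   /* only read once setup has finished */
}

static void demo_init(void)
{
        fake_rq_clock = 123456; /* pretend the real clock source is ready */
        clock_ready = 1;        /* analogue of scheduler_running = 1 */
}

int main(void)
{
        printf("before init: %llu\n", demo_cpu_clock());  /* prints 0 */
        demo_init();
        printf("after init:  %llu\n", demo_cpu_clock());  /* prints 123456 */
        return 0;
}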
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  14
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index b387a8de26a5..7286ccb01082 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -668,6 +668,8 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
  */
 unsigned int sysctl_sched_rt_period = 1000000;
 
+static __read_mostly int scheduler_running;
+
 /*
  * part of the period that we allow rt tasks to run in us.
  * default: 0.95s
@@ -689,14 +691,16 @@ unsigned long long cpu_clock(int cpu)
 	unsigned long flags;
 	struct rq *rq;
 
-	local_irq_save(flags);
-	rq = cpu_rq(cpu);
 	/*
 	 * Only call sched_clock() if the scheduler has already been
 	 * initialized (some code might call cpu_clock() very early):
 	 */
-	if (rq->idle)
-		update_rq_clock(rq);
+	if (unlikely(!scheduler_running))
+		return 0;
+
+	local_irq_save(flags);
+	rq = cpu_rq(cpu);
+	update_rq_clock(rq);
 	now = rq->clock;
 	local_irq_restore(flags);
 
@@ -7284,6 +7288,8 @@ void __init sched_init(void)
 	 * During early bootup we pretend to be a normal task:
 	 */
 	current->sched_class = &fair_sched_class;
+
+	scheduler_running = 1;
 }
 
 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
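For readability, a sketch of how cpu_clock() reads with this patch applied, assembled from the hunks above; the declaration of 'now' and the final return statement are not visible in the diff context and are assumed here:

unsigned long long cpu_clock(int cpu)
{
	unsigned long long now;	/* assumed: not shown in the hunk above */
	unsigned long flags;
	struct rq *rq;

	/*
	 * Only call sched_clock() if the scheduler has already been
	 * initialized (some code might call cpu_clock() very early):
	 */
	if (unlikely(!scheduler_running))
		return 0;

	local_irq_save(flags);
	rq = cpu_rq(cpu);
	update_rq_clock(rq);
	now = rq->clock;
	local_irq_restore(flags);

	return now;	/* assumed: not shown in the hunk above */
}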