author	Frederic Weisbecker <fweisbec@gmail.com>	2013-05-15 16:16:32 -0400
committer	Ingo Molnar <mingo@kernel.org>	2013-05-31 05:31:50 -0400
commit	45eacc692771bd2b1ea3d384e6345cab3da10861 (patch)
tree	47b131fb39077451fe247a96c0fc98ab142b79fa /kernel/sched
parent	67dd331c5d811b2e50c935a24c82f31b61c6dcd3 (diff)
vtime: Use consistent clocks among nohz accounting
While computing the cputime delta of dynticks CPUs, we are mixing up clocks of different natures:

* local_clock(), which takes care of unstable clock sources and fixes them up if needed.

* sched_clock(), which is the weaker version of local_clock(): it doesn't compute any fixup in case of an unstable source.

If the clock source is stable, those two clocks are the same and we can safely compute the difference between two random points. Otherwise the result is random deltas, as sched_clock() can randomly drift away, backward or forward, from local_clock(). As a consequence, some strange behaviour has been observed with an unstable TSC, such as non-progressing, constant-zero cputime (the 'top' command showing no load).

Fix this by only using local_clock(), or its irq-safe/remote equivalent, in the vtime code.

Reported-by: Mike Galbraith <efault@gmx.de>
Suggested-by: Mike Galbraith <efault@gmx.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Li Zhong <zhong@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
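[Editorial illustration, not part of the patch.] The clock-mixing problem can be sketched outside the kernel. Below is a minimal userspace analogy only, not kernel code: CLOCK_MONOTONIC_RAW stands in for sched_clock() (no rate corrections) and CLOCK_MONOTONIC for local_clock() (adjusted/corrected). Subtracting a snapshot of one clock from a reading of the other does not measure elapsed time; it also picks up whatever offset has accumulated between the two clocks, which is the kind of bogus delta described above.

/* Userspace analogy of mixing two clocks (hypothetical, illustration only). */
#define _GNU_SOURCE		/* for CLOCK_MONOTONIC_RAW on strict modes */
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static long long ns(clockid_t id)
{
	struct timespec ts;

	clock_gettime(id, &ts);
	return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

int main(void)
{
	/* Snapshot taken with one clock (the "sched_clock()" stand-in)... */
	long long snap = ns(CLOCK_MONOTONIC_RAW);

	sleep(1);

	/* ...delta computed against a different clock (the "local_clock()" stand-in). */
	long long mixed      = ns(CLOCK_MONOTONIC) - snap;
	/* Consistent version: same clock for snapshot and delta. */
	long long consistent = ns(CLOCK_MONOTONIC_RAW) - snap;

	printf("mixed delta:      %lld ns\n", mixed);
	printf("consistent delta: %lld ns\n", consistent);
	return 0;
}

On a well-behaved machine the two deltas differ only slightly, since the clocks drift apart slowly; the point is that only the consistent version is guaranteed to be an elapsed time, which is what the patch below restores for the vtime snapshots.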
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c	2
-rw-r--r--	kernel/sched/cputime.c	6
2 files changed, 4 insertions, 4 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 58453b8272fd..e1a27f918723 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4745,7 +4745,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	 */
 	idle->sched_class = &idle_sched_class;
 	ftrace_graph_init_idle_task(idle, cpu);
-	vtime_init_idle(idle);
+	vtime_init_idle(idle, cpu);
 #if defined(CONFIG_SMP)
 	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
 #endif
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index cc2dc3eea8a3..b5ccba22603b 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -747,17 +747,17 @@ void arch_vtime_task_switch(struct task_struct *prev)
 
 	write_seqlock(&current->vtime_seqlock);
 	current->vtime_snap_whence = VTIME_SYS;
-	current->vtime_snap = sched_clock();
+	current->vtime_snap = sched_clock_cpu(smp_processor_id());
 	write_sequnlock(&current->vtime_seqlock);
 }
 
-void vtime_init_idle(struct task_struct *t)
+void vtime_init_idle(struct task_struct *t, int cpu)
 {
 	unsigned long flags;
 
 	write_seqlock_irqsave(&t->vtime_seqlock, flags);
 	t->vtime_snap_whence = VTIME_SYS;
-	t->vtime_snap = sched_clock();
+	t->vtime_snap = sched_clock_cpu(cpu);
 	write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
 }
 
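[Editorial note.] The two call sites pick the clock accordingly: arch_vtime_task_switch() snapshots the local CPU, so it uses sched_clock_cpu(smp_processor_id()), while init_idle() can run on a different CPU than the one whose idle task it sets up, which is presumably why vtime_init_idle() now takes an explicit cpu argument and uses sched_clock_cpu(cpu), the "remote equivalent" of local_clock() mentioned in the changelog.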