author    Thomas Gleixner <tglx@linutronix.de>    2016-11-19 08:47:35 -0500
committer Thomas Gleixner <tglx@linutronix.de>    2016-11-29 13:23:15 -0500
commit    bec8520dca0d27c1ddac703f9d0a78275ca2603e (patch)
tree      6c3039cac6d3f0ba02289dd05d5d1c4b2248056e
parent    7b3d2f6e08ed5eb6bcf6912938f7a542405f8e8e (diff)
x86/tsc: Detect random warps
If time warps can be observed then they should only ever be observed on
one CPU. If they are observed on both CPUs then the system is completely
hosed.

Add a check for this condition and notify if it happens.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Link: http://lkml.kernel.org/r/20161119134017.574838461@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--  arch/x86/kernel/tsc_sync.c  13
1 file changed, 12 insertions(+), 1 deletion(-)
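For reference, a minimal user-space sketch of the bookkeeping this patch introduces (not part of the patch; the struct cpu_state type, record_warp() helper and printf output are illustrative assumptions): each CPU keeps a private cur_warps snapshot of the shared nr_warps counter, and a mismatch at the moment a new warp is recorded means another CPU has also observed time going backwards since this CPU's last warp, so random_warps is bumped.

/*
 * Sketch only, not kernel code: simulates the cur_warps/nr_warps/random_warps
 * bookkeeping added by this patch, without locking or real TSC reads.
 */
#include <stdio.h>

static int nr_warps;		/* shared: total warps observed		*/
static int random_warps;	/* shared: warps seen on more than one CPU */

struct cpu_state {
	int cur_warps;		/* per-CPU: nr_warps as of this CPU's last warp */
};

/* Called when a CPU observes prev > now; in the kernel, sync_lock is held. */
static void record_warp(struct cpu_state *cpu)
{
	if (cpu->cur_warps != nr_warps)
		random_warps++;
	nr_warps++;
	cpu->cur_warps = nr_warps;
}

int main(void)
{
	struct cpu_state cpu0 = { 0 }, cpu1 = { 0 };

	/* All warps seen on one CPU: random_warps stays 0. */
	record_warp(&cpu0);
	record_warp(&cpu0);
	printf("one CPU warping:   nr_warps=%d random_warps=%d\n",
	       nr_warps, random_warps);

	/* The other CPU also observes a warp: random_warps increments. */
	record_warp(&cpu1);
	printf("both CPUs warping: nr_warps=%d random_warps=%d\n",
	       nr_warps, random_warps);
	return 0;
}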
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 78083bf23ed1..40f8edd55151 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -37,6 +37,7 @@ static arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 static cycles_t last_tsc;
 static cycles_t max_warp;
 static int nr_warps;
+static int random_warps;
 
 /*
  * TSC-warp measurement loop running on both CPUs. This is not called
@@ -45,7 +46,7 @@ static int nr_warps;
 static void check_tsc_warp(unsigned int timeout)
 {
 	cycles_t start, now, prev, end;
-	int i;
+	int i, cur_warps = 0;
 
 	start = rdtsc_ordered();
 	/*
@@ -85,7 +86,14 @@ static void check_tsc_warp(unsigned int timeout)
 		if (unlikely(prev > now)) {
 			arch_spin_lock(&sync_lock);
 			max_warp = max(max_warp, prev - now);
+			/*
+			 * Check whether this bounces back and forth. Only
+			 * one CPU should observe time going backwards.
+			 */
+			if (cur_warps != nr_warps)
+				random_warps++;
 			nr_warps++;
+			cur_warps = nr_warps;
 			arch_spin_unlock(&sync_lock);
 		}
 	}
@@ -160,6 +168,8 @@ void check_tsc_sync_source(int cpu)
 			smp_processor_id(), cpu);
 		pr_warning("Measured %Ld cycles TSC warp between CPUs, "
 			   "turning off TSC clock.\n", max_warp);
+		if (random_warps)
+			pr_warning("TSC warped randomly between CPUs\n");
 		mark_tsc_unstable("check_tsc_sync_source failed");
 	} else {
 		pr_debug("TSC synchronization [CPU#%d -> CPU#%d]: passed\n",
@@ -170,6 +180,7 @@ void check_tsc_sync_source(int cpu)
 	 * Reset it - just in case we boot another CPU later:
 	 */
 	atomic_set(&start_count, 0);
+	random_warps = 0;
 	nr_warps = 0;
 	max_warp = 0;
 	last_tsc = 0;