aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authormike.travis@hpe.com <mike.travis@hpe.com>2017-10-12 12:32:02 -0400
committerThomas Gleixner <tglx@linutronix.de>2017-10-16 16:50:36 -0400
commit341102c3ef29c33611586072363cf9982a8bdb77 (patch)
tree3eb0e290e32b80153189269f92edf2e187c97a44
parentccb64941f375a6eb21b1b20136730eb7d1716068 (diff)
x86/tsc: Add option that TSC on Socket 0 being non-zero is valid
Add a flag to indicate — and handle the case — that TSC counters are on chassis that reset at different times during system startup. Therefore, it is not predictable which TSC ADJUST values should be zero. Signed-off-by: Mike Travis <mike.travis@hpe.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Reviewed-by: Dimitri Sivanich <dimitri.sivanich@hpe.com> Reviewed-by: Russ Anderson <russ.anderson@hpe.com> Reviewed-by: Andrew Banman <andrew.abanman@hpe.com> Reviewed-by: Peter Zijlstra <peterz@infradead.org> Cc: Prarit Bhargava <prarit@redhat.com> Cc: Andrew Banman <andrew.banman@hpe.com> Cc: Bin Gao <bin.gao@linux.intel.com> Link: https://lkml.kernel.org/r/20171012163201.944370012@stormcage.americas.sgi.com
-rw-r--r--arch/x86/include/asm/tsc.h2
-rw-r--r--arch/x86/kernel/tsc_sync.c39
2 files changed, 37 insertions, 4 deletions
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index d0509c75e150..79125f3609c4 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -36,11 +36,13 @@ extern void tsc_init(void);
36extern void mark_tsc_unstable(char *reason); 36extern void mark_tsc_unstable(char *reason);
37extern int unsynchronized_tsc(void); 37extern int unsynchronized_tsc(void);
38extern int check_tsc_unstable(void); 38extern int check_tsc_unstable(void);
39extern void mark_tsc_async_resets(char *reason);
39extern unsigned long native_calibrate_cpu(void); 40extern unsigned long native_calibrate_cpu(void);
40extern unsigned long native_calibrate_tsc(void); 41extern unsigned long native_calibrate_tsc(void);
41extern unsigned long long native_sched_clock_from_tsc(u64 tsc); 42extern unsigned long long native_sched_clock_from_tsc(u64 tsc);
42 43
43extern int tsc_clocksource_reliable; 44extern int tsc_clocksource_reliable;
45extern bool tsc_async_resets;
44 46
45/* 47/*
46 * Boot-time check whether the TSCs are synchronized across 48 * Boot-time check whether the TSCs are synchronized across
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 7842371bc9e4..3873dcdc7d7b 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -30,6 +30,20 @@ struct tsc_adjust {
30 30
31static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust); 31static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
32 32
33/*
34 * TSC's on different sockets may be reset asynchronously.
35 * This may cause the TSC ADJUST value on socket 0 to be nonzero.
36 */
37bool __read_mostly tsc_async_resets;
38
39void mark_tsc_async_resets(char *reason)
40{
41 if (tsc_async_resets)
42 return;
43 tsc_async_resets = true;
44 pr_info("tsc: Marking TSC async resets true due to %s\n", reason);
45}
46
33void tsc_verify_tsc_adjust(bool resume) 47void tsc_verify_tsc_adjust(bool resume)
34{ 48{
35 struct tsc_adjust *adj = this_cpu_ptr(&tsc_adjust); 49 struct tsc_adjust *adj = this_cpu_ptr(&tsc_adjust);
@@ -71,12 +85,22 @@ static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval,
71 * non zero. We don't do that on non boot cpus because physical 85 * non zero. We don't do that on non boot cpus because physical
72 * hotplug should have set the ADJUST register to a value > 0 so 86 * hotplug should have set the ADJUST register to a value > 0 so
73 * the TSC is in sync with the already running cpus. 87 * the TSC is in sync with the already running cpus.
88 *
89 * Also don't force the ADJUST value to zero if that is a valid value
90 * for socket 0 as determined by the system arch. This is required
91 * when multiple sockets are reset asynchronously with each other
92 * and socket 0 may not have a TSC ADJUST value of 0.
74 */ 93 */
75 if (bootcpu && bootval != 0) { 94 if (bootcpu && bootval != 0) {
76 pr_warn(FW_BUG "TSC ADJUST: CPU%u: %lld force to 0\n", cpu, 95 if (likely(!tsc_async_resets)) {
77 bootval); 96 pr_warn(FW_BUG "TSC ADJUST: CPU%u: %lld force to 0\n",
78 wrmsrl(MSR_IA32_TSC_ADJUST, 0); 97 cpu, bootval);
79 bootval = 0; 98 wrmsrl(MSR_IA32_TSC_ADJUST, 0);
99 bootval = 0;
100 } else {
101 pr_info("TSC ADJUST: CPU%u: %lld NOT forced to 0\n",
102 cpu, bootval);
103 }
80 } 104 }
81 cur->adjusted = bootval; 105 cur->adjusted = bootval;
82} 106}
@@ -118,6 +142,13 @@ bool tsc_store_and_check_tsc_adjust(bool bootcpu)
118 cur->warned = false; 142 cur->warned = false;
119 143
120 /* 144 /*
145 * If a non-zero TSC value for socket 0 may be valid then the default
146 * adjusted value cannot be assumed to be zero either.
147 */
148 if (tsc_async_resets)
149 cur->adjusted = bootval;
150
151 /*
121 * Check whether this CPU is the first in a package to come up. In 152 * Check whether this CPU is the first in a package to come up. In
122 * this case do not check the boot value against another package 153 * this case do not check the boot value against another package
123 * because the new package might have been physically hotplugged, 154 * because the new package might have been physically hotplugged,