author    Peter Zijlstra <peterz@infradead.org>  2017-03-17 07:48:18 -0400
committer Ingo Molnar <mingo@kernel.org>         2017-03-23 02:31:49 -0400
commit    698eff6355f735d46d1b7113df8b422874cd7988 (patch)
tree      77a7207a64ac5c8b87844bc4e91f551bf4102275 /kernel/sched/clock.c
parent    71fdb70eb48784c1f28cdf2e67c4c587dd7f2594 (diff)
sched/clock, x86/perf: Fix "perf test tsc"
People reported that commit:

  5680d8094ffa ("sched/clock: Provide better clock continuity")

broke "perf test tsc".

That commit added another offset to the reported clock value; so take
that into account when computing the provided offset values.

Reported-by: Adrian Hunter <adrian.hunter@intel.com>
Reported-by: Arnaldo Carvalho de Melo <acme@kernel.org>
Tested-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: 5680d8094ffa ("sched/clock: Provide better clock continuity")
Signed-off-by: Ingo Molnar <mingo@kernel.org>
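To illustrate what the renamed offsets do: the comment in the diff below states
the invariant ktime_get_ns() + __gtod_offset == sched_clock() + __sched_clock_offset,
and __set_sched_clock_stable() picks __sched_clock_offset so the reported clock
does not jump at the unstable->stable transition. The following is a minimal
stand-alone C sketch, not kernel code; raw_clock and gtod_clock are hypothetical
stand-ins for sched_clock() and ktime_get_ns(), with arbitrary example values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t raw_clock;        /* stand-in for sched_clock()        */
static uint64_t gtod_clock;       /* stand-in for ktime_get_ns()       */
static uint64_t sched_clock_off;  /* kernel: __sched_clock_offset      */
static uint64_t gtod_off;         /* kernel: __gtod_offset             */

/*
 * Mirrors __set_sched_clock_stable(): choose sched_clock_off such that
 * raw_clock + sched_clock_off == gtod_clock + gtod_off, so the value
 * returned to consumers is continuous across the transition.
 */
static void mark_stable(void)
{
	sched_clock_off = (gtod_clock + gtod_off) - raw_clock;
}

/* Mirrors sched_clock_cpu() in the stable case. */
static uint64_t stable_clock(void)
{
	return raw_clock + sched_clock_off;
}

int main(void)
{
	raw_clock  = 1000;  /* TSC-based clock, arbitrary base */
	gtod_clock = 5000;  /* GTOD clock, arbitrary base      */
	mark_stable();
	printf("%" PRIu64 "\n", stable_clock());  /* prints 5000: no jump */
	return 0;
}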
Diffstat (limited to 'kernel/sched/clock.c')
-rw-r--r--  kernel/sched/clock.c  22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index fec0f58c8dee..24a3e01bf8cb 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -96,10 +96,10 @@ static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable);
 static int __sched_clock_stable_early = 1;
 
 /*
- * We want: ktime_get_ns() + gtod_offset == sched_clock() + raw_offset
+ * We want: ktime_get_ns() + __gtod_offset == sched_clock() + __sched_clock_offset
  */
-static __read_mostly u64 raw_offset;
-static __read_mostly u64 gtod_offset;
+__read_mostly u64 __sched_clock_offset;
+static __read_mostly u64 __gtod_offset;
 
 struct sched_clock_data {
 	u64			tick_raw;
@@ -131,11 +131,11 @@ static void __set_sched_clock_stable(void)
 	/*
 	 * Attempt to make the (initial) unstable->stable transition continuous.
 	 */
-	raw_offset = (scd->tick_gtod + gtod_offset) - (scd->tick_raw);
+	__sched_clock_offset = (scd->tick_gtod + __gtod_offset) - (scd->tick_raw);
 
 	printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n",
-			scd->tick_gtod, gtod_offset,
-			scd->tick_raw,  raw_offset);
+			scd->tick_gtod, __gtod_offset,
+			scd->tick_raw,  __sched_clock_offset);
 
 	static_branch_enable(&__sched_clock_stable);
 	tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
@@ -161,11 +161,11 @@ static void __clear_sched_clock_stable(void)
 	 *
 	 * Still do what we can.
 	 */
-	gtod_offset = (scd->tick_raw + raw_offset) - (scd->tick_gtod);
+	__gtod_offset = (scd->tick_raw + __sched_clock_offset) - (scd->tick_gtod);
 
 	printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n",
-			scd->tick_gtod, gtod_offset,
-			scd->tick_raw,  raw_offset);
+			scd->tick_gtod, __gtod_offset,
+			scd->tick_raw,  __sched_clock_offset);
 
 	tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);
 
@@ -238,7 +238,7 @@ again:
 	 *		      scd->tick_gtod + TICK_NSEC);
 	 */
 
-	clock = scd->tick_gtod + gtod_offset + delta;
+	clock = scd->tick_gtod + __gtod_offset + delta;
 	min_clock = wrap_max(scd->tick_gtod, old_clock);
 	max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);
 
@@ -324,7 +324,7 @@ u64 sched_clock_cpu(int cpu)
 	u64 clock;
 
 	if (sched_clock_stable())
-		return sched_clock() + raw_offset;
+		return sched_clock() + __sched_clock_offset;
 
 	if (unlikely(!sched_clock_running))
 		return 0ull;
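
Why "perf test tsc" caught this: user space converts raw TSC values to perf
timestamps using the time_shift/time_mult/time_zero fields exported in
struct perf_event_mmap_page, and that result must match sched_clock_cpu(),
which in the stable case above is sched_clock() + __sched_clock_offset. The
companion x86/perf change folding the new offset into those exported fields
is outside this diffstat. As a sketch under those assumptions, the
user-space conversion follows the formula documented in
include/uapi/linux/perf_event.h:

#include <stdint.h>

/*
 * Conversion parameters as exported via struct perf_event_mmap_page
 * when cap_user_time_zero is set (see include/uapi/linux/perf_event.h).
 */
struct tsc_conversion {
	uint16_t time_shift;
	uint32_t time_mult;
	uint64_t time_zero;
};

/*
 * TSC cycles -> perf timestamp, per the documented formula. For the
 * result to agree with sched_clock_cpu(), time_zero has to account
 * for __sched_clock_offset after this patch.
 */
static uint64_t tsc_to_perf_time(uint64_t cyc, const struct tsc_conversion *tc)
{
	uint64_t quot = cyc >> tc->time_shift;
	uint64_t rem  = cyc & (((uint64_t)1 << tc->time_shift) - 1);

	return tc->time_zero + quot * tc->time_mult +
	       ((rem * tc->time_mult) >> tc->time_shift);
}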