author     Linus Torvalds <torvalds@linux-foundation.org>   2008-04-07 16:14:37 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2008-04-07 16:14:37 -0400
commit     950b0d28378c4ee63a30dad732a8319c8a41c95d (patch)
tree       e7e8a41ac5b11b650180a84c92f517d15907a9ba /arch
parent     2557a933b795c1988c721ebb871cd735128bb9cb (diff)
parent     871de939030c903fd5ed50a7c4c88e02998e1cbc (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86:
x86: fix 64-bit asm NOPS for CONFIG_GENERIC_CPU
x86: fix call to set_cyc2ns_scale() from time_cpufreq_notifier()
revert "x86: tsc prevent time going backwards"
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/tsc_32.c  19
-rw-r--r--  arch/x86/kernel/tsc_64.c  27
2 files changed, 6 insertions, 40 deletions
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
index d7498b34c8e9..c2241e04ea5f 100644
--- a/arch/x86/kernel/tsc_32.c
+++ b/arch/x86/kernel/tsc_32.c
@@ -256,9 +256,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
 						ref_freq, freq->new);
 			if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
 				tsc_khz = cpu_khz;
-				preempt_disable();
-				set_cyc2ns_scale(cpu_khz, smp_processor_id());
-				preempt_enable();
+				set_cyc2ns_scale(cpu_khz, freq->cpu);
 				/*
 				 * TSC based sched_clock turns
 				 * to junk w/ cpufreq
@@ -287,27 +285,14 @@ core_initcall(cpufreq_tsc);
 /* clock source code */
 
 static unsigned long current_tsc_khz = 0;
-static struct clocksource clocksource_tsc;
 
-/*
- * We compare the TSC to the cycle_last value in the clocksource
- * structure to avoid a nasty time-warp issue. This can be observed in
- * a very small window right after one CPU updated cycle_last under
- * xtime lock and the other CPU reads a TSC value which is smaller
- * than the cycle_last reference value due to a TSC which is slighty
- * behind. This delta is nowhere else observable, but in that case it
- * results in a forward time jump in the range of hours due to the
- * unsigned delta calculation of the time keeping core code, which is
- * necessary to support wrapping clocksources like pm timer.
- */
 static cycle_t read_tsc(void)
 {
 	cycle_t ret;
 
 	rdtscll(ret);
 
-	return ret >= clocksource_tsc.cycle_last ?
-		ret : clocksource_tsc.cycle_last;
+	return ret;
 }
 
 static struct clocksource clocksource_tsc = {
diff --git a/arch/x86/kernel/tsc_64.c b/arch/x86/kernel/tsc_64.c
index 01fc9f0c39e2..d3bebaaad842 100644
--- a/arch/x86/kernel/tsc_64.c
+++ b/arch/x86/kernel/tsc_64.c
@@ -11,7 +11,6 @@
 #include <asm/hpet.h>
 #include <asm/timex.h>
 #include <asm/timer.h>
-#include <asm/vgtod.h>
 
 static int notsc __initdata = 0;
 
@@ -149,9 +148,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 		mark_tsc_unstable("cpufreq changes");
 	}
 
-	preempt_disable();
-	set_cyc2ns_scale(tsc_khz_ref, smp_processor_id());
-	preempt_enable();
+	set_cyc2ns_scale(tsc_khz_ref, freq->cpu);
 
 	return 0;
 }
@@ -291,34 +288,18 @@ int __init notsc_setup(char *s)
 
 __setup("notsc", notsc_setup);
 
-static struct clocksource clocksource_tsc;
 
-/*
- * We compare the TSC to the cycle_last value in the clocksource
- * structure to avoid a nasty time-warp. This can be observed in a
- * very small window right after one CPU updated cycle_last under
- * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
- * is smaller than the cycle_last reference value due to a TSC which
- * is slighty behind. This delta is nowhere else observable, but in
- * that case it results in a forward time jump in the range of hours
- * due to the unsigned delta calculation of the time keeping core
- * code, which is necessary to support wrapping clocksources like pm
- * timer.
- */
+/* clock source code: */
 static cycle_t read_tsc(void)
 {
 	cycle_t ret = (cycle_t)get_cycles();
-
-	return ret >= clocksource_tsc.cycle_last ?
-		ret : clocksource_tsc.cycle_last;
+	return ret;
 }
 
 static cycle_t __vsyscall_fn vread_tsc(void)
 {
 	cycle_t ret = (cycle_t)vget_cycles();
-
-	return ret >= __vsyscall_gtod_data.clock.cycle_last ?
-		ret : __vsyscall_gtod_data.clock.cycle_last;
+	return ret;
 }
 
 static struct clocksource clocksource_tsc = {
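
The other change in both files replaces the preempt_disable()/smp_processor_id()/preempt_enable() sequence with set_cyc2ns_scale(..., freq->cpu): the cpufreq notifier is not guaranteed to run on the CPU whose frequency changed, so rescaling the CPU returned by smp_processor_id() could update the wrong per-CPU cyc2ns factor, whereas freq->cpu names the affected CPU directly. Below is a minimal user-space sketch of that pattern, with a trimmed stand-in struct and a printf in place of the real per-CPU rescale (hypothetical names, not the kernel's).

#include <stdio.h>

/* trimmed stand-in for the kernel's struct cpufreq_freqs */
struct cpufreq_freqs {
        unsigned int cpu;               /* CPU whose frequency changed */
        unsigned int old;               /* old frequency, kHz */
        unsigned int new;               /* new frequency, kHz */
};

/* stand-in: the kernel recomputes the per-CPU cyc2ns factor here */
static void set_cyc2ns_scale(unsigned long khz, int cpu)
{
        printf("rescale cyc2ns for cpu %d at %lu kHz\n", cpu, khz);
}

/* fixed pattern: scale the CPU named in the notifier data, not
 * whichever CPU happens to run the callback */
static int time_cpufreq_notifier_sketch(struct cpufreq_freqs *freq,
                                        unsigned long tsc_khz_ref)
{
        set_cyc2ns_scale(tsc_khz_ref, freq->cpu);
        return 0;
}

int main(void)
{
        struct cpufreq_freqs f = { .cpu = 2, .old = 1600000, .new = 800000 };
        return time_cpufreq_notifier_sketch(&f, 2000000);
}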