author     Alok Kataria <akataria@vmware.com>    2008-07-01 14:43:34 -0400
committer  Ingo Molnar <mingo@elte.hu>           2008-07-09 01:43:27 -0400
commit     8fbbc4b45ce3e4c0eeb15004c79c72b6896a79c2
tree       3b7c8e4ee74ef5fec07f64ea56bff60afb433c2c
parent     2dbe06faf37b39f9ecffc054dd173b2a1dc2adcd
x86: merge tsc_init and clocksource code
Unify the clocksource code.
Unify the tsc_init code.
Signed-off-by: Alok N Kataria <akataria@vmware.com>
Signed-off-by: Dan Hecht <dhecht@vmware.com>
Cc: Dan Hecht <dhecht@vmware.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
 arch/x86/kernel/Makefile  |   2
 arch/x86/kernel/time_64.c |  32
 arch/x86/kernel/tsc.c     | 212
 arch/x86/kernel/tsc_32.c  | 188
 arch/x86/kernel/tsc_64.c  | 106
 include/asm-x86/apic.h    |   7
 include/asm-x86/delay.h   |   4
 include/asm-x86/time.h    |   2
 include/asm-x86/tsc.h     |   1
 9 files changed, 224 insertions(+), 330 deletions(-)
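
The clocksource this patch installs converts TSC cycles to nanoseconds with a fixed-point mult/shift pair: .shift = 22 in clocksource_tsc below, with mult filled in by clocksource_khz2mult(tsc_khz, ...) at registration time. The following standalone userspace sketch is illustrative only, not part of the patch: the 2.4 GHz frequency is an assumption, and the real kernel helper rounds slightly differently, but the arithmetic is the same.

/*
 * Illustrative sketch of the fixed-point cycles->nanoseconds conversion:
 * ns = (cycles * mult) >> shift, where mult ~= (10^6 << shift) / tsc_khz.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const unsigned int shift = 22;		/* same shift as clocksource_tsc */
	uint64_t tsc_khz = 2400000;		/* assumed 2.4 GHz TSC */

	/* nanoseconds per cycle, scaled up by 2^shift */
	uint64_t mult = (1000000ULL << shift) / tsc_khz;

	uint64_t cycles = 2400000000ULL;	/* one second worth of cycles */
	uint64_t ns = (cycles * mult) >> shift;	/* ~10^9, minus rounding error */

	printf("mult=%llu: %llu cycles -> %llu ns\n",
	       (unsigned long long)mult, (unsigned long long)cycles,
	       (unsigned long long)ns);
	return 0;
}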
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index ca904ee17252..59b14c940a28 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -26,7 +26,7 @@ obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o
 obj-y += bootflag.o e820.o
 obj-y += pci-dma.o quirks.o i8237.o topology.o kdebugfs.o
 obj-y += alternative.o i8253.o pci-nommu.o
-obj-y += tsc_$(BITS).o io_delay.o rtc.o tsc.o
+obj-y += tsc.o io_delay.o rtc.o
 
 obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
 obj-y += process.o
diff --git a/arch/x86/kernel/time_64.c b/arch/x86/kernel/time_64.c
index c6ac4dad41f6..e3d49c553af2 100644
--- a/arch/x86/kernel/time_64.c
+++ b/arch/x86/kernel/time_64.c
@@ -56,7 +56,7 @@ static irqreturn_t timer_event_interrupt(int irq, void *dev_id)
 /* calibrate_cpu is used on systems with fixed rate TSCs to determine
  * processor frequency */
 #define TICK_COUNT 100000000
-static unsigned long __init calibrate_cpu(void)
+unsigned long __init calibrate_cpu(void)
 {
 	int tsc_start, tsc_now;
 	int i, no_ctr_free;
@@ -114,41 +114,13 @@ void __init hpet_time_init(void)
 	setup_irq(0, &irq0);
 }
 
-extern void set_cyc2ns_scale(unsigned long cpu_khz, int cpu);
-
 void __init time_init(void)
 {
-	int cpu;
-
-	cpu_khz = calculate_cpu_khz();
-	tsc_khz = cpu_khz;
-
-	if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
-		(boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
-		cpu_khz = calibrate_cpu();
-
-	lpj_fine = ((unsigned long)tsc_khz * 1000)/HZ;
-
-	if (unsynchronized_tsc())
-		mark_tsc_unstable("TSCs unsynchronized");
-
+	tsc_init();
 	if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP))
 		vgetcpu_mode = VGETCPU_RDTSCP;
 	else
 		vgetcpu_mode = VGETCPU_LSL;
 
-	printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
-		cpu_khz / 1000, cpu_khz % 1000);
-
-	/*
-	 * Secondary CPUs do not run through tsc_init(), so set up
-	 * all the scale factors for all CPUs, assuming the same
-	 * speed as the bootup CPU. (cpufreq notifiers will fix this
-	 * up if their speed diverges)
-	 */
-	for_each_possible_cpu(cpu)
-		set_cyc2ns_scale(cpu_khz, cpu);
-
-	init_tsc_clocksource();
 	late_time_init = choose_time_init();
 }
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 595f78a22212..94c16bdd5696 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -5,8 +5,16 @@
 #include <linux/timer.h>
 #include <linux/acpi_pmtmr.h>
 #include <linux/cpufreq.h>
+#include <linux/dmi.h>
+#include <linux/delay.h>
+#include <linux/clocksource.h>
+#include <linux/percpu.h>
 
 #include <asm/hpet.h>
+#include <asm/timer.h>
+#include <asm/vgtod.h>
+#include <asm/time.h>
+#include <asm/delay.h>
 
 unsigned int cpu_khz; /* TSC clocks / usec, not used here */
 EXPORT_SYMBOL(cpu_khz);
@@ -16,12 +24,12 @@ EXPORT_SYMBOL(tsc_khz);
 /*
  * TSC can be unstable due to cpufreq or due to unsynced TSCs
  */
-int tsc_unstable;
+static int tsc_unstable;
 
 /* native_sched_clock() is called before tsc_init(), so
    we must start with the TSC soft disabled to prevent
    erroneous rdtsc usage on !cpu_has_tsc processors */
-int tsc_disabled = -1;
+static int tsc_disabled = -1;
 
 /*
  * Scheduler clock - returns current time in nanosec units.
@@ -241,7 +249,7 @@ EXPORT_SYMBOL(recalibrate_cpu_khz);
 
 DEFINE_PER_CPU(unsigned long, cyc2ns);
 
-void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
+static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 {
 	unsigned long long tsc_now, ns_now;
 	unsigned long flags, *scale;
@@ -329,3 +337,201 @@ static int __init cpufreq_tsc(void)
 core_initcall(cpufreq_tsc);
 
 #endif /* CONFIG_CPU_FREQ */
+
+/* clocksource code */
+
+static struct clocksource clocksource_tsc;
+
+/*
+ * We compare the TSC to the cycle_last value in the clocksource
+ * structure to avoid a nasty time-warp. This can be observed in a
+ * very small window right after one CPU updated cycle_last under
+ * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
+ * is smaller than the cycle_last reference value due to a TSC which
+ * is slighty behind. This delta is nowhere else observable, but in
+ * that case it results in a forward time jump in the range of hours
+ * due to the unsigned delta calculation of the time keeping core
+ * code, which is necessary to support wrapping clocksources like pm
+ * timer.
+ */
+static cycle_t read_tsc(void)
+{
+	cycle_t ret = (cycle_t)get_cycles();
+
+	return ret >= clocksource_tsc.cycle_last ?
+		ret : clocksource_tsc.cycle_last;
+}
+
+static cycle_t __vsyscall_fn vread_tsc(void)
+{
+	cycle_t ret = (cycle_t)vget_cycles();
+
+	return ret >= __vsyscall_gtod_data.clock.cycle_last ?
+		ret : __vsyscall_gtod_data.clock.cycle_last;
+}
+
+static struct clocksource clocksource_tsc = {
+	.name = "tsc",
+	.rating = 300,
+	.read = read_tsc,
+	.mask = CLOCKSOURCE_MASK(64),
+	.shift = 22,
+	.flags = CLOCK_SOURCE_IS_CONTINUOUS |
+		 CLOCK_SOURCE_MUST_VERIFY,
+#ifdef CONFIG_X86_64
+	.vread = vread_tsc,
+#endif
+};
+
+void mark_tsc_unstable(char *reason)
+{
+	if (!tsc_unstable) {
+		tsc_unstable = 1;
+		printk("Marking TSC unstable due to %s\n", reason);
+		/* Change only the rating, when not registered */
+		if (clocksource_tsc.mult)
+			clocksource_change_rating(&clocksource_tsc, 0);
+		else
+			clocksource_tsc.rating = 0;
+	}
+}
+
+EXPORT_SYMBOL_GPL(mark_tsc_unstable);
+
+static int __init dmi_mark_tsc_unstable(const struct dmi_system_id *d)
+{
+	printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
+			d->ident);
+	tsc_unstable = 1;
+	return 0;
+}
+
+/* List of systems that have known TSC problems */
+static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
+	{
+		.callback = dmi_mark_tsc_unstable,
+		.ident = "IBM Thinkpad 380XD",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
+			DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
+		},
+	},
+	{}
+};
+
+/*
+ * Geode_LX - the OLPC CPU has a possibly a very reliable TSC
+ */
+#ifdef CONFIG_MGEODE_LX
+/* RTSC counts during suspend */
+#define RTSC_SUSP 0x100
+
+static void __init check_geode_tsc_reliable(void)
+{
+	unsigned long res_low, res_high;
+
+	rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
+	if (res_low & RTSC_SUSP)
+		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+}
+#else
+static inline void check_geode_tsc_reliable(void) { }
+#endif
+
+/*
+ * Make an educated guess if the TSC is trustworthy and synchronized
+ * over all CPUs.
+ */
+__cpuinit int unsynchronized_tsc(void)
+{
+	if (!cpu_has_tsc || tsc_unstable)
+		return 1;
+
+#ifdef CONFIG_SMP
+	if (apic_is_clustered_box())
+		return 1;
+#endif
+
+	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+		return 0;
+	/*
+	 * Intel systems are normally all synchronized.
+	 * Exceptions must mark TSC as unstable:
+	 */
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
+		/* assume multi socket systems are not synchronized: */
+		if (num_possible_cpus() > 1)
+			tsc_unstable = 1;
+	}
+
+	return tsc_unstable;
+}
+
+static void __init init_tsc_clocksource(void)
+{
+	clocksource_tsc.mult = clocksource_khz2mult(tsc_khz,
+			clocksource_tsc.shift);
+	/* lower the rating if we already know its unstable: */
+	if (check_tsc_unstable()) {
+		clocksource_tsc.rating = 0;
+		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
+	}
+	clocksource_register(&clocksource_tsc);
+}
+
+void __init tsc_init(void)
+{
+	u64 lpj;
+	int cpu;
+
+	if (!cpu_has_tsc)
+		return;
+
+	cpu_khz = calculate_cpu_khz();
+	tsc_khz = cpu_khz;
+
+	if (!cpu_khz) {
+		mark_tsc_unstable("could not calculate TSC khz");
+		return;
+	}
+
+#ifdef CONFIG_X86_64
+	if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
+			(boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
+		cpu_khz = calibrate_cpu();
+#endif
+
+	lpj = ((u64)tsc_khz * 1000);
+	do_div(lpj, HZ);
+	lpj_fine = lpj;
+
+	printk("Detected %lu.%03lu MHz processor.\n",
+			(unsigned long)cpu_khz / 1000,
+			(unsigned long)cpu_khz % 1000);
+
+	/*
+	 * Secondary CPUs do not run through tsc_init(), so set up
+	 * all the scale factors for all CPUs, assuming the same
+	 * speed as the bootup CPU. (cpufreq notifiers will fix this
+	 * up if their speed diverges)
+	 */
+	for_each_possible_cpu(cpu)
+		set_cyc2ns_scale(cpu_khz, cpu);
+
+	if (tsc_disabled > 0)
+		return;
+
+	/* now allow native_sched_clock() to use rdtsc */
+	tsc_disabled = 0;
+
+	use_tsc_delay();
+	/* Check and install the TSC clocksource */
+	dmi_check_system(bad_tsc_dmi_table);
+
+	if (unsynchronized_tsc())
+		mark_tsc_unstable("TSCs unsynchronized");
+
+	check_geode_tsc_reliable();
+	init_tsc_clocksource();
+}
+
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
deleted file mode 100644
index bbc153d36f84..000000000000
--- a/arch/x86/kernel/tsc_32.c
+++ /dev/null
@@ -1,188 +0,0 @@
-#include <linux/sched.h>
-#include <linux/clocksource.h>
-#include <linux/workqueue.h>
-#include <linux/delay.h>
-#include <linux/cpufreq.h>
-#include <linux/jiffies.h>
-#include <linux/init.h>
-#include <linux/dmi.h>
-#include <linux/percpu.h>
-
-#include <asm/delay.h>
-#include <asm/tsc.h>
-#include <asm/io.h>
-#include <asm/timer.h>
-
-#include "mach_timer.h"
-
-extern int tsc_unstable;
-extern int tsc_disabled;
-
-/* clock source code */
-
-static struct clocksource clocksource_tsc;
-
-/*
- * We compare the TSC to the cycle_last value in the clocksource
- * structure to avoid a nasty time-warp issue. This can be observed in
- * a very small window right after one CPU updated cycle_last under
- * xtime lock and the other CPU reads a TSC value which is smaller
- * than the cycle_last reference value due to a TSC which is slighty
- * behind. This delta is nowhere else observable, but in that case it
- * results in a forward time jump in the range of hours due to the
- * unsigned delta calculation of the time keeping core code, which is
- * necessary to support wrapping clocksources like pm timer.
- */
-static cycle_t read_tsc(void)
-{
-	cycle_t ret;
-
-	rdtscll(ret);
-
-	return ret >= clocksource_tsc.cycle_last ?
-		ret : clocksource_tsc.cycle_last;
-}
-
-static struct clocksource clocksource_tsc = {
-	.name = "tsc",
-	.rating = 300,
-	.read = read_tsc,
-	.mask = CLOCKSOURCE_MASK(64),
-	.mult = 0, /* to be set */
-	.shift = 22,
-	.flags = CLOCK_SOURCE_IS_CONTINUOUS |
-		 CLOCK_SOURCE_MUST_VERIFY,
-};
-
-void mark_tsc_unstable(char *reason)
-{
-	if (!tsc_unstable) {
-		tsc_unstable = 1;
-		printk("Marking TSC unstable due to: %s.\n", reason);
-		/* Can be called before registration */
-		if (clocksource_tsc.mult)
-			clocksource_change_rating(&clocksource_tsc, 0);
-		else
-			clocksource_tsc.rating = 0;
-	}
-}
-EXPORT_SYMBOL_GPL(mark_tsc_unstable);
-
-static int __init dmi_mark_tsc_unstable(const struct dmi_system_id *d)
-{
-	printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
-		d->ident);
-	tsc_unstable = 1;
-	return 0;
-}
-
-/* List of systems that have known TSC problems */
-static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
-	{
-		.callback = dmi_mark_tsc_unstable,
-		.ident = "IBM Thinkpad 380XD",
-		.matches = {
-			DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
-			DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
-		},
-	},
-	{}
-};
-
-/*
- * Make an educated guess if the TSC is trustworthy and synchronized
- * over all CPUs.
- */
-__cpuinit int unsynchronized_tsc(void)
-{
-	if (!cpu_has_tsc || tsc_unstable)
-		return 1;
-
-	/* Anything with constant TSC should be synchronized */
-	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
-		return 0;
-
-	/*
-	 * Intel systems are normally all synchronized.
-	 * Exceptions must mark TSC as unstable:
-	 */
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
-		/* assume multi socket systems are not synchronized: */
-		if (num_possible_cpus() > 1)
-			tsc_unstable = 1;
-	}
-	return tsc_unstable;
-}
-
-/*
- * Geode_LX - the OLPC CPU has a possibly a very reliable TSC
- */
-#ifdef CONFIG_MGEODE_LX
-/* RTSC counts during suspend */
-#define RTSC_SUSP 0x100
-
-static void __init check_geode_tsc_reliable(void)
-{
-	unsigned long res_low, res_high;
-
-	rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
-	if (res_low & RTSC_SUSP)
-		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
-}
-#else
-static inline void check_geode_tsc_reliable(void) { }
-#endif
-
-
-void __init tsc_init(void)
-{
-	int cpu;
-	u64 lpj;
-
-	if (!cpu_has_tsc || tsc_disabled > 0)
-		return;
-
-	cpu_khz = calculate_cpu_khz();
-	tsc_khz = cpu_khz;
-
-	if (!cpu_khz) {
-		mark_tsc_unstable("could not calculate TSC khz");
-		return;
-	}
-
-	lpj = ((u64)tsc_khz * 1000);
-	do_div(lpj, HZ);
-	lpj_fine = lpj;
-
-	/* now allow native_sched_clock() to use rdtsc */
-	tsc_disabled = 0;
-
-	printk("Detected %lu.%03lu MHz processor.\n",
-		(unsigned long)cpu_khz / 1000,
-		(unsigned long)cpu_khz % 1000);
-
-	/*
-	 * Secondary CPUs do not run through tsc_init(), so set up
-	 * all the scale factors for all CPUs, assuming the same
-	 * speed as the bootup CPU. (cpufreq notifiers will fix this
-	 * up if their speed diverges)
-	 */
-	for_each_possible_cpu(cpu)
-		set_cyc2ns_scale(cpu_khz, cpu);
-
-	use_tsc_delay();
-
-	/* Check and install the TSC clocksource */
-	dmi_check_system(bad_tsc_dmi_table);
-
-	unsynchronized_tsc();
-	check_geode_tsc_reliable();
-	clocksource_tsc.mult = clocksource_khz2mult(tsc_khz,
-		clocksource_tsc.shift);
-	/* lower the rating if we already know its unstable: */
-	if (check_tsc_unstable()) {
-		clocksource_tsc.rating = 0;
-		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
-	}
-	clocksource_register(&clocksource_tsc);
-}
diff --git a/arch/x86/kernel/tsc_64.c b/arch/x86/kernel/tsc_64.c
deleted file mode 100644
index 80a274b018c2..000000000000
--- a/arch/x86/kernel/tsc_64.c
+++ /dev/null
@@ -1,106 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/clocksource.h>
-#include <linux/time.h>
-#include <linux/acpi.h>
-#include <linux/cpufreq.h>
-#include <linux/acpi_pmtmr.h>
-
-#include <asm/hpet.h>
-#include <asm/timex.h>
-#include <asm/timer.h>
-#include <asm/vgtod.h>
-
-extern int tsc_unstable;
-extern int tsc_disabled;
-
-/*
- * Make an educated guess if the TSC is trustworthy and synchronized
- * over all CPUs.
- */
-__cpuinit int unsynchronized_tsc(void)
-{
-	if (tsc_unstable)
-		return 1;
-
-#ifdef CONFIG_SMP
-	if (apic_is_clustered_box())
-		return 1;
-#endif
-
-	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
-		return 0;
-
-	/* Assume multi socket systems are not synchronized */
-	return num_present_cpus() > 1;
-}
-
-static struct clocksource clocksource_tsc;
-
-/*
- * We compare the TSC to the cycle_last value in the clocksource
- * structure to avoid a nasty time-warp. This can be observed in a
- * very small window right after one CPU updated cycle_last under
- * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
- * is smaller than the cycle_last reference value due to a TSC which
- * is slighty behind. This delta is nowhere else observable, but in
- * that case it results in a forward time jump in the range of hours
- * due to the unsigned delta calculation of the time keeping core
- * code, which is necessary to support wrapping clocksources like pm
- * timer.
- */
-static cycle_t read_tsc(void)
-{
-	cycle_t ret = (cycle_t)get_cycles();
-
-	return ret >= clocksource_tsc.cycle_last ?
-		ret : clocksource_tsc.cycle_last;
-}
-
-static cycle_t __vsyscall_fn vread_tsc(void)
-{
-	cycle_t ret = (cycle_t)vget_cycles();
-
-	return ret >= __vsyscall_gtod_data.clock.cycle_last ?
-		ret : __vsyscall_gtod_data.clock.cycle_last;
-}
-
-static struct clocksource clocksource_tsc = {
-	.name = "tsc",
-	.rating = 300,
-	.read = read_tsc,
-	.mask = CLOCKSOURCE_MASK(64),
-	.shift = 22,
-	.flags = CLOCK_SOURCE_IS_CONTINUOUS |
-		 CLOCK_SOURCE_MUST_VERIFY,
-	.vread = vread_tsc,
-};
-
-void mark_tsc_unstable(char *reason)
-{
-	if (!tsc_unstable) {
-		tsc_unstable = 1;
-		printk("Marking TSC unstable due to %s\n", reason);
-		/* Change only the rating, when not registered */
-		if (clocksource_tsc.mult)
-			clocksource_change_rating(&clocksource_tsc, 0);
-		else
-			clocksource_tsc.rating = 0;
-	}
-}
-EXPORT_SYMBOL_GPL(mark_tsc_unstable);
-
-void __init init_tsc_clocksource(void)
-{
-	if (tsc_disabled > 0)
-		return;
-
-	clocksource_tsc.mult = clocksource_khz2mult(tsc_khz,
-		clocksource_tsc.shift);
-	if (check_tsc_unstable())
-		clocksource_tsc.rating = 0;
-
-	clocksource_register(&clocksource_tsc);
-}
diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h
index a29807737d3d..4e2c1e517f06 100644
--- a/include/asm-x86/apic.h
+++ b/include/asm-x86/apic.h
@@ -121,12 +121,17 @@ extern void enable_NMI_through_LVT0(void);
  */
 #ifdef CONFIG_X86_64
 extern void early_init_lapic_mapping(void);
+extern int apic_is_clustered_box(void);
+#else
+static inline int apic_is_clustered_box(void)
+{
+	return 0;
+}
 #endif
 
 extern u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask);
 extern u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask);
 
-extern int apic_is_clustered_box(void);
 
 #else /* !CONFIG_X86_LOCAL_APIC */
 static inline void lapic_shutdown(void) { }
diff --git a/include/asm-x86/delay.h b/include/asm-x86/delay.h
index 409a649204aa..bb80880c834b 100644
--- a/include/asm-x86/delay.h
+++ b/include/asm-x86/delay.h
@@ -26,6 +26,10 @@ extern void __delay(unsigned long loops);
 	((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
 	__ndelay(n))
 
+#ifdef CONFIG_X86_32
 void use_tsc_delay(void);
+#else
+#define use_tsc_delay() {}
+#endif
 
 #endif /* _ASM_X86_DELAY_H */
diff --git a/include/asm-x86/time.h b/include/asm-x86/time.h
index bce72d7a958c..a17fa473e91d 100644
--- a/include/asm-x86/time.h
+++ b/include/asm-x86/time.h
@@ -56,4 +56,6 @@ static inline int native_set_wallclock(unsigned long nowtime)
 
 #endif /* CONFIG_PARAVIRT */
 
+extern unsigned long __init calibrate_cpu(void);
+
 #endif
diff --git a/include/asm-x86/tsc.h b/include/asm-x86/tsc.h
index 761054d7fefb..cb6f6ee45b8f 100644
--- a/include/asm-x86/tsc.h
+++ b/include/asm-x86/tsc.h
@@ -48,7 +48,6 @@ static __always_inline cycles_t vget_cycles(void)
 extern void tsc_init(void);
 extern void mark_tsc_unstable(char *reason);
 extern int unsynchronized_tsc(void);
-extern void init_tsc_clocksource(void);
 int check_tsc_unstable(void);
 
 /*
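
A closing note on the time-warp guard that read_tsc() and vread_tsc() both carry into the unified file: clamping the returned counter to cycle_last is what prevents the hours-long forward jump described in the comment block above. A minimal userspace sketch of the failure mode and the fix follows; the counter values are made up for illustration, and cycle_t stands in for the kernel's unsigned 64-bit cycle type.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t cycle_t;

/* Last cycle value stored by the timekeeping core (CPU0 just wrote it). */
static cycle_t cycle_last = 1000000;

static cycle_t read_guarded(cycle_t tsc)
{
	/* Never return less than cycle_last: a slightly-behind TSC
	 * would otherwise yield a negative delta, which unsigned
	 * arithmetic turns into an enormous forward jump. */
	return tsc >= cycle_last ? tsc : cycle_last;
}

int main(void)
{
	cycle_t cpu1_tsc = 999990;	/* CPU1's TSC is 10 cycles behind */

	cycle_t raw_delta = cpu1_tsc - cycle_last;		  /* wraps around */
	cycle_t safe_delta = read_guarded(cpu1_tsc) - cycle_last; /* clamped to 0 */

	printf("raw delta:  %llu cycles\n", (unsigned long long)raw_delta);
	printf("safe delta: %llu cycles\n", (unsigned long long)safe_delta);
	return 0;
}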