author	Thomas Gleixner <tglx@linutronix.de>	2009-08-20 11:06:25 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2009-08-31 03:35:47 -0400
commit	2d826404f0bdcac2a4dd7e3c446b70d6a3b63b78 (patch)
tree	7db2dc0fbde3a25a89f1fc1514152567f612ccde
parent	47926214d8b2bef13b2be57c500194a804f16198 (diff)
x86: Move tsc_calibration to x86_init_ops
TSC calibration is modified by the vmware hypervisor and paravirt by separate means. Moorestown wants to add its own calibration routine as well. So make calibrate_tsc a proper x86_init_ops function and override it by paravirt or by the early setup of the vmware hypervisor.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
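For illustration only, a minimal sketch of how a platform would hook the new op. Only x86_platform.calibrate_tsc, its unsigned long (*)(void) signature, and the native_calibrate_tsc default come from this patch; the mrst_setup() name and the fixed frequency below are hypothetical:

#include <asm/x86_init.h>

/* Hypothetical platform calibration: return the TSC frequency in kHz. */
static unsigned long mrst_calibrate_tsc(void)
{
	return 100000;	/* placeholder value, not from this patch */
}

/* Hypothetical early platform setup, analogous to vmware_platform_setup(). */
void __init mrst_setup(void)
{
	/* Override the native_calibrate_tsc() default installed in x86_init.c. */
	x86_platform.calibrate_tsc = mrst_calibrate_tsc;
}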
-rw-r--r--	arch/x86/include/asm/hypervisor.h	2
-rw-r--r--	arch/x86/include/asm/paravirt.h	1
-rw-r--r--	arch/x86/include/asm/timer.h	5
-rw-r--r--	arch/x86/include/asm/tsc.h	3
-rw-r--r--	arch/x86/include/asm/vmware.h	2
-rw-r--r--	arch/x86/include/asm/x86_init.h	9
-rw-r--r--	arch/x86/kernel/cpu/hypervisor.c	14
-rw-r--r--	arch/x86/kernel/cpu/vmware.c	21
-rw-r--r--	arch/x86/kernel/kvmclock.c	2
-rw-r--r--	arch/x86/kernel/paravirt.c	1
-rw-r--r--	arch/x86/kernel/setup.c	2
-rw-r--r--	arch/x86/kernel/tsc.c	13
-rw-r--r--	arch/x86/kernel/vmi_32.c	2
-rw-r--r--	arch/x86/kernel/vmiclock_32.c	2
-rw-r--r--	arch/x86/kernel/x86_init.c	5
-rw-r--r--	arch/x86/lguest/boot.c	2
-rw-r--r--	arch/x86/xen/enlighten.c	3
17 files changed, 48 insertions, 41 deletions
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index 369f5c5d09a1..b78c0941e422 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -20,7 +20,7 @@
 #ifndef ASM_X86__HYPERVISOR_H
 #define ASM_X86__HYPERVISOR_H
 
-extern unsigned long get_hypervisor_tsc_freq(void);
 extern void init_hypervisor(struct cpuinfo_x86 *c);
+extern void init_hypervisor_platform(void);
 
 #endif
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 11a4ba7b209c..1e458a553303 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -210,7 +210,6 @@ static inline unsigned long long paravirt_sched_clock(void)
 {
 	return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
 }
-#define calibrate_tsc() (pv_time_ops.get_tsc_khz())
 
 static inline unsigned long long paravirt_read_pmc(int counter)
 {
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
index 65228ccc5f0d..5469630b27f5 100644
--- a/arch/x86/include/asm/timer.h
+++ b/arch/x86/include/asm/timer.h
@@ -8,7 +8,6 @@
 #define TICK_SIZE (tick_nsec / 1000)
 
 unsigned long long native_sched_clock(void);
-unsigned long native_calibrate_tsc(void);
 extern int recalibrate_cpu_khz(void);
 
 #if defined(CONFIG_X86_32) && defined(CONFIG_X86_IO_APIC)
@@ -19,10 +18,6 @@ extern int timer_ack;
 
 extern int no_timer_check;
 
-#ifndef CONFIG_PARAVIRT
-#define calibrate_tsc() native_calibrate_tsc()
-#endif
-
 /* Accelerators for sched_clock()
  * convert from cycles(64bits) => nanoseconds (64bits)
  * basic equation:
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 38ae163cc91b..c0427295e8f5 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -48,7 +48,8 @@ static __always_inline cycles_t vget_cycles(void)
 extern void tsc_init(void);
 extern void mark_tsc_unstable(char *reason);
 extern int unsynchronized_tsc(void);
-int check_tsc_unstable(void);
+extern int check_tsc_unstable(void);
+extern unsigned long native_calibrate_tsc(void);
 
 /*
  * Boot-time check whether the TSCs are synchronized across
diff --git a/arch/x86/include/asm/vmware.h b/arch/x86/include/asm/vmware.h
index c11b7e100d83..e49ed6d2fd4e 100644
--- a/arch/x86/include/asm/vmware.h
+++ b/arch/x86/include/asm/vmware.h
@@ -20,7 +20,7 @@
 #ifndef ASM_X86__VMWARE_H
 #define ASM_X86__VMWARE_H
 
-extern unsigned long vmware_get_tsc_khz(void);
+extern void vmware_platform_setup(void);
 extern int vmware_platform(void);
 extern void vmware_set_feature_bits(struct cpuinfo_x86 *c);
 
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index f8bdd2271a04..20df51871713 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -112,8 +112,17 @@ struct x86_cpuinit_ops {
 	void (*setup_percpu_clockev)(void);
 };
 
+/**
+ * struct x86_platform_ops - platform specific runtime functions
+ * @calibrate_tsc:		calibrate TSC
+ */
+struct x86_platform_ops {
+	unsigned long (*calibrate_tsc)(void);
+};
+
 extern struct x86_init_ops x86_init;
 extern struct x86_cpuinit_ops x86_cpuinit;
+extern struct x86_platform_ops x86_platform;
 
 extern void x86_init_noop(void);
 extern void x86_init_uint_noop(unsigned int unused);
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index 93ba8eeb100a..08be922de33a 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -34,13 +34,6 @@ detect_hypervisor_vendor(struct cpuinfo_x86 *c)
 		c->x86_hyper_vendor = X86_HYPER_VENDOR_NONE;
 }
 
-unsigned long get_hypervisor_tsc_freq(void)
-{
-	if (boot_cpu_data.x86_hyper_vendor == X86_HYPER_VENDOR_VMWARE)
-		return vmware_get_tsc_khz();
-	return 0;
-}
-
 static inline void __cpuinit
 hypervisor_set_feature_bits(struct cpuinfo_x86 *c)
 {
@@ -55,3 +48,10 @@ void __cpuinit init_hypervisor(struct cpuinfo_x86 *c)
 	detect_hypervisor_vendor(c);
 	hypervisor_set_feature_bits(c);
 }
+
+void __init init_hypervisor_platform(void)
+{
+	init_hypervisor(&boot_cpu_data);
+	if (boot_cpu_data.x86_hyper_vendor == X86_HYPER_VENDOR_VMWARE)
+		vmware_platform_setup();
+}
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index bc24f514ec93..0a46b4df5d80 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -24,6 +24,7 @@
 #include <linux/dmi.h>
 #include <asm/div64.h>
 #include <asm/vmware.h>
+#include <asm/x86_init.h>
 
 #define CPUID_VMWARE_INFO_LEAF	0x40000000
 #define VMWARE_HYPERVISOR_MAGIC	0x564D5868
@@ -47,21 +48,29 @@ static inline int __vmware_platform(void)
 	return eax != (uint32_t)-1 && ebx == VMWARE_HYPERVISOR_MAGIC;
 }
 
-static unsigned long __vmware_get_tsc_khz(void)
+static unsigned long vmware_get_tsc_khz(void)
 {
 	uint64_t tsc_hz;
 	uint32_t eax, ebx, ecx, edx;
 
 	VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
 
-	if (ebx == UINT_MAX)
-		return 0;
 	tsc_hz = eax | (((uint64_t)ebx) << 32);
 	do_div(tsc_hz, 1000);
 	BUG_ON(tsc_hz >> 32);
 	return tsc_hz;
 }
 
+void __init vmware_platform_setup(void)
+{
+	uint32_t eax, ebx, ecx, edx;
+
+	VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
+
+	if (ebx != UINT_MAX)
+		x86_platform.calibrate_tsc = vmware_get_tsc_khz;
+}
+
 /*
  * While checking the dmi string infomation, just checking the product
  * serial key should be enough, as this will always have a VMware
@@ -87,12 +96,6 @@ int vmware_platform(void)
 	return 0;
 }
 
-unsigned long vmware_get_tsc_khz(void)
-{
-	BUG_ON(!vmware_platform());
-	return __vmware_get_tsc_khz();
-}
-
 /*
  * VMware hypervisor takes care of exporting a reliable TSC to the guest.
  * Still, due to timing difference when running on virtual cpus, the TSC can
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 64e9b5f59d2d..75a21b61b863 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -187,7 +187,7 @@ void __init kvmclock_init(void)
 	pv_time_ops.get_wallclock = kvm_get_wallclock;
 	pv_time_ops.set_wallclock = kvm_set_wallclock;
 	pv_time_ops.sched_clock = kvm_clock_read;
-	pv_time_ops.get_tsc_khz = kvm_get_tsc_khz;
+	x86_platform.calibrate_tsc = kvm_get_tsc_khz;
 #ifdef CONFIG_X86_LOCAL_APIC
 	x86_cpuinit.setup_percpu_clockev =
 		kvm_setup_secondary_clock;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 9c0e644a76dc..7cbf898d839b 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -309,7 +309,6 @@ struct pv_time_ops pv_time_ops = {
 	.get_wallclock = native_get_wallclock,
 	.set_wallclock = native_set_wallclock,
 	.sched_clock = native_sched_clock,
-	.get_tsc_khz = native_calibrate_tsc,
 };
 
 struct pv_irq_ops pv_irq_ops = {
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index bb207a47c631..2d93026af7cd 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -818,7 +818,7 @@ void __init setup_arch(char **cmdline_p)
 	 * VMware detection requires dmi to be available, so this
 	 * needs to be done after dmi_scan_machine, for the BP.
 	 */
-	init_hypervisor(&boot_cpu_data);
+	init_hypervisor_platform();
 
 	x86_init.resources.probe_roms();
 
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 97a0bcbad100..9917632a8b49 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -18,6 +18,7 @@
 #include <asm/delay.h>
 #include <asm/hypervisor.h>
 #include <asm/nmi.h>
+#include <asm/x86_init.h>
 
 unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
 EXPORT_SYMBOL(cpu_khz);
@@ -401,15 +402,9 @@ unsigned long native_calibrate_tsc(void)
 {
 	u64 tsc1, tsc2, delta, ref1, ref2;
 	unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
-	unsigned long flags, latch, ms, fast_calibrate, hv_tsc_khz;
+	unsigned long flags, latch, ms, fast_calibrate;
 	int hpet = is_hpet_enabled(), i, loopmin;
 
-	hv_tsc_khz = get_hypervisor_tsc_freq();
-	if (hv_tsc_khz) {
-		printk(KERN_INFO "TSC: Frequency read from the hypervisor\n");
-		return hv_tsc_khz;
-	}
-
 	local_irq_save(flags);
 	fast_calibrate = quick_pit_calibrate();
 	local_irq_restore(flags);
@@ -567,7 +562,7 @@ int recalibrate_cpu_khz(void)
 	unsigned long cpu_khz_old = cpu_khz;
 
 	if (cpu_has_tsc) {
-		tsc_khz = calibrate_tsc();
+		tsc_khz = x86_platform.calibrate_tsc();
 		cpu_khz = tsc_khz;
 		cpu_data(0).loops_per_jiffy =
 			cpufreq_scale(cpu_data(0).loops_per_jiffy,
@@ -917,7 +912,7 @@ void __init tsc_init(void)
 	if (!cpu_has_tsc)
 		return;
 
-	tsc_khz = calibrate_tsc();
+	tsc_khz = x86_platform.calibrate_tsc();
 	cpu_khz = tsc_khz;
 
 	if (!tsc_khz) {
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index cd7d0fbbf66e..052ae81ee08b 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -825,7 +825,7 @@ static inline int __init activate_vmi(void)
 	x86_cpuinit.setup_percpu_clockev = vmi_time_ap_init;
 #endif
 	pv_time_ops.sched_clock = vmi_sched_clock;
-	pv_time_ops.get_tsc_khz = vmi_tsc_khz;
+	x86_platform.calibrate_tsc = vmi_tsc_khz;
 
 	/* We have true wallclock functions; disable CMOS clock sync */
 	no_sync_cmos_clock = 1;
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index 2b3eb82efeeb..611b9e2360d3 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -68,7 +68,7 @@ unsigned long long vmi_sched_clock(void)
 	return cycles_2_ns(vmi_timer_ops.get_cycle_counter(VMI_CYCLES_AVAILABLE));
 }
 
-/* paravirt_ops.get_tsc_khz = vmi_tsc_khz */
+/* x86_platform.calibrate_tsc = vmi_tsc_khz */
 unsigned long vmi_tsc_khz(void)
 {
 	unsigned long long khz;
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 4790b92714a6..13081b921914 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -13,6 +13,7 @@
 #include <asm/e820.h>
 #include <asm/time.h>
 #include <asm/irq.h>
+#include <asm/tsc.h>
 
 void __cpuinit x86_init_noop(void) { }
 void __init x86_init_uint_noop(unsigned int unused) { }
@@ -67,3 +68,7 @@ struct __initdata x86_init_ops x86_init = {
 __cpuinitdata struct x86_cpuinit_ops x86_cpuinit = {
 	.setup_percpu_clockev = setup_secondary_APIC_clock,
 };
+
+struct x86_platform_ops x86_platform = {
+	.calibrate_tsc = native_calibrate_tsc,
+};
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 6caa8c0c793b..fabe745513d9 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -1320,11 +1320,11 @@ __init void lguest_init(void)
 
 	/* Time operations */
 	pv_time_ops.get_wallclock = lguest_get_wallclock;
-	pv_time_ops.get_tsc_khz = lguest_tsc_khz;
 
 	x86_init.resources.memory_setup = lguest_memory_setup;
 	x86_init.irqs.intr_init = lguest_init_IRQ;
 	x86_init.timers.timer_init = lguest_time_init;
+	x86_platform.calibrate_tsc = lguest_tsc_khz;
 
 	/*
 	 * Now is a good time to look at the implementations of these functions
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 84826b842b54..ee8cac77c8a4 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -844,7 +844,6 @@ static const struct pv_init_ops xen_init_ops __initdata = {
 static const struct pv_time_ops xen_time_ops __initdata = {
 	.set_wallclock = xen_set_wallclock,
 	.get_wallclock = xen_get_wallclock,
-	.get_tsc_khz = xen_tsc_khz,
 	.sched_clock = xen_sched_clock,
 };
 
@@ -980,6 +979,8 @@ asmlinkage void __init xen_start_kernel(void)
 	x86_init.timers.setup_percpu_clockev = x86_init_noop;
 	x86_cpuinit.setup_percpu_clockev = x86_init_noop;
 
+	x86_platform.calibrate_tsc = xen_tsc_khz;
+
 #ifdef CONFIG_X86_64
 	/*
 	 * Setup percpu state. We only need to do this for 64-bit