about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
Diffstat (limited to 'arch')
-rw-r--r--   arch/x86/include/asm/vmware.h     |  1 +
-rw-r--r--   arch/x86/kernel/cpu/hypervisor.c  | 11 +++++++++++
-rw-r--r--   arch/x86/kernel/cpu/vmware.c      | 18 ++++++++++++++++++
-rw-r--r--   arch/x86/kernel/tsc_sync.c        |  8 +++++++-
4 files changed, 36 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/vmware.h b/arch/x86/include/asm/vmware.h
index 02dfea5aebc..c11b7e100d8 100644
--- a/arch/x86/include/asm/vmware.h
+++ b/arch/x86/include/asm/vmware.h
@@ -22,5 +22,6 @@
 
 extern unsigned long vmware_get_tsc_khz(void);
 extern int vmware_platform(void);
+extern void vmware_set_feature_bits(struct cpuinfo_x86 *c);
 
 #endif
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index 7bd55064ffe..35ae2b75226 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -41,8 +41,17 @@ unsigned long get_hypervisor_tsc_freq(void)
 	return 0;
 }
 
+static inline void __cpuinit
+hypervisor_set_feature_bits(struct cpuinfo_x86 *c)
+{
+	if (boot_cpu_data.x86_hyper_vendor == X86_HYPER_VENDOR_VMWARE) {
+		vmware_set_feature_bits(c);
+		return;
+	}
+}
+
 void __cpuinit init_hypervisor(struct cpuinfo_x86 *c)
 {
 	detect_hypervisor_vendor(c);
+	hypervisor_set_feature_bits(c);
 }
 
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index d5d1b75a4b7..2ac4394fcb9 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -86,3 +86,21 @@ unsigned long vmware_get_tsc_khz(void)
 	BUG_ON(!vmware_platform());
 	return __vmware_get_tsc_khz();
 }
+
+/*
+ * VMware hypervisor takes care of exporting a reliable TSC to the guest.
+ * Still, due to timing difference when running on virtual cpus, the TSC can
+ * be marked as unstable in some cases. For example, the TSC sync check at
+ * bootup can fail due to a marginal offset between vcpus' TSCs (though the
+ * TSCs do not drift from each other). Also, the ACPI PM timer clocksource
+ * is not suitable as a watchdog when running on a hypervisor because the
+ * kernel may miss a wrap of the counter if the vcpu is descheduled for a
+ * long time. To skip these checks at runtime we set these capability bits,
+ * so that the kernel could just trust the hypervisor with providing a
+ * reliable virtual TSC that is suitable for timekeeping.
+ */
+void __cpuinit vmware_set_feature_bits(struct cpuinfo_x86 *c)
+{
+	set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+	set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
+}
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 9ffb01c31c4..5977c40a138 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -108,6 +108,12 @@ void __cpuinit check_tsc_sync_source(int cpu)
 	if (unsynchronized_tsc())
 		return;
 
+	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
+		printk(KERN_INFO
+		       "Skipping synchronization checks as TSC is reliable.\n");
+		return;
+	}
+
 	printk(KERN_INFO "checking TSC synchronization [CPU#%d -> CPU#%d]:",
 		smp_processor_id(), cpu);
 
@@ -161,7 +167,7 @@ void __cpuinit check_tsc_sync_target(void)
 {
 	int cpus = 2;
 
-	if (unsynchronized_tsc())
+	if (unsynchronized_tsc() || boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
 		return;
 
 	/*