 arch/x86/kernel/time_64.c  |  2 +-
 arch/x86/kernel/tsc_64.c   | 12 ++++++------
 arch/x86/kernel/tsc_sync.c |  4 ++--
 include/asm-x86/tsc.h      | 60 +++++-----------------------------------------
 4 files changed, 17 insertions(+), 61 deletions(-)
diff --git a/arch/x86/kernel/time_64.c b/arch/x86/kernel/time_64.c
index 91d4d495904e..61b17f5ec867 100644
--- a/arch/x86/kernel/time_64.c
+++ b/arch/x86/kernel/time_64.c
@@ -83,7 +83,7 @@ unsigned long __init native_calculate_cpu_khz(void)
 	rdtscl(tsc_start);
 	do {
 		rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
-		tsc_now = get_cycles_sync();
+		tsc_now = get_cycles();
 	} while ((tsc_now - tsc_start) < TICK_COUNT);
 
 	local_irq_restore(flags);
diff --git a/arch/x86/kernel/tsc_64.c b/arch/x86/kernel/tsc_64.c
index 3723401c4593..2cc55b726c22 100644
--- a/arch/x86/kernel/tsc_64.c
+++ b/arch/x86/kernel/tsc_64.c
@@ -181,12 +181,12 @@ static unsigned long __init tsc_read_refs(unsigned long *pm,
 	int i;
 
 	for (i = 0; i < MAX_RETRIES; i++) {
-		t1 = get_cycles_sync();
+		t1 = get_cycles();
 		if (hpet)
 			*hpet = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
 		else
 			*pm = acpi_pm_read_early();
-		t2 = get_cycles_sync();
+		t2 = get_cycles();
 		if ((t2 - t1) < SMI_TRESHOLD)
 			return t2;
 	}
@@ -210,9 +210,9 @@ void __init tsc_calibrate(void)
 	outb(0xb0, 0x43);
 	outb((CLOCK_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
 	outb((CLOCK_TICK_RATE / (1000 / 50)) >> 8, 0x42);
-	tr1 = get_cycles_sync();
+	tr1 = get_cycles();
 	while ((inb(0x61) & 0x20) == 0);
-	tr2 = get_cycles_sync();
+	tr2 = get_cycles();
 
 	tsc2 = tsc_read_refs(&pm2, hpet ? &hpet2 : NULL);
 
@@ -300,13 +300,13 @@ __setup("notsc", notsc_setup);
 /* clock source code: */
 static cycle_t read_tsc(void)
 {
-	cycle_t ret = (cycle_t)get_cycles_sync();
+	cycle_t ret = (cycle_t)get_cycles();
 	return ret;
 }
 
 static cycle_t __vsyscall_fn vread_tsc(void)
 {
-	cycle_t ret = (cycle_t)vget_cycles_sync();
+	cycle_t ret = (cycle_t)vget_cycles();
 	return ret;
 }
 
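[Note: the tsc_read_refs() hunk above shows a common calibration trick: bracket a slow reference-clock read (HPET or ACPI PM timer) between two TSC reads, and retry when the bracket is wide, since a wide gap suggests an SMI fired mid-read. Below is a rough user-space sketch of the same pattern; the rdtsc()/read_refs() helpers and the retry/threshold values are illustrative, not the kernel's.]

#include <stdint.h>

#define MAX_RETRIES    5
#define SMI_THRESHOLD  50000ULL	/* illustrative, not the kernel's value */

static inline uint64_t rdtsc(void)
{
	uint32_t lo, hi;
	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
	return ((uint64_t)hi << 32) | lo;
}

/* Read the reference clock via the supplied callback; returns the TSC
 * value paired with *ref, or 0 if every attempt was disturbed (e.g. by
 * an SMI) between the two bracketing TSC reads. */
static uint64_t read_refs(uint64_t (*read_ref)(void), uint64_t *ref)
{
	uint64_t t1, t2;
	int i;

	for (i = 0; i < MAX_RETRIES; i++) {
		t1 = rdtsc();
		*ref = read_ref();
		t2 = rdtsc();
		/* A tight bracket means nothing big ran in between,
		 * so the reference sample is trustworthy. */
		if ((t2 - t1) < SMI_THRESHOLD)
			return t2;
	}
	return 0;
}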
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 05d8f25de6ae..ace340524c01 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -46,7 +46,7 @@ static __cpuinit void check_tsc_warp(void)
 	cycles_t start, now, prev, end;
 	int i;
 
-	start = get_cycles_sync();
+	start = get_cycles();
 	/*
 	 * The measurement runs for 20 msecs:
 	 */
@@ -61,7 +61,7 @@ static __cpuinit void check_tsc_warp(void)
 	 */
 	__raw_spin_lock(&sync_lock);
 	prev = last_tsc;
-	now = get_cycles_sync();
+	now = get_cycles();
 	last_tsc = now;
 	__raw_spin_unlock(&sync_lock);
 
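[Note: check_tsc_warp() funnels TSC reads from all CPUs through one spinlock so the readings form a total order; if a later reading is smaller than an earlier one, the TSCs have "warped" relative to each other. A compressed user-space sketch of that idea follows, with a pthread mutex standing in for the kernel's __raw_spin_lock() and hypothetical helper names.]

#include <stdint.h>
#include <pthread.h>

static pthread_mutex_t sync_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t last_tsc;
static unsigned int nr_warps;

static inline uint64_t rdtsc(void)
{
	uint32_t lo, hi;
	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
	return ((uint64_t)hi << 32) | lo;
}

/* One round of the check: serialize all readers through the lock and
 * compare against the last value any thread recorded. A reading that
 * is smaller than its predecessor means the counters disagree. */
static void check_warp_once(void)
{
	uint64_t prev, now;

	pthread_mutex_lock(&sync_lock);
	prev = last_tsc;
	now = rdtsc();
	last_tsc = now;
	if (now < prev)		/* time appeared to go backwards */
		nr_warps++;
	pthread_mutex_unlock(&sync_lock);
}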
diff --git a/include/asm-x86/tsc.h b/include/asm-x86/tsc.h
index 401303724130..f51a50da35aa 100644
--- a/include/asm-x86/tsc.h
+++ b/include/asm-x86/tsc.h
@@ -36,62 +36,18 @@ static inline cycles_t get_cycles(void)
 	return ret;
 }
 
-/* Like get_cycles, but make sure the CPU is synchronized. */
-static __always_inline cycles_t __get_cycles_sync(void)
+static inline cycles_t vget_cycles(void)
 {
-	unsigned long long ret;
-	unsigned eax, edx;
-
 	/*
-	 * Use RDTSCP if possible; it is guaranteed to be synchronous
-	 * and doesn't cause a VMEXIT on Hypervisors
+	 * We only do VDSOs on TSC capable CPUs, so this shouldnt
+	 * access boot_cpu_data (which is not VDSO-safe):
 	 */
-	alternative_io(ASM_NOP3, ".byte 0x0f,0x01,0xf9", X86_FEATURE_RDTSCP,
-		       ASM_OUTPUT2("=a" (eax), "=d" (edx)),
-		       "a" (0U), "d" (0U) : "ecx", "memory");
-	ret = (((unsigned long long)edx) << 32) | ((unsigned long long)eax);
-	if (ret)
-		return ret;
-
-	/*
-	 * Don't do an additional sync on CPUs where we know
-	 * RDTSC is already synchronous:
-	 */
-	alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC,
-		       "=a" (eax), "0" (1) : "ebx","ecx","edx","memory");
-
-	return 0;
-}
-
-static __always_inline cycles_t get_cycles_sync(void)
-{
-	unsigned long long ret;
-	ret = __get_cycles_sync();
-	if (!ret)
-		rdtscll(ret);
-	return ret;
-}
-
-#ifdef CONFIG_PARAVIRT
-/*
- * For paravirt guests, some functionalities are executed through function
- * pointers in the various pvops structures.
- * These function pointers exist inside the kernel and can not
- * be accessed by user space. To avoid this, we make a copy of the
- * get_cycles_sync (called in kernel) but force the use of native_read_tsc.
- * Ideally, the guest should set up it's own clock and vread
- */
-static __always_inline long long vget_cycles_sync(void)
-{
-	unsigned long long ret;
-	ret = __get_cycles_sync();
-	if (!ret)
-		ret = native_read_tsc();
-	return ret;
-}
-#else
-# define vget_cycles_sync() get_cycles_sync()
+#ifndef CONFIG_X86_TSC
+	if (!cpu_has_tsc)
+		return 0;
 #endif
+	return (cycles_t) native_read_tsc();
+}
 
 extern void tsc_init(void);
 extern void mark_tsc_unstable(char *reason);
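[Note: the net effect of the patch is that get_cycles() and vget_cycles() are now plain, unserialized RDTSC reads. Code that needed the old get_cycles_sync() ordering guarantee would have to serialize explicitly, e.g. with a serializing instruction such as CPUID. A minimal user-space sketch of the difference; the helper names are illustrative.]

#include <stdint.h>
#include <cpuid.h>

/* Raw, unserialized TSC read -- roughly what get_cycles() and
 * native_read_tsc() boil down to after this patch. */
static inline uint64_t rdtsc_raw(void)
{
	uint32_t lo, hi;
	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
	return ((uint64_t)hi << 32) | lo;
}

/* What the removed get_cycles_sync() bought you: a serializing
 * instruction (here CPUID) keeps earlier instructions from being
 * reordered past the TSC read. */
static inline uint64_t rdtsc_serialized(void)
{
	uint32_t eax, ebx, ecx, edx;

	__cpuid(0, eax, ebx, ecx, edx);	/* serialize execution */
	return rdtsc_raw();
}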