diff options
author | Andi Kleen <ak@suse.de> | 2008-01-30 07:32:39 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-01-30 07:32:39 -0500 |
commit | 6d63de8dbcda98511206897562ecfcdacf18f523 (patch) | |
tree | f845f109636c3cc7eddad455b89ffb986a1188d0 /include/asm-x86/tsc.h | |
parent | f06e4ec1c15691b0cfd2397ae32214fa36c90d71 (diff) |
x86: remove get_cycles_sync
rdtsc is now speculation-safe, so no need for the sync variants of
the APIs.
[ mingo@elte.hu: removed the nsec_barrier() complication. ]
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'include/asm-x86/tsc.h')
-rw-r--r-- | include/asm-x86/tsc.h | 60 |
1 file changed, 8 insertions, 52 deletions
diff --git a/include/asm-x86/tsc.h b/include/asm-x86/tsc.h index 40130372413..f51a50da35a 100644 --- a/include/asm-x86/tsc.h +++ b/include/asm-x86/tsc.h | |||
@@ -36,62 +36,18 @@ static inline cycles_t get_cycles(void) | |||
36 | return ret; | 36 | return ret; |
37 | } | 37 | } |
38 | 38 | ||
39 | /* Like get_cycles, but make sure the CPU is synchronized. */ | 39 | static inline cycles_t vget_cycles(void) |
40 | static __always_inline cycles_t __get_cycles_sync(void) | ||
41 | { | 40 | { |
42 | unsigned long long ret; | ||
43 | unsigned eax, edx; | ||
44 | |||
45 | /* | 41 | /* |
46 | * Use RDTSCP if possible; it is guaranteed to be synchronous | 42 | * We only do VDSOs on TSC capable CPUs, so this shouldnt |
47 | * and doesn't cause a VMEXIT on Hypervisors | 43 | * access boot_cpu_data (which is not VDSO-safe): |
48 | */ | 44 | */ |
49 | alternative_io(ASM_NOP3, ".byte 0x0f,0x01,0xf9", X86_FEATURE_RDTSCP, | 45 | #ifndef CONFIG_X86_TSC |
50 | ASM_OUTPUT2("=a" (eax), "=d" (edx)), | 46 | if (!cpu_has_tsc) |
51 | "a" (0U), "d" (0U) : "ecx", "memory"); | 47 | return 0; |
52 | ret = (((unsigned long long)edx) << 32) | ((unsigned long long)eax); | ||
53 | if (ret) | ||
54 | return ret; | ||
55 | |||
56 | /* | ||
57 | * Don't do an additional sync on CPUs where we know | ||
58 | * RDTSC is already synchronous: | ||
59 | */ | ||
60 | alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC, | ||
61 | "=a" (eax), "0" (1) : "ebx","ecx","edx","memory"); | ||
62 | |||
63 | return 0; | ||
64 | } | ||
65 | |||
66 | static __always_inline cycles_t get_cycles_sync(void) | ||
67 | { | ||
68 | unsigned long long ret; | ||
69 | ret = __get_cycles_sync(); | ||
70 | if (!ret) | ||
71 | rdtscll(ret); | ||
72 | return ret; | ||
73 | } | ||
74 | |||
75 | #ifdef CONFIG_PARAVIRT | ||
76 | /* | ||
77 | * For paravirt guests, some functionalities are executed through function | ||
78 | * pointers in the various pvops structures. | ||
79 | * These function pointers exist inside the kernel and can not | ||
80 | * be accessed by user space. To avoid this, we make a copy of the | ||
81 | * get_cycles_sync (called in kernel) but force the use of native_read_tsc. | ||
82 | * Ideally, the guest should set up it's own clock and vread | ||
83 | */ | ||
84 | static __always_inline long long vget_cycles_sync(void) | ||
85 | { | ||
86 | unsigned long long ret; | ||
87 | ret = __get_cycles_sync(); | ||
88 | if (!ret) | ||
89 | ret = native_read_tsc(); | ||
90 | return ret; | ||
91 | } | ||
92 | #else | ||
93 | # define vget_cycles_sync() get_cycles_sync() | ||
94 | #endif | 48 | #endif |
49 | return (cycles_t) native_read_tsc(); | ||
50 | } | ||
95 | 51 | ||
96 | extern void tsc_init(void); | 52 | extern void tsc_init(void); |
97 | extern void mark_tsc_unstable(char *reason); | 53 | extern void mark_tsc_unstable(char *reason); |