author     Stefani Seibold <stefani@seibold.net>       2014-03-17 18:22:03 -0400
committer  H. Peter Anvin <hpa@linux.intel.com>        2014-03-18 15:51:59 -0400
commit     411f790cd7e91fac0db80d3cf789cb6deeac298e
tree       f80032a07195711a6b1e63c1080749f2e24bcbaa /arch/x86/vdso
parent     3935ed6a3a533c1736e3ca65bff72afd1773be27
x86, vdso: Revamp vclock_gettime.c
This intermediate patch revamps vclock_gettime.c by moving some functions around. It is only for splitting purposes, to make the whole 32-bit vdso timer patch easier to review.

Reviewed-by: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Stefani Seibold <stefani@seibold.net>
Link: http://lkml.kernel.org/r/1395094933-14252-4-git-send-email-stefani@seibold.net
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86/vdso')
-rw-r--r--  arch/x86/vdso/vclock_gettime.c | 85
1 file changed, 42 insertions(+), 43 deletions(-)
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index eb5d7a56f8d4..bbc80657050f 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -26,41 +26,26 @@
 
 #define gtod (&VVAR(vsyscall_gtod_data))
 
-notrace static cycle_t vread_tsc(void)
+static notrace cycle_t vread_hpet(void)
 {
-	cycle_t ret;
-	u64 last;
-
-	/*
-	 * Empirically, a fence (of type that depends on the CPU)
-	 * before rdtsc is enough to ensure that rdtsc is ordered
-	 * with respect to loads.  The various CPU manuals are unclear
-	 * as to whether rdtsc can be reordered with later loads,
-	 * but no one has ever seen it happen.
-	 */
-	rdtsc_barrier();
-	ret = (cycle_t)vget_cycles();
-
-	last = VVAR(vsyscall_gtod_data).clock.cycle_last;
-
-	if (likely(ret >= last))
-		return ret;
+	return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + HPET_COUNTER);
+}
 
-	/*
-	 * GCC likes to generate cmov here, but this branch is extremely
-	 * predictable (it's just a funciton of time and the likely is
-	 * very likely) and there's a data dependence, so force GCC
-	 * to generate a branch instead.  I don't barrier() because
-	 * we don't actually need a barrier, and if this function
-	 * ever gets inlined it will generate worse code.
-	 */
-	asm volatile ("");
-	return last;
+notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
+{
+	long ret;
+	asm("syscall" : "=a" (ret) :
+	    "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
+	return ret;
 }
 
-static notrace cycle_t vread_hpet(void)
+notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
 {
-	return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + HPET_COUNTER);
+	long ret;
+
+	asm("syscall" : "=a" (ret) :
+	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
+	return ret;
 }
 
 #ifdef CONFIG_PARAVIRT_CLOCK
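For reference, the fallback helpers that this hunk moves to the top of the file can also be written as ordinary user-space code. The sketch below is not part of the patch; it is a minimal, hypothetical standalone version of the same raw-syscall pattern, assuming an x86-64 target and <asm/unistd_64.h> for the syscall number. Unlike the vDSO code, a general-purpose version must also clobber rcx and r11, which the syscall instruction overwrites.

/* Hypothetical standalone illustration (not from the patch): the same
 * raw clock_gettime syscall pattern as vdso_fallback_gettime().
 * Returns 0 on success or a negative errno value, like the raw syscall. */
#include <asm/unistd_64.h>	/* __NR_clock_gettime */
#include <time.h>		/* struct timespec */

static long raw_clock_gettime(long clock, struct timespec *ts)
{
	long ret;

	/* rax = syscall number, rdi = clock id, rsi = timespec pointer. */
	asm volatile ("syscall"
		      : "=a" (ret)
		      : "0" (__NR_clock_gettime), "D" (clock), "S" (ts)
		      : "rcx", "r11", "memory");
	return ret;
}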
@@ -133,23 +118,37 @@ static notrace cycle_t vread_pvclock(int *mode)
 }
 #endif
 
-notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
+notrace static cycle_t vread_tsc(void)
 {
-	long ret;
-	asm("syscall" : "=a" (ret) :
-	    "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
-	return ret;
-}
+	cycle_t ret;
+	u64 last;
 
-notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
-{
-	long ret;
+	/*
+	 * Empirically, a fence (of type that depends on the CPU)
+	 * before rdtsc is enough to ensure that rdtsc is ordered
+	 * with respect to loads.  The various CPU manuals are unclear
+	 * as to whether rdtsc can be reordered with later loads,
+	 * but no one has ever seen it happen.
+	 */
+	rdtsc_barrier();
+	ret = (cycle_t)vget_cycles();
 
-	asm("syscall" : "=a" (ret) :
-	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
-	return ret;
-}
+	last = VVAR(vsyscall_gtod_data).clock.cycle_last;
 
+	if (likely(ret >= last))
+		return ret;
+
+	/*
+	 * GCC likes to generate cmov here, but this branch is extremely
+	 * predictable (it's just a funciton of time and the likely is
+	 * very likely) and there's a data dependence, so force GCC
+	 * to generate a branch instead.  I don't barrier() because
+	 * we don't actually need a barrier, and if this function
+	 * ever gets inlined it will generate worse code.
+	 */
+	asm volatile ("");
+	return last;
+}
 
 notrace static inline u64 vgetsns(int *mode)
 {
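As a usage note (not part of the patch): on x86-64, glibc resolves clock_gettime() and gettimeofday() to the vDSO entry points built from this file, so a plain library call normally reads the clocksource data entirely in user space and only takes the vdso_fallback_* syscall path when no usable vclock mode is available. A minimal caller looks like the sketch below, assuming a glibc recent enough (2.17+) that no -lrt is needed.

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	/* Normally served in user space via the vDSO's clock_gettime entry. */
	if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) {
		perror("clock_gettime");
		return 1;
	}
	printf("monotonic: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}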