author		Andy Lutomirski <luto@mit.edu>		2011-07-14 06:47:22 -0400
committer	H. Peter Anvin <hpa@linux.intel.com>	2011-07-14 20:57:05 -0400
commit		98d0ac38ca7b1b7a552c9a2359174ff84decb600 (patch)
tree		0c244e828f86c779c348a4888ed9e303c3e59811 /arch/x86/vdso/vclock_gettime.c
parent		433bd805e5fd2c731b3a9025b034f066272d336e (diff)
x86-64: Move vread_tsc and vread_hpet into the vDSO
The vsyscall page now consists entirely of trap instructions.

Cc: John Stultz <johnstul@us.ibm.com>
Signed-off-by: Andy Lutomirski <luto@mit.edu>
Link: http://lkml.kernel.org/r/637648f303f2ef93af93bae25186e9a1bea093f5.1310639973.git.luto@mit.edu
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
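With this patch, clock_gettime() and gettimeofday() keep resolving entirely in userspace through the vDSO rather than the legacy vsyscall page. A minimal sketch of exercising that fast path, assuming a glibc that dispatches clock_gettime() to __vdso_clock_gettime (when a vclock mode is available, the loop below never enters the kernel):

/* Sketch: measure the per-call cost of clock_gettime(), which with
 * this patch is served from the vDSO in pure userspace.
 * Assumes glibc's standard vDSO dispatch; not part of the patch. */
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec start, end;
	long i, iters = 10 * 1000 * 1000;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (i = 0; i < iters; i++) {
		struct timespec ts;
		clock_gettime(CLOCK_MONOTONIC, &ts);	/* vDSO fast path */
	}
	clock_gettime(CLOCK_MONOTONIC, &end);

	printf("%.1f ns per call\n",
	       ((end.tv_sec - start.tv_sec) * 1e9 +
	        (end.tv_nsec - start.tv_nsec)) / iters);
	return 0;
}

On a TSC system each iteration should cost tens of nanoseconds; booting with a clocksource that has no vclock mode forces the syscall fallback and makes the same loop noticeably slower.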
Diffstat (limited to 'arch/x86/vdso/vclock_gettime.c')
-rw-r--r--	arch/x86/vdso/vclock_gettime.c	53
1 file changed, 47 insertions(+), 6 deletions(-)
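A note on the vgetns() hunk below: it converts the raw cycle delta to nanoseconds with the clocksource's fixed-point factors, ns = ((cycles - cycle_last) & mask) * mult >> shift. A standalone sketch with hypothetical mult/shift values (the kernel derives the real ones from the clocksource frequency at registration):

/* Standalone sketch of the vgetns() fixed-point conversion.
 * mult and shift here are hypothetical; with mult = 1 << 22 and
 * shift = 22 the factor is exactly 1 ns per cycle (a 1 GHz counter). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cycle_last = 1000000;
	uint64_t cycles     = 1002500;	/* current counter read */
	uint64_t mask       = ~0ULL;	/* full-width counter */
	uint32_t mult       = 1 << 22;	/* hypothetical */
	uint32_t shift      = 22;	/* hypothetical */

	uint64_t v  = (cycles - cycle_last) & mask;
	uint64_t ns = (v * mult) >> shift;	/* same math as vgetns() */

	printf("%llu cycles -> %llu ns\n",
	       (unsigned long long)v, (unsigned long long)ns);
	return 0;
}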
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index cf54813ac527..8792d6e0a2c3 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -17,6 +17,7 @@
 #include <linux/time.h>
 #include <linux/string.h>
 #include <asm/vsyscall.h>
+#include <asm/fixmap.h>
 #include <asm/vgtod.h>
 #include <asm/timex.h>
 #include <asm/hpet.h>
@@ -25,6 +26,43 @@
 
 #define gtod (&VVAR(vsyscall_gtod_data))
 
+notrace static cycle_t vread_tsc(void)
+{
+	cycle_t ret;
+	u64 last;
+
+	/*
+	 * Empirically, a fence (of type that depends on the CPU)
+	 * before rdtsc is enough to ensure that rdtsc is ordered
+	 * with respect to loads. The various CPU manuals are unclear
+	 * as to whether rdtsc can be reordered with later loads,
+	 * but no one has ever seen it happen.
+	 */
+	rdtsc_barrier();
+	ret = (cycle_t)vget_cycles();
+
+	last = VVAR(vsyscall_gtod_data).clock.cycle_last;
+
+	if (likely(ret >= last))
+		return ret;
+
+	/*
+	 * GCC likes to generate cmov here, but this branch is extremely
+	 * predictable (it's just a function of time and the likely is
+	 * very likely) and there's a data dependence, so force GCC
+	 * to generate a branch instead. I don't barrier() because
+	 * we don't actually need a barrier, and if this function
+	 * ever gets inlined it will generate worse code.
+	 */
+	asm volatile ("");
+	return last;
+}
+
+static notrace cycle_t vread_hpet(void)
+{
+	return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
+}
+
 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 {
 	long ret;
@@ -36,9 +74,12 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 notrace static inline long vgetns(void)
 {
 	long v;
-	cycles_t (*vread)(void);
-	vread = gtod->clock.vread;
-	v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
+	cycles_t cycles;
+	if (gtod->clock.vclock_mode == VCLOCK_TSC)
+		cycles = vread_tsc();
+	else
+		cycles = vread_hpet();
+	v = (cycles - gtod->clock.cycle_last) & gtod->clock.mask;
 	return (v * gtod->clock.mult) >> gtod->clock.shift;
 }
 
@@ -118,11 +159,11 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
 {
 	switch (clock) {
 	case CLOCK_REALTIME:
-		if (likely(gtod->clock.vread))
+		if (likely(gtod->clock.vclock_mode != VCLOCK_NONE))
 			return do_realtime(ts);
 		break;
 	case CLOCK_MONOTONIC:
-		if (likely(gtod->clock.vread))
+		if (likely(gtod->clock.vclock_mode != VCLOCK_NONE))
 			return do_monotonic(ts);
 		break;
 	case CLOCK_REALTIME_COARSE:
@@ -139,7 +180,7 @@ int clock_gettime(clockid_t, struct timespec *)
 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 {
 	long ret;
-	if (likely(gtod->clock.vread)) {
+	if (likely(gtod->clock.vclock_mode != VCLOCK_NONE)) {
 		if (likely(tv != NULL)) {
 			BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
 				     offsetof(struct timespec, tv_nsec) ||