author	John Stultz <johnstul@us.ibm.com>	2009-08-19 22:13:34 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2009-08-21 15:43:46 -0400
commit	da15cfdae03351c689736f8d142618592e3cebc3 (patch)
tree	497fe3f77e27fa9cf0a484422c7bc382031df1bd	/arch/x86/vdso/vclock_gettime.c
parent	8cab02dc3c58a12235c6d463ce684dded9696848 (diff)
time: Introduce CLOCK_REALTIME_COARSE
After talking with some application writers who want very fast, but not
fine-grained timestamps, I decided to try to implement new clock ids for
clock_gettime(): CLOCK_REALTIME_COARSE and CLOCK_MONOTONIC_COARSE, which
return the time at the last tick. This is very fast as we don't have to
access any hardware (which can be very painful if you're using something
like the acpi_pm clocksource), and we can even use the vdso
clock_gettime() method to avoid the syscall. The only trade-off is that
you only get low-res, tick-grained time resolution.

This isn't a new idea; I know Ingo has a patch in the -rt tree that made
the vsyscall gettimeofday() return coarse-grained time when the
vsyscall64 sysctl was set to 2. However, that affects all applications
on a system. With this method, applications can choose the proper
speed/granularity trade-off for themselves.

Signed-off-by: John Stultz <johnstul@us.ibm.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: nikolag@ca.ibm.com
Cc: Darren Hart <dvhltc@us.ibm.com>
Cc: arjan@infradead.org
Cc: jonathan@jonmasters.org
LKML-Reference: <1250734414.6897.5.camel@localhost.localdomain>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/vdso/vclock_gettime.c')
-rw-r--r--	arch/x86/vdso/vclock_gettime.c	39
1 file changed, 36 insertions(+), 3 deletions(-)
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 6a40b78b46a..ee55754cc3c 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -86,14 +86,47 @@ notrace static noinline int do_monotonic(struct timespec *ts)
 	return 0;
 }
 
+notrace static noinline int do_realtime_coarse(struct timespec *ts)
+{
+	unsigned long seq;
+	do {
+		seq = read_seqbegin(&gtod->lock);
+		ts->tv_sec = gtod->wall_time_coarse.tv_sec;
+		ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
+	} while (unlikely(read_seqretry(&gtod->lock, seq)));
+	return 0;
+}
+
+notrace static noinline int do_monotonic_coarse(struct timespec *ts)
+{
+	unsigned long seq, ns, secs;
+	do {
+		seq = read_seqbegin(&gtod->lock);
+		secs = gtod->wall_time_coarse.tv_sec;
+		ns = gtod->wall_time_coarse.tv_nsec;
+		secs += gtod->wall_to_monotonic.tv_sec;
+		ns += gtod->wall_to_monotonic.tv_nsec;
+	} while (unlikely(read_seqretry(&gtod->lock, seq)));
+	vset_normalized_timespec(ts, secs, ns);
+	return 0;
+}
+
 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
 {
-	if (likely(gtod->sysctl_enabled && gtod->clock.vread))
+	if (likely(gtod->sysctl_enabled))
 		switch (clock) {
 		case CLOCK_REALTIME:
-			return do_realtime(ts);
+			if (likely(gtod->clock.vread))
+				return do_realtime(ts);
+			break;
 		case CLOCK_MONOTONIC:
-			return do_monotonic(ts);
+			if (likely(gtod->clock.vread))
+				return do_monotonic(ts);
+			break;
+		case CLOCK_REALTIME_COARSE:
+			return do_realtime_coarse(ts);
+		case CLOCK_MONOTONIC_COARSE:
+			return do_monotonic_coarse(ts);
 		}
 	return vdso_fallback_gettime(clock, ts);
 }