author     Chris Metcalf <cmetcalf@tilera.com>   2014-10-02 10:32:15 -0400
committer  Chris Metcalf <cmetcalf@tilera.com>   2014-10-02 13:56:07 -0400
commit     78410af51146796f783925009c8676a30d6c6d90 (patch)
tree       9d1acdab6ef79c6ce121c7ea7cd5c90d25d52c2d /arch/tile
parent     94fb1afbcb3e1f8666c9065baded2cb66e72126f (diff)
tile: add clock_gettime support to vDSO
This change adds support for clock_gettime with CLOCK_REALTIME
and CLOCK_MONOTONIC using vDSO.  It also updates the vdso struct
nomenclature used for the clocks to match the x86 code, to keep it
easier to update going forward.

We also support the *_COARSE clockid_t, for apps that want speed
but aren't concerned about fine-grained timestamps; this saves
about 20 cycles per call (see http://lwn.net/Articles/342018/).

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Acked-by: John Stultz <john.stultz@linaro.org>
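
For context, a minimal userspace sketch (not part of this patch) of the
call the new vDSO path accelerates; it is illustrative only and assumes a
libc that routes clock_gettime() through the vDSO:

        #include <stdio.h>
        #include <time.h>

        int main(void)
        {
                struct timespec ts;

                /* On tile, after this change, the call below is satisfied
                 * in the vDSO without entering the kernel; the *_COARSE
                 * clock trades resolution for roughly 20 fewer cycles. */
                if (clock_gettime(CLOCK_MONOTONIC_COARSE, &ts) != 0)
                        return 1;
                printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
                return 0;
        }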
Diffstat (limited to 'arch/tile')
-rw-r--r--  arch/tile/include/asm/vdso.h           |  15
-rw-r--r--  arch/tile/kernel/time.c                |  45
-rw-r--r--  arch/tile/kernel/vdso/vdso.lds.S       |   2
-rw-r--r--  arch/tile/kernel/vdso/vgettimeofday.c  | 145
4 files changed, 172 insertions(+), 35 deletions(-)
diff --git a/arch/tile/include/asm/vdso.h b/arch/tile/include/asm/vdso.h
index d64b0d58a7e9..9b069692153f 100644
--- a/arch/tile/include/asm/vdso.h
+++ b/arch/tile/include/asm/vdso.h
@@ -29,13 +29,18 @@
 struct vdso_data {
         seqcount_t tz_seq;      /* Timezone seqlock */
         seqcount_t tb_seq;      /* Timebase seqlock */
-        __u64 xtime_tod_stamp;  /* TOD clock for xtime */
-        __u64 xtime_clock_sec;  /* Kernel time second */
-        __u64 xtime_clock_nsec; /* Kernel time nanosecond */
-        __u64 wtom_clock_sec;   /* Wall to monotonic clock second */
-        __u64 wtom_clock_nsec;  /* Wall to monotonic clock nanosecond */
+        __u64 cycle_last;       /* TOD clock for xtime */
+        __u64 mask;             /* Cycle mask */
         __u32 mult;             /* Cycle to nanosecond multiplier */
         __u32 shift;            /* Cycle to nanosecond divisor (power of two) */
+        __u64 wall_time_sec;
+        __u64 wall_time_snsec;
+        __u64 monotonic_time_sec;
+        __u64 monotonic_time_snsec;
+        __u64 wall_time_coarse_sec;
+        __u64 wall_time_coarse_nsec;
+        __u64 monotonic_time_coarse_sec;
+        __u64 monotonic_time_coarse_nsec;
         __u32 tz_minuteswest;   /* Minutes west of Greenwich */
         __u32 tz_dsttime;       /* Type of dst correction */
 };
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index 2fe8323db77e..c1b362277fb7 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -257,21 +257,44 @@ void update_vsyscall_tz(void)
 
 void update_vsyscall(struct timekeeper *tk)
 {
-        struct timespec *wtm = &tk->wall_to_monotonic;
-        struct clocksource *clock = tk->tkr.clock;
-
-        if (clock != &cycle_counter_cs)
+        if (tk->tkr.clock != &cycle_counter_cs)
                 return;
 
         write_seqcount_begin(&vdso_data->tb_seq);
 
-        vdso_data->xtime_tod_stamp = tk->tkr.cycle_last;
-        vdso_data->xtime_clock_sec = tk->xtime_sec;
-        vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec;
-        vdso_data->wtom_clock_sec = wtm->tv_sec;
-        vdso_data->wtom_clock_nsec = wtm->tv_nsec;
-        vdso_data->mult = tk->tkr.mult;
-        vdso_data->shift = tk->tkr.shift;
+        vdso_data->cycle_last = tk->tkr.cycle_last;
+        vdso_data->mask = tk->tkr.mask;
+        vdso_data->mult = tk->tkr.mult;
+        vdso_data->shift = tk->tkr.shift;
+
+        vdso_data->wall_time_sec = tk->xtime_sec;
+        vdso_data->wall_time_snsec = tk->tkr.xtime_nsec;
+
+        vdso_data->monotonic_time_sec = tk->xtime_sec
+                + tk->wall_to_monotonic.tv_sec;
+        vdso_data->monotonic_time_snsec = tk->tkr.xtime_nsec
+                + ((u64)tk->wall_to_monotonic.tv_nsec
+                   << tk->tkr.shift);
+        while (vdso_data->monotonic_time_snsec >=
+               (((u64)NSEC_PER_SEC) << tk->tkr.shift)) {
+                vdso_data->monotonic_time_snsec -=
+                        ((u64)NSEC_PER_SEC) << tk->tkr.shift;
+                vdso_data->monotonic_time_sec++;
+        }
+
+        vdso_data->wall_time_coarse_sec = tk->xtime_sec;
+        vdso_data->wall_time_coarse_nsec = (long)(tk->tkr.xtime_nsec >>
+                                                  tk->tkr.shift);
+
+        vdso_data->monotonic_time_coarse_sec =
+                vdso_data->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
+        vdso_data->monotonic_time_coarse_nsec =
+                vdso_data->wall_time_coarse_nsec + tk->wall_to_monotonic.tv_nsec;
+
+        while (vdso_data->monotonic_time_coarse_nsec >= NSEC_PER_SEC) {
+                vdso_data->monotonic_time_coarse_nsec -= NSEC_PER_SEC;
+                vdso_data->monotonic_time_coarse_sec++;
+        }
 
         write_seqcount_end(&vdso_data->tb_seq);
 }
diff --git a/arch/tile/kernel/vdso/vdso.lds.S b/arch/tile/kernel/vdso/vdso.lds.S
index 041cd6c39c83..731529f3f06f 100644
--- a/arch/tile/kernel/vdso/vdso.lds.S
+++ b/arch/tile/kernel/vdso/vdso.lds.S
@@ -82,6 +82,8 @@ VERSION
                 __vdso_rt_sigreturn;
                 __vdso_gettimeofday;
                 gettimeofday;
+                __vdso_clock_gettime;
+                clock_gettime;
         local:*;
         };
 }
diff --git a/arch/tile/kernel/vdso/vgettimeofday.c b/arch/tile/kernel/vdso/vgettimeofday.c
index 7cff8fbac4f0..8bb21eda07d8 100644
--- a/arch/tile/kernel/vdso/vgettimeofday.c
+++ b/arch/tile/kernel/vdso/vgettimeofday.c
@@ -15,6 +15,7 @@
 #define VDSO_BUILD  /* avoid some shift warnings for -m32 in <asm/page.h> */
 #include <linux/time.h>
 #include <asm/timex.h>
+#include <asm/unistd.h>
 #include <asm/vdso.h>
 
 #if CHIP_HAS_SPLIT_CYCLE()
@@ -35,6 +36,11 @@ static inline cycles_t get_cycles_inline(void)
 #define get_cycles get_cycles_inline
 #endif
 
+struct syscall_return_value {
+        long value;
+        long error;
+};
+
 /*
  * Find out the vDSO data page address in the process address space.
  */
@@ -50,11 +56,82 @@ inline unsigned long get_datapage(void)
         return ret;
 }
 
-int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
+static inline u64 vgetsns(struct vdso_data *vdso)
+{
+        return ((get_cycles() - vdso->cycle_last) & vdso->mask) * vdso->mult;
+}
+
+static inline int do_realtime(struct vdso_data *vdso, struct timespec *ts)
+{
+        unsigned count;
+        u64 ns;
+
+        do {
+                count = read_seqcount_begin(&vdso->tb_seq);
+                ts->tv_sec = vdso->wall_time_sec;
+                ns = vdso->wall_time_snsec;
+                ns += vgetsns(vdso);
+                ns >>= vdso->shift;
+        } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
+
+        ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+        ts->tv_nsec = ns;
+
+        return 0;
+}
+
+static inline int do_monotonic(struct vdso_data *vdso, struct timespec *ts)
+{
+        unsigned count;
+        u64 ns;
+
+        do {
+                count = read_seqcount_begin(&vdso->tb_seq);
+                ts->tv_sec = vdso->monotonic_time_sec;
+                ns = vdso->monotonic_time_snsec;
+                ns += vgetsns(vdso);
+                ns >>= vdso->shift;
+        } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
+
+        ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+        ts->tv_nsec = ns;
+
+        return 0;
+}
+
+static inline int do_realtime_coarse(struct vdso_data *vdso,
+                                     struct timespec *ts)
+{
+        unsigned count;
+
+        do {
+                count = read_seqcount_begin(&vdso->tb_seq);
+                ts->tv_sec = vdso->wall_time_coarse_sec;
+                ts->tv_nsec = vdso->wall_time_coarse_nsec;
+        } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
+
+        return 0;
+}
+
+static inline int do_monotonic_coarse(struct vdso_data *vdso,
+                                      struct timespec *ts)
 {
-        cycles_t cycles;
         unsigned count;
-        unsigned long sec, ns;
+
+        do {
+                count = read_seqcount_begin(&vdso->tb_seq);
+                ts->tv_sec = vdso->monotonic_time_coarse_sec;
+                ts->tv_nsec = vdso->monotonic_time_coarse_nsec;
+        } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
+
+        return 0;
+}
+
+struct syscall_return_value __vdso_gettimeofday(struct timeval *tv,
+                                                struct timezone *tz)
+{
+        struct syscall_return_value ret = { 0, 0 };
+        unsigned count;
         struct vdso_data *vdso = (struct vdso_data *)get_datapage();
 
         /* The use of the timezone is obsolete, normally tz is NULL. */
@@ -67,25 +144,55 @@ int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
         }
 
         if (unlikely(tv == NULL))
-                return 0;
+                return ret;
 
-        do {
-                count = read_seqcount_begin(&vdso->tb_seq);
-                sec = vdso->xtime_clock_sec;
-                cycles = get_cycles() - vdso->xtime_tod_stamp;
-                ns = (cycles * vdso->mult) + vdso->xtime_clock_nsec;
-                ns >>= vdso->shift;
-                if (ns >= NSEC_PER_SEC) {
-                        ns -= NSEC_PER_SEC;
-                        sec += 1;
-                }
-        } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
+        do_realtime(vdso, (struct timespec *)tv);
+        tv->tv_usec /= 1000;
 
-        tv->tv_sec = sec;
-        tv->tv_usec = ns / 1000;
-
-        return 0;
+        return ret;
 }
 
 int gettimeofday(struct timeval *tv, struct timezone *tz)
         __attribute__((weak, alias("__vdso_gettimeofday")));
+
+static struct syscall_return_value vdso_fallback_gettime(long clock,
+                                                         struct timespec *ts)
+{
+        struct syscall_return_value ret;
+        __asm__ __volatile__ (
+                "swint1"
+                : "=R00" (ret.value), "=R01" (ret.error)
+                : "R10" (__NR_clock_gettime), "R00" (clock), "R01" (ts)
+                : "r2", "r3", "r4", "r5", "r6", "r7",
+                  "r8", "r9", "r11", "r12", "r13", "r14", "r15",
+                  "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+                  "r24", "r25", "r26", "r27", "r28", "r29", "memory");
+        return ret;
+}
+
+struct syscall_return_value __vdso_clock_gettime(clockid_t clock,
+                                                 struct timespec *ts)
+{
+        struct vdso_data *vdso = (struct vdso_data *)get_datapage();
+        struct syscall_return_value ret = { 0, 0 };
+
+        switch (clock) {
+        case CLOCK_REALTIME:
+                do_realtime(vdso, ts);
+                return ret;
+        case CLOCK_MONOTONIC:
+                do_monotonic(vdso, ts);
+                return ret;
+        case CLOCK_REALTIME_COARSE:
+                do_realtime_coarse(vdso, ts);
+                return ret;
+        case CLOCK_MONOTONIC_COARSE:
+                do_monotonic_coarse(vdso, ts);
+                return ret;
+        default:
+                return vdso_fallback_gettime(clock, ts);
+        }
+}
+
+int clock_gettime(clockid_t clock, struct timespec *ts)
+        __attribute__((weak, alias("__vdso_clock_gettime")));