path: root/arch/powerpc/kernel/vdso32/gettimeofday.S
author     Benjamin Herrenschmidt <benh@kernel.crashing.org>   2005-11-11 05:15:21 -0500
committer  Paul Mackerras <paulus@samba.org>                   2005-11-11 06:25:39 -0500
commit     a7f290dad32ee34d931561b7943c858fe2aae503 (patch)
tree       850f04ed9ffba8aef6e151fa9c9e8a0c667bb795 /arch/powerpc/kernel/vdso32/gettimeofday.S
parent     6761c4a07378e19e3710bb69cea65795774529b1 (diff)
[PATCH] powerpc: Merge vdso's and add vdso support to 32 bits kernel

This patch moves the vdso's to arch/powerpc, adds support for the 32 bits
vdso to the 32 bits kernel, renames systemcfg (finally!), and adds some new
(still untested) routines to both vdso's: clock_gettime() with support for
CLOCK_REALTIME and CLOCK_MONOTONIC, clock_getres() (same clocks) and
get_tbfreq() for glibc to retrieve the timebase frequency.

Tom, Steve: the implementation of get_tbfreq() I've done for 32 bits returns
a long long (r3, r4), not a long.  This is so that if we ever add support
for >4GHz timebases on ppc32, the userland interface won't have to change.

I have tested gettimeofday() using some glibc patches in both ppc32 and
ppc64 kernels using 32 bits userland (I haven't had a chance to test a
64 bits userland yet, but the implementation didn't change and was tested
earlier).  I haven't tested the new functions yet.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
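
For reference, a minimal sketch (not part of this patch) of how a libc might
use the new __kernel_clock_gettime entry point, assuming it has already
resolved the symbol from the vDSO image handed over via the AT_SYSINFO_EHDR
auxv entry; the wrapper name and the function pointer below are hypothetical:

    #include <time.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    /* Prototype matching the vDSO entry point added by this patch. */
    typedef int (*vdso_clock_gettime_t)(clockid_t clock_id, struct timespec *tp);

    /* Hypothetical pointer, filled in after locating __kernel_clock_gettime
     * in the vDSO ELF image. */
    static vdso_clock_gettime_t vdso_clock_gettime;

    int my_clock_gettime(clockid_t id, struct timespec *tp)
    {
            if (vdso_clock_gettime)
                    return vdso_clock_gettime(id, tp);   /* fast path, no trap */
            return syscall(SYS_clock_gettime, id, tp);   /* plain syscall fallback */
    }
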
Diffstat (limited to 'arch/powerpc/kernel/vdso32/gettimeofday.S')
-rw-r--r--  arch/powerpc/kernel/vdso32/gettimeofday.S  315
1 file changed, 315 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
new file mode 100644
index 000000000000..aeb5fc9b87b3
--- /dev/null
+++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
@@ -0,0 +1,315 @@
/*
 * Userland implementation of gettimeofday() for 32 bits processes in a
 * ppc64 kernel for use in the vDSO
 *
 * Copyright (C) 2004 Benjamin Herrenschmidt (benh@kernel.crashing.org,
 *                    IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>

        .text
/*
 * Exact prototype of gettimeofday
 *
 * int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz);
 *
 */
V_FUNCTION_BEGIN(__kernel_gettimeofday)
  .cfi_startproc
        mflr    r12
  .cfi_register lr,r12

        mr      r10,r3                  /* r10 saves tv */
        mr      r11,r4                  /* r11 saves tz */
        bl      __get_datapage@local    /* get data page */
        mr      r9,r3                   /* datapage ptr in r9 */
        bl      __do_get_xsec@local     /* get xsec from tb & kernel */
        bne-    2f                      /* out of line -> do syscall */

        /* seconds are xsec >> 20 */
        rlwinm  r5,r4,12,20,31
        rlwimi  r5,r3,12,0,19
        stw     r5,TVAL32_TV_SEC(r10)

        /* get remaining xsec and convert to usec. we scale
         * up remaining xsec by 12 bits and get the top 32 bits
         * of the multiplication
         */
        rlwinm  r5,r4,12,0,19
        lis     r6,1000000@h
        ori     r6,r6,1000000@l
        mulhwu  r5,r5,r6
        stw     r5,TVAL32_TV_USEC(r10)

        cmpli   cr0,r11,0               /* check if tz is NULL */
        beq     1f
        lwz     r4,CFG_TZ_MINUTEWEST(r9) /* fill tz */
        lwz     r5,CFG_TZ_DSTTIME(r9)
        stw     r4,TZONE_TZ_MINWEST(r11)
        stw     r5,TZONE_TZ_DSTTIME(r11)

1:      mtlr    r12
        li      r3,0
        blr

2:
        mtlr    r12
        mr      r3,r10
        mr      r4,r11
        li      r0,__NR_gettimeofday
        sc
        blr
  .cfi_endproc
V_FUNCTION_END(__kernel_gettimeofday)
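
        /* (Annotation, not part of the original patch.)  The conversion
         * above is fixed-point arithmetic on "xsec" values, i.e. time in
         * units of 2^-20 seconds.  In C terms, roughly:
         *      sec  = xsec >> 20;
         *      frac = ((u32)xsec & 0xfffff) << 12;   32-bit fraction of a
         *                                            second, scaled by 2^32
         *      usec = (u32)(((u64)frac * 1000000) >> 32);
         * The clock_gettime() paths below do the same and then multiply
         * the result by 1000 to get nanoseconds.
         */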

/*
 * Exact prototype of clock_gettime()
 *
 * int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp);
 *
 */
V_FUNCTION_BEGIN(__kernel_clock_gettime)
  .cfi_startproc
        /* Check for supported clock IDs */
        cmpli   cr0,r3,CLOCK_REALTIME
        cmpli   cr1,r3,CLOCK_MONOTONIC
        cror    cr0,cr0,cr1
        bne     cr0,99f

        mflr    r12                     /* r12 saves lr */
  .cfi_register lr,r12
        mr      r10,r3                  /* r10 saves id */
        mr      r11,r4                  /* r11 saves tp */
        bl      __get_datapage@local    /* get data page */
        mr      r9,r3                   /* datapage ptr in r9 */
        beq     cr1,50f                 /* if monotonic -> jump there */

        /*
         * CLOCK_REALTIME
         */

        bl      __do_get_xsec@local     /* get xsec from tb & kernel */
        bne-    98f                     /* out of line -> do syscall */

        /* seconds are xsec >> 20 */
        rlwinm  r5,r4,12,20,31
        rlwimi  r5,r3,12,0,19
        stw     r5,TSPC32_TV_SEC(r11)

        /* get remaining xsec and convert to nsec. we scale
         * up remaining xsec by 12 bits and get the top 32 bits
         * of the multiplication, then we multiply by 1000
         */
        rlwinm  r5,r4,12,0,19
        lis     r6,1000000@h
        ori     r6,r6,1000000@l
        mulhwu  r5,r5,r6
        mulli   r5,r5,1000
        stw     r5,TSPC32_TV_NSEC(r11)
        mtlr    r12
        li      r3,0
        blr

        /*
         * CLOCK_MONOTONIC
         */

50:     bl      __do_get_xsec@local     /* get xsec from tb & kernel */
        bne-    98f                     /* out of line -> do syscall */

        /* seconds are xsec >> 20 */
        rlwinm  r6,r4,12,20,31
        rlwimi  r6,r3,12,0,19

        /* get remaining xsec and convert to nsec. we scale
         * up remaining xsec by 12 bits and get the top 32 bits
         * of the multiplication, then we multiply by 1000
         */
        rlwinm  r7,r4,12,0,19
        lis     r5,1000000@h
        ori     r5,r5,1000000@l
        mulhwu  r7,r7,r5
        mulli   r7,r7,1000

        /* now we must fixup using wall to monotonic. We need to snapshot
         * that value and do the counter trick again. Fortunately, we still
         * have the counter value in r8 that was returned by __do_get_xsec.
         * At this point, r6,r7 contain our sec/nsec values, r3,r4 and r5
         * can be used
         */

        lwz     r3,WTOM_CLOCK_SEC(r9)
        lwz     r4,WTOM_CLOCK_NSEC(r9)

        /* We now have our result in r3,r4. We create a fake dependency
         * on that result and re-check the counter
         */
        or      r5,r4,r3
        xor     r0,r5,r5
        add     r9,r9,r0
#ifdef CONFIG_PPC64
        lwz     r0,(CFG_TB_UPDATE_COUNT+4)(r9)
#else
        lwz     r0,(CFG_TB_UPDATE_COUNT)(r9)
#endif
        cmpl    cr0,r8,r0               /* check if updated */
        bne-    50b

        /* Calculate and store result. Note that this mimics the C code,
         * which may cause funny results if nsec goes negative... is that
         * possible at all ?
         */
        add     r3,r3,r6
        add     r4,r4,r7
        lis     r5,NSEC_PER_SEC@h
        ori     r5,r5,NSEC_PER_SEC@l
        cmpli   cr0,r4,r5
        blt     1f
        subf    r4,r5,r4
        addi    r3,r3,1
1:      stw     r3,TSPC32_TV_SEC(r11)
        stw     r4,TSPC32_TV_NSEC(r11)

        mtlr    r12
        li      r3,0
        blr
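
        /* (Annotation, not part of the original patch.)  In C terms the
         * CLOCK_MONOTONIC path above computes roughly:
         *      sec  += wall_to_monotonic.tv_sec;
         *      nsec += wall_to_monotonic.tv_nsec;
         *      if (nsec >= NSEC_PER_SEC) {
         *              nsec -= NSEC_PER_SEC;
         *              sec++;
         *      }
         * re-reading the update count so that the wall_to_monotonic
         * snapshot is known to be consistent with the xsec value.
         */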

        /*
         * syscall fallback
         */
98:
        mtlr    r12
        mr      r3,r10
        mr      r4,r11
99:
        li      r0,__NR_clock_gettime
        sc
        blr
  .cfi_endproc
V_FUNCTION_END(__kernel_clock_gettime)


/*
 * Exact prototype of clock_getres()
 *
 * int __kernel_clock_getres(clockid_t clock_id, struct timespec *res);
 *
 */
V_FUNCTION_BEGIN(__kernel_clock_getres)
  .cfi_startproc
        /* Check for supported clock IDs */
        cmpwi   cr0,r3,CLOCK_REALTIME
        cmpwi   cr1,r3,CLOCK_MONOTONIC
        cror    cr0,cr0,cr1
        bne     cr0,99f

        li      r3,0
        cmpli   cr0,r4,0
        beqlr
        lis     r5,CLOCK_REALTIME_RES@h
        ori     r5,r5,CLOCK_REALTIME_RES@l
        stw     r3,TSPC32_TV_SEC(r4)
        stw     r5,TSPC32_TV_NSEC(r4)
        blr
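
        /* (Annotation, not part of the original patch.)  Both supported
         * clocks report the same resolution here: res->tv_sec = 0 and
         * res->tv_nsec = CLOCK_REALTIME_RES, a compile-time constant.
         */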

        /*
         * syscall fallback
         */
99:
        li      r0,__NR_clock_getres
        sc
        blr
  .cfi_endproc
V_FUNCTION_END(__kernel_clock_getres)


/*
 * This is the core of gettimeofday() & friends, it returns the xsec
 * value in r3 & r4 and expects the datapage ptr (non clobbered)
 * in r9. clobbers r0,r4,r5,r6,r7,r8.
 * When returning, r8 contains the counter value that can be reused
 * by the monotonic clock implementation
 */
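/*
 * (Annotation, not part of the original patch.)  In C terms the routine
 * computes, with tb_to_xs being the 64-bit "xsec per timebase tick" ratio
 * scaled by 2^64:
 *
 *      ticks = (u32)(tb - tb_orig_stamp);      high part must be zero,
 *                                              else fall back to syscall
 *      xsec  = stamp_xsec + ((ticks * tb_to_xs) >> 64);
 *
 * The whole thing is wrapped in a seqlock-style retry on the update count;
 * the or/xor/add sequences only create data dependencies so the counter
 * re-reads cannot be reordered before the value loads.
 */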
__do_get_xsec:
  .cfi_startproc
        /* Check for update count & load values. We use the low
         * order 32 bits of the update count
         */
#ifdef CONFIG_PPC64
1:      lwz     r8,(CFG_TB_UPDATE_COUNT+4)(r9)
#else
1:      lwz     r8,(CFG_TB_UPDATE_COUNT)(r9)
#endif
        andi.   r0,r8,1                 /* pending update ? loop */
        bne-    1b
        xor     r0,r8,r8                /* create dependency */
        add     r9,r9,r0

        /* Load orig stamp (offset to TB) */
        lwz     r5,CFG_TB_ORIG_STAMP(r9)
        lwz     r6,(CFG_TB_ORIG_STAMP+4)(r9)

        /* Get a stable TB value */
2:      mftbu   r3
        mftbl   r4
        mftbu   r0
        cmpl    cr0,r3,r0
        bne-    2b

        /* Subtract tb orig stamp. If the high part is non-zero, we jump to
         * the slow path which calls the syscall.
         * If it's ok, then we have our 32 bits tb_ticks value in r7
         */
        subfc   r7,r6,r4
        subfe.  r0,r5,r3
        bne-    3f

        /* Load scale factor & do multiplication */
        lwz     r5,CFG_TB_TO_XS(r9)     /* load values */
        lwz     r6,(CFG_TB_TO_XS+4)(r9)
        mulhwu  r4,r7,r5
        mulhwu  r6,r7,r6
        mullw   r0,r7,r5
        addc    r6,r6,r0

        /* At this point, we have the scaled xsec value in r4 + XER:CA
         * we load & add the stamp since epoch
         */
        lwz     r5,CFG_STAMP_XSEC(r9)
        lwz     r6,(CFG_STAMP_XSEC+4)(r9)
        adde    r4,r4,r6
        addze   r3,r5

        /* We now have our result in r3,r4. We create a fake dependency
         * on that result and re-check the counter
         */
        or      r6,r4,r3
        xor     r0,r6,r6
        add     r9,r9,r0
#ifdef CONFIG_PPC64
        lwz     r0,(CFG_TB_UPDATE_COUNT+4)(r9)
#else
        lwz     r0,(CFG_TB_UPDATE_COUNT)(r9)
#endif
        cmpl    cr0,r8,r0               /* check if updated */
        bne-    1b

        /* Warning ! The caller expects CR:EQ to be set to indicate a
         * successful calculation (so it won't fall back to the syscall
         * method). We have overridden that CR bit in the counter check,
         * but fortunately, the loop exit condition _is_ CR:EQ set, so
         * we can exit safely here. If you change this code, be careful
         * of that side effect.
         */
3:      blr
  .cfi_endproc