Diffstat (limited to 'arch/powerpc/kernel/vdso64')
-rw-r--r--  arch/powerpc/kernel/vdso64/Makefile          |  35
-rw-r--r--  arch/powerpc/kernel/vdso64/cacheflush.S      |  66
-rw-r--r--  arch/powerpc/kernel/vdso64/datapage.S        |  84
-rw-r--r--  arch/powerpc/kernel/vdso64/gettimeofday.S    | 242
-rw-r--r--  arch/powerpc/kernel/vdso64/note.S            |   1
-rw-r--r--  arch/powerpc/kernel/vdso64/sigtramp.S        | 295
-rw-r--r--  arch/powerpc/kernel/vdso64/vdso64.lds.S      | 116
-rw-r--r--  arch/powerpc/kernel/vdso64/vdso64_wrapper.S  |  13
8 files changed, 852 insertions, 0 deletions
diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile
new file mode 100644
index 000000000000..ab39988452cc
--- /dev/null
+++ b/arch/powerpc/kernel/vdso64/Makefile
@@ -0,0 +1,35 @@
# List of files in the vdso, has to be asm only for now

obj-vdso64 = sigtramp.o gettimeofday.o datapage.o cacheflush.o note.o

# Build rules

targets := $(obj-vdso64) vdso64.so
obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))

EXTRA_CFLAGS := -shared -s -fno-common -fno-builtin
EXTRA_CFLAGS += -nostdlib -Wl,-soname=linux-vdso64.so.1
EXTRA_AFLAGS := -D__VDSO64__ -s

obj-y += vdso64_wrapper.o
extra-y += vdso64.lds
CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)

# Force dependency (incbin is bad)
$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so

# link rule for the .so file, .lds has to be first
$(obj)/vdso64.so: $(src)/vdso64.lds $(obj-vdso64)
	$(call if_changed,vdso64ld)

# assembly rules for the .S files
$(obj-vdso64): %.o: %.S
	$(call if_changed_dep,vdso64as)

# actual build commands
quiet_cmd_vdso64ld = VDSO64L $@
      cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
quiet_cmd_vdso64as = VDSO64A $@
      cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<


diff --git a/arch/powerpc/kernel/vdso64/cacheflush.S b/arch/powerpc/kernel/vdso64/cacheflush.S
new file mode 100644
index 000000000000..d4a0ad28d534
--- /dev/null
+++ b/arch/powerpc/kernel/vdso64/cacheflush.S
@@ -0,0 +1,66 @@
/*
 * vDSO provided cache flush routines
 *
 * Copyright (C) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org),
 *                    IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/vdso.h>
#include <asm/asm-offsets.h>

	.text

/*
 * Default "generic" version of __kernel_sync_dicache.
 *
 * void __kernel_sync_dicache(unsigned long start, unsigned long end)
 *
 * Flushes the data cache & invalidates the instruction cache for the
 * provided range [start, end)
 *
 * Note: all CPUs supported by this kernel have a 128-byte cache
 * line size, so we don't have to peek that info from the datapage
 */
V_FUNCTION_BEGIN(__kernel_sync_dicache)
  .cfi_startproc
	li	r5,127
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	srwi.	r8,r8,7			/* compute line count */
	beqlr				/* nothing to do? */
	mtctr	r8
	mr	r3,r6
1:	dcbst	0,r3
	addi	r3,r3,128
	bdnz	1b
	sync
	mtctr	r8
1:	icbi	0,r6
	addi	r6,r6,128
	bdnz	1b
	isync
	li	r3,0
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_sync_dicache)


/*
 * POWER5 version of __kernel_sync_dicache
 */
V_FUNCTION_BEGIN(__kernel_sync_dicache_p5)
  .cfi_startproc
	sync
	isync
	li	r3,0
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_sync_dicache_p5)
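
As a rough C sketch, the line arithmetic the generic routine performs looks like the following (assuming the 128-byte cache line stated in the comment above; the dcbst/sync/icbi/isync sequence itself has no portable C equivalent and is only noted in a comment):

    #include <stdint.h>

    /* Sketch of __kernel_sync_dicache's loop bounds. */
    static unsigned long sync_dicache_line_count(uintptr_t start, uintptr_t end)
    {
            const uintptr_t line = 128;
            uintptr_t low = start & ~(line - 1);        /* round low to line bdy */
            uintptr_t len = (end - low) + (line - 1);   /* ensure we get enough  */
            unsigned long n = len / line;               /* line count            */

            /* The assembly then issues n x dcbst, a sync, n x icbi and an
             * isync over [low, low + n*128). */
            return n;
    }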
diff --git a/arch/powerpc/kernel/vdso64/datapage.S b/arch/powerpc/kernel/vdso64/datapage.S
new file mode 100644
index 000000000000..e67eda0f8cda
--- /dev/null
+++ b/arch/powerpc/kernel/vdso64/datapage.S
@@ -0,0 +1,84 @@
/*
 * Access to the shared data page by the vDSO & syscall map
 *
 * Copyright (C) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org), IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/vdso.h>

	.text
V_FUNCTION_BEGIN(__get_datapage)
  .cfi_startproc
	/* We don't want that exposed or overridable as we want other objects
	 * to be able to bl directly to here
	 */
	.protected __get_datapage
	.hidden __get_datapage

	mflr	r0
  .cfi_register lr,r0

	bcl	20,31,1f
	.global	__kernel_datapage_offset;
__kernel_datapage_offset:
	.long	0
1:
	mflr	r3
	mtlr	r0
	lwz	r0,0(r3)
	add	r3,r0,r3
	blr
  .cfi_endproc
V_FUNCTION_END(__get_datapage)

/*
 * void *__kernel_get_syscall_map(unsigned int *syscall_count);
 *
 * Returns a pointer to the syscall map. Unlike kernel bitops, the map
 * is agnostic to the size of "long": it stores bits from top to
 * bottom so that memory actually contains a linear bitmap. Check for
 * syscall N by testing bit (0x80000000 >> (N & 0x1f)) of the 32-bit
 * int at N >> 5.
 */
V_FUNCTION_BEGIN(__kernel_get_syscall_map)
  .cfi_startproc
	mflr	r12
  .cfi_register lr,r12

	mr	r4,r3
	bl	V_LOCAL_FUNC(__get_datapage)
	mtlr	r12
	addi	r3,r3,CFG_SYSCALL_MAP64
	cmpli	cr0,r4,0
	beqlr
	li	r0,__NR_syscalls
	stw	r0,0(r4)
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_get_syscall_map)


/*
 * unsigned long __kernel_get_tbfreq(void);
 *
 * Returns the timebase frequency in Hz.
 */
V_FUNCTION_BEGIN(__kernel_get_tbfreq)
  .cfi_startproc
	mflr	r12
  .cfi_register lr,r12
	bl	V_LOCAL_FUNC(__get_datapage)
	ld	r3,CFG_TB_TICKS_PER_SEC(r3)
	mtlr	r12
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_get_tbfreq)
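
The bitmap layout described in the __kernel_get_syscall_map comment can be used from C roughly as follows; the function pointer below is a hypothetical stand-in for one the caller has already resolved from the vDSO:

    #include <stdint.h>

    /* Hypothetical stand-in for a pointer resolved from the vDSO's
     * __kernel_get_syscall_map export. */
    extern uint32_t *(*vdso_get_syscall_map)(uint32_t *syscall_count);

    /* Returns non-zero if syscall number nr is implemented by this kernel. */
    static int syscall_is_supported(unsigned int nr)
    {
            uint32_t count;
            uint32_t *map = vdso_get_syscall_map(&count);

            if (nr >= count)
                    return 0;
            /* Bits are stored top to bottom: test bit (0x80000000 >> (nr & 0x1f))
             * of the 32-bit word at index nr >> 5. */
            return (map[nr >> 5] & (0x80000000u >> (nr & 0x1f))) != 0;
    }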
diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S
new file mode 100644
index 000000000000..d371c02a8c0e
--- /dev/null
+++ b/arch/powerpc/kernel/vdso64/gettimeofday.S
@@ -0,0 +1,242 @@
/*
 * Userland implementation of gettimeofday() for 64-bit processes in a
 * ppc64 kernel for use in the vDSO
 *
 * Copyright (C) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org),
 *                    IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>

	.text
/*
 * Exact prototype of gettimeofday
 *
 * int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz);
 *
 */
V_FUNCTION_BEGIN(__kernel_gettimeofday)
  .cfi_startproc
	mflr	r12
  .cfi_register lr,r12

	mr	r11,r3			/* r11 holds tv */
	mr	r10,r4			/* r10 holds tz */
	bl	V_LOCAL_FUNC(__get_datapage)	/* get data page */
	bl	V_LOCAL_FUNC(__do_get_xsec)	/* get xsec from tb & kernel */
	lis	r7,15			/* r7 = 1000000 = USEC_PER_SEC */
	ori	r7,r7,16960
	rldicl	r5,r4,44,20		/* r5 = sec = xsec / XSEC_PER_SEC */
	rldicr	r6,r5,20,43		/* r6 = sec * XSEC_PER_SEC */
	std	r5,TVAL64_TV_SEC(r11)	/* store sec in tv */
	subf	r0,r6,r4		/* r0 = xsec = (xsec - r6) */
	mulld	r0,r0,r7		/* usec = (xsec * USEC_PER_SEC) /
					 * XSEC_PER_SEC
					 */
	rldicl	r0,r0,44,20
	cmpldi	cr0,r10,0		/* check if tz is NULL */
	std	r0,TVAL64_TV_USEC(r11)	/* store usec in tv */
	beq	1f
	lwz	r4,CFG_TZ_MINUTEWEST(r3)/* fill tz */
	lwz	r5,CFG_TZ_DSTTIME(r3)
	stw	r4,TZONE_TZ_MINWEST(r10)
	stw	r5,TZONE_TZ_DSTTIME(r10)
1:	mtlr	r12
	li	r3,0			/* always success */
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_gettimeofday)
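
In C terms, the sec/usec split performed above after __do_get_xsec amounts to the following sketch (the >> 20 shifts in the assembly correspond to XSEC_PER_SEC being 2^20; struct and function names here are illustrative):

    #include <stdint.h>

    #define USEC_PER_SEC 1000000ULL
    #define XSEC_SHIFT   20        /* XSEC_PER_SEC == 1 << 20, per the >> 20 shifts */

    struct timeval_sketch { int64_t tv_sec; int64_t tv_usec; };

    static void xsec_to_timeval(uint64_t xsec, struct timeval_sketch *tv)
    {
            uint64_t sec  = xsec >> XSEC_SHIFT;                  /* rldicl r5,r4,44,20 */
            uint64_t rem  = xsec - (sec << XSEC_SHIFT);          /* rldicr + subf      */
            uint64_t usec = (rem * USEC_PER_SEC) >> XSEC_SHIFT;  /* mulld + rldicl     */

            tv->tv_sec  = sec;
            tv->tv_usec = usec;
    }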


/*
 * Exact prototype of clock_gettime()
 *
 * int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp);
 *
 */
V_FUNCTION_BEGIN(__kernel_clock_gettime)
  .cfi_startproc
	/* Check for supported clock IDs */
	cmpwi	cr0,r3,CLOCK_REALTIME
	cmpwi	cr1,r3,CLOCK_MONOTONIC
	cror	cr0,cr0,cr1
	bne	cr0,99f

	mflr	r12			/* r12 saves lr */
  .cfi_register lr,r12
	mr	r10,r3			/* r10 saves id */
	mr	r11,r4			/* r11 saves tp */
	bl	V_LOCAL_FUNC(__get_datapage)	/* get data page */
	beq	cr1,50f			/* if monotonic -> jump there */

	/*
	 * CLOCK_REALTIME
	 */

	bl	V_LOCAL_FUNC(__do_get_xsec)	/* get xsec from tb & kernel */

	lis	r7,0x3b9a		/* r7 = 1000000000 = NSEC_PER_SEC */
	ori	r7,r7,0xca00
	rldicl	r5,r4,44,20		/* r5 = sec = xsec / XSEC_PER_SEC */
	rldicr	r6,r5,20,43		/* r6 = sec * XSEC_PER_SEC */
	std	r5,TSPC64_TV_SEC(r11)	/* store sec in tv */
	subf	r0,r6,r4		/* r0 = xsec = (xsec - r6) */
	mulld	r0,r0,r7		/* nsec = (xsec * NSEC_PER_SEC) /
					 * XSEC_PER_SEC
					 */
	rldicl	r0,r0,44,20
	std	r0,TSPC64_TV_NSEC(r11)	/* store nsec in tp */

	mtlr	r12
	li	r3,0
	blr

	/*
	 * CLOCK_MONOTONIC
	 */

50:	bl	V_LOCAL_FUNC(__do_get_xsec)	/* get xsec from tb & kernel */

	lis	r7,0x3b9a		/* r7 = 1000000000 = NSEC_PER_SEC */
	ori	r7,r7,0xca00
	rldicl	r5,r4,44,20		/* r5 = sec = xsec / XSEC_PER_SEC */
	rldicr	r6,r5,20,43		/* r6 = sec * XSEC_PER_SEC */
	subf	r0,r6,r4		/* r0 = xsec = (xsec - r6) */
	mulld	r0,r0,r7		/* nsec = (xsec * NSEC_PER_SEC) /
					 * XSEC_PER_SEC
					 */
	rldicl	r6,r0,44,20

	/* Now we must fix up using wall to monotonic. We need to snapshot
	 * that value and do the counter trick again. Fortunately, we still
	 * have the counter value in r8 that was returned by __do_get_xsec.
	 * At this point, r5,r6 contain our sec/nsec values.
	 */

	lwz	r4,WTOM_CLOCK_SEC(r9)
	lwz	r7,WTOM_CLOCK_NSEC(r9)

	/* We now have our result in r4,r7. We create a fake dependency
	 * on that result and re-check the counter
	 */
	or	r9,r4,r7
	xor	r0,r9,r9
	add	r3,r3,r0
	ld	r0,CFG_TB_UPDATE_COUNT(r3)
	cmpld	cr0,r0,r8		/* check if updated */
	bne-	50b

	/* Calculate and store result. Note that this mimics the C code,
	 * which may cause funny results if nsec goes negative... is that
	 * possible at all?
	 */
	add	r4,r4,r5
	add	r7,r7,r6
	lis	r9,NSEC_PER_SEC@h
	ori	r9,r9,NSEC_PER_SEC@l
	cmpli	cr0,r7,r9
	blt	1f
	subf	r7,r9,r7
	addi	r4,r4,1
1:	std	r4,TSPC64_TV_SEC(r11)
	std	r7,TSPC64_TV_NSEC(r11)

	mtlr	r12
	li	r3,0
	blr

	/*
	 * syscall fallback
	 */
98:
	mtlr	r12
	mr	r3,r10
	mr	r4,r11
99:
	li	r0,__NR_clock_gettime
	sc
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_clock_gettime)
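
The wall-to-monotonic fixup in the CLOCK_MONOTONIC path boils down to this C sketch (wtom_sec/wtom_nsec stand for the values read via WTOM_CLOCK_SEC/WTOM_CLOCK_NSEC; like the assembly, it only handles nanosecond overflow, not a negative result):

    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000LL

    /* Add the wall-to-monotonic offset and carry any nsec overflow into sec. */
    static void wtom_fixup(int64_t *sec, int64_t *nsec,
                           int64_t wtom_sec, int64_t wtom_nsec)
    {
            *sec  += wtom_sec;
            *nsec += wtom_nsec;
            if (*nsec >= NSEC_PER_SEC) {   /* cmpli/blt, subf, addi above */
                    *nsec -= NSEC_PER_SEC;
                    *sec  += 1;
            }
    }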


/*
 * Exact prototype of clock_getres()
 *
 * int __kernel_clock_getres(clockid_t clock_id, struct timespec *res);
 *
 */
V_FUNCTION_BEGIN(__kernel_clock_getres)
  .cfi_startproc
	/* Check for supported clock IDs */
	cmpwi	cr0,r3,CLOCK_REALTIME
	cmpwi	cr1,r3,CLOCK_MONOTONIC
	cror	cr0,cr0,cr1
	bne	cr0,99f

	li	r3,0
	cmpli	cr0,r4,0
	beqlr
	lis	r5,CLOCK_REALTIME_RES@h
	ori	r5,r5,CLOCK_REALTIME_RES@l
	std	r3,TSPC64_TV_SEC(r4)
	std	r5,TSPC64_TV_NSEC(r4)
	blr

	/*
	 * syscall fallback
	 */
99:
	li	r0,__NR_clock_getres
	sc
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_clock_getres)


/*
 * This is the core of gettimeofday(); it returns the xsec
 * value in r4 and expects the datapage ptr (not clobbered)
 * in r3. Clobbers r0,r4,r5,r6,r7,r8.
 * When returning, r8 contains the counter value that can be reused.
 */
V_FUNCTION_BEGIN(__do_get_xsec)
  .cfi_startproc
	/* check for update count & load values */
1:	ld	r8,CFG_TB_UPDATE_COUNT(r3)
	andi.	r0,r4,1			/* pending update ? loop */
	bne-	1b
	xor	r0,r4,r4		/* create dependency */
	add	r3,r3,r0

	/* Get TB & offset it */
	mftb	r7
	ld	r9,CFG_TB_ORIG_STAMP(r3)
	subf	r7,r9,r7

	/* Scale result */
	ld	r5,CFG_TB_TO_XS(r3)
	mulhdu	r7,r7,r5

	/* Add stamp since epoch */
	ld	r6,CFG_STAMP_XSEC(r3)
	add	r4,r6,r7

	xor	r0,r4,r4
	add	r3,r3,r0
	ld	r0,CFG_TB_UPDATE_COUNT(r3)
	cmpld	cr0,r0,r8		/* check if updated */
	bne-	1b
	blr
  .cfi_endproc
V_FUNCTION_END(__do_get_xsec)
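
The retry protocol used by __do_get_xsec (and repeated for CLOCK_MONOTONIC above) is a lock-free read of the kernel-updated data page: spin while the update count is odd, compute, then retry if the count changed underneath. A C sketch under those assumptions (the struct layout and the read_timebase helper are illustrative, not the real vdso_data definition):

    #include <stdint.h>

    /* Illustrative layout: only the fields __do_get_xsec reads. */
    struct vdso_data_sketch {
            uint64_t tb_update_count;   /* CFG_TB_UPDATE_COUNT */
            uint64_t tb_orig_stamp;     /* CFG_TB_ORIG_STAMP   */
            uint64_t tb_to_xs;          /* CFG_TB_TO_XS        */
            uint64_t stamp_xsec;        /* CFG_STAMP_XSEC      */
    };

    uint64_t read_timebase(void);       /* assumed helper standing in for mftb */

    static uint64_t do_get_xsec(const volatile struct vdso_data_sketch *dp)
    {
            uint64_t count, xsec;

            do {
                    /* Wait out a pending update (odd count), then snapshot it. */
                    do {
                            count = dp->tb_update_count;
                    } while (count & 1);

                    /* Scale the timebase delta (high 64 bits of the 128-bit
                     * product, i.e. mulhdu) and add the stamp since epoch. */
                    xsec = (uint64_t)(((__uint128_t)(read_timebase() - dp->tb_orig_stamp)
                                       * dp->tb_to_xs) >> 64)
                           + dp->stamp_xsec;

                    /* Retry if the kernel updated the data page meanwhile. */
            } while (dp->tb_update_count != count);

            return xsec;
    }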
diff --git a/arch/powerpc/kernel/vdso64/note.S b/arch/powerpc/kernel/vdso64/note.S
new file mode 100644
index 000000000000..dc2a509f7e8a
--- /dev/null
+++ b/arch/powerpc/kernel/vdso64/note.S
@@ -0,0 +1 @@
#include "../vdso32/note.S"
diff --git a/arch/powerpc/kernel/vdso64/sigtramp.S b/arch/powerpc/kernel/vdso64/sigtramp.S
new file mode 100644
index 000000000000..31b604ab56de
--- /dev/null
+++ b/arch/powerpc/kernel/vdso64/sigtramp.S
@@ -0,0 +1,295 @@
/*
 * Signal trampoline for 64-bit processes in a ppc64 kernel for
 * use in the vDSO
 *
 * Copyright (C) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org), IBM Corp.
 * Copyright (C) 2004 Alan Modra (amodra@au.ibm.com), IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/unistd.h>
#include <asm/vdso.h>
#include <asm/ptrace.h>		/* XXX for __SIGNAL_FRAMESIZE */

	.text

/* The nop here is a hack.  The dwarf2 unwind routines subtract 1 from
   the return address to get an address in the middle of the presumed
   call instruction.  Since we don't have a call here, we artificially
   extend the range covered by the unwind info by padding before the
   real start.  */
	nop
	.balign 8
V_FUNCTION_BEGIN(__kernel_sigtramp_rt64)
.Lsigrt_start = . - 4
	addi	r1, r1, __SIGNAL_FRAMESIZE
	li	r0,__NR_rt_sigreturn
	sc
.Lsigrt_end:
V_FUNCTION_END(__kernel_sigtramp_rt64)
/* The ".balign 8" above and the following zeros mimic the old stack
   trampoline layout.  The last magic value is the ucontext pointer,
   chosen in such a way that older libgcc unwind code returns a zero
   for a sigcontext pointer.  */
	.long 0,0,0
	.quad 0,-21*8

/* Register r1 can be found at offset 8 of a pt_regs structure.
   A pointer to the pt_regs is stored in memory at the old sp plus PTREGS.  */
#define cfa_save \
  .byte 0x0f;			/* DW_CFA_def_cfa_expression */	\
  .uleb128 9f - 1f;		/* length */			\
1:								\
  .byte 0x71; .sleb128 PTREGS;	/* DW_OP_breg1 */		\
  .byte 0x06;			/* DW_OP_deref */		\
  .byte 0x23; .uleb128 RSIZE;	/* DW_OP_plus_uconst */		\
  .byte 0x06;			/* DW_OP_deref */		\
9:

/* Register REGNO can be found at offset OFS of a pt_regs structure.
   A pointer to the pt_regs is stored in memory at the old sp plus PTREGS.  */
#define rsave(regno, ofs) \
  .byte 0x10;			/* DW_CFA_expression */		\
  .uleb128 regno;		/* regno */			\
  .uleb128 9f - 1f;		/* length */			\
1:								\
  .byte 0x71; .sleb128 PTREGS;	/* DW_OP_breg1 */		\
  .byte 0x06;			/* DW_OP_deref */		\
  .ifne ofs;							\
    .byte 0x23; .uleb128 ofs;	/* DW_OP_plus_uconst */		\
  .endif;							\
9:

/* If msr bit 1<<25 is set, then VMX register REGNO is at offset REGNO*16
   of the VMX reg struct.  A pointer to the VMX reg struct is at VREGS in
   the pt_regs struct.  This macro is for REGNO == 0, and contains
   'subroutines' that the other macros jump to.  */
#define vsave_msr0(regno) \
  .byte 0x10;			/* DW_CFA_expression */		\
  .uleb128 regno + 77;		/* regno */			\
  .uleb128 9f - 1f;		/* length */			\
1:								\
  .byte 0x30 + regno;		/* DW_OP_lit0 */		\
2:								\
  .byte 0x40;			/* DW_OP_lit16 */		\
  .byte 0x1e;			/* DW_OP_mul */			\
3:								\
  .byte 0x71; .sleb128 PTREGS;	/* DW_OP_breg1 */		\
  .byte 0x06;			/* DW_OP_deref */		\
  .byte 0x12;			/* DW_OP_dup */			\
  .byte 0x23;			/* DW_OP_plus_uconst */		\
  .uleb128 33*RSIZE;		/* msr offset */		\
  .byte 0x06;			/* DW_OP_deref */		\
  .byte 0x0c; .long 1 << 25;	/* DW_OP_const4u */		\
  .byte 0x1a;			/* DW_OP_and */			\
  .byte 0x12;			/* DW_OP_dup, ret 0 if bra taken */	\
  .byte 0x30;			/* DW_OP_lit0 */		\
  .byte 0x29;			/* DW_OP_eq */			\
  .byte 0x28; .short 0x7fff;	/* DW_OP_bra to end */		\
  .byte 0x13;			/* DW_OP_drop, pop the 0 */	\
  .byte 0x23; .uleb128 VREGS;	/* DW_OP_plus_uconst */		\
  .byte 0x06;			/* DW_OP_deref */		\
  .byte 0x22;			/* DW_OP_plus */		\
  .byte 0x2f; .short 0x7fff;	/* DW_OP_skip to end */		\
9:

/* If msr bit 1<<25 is set, then VMX register REGNO is at offset REGNO*16
   of the VMX reg struct.  REGNO is 1 thru 31.  */
#define vsave_msr1(regno) \
  .byte 0x10;			/* DW_CFA_expression */		\
  .uleb128 regno + 77;		/* regno */			\
  .uleb128 9f - 1f;		/* length */			\
1:								\
  .byte 0x30 + regno;		/* DW_OP_lit n */		\
  .byte 0x2f; .short 2b - 9f;	/* DW_OP_skip */		\
9:

/* If msr bit 1<<25 is set, then VMX register REGNO is at offset OFS of
   the VMX save block.  */
#define vsave_msr2(regno, ofs) \
  .byte 0x10;			/* DW_CFA_expression */		\
  .uleb128 regno + 77;		/* regno */			\
  .uleb128 9f - 1f;		/* length */			\
1:								\
  .byte 0x0a; .short ofs;	/* DW_OP_const2u */		\
  .byte 0x2f; .short 3b - 9f;	/* DW_OP_skip */		\
9:

/* VMX register REGNO is at offset OFS of the VMX save area.  */
#define vsave(regno, ofs) \
  .byte 0x10;			/* DW_CFA_expression */		\
  .uleb128 regno + 77;		/* regno */			\
  .uleb128 9f - 1f;		/* length */			\
1:								\
  .byte 0x71; .sleb128 PTREGS;	/* DW_OP_breg1 */		\
  .byte 0x06;			/* DW_OP_deref */		\
  .byte 0x23; .uleb128 VREGS;	/* DW_OP_plus_uconst */		\
  .byte 0x06;			/* DW_OP_deref */		\
  .byte 0x23; .uleb128 ofs;	/* DW_OP_plus_uconst */		\
9:

/* This is where the pt_regs pointer can be found on the stack.  */
#define PTREGS 128+168+56

/* Size of regs.  */
#define RSIZE 8

/* This is the offset of the VMX reg pointer.  */
#define VREGS 48*RSIZE+33*8

/* Describe where general purpose regs are saved.  */
#define EH_FRAME_GEN \
  cfa_save;							\
  rsave ( 0,  0*RSIZE);						\
  rsave ( 2,  2*RSIZE);						\
  rsave ( 3,  3*RSIZE);						\
  rsave ( 4,  4*RSIZE);						\
  rsave ( 5,  5*RSIZE);						\
  rsave ( 6,  6*RSIZE);						\
  rsave ( 7,  7*RSIZE);						\
  rsave ( 8,  8*RSIZE);						\
  rsave ( 9,  9*RSIZE);						\
  rsave (10, 10*RSIZE);						\
  rsave (11, 11*RSIZE);						\
  rsave (12, 12*RSIZE);						\
  rsave (13, 13*RSIZE);						\
  rsave (14, 14*RSIZE);						\
  rsave (15, 15*RSIZE);						\
  rsave (16, 16*RSIZE);						\
  rsave (17, 17*RSIZE);						\
  rsave (18, 18*RSIZE);						\
  rsave (19, 19*RSIZE);						\
  rsave (20, 20*RSIZE);						\
  rsave (21, 21*RSIZE);						\
  rsave (22, 22*RSIZE);						\
  rsave (23, 23*RSIZE);						\
  rsave (24, 24*RSIZE);						\
  rsave (25, 25*RSIZE);						\
  rsave (26, 26*RSIZE);						\
  rsave (27, 27*RSIZE);						\
  rsave (28, 28*RSIZE);						\
  rsave (29, 29*RSIZE);						\
  rsave (30, 30*RSIZE);						\
  rsave (31, 31*RSIZE);						\
  rsave (67, 32*RSIZE);		/* ap, used as temp for nip */	\
  rsave (65, 36*RSIZE);		/* lr */			\
  rsave (70, 38*RSIZE)		/* cr */

/* Describe where the FP regs are saved.  */
#define EH_FRAME_FP \
  rsave (32, 48*RSIZE +  0*8);					\
  rsave (33, 48*RSIZE +  1*8);					\
  rsave (34, 48*RSIZE +  2*8);					\
  rsave (35, 48*RSIZE +  3*8);					\
  rsave (36, 48*RSIZE +  4*8);					\
  rsave (37, 48*RSIZE +  5*8);					\
  rsave (38, 48*RSIZE +  6*8);					\
  rsave (39, 48*RSIZE +  7*8);					\
  rsave (40, 48*RSIZE +  8*8);					\
  rsave (41, 48*RSIZE +  9*8);					\
  rsave (42, 48*RSIZE + 10*8);					\
  rsave (43, 48*RSIZE + 11*8);					\
  rsave (44, 48*RSIZE + 12*8);					\
  rsave (45, 48*RSIZE + 13*8);					\
  rsave (46, 48*RSIZE + 14*8);					\
  rsave (47, 48*RSIZE + 15*8);					\
  rsave (48, 48*RSIZE + 16*8);					\
  rsave (49, 48*RSIZE + 17*8);					\
  rsave (50, 48*RSIZE + 18*8);					\
  rsave (51, 48*RSIZE + 19*8);					\
  rsave (52, 48*RSIZE + 20*8);					\
  rsave (53, 48*RSIZE + 21*8);					\
  rsave (54, 48*RSIZE + 22*8);					\
  rsave (55, 48*RSIZE + 23*8);					\
  rsave (56, 48*RSIZE + 24*8);					\
  rsave (57, 48*RSIZE + 25*8);					\
  rsave (58, 48*RSIZE + 26*8);					\
  rsave (59, 48*RSIZE + 27*8);					\
  rsave (60, 48*RSIZE + 28*8);					\
  rsave (61, 48*RSIZE + 29*8);					\
  rsave (62, 48*RSIZE + 30*8);					\
  rsave (63, 48*RSIZE + 31*8)

/* Describe where the VMX regs are saved.  */
#ifdef CONFIG_ALTIVEC
#define EH_FRAME_VMX \
  vsave_msr0 ( 0);						\
  vsave_msr1 ( 1);						\
  vsave_msr1 ( 2);						\
  vsave_msr1 ( 3);						\
  vsave_msr1 ( 4);						\
  vsave_msr1 ( 5);						\
  vsave_msr1 ( 6);						\
  vsave_msr1 ( 7);						\
  vsave_msr1 ( 8);						\
  vsave_msr1 ( 9);						\
  vsave_msr1 (10);						\
  vsave_msr1 (11);						\
  vsave_msr1 (12);						\
  vsave_msr1 (13);						\
  vsave_msr1 (14);						\
  vsave_msr1 (15);						\
  vsave_msr1 (16);						\
  vsave_msr1 (17);						\
  vsave_msr1 (18);						\
  vsave_msr1 (19);						\
  vsave_msr1 (20);						\
  vsave_msr1 (21);						\
  vsave_msr1 (22);						\
  vsave_msr1 (23);						\
  vsave_msr1 (24);						\
  vsave_msr1 (25);						\
  vsave_msr1 (26);						\
  vsave_msr1 (27);						\
  vsave_msr1 (28);						\
  vsave_msr1 (29);						\
  vsave_msr1 (30);						\
  vsave_msr1 (31);						\
  vsave_msr2 (33, 32*16+12);					\
  vsave (32, 33*16)
#else
#define EH_FRAME_VMX
#endif

	.section .eh_frame,"a",@progbits
.Lcie:
	.long .Lcie_end - .Lcie_start
.Lcie_start:
	.long 0			/* CIE ID */
	.byte 1			/* Version number */
	.string "zR"		/* NUL-terminated augmentation string */
	.uleb128 4		/* Code alignment factor */
	.sleb128 -8		/* Data alignment factor */
	.byte 67		/* Return address register column, ap */
	.uleb128 1		/* Augmentation value length */
	.byte 0x14		/* DW_EH_PE_pcrel | DW_EH_PE_udata8. */
	.byte 0x0c,1,0		/* DW_CFA_def_cfa: r1 ofs 0 */
	.balign 8
.Lcie_end:

	.long .Lfde0_end - .Lfde0_start
.Lfde0_start:
	.long .Lfde0_start - .Lcie	/* CIE pointer. */
	.quad .Lsigrt_start - .		/* PC start, length */
	.quad .Lsigrt_end - .Lsigrt_start
	.uleb128 0			/* Augmentation */
	EH_FRAME_GEN
	EH_FRAME_FP
	EH_FRAME_VMX
# Do we really need to describe the frame at this point? i.e. will
# we ever have some call chain that returns somewhere past the addi?
# I don't think so, since gcc doesn't support async signals.
288 | # .byte 0x41 /* DW_CFA_advance_loc 1*4 */ | ||
289 | #undef PTREGS | ||
290 | #define PTREGS 168+56 | ||
291 | # EH_FRAME_GEN | ||
292 | # EH_FRAME_FP | ||
293 | # EH_FRAME_VMX | ||
294 | .balign 8 | ||
295 | .Lfde0_end: | ||
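
In C terms, an unwinder evaluating the cfa_save and rsave expressions above computes addresses roughly as follows (a sketch only; parameter names are illustrative, with ptregs_ofs standing for PTREGS and reg_ofs for the per-register offset):

    #include <stdint.h>

    /* What DW_OP_breg1 PTREGS; DW_OP_deref; DW_OP_plus_uconst ofs evaluates
     * to: the address of a register slot inside the pt_regs the kernel left
     * on the signal stack. */
    static uintptr_t saved_reg_address(uintptr_t r1, uintptr_t ptregs_ofs,
                                       uintptr_t reg_ofs)
    {
            uintptr_t pt_regs = *(uintptr_t *)(r1 + ptregs_ofs); /* breg1 + deref */
            return pt_regs + reg_ofs;                            /* plus_uconst   */
    }

    /* The cfa_save expression adds one more dereference: the previous stack
     * pointer is itself read from pt_regs at offset RSIZE (i.e. gpr[1]). */
    static uintptr_t signal_frame_cfa(uintptr_t r1, uintptr_t ptregs_ofs,
                                      uintptr_t rsize)
    {
            uintptr_t pt_regs = *(uintptr_t *)(r1 + ptregs_ofs);
            return *(uintptr_t *)(pt_regs + rsize);
    }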
diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S
new file mode 100644
index 000000000000..4bdf224464ab
--- /dev/null
+++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S
@@ -0,0 +1,116 @@
/*
 * This is the infamous ld script for the 64-bit vdso
 * library
 */
#include <asm/vdso.h>

OUTPUT_FORMAT("elf64-powerpc", "elf64-powerpc", "elf64-powerpc")
OUTPUT_ARCH(powerpc:common64)
ENTRY(_start)

SECTIONS
{
  . = VDSO64_LBASE + SIZEOF_HEADERS;
  .hash           : { *(.hash) }			:text
  .dynsym         : { *(.dynsym) }
  .dynstr         : { *(.dynstr) }
  .gnu.version    : { *(.gnu.version) }
  .gnu.version_d  : { *(.gnu.version_d) }
  .gnu.version_r  : { *(.gnu.version_r) }

  .note           : { *(.note.*) }			:text	:note

  . = ALIGN (16);
  .text :
  {
    *(.text .stub .text.* .gnu.linkonce.t.*)
    *(.sfpr .glink)
  }							:text
  PROVIDE (__etext = .);
  PROVIDE (_etext = .);
  PROVIDE (etext = .);

  /* Other stuff is appended to the text segment: */
  .rodata         : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
  .rodata1        : { *(.rodata1) }
  .eh_frame_hdr   : { *(.eh_frame_hdr) }		:text	:eh_frame_hdr
  .eh_frame       : { KEEP (*(.eh_frame)) }		:text
  .gcc_except_table : { *(.gcc_except_table) }

  .opd ALIGN(8)   : { KEEP (*(.opd)) }
  .got ALIGN(8)   : { *(.got .toc) }
  .rela.dyn ALIGN(8) : { *(.rela.dyn) }

  .dynamic        : { *(.dynamic) }			:text	:dynamic

  _end = .;
  PROVIDE (end = .);

  /* Stabs debugging sections are here too
   */
  .stab          0 : { *(.stab) }
  .stabstr       0 : { *(.stabstr) }
  .stab.excl     0 : { *(.stab.excl) }
  .stab.exclstr  0 : { *(.stab.exclstr) }
  .stab.index    0 : { *(.stab.index) }
  .stab.indexstr 0 : { *(.stab.indexstr) }
  .comment       0 : { *(.comment) }
  /* DWARF debug sections.
     Symbols in the DWARF debugging sections are relative to the beginning
     of the section so we begin them at 0.  */
  /* DWARF 1 */
  .debug          0 : { *(.debug) }
  .line           0 : { *(.line) }
  /* GNU DWARF 1 extensions */
  .debug_srcinfo  0 : { *(.debug_srcinfo) }
  .debug_sfnames  0 : { *(.debug_sfnames) }
  /* DWARF 1.1 and DWARF 2 */
  .debug_aranges  0 : { *(.debug_aranges) }
  .debug_pubnames 0 : { *(.debug_pubnames) }
  /* DWARF 2 */
  .debug_info     0 : { *(.debug_info .gnu.linkonce.wi.*) }
  .debug_abbrev   0 : { *(.debug_abbrev) }
  .debug_line     0 : { *(.debug_line) }
  .debug_frame    0 : { *(.debug_frame) }
  .debug_str      0 : { *(.debug_str) }
  .debug_loc      0 : { *(.debug_loc) }
  .debug_macinfo  0 : { *(.debug_macinfo) }
  /* SGI/MIPS DWARF 2 extensions */
  .debug_weaknames 0 : { *(.debug_weaknames) }
  .debug_funcnames 0 : { *(.debug_funcnames) }
  .debug_typenames 0 : { *(.debug_typenames) }
  .debug_varnames  0 : { *(.debug_varnames) }

  /DISCARD/ : { *(.note.GNU-stack) }
  /DISCARD/ : { *(.branch_lt) }
  /DISCARD/ : { *(.data .data.* .gnu.linkonce.d.*) }
  /DISCARD/ : { *(.bss .sbss .dynbss .dynsbss) }
}

PHDRS
{
  text		PT_LOAD FILEHDR PHDRS FLAGS(5);	/* PF_R|PF_X */
  note		PT_NOTE FLAGS(4);		/* PF_R */
  dynamic	PT_DYNAMIC FLAGS(4);		/* PF_R */
  eh_frame_hdr	0x6474e550;	/* PT_GNU_EH_FRAME, but ld doesn't match the name */
}

/*
 * This controls what symbols we export from the DSO.
 */
VERSION
{
  VDSO_VERSION_STRING {
    global:
	__kernel_datapage_offset;	/* Has to be there for the kernel to find */
	__kernel_get_syscall_map;
	__kernel_gettimeofday;
	__kernel_clock_gettime;
	__kernel_clock_getres;
	__kernel_get_tbfreq;
	__kernel_sync_dicache;
	__kernel_sync_dicache_p5;
	__kernel_sigtramp_rt64;
    local: *;
  };
}
diff --git a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
new file mode 100644
index 000000000000..0529cb9e3b97
--- /dev/null
+++ b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
@@ -0,0 +1,13 @@
#include <linux/init.h>
#include <asm/page.h>

	.section ".data.page_aligned"

	.globl vdso64_start, vdso64_end
	.balign PAGE_SIZE
vdso64_start:
	.incbin "arch/powerpc/kernel/vdso64/vdso64.so"
	.balign PAGE_SIZE
vdso64_end:

	.previous
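
Elsewhere the kernel refers to the embedded image through the two symbols defined above; a minimal C-side sketch of that usage (the real consumer is the vDSO setup code, which is not part of this diff):

    /* C-side view of the symbols defined by vdso64_wrapper.S. */
    extern char vdso64_start[], vdso64_end[];

    static unsigned long vdso64_size_in_pages(unsigned long page_shift)
    {
            /* Both ends are PAGE_SIZE-aligned thanks to the .balign above,
             * so the division is exact. */
            return (unsigned long)(vdso64_end - vdso64_start) >> page_shift;
    }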