diff options
author | Al Viro <viro@ftp.linux.org.uk> | 2011-08-18 15:03:19 -0400 |
---|---|---|
committer | Richard Weinberger <richard@nod.at> | 2011-11-02 09:14:51 -0400 |
commit | 6582b7f7743da6ce3e3714e9e8b18e0e073d4acd (patch) | |
tree | 649346d2333865ccd4bb4d560ceb50c39139c5c2 /arch/um/sys-x86 | |
parent | 858ba94499b4f48e9eb0be7cf0092f1ea9460fef (diff) |
um: merge arch/um/sys-{i386,x86_64}
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Richard Weinberger <richard@nod.at>
Diffstat (limited to 'arch/um/sys-x86')
41 files changed, 4297 insertions, 0 deletions
diff --git a/arch/um/sys-x86/Makefile b/arch/um/sys-x86/Makefile new file mode 100644 index 000000000000..671de0b45dd8 --- /dev/null +++ b/arch/um/sys-x86/Makefile | |||
@@ -0,0 +1,45 @@ | |||
#
# Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
#

ifeq ($(CONFIG_X86_32),y)
BITS := 32
else
BITS := 64
endif

obj-y = bug.o bugs_$(BITS).o delay_$(BITS).o fault.o ksyms.o ldt.o \
	ptrace_$(BITS).o ptrace_user.o setjmp_$(BITS).o signal_$(BITS).o \
	stub_$(BITS).o stub_segv_$(BITS).o syscalls_$(BITS).o \
	sys_call_table_$(BITS).o sysrq_$(BITS).o tls_$(BITS).o mem_$(BITS).o

ifeq ($(CONFIG_X86_32),y)

obj-y += checksum_32.o
obj-$(CONFIG_BINFMT_ELF) += elfcore.o

subarch-obj-y = lib/string_32.o lib/atomic64_32.o lib/atomic64_cx8_32.o
subarch-obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += lib/rwsem.o
subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem_32.o

else

obj-y += vdso/

subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o \
	lib/rwsem.o

endif

# kernel/module.o is needed for both bitnesses; keep it here only, it was
# previously also listed inside the 32-bit branch above, which added it to
# subarch-obj twice on CONFIG_X86_32 builds.
subarch-obj-$(CONFIG_MODULES) += kernel/module.o

USER_OBJS := bugs_$(BITS).o ptrace_user.o fault.o

extra-y += user-offsets.s
$(obj)/user-offsets.s: c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS)

UNPROFILE_OBJS := stub_segv.o
# The object actually built is stub_segv_$(BITS).o, so the per-object
# CFLAGS must name that file; CFLAGS_stub_segv.o would never match.
CFLAGS_stub_segv_$(BITS).o := $(CFLAGS_NO_HARDENING)

include arch/um/scripts/Makefile.rules
diff --git a/arch/um/sys-x86/bug.c b/arch/um/sys-x86/bug.c new file mode 100644 index 000000000000..e8034e363d83 --- /dev/null +++ b/arch/um/sys-x86/bug.c | |||
@@ -0,0 +1,21 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006 Jeff Dike (jdike@addtoit.com) | ||
3 | * Licensed under the GPL V2 | ||
4 | */ | ||
5 | |||
6 | #include <linux/uaccess.h> | ||
7 | |||
8 | /* | ||
9 | * Mostly copied from i386/x86_86 - eliminated the eip < PAGE_OFFSET because | ||
10 | * that's not relevant in skas mode. | ||
11 | */ | ||
12 | |||
13 | int is_valid_bugaddr(unsigned long eip) | ||
14 | { | ||
15 | unsigned short ud2; | ||
16 | |||
17 | if (probe_kernel_address((unsigned short __user *)eip, ud2)) | ||
18 | return 0; | ||
19 | |||
20 | return ud2 == 0x0b0f; | ||
21 | } | ||
diff --git a/arch/um/sys-x86/bugs_32.c b/arch/um/sys-x86/bugs_32.c new file mode 100644 index 000000000000..7058e1fa903b --- /dev/null +++ b/arch/um/sys-x86/bugs_32.c | |||
@@ -0,0 +1,74 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #include <signal.h> | ||
7 | #include "kern_util.h" | ||
8 | #include "longjmp.h" | ||
9 | #include "task.h" | ||
10 | #include "sysdep/ptrace.h" | ||
11 | |||
/* Set during early boot */
static int host_has_cmov = 1;
static jmp_buf cmov_test_return;

/*
 * SIGILL handler for the cmov probe in arch_check_bugs(): record that
 * the host lacks cmov and jump back out of the faulting instruction.
 */
static void cmov_sigill_test_handler(int sig)
{
	host_has_cmov = 0;
	longjmp(cmov_test_return, 1);
}
21 | |||
/*
 * Probe whether the host CPU implements the cmov instruction by
 * executing one under a temporary SIGILL handler.  If it traps, the
 * handler clears host_has_cmov and longjmps back here; the result is
 * later consulted by arch_examine_signal().
 */
void arch_check_bugs(void)
{
	struct sigaction old, new;

	printk(UM_KERN_INFO "Checking for host processor cmov support...");
	new.sa_handler = cmov_sigill_test_handler;

	/* Make sure that SIGILL is enabled after the handler longjmps back */
	new.sa_flags = SA_NODEFER;
	sigemptyset(&new.sa_mask);
	sigaction(SIGILL, &new, &old);

	if (setjmp(cmov_test_return) == 0) {
		unsigned long foo = 0;
		/* Execute a cmov; raises SIGILL if unsupported. */
		__asm__ __volatile__("cmovz %0, %1" : "=r" (foo) : "0" (foo));
		printk(UM_KERN_CONT "Yes\n");
	} else
		printk(UM_KERN_CONT "No\n");

	/* Restore the previous SIGILL disposition. */
	sigaction(SIGILL, &old, &new);
}
43 | |||
44 | void arch_examine_signal(int sig, struct uml_pt_regs *regs) | ||
45 | { | ||
46 | unsigned char tmp[2]; | ||
47 | |||
48 | /* | ||
49 | * This is testing for a cmov (0x0f 0x4x) instruction causing a | ||
50 | * SIGILL in init. | ||
51 | */ | ||
52 | if ((sig != SIGILL) || (TASK_PID(get_current()) != 1)) | ||
53 | return; | ||
54 | |||
55 | if (copy_from_user_proc(tmp, (void *) UPT_IP(regs), 2)) { | ||
56 | printk(UM_KERN_ERR "SIGILL in init, could not read " | ||
57 | "instructions!\n"); | ||
58 | return; | ||
59 | } | ||
60 | |||
61 | if ((tmp[0] != 0x0f) || ((tmp[1] & 0xf0) != 0x40)) | ||
62 | return; | ||
63 | |||
64 | if (host_has_cmov == 0) | ||
65 | printk(UM_KERN_ERR "SIGILL caused by cmov, which this " | ||
66 | "processor doesn't implement. Boot a filesystem " | ||
67 | "compiled for older processors"); | ||
68 | else if (host_has_cmov == 1) | ||
69 | printk(UM_KERN_ERR "SIGILL caused by cmov, which this " | ||
70 | "processor claims to implement"); | ||
71 | else | ||
72 | printk(UM_KERN_ERR "Bad value for host_has_cmov (%d)", | ||
73 | host_has_cmov); | ||
74 | } | ||
diff --git a/arch/um/sys-x86/bugs_64.c b/arch/um/sys-x86/bugs_64.c new file mode 100644 index 000000000000..44e02ba2a265 --- /dev/null +++ b/arch/um/sys-x86/bugs_64.c | |||
@@ -0,0 +1,15 @@ | |||
1 | /* | ||
2 | * Copyright 2003 PathScale, Inc. | ||
3 | * | ||
4 | * Licensed under the GPL | ||
5 | */ | ||
6 | |||
7 | #include "sysdep/ptrace.h" | ||
8 | |||
/* No processor-bug checks are needed for x86_64 hosts. */
void arch_check_bugs(void)
{
}

/* No architecture-specific signal inspection on x86_64 either. */
void arch_examine_signal(int sig, struct uml_pt_regs *regs)
{
}
diff --git a/arch/um/sys-x86/checksum_32.S b/arch/um/sys-x86/checksum_32.S new file mode 100644 index 000000000000..f058d2f82e18 --- /dev/null +++ b/arch/um/sys-x86/checksum_32.S | |||
@@ -0,0 +1,458 @@ | |||
1 | /* | ||
2 | * INET An implementation of the TCP/IP protocol suite for the LINUX | ||
3 | * operating system. INET is implemented using the BSD Socket | ||
4 | * interface as the means of communication with the user level. | ||
5 | * | ||
6 | * IP/TCP/UDP checksumming routines | ||
7 | * | ||
8 | * Authors: Jorge Cwik, <jorge@laser.satlink.net> | ||
9 | * Arnt Gulbrandsen, <agulbra@nvg.unit.no> | ||
10 | * Tom May, <ftom@netcom.com> | ||
11 | * Pentium Pro/II routines: | ||
12 | * Alexander Kjeldaas <astor@guardian.no> | ||
13 | * Finn Arne Gangstad <finnag@guardian.no> | ||
14 | * Lots of code moved from tcp.c and ip.c; see those files | ||
15 | * for more names. | ||
16 | * | ||
17 | * Changes: Ingo Molnar, converted csum_partial_copy() to 2.1 exception | ||
18 | * handling. | ||
19 | * Andi Kleen, add zeroing on error | ||
20 | * converted to pure assembler | ||
21 | * | ||
22 | * This program is free software; you can redistribute it and/or | ||
23 | * modify it under the terms of the GNU General Public License | ||
24 | * as published by the Free Software Foundation; either version | ||
25 | * 2 of the License, or (at your option) any later version. | ||
26 | */ | ||
27 | |||
28 | #include <asm/errno.h> | ||
29 | |||
30 | /* | ||
31 | * computes a partial checksum, e.g. for TCP/UDP fragments | ||
32 | */ | ||
33 | |||
34 | /* | ||
35 | unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum) | ||
36 | */ | ||
37 | |||
38 | .text | ||
39 | .align 4 | ||
40 | .globl csum_partial | ||
41 | |||
42 | #ifndef CONFIG_X86_USE_PPRO_CHECKSUM | ||
43 | |||
44 | /* | ||
45 | * Experiments with Ethernet and SLIP connections show that buff | ||
46 | * is aligned on either a 2-byte or 4-byte boundary. We get at | ||
47 | * least a twofold speedup on 486 and Pentium if it is 4-byte aligned. | ||
48 | * Fortunately, it is easy to convert 2-byte alignment to 4-byte | ||
49 | * alignment for the unrolled loop. | ||
50 | */ | ||
# csum_partial(buff, len, sum): 32-bit ones'-complement partial checksum.
# Generic (486/Pentium) version: align to 4 bytes, then a 32-byte
# unrolled add-with-carry loop, then mop up remaining dwords and bytes.
csum_partial:
	pushl %esi
	pushl %ebx
	movl 20(%esp),%eax	# Function arg: unsigned int sum
	movl 16(%esp),%ecx	# Function arg: int len
	movl 12(%esp),%esi	# Function arg: unsigned char *buff
	testl $2, %esi		# Check alignment.
	jz 2f			# Jump if alignment is ok.
	subl $2, %ecx		# Alignment uses up two bytes.
	jae 1f			# Jump if we had at least two bytes.
	addl $2, %ecx		# ecx was < 2. Deal with it.
	jmp 4f
1:	movw (%esi), %bx	# fold the leading 16-bit word so the
	addl $2, %esi		# main loop runs 4-byte aligned
	addw %bx, %ax
	adcl $0, %eax
2:
	movl %ecx, %edx
	shrl $5, %ecx		# number of 32-byte chunks
	jz 2f
	testl %esi, %esi	# clears CF before the adc chain below
1:	movl (%esi), %ebx
	adcl %ebx, %eax
	movl 4(%esi), %ebx
	adcl %ebx, %eax
	movl 8(%esi), %ebx
	adcl %ebx, %eax
	movl 12(%esi), %ebx
	adcl %ebx, %eax
	movl 16(%esi), %ebx
	adcl %ebx, %eax
	movl 20(%esi), %ebx
	adcl %ebx, %eax
	movl 24(%esi), %ebx
	adcl %ebx, %eax
	movl 28(%esi), %ebx
	adcl %ebx, %eax
	lea 32(%esi), %esi
	dec %ecx
	jne 1b
	adcl $0, %eax		# fold the final carry back in
2:	movl %edx, %ecx
	andl $0x1c, %edx	# remaining whole dwords (0..7)
	je 4f
	shrl $2, %edx		# This clears CF
3:	adcl (%esi), %eax
	lea 4(%esi), %esi
	dec %edx
	jne 3b
	adcl $0, %eax
4:	andl $3, %ecx		# trailing 0-3 bytes
	jz 7f
	cmpl $2, %ecx
	jb 5f
	movw (%esi),%cx
	leal 2(%esi),%esi
	je 6f
	shll $16,%ecx
5:	movb (%esi),%cl
6:	addl %ecx,%eax
	adcl $0, %eax
7:
	popl %ebx
	popl %esi
	ret
116 | |||
117 | #else | ||
118 | |||
119 | /* Version for PentiumII/PPro */ | ||
120 | |||
# csum_partial, PentiumII/PPro flavor: instead of looping over leftover
# dwords, compute a jump into the middle of a fully unrolled 128-byte
# adc chain (each adcl off(%esi),%eax is 3 bytes, hence the *3 scaling
# of the entry offset at label 45).
csum_partial:
	pushl %esi
	pushl %ebx
	movl 20(%esp),%eax	# Function arg: unsigned int sum
	movl 16(%esp),%ecx	# Function arg: int len
	movl 12(%esp),%esi	# Function arg: const unsigned char *buf

	testl $2, %esi		# 2-byte aligned buffer needs a prologue
	jnz 30f
10:
	movl %ecx, %edx
	movl %ecx, %ebx
	andl $0x7c, %ebx	# bytes consumed by the partial first pass
	shrl $7, %ecx		# number of full 128-byte chunks
	addl %ebx,%esi
	shrl $2, %ebx
	negl %ebx
	lea 45f(%ebx,%ebx,2), %ebx	# entry point inside the adc chain
	testl %esi, %esi	# clear CF before the chain
	jmp *%ebx

# Handle 2-byte-aligned regions
20:	addw (%esi), %ax
	lea 2(%esi), %esi
	adcl $0, %eax
	jmp 10b

30:	subl $2, %ecx
	ja 20b
	je 32f
	movzbl (%esi),%ebx	# csumming 1 byte, 2-aligned
	addl %ebx, %eax
	adcl $0, %eax
	jmp 80f
32:
	addw (%esi), %ax	# csumming 2 bytes, 2-aligned
	adcl $0, %eax
	jmp 80f

40:
	addl -128(%esi), %eax
	adcl -124(%esi), %eax
	adcl -120(%esi), %eax
	adcl -116(%esi), %eax
	adcl -112(%esi), %eax
	adcl -108(%esi), %eax
	adcl -104(%esi), %eax
	adcl -100(%esi), %eax
	adcl -96(%esi), %eax
	adcl -92(%esi), %eax
	adcl -88(%esi), %eax
	adcl -84(%esi), %eax
	adcl -80(%esi), %eax
	adcl -76(%esi), %eax
	adcl -72(%esi), %eax
	adcl -68(%esi), %eax
	adcl -64(%esi), %eax
	adcl -60(%esi), %eax
	adcl -56(%esi), %eax
	adcl -52(%esi), %eax
	adcl -48(%esi), %eax
	adcl -44(%esi), %eax
	adcl -40(%esi), %eax
	adcl -36(%esi), %eax
	adcl -32(%esi), %eax
	adcl -28(%esi), %eax
	adcl -24(%esi), %eax
	adcl -20(%esi), %eax
	adcl -16(%esi), %eax
	adcl -12(%esi), %eax
	adcl -8(%esi), %eax
	adcl -4(%esi), %eax
45:
	lea 128(%esi), %esi
	adcl $0, %eax
	dec %ecx
	jge 40b
	movl %edx, %ecx
50:	andl $3, %ecx
	jz 80f

# Handle the last 1-3 bytes without jumping
	notl %ecx		# 1->2, 2->1, 3->0, higher bits are masked
	movl $0xffffff,%ebx	# by the shll and shrl instructions
	shll $3,%ecx
	shrl %cl,%ebx
	andl -128(%esi),%ebx	# esi is 4-aligned so should be ok
	addl %ebx,%eax
	adcl $0,%eax
80:
	popl %ebx
	popl %esi
	ret
214 | |||
215 | #endif | ||
216 | |||
217 | /* | ||
218 | unsigned int csum_partial_copy_generic (const char *src, char *dst, | ||
219 | int len, int sum, int *src_err_ptr, int *dst_err_ptr) | ||
220 | */ | ||
221 | |||
222 | /* | ||
223 | * Copy from ds while checksumming, otherwise like csum_partial | ||
224 | * | ||
225 | * The macros SRC and DST specify the type of access for the instruction. | ||
226 | * thus we can call a custom exception handler for all access types. | ||
227 | * | ||
228 | * FIXME: could someone double-check whether I haven't mixed up some SRC and | ||
229 | * DST definitions? It's damn hard to trigger all cases. I hope I got | ||
230 | * them all but there's no guarantee. | ||
231 | */ | ||
232 | |||
# SRC/DST wrap a single instruction and register an exception-table
# entry for it: a fault on a SRC() access lands in the 6001 fixup
# (source error), a fault on a DST() access in the 6002 fixup
# (destination error).
#define SRC(y...)			\
	9999: y;			\
	.section __ex_table, "a";	\
	.long 9999b, 6001f	;	\
	.previous

#define DST(y...)			\
	9999: y;			\
	.section __ex_table, "a";	\
	.long 9999b, 6002f	;	\
	.previous
244 | |||
245 | .align 4 | ||
246 | |||
247 | #ifndef CONFIG_X86_USE_PPRO_CHECKSUM | ||
248 | |||
#define ARGBASE 16
#define FP 12

# csum_partial_copy_generic_i386(src, dst, len, sum, src_err_ptr,
# dst_err_ptr): copy while checksumming, generic (486/Pentium) version.
# Structure mirrors csum_partial above, with every memory access wrapped
# in SRC()/DST() so faults are redirected to the fixup code at 6001/6002.
csum_partial_copy_generic_i386:
	subl $4,%esp		# scratch slot addressed as FP(%esp)
	pushl %edi
	pushl %esi
	pushl %ebx
	movl ARGBASE+16(%esp),%eax	# sum
	movl ARGBASE+12(%esp),%ecx	# len
	movl ARGBASE+4(%esp),%esi	# src
	movl ARGBASE+8(%esp),%edi	# dst

	testl $2, %edi		# Check alignment.
	jz 2f			# Jump if alignment is ok.
	subl $2, %ecx		# Alignment uses up two bytes.
	jae 1f			# Jump if we had at least two bytes.
	addl $2, %ecx		# ecx was < 2. Deal with it.
	jmp 4f
SRC(1:	movw (%esi), %bx	)
	addl $2, %esi
DST(	movw %bx, (%edi)	)
	addl $2, %edi
	addw %bx, %ax
	adcl $0, %eax
2:
	movl %ecx, FP(%esp)
	shrl $5, %ecx		# number of 32-byte chunks
	jz 2f
	testl %esi, %esi	# clears CF before the adc chain
SRC(1:	movl (%esi), %ebx	)
SRC(	movl 4(%esi), %edx	)
	adcl %ebx, %eax
DST(	movl %ebx, (%edi)	)
	adcl %edx, %eax
DST(	movl %edx, 4(%edi)	)

SRC(	movl 8(%esi), %ebx	)
SRC(	movl 12(%esi), %edx	)
	adcl %ebx, %eax
DST(	movl %ebx, 8(%edi)	)
	adcl %edx, %eax
DST(	movl %edx, 12(%edi)	)

SRC(	movl 16(%esi), %ebx	)
SRC(	movl 20(%esi), %edx	)
	adcl %ebx, %eax
DST(	movl %ebx, 16(%edi)	)
	adcl %edx, %eax
DST(	movl %edx, 20(%edi)	)

SRC(	movl 24(%esi), %ebx	)
SRC(	movl 28(%esi), %edx	)
	adcl %ebx, %eax
DST(	movl %ebx, 24(%edi)	)
	adcl %edx, %eax
DST(	movl %edx, 28(%edi)	)

	lea 32(%esi), %esi
	lea 32(%edi), %edi
	dec %ecx
	jne 1b
	adcl $0, %eax
2:	movl FP(%esp), %edx
	movl %edx, %ecx
	andl $0x1c, %edx	# remaining whole dwords
	je 4f
	shrl $2, %edx		# This clears CF
SRC(3:	movl (%esi), %ebx	)
	adcl %ebx, %eax
DST(	movl %ebx, (%edi)	)
	lea 4(%esi), %esi
	lea 4(%edi), %edi
	dec %edx
	jne 3b
	adcl $0, %eax
4:	andl $3, %ecx		# trailing 0-3 bytes
	jz 7f
	cmpl $2, %ecx
	jb 5f
SRC(	movw (%esi), %cx	)
	leal 2(%esi), %esi
DST(	movw %cx, (%edi)	)
	leal 2(%edi), %edi
	je 6f
	shll $16,%ecx
SRC(5:	movb (%esi), %cl	)
DST(	movb %cl, (%edi)	)
6:	addl %ecx, %eax
	adcl $0, %eax
7:
5000:

# Exception handler:
.section .fixup, "ax"

6001:
	movl ARGBASE+20(%esp), %ebx	# src_err_ptr
	movl $-EFAULT, (%ebx)

	# zero the complete destination - computing the rest
	# is too much work
	movl ARGBASE+8(%esp), %edi	# dst
	movl ARGBASE+12(%esp), %ecx	# len
	xorl %eax,%eax
	rep ; stosb

	jmp 5000b

6002:
	movl ARGBASE+24(%esp), %ebx	# dst_err_ptr
	movl $-EFAULT,(%ebx)
	jmp 5000b

.previous

	popl %ebx
	popl %esi
	popl %edi
	popl %ecx			# equivalent to addl $4,%esp
	ret
370 | |||
371 | #else | ||
372 | |||
373 | /* Version for PentiumII/PPro */ | ||
374 | |||
# ROUND1 starts a carry chain (addl); ROUND continues it (adcl).
# Each copies one dword from x(%esi) to x(%edi) while accumulating
# the checksum in %eax, with both accesses exception-protected.
#define ROUND1(x) \
	SRC(movl x(%esi), %ebx	)	;	\
	addl %ebx, %eax			;	\
	DST(movl %ebx, x(%edi)	)	;

#define ROUND(x) \
	SRC(movl x(%esi), %ebx	)	;	\
	adcl %ebx, %eax			;	\
	DST(movl %ebx, x(%edi)	)	;

#define ARGBASE 12

# PentiumII/PPro copy+checksum: computed jump into a 64-byte unrolled
# ROUND chain, with cache-warming byte reads through %edx.
csum_partial_copy_generic_i386:
	pushl %ebx
	pushl %edi
	pushl %esi
	movl ARGBASE+4(%esp),%esi	#src
	movl ARGBASE+8(%esp),%edi	#dst
	movl ARGBASE+12(%esp),%ecx	#len
	movl ARGBASE+16(%esp),%eax	#sum
#	movl %ecx, %edx
	movl %ecx, %ebx
	movl %esi, %edx
	shrl $6, %ecx			# number of full 64-byte chunks
	andl $0x3c, %ebx		# dwords handled by the partial entry
	negl %ebx
	subl %ebx, %esi
	subl %ebx, %edi
	lea -1(%esi),%edx
	andl $-32,%edx
	lea 3f(%ebx,%ebx), %ebx		# entry point inside the ROUND chain
	testl %esi, %esi		# clear CF
	jmp *%ebx
1:	addl $64,%esi
	addl $64,%edi
	SRC(movb -32(%edx),%bl)	; SRC(movb (%edx),%bl)	# touch cache lines
	ROUND1(-64) ROUND(-60) ROUND(-56) ROUND(-52)
	ROUND (-48) ROUND(-44) ROUND(-40) ROUND(-36)
	ROUND (-32) ROUND(-28) ROUND(-24) ROUND(-20)
	ROUND (-16) ROUND(-12) ROUND(-8)  ROUND(-4)
3:	adcl $0,%eax
	addl $64, %edx
	dec %ecx
	jge 1b
4:	movl ARGBASE+12(%esp),%edx	#len
	andl $3, %edx			# trailing 0-3 bytes
	jz 7f
	cmpl $2, %edx
	jb 5f
SRC(	movw (%esi), %dx	)
	leal 2(%esi), %esi
DST(	movw %dx, (%edi)	)
	leal 2(%edi), %edi
	je 6f
	shll $16,%edx
5:
SRC(	movb (%esi), %dl	)
DST(	movb %dl, (%edi)	)
6:	addl %edx, %eax
	adcl $0, %eax
7:
.section .fixup, "ax"
6001:	movl ARGBASE+20(%esp), %ebx	# src_err_ptr
	movl $-EFAULT, (%ebx)
	# zero the complete destination (computing the rest is too much work)
	movl ARGBASE+8(%esp),%edi	# dst
	movl ARGBASE+12(%esp),%ecx	# len
	xorl %eax,%eax
	rep; stosb
	jmp 7b
6002:	movl ARGBASE+24(%esp), %ebx	# dst_err_ptr
	movl $-EFAULT, (%ebx)
	jmp 7b
.previous

	popl %esi
	popl %edi
	popl %ebx
	ret

#undef ROUND
#undef ROUND1
457 | |||
458 | #endif | ||
diff --git a/arch/um/sys-x86/delay_32.c b/arch/um/sys-x86/delay_32.c new file mode 100644 index 000000000000..f3fe1a688f7e --- /dev/null +++ b/arch/um/sys-x86/delay_32.c | |||
@@ -0,0 +1,60 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2011 Richard Weinberger <richrd@nod.at> | ||
3 | * Mostly copied from arch/x86/lib/delay.c | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | |||
10 | #include <linux/module.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/delay.h> | ||
13 | #include <asm/param.h> | ||
14 | |||
/*
 * Busy-wait for roughly 'loops' iterations of the calibrated delay
 * loop.  Mostly copied from arch/x86/lib/delay.c; the .align 16
 * directives pad the loop head — presumably for stable loop timing,
 * matching the native implementation (NOTE: inherited, not re-derived).
 */
void __delay(unsigned long loops)
{
	asm volatile(
		"test %0,%0\n"
		"jz 3f\n"
		"jmp 1f\n"

		".align 16\n"
		"1: jmp 2f\n"

		".align 16\n"
		"2: dec %0\n"
		" jnz 2b\n"
		"3: dec %0\n"

		: /* we don't need output */
		: "a" (loops)
	);
}
EXPORT_SYMBOL(__delay);
35 | |||
/*
 * Convert a 2^32-based fixed-point fraction 'xloops' into delay-loop
 * iterations, scaled by the boot-time loops_per_jiffy calibration, and
 * busy-wait that long.  The mull keeps only the high 32 bits of
 * xloops * (loops_per_jiffy * HZ), i.e. the integer part of the product.
 * NOTE(review): 'inline' on an externally visible exported function is
 * unusual — inherited from arch/x86/lib/delay.c; confirm it is intended.
 */
inline void __const_udelay(unsigned long xloops)
{
	int d0;

	xloops *= 4;
	asm("mull %%edx"
		: "=d" (xloops), "=&a" (d0)
		: "1" (xloops), "0"
		(loops_per_jiffy * (HZ/4)));

	/* +1 rounds up so we never delay less than requested. */
	__delay(++xloops);
}
EXPORT_SYMBOL(__const_udelay);
49 | |||
/* Busy-wait for 'usecs' microseconds. */
void __udelay(unsigned long usecs)
{
	__const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}
EXPORT_SYMBOL(__udelay);

/* Busy-wait for 'nsecs' nanoseconds. */
void __ndelay(unsigned long nsecs)
{
	__const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
}
EXPORT_SYMBOL(__ndelay);
diff --git a/arch/um/sys-x86/delay_64.c b/arch/um/sys-x86/delay_64.c new file mode 100644 index 000000000000..f3fe1a688f7e --- /dev/null +++ b/arch/um/sys-x86/delay_64.c | |||
@@ -0,0 +1,60 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2011 Richard Weinberger <richrd@nod.at> | ||
3 | * Mostly copied from arch/x86/lib/delay.c | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | |||
10 | #include <linux/module.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/delay.h> | ||
13 | #include <asm/param.h> | ||
14 | |||
/*
 * Busy-wait for roughly 'loops' delay-loop iterations.
 * NOTE(review): this file is a byte-for-byte duplicate of delay_32.c;
 * consider merging the two into one shared delay.c.
 */
void __delay(unsigned long loops)
{
	asm volatile(
		"test %0,%0\n"
		"jz 3f\n"
		"jmp 1f\n"

		".align 16\n"
		"1: jmp 2f\n"

		".align 16\n"
		"2: dec %0\n"
		" jnz 2b\n"
		"3: dec %0\n"

		: /* we don't need output */
		: "a" (loops)
	);
}
EXPORT_SYMBOL(__delay);
35 | |||
/*
 * Scale the 2^32-based fraction 'xloops' by loops_per_jiffy * HZ (the
 * mull keeps the high 32 bits, i.e. the integer part) and busy-wait.
 * NOTE(review): 32-bit 'mull' on x86_64 — works on the low register
 * halves, but confirm the truncation of loops_per_jiffy is acceptable.
 */
inline void __const_udelay(unsigned long xloops)
{
	int d0;

	xloops *= 4;
	asm("mull %%edx"
		: "=d" (xloops), "=&a" (d0)
		: "1" (xloops), "0"
		(loops_per_jiffy * (HZ/4)));

	/* +1 rounds up so we never delay less than requested. */
	__delay(++xloops);
}
EXPORT_SYMBOL(__const_udelay);
49 | |||
/* Busy-wait for 'usecs' microseconds. */
void __udelay(unsigned long usecs)
{
	__const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}
EXPORT_SYMBOL(__udelay);

/* Busy-wait for 'nsecs' nanoseconds. */
void __ndelay(unsigned long nsecs)
{
	__const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
}
EXPORT_SYMBOL(__ndelay);
diff --git a/arch/um/sys-x86/elfcore.c b/arch/um/sys-x86/elfcore.c new file mode 100644 index 000000000000..6bb49b687c97 --- /dev/null +++ b/arch/um/sys-x86/elfcore.c | |||
@@ -0,0 +1,83 @@ | |||
1 | #include <linux/elf.h> | ||
2 | #include <linux/coredump.h> | ||
3 | #include <linux/fs.h> | ||
4 | #include <linux/mm.h> | ||
5 | |||
6 | #include <asm/elf.h> | ||
7 | |||
8 | |||
9 | Elf32_Half elf_core_extra_phdrs(void) | ||
10 | { | ||
11 | return vsyscall_ehdr ? (((struct elfhdr *)vsyscall_ehdr)->e_phnum) : 0; | ||
12 | } | ||
13 | |||
/*
 * Emit program headers for the vsyscall ELF image into the core dump.
 * PT_LOAD headers are given fresh file offsets starting at 'offset'
 * (advanced by each segment's p_filesz); other headers are shifted by
 * the offset of the last PT_LOAD seen.  '*size' is the running dump
 * size, checked against 'limit'.  Returns 1 on success, 0 when the
 * limit is exceeded or a write fails.
 */
int elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size,
			       unsigned long limit)
{
	if ( vsyscall_ehdr ) {
		const struct elfhdr *const ehdrp =
			(struct elfhdr *) vsyscall_ehdr;
		const struct elf_phdr *const phdrp =
			(const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff);
		int i;
		Elf32_Off ofs = 0;

		for (i = 0; i < ehdrp->e_phnum; ++i) {
			/* Work on a copy; the mapped image stays untouched. */
			struct elf_phdr phdr = phdrp[i];

			if (phdr.p_type == PT_LOAD) {
				ofs = phdr.p_offset = offset;
				offset += phdr.p_filesz;
			} else {
				phdr.p_offset += ofs;
			}
			phdr.p_paddr = 0; /* match other core phdrs */
			*size += sizeof(phdr);
			if (*size > limit
			    || !dump_write(file, &phdr, sizeof(phdr)))
				return 0;
		}
	}
	return 1;
}
43 | |||
/*
 * Write the contents of every PT_LOAD segment of the vsyscall ELF
 * image into the core dump, straight from its mapped address.
 * '*size' is the running dump size, checked against 'limit'.
 * Returns 1 on success, 0 when the limit is exceeded or a write fails.
 */
int elf_core_write_extra_data(struct file *file, size_t *size,
			      unsigned long limit)
{
	if ( vsyscall_ehdr ) {
		const struct elfhdr *const ehdrp =
			(struct elfhdr *) vsyscall_ehdr;
		const struct elf_phdr *const phdrp =
			(const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff);
		int i;

		for (i = 0; i < ehdrp->e_phnum; ++i) {
			if (phdrp[i].p_type == PT_LOAD) {
				void *addr = (void *) phdrp[i].p_vaddr;
				size_t filesz = phdrp[i].p_filesz;

				*size += filesz;
				if (*size > limit
				    || !dump_write(file, addr, filesz))
					return 0;
			}
		}
	}
	return 1;
}
68 | |||
69 | size_t elf_core_extra_data_size(void) | ||
70 | { | ||
71 | if ( vsyscall_ehdr ) { | ||
72 | const struct elfhdr *const ehdrp = | ||
73 | (struct elfhdr *)vsyscall_ehdr; | ||
74 | const struct elf_phdr *const phdrp = | ||
75 | (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff); | ||
76 | int i; | ||
77 | |||
78 | for (i = 0; i < ehdrp->e_phnum; ++i) | ||
79 | if (phdrp[i].p_type == PT_LOAD) | ||
80 | return (size_t) phdrp[i].p_filesz; | ||
81 | } | ||
82 | return 0; | ||
83 | } | ||
diff --git a/arch/um/sys-x86/fault.c b/arch/um/sys-x86/fault.c new file mode 100644 index 000000000000..d670f68532f4 --- /dev/null +++ b/arch/um/sys-x86/fault.c | |||
@@ -0,0 +1,28 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #include "sysdep/ptrace.h" | ||
7 | |||
/* These two are from asm-um/uaccess.h and linux/module.h, check them. */
/* One exception-table record: faulting insn address and its fixup address. */
struct exception_table_entry
{
	unsigned long insn;
	unsigned long fixup;
};

/* NOTE(review): parameter 'add' looks like a typo for 'addr'. */
const struct exception_table_entry *search_exception_tables(unsigned long add);
16 | |||
17 | /* Compare this to arch/i386/mm/extable.c:fixup_exception() */ | ||
18 | int arch_fixup(unsigned long address, struct uml_pt_regs *regs) | ||
19 | { | ||
20 | const struct exception_table_entry *fixup; | ||
21 | |||
22 | fixup = search_exception_tables(address); | ||
23 | if (fixup != 0) { | ||
24 | UPT_IP(regs) = fixup->fixup; | ||
25 | return 1; | ||
26 | } | ||
27 | return 0; | ||
28 | } | ||
diff --git a/arch/um/sys-x86/ksyms.c b/arch/um/sys-x86/ksyms.c new file mode 100644 index 000000000000..2e8f43ec6214 --- /dev/null +++ b/arch/um/sys-x86/ksyms.c | |||
@@ -0,0 +1,13 @@ | |||
#include <linux/module.h>
#include <asm/string.h>
#include <asm/checksum.h>

#ifndef CONFIG_X86_32
/*XXX: we need them because they would be exported by x86_64 */
/*
 * gcc >= 4.3 presumably emits calls to memcpy directly, older gccs to
 * __memcpy — TODO confirm; the version split mirrors the native x86_64
 * export.
 */
#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
EXPORT_SYMBOL(memcpy);
#else
EXPORT_SYMBOL(__memcpy);
#endif
#endif
/* Implemented in checksum_32.S (32-bit) / lib/csum-partial_64.o (64-bit). */
EXPORT_SYMBOL(csum_partial);
diff --git a/arch/um/sys-x86/ldt.c b/arch/um/sys-x86/ldt.c new file mode 100644 index 000000000000..3f2bf208d884 --- /dev/null +++ b/arch/um/sys-x86/ldt.c | |||
@@ -0,0 +1,502 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #include <linux/mm.h> | ||
7 | #include <linux/sched.h> | ||
8 | #include <linux/slab.h> | ||
9 | #include <asm/unistd.h> | ||
10 | #include "os.h" | ||
11 | #include "proc_mm.h" | ||
12 | #include "skas.h" | ||
13 | #include "skas_ptrace.h" | ||
14 | #include "sysdep/tls.h" | ||
15 | |||
16 | extern int modify_ldt(int func, void *ptr, unsigned long bytecount); | ||
17 | |||
/*
 * Install (or clear) one LDT entry in the host address space identified
 * by @mm_idp.  @func and @desc are passed through to the host's
 * modify_ldt(); *@addr carries stub-data state between batched calls and
 * @done flags the last call of a batch.  Returns 0 or a negative errno.
 */
static long write_ldt_entry(struct mm_id *mm_idp, int func,
			    struct user_desc *desc, void **addr, int done)
{
	long res;

	if (proc_mm) {
		/*
		 * This is a special handling for the case, that the mm to
		 * modify isn't current->active_mm.
		 * If this is called directly by modify_ldt,
		 * (current->active_mm->context.skas.u == mm_idp)
		 * will be true. So no call to __switch_mm(mm_idp) is done.
		 * If this is called in case of init_new_ldt or PTRACE_LDT,
		 * mm_idp won't belong to current->active_mm, but child->mm.
		 * So we need to switch child's mm into our userspace, then
		 * later switch back.
		 *
		 * Note: I'm unsure: should interrupts be disabled here?
		 */
		if (!current->active_mm || current->active_mm == &init_mm ||
		    mm_idp != &current->active_mm->context.id)
			__switch_mm(mm_idp);
	}

	if (ptrace_ldt) {
		/* Host supports PTRACE_LDT: drive modify_ldt via ptrace. */
		struct ptrace_ldt ldt_op = (struct ptrace_ldt) {
			.func = func,
			.ptr = desc,
			.bytecount = sizeof(*desc)};
		u32 cpu;
		int pid;

		if (!proc_mm)
			pid = mm_idp->u.pid;
		else {
			cpu = get_cpu();
			pid = userspace_pid[cpu];
		}

		res = os_ptrace_ldt(pid, 0, (unsigned long) &ldt_op);

		if (proc_mm)
			put_cpu();
	}
	else {
		/*
		 * No PTRACE_LDT: copy the (long-aligned) descriptor into the
		 * stub data page and run modify_ldt through the syscall stub.
		 */
		void *stub_addr;
		res = syscall_stub_data(mm_idp, (unsigned long *)desc,
					(sizeof(*desc) + sizeof(long) - 1) &
					    ~(sizeof(long) - 1),
					addr, &stub_addr);
		if (!res) {
			unsigned long args[] = { func,
						 (unsigned long)stub_addr,
						 sizeof(*desc),
						 0, 0, 0 };
			res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
					       0, addr, done);
		}
	}

	if (proc_mm) {
		/*
		 * This is the second part of special handling, that makes
		 * PTRACE_LDT possible to implement.
		 */
		if (current->active_mm && current->active_mm != &init_mm &&
		    mm_idp != &current->active_mm->context.id)
			__switch_mm(&current->active_mm->context.id);
	}

	return res;
}
90 | |||
/*
 * Read the LDT directly from the host via PTRACE_LDT and copy up to
 * @bytecount bytes to userspace @ptr.  Only used when the host supports
 * PTRACE_LDT.  Returns the byte count filled in by the host, or a
 * negative errno.
 */
static long read_ldt_from_host(void __user * ptr, unsigned long bytecount)
{
	int res, n;
	struct ptrace_ldt ptrace_ldt = (struct ptrace_ldt) {
		.func = 0,
		.bytecount = bytecount,
		.ptr = kmalloc(bytecount, GFP_KERNEL)};
	u32 cpu;

	if (ptrace_ldt.ptr == NULL)
		return -ENOMEM;

	/*
	 * This is called from sys_modify_ldt only, so userspace_pid gives
	 * us the right number
	 */

	cpu = get_cpu();
	res = os_ptrace_ldt(userspace_pid[cpu], 0, (unsigned long) &ptrace_ldt);
	put_cpu();
	if (res < 0)
		goto out;

	/* res is the number of bytes the host actually provided. */
	n = copy_to_user(ptr, ptrace_ldt.ptr, res);
	if (n != 0)
		res = -EFAULT;

out:
	kfree(ptrace_ldt.ptr);

	return res;
}
123 | |||
124 | /* | ||
125 | * In skas mode, we hold our own ldt data in UML. | ||
126 | * Thus, the code implementing sys_modify_ldt_skas | ||
127 | * is very similar to (and mostly stolen from) sys_modify_ldt | ||
128 | * for arch/i386/kernel/ldt.c | ||
129 | * The routines copied and modified in part are: | ||
130 | * - read_ldt | ||
131 | * - read_default_ldt | ||
132 | * - write_ldt | ||
133 | * - sys_modify_ldt_skas | ||
134 | */ | ||
135 | |||
/*
 * modify_ldt(func = 0): copy up to @bytecount bytes of the current LDT
 * to userspace @ptr, zero-filling the remainder of the buffer.  Reads
 * from the host when PTRACE_LDT is available, otherwise from the local
 * shadow copy.  Returns the byte count on success, negative errno on
 * failure.
 */
static int read_ldt(void __user * ptr, unsigned long bytecount)
{
	int i, err = 0;
	unsigned long size;
	uml_ldt_t * ldt = &current->mm->context.ldt;

	if (!ldt->entry_count)
		goto out;
	if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
	err = bytecount;

	if (ptrace_ldt)
		return read_ldt_from_host(ptr, bytecount);

	mutex_lock(&ldt->lock);
	if (ldt->entry_count <= LDT_DIRECT_ENTRIES) {
		/* Small LDTs are stored inline in the mm context. */
		size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
		if (size > bytecount)
			size = bytecount;
		if (copy_to_user(ptr, ldt->u.entries, size))
			err = -EFAULT;
		bytecount -= size;
		ptr += size;
	}
	else {
		/* Larger LDTs are spread over whole pages. */
		for (i=0; i<ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount;
		     i++) {
			size = PAGE_SIZE;
			if (size > bytecount)
				size = bytecount;
			if (copy_to_user(ptr, ldt->u.pages[i], size)) {
				err = -EFAULT;
				break;
			}
			bytecount -= size;
			ptr += size;
		}
	}
	mutex_unlock(&ldt->lock);

	if (bytecount == 0 || err == -EFAULT)
		goto out;

	/* Zero-fill the caller's buffer beyond the stored entries. */
	if (clear_user(ptr, bytecount))
		err = -EFAULT;

out:
	return err;
}
186 | |||
187 | static int read_default_ldt(void __user * ptr, unsigned long bytecount) | ||
188 | { | ||
189 | int err; | ||
190 | |||
191 | if (bytecount > 5*LDT_ENTRY_SIZE) | ||
192 | bytecount = 5*LDT_ENTRY_SIZE; | ||
193 | |||
194 | err = bytecount; | ||
195 | /* | ||
196 | * UML doesn't support lcall7 and lcall27. | ||
197 | * So, we don't really have a default ldt, but emulate | ||
198 | * an empty ldt of common host default ldt size. | ||
199 | */ | ||
200 | if (clear_user(ptr, bytecount)) | ||
201 | err = -EFAULT; | ||
202 | |||
203 | return err; | ||
204 | } | ||
205 | |||
/*
 * modify_ldt(func = 1 or 0x11): validate the user_desc at @ptr, install
 * it in the host LDT and mirror it into the local shadow copy, growing
 * the page-based shadow storage when the entry number outgrows the
 * direct entries.  Returns 0 on success or a negative errno.
 */
static int write_ldt(void __user * ptr, unsigned long bytecount, int func)
{
	uml_ldt_t * ldt = &current->mm->context.ldt;
	struct mm_id * mm_idp = &current->mm->context.id;
	int i, err;
	struct user_desc ldt_info;
	struct ldt_entry entry0, *ldt_p;
	void *addr = NULL;

	err = -EINVAL;
	if (bytecount != sizeof(ldt_info))
		goto out;
	err = -EFAULT;
	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
		goto out;

	err = -EINVAL;
	if (ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
	if (ldt_info.contents == 3) {
		/* contents == 3 is only accepted as a not-present entry. */
		if (func == 1)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}

	if (!ptrace_ldt)
		mutex_lock(&ldt->lock);

	err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
	/*
	 * NOTE(review): if write_ldt_entry() fails while ptrace_ldt is set,
	 * the goto below unlocks a mutex that was never taken -- confirm
	 * whether the ptrace_ldt path is ever exercised.
	 */
	if (err)
		goto out_unlock;
	else if (ptrace_ldt) {
		/* With PTRACE_LDT available, this is used as a flag only */
		ldt->entry_count = 1;
		goto out;
	}

	if (ldt_info.entry_number >= ldt->entry_count &&
	    ldt_info.entry_number >= LDT_DIRECT_ENTRIES) {
		/* Grow the shadow LDT page by page up to the new entry. */
		for (i=ldt->entry_count/LDT_ENTRIES_PER_PAGE;
		     i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number;
		     i++) {
			if (i == 0)
				memcpy(&entry0, ldt->u.entries,
				       sizeof(entry0));
			ldt->u.pages[i] = (struct ldt_entry *)
				__get_free_page(GFP_KERNEL|__GFP_ZERO);
			if (!ldt->u.pages[i]) {
				err = -ENOMEM;
				/* Undo the change in host */
				memset(&ldt_info, 0, sizeof(ldt_info));
				write_ldt_entry(mm_idp, 1, &ldt_info, &addr, 1);
				goto out_unlock;
			}
			if (i == 0) {
				/* Migrate the direct entries onto page 0. */
				memcpy(ldt->u.pages[0], &entry0,
				       sizeof(entry0));
				memcpy(ldt->u.pages[0]+1, ldt->u.entries+1,
				       sizeof(entry0)*(LDT_DIRECT_ENTRIES-1));
			}
			ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE;
		}
	}
	if (ldt->entry_count <= ldt_info.entry_number)
		ldt->entry_count = ldt_info.entry_number + 1;

	/* Locate the shadow slot for this entry number. */
	if (ldt->entry_count <= LDT_DIRECT_ENTRIES)
		ldt_p = ldt->u.entries + ldt_info.entry_number;
	else
		ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] +
			ldt_info.entry_number%LDT_ENTRIES_PER_PAGE;

	if (ldt_info.base_addr == 0 && ldt_info.limit == 0 &&
	   (func == 1 || LDT_empty(&ldt_info))) {
		/* An "empty" request clears the descriptor. */
		ldt_p->a = 0;
		ldt_p->b = 0;
	}
	else{
		if (func == 1)
			ldt_info.useable = 0;
		ldt_p->a = LDT_entry_a(&ldt_info);
		ldt_p->b = LDT_entry_b(&ldt_info);
	}
	err = 0;

out_unlock:
	mutex_unlock(&ldt->lock);
out:
	return err;
}
297 | |||
298 | static long do_modify_ldt_skas(int func, void __user *ptr, | ||
299 | unsigned long bytecount) | ||
300 | { | ||
301 | int ret = -ENOSYS; | ||
302 | |||
303 | switch (func) { | ||
304 | case 0: | ||
305 | ret = read_ldt(ptr, bytecount); | ||
306 | break; | ||
307 | case 1: | ||
308 | case 0x11: | ||
309 | ret = write_ldt(ptr, bytecount, func); | ||
310 | break; | ||
311 | case 2: | ||
312 | ret = read_default_ldt(ptr, bytecount); | ||
313 | break; | ||
314 | } | ||
315 | return ret; | ||
316 | } | ||
317 | |||
/* Serializes the lazy host-LDT probe in ldt_get_host_info(). */
static DEFINE_SPINLOCK(host_ldt_lock);
/* Fallback lists: dummy_list = {entry 0}, dummy_list+1 = empty. */
static short dummy_list[9] = {0, -1};
/* -1 terminated list of populated host LDT slots; NULL until probed. */
static short * host_ldt_entries = NULL;
321 | |||
/*
 * Probe the host's LDT once and record which slots are populated in
 * host_ldt_entries (-1 terminated); init_new_ldt() clears exactly those
 * slots when setting up a fresh address space.
 */
static void ldt_get_host_info(void)
{
	long ret;
	struct ldt_entry * ldt;
	short *tmp;
	int i, size, k, order;

	spin_lock(&host_ldt_lock);

	if (host_ldt_entries != NULL) {
		/* Already probed. */
		spin_unlock(&host_ldt_lock);
		return;
	}
	/*
	 * Publish the empty list as a "probe in progress" marker.
	 * NOTE(review): the lock is dropped before the real list is filled
	 * in below, so a concurrent caller could observe the empty list --
	 * confirm this only runs during early, single-threaded init.
	 */
	host_ldt_entries = dummy_list+1;

	spin_unlock(&host_ldt_lock);

	/* Smallest page order covering LDT_PAGES_MAX pages. */
	for (i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++)
		;

	ldt = (struct ldt_entry *)
	      __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
	if (ldt == NULL) {
		printk(KERN_ERR "ldt_get_host_info: couldn't allocate buffer "
		       "for host ldt\n");
		return;
	}

	ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE);
	if (ret < 0) {
		printk(KERN_ERR "ldt_get_host_info: couldn't read host ldt\n");
		goto out_free;
	}
	if (ret == 0) {
		/* default_ldt is active, simply write an empty entry 0 */
		host_ldt_entries = dummy_list;
		goto out_free;
	}

	/* Count the populated descriptors. */
	for (i=0, size=0; i<ret/LDT_ENTRY_SIZE; i++) {
		if (ldt[i].a != 0 || ldt[i].b != 0)
			size++;
	}

	if (size < ARRAY_SIZE(dummy_list))
		host_ldt_entries = dummy_list;
	else {
		/* One extra slot for the -1 terminator. */
		size = (size + 1) * sizeof(dummy_list[0]);
		tmp = kmalloc(size, GFP_KERNEL);
		if (tmp == NULL) {
			printk(KERN_ERR "ldt_get_host_info: couldn't allocate "
			       "host ldt list\n");
			goto out_free;
		}
		host_ldt_entries = tmp;
	}

	/* Record the slot numbers of all populated descriptors. */
	for (i=0, k=0; i<ret/LDT_ENTRY_SIZE; i++) {
		if (ldt[i].a != 0 || ldt[i].b != 0)
			host_ldt_entries[k++] = i;
	}
	host_ldt_entries[k] = -1;

out_free:
	free_pages((unsigned long)ldt, order);
}
388 | |||
/*
 * Set up the LDT of a freshly created mm.  Without @from_mm, the LDT
 * inherited from the host is wiped clean; with @from_mm, the parent's
 * LDT is copied both on the host side (via /proc/mm MM_COPY_SEGMENTS
 * when proc_mm is available) and into the local shadow copy.
 * Returns 0 on success or a negative errno.
 */
long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm)
{
	struct user_desc desc;
	short * num_p;
	int i;
	long page, err=0;
	void *addr = NULL;
	struct proc_mm_op copy;


	if (!ptrace_ldt)
		mutex_init(&new_mm->ldt.lock);

	if (!from_mm) {
		memset(&desc, 0, sizeof(desc));
		/*
		 * We have to initialize a clean ldt.
		 */
		if (proc_mm) {
			/*
			 * If the new mm was created using proc_mm, host's
			 * default-ldt currently is assigned, which normally
			 * contains the call-gates for lcall7 and lcall27.
			 * To remove these gates, we simply write an empty
			 * entry as number 0 to the host.
			 */
			err = write_ldt_entry(&new_mm->id, 1, &desc, &addr, 1);
		}
		else{
			/*
			 * Now we try to retrieve info about the ldt, we
			 * inherited from the host. All ldt-entries found
			 * will be reset in the following loop
			 */
			ldt_get_host_info();
			for (num_p=host_ldt_entries; *num_p != -1; num_p++) {
				desc.entry_number = *num_p;
				err = write_ldt_entry(&new_mm->id, 1, &desc,
						      &addr, *(num_p + 1) == -1);
				if (err)
					break;
			}
		}
		new_mm->ldt.entry_count = 0;

		goto out;
	}

	if (proc_mm) {
		/*
		 * We have a valid from_mm, so we now have to copy the LDT of
		 * from_mm to new_mm, because using proc_mm an new mm with
		 * an empty/default LDT was created in new_mm()
		 */
		copy = ((struct proc_mm_op) { .op = MM_COPY_SEGMENTS,
					      .u =
					      { .copy_segments =
							from_mm->id.u.mm_fd } } );
		i = os_write_file(new_mm->id.u.mm_fd, &copy, sizeof(copy));
		if (i != sizeof(copy))
			printk(KERN_ERR "new_mm : /proc/mm copy_segments "
			       "failed, err = %d\n", -i);
	}

	if (!ptrace_ldt) {
		/*
		 * Our local LDT is used to supply the data for
		 * modify_ldt(READLDT), if PTRACE_LDT isn't available,
		 * i.e., we have to use the stub for modify_ldt, which
		 * can't handle the big read buffer of up to 64kB.
		 */
		mutex_lock(&from_mm->ldt.lock);
		if (from_mm->ldt.entry_count <= LDT_DIRECT_ENTRIES)
			memcpy(new_mm->ldt.u.entries, from_mm->ldt.u.entries,
			       sizeof(new_mm->ldt.u.entries));
		else {
			/*
			 * NOTE(review): on allocation failure this breaks out
			 * with some pages missing while entry_count is still
			 * copied in full below -- confirm callers check the
			 * -ENOMEM return before trusting the shadow LDT.
			 */
			i = from_mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE;
			while (i-->0) {
				page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
				if (!page) {
					err = -ENOMEM;
					break;
				}
				new_mm->ldt.u.pages[i] =
					(struct ldt_entry *) page;
				memcpy(new_mm->ldt.u.pages[i],
				       from_mm->ldt.u.pages[i], PAGE_SIZE);
			}
		}
		new_mm->ldt.entry_count = from_mm->ldt.entry_count;
		mutex_unlock(&from_mm->ldt.lock);
	}

out:
	return err;
}
485 | |||
486 | |||
487 | void free_ldt(struct mm_context *mm) | ||
488 | { | ||
489 | int i; | ||
490 | |||
491 | if (!ptrace_ldt && mm->ldt.entry_count > LDT_DIRECT_ENTRIES) { | ||
492 | i = mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE; | ||
493 | while (i-- > 0) | ||
494 | free_page((long) mm->ldt.u.pages[i]); | ||
495 | } | ||
496 | mm->ldt.entry_count = 0; | ||
497 | } | ||
498 | |||
/* modify_ldt() syscall entry point; in skas mode all work is local. */
int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
{
	return do_modify_ldt_skas(func, ptr, bytecount);
}
diff --git a/arch/um/sys-x86/mem_32.c b/arch/um/sys-x86/mem_32.c new file mode 100644 index 000000000000..639900a6fde9 --- /dev/null +++ b/arch/um/sys-x86/mem_32.c | |||
@@ -0,0 +1,62 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2011 Richard Weinberger <richrd@nod.at> | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | #include <linux/mm.h> | ||
10 | #include <asm/page.h> | ||
11 | #include <asm/mman.h> | ||
12 | |||
/* The gate VMA covering the user fixmap area, set up once at boot. */
static struct vm_area_struct gate_vma;

static int __init gate_vma_init(void)
{
	/* No user fixmap area configured -> nothing to set up. */
	if (!FIXADDR_USER_START)
		return 0;

	gate_vma.vm_mm = NULL;
	gate_vma.vm_start = FIXADDR_USER_START;
	gate_vma.vm_end = FIXADDR_USER_END;
	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
	gate_vma.vm_page_prot = __P101;

	/*
	 * Make sure the vDSO gets into every core dump.
	 * Dumping its contents makes post-mortem fully interpretable later
	 * without matching up the same kernel and hardware config to see
	 * what PC values meant.
	 */
	gate_vma.vm_flags |= VM_ALWAYSDUMP;

	return 0;
}
__initcall(gate_vma_init);
37 | |||
38 | struct vm_area_struct *get_gate_vma(struct mm_struct *mm) | ||
39 | { | ||
40 | return FIXADDR_USER_START ? &gate_vma : NULL; | ||
41 | } | ||
42 | |||
43 | int in_gate_area_no_mm(unsigned long addr) | ||
44 | { | ||
45 | if (!FIXADDR_USER_START) | ||
46 | return 0; | ||
47 | |||
48 | if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END)) | ||
49 | return 1; | ||
50 | |||
51 | return 0; | ||
52 | } | ||
53 | |||
54 | int in_gate_area(struct mm_struct *mm, unsigned long addr) | ||
55 | { | ||
56 | struct vm_area_struct *vma = get_gate_vma(mm); | ||
57 | |||
58 | if (!vma) | ||
59 | return 0; | ||
60 | |||
61 | return (addr >= vma->vm_start) && (addr < vma->vm_end); | ||
62 | } | ||
diff --git a/arch/um/sys-x86/mem_64.c b/arch/um/sys-x86/mem_64.c new file mode 100644 index 000000000000..546518727a73 --- /dev/null +++ b/arch/um/sys-x86/mem_64.c | |||
@@ -0,0 +1,26 @@ | |||
1 | #include "linux/mm.h" | ||
2 | #include "asm/page.h" | ||
3 | #include "asm/mman.h" | ||
4 | |||
5 | const char *arch_vma_name(struct vm_area_struct *vma) | ||
6 | { | ||
7 | if (vma->vm_mm && vma->vm_start == um_vdso_addr) | ||
8 | return "[vdso]"; | ||
9 | |||
10 | return NULL; | ||
11 | } | ||
12 | |||
/* x86_64 UML has no gate VMA; the vDSO is an ordinary mapping. */
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}
17 | |||
/* No gate VMA on x86_64 UML, so no address is ever in it. */
int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return 0;
}
22 | |||
/* mm-less variant: likewise always false on x86_64 UML. */
int in_gate_area_no_mm(unsigned long addr)
{
	return 0;
}
diff --git a/arch/um/sys-x86/ptrace_32.c b/arch/um/sys-x86/ptrace_32.c new file mode 100644 index 000000000000..a174fde2531c --- /dev/null +++ b/arch/um/sys-x86/ptrace_32.c | |||
@@ -0,0 +1,273 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #include "linux/mm.h" | ||
7 | #include "linux/sched.h" | ||
8 | #include "asm/uaccess.h" | ||
9 | #include "skas.h" | ||
10 | |||
11 | extern int arch_switch_tls(struct task_struct *to); | ||
12 | |||
13 | void arch_switch_to(struct task_struct *to) | ||
14 | { | ||
15 | int err = arch_switch_tls(to); | ||
16 | if (!err) | ||
17 | return; | ||
18 | |||
19 | if (err != -EINVAL) | ||
20 | printk(KERN_WARNING "arch_switch_tls failed, errno %d, " | ||
21 | "not EINVAL\n", -err); | ||
22 | else | ||
23 | printk(KERN_WARNING "arch_switch_tls failed, errno = EINVAL\n"); | ||
24 | } | ||
25 | |||
/*
 * Does the instruction at user address @addr look like a system-call
 * entry ("int 0x80" or "sysenter")?  Used during singlestepping.
 * Returns 1 on a read failure as well (treated as a syscall).
 */
int is_syscall(unsigned long addr)
{
	unsigned short instr;
	int n;

	n = copy_from_user(&instr, (void __user *) addr, sizeof(instr));
	if (n) {
		/* access_process_vm() grants access to vsyscall and stub,
		 * while copy_from_user doesn't. Maybe access_process_vm is
		 * slow, but that doesn't matter, since it will be called only
		 * in case of singlestepping, if copy_from_user failed.
		 */
		n = access_process_vm(current, addr, &instr, sizeof(instr), 0);
		if (n != sizeof(instr)) {
			printk(KERN_ERR "is_syscall : failed to read "
			       "instruction from 0x%lx\n", addr);
			return 1;
		}
	}
	/* int 0x80 or sysenter */
	return (instr == 0x80cd) || (instr == 0x340f);
}
48 | |||
/* determines which flags the user has access to. */
/* 1 = access 0 = no access */
#define FLAG_MASK 0x00044dd5

/* Map struct user register indices to slots in the host gp[] array. */
static const int reg_offsets[] = {
	[EBX] = HOST_EBX,
	[ECX] = HOST_ECX,
	[EDX] = HOST_EDX,
	[ESI] = HOST_ESI,
	[EDI] = HOST_EDI,
	[EBP] = HOST_EBP,
	[EAX] = HOST_EAX,
	[DS] = HOST_DS,
	[ES] = HOST_ES,
	[FS] = HOST_FS,
	[GS] = HOST_GS,
	[EIP] = HOST_IP,
	[CS] = HOST_CS,
	[EFL] = HOST_EFLAGS,
	[UESP] = HOST_SP,
	[SS] = HOST_SS,
};
71 | |||
72 | int putreg(struct task_struct *child, int regno, unsigned long value) | ||
73 | { | ||
74 | regno >>= 2; | ||
75 | switch (regno) { | ||
76 | case EBX: | ||
77 | case ECX: | ||
78 | case EDX: | ||
79 | case ESI: | ||
80 | case EDI: | ||
81 | case EBP: | ||
82 | case EAX: | ||
83 | case EIP: | ||
84 | case UESP: | ||
85 | break; | ||
86 | case FS: | ||
87 | if (value && (value & 3) != 3) | ||
88 | return -EIO; | ||
89 | break; | ||
90 | case GS: | ||
91 | if (value && (value & 3) != 3) | ||
92 | return -EIO; | ||
93 | break; | ||
94 | case DS: | ||
95 | case ES: | ||
96 | if (value && (value & 3) != 3) | ||
97 | return -EIO; | ||
98 | value &= 0xffff; | ||
99 | break; | ||
100 | case SS: | ||
101 | case CS: | ||
102 | if ((value & 3) != 3) | ||
103 | return -EIO; | ||
104 | value &= 0xffff; | ||
105 | break; | ||
106 | case EFL: | ||
107 | value &= FLAG_MASK; | ||
108 | child->thread.regs.regs.gp[HOST_EFLAGS] |= value; | ||
109 | return 0; | ||
110 | case ORIG_EAX: | ||
111 | child->thread.regs.regs.syscall = value; | ||
112 | return 0; | ||
113 | default : | ||
114 | panic("Bad register in putreg() : %d\n", regno); | ||
115 | } | ||
116 | child->thread.regs.regs.gp[reg_offsets[regno]] = value; | ||
117 | return 0; | ||
118 | } | ||
119 | |||
120 | int poke_user(struct task_struct *child, long addr, long data) | ||
121 | { | ||
122 | if ((addr & 3) || addr < 0) | ||
123 | return -EIO; | ||
124 | |||
125 | if (addr < MAX_REG_OFFSET) | ||
126 | return putreg(child, addr, data); | ||
127 | else if ((addr >= offsetof(struct user, u_debugreg[0])) && | ||
128 | (addr <= offsetof(struct user, u_debugreg[7]))) { | ||
129 | addr -= offsetof(struct user, u_debugreg[0]); | ||
130 | addr = addr >> 2; | ||
131 | if ((addr == 4) || (addr == 5)) | ||
132 | return -EIO; | ||
133 | child->thread.arch.debugregs[addr] = data; | ||
134 | return 0; | ||
135 | } | ||
136 | return -EIO; | ||
137 | } | ||
138 | |||
139 | unsigned long getreg(struct task_struct *child, int regno) | ||
140 | { | ||
141 | unsigned long mask = ~0UL; | ||
142 | |||
143 | regno >>= 2; | ||
144 | switch (regno) { | ||
145 | case ORIG_EAX: | ||
146 | return child->thread.regs.regs.syscall; | ||
147 | case FS: | ||
148 | case GS: | ||
149 | case DS: | ||
150 | case ES: | ||
151 | case SS: | ||
152 | case CS: | ||
153 | mask = 0xffff; | ||
154 | break; | ||
155 | case EIP: | ||
156 | case UESP: | ||
157 | case EAX: | ||
158 | case EBX: | ||
159 | case ECX: | ||
160 | case EDX: | ||
161 | case ESI: | ||
162 | case EDI: | ||
163 | case EBP: | ||
164 | case EFL: | ||
165 | break; | ||
166 | default: | ||
167 | panic("Bad register in getreg() : %d\n", regno); | ||
168 | } | ||
169 | return mask & child->thread.regs.regs.gp[reg_offsets[regno]]; | ||
170 | } | ||
171 | |||
172 | /* read the word at location addr in the USER area. */ | ||
173 | int peek_user(struct task_struct *child, long addr, long data) | ||
174 | { | ||
175 | unsigned long tmp; | ||
176 | |||
177 | if ((addr & 3) || addr < 0) | ||
178 | return -EIO; | ||
179 | |||
180 | tmp = 0; /* Default return condition */ | ||
181 | if (addr < MAX_REG_OFFSET) { | ||
182 | tmp = getreg(child, addr); | ||
183 | } | ||
184 | else if ((addr >= offsetof(struct user, u_debugreg[0])) && | ||
185 | (addr <= offsetof(struct user, u_debugreg[7]))) { | ||
186 | addr -= offsetof(struct user, u_debugreg[0]); | ||
187 | addr = addr >> 2; | ||
188 | tmp = child->thread.arch.debugregs[addr]; | ||
189 | } | ||
190 | return put_user(tmp, (unsigned long __user *) data); | ||
191 | } | ||
192 | |||
193 | static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) | ||
194 | { | ||
195 | int err, n, cpu = ((struct thread_info *) child->stack)->cpu; | ||
196 | struct user_i387_struct fpregs; | ||
197 | |||
198 | err = save_fp_registers(userspace_pid[cpu], (unsigned long *) &fpregs); | ||
199 | if (err) | ||
200 | return err; | ||
201 | |||
202 | n = copy_to_user(buf, &fpregs, sizeof(fpregs)); | ||
203 | if(n > 0) | ||
204 | return -EFAULT; | ||
205 | |||
206 | return n; | ||
207 | } | ||
208 | |||
209 | static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) | ||
210 | { | ||
211 | int n, cpu = ((struct thread_info *) child->stack)->cpu; | ||
212 | struct user_i387_struct fpregs; | ||
213 | |||
214 | n = copy_from_user(&fpregs, buf, sizeof(fpregs)); | ||
215 | if (n > 0) | ||
216 | return -EFAULT; | ||
217 | |||
218 | return restore_fp_registers(userspace_pid[cpu], | ||
219 | (unsigned long *) &fpregs); | ||
220 | } | ||
221 | |||
222 | static int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child) | ||
223 | { | ||
224 | int err, n, cpu = ((struct thread_info *) child->stack)->cpu; | ||
225 | struct user_fxsr_struct fpregs; | ||
226 | |||
227 | err = save_fpx_registers(userspace_pid[cpu], (unsigned long *) &fpregs); | ||
228 | if (err) | ||
229 | return err; | ||
230 | |||
231 | n = copy_to_user(buf, &fpregs, sizeof(fpregs)); | ||
232 | if(n > 0) | ||
233 | return -EFAULT; | ||
234 | |||
235 | return n; | ||
236 | } | ||
237 | |||
238 | static int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child) | ||
239 | { | ||
240 | int n, cpu = ((struct thread_info *) child->stack)->cpu; | ||
241 | struct user_fxsr_struct fpregs; | ||
242 | |||
243 | n = copy_from_user(&fpregs, buf, sizeof(fpregs)); | ||
244 | if (n > 0) | ||
245 | return -EFAULT; | ||
246 | |||
247 | return restore_fpx_registers(userspace_pid[cpu], | ||
248 | (unsigned long *) &fpregs); | ||
249 | } | ||
250 | |||
251 | long subarch_ptrace(struct task_struct *child, long request, | ||
252 | unsigned long addr, unsigned long data) | ||
253 | { | ||
254 | int ret = -EIO; | ||
255 | void __user *datap = (void __user *) data; | ||
256 | switch (request) { | ||
257 | case PTRACE_GETFPREGS: /* Get the child FPU state. */ | ||
258 | ret = get_fpregs(datap, child); | ||
259 | break; | ||
260 | case PTRACE_SETFPREGS: /* Set the child FPU state. */ | ||
261 | ret = set_fpregs(datap, child); | ||
262 | break; | ||
263 | case PTRACE_GETFPXREGS: /* Get the child FPU state. */ | ||
264 | ret = get_fpxregs(datap, child); | ||
265 | break; | ||
266 | case PTRACE_SETFPXREGS: /* Set the child FPU state. */ | ||
267 | ret = set_fpxregs(datap, child); | ||
268 | break; | ||
269 | default: | ||
270 | ret = -EIO; | ||
271 | } | ||
272 | return ret; | ||
273 | } | ||
diff --git a/arch/um/sys-x86/ptrace_64.c b/arch/um/sys-x86/ptrace_64.c new file mode 100644 index 000000000000..44e68e0c0d10 --- /dev/null +++ b/arch/um/sys-x86/ptrace_64.c | |||
@@ -0,0 +1,271 @@ | |||
1 | /* | ||
2 | * Copyright 2003 PathScale, Inc. | ||
3 | * Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
4 | * | ||
5 | * Licensed under the GPL | ||
6 | */ | ||
7 | |||
8 | #include <linux/mm.h> | ||
9 | #include <linux/sched.h> | ||
10 | #include <linux/errno.h> | ||
11 | #define __FRAME_OFFSETS | ||
12 | #include <asm/ptrace.h> | ||
13 | #include <asm/uaccess.h> | ||
14 | |||
15 | /* | ||
16 | * determines which flags the user has access to. | ||
17 | * 1 = access 0 = no access | ||
18 | */ | ||
19 | #define FLAG_MASK 0x44dd5UL | ||
20 | |||
/*
 * Map a ptrace register offset (byte offset into the user-visible
 * register layout, hence indexed by offset >> 3 — each slot is 8
 * bytes) to the corresponding HOST_* slot in the saved gp[] array.
 */
static const int reg_offsets[] =
{
	[R8 >> 3] = HOST_R8,
	[R9 >> 3] = HOST_R9,
	[R10 >> 3] = HOST_R10,
	[R11 >> 3] = HOST_R11,
	[R12 >> 3] = HOST_R12,
	[R13 >> 3] = HOST_R13,
	[R14 >> 3] = HOST_R14,
	[R15 >> 3] = HOST_R15,
	[RIP >> 3] = HOST_IP,
	[RSP >> 3] = HOST_SP,
	[RAX >> 3] = HOST_RAX,
	[RBX >> 3] = HOST_RBX,
	[RCX >> 3] = HOST_RCX,
	[RDX >> 3] = HOST_RDX,
	[RSI >> 3] = HOST_RSI,
	[RDI >> 3] = HOST_RDI,
	[RBP >> 3] = HOST_RBP,
	[CS >> 3] = HOST_CS,
	[SS >> 3] = HOST_SS,
	[FS_BASE >> 3] = HOST_FS_BASE,
	[GS_BASE >> 3] = HOST_GS_BASE,
	[DS >> 3] = HOST_DS,
	[ES >> 3] = HOST_ES,
	[FS >> 3] = HOST_FS,
	[GS >> 3] = HOST_GS,
	[EFLAGS >> 3] = HOST_EFLAGS,
	[ORIG_RAX >> 3] = HOST_ORIG_RAX,
};
51 | |||
/*
 * Write one register of a traced child.  'regno' is the byte offset
 * into the user-visible register layout (so reg_offsets is indexed
 * with regno >> 3).  Returns 0 on success, -EIO for values that would
 * be unsafe to load into the host process.
 */
int putreg(struct task_struct *child, int regno, unsigned long value)
{
#ifdef TIF_IA32
	/*
	 * Some code in the 64bit emulation may not be 64bit clean.
	 * Don't take any chances.
	 */
	if (test_tsk_thread_flag(child, TIF_IA32))
		value &= 0xffffffff;
#endif
	switch (regno) {
	case R8:
	case R9:
	case R10:
	case R11:
	case R12:
	case R13:
	case R14:
	case R15:
	case RIP:
	case RSP:
	case RAX:
	case RBX:
	case RCX:
	case RDX:
	case RSI:
	case RDI:
	case RBP:
	case ORIG_RAX:
		/* Plain GP registers: stored as-is below */
		break;

	case FS:
	case GS:
	case DS:
	case ES:
	case SS:
	case CS:
		/* Segment selectors must be null or have RPL 3 (user mode) */
		if (value && (value & 3) != 3)
			return -EIO;
		value &= 0xffff;
		break;

	case FS_BASE:
	case GS_BASE:
		/* Base addresses must be canonical (bits 63:48 all equal) */
		if (!((value >> 48) == 0 || (value >> 48) == 0xffff))
			return -EIO;
		break;

	case EFLAGS:
		/*
		 * Only user-modifiable flags are accepted, and they are
		 * ORed into the saved flags — nothing is cleared here.
		 */
		value &= FLAG_MASK;
		child->thread.regs.regs.gp[HOST_EFLAGS] |= value;
		return 0;

	default:
		panic("Bad register in putreg(): %d\n", regno);
	}

	child->thread.regs.regs.gp[reg_offsets[regno >> 3]] = value;
	return 0;
}
112 | |||
/*
 * ptrace POKEUSR: write one word into the USER area.  Offsets below
 * MAX_REG_OFFSET address registers (via putreg()); offsets in the
 * u_debugreg range go to the per-task debugregs array.  Debug register
 * slots 4 and 5 are reserved and rejected.
 */
int poke_user(struct task_struct *child, long addr, long data)
{
	/* USER-area offsets must be word-aligned and non-negative */
	if ((addr & 3) || addr < 0)
		return -EIO;

	if (addr < MAX_REG_OFFSET)
		return putreg(child, addr, data);
	else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
		(addr <= offsetof(struct user, u_debugreg[7]))) {
		addr -= offsetof(struct user, u_debugreg[0]);
		addr = addr >> 2;
		if ((addr == 4) || (addr == 5))
			return -EIO;
		child->thread.arch.debugregs[addr] = data;
		return 0;
	}
	return -EIO;
}
131 | |||
/*
 * Read one register of a traced child.  'regno' is the byte offset
 * into the user-visible register layout.  Segment selectors are
 * truncated to 16 bits; under IA32 emulation everything is truncated
 * to 32 bits.  Unknown offsets panic (callers validate the range).
 */
unsigned long getreg(struct task_struct *child, int regno)
{
	unsigned long mask = ~0UL;
#ifdef TIF_IA32
	if (test_tsk_thread_flag(child, TIF_IA32))
		mask = 0xffffffff;
#endif
	switch (regno) {
	case R8:
	case R9:
	case R10:
	case R11:
	case R12:
	case R13:
	case R14:
	case R15:
	case RIP:
	case RSP:
	case RAX:
	case RBX:
	case RCX:
	case RDX:
	case RSI:
	case RDI:
	case RBP:
	case ORIG_RAX:
	case EFLAGS:
	case FS_BASE:
	case GS_BASE:
		break;
	case FS:
	case GS:
	case DS:
	case ES:
	case SS:
	case CS:
		/* Selectors are only 16 bits wide */
		mask = 0xffff;
		break;
	default:
		panic("Bad register in getreg: %d\n", regno);
	}
	return mask & child->thread.regs.regs.gp[reg_offsets[regno >> 3]];
}
175 | |||
/*
 * ptrace PEEKUSR: read the word at location addr in the USER area and
 * store it at user address 'data'.  Aligned offsets that are neither a
 * register nor a debug register read back as 0.
 */
int peek_user(struct task_struct *child, long addr, long data)
{
	/* read the word at location addr in the USER area. */
	unsigned long tmp;

	if ((addr & 3) || addr < 0)
		return -EIO;

	tmp = 0;  /* Default return condition */
	if (addr < MAX_REG_OFFSET)
		tmp = getreg(child, addr);
	else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
		 (addr <= offsetof(struct user, u_debugreg[7]))) {
		addr -= offsetof(struct user, u_debugreg[0]);
		addr = addr >> 2;
		tmp = child->thread.arch.debugregs[addr];
	}
	return put_user(tmp, (unsigned long *) data);
}
195 | |||
/* XXX Mostly copied from sys-i386 */
/*
 * Return non-zero if the instruction at 'addr' is a system call entry.
 * Also returns 1 (conservatively) if the instruction cannot be read.
 */
int is_syscall(unsigned long addr)
{
	unsigned short instr;
	int n;

	n = copy_from_user(&instr, (void __user *) addr, sizeof(instr));
	if (n) {
		/*
		 * access_process_vm() grants access to vsyscall and stub,
		 * while copy_from_user doesn't. Maybe access_process_vm is
		 * slow, but that doesn't matter, since it will be called only
		 * in case of singlestepping, if copy_from_user failed.
		 */
		n = access_process_vm(current, addr, &instr, sizeof(instr), 0);
		if (n != sizeof(instr)) {
			printk("is_syscall : failed to read instruction from "
			       "0x%lx\n", addr);
			return 1;
		}
	}
	/* 0x050f is the two-byte "syscall" opcode (0f 05), little-endian */
	return instr == 0x050f;
}
220 | |||
/*
 * Read the classic i387 FP state of the host process backing 'child'
 * and copy it to the tracer's buffer.  Returns 0 on success, a
 * negative error from save_fp_registers(), or -EFAULT.
 */
static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
{
	int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
	long fpregs[HOST_FP_SIZE];

	/* The raw host buffer must match the exported struct exactly */
	BUG_ON(sizeof(*buf) != sizeof(fpregs));
	err = save_fp_registers(userspace_pid[cpu], fpregs);
	if (err)
		return err;

	n = copy_to_user(buf, fpregs, sizeof(fpregs));
	if (n > 0)
		return -EFAULT;

	return n;
}
237 | |||
/*
 * Load tracer-supplied i387 FP state into the host process backing
 * 'child'.  Returns -EFAULT on a faulting user copy, otherwise the
 * result of restore_fp_registers().
 */
static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
{
	int n, cpu = ((struct thread_info *) child->stack)->cpu;
	long fpregs[HOST_FP_SIZE];

	/* The raw host buffer must match the exported struct exactly */
	BUG_ON(sizeof(*buf) != sizeof(fpregs));
	n = copy_from_user(fpregs, buf, sizeof(fpregs));
	if (n > 0)
		return -EFAULT;

	return restore_fp_registers(userspace_pid[cpu], fpregs);
}
250 | |||
/*
 * Architecture-specific ptrace requests for the 64-bit subarch:
 * FP register get/set plus PTRACE_ARCH_PRCTL (fs/gs base handling).
 * Anything else is -EIO.
 */
long subarch_ptrace(struct task_struct *child, long request,
		    unsigned long addr, unsigned long data)
{
	int ret = -EIO;
	void __user *datap = (void __user *) data;

	switch (request) {
	case PTRACE_GETFPREGS: /* Get the child FPU state. */
		ret = get_fpregs(datap, child);
		break;
	case PTRACE_SETFPREGS: /* Set the child FPU state. */
		ret = set_fpregs(datap, child);
		break;
	case PTRACE_ARCH_PRCTL:
		/* XXX Calls ptrace on the host - needs some SMP thinking */
		ret = arch_prctl(child, data, (void __user *) addr);
		break;
	}

	return ret;
}
diff --git a/arch/um/sys-x86/ptrace_user.c b/arch/um/sys-x86/ptrace_user.c new file mode 100644 index 000000000000..3960ca1dd35a --- /dev/null +++ b/arch/um/sys-x86/ptrace_user.c | |||
@@ -0,0 +1,21 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #include <errno.h> | ||
7 | #include "ptrace_user.h" | ||
8 | |||
/*
 * Fetch the general registers of host process 'pid' into regs_out.
 * Returns 0 on success or -errno from the host ptrace() call.
 */
int ptrace_getregs(long pid, unsigned long *regs_out)
{
	long err = ptrace(PTRACE_GETREGS, pid, 0, regs_out);

	return (err < 0) ? -errno : 0;
}
15 | |||
/*
 * Write the general registers of host process 'pid' from 'regs'.
 * Returns 0 on success or -errno from the host ptrace() call.
 */
int ptrace_setregs(long pid, unsigned long *regs)
{
	long err = ptrace(PTRACE_SETREGS, pid, 0, regs);

	return (err < 0) ? -errno : 0;
}
diff --git a/arch/um/sys-x86/setjmp_32.S b/arch/um/sys-x86/setjmp_32.S new file mode 100644 index 000000000000..b766792c9933 --- /dev/null +++ b/arch/um/sys-x86/setjmp_32.S | |||
@@ -0,0 +1,58 @@ | |||
1 | # | ||
2 | # arch/i386/setjmp.S | ||
3 | # | ||
4 | # setjmp/longjmp for the i386 architecture | ||
5 | # | ||
6 | |||
7 | # | ||
8 | # The jmp_buf is assumed to contain the following, in order: | ||
9 | # %ebx | ||
10 | # %esp | ||
11 | # %ebp | ||
12 | # %esi | ||
13 | # %edi | ||
14 | # <return address> | ||
15 | # | ||
16 | |||
.text
	.align 4
	.globl setjmp
	.type setjmp, @function
setjmp:
#ifdef _REGPARM
	movl %eax,%edx		# regparm calling convention: buffer in %eax
#else
	movl 4(%esp),%edx	# jmp_buf pointer from the stack
#endif
	popl %ecx		# Return address, and adjust the stack
	xorl %eax,%eax		# Return value
	movl %ebx,(%edx)
	movl %esp,4(%edx)	# Post-return %esp!
	pushl %ecx		# Make the call/return stack happy
	movl %ebp,8(%edx)
	movl %esi,12(%edx)
	movl %edi,16(%edx)
	movl %ecx,20(%edx)	# Return address
	ret

	.size setjmp,.-setjmp
39 | |||
.text
	.align 4
	.globl longjmp
	.type longjmp, @function
longjmp:
#ifdef _REGPARM
	xchgl %eax,%edx		# regparm: %eax=jmp_ptr, %edx=return value; swap
#else
	movl 4(%esp),%edx	# jmp_ptr address
	movl 8(%esp),%eax	# Return value
#endif
	movl (%edx),%ebx	# restore callee-saved registers and %esp
	movl 4(%edx),%esp
	movl 8(%edx),%ebp
	movl 12(%edx),%esi
	movl 16(%edx),%edi
	jmp *20(%edx)		# resume at the saved return address

	.size longjmp,.-longjmp
diff --git a/arch/um/sys-x86/setjmp_64.S b/arch/um/sys-x86/setjmp_64.S new file mode 100644 index 000000000000..45f547b4043e --- /dev/null +++ b/arch/um/sys-x86/setjmp_64.S | |||
@@ -0,0 +1,54 @@ | |||
1 | # | ||
2 | # arch/x86_64/setjmp.S | ||
3 | # | ||
4 | # setjmp/longjmp for the x86-64 architecture | ||
5 | # | ||
6 | |||
7 | # | ||
8 | # The jmp_buf is assumed to contain the following, in order: | ||
9 | # %rbx | ||
10 | # %rsp (post-return) | ||
11 | # %rbp | ||
12 | # %r12 | ||
13 | # %r13 | ||
14 | # %r14 | ||
15 | # %r15 | ||
16 | # <return address> | ||
17 | # | ||
18 | |||
.text
	.align 4
	.globl setjmp
	.type setjmp, @function
setjmp:
	pop  %rsi		# Return address, and adjust the stack
	xorl %eax,%eax		# Return value
	movq %rbx,(%rdi)	# %rdi = jmp_buf (first argument, SysV ABI)
	movq %rsp,8(%rdi)	# Post-return %rsp!
	push %rsi		# Make the call/return stack happy
	movq %rbp,16(%rdi)
	movq %r12,24(%rdi)
	movq %r13,32(%rdi)
	movq %r14,40(%rdi)
	movq %r15,48(%rdi)
	movq %rsi,56(%rdi)	# Return address
	ret

	.size setjmp,.-setjmp
38 | |||
.text
	.align 4
	.globl longjmp
	.type longjmp, @function
longjmp:
	movl %esi,%eax		# Return value (int): second argument
	movq (%rdi),%rbx	# %rdi = jmp_buf; restore callee-saved regs
	movq 8(%rdi),%rsp
	movq 16(%rdi),%rbp
	movq 24(%rdi),%r12
	movq 32(%rdi),%r13
	movq 40(%rdi),%r14
	movq 48(%rdi),%r15
	jmp *56(%rdi)		# resume at the saved return address

	.size longjmp,.-longjmp
diff --git a/arch/um/sys-x86/signal_32.c b/arch/um/sys-x86/signal_32.c new file mode 100644 index 000000000000..bcbfb0d64813 --- /dev/null +++ b/arch/um/sys-x86/signal_32.c | |||
@@ -0,0 +1,498 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #include <linux/ptrace.h> | ||
7 | #include <asm/unistd.h> | ||
8 | #include <asm/uaccess.h> | ||
9 | #include <asm/ucontext.h> | ||
10 | #include "frame_kern.h" | ||
11 | #include "skas.h" | ||
12 | |||
13 | /* | ||
14 | * FPU tag word conversions. | ||
15 | */ | ||
16 | |||
/*
 * Convert a classic i387 tag word (2 bits per register: valid/zero/
 * special/empty) to the compressed FXSR form (1 bit per register:
 * in-use/empty), packed into the low byte.
 */
static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
	unsigned int bits = ~twd;	/* 32-bit temp avoids 16-bit operand prefixes */

	/* Collapse each 2-bit tag to 01 (in use) or 00 (empty, tag 11) */
	bits = (bits | (bits >> 1)) & 0x5555;	/* 0V0V0V0V0V0V0V0V */
	/* Funnel the eight valid bits down into the low byte */
	bits = (bits | (bits >> 1)) & 0x3333;	/* 00VV00VV00VV00VV */
	bits = (bits | (bits >> 2)) & 0x0f0f;	/* 0000VVVV0000VVVV */
	bits = (bits | (bits >> 4)) & 0x00ff;	/* 00000000VVVVVVVV */
	return bits;
}
30 | |||
/*
 * Expand the compressed FXSR tag word (1 bit per register) back to the
 * classic i387 form (2 bits per register).  The FXSR form only records
 * in-use/empty, so the full tag (valid/zero/special) is reconstructed
 * by inspecting each saved 80-bit register value.
 */
static inline unsigned long twd_fxsr_to_i387(struct user_fxsr_struct *fxsave)
{
	struct _fpxreg *st = NULL;
	unsigned long twd = (unsigned long) fxsave->twd;
	unsigned long tag;
	unsigned long ret = 0xffff0000;	/* upper half fixed, as i387 env expects */
	int i;

/* FXSR stores each register in a 16-byte slot */
#define FPREG_ADDR(f, n) ((char *)&(f)->st_space + (n) * 16)

	for (i = 0; i < 8; i++) {
		if (twd & 0x1) {
			st = (struct _fpxreg *) FPREG_ADDR(fxsave, i);

			switch (st->exponent & 0x7fff) {
			case 0x7fff:
				tag = 2;		/* Special: Inf/NaN */
				break;
			case 0x0000:
				if ( !st->significand[0] &&
				     !st->significand[1] &&
				     !st->significand[2] &&
				     !st->significand[3] ) {
					tag = 1;	/* Zero */
				} else {
					tag = 2;	/* Special: denormal */
				}
				break;
			default:
				if (st->significand[3] & 0x8000) {
					tag = 0;	/* Valid */
				} else {
					tag = 2;	/* Special: unnormal */
				}
				break;
			}
		} else {
			tag = 3;			/* Empty */
		}
		ret |= (tag << (2 * i));
		twd = twd >> 1;
	}
	return ret;
}
75 | |||
/*
 * Convert in-kernel FXSR state to the classic i387 _fpstate layout in
 * user space: the 7-word i387 environment followed by eight 10-byte
 * registers.  Returns 0 on success, 1 on any faulting user write.
 */
static int convert_fxsr_to_user(struct _fpstate __user *buf,
				struct user_fxsr_struct *fxsave)
{
	unsigned long env[7];
	struct _fpreg __user *to;
	struct _fpxreg *from;
	int i;

	/* Build the classic i387 environment image */
	env[0] = (unsigned long)fxsave->cwd | 0xffff0000ul;
	env[1] = (unsigned long)fxsave->swd | 0xffff0000ul;
	env[2] = twd_fxsr_to_i387(fxsave);
	env[3] = fxsave->fip;
	env[4] = fxsave->fcs | ((unsigned long)fxsave->fop << 16);
	env[5] = fxsave->foo;
	env[6] = fxsave->fos;

	if (__copy_to_user(buf, env, 7 * sizeof(unsigned long)))
		return 1;

	/* Copy each register: 16-byte FXSR slot -> packed 10-byte _fpreg */
	to = &buf->_st[0];
	from = (struct _fpxreg *) &fxsave->st_space[0];
	for (i = 0; i < 8; i++, to++, from++) {
		unsigned long __user *t = (unsigned long __user *)to;
		unsigned long *f = (unsigned long *)from;

		if (__put_user(*f, t) ||
		    __put_user(*(f + 1), t + 1) ||
		    __put_user(from->exponent, &to->exponent))
			return 1;
	}
	return 0;
}
108 | |||
/*
 * Inverse of convert_fxsr_to_user(): rebuild in-kernel FXSR state from
 * a classic i387 _fpstate image in user space.  Returns 0 on success,
 * 1 on any faulting user read.
 */
static int convert_fxsr_from_user(struct user_fxsr_struct *fxsave,
				  struct _fpstate __user *buf)
{
	unsigned long env[7];
	struct _fpxreg *to;
	struct _fpreg __user *from;
	int i;

	if (copy_from_user( env, buf, 7 * sizeof(long)))
		return 1;

	/* Unpack the i387 environment into FXSR fields */
	fxsave->cwd = (unsigned short)(env[0] & 0xffff);
	fxsave->swd = (unsigned short)(env[1] & 0xffff);
	fxsave->twd = twd_i387_to_fxsr((unsigned short)(env[2] & 0xffff));
	fxsave->fip = env[3];
	fxsave->fop = (unsigned short)((env[4] & 0xffff0000ul) >> 16);
	fxsave->fcs = (env[4] & 0xffff);
	fxsave->foo = env[5];
	fxsave->fos = env[6];

	/* Copy each register: packed 10-byte _fpreg -> 16-byte FXSR slot */
	to = (struct _fpxreg *) &fxsave->st_space[0];
	from = &buf->_st[0];
	for (i = 0; i < 8; i++, to++, from++) {
		unsigned long *t = (unsigned long *)to;
		unsigned long __user *f = (unsigned long __user *)from;

		if (__get_user(*t, f) ||
		    __get_user(*(t + 1), f + 1) ||
		    __get_user(to->exponent, &from->exponent))
			return 1;
	}
	return 0;
}
142 | |||
143 | extern int have_fpx_regs; | ||
144 | |||
/*
 * Restore register and FP state from a user-space sigcontext (used by
 * sigreturn).  GP registers go into 'regs'; FP state is pushed into
 * the host process via restore_fp{,x}_registers().  Returns 0 on
 * success, non-zero on failure.
 */
static int copy_sc_from_user(struct pt_regs *regs,
			     struct sigcontext __user *from)
{
	struct sigcontext sc;
	int err, pid;

	err = copy_from_user(&sc, from, sizeof(sc));
	if (err)
		return err;

	pid = userspace_pid[current_thread_info()->cpu];

#define GETREG(regno, regname) regs->regs.gp[HOST_##regno] = sc.regname

	GETREG(GS, gs);
	GETREG(FS, fs);
	GETREG(ES, es);
	GETREG(DS, ds);
	GETREG(EDI, di);
	GETREG(ESI, si);
	GETREG(EBP, bp);
	GETREG(SP, sp);
	GETREG(EBX, bx);
	GETREG(EDX, dx);
	GETREG(ECX, cx);
	GETREG(EAX, ax);
	GETREG(IP, ip);
	GETREG(CS, cs);
	GETREG(EFLAGS, flags);
	GETREG(SS, ss);

#undef GETREG
	if (have_fpx_regs) {
		struct user_fxsr_struct fpx;

		/* The FXSR image starts at _fxsr_env inside _fpstate */
		err = copy_from_user(&fpx,
			&((struct _fpstate __user *)sc.fpstate)->_fxsr_env[0],
			sizeof(struct user_fxsr_struct));
		if (err)
			return 1;

		/* Refresh the env/tag fields from the classic i387 part */
		err = convert_fxsr_from_user(&fpx, sc.fpstate);
		if (err)
			return 1;

		err = restore_fpx_registers(pid, (unsigned long *) &fpx);
		if (err < 0) {
			printk(KERN_ERR "copy_sc_from_user - "
			       "restore_fpx_registers failed, errno = %d\n",
			       -err);
			return 1;
		}
	} else {
		struct user_i387_struct fp;

		err = copy_from_user(&fp, sc.fpstate,
				     sizeof(struct user_i387_struct));
		if (err)
			return 1;

		err = restore_fp_registers(pid, (unsigned long *) &fp);
		if (err < 0) {
			printk(KERN_ERR "copy_sc_from_user - "
			       "restore_fp_registers failed, errno = %d\n",
			       -err);
			return 1;
		}
	}

	return 0;
}
216 | |||
217 | static int copy_sc_to_user(struct sigcontext __user *to, | ||
218 | struct _fpstate __user *to_fp, struct pt_regs *regs, | ||
219 | unsigned long sp) | ||
220 | { | ||
221 | struct sigcontext sc; | ||
222 | struct faultinfo * fi = ¤t->thread.arch.faultinfo; | ||
223 | int err, pid; | ||
224 | memset(&sc, 0, sizeof(struct sigcontext)); | ||
225 | |||
226 | sc.gs = REGS_GS(regs->regs.gp); | ||
227 | sc.fs = REGS_FS(regs->regs.gp); | ||
228 | sc.es = REGS_ES(regs->regs.gp); | ||
229 | sc.ds = REGS_DS(regs->regs.gp); | ||
230 | sc.di = REGS_EDI(regs->regs.gp); | ||
231 | sc.si = REGS_ESI(regs->regs.gp); | ||
232 | sc.bp = REGS_EBP(regs->regs.gp); | ||
233 | sc.sp = sp; | ||
234 | sc.bx = REGS_EBX(regs->regs.gp); | ||
235 | sc.dx = REGS_EDX(regs->regs.gp); | ||
236 | sc.cx = REGS_ECX(regs->regs.gp); | ||
237 | sc.ax = REGS_EAX(regs->regs.gp); | ||
238 | sc.ip = REGS_IP(regs->regs.gp); | ||
239 | sc.cs = REGS_CS(regs->regs.gp); | ||
240 | sc.flags = REGS_EFLAGS(regs->regs.gp); | ||
241 | sc.sp_at_signal = regs->regs.gp[UESP]; | ||
242 | sc.ss = regs->regs.gp[SS]; | ||
243 | sc.cr2 = fi->cr2; | ||
244 | sc.err = fi->error_code; | ||
245 | sc.trapno = fi->trap_no; | ||
246 | |||
247 | to_fp = (to_fp ? to_fp : (struct _fpstate __user *) (to + 1)); | ||
248 | sc.fpstate = to_fp; | ||
249 | |||
250 | pid = userspace_pid[current_thread_info()->cpu]; | ||
251 | if (have_fpx_regs) { | ||
252 | struct user_fxsr_struct fpx; | ||
253 | |||
254 | err = save_fpx_registers(pid, (unsigned long *) &fpx); | ||
255 | if (err < 0){ | ||
256 | printk(KERN_ERR "copy_sc_to_user - save_fpx_registers " | ||
257 | "failed, errno = %d\n", err); | ||
258 | return 1; | ||
259 | } | ||
260 | |||
261 | err = convert_fxsr_to_user(to_fp, &fpx); | ||
262 | if (err) | ||
263 | return 1; | ||
264 | |||
265 | err |= __put_user(fpx.swd, &to_fp->status); | ||
266 | err |= __put_user(X86_FXSR_MAGIC, &to_fp->magic); | ||
267 | if (err) | ||
268 | return 1; | ||
269 | |||
270 | if (copy_to_user(&to_fp->_fxsr_env[0], &fpx, | ||
271 | sizeof(struct user_fxsr_struct))) | ||
272 | return 1; | ||
273 | } | ||
274 | else { | ||
275 | struct user_i387_struct fp; | ||
276 | |||
277 | err = save_fp_registers(pid, (unsigned long *) &fp); | ||
278 | if (copy_to_user(to_fp, &fp, sizeof(struct user_i387_struct))) | ||
279 | return 1; | ||
280 | } | ||
281 | |||
282 | return copy_to_user(to, &sc, sizeof(sc)); | ||
283 | } | ||
284 | |||
/*
 * Fill in a user-space ucontext for an RT signal frame: sigaltstack
 * info, the machine context (via copy_sc_to_user()), and the blocked
 * signal mask.  Returns 0 on success, non-zero on any faulting write.
 */
static int copy_ucontext_to_user(struct ucontext __user *uc,
				 struct _fpstate __user *fp, sigset_t *set,
				 unsigned long sp)
{
	int err = 0;

	err |= put_user(current->sas_ss_sp, &uc->uc_stack.ss_sp);
	err |= put_user(sas_ss_flags(sp), &uc->uc_stack.ss_flags);
	err |= put_user(current->sas_ss_size, &uc->uc_stack.ss_size);
	err |= copy_sc_to_user(&uc->uc_mcontext, fp, &current->thread.regs, sp);
	err |= copy_to_user(&uc->uc_sigmask, set, sizeof(*set));
	return err;
}
298 | |||
/*
 * Classic (non-RT) signal frame laid out on the user stack.
 * 'retcode' holds a legacy sigreturn trampoline that is no longer
 * executed but is kept because gdb matches it to recognize signal
 * frames (see setup_signal_stack_sc()).
 */
struct sigframe
{
	char __user *pretcode;		/* handler return address */
	int sig;			/* signal number argument */
	struct sigcontext sc;
	struct _fpstate fpstate;
	unsigned long extramask[_NSIG_WORDS-1];
	char retcode[8];		/* legacy trampoline, gdb signature */
};

/*
 * RT signal frame: additionally carries a full siginfo and ucontext,
 * with pointers to both passed to the three-argument handler.
 */
struct rt_sigframe
{
	char __user *pretcode;		/* handler return address */
	int sig;			/* signal number argument */
	struct siginfo __user *pinfo;	/* -> info below */
	void __user *puc;		/* -> uc below */
	struct siginfo info;
	struct ucontext uc;
	struct _fpstate fpstate;
	char retcode[8];		/* legacy trampoline, gdb signature */
};
320 | |||
/*
 * Lay down a classic (non-RT) signal frame on the user stack and
 * redirect the registers to the handler.  Returns 0 on success; on
 * failure the original SP is restored and a non-zero value returned.
 */
int setup_signal_stack_sc(unsigned long stack_top, int sig,
			  struct k_sigaction *ka, struct pt_regs *regs,
			  sigset_t *mask)
{
	struct sigframe __user *frame;
	void __user *restorer;
	unsigned long save_sp = PT_REGS_SP(regs);
	int err = 0;

	/* This is the same calculation as i386 - ((sp + 4) & 15) == 0 */
	stack_top = ((stack_top + 4) & -16UL) - 4;
	frame = (struct sigframe __user *) stack_top - 1;
	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return 1;

	/* Default return path is the in-frame trampoline, unless the
	   handler registered its own restorer. */
	restorer = frame->retcode;
	if (ka->sa.sa_flags & SA_RESTORER)
		restorer = ka->sa.sa_restorer;

	/* Update SP now because the page fault handler refuses to extend
	 * the stack if the faulting address is too far below the current
	 * SP, which frame now certainly is. If there's an error, the original
	 * value is restored on the way out.
	 * When writing the sigcontext to the stack, we have to write the
	 * original value, so that's passed to copy_sc_to_user, which does
	 * the right thing with it.
	 */
	PT_REGS_SP(regs) = (unsigned long) frame;

	err |= __put_user(restorer, &frame->pretcode);
	err |= __put_user(sig, &frame->sig);
	err |= copy_sc_to_user(&frame->sc, NULL, regs, save_sp);
	err |= __put_user(mask->sig[0], &frame->sc.oldmask);
	if (_NSIG_WORDS > 1)
		err |= __copy_to_user(&frame->extramask, &mask->sig[1],
				      sizeof(frame->extramask));

	/*
	 * This is popl %eax ; movl $,%eax ; int $0x80
	 *
	 * WE DO NOT USE IT ANY MORE! It's only left here for historical
	 * reasons and because gdb uses it as a signature to notice
	 * signal handler stack frames.
	 */
	err |= __put_user(0xb858, (short __user *)(frame->retcode+0));
	err |= __put_user(__NR_sigreturn, (int __user *)(frame->retcode+2));
	err |= __put_user(0x80cd, (short __user *)(frame->retcode+6));

	if (err)
		goto err;

	/* Enter the handler: SP at the frame, EIP at the handler, and the
	   i386 convention of EAX = signal number. */
	PT_REGS_SP(regs) = (unsigned long) frame;
	PT_REGS_IP(regs) = (unsigned long) ka->sa.sa_handler;
	PT_REGS_EAX(regs) = (unsigned long) sig;
	PT_REGS_EDX(regs) = (unsigned long) 0;
	PT_REGS_ECX(regs) = (unsigned long) 0;

	if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
		ptrace_notify(SIGTRAP);
	return 0;

err:
	PT_REGS_SP(regs) = save_sp;
	return err;
}
386 | |||
/*
 * Lay down an RT signal frame (with siginfo and ucontext) on the user
 * stack and redirect the registers to the handler.  Returns 0 on
 * success; on failure the original SP is restored and non-zero
 * returned.
 */
int setup_signal_stack_si(unsigned long stack_top, int sig,
			  struct k_sigaction *ka, struct pt_regs *regs,
			  siginfo_t *info, sigset_t *mask)
{
	struct rt_sigframe __user *frame;
	void __user *restorer;
	unsigned long save_sp = PT_REGS_SP(regs);
	int err = 0;

	stack_top &= -8UL;		/* 8-byte align the frame */
	frame = (struct rt_sigframe __user *) stack_top - 1;
	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return 1;

	restorer = frame->retcode;
	if (ka->sa.sa_flags & SA_RESTORER)
		restorer = ka->sa.sa_restorer;

	/* See comment above about why this is here */
	PT_REGS_SP(regs) = (unsigned long) frame;

	err |= __put_user(restorer, &frame->pretcode);
	err |= __put_user(sig, &frame->sig);
	err |= __put_user(&frame->info, &frame->pinfo);
	err |= __put_user(&frame->uc, &frame->puc);
	err |= copy_siginfo_to_user(&frame->info, info);
	err |= copy_ucontext_to_user(&frame->uc, &frame->fpstate, mask,
				     save_sp);

	/*
	 * This is movl $,%eax ; int $0x80
	 *
	 * WE DO NOT USE IT ANY MORE! It's only left here for historical
	 * reasons and because gdb uses it as a signature to notice
	 * signal handler stack frames.
	 */
	err |= __put_user(0xb8, (char __user *)(frame->retcode+0));
	err |= __put_user(__NR_rt_sigreturn, (int __user *)(frame->retcode+1));
	err |= __put_user(0x80cd, (short __user *)(frame->retcode+5));

	if (err)
		goto err;

	/* Enter the handler with the three-argument convention:
	   EAX = sig, EDX = &info, ECX = &uc. */
	PT_REGS_IP(regs) = (unsigned long) ka->sa.sa_handler;
	PT_REGS_EAX(regs) = (unsigned long) sig;
	PT_REGS_EDX(regs) = (unsigned long) &frame->info;
	PT_REGS_ECX(regs) = (unsigned long) &frame->uc;

	if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
		ptrace_notify(SIGTRAP);
	return 0;

err:
	PT_REGS_SP(regs) = save_sp;
	return err;
}
443 | |||
/*
 * sigreturn: restore the context saved by setup_signal_stack_sc().
 * The frame is located 8 bytes below the current SP (the pretcode and
 * sig slots precede it — TODO(review): confirm against the handler
 * return path).  Restores the blocked mask, then all registers.
 */
long sys_sigreturn(struct pt_regs regs)
{
	unsigned long sp = PT_REGS_SP(&current->thread.regs);
	struct sigframe __user *frame = (struct sigframe __user *)(sp - 8);
	sigset_t set;
	struct sigcontext __user *sc = &frame->sc;
	unsigned long __user *oldmask = &sc->oldmask;
	unsigned long __user *extramask = frame->extramask;
	int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long);

	/* Mask word 0 lives in the sigcontext, the rest in extramask */
	if (copy_from_user(&set.sig[0], oldmask, sizeof(set.sig[0])) ||
	    copy_from_user(&set.sig[1], extramask, sig_size))
		goto segfault;

	sigdelsetmask(&set, ~_BLOCKABLE);
	set_current_blocked(&set);

	if (copy_sc_from_user(&current->thread.regs, sc))
		goto segfault;

	/* Avoid ERESTART handling */
	PT_REGS_SYSCALL_NR(&current->thread.regs) = -1;
	return PT_REGS_SYSCALL_RET(&current->thread.regs);

segfault:
	force_sig(SIGSEGV, current);
	return 0;
}
472 | |||
/*
 * rt_sigreturn: restore the context saved by setup_signal_stack_si().
 * The frame is located 4 bytes below the current SP (the pretcode slot
 * precedes it).  Restores the full blocked mask from the ucontext,
 * then all registers.
 */
long sys_rt_sigreturn(struct pt_regs regs)
{
	unsigned long sp = PT_REGS_SP(&current->thread.regs);
	struct rt_sigframe __user *frame =
		(struct rt_sigframe __user *) (sp - 4);
	sigset_t set;
	struct ucontext __user *uc = &frame->uc;
	int sig_size = _NSIG_WORDS * sizeof(unsigned long);

	if (copy_from_user(&set, &uc->uc_sigmask, sig_size))
		goto segfault;

	sigdelsetmask(&set, ~_BLOCKABLE);
	set_current_blocked(&set);

	if (copy_sc_from_user(&current->thread.regs, &uc->uc_mcontext))
		goto segfault;

	/* Avoid ERESTART handling */
	PT_REGS_SYSCALL_NR(&current->thread.regs) = -1;
	return PT_REGS_SYSCALL_RET(&current->thread.regs);

segfault:
	force_sig(SIGSEGV, current);
	return 0;
}
diff --git a/arch/um/sys-x86/signal_64.c b/arch/um/sys-x86/signal_64.c new file mode 100644 index 000000000000..255b2ca0ce67 --- /dev/null +++ b/arch/um/sys-x86/signal_64.c | |||
@@ -0,0 +1,255 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2003 PathScale, Inc. | ||
3 | * Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
4 | * Licensed under the GPL | ||
5 | */ | ||
6 | |||
7 | #include <linux/personality.h> | ||
8 | #include <linux/ptrace.h> | ||
9 | #include <linux/kernel.h> | ||
10 | #include <asm/unistd.h> | ||
11 | #include <asm/uaccess.h> | ||
12 | #include <asm/ucontext.h> | ||
13 | #include "frame_kern.h" | ||
14 | #include "skas.h" | ||
15 | |||
16 | static int copy_sc_from_user(struct pt_regs *regs, | ||
17 | struct sigcontext __user *from) | ||
18 | { | ||
19 | struct sigcontext sc; | ||
20 | struct user_i387_struct fp; | ||
21 | void __user *buf; | ||
22 | int err; | ||
23 | |||
24 | err = copy_from_user(&sc, from, sizeof(sc)); | ||
25 | if (err) | ||
26 | return err; | ||
27 | |||
28 | #define GETREG(regno, regname) regs->regs.gp[HOST_##regno] = sc.regname | ||
29 | |||
30 | GETREG(R8, r8); | ||
31 | GETREG(R9, r9); | ||
32 | GETREG(R10, r10); | ||
33 | GETREG(R11, r11); | ||
34 | GETREG(R12, r12); | ||
35 | GETREG(R13, r13); | ||
36 | GETREG(R14, r14); | ||
37 | GETREG(R15, r15); | ||
38 | GETREG(RDI, di); | ||
39 | GETREG(RSI, si); | ||
40 | GETREG(RBP, bp); | ||
41 | GETREG(RBX, bx); | ||
42 | GETREG(RDX, dx); | ||
43 | GETREG(RAX, ax); | ||
44 | GETREG(RCX, cx); | ||
45 | GETREG(SP, sp); | ||
46 | GETREG(IP, ip); | ||
47 | GETREG(EFLAGS, flags); | ||
48 | GETREG(CS, cs); | ||
49 | #undef GETREG | ||
50 | |||
51 | buf = sc.fpstate; | ||
52 | |||
53 | err = copy_from_user(&fp, buf, sizeof(struct user_i387_struct)); | ||
54 | if (err) | ||
55 | return 1; | ||
56 | |||
57 | err = restore_fp_registers(userspace_pid[current_thread_info()->cpu], | ||
58 | (unsigned long *) &fp); | ||
59 | if (err < 0) { | ||
60 | printk(KERN_ERR "copy_sc_from_user - " | ||
61 | "restore_fp_registers failed, errno = %d\n", | ||
62 | -err); | ||
63 | return 1; | ||
64 | } | ||
65 | |||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | static int copy_sc_to_user(struct sigcontext __user *to, | ||
70 | struct _fpstate __user *to_fp, struct pt_regs *regs, | ||
71 | unsigned long mask, unsigned long sp) | ||
72 | { | ||
73 | struct faultinfo * fi = ¤t->thread.arch.faultinfo; | ||
74 | struct sigcontext sc; | ||
75 | struct user_i387_struct fp; | ||
76 | int err = 0; | ||
77 | memset(&sc, 0, sizeof(struct sigcontext)); | ||
78 | |||
79 | #define PUTREG(regno, regname) sc.regname = regs->regs.gp[HOST_##regno] | ||
80 | |||
81 | PUTREG(RDI, di); | ||
82 | PUTREG(RSI, si); | ||
83 | PUTREG(RBP, bp); | ||
84 | /* | ||
85 | * Must use original RSP, which is passed in, rather than what's in | ||
86 | * signal frame. | ||
87 | */ | ||
88 | sc.sp = sp; | ||
89 | PUTREG(RBX, bx); | ||
90 | PUTREG(RDX, dx); | ||
91 | PUTREG(RCX, cx); | ||
92 | PUTREG(RAX, ax); | ||
93 | PUTREG(R8, r8); | ||
94 | PUTREG(R9, r9); | ||
95 | PUTREG(R10, r10); | ||
96 | PUTREG(R11, r11); | ||
97 | PUTREG(R12, r12); | ||
98 | PUTREG(R13, r13); | ||
99 | PUTREG(R14, r14); | ||
100 | PUTREG(R15, r15); | ||
101 | PUTREG(CS, cs); /* XXX x86_64 doesn't do this */ | ||
102 | |||
103 | sc.cr2 = fi->cr2; | ||
104 | sc.err = fi->error_code; | ||
105 | sc.trapno = fi->trap_no; | ||
106 | |||
107 | PUTREG(IP, ip); | ||
108 | PUTREG(EFLAGS, flags); | ||
109 | #undef PUTREG | ||
110 | |||
111 | sc.oldmask = mask; | ||
112 | |||
113 | err = copy_to_user(to, &sc, sizeof(struct sigcontext)); | ||
114 | if (err) | ||
115 | return 1; | ||
116 | |||
117 | err = save_fp_registers(userspace_pid[current_thread_info()->cpu], | ||
118 | (unsigned long *) &fp); | ||
119 | if (err < 0) { | ||
120 | printk(KERN_ERR "copy_sc_from_user - restore_fp_registers " | ||
121 | "failed, errno = %d\n", -err); | ||
122 | return 1; | ||
123 | } | ||
124 | |||
125 | if (copy_to_user(to_fp, &fp, sizeof(struct user_i387_struct))) | ||
126 | return 1; | ||
127 | |||
128 | return err; | ||
129 | } | ||
130 | |||
131 | struct rt_sigframe | ||
132 | { | ||
133 | char __user *pretcode; | ||
134 | struct ucontext uc; | ||
135 | struct siginfo info; | ||
136 | struct _fpstate fpstate; | ||
137 | }; | ||
138 | |||
139 | int setup_signal_stack_si(unsigned long stack_top, int sig, | ||
140 | struct k_sigaction *ka, struct pt_regs * regs, | ||
141 | siginfo_t *info, sigset_t *set) | ||
142 | { | ||
143 | struct rt_sigframe __user *frame; | ||
144 | unsigned long save_sp = PT_REGS_RSP(regs); | ||
145 | int err = 0; | ||
146 | struct task_struct *me = current; | ||
147 | |||
148 | frame = (struct rt_sigframe __user *) | ||
149 | round_down(stack_top - sizeof(struct rt_sigframe), 16); | ||
150 | /* Subtract 128 for a red zone and 8 for proper alignment */ | ||
151 | frame = (struct rt_sigframe __user *) ((unsigned long) frame - 128 - 8); | ||
152 | |||
153 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | ||
154 | goto out; | ||
155 | |||
156 | if (ka->sa.sa_flags & SA_SIGINFO) { | ||
157 | err |= copy_siginfo_to_user(&frame->info, info); | ||
158 | if (err) | ||
159 | goto out; | ||
160 | } | ||
161 | |||
162 | /* | ||
163 | * Update SP now because the page fault handler refuses to extend | ||
164 | * the stack if the faulting address is too far below the current | ||
165 | * SP, which frame now certainly is. If there's an error, the original | ||
166 | * value is restored on the way out. | ||
167 | * When writing the sigcontext to the stack, we have to write the | ||
168 | * original value, so that's passed to copy_sc_to_user, which does | ||
169 | * the right thing with it. | ||
170 | */ | ||
171 | PT_REGS_RSP(regs) = (unsigned long) frame; | ||
172 | |||
173 | /* Create the ucontext. */ | ||
174 | err |= __put_user(0, &frame->uc.uc_flags); | ||
175 | err |= __put_user(0, &frame->uc.uc_link); | ||
176 | err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp); | ||
177 | err |= __put_user(sas_ss_flags(save_sp), | ||
178 | &frame->uc.uc_stack.ss_flags); | ||
179 | err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size); | ||
180 | err |= copy_sc_to_user(&frame->uc.uc_mcontext, &frame->fpstate, regs, | ||
181 | set->sig[0], save_sp); | ||
182 | err |= __put_user(&frame->fpstate, &frame->uc.uc_mcontext.fpstate); | ||
183 | if (sizeof(*set) == 16) { | ||
184 | __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]); | ||
185 | __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]); | ||
186 | } | ||
187 | else | ||
188 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, | ||
189 | sizeof(*set)); | ||
190 | |||
191 | /* | ||
192 | * Set up to return from userspace. If provided, use a stub | ||
193 | * already in userspace. | ||
194 | */ | ||
195 | /* x86-64 should always use SA_RESTORER. */ | ||
196 | if (ka->sa.sa_flags & SA_RESTORER) | ||
197 | err |= __put_user(ka->sa.sa_restorer, &frame->pretcode); | ||
198 | else | ||
199 | /* could use a vstub here */ | ||
200 | goto restore_sp; | ||
201 | |||
202 | if (err) | ||
203 | goto restore_sp; | ||
204 | |||
205 | /* Set up registers for signal handler */ | ||
206 | { | ||
207 | struct exec_domain *ed = current_thread_info()->exec_domain; | ||
208 | if (unlikely(ed && ed->signal_invmap && sig < 32)) | ||
209 | sig = ed->signal_invmap[sig]; | ||
210 | } | ||
211 | |||
212 | PT_REGS_RDI(regs) = sig; | ||
213 | /* In case the signal handler was declared without prototypes */ | ||
214 | PT_REGS_RAX(regs) = 0; | ||
215 | |||
216 | /* | ||
217 | * This also works for non SA_SIGINFO handlers because they expect the | ||
218 | * next argument after the signal number on the stack. | ||
219 | */ | ||
220 | PT_REGS_RSI(regs) = (unsigned long) &frame->info; | ||
221 | PT_REGS_RDX(regs) = (unsigned long) &frame->uc; | ||
222 | PT_REGS_RIP(regs) = (unsigned long) ka->sa.sa_handler; | ||
223 | out: | ||
224 | return err; | ||
225 | |||
226 | restore_sp: | ||
227 | PT_REGS_RSP(regs) = save_sp; | ||
228 | return err; | ||
229 | } | ||
230 | |||
231 | long sys_rt_sigreturn(struct pt_regs *regs) | ||
232 | { | ||
233 | unsigned long sp = PT_REGS_SP(¤t->thread.regs); | ||
234 | struct rt_sigframe __user *frame = | ||
235 | (struct rt_sigframe __user *)(sp - 8); | ||
236 | struct ucontext __user *uc = &frame->uc; | ||
237 | sigset_t set; | ||
238 | |||
239 | if (copy_from_user(&set, &uc->uc_sigmask, sizeof(set))) | ||
240 | goto segfault; | ||
241 | |||
242 | sigdelsetmask(&set, ~_BLOCKABLE); | ||
243 | set_current_blocked(&set); | ||
244 | |||
245 | if (copy_sc_from_user(¤t->thread.regs, &uc->uc_mcontext)) | ||
246 | goto segfault; | ||
247 | |||
248 | /* Avoid ERESTART handling */ | ||
249 | PT_REGS_SYSCALL_NR(¤t->thread.regs) = -1; | ||
250 | return PT_REGS_SYSCALL_RET(¤t->thread.regs); | ||
251 | |||
252 | segfault: | ||
253 | force_sig(SIGSEGV, current); | ||
254 | return 0; | ||
255 | } | ||
diff --git a/arch/um/sys-x86/stub_32.S b/arch/um/sys-x86/stub_32.S new file mode 100644 index 000000000000..54a36ec20cb7 --- /dev/null +++ b/arch/um/sys-x86/stub_32.S | |||
@@ -0,0 +1,51 @@ | |||
1 | #include "as-layout.h" | ||
2 | |||
3 | .globl syscall_stub | ||
4 | .section .__syscall_stub, "ax" | ||
5 | |||
6 | .globl batch_syscall_stub | ||
7 | batch_syscall_stub: | ||
8 | /* load pointer to first operation */ | ||
9 | mov $(STUB_DATA+8), %esp | ||
10 | |||
11 | again: | ||
12 | /* load length of additional data */ | ||
13 | mov 0x0(%esp), %eax | ||
14 | |||
15 | /* if(length == 0) : end of list */ | ||
16 | /* write possible 0 to header */ | ||
17 | mov %eax, STUB_DATA+4 | ||
18 | cmpl $0, %eax | ||
19 | jz done | ||
20 | |||
21 | /* save current pointer */ | ||
22 | mov %esp, STUB_DATA+4 | ||
23 | |||
24 | /* skip additional data */ | ||
25 | add %eax, %esp | ||
26 | |||
27 | /* load syscall-# */ | ||
28 | pop %eax | ||
29 | |||
30 | /* load syscall params */ | ||
31 | pop %ebx | ||
32 | pop %ecx | ||
33 | pop %edx | ||
34 | pop %esi | ||
35 | pop %edi | ||
36 | pop %ebp | ||
37 | |||
38 | /* execute syscall */ | ||
39 | int $0x80 | ||
40 | |||
41 | /* check return value */ | ||
42 | pop %ebx | ||
43 | cmp %ebx, %eax | ||
44 | je again | ||
45 | |||
46 | done: | ||
47 | /* save return value */ | ||
48 | mov %eax, STUB_DATA | ||
49 | |||
50 | /* stop */ | ||
51 | int3 | ||
diff --git a/arch/um/sys-x86/stub_64.S b/arch/um/sys-x86/stub_64.S new file mode 100644 index 000000000000..20e4a96a6dcb --- /dev/null +++ b/arch/um/sys-x86/stub_64.S | |||
@@ -0,0 +1,66 @@ | |||
1 | #include "as-layout.h" | ||
2 | |||
3 | .globl syscall_stub | ||
4 | .section .__syscall_stub, "ax" | ||
5 | syscall_stub: | ||
6 | syscall | ||
7 | /* We don't have 64-bit constants, so this constructs the address | ||
8 | * we need. | ||
9 | */ | ||
10 | movq $(STUB_DATA >> 32), %rbx | ||
11 | salq $32, %rbx | ||
12 | movq $(STUB_DATA & 0xffffffff), %rcx | ||
13 | or %rcx, %rbx | ||
14 | movq %rax, (%rbx) | ||
15 | int3 | ||
16 | |||
17 | .globl batch_syscall_stub | ||
18 | batch_syscall_stub: | ||
19 | mov $(STUB_DATA >> 32), %rbx | ||
20 | sal $32, %rbx | ||
21 | mov $(STUB_DATA & 0xffffffff), %rax | ||
22 | or %rax, %rbx | ||
23 | /* load pointer to first operation */ | ||
24 | mov %rbx, %rsp | ||
25 | add $0x10, %rsp | ||
26 | again: | ||
27 | /* load length of additional data */ | ||
28 | mov 0x0(%rsp), %rax | ||
29 | |||
30 | /* if(length == 0) : end of list */ | ||
31 | /* write possible 0 to header */ | ||
32 | mov %rax, 8(%rbx) | ||
33 | cmp $0, %rax | ||
34 | jz done | ||
35 | |||
36 | /* save current pointer */ | ||
37 | mov %rsp, 8(%rbx) | ||
38 | |||
39 | /* skip additional data */ | ||
40 | add %rax, %rsp | ||
41 | |||
42 | /* load syscall-# */ | ||
43 | pop %rax | ||
44 | |||
45 | /* load syscall params */ | ||
46 | pop %rdi | ||
47 | pop %rsi | ||
48 | pop %rdx | ||
49 | pop %r10 | ||
50 | pop %r8 | ||
51 | pop %r9 | ||
52 | |||
53 | /* execute syscall */ | ||
54 | syscall | ||
55 | |||
56 | /* check return value */ | ||
57 | pop %rcx | ||
58 | cmp %rcx, %rax | ||
59 | je again | ||
60 | |||
61 | done: | ||
62 | /* save return value */ | ||
63 | mov %rax, (%rbx) | ||
64 | |||
65 | /* stop */ | ||
66 | int3 | ||
diff --git a/arch/um/sys-x86/stub_segv_32.c b/arch/um/sys-x86/stub_segv_32.c new file mode 100644 index 000000000000..28ccf737a79f --- /dev/null +++ b/arch/um/sys-x86/stub_segv_32.c | |||
@@ -0,0 +1,17 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #include "sysdep/stub.h" | ||
7 | #include "sysdep/sigcontext.h" | ||
8 | |||
9 | void __attribute__ ((__section__ (".__syscall_stub"))) | ||
10 | stub_segv_handler(int sig) | ||
11 | { | ||
12 | struct sigcontext *sc = (struct sigcontext *) (&sig + 1); | ||
13 | |||
14 | GET_FAULTINFO_FROM_SC(*((struct faultinfo *) STUB_DATA), sc); | ||
15 | |||
16 | trap_myself(); | ||
17 | } | ||
diff --git a/arch/um/sys-x86/stub_segv_64.c b/arch/um/sys-x86/stub_segv_64.c new file mode 100644 index 000000000000..ced051afc705 --- /dev/null +++ b/arch/um/sys-x86/stub_segv_64.c | |||
@@ -0,0 +1,22 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #include <signal.h> | ||
7 | #include "as-layout.h" | ||
8 | #include "sysdep/stub.h" | ||
9 | #include "sysdep/faultinfo.h" | ||
10 | #include "sysdep/sigcontext.h" | ||
11 | |||
12 | void __attribute__ ((__section__ (".__syscall_stub"))) | ||
13 | stub_segv_handler(int sig) | ||
14 | { | ||
15 | struct ucontext *uc; | ||
16 | |||
17 | __asm__ __volatile__("movq %%rdx, %0" : "=g" (uc) :); | ||
18 | GET_FAULTINFO_FROM_SC(*((struct faultinfo *) STUB_DATA), | ||
19 | &uc->uc_mcontext); | ||
20 | trap_myself(); | ||
21 | } | ||
22 | |||
diff --git a/arch/um/sys-x86/sys_call_table_32.S b/arch/um/sys-x86/sys_call_table_32.S new file mode 100644 index 000000000000..de274071455d --- /dev/null +++ b/arch/um/sys-x86/sys_call_table_32.S | |||
@@ -0,0 +1,28 @@ | |||
1 | #include <linux/linkage.h> | ||
2 | /* Steal i386 syscall table for our purposes, but with some slight changes.*/ | ||
3 | |||
4 | #define sys_iopl sys_ni_syscall | ||
5 | #define sys_ioperm sys_ni_syscall | ||
6 | |||
7 | #define sys_vm86old sys_ni_syscall | ||
8 | #define sys_vm86 sys_ni_syscall | ||
9 | |||
10 | #define old_mmap sys_old_mmap | ||
11 | |||
12 | #define ptregs_fork sys_fork | ||
13 | #define ptregs_execve sys_execve | ||
14 | #define ptregs_iopl sys_iopl | ||
15 | #define ptregs_vm86old sys_vm86old | ||
16 | #define ptregs_sigreturn sys_sigreturn | ||
17 | #define ptregs_clone sys_clone | ||
18 | #define ptregs_vm86 sys_vm86 | ||
19 | #define ptregs_rt_sigreturn sys_rt_sigreturn | ||
20 | #define ptregs_sigaltstack sys_sigaltstack | ||
21 | #define ptregs_vfork sys_vfork | ||
22 | |||
23 | .section .rodata,"a" | ||
24 | |||
25 | #include "../../x86/kernel/syscall_table_32.S" | ||
26 | |||
27 | ENTRY(syscall_table_size) | ||
28 | .long .-sys_call_table | ||
diff --git a/arch/um/sys-x86/sys_call_table_64.c b/arch/um/sys-x86/sys_call_table_64.c new file mode 100644 index 000000000000..f46de82d675c --- /dev/null +++ b/arch/um/sys-x86/sys_call_table_64.c | |||
@@ -0,0 +1,64 @@ | |||
1 | /* | ||
2 | * System call table for UML/x86-64, copied from arch/x86_64/kernel/syscall.c | ||
3 | * with some changes for UML. | ||
4 | */ | ||
5 | |||
6 | #include <linux/linkage.h> | ||
7 | #include <linux/sys.h> | ||
8 | #include <linux/cache.h> | ||
9 | |||
10 | #define __NO_STUBS | ||
11 | |||
12 | /* | ||
13 | * Below you can see, in terms of #define's, the differences between the x86-64 | ||
14 | * and the UML syscall table. | ||
15 | */ | ||
16 | |||
17 | /* Not going to be implemented by UML, since we have no hardware. */ | ||
18 | #define stub_iopl sys_ni_syscall | ||
19 | #define sys_ioperm sys_ni_syscall | ||
20 | |||
21 | /* | ||
22 | * The UML TLS problem. Note that x86_64 does not implement this, so the below | ||
23 | * is needed only for the ia32 compatibility. | ||
24 | */ | ||
25 | |||
26 | /* On UML we call it this way ("old" means it's not mmap2) */ | ||
27 | #define sys_mmap old_mmap | ||
28 | |||
29 | #define stub_clone sys_clone | ||
30 | #define stub_fork sys_fork | ||
31 | #define stub_vfork sys_vfork | ||
32 | #define stub_execve sys_execve | ||
33 | #define stub_rt_sigsuspend sys_rt_sigsuspend | ||
34 | #define stub_sigaltstack sys_sigaltstack | ||
35 | #define stub_rt_sigreturn sys_rt_sigreturn | ||
36 | |||
37 | #define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ; | ||
38 | #undef _ASM_X86_UNISTD_64_H | ||
39 | #include "../../x86/include/asm/unistd_64.h" | ||
40 | |||
41 | #undef __SYSCALL | ||
42 | #define __SYSCALL(nr, sym) [ nr ] = sym, | ||
43 | #undef _ASM_X86_UNISTD_64_H | ||
44 | |||
45 | typedef void (*sys_call_ptr_t)(void); | ||
46 | |||
47 | extern void sys_ni_syscall(void); | ||
48 | |||
49 | /* | ||
50 | * We used to have a trick here which made sure that holes in the | ||
51 | * x86_64 table were filled in with sys_ni_syscall, but a comment in | ||
52 | * unistd_64.h says that holes aren't allowed, so the trick was | ||
53 | * removed. | ||
54 | * The trick looked like this | ||
55 | * [0 ... UM_NR_syscall_max] = &sys_ni_syscall | ||
56 | * before including unistd_64.h - the later initializations overwrote | ||
57 | * the sys_ni_syscall filler. | ||
58 | */ | ||
59 | |||
60 | sys_call_ptr_t sys_call_table[] __cacheline_aligned = { | ||
61 | #include "../../x86/include/asm/unistd_64.h" | ||
62 | }; | ||
63 | |||
64 | int syscall_table_size = sizeof(sys_call_table); | ||
diff --git a/arch/um/sys-x86/syscalls_32.c b/arch/um/sys-x86/syscalls_32.c new file mode 100644 index 000000000000..70ca357393b8 --- /dev/null +++ b/arch/um/sys-x86/syscalls_32.c | |||
@@ -0,0 +1,66 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #include "linux/sched.h" | ||
7 | #include "linux/shm.h" | ||
8 | #include "linux/ipc.h" | ||
9 | #include "linux/syscalls.h" | ||
10 | #include "asm/mman.h" | ||
11 | #include "asm/uaccess.h" | ||
12 | #include "asm/unistd.h" | ||
13 | |||
14 | /* | ||
15 | * The prototype on i386 is: | ||
16 | * | ||
17 | * int clone(int flags, void * child_stack, int * parent_tidptr, struct user_desc * newtls, int * child_tidptr) | ||
18 | * | ||
19 | * and the "newtls" arg. on i386 is read by copy_thread directly from the | ||
20 | * register saved on the stack. | ||
21 | */ | ||
22 | long sys_clone(unsigned long clone_flags, unsigned long newsp, | ||
23 | int __user *parent_tid, void *newtls, int __user *child_tid) | ||
24 | { | ||
25 | long ret; | ||
26 | |||
27 | if (!newsp) | ||
28 | newsp = UPT_SP(¤t->thread.regs.regs); | ||
29 | |||
30 | current->thread.forking = 1; | ||
31 | ret = do_fork(clone_flags, newsp, ¤t->thread.regs, 0, parent_tid, | ||
32 | child_tid); | ||
33 | current->thread.forking = 0; | ||
34 | return ret; | ||
35 | } | ||
36 | |||
37 | long sys_sigaction(int sig, const struct old_sigaction __user *act, | ||
38 | struct old_sigaction __user *oact) | ||
39 | { | ||
40 | struct k_sigaction new_ka, old_ka; | ||
41 | int ret; | ||
42 | |||
43 | if (act) { | ||
44 | old_sigset_t mask; | ||
45 | if (!access_ok(VERIFY_READ, act, sizeof(*act)) || | ||
46 | __get_user(new_ka.sa.sa_handler, &act->sa_handler) || | ||
47 | __get_user(new_ka.sa.sa_restorer, &act->sa_restorer)) | ||
48 | return -EFAULT; | ||
49 | __get_user(new_ka.sa.sa_flags, &act->sa_flags); | ||
50 | __get_user(mask, &act->sa_mask); | ||
51 | siginitset(&new_ka.sa.sa_mask, mask); | ||
52 | } | ||
53 | |||
54 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | ||
55 | |||
56 | if (!ret && oact) { | ||
57 | if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || | ||
58 | __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || | ||
59 | __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer)) | ||
60 | return -EFAULT; | ||
61 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags); | ||
62 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); | ||
63 | } | ||
64 | |||
65 | return ret; | ||
66 | } | ||
diff --git a/arch/um/sys-x86/syscalls_64.c b/arch/um/sys-x86/syscalls_64.c new file mode 100644 index 000000000000..f3d82bb6e15a --- /dev/null +++ b/arch/um/sys-x86/syscalls_64.c | |||
@@ -0,0 +1,102 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
3 | * Copyright 2003 PathScale, Inc. | ||
4 | * | ||
5 | * Licensed under the GPL | ||
6 | */ | ||
7 | |||
8 | #include "linux/linkage.h" | ||
9 | #include "linux/personality.h" | ||
10 | #include "linux/utsname.h" | ||
11 | #include "asm/prctl.h" /* XXX This should get the constants from libc */ | ||
12 | #include "asm/uaccess.h" | ||
13 | #include "os.h" | ||
14 | |||
15 | long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr) | ||
16 | { | ||
17 | unsigned long *ptr = addr, tmp; | ||
18 | long ret; | ||
19 | int pid = task->mm->context.id.u.pid; | ||
20 | |||
21 | /* | ||
22 | * With ARCH_SET_FS (and ARCH_SET_GS is treated similarly to | ||
23 | * be safe), we need to call arch_prctl on the host because | ||
24 | * setting %fs may result in something else happening (like a | ||
25 | * GDT or thread.fs being set instead). So, we let the host | ||
26 | * fiddle the registers and thread struct and restore the | ||
27 | * registers afterwards. | ||
28 | * | ||
29 | * So, the saved registers are stored to the process (this | ||
30 | * needed because a stub may have been the last thing to run), | ||
31 | * arch_prctl is run on the host, then the registers are read | ||
32 | * back. | ||
33 | */ | ||
34 | switch (code) { | ||
35 | case ARCH_SET_FS: | ||
36 | case ARCH_SET_GS: | ||
37 | ret = restore_registers(pid, ¤t->thread.regs.regs); | ||
38 | if (ret) | ||
39 | return ret; | ||
40 | break; | ||
41 | case ARCH_GET_FS: | ||
42 | case ARCH_GET_GS: | ||
43 | /* | ||
44 | * With these two, we read to a local pointer and | ||
45 | * put_user it to the userspace pointer that we were | ||
46 | * given. If addr isn't valid (because it hasn't been | ||
47 | * faulted in or is just bogus), we want put_user to | ||
48 | * fault it in (or return -EFAULT) instead of having | ||
49 | * the host return -EFAULT. | ||
50 | */ | ||
51 | ptr = &tmp; | ||
52 | } | ||
53 | |||
54 | ret = os_arch_prctl(pid, code, ptr); | ||
55 | if (ret) | ||
56 | return ret; | ||
57 | |||
58 | switch (code) { | ||
59 | case ARCH_SET_FS: | ||
60 | current->thread.arch.fs = (unsigned long) ptr; | ||
61 | ret = save_registers(pid, ¤t->thread.regs.regs); | ||
62 | break; | ||
63 | case ARCH_SET_GS: | ||
64 | ret = save_registers(pid, ¤t->thread.regs.regs); | ||
65 | break; | ||
66 | case ARCH_GET_FS: | ||
67 | ret = put_user(tmp, addr); | ||
68 | break; | ||
69 | case ARCH_GET_GS: | ||
70 | ret = put_user(tmp, addr); | ||
71 | break; | ||
72 | } | ||
73 | |||
74 | return ret; | ||
75 | } | ||
76 | |||
77 | long sys_arch_prctl(int code, unsigned long addr) | ||
78 | { | ||
79 | return arch_prctl(current, code, (unsigned long __user *) addr); | ||
80 | } | ||
81 | |||
82 | long sys_clone(unsigned long clone_flags, unsigned long newsp, | ||
83 | void __user *parent_tid, void __user *child_tid) | ||
84 | { | ||
85 | long ret; | ||
86 | |||
87 | if (!newsp) | ||
88 | newsp = UPT_SP(¤t->thread.regs.regs); | ||
89 | current->thread.forking = 1; | ||
90 | ret = do_fork(clone_flags, newsp, ¤t->thread.regs, 0, parent_tid, | ||
91 | child_tid); | ||
92 | current->thread.forking = 0; | ||
93 | return ret; | ||
94 | } | ||
95 | |||
96 | void arch_switch_to(struct task_struct *to) | ||
97 | { | ||
98 | if ((to->thread.arch.fs == 0) || (to->mm == NULL)) | ||
99 | return; | ||
100 | |||
101 | arch_prctl(to, ARCH_SET_FS, (void __user *) to->thread.arch.fs); | ||
102 | } | ||
diff --git a/arch/um/sys-x86/sysrq_32.c b/arch/um/sys-x86/sysrq_32.c new file mode 100644 index 000000000000..171b3e9dc867 --- /dev/null +++ b/arch/um/sys-x86/sysrq_32.c | |||
@@ -0,0 +1,101 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2001 - 2003 Jeff Dike (jdike@addtoit.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #include "linux/kernel.h" | ||
7 | #include "linux/smp.h" | ||
8 | #include "linux/sched.h" | ||
9 | #include "linux/kallsyms.h" | ||
10 | #include "asm/ptrace.h" | ||
11 | #include "sysrq.h" | ||
12 | |||
13 | /* This is declared by <linux/sched.h> */ | ||
14 | void show_regs(struct pt_regs *regs) | ||
15 | { | ||
16 | printk("\n"); | ||
17 | printk("EIP: %04lx:[<%08lx>] CPU: %d %s", | ||
18 | 0xffff & PT_REGS_CS(regs), PT_REGS_IP(regs), | ||
19 | smp_processor_id(), print_tainted()); | ||
20 | if (PT_REGS_CS(regs) & 3) | ||
21 | printk(" ESP: %04lx:%08lx", 0xffff & PT_REGS_SS(regs), | ||
22 | PT_REGS_SP(regs)); | ||
23 | printk(" EFLAGS: %08lx\n %s\n", PT_REGS_EFLAGS(regs), | ||
24 | print_tainted()); | ||
25 | printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n", | ||
26 | PT_REGS_EAX(regs), PT_REGS_EBX(regs), | ||
27 | PT_REGS_ECX(regs), | ||
28 | PT_REGS_EDX(regs)); | ||
29 | printk("ESI: %08lx EDI: %08lx EBP: %08lx", | ||
30 | PT_REGS_ESI(regs), PT_REGS_EDI(regs), | ||
31 | PT_REGS_EBP(regs)); | ||
32 | printk(" DS: %04lx ES: %04lx\n", | ||
33 | 0xffff & PT_REGS_DS(regs), | ||
34 | 0xffff & PT_REGS_ES(regs)); | ||
35 | |||
36 | show_trace(NULL, (unsigned long *) ®s); | ||
37 | } | ||
38 | |||
39 | /* Copied from i386. */ | ||
40 | static inline int valid_stack_ptr(struct thread_info *tinfo, void *p) | ||
41 | { | ||
42 | return p > (void *)tinfo && | ||
43 | p < (void *)tinfo + THREAD_SIZE - 3; | ||
44 | } | ||
45 | |||
46 | /* Adapted from i386 (we also print the address we read from). */ | ||
47 | static inline unsigned long print_context_stack(struct thread_info *tinfo, | ||
48 | unsigned long *stack, unsigned long ebp) | ||
49 | { | ||
50 | unsigned long addr; | ||
51 | |||
52 | #ifdef CONFIG_FRAME_POINTER | ||
53 | while (valid_stack_ptr(tinfo, (void *)ebp)) { | ||
54 | addr = *(unsigned long *)(ebp + 4); | ||
55 | printk("%08lx: [<%08lx>]", ebp + 4, addr); | ||
56 | print_symbol(" %s", addr); | ||
57 | printk("\n"); | ||
58 | ebp = *(unsigned long *)ebp; | ||
59 | } | ||
60 | #else | ||
61 | while (valid_stack_ptr(tinfo, stack)) { | ||
62 | addr = *stack; | ||
63 | if (__kernel_text_address(addr)) { | ||
64 | printk("%08lx: [<%08lx>]", (unsigned long) stack, addr); | ||
65 | print_symbol(" %s", addr); | ||
66 | printk("\n"); | ||
67 | } | ||
68 | stack++; | ||
69 | } | ||
70 | #endif | ||
71 | return ebp; | ||
72 | } | ||
73 | |||
74 | void show_trace(struct task_struct* task, unsigned long * stack) | ||
75 | { | ||
76 | unsigned long ebp; | ||
77 | struct thread_info *context; | ||
78 | |||
79 | /* Turn this into BUG_ON if possible. */ | ||
80 | if (!stack) { | ||
81 | stack = (unsigned long*) &stack; | ||
82 | printk("show_trace: got NULL stack, implicit assumption task == current"); | ||
83 | WARN_ON(1); | ||
84 | } | ||
85 | |||
86 | if (!task) | ||
87 | task = current; | ||
88 | |||
89 | if (task != current) { | ||
90 | ebp = (unsigned long) KSTK_EBP(task); | ||
91 | } else { | ||
92 | asm ("movl %%ebp, %0" : "=r" (ebp) : ); | ||
93 | } | ||
94 | |||
95 | context = (struct thread_info *) | ||
96 | ((unsigned long)stack & (~(THREAD_SIZE - 1))); | ||
97 | print_context_stack(context, stack, ebp); | ||
98 | |||
99 | printk("\n"); | ||
100 | } | ||
101 | |||
diff --git a/arch/um/sys-x86/sysrq_64.c b/arch/um/sys-x86/sysrq_64.c new file mode 100644 index 000000000000..f4f82beb3508 --- /dev/null +++ b/arch/um/sys-x86/sysrq_64.c | |||
@@ -0,0 +1,41 @@ | |||
1 | /* | ||
2 | * Copyright 2003 PathScale, Inc. | ||
3 | * | ||
4 | * Licensed under the GPL | ||
5 | */ | ||
6 | |||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/module.h> | ||
9 | #include <linux/sched.h> | ||
10 | #include <linux/utsname.h> | ||
11 | #include <asm/current.h> | ||
12 | #include <asm/ptrace.h> | ||
13 | #include "sysrq.h" | ||
14 | |||
15 | void __show_regs(struct pt_regs *regs) | ||
16 | { | ||
17 | printk("\n"); | ||
18 | print_modules(); | ||
19 | printk(KERN_INFO "Pid: %d, comm: %.20s %s %s\n", task_pid_nr(current), | ||
20 | current->comm, print_tainted(), init_utsname()->release); | ||
21 | printk(KERN_INFO "RIP: %04lx:[<%016lx>]\n", PT_REGS_CS(regs) & 0xffff, | ||
22 | PT_REGS_RIP(regs)); | ||
23 | printk(KERN_INFO "RSP: %016lx EFLAGS: %08lx\n", PT_REGS_RSP(regs), | ||
24 | PT_REGS_EFLAGS(regs)); | ||
25 | printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n", | ||
26 | PT_REGS_RAX(regs), PT_REGS_RBX(regs), PT_REGS_RCX(regs)); | ||
27 | printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n", | ||
28 | PT_REGS_RDX(regs), PT_REGS_RSI(regs), PT_REGS_RDI(regs)); | ||
29 | printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n", | ||
30 | PT_REGS_RBP(regs), PT_REGS_R8(regs), PT_REGS_R9(regs)); | ||
31 | printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n", | ||
32 | PT_REGS_R10(regs), PT_REGS_R11(regs), PT_REGS_R12(regs)); | ||
33 | printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n", | ||
34 | PT_REGS_R13(regs), PT_REGS_R14(regs), PT_REGS_R15(regs)); | ||
35 | } | ||
36 | |||
37 | void show_regs(struct pt_regs *regs) | ||
38 | { | ||
39 | __show_regs(regs); | ||
40 | show_trace(current, (unsigned long *) ®s); | ||
41 | } | ||
diff --git a/arch/um/sys-x86/tls_32.c b/arch/um/sys-x86/tls_32.c new file mode 100644 index 000000000000..c6c7131e563b --- /dev/null +++ b/arch/um/sys-x86/tls_32.c | |||
@@ -0,0 +1,396 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it> | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #include "linux/percpu.h" | ||
7 | #include "linux/sched.h" | ||
8 | #include "asm/uaccess.h" | ||
9 | #include "os.h" | ||
10 | #include "skas.h" | ||
11 | #include "sysdep/tls.h" | ||
12 | |||
13 | /* | ||
14 | * If needed we can detect when it's uninitialized. | ||
15 | * | ||
16 | * These are initialized in an initcall and unchanged thereafter. | ||
17 | */ | ||
18 | static int host_supports_tls = -1; | ||
19 | int host_gdt_entry_tls_min; | ||
20 | |||
21 | int do_set_thread_area(struct user_desc *info) | ||
22 | { | ||
23 | int ret; | ||
24 | u32 cpu; | ||
25 | |||
26 | cpu = get_cpu(); | ||
27 | ret = os_set_thread_area(info, userspace_pid[cpu]); | ||
28 | put_cpu(); | ||
29 | |||
30 | if (ret) | ||
31 | printk(KERN_ERR "PTRACE_SET_THREAD_AREA failed, err = %d, " | ||
32 | "index = %d\n", ret, info->entry_number); | ||
33 | |||
34 | return ret; | ||
35 | } | ||
36 | |||
37 | int do_get_thread_area(struct user_desc *info) | ||
38 | { | ||
39 | int ret; | ||
40 | u32 cpu; | ||
41 | |||
42 | cpu = get_cpu(); | ||
43 | ret = os_get_thread_area(info, userspace_pid[cpu]); | ||
44 | put_cpu(); | ||
45 | |||
46 | if (ret) | ||
47 | printk(KERN_ERR "PTRACE_GET_THREAD_AREA failed, err = %d, " | ||
48 | "index = %d\n", ret, info->entry_number); | ||
49 | |||
50 | return ret; | ||
51 | } | ||
52 | |||
/*
 * get_free_idx: find a yet-unused TLS descriptor index, used by
 * sys_set_thread_area() when userspace passes entry_number == -1.
 * XXX: Consider leaving one free slot for glibc usage at first place. This must
 * be done here (and by changing GDT_ENTRY_TLS_* macros) and nowhere else.
 *
 * Also, this must be tested when compiling in SKAS mode with dynamic linking
 * and running against NPTL.
 */
61 | static int get_free_idx(struct task_struct* task) | ||
62 | { | ||
63 | struct thread_struct *t = &task->thread; | ||
64 | int idx; | ||
65 | |||
66 | if (!t->arch.tls_array) | ||
67 | return GDT_ENTRY_TLS_MIN; | ||
68 | |||
69 | for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++) | ||
70 | if (!t->arch.tls_array[idx].present) | ||
71 | return idx + GDT_ENTRY_TLS_MIN; | ||
72 | return -ESRCH; | ||
73 | } | ||
74 | |||
static inline void clear_user_desc(struct user_desc* info)
{
	/* Postcondition: LDT_empty(info) returns true. */
	memset(info, 0, sizeof(*info));

	/*
	 * An all-zero user_desc is not considered "empty": these two bits
	 * must also be set (compare LDT_empty() and the i386
	 * sys_get_thread_area code).
	 */
	info->seg_not_present = 1;
	info->read_exec_only = 1;
}
87 | |||
88 | #define O_FORCE 1 | ||
89 | |||
/*
 * Push this task's TLS descriptors down to the host process.
 *
 * For each slot: a non-present entry that was never flushed is cleared
 * locally first so that flushing it wipes any stale host descriptor; a
 * non-present entry that was already flushed is skipped (and must already
 * be empty).  Present entries are written out unless already flushed and
 * O_FORCE was not given.
 *
 * Returns 0, or the first error from do_set_thread_area() (remaining
 * slots are then left un-flushed).
 *
 * NOTE(review): the loop bound `idx < GDT_ENTRY_TLS_MAX` excludes the
 * GDT_ENTRY_TLS_MAX index itself — whether that skips the last slot
 * depends on the sysdep macro definitions; verify before changing.
 */
static int load_TLS(int flags, struct task_struct *to)
{
	int ret = 0;
	int idx;

	for (idx = GDT_ENTRY_TLS_MIN; idx < GDT_ENTRY_TLS_MAX; idx++) {
		struct uml_tls_struct* curr =
			&to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN];

		/*
		 * Actually, now if it wasn't flushed it gets cleared and
		 * flushed to the host, which will clear it.
		 */
		if (!curr->present) {
			if (!curr->flushed) {
				clear_user_desc(&curr->tls);
				curr->tls.entry_number = idx;
			} else {
				WARN_ON(!LDT_empty(&curr->tls));
				continue;
			}
		}

		if (!(flags & O_FORCE) && curr->flushed)
			continue;

		ret = do_set_thread_area(&curr->tls);
		if (ret)
			goto out;

		curr->flushed = 1;
	}
out:
	return ret;
}
125 | |||
126 | /* | ||
127 | * Verify if we need to do a flush for the new process, i.e. if there are any | ||
128 | * present desc's, only if they haven't been flushed. | ||
129 | */ | ||
130 | static inline int needs_TLS_update(struct task_struct *task) | ||
131 | { | ||
132 | int i; | ||
133 | int ret = 0; | ||
134 | |||
135 | for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) { | ||
136 | struct uml_tls_struct* curr = | ||
137 | &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN]; | ||
138 | |||
139 | /* | ||
140 | * Can't test curr->present, we may need to clear a descriptor | ||
141 | * which had a value. | ||
142 | */ | ||
143 | if (curr->flushed) | ||
144 | continue; | ||
145 | ret = 1; | ||
146 | break; | ||
147 | } | ||
148 | return ret; | ||
149 | } | ||
150 | |||
151 | /* | ||
152 | * On a newly forked process, the TLS descriptors haven't yet been flushed. So | ||
153 | * we mark them as such and the first switch_to will do the job. | ||
154 | */ | ||
155 | void clear_flushed_tls(struct task_struct *task) | ||
156 | { | ||
157 | int i; | ||
158 | |||
159 | for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) { | ||
160 | struct uml_tls_struct* curr = | ||
161 | &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN]; | ||
162 | |||
163 | /* | ||
164 | * Still correct to do this, if it wasn't present on the host it | ||
165 | * will remain as flushed as it was. | ||
166 | */ | ||
167 | if (!curr->present) | ||
168 | continue; | ||
169 | |||
170 | curr->flushed = 0; | ||
171 | } | ||
172 | } | ||
173 | |||
174 | /* | ||
175 | * In SKAS0 mode, currently, multiple guest threads sharing the same ->mm have a | ||
176 | * common host process. So this is needed in SKAS0 too. | ||
177 | * | ||
178 | * However, if each thread had a different host process (and this was discussed | ||
179 | * for SMP support) this won't be needed. | ||
180 | * | ||
181 | * And this will not need be used when (and if) we'll add support to the host | ||
182 | * SKAS patch. | ||
183 | */ | ||
184 | |||
185 | int arch_switch_tls(struct task_struct *to) | ||
186 | { | ||
187 | if (!host_supports_tls) | ||
188 | return 0; | ||
189 | |||
190 | /* | ||
191 | * We have no need whatsoever to switch TLS for kernel threads; beyond | ||
192 | * that, that would also result in us calling os_set_thread_area with | ||
193 | * userspace_pid[cpu] == 0, which gives an error. | ||
194 | */ | ||
195 | if (likely(to->mm)) | ||
196 | return load_TLS(O_FORCE, to); | ||
197 | |||
198 | return 0; | ||
199 | } | ||
200 | |||
201 | static int set_tls_entry(struct task_struct* task, struct user_desc *info, | ||
202 | int idx, int flushed) | ||
203 | { | ||
204 | struct thread_struct *t = &task->thread; | ||
205 | |||
206 | if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) | ||
207 | return -EINVAL; | ||
208 | |||
209 | t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info; | ||
210 | t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1; | ||
211 | t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed; | ||
212 | |||
213 | return 0; | ||
214 | } | ||
215 | |||
/*
 * Copy the CLONE_SETTLS descriptor for a newly forked child.  The
 * struct user_desc pointer is read from the child's saved %esi
 * (UPT_ESI) — presumably the i386 clone() TLS-argument register;
 * confirm against the syscall entry path.  The entry is stored as
 * not-flushed, so the first switch_to loads it into the host.
 *
 * Returns 0 on success, -EFAULT on a bad user pointer, or -EINVAL for
 * an empty descriptor or out-of-range entry_number.
 */
int arch_copy_tls(struct task_struct *new)
{
	struct user_desc info;
	int idx, ret = -EFAULT;

	if (copy_from_user(&info,
			   (void __user *) UPT_ESI(&new->thread.regs.regs),
			   sizeof(info)))
		goto out;

	ret = -EINVAL;
	if (LDT_empty(&info))
		goto out;

	idx = info.entry_number;

	ret = set_tls_entry(new, &info, idx, 0);
out:
	return ret;
}
236 | |||
237 | /* XXX: use do_get_thread_area to read the host value? I'm not at all sure! */ | ||
/*
 * Read back TLS slot @idx of @task into @info.  A slot that was never
 * set yields a cleared (LDT_empty) descriptor carrying the requested
 * entry_number, emulating the boot-time GDT contents.  Returns 0, or
 * -EINVAL for an out-of-range @idx.
 *
 * XXX: use do_get_thread_area to read the host value? I'm not at all sure!
 */
static int get_tls_entry(struct task_struct *task, struct user_desc *info,
			 int idx)
{
	struct thread_struct *t = &task->thread;

	/*
	 * NOTE(review): tls_array appears to be an in-struct array (see the
	 * indexing below), which would make this check dead code — and the
	 * "clear" path it creates runs before the idx range check.  Confirm
	 * against the arch thread_struct definition.
	 */
	if (!t->arch.tls_array)
		goto clear;

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present)
		goto clear;

	*info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls;

out:
	/*
	 * Temporary debugging check, to make sure that things have been
	 * flushed. This could be triggered if load_TLS() failed.
	 */
	if (unlikely(task == current &&
		     !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
		printk(KERN_ERR "get_tls_entry: task with pid %d got here "
				"without flushed TLS.", current->pid);
	}

	return 0;
clear:
	/*
	 * When the TLS entry has not been set, the values read to user in the
	 * tls_array are 0 (because it's cleared at boot, see
	 * arch/i386/kernel/head.S:cpu_gdt_table). Emulate that.
	 */
	clear_user_desc(info);
	info->entry_number = idx;
	goto out;
}
276 | |||
/*
 * set_thread_area() syscall: install a TLS descriptor for the current
 * task.  entry_number == -1 asks the kernel to pick a free slot, which
 * is written back to userspace before the install.  The host descriptor
 * is updated immediately, then mirrored into tls_array as flushed.
 *
 * Returns 0; -ENOSYS if the host lacks TLS support; -EFAULT on bad user
 * memory; -ESRCH if no slot is free; or the error from the host install
 * (-EINVAL for a bad index).
 */
int sys_set_thread_area(struct user_desc __user *user_desc)
{
	struct user_desc info;
	int idx, ret;

	if (!host_supports_tls)
		return -ENOSYS;

	if (copy_from_user(&info, user_desc, sizeof(info)))
		return -EFAULT;

	idx = info.entry_number;

	if (idx == -1) {
		idx = get_free_idx(current);
		if (idx < 0)
			return idx;
		info.entry_number = idx;
		/* Tell the user which slot we chose for him.*/
		if (put_user(idx, &user_desc->entry_number))
			return -EFAULT;
	}

	/* Host first: only mirror locally (as flushed) once the host took it. */
	ret = do_set_thread_area(&info);
	if (ret)
		return ret;
	return set_tls_entry(current, &info, idx, 1);
}
305 | |||
306 | /* | ||
307 | * Perform set_thread_area on behalf of the traced child. | ||
308 | * Note: error handling is not done on the deferred load, and this differ from | ||
309 | * i386. However the only possible error are caused by bugs. | ||
310 | */ | ||
311 | int ptrace_set_thread_area(struct task_struct *child, int idx, | ||
312 | struct user_desc __user *user_desc) | ||
313 | { | ||
314 | struct user_desc info; | ||
315 | |||
316 | if (!host_supports_tls) | ||
317 | return -EIO; | ||
318 | |||
319 | if (copy_from_user(&info, user_desc, sizeof(info))) | ||
320 | return -EFAULT; | ||
321 | |||
322 | return set_tls_entry(child, &info, idx, 0); | ||
323 | } | ||
324 | |||
325 | int sys_get_thread_area(struct user_desc __user *user_desc) | ||
326 | { | ||
327 | struct user_desc info; | ||
328 | int idx, ret; | ||
329 | |||
330 | if (!host_supports_tls) | ||
331 | return -ENOSYS; | ||
332 | |||
333 | if (get_user(idx, &user_desc->entry_number)) | ||
334 | return -EFAULT; | ||
335 | |||
336 | ret = get_tls_entry(current, &info, idx); | ||
337 | if (ret < 0) | ||
338 | goto out; | ||
339 | |||
340 | if (copy_to_user(user_desc, &info, sizeof(info))) | ||
341 | ret = -EFAULT; | ||
342 | |||
343 | out: | ||
344 | return ret; | ||
345 | } | ||
346 | |||
347 | /* | ||
348 | * Perform get_thread_area on behalf of the traced child. | ||
349 | */ | ||
350 | int ptrace_get_thread_area(struct task_struct *child, int idx, | ||
351 | struct user_desc __user *user_desc) | ||
352 | { | ||
353 | struct user_desc info; | ||
354 | int ret; | ||
355 | |||
356 | if (!host_supports_tls) | ||
357 | return -EIO; | ||
358 | |||
359 | ret = get_tls_entry(child, &info, idx); | ||
360 | if (ret < 0) | ||
361 | goto out; | ||
362 | |||
363 | if (copy_to_user(user_desc, &info, sizeof(info))) | ||
364 | ret = -EFAULT; | ||
365 | out: | ||
366 | return ret; | ||
367 | } | ||
368 | |||
369 | /* | ||
370 | * This code is really i386-only, but it detects and logs x86_64 GDT indexes | ||
371 | * if a 32-bit UML is running on a 64-bit host. | ||
372 | */ | ||
/*
 * Boot-time probe: ask the os layer whether the host ptrace supports TLS
 * operations and at which GDT index its TLS entries start, then log the
 * result.  host_supports_tls gates every TLS operation in this file.
 *
 * This code is really i386-only, but it detects and logs x86_64 GDT indexes
 * if a 32-bit UML is running on a 64-bit host.
 */
static int __init __setup_host_supports_tls(void)
{
	check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min);
	if (host_supports_tls) {
		printk(KERN_INFO "Host TLS support detected\n");
		printk(KERN_INFO "Detected host type: ");
		switch (host_gdt_entry_tls_min) {
		case GDT_ENTRY_TLS_MIN_I386:
			printk(KERN_CONT "i386");
			break;
		case GDT_ENTRY_TLS_MIN_X86_64:
			printk(KERN_CONT "x86_64");
			break;
		}
		/*
		 * NOTE(review): the logged end index is min + ENTRIES — one
		 * past the last slot if the range is inclusive; cosmetic only.
		 */
		printk(KERN_CONT " (GDT indexes %d to %d)\n",
		       host_gdt_entry_tls_min,
		       host_gdt_entry_tls_min + GDT_ENTRY_TLS_ENTRIES);
	} else
		printk(KERN_ERR " Host TLS support NOT detected! "
		       "TLS support inside UML will not work\n");
	return 0;
}

__initcall(__setup_host_supports_tls);
diff --git a/arch/um/sys-x86/tls_64.c b/arch/um/sys-x86/tls_64.c new file mode 100644 index 000000000000..f7ba46200ecd --- /dev/null +++ b/arch/um/sys-x86/tls_64.c | |||
@@ -0,0 +1,17 @@ | |||
1 | #include "linux/sched.h" | ||
2 | |||
/*
 * No-op on x86_64: TLS is kept as a single base value in thread.arch.fs
 * (see arch_copy_tls below), not as per-slot GDT descriptors with a
 * "flushed" flag, so there is nothing to reset on fork.
 */
void clear_flushed_tls(struct task_struct *task)
{
}
6 | |||
/*
 * Record the child's TLS at clone time: the value from the child's saved
 * %r8 register slot is stored in thread.arch.fs and programmed on context
 * switch.  Always returns 0.
 */
int arch_copy_tls(struct task_struct *t)
{
	/*
	 * If CLONE_SETTLS is set, we need to save the thread id
	 * (which is argument 5, child_tid, of clone) so it can be set
	 * during context switches.
	 */
	t->thread.arch.fs = t->thread.regs.regs.gp[R8 / sizeof(long)];

	return 0;
}
diff --git a/arch/um/sys-x86/user-offsets.c b/arch/um/sys-x86/user-offsets.c new file mode 100644 index 000000000000..718f0c0f0b0c --- /dev/null +++ b/arch/um/sys-x86/user-offsets.c | |||
@@ -0,0 +1,86 @@ | |||
1 | #include <stdio.h> | ||
2 | #include <stddef.h> | ||
3 | #include <signal.h> | ||
4 | #include <sys/poll.h> | ||
5 | #include <sys/mman.h> | ||
6 | #include <sys/user.h> | ||
7 | #define __FRAME_OFFSETS | ||
8 | #include <asm/ptrace.h> | ||
9 | #include <asm/types.h> | ||
10 | |||
11 | #define DEFINE(sym, val) \ | ||
12 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
13 | |||
14 | #define DEFINE_LONGS(sym, val) \ | ||
15 | asm volatile("\n->" #sym " %0 " #val : : "i" (val/sizeof(unsigned long))) | ||
16 | |||
17 | #define OFFSET(sym, str, mem) \ | ||
18 | DEFINE(sym, offsetof(struct str, mem)); | ||
19 | |||
/*
 * Emit "->SYM value" markers into the generated assembly; the build
 * post-processes them into a header of constants for the user-mode side:
 * sigcontext field offsets, host register-file slot indices (in
 * sizeof(long) units via DEFINE_LONGS), and mmap/poll flag values.
 * This function is never called — only compiled.
 */
void foo(void)
{
	OFFSET(HOST_SC_TRAPNO, sigcontext, trapno);
	OFFSET(HOST_SC_ERR, sigcontext, err);
	OFFSET(HOST_SC_CR2, sigcontext, cr2);

#ifdef __i386__
	DEFINE_LONGS(HOST_FP_SIZE, sizeof(struct user_fpregs_struct));
	DEFINE_LONGS(HOST_FPX_SIZE, sizeof(struct user_fpxregs_struct));

	DEFINE(HOST_IP, EIP);
	DEFINE(HOST_SP, UESP);
	DEFINE(HOST_EFLAGS, EFL);
	DEFINE(HOST_EAX, EAX);
	DEFINE(HOST_EBX, EBX);
	DEFINE(HOST_ECX, ECX);
	DEFINE(HOST_EDX, EDX);
	DEFINE(HOST_ESI, ESI);
	DEFINE(HOST_EDI, EDI);
	DEFINE(HOST_EBP, EBP);
	DEFINE(HOST_CS, CS);
	DEFINE(HOST_SS, SS);
	DEFINE(HOST_DS, DS);
	DEFINE(HOST_FS, FS);
	DEFINE(HOST_ES, ES);
	DEFINE(HOST_GS, GS);
#else
	/* x86_64: ptrace frame offsets (__FRAME_OFFSETS) scaled to longs. */
	DEFINE(HOST_FP_SIZE, sizeof(struct _fpstate) / sizeof(unsigned long));
	DEFINE_LONGS(HOST_RBX, RBX);
	DEFINE_LONGS(HOST_RCX, RCX);
	DEFINE_LONGS(HOST_RDI, RDI);
	DEFINE_LONGS(HOST_RSI, RSI);
	DEFINE_LONGS(HOST_RDX, RDX);
	DEFINE_LONGS(HOST_RBP, RBP);
	DEFINE_LONGS(HOST_RAX, RAX);
	DEFINE_LONGS(HOST_R8, R8);
	DEFINE_LONGS(HOST_R9, R9);
	DEFINE_LONGS(HOST_R10, R10);
	DEFINE_LONGS(HOST_R11, R11);
	DEFINE_LONGS(HOST_R12, R12);
	DEFINE_LONGS(HOST_R13, R13);
	DEFINE_LONGS(HOST_R14, R14);
	DEFINE_LONGS(HOST_R15, R15);
	DEFINE_LONGS(HOST_ORIG_RAX, ORIG_RAX);
	DEFINE_LONGS(HOST_CS, CS);
	DEFINE_LONGS(HOST_SS, SS);
	DEFINE_LONGS(HOST_EFLAGS, EFLAGS);
	/* Segment selectors below are intentionally disabled (kept for reference). */
#if 0
	DEFINE_LONGS(HOST_FS, FS);
	DEFINE_LONGS(HOST_GS, GS);
	DEFINE_LONGS(HOST_DS, DS);
	DEFINE_LONGS(HOST_ES, ES);
#endif

	DEFINE_LONGS(HOST_IP, RIP);
	DEFINE_LONGS(HOST_SP, RSP);
#endif

	DEFINE(UM_FRAME_SIZE, sizeof(struct user_regs_struct));
	DEFINE(UM_POLLIN, POLLIN);
	DEFINE(UM_POLLPRI, POLLPRI);
	DEFINE(UM_POLLOUT, POLLOUT);

	DEFINE(UM_PROT_READ, PROT_READ);
	DEFINE(UM_PROT_WRITE, PROT_WRITE);
	DEFINE(UM_PROT_EXEC, PROT_EXEC);
}
diff --git a/arch/um/sys-x86/vdso/Makefile b/arch/um/sys-x86/vdso/Makefile new file mode 100644 index 000000000000..5dffe6d46686 --- /dev/null +++ b/arch/um/sys-x86/vdso/Makefile | |||
@@ -0,0 +1,90 @@ | |||
#
# Building vDSO images for x86.
#

# The vDSO is only built for the 64-bit UML configuration (this directory
# is pulled in from the x86_64 branch of the parent Makefile).
VDSO64-y := y

vdso-install-$(VDSO64-y) += vdso.so


# files to link into the vdso
vobjs-y := vdso-note.o um_vdso.o

# files to link into kernel
obj-$(VDSO64-y) += vdso.o vma.o

vobjs := $(foreach F,$(vobjs-y),$(obj)/$F)

$(obj)/vdso.o: $(obj)/vdso.so

targets += vdso.so vdso.so.dbg vdso.lds $(vobjs-y)

# Preprocess the linker script with no line markers (-P), keeping comments (-C).
export CPPFLAGS_vdso.lds += -P -C

VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
	-Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096

# vdso.o .incbins vdso.so, so it must be rebuilt whenever the DSO changes.
$(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so

$(obj)/vdso.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
	$(call if_changed,vdso)

# The embedded/installed image is the debug build with symbols stripped (-S).
$(obj)/%.so: OBJCOPYFLAGS := -S
$(obj)/%.so: $(obj)/%.so.dbg FORCE
	$(call if_changed,objcopy)

#
# Don't omit frame pointers for ease of userspace debugging, but do
# optimize sibling calls.
#
CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
	$(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
	-fno-omit-frame-pointer -foptimize-sibling-calls

$(vobjs): KBUILD_CFLAGS += $(CFL)

#
# vDSO code runs in userspace and -pg doesn't help with profiling anyway.
#
CFLAGS_REMOVE_vdso-note.o = -pg
CFLAGS_REMOVE_um_vdso.o = -pg

targets += vdso-syms.lds
obj-$(VDSO64-y) += vdso-syms.lds

#
# Match symbols in the DSO that look like VDSO*; produce a file of constants.
#
sed-vdsosym := -e 's/^00*/0/' \
	-e 's/^\([0-9a-fA-F]*\) . \(VDSO[a-zA-Z0-9_]*\)$$/\2 = 0x\1;/p'
quiet_cmd_vdsosym = VDSOSYM $@
define cmd_vdsosym
	$(NM) $< | LC_ALL=C sed -n $(sed-vdsosym) | LC_ALL=C sort > $@
endef

$(obj)/%-syms.lds: $(obj)/%.so.dbg FORCE
	$(call if_changed,vdsosym)

#
# The DSO images are built using a special linker script.
#
quiet_cmd_vdso = VDSO $@
      cmd_vdso = $(CC) -nostdlib -o $@ \
		       $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
		       -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
		 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'

VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
GCOV_PROFILE := n

#
# Install the unstripped copy of vdso*.so listed in $(vdso-install-y).
#
quiet_cmd_vdso_install = INSTALL $@
      cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
$(vdso-install-y): %.so: $(obj)/%.so.dbg FORCE
	@mkdir -p $(MODLIB)/vdso
	$(call cmd,vdso_install)

PHONY += vdso_install $(vdso-install-y)
vdso_install: $(vdso-install-y)
diff --git a/arch/um/sys-x86/vdso/checkundef.sh b/arch/um/sys-x86/vdso/checkundef.sh new file mode 100644 index 000000000000..7ee90a9b549d --- /dev/null +++ b/arch/um/sys-x86/vdso/checkundef.sh | |||
@@ -0,0 +1,10 @@ | |||
#!/bin/sh
# Fail the build if the freshly linked vDSO contains undefined symbols.
# Usage: checkundef.sh <nm-command> <file>
nm="$1"
file="$2"

$nm "$file" | grep '^ *U' > /dev/null 2>&1
status=$?

# grep exits 1 only when nothing matched: no undefined symbols, success.
# Exit 0 (match found) or 2 (nm/grep failure) both fail the check.
if [ $status -eq 1 ]; then
	exit 0
fi
echo "$file: undefined symbols found" >&2
exit 1
diff --git a/arch/um/sys-x86/vdso/um_vdso.c b/arch/um/sys-x86/vdso/um_vdso.c new file mode 100644 index 000000000000..7c441b59d375 --- /dev/null +++ b/arch/um/sys-x86/vdso/um_vdso.c | |||
@@ -0,0 +1,71 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2011 Richard Weinberger <richrd@nod.at> | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * This vDSO turns all calls into a syscall so that UML can trap them. | ||
9 | */ | ||
10 | |||
11 | |||
12 | /* Disable profiling for userspace code */ | ||
13 | #define DISABLE_BRANCH_PROFILING | ||
14 | |||
15 | #include <linux/time.h> | ||
16 | #include <linux/getcpu.h> | ||
17 | #include <asm/unistd.h> | ||
18 | |||
19 | int __vdso_clock_gettime(clockid_t clock, struct timespec *ts) | ||
20 | { | ||
21 | long ret; | ||
22 | |||
23 | asm("syscall" : "=a" (ret) : | ||
24 | "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory"); | ||
25 | |||
26 | return ret; | ||
27 | } | ||
28 | int clock_gettime(clockid_t, struct timespec *) | ||
29 | __attribute__((weak, alias("__vdso_clock_gettime"))); | ||
30 | |||
31 | int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz) | ||
32 | { | ||
33 | long ret; | ||
34 | |||
35 | asm("syscall" : "=a" (ret) : | ||
36 | "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory"); | ||
37 | |||
38 | return ret; | ||
39 | } | ||
40 | int gettimeofday(struct timeval *, struct timezone *) | ||
41 | __attribute__((weak, alias("__vdso_gettimeofday"))); | ||
42 | |||
43 | time_t __vdso_time(time_t *t) | ||
44 | { | ||
45 | long secs; | ||
46 | |||
47 | asm volatile("syscall" | ||
48 | : "=a" (secs) | ||
49 | : "0" (__NR_time), "D" (t) : "cc", "r11", "cx", "memory"); | ||
50 | |||
51 | return secs; | ||
52 | } | ||
53 | int time(time_t *t) __attribute__((weak, alias("__vdso_time"))); | ||
54 | |||
/*
 * vDSO getcpu(): UML is uniprocessor, so every caller runs on CPU 0,
 * NUMA node 0 — no syscall needed.  The cache argument is ignored.
 */
long
__vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *cache)
{
	if (cpu)
		*cpu = 0;

	if (node)
		*node = 0;

	return 0;
}

long getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
	__attribute__((weak, alias("__vdso_getcpu")));
diff --git a/arch/um/sys-x86/vdso/vdso-layout.lds.S b/arch/um/sys-x86/vdso/vdso-layout.lds.S new file mode 100644 index 000000000000..634a2cf62046 --- /dev/null +++ b/arch/um/sys-x86/vdso/vdso-layout.lds.S | |||
@@ -0,0 +1,64 @@ | |||
/*
 * Linker script for vDSO. This is an ELF shared object prelinked to
 * its virtual address, and with only one read-only segment.
 * This script controls its layout.
 */

SECTIONS
{
	/* Sections start right after the ELF/program headers. */
	. = VDSO_PRELINK + SIZEOF_HEADERS;

	.hash		: { *(.hash) }			:text
	.gnu.hash	: { *(.gnu.hash) }
	.dynsym		: { *(.dynsym) }
	.dynstr		: { *(.dynstr) }
	.gnu.version	: { *(.gnu.version) }
	.gnu.version_d	: { *(.gnu.version_d) }
	.gnu.version_r	: { *(.gnu.version_r) }

	.note		: { *(.note.*) }		:text	:note

	.eh_frame_hdr	: { *(.eh_frame_hdr) }		:text	:eh_frame_hdr
	.eh_frame	: { KEEP (*(.eh_frame)) }	:text

	.dynamic	: { *(.dynamic) }		:text	:dynamic

	.rodata		: { *(.rodata*) }		:text
	/* Writable-looking sections still land in the single RX segment;
	 * the vDSO has no writable data at runtime. */
	.data		: {
	      *(.data*)
	      *(.sdata*)
	      *(.got.plt) *(.got)
	      *(.gnu.linkonce.d.*)
	      *(.bss*)
	      *(.dynbss*)
	      *(.gnu.linkonce.b.*)
	}

	.altinstructions	: { *(.altinstructions) }
	.altinstr_replacement	: { *(.altinstr_replacement) }

	/*
	 * Align the actual code well away from the non-instruction data.
	 * This is the best thing for the I-cache.
	 */
	. = ALIGN(0x100);

	/* Pad gaps with NOPs (0x90). */
	.text		: { *(.text*) }			:text	=0x90909090
}

/*
 * Very old versions of ld do not recognize this name token; use the constant.
 */
#define PT_GNU_EH_FRAME	0x6474e550

/*
 * We must supply the ELF program headers explicitly to get just one
 * PT_LOAD segment, and set the flags explicitly to make segments read-only.
 */
PHDRS
{
	text		PT_LOAD		FLAGS(5) FILEHDR PHDRS;	/* PF_R|PF_X */
	dynamic		PT_DYNAMIC	FLAGS(4);		/* PF_R */
	note		PT_NOTE		FLAGS(4);		/* PF_R */
	eh_frame_hdr	PT_GNU_EH_FRAME;
}
diff --git a/arch/um/sys-x86/vdso/vdso-note.S b/arch/um/sys-x86/vdso/vdso-note.S new file mode 100644 index 000000000000..79a071e4357e --- /dev/null +++ b/arch/um/sys-x86/vdso/vdso-note.S | |||
@@ -0,0 +1,12 @@ | |||
/*
 * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
 * Here we can supply some information useful to userland.
 */

#include <linux/uts.h>
#include <linux/version.h>
#include <linux/elfnote.h>

/* A "Linux" ELF note carrying the kernel version the vDSO was built for. */
ELFNOTE_START(Linux, 0, "a")
	.long LINUX_VERSION_CODE
ELFNOTE_END
diff --git a/arch/um/sys-x86/vdso/vdso.S b/arch/um/sys-x86/vdso/vdso.S new file mode 100644 index 000000000000..03b053283f86 --- /dev/null +++ b/arch/um/sys-x86/vdso/vdso.S | |||
@@ -0,0 +1,10 @@ | |||
#include <linux/init.h>

/*
 * Embed the prelinked vdso.so image into the kernel as init data;
 * vdso_start/vdso_end delimit the blob copied into a page by
 * vdso/vma.c at boot.
 */
__INITDATA

	.globl vdso_start, vdso_end
vdso_start:
	.incbin "arch/um/sys-x86/vdso/vdso.so"
vdso_end:

__FINIT
diff --git a/arch/um/sys-x86/vdso/vdso.lds.S b/arch/um/sys-x86/vdso/vdso.lds.S new file mode 100644 index 000000000000..b96b2677cad8 --- /dev/null +++ b/arch/um/sys-x86/vdso/vdso.lds.S | |||
@@ -0,0 +1,32 @@ | |||
/*
 * Linker script for 64-bit vDSO.
 * We #include the file to define the layout details.
 * Here we only choose the prelinked virtual address.
 *
 * This file defines the version script giving the user-exported symbols in
 * the DSO. We can define local symbols here called VDSO* to make their
 * values visible using the asm-x86/vdso.h macros from the kernel proper.
 */

#define VDSO_PRELINK 0xffffffffff700000
#include "vdso-layout.lds.S"

/*
 * This controls what userland symbols we export from the vDSO.
 */
VERSION {
	LINUX_2.6 {
	global:
		clock_gettime;
		__vdso_clock_gettime;
		gettimeofday;
		__vdso_gettimeofday;
		getcpu;
		__vdso_getcpu;
		time;
		__vdso_time;
	local: *;
	};
}

/* Picked up by the vdso-syms.lds post-processing (VDSO* constants). */
VDSO64_PRELINK = VDSO_PRELINK;
diff --git a/arch/um/sys-x86/vdso/vma.c b/arch/um/sys-x86/vdso/vma.c new file mode 100644 index 000000000000..9495c8d0ce37 --- /dev/null +++ b/arch/um/sys-x86/vdso/vma.c | |||
@@ -0,0 +1,74 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2011 Richard Weinberger <richrd@nod.at> | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | #include <linux/slab.h> | ||
10 | #include <linux/sched.h> | ||
11 | #include <linux/mm.h> | ||
12 | #include <asm/page.h> | ||
13 | #include <linux/init.h> | ||
14 | |||
15 | unsigned int __read_mostly vdso_enabled = 1; | ||
16 | unsigned long um_vdso_addr; | ||
17 | |||
18 | extern unsigned long task_size; | ||
19 | extern char vdso_start[], vdso_end[]; | ||
20 | |||
21 | static struct page **vdsop; | ||
22 | |||
23 | static int __init init_vdso(void) | ||
24 | { | ||
25 | struct page *um_vdso; | ||
26 | |||
27 | BUG_ON(vdso_end - vdso_start > PAGE_SIZE); | ||
28 | |||
29 | um_vdso_addr = task_size - PAGE_SIZE; | ||
30 | |||
31 | vdsop = kmalloc(GFP_KERNEL, sizeof(struct page *)); | ||
32 | if (!vdsop) | ||
33 | goto oom; | ||
34 | |||
35 | um_vdso = alloc_page(GFP_KERNEL); | ||
36 | if (!um_vdso) { | ||
37 | kfree(vdsop); | ||
38 | |||
39 | goto oom; | ||
40 | } | ||
41 | |||
42 | copy_page(page_address(um_vdso), vdso_start); | ||
43 | *vdsop = um_vdso; | ||
44 | |||
45 | return 0; | ||
46 | |||
47 | oom: | ||
48 | printk(KERN_ERR "Cannot allocate vdso\n"); | ||
49 | vdso_enabled = 0; | ||
50 | |||
51 | return -ENOMEM; | ||
52 | } | ||
53 | subsys_initcall(init_vdso); | ||
54 | |||
/*
 * Map the single vDSO page into a new process at the fixed um_vdso_addr
 * chosen in init_vdso().  Called from the binfmt loader.  Returns the
 * error from install_special_mapping(); a globally disabled vDSO
 * (vdso_enabled == 0, e.g. after an allocation failure at boot) is not
 * an error.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	int err;
	struct mm_struct *mm = current->mm;

	if (!vdso_enabled)
		return 0;

	down_write(&mm->mmap_sem);

	err = install_special_mapping(mm, um_vdso_addr, PAGE_SIZE,
		VM_READ|VM_EXEC|
		VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
		VM_ALWAYSDUMP,
		vdsop);

	up_write(&mm->mmap_sem);

	return err;
}