author     Linus Torvalds <torvalds@linux-foundation.org>    2011-11-02 12:45:39 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2011-11-02 12:45:39 -0400
commit     de0a5345a55b8dd5a4695181275df0e691176830 (patch)
tree       17530e824f7f46ce0b1757657179fb5957a6add5 /arch/x86
parent     994c0e992522c123298b4a91b72f5e67ba2d1123 (diff)
parent     8535639810e578960233ad39def3ac2157b0c3ec (diff)
Merge branch 'for-linus' of git://github.com/richardweinberger/linux
* 'for-linus' of git://github.com/richardweinberger/linux: (90 commits)
um: fix ubd cow size
um: Fix kmalloc argument order in um/vdso/vma.c
um: switch to use of drivers/Kconfig
UserModeLinux-HOWTO.txt: fix a typo
UserModeLinux-HOWTO.txt: remove ^H characters
um: we need sys/user.h only on i386
um: merge delay_{32,64}.c
um: distribute exports to where exported stuff is defined
um: kill system-um.h
um: generic ftrace.h will do...
um: segment.h is x86-only and needed only there
um: asm/pda.h is not needed anymore
um: hw_irq.h can go generic as well
um: switch to generic-y
um: clean Kconfig up a bit
um: a couple of missing dependencies...
um: kill useless argument of free_chan() and free_one_chan()
um: unify ptrace_user.h
um: unify KSTK_...
um: fix gcov build breakage
...
Diffstat (limited to 'arch/x86')
87 files changed, 6571 insertions, 0 deletions
diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um
new file mode 100644
index 000000000000..36ddec6a41c9
--- /dev/null
+++ b/arch/x86/Makefile.um
@@ -0,0 +1,61 @@
1 | core-y += arch/x86/crypto/ | ||
2 | |||
3 | ifeq ($(CONFIG_X86_32),y) | ||
4 | START := 0x8048000 | ||
5 | |||
6 | LDFLAGS += -m elf_i386 | ||
7 | ELF_ARCH := i386 | ||
8 | ELF_FORMAT := elf32-i386 | ||
9 | CHECKFLAGS += -D__i386__ | ||
10 | |||
11 | ifeq ("$(origin SUBARCH)", "command line") | ||
12 | ifneq ("$(shell uname -m | sed -e s/i.86/i386/)", "$(SUBARCH)") | ||
13 | KBUILD_CFLAGS += $(call cc-option,-m32) | ||
14 | KBUILD_AFLAGS += $(call cc-option,-m32) | ||
15 | LINK-y += $(call cc-option,-m32) | ||
16 | |||
17 | export LDFLAGS | ||
18 | endif | ||
19 | endif | ||
20 | |||
21 | # First of all, tune CFLAGS for the specific CPU. This actually sets cflags-y. | ||
22 | include $(srctree)/arch/x86/Makefile_32.cpu | ||
23 | |||
24 | # prevent gcc from keeping the stack 16 byte aligned. Taken from i386. | ||
25 | cflags-y += $(call cc-option,-mpreferred-stack-boundary=2) | ||
26 | |||
27 | # Prevent sprintf in nfsd from being converted to strcpy and resulting in | ||
28 | # an unresolved reference. | ||
29 | cflags-y += -ffreestanding | ||
30 | |||
31 | # Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use | ||
32 | # a lot more stack due to the lack of sharing of stacklots. Also, gcc | ||
33 | # 4.3.0 needs -funit-at-a-time for extern inline functions. | ||
34 | KBUILD_CFLAGS += $(shell if [ $(call cc-version) -lt 0400 ] ; then \ | ||
35 | echo $(call cc-option,-fno-unit-at-a-time); \ | ||
36 | else echo $(call cc-option,-funit-at-a-time); fi ;) | ||
37 | |||
38 | KBUILD_CFLAGS += $(cflags-y) | ||
39 | |||
40 | else | ||
41 | |||
42 | START := 0x60000000 | ||
43 | |||
44 | KBUILD_CFLAGS += -fno-builtin -m64 | ||
45 | |||
46 | CHECKFLAGS += -m64 -D__x86_64__ | ||
47 | KBUILD_AFLAGS += -m64 | ||
48 | LDFLAGS += -m elf_x86_64 | ||
49 | KBUILD_CPPFLAGS += -m64 | ||
50 | |||
51 | ELF_ARCH := i386:x86-64 | ||
52 | ELF_FORMAT := elf64-x86-64 | ||
53 | |||
54 | # Not all 64-bit distros have /lib as a symlink to /lib64; PLD is an example. | ||
55 | |||
56 | LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib64 | ||
57 | LINK-y += -m64 | ||
58 | |||
59 | # Do unit-at-a-time unconditionally on x86_64, following the host | ||
60 | KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time) | ||
61 | endif | ||
diff --git a/arch/x86/um/Kconfig b/arch/x86/um/Kconfig
new file mode 100644
index 000000000000..21bebe63df66
--- /dev/null
+++ b/arch/x86/um/Kconfig
@@ -0,0 +1,70 @@
1 | mainmenu "User Mode Linux/$SUBARCH $KERNELVERSION Kernel Configuration" | ||
2 | |||
3 | source "arch/um/Kconfig.common" | ||
4 | |||
5 | menu "UML-specific options" | ||
6 | |||
7 | menu "Host processor type and features" | ||
8 | |||
9 | config CMPXCHG_LOCAL | ||
10 | bool | ||
11 | default n | ||
12 | |||
13 | config CMPXCHG_DOUBLE | ||
14 | bool | ||
15 | default n | ||
16 | |||
17 | source "arch/x86/Kconfig.cpu" | ||
18 | |||
19 | endmenu | ||
20 | |||
21 | config UML_X86 | ||
22 | def_bool y | ||
23 | select GENERIC_FIND_FIRST_BIT | ||
24 | |||
25 | config 64BIT | ||
26 | bool | ||
27 | default SUBARCH = "x86_64" | ||
28 | |||
29 | config X86_32 | ||
30 | def_bool !64BIT | ||
31 | select HAVE_AOUT | ||
32 | |||
33 | config X86_64 | ||
34 | def_bool 64BIT | ||
35 | |||
36 | config RWSEM_XCHGADD_ALGORITHM | ||
37 | def_bool X86_XADD && 64BIT | ||
38 | |||
39 | config RWSEM_GENERIC_SPINLOCK | ||
40 | def_bool !RWSEM_XCHGADD_ALGORITHM | ||
41 | |||
42 | config 3_LEVEL_PGTABLES | ||
43 | bool "Three-level pagetables (EXPERIMENTAL)" if !64BIT | ||
44 | default 64BIT | ||
45 | depends on EXPERIMENTAL | ||
46 | help | ||
47 | Three-level pagetables will let UML have more than 4G of physical | ||
48 | memory. All the memory that can't be mapped directly will be treated | ||
49 | as high memory. | ||
50 | |||
51 | However, this is experimental on 32-bit architectures, so if unsure say | ||
52 | N (on x86-64 it is enabled automatically, as it is safe there). | ||
53 | |||
54 | config ARCH_HAS_SC_SIGNALS | ||
55 | def_bool !64BIT | ||
56 | |||
57 | config ARCH_REUSE_HOST_VSYSCALL_AREA | ||
58 | def_bool !64BIT | ||
59 | |||
60 | config SMP_BROKEN | ||
61 | def_bool 64BIT | ||
62 | |||
63 | config GENERIC_HWEIGHT | ||
64 | def_bool y | ||
65 | |||
66 | source "arch/um/Kconfig.um" | ||
67 | |||
68 | endmenu | ||
69 | |||
70 | source "arch/um/Kconfig.rest" | ||
diff --git a/arch/x86/um/Makefile b/arch/x86/um/Makefile
new file mode 100644
index 000000000000..8fb58400e415
--- /dev/null
+++ b/arch/x86/um/Makefile
@@ -0,0 +1,45 @@
1 | # | ||
2 | # Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
3 | # | ||
4 | |||
5 | ifeq ($(CONFIG_X86_32),y) | ||
6 | BITS := 32 | ||
7 | else | ||
8 | BITS := 64 | ||
9 | endif | ||
10 | |||
11 | obj-y = bug.o bugs_$(BITS).o delay.o fault.o ksyms.o ldt.o \ | ||
12 | ptrace_$(BITS).o ptrace_user.o setjmp_$(BITS).o signal.o \ | ||
13 | stub_$(BITS).o stub_segv.o syscalls_$(BITS).o \ | ||
14 | sys_call_table_$(BITS).o sysrq_$(BITS).o tls_$(BITS).o \ | ||
15 | mem_$(BITS).o subarch.o os-$(OS)/ | ||
16 | |||
17 | ifeq ($(CONFIG_X86_32),y) | ||
18 | |||
19 | obj-y += checksum_32.o | ||
20 | obj-$(CONFIG_BINFMT_ELF) += elfcore.o | ||
21 | |||
22 | subarch-y = ../lib/string_32.o ../lib/atomic64_32.o ../lib/atomic64_cx8_32.o | ||
23 | subarch-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += ../lib/rwsem.o | ||
24 | subarch-$(CONFIG_HIGHMEM) += ../mm/highmem_32.o | ||
25 | |||
26 | else | ||
27 | |||
28 | obj-y += vdso/ | ||
29 | |||
30 | subarch-y = ../lib/csum-partial_64.o ../lib/memcpy_64.o ../lib/thunk_64.o \ | ||
31 | ../lib/rwsem.o | ||
32 | |||
33 | endif | ||
34 | |||
35 | subarch-$(CONFIG_MODULES) += ../kernel/module.o | ||
36 | |||
37 | USER_OBJS := bugs_$(BITS).o ptrace_user.o fault.o | ||
38 | |||
39 | extra-y += user-offsets.s | ||
40 | $(obj)/user-offsets.s: c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS) | ||
41 | |||
42 | UNPROFILE_OBJS := stub_segv.o | ||
43 | CFLAGS_stub_segv.o := $(CFLAGS_NO_HARDENING) | ||
44 | |||
45 | include arch/um/scripts/Makefile.rules | ||
diff --git a/arch/x86/um/asm/apic.h b/arch/x86/um/asm/apic.h
new file mode 100644
index 000000000000..876dee84ab11
--- /dev/null
+++ b/arch/x86/um/asm/apic.h
@@ -0,0 +1,4 @@
1 | #ifndef __UM_APIC_H | ||
2 | #define __UM_APIC_H | ||
3 | |||
4 | #endif | ||
diff --git a/arch/x86/um/asm/arch_hweight.h b/arch/x86/um/asm/arch_hweight.h
new file mode 100644
index 000000000000..c656cf443f4a
--- /dev/null
+++ b/arch/x86/um/asm/arch_hweight.h
@@ -0,0 +1,6 @@
1 | #ifndef _ASM_UM_HWEIGHT_H | ||
2 | #define _ASM_UM_HWEIGHT_H | ||
3 | |||
4 | #include <asm-generic/bitops/arch_hweight.h> | ||
5 | |||
6 | #endif | ||
diff --git a/arch/x86/um/asm/archparam.h b/arch/x86/um/asm/archparam.h
new file mode 100644
index 000000000000..c17cf68dda0f
--- /dev/null
+++ b/arch/x86/um/asm/archparam.h
@@ -0,0 +1,20 @@
1 | /* | ||
2 | * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com) | ||
3 | * Copyright 2003 PathScale, Inc. | ||
4 | * Licensed under the GPL | ||
5 | */ | ||
6 | |||
7 | #ifndef __UM_ARCHPARAM_H | ||
8 | #define __UM_ARCHPARAM_H | ||
9 | |||
10 | #ifdef CONFIG_X86_32 | ||
11 | |||
12 | #ifdef CONFIG_X86_PAE | ||
13 | #define LAST_PKMAP 512 | ||
14 | #else | ||
15 | #define LAST_PKMAP 1024 | ||
16 | #endif | ||
17 | |||
18 | #endif | ||
19 | |||
20 | #endif | ||
diff --git a/arch/x86/um/asm/checksum.h b/arch/x86/um/asm/checksum.h
new file mode 100644
index 000000000000..b6efe2381b5d
--- /dev/null
+++ b/arch/x86/um/asm/checksum.h
@@ -0,0 +1,10 @@
1 | #ifndef __UM_CHECKSUM_H | ||
2 | #define __UM_CHECKSUM_H | ||
3 | |||
4 | #ifdef CONFIG_X86_32 | ||
5 | # include "checksum_32.h" | ||
6 | #else | ||
7 | # include "checksum_64.h" | ||
8 | #endif | ||
9 | |||
10 | #endif | ||
diff --git a/arch/x86/um/asm/checksum_32.h b/arch/x86/um/asm/checksum_32.h
new file mode 100644
index 000000000000..caab74252e27
--- /dev/null
+++ b/arch/x86/um/asm/checksum_32.h
@@ -0,0 +1,201 @@
1 | /* | ||
2 | * Licensed under the GPL | ||
3 | */ | ||
4 | |||
5 | #ifndef __UM_SYSDEP_CHECKSUM_H | ||
6 | #define __UM_SYSDEP_CHECKSUM_H | ||
7 | |||
8 | #include "linux/in6.h" | ||
9 | #include "linux/string.h" | ||
10 | |||
11 | /* | ||
12 | * computes the checksum of a memory block at buff, length len, | ||
13 | * and adds in "sum" (32-bit) | ||
14 | * | ||
15 | * returns a 32-bit number suitable for feeding into itself | ||
16 | * or csum_tcpudp_magic | ||
17 | * | ||
18 | * this function must be called with even lengths, except | ||
19 | * for the last fragment, which may be odd | ||
20 | * | ||
21 | * it's best to have buff aligned on a 32-bit boundary | ||
22 | */ | ||
23 | __wsum csum_partial(const void *buff, int len, __wsum sum); | ||
24 | |||
25 | /* | ||
26 | * Note: when you get a NULL pointer exception here this means someone | ||
27 | * passed in an incorrect kernel address to one of these functions. | ||
28 | * | ||
29 | * If you use these functions directly please don't forget the | ||
30 | * access_ok(). | ||
31 | */ | ||
32 | |||
33 | static __inline__ | ||
34 | __wsum csum_partial_copy_nocheck(const void *src, void *dst, | ||
35 | int len, __wsum sum) | ||
36 | { | ||
37 | memcpy(dst, src, len); | ||
38 | return csum_partial(dst, len, sum); | ||
39 | } | ||
40 | |||
41 | /* | ||
42 | * The same as csum_partial, but copies from src while it | ||
43 | * checksums, and handles user-space pointer exceptions correctly when needed. | ||
44 | * | ||
45 | * Here it is even more important to align src and dst on a 32-bit (or, | ||
46 | * even better, a 64-bit) boundary. | ||
47 | */ | ||
48 | |||
49 | static __inline__ | ||
50 | __wsum csum_partial_copy_from_user(const void __user *src, void *dst, | ||
51 | int len, __wsum sum, int *err_ptr) | ||
52 | { | ||
53 | if (copy_from_user(dst, src, len)) { | ||
54 | *err_ptr = -EFAULT; | ||
55 | return (__force __wsum)-1; | ||
56 | } | ||
57 | |||
58 | return csum_partial(dst, len, sum); | ||
59 | } | ||
60 | |||
61 | /* | ||
62 | * This is a version of ip_compute_csum() optimized for IP headers, | ||
63 | * which always checksum on 4 octet boundaries. | ||
64 | * | ||
65 | * By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by | ||
66 | * Arnt Gulbrandsen. | ||
67 | */ | ||
68 | static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) | ||
69 | { | ||
70 | unsigned int sum; | ||
71 | |||
72 | __asm__ __volatile__( | ||
73 | "movl (%1), %0 ;\n" | ||
74 | "subl $4, %2 ;\n" | ||
75 | "jbe 2f ;\n" | ||
76 | "addl 4(%1), %0 ;\n" | ||
77 | "adcl 8(%1), %0 ;\n" | ||
78 | "adcl 12(%1), %0 ;\n" | ||
79 | "1: adcl 16(%1), %0 ;\n" | ||
80 | "lea 4(%1), %1 ;\n" | ||
81 | "decl %2 ;\n" | ||
82 | "jne 1b ;\n" | ||
83 | "adcl $0, %0 ;\n" | ||
84 | "movl %0, %2 ;\n" | ||
85 | "shrl $16, %0 ;\n" | ||
86 | "addw %w2, %w0 ;\n" | ||
87 | "adcl $0, %0 ;\n" | ||
88 | "notl %0 ;\n" | ||
89 | "2: ;\n" | ||
90 | /* Since the input registers which are loaded with iph and ihl | ||
91 | are modified, we must also specify them as outputs, or gcc | ||
92 | will assume they contain their original values. */ | ||
93 | : "=r" (sum), "=r" (iph), "=r" (ihl) | ||
94 | : "1" (iph), "2" (ihl) | ||
95 | : "memory"); | ||
96 | return (__force __sum16)sum; | ||
97 | } | ||
98 | |||
99 | /* | ||
100 | * Fold a partial checksum | ||
101 | */ | ||
102 | |||
103 | static inline __sum16 csum_fold(__wsum sum) | ||
104 | { | ||
105 | __asm__( | ||
106 | "addl %1, %0 ;\n" | ||
107 | "adcl $0xffff, %0 ;\n" | ||
108 | : "=r" (sum) | ||
109 | : "r" ((__force u32)sum << 16), | ||
110 | "0" ((__force u32)sum & 0xffff0000) | ||
111 | ); | ||
112 | return (__force __sum16)(~(__force u32)sum >> 16); | ||
113 | } | ||
114 | |||
115 | static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, | ||
116 | unsigned short len, | ||
117 | unsigned short proto, | ||
118 | __wsum sum) | ||
119 | { | ||
120 | __asm__( | ||
121 | "addl %1, %0 ;\n" | ||
122 | "adcl %2, %0 ;\n" | ||
123 | "adcl %3, %0 ;\n" | ||
124 | "adcl $0, %0 ;\n" | ||
125 | : "=r" (sum) | ||
126 | : "g" (daddr), "g"(saddr), "g"((len + proto) << 8), "0"(sum)); | ||
127 | return sum; | ||
128 | } | ||
129 | |||
130 | /* | ||
131 | * computes the checksum of the TCP/UDP pseudo-header | ||
132 | * returns a 16-bit checksum, already complemented | ||
133 | */ | ||
134 | static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, | ||
135 | unsigned short len, | ||
136 | unsigned short proto, | ||
137 | __wsum sum) | ||
138 | { | ||
139 | return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); | ||
140 | } | ||
141 | |||
142 | /* | ||
143 | * this routine is used for miscellaneous IP-like checksums, mainly | ||
144 | * in icmp.c | ||
145 | */ | ||
146 | |||
147 | static inline __sum16 ip_compute_csum(const void *buff, int len) | ||
148 | { | ||
149 | return csum_fold (csum_partial(buff, len, 0)); | ||
150 | } | ||
151 | |||
152 | #define _HAVE_ARCH_IPV6_CSUM | ||
153 | static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, | ||
154 | const struct in6_addr *daddr, | ||
155 | __u32 len, unsigned short proto, | ||
156 | __wsum sum) | ||
157 | { | ||
158 | __asm__( | ||
159 | "addl 0(%1), %0 ;\n" | ||
160 | "adcl 4(%1), %0 ;\n" | ||
161 | "adcl 8(%1), %0 ;\n" | ||
162 | "adcl 12(%1), %0 ;\n" | ||
163 | "adcl 0(%2), %0 ;\n" | ||
164 | "adcl 4(%2), %0 ;\n" | ||
165 | "adcl 8(%2), %0 ;\n" | ||
166 | "adcl 12(%2), %0 ;\n" | ||
167 | "adcl %3, %0 ;\n" | ||
168 | "adcl %4, %0 ;\n" | ||
169 | "adcl $0, %0 ;\n" | ||
170 | : "=&r" (sum) | ||
171 | : "r" (saddr), "r" (daddr), | ||
172 | "r"(htonl(len)), "r"(htonl(proto)), "0"(sum)); | ||
173 | |||
174 | return csum_fold(sum); | ||
175 | } | ||
176 | |||
177 | /* | ||
178 | * Copy and checksum to user | ||
179 | */ | ||
180 | #define HAVE_CSUM_COPY_USER | ||
181 | static __inline__ __wsum csum_and_copy_to_user(const void *src, | ||
182 | void __user *dst, | ||
183 | int len, __wsum sum, int *err_ptr) | ||
184 | { | ||
185 | if (access_ok(VERIFY_WRITE, dst, len)) { | ||
186 | if (copy_to_user(dst, src, len)) { | ||
187 | *err_ptr = -EFAULT; | ||
188 | return (__force __wsum)-1; | ||
189 | } | ||
190 | |||
191 | return csum_partial(src, len, sum); | ||
192 | } | ||
193 | |||
194 | if (len) | ||
195 | *err_ptr = -EFAULT; | ||
196 | |||
197 | return (__force __wsum)-1; /* invalid checksum */ | ||
198 | } | ||
199 | |||
200 | #endif | ||
201 | |||
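The comments in checksum_32.h above describe how csum_partial(), csum_fold() and csum_tcpudp_magic() fit together: a 32-bit one's-complement accumulation that is later folded to 16 bits and complemented. For illustration only (not part of this commit), here is a hedged, portable C sketch of that folding scheme without the inline assembly; the function names and the sample payload are invented for the example and are not bit-for-bit equivalents of the kernel helpers.

```c
#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit partial sum to 16 bits and complement it, in the spirit
 * of csum_fold(). */
static uint16_t fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* add high half into low half */
	sum = (sum & 0xffff) + (sum >> 16);	/* pick up a possible carry */
	return (uint16_t)~sum;
}

/* One's-complement accumulation over a small buffer of big-endian 16-bit
 * words, in the spirit of csum_partial() with an initial sum of 0.
 * Assumes an even length and a buffer small enough not to overflow. */
static uint32_t partial(const uint8_t *buf, int len)
{
	uint32_t sum = 0;

	while (len > 1) {
		sum += (uint32_t)buf[0] << 8 | buf[1];
		buf += 2;
		len -= 2;
	}
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

int main(void)
{
	/* Checksumming a made-up 8-byte payload, as fold(partial(...)) would. */
	uint8_t payload[8] = { 0x45, 0x00, 0x00, 0x1c, 0x12, 0x34, 0x00, 0x00 };

	printf("folded checksum: 0x%04x\n", fold(partial(payload, sizeof(payload))));
	return 0;
}
```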
diff --git a/arch/x86/um/asm/checksum_64.h b/arch/x86/um/asm/checksum_64.h
new file mode 100644
index 000000000000..a5be9031ea85
--- /dev/null
+++ b/arch/x86/um/asm/checksum_64.h
@@ -0,0 +1,144 @@
1 | /* | ||
2 | * Licensed under the GPL | ||
3 | */ | ||
4 | |||
5 | #ifndef __UM_SYSDEP_CHECKSUM_H | ||
6 | #define __UM_SYSDEP_CHECKSUM_H | ||
7 | |||
8 | #include "linux/string.h" | ||
9 | #include "linux/in6.h" | ||
10 | #include "asm/uaccess.h" | ||
11 | |||
12 | extern __wsum csum_partial(const void *buff, int len, __wsum sum); | ||
13 | |||
14 | /* | ||
15 | * Note: when you get a NULL pointer exception here this means someone | ||
16 | * passed in an incorrect kernel address to one of these functions. | ||
17 | * | ||
18 | * If you use these functions directly please don't forget the | ||
19 | * access_ok(). | ||
20 | */ | ||
21 | |||
22 | static __inline__ | ||
23 | __wsum csum_partial_copy_nocheck(const void *src, void *dst, | ||
24 | int len, __wsum sum) | ||
25 | { | ||
26 | memcpy(dst, src, len); | ||
27 | return(csum_partial(dst, len, sum)); | ||
28 | } | ||
29 | |||
30 | static __inline__ | ||
31 | __wsum csum_partial_copy_from_user(const void __user *src, | ||
32 | void *dst, int len, __wsum sum, | ||
33 | int *err_ptr) | ||
34 | { | ||
35 | if (copy_from_user(dst, src, len)) { | ||
36 | *err_ptr = -EFAULT; | ||
37 | return (__force __wsum)-1; | ||
38 | } | ||
39 | return csum_partial(dst, len, sum); | ||
40 | } | ||
41 | |||
42 | /** | ||
43 | * csum_fold - Fold and invert a 32bit checksum. | ||
44 | * sum: 32bit unfolded sum | ||
45 | * | ||
46 | * Fold a 32bit running checksum to 16bit and invert it. This is usually | ||
47 | * the last step before putting a checksum into a packet. | ||
48 | * Make sure not to mix with 64bit checksums. | ||
49 | */ | ||
50 | static inline __sum16 csum_fold(__wsum sum) | ||
51 | { | ||
52 | __asm__( | ||
53 | " addl %1,%0\n" | ||
54 | " adcl $0xffff,%0" | ||
55 | : "=r" (sum) | ||
56 | : "r" ((__force u32)sum << 16), | ||
57 | "0" ((__force u32)sum & 0xffff0000) | ||
58 | ); | ||
59 | return (__force __sum16)(~(__force u32)sum >> 16); | ||
60 | } | ||
61 | |||
62 | /** | ||
63 | * csum_tcpudp_nofold - Compute an IPv4 pseudo header checksum. | ||
64 | * @saddr: source address | ||
65 | * @daddr: destination address | ||
66 | * @len: length of packet | ||
67 | * @proto: ip protocol of packet | ||
68 | * @sum: initial sum to be added in (32bit unfolded) | ||
69 | * | ||
70 | * Returns the pseudo header checksum for the input data. Result is | ||
71 | * 32bit unfolded. | ||
72 | */ | ||
73 | static inline __wsum | ||
74 | csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, | ||
75 | unsigned short proto, __wsum sum) | ||
76 | { | ||
77 | asm(" addl %1, %0\n" | ||
78 | " adcl %2, %0\n" | ||
79 | " adcl %3, %0\n" | ||
80 | " adcl $0, %0\n" | ||
81 | : "=r" (sum) | ||
82 | : "g" (daddr), "g" (saddr), "g" ((len + proto) << 8), "0" (sum)); | ||
83 | return sum; | ||
84 | } | ||
85 | |||
86 | /* | ||
87 | * computes the checksum of the TCP/UDP pseudo-header | ||
88 | * returns a 16-bit checksum, already complemented | ||
89 | */ | ||
90 | static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, | ||
91 | unsigned short len, | ||
92 | unsigned short proto, | ||
93 | __wsum sum) | ||
94 | { | ||
95 | return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); | ||
96 | } | ||
97 | |||
98 | /** | ||
99 | * ip_fast_csum - Compute the IPv4 header checksum efficiently. | ||
100 | * iph: ipv4 header | ||
101 | * ihl: length of header / 4 | ||
102 | */ | ||
103 | static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) | ||
104 | { | ||
105 | unsigned int sum; | ||
106 | |||
107 | asm( " movl (%1), %0\n" | ||
108 | " subl $4, %2\n" | ||
109 | " jbe 2f\n" | ||
110 | " addl 4(%1), %0\n" | ||
111 | " adcl 8(%1), %0\n" | ||
112 | " adcl 12(%1), %0\n" | ||
113 | "1: adcl 16(%1), %0\n" | ||
114 | " lea 4(%1), %1\n" | ||
115 | " decl %2\n" | ||
116 | " jne 1b\n" | ||
117 | " adcl $0, %0\n" | ||
118 | " movl %0, %2\n" | ||
119 | " shrl $16, %0\n" | ||
120 | " addw %w2, %w0\n" | ||
121 | " adcl $0, %0\n" | ||
122 | " notl %0\n" | ||
123 | "2:" | ||
124 | /* Since the input registers which are loaded with iph and ihl | ||
125 | are modified, we must also specify them as outputs, or gcc | ||
126 | will assume they contain their original values. */ | ||
127 | : "=r" (sum), "=r" (iph), "=r" (ihl) | ||
128 | : "1" (iph), "2" (ihl) | ||
129 | : "memory"); | ||
130 | return (__force __sum16)sum; | ||
131 | } | ||
132 | |||
133 | static inline unsigned add32_with_carry(unsigned a, unsigned b) | ||
134 | { | ||
135 | asm("addl %2,%0\n\t" | ||
136 | "adcl $0,%0" | ||
137 | : "=r" (a) | ||
138 | : "0" (a), "r" (b)); | ||
139 | return a; | ||
140 | } | ||
141 | |||
142 | extern __sum16 ip_compute_csum(const void *buff, int len); | ||
143 | |||
144 | #endif | ||
diff --git a/arch/x86/um/asm/desc.h b/arch/x86/um/asm/desc.h
new file mode 100644
index 000000000000..4ec34a51b62c
--- /dev/null
+++ b/arch/x86/um/asm/desc.h
@@ -0,0 +1,16 @@
1 | #ifndef __UM_DESC_H | ||
2 | #define __UM_DESC_H | ||
3 | |||
4 | /* Taken from asm-i386/desc.h, it's the only thing we need. The rest wouldn't | ||
5 | * compile, and has never been used. */ | ||
6 | #define LDT_empty(info) (\ | ||
7 | (info)->base_addr == 0 && \ | ||
8 | (info)->limit == 0 && \ | ||
9 | (info)->contents == 0 && \ | ||
10 | (info)->read_exec_only == 1 && \ | ||
11 | (info)->seg_32bit == 0 && \ | ||
12 | (info)->limit_in_pages == 0 && \ | ||
13 | (info)->seg_not_present == 1 && \ | ||
14 | (info)->useable == 0 ) | ||
15 | |||
16 | #endif | ||
diff --git a/arch/x86/um/asm/elf.h b/arch/x86/um/asm/elf.h
new file mode 100644
index 000000000000..f3b0633b69a1
--- /dev/null
+++ b/arch/x86/um/asm/elf.h
@@ -0,0 +1,221 @@
1 | /* | ||
2 | * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | #ifndef __UM_ELF_X86_H | ||
6 | #define __UM_ELF_X86_H | ||
7 | |||
8 | #include <asm/user.h> | ||
9 | #include "skas.h" | ||
10 | |||
11 | #ifdef CONFIG_X86_32 | ||
12 | |||
13 | #define R_386_NONE 0 | ||
14 | #define R_386_32 1 | ||
15 | #define R_386_PC32 2 | ||
16 | #define R_386_GOT32 3 | ||
17 | #define R_386_PLT32 4 | ||
18 | #define R_386_COPY 5 | ||
19 | #define R_386_GLOB_DAT 6 | ||
20 | #define R_386_JMP_SLOT 7 | ||
21 | #define R_386_RELATIVE 8 | ||
22 | #define R_386_GOTOFF 9 | ||
23 | #define R_386_GOTPC 10 | ||
24 | #define R_386_NUM 11 | ||
25 | |||
26 | /* | ||
27 | * This is used to ensure we don't load something for the wrong architecture. | ||
28 | */ | ||
29 | #define elf_check_arch(x) \ | ||
30 | (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486)) | ||
31 | |||
32 | #define ELF_CLASS ELFCLASS32 | ||
33 | #define ELF_DATA ELFDATA2LSB | ||
34 | #define ELF_ARCH EM_386 | ||
35 | |||
36 | #define ELF_PLAT_INIT(regs, load_addr) do { \ | ||
37 | PT_REGS_EBX(regs) = 0; \ | ||
38 | PT_REGS_ECX(regs) = 0; \ | ||
39 | PT_REGS_EDX(regs) = 0; \ | ||
40 | PT_REGS_ESI(regs) = 0; \ | ||
41 | PT_REGS_EDI(regs) = 0; \ | ||
42 | PT_REGS_EBP(regs) = 0; \ | ||
43 | PT_REGS_EAX(regs) = 0; \ | ||
44 | } while (0) | ||
45 | |||
46 | /* Shamelessly stolen from include/asm-i386/elf.h */ | ||
47 | |||
48 | #define ELF_CORE_COPY_REGS(pr_reg, regs) do { \ | ||
49 | pr_reg[0] = PT_REGS_EBX(regs); \ | ||
50 | pr_reg[1] = PT_REGS_ECX(regs); \ | ||
51 | pr_reg[2] = PT_REGS_EDX(regs); \ | ||
52 | pr_reg[3] = PT_REGS_ESI(regs); \ | ||
53 | pr_reg[4] = PT_REGS_EDI(regs); \ | ||
54 | pr_reg[5] = PT_REGS_EBP(regs); \ | ||
55 | pr_reg[6] = PT_REGS_EAX(regs); \ | ||
56 | pr_reg[7] = PT_REGS_DS(regs); \ | ||
57 | pr_reg[8] = PT_REGS_ES(regs); \ | ||
58 | /* fake once used fs and gs selectors? */ \ | ||
59 | pr_reg[9] = PT_REGS_DS(regs); \ | ||
60 | pr_reg[10] = PT_REGS_DS(regs); \ | ||
61 | pr_reg[11] = PT_REGS_SYSCALL_NR(regs); \ | ||
62 | pr_reg[12] = PT_REGS_IP(regs); \ | ||
63 | pr_reg[13] = PT_REGS_CS(regs); \ | ||
64 | pr_reg[14] = PT_REGS_EFLAGS(regs); \ | ||
65 | pr_reg[15] = PT_REGS_SP(regs); \ | ||
66 | pr_reg[16] = PT_REGS_SS(regs); \ | ||
67 | } while (0); | ||
68 | |||
69 | extern char * elf_aux_platform; | ||
70 | #define ELF_PLATFORM (elf_aux_platform) | ||
71 | |||
72 | extern unsigned long vsyscall_ehdr; | ||
73 | extern unsigned long vsyscall_end; | ||
74 | extern unsigned long __kernel_vsyscall; | ||
75 | |||
76 | /* | ||
77 | * This is the range that is readable by user mode, and things | ||
78 | * acting like user mode such as get_user_pages. | ||
79 | */ | ||
80 | #define FIXADDR_USER_START vsyscall_ehdr | ||
81 | #define FIXADDR_USER_END vsyscall_end | ||
82 | |||
83 | |||
84 | /* | ||
85 | * Architecture-neutral AT_ values in 0-17, leave some room | ||
86 | * for more of them, start the x86-specific ones at 32. | ||
87 | */ | ||
88 | #define AT_SYSINFO 32 | ||
89 | #define AT_SYSINFO_EHDR 33 | ||
90 | |||
91 | #define ARCH_DLINFO \ | ||
92 | do { \ | ||
93 | if ( vsyscall_ehdr ) { \ | ||
94 | NEW_AUX_ENT(AT_SYSINFO, __kernel_vsyscall); \ | ||
95 | NEW_AUX_ENT(AT_SYSINFO_EHDR, vsyscall_ehdr); \ | ||
96 | } \ | ||
97 | } while (0) | ||
98 | |||
99 | #else | ||
100 | |||
101 | /* x86-64 relocation types, taken from asm-x86_64/elf.h */ | ||
102 | #define R_X86_64_NONE 0 /* No reloc */ | ||
103 | #define R_X86_64_64 1 /* Direct 64 bit */ | ||
104 | #define R_X86_64_PC32 2 /* PC relative 32 bit signed */ | ||
105 | #define R_X86_64_GOT32 3 /* 32 bit GOT entry */ | ||
106 | #define R_X86_64_PLT32 4 /* 32 bit PLT address */ | ||
107 | #define R_X86_64_COPY 5 /* Copy symbol at runtime */ | ||
108 | #define R_X86_64_GLOB_DAT 6 /* Create GOT entry */ | ||
109 | #define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */ | ||
110 | #define R_X86_64_RELATIVE 8 /* Adjust by program base */ | ||
111 | #define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative | ||
112 | offset to GOT */ | ||
113 | #define R_X86_64_32 10 /* Direct 32 bit zero extended */ | ||
114 | #define R_X86_64_32S 11 /* Direct 32 bit sign extended */ | ||
115 | #define R_X86_64_16 12 /* Direct 16 bit zero extended */ | ||
116 | #define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */ | ||
117 | #define R_X86_64_8 14 /* Direct 8 bit sign extended */ | ||
118 | #define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */ | ||
119 | |||
120 | #define R_X86_64_NUM 16 | ||
121 | |||
122 | /* | ||
123 | * This is used to ensure we don't load something for the wrong architecture. | ||
124 | */ | ||
125 | #define elf_check_arch(x) \ | ||
126 | ((x)->e_machine == EM_X86_64) | ||
127 | |||
128 | #define ELF_CLASS ELFCLASS64 | ||
129 | #define ELF_DATA ELFDATA2LSB | ||
130 | #define ELF_ARCH EM_X86_64 | ||
131 | |||
132 | #define ELF_PLAT_INIT(regs, load_addr) do { \ | ||
133 | PT_REGS_RBX(regs) = 0; \ | ||
134 | PT_REGS_RCX(regs) = 0; \ | ||
135 | PT_REGS_RDX(regs) = 0; \ | ||
136 | PT_REGS_RSI(regs) = 0; \ | ||
137 | PT_REGS_RDI(regs) = 0; \ | ||
138 | PT_REGS_RBP(regs) = 0; \ | ||
139 | PT_REGS_RAX(regs) = 0; \ | ||
140 | PT_REGS_R8(regs) = 0; \ | ||
141 | PT_REGS_R9(regs) = 0; \ | ||
142 | PT_REGS_R10(regs) = 0; \ | ||
143 | PT_REGS_R11(regs) = 0; \ | ||
144 | PT_REGS_R12(regs) = 0; \ | ||
145 | PT_REGS_R13(regs) = 0; \ | ||
146 | PT_REGS_R14(regs) = 0; \ | ||
147 | PT_REGS_R15(regs) = 0; \ | ||
148 | } while (0) | ||
149 | |||
150 | #define ELF_CORE_COPY_REGS(pr_reg, _regs) \ | ||
151 | (pr_reg)[0] = (_regs)->regs.gp[0]; \ | ||
152 | (pr_reg)[1] = (_regs)->regs.gp[1]; \ | ||
153 | (pr_reg)[2] = (_regs)->regs.gp[2]; \ | ||
154 | (pr_reg)[3] = (_regs)->regs.gp[3]; \ | ||
155 | (pr_reg)[4] = (_regs)->regs.gp[4]; \ | ||
156 | (pr_reg)[5] = (_regs)->regs.gp[5]; \ | ||
157 | (pr_reg)[6] = (_regs)->regs.gp[6]; \ | ||
158 | (pr_reg)[7] = (_regs)->regs.gp[7]; \ | ||
159 | (pr_reg)[8] = (_regs)->regs.gp[8]; \ | ||
160 | (pr_reg)[9] = (_regs)->regs.gp[9]; \ | ||
161 | (pr_reg)[10] = (_regs)->regs.gp[10]; \ | ||
162 | (pr_reg)[11] = (_regs)->regs.gp[11]; \ | ||
163 | (pr_reg)[12] = (_regs)->regs.gp[12]; \ | ||
164 | (pr_reg)[13] = (_regs)->regs.gp[13]; \ | ||
165 | (pr_reg)[14] = (_regs)->regs.gp[14]; \ | ||
166 | (pr_reg)[15] = (_regs)->regs.gp[15]; \ | ||
167 | (pr_reg)[16] = (_regs)->regs.gp[16]; \ | ||
168 | (pr_reg)[17] = (_regs)->regs.gp[17]; \ | ||
169 | (pr_reg)[18] = (_regs)->regs.gp[18]; \ | ||
170 | (pr_reg)[19] = (_regs)->regs.gp[19]; \ | ||
171 | (pr_reg)[20] = (_regs)->regs.gp[20]; \ | ||
172 | (pr_reg)[21] = current->thread.arch.fs; \ | ||
173 | (pr_reg)[22] = 0; \ | ||
174 | (pr_reg)[23] = 0; \ | ||
175 | (pr_reg)[24] = 0; \ | ||
176 | (pr_reg)[25] = 0; \ | ||
177 | (pr_reg)[26] = 0; | ||
178 | |||
179 | #define ELF_PLATFORM "x86_64" | ||
180 | |||
181 | /* No user-accessible fixmap addresses, i.e. vsyscall */ | ||
182 | #define FIXADDR_USER_START 0 | ||
183 | #define FIXADDR_USER_END 0 | ||
184 | |||
185 | #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 | ||
186 | struct linux_binprm; | ||
187 | extern int arch_setup_additional_pages(struct linux_binprm *bprm, | ||
188 | int uses_interp); | ||
189 | |||
190 | extern unsigned long um_vdso_addr; | ||
191 | #define AT_SYSINFO_EHDR 33 | ||
192 | #define ARCH_DLINFO NEW_AUX_ENT(AT_SYSINFO_EHDR, um_vdso_addr) | ||
193 | |||
194 | #endif | ||
195 | |||
196 | typedef unsigned long elf_greg_t; | ||
197 | |||
198 | #define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t)) | ||
199 | typedef elf_greg_t elf_gregset_t[ELF_NGREG]; | ||
200 | |||
201 | typedef struct user_i387_struct elf_fpregset_t; | ||
202 | |||
203 | #define task_pt_regs(t) (&(t)->thread.regs) | ||
204 | |||
205 | struct task_struct; | ||
206 | |||
207 | extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu); | ||
208 | |||
209 | #define ELF_CORE_COPY_FPREGS(t, fpu) elf_core_copy_fpregs(t, fpu) | ||
210 | |||
211 | #define ELF_EXEC_PAGESIZE 4096 | ||
212 | |||
213 | #define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) | ||
214 | |||
215 | extern long elf_aux_hwcap; | ||
216 | #define ELF_HWCAP (elf_aux_hwcap) | ||
217 | |||
218 | #define SET_PERSONALITY(ex) do ; while(0) | ||
219 | #define __HAVE_ARCH_GATE_AREA 1 | ||
220 | |||
221 | #endif | ||
diff --git a/arch/x86/um/asm/irq_vectors.h b/arch/x86/um/asm/irq_vectors.h
new file mode 100644
index 000000000000..272a81e0ce14
--- /dev/null
+++ b/arch/x86/um/asm/irq_vectors.h
@@ -0,0 +1,10 @@
1 | /* | ||
2 | * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #ifndef __UM_IRQ_VECTORS_H | ||
7 | #define __UM_IRQ_VECTORS_H | ||
8 | |||
9 | #endif | ||
10 | |||
diff --git a/arch/x86/um/asm/mm_context.h b/arch/x86/um/asm/mm_context.h
new file mode 100644
index 000000000000..4a73d63e4760
--- /dev/null
+++ b/arch/x86/um/asm/mm_context.h
@@ -0,0 +1,72 @@
1 | /* | ||
2 | * Copyright (C) 2004 Fujitsu Siemens Computers GmbH | ||
3 | * Licensed under the GPL | ||
4 | * | ||
5 | * Author: Bodo Stroesser <bstroesser@fujitsu-siemens.com> | ||
6 | */ | ||
7 | |||
8 | #ifndef __ASM_LDT_H | ||
9 | #define __ASM_LDT_H | ||
10 | |||
11 | #include <linux/mutex.h> | ||
12 | #include <asm/ldt.h> | ||
13 | |||
14 | extern void ldt_host_info(void); | ||
15 | |||
16 | #define LDT_PAGES_MAX \ | ||
17 | ((LDT_ENTRIES * LDT_ENTRY_SIZE)/PAGE_SIZE) | ||
18 | #define LDT_ENTRIES_PER_PAGE \ | ||
19 | (PAGE_SIZE/LDT_ENTRY_SIZE) | ||
20 | #define LDT_DIRECT_ENTRIES \ | ||
21 | ((LDT_PAGES_MAX*sizeof(void *))/LDT_ENTRY_SIZE) | ||
22 | |||
23 | struct ldt_entry { | ||
24 | __u32 a; | ||
25 | __u32 b; | ||
26 | }; | ||
27 | |||
28 | typedef struct uml_ldt { | ||
29 | int entry_count; | ||
30 | struct mutex lock; | ||
31 | union { | ||
32 | struct ldt_entry * pages[LDT_PAGES_MAX]; | ||
33 | struct ldt_entry entries[LDT_DIRECT_ENTRIES]; | ||
34 | } u; | ||
35 | } uml_ldt_t; | ||
36 | |||
37 | #define LDT_entry_a(info) \ | ||
38 | ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff)) | ||
39 | |||
40 | #define LDT_entry_b(info) \ | ||
41 | (((info)->base_addr & 0xff000000) | \ | ||
42 | (((info)->base_addr & 0x00ff0000) >> 16) | \ | ||
43 | ((info)->limit & 0xf0000) | \ | ||
44 | (((info)->read_exec_only ^ 1) << 9) | \ | ||
45 | ((info)->contents << 10) | \ | ||
46 | (((info)->seg_not_present ^ 1) << 15) | \ | ||
47 | ((info)->seg_32bit << 22) | \ | ||
48 | ((info)->limit_in_pages << 23) | \ | ||
49 | ((info)->useable << 20) | \ | ||
50 | 0x7000) | ||
51 | |||
52 | #define _LDT_empty(info) (\ | ||
53 | (info)->base_addr == 0 && \ | ||
54 | (info)->limit == 0 && \ | ||
55 | (info)->contents == 0 && \ | ||
56 | (info)->read_exec_only == 1 && \ | ||
57 | (info)->seg_32bit == 0 && \ | ||
58 | (info)->limit_in_pages == 0 && \ | ||
59 | (info)->seg_not_present == 1 && \ | ||
60 | (info)->useable == 0 ) | ||
61 | |||
62 | #ifdef CONFIG_X86_64 | ||
63 | #define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0)) | ||
64 | #else | ||
65 | #define LDT_empty(info) (_LDT_empty(info)) | ||
66 | #endif | ||
67 | |||
68 | struct uml_arch_mm_context { | ||
69 | uml_ldt_t ldt; | ||
70 | }; | ||
71 | |||
72 | #endif | ||
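The LDT_entry_a()/LDT_entry_b() macros above pack a user_desc-style record into the two 32-bit words of an x86 segment descriptor. The following hedged C sketch is for illustration only and is not part of the commit; struct desc_info and the sample values are invented stand-ins for struct user_desc, but the bit packing mirrors the macros.

```c
#include <stdint.h>
#include <stdio.h>

/* Minimal stand-in for the struct user_desc fields the macros consume. */
struct desc_info {
	uint32_t base_addr;
	uint32_t limit;
	unsigned seg_32bit:1;
	unsigned contents:2;
	unsigned read_exec_only:1;
	unsigned limit_in_pages:1;
	unsigned seg_not_present:1;
	unsigned useable:1;
};

/* Low word: low 16 bits of the base in the high half, low 16 bits of the
 * limit in the low half, as in LDT_entry_a(). */
static uint32_t entry_a(const struct desc_info *i)
{
	return ((i->base_addr & 0x0000ffff) << 16) | (i->limit & 0x0ffff);
}

/* High word: remaining base and limit bits plus the attribute flags, as in
 * LDT_entry_b(); 0x7000 sets the S (code/data segment) bit and DPL 3. */
static uint32_t entry_b(const struct desc_info *i)
{
	return (i->base_addr & 0xff000000) |
	       ((i->base_addr & 0x00ff0000) >> 16) |
	       (i->limit & 0xf0000) |
	       ((i->read_exec_only ^ 1) << 9) |
	       (i->contents << 10) |
	       ((i->seg_not_present ^ 1) << 15) |
	       (i->seg_32bit << 22) |
	       (i->limit_in_pages << 23) |
	       (i->useable << 20) |
	       0x7000;
}

int main(void)
{
	/* A made-up flat 32-bit data segment descriptor. */
	struct desc_info d = {
		.base_addr = 0, .limit = 0xfffff, .seg_32bit = 1, .contents = 0,
		.read_exec_only = 0, .limit_in_pages = 1, .seg_not_present = 0,
		.useable = 1,
	};

	printf("descriptor words: 0x%08x 0x%08x\n",
	       (unsigned int)entry_a(&d), (unsigned int)entry_b(&d));
	return 0;
}
```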
diff --git a/arch/x86/um/asm/module.h b/arch/x86/um/asm/module.h
new file mode 100644
index 000000000000..61af80e932eb
--- /dev/null
+++ b/arch/x86/um/asm/module.h
@@ -0,0 +1,23 @@
1 | #ifndef __UM_MODULE_H | ||
2 | #define __UM_MODULE_H | ||
3 | |||
4 | /* UML is simple */ | ||
5 | struct mod_arch_specific | ||
6 | { | ||
7 | }; | ||
8 | |||
9 | #ifdef CONFIG_X86_32 | ||
10 | |||
11 | #define Elf_Shdr Elf32_Shdr | ||
12 | #define Elf_Sym Elf32_Sym | ||
13 | #define Elf_Ehdr Elf32_Ehdr | ||
14 | |||
15 | #else | ||
16 | |||
17 | #define Elf_Shdr Elf64_Shdr | ||
18 | #define Elf_Sym Elf64_Sym | ||
19 | #define Elf_Ehdr Elf64_Ehdr | ||
20 | |||
21 | #endif | ||
22 | |||
23 | #endif | ||
diff --git a/arch/x86/um/asm/processor.h b/arch/x86/um/asm/processor.h
new file mode 100644
index 000000000000..118c143a9cb4
--- /dev/null
+++ b/arch/x86/um/asm/processor.h
@@ -0,0 +1,22 @@
1 | #ifndef __UM_PROCESSOR_H | ||
2 | #define __UM_PROCESSOR_H | ||
3 | |||
4 | /* include faultinfo structure */ | ||
5 | #include <sysdep/faultinfo.h> | ||
6 | |||
7 | #ifdef CONFIG_X86_32 | ||
8 | # include "processor_32.h" | ||
9 | #else | ||
10 | # include "processor_64.h" | ||
11 | #endif | ||
12 | |||
13 | #define KSTK_EIP(tsk) KSTK_REG(tsk, HOST_IP) | ||
14 | #define KSTK_ESP(tsk) KSTK_REG(tsk, HOST_SP) | ||
15 | #define KSTK_EBP(tsk) KSTK_REG(tsk, HOST_BP) | ||
16 | |||
17 | #define ARCH_IS_STACKGROW(address) \ | ||
18 | (address + 65536 + 32 * sizeof(unsigned long) >= UPT_SP(&current->thread.regs.regs)) | ||
19 | |||
20 | #include <asm/processor-generic.h> | ||
21 | |||
22 | #endif | ||
diff --git a/arch/x86/um/asm/processor_32.h b/arch/x86/um/asm/processor_32.h
new file mode 100644
index 000000000000..018f732704dd
--- /dev/null
+++ b/arch/x86/um/asm/processor_32.h
@@ -0,0 +1,66 @@
1 | /* | ||
2 | * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #ifndef __UM_PROCESSOR_I386_H | ||
7 | #define __UM_PROCESSOR_I386_H | ||
8 | |||
9 | #include <linux/string.h> | ||
10 | #include <asm/segment.h> | ||
11 | #include <asm/ldt.h> | ||
12 | |||
13 | extern int host_has_cmov; | ||
14 | |||
15 | struct uml_tls_struct { | ||
16 | struct user_desc tls; | ||
17 | unsigned flushed:1; | ||
18 | unsigned present:1; | ||
19 | }; | ||
20 | |||
21 | struct arch_thread { | ||
22 | struct uml_tls_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; | ||
23 | unsigned long debugregs[8]; | ||
24 | int debugregs_seq; | ||
25 | struct faultinfo faultinfo; | ||
26 | }; | ||
27 | |||
28 | #define INIT_ARCH_THREAD { \ | ||
29 | .tls_array = { [ 0 ... GDT_ENTRY_TLS_ENTRIES - 1 ] = \ | ||
30 | { .present = 0, .flushed = 0 } }, \ | ||
31 | .debugregs = { [ 0 ... 7 ] = 0 }, \ | ||
32 | .debugregs_seq = 0, \ | ||
33 | .faultinfo = { 0, 0, 0 } \ | ||
34 | } | ||
35 | |||
36 | static inline void arch_flush_thread(struct arch_thread *thread) | ||
37 | { | ||
38 | /* Clear any TLS still hanging */ | ||
39 | memset(&thread->tls_array, 0, sizeof(thread->tls_array)); | ||
40 | } | ||
41 | |||
42 | static inline void arch_copy_thread(struct arch_thread *from, | ||
43 | struct arch_thread *to) | ||
44 | { | ||
45 | memcpy(&to->tls_array, &from->tls_array, sizeof(from->tls_array)); | ||
46 | } | ||
47 | |||
48 | #include <asm/user.h> | ||
49 | |||
50 | /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ | ||
51 | static inline void rep_nop(void) | ||
52 | { | ||
53 | __asm__ __volatile__("rep;nop": : :"memory"); | ||
54 | } | ||
55 | |||
56 | #define cpu_relax() rep_nop() | ||
57 | |||
58 | /* | ||
59 | * Default implementation of macro that returns current | ||
60 | * instruction pointer ("program counter"). Stolen | ||
61 | * from asm-i386/processor.h | ||
62 | */ | ||
63 | #define current_text_addr() \ | ||
64 | ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; }) | ||
65 | |||
66 | #endif | ||
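The comment above processor_32.h's rep_nop() notes that REP NOP (PAUSE) belongs in busy-wait loops, which is what cpu_relax() wraps. As a rough, hedged illustration only (not part of this commit, x86-specific, GCC/Clang inline asm; the names relax and spin_until_set are invented), here is what such a spin-wait loop looks like in plain C:

```c
#include <stdatomic.h>

/* PAUSE hint, like cpu_relax(): tells the CPU this is a spin-wait loop,
 * reducing power use and pipeline flushes while waiting. */
static inline void relax(void)
{
	__asm__ __volatile__("rep; nop" ::: "memory");
}

/* Busy-wait until another thread sets the flag. */
void spin_until_set(atomic_int *flag)
{
	while (!atomic_load_explicit(flag, memory_order_acquire))
		relax();
}
```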
diff --git a/arch/x86/um/asm/processor_64.h b/arch/x86/um/asm/processor_64.h
new file mode 100644
index 000000000000..61de92d916c3
--- /dev/null
+++ b/arch/x86/um/asm/processor_64.h
@@ -0,0 +1,45 @@
1 | /* | ||
2 | * Copyright 2003 PathScale, Inc. | ||
3 | * | ||
4 | * Licensed under the GPL | ||
5 | */ | ||
6 | |||
7 | #ifndef __UM_PROCESSOR_X86_64_H | ||
8 | #define __UM_PROCESSOR_X86_64_H | ||
9 | |||
10 | struct arch_thread { | ||
11 | unsigned long debugregs[8]; | ||
12 | int debugregs_seq; | ||
13 | unsigned long fs; | ||
14 | struct faultinfo faultinfo; | ||
15 | }; | ||
16 | |||
17 | /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ | ||
18 | static inline void rep_nop(void) | ||
19 | { | ||
20 | __asm__ __volatile__("rep;nop": : :"memory"); | ||
21 | } | ||
22 | |||
23 | #define cpu_relax() rep_nop() | ||
24 | |||
25 | #define INIT_ARCH_THREAD { .debugregs = { [ 0 ... 7 ] = 0 }, \ | ||
26 | .debugregs_seq = 0, \ | ||
27 | .fs = 0, \ | ||
28 | .faultinfo = { 0, 0, 0 } } | ||
29 | |||
30 | static inline void arch_flush_thread(struct arch_thread *thread) | ||
31 | { | ||
32 | } | ||
33 | |||
34 | static inline void arch_copy_thread(struct arch_thread *from, | ||
35 | struct arch_thread *to) | ||
36 | { | ||
37 | to->fs = from->fs; | ||
38 | } | ||
39 | |||
40 | #include <asm/user.h> | ||
41 | |||
42 | #define current_text_addr() \ | ||
43 | ({ void *pc; __asm__("movq $1f,%0\n1:":"=g" (pc)); pc; }) | ||
44 | |||
45 | #endif | ||
diff --git a/arch/x86/um/asm/ptrace.h b/arch/x86/um/asm/ptrace.h
new file mode 100644
index 000000000000..c8aca8c501b0
--- /dev/null
+++ b/arch/x86/um/asm/ptrace.h
@@ -0,0 +1,5 @@
1 | #ifdef CONFIG_X86_32 | ||
2 | # include "ptrace_32.h" | ||
3 | #else | ||
4 | # include "ptrace_64.h" | ||
5 | #endif | ||
diff --git a/arch/x86/um/asm/ptrace_32.h b/arch/x86/um/asm/ptrace_32.h
new file mode 100644
index 000000000000..5d2a59112537
--- /dev/null
+++ b/arch/x86/um/asm/ptrace_32.h
@@ -0,0 +1,51 @@
1 | /* | ||
2 | * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #ifndef __UM_PTRACE_I386_H | ||
7 | #define __UM_PTRACE_I386_H | ||
8 | |||
9 | #define HOST_AUDIT_ARCH AUDIT_ARCH_I386 | ||
10 | |||
11 | #include "linux/compiler.h" | ||
12 | #include "asm/ptrace-generic.h" | ||
13 | |||
14 | #define PT_REGS_EAX(r) UPT_EAX(&(r)->regs) | ||
15 | #define PT_REGS_EBX(r) UPT_EBX(&(r)->regs) | ||
16 | #define PT_REGS_ECX(r) UPT_ECX(&(r)->regs) | ||
17 | #define PT_REGS_EDX(r) UPT_EDX(&(r)->regs) | ||
18 | #define PT_REGS_ESI(r) UPT_ESI(&(r)->regs) | ||
19 | #define PT_REGS_EDI(r) UPT_EDI(&(r)->regs) | ||
20 | #define PT_REGS_EBP(r) UPT_EBP(&(r)->regs) | ||
21 | |||
22 | #define PT_REGS_CS(r) UPT_CS(&(r)->regs) | ||
23 | #define PT_REGS_SS(r) UPT_SS(&(r)->regs) | ||
24 | #define PT_REGS_DS(r) UPT_DS(&(r)->regs) | ||
25 | #define PT_REGS_ES(r) UPT_ES(&(r)->regs) | ||
26 | #define PT_REGS_FS(r) UPT_FS(&(r)->regs) | ||
27 | #define PT_REGS_GS(r) UPT_GS(&(r)->regs) | ||
28 | |||
29 | #define PT_REGS_EFLAGS(r) UPT_EFLAGS(&(r)->regs) | ||
30 | |||
31 | #define PT_REGS_ORIG_SYSCALL(r) PT_REGS_EAX(r) | ||
32 | #define PT_REGS_SYSCALL_RET(r) PT_REGS_EAX(r) | ||
33 | #define PT_FIX_EXEC_STACK(sp) do ; while(0) | ||
34 | |||
35 | #define profile_pc(regs) PT_REGS_IP(regs) | ||
36 | |||
37 | #define user_mode(r) UPT_IS_USER(&(r)->regs) | ||
38 | |||
39 | /* | ||
40 | * Forward declaration to avoid including sysdep/tls.h, which causes a | ||
41 | * circular include, and compilation failures. | ||
42 | */ | ||
43 | struct user_desc; | ||
44 | |||
45 | extern int ptrace_get_thread_area(struct task_struct *child, int idx, | ||
46 | struct user_desc __user *user_desc); | ||
47 | |||
48 | extern int ptrace_set_thread_area(struct task_struct *child, int idx, | ||
49 | struct user_desc __user *user_desc); | ||
50 | |||
51 | #endif | ||
diff --git a/arch/x86/um/asm/ptrace_64.h b/arch/x86/um/asm/ptrace_64.h
new file mode 100644
index 000000000000..706a0d80545c
--- /dev/null
+++ b/arch/x86/um/asm/ptrace_64.h
@@ -0,0 +1,72 @@
1 | /* | ||
2 | * Copyright 2003 PathScale, Inc. | ||
3 | * | ||
4 | * Licensed under the GPL | ||
5 | */ | ||
6 | |||
7 | #ifndef __UM_PTRACE_X86_64_H | ||
8 | #define __UM_PTRACE_X86_64_H | ||
9 | |||
10 | #include "linux/compiler.h" | ||
11 | #include "asm/errno.h" | ||
12 | |||
13 | #define __FRAME_OFFSETS /* Needed to get the R* macros */ | ||
14 | #include "asm/ptrace-generic.h" | ||
15 | |||
16 | #define HOST_AUDIT_ARCH AUDIT_ARCH_X86_64 | ||
17 | |||
18 | #define PT_REGS_RBX(r) UPT_RBX(&(r)->regs) | ||
19 | #define PT_REGS_RCX(r) UPT_RCX(&(r)->regs) | ||
20 | #define PT_REGS_RDX(r) UPT_RDX(&(r)->regs) | ||
21 | #define PT_REGS_RSI(r) UPT_RSI(&(r)->regs) | ||
22 | #define PT_REGS_RDI(r) UPT_RDI(&(r)->regs) | ||
23 | #define PT_REGS_RBP(r) UPT_RBP(&(r)->regs) | ||
24 | #define PT_REGS_RAX(r) UPT_RAX(&(r)->regs) | ||
25 | #define PT_REGS_R8(r) UPT_R8(&(r)->regs) | ||
26 | #define PT_REGS_R9(r) UPT_R9(&(r)->regs) | ||
27 | #define PT_REGS_R10(r) UPT_R10(&(r)->regs) | ||
28 | #define PT_REGS_R11(r) UPT_R11(&(r)->regs) | ||
29 | #define PT_REGS_R12(r) UPT_R12(&(r)->regs) | ||
30 | #define PT_REGS_R13(r) UPT_R13(&(r)->regs) | ||
31 | #define PT_REGS_R14(r) UPT_R14(&(r)->regs) | ||
32 | #define PT_REGS_R15(r) UPT_R15(&(r)->regs) | ||
33 | |||
34 | #define PT_REGS_FS(r) UPT_FS(&(r)->regs) | ||
35 | #define PT_REGS_GS(r) UPT_GS(&(r)->regs) | ||
36 | #define PT_REGS_DS(r) UPT_DS(&(r)->regs) | ||
37 | #define PT_REGS_ES(r) UPT_ES(&(r)->regs) | ||
38 | #define PT_REGS_SS(r) UPT_SS(&(r)->regs) | ||
39 | #define PT_REGS_CS(r) UPT_CS(&(r)->regs) | ||
40 | |||
41 | #define PT_REGS_ORIG_RAX(r) UPT_ORIG_RAX(&(r)->regs) | ||
42 | #define PT_REGS_RIP(r) UPT_IP(&(r)->regs) | ||
43 | #define PT_REGS_SP(r) UPT_SP(&(r)->regs) | ||
44 | |||
45 | #define PT_REGS_EFLAGS(r) UPT_EFLAGS(&(r)->regs) | ||
46 | |||
47 | /* XXX */ | ||
48 | #define user_mode(r) UPT_IS_USER(&(r)->regs) | ||
49 | #define PT_REGS_ORIG_SYSCALL(r) PT_REGS_RAX(r) | ||
50 | #define PT_REGS_SYSCALL_RET(r) PT_REGS_RAX(r) | ||
51 | |||
52 | #define PT_FIX_EXEC_STACK(sp) do ; while(0) | ||
53 | |||
54 | #define profile_pc(regs) PT_REGS_IP(regs) | ||
55 | |||
56 | struct user_desc; | ||
57 | |||
58 | static inline int ptrace_get_thread_area(struct task_struct *child, int idx, | ||
59 | struct user_desc __user *user_desc) | ||
60 | { | ||
61 | return -ENOSYS; | ||
62 | } | ||
63 | |||
64 | static inline int ptrace_set_thread_area(struct task_struct *child, int idx, | ||
65 | struct user_desc __user *user_desc) | ||
66 | { | ||
67 | return -ENOSYS; | ||
68 | } | ||
69 | |||
70 | extern long arch_prctl(struct task_struct *task, int code, | ||
71 | unsigned long __user *addr); | ||
72 | #endif | ||
diff --git a/arch/x86/um/asm/required-features.h b/arch/x86/um/asm/required-features.h
new file mode 100644
index 000000000000..dfb967b2d2f3
--- /dev/null
+++ b/arch/x86/um/asm/required-features.h
@@ -0,0 +1,9 @@
1 | #ifndef __UM_REQUIRED_FEATURES_H | ||
2 | #define __UM_REQUIRED_FEATURES_H | ||
3 | |||
4 | /* | ||
5 | * Nothing to see, just need something for the i386 and x86_64 asm | ||
6 | * headers to include. | ||
7 | */ | ||
8 | |||
9 | #endif | ||
diff --git a/arch/x86/um/asm/segment.h b/arch/x86/um/asm/segment.h
new file mode 100644
index 000000000000..45183fcd10b6
--- /dev/null
+++ b/arch/x86/um/asm/segment.h
@@ -0,0 +1,10 @@
1 | #ifndef __UM_SEGMENT_H | ||
2 | #define __UM_SEGMENT_H | ||
3 | |||
4 | extern int host_gdt_entry_tls_min; | ||
5 | |||
6 | #define GDT_ENTRY_TLS_ENTRIES 3 | ||
7 | #define GDT_ENTRY_TLS_MIN host_gdt_entry_tls_min | ||
8 | #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1) | ||
9 | |||
10 | #endif | ||
diff --git a/arch/x86/um/asm/system.h b/arch/x86/um/asm/system.h
new file mode 100644
index 000000000000..a459fd9b7598
--- /dev/null
+++ b/arch/x86/um/asm/system.h
@@ -0,0 +1,135 @@
1 | #ifndef _ASM_X86_SYSTEM_H_ | ||
2 | #define _ASM_X86_SYSTEM_H_ | ||
3 | |||
4 | #include <asm/asm.h> | ||
5 | #include <asm/segment.h> | ||
6 | #include <asm/cpufeature.h> | ||
7 | #include <asm/cmpxchg.h> | ||
8 | #include <asm/nops.h> | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/irqflags.h> | ||
12 | |||
13 | /* entries in ARCH_DLINFO: */ | ||
14 | #ifdef CONFIG_IA32_EMULATION | ||
15 | # define AT_VECTOR_SIZE_ARCH 2 | ||
16 | #else | ||
17 | # define AT_VECTOR_SIZE_ARCH 1 | ||
18 | #endif | ||
19 | |||
20 | extern unsigned long arch_align_stack(unsigned long sp); | ||
21 | |||
22 | void default_idle(void); | ||
23 | |||
24 | /* | ||
25 | * Force strict CPU ordering. | ||
26 | * And yes, this is required on UP too when we're talking | ||
27 | * to devices. | ||
28 | */ | ||
29 | #ifdef CONFIG_X86_32 | ||
30 | /* | ||
31 | * Some non-Intel clones support out of order store. wmb() ceases to be a | ||
32 | * nop for these. | ||
33 | */ | ||
34 | #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) | ||
35 | #define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2) | ||
36 | #define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM) | ||
37 | #else | ||
38 | #define mb() asm volatile("mfence":::"memory") | ||
39 | #define rmb() asm volatile("lfence":::"memory") | ||
40 | #define wmb() asm volatile("sfence" ::: "memory") | ||
41 | #endif | ||
42 | |||
43 | /** | ||
44 | * read_barrier_depends - Flush all pending reads that subsequent reads | ||
45 | * depend on. | ||
46 | * | ||
47 | * No data-dependent reads from memory-like regions are ever reordered | ||
48 | * over this barrier. All reads preceding this primitive are guaranteed | ||
49 | * to access memory (but not necessarily other CPUs' caches) before any | ||
50 | * reads following this primitive that depend on the data returned by | ||
51 | * any of the preceding reads. This primitive is much lighter weight than | ||
52 | * rmb() on most CPUs, and is never heavier weight than is | ||
53 | * rmb(). | ||
54 | * | ||
55 | * These ordering constraints are respected by both the local CPU | ||
56 | * and the compiler. | ||
57 | * | ||
58 | * Ordering is not guaranteed by anything other than these primitives, | ||
59 | * not even by data dependencies. See the documentation for | ||
60 | * memory_barrier() for examples and URLs to more information. | ||
61 | * | ||
62 | * For example, the following code would force ordering (the initial | ||
63 | * value of "a" is zero, "b" is one, and "p" is "&a"): | ||
64 | * | ||
65 | * <programlisting> | ||
66 | * CPU 0 CPU 1 | ||
67 | * | ||
68 | * b = 2; | ||
69 | * memory_barrier(); | ||
70 | * p = &b; q = p; | ||
71 | * read_barrier_depends(); | ||
72 | * d = *q; | ||
73 | * </programlisting> | ||
74 | * | ||
75 | * because the read of "*q" depends on the read of "p" and these | ||
76 | * two reads are separated by a read_barrier_depends(). However, | ||
77 | * the following code, with the same initial values for "a" and "b": | ||
78 | * | ||
79 | * <programlisting> | ||
80 | * CPU 0 CPU 1 | ||
81 | * | ||
82 | * a = 2; | ||
83 | * memory_barrier(); | ||
84 | * b = 3; y = b; | ||
85 | * read_barrier_depends(); | ||
86 | * x = a; | ||
87 | * </programlisting> | ||
88 | * | ||
89 | * does not enforce ordering, since there is no data dependency between | ||
90 | * the read of "a" and the read of "b". Therefore, on some CPUs, such | ||
91 | * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() | ||
92 | * in cases like this where there are no data dependencies. | ||
93 | **/ | ||
94 | |||
95 | #define read_barrier_depends() do { } while (0) | ||
96 | |||
97 | #ifdef CONFIG_SMP | ||
98 | #define smp_mb() mb() | ||
99 | #ifdef CONFIG_X86_PPRO_FENCE | ||
100 | # define smp_rmb() rmb() | ||
101 | #else | ||
102 | # define smp_rmb() barrier() | ||
103 | #endif | ||
104 | #ifdef CONFIG_X86_OOSTORE | ||
105 | # define smp_wmb() wmb() | ||
106 | #else | ||
107 | # define smp_wmb() barrier() | ||
108 | #endif | ||
109 | #define smp_read_barrier_depends() read_barrier_depends() | ||
110 | #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) | ||
111 | #else | ||
112 | #define smp_mb() barrier() | ||
113 | #define smp_rmb() barrier() | ||
114 | #define smp_wmb() barrier() | ||
115 | #define smp_read_barrier_depends() do { } while (0) | ||
116 | #define set_mb(var, value) do { var = value; barrier(); } while (0) | ||
117 | #endif | ||
118 | |||
119 | /* | ||
120 | * Stop RDTSC speculation. This is needed when you need to use RDTSC | ||
121 | * (or get_cycles or vread that possibly accesses the TSC) in a defined | ||
122 | * code region. | ||
123 | * | ||
125 | * (Could use a three-way alternative() for this if there were one.) | ||
125 | */ | ||
126 | static inline void rdtsc_barrier(void) | ||
127 | { | ||
128 | alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC); | ||
129 | alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC); | ||
130 | } | ||
131 | |||
132 | extern void *_switch_to(void *prev, void *next, void *last); | ||
133 | #define switch_to(prev, next, last) prev = _switch_to(prev, next, last) | ||
134 | |||
135 | #endif | ||
diff --git a/arch/x86/um/asm/vm-flags.h b/arch/x86/um/asm/vm-flags.h
new file mode 100644
index 000000000000..7c297e9e2413
--- /dev/null
+++ b/arch/x86/um/asm/vm-flags.h
@@ -0,0 +1,25 @@
1 | /* | ||
2 | * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com) | ||
3 | * Copyright 2003 PathScale, Inc. | ||
4 | * Licensed under the GPL | ||
5 | */ | ||
6 | |||
7 | #ifndef __VM_FLAGS_X86_H | ||
8 | #define __VM_FLAGS_X86_H | ||
9 | |||
10 | #ifdef CONFIG_X86_32 | ||
11 | |||
12 | #define VM_DATA_DEFAULT_FLAGS \ | ||
13 | (VM_READ | VM_WRITE | \ | ||
14 | ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \ | ||
15 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) | ||
16 | |||
17 | #else | ||
18 | |||
19 | #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ | ||
20 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) | ||
21 | #define VM_STACK_DEFAULT_FLAGS (VM_GROWSDOWN | VM_READ | VM_WRITE | \ | ||
22 | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) | ||
23 | |||
24 | #endif | ||
25 | #endif | ||
diff --git a/arch/x86/um/bug.c b/arch/x86/um/bug.c
new file mode 100644
index 000000000000..e8034e363d83
--- /dev/null
+++ b/arch/x86/um/bug.c
@@ -0,0 +1,21 @@
1 | /* | ||
2 | * Copyright (C) 2006 Jeff Dike (jdike@addtoit.com) | ||
3 | * Licensed under the GPL V2 | ||
4 | */ | ||
5 | |||
6 | #include <linux/uaccess.h> | ||
7 | |||
8 | /* | ||
9 | * Mostly copied from i386/x86_86 - eliminated the eip < PAGE_OFFSET because | ||
10 | * that's not relevant in skas mode. | ||
11 | */ | ||
12 | |||
13 | int is_valid_bugaddr(unsigned long eip) | ||
14 | { | ||
15 | unsigned short ud2; | ||
16 | |||
17 | if (probe_kernel_address((unsigned short __user *)eip, ud2)) | ||
18 | return 0; | ||
19 | |||
20 | return ud2 == 0x0b0f; | ||
21 | } | ||
diff --git a/arch/x86/um/bugs_32.c b/arch/x86/um/bugs_32.c
new file mode 100644
index 000000000000..a1fba5fb9dbe
--- /dev/null
+++ b/arch/x86/um/bugs_32.c
@@ -0,0 +1,76 @@
1 | /* | ||
2 | * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #include <signal.h> | ||
7 | #include "kern_util.h" | ||
8 | #include "longjmp.h" | ||
9 | #include "sysdep/ptrace.h" | ||
10 | #include <generated/asm-offsets.h> | ||
11 | |||
12 | /* Set during early boot */ | ||
13 | static int host_has_cmov = 1; | ||
14 | static jmp_buf cmov_test_return; | ||
15 | |||
16 | #define TASK_PID(task) *((int *) &(((char *) (task))[HOST_TASK_PID])) | ||
17 | |||
18 | static void cmov_sigill_test_handler(int sig) | ||
19 | { | ||
20 | host_has_cmov = 0; | ||
21 | longjmp(cmov_test_return, 1); | ||
22 | } | ||
23 | |||
24 | void arch_check_bugs(void) | ||
25 | { | ||
26 | struct sigaction old, new; | ||
27 | |||
28 | printk(UM_KERN_INFO "Checking for host processor cmov support..."); | ||
29 | new.sa_handler = cmov_sigill_test_handler; | ||
30 | |||
31 | /* Make sure that SIGILL is enabled after the handler longjmps back */ | ||
32 | new.sa_flags = SA_NODEFER; | ||
33 | sigemptyset(&new.sa_mask); | ||
34 | sigaction(SIGILL, &new, &old); | ||
35 | |||
36 | if (setjmp(cmov_test_return) == 0) { | ||
37 | unsigned long foo = 0; | ||
38 | __asm__ __volatile__("cmovz %0, %1" : "=r" (foo) : "0" (foo)); | ||
39 | printk(UM_KERN_CONT "Yes\n"); | ||
40 | } else | ||
41 | printk(UM_KERN_CONT "No\n"); | ||
42 | |||
43 | sigaction(SIGILL, &old, &new); | ||
44 | } | ||
45 | |||
46 | void arch_examine_signal(int sig, struct uml_pt_regs *regs) | ||
47 | { | ||
48 | unsigned char tmp[2]; | ||
49 | |||
50 | /* | ||
51 | * This is testing for a cmov (0x0f 0x4x) instruction causing a | ||
52 | * SIGILL in init. | ||
53 | */ | ||
54 | if ((sig != SIGILL) || (TASK_PID(get_current()) != 1)) | ||
55 | return; | ||
56 | |||
57 | if (copy_from_user_proc(tmp, (void *) UPT_IP(regs), 2)) { | ||
58 | printk(UM_KERN_ERR "SIGILL in init, could not read " | ||
59 | "instructions!\n"); | ||
60 | return; | ||
61 | } | ||
62 | |||
63 | if ((tmp[0] != 0x0f) || ((tmp[1] & 0xf0) != 0x40)) | ||
64 | return; | ||
65 | |||
66 | if (host_has_cmov == 0) | ||
67 | printk(UM_KERN_ERR "SIGILL caused by cmov, which this " | ||
68 | "processor doesn't implement. Boot a filesystem " | ||
69 | "compiled for older processors"); | ||
70 | else if (host_has_cmov == 1) | ||
71 | printk(UM_KERN_ERR "SIGILL caused by cmov, which this " | ||
72 | "processor claims to implement"); | ||
73 | else | ||
74 | printk(UM_KERN_ERR "Bad value for host_has_cmov (%d)", | ||
75 | host_has_cmov); | ||
76 | } | ||
diff --git a/arch/x86/um/bugs_64.c b/arch/x86/um/bugs_64.c
new file mode 100644
index 000000000000..44e02ba2a265
--- /dev/null
+++ b/arch/x86/um/bugs_64.c
@@ -0,0 +1,15 @@
1 | /* | ||
2 | * Copyright 2003 PathScale, Inc. | ||
3 | * | ||
4 | * Licensed under the GPL | ||
5 | */ | ||
6 | |||
7 | #include "sysdep/ptrace.h" | ||
8 | |||
9 | void arch_check_bugs(void) | ||
10 | { | ||
11 | } | ||
12 | |||
13 | void arch_examine_signal(int sig, struct uml_pt_regs *regs) | ||
14 | { | ||
15 | } | ||
diff --git a/arch/x86/um/checksum_32.S b/arch/x86/um/checksum_32.S
new file mode 100644
index 000000000000..f058d2f82e18
--- /dev/null
+++ b/arch/x86/um/checksum_32.S
@@ -0,0 +1,458 @@
1 | /* | ||
2 | * INET An implementation of the TCP/IP protocol suite for the LINUX | ||
3 | * operating system. INET is implemented using the BSD Socket | ||
4 | * interface as the means of communication with the user level. | ||
5 | * | ||
6 | * IP/TCP/UDP checksumming routines | ||
7 | * | ||
8 | * Authors: Jorge Cwik, <jorge@laser.satlink.net> | ||
9 | * Arnt Gulbrandsen, <agulbra@nvg.unit.no> | ||
10 | * Tom May, <ftom@netcom.com> | ||
11 | * Pentium Pro/II routines: | ||
12 | * Alexander Kjeldaas <astor@guardian.no> | ||
13 | * Finn Arne Gangstad <finnag@guardian.no> | ||
14 | * Lots of code moved from tcp.c and ip.c; see those files | ||
15 | * for more names. | ||
16 | * | ||
17 | * Changes: Ingo Molnar, converted csum_partial_copy() to 2.1 exception | ||
18 | * handling. | ||
19 | * Andi Kleen, add zeroing on error | ||
20 | * converted to pure assembler | ||
21 | * | ||
22 | * This program is free software; you can redistribute it and/or | ||
23 | * modify it under the terms of the GNU General Public License | ||
24 | * as published by the Free Software Foundation; either version | ||
25 | * 2 of the License, or (at your option) any later version. | ||
26 | */ | ||
27 | |||
28 | #include <asm/errno.h> | ||
29 | |||
30 | /* | ||
31 | * computes a partial checksum, e.g. for TCP/UDP fragments | ||
32 | */ | ||
33 | |||
34 | /* | ||
35 | unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum) | ||
36 | */ | ||
37 | |||
38 | .text | ||
39 | .align 4 | ||
40 | .globl csum_partial | ||
41 | |||
42 | #ifndef CONFIG_X86_USE_PPRO_CHECKSUM | ||
43 | |||
44 | /* | ||
45 | * Experiments with Ethernet and SLIP connections show that buff | ||
46 | * is aligned on either a 2-byte or 4-byte boundary. We get at | ||
47 | * least a twofold speedup on 486 and Pentium if it is 4-byte aligned. | ||
48 | * Fortunately, it is easy to convert 2-byte alignment to 4-byte | ||
49 | * alignment for the unrolled loop. | ||
50 | */ | ||
51 | csum_partial: | ||
52 | pushl %esi | ||
53 | pushl %ebx | ||
54 | movl 20(%esp),%eax # Function arg: unsigned int sum | ||
55 | movl 16(%esp),%ecx # Function arg: int len | ||
56 | movl 12(%esp),%esi # Function arg: unsigned char *buff | ||
57 | testl $2, %esi # Check alignment. | ||
58 | jz 2f # Jump if alignment is ok. | ||
59 | subl $2, %ecx # Alignment uses up two bytes. | ||
60 | jae 1f # Jump if we had at least two bytes. | ||
61 | addl $2, %ecx # ecx was < 2. Deal with it. | ||
62 | jmp 4f | ||
63 | 1: movw (%esi), %bx | ||
64 | addl $2, %esi | ||
65 | addw %bx, %ax | ||
66 | adcl $0, %eax | ||
67 | 2: | ||
68 | movl %ecx, %edx | ||
69 | shrl $5, %ecx | ||
70 | jz 2f | ||
71 | testl %esi, %esi | ||
72 | 1: movl (%esi), %ebx | ||
73 | adcl %ebx, %eax | ||
74 | movl 4(%esi), %ebx | ||
75 | adcl %ebx, %eax | ||
76 | movl 8(%esi), %ebx | ||
77 | adcl %ebx, %eax | ||
78 | movl 12(%esi), %ebx | ||
79 | adcl %ebx, %eax | ||
80 | movl 16(%esi), %ebx | ||
81 | adcl %ebx, %eax | ||
82 | movl 20(%esi), %ebx | ||
83 | adcl %ebx, %eax | ||
84 | movl 24(%esi), %ebx | ||
85 | adcl %ebx, %eax | ||
86 | movl 28(%esi), %ebx | ||
87 | adcl %ebx, %eax | ||
88 | lea 32(%esi), %esi | ||
89 | dec %ecx | ||
90 | jne 1b | ||
91 | adcl $0, %eax | ||
92 | 2: movl %edx, %ecx | ||
93 | andl $0x1c, %edx | ||
94 | je 4f | ||
95 | shrl $2, %edx # This clears CF | ||
96 | 3: adcl (%esi), %eax | ||
97 | lea 4(%esi), %esi | ||
98 | dec %edx | ||
99 | jne 3b | ||
100 | adcl $0, %eax | ||
101 | 4: andl $3, %ecx | ||
102 | jz 7f | ||
103 | cmpl $2, %ecx | ||
104 | jb 5f | ||
105 | movw (%esi),%cx | ||
106 | leal 2(%esi),%esi | ||
107 | je 6f | ||
108 | shll $16,%ecx | ||
109 | 5: movb (%esi),%cl | ||
110 | 6: addl %ecx,%eax | ||
111 | adcl $0, %eax | ||
112 | 7: | ||
113 | popl %ebx | ||
114 | popl %esi | ||
115 | ret | ||
116 | |||
117 | #else | ||
118 | |||
119 | /* Version for PentiumII/PPro */ | ||
120 | |||
121 | csum_partial: | ||
122 | pushl %esi | ||
123 | pushl %ebx | ||
124 | movl 20(%esp),%eax # Function arg: unsigned int sum | ||
125 | movl 16(%esp),%ecx # Function arg: int len | ||
126 | movl 12(%esp),%esi # Function arg: const unsigned char *buf | ||
127 | |||
128 | testl $2, %esi | ||
129 | jnz 30f | ||
130 | 10: | ||
131 | movl %ecx, %edx | ||
132 | movl %ecx, %ebx | ||
133 | andl $0x7c, %ebx | ||
134 | shrl $7, %ecx | ||
135 | addl %ebx,%esi | ||
136 | shrl $2, %ebx | ||
137 | negl %ebx | ||
138 | lea 45f(%ebx,%ebx,2), %ebx | ||
139 | testl %esi, %esi | ||
140 | jmp *%ebx | ||
141 | |||
142 | # Handle 2-byte-aligned regions | ||
143 | 20: addw (%esi), %ax | ||
144 | lea 2(%esi), %esi | ||
145 | adcl $0, %eax | ||
146 | jmp 10b | ||
147 | |||
148 | 30: subl $2, %ecx | ||
149 | ja 20b | ||
150 | je 32f | ||
151 | movzbl (%esi),%ebx # csumming 1 byte, 2-aligned | ||
152 | addl %ebx, %eax | ||
153 | adcl $0, %eax | ||
154 | jmp 80f | ||
155 | 32: | ||
156 | addw (%esi), %ax # csumming 2 bytes, 2-aligned | ||
157 | adcl $0, %eax | ||
158 | jmp 80f | ||
159 | |||
160 | 40: | ||
161 | addl -128(%esi), %eax | ||
162 | adcl -124(%esi), %eax | ||
163 | adcl -120(%esi), %eax | ||
164 | adcl -116(%esi), %eax | ||
165 | adcl -112(%esi), %eax | ||
166 | adcl -108(%esi), %eax | ||
167 | adcl -104(%esi), %eax | ||
168 | adcl -100(%esi), %eax | ||
169 | adcl -96(%esi), %eax | ||
170 | adcl -92(%esi), %eax | ||
171 | adcl -88(%esi), %eax | ||
172 | adcl -84(%esi), %eax | ||
173 | adcl -80(%esi), %eax | ||
174 | adcl -76(%esi), %eax | ||
175 | adcl -72(%esi), %eax | ||
176 | adcl -68(%esi), %eax | ||
177 | adcl -64(%esi), %eax | ||
178 | adcl -60(%esi), %eax | ||
179 | adcl -56(%esi), %eax | ||
180 | adcl -52(%esi), %eax | ||
181 | adcl -48(%esi), %eax | ||
182 | adcl -44(%esi), %eax | ||
183 | adcl -40(%esi), %eax | ||
184 | adcl -36(%esi), %eax | ||
185 | adcl -32(%esi), %eax | ||
186 | adcl -28(%esi), %eax | ||
187 | adcl -24(%esi), %eax | ||
188 | adcl -20(%esi), %eax | ||
189 | adcl -16(%esi), %eax | ||
190 | adcl -12(%esi), %eax | ||
191 | adcl -8(%esi), %eax | ||
192 | adcl -4(%esi), %eax | ||
193 | 45: | ||
194 | lea 128(%esi), %esi | ||
195 | adcl $0, %eax | ||
196 | dec %ecx | ||
197 | jge 40b | ||
198 | movl %edx, %ecx | ||
199 | 50: andl $3, %ecx | ||
200 | jz 80f | ||
201 | |||
202 | # Handle the last 1-3 bytes without jumping | ||
203 | notl %ecx # 1->2, 2->1, 3->0, higher bits are masked | ||
204 | movl $0xffffff,%ebx # by the shll and shrl instructions | ||
205 | shll $3,%ecx | ||
206 | shrl %cl,%ebx | ||
207 | andl -128(%esi),%ebx # esi is 4-aligned so should be ok | ||
208 | addl %ebx,%eax | ||
209 | adcl $0,%eax | ||
210 | 80: | ||
211 | popl %ebx | ||
212 | popl %esi | ||
213 | ret | ||
214 | |||
215 | #endif | ||
216 | |||
217 | /* | ||
218 | unsigned int csum_partial_copy_generic (const char *src, char *dst, | ||
219 | int len, int sum, int *src_err_ptr, int *dst_err_ptr) | ||
220 | */ | ||
221 | |||
222 | /* | ||
223 | * Copy from ds while checksumming, otherwise like csum_partial | ||
224 | * | ||
225 | * The macros SRC and DST specify the type of access for the instruction. | ||
226 | * thus we can call a custom exception handler for all access types. | ||
227 | * | ||
228 | * FIXME: could someone double-check whether I haven't mixed up some SRC and | ||
229 | * DST definitions? It's damn hard to trigger all cases. I hope I got | ||
230 | * them all but there's no guarantee. | ||
231 | */ | ||
232 | |||
233 | #define SRC(y...) \ | ||
234 | 9999: y; \ | ||
235 | .section __ex_table, "a"; \ | ||
236 | .long 9999b, 6001f ; \ | ||
237 | .previous | ||
238 | |||
239 | #define DST(y...) \ | ||
240 | 9999: y; \ | ||
241 | .section __ex_table, "a"; \ | ||
242 | .long 9999b, 6002f ; \ | ||
243 | .previous | ||
244 | |||
245 | .align 4 | ||
246 | |||
247 | #ifndef CONFIG_X86_USE_PPRO_CHECKSUM | ||
248 | |||
249 | #define ARGBASE 16 | ||
250 | #define FP 12 | ||
251 | |||
252 | csum_partial_copy_generic_i386: | ||
253 | subl $4,%esp | ||
254 | pushl %edi | ||
255 | pushl %esi | ||
256 | pushl %ebx | ||
257 | movl ARGBASE+16(%esp),%eax # sum | ||
258 | movl ARGBASE+12(%esp),%ecx # len | ||
259 | movl ARGBASE+4(%esp),%esi # src | ||
260 | movl ARGBASE+8(%esp),%edi # dst | ||
261 | |||
262 | testl $2, %edi # Check alignment. | ||
263 | jz 2f # Jump if alignment is ok. | ||
264 | subl $2, %ecx # Alignment uses up two bytes. | ||
265 | jae 1f # Jump if we had at least two bytes. | ||
266 | addl $2, %ecx # ecx was < 2. Deal with it. | ||
267 | jmp 4f | ||
268 | SRC(1: movw (%esi), %bx ) | ||
269 | addl $2, %esi | ||
270 | DST( movw %bx, (%edi) ) | ||
271 | addl $2, %edi | ||
272 | addw %bx, %ax | ||
273 | adcl $0, %eax | ||
274 | 2: | ||
275 | movl %ecx, FP(%esp) | ||
276 | shrl $5, %ecx | ||
277 | jz 2f | ||
278 | testl %esi, %esi | ||
279 | SRC(1: movl (%esi), %ebx ) | ||
280 | SRC( movl 4(%esi), %edx ) | ||
281 | adcl %ebx, %eax | ||
282 | DST( movl %ebx, (%edi) ) | ||
283 | adcl %edx, %eax | ||
284 | DST( movl %edx, 4(%edi) ) | ||
285 | |||
286 | SRC( movl 8(%esi), %ebx ) | ||
287 | SRC( movl 12(%esi), %edx ) | ||
288 | adcl %ebx, %eax | ||
289 | DST( movl %ebx, 8(%edi) ) | ||
290 | adcl %edx, %eax | ||
291 | DST( movl %edx, 12(%edi) ) | ||
292 | |||
293 | SRC( movl 16(%esi), %ebx ) | ||
294 | SRC( movl 20(%esi), %edx ) | ||
295 | adcl %ebx, %eax | ||
296 | DST( movl %ebx, 16(%edi) ) | ||
297 | adcl %edx, %eax | ||
298 | DST( movl %edx, 20(%edi) ) | ||
299 | |||
300 | SRC( movl 24(%esi), %ebx ) | ||
301 | SRC( movl 28(%esi), %edx ) | ||
302 | adcl %ebx, %eax | ||
303 | DST( movl %ebx, 24(%edi) ) | ||
304 | adcl %edx, %eax | ||
305 | DST( movl %edx, 28(%edi) ) | ||
306 | |||
307 | lea 32(%esi), %esi | ||
308 | lea 32(%edi), %edi | ||
309 | dec %ecx | ||
310 | jne 1b | ||
311 | adcl $0, %eax | ||
312 | 2: movl FP(%esp), %edx | ||
313 | movl %edx, %ecx | ||
314 | andl $0x1c, %edx | ||
315 | je 4f | ||
316 | shrl $2, %edx # This clears CF | ||
317 | SRC(3: movl (%esi), %ebx ) | ||
318 | adcl %ebx, %eax | ||
319 | DST( movl %ebx, (%edi) ) | ||
320 | lea 4(%esi), %esi | ||
321 | lea 4(%edi), %edi | ||
322 | dec %edx | ||
323 | jne 3b | ||
324 | adcl $0, %eax | ||
325 | 4: andl $3, %ecx | ||
326 | jz 7f | ||
327 | cmpl $2, %ecx | ||
328 | jb 5f | ||
329 | SRC( movw (%esi), %cx ) | ||
330 | leal 2(%esi), %esi | ||
331 | DST( movw %cx, (%edi) ) | ||
332 | leal 2(%edi), %edi | ||
333 | je 6f | ||
334 | shll $16,%ecx | ||
335 | SRC(5: movb (%esi), %cl ) | ||
336 | DST( movb %cl, (%edi) ) | ||
337 | 6: addl %ecx, %eax | ||
338 | adcl $0, %eax | ||
339 | 7: | ||
340 | 5000: | ||
341 | |||
342 | # Exception handler: | ||
343 | .section .fixup, "ax" | ||
344 | |||
345 | 6001: | ||
346 | movl ARGBASE+20(%esp), %ebx # src_err_ptr | ||
347 | movl $-EFAULT, (%ebx) | ||
348 | |||
349 | # zero the complete destination - computing the rest | ||
350 | # is too much work | ||
351 | movl ARGBASE+8(%esp), %edi # dst | ||
352 | movl ARGBASE+12(%esp), %ecx # len | ||
353 | xorl %eax,%eax | ||
354 | rep ; stosb | ||
355 | |||
356 | jmp 5000b | ||
357 | |||
358 | 6002: | ||
359 | movl ARGBASE+24(%esp), %ebx # dst_err_ptr | ||
360 | movl $-EFAULT,(%ebx) | ||
361 | jmp 5000b | ||
362 | |||
363 | .previous | ||
364 | |||
365 | popl %ebx | ||
366 | popl %esi | ||
367 | popl %edi | ||
368 | popl %ecx # equivalent to addl $4,%esp | ||
369 | ret | ||
370 | |||
371 | #else | ||
372 | |||
373 | /* Version for PentiumII/PPro */ | ||
374 | |||
375 | #define ROUND1(x) \ | ||
376 | SRC(movl x(%esi), %ebx ) ; \ | ||
377 | addl %ebx, %eax ; \ | ||
378 | DST(movl %ebx, x(%edi) ) ; | ||
379 | |||
380 | #define ROUND(x) \ | ||
381 | SRC(movl x(%esi), %ebx ) ; \ | ||
382 | adcl %ebx, %eax ; \ | ||
383 | DST(movl %ebx, x(%edi) ) ; | ||
384 | |||
385 | #define ARGBASE 12 | ||
386 | |||
387 | csum_partial_copy_generic_i386: | ||
388 | pushl %ebx | ||
389 | pushl %edi | ||
390 | pushl %esi | ||
391 | movl ARGBASE+4(%esp),%esi #src | ||
392 | movl ARGBASE+8(%esp),%edi #dst | ||
393 | movl ARGBASE+12(%esp),%ecx #len | ||
394 | movl ARGBASE+16(%esp),%eax #sum | ||
395 | # movl %ecx, %edx | ||
396 | movl %ecx, %ebx | ||
397 | movl %esi, %edx | ||
398 | shrl $6, %ecx | ||
399 | andl $0x3c, %ebx | ||
400 | negl %ebx | ||
401 | subl %ebx, %esi | ||
402 | subl %ebx, %edi | ||
403 | lea -1(%esi),%edx | ||
404 | andl $-32,%edx | ||
405 | lea 3f(%ebx,%ebx), %ebx | ||
406 | testl %esi, %esi | ||
407 | jmp *%ebx | ||
408 | 1: addl $64,%esi | ||
409 | addl $64,%edi | ||
410 | SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl) | ||
411 | ROUND1(-64) ROUND(-60) ROUND(-56) ROUND(-52) | ||
412 | ROUND (-48) ROUND(-44) ROUND(-40) ROUND(-36) | ||
413 | ROUND (-32) ROUND(-28) ROUND(-24) ROUND(-20) | ||
414 | ROUND (-16) ROUND(-12) ROUND(-8) ROUND(-4) | ||
415 | 3: adcl $0,%eax | ||
416 | addl $64, %edx | ||
417 | dec %ecx | ||
418 | jge 1b | ||
419 | 4: movl ARGBASE+12(%esp),%edx #len | ||
420 | andl $3, %edx | ||
421 | jz 7f | ||
422 | cmpl $2, %edx | ||
423 | jb 5f | ||
424 | SRC( movw (%esi), %dx ) | ||
425 | leal 2(%esi), %esi | ||
426 | DST( movw %dx, (%edi) ) | ||
427 | leal 2(%edi), %edi | ||
428 | je 6f | ||
429 | shll $16,%edx | ||
430 | 5: | ||
431 | SRC( movb (%esi), %dl ) | ||
432 | DST( movb %dl, (%edi) ) | ||
433 | 6: addl %edx, %eax | ||
434 | adcl $0, %eax | ||
435 | 7: | ||
436 | .section .fixup, "ax" | ||
437 | 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr | ||
438 | movl $-EFAULT, (%ebx) | ||
439 | # zero the complete destination (computing the rest is too much work) | ||
440 | movl ARGBASE+8(%esp),%edi # dst | ||
441 | movl ARGBASE+12(%esp),%ecx # len | ||
442 | xorl %eax,%eax | ||
443 | rep; stosb | ||
444 | jmp 7b | ||
445 | 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr | ||
446 | movl $-EFAULT, (%ebx) | ||
447 | jmp 7b | ||
448 | .previous | ||
449 | |||
450 | popl %esi | ||
451 | popl %edi | ||
452 | popl %ebx | ||
453 | ret | ||
454 | |||
455 | #undef ROUND | ||
456 | #undef ROUND1 | ||
457 | |||
458 | #endif | ||
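The "last 1-3 bytes without jumping" sequence above builds a byte mask from the residual length instead of branching. A C rendering of that mask computation (a sketch for illustration only; tail_mask is not a symbol in this file):

	/*
	 * count = len & 3 (1..3 remaining bytes).  Mirrors the asm:
	 *   notl %ecx ; shll $3 ; shrl %cl,%ebx  ==>  0xffffff >> ((~count << 3) & 31)
	 *   1 byte  -> shift 16 -> mask 0x0000ff
	 *   2 bytes -> shift  8 -> mask 0x00ffff
	 *   3 bytes -> shift  0 -> mask 0xffffff
	 * (the & 31 matches the x86 rule that shift counts use only the low 5 bits;
	 *  the expression is equivalent to 0xffffff >> ((3 - count) * 8))
	 */
	static unsigned int tail_mask(unsigned int count)
	{
		return 0xffffffu >> ((~count << 3) & 31);
	}

The masked 32-bit word at the end of the buffer is then folded into the running checksum with an add and a final add-with-carry, as the addl/adcl pair in the asm does.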
diff --git a/arch/x86/um/delay.c b/arch/x86/um/delay.c new file mode 100644 index 000000000000..f3fe1a688f7e --- /dev/null +++ b/arch/x86/um/delay.c | |||
@@ -0,0 +1,60 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2011 Richard Weinberger <richrd@nod.at> | ||
3 | * Mostly copied from arch/x86/lib/delay.c | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | |||
10 | #include <linux/module.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/delay.h> | ||
13 | #include <asm/param.h> | ||
14 | |||
15 | void __delay(unsigned long loops) | ||
16 | { | ||
17 | asm volatile( | ||
18 | "test %0,%0\n" | ||
19 | "jz 3f\n" | ||
20 | "jmp 1f\n" | ||
21 | |||
22 | ".align 16\n" | ||
23 | "1: jmp 2f\n" | ||
24 | |||
25 | ".align 16\n" | ||
26 | "2: dec %0\n" | ||
27 | " jnz 2b\n" | ||
28 | "3: dec %0\n" | ||
29 | |||
30 | : /* we don't need output */ | ||
31 | : "a" (loops) | ||
32 | ); | ||
33 | } | ||
34 | EXPORT_SYMBOL(__delay); | ||
35 | |||
36 | inline void __const_udelay(unsigned long xloops) | ||
37 | { | ||
38 | int d0; | ||
39 | |||
40 | xloops *= 4; | ||
41 | asm("mull %%edx" | ||
42 | : "=d" (xloops), "=&a" (d0) | ||
43 | : "1" (xloops), "0" | ||
44 | (loops_per_jiffy * (HZ/4))); | ||
45 | |||
46 | __delay(++xloops); | ||
47 | } | ||
48 | EXPORT_SYMBOL(__const_udelay); | ||
49 | |||
50 | void __udelay(unsigned long usecs) | ||
51 | { | ||
52 | __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */ | ||
53 | } | ||
54 | EXPORT_SYMBOL(__udelay); | ||
55 | |||
56 | void __ndelay(unsigned long nsecs) | ||
57 | { | ||
58 | __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */ | ||
59 | } | ||
60 | EXPORT_SYMBOL(__ndelay); | ||
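__const_udelay() above is 32.32 fixed-point arithmetic: __udelay() pre-scales the microsecond count by 0x10c7 (roughly 2**32 / 10**6, rounded up), and the mull keeps only the high 32 bits of the product with loops_per_jiffy * HZ (the "*= 4" and "HZ/4" merely keep the 32-bit intermediates from overflowing). A user-space sketch of the same arithmetic, with an illustrative name and 64-bit intermediates for clarity:

	#include <stdint.h>

	/*
	 * loops = usecs * (2^32 / 10^6) * loops_per_jiffy * HZ / 2^32
	 *       = usecs * loops_per_jiffy * HZ / 10^6
	 * i.e. the number of __delay() iterations lasting roughly 'usecs'.
	 */
	static unsigned long loops_for_udelay(unsigned long usecs,
					      unsigned long loops_per_jiffy,
					      unsigned long hz)
	{
		uint64_t xloops = (uint64_t)usecs * 0x10c7;	/* 2^32/10^6, rounded up */

		/* mull: take the high 32 bits of the scaled product */
		return (unsigned long)((xloops * loops_per_jiffy * hz) >> 32);
	}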
diff --git a/arch/x86/um/elfcore.c b/arch/x86/um/elfcore.c new file mode 100644 index 000000000000..6bb49b687c97 --- /dev/null +++ b/arch/x86/um/elfcore.c | |||
@@ -0,0 +1,83 @@ | |||
1 | #include <linux/elf.h> | ||
2 | #include <linux/coredump.h> | ||
3 | #include <linux/fs.h> | ||
4 | #include <linux/mm.h> | ||
5 | |||
6 | #include <asm/elf.h> | ||
7 | |||
8 | |||
9 | Elf32_Half elf_core_extra_phdrs(void) | ||
10 | { | ||
11 | return vsyscall_ehdr ? (((struct elfhdr *)vsyscall_ehdr)->e_phnum) : 0; | ||
12 | } | ||
13 | |||
14 | int elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size, | ||
15 | unsigned long limit) | ||
16 | { | ||
17 | if ( vsyscall_ehdr ) { | ||
18 | const struct elfhdr *const ehdrp = | ||
19 | (struct elfhdr *) vsyscall_ehdr; | ||
20 | const struct elf_phdr *const phdrp = | ||
21 | (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff); | ||
22 | int i; | ||
23 | Elf32_Off ofs = 0; | ||
24 | |||
25 | for (i = 0; i < ehdrp->e_phnum; ++i) { | ||
26 | struct elf_phdr phdr = phdrp[i]; | ||
27 | |||
28 | if (phdr.p_type == PT_LOAD) { | ||
29 | ofs = phdr.p_offset = offset; | ||
30 | offset += phdr.p_filesz; | ||
31 | } else { | ||
32 | phdr.p_offset += ofs; | ||
33 | } | ||
34 | phdr.p_paddr = 0; /* match other core phdrs */ | ||
35 | *size += sizeof(phdr); | ||
36 | if (*size > limit | ||
37 | || !dump_write(file, &phdr, sizeof(phdr))) | ||
38 | return 0; | ||
39 | } | ||
40 | } | ||
41 | return 1; | ||
42 | } | ||
43 | |||
44 | int elf_core_write_extra_data(struct file *file, size_t *size, | ||
45 | unsigned long limit) | ||
46 | { | ||
47 | if ( vsyscall_ehdr ) { | ||
48 | const struct elfhdr *const ehdrp = | ||
49 | (struct elfhdr *) vsyscall_ehdr; | ||
50 | const struct elf_phdr *const phdrp = | ||
51 | (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff); | ||
52 | int i; | ||
53 | |||
54 | for (i = 0; i < ehdrp->e_phnum; ++i) { | ||
55 | if (phdrp[i].p_type == PT_LOAD) { | ||
56 | void *addr = (void *) phdrp[i].p_vaddr; | ||
57 | size_t filesz = phdrp[i].p_filesz; | ||
58 | |||
59 | *size += filesz; | ||
60 | if (*size > limit | ||
61 | || !dump_write(file, addr, filesz)) | ||
62 | return 0; | ||
63 | } | ||
64 | } | ||
65 | } | ||
66 | return 1; | ||
67 | } | ||
68 | |||
69 | size_t elf_core_extra_data_size(void) | ||
70 | { | ||
71 | if ( vsyscall_ehdr ) { | ||
72 | const struct elfhdr *const ehdrp = | ||
73 | (struct elfhdr *)vsyscall_ehdr; | ||
74 | const struct elf_phdr *const phdrp = | ||
75 | (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff); | ||
76 | int i; | ||
77 | |||
78 | for (i = 0; i < ehdrp->e_phnum; ++i) | ||
79 | if (phdrp[i].p_type == PT_LOAD) | ||
80 | return (size_t) phdrp[i].p_filesz; | ||
81 | } | ||
82 | return 0; | ||
83 | } | ||
diff --git a/arch/x86/um/fault.c b/arch/x86/um/fault.c new file mode 100644 index 000000000000..d670f68532f4 --- /dev/null +++ b/arch/x86/um/fault.c | |||
@@ -0,0 +1,28 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #include "sysdep/ptrace.h" | ||
7 | |||
8 | /* These two are from asm-um/uaccess.h and linux/module.h, check them. */ | ||
9 | struct exception_table_entry | ||
10 | { | ||
11 | unsigned long insn; | ||
12 | unsigned long fixup; | ||
13 | }; | ||
14 | |||
15 | const struct exception_table_entry *search_exception_tables(unsigned long add); | ||
16 | |||
17 | /* Compare this to arch/i386/mm/extable.c:fixup_exception() */ | ||
18 | int arch_fixup(unsigned long address, struct uml_pt_regs *regs) | ||
19 | { | ||
20 | const struct exception_table_entry *fixup; | ||
21 | |||
22 | fixup = search_exception_tables(address); | ||
23 | if (fixup != 0) { | ||
24 | UPT_IP(regs) = fixup->fixup; | ||
25 | return 1; | ||
26 | } | ||
27 | return 0; | ||
28 | } | ||
diff --git a/arch/x86/um/ksyms.c b/arch/x86/um/ksyms.c new file mode 100644 index 000000000000..2e8f43ec6214 --- /dev/null +++ b/arch/x86/um/ksyms.c | |||
@@ -0,0 +1,13 @@ | |||
1 | #include <linux/module.h> | ||
2 | #include <asm/string.h> | ||
3 | #include <asm/checksum.h> | ||
4 | |||
5 | #ifndef CONFIG_X86_32 | ||
6 | /*XXX: we need them because they would be exported by x86_64 */ | ||
7 | #if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4 | ||
8 | EXPORT_SYMBOL(memcpy); | ||
9 | #else | ||
10 | EXPORT_SYMBOL(__memcpy); | ||
11 | #endif | ||
12 | #endif | ||
13 | EXPORT_SYMBOL(csum_partial); | ||
diff --git a/arch/x86/um/ldt.c b/arch/x86/um/ldt.c new file mode 100644 index 000000000000..26b0e39d2ce9 --- /dev/null +++ b/arch/x86/um/ldt.c | |||
@@ -0,0 +1,502 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #include <linux/mm.h> | ||
7 | #include <linux/sched.h> | ||
8 | #include <linux/slab.h> | ||
9 | #include <asm/unistd.h> | ||
10 | #include "os.h" | ||
11 | #include "proc_mm.h" | ||
12 | #include "skas.h" | ||
13 | #include "skas_ptrace.h" | ||
14 | #include "sysdep/tls.h" | ||
15 | |||
16 | extern int modify_ldt(int func, void *ptr, unsigned long bytecount); | ||
17 | |||
18 | static long write_ldt_entry(struct mm_id *mm_idp, int func, | ||
19 | struct user_desc *desc, void **addr, int done) | ||
20 | { | ||
21 | long res; | ||
22 | |||
23 | if (proc_mm) { | ||
24 | /* | ||
25 | * This is special handling for the case where the mm to | ||
26 | * modify isn't current->active_mm. | ||
27 | * If this is called directly by modify_ldt, | ||
28 | * (current->active_mm->context.skas.u == mm_idp) | ||
29 | * will be true. So no call to __switch_mm(mm_idp) is done. | ||
30 | * If this is called in case of init_new_ldt or PTRACE_LDT, | ||
31 | * mm_idp won't belong to current->active_mm, but child->mm. | ||
32 | * So we need to switch child's mm into our userspace, then | ||
33 | * later switch back. | ||
34 | * | ||
35 | * Note: I'm unsure: should interrupts be disabled here? | ||
36 | */ | ||
37 | if (!current->active_mm || current->active_mm == &init_mm || | ||
38 | mm_idp != ¤t->active_mm->context.id) | ||
39 | __switch_mm(mm_idp); | ||
40 | } | ||
41 | |||
42 | if (ptrace_ldt) { | ||
43 | struct ptrace_ldt ldt_op = (struct ptrace_ldt) { | ||
44 | .func = func, | ||
45 | .ptr = desc, | ||
46 | .bytecount = sizeof(*desc)}; | ||
47 | u32 cpu; | ||
48 | int pid; | ||
49 | |||
50 | if (!proc_mm) | ||
51 | pid = mm_idp->u.pid; | ||
52 | else { | ||
53 | cpu = get_cpu(); | ||
54 | pid = userspace_pid[cpu]; | ||
55 | } | ||
56 | |||
57 | res = os_ptrace_ldt(pid, 0, (unsigned long) &ldt_op); | ||
58 | |||
59 | if (proc_mm) | ||
60 | put_cpu(); | ||
61 | } | ||
62 | else { | ||
63 | void *stub_addr; | ||
64 | res = syscall_stub_data(mm_idp, (unsigned long *)desc, | ||
65 | (sizeof(*desc) + sizeof(long) - 1) & | ||
66 | ~(sizeof(long) - 1), | ||
67 | addr, &stub_addr); | ||
68 | if (!res) { | ||
69 | unsigned long args[] = { func, | ||
70 | (unsigned long)stub_addr, | ||
71 | sizeof(*desc), | ||
72 | 0, 0, 0 }; | ||
73 | res = run_syscall_stub(mm_idp, __NR_modify_ldt, args, | ||
74 | 0, addr, done); | ||
75 | } | ||
76 | } | ||
77 | |||
78 | if (proc_mm) { | ||
79 | /* | ||
80 | * This is the second part of special handling, that makes | ||
81 | * PTRACE_LDT possible to implement. | ||
82 | */ | ||
83 | if (current->active_mm && current->active_mm != &init_mm && | ||
84 | mm_idp != ¤t->active_mm->context.id) | ||
85 | __switch_mm(¤t->active_mm->context.id); | ||
86 | } | ||
87 | |||
88 | return res; | ||
89 | } | ||
90 | |||
91 | static long read_ldt_from_host(void __user * ptr, unsigned long bytecount) | ||
92 | { | ||
93 | int res, n; | ||
94 | struct ptrace_ldt ptrace_ldt = (struct ptrace_ldt) { | ||
95 | .func = 0, | ||
96 | .bytecount = bytecount, | ||
97 | .ptr = kmalloc(bytecount, GFP_KERNEL)}; | ||
98 | u32 cpu; | ||
99 | |||
100 | if (ptrace_ldt.ptr == NULL) | ||
101 | return -ENOMEM; | ||
102 | |||
103 | /* | ||
104 | * This is called from sys_modify_ldt only, so userspace_pid gives | ||
105 | * us the right number | ||
106 | */ | ||
107 | |||
108 | cpu = get_cpu(); | ||
109 | res = os_ptrace_ldt(userspace_pid[cpu], 0, (unsigned long) &ptrace_ldt); | ||
110 | put_cpu(); | ||
111 | if (res < 0) | ||
112 | goto out; | ||
113 | |||
114 | n = copy_to_user(ptr, ptrace_ldt.ptr, res); | ||
115 | if (n != 0) | ||
116 | res = -EFAULT; | ||
117 | |||
118 | out: | ||
119 | kfree(ptrace_ldt.ptr); | ||
120 | |||
121 | return res; | ||
122 | } | ||
123 | |||
124 | /* | ||
125 | * In skas mode, we hold our own ldt data in UML. | ||
126 | * Thus, the code implementing sys_modify_ldt_skas | ||
127 | * is very similar to (and mostly stolen from) sys_modify_ldt | ||
128 | * for arch/i386/kernel/ldt.c | ||
129 | * The routines copied and modified in part are: | ||
130 | * - read_ldt | ||
131 | * - read_default_ldt | ||
132 | * - write_ldt | ||
133 | * - sys_modify_ldt_skas | ||
134 | */ | ||
135 | |||
136 | static int read_ldt(void __user * ptr, unsigned long bytecount) | ||
137 | { | ||
138 | int i, err = 0; | ||
139 | unsigned long size; | ||
140 | uml_ldt_t *ldt = ¤t->mm->context.arch.ldt; | ||
141 | |||
142 | if (!ldt->entry_count) | ||
143 | goto out; | ||
144 | if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES) | ||
145 | bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES; | ||
146 | err = bytecount; | ||
147 | |||
148 | if (ptrace_ldt) | ||
149 | return read_ldt_from_host(ptr, bytecount); | ||
150 | |||
151 | mutex_lock(&ldt->lock); | ||
152 | if (ldt->entry_count <= LDT_DIRECT_ENTRIES) { | ||
153 | size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES; | ||
154 | if (size > bytecount) | ||
155 | size = bytecount; | ||
156 | if (copy_to_user(ptr, ldt->u.entries, size)) | ||
157 | err = -EFAULT; | ||
158 | bytecount -= size; | ||
159 | ptr += size; | ||
160 | } | ||
161 | else { | ||
162 | for (i=0; i<ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount; | ||
163 | i++) { | ||
164 | size = PAGE_SIZE; | ||
165 | if (size > bytecount) | ||
166 | size = bytecount; | ||
167 | if (copy_to_user(ptr, ldt->u.pages[i], size)) { | ||
168 | err = -EFAULT; | ||
169 | break; | ||
170 | } | ||
171 | bytecount -= size; | ||
172 | ptr += size; | ||
173 | } | ||
174 | } | ||
175 | mutex_unlock(&ldt->lock); | ||
176 | |||
177 | if (bytecount == 0 || err == -EFAULT) | ||
178 | goto out; | ||
179 | |||
180 | if (clear_user(ptr, bytecount)) | ||
181 | err = -EFAULT; | ||
182 | |||
183 | out: | ||
184 | return err; | ||
185 | } | ||
186 | |||
187 | static int read_default_ldt(void __user * ptr, unsigned long bytecount) | ||
188 | { | ||
189 | int err; | ||
190 | |||
191 | if (bytecount > 5*LDT_ENTRY_SIZE) | ||
192 | bytecount = 5*LDT_ENTRY_SIZE; | ||
193 | |||
194 | err = bytecount; | ||
195 | /* | ||
196 | * UML doesn't support lcall7 and lcall27. | ||
197 | * So, we don't really have a default ldt, but emulate | ||
198 | * an empty ldt of common host default ldt size. | ||
199 | */ | ||
200 | if (clear_user(ptr, bytecount)) | ||
201 | err = -EFAULT; | ||
202 | |||
203 | return err; | ||
204 | } | ||
205 | |||
206 | static int write_ldt(void __user * ptr, unsigned long bytecount, int func) | ||
207 | { | ||
208 | uml_ldt_t *ldt = ¤t->mm->context.arch.ldt; | ||
209 | struct mm_id * mm_idp = ¤t->mm->context.id; | ||
210 | int i, err; | ||
211 | struct user_desc ldt_info; | ||
212 | struct ldt_entry entry0, *ldt_p; | ||
213 | void *addr = NULL; | ||
214 | |||
215 | err = -EINVAL; | ||
216 | if (bytecount != sizeof(ldt_info)) | ||
217 | goto out; | ||
218 | err = -EFAULT; | ||
219 | if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info))) | ||
220 | goto out; | ||
221 | |||
222 | err = -EINVAL; | ||
223 | if (ldt_info.entry_number >= LDT_ENTRIES) | ||
224 | goto out; | ||
225 | if (ldt_info.contents == 3) { | ||
226 | if (func == 1) | ||
227 | goto out; | ||
228 | if (ldt_info.seg_not_present == 0) | ||
229 | goto out; | ||
230 | } | ||
231 | |||
232 | if (!ptrace_ldt) | ||
233 | mutex_lock(&ldt->lock); | ||
234 | |||
235 | err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1); | ||
236 | if (err) | ||
237 | goto out_unlock; | ||
238 | else if (ptrace_ldt) { | ||
239 | /* With PTRACE_LDT available, this is used as a flag only */ | ||
240 | ldt->entry_count = 1; | ||
241 | goto out; | ||
242 | } | ||
243 | |||
244 | if (ldt_info.entry_number >= ldt->entry_count && | ||
245 | ldt_info.entry_number >= LDT_DIRECT_ENTRIES) { | ||
246 | for (i=ldt->entry_count/LDT_ENTRIES_PER_PAGE; | ||
247 | i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number; | ||
248 | i++) { | ||
249 | if (i == 0) | ||
250 | memcpy(&entry0, ldt->u.entries, | ||
251 | sizeof(entry0)); | ||
252 | ldt->u.pages[i] = (struct ldt_entry *) | ||
253 | __get_free_page(GFP_KERNEL|__GFP_ZERO); | ||
254 | if (!ldt->u.pages[i]) { | ||
255 | err = -ENOMEM; | ||
256 | /* Undo the change in host */ | ||
257 | memset(&ldt_info, 0, sizeof(ldt_info)); | ||
258 | write_ldt_entry(mm_idp, 1, &ldt_info, &addr, 1); | ||
259 | goto out_unlock; | ||
260 | } | ||
261 | if (i == 0) { | ||
262 | memcpy(ldt->u.pages[0], &entry0, | ||
263 | sizeof(entry0)); | ||
264 | memcpy(ldt->u.pages[0]+1, ldt->u.entries+1, | ||
265 | sizeof(entry0)*(LDT_DIRECT_ENTRIES-1)); | ||
266 | } | ||
267 | ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE; | ||
268 | } | ||
269 | } | ||
270 | if (ldt->entry_count <= ldt_info.entry_number) | ||
271 | ldt->entry_count = ldt_info.entry_number + 1; | ||
272 | |||
273 | if (ldt->entry_count <= LDT_DIRECT_ENTRIES) | ||
274 | ldt_p = ldt->u.entries + ldt_info.entry_number; | ||
275 | else | ||
276 | ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] + | ||
277 | ldt_info.entry_number%LDT_ENTRIES_PER_PAGE; | ||
278 | |||
279 | if (ldt_info.base_addr == 0 && ldt_info.limit == 0 && | ||
280 | (func == 1 || LDT_empty(&ldt_info))) { | ||
281 | ldt_p->a = 0; | ||
282 | ldt_p->b = 0; | ||
283 | } | ||
284 | else{ | ||
285 | if (func == 1) | ||
286 | ldt_info.useable = 0; | ||
287 | ldt_p->a = LDT_entry_a(&ldt_info); | ||
288 | ldt_p->b = LDT_entry_b(&ldt_info); | ||
289 | } | ||
290 | err = 0; | ||
291 | |||
292 | out_unlock: | ||
293 | mutex_unlock(&ldt->lock); | ||
294 | out: | ||
295 | return err; | ||
296 | } | ||
297 | |||
298 | static long do_modify_ldt_skas(int func, void __user *ptr, | ||
299 | unsigned long bytecount) | ||
300 | { | ||
301 | int ret = -ENOSYS; | ||
302 | |||
303 | switch (func) { | ||
304 | case 0: | ||
305 | ret = read_ldt(ptr, bytecount); | ||
306 | break; | ||
307 | case 1: | ||
308 | case 0x11: | ||
309 | ret = write_ldt(ptr, bytecount, func); | ||
310 | break; | ||
311 | case 2: | ||
312 | ret = read_default_ldt(ptr, bytecount); | ||
313 | break; | ||
314 | } | ||
315 | return ret; | ||
316 | } | ||
317 | |||
318 | static DEFINE_SPINLOCK(host_ldt_lock); | ||
319 | static short dummy_list[9] = {0, -1}; | ||
320 | static short * host_ldt_entries = NULL; | ||
321 | |||
322 | static void ldt_get_host_info(void) | ||
323 | { | ||
324 | long ret; | ||
325 | struct ldt_entry * ldt; | ||
326 | short *tmp; | ||
327 | int i, size, k, order; | ||
328 | |||
329 | spin_lock(&host_ldt_lock); | ||
330 | |||
331 | if (host_ldt_entries != NULL) { | ||
332 | spin_unlock(&host_ldt_lock); | ||
333 | return; | ||
334 | } | ||
335 | host_ldt_entries = dummy_list+1; | ||
336 | |||
337 | spin_unlock(&host_ldt_lock); | ||
338 | |||
339 | for (i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++) | ||
340 | ; | ||
341 | |||
342 | ldt = (struct ldt_entry *) | ||
343 | __get_free_pages(GFP_KERNEL|__GFP_ZERO, order); | ||
344 | if (ldt == NULL) { | ||
345 | printk(KERN_ERR "ldt_get_host_info: couldn't allocate buffer " | ||
346 | "for host ldt\n"); | ||
347 | return; | ||
348 | } | ||
349 | |||
350 | ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE); | ||
351 | if (ret < 0) { | ||
352 | printk(KERN_ERR "ldt_get_host_info: couldn't read host ldt\n"); | ||
353 | goto out_free; | ||
354 | } | ||
355 | if (ret == 0) { | ||
356 | /* default_ldt is active, simply write an empty entry 0 */ | ||
357 | host_ldt_entries = dummy_list; | ||
358 | goto out_free; | ||
359 | } | ||
360 | |||
361 | for (i=0, size=0; i<ret/LDT_ENTRY_SIZE; i++) { | ||
362 | if (ldt[i].a != 0 || ldt[i].b != 0) | ||
363 | size++; | ||
364 | } | ||
365 | |||
366 | if (size < ARRAY_SIZE(dummy_list)) | ||
367 | host_ldt_entries = dummy_list; | ||
368 | else { | ||
369 | size = (size + 1) * sizeof(dummy_list[0]); | ||
370 | tmp = kmalloc(size, GFP_KERNEL); | ||
371 | if (tmp == NULL) { | ||
372 | printk(KERN_ERR "ldt_get_host_info: couldn't allocate " | ||
373 | "host ldt list\n"); | ||
374 | goto out_free; | ||
375 | } | ||
376 | host_ldt_entries = tmp; | ||
377 | } | ||
378 | |||
379 | for (i=0, k=0; i<ret/LDT_ENTRY_SIZE; i++) { | ||
380 | if (ldt[i].a != 0 || ldt[i].b != 0) | ||
381 | host_ldt_entries[k++] = i; | ||
382 | } | ||
383 | host_ldt_entries[k] = -1; | ||
384 | |||
385 | out_free: | ||
386 | free_pages((unsigned long)ldt, order); | ||
387 | } | ||
388 | |||
389 | long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm) | ||
390 | { | ||
391 | struct user_desc desc; | ||
392 | short * num_p; | ||
393 | int i; | ||
394 | long page, err=0; | ||
395 | void *addr = NULL; | ||
396 | struct proc_mm_op copy; | ||
397 | |||
398 | |||
399 | if (!ptrace_ldt) | ||
400 | mutex_init(&new_mm->arch.ldt.lock); | ||
401 | |||
402 | if (!from_mm) { | ||
403 | memset(&desc, 0, sizeof(desc)); | ||
404 | /* | ||
405 | * We have to initialize a clean ldt. | ||
406 | */ | ||
407 | if (proc_mm) { | ||
408 | /* | ||
409 | * If the new mm was created using proc_mm, host's | ||
410 | * default-ldt currently is assigned, which normally | ||
411 | * contains the call-gates for lcall7 and lcall27. | ||
412 | * To remove these gates, we simply write an empty | ||
413 | * entry as number 0 to the host. | ||
414 | */ | ||
415 | err = write_ldt_entry(&new_mm->id, 1, &desc, &addr, 1); | ||
416 | } | ||
417 | else{ | ||
418 | /* | ||
419 | * Now we try to retrieve info about the ldt we | ||
420 | * inherited from the host. All ldt-entries found | ||
421 | * will be reset in the following loop | ||
422 | */ | ||
423 | ldt_get_host_info(); | ||
424 | for (num_p=host_ldt_entries; *num_p != -1; num_p++) { | ||
425 | desc.entry_number = *num_p; | ||
426 | err = write_ldt_entry(&new_mm->id, 1, &desc, | ||
427 | &addr, *(num_p + 1) == -1); | ||
428 | if (err) | ||
429 | break; | ||
430 | } | ||
431 | } | ||
432 | new_mm->arch.ldt.entry_count = 0; | ||
433 | |||
434 | goto out; | ||
435 | } | ||
436 | |||
437 | if (proc_mm) { | ||
438 | /* | ||
439 | * We have a valid from_mm, so we now have to copy the LDT of | ||
440 | * from_mm to new_mm, because using proc_mm a new mm with | ||
441 | * an empty/default LDT was created in new_mm() | ||
442 | */ | ||
443 | copy = ((struct proc_mm_op) { .op = MM_COPY_SEGMENTS, | ||
444 | .u = | ||
445 | { .copy_segments = | ||
446 | from_mm->id.u.mm_fd } } ); | ||
447 | i = os_write_file(new_mm->id.u.mm_fd, ©, sizeof(copy)); | ||
448 | if (i != sizeof(copy)) | ||
449 | printk(KERN_ERR "new_mm : /proc/mm copy_segments " | ||
450 | "failed, err = %d\n", -i); | ||
451 | } | ||
452 | |||
453 | if (!ptrace_ldt) { | ||
454 | /* | ||
455 | * Our local LDT is used to supply the data for | ||
456 | * modify_ldt(READLDT), if PTRACE_LDT isn't available, | ||
457 | * i.e., we have to use the stub for modify_ldt, which | ||
458 | * can't handle the big read buffer of up to 64kB. | ||
459 | */ | ||
460 | mutex_lock(&from_mm->arch.ldt.lock); | ||
461 | if (from_mm->arch.ldt.entry_count <= LDT_DIRECT_ENTRIES) | ||
462 | memcpy(new_mm->arch.ldt.u.entries, from_mm->arch.ldt.u.entries, | ||
463 | sizeof(new_mm->arch.ldt.u.entries)); | ||
464 | else { | ||
465 | i = from_mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE; | ||
466 | while (i-->0) { | ||
467 | page = __get_free_page(GFP_KERNEL|__GFP_ZERO); | ||
468 | if (!page) { | ||
469 | err = -ENOMEM; | ||
470 | break; | ||
471 | } | ||
472 | new_mm->arch.ldt.u.pages[i] = | ||
473 | (struct ldt_entry *) page; | ||
474 | memcpy(new_mm->arch.ldt.u.pages[i], | ||
475 | from_mm->arch.ldt.u.pages[i], PAGE_SIZE); | ||
476 | } | ||
477 | } | ||
478 | new_mm->arch.ldt.entry_count = from_mm->arch.ldt.entry_count; | ||
479 | mutex_unlock(&from_mm->arch.ldt.lock); | ||
480 | } | ||
481 | |||
482 | out: | ||
483 | return err; | ||
484 | } | ||
485 | |||
486 | |||
487 | void free_ldt(struct mm_context *mm) | ||
488 | { | ||
489 | int i; | ||
490 | |||
491 | if (!ptrace_ldt && mm->arch.ldt.entry_count > LDT_DIRECT_ENTRIES) { | ||
492 | i = mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE; | ||
493 | while (i-- > 0) | ||
494 | free_page((long) mm->arch.ldt.u.pages[i]); | ||
495 | } | ||
496 | mm->arch.ldt.entry_count = 0; | ||
497 | } | ||
498 | |||
499 | int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount) | ||
500 | { | ||
501 | return do_modify_ldt_skas(func, ptr, bytecount); | ||
502 | } | ||
diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c new file mode 100644 index 000000000000..639900a6fde9 --- /dev/null +++ b/arch/x86/um/mem_32.c | |||
@@ -0,0 +1,62 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2011 Richard Weinberger <richrd@nod.at> | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | #include <linux/mm.h> | ||
10 | #include <asm/page.h> | ||
11 | #include <asm/mman.h> | ||
12 | |||
13 | static struct vm_area_struct gate_vma; | ||
14 | |||
15 | static int __init gate_vma_init(void) | ||
16 | { | ||
17 | if (!FIXADDR_USER_START) | ||
18 | return 0; | ||
19 | |||
20 | gate_vma.vm_mm = NULL; | ||
21 | gate_vma.vm_start = FIXADDR_USER_START; | ||
22 | gate_vma.vm_end = FIXADDR_USER_END; | ||
23 | gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; | ||
24 | gate_vma.vm_page_prot = __P101; | ||
25 | |||
26 | /* | ||
27 | * Make sure the vDSO gets into every core dump. | ||
28 | * Dumping its contents makes post-mortem fully interpretable later | ||
29 | * without matching up the same kernel and hardware config to see | ||
30 | * what PC values meant. | ||
31 | */ | ||
32 | gate_vma.vm_flags |= VM_ALWAYSDUMP; | ||
33 | |||
34 | return 0; | ||
35 | } | ||
36 | __initcall(gate_vma_init); | ||
37 | |||
38 | struct vm_area_struct *get_gate_vma(struct mm_struct *mm) | ||
39 | { | ||
40 | return FIXADDR_USER_START ? &gate_vma : NULL; | ||
41 | } | ||
42 | |||
43 | int in_gate_area_no_mm(unsigned long addr) | ||
44 | { | ||
45 | if (!FIXADDR_USER_START) | ||
46 | return 0; | ||
47 | |||
48 | if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END)) | ||
49 | return 1; | ||
50 | |||
51 | return 0; | ||
52 | } | ||
53 | |||
54 | int in_gate_area(struct mm_struct *mm, unsigned long addr) | ||
55 | { | ||
56 | struct vm_area_struct *vma = get_gate_vma(mm); | ||
57 | |||
58 | if (!vma) | ||
59 | return 0; | ||
60 | |||
61 | return (addr >= vma->vm_start) && (addr < vma->vm_end); | ||
62 | } | ||
diff --git a/arch/x86/um/mem_64.c b/arch/x86/um/mem_64.c new file mode 100644 index 000000000000..546518727a73 --- /dev/null +++ b/arch/x86/um/mem_64.c | |||
@@ -0,0 +1,26 @@ | |||
1 | #include "linux/mm.h" | ||
2 | #include "asm/page.h" | ||
3 | #include "asm/mman.h" | ||
4 | |||
5 | const char *arch_vma_name(struct vm_area_struct *vma) | ||
6 | { | ||
7 | if (vma->vm_mm && vma->vm_start == um_vdso_addr) | ||
8 | return "[vdso]"; | ||
9 | |||
10 | return NULL; | ||
11 | } | ||
12 | |||
13 | struct vm_area_struct *get_gate_vma(struct mm_struct *mm) | ||
14 | { | ||
15 | return NULL; | ||
16 | } | ||
17 | |||
18 | int in_gate_area(struct mm_struct *mm, unsigned long addr) | ||
19 | { | ||
20 | return 0; | ||
21 | } | ||
22 | |||
23 | int in_gate_area_no_mm(unsigned long addr) | ||
24 | { | ||
25 | return 0; | ||
26 | } | ||
diff --git a/arch/x86/um/os-Linux/Makefile b/arch/x86/um/os-Linux/Makefile new file mode 100644 index 000000000000..253bfb8cb702 --- /dev/null +++ b/arch/x86/um/os-Linux/Makefile | |||
@@ -0,0 +1,13 @@ | |||
1 | # | ||
2 | # Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
3 | # Licensed under the GPL | ||
4 | # | ||
5 | |||
6 | obj-y = registers.o task_size.o mcontext.o | ||
7 | |||
8 | obj-$(CONFIG_X86_32) += tls.o | ||
9 | obj-$(CONFIG_64BIT) += prctl.o | ||
10 | |||
11 | USER_OBJS := $(obj-y) | ||
12 | |||
13 | include arch/um/scripts/Makefile.rules | ||
diff --git a/arch/x86/um/os-Linux/mcontext.c b/arch/x86/um/os-Linux/mcontext.c new file mode 100644 index 000000000000..1d33d72c6284 --- /dev/null +++ b/arch/x86/um/os-Linux/mcontext.c | |||
@@ -0,0 +1,31 @@ | |||
1 | #include <sys/ucontext.h> | ||
2 | #define __FRAME_OFFSETS | ||
3 | #include <asm/ptrace.h> | ||
4 | #include <sysdep/ptrace.h> | ||
5 | |||
6 | void get_regs_from_mc(struct uml_pt_regs *regs, mcontext_t *mc) | ||
7 | { | ||
8 | #ifdef __i386__ | ||
9 | #define COPY2(X,Y) regs->gp[X] = mc->gregs[REG_##Y] | ||
10 | #define COPY(X) regs->gp[X] = mc->gregs[REG_##X] | ||
11 | #define COPY_SEG(X) regs->gp[X] = mc->gregs[REG_##X] & 0xffff; | ||
12 | #define COPY_SEG_CPL3(X) regs->gp[X] = (mc->gregs[REG_##X] & 0xffff) | 3; | ||
13 | COPY_SEG(GS); COPY_SEG(FS); COPY_SEG(ES); COPY_SEG(DS); | ||
14 | COPY(EDI); COPY(ESI); COPY(EBP); | ||
15 | COPY2(UESP, ESP); /* sic */ | ||
16 | COPY(EBX); COPY(EDX); COPY(ECX); COPY(EAX); | ||
17 | COPY(EIP); COPY_SEG_CPL3(CS); COPY(EFL); COPY_SEG_CPL3(SS); | ||
18 | #else | ||
19 | #define COPY2(X,Y) regs->gp[X/sizeof(unsigned long)] = mc->gregs[REG_##Y] | ||
20 | #define COPY(X) regs->gp[X/sizeof(unsigned long)] = mc->gregs[REG_##X] | ||
21 | COPY(R8); COPY(R9); COPY(R10); COPY(R11); | ||
22 | COPY(R12); COPY(R13); COPY(R14); COPY(R15); | ||
23 | COPY(RDI); COPY(RSI); COPY(RBP); COPY(RBX); | ||
24 | COPY(RDX); COPY(RAX); COPY(RCX); COPY(RSP); | ||
25 | COPY(RIP); | ||
26 | COPY2(EFLAGS, EFL); | ||
27 | COPY2(CS, CSGSFS); | ||
28 | regs->gp[CS / sizeof(unsigned long)] &= 0xffff; | ||
29 | regs->gp[CS / sizeof(unsigned long)] |= 3; | ||
30 | #endif | ||
31 | } | ||
diff --git a/arch/x86/um/os-Linux/prctl.c b/arch/x86/um/os-Linux/prctl.c new file mode 100644 index 000000000000..9d34eddb517f --- /dev/null +++ b/arch/x86/um/os-Linux/prctl.c | |||
@@ -0,0 +1,12 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2007 Jeff Dike (jdike@{addtoit.com,linux.intel.com}) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #include <sys/ptrace.h> | ||
7 | #include <linux/ptrace.h> | ||
8 | |||
9 | int os_arch_prctl(int pid, int code, unsigned long *addr) | ||
10 | { | ||
11 | return ptrace(PTRACE_ARCH_PRCTL, pid, (unsigned long) addr, code); | ||
12 | } | ||
diff --git a/arch/x86/um/os-Linux/registers.c b/arch/x86/um/os-Linux/registers.c new file mode 100644 index 000000000000..0cdbb86b012b --- /dev/null +++ b/arch/x86/um/os-Linux/registers.c | |||
@@ -0,0 +1,113 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2004 PathScale, Inc | ||
3 | * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
4 | * Licensed under the GPL | ||
5 | */ | ||
6 | |||
7 | #include <errno.h> | ||
8 | #include <sys/ptrace.h> | ||
9 | #ifdef __i386__ | ||
10 | #include <sys/user.h> | ||
11 | #endif | ||
12 | #include "longjmp.h" | ||
13 | #include "sysdep/ptrace_user.h" | ||
14 | |||
15 | int save_fp_registers(int pid, unsigned long *fp_regs) | ||
16 | { | ||
17 | if (ptrace(PTRACE_GETFPREGS, pid, 0, fp_regs) < 0) | ||
18 | return -errno; | ||
19 | return 0; | ||
20 | } | ||
21 | |||
22 | int restore_fp_registers(int pid, unsigned long *fp_regs) | ||
23 | { | ||
24 | if (ptrace(PTRACE_SETFPREGS, pid, 0, fp_regs) < 0) | ||
25 | return -errno; | ||
26 | return 0; | ||
27 | } | ||
28 | |||
29 | #ifdef __i386__ | ||
30 | int have_fpx_regs = 1; | ||
31 | int save_fpx_registers(int pid, unsigned long *fp_regs) | ||
32 | { | ||
33 | if (ptrace(PTRACE_GETFPXREGS, pid, 0, fp_regs) < 0) | ||
34 | return -errno; | ||
35 | return 0; | ||
36 | } | ||
37 | |||
38 | int restore_fpx_registers(int pid, unsigned long *fp_regs) | ||
39 | { | ||
40 | if (ptrace(PTRACE_SETFPXREGS, pid, 0, fp_regs) < 0) | ||
41 | return -errno; | ||
42 | return 0; | ||
43 | } | ||
44 | |||
45 | int get_fp_registers(int pid, unsigned long *regs) | ||
46 | { | ||
47 | if (have_fpx_regs) | ||
48 | return save_fpx_registers(pid, regs); | ||
49 | else | ||
50 | return save_fp_registers(pid, regs); | ||
51 | } | ||
52 | |||
53 | int put_fp_registers(int pid, unsigned long *regs) | ||
54 | { | ||
55 | if (have_fpx_regs) | ||
56 | return restore_fpx_registers(pid, regs); | ||
57 | else | ||
58 | return restore_fp_registers(pid, regs); | ||
59 | } | ||
60 | |||
61 | void arch_init_registers(int pid) | ||
62 | { | ||
63 | struct user_fpxregs_struct fpx_regs; | ||
64 | int err; | ||
65 | |||
66 | err = ptrace(PTRACE_GETFPXREGS, pid, 0, &fpx_regs); | ||
67 | if (!err) | ||
68 | return; | ||
69 | |||
70 | if (errno != EIO) | ||
71 | panic("check_ptrace : PTRACE_GETFPXREGS failed, errno = %d", | ||
72 | errno); | ||
73 | |||
74 | have_fpx_regs = 0; | ||
75 | } | ||
76 | #else | ||
77 | |||
78 | int get_fp_registers(int pid, unsigned long *regs) | ||
79 | { | ||
80 | return save_fp_registers(pid, regs); | ||
81 | } | ||
82 | |||
83 | int put_fp_registers(int pid, unsigned long *regs) | ||
84 | { | ||
85 | return restore_fp_registers(pid, regs); | ||
86 | } | ||
87 | |||
88 | #endif | ||
89 | |||
90 | unsigned long get_thread_reg(int reg, jmp_buf *buf) | ||
91 | { | ||
92 | switch (reg) { | ||
93 | #ifdef __i386__ | ||
94 | case HOST_IP: | ||
95 | return buf[0]->__eip; | ||
96 | case HOST_SP: | ||
97 | return buf[0]->__esp; | ||
98 | case HOST_BP: | ||
99 | return buf[0]->__ebp; | ||
100 | #else | ||
101 | case HOST_IP: | ||
102 | return buf[0]->__rip; | ||
103 | case HOST_SP: | ||
104 | return buf[0]->__rsp; | ||
105 | case HOST_BP: | ||
106 | return buf[0]->__rbp; | ||
107 | #endif | ||
108 | default: | ||
109 | printk(UM_KERN_ERR "get_thread_regs - unknown register %d\n", | ||
110 | reg); | ||
111 | return 0; | ||
112 | } | ||
113 | } | ||
diff --git a/arch/x86/um/os-Linux/task_size.c b/arch/x86/um/os-Linux/task_size.c new file mode 100644 index 000000000000..efb16c5c9bcf --- /dev/null +++ b/arch/x86/um/os-Linux/task_size.c | |||
@@ -0,0 +1,150 @@ | |||
1 | #include <stdio.h> | ||
2 | #include <stdlib.h> | ||
3 | #include <signal.h> | ||
4 | #include <sys/mman.h> | ||
5 | #include "longjmp.h" | ||
6 | |||
7 | #ifdef __i386__ | ||
8 | |||
9 | static jmp_buf buf; | ||
10 | |||
11 | static void segfault(int sig) | ||
12 | { | ||
13 | longjmp(buf, 1); | ||
14 | } | ||
15 | |||
16 | static int page_ok(unsigned long page) | ||
17 | { | ||
18 | unsigned long *address = (unsigned long *) (page << UM_KERN_PAGE_SHIFT); | ||
19 | unsigned long n = ~0UL; | ||
20 | void *mapped = NULL; | ||
21 | int ok = 0; | ||
22 | |||
23 | /* | ||
24 | * First see if the page is readable. If it is, it may still | ||
25 | * be a VDSO, so we go on to see if it's writable. If not | ||
26 | * then try mapping memory there. If that fails, then we're | ||
27 | * still in the kernel area. As a sanity check, we'll fail if | ||
28 | * the mmap succeeds, but gives us an address different from | ||
29 | * what we wanted. | ||
30 | */ | ||
31 | if (setjmp(buf) == 0) | ||
32 | n = *address; | ||
33 | else { | ||
34 | mapped = mmap(address, UM_KERN_PAGE_SIZE, | ||
35 | PROT_READ | PROT_WRITE, | ||
36 | MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); | ||
37 | if (mapped == MAP_FAILED) | ||
38 | return 0; | ||
39 | if (mapped != address) | ||
40 | goto out; | ||
41 | } | ||
42 | |||
43 | /* | ||
44 | * Now, is it writeable? If so, then we're in user address | ||
45 | * space. If not, then try mprotecting it and try the write | ||
46 | * again. | ||
47 | */ | ||
48 | if (setjmp(buf) == 0) { | ||
49 | *address = n; | ||
50 | ok = 1; | ||
51 | goto out; | ||
52 | } else if (mprotect(address, UM_KERN_PAGE_SIZE, | ||
53 | PROT_READ | PROT_WRITE) != 0) | ||
54 | goto out; | ||
55 | |||
56 | if (setjmp(buf) == 0) { | ||
57 | *address = n; | ||
58 | ok = 1; | ||
59 | } | ||
60 | |||
61 | out: | ||
62 | if (mapped != NULL) | ||
63 | munmap(mapped, UM_KERN_PAGE_SIZE); | ||
64 | return ok; | ||
65 | } | ||
66 | |||
67 | unsigned long os_get_top_address(void) | ||
68 | { | ||
69 | struct sigaction sa, old; | ||
70 | unsigned long bottom = 0; | ||
71 | /* | ||
72 | * A 32-bit UML on a 64-bit host gets confused about the VDSO at | ||
73 | * 0xffffe000. It is mapped, is readable, can be reprotected writeable | ||
74 | * and written. However, exec discovers later that it can't be | ||
75 | * unmapped. So, just set the highest address to be checked to just | ||
76 | * below it. This might waste some address space on 4G/4G 32-bit | ||
77 | * hosts, but shouldn't hurt otherwise. | ||
78 | */ | ||
79 | unsigned long top = 0xffffd000 >> UM_KERN_PAGE_SHIFT; | ||
80 | unsigned long test, original; | ||
81 | |||
82 | printf("Locating the bottom of the address space ... "); | ||
83 | fflush(stdout); | ||
84 | |||
85 | /* | ||
86 | * We're going to be longjmping out of the signal handler, so | ||
87 | * SA_NODEFER needs to be set. | ||
88 | */ | ||
89 | sa.sa_handler = segfault; | ||
90 | sigemptyset(&sa.sa_mask); | ||
91 | sa.sa_flags = SA_NODEFER; | ||
92 | if (sigaction(SIGSEGV, &sa, &old)) { | ||
93 | perror("os_get_top_address"); | ||
94 | exit(1); | ||
95 | } | ||
96 | |||
97 | /* Manually scan the address space, bottom-up, until we find | ||
98 | * the first valid page (or run out of them). | ||
99 | */ | ||
100 | for (bottom = 0; bottom < top; bottom++) { | ||
101 | if (page_ok(bottom)) | ||
102 | break; | ||
103 | } | ||
104 | |||
105 | /* If we've got this far, we ran out of pages. */ | ||
106 | if (bottom == top) { | ||
107 | fprintf(stderr, "Unable to determine bottom of address " | ||
108 | "space.\n"); | ||
109 | exit(1); | ||
110 | } | ||
111 | |||
112 | printf("0x%x\n", bottom << UM_KERN_PAGE_SHIFT); | ||
113 | printf("Locating the top of the address space ... "); | ||
114 | fflush(stdout); | ||
115 | |||
116 | original = bottom; | ||
117 | |||
118 | /* This could happen with a 4G/4G split */ | ||
119 | if (page_ok(top)) | ||
120 | goto out; | ||
121 | |||
122 | do { | ||
123 | test = bottom + (top - bottom) / 2; | ||
124 | if (page_ok(test)) | ||
125 | bottom = test; | ||
126 | else | ||
127 | top = test; | ||
128 | } while (top - bottom > 1); | ||
129 | |||
130 | out: | ||
131 | /* Restore the old SIGSEGV handling */ | ||
132 | if (sigaction(SIGSEGV, &old, NULL)) { | ||
133 | perror("os_get_top_address"); | ||
134 | exit(1); | ||
135 | } | ||
136 | top <<= UM_KERN_PAGE_SHIFT; | ||
137 | printf("0x%x\n", top); | ||
138 | |||
139 | return top; | ||
140 | } | ||
141 | |||
142 | #else | ||
143 | |||
144 | unsigned long os_get_top_address(void) | ||
145 | { | ||
146 | /* The old value of CONFIG_TOP_ADDR */ | ||
147 | return 0x7fc0000000; | ||
148 | } | ||
149 | |||
150 | #endif | ||
diff --git a/arch/x86/um/os-Linux/tls.c b/arch/x86/um/os-Linux/tls.c new file mode 100644 index 000000000000..82276b6071af --- /dev/null +++ b/arch/x86/um/os-Linux/tls.c | |||
@@ -0,0 +1,67 @@ | |||
1 | #include <errno.h> | ||
2 | #include <linux/unistd.h> | ||
3 | |||
4 | #include <sys/ptrace.h> | ||
5 | #include <sys/syscall.h> | ||
6 | #include <unistd.h> | ||
7 | |||
8 | #include "sysdep/tls.h" | ||
9 | |||
10 | #ifndef PTRACE_GET_THREAD_AREA | ||
11 | #define PTRACE_GET_THREAD_AREA 25 | ||
12 | #endif | ||
13 | |||
14 | #ifndef PTRACE_SET_THREAD_AREA | ||
15 | #define PTRACE_SET_THREAD_AREA 26 | ||
16 | #endif | ||
17 | |||
18 | /* Checks whether host supports TLS, and sets *tls_min according to the value | ||
19 | * valid on the host. | ||
20 | * i386 hosts have it == 6; x86_64 hosts have it == 12, for i386 emulation. */ | ||
21 | void check_host_supports_tls(int *supports_tls, int *tls_min) | ||
22 | { | ||
23 | /* Values for x86 and x86_64.*/ | ||
24 | int val[] = {GDT_ENTRY_TLS_MIN_I386, GDT_ENTRY_TLS_MIN_X86_64}; | ||
25 | int i; | ||
26 | |||
27 | for (i = 0; i < ARRAY_SIZE(val); i++) { | ||
28 | user_desc_t info; | ||
29 | info.entry_number = val[i]; | ||
30 | |||
31 | if (syscall(__NR_get_thread_area, &info) == 0) { | ||
32 | *tls_min = val[i]; | ||
33 | *supports_tls = 1; | ||
34 | return; | ||
35 | } else { | ||
36 | if (errno == EINVAL) | ||
37 | continue; | ||
38 | else if (errno == ENOSYS) | ||
39 | *supports_tls = 0; | ||
40 | return; | ||
41 | } | ||
42 | } | ||
43 | |||
44 | *supports_tls = 0; | ||
45 | } | ||
46 | |||
47 | int os_set_thread_area(user_desc_t *info, int pid) | ||
48 | { | ||
49 | int ret; | ||
50 | |||
51 | ret = ptrace(PTRACE_SET_THREAD_AREA, pid, info->entry_number, | ||
52 | (unsigned long) info); | ||
53 | if (ret < 0) | ||
54 | ret = -errno; | ||
55 | return ret; | ||
56 | } | ||
57 | |||
58 | int os_get_thread_area(user_desc_t *info, int pid) | ||
59 | { | ||
60 | int ret; | ||
61 | |||
62 | ret = ptrace(PTRACE_GET_THREAD_AREA, pid, info->entry_number, | ||
63 | (unsigned long) info); | ||
64 | if (ret < 0) | ||
65 | ret = -errno; | ||
66 | return ret; | ||
67 | } | ||
diff --git a/arch/x86/um/ptrace_32.c b/arch/x86/um/ptrace_32.c new file mode 100644 index 000000000000..3b949daa095c --- /dev/null +++ b/arch/x86/um/ptrace_32.c | |||
@@ -0,0 +1,273 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #include "linux/mm.h" | ||
7 | #include "linux/sched.h" | ||
8 | #include "asm/uaccess.h" | ||
9 | #include "skas.h" | ||
10 | |||
11 | extern int arch_switch_tls(struct task_struct *to); | ||
12 | |||
13 | void arch_switch_to(struct task_struct *to) | ||
14 | { | ||
15 | int err = arch_switch_tls(to); | ||
16 | if (!err) | ||
17 | return; | ||
18 | |||
19 | if (err != -EINVAL) | ||
20 | printk(KERN_WARNING "arch_switch_tls failed, errno %d, " | ||
21 | "not EINVAL\n", -err); | ||
22 | else | ||
23 | printk(KERN_WARNING "arch_switch_tls failed, errno = EINVAL\n"); | ||
24 | } | ||
25 | |||
26 | int is_syscall(unsigned long addr) | ||
27 | { | ||
28 | unsigned short instr; | ||
29 | int n; | ||
30 | |||
31 | n = copy_from_user(&instr, (void __user *) addr, sizeof(instr)); | ||
32 | if (n) { | ||
33 | /* access_process_vm() grants access to vsyscall and stub, | ||
34 | * while copy_from_user doesn't. Maybe access_process_vm is | ||
35 | * slow, but that doesn't matter, since it will be called only | ||
36 | * in case of singlestepping, if copy_from_user failed. | ||
37 | */ | ||
38 | n = access_process_vm(current, addr, &instr, sizeof(instr), 0); | ||
39 | if (n != sizeof(instr)) { | ||
40 | printk(KERN_ERR "is_syscall : failed to read " | ||
41 | "instruction from 0x%lx\n", addr); | ||
42 | return 1; | ||
43 | } | ||
44 | } | ||
45 | /* int 0x80 or sysenter */ | ||
46 | return (instr == 0x80cd) || (instr == 0x340f); | ||
47 | } | ||
48 | |||
49 | /* determines which flags the user has access to. */ | ||
50 | /* 1 = access 0 = no access */ | ||
51 | #define FLAG_MASK 0x00044dd5 | ||
52 | |||
53 | static const int reg_offsets[] = { | ||
54 | [EBX] = HOST_BX, | ||
55 | [ECX] = HOST_CX, | ||
56 | [EDX] = HOST_DX, | ||
57 | [ESI] = HOST_SI, | ||
58 | [EDI] = HOST_DI, | ||
59 | [EBP] = HOST_BP, | ||
60 | [EAX] = HOST_AX, | ||
61 | [DS] = HOST_DS, | ||
62 | [ES] = HOST_ES, | ||
63 | [FS] = HOST_FS, | ||
64 | [GS] = HOST_GS, | ||
65 | [EIP] = HOST_IP, | ||
66 | [CS] = HOST_CS, | ||
67 | [EFL] = HOST_EFLAGS, | ||
68 | [UESP] = HOST_SP, | ||
69 | [SS] = HOST_SS, | ||
70 | }; | ||
71 | |||
72 | int putreg(struct task_struct *child, int regno, unsigned long value) | ||
73 | { | ||
74 | regno >>= 2; | ||
75 | switch (regno) { | ||
76 | case EBX: | ||
77 | case ECX: | ||
78 | case EDX: | ||
79 | case ESI: | ||
80 | case EDI: | ||
81 | case EBP: | ||
82 | case EAX: | ||
83 | case EIP: | ||
84 | case UESP: | ||
85 | break; | ||
86 | case FS: | ||
87 | if (value && (value & 3) != 3) | ||
88 | return -EIO; | ||
89 | break; | ||
90 | case GS: | ||
91 | if (value && (value & 3) != 3) | ||
92 | return -EIO; | ||
93 | break; | ||
94 | case DS: | ||
95 | case ES: | ||
96 | if (value && (value & 3) != 3) | ||
97 | return -EIO; | ||
98 | value &= 0xffff; | ||
99 | break; | ||
100 | case SS: | ||
101 | case CS: | ||
102 | if ((value & 3) != 3) | ||
103 | return -EIO; | ||
104 | value &= 0xffff; | ||
105 | break; | ||
106 | case EFL: | ||
107 | value &= FLAG_MASK; | ||
108 | child->thread.regs.regs.gp[HOST_EFLAGS] |= value; | ||
109 | return 0; | ||
110 | case ORIG_EAX: | ||
111 | child->thread.regs.regs.syscall = value; | ||
112 | return 0; | ||
113 | default : | ||
114 | panic("Bad register in putreg() : %d\n", regno); | ||
115 | } | ||
116 | child->thread.regs.regs.gp[reg_offsets[regno]] = value; | ||
117 | return 0; | ||
118 | } | ||
119 | |||
120 | int poke_user(struct task_struct *child, long addr, long data) | ||
121 | { | ||
122 | if ((addr & 3) || addr < 0) | ||
123 | return -EIO; | ||
124 | |||
125 | if (addr < MAX_REG_OFFSET) | ||
126 | return putreg(child, addr, data); | ||
127 | else if ((addr >= offsetof(struct user, u_debugreg[0])) && | ||
128 | (addr <= offsetof(struct user, u_debugreg[7]))) { | ||
129 | addr -= offsetof(struct user, u_debugreg[0]); | ||
130 | addr = addr >> 2; | ||
131 | if ((addr == 4) || (addr == 5)) | ||
132 | return -EIO; | ||
133 | child->thread.arch.debugregs[addr] = data; | ||
134 | return 0; | ||
135 | } | ||
136 | return -EIO; | ||
137 | } | ||
138 | |||
139 | unsigned long getreg(struct task_struct *child, int regno) | ||
140 | { | ||
141 | unsigned long mask = ~0UL; | ||
142 | |||
143 | regno >>= 2; | ||
144 | switch (regno) { | ||
145 | case ORIG_EAX: | ||
146 | return child->thread.regs.regs.syscall; | ||
147 | case FS: | ||
148 | case GS: | ||
149 | case DS: | ||
150 | case ES: | ||
151 | case SS: | ||
152 | case CS: | ||
153 | mask = 0xffff; | ||
154 | break; | ||
155 | case EIP: | ||
156 | case UESP: | ||
157 | case EAX: | ||
158 | case EBX: | ||
159 | case ECX: | ||
160 | case EDX: | ||
161 | case ESI: | ||
162 | case EDI: | ||
163 | case EBP: | ||
164 | case EFL: | ||
165 | break; | ||
166 | default: | ||
167 | panic("Bad register in getreg() : %d\n", regno); | ||
168 | } | ||
169 | return mask & child->thread.regs.regs.gp[reg_offsets[regno]]; | ||
170 | } | ||
171 | |||
172 | /* read the word at location addr in the USER area. */ | ||
173 | int peek_user(struct task_struct *child, long addr, long data) | ||
174 | { | ||
175 | unsigned long tmp; | ||
176 | |||
177 | if ((addr & 3) || addr < 0) | ||
178 | return -EIO; | ||
179 | |||
180 | tmp = 0; /* Default return condition */ | ||
181 | if (addr < MAX_REG_OFFSET) { | ||
182 | tmp = getreg(child, addr); | ||
183 | } | ||
184 | else if ((addr >= offsetof(struct user, u_debugreg[0])) && | ||
185 | (addr <= offsetof(struct user, u_debugreg[7]))) { | ||
186 | addr -= offsetof(struct user, u_debugreg[0]); | ||
187 | addr = addr >> 2; | ||
188 | tmp = child->thread.arch.debugregs[addr]; | ||
189 | } | ||
190 | return put_user(tmp, (unsigned long __user *) data); | ||
191 | } | ||
192 | |||
193 | static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) | ||
194 | { | ||
195 | int err, n, cpu = ((struct thread_info *) child->stack)->cpu; | ||
196 | struct user_i387_struct fpregs; | ||
197 | |||
198 | err = save_fp_registers(userspace_pid[cpu], (unsigned long *) &fpregs); | ||
199 | if (err) | ||
200 | return err; | ||
201 | |||
202 | n = copy_to_user(buf, &fpregs, sizeof(fpregs)); | ||
203 | if (n > 0) | ||
204 | return -EFAULT; | ||
205 | |||
206 | return n; | ||
207 | } | ||
208 | |||
209 | static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) | ||
210 | { | ||
211 | int n, cpu = ((struct thread_info *) child->stack)->cpu; | ||
212 | struct user_i387_struct fpregs; | ||
213 | |||
214 | n = copy_from_user(&fpregs, buf, sizeof(fpregs)); | ||
215 | if (n > 0) | ||
216 | return -EFAULT; | ||
217 | |||
218 | return restore_fp_registers(userspace_pid[cpu], | ||
219 | (unsigned long *) &fpregs); | ||
220 | } | ||
221 | |||
222 | static int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child) | ||
223 | { | ||
224 | int err, n, cpu = ((struct thread_info *) child->stack)->cpu; | ||
225 | struct user_fxsr_struct fpregs; | ||
226 | |||
227 | err = save_fpx_registers(userspace_pid[cpu], (unsigned long *) &fpregs); | ||
228 | if (err) | ||
229 | return err; | ||
230 | |||
231 | n = copy_to_user(buf, &fpregs, sizeof(fpregs)); | ||
232 | if (n > 0) | ||
233 | return -EFAULT; | ||
234 | |||
235 | return n; | ||
236 | } | ||
237 | |||
238 | static int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child) | ||
239 | { | ||
240 | int n, cpu = ((struct thread_info *) child->stack)->cpu; | ||
241 | struct user_fxsr_struct fpregs; | ||
242 | |||
243 | n = copy_from_user(&fpregs, buf, sizeof(fpregs)); | ||
244 | if (n > 0) | ||
245 | return -EFAULT; | ||
246 | |||
247 | return restore_fpx_registers(userspace_pid[cpu], | ||
248 | (unsigned long *) &fpregs); | ||
249 | } | ||
250 | |||
251 | long subarch_ptrace(struct task_struct *child, long request, | ||
252 | unsigned long addr, unsigned long data) | ||
253 | { | ||
254 | int ret = -EIO; | ||
255 | void __user *datap = (void __user *) data; | ||
256 | switch (request) { | ||
257 | case PTRACE_GETFPREGS: /* Get the child FPU state. */ | ||
258 | ret = get_fpregs(datap, child); | ||
259 | break; | ||
260 | case PTRACE_SETFPREGS: /* Set the child FPU state. */ | ||
261 | ret = set_fpregs(datap, child); | ||
262 | break; | ||
263 | case PTRACE_GETFPXREGS: /* Get the child FPU state. */ | ||
264 | ret = get_fpxregs(datap, child); | ||
265 | break; | ||
266 | case PTRACE_SETFPXREGS: /* Set the child FPU state. */ | ||
267 | ret = set_fpxregs(datap, child); | ||
268 | break; | ||
269 | default: | ||
270 | ret = -EIO; | ||
271 | } | ||
272 | return ret; | ||
273 | } | ||
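The putreg()/getreg() pair above services the word-sized PTRACE_PEEKUSER/PTRACE_POKEUSER requests that reach UML through poke_user()/peek_user(). A minimal sketch of the tracer side follows; it assumes an i386 host (where struct user exposes regs.eip) and keeps error handling to a minimum.

#include <stdio.h>
#include <stddef.h>
#include <signal.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <sys/wait.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);			/* let the parent inspect us */
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	/* Byte offset into the USER area; getreg() turns it into EIP >> 2. */
	long ip = ptrace(PTRACE_PEEKUSER, pid,
			 (void *) offsetof(struct user, regs.eip), NULL);
	printf("child eip = %#lx\n", ip);
	ptrace(PTRACE_KILL, pid, NULL, NULL);
	return 0;
}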
diff --git a/arch/x86/um/ptrace_64.c b/arch/x86/um/ptrace_64.c new file mode 100644 index 000000000000..3b52bf0b418a --- /dev/null +++ b/arch/x86/um/ptrace_64.c | |||
@@ -0,0 +1,271 @@ | |||
1 | /* | ||
2 | * Copyright 2003 PathScale, Inc. | ||
3 | * Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
4 | * | ||
5 | * Licensed under the GPL | ||
6 | */ | ||
7 | |||
8 | #include <linux/mm.h> | ||
9 | #include <linux/sched.h> | ||
10 | #include <linux/errno.h> | ||
11 | #define __FRAME_OFFSETS | ||
12 | #include <asm/ptrace.h> | ||
13 | #include <asm/uaccess.h> | ||
14 | |||
15 | /* | ||
16 | * determines which flags the user has access to. | ||
17 | * 1 = access 0 = no access | ||
18 | */ | ||
19 | #define FLAG_MASK 0x44dd5UL | ||
20 | |||
21 | static const int reg_offsets[] = | ||
22 | { | ||
23 | [R8 >> 3] = HOST_R8, | ||
24 | [R9 >> 3] = HOST_R9, | ||
25 | [R10 >> 3] = HOST_R10, | ||
26 | [R11 >> 3] = HOST_R11, | ||
27 | [R12 >> 3] = HOST_R12, | ||
28 | [R13 >> 3] = HOST_R13, | ||
29 | [R14 >> 3] = HOST_R14, | ||
30 | [R15 >> 3] = HOST_R15, | ||
31 | [RIP >> 3] = HOST_IP, | ||
32 | [RSP >> 3] = HOST_SP, | ||
33 | [RAX >> 3] = HOST_AX, | ||
34 | [RBX >> 3] = HOST_BX, | ||
35 | [RCX >> 3] = HOST_CX, | ||
36 | [RDX >> 3] = HOST_DX, | ||
37 | [RSI >> 3] = HOST_SI, | ||
38 | [RDI >> 3] = HOST_DI, | ||
39 | [RBP >> 3] = HOST_BP, | ||
40 | [CS >> 3] = HOST_CS, | ||
41 | [SS >> 3] = HOST_SS, | ||
42 | [FS_BASE >> 3] = HOST_FS_BASE, | ||
43 | [GS_BASE >> 3] = HOST_GS_BASE, | ||
44 | [DS >> 3] = HOST_DS, | ||
45 | [ES >> 3] = HOST_ES, | ||
46 | [FS >> 3] = HOST_FS, | ||
47 | [GS >> 3] = HOST_GS, | ||
48 | [EFLAGS >> 3] = HOST_EFLAGS, | ||
49 | [ORIG_RAX >> 3] = HOST_ORIG_AX, | ||
50 | }; | ||
51 | |||
52 | int putreg(struct task_struct *child, int regno, unsigned long value) | ||
53 | { | ||
54 | #ifdef TIF_IA32 | ||
55 | /* | ||
56 | * Some code in the 64bit emulation may not be 64bit clean. | ||
57 | * Don't take any chances. | ||
58 | */ | ||
59 | if (test_tsk_thread_flag(child, TIF_IA32)) | ||
60 | value &= 0xffffffff; | ||
61 | #endif | ||
62 | switch (regno) { | ||
63 | case R8: | ||
64 | case R9: | ||
65 | case R10: | ||
66 | case R11: | ||
67 | case R12: | ||
68 | case R13: | ||
69 | case R14: | ||
70 | case R15: | ||
71 | case RIP: | ||
72 | case RSP: | ||
73 | case RAX: | ||
74 | case RBX: | ||
75 | case RCX: | ||
76 | case RDX: | ||
77 | case RSI: | ||
78 | case RDI: | ||
79 | case RBP: | ||
80 | case ORIG_RAX: | ||
81 | break; | ||
82 | |||
83 | case FS: | ||
84 | case GS: | ||
85 | case DS: | ||
86 | case ES: | ||
87 | case SS: | ||
88 | case CS: | ||
89 | if (value && (value & 3) != 3) | ||
90 | return -EIO; | ||
91 | value &= 0xffff; | ||
92 | break; | ||
93 | |||
94 | case FS_BASE: | ||
95 | case GS_BASE: | ||
96 | if (!((value >> 48) == 0 || (value >> 48) == 0xffff)) | ||
97 | return -EIO; | ||
98 | break; | ||
99 | |||
100 | case EFLAGS: | ||
101 | value &= FLAG_MASK; | ||
102 | child->thread.regs.regs.gp[HOST_EFLAGS] |= value; | ||
103 | return 0; | ||
104 | |||
105 | default: | ||
106 | panic("Bad register in putreg(): %d\n", regno); | ||
107 | } | ||
108 | |||
109 | child->thread.regs.regs.gp[reg_offsets[regno >> 3]] = value; | ||
110 | return 0; | ||
111 | } | ||
112 | |||
113 | int poke_user(struct task_struct *child, long addr, long data) | ||
114 | { | ||
115 | if ((addr & 3) || addr < 0) | ||
116 | return -EIO; | ||
117 | |||
118 | if (addr < MAX_REG_OFFSET) | ||
119 | return putreg(child, addr, data); | ||
120 | else if ((addr >= offsetof(struct user, u_debugreg[0])) && | ||
121 | (addr <= offsetof(struct user, u_debugreg[7]))) { | ||
122 | addr -= offsetof(struct user, u_debugreg[0]); | ||
123 | addr = addr >> 2; | ||
124 | if ((addr == 4) || (addr == 5)) | ||
125 | return -EIO; | ||
126 | child->thread.arch.debugregs[addr] = data; | ||
127 | return 0; | ||
128 | } | ||
129 | return -EIO; | ||
130 | } | ||
131 | |||
132 | unsigned long getreg(struct task_struct *child, int regno) | ||
133 | { | ||
134 | unsigned long mask = ~0UL; | ||
135 | #ifdef TIF_IA32 | ||
136 | if (test_tsk_thread_flag(child, TIF_IA32)) | ||
137 | mask = 0xffffffff; | ||
138 | #endif | ||
139 | switch (regno) { | ||
140 | case R8: | ||
141 | case R9: | ||
142 | case R10: | ||
143 | case R11: | ||
144 | case R12: | ||
145 | case R13: | ||
146 | case R14: | ||
147 | case R15: | ||
148 | case RIP: | ||
149 | case RSP: | ||
150 | case RAX: | ||
151 | case RBX: | ||
152 | case RCX: | ||
153 | case RDX: | ||
154 | case RSI: | ||
155 | case RDI: | ||
156 | case RBP: | ||
157 | case ORIG_RAX: | ||
158 | case EFLAGS: | ||
159 | case FS_BASE: | ||
160 | case GS_BASE: | ||
161 | break; | ||
162 | case FS: | ||
163 | case GS: | ||
164 | case DS: | ||
165 | case ES: | ||
166 | case SS: | ||
167 | case CS: | ||
168 | mask = 0xffff; | ||
169 | break; | ||
170 | default: | ||
171 | panic("Bad register in getreg: %d\n", regno); | ||
172 | } | ||
173 | return mask & child->thread.regs.regs.gp[reg_offsets[regno >> 3]]; | ||
174 | } | ||
175 | |||
176 | int peek_user(struct task_struct *child, long addr, long data) | ||
177 | { | ||
178 | /* read the word at location addr in the USER area. */ | ||
179 | unsigned long tmp; | ||
180 | |||
181 | if ((addr & 3) || addr < 0) | ||
182 | return -EIO; | ||
183 | |||
184 | tmp = 0; /* Default return condition */ | ||
185 | if (addr < MAX_REG_OFFSET) | ||
186 | tmp = getreg(child, addr); | ||
187 | else if ((addr >= offsetof(struct user, u_debugreg[0])) && | ||
188 | (addr <= offsetof(struct user, u_debugreg[7]))) { | ||
189 | addr -= offsetof(struct user, u_debugreg[0]); | ||
190 | addr = addr >> 2; | ||
191 | tmp = child->thread.arch.debugregs[addr]; | ||
192 | } | ||
193 | return put_user(tmp, (unsigned long *) data); | ||
194 | } | ||
195 | |||
196 | /* XXX Mostly copied from sys-i386 */ | ||
197 | int is_syscall(unsigned long addr) | ||
198 | { | ||
199 | unsigned short instr; | ||
200 | int n; | ||
201 | |||
202 | n = copy_from_user(&instr, (void __user *) addr, sizeof(instr)); | ||
203 | if (n) { | ||
204 | /* | ||
205 | * access_process_vm() grants access to vsyscall and stub, | ||
206 | * while copy_from_user doesn't. Maybe access_process_vm is | ||
207 | * slow, but that doesn't matter, since it will be called only | ||
208 | * in case of singlestepping, if copy_from_user failed. | ||
209 | */ | ||
210 | n = access_process_vm(current, addr, &instr, sizeof(instr), 0); | ||
211 | if (n != sizeof(instr)) { | ||
212 | printk("is_syscall : failed to read instruction from " | ||
213 | "0x%lx\n", addr); | ||
214 | return 1; | ||
215 | } | ||
216 | } | ||
217 | /* syscall (bytes 0x0f 0x05, read little-endian as 0x050f) */ | ||
218 | return instr == 0x050f; | ||
219 | } | ||
220 | |||
221 | static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) | ||
222 | { | ||
223 | int err, n, cpu = ((struct thread_info *) child->stack)->cpu; | ||
224 | long fpregs[HOST_FP_SIZE]; | ||
225 | |||
226 | BUG_ON(sizeof(*buf) != sizeof(fpregs)); | ||
227 | err = save_fp_registers(userspace_pid[cpu], fpregs); | ||
228 | if (err) | ||
229 | return err; | ||
230 | |||
231 | n = copy_to_user(buf, fpregs, sizeof(fpregs)); | ||
232 | if (n > 0) | ||
233 | return -EFAULT; | ||
234 | |||
235 | return n; | ||
236 | } | ||
237 | |||
238 | static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) | ||
239 | { | ||
240 | int n, cpu = ((struct thread_info *) child->stack)->cpu; | ||
241 | long fpregs[HOST_FP_SIZE]; | ||
242 | |||
243 | BUG_ON(sizeof(*buf) != sizeof(fpregs)); | ||
244 | n = copy_from_user(fpregs, buf, sizeof(fpregs)); | ||
245 | if (n > 0) | ||
246 | return -EFAULT; | ||
247 | |||
248 | return restore_fp_registers(userspace_pid[cpu], fpregs); | ||
249 | } | ||
250 | |||
251 | long subarch_ptrace(struct task_struct *child, long request, | ||
252 | unsigned long addr, unsigned long data) | ||
253 | { | ||
254 | int ret = -EIO; | ||
255 | void __user *datap = (void __user *) data; | ||
256 | |||
257 | switch (request) { | ||
258 | case PTRACE_GETFPREGS: /* Get the child FPU state. */ | ||
259 | ret = get_fpregs(datap, child); | ||
260 | break; | ||
261 | case PTRACE_SETFPREGS: /* Set the child FPU state. */ | ||
262 | ret = set_fpregs(datap, child); | ||
263 | break; | ||
264 | case PTRACE_ARCH_PRCTL: | ||
265 | /* XXX Calls ptrace on the host - needs some SMP thinking */ | ||
266 | ret = arch_prctl(child, data, (void __user *) addr); | ||
267 | break; | ||
268 | } | ||
269 | |||
270 | return ret; | ||
271 | } | ||
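is_syscall() above only has to recognise the two-byte x86_64 syscall opcode at the reported instruction pointer. A small stand-alone sketch of the same comparison, with a user-space memcpy() standing in for copy_from_user()/access_process_vm():

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static int looks_like_syscall(const void *ip)
{
	uint16_t instr;

	memcpy(&instr, ip, sizeof(instr));	/* kernel side uses copy_from_user() */
	return instr == 0x050f;			/* 0x0f 0x05 loaded little-endian */
}

int main(void)
{
	static const unsigned char text[] = { 0x0f, 0x05, 0xc3 };	/* syscall; ret */

	printf("%d\n", looks_like_syscall(text));	/* prints 1 */
	return 0;
}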
diff --git a/arch/x86/um/ptrace_user.c b/arch/x86/um/ptrace_user.c new file mode 100644 index 000000000000..3960ca1dd35a --- /dev/null +++ b/arch/x86/um/ptrace_user.c | |||
@@ -0,0 +1,21 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #include <errno.h> | ||
7 | #include "ptrace_user.h" | ||
8 | |||
9 | int ptrace_getregs(long pid, unsigned long *regs_out) | ||
10 | { | ||
11 | if (ptrace(PTRACE_GETREGS, pid, 0, regs_out) < 0) | ||
12 | return -errno; | ||
13 | return 0; | ||
14 | } | ||
15 | |||
16 | int ptrace_setregs(long pid, unsigned long *regs) | ||
17 | { | ||
18 | if (ptrace(PTRACE_SETREGS, pid, 0, regs) < 0) | ||
19 | return -errno; | ||
20 | return 0; | ||
21 | } | ||
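Both wrappers return -errno instead of making the caller inspect errno. A hedged usage sketch; the MAX_REG_NR value is an assumption standing in for the generated UM_FRAME_SIZE-based constant, and the dump loop is purely illustrative.

#include <stdio.h>
#include <sys/types.h>

#define MAX_REG_NR 27	/* assumption: UM_FRAME_SIZE / sizeof(long) on x86_64 */

extern int ptrace_getregs(long pid, unsigned long *regs_out);

static void dump_regs(pid_t pid)
{
	unsigned long regs[MAX_REG_NR];
	int err = ptrace_getregs(pid, regs);

	if (err < 0) {
		fprintf(stderr, "ptrace_getregs failed, errno = %d\n", -err);
		return;
	}
	for (int i = 0; i < MAX_REG_NR; i++)
		printf("gp[%d] = %#lx\n", i, regs[i]);
}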
diff --git a/arch/x86/um/setjmp_32.S b/arch/x86/um/setjmp_32.S new file mode 100644 index 000000000000..b766792c9933 --- /dev/null +++ b/arch/x86/um/setjmp_32.S | |||
@@ -0,0 +1,58 @@ | |||
1 | # | ||
2 | # arch/i386/setjmp.S | ||
3 | # | ||
4 | # setjmp/longjmp for the i386 architecture | ||
5 | # | ||
6 | |||
7 | # | ||
8 | # The jmp_buf is assumed to contain the following, in order: | ||
9 | # %ebx | ||
10 | # %esp | ||
11 | # %ebp | ||
12 | # %esi | ||
13 | # %edi | ||
14 | # <return address> | ||
15 | # | ||
16 | |||
17 | .text | ||
18 | .align 4 | ||
19 | .globl setjmp | ||
20 | .type setjmp, @function | ||
21 | setjmp: | ||
22 | #ifdef _REGPARM | ||
23 | movl %eax,%edx | ||
24 | #else | ||
25 | movl 4(%esp),%edx | ||
26 | #endif | ||
27 | popl %ecx # Return address, and adjust the stack | ||
28 | xorl %eax,%eax # Return value | ||
29 | movl %ebx,(%edx) | ||
30 | movl %esp,4(%edx) # Post-return %esp! | ||
31 | pushl %ecx # Make the call/return stack happy | ||
32 | movl %ebp,8(%edx) | ||
33 | movl %esi,12(%edx) | ||
34 | movl %edi,16(%edx) | ||
35 | movl %ecx,20(%edx) # Return address | ||
36 | ret | ||
37 | |||
38 | .size setjmp,.-setjmp | ||
39 | |||
40 | .text | ||
41 | .align 4 | ||
42 | .globl longjmp | ||
43 | .type longjmp, @function | ||
44 | longjmp: | ||
45 | #ifdef _REGPARM | ||
46 | xchgl %eax,%edx | ||
47 | #else | ||
48 | movl 4(%esp),%edx # jmp_ptr address | ||
49 | movl 8(%esp),%eax # Return value | ||
50 | #endif | ||
51 | movl (%edx),%ebx | ||
52 | movl 4(%edx),%esp | ||
53 | movl 8(%edx),%ebp | ||
54 | movl 12(%edx),%esi | ||
55 | movl 16(%edx),%edi | ||
56 | jmp *20(%edx) | ||
57 | |||
58 | .size longjmp,.-longjmp | ||
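These are klibc-derived setjmp/longjmp routines operating on UML's own jmp_buf (sysdep/archsetjmp.h), not the libc ones. A hedged usage sketch under that assumption; the extern declarations below mirror the asm entry points rather than <setjmp.h>:

#include <stdio.h>
#include "sysdep/archsetjmp.h"		/* UML's jmp_buf, not the libc one */

extern int setjmp(jmp_buf env);
extern void longjmp(jmp_buf env, int val);

static jmp_buf resume;

static void fail(void)
{
	longjmp(resume, 1);	/* unwinds back into main() with setjmp() == 1 */
}

int main(void)
{
	if (setjmp(resume) == 0) {
		puts("first pass");
		fail();
	}
	puts("resumed via longjmp");
	return 0;
}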
diff --git a/arch/x86/um/setjmp_64.S b/arch/x86/um/setjmp_64.S new file mode 100644 index 000000000000..45f547b4043e --- /dev/null +++ b/arch/x86/um/setjmp_64.S | |||
@@ -0,0 +1,54 @@ | |||
1 | # | ||
2 | # arch/x86_64/setjmp.S | ||
3 | # | ||
4 | # setjmp/longjmp for the x86-64 architecture | ||
5 | # | ||
6 | |||
7 | # | ||
8 | # The jmp_buf is assumed to contain the following, in order: | ||
9 | # %rbx | ||
10 | # %rsp (post-return) | ||
11 | # %rbp | ||
12 | # %r12 | ||
13 | # %r13 | ||
14 | # %r14 | ||
15 | # %r15 | ||
16 | # <return address> | ||
17 | # | ||
18 | |||
19 | .text | ||
20 | .align 4 | ||
21 | .globl setjmp | ||
22 | .type setjmp, @function | ||
23 | setjmp: | ||
24 | pop %rsi # Return address, and adjust the stack | ||
25 | xorl %eax,%eax # Return value | ||
26 | movq %rbx,(%rdi) | ||
27 | movq %rsp,8(%rdi) # Post-return %rsp! | ||
28 | push %rsi # Make the call/return stack happy | ||
29 | movq %rbp,16(%rdi) | ||
30 | movq %r12,24(%rdi) | ||
31 | movq %r13,32(%rdi) | ||
32 | movq %r14,40(%rdi) | ||
33 | movq %r15,48(%rdi) | ||
34 | movq %rsi,56(%rdi) # Return address | ||
35 | ret | ||
36 | |||
37 | .size setjmp,.-setjmp | ||
38 | |||
39 | .text | ||
40 | .align 4 | ||
41 | .globl longjmp | ||
42 | .type longjmp, @function | ||
43 | longjmp: | ||
44 | movl %esi,%eax # Return value (int) | ||
45 | movq (%rdi),%rbx | ||
46 | movq 8(%rdi),%rsp | ||
47 | movq 16(%rdi),%rbp | ||
48 | movq 24(%rdi),%r12 | ||
49 | movq 32(%rdi),%r13 | ||
50 | movq 40(%rdi),%r14 | ||
51 | movq 48(%rdi),%r15 | ||
52 | jmp *56(%rdi) | ||
53 | |||
54 | .size longjmp,.-longjmp | ||
diff --git a/arch/x86/um/shared/sysdep/archsetjmp.h b/arch/x86/um/shared/sysdep/archsetjmp.h new file mode 100644 index 000000000000..ff7766d28226 --- /dev/null +++ b/arch/x86/um/shared/sysdep/archsetjmp.h | |||
@@ -0,0 +1,5 @@ | |||
1 | #ifdef __i386__ | ||
2 | #include "archsetjmp_32.h" | ||
3 | #else | ||
4 | #include "archsetjmp_64.h" | ||
5 | #endif | ||
diff --git a/arch/x86/um/shared/sysdep/archsetjmp_32.h b/arch/x86/um/shared/sysdep/archsetjmp_32.h new file mode 100644 index 000000000000..0f312085ce1d --- /dev/null +++ b/arch/x86/um/shared/sysdep/archsetjmp_32.h | |||
@@ -0,0 +1,22 @@ | |||
1 | /* | ||
2 | * arch/um/include/sysdep-i386/archsetjmp.h | ||
3 | */ | ||
4 | |||
5 | #ifndef _KLIBC_ARCHSETJMP_H | ||
6 | #define _KLIBC_ARCHSETJMP_H | ||
7 | |||
8 | struct __jmp_buf { | ||
9 | unsigned int __ebx; | ||
10 | unsigned int __esp; | ||
11 | unsigned int __ebp; | ||
12 | unsigned int __esi; | ||
13 | unsigned int __edi; | ||
14 | unsigned int __eip; | ||
15 | }; | ||
16 | |||
17 | typedef struct __jmp_buf jmp_buf[1]; | ||
18 | |||
19 | #define JB_IP __eip | ||
20 | #define JB_SP __esp | ||
21 | |||
22 | #endif /* _KLIBC_ARCHSETJMP_H */ | ||
diff --git a/arch/x86/um/shared/sysdep/archsetjmp_64.h b/arch/x86/um/shared/sysdep/archsetjmp_64.h new file mode 100644 index 000000000000..2af8f12ca161 --- /dev/null +++ b/arch/x86/um/shared/sysdep/archsetjmp_64.h | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * arch/um/include/sysdep-x86_64/archsetjmp.h | ||
3 | */ | ||
4 | |||
5 | #ifndef _KLIBC_ARCHSETJMP_H | ||
6 | #define _KLIBC_ARCHSETJMP_H | ||
7 | |||
8 | struct __jmp_buf { | ||
9 | unsigned long __rbx; | ||
10 | unsigned long __rsp; | ||
11 | unsigned long __rbp; | ||
12 | unsigned long __r12; | ||
13 | unsigned long __r13; | ||
14 | unsigned long __r14; | ||
15 | unsigned long __r15; | ||
16 | unsigned long __rip; | ||
17 | }; | ||
18 | |||
19 | typedef struct __jmp_buf jmp_buf[1]; | ||
20 | |||
21 | #define JB_IP __rip | ||
22 | #define JB_SP __rsp | ||
23 | |||
24 | #endif /* _KLIBC_ARCHSETJMP_H */ | ||
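JB_IP and JB_SP are exported so UML can fabricate a jump buffer by hand: a new kernel thread is started by pointing a buffer at its entry function and stack and then longjmp()ing to it (the in-tree caller lives under arch/um/os-Linux; the helper below is an illustrative sketch, not the actual function).

#include "sysdep/archsetjmp.h"

/* Illustrative only: prime a jump buffer so longjmp(*buf, 1) enters
 * entry() on the given stack. */
static void prime_jmp_buf(jmp_buf *buf, void (*entry)(void), void *stack_top)
{
	(*buf)[0].JB_IP = (unsigned long) entry;
	(*buf)[0].JB_SP = (unsigned long) stack_top - sizeof(void *);
}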
diff --git a/arch/x86/um/shared/sysdep/faultinfo.h b/arch/x86/um/shared/sysdep/faultinfo.h new file mode 100644 index 000000000000..862ecb1c7781 --- /dev/null +++ b/arch/x86/um/shared/sysdep/faultinfo.h | |||
@@ -0,0 +1,5 @@ | |||
1 | #ifdef __i386__ | ||
2 | #include "faultinfo_32.h" | ||
3 | #else | ||
4 | #include "faultinfo_64.h" | ||
5 | #endif | ||
diff --git a/arch/x86/um/shared/sysdep/faultinfo_32.h b/arch/x86/um/shared/sysdep/faultinfo_32.h new file mode 100644 index 000000000000..a26086b8a800 --- /dev/null +++ b/arch/x86/um/shared/sysdep/faultinfo_32.h | |||
@@ -0,0 +1,35 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2004 Fujitsu Siemens Computers GmbH | ||
3 | * Author: Bodo Stroesser <bstroesser@fujitsu-siemens.com> | ||
4 | * Licensed under the GPL | ||
5 | */ | ||
6 | |||
7 | #ifndef __FAULTINFO_I386_H | ||
8 | #define __FAULTINFO_I386_H | ||
9 | |||
10 | /* this structure contains the full arch-specific faultinfo | ||
11 | * from the traps. | ||
12 | * On i386, ptrace_faultinfo unfortunately doesn't provide | ||
13 | * all the info, since trap_no is missing. | ||
14 | * All common elements are defined at the same position in | ||
15 | * both structures, thus making it easy to copy the | ||
16 | * contents without knowledge about the structure elements. | ||
17 | */ | ||
18 | struct faultinfo { | ||
19 | int error_code; /* in ptrace_faultinfo misleadingly called is_write */ | ||
20 | unsigned long cr2; /* in ptrace_faultinfo called addr */ | ||
21 | int trap_no; /* missing in ptrace_faultinfo */ | ||
22 | }; | ||
23 | |||
24 | #define FAULT_WRITE(fi) ((fi).error_code & 2) | ||
25 | #define FAULT_ADDRESS(fi) ((fi).cr2) | ||
26 | |||
27 | /* This is Page Fault */ | ||
28 | #define SEGV_IS_FIXABLE(fi) ((fi)->trap_no == 14) | ||
29 | |||
30 | /* SKAS3 has no trap_no on i386, but get_skas_faultinfo() sets it to 0. */ | ||
31 | #define SEGV_MAYBE_FIXABLE(fi) ((fi)->trap_no == 0 && ptrace_faultinfo) | ||
32 | |||
33 | #define PTRACE_FULL_FAULTINFO 0 | ||
34 | |||
35 | #endif | ||
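A hedged sketch of how the macros above are consumed by UML's SEGV path: decide whether the signal encodes a fixable page fault and, if so, hand the address and access type to the fault handler. fix_page_fault() and the extern ptrace_faultinfo flag are stand-ins for the real symbols in the generic UML code.

extern int ptrace_faultinfo;			/* set if the host reports faultinfo */
extern int fix_page_fault(unsigned long address, int is_write);

static int handle_segv(struct faultinfo *fi)
{
	if (!SEGV_IS_FIXABLE(fi) && !SEGV_MAYBE_FIXABLE(fi))
		return -1;			/* a real fault, not a page fault */

	return fix_page_fault(FAULT_ADDRESS(*fi),	/* cr2 */
			      FAULT_WRITE(*fi));	/* non-zero on write faults */
}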
diff --git a/arch/x86/um/shared/sysdep/faultinfo_64.h b/arch/x86/um/shared/sysdep/faultinfo_64.h new file mode 100644 index 000000000000..f811cbe15d62 --- /dev/null +++ b/arch/x86/um/shared/sysdep/faultinfo_64.h | |||
@@ -0,0 +1,35 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2004 Fujitsu Siemens Computers GmbH | ||
3 | * Author: Bodo Stroesser <bstroesser@fujitsu-siemens.com> | ||
4 | * Licensed under the GPL | ||
5 | */ | ||
6 | |||
7 | #ifndef __FAULTINFO_X86_64_H | ||
8 | #define __FAULTINFO_X86_64_H | ||
9 | |||
10 | /* this structure contains the full arch-specific faultinfo | ||
11 | * from the traps. | ||
12 | * On i386, ptrace_faultinfo unfortunately doesn't provide | ||
13 | * all the info, since trap_no is missing. | ||
14 | * All common elements are defined at the same position in | ||
15 | * both structures, thus making it easy to copy the | ||
16 | * contents without knowledge about the structure elements. | ||
17 | */ | ||
18 | struct faultinfo { | ||
19 | int error_code; /* in ptrace_faultinfo misleadingly called is_write */ | ||
20 | unsigned long cr2; /* in ptrace_faultinfo called addr */ | ||
21 | int trap_no; /* missing in ptrace_faultinfo */ | ||
22 | }; | ||
23 | |||
24 | #define FAULT_WRITE(fi) ((fi).error_code & 2) | ||
25 | #define FAULT_ADDRESS(fi) ((fi).cr2) | ||
26 | |||
27 | /* This is Page Fault */ | ||
28 | #define SEGV_IS_FIXABLE(fi) ((fi)->trap_no == 14) | ||
29 | |||
30 | /* No broken SKAS API, which doesn't pass trap_no, here. */ | ||
31 | #define SEGV_MAYBE_FIXABLE(fi) 0 | ||
32 | |||
33 | #define PTRACE_FULL_FAULTINFO 1 | ||
34 | |||
35 | #endif | ||
diff --git a/arch/x86/um/shared/sysdep/kernel-offsets.h b/arch/x86/um/shared/sysdep/kernel-offsets.h new file mode 100644 index 000000000000..5868526b5eef --- /dev/null +++ b/arch/x86/um/shared/sysdep/kernel-offsets.h | |||
@@ -0,0 +1,21 @@ | |||
1 | #include <linux/stddef.h> | ||
2 | #include <linux/sched.h> | ||
3 | #include <linux/elf.h> | ||
4 | #include <linux/crypto.h> | ||
5 | #include <asm/mman.h> | ||
6 | |||
7 | #define DEFINE(sym, val) \ | ||
8 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
9 | |||
10 | #define STR(x) #x | ||
11 | #define DEFINE_STR(sym, val) asm volatile("\n->" #sym " " STR(val) " " #val: : ) | ||
12 | |||
13 | #define BLANK() asm volatile("\n->" : : ) | ||
14 | |||
15 | #define OFFSET(sym, str, mem) \ | ||
16 | DEFINE(sym, offsetof(struct str, mem)); | ||
17 | |||
18 | void foo(void) | ||
19 | { | ||
20 | #include <common-offsets.h> | ||
21 | } | ||
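The DEFINE() trick deserves a note: the asm statement plants a "->SYM value" marker in the compiler's assembly output, and a kbuild sed rule later rewrites those markers into #defines in the generated user_constants.h. The symbol and numeric value below are examples only.

/* Inside foo(), via common-offsets.h: */
DEFINE(UM_KERN_PAGE_SIZE, PAGE_SIZE);
/* the .s file then contains, roughly:  ->UM_KERN_PAGE_SIZE $4096 PAGE_SIZE */
/* which the build turns into:          #define UM_KERN_PAGE_SIZE 4096      */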
diff --git a/arch/x86/um/shared/sysdep/mcontext.h b/arch/x86/um/shared/sysdep/mcontext.h new file mode 100644 index 000000000000..b724c54da316 --- /dev/null +++ b/arch/x86/um/shared/sysdep/mcontext.h | |||
@@ -0,0 +1,31 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #ifndef __SYS_SIGCONTEXT_X86_H | ||
7 | #define __SYS_SIGCONTEXT_X86_H | ||
8 | |||
9 | extern void get_regs_from_mc(struct uml_pt_regs *, mcontext_t *); | ||
10 | |||
11 | #ifdef __i386__ | ||
12 | |||
13 | #define GET_FAULTINFO_FROM_MC(fi, mc) \ | ||
14 | { \ | ||
15 | (fi).cr2 = (mc)->cr2; \ | ||
16 | (fi).error_code = (mc)->gregs[REG_ERR]; \ | ||
17 | (fi).trap_no = (mc)->gregs[REG_TRAPNO]; \ | ||
18 | } | ||
19 | |||
20 | #else | ||
21 | |||
22 | #define GET_FAULTINFO_FROM_MC(fi, mc) \ | ||
23 | { \ | ||
24 | (fi).cr2 = (mc)->gregs[REG_CR2]; \ | ||
25 | (fi).error_code = (mc)->gregs[REG_ERR]; \ | ||
26 | (fi).trap_no = (mc)->gregs[REG_TRAPNO]; \ | ||
27 | } | ||
28 | |||
29 | #endif | ||
30 | |||
31 | #endif | ||
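GET_FAULTINFO_FROM_MC() is meant to be used from an SA_SIGINFO signal handler, where the third argument is a ucontext_t whose uc_mcontext is the mcontext_t the macro expects. A hedged x86_64 sketch (struct faultinfo comes from sysdep/faultinfo.h; the handler body is illustrative):

#include <signal.h>
#include <ucontext.h>
#include "sysdep/faultinfo.h"

static struct faultinfo last_fault;

static void segv_handler(int sig, siginfo_t *si, void *p)
{
	mcontext_t *mc = &((ucontext_t *) p)->uc_mcontext;

	GET_FAULTINFO_FROM_MC(last_fault, mc);	/* fills cr2, error_code, trap_no */
}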
diff --git a/arch/x86/um/shared/sysdep/ptrace.h b/arch/x86/um/shared/sysdep/ptrace.h new file mode 100644 index 000000000000..711b1621747f --- /dev/null +++ b/arch/x86/um/shared/sysdep/ptrace.h | |||
@@ -0,0 +1,5 @@ | |||
1 | #ifdef __i386__ | ||
2 | #include "ptrace_32.h" | ||
3 | #else | ||
4 | #include "ptrace_64.h" | ||
5 | #endif | ||
diff --git a/arch/x86/um/shared/sysdep/ptrace_32.h b/arch/x86/um/shared/sysdep/ptrace_32.h new file mode 100644 index 000000000000..befd1df32ed0 --- /dev/null +++ b/arch/x86/um/shared/sysdep/ptrace_32.h | |||
@@ -0,0 +1,114 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #ifndef __SYSDEP_I386_PTRACE_H | ||
7 | #define __SYSDEP_I386_PTRACE_H | ||
8 | |||
9 | #include <generated/user_constants.h> | ||
10 | #include "sysdep/faultinfo.h" | ||
11 | |||
12 | #define MAX_REG_NR (UM_FRAME_SIZE / sizeof(unsigned long)) | ||
13 | #define MAX_REG_OFFSET (UM_FRAME_SIZE) | ||
14 | |||
15 | static inline void update_debugregs(int seq) {} | ||
16 | |||
17 | /* syscall emulation path in ptrace */ | ||
18 | |||
19 | #ifndef PTRACE_SYSEMU | ||
20 | #define PTRACE_SYSEMU 31 | ||
21 | #endif | ||
22 | |||
23 | void set_using_sysemu(int value); | ||
24 | int get_using_sysemu(void); | ||
25 | extern int sysemu_supported; | ||
26 | |||
27 | #define REGS_IP(r) ((r)[HOST_IP]) | ||
28 | #define REGS_SP(r) ((r)[HOST_SP]) | ||
29 | #define REGS_EFLAGS(r) ((r)[HOST_EFLAGS]) | ||
30 | #define REGS_EAX(r) ((r)[HOST_AX]) | ||
31 | #define REGS_EBX(r) ((r)[HOST_BX]) | ||
32 | #define REGS_ECX(r) ((r)[HOST_CX]) | ||
33 | #define REGS_EDX(r) ((r)[HOST_DX]) | ||
34 | #define REGS_ESI(r) ((r)[HOST_SI]) | ||
35 | #define REGS_EDI(r) ((r)[HOST_DI]) | ||
36 | #define REGS_EBP(r) ((r)[HOST_BP]) | ||
37 | #define REGS_CS(r) ((r)[HOST_CS]) | ||
38 | #define REGS_SS(r) ((r)[HOST_SS]) | ||
39 | #define REGS_DS(r) ((r)[HOST_DS]) | ||
40 | #define REGS_ES(r) ((r)[HOST_ES]) | ||
41 | #define REGS_FS(r) ((r)[HOST_FS]) | ||
42 | #define REGS_GS(r) ((r)[HOST_GS]) | ||
43 | |||
44 | #define REGS_SET_SYSCALL_RETURN(r, res) REGS_EAX(r) = (res) | ||
45 | |||
46 | #define IP_RESTART_SYSCALL(ip) ((ip) -= 2) | ||
47 | #define REGS_RESTART_SYSCALL(r) IP_RESTART_SYSCALL(REGS_IP(r)) | ||
48 | |||
49 | #ifndef PTRACE_SYSEMU_SINGLESTEP | ||
50 | #define PTRACE_SYSEMU_SINGLESTEP 32 | ||
51 | #endif | ||
52 | |||
53 | struct uml_pt_regs { | ||
54 | unsigned long gp[MAX_REG_NR]; | ||
55 | unsigned long fp[HOST_FPX_SIZE]; | ||
56 | struct faultinfo faultinfo; | ||
57 | long syscall; | ||
58 | int is_user; | ||
59 | }; | ||
60 | |||
61 | #define EMPTY_UML_PT_REGS { } | ||
62 | |||
63 | #define UPT_IP(r) REGS_IP((r)->gp) | ||
64 | #define UPT_SP(r) REGS_SP((r)->gp) | ||
65 | #define UPT_EFLAGS(r) REGS_EFLAGS((r)->gp) | ||
66 | #define UPT_EAX(r) REGS_EAX((r)->gp) | ||
67 | #define UPT_EBX(r) REGS_EBX((r)->gp) | ||
68 | #define UPT_ECX(r) REGS_ECX((r)->gp) | ||
69 | #define UPT_EDX(r) REGS_EDX((r)->gp) | ||
70 | #define UPT_ESI(r) REGS_ESI((r)->gp) | ||
71 | #define UPT_EDI(r) REGS_EDI((r)->gp) | ||
72 | #define UPT_EBP(r) REGS_EBP((r)->gp) | ||
73 | #define UPT_ORIG_EAX(r) ((r)->syscall) | ||
74 | #define UPT_CS(r) REGS_CS((r)->gp) | ||
75 | #define UPT_SS(r) REGS_SS((r)->gp) | ||
76 | #define UPT_DS(r) REGS_DS((r)->gp) | ||
77 | #define UPT_ES(r) REGS_ES((r)->gp) | ||
78 | #define UPT_FS(r) REGS_FS((r)->gp) | ||
79 | #define UPT_GS(r) REGS_GS((r)->gp) | ||
80 | |||
81 | #define UPT_SYSCALL_ARG1(r) UPT_EBX(r) | ||
82 | #define UPT_SYSCALL_ARG2(r) UPT_ECX(r) | ||
83 | #define UPT_SYSCALL_ARG3(r) UPT_EDX(r) | ||
84 | #define UPT_SYSCALL_ARG4(r) UPT_ESI(r) | ||
85 | #define UPT_SYSCALL_ARG5(r) UPT_EDI(r) | ||
86 | #define UPT_SYSCALL_ARG6(r) UPT_EBP(r) | ||
87 | |||
88 | extern int user_context(unsigned long sp); | ||
89 | |||
90 | #define UPT_IS_USER(r) ((r)->is_user) | ||
91 | |||
92 | struct syscall_args { | ||
93 | unsigned long args[6]; | ||
94 | }; | ||
95 | |||
96 | #define SYSCALL_ARGS(r) ((struct syscall_args) \ | ||
97 | { .args = { UPT_SYSCALL_ARG1(r), \ | ||
98 | UPT_SYSCALL_ARG2(r), \ | ||
99 | UPT_SYSCALL_ARG3(r), \ | ||
100 | UPT_SYSCALL_ARG4(r), \ | ||
101 | UPT_SYSCALL_ARG5(r), \ | ||
102 | UPT_SYSCALL_ARG6(r) } } ) | ||
103 | |||
104 | #define UPT_RESTART_SYSCALL(r) REGS_RESTART_SYSCALL((r)->gp) | ||
105 | |||
106 | #define UPT_ORIG_SYSCALL(r) UPT_EAX(r) | ||
107 | #define UPT_SYSCALL_NR(r) UPT_ORIG_EAX(r) | ||
108 | #define UPT_SYSCALL_RET(r) UPT_EAX(r) | ||
109 | |||
110 | #define UPT_FAULTINFO(r) (&(r)->faultinfo) | ||
111 | |||
112 | extern void arch_init_registers(int pid); | ||
113 | |||
114 | #endif | ||
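A hedged sketch of how the UPT_* accessors and SYSCALL_ARGS() above are consumed: pulling the syscall number and argument block out of a struct uml_pt_regs at syscall entry. The printk() dump is illustrative; the real consumer is the generic syscall path.

static void show_syscall(struct uml_pt_regs *r)
{
	struct syscall_args a = SYSCALL_ARGS(r);

	printk("syscall %ld(%lx, %lx, %lx, %lx, %lx, %lx)\n",
	       UPT_SYSCALL_NR(r),
	       a.args[0], a.args[1], a.args[2],
	       a.args[3], a.args[4], a.args[5]);
}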
diff --git a/arch/x86/um/shared/sysdep/ptrace_64.h b/arch/x86/um/shared/sysdep/ptrace_64.h new file mode 100644 index 000000000000..031edc53ac57 --- /dev/null +++ b/arch/x86/um/shared/sysdep/ptrace_64.h | |||
@@ -0,0 +1,157 @@ | |||
1 | /* | ||
2 | * Copyright 2003 PathScale, Inc. | ||
3 | * Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
4 | * | ||
5 | * Licensed under the GPL | ||
6 | */ | ||
7 | |||
8 | #ifndef __SYSDEP_X86_64_PTRACE_H | ||
9 | #define __SYSDEP_X86_64_PTRACE_H | ||
10 | |||
11 | #include <generated/user_constants.h> | ||
12 | #include "sysdep/faultinfo.h" | ||
13 | |||
14 | #define MAX_REG_OFFSET (UM_FRAME_SIZE) | ||
15 | #define MAX_REG_NR ((MAX_REG_OFFSET) / sizeof(unsigned long)) | ||
16 | |||
17 | #define REGS_IP(r) ((r)[HOST_IP]) | ||
18 | #define REGS_SP(r) ((r)[HOST_SP]) | ||
19 | |||
20 | #define REGS_RBX(r) ((r)[HOST_BX]) | ||
21 | #define REGS_RCX(r) ((r)[HOST_CX]) | ||
22 | #define REGS_RDX(r) ((r)[HOST_DX]) | ||
23 | #define REGS_RSI(r) ((r)[HOST_SI]) | ||
24 | #define REGS_RDI(r) ((r)[HOST_DI]) | ||
25 | #define REGS_RBP(r) ((r)[HOST_BP]) | ||
26 | #define REGS_RAX(r) ((r)[HOST_AX]) | ||
27 | #define REGS_R8(r) ((r)[HOST_R8]) | ||
28 | #define REGS_R9(r) ((r)[HOST_R9]) | ||
29 | #define REGS_R10(r) ((r)[HOST_R10]) | ||
30 | #define REGS_R11(r) ((r)[HOST_R11]) | ||
31 | #define REGS_R12(r) ((r)[HOST_R12]) | ||
32 | #define REGS_R13(r) ((r)[HOST_R13]) | ||
33 | #define REGS_R14(r) ((r)[HOST_R14]) | ||
34 | #define REGS_R15(r) ((r)[HOST_R15]) | ||
35 | #define REGS_CS(r) ((r)[HOST_CS]) | ||
36 | #define REGS_EFLAGS(r) ((r)[HOST_EFLAGS]) | ||
37 | #define REGS_SS(r) ((r)[HOST_SS]) | ||
38 | |||
39 | #define HOST_FS_BASE 21 | ||
40 | #define HOST_GS_BASE 22 | ||
41 | #define HOST_DS 23 | ||
42 | #define HOST_ES 24 | ||
43 | #define HOST_FS 25 | ||
44 | #define HOST_GS 26 | ||
45 | |||
46 | /* Also defined in asm/ptrace-x86_64.h, but not in libc headers. So, these | ||
47 | * are already defined for kernel code, but not for userspace code. | ||
48 | */ | ||
49 | #ifndef FS_BASE | ||
50 | /* These aren't defined in ptrace.h, but exist in struct user_regs_struct, | ||
51 | * which is what x86_64 ptrace actually uses. | ||
52 | */ | ||
53 | #define FS_BASE (HOST_FS_BASE * sizeof(long)) | ||
54 | #define GS_BASE (HOST_GS_BASE * sizeof(long)) | ||
55 | #define DS (HOST_DS * sizeof(long)) | ||
56 | #define ES (HOST_ES * sizeof(long)) | ||
57 | #define FS (HOST_FS * sizeof(long)) | ||
58 | #define GS (HOST_GS * sizeof(long)) | ||
59 | #endif | ||
60 | |||
61 | #define REGS_FS_BASE(r) ((r)[HOST_FS_BASE]) | ||
62 | #define REGS_GS_BASE(r) ((r)[HOST_GS_BASE]) | ||
63 | #define REGS_DS(r) ((r)[HOST_DS]) | ||
64 | #define REGS_ES(r) ((r)[HOST_ES]) | ||
65 | #define REGS_FS(r) ((r)[HOST_FS]) | ||
66 | #define REGS_GS(r) ((r)[HOST_GS]) | ||
67 | |||
68 | #define REGS_ORIG_RAX(r) ((r)[HOST_ORIG_AX]) | ||
69 | |||
70 | #define REGS_SET_SYSCALL_RETURN(r, res) REGS_RAX(r) = (res) | ||
71 | |||
72 | #define IP_RESTART_SYSCALL(ip) ((ip) -= 2) | ||
73 | #define REGS_RESTART_SYSCALL(r) IP_RESTART_SYSCALL(REGS_IP(r)) | ||
74 | |||
75 | #define REGS_FAULT_ADDR(r) ((r)->fault_addr) | ||
76 | |||
77 | #define REGS_FAULT_WRITE(r) FAULT_WRITE((r)->fault_type) | ||
78 | |||
79 | #define REGS_TRAP(r) ((r)->trap_type) | ||
80 | |||
81 | #define REGS_ERR(r) ((r)->fault_type) | ||
82 | |||
83 | struct uml_pt_regs { | ||
84 | unsigned long gp[MAX_REG_NR]; | ||
85 | unsigned long fp[HOST_FP_SIZE]; | ||
86 | struct faultinfo faultinfo; | ||
87 | long syscall; | ||
88 | int is_user; | ||
89 | }; | ||
90 | |||
91 | #define EMPTY_UML_PT_REGS { } | ||
92 | |||
93 | #define UPT_RBX(r) REGS_RBX((r)->gp) | ||
94 | #define UPT_RCX(r) REGS_RCX((r)->gp) | ||
95 | #define UPT_RDX(r) REGS_RDX((r)->gp) | ||
96 | #define UPT_RSI(r) REGS_RSI((r)->gp) | ||
97 | #define UPT_RDI(r) REGS_RDI((r)->gp) | ||
98 | #define UPT_RBP(r) REGS_RBP((r)->gp) | ||
99 | #define UPT_RAX(r) REGS_RAX((r)->gp) | ||
100 | #define UPT_R8(r) REGS_R8((r)->gp) | ||
101 | #define UPT_R9(r) REGS_R9((r)->gp) | ||
102 | #define UPT_R10(r) REGS_R10((r)->gp) | ||
103 | #define UPT_R11(r) REGS_R11((r)->gp) | ||
104 | #define UPT_R12(r) REGS_R12((r)->gp) | ||
105 | #define UPT_R13(r) REGS_R13((r)->gp) | ||
106 | #define UPT_R14(r) REGS_R14((r)->gp) | ||
107 | #define UPT_R15(r) REGS_R15((r)->gp) | ||
108 | #define UPT_CS(r) REGS_CS((r)->gp) | ||
109 | #define UPT_FS_BASE(r) REGS_FS_BASE((r)->gp) | ||
110 | #define UPT_FS(r) REGS_FS((r)->gp) | ||
111 | #define UPT_GS_BASE(r) REGS_GS_BASE((r)->gp) | ||
112 | #define UPT_GS(r) REGS_GS((r)->gp) | ||
113 | #define UPT_DS(r) REGS_DS((r)->gp) | ||
114 | #define UPT_ES(r) REGS_ES((r)->gp) | ||
115 | #define UPT_CS(r) REGS_CS((r)->gp) | ||
116 | #define UPT_SS(r) REGS_SS((r)->gp) | ||
117 | #define UPT_ORIG_RAX(r) REGS_ORIG_RAX((r)->gp) | ||
118 | |||
119 | #define UPT_IP(r) REGS_IP((r)->gp) | ||
120 | #define UPT_SP(r) REGS_SP((r)->gp) | ||
121 | |||
122 | #define UPT_EFLAGS(r) REGS_EFLAGS((r)->gp) | ||
123 | #define UPT_SYSCALL_NR(r) ((r)->syscall) | ||
124 | #define UPT_SYSCALL_RET(r) UPT_RAX(r) | ||
125 | |||
126 | extern int user_context(unsigned long sp); | ||
127 | |||
128 | #define UPT_IS_USER(r) ((r)->is_user) | ||
129 | |||
130 | #define UPT_SYSCALL_ARG1(r) UPT_RDI(r) | ||
131 | #define UPT_SYSCALL_ARG2(r) UPT_RSI(r) | ||
132 | #define UPT_SYSCALL_ARG3(r) UPT_RDX(r) | ||
133 | #define UPT_SYSCALL_ARG4(r) UPT_R10(r) | ||
134 | #define UPT_SYSCALL_ARG5(r) UPT_R8(r) | ||
135 | #define UPT_SYSCALL_ARG6(r) UPT_R9(r) | ||
136 | |||
137 | struct syscall_args { | ||
138 | unsigned long args[6]; | ||
139 | }; | ||
140 | |||
141 | #define SYSCALL_ARGS(r) ((struct syscall_args) \ | ||
142 | { .args = { UPT_SYSCALL_ARG1(r), \ | ||
143 | UPT_SYSCALL_ARG2(r), \ | ||
144 | UPT_SYSCALL_ARG3(r), \ | ||
145 | UPT_SYSCALL_ARG4(r), \ | ||
146 | UPT_SYSCALL_ARG5(r), \ | ||
147 | UPT_SYSCALL_ARG6(r) } } ) | ||
148 | |||
149 | #define UPT_RESTART_SYSCALL(r) REGS_RESTART_SYSCALL((r)->gp) | ||
150 | |||
151 | #define UPT_FAULTINFO(r) (&(r)->faultinfo) | ||
152 | |||
153 | static inline void arch_init_registers(int pid) | ||
154 | { | ||
155 | } | ||
156 | |||
157 | #endif | ||
diff --git a/arch/x86/um/shared/sysdep/ptrace_user.h b/arch/x86/um/shared/sysdep/ptrace_user.h new file mode 100644 index 000000000000..16cd6b5e71f7 --- /dev/null +++ b/arch/x86/um/shared/sysdep/ptrace_user.h | |||
@@ -0,0 +1,27 @@ | |||
1 | #include <generated/user_constants.h> | ||
2 | |||
3 | #define PT_OFFSET(r) ((r) * sizeof(long)) | ||
4 | |||
5 | #define PT_SYSCALL_NR(regs) ((regs)[HOST_ORIG_AX]) | ||
6 | #define PT_SYSCALL_NR_OFFSET PT_OFFSET(HOST_ORIG_AX) | ||
7 | |||
8 | #define PT_SYSCALL_RET_OFFSET PT_OFFSET(HOST_AX) | ||
9 | |||
10 | #define REGS_IP_INDEX HOST_IP | ||
11 | #define REGS_SP_INDEX HOST_SP | ||
12 | |||
13 | #ifdef __i386__ | ||
14 | #define FP_SIZE ((HOST_FPX_SIZE > HOST_FP_SIZE) ? HOST_FPX_SIZE : HOST_FP_SIZE) | ||
15 | #else | ||
16 | #define FP_SIZE HOST_FP_SIZE | ||
17 | |||
18 | /* | ||
19 | * x86_64 FC3 doesn't define this in /usr/include/linux/ptrace.h even though | ||
20 | * it's defined in the kernel's include/linux/ptrace.h. Additionally, use the | ||
21 | * 2.4 name and value for 2.4 host compatibility. | ||
22 | */ | ||
23 | #ifndef PTRACE_OLDSETOPTIONS | ||
24 | #define PTRACE_OLDSETOPTIONS 21 | ||
25 | #endif | ||
26 | |||
27 | #endif | ||
diff --git a/arch/x86/um/shared/sysdep/skas_ptrace.h b/arch/x86/um/shared/sysdep/skas_ptrace.h new file mode 100644 index 000000000000..453febe98993 --- /dev/null +++ b/arch/x86/um/shared/sysdep/skas_ptrace.h | |||
@@ -0,0 +1,22 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #ifndef __SYSDEP_X86_SKAS_PTRACE_H | ||
7 | #define __SYSDEP_X86_SKAS_PTRACE_H | ||
8 | |||
9 | struct ptrace_faultinfo { | ||
10 | int is_write; | ||
11 | unsigned long addr; | ||
12 | }; | ||
13 | |||
14 | struct ptrace_ldt { | ||
15 | int func; | ||
16 | void *ptr; | ||
17 | unsigned long bytecount; | ||
18 | }; | ||
19 | |||
20 | #define PTRACE_LDT 54 | ||
21 | |||
22 | #endif | ||
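These ptrace extensions only exist on hosts carrying the out-of-tree SKAS3 patch, so the sketch below is illustrative rather than something a stock kernel accepts; func 0 is assumed to mean "read", mirroring modify_ldt().

#include <sys/ptrace.h>

static int read_host_ldt(int pid, void *buf, unsigned long len)
{
	struct ptrace_ldt op = {
		.func		= 0,	/* assumed: read, as with modify_ldt() */
		.ptr		= buf,
		.bytecount	= len,
	};

	return ptrace(PTRACE_LDT, pid, 0, (unsigned long) &op);
}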
diff --git a/arch/x86/um/shared/sysdep/stub.h b/arch/x86/um/shared/sysdep/stub.h new file mode 100644 index 000000000000..bd161e300102 --- /dev/null +++ b/arch/x86/um/shared/sysdep/stub.h | |||
@@ -0,0 +1,14 @@ | |||
1 | #include <asm/unistd.h> | ||
2 | #include <sys/mman.h> | ||
3 | #include <signal.h> | ||
4 | #include "as-layout.h" | ||
5 | #include "stub-data.h" | ||
6 | |||
7 | #ifdef __i386__ | ||
8 | #include "stub_32.h" | ||
9 | #else | ||
10 | #include "stub_64.h" | ||
11 | #endif | ||
12 | |||
13 | extern void stub_segv_handler(int, siginfo_t *, void *); | ||
14 | extern void stub_clone_handler(void); | ||
diff --git a/arch/x86/um/shared/sysdep/stub_32.h b/arch/x86/um/shared/sysdep/stub_32.h new file mode 100644 index 000000000000..51fd256c75f0 --- /dev/null +++ b/arch/x86/um/shared/sysdep/stub_32.h | |||
@@ -0,0 +1,93 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #ifndef __SYSDEP_STUB_H | ||
7 | #define __SYSDEP_STUB_H | ||
8 | |||
9 | #include <asm/ptrace.h> | ||
10 | |||
11 | #define STUB_SYSCALL_RET EAX | ||
12 | #define STUB_MMAP_NR __NR_mmap2 | ||
13 | #define MMAP_OFFSET(o) ((o) >> UM_KERN_PAGE_SHIFT) | ||
14 | |||
15 | static inline long stub_syscall0(long syscall) | ||
16 | { | ||
17 | long ret; | ||
18 | |||
19 | __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall)); | ||
20 | |||
21 | return ret; | ||
22 | } | ||
23 | |||
24 | static inline long stub_syscall1(long syscall, long arg1) | ||
25 | { | ||
26 | long ret; | ||
27 | |||
28 | __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1)); | ||
29 | |||
30 | return ret; | ||
31 | } | ||
32 | |||
33 | static inline long stub_syscall2(long syscall, long arg1, long arg2) | ||
34 | { | ||
35 | long ret; | ||
36 | |||
37 | __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1), | ||
38 | "c" (arg2)); | ||
39 | |||
40 | return ret; | ||
41 | } | ||
42 | |||
43 | static inline long stub_syscall3(long syscall, long arg1, long arg2, long arg3) | ||
44 | { | ||
45 | long ret; | ||
46 | |||
47 | __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1), | ||
48 | "c" (arg2), "d" (arg3)); | ||
49 | |||
50 | return ret; | ||
51 | } | ||
52 | |||
53 | static inline long stub_syscall4(long syscall, long arg1, long arg2, long arg3, | ||
54 | long arg4) | ||
55 | { | ||
56 | long ret; | ||
57 | |||
58 | __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1), | ||
59 | "c" (arg2), "d" (arg3), "S" (arg4)); | ||
60 | |||
61 | return ret; | ||
62 | } | ||
63 | |||
64 | static inline long stub_syscall5(long syscall, long arg1, long arg2, long arg3, | ||
65 | long arg4, long arg5) | ||
66 | { | ||
67 | long ret; | ||
68 | |||
69 | __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1), | ||
70 | "c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5)); | ||
71 | |||
72 | return ret; | ||
73 | } | ||
74 | |||
75 | static inline void trap_myself(void) | ||
76 | { | ||
77 | __asm("int3"); | ||
78 | } | ||
79 | |||
80 | static inline void remap_stack(int fd, unsigned long offset) | ||
81 | { | ||
82 | __asm__ volatile ("movl %%eax,%%ebp ; movl %0,%%eax ; int $0x80 ;" | ||
83 | "movl %7, %%ebx ; movl %%eax, (%%ebx)" | ||
84 | : : "g" (STUB_MMAP_NR), "b" (STUB_DATA), | ||
85 | "c" (UM_KERN_PAGE_SIZE), | ||
86 | "d" (PROT_READ | PROT_WRITE), | ||
87 | "S" (MAP_FIXED | MAP_SHARED), "D" (fd), | ||
88 | "a" (offset), | ||
89 | "i" (&((struct stub_data *) STUB_DATA)->err) | ||
90 | : "memory"); | ||
91 | } | ||
92 | |||
93 | #endif | ||
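The stub runs inside the traced child with no libc mapped, so every system call it makes has to go through these raw int $0x80 wrappers. A hedged usage sketch: unmapping a single page (the helper name and call site are illustrative).

static inline long stub_unmap_page(unsigned long addr)
{
	return stub_syscall2(__NR_munmap, addr, UM_KERN_PAGE_SIZE);
}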
diff --git a/arch/x86/um/shared/sysdep/stub_64.h b/arch/x86/um/shared/sysdep/stub_64.h new file mode 100644 index 000000000000..994df93c5ed3 --- /dev/null +++ b/arch/x86/um/shared/sysdep/stub_64.h | |||
@@ -0,0 +1,99 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #ifndef __SYSDEP_STUB_H | ||
7 | #define __SYSDEP_STUB_H | ||
8 | |||
9 | #include <sysdep/ptrace_user.h> | ||
10 | |||
11 | #define STUB_SYSCALL_RET PT_INDEX(RAX) | ||
12 | #define STUB_MMAP_NR __NR_mmap | ||
13 | #define MMAP_OFFSET(o) (o) | ||
14 | |||
15 | #define __syscall_clobber "r11","rcx","memory" | ||
16 | #define __syscall "syscall" | ||
17 | |||
18 | static inline long stub_syscall0(long syscall) | ||
19 | { | ||
20 | long ret; | ||
21 | |||
22 | __asm__ volatile (__syscall | ||
23 | : "=a" (ret) | ||
24 | : "0" (syscall) : __syscall_clobber ); | ||
25 | |||
26 | return ret; | ||
27 | } | ||
28 | |||
29 | static inline long stub_syscall2(long syscall, long arg1, long arg2) | ||
30 | { | ||
31 | long ret; | ||
32 | |||
33 | __asm__ volatile (__syscall | ||
34 | : "=a" (ret) | ||
35 | : "0" (syscall), "D" (arg1), "S" (arg2) : __syscall_clobber ); | ||
36 | |||
37 | return ret; | ||
38 | } | ||
39 | |||
40 | static inline long stub_syscall3(long syscall, long arg1, long arg2, long arg3) | ||
41 | { | ||
42 | long ret; | ||
43 | |||
44 | __asm__ volatile (__syscall | ||
45 | : "=a" (ret) | ||
46 | : "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3) | ||
47 | : __syscall_clobber ); | ||
48 | |||
49 | return ret; | ||
50 | } | ||
51 | |||
52 | static inline long stub_syscall4(long syscall, long arg1, long arg2, long arg3, | ||
53 | long arg4) | ||
54 | { | ||
55 | long ret; | ||
56 | |||
57 | __asm__ volatile ("movq %5,%%r10 ; " __syscall | ||
58 | : "=a" (ret) | ||
59 | : "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3), | ||
60 | "g" (arg4) | ||
61 | : __syscall_clobber, "r10" ); | ||
62 | |||
63 | return ret; | ||
64 | } | ||
65 | |||
66 | static inline long stub_syscall5(long syscall, long arg1, long arg2, long arg3, | ||
67 | long arg4, long arg5) | ||
68 | { | ||
69 | long ret; | ||
70 | |||
71 | __asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; " __syscall | ||
72 | : "=a" (ret) | ||
73 | : "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3), | ||
74 | "g" (arg4), "g" (arg5) | ||
75 | : __syscall_clobber, "r10", "r8" ); | ||
76 | |||
77 | return ret; | ||
78 | } | ||
79 | |||
80 | static inline void trap_myself(void) | ||
81 | { | ||
82 | __asm("int3"); | ||
83 | } | ||
84 | |||
85 | static inline void remap_stack(long fd, unsigned long offset) | ||
86 | { | ||
87 | __asm__ volatile ("movq %4,%%r10 ; movq %5,%%r8 ; " | ||
88 | "movq %6, %%r9; " __syscall "; movq %7, %%rbx ; " | ||
89 | "movq %%rax, (%%rbx)": | ||
90 | : "a" (STUB_MMAP_NR), "D" (STUB_DATA), | ||
91 | "S" (UM_KERN_PAGE_SIZE), | ||
92 | "d" (PROT_READ | PROT_WRITE), | ||
93 | "g" (MAP_FIXED | MAP_SHARED), "g" (fd), | ||
94 | "g" (offset), | ||
95 | "i" (&((struct stub_data *) STUB_DATA)->err) | ||
96 | : __syscall_clobber, "r10", "r8", "r9" ); | ||
97 | } | ||
98 | |||
99 | #endif | ||
diff --git a/arch/x86/um/shared/sysdep/syscalls.h b/arch/x86/um/shared/sysdep/syscalls.h new file mode 100644 index 000000000000..bd9a89b67e41 --- /dev/null +++ b/arch/x86/um/shared/sysdep/syscalls.h | |||
@@ -0,0 +1,5 @@ | |||
1 | #ifdef __i386__ | ||
2 | #include "syscalls_32.h" | ||
3 | #else | ||
4 | #include "syscalls_64.h" | ||
5 | #endif | ||
diff --git a/arch/x86/um/shared/sysdep/syscalls_32.h b/arch/x86/um/shared/sysdep/syscalls_32.h new file mode 100644 index 000000000000..05cb796aecb5 --- /dev/null +++ b/arch/x86/um/shared/sysdep/syscalls_32.h | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2000 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #include "asm/unistd.h" | ||
7 | #include "sysdep/ptrace.h" | ||
8 | |||
9 | typedef long syscall_handler_t(struct pt_regs); | ||
10 | |||
11 | /* Not declared on x86, incompatible declarations on x86_64, so these have | ||
12 | * to go here rather than in sys_call_table.c | ||
13 | */ | ||
14 | extern syscall_handler_t sys_rt_sigaction; | ||
15 | |||
16 | extern syscall_handler_t *sys_call_table[]; | ||
17 | |||
18 | #define EXECUTE_SYSCALL(syscall, regs) \ | ||
19 | ((long (*)(struct syscall_args)) \ | ||
20 | (*sys_call_table[syscall]))(SYSCALL_ARGS(®s->regs)) | ||
diff --git a/arch/x86/um/shared/sysdep/syscalls_64.h b/arch/x86/um/shared/sysdep/syscalls_64.h new file mode 100644 index 000000000000..8a7d5e1da98e --- /dev/null +++ b/arch/x86/um/shared/sysdep/syscalls_64.h | |||
@@ -0,0 +1,32 @@ | |||
1 | /* | ||
2 | * Copyright 2003 PathScale, Inc. | ||
3 | * | ||
4 | * Licensed under the GPL | ||
5 | */ | ||
6 | |||
7 | #ifndef __SYSDEP_X86_64_SYSCALLS_H__ | ||
8 | #define __SYSDEP_X86_64_SYSCALLS_H__ | ||
9 | |||
10 | #include <linux/msg.h> | ||
11 | #include <linux/shm.h> | ||
12 | |||
13 | typedef long syscall_handler_t(void); | ||
14 | |||
15 | extern syscall_handler_t *sys_call_table[]; | ||
16 | |||
17 | #define EXECUTE_SYSCALL(syscall, regs) \ | ||
18 | (((long (*)(long, long, long, long, long, long)) \ | ||
19 | (*sys_call_table[syscall]))(UPT_SYSCALL_ARG1(®s->regs), \ | ||
20 | UPT_SYSCALL_ARG2(®s->regs), \ | ||
21 | UPT_SYSCALL_ARG3(®s->regs), \ | ||
22 | UPT_SYSCALL_ARG4(®s->regs), \ | ||
23 | UPT_SYSCALL_ARG5(®s->regs), \ | ||
24 | UPT_SYSCALL_ARG6(®s->regs))) | ||
25 | |||
26 | extern long old_mmap(unsigned long addr, unsigned long len, | ||
27 | unsigned long prot, unsigned long flags, | ||
28 | unsigned long fd, unsigned long pgoff); | ||
29 | extern syscall_handler_t sys_modify_ldt; | ||
30 | extern syscall_handler_t sys_arch_prctl; | ||
31 | |||
32 | #endif | ||
diff --git a/arch/x86/um/shared/sysdep/tls.h b/arch/x86/um/shared/sysdep/tls.h new file mode 100644 index 000000000000..27cce00c6b30 --- /dev/null +++ b/arch/x86/um/shared/sysdep/tls.h | |||
@@ -0,0 +1,39 @@ | |||
1 | #ifndef _SYSDEP_TLS_H | ||
2 | #define _SYSDEP_TLS_H | ||
3 | |||
4 | # ifndef __KERNEL__ | ||
5 | |||
6 | /* Change the name to avoid conflicts with the original one from <asm/ldt.h>, | ||
7 | * which may be named user_desc (in 2.4, and in the header matching its API, | ||
8 | * it was named modify_ldt_ldt_s). */ | ||
9 | |||
10 | typedef struct um_dup_user_desc { | ||
11 | unsigned int entry_number; | ||
12 | unsigned int base_addr; | ||
13 | unsigned int limit; | ||
14 | unsigned int seg_32bit:1; | ||
15 | unsigned int contents:2; | ||
16 | unsigned int read_exec_only:1; | ||
17 | unsigned int limit_in_pages:1; | ||
18 | unsigned int seg_not_present:1; | ||
19 | unsigned int useable:1; | ||
20 | #ifdef __x86_64__ | ||
21 | unsigned int lm:1; | ||
22 | #endif | ||
23 | } user_desc_t; | ||
24 | |||
25 | # else /* __KERNEL__ */ | ||
26 | |||
27 | typedef struct user_desc user_desc_t; | ||
28 | |||
29 | # endif /* __KERNEL__ */ | ||
30 | |||
31 | extern int os_set_thread_area(user_desc_t *info, int pid); | ||
32 | extern int os_get_thread_area(user_desc_t *info, int pid); | ||
33 | |||
34 | #ifdef __i386__ | ||
35 | #define GDT_ENTRY_TLS_MIN_I386 6 | ||
36 | #define GDT_ENTRY_TLS_MIN_X86_64 12 | ||
37 | #endif | ||
38 | |||
39 | #endif /* _SYSDEP_TLS_H */ | ||
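A hedged sketch of how a TLS slot is pushed to the host through this interface; the descriptor field values are illustrative, and the real caller is the set_thread_area path in the UML tls code.

static int install_tls(int pid, int gdt_entry, unsigned long base)
{
	user_desc_t d = {
		.entry_number	= gdt_entry,
		.base_addr	= base,
		.limit		= 0xfffff,
		.seg_32bit	= 1,
		.limit_in_pages	= 1,
		.useable	= 1,
	};

	return os_set_thread_area(&d, pid);
}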
diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c new file mode 100644 index 000000000000..4883b9546016 --- /dev/null +++ b/arch/x86/um/signal.c | |||
@@ -0,0 +1,624 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2003 PathScale, Inc. | ||
3 | * Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
4 | * Licensed under the GPL | ||
5 | */ | ||
6 | |||
7 | |||
8 | #include <linux/personality.h> | ||
9 | #include <linux/ptrace.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <asm/unistd.h> | ||
12 | #include <asm/uaccess.h> | ||
13 | #include <asm/ucontext.h> | ||
14 | #include "frame_kern.h" | ||
15 | #include "skas.h" | ||
16 | |||
17 | #ifdef CONFIG_X86_32 | ||
18 | |||
19 | /* | ||
20 | * FPU tag word conversions. | ||
21 | */ | ||
22 | |||
23 | static inline unsigned short twd_i387_to_fxsr(unsigned short twd) | ||
24 | { | ||
25 | unsigned int tmp; /* to avoid 16 bit prefixes in the code */ | ||
26 | |||
27 | /* Transform each pair of bits into 01 (valid) or 00 (empty) */ | ||
28 | tmp = ~twd; | ||
29 | tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */ | ||
30 | /* and move the valid bits to the lower byte. */ | ||
31 | tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */ | ||
32 | tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */ | ||
33 | tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */ | ||
34 | return tmp; | ||
35 | } | ||
36 | |||
37 | static inline unsigned long twd_fxsr_to_i387(struct user_fxsr_struct *fxsave) | ||
38 | { | ||
39 | struct _fpxreg *st = NULL; | ||
40 | unsigned long twd = (unsigned long) fxsave->twd; | ||
41 | unsigned long tag; | ||
42 | unsigned long ret = 0xffff0000; | ||
43 | int i; | ||
44 | |||
45 | #define FPREG_ADDR(f, n) ((char *)&(f)->st_space + (n) * 16) | ||
46 | |||
47 | for (i = 0; i < 8; i++) { | ||
48 | if (twd & 0x1) { | ||
49 | st = (struct _fpxreg *) FPREG_ADDR(fxsave, i); | ||
50 | |||
51 | switch (st->exponent & 0x7fff) { | ||
52 | case 0x7fff: | ||
53 | tag = 2; /* Special */ | ||
54 | break; | ||
55 | case 0x0000: | ||
56 | if ( !st->significand[0] && | ||
57 | !st->significand[1] && | ||
58 | !st->significand[2] && | ||
59 | !st->significand[3] ) { | ||
60 | tag = 1; /* Zero */ | ||
61 | } else { | ||
62 | tag = 2; /* Special */ | ||
63 | } | ||
64 | break; | ||
65 | default: | ||
66 | if (st->significand[3] & 0x8000) { | ||
67 | tag = 0; /* Valid */ | ||
68 | } else { | ||
69 | tag = 2; /* Special */ | ||
70 | } | ||
71 | break; | ||
72 | } | ||
73 | } else { | ||
74 | tag = 3; /* Empty */ | ||
75 | } | ||
76 | ret |= (tag << (2 * i)); | ||
77 | twd = twd >> 1; | ||
78 | } | ||
79 | return ret; | ||
80 | } | ||
81 | |||
82 | static int convert_fxsr_to_user(struct _fpstate __user *buf, | ||
83 | struct user_fxsr_struct *fxsave) | ||
84 | { | ||
85 | unsigned long env[7]; | ||
86 | struct _fpreg __user *to; | ||
87 | struct _fpxreg *from; | ||
88 | int i; | ||
89 | |||
90 | env[0] = (unsigned long)fxsave->cwd | 0xffff0000ul; | ||
91 | env[1] = (unsigned long)fxsave->swd | 0xffff0000ul; | ||
92 | env[2] = twd_fxsr_to_i387(fxsave); | ||
93 | env[3] = fxsave->fip; | ||
94 | env[4] = fxsave->fcs | ((unsigned long)fxsave->fop << 16); | ||
95 | env[5] = fxsave->foo; | ||
96 | env[6] = fxsave->fos; | ||
97 | |||
98 | if (__copy_to_user(buf, env, 7 * sizeof(unsigned long))) | ||
99 | return 1; | ||
100 | |||
101 | to = &buf->_st[0]; | ||
102 | from = (struct _fpxreg *) &fxsave->st_space[0]; | ||
103 | for (i = 0; i < 8; i++, to++, from++) { | ||
104 | unsigned long __user *t = (unsigned long __user *)to; | ||
105 | unsigned long *f = (unsigned long *)from; | ||
106 | |||
107 | if (__put_user(*f, t) || | ||
108 | __put_user(*(f + 1), t + 1) || | ||
109 | __put_user(from->exponent, &to->exponent)) | ||
110 | return 1; | ||
111 | } | ||
112 | return 0; | ||
113 | } | ||
114 | |||
115 | static int convert_fxsr_from_user(struct user_fxsr_struct *fxsave, | ||
116 | struct _fpstate __user *buf) | ||
117 | { | ||
118 | unsigned long env[7]; | ||
119 | struct _fpxreg *to; | ||
120 | struct _fpreg __user *from; | ||
121 | int i; | ||
122 | |||
123 | if (copy_from_user( env, buf, 7 * sizeof(long))) | ||
124 | return 1; | ||
125 | |||
126 | fxsave->cwd = (unsigned short)(env[0] & 0xffff); | ||
127 | fxsave->swd = (unsigned short)(env[1] & 0xffff); | ||
128 | fxsave->twd = twd_i387_to_fxsr((unsigned short)(env[2] & 0xffff)); | ||
129 | fxsave->fip = env[3]; | ||
130 | fxsave->fop = (unsigned short)((env[4] & 0xffff0000ul) >> 16); | ||
131 | fxsave->fcs = (env[4] & 0xffff); | ||
132 | fxsave->foo = env[5]; | ||
133 | fxsave->fos = env[6]; | ||
134 | |||
135 | to = (struct _fpxreg *) &fxsave->st_space[0]; | ||
136 | from = &buf->_st[0]; | ||
137 | for (i = 0; i < 8; i++, to++, from++) { | ||
138 | unsigned long *t = (unsigned long *)to; | ||
139 | unsigned long __user *f = (unsigned long __user *)from; | ||
140 | |||
141 | if (__get_user(*t, f) || | ||
142 | __get_user(*(t + 1), f + 1) || | ||
143 | __get_user(to->exponent, &from->exponent)) | ||
144 | return 1; | ||
145 | } | ||
146 | return 0; | ||
147 | } | ||
148 | |||
149 | extern int have_fpx_regs; | ||
150 | |||
151 | #endif | ||
152 | |||
153 | static int copy_sc_from_user(struct pt_regs *regs, | ||
154 | struct sigcontext __user *from) | ||
155 | { | ||
156 | struct sigcontext sc; | ||
157 | int err, pid; | ||
158 | |||
159 | err = copy_from_user(&sc, from, sizeof(sc)); | ||
160 | if (err) | ||
161 | return err; | ||
162 | |||
163 | #define GETREG(regno, regname) regs->regs.gp[HOST_##regno] = sc.regname | ||
164 | |||
165 | #ifdef CONFIG_X86_32 | ||
166 | GETREG(GS, gs); | ||
167 | GETREG(FS, fs); | ||
168 | GETREG(ES, es); | ||
169 | GETREG(DS, ds); | ||
170 | #endif | ||
171 | GETREG(DI, di); | ||
172 | GETREG(SI, si); | ||
173 | GETREG(BP, bp); | ||
174 | GETREG(SP, sp); | ||
175 | GETREG(BX, bx); | ||
176 | GETREG(DX, dx); | ||
177 | GETREG(CX, cx); | ||
178 | GETREG(AX, ax); | ||
179 | GETREG(IP, ip); | ||
180 | |||
181 | #ifdef CONFIG_X86_64 | ||
182 | GETREG(R8, r8); | ||
183 | GETREG(R9, r9); | ||
184 | GETREG(R10, r10); | ||
185 | GETREG(R11, r11); | ||
186 | GETREG(R12, r12); | ||
187 | GETREG(R13, r13); | ||
188 | GETREG(R14, r14); | ||
189 | GETREG(R15, r15); | ||
190 | #endif | ||
191 | |||
192 | GETREG(CS, cs); | ||
193 | GETREG(EFLAGS, flags); | ||
194 | #ifdef CONFIG_X86_32 | ||
195 | GETREG(SS, ss); | ||
196 | #endif | ||
197 | |||
198 | #undef GETREG | ||
199 | |||
200 | pid = userspace_pid[current_thread_info()->cpu]; | ||
201 | #ifdef CONFIG_X86_32 | ||
202 | if (have_fpx_regs) { | ||
203 | struct user_fxsr_struct fpx; | ||
204 | |||
205 | err = copy_from_user(&fpx, | ||
206 | &((struct _fpstate __user *)sc.fpstate)->_fxsr_env[0], | ||
207 | sizeof(struct user_fxsr_struct)); | ||
208 | if (err) | ||
209 | return 1; | ||
210 | |||
211 | err = convert_fxsr_from_user(&fpx, sc.fpstate); | ||
212 | if (err) | ||
213 | return 1; | ||
214 | |||
215 | err = restore_fpx_registers(pid, (unsigned long *) &fpx); | ||
216 | if (err < 0) { | ||
217 | printk(KERN_ERR "copy_sc_from_user - " | ||
218 | "restore_fpx_registers failed, errno = %d\n", | ||
219 | -err); | ||
220 | return 1; | ||
221 | } | ||
222 | } else | ||
223 | #endif | ||
224 | { | ||
225 | struct user_i387_struct fp; | ||
226 | |||
227 | err = copy_from_user(&fp, sc.fpstate, | ||
228 | sizeof(struct user_i387_struct)); | ||
229 | if (err) | ||
230 | return 1; | ||
231 | |||
232 | err = restore_fp_registers(pid, (unsigned long *) &fp); | ||
233 | if (err < 0) { | ||
234 | printk(KERN_ERR "copy_sc_from_user - " | ||
235 | "restore_fp_registers failed, errno = %d\n", | ||
236 | -err); | ||
237 | return 1; | ||
238 | } | ||
239 | } | ||
240 | return 0; | ||
241 | } | ||
242 | |||
243 | static int copy_sc_to_user(struct sigcontext __user *to, | ||
244 | struct _fpstate __user *to_fp, struct pt_regs *regs, | ||
245 | unsigned long mask) | ||
246 | { | ||
247 | struct sigcontext sc; | ||
248 | struct faultinfo * fi = ¤t->thread.arch.faultinfo; | ||
249 | int err, pid; | ||
250 | memset(&sc, 0, sizeof(struct sigcontext)); | ||
251 | |||
252 | #define PUTREG(regno, regname) sc.regname = regs->regs.gp[HOST_##regno] | ||
253 | |||
254 | #ifdef CONFIG_X86_32 | ||
255 | PUTREG(GS, gs); | ||
256 | PUTREG(FS, fs); | ||
257 | PUTREG(ES, es); | ||
258 | PUTREG(DS, ds); | ||
259 | #endif | ||
260 | PUTREG(DI, di); | ||
261 | PUTREG(SI, si); | ||
262 | PUTREG(BP, bp); | ||
263 | PUTREG(SP, sp); | ||
264 | PUTREG(BX, bx); | ||
265 | PUTREG(DX, dx); | ||
266 | PUTREG(CX, cx); | ||
267 | PUTREG(AX, ax); | ||
268 | #ifdef CONFIG_X86_64 | ||
269 | PUTREG(R8, r8); | ||
270 | PUTREG(R9, r9); | ||
271 | PUTREG(R10, r10); | ||
272 | PUTREG(R11, r11); | ||
273 | PUTREG(R12, r12); | ||
274 | PUTREG(R13, r13); | ||
275 | PUTREG(R14, r14); | ||
276 | PUTREG(R15, r15); | ||
277 | #endif | ||
278 | |||
279 | sc.cr2 = fi->cr2; | ||
280 | sc.err = fi->error_code; | ||
281 | sc.trapno = fi->trap_no; | ||
282 | PUTREG(IP, ip); | ||
283 | PUTREG(CS, cs); | ||
284 | PUTREG(EFLAGS, flags); | ||
285 | #ifdef CONFIG_X86_32 | ||
286 | PUTREG(SP, sp_at_signal); | ||
287 | PUTREG(SS, ss); | ||
288 | #endif | ||
289 | #undef PUTREG | ||
290 | sc.oldmask = mask; | ||
291 | sc.fpstate = to_fp; | ||
292 | |||
293 | err = copy_to_user(to, &sc, sizeof(struct sigcontext)); | ||
294 | if (err) | ||
295 | return 1; | ||
296 | |||
297 | pid = userspace_pid[current_thread_info()->cpu]; | ||
298 | |||
299 | #ifdef CONFIG_X86_32 | ||
300 | if (have_fpx_regs) { | ||
301 | struct user_fxsr_struct fpx; | ||
302 | |||
303 | err = save_fpx_registers(pid, (unsigned long *) &fpx); | ||
304 | if (err < 0) { | ||
305 | printk(KERN_ERR "copy_sc_to_user - save_fpx_registers " | ||
306 | "failed, errno = %d\n", err); | ||
307 | return 1; | ||
308 | } | ||
309 | |||
310 | err = convert_fxsr_to_user(to_fp, &fpx); | ||
311 | if (err) | ||
312 | return 1; | ||
313 | |||
314 | err |= __put_user(fpx.swd, &to_fp->status); | ||
315 | err |= __put_user(X86_FXSR_MAGIC, &to_fp->magic); | ||
316 | if (err) | ||
317 | return 1; | ||
318 | |||
319 | if (copy_to_user(&to_fp->_fxsr_env[0], &fpx, | ||
320 | sizeof(struct user_fxsr_struct))) | ||
321 | return 1; | ||
322 | } else | ||
323 | #endif | ||
324 | { | ||
325 | struct user_i387_struct fp; | ||
326 | |||
327 | err = save_fp_registers(pid, (unsigned long *) &fp); | ||
328 | if (copy_to_user(to_fp, &fp, sizeof(struct user_i387_struct))) | ||
329 | return 1; | ||
330 | } | ||
331 | |||
332 | return 0; | ||
333 | } | ||
334 | |||
335 | #ifdef CONFIG_X86_32 | ||
336 | static int copy_ucontext_to_user(struct ucontext __user *uc, | ||
337 | struct _fpstate __user *fp, sigset_t *set, | ||
338 | unsigned long sp) | ||
339 | { | ||
340 | int err = 0; | ||
341 | |||
342 | err |= put_user(current->sas_ss_sp, &uc->uc_stack.ss_sp); | ||
343 | err |= put_user(sas_ss_flags(sp), &uc->uc_stack.ss_flags); | ||
344 | err |= put_user(current->sas_ss_size, &uc->uc_stack.ss_size); | ||
345 | err |= copy_sc_to_user(&uc->uc_mcontext, fp, &current->thread.regs, 0); | ||
346 | err |= copy_to_user(&uc->uc_sigmask, set, sizeof(*set)); | ||
347 | return err; | ||
348 | } | ||
349 | |||
350 | struct sigframe | ||
351 | { | ||
352 | char __user *pretcode; | ||
353 | int sig; | ||
354 | struct sigcontext sc; | ||
355 | struct _fpstate fpstate; | ||
356 | unsigned long extramask[_NSIG_WORDS-1]; | ||
357 | char retcode[8]; | ||
358 | }; | ||
359 | |||
360 | struct rt_sigframe | ||
361 | { | ||
362 | char __user *pretcode; | ||
363 | int sig; | ||
364 | struct siginfo __user *pinfo; | ||
365 | void __user *puc; | ||
366 | struct siginfo info; | ||
367 | struct ucontext uc; | ||
368 | struct _fpstate fpstate; | ||
369 | char retcode[8]; | ||
370 | }; | ||
371 | |||
372 | int setup_signal_stack_sc(unsigned long stack_top, int sig, | ||
373 | struct k_sigaction *ka, struct pt_regs *regs, | ||
374 | sigset_t *mask) | ||
375 | { | ||
376 | struct sigframe __user *frame; | ||
377 | void __user *restorer; | ||
378 | int err = 0; | ||
379 | |||
380 | /* This is the same calculation as i386 - ((sp + 4) & 15) == 0 */ | ||
381 | stack_top = ((stack_top + 4) & -16UL) - 4; | ||
382 | frame = (struct sigframe __user *) stack_top - 1; | ||
383 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | ||
384 | return 1; | ||
385 | |||
386 | restorer = frame->retcode; | ||
387 | if (ka->sa.sa_flags & SA_RESTORER) | ||
388 | restorer = ka->sa.sa_restorer; | ||
389 | |||
390 | err |= __put_user(restorer, &frame->pretcode); | ||
391 | err |= __put_user(sig, &frame->sig); | ||
392 | err |= copy_sc_to_user(&frame->sc, &frame->fpstate, regs, mask->sig[0]); | ||
393 | if (_NSIG_WORDS > 1) | ||
394 | err |= __copy_to_user(&frame->extramask, &mask->sig[1], | ||
395 | sizeof(frame->extramask)); | ||
396 | |||
397 | /* | ||
398 | * This is popl %eax ; movl $,%eax ; int $0x80 | ||
399 | * | ||
400 | * WE DO NOT USE IT ANY MORE! It's only left here for historical | ||
401 | * reasons and because gdb uses it as a signature to notice | ||
402 | * signal handler stack frames. | ||
403 | */ | ||
404 | err |= __put_user(0xb858, (short __user *)(frame->retcode+0)); | ||
405 | err |= __put_user(__NR_sigreturn, (int __user *)(frame->retcode+2)); | ||
406 | err |= __put_user(0x80cd, (short __user *)(frame->retcode+6)); | ||
407 | |||
408 | if (err) | ||
409 | return err; | ||
410 | |||
411 | PT_REGS_SP(regs) = (unsigned long) frame; | ||
412 | PT_REGS_IP(regs) = (unsigned long) ka->sa.sa_handler; | ||
413 | PT_REGS_EAX(regs) = (unsigned long) sig; | ||
414 | PT_REGS_EDX(regs) = (unsigned long) 0; | ||
415 | PT_REGS_ECX(regs) = (unsigned long) 0; | ||
416 | |||
417 | if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED)) | ||
418 | ptrace_notify(SIGTRAP); | ||
419 | return 0; | ||
420 | } | ||
421 | |||
422 | int setup_signal_stack_si(unsigned long stack_top, int sig, | ||
423 | struct k_sigaction *ka, struct pt_regs *regs, | ||
424 | siginfo_t *info, sigset_t *mask) | ||
425 | { | ||
426 | struct rt_sigframe __user *frame; | ||
427 | void __user *restorer; | ||
428 | int err = 0; | ||
429 | |||
430 | stack_top &= -8UL; | ||
431 | frame = (struct rt_sigframe __user *) stack_top - 1; | ||
432 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | ||
433 | return 1; | ||
434 | |||
435 | restorer = frame->retcode; | ||
436 | if (ka->sa.sa_flags & SA_RESTORER) | ||
437 | restorer = ka->sa.sa_restorer; | ||
438 | |||
439 | err |= __put_user(restorer, &frame->pretcode); | ||
440 | err |= __put_user(sig, &frame->sig); | ||
441 | err |= __put_user(&frame->info, &frame->pinfo); | ||
442 | err |= __put_user(&frame->uc, &frame->puc); | ||
443 | err |= copy_siginfo_to_user(&frame->info, info); | ||
444 | err |= copy_ucontext_to_user(&frame->uc, &frame->fpstate, mask, | ||
445 | PT_REGS_SP(regs)); | ||
446 | |||
447 | /* | ||
448 | * This is movl $,%eax ; int $0x80 | ||
449 | * | ||
450 | * WE DO NOT USE IT ANY MORE! It's only left here for historical | ||
451 | * reasons and because gdb uses it as a signature to notice | ||
452 | * signal handler stack frames. | ||
453 | */ | ||
454 | err |= __put_user(0xb8, (char __user *)(frame->retcode+0)); | ||
455 | err |= __put_user(__NR_rt_sigreturn, (int __user *)(frame->retcode+1)); | ||
456 | err |= __put_user(0x80cd, (short __user *)(frame->retcode+5)); | ||
457 | |||
458 | if (err) | ||
459 | return err; | ||
460 | |||
461 | PT_REGS_SP(regs) = (unsigned long) frame; | ||
462 | PT_REGS_IP(regs) = (unsigned long) ka->sa.sa_handler; | ||
463 | PT_REGS_EAX(regs) = (unsigned long) sig; | ||
464 | PT_REGS_EDX(regs) = (unsigned long) &frame->info; | ||
465 | PT_REGS_ECX(regs) = (unsigned long) &frame->uc; | ||
466 | |||
467 | if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED)) | ||
468 | ptrace_notify(SIGTRAP); | ||
469 | return 0; | ||
470 | } | ||
471 | |||
472 | long sys_sigreturn(struct pt_regs *regs) | ||
473 | { | ||
474 | unsigned long sp = PT_REGS_SP(&current->thread.regs); | ||
475 | struct sigframe __user *frame = (struct sigframe __user *)(sp - 8); | ||
476 | sigset_t set; | ||
477 | struct sigcontext __user *sc = &frame->sc; | ||
478 | unsigned long __user *oldmask = &sc->oldmask; | ||
479 | unsigned long __user *extramask = frame->extramask; | ||
480 | int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long); | ||
481 | |||
482 | if (copy_from_user(&set.sig[0], oldmask, sizeof(set.sig[0])) || | ||
483 | copy_from_user(&set.sig[1], extramask, sig_size)) | ||
484 | goto segfault; | ||
485 | |||
486 | sigdelsetmask(&set, ~_BLOCKABLE); | ||
487 | set_current_blocked(&set); | ||
488 | |||
489 | if (copy_sc_from_user(&current->thread.regs, sc)) | ||
490 | goto segfault; | ||
491 | |||
492 | /* Avoid ERESTART handling */ | ||
493 | PT_REGS_SYSCALL_NR(&current->thread.regs) = -1; | ||
494 | return PT_REGS_SYSCALL_RET(&current->thread.regs); | ||
495 | |||
496 | segfault: | ||
497 | force_sig(SIGSEGV, current); | ||
498 | return 0; | ||
499 | } | ||
500 | |||
501 | #else | ||
502 | |||
503 | struct rt_sigframe | ||
504 | { | ||
505 | char __user *pretcode; | ||
506 | struct ucontext uc; | ||
507 | struct siginfo info; | ||
508 | struct _fpstate fpstate; | ||
509 | }; | ||
510 | |||
511 | int setup_signal_stack_si(unsigned long stack_top, int sig, | ||
512 | struct k_sigaction *ka, struct pt_regs * regs, | ||
513 | siginfo_t *info, sigset_t *set) | ||
514 | { | ||
515 | struct rt_sigframe __user *frame; | ||
516 | int err = 0; | ||
517 | struct task_struct *me = current; | ||
518 | |||
519 | frame = (struct rt_sigframe __user *) | ||
520 | round_down(stack_top - sizeof(struct rt_sigframe), 16); | ||
521 | /* Subtract 128 for a red zone and 8 for proper alignment */ | ||
522 | frame = (struct rt_sigframe __user *) ((unsigned long) frame - 128 - 8); | ||
523 | |||
524 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | ||
525 | goto out; | ||
526 | |||
527 | if (ka->sa.sa_flags & SA_SIGINFO) { | ||
528 | err |= copy_siginfo_to_user(&frame->info, info); | ||
529 | if (err) | ||
530 | goto out; | ||
531 | } | ||
532 | |||
533 | /* Create the ucontext. */ | ||
534 | err |= __put_user(0, &frame->uc.uc_flags); | ||
535 | err |= __put_user(0, &frame->uc.uc_link); | ||
536 | err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp); | ||
537 | err |= __put_user(sas_ss_flags(PT_REGS_SP(regs)), | ||
538 | &frame->uc.uc_stack.ss_flags); | ||
539 | err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size); | ||
540 | err |= copy_sc_to_user(&frame->uc.uc_mcontext, &frame->fpstate, regs, | ||
541 | set->sig[0]); | ||
542 | err |= __put_user(&frame->fpstate, &frame->uc.uc_mcontext.fpstate); | ||
543 | if (sizeof(*set) == 16) { | ||
544 | __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]); | ||
545 | __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]); | ||
546 | } | ||
547 | else | ||
548 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, | ||
549 | sizeof(*set)); | ||
550 | |||
551 | /* | ||
552 | * Set up to return from userspace. If provided, use a stub | ||
553 | * already in userspace. | ||
554 | */ | ||
555 | /* x86-64 should always use SA_RESTORER. */ | ||
556 | if (ka->sa.sa_flags & SA_RESTORER) | ||
557 | err |= __put_user(ka->sa.sa_restorer, &frame->pretcode); | ||
558 | else | ||
559 | /* could use a vstub here */ | ||
560 | return err; | ||
561 | |||
562 | if (err) | ||
563 | return err; | ||
564 | |||
565 | /* Set up registers for signal handler */ | ||
566 | { | ||
567 | struct exec_domain *ed = current_thread_info()->exec_domain; | ||
568 | if (unlikely(ed && ed->signal_invmap && sig < 32)) | ||
569 | sig = ed->signal_invmap[sig]; | ||
570 | } | ||
571 | |||
572 | PT_REGS_SP(regs) = (unsigned long) frame; | ||
573 | PT_REGS_RDI(regs) = sig; | ||
574 | /* In case the signal handler was declared without prototypes */ | ||
575 | PT_REGS_RAX(regs) = 0; | ||
576 | |||
577 | /* | ||
578 | * This also works for non SA_SIGINFO handlers because they expect the | ||
579 | * next argument after the signal number on the stack. | ||
580 | */ | ||
581 | PT_REGS_RSI(regs) = (unsigned long) &frame->info; | ||
582 | PT_REGS_RDX(regs) = (unsigned long) &frame->uc; | ||
583 | PT_REGS_RIP(regs) = (unsigned long) ka->sa.sa_handler; | ||
584 | out: | ||
585 | return err; | ||
586 | } | ||
587 | #endif | ||
588 | |||
589 | long sys_rt_sigreturn(struct pt_regs *regs) | ||
590 | { | ||
591 | unsigned long sp = PT_REGS_SP(&current->thread.regs); | ||
592 | struct rt_sigframe __user *frame = | ||
593 | (struct rt_sigframe __user *)(sp - sizeof(long)); | ||
594 | struct ucontext __user *uc = &frame->uc; | ||
595 | sigset_t set; | ||
596 | |||
597 | if (copy_from_user(&set, &uc->uc_sigmask, sizeof(set))) | ||
598 | goto segfault; | ||
599 | |||
600 | sigdelsetmask(&set, ~_BLOCKABLE); | ||
601 | set_current_blocked(&set); | ||
602 | |||
603 | if (copy_sc_from_user(&current->thread.regs, &uc->uc_mcontext)) | ||
604 | goto segfault; | ||
605 | |||
606 | /* Avoid ERESTART handling */ | ||
607 | PT_REGS_SYSCALL_NR(&current->thread.regs) = -1; | ||
608 | return PT_REGS_SYSCALL_RET(&current->thread.regs); | ||
609 | |||
610 | segfault: | ||
611 | force_sig(SIGSEGV, current); | ||
612 | return 0; | ||
613 | } | ||
614 | |||
615 | #ifdef CONFIG_X86_32 | ||
616 | long ptregs_sigreturn(void) | ||
617 | { | ||
618 | return sys_sigreturn(NULL); | ||
619 | } | ||
620 | long ptregs_rt_sigreturn(void) | ||
621 | { | ||
622 | return sys_rt_sigreturn(NULL); | ||
623 | } | ||
624 | #endif | ||
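The eight retcode bytes written by setup_signal_stack_sc() above decode to the historical i386 sigreturn trampoline that gdb keys on. A minimal byte-level sketch, assuming the usual i386 value __NR_sigreturn == 119 (illustration only, not part of the patch):

/* Byte-level view of frame->retcode as filled in above (little endian). */
static const unsigned char sigreturn_trampoline[8] = {
	0x58,                   /* popl %eax              (low byte of 0xb858)  */
	0xb8,                   /* movl $imm32, %eax      (high byte of 0xb858) */
	119, 0x00, 0x00, 0x00,  /* imm32 = __NR_sigreturn (assumed to be 119)   */
	0xcd, 0x80,             /* int $0x80              (0x80cd)              */
};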
diff --git a/arch/x86/um/stub_32.S b/arch/x86/um/stub_32.S new file mode 100644 index 000000000000..54a36ec20cb7 --- /dev/null +++ b/arch/x86/um/stub_32.S | |||
@@ -0,0 +1,51 @@ | |||
1 | #include "as-layout.h" | ||
2 | |||
3 | .globl syscall_stub | ||
4 | .section .__syscall_stub, "ax" | ||
5 | |||
6 | .globl batch_syscall_stub | ||
7 | batch_syscall_stub: | ||
8 | /* load pointer to first operation */ | ||
9 | mov $(STUB_DATA+8), %esp | ||
10 | |||
11 | again: | ||
12 | /* load length of additional data */ | ||
13 | mov 0x0(%esp), %eax | ||
14 | |||
15 | /* if(length == 0) : end of list */ | ||
16 | /* write possible 0 to header */ | ||
17 | mov %eax, STUB_DATA+4 | ||
18 | cmpl $0, %eax | ||
19 | jz done | ||
20 | |||
21 | /* save current pointer */ | ||
22 | mov %esp, STUB_DATA+4 | ||
23 | |||
24 | /* skip additional data */ | ||
25 | add %eax, %esp | ||
26 | |||
27 | /* load syscall-# */ | ||
28 | pop %eax | ||
29 | |||
30 | /* load syscall params */ | ||
31 | pop %ebx | ||
32 | pop %ecx | ||
33 | pop %edx | ||
34 | pop %esi | ||
35 | pop %edi | ||
36 | pop %ebp | ||
37 | |||
38 | /* execute syscall */ | ||
39 | int $0x80 | ||
40 | |||
41 | /* check return value */ | ||
42 | pop %ebx | ||
43 | cmp %ebx, %eax | ||
44 | je again | ||
45 | |||
46 | done: | ||
47 | /* save return value */ | ||
48 | mov %eax, STUB_DATA | ||
49 | |||
50 | /* stop */ | ||
51 | int3 | ||
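Reading the pops in batch_syscall_stub above, every record in the stub data page ends with a fixed tail that the stub consumes after skipping the variable-length inline data. A hypothetical C view of that tail; the real records are laid out by the UML host-side code, and this struct is only an illustration of what the pops correspond to:

/* Hypothetical sketch of the fields popped per record by batch_syscall_stub. */
struct stub_batch_tail {
	unsigned long nr;        /* syscall number, popped into %eax            */
	unsigned long args[6];   /* %ebx, %ecx, %edx, %esi, %edi, %ebp          */
	unsigned long expected;  /* compared against %eax after the int $0x80   */
};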
diff --git a/arch/x86/um/stub_64.S b/arch/x86/um/stub_64.S new file mode 100644 index 000000000000..20e4a96a6dcb --- /dev/null +++ b/arch/x86/um/stub_64.S | |||
@@ -0,0 +1,66 @@ | |||
1 | #include "as-layout.h" | ||
2 | |||
3 | .globl syscall_stub | ||
4 | .section .__syscall_stub, "ax" | ||
5 | syscall_stub: | ||
6 | syscall | ||
7 | /* We don't have 64-bit constants, so this constructs the address | ||
8 | * we need. | ||
9 | */ | ||
10 | movq $(STUB_DATA >> 32), %rbx | ||
11 | salq $32, %rbx | ||
12 | movq $(STUB_DATA & 0xffffffff), %rcx | ||
13 | or %rcx, %rbx | ||
14 | movq %rax, (%rbx) | ||
15 | int3 | ||
16 | |||
17 | .globl batch_syscall_stub | ||
18 | batch_syscall_stub: | ||
19 | mov $(STUB_DATA >> 32), %rbx | ||
20 | sal $32, %rbx | ||
21 | mov $(STUB_DATA & 0xffffffff), %rax | ||
22 | or %rax, %rbx | ||
23 | /* load pointer to first operation */ | ||
24 | mov %rbx, %rsp | ||
25 | add $0x10, %rsp | ||
26 | again: | ||
27 | /* load length of additional data */ | ||
28 | mov 0x0(%rsp), %rax | ||
29 | |||
30 | /* if(length == 0) : end of list */ | ||
31 | /* write possible 0 to header */ | ||
32 | mov %rax, 8(%rbx) | ||
33 | cmp $0, %rax | ||
34 | jz done | ||
35 | |||
36 | /* save current pointer */ | ||
37 | mov %rsp, 8(%rbx) | ||
38 | |||
39 | /* skip additional data */ | ||
40 | add %rax, %rsp | ||
41 | |||
42 | /* load syscall-# */ | ||
43 | pop %rax | ||
44 | |||
45 | /* load syscall params */ | ||
46 | pop %rdi | ||
47 | pop %rsi | ||
48 | pop %rdx | ||
49 | pop %r10 | ||
50 | pop %r8 | ||
51 | pop %r9 | ||
52 | |||
53 | /* execute syscall */ | ||
54 | syscall | ||
55 | |||
56 | /* check return value */ | ||
57 | pop %rcx | ||
58 | cmp %rcx, %rax | ||
59 | je again | ||
60 | |||
61 | done: | ||
62 | /* save return value */ | ||
63 | mov %rax, (%rbx) | ||
64 | |||
65 | /* stop */ | ||
66 | int3 | ||
diff --git a/arch/x86/um/stub_segv.c b/arch/x86/um/stub_segv.c new file mode 100644 index 000000000000..b7450bd22e7d --- /dev/null +++ b/arch/x86/um/stub_segv.c | |||
@@ -0,0 +1,19 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #include "sysdep/stub.h" | ||
7 | #include "sysdep/faultinfo.h" | ||
8 | #include "sysdep/mcontext.h" | ||
9 | |||
10 | void __attribute__ ((__section__ (".__syscall_stub"))) | ||
11 | stub_segv_handler(int sig, siginfo_t *info, void *p) | ||
12 | { | ||
13 | struct ucontext *uc = p; | ||
14 | |||
15 | GET_FAULTINFO_FROM_MC(*((struct faultinfo *) STUB_DATA), | ||
16 | &uc->uc_mcontext); | ||
17 | trap_myself(); | ||
18 | } | ||
19 | |||
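On the host side, the tracing process can read back the faultinfo that stub_segv_handler() stored at STUB_DATA once the child stops on the int3. A rough sketch, assuming plain ptrace word reads; the real UML host code uses its own copy helpers, and stub_data_addr / nwords are made-up parameters for the illustration:

#include <errno.h>
#include <sys/ptrace.h>

/* Illustration only: copy nwords longs out of the child's stub data page. */
static int read_stub_words(int pid, unsigned long stub_data_addr,
			   unsigned long *buf, int nwords)
{
	int i;

	for (i = 0; i < nwords; i++) {
		errno = 0;
		buf[i] = ptrace(PTRACE_PEEKDATA, pid,
				(void *)(stub_data_addr + i * sizeof(long)), NULL);
		if (errno)
			return -errno;
	}
	return 0;
}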
diff --git a/arch/x86/um/sys_call_table_32.S b/arch/x86/um/sys_call_table_32.S new file mode 100644 index 000000000000..a7ca80d2dceb --- /dev/null +++ b/arch/x86/um/sys_call_table_32.S | |||
@@ -0,0 +1,26 @@ | |||
1 | #include <linux/linkage.h> | ||
2 | /* Steal i386 syscall table for our purposes, but with some slight changes. */ | ||
3 | |||
4 | #define sys_iopl sys_ni_syscall | ||
5 | #define sys_ioperm sys_ni_syscall | ||
6 | |||
7 | #define sys_vm86old sys_ni_syscall | ||
8 | #define sys_vm86 sys_ni_syscall | ||
9 | |||
10 | #define old_mmap sys_old_mmap | ||
11 | |||
12 | #define ptregs_fork sys_fork | ||
13 | #define ptregs_execve sys_execve | ||
14 | #define ptregs_iopl sys_iopl | ||
15 | #define ptregs_vm86old sys_vm86old | ||
16 | #define ptregs_clone sys_clone | ||
17 | #define ptregs_vm86 sys_vm86 | ||
18 | #define ptregs_sigaltstack sys_sigaltstack | ||
19 | #define ptregs_vfork sys_vfork | ||
20 | |||
21 | .section .rodata,"a" | ||
22 | |||
23 | #include "../kernel/syscall_table_32.S" | ||
24 | |||
25 | ENTRY(syscall_table_size) | ||
26 | .long .-sys_call_table | ||
diff --git a/arch/x86/um/sys_call_table_64.c b/arch/x86/um/sys_call_table_64.c new file mode 100644 index 000000000000..99522f78b162 --- /dev/null +++ b/arch/x86/um/sys_call_table_64.c | |||
@@ -0,0 +1,64 @@ | |||
1 | /* | ||
2 | * System call table for UML/x86-64, copied from arch/x86_64/kernel/syscall.c | ||
3 | * with some changes for UML. | ||
4 | */ | ||
5 | |||
6 | #include <linux/linkage.h> | ||
7 | #include <linux/sys.h> | ||
8 | #include <linux/cache.h> | ||
9 | |||
10 | #define __NO_STUBS | ||
11 | |||
12 | /* | ||
13 | * Below you can see, in terms of #define's, the differences between the x86-64 | ||
14 | * and the UML syscall table. | ||
15 | */ | ||
16 | |||
17 | /* Not going to be implemented by UML, since we have no hardware. */ | ||
18 | #define stub_iopl sys_ni_syscall | ||
19 | #define sys_ioperm sys_ni_syscall | ||
20 | |||
21 | /* | ||
22 | * The UML TLS problem. Note that x86_64 does not implement this, so the below | ||
23 | * is needed only for ia32 compatibility. | ||
24 | */ | ||
25 | |||
26 | /* On UML we call it this way ("old" means it's not mmap2) */ | ||
27 | #define sys_mmap old_mmap | ||
28 | |||
29 | #define stub_clone sys_clone | ||
30 | #define stub_fork sys_fork | ||
31 | #define stub_vfork sys_vfork | ||
32 | #define stub_execve sys_execve | ||
33 | #define stub_rt_sigsuspend sys_rt_sigsuspend | ||
34 | #define stub_sigaltstack sys_sigaltstack | ||
35 | #define stub_rt_sigreturn sys_rt_sigreturn | ||
36 | |||
37 | #define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ; | ||
38 | #undef _ASM_X86_UNISTD_64_H | ||
39 | #include "../../x86/include/asm/unistd_64.h" | ||
40 | |||
41 | #undef __SYSCALL | ||
42 | #define __SYSCALL(nr, sym) [ nr ] = sym, | ||
43 | #undef _ASM_X86_UNISTD_64_H | ||
44 | |||
45 | typedef void (*sys_call_ptr_t)(void); | ||
46 | |||
47 | extern void sys_ni_syscall(void); | ||
48 | |||
49 | /* | ||
50 | * We used to have a trick here which made sure that holes in the | ||
51 | * x86_64 table were filled in with sys_ni_syscall, but a comment in | ||
52 | * unistd_64.h says that holes aren't allowed, so the trick was | ||
53 | * removed. | ||
54 | * The trick looked like this | ||
55 | * [0 ... UM_NR_syscall_max] = &sys_ni_syscall | ||
56 | * before including unistd_64.h - the later initializations overwrote | ||
57 | * the sys_ni_syscall filler. | ||
58 | */ | ||
59 | |||
60 | sys_call_ptr_t sys_call_table[] __cacheline_aligned = { | ||
61 | #include <asm/unistd_64.h> | ||
62 | }; | ||
63 | |||
64 | int syscall_table_size = sizeof(sys_call_table); | ||
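The two-pass __SYSCALL expansion above can be hard to parse on first read: the first include emits extern prototypes, the second emits designated initializers. A self-contained sketch of the same technique, with a made-up list standing in for unistd_64.h and made-up function names:

#include <stdio.h>

/* The list that stands in for unistd_64.h in this sketch. */
#define MY_SYSCALLS(X)  X(0, my_read)  X(1, my_write)

/* Pass 1: emit prototypes, like the extern declarations above. */
#define __SYSCALL(nr, sym) static void sym(void);
MY_SYSCALLS(__SYSCALL)
#undef __SYSCALL

/* Pass 2: emit designated initializers, filling slot [nr] with sym. */
#define __SYSCALL(nr, sym) [nr] = sym,
static void (*my_table[])(void) = {
	MY_SYSCALLS(__SYSCALL)
};
#undef __SYSCALL

static void my_read(void)  { printf("read\n");  }
static void my_write(void) { printf("write\n"); }

int main(void)
{
	my_table[1]();		/* prints "write" */
	return 0;
}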
diff --git a/arch/x86/um/syscalls_32.c b/arch/x86/um/syscalls_32.c new file mode 100644 index 000000000000..70ca357393b8 --- /dev/null +++ b/arch/x86/um/syscalls_32.c | |||
@@ -0,0 +1,66 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #include "linux/sched.h" | ||
7 | #include "linux/shm.h" | ||
8 | #include "linux/ipc.h" | ||
9 | #include "linux/syscalls.h" | ||
10 | #include "asm/mman.h" | ||
11 | #include "asm/uaccess.h" | ||
12 | #include "asm/unistd.h" | ||
13 | |||
14 | /* | ||
15 | * The prototype on i386 is: | ||
16 | * | ||
17 | * int clone(int flags, void * child_stack, int * parent_tidptr, struct user_desc * newtls, int * child_tidptr) | ||
18 | * | ||
19 | * and the "newtls" arg. on i386 is read by copy_thread directly from the | ||
20 | * register saved on the stack. | ||
21 | */ | ||
22 | long sys_clone(unsigned long clone_flags, unsigned long newsp, | ||
23 | int __user *parent_tid, void *newtls, int __user *child_tid) | ||
24 | { | ||
25 | long ret; | ||
26 | |||
27 | if (!newsp) | ||
28 | newsp = UPT_SP(&current->thread.regs.regs); | ||
29 | |||
30 | current->thread.forking = 1; | ||
31 | ret = do_fork(clone_flags, newsp, &current->thread.regs, 0, parent_tid, | ||
32 | child_tid); | ||
33 | current->thread.forking = 0; | ||
34 | return ret; | ||
35 | } | ||
36 | |||
37 | long sys_sigaction(int sig, const struct old_sigaction __user *act, | ||
38 | struct old_sigaction __user *oact) | ||
39 | { | ||
40 | struct k_sigaction new_ka, old_ka; | ||
41 | int ret; | ||
42 | |||
43 | if (act) { | ||
44 | old_sigset_t mask; | ||
45 | if (!access_ok(VERIFY_READ, act, sizeof(*act)) || | ||
46 | __get_user(new_ka.sa.sa_handler, &act->sa_handler) || | ||
47 | __get_user(new_ka.sa.sa_restorer, &act->sa_restorer)) | ||
48 | return -EFAULT; | ||
49 | __get_user(new_ka.sa.sa_flags, &act->sa_flags); | ||
50 | __get_user(mask, &act->sa_mask); | ||
51 | siginitset(&new_ka.sa.sa_mask, mask); | ||
52 | } | ||
53 | |||
54 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | ||
55 | |||
56 | if (!ret && oact) { | ||
57 | if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || | ||
58 | __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || | ||
59 | __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer)) | ||
60 | return -EFAULT; | ||
61 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags); | ||
62 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); | ||
63 | } | ||
64 | |||
65 | return ret; | ||
66 | } | ||
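From user space, the i386 calling convention described in the comment above sys_clone() means the TLS descriptor travels as the fourth argument (the register image's %esi) when CLONE_SETTLS is used. A hedged sketch only; glibc normally hides this behind its own clone() and pthread machinery, and clone_with_tls is a made-up wrapper name:

#include <sched.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>		/* struct user_desc */

/* Raw i386 clone with CLONE_SETTLS; illustration only. */
static long clone_with_tls(unsigned long flags, void *child_stack,
			   int *parent_tid, struct user_desc *newtls,
			   int *child_tid)
{
	return syscall(SYS_clone, flags, child_stack,
		       parent_tid, newtls, child_tid);
}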
diff --git a/arch/x86/um/syscalls_64.c b/arch/x86/um/syscalls_64.c new file mode 100644 index 000000000000..f3d82bb6e15a --- /dev/null +++ b/arch/x86/um/syscalls_64.c | |||
@@ -0,0 +1,102 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
3 | * Copyright 2003 PathScale, Inc. | ||
4 | * | ||
5 | * Licensed under the GPL | ||
6 | */ | ||
7 | |||
8 | #include "linux/linkage.h" | ||
9 | #include "linux/personality.h" | ||
10 | #include "linux/utsname.h" | ||
11 | #include "asm/prctl.h" /* XXX This should get the constants from libc */ | ||
12 | #include "asm/uaccess.h" | ||
13 | #include "os.h" | ||
14 | |||
15 | long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr) | ||
16 | { | ||
17 | unsigned long *ptr = addr, tmp; | ||
18 | long ret; | ||
19 | int pid = task->mm->context.id.u.pid; | ||
20 | |||
21 | /* | ||
22 | * With ARCH_SET_FS (and ARCH_SET_GS is treated similarly to | ||
23 | * be safe), we need to call arch_prctl on the host because | ||
24 | * setting %fs may result in something else happening (like a | ||
25 | * GDT or thread.fs being set instead). So, we let the host | ||
26 | * fiddle the registers and thread struct and restore the | ||
27 | * registers afterwards. | ||
28 | * | ||
29 | * So, the saved registers are stored to the process (this is | ||
30 | * needed because a stub may have been the last thing to run), | ||
31 | * arch_prctl is run on the host, then the registers are read | ||
32 | * back. | ||
33 | */ | ||
34 | switch (code) { | ||
35 | case ARCH_SET_FS: | ||
36 | case ARCH_SET_GS: | ||
37 | ret = restore_registers(pid, &current->thread.regs.regs); | ||
38 | if (ret) | ||
39 | return ret; | ||
40 | break; | ||
41 | case ARCH_GET_FS: | ||
42 | case ARCH_GET_GS: | ||
43 | /* | ||
44 | * With these two, we read to a local pointer and | ||
45 | * put_user it to the userspace pointer that we were | ||
46 | * given. If addr isn't valid (because it hasn't been | ||
47 | * faulted in or is just bogus), we want put_user to | ||
48 | * fault it in (or return -EFAULT) instead of having | ||
49 | * the host return -EFAULT. | ||
50 | */ | ||
51 | ptr = &tmp; | ||
52 | } | ||
53 | |||
54 | ret = os_arch_prctl(pid, code, ptr); | ||
55 | if (ret) | ||
56 | return ret; | ||
57 | |||
58 | switch (code) { | ||
59 | case ARCH_SET_FS: | ||
60 | current->thread.arch.fs = (unsigned long) ptr; | ||
61 | ret = save_registers(pid, &current->thread.regs.regs); | ||
62 | break; | ||
63 | case ARCH_SET_GS: | ||
64 | ret = save_registers(pid, &current->thread.regs.regs); | ||
65 | break; | ||
66 | case ARCH_GET_FS: | ||
67 | ret = put_user(tmp, addr); | ||
68 | break; | ||
69 | case ARCH_GET_GS: | ||
70 | ret = put_user(tmp, addr); | ||
71 | break; | ||
72 | } | ||
73 | |||
74 | return ret; | ||
75 | } | ||
76 | |||
77 | long sys_arch_prctl(int code, unsigned long addr) | ||
78 | { | ||
79 | return arch_prctl(current, code, (unsigned long __user *) addr); | ||
80 | } | ||
81 | |||
82 | long sys_clone(unsigned long clone_flags, unsigned long newsp, | ||
83 | void __user *parent_tid, void __user *child_tid) | ||
84 | { | ||
85 | long ret; | ||
86 | |||
87 | if (!newsp) | ||
88 | newsp = UPT_SP(&current->thread.regs.regs); | ||
89 | current->thread.forking = 1; | ||
90 | ret = do_fork(clone_flags, newsp, &current->thread.regs, 0, parent_tid, | ||
91 | child_tid); | ||
92 | current->thread.forking = 0; | ||
93 | return ret; | ||
94 | } | ||
95 | |||
96 | void arch_switch_to(struct task_struct *to) | ||
97 | { | ||
98 | if ((to->thread.arch.fs == 0) || (to->mm == NULL)) | ||
99 | return; | ||
100 | |||
101 | arch_prctl(to, ARCH_SET_FS, (void __user *) to->thread.arch.fs); | ||
102 | } | ||
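Inside the UML guest these requests arrive through sys_arch_prctl() above. A small hedged example of what the userspace side looks like, querying the %fs base with the raw syscall; it assumes an x86_64 build where ARCH_GET_FS comes from <asm/prctl.h>:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/prctl.h>

int main(void)
{
	unsigned long fs_base = 0;

	/* Ends up in sys_arch_prctl() when run inside the UML guest. */
	if (syscall(SYS_arch_prctl, ARCH_GET_FS, &fs_base) == 0)
		printf("current %%fs base: %#lx\n", fs_base);
	return 0;
}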
diff --git a/arch/x86/um/sysrq_32.c b/arch/x86/um/sysrq_32.c new file mode 100644 index 000000000000..171b3e9dc867 --- /dev/null +++ b/arch/x86/um/sysrq_32.c | |||
@@ -0,0 +1,101 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2001 - 2003 Jeff Dike (jdike@addtoit.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #include "linux/kernel.h" | ||
7 | #include "linux/smp.h" | ||
8 | #include "linux/sched.h" | ||
9 | #include "linux/kallsyms.h" | ||
10 | #include "asm/ptrace.h" | ||
11 | #include "sysrq.h" | ||
12 | |||
13 | /* This is declared by <linux/sched.h> */ | ||
14 | void show_regs(struct pt_regs *regs) | ||
15 | { | ||
16 | printk("\n"); | ||
17 | printk("EIP: %04lx:[<%08lx>] CPU: %d %s", | ||
18 | 0xffff & PT_REGS_CS(regs), PT_REGS_IP(regs), | ||
19 | smp_processor_id(), print_tainted()); | ||
20 | if (PT_REGS_CS(regs) & 3) | ||
21 | printk(" ESP: %04lx:%08lx", 0xffff & PT_REGS_SS(regs), | ||
22 | PT_REGS_SP(regs)); | ||
23 | printk(" EFLAGS: %08lx\n %s\n", PT_REGS_EFLAGS(regs), | ||
24 | print_tainted()); | ||
25 | printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n", | ||
26 | PT_REGS_EAX(regs), PT_REGS_EBX(regs), | ||
27 | PT_REGS_ECX(regs), | ||
28 | PT_REGS_EDX(regs)); | ||
29 | printk("ESI: %08lx EDI: %08lx EBP: %08lx", | ||
30 | PT_REGS_ESI(regs), PT_REGS_EDI(regs), | ||
31 | PT_REGS_EBP(regs)); | ||
32 | printk(" DS: %04lx ES: %04lx\n", | ||
33 | 0xffff & PT_REGS_DS(regs), | ||
34 | 0xffff & PT_REGS_ES(regs)); | ||
35 | |||
36 | show_trace(NULL, (unsigned long *) &regs); | ||
37 | } | ||
38 | |||
39 | /* Copied from i386. */ | ||
40 | static inline int valid_stack_ptr(struct thread_info *tinfo, void *p) | ||
41 | { | ||
42 | return p > (void *)tinfo && | ||
43 | p < (void *)tinfo + THREAD_SIZE - 3; | ||
44 | } | ||
45 | |||
46 | /* Adapted from i386 (we also print the address we read from). */ | ||
47 | static inline unsigned long print_context_stack(struct thread_info *tinfo, | ||
48 | unsigned long *stack, unsigned long ebp) | ||
49 | { | ||
50 | unsigned long addr; | ||
51 | |||
52 | #ifdef CONFIG_FRAME_POINTER | ||
53 | while (valid_stack_ptr(tinfo, (void *)ebp)) { | ||
54 | addr = *(unsigned long *)(ebp + 4); | ||
55 | printk("%08lx: [<%08lx>]", ebp + 4, addr); | ||
56 | print_symbol(" %s", addr); | ||
57 | printk("\n"); | ||
58 | ebp = *(unsigned long *)ebp; | ||
59 | } | ||
60 | #else | ||
61 | while (valid_stack_ptr(tinfo, stack)) { | ||
62 | addr = *stack; | ||
63 | if (__kernel_text_address(addr)) { | ||
64 | printk("%08lx: [<%08lx>]", (unsigned long) stack, addr); | ||
65 | print_symbol(" %s", addr); | ||
66 | printk("\n"); | ||
67 | } | ||
68 | stack++; | ||
69 | } | ||
70 | #endif | ||
71 | return ebp; | ||
72 | } | ||
73 | |||
74 | void show_trace(struct task_struct* task, unsigned long * stack) | ||
75 | { | ||
76 | unsigned long ebp; | ||
77 | struct thread_info *context; | ||
78 | |||
79 | /* Turn this into BUG_ON if possible. */ | ||
80 | if (!stack) { | ||
81 | stack = (unsigned long*) &stack; | ||
82 | printk("show_trace: got NULL stack, implicit assumption task == current"); | ||
83 | WARN_ON(1); | ||
84 | } | ||
85 | |||
86 | if (!task) | ||
87 | task = current; | ||
88 | |||
89 | if (task != current) { | ||
90 | ebp = (unsigned long) KSTK_EBP(task); | ||
91 | } else { | ||
92 | asm ("movl %%ebp, %0" : "=r" (ebp) : ); | ||
93 | } | ||
94 | |||
95 | context = (struct thread_info *) | ||
96 | ((unsigned long)stack & (~(THREAD_SIZE - 1))); | ||
97 | print_context_stack(context, stack, ebp); | ||
98 | |||
99 | printk("\n"); | ||
100 | } | ||
101 | |||
diff --git a/arch/x86/um/sysrq_64.c b/arch/x86/um/sysrq_64.c new file mode 100644 index 000000000000..e8913436d7dc --- /dev/null +++ b/arch/x86/um/sysrq_64.c | |||
@@ -0,0 +1,41 @@ | |||
1 | /* | ||
2 | * Copyright 2003 PathScale, Inc. | ||
3 | * | ||
4 | * Licensed under the GPL | ||
5 | */ | ||
6 | |||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/module.h> | ||
9 | #include <linux/sched.h> | ||
10 | #include <linux/utsname.h> | ||
11 | #include <asm/current.h> | ||
12 | #include <asm/ptrace.h> | ||
13 | #include "sysrq.h" | ||
14 | |||
15 | void __show_regs(struct pt_regs *regs) | ||
16 | { | ||
17 | printk("\n"); | ||
18 | print_modules(); | ||
19 | printk(KERN_INFO "Pid: %d, comm: %.20s %s %s\n", task_pid_nr(current), | ||
20 | current->comm, print_tainted(), init_utsname()->release); | ||
21 | printk(KERN_INFO "RIP: %04lx:[<%016lx>]\n", PT_REGS_CS(regs) & 0xffff, | ||
22 | PT_REGS_RIP(regs)); | ||
23 | printk(KERN_INFO "RSP: %016lx EFLAGS: %08lx\n", PT_REGS_SP(regs), | ||
24 | PT_REGS_EFLAGS(regs)); | ||
25 | printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n", | ||
26 | PT_REGS_RAX(regs), PT_REGS_RBX(regs), PT_REGS_RCX(regs)); | ||
27 | printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n", | ||
28 | PT_REGS_RDX(regs), PT_REGS_RSI(regs), PT_REGS_RDI(regs)); | ||
29 | printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n", | ||
30 | PT_REGS_RBP(regs), PT_REGS_R8(regs), PT_REGS_R9(regs)); | ||
31 | printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n", | ||
32 | PT_REGS_R10(regs), PT_REGS_R11(regs), PT_REGS_R12(regs)); | ||
33 | printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n", | ||
34 | PT_REGS_R13(regs), PT_REGS_R14(regs), PT_REGS_R15(regs)); | ||
35 | } | ||
36 | |||
37 | void show_regs(struct pt_regs *regs) | ||
38 | { | ||
39 | __show_regs(regs); | ||
40 | show_trace(current, (unsigned long *) &regs); | ||
41 | } | ||
diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c new file mode 100644 index 000000000000..c6c7131e563b --- /dev/null +++ b/arch/x86/um/tls_32.c | |||
@@ -0,0 +1,396 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it> | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #include "linux/percpu.h" | ||
7 | #include "linux/sched.h" | ||
8 | #include "asm/uaccess.h" | ||
9 | #include "os.h" | ||
10 | #include "skas.h" | ||
11 | #include "sysdep/tls.h" | ||
12 | |||
13 | /* | ||
14 | * If needed we can detect when it's uninitialized. | ||
15 | * | ||
16 | * These are initialized in an initcall and unchanged thereafter. | ||
17 | */ | ||
18 | static int host_supports_tls = -1; | ||
19 | int host_gdt_entry_tls_min; | ||
20 | |||
21 | int do_set_thread_area(struct user_desc *info) | ||
22 | { | ||
23 | int ret; | ||
24 | u32 cpu; | ||
25 | |||
26 | cpu = get_cpu(); | ||
27 | ret = os_set_thread_area(info, userspace_pid[cpu]); | ||
28 | put_cpu(); | ||
29 | |||
30 | if (ret) | ||
31 | printk(KERN_ERR "PTRACE_SET_THREAD_AREA failed, err = %d, " | ||
32 | "index = %d\n", ret, info->entry_number); | ||
33 | |||
34 | return ret; | ||
35 | } | ||
36 | |||
37 | int do_get_thread_area(struct user_desc *info) | ||
38 | { | ||
39 | int ret; | ||
40 | u32 cpu; | ||
41 | |||
42 | cpu = get_cpu(); | ||
43 | ret = os_get_thread_area(info, userspace_pid[cpu]); | ||
44 | put_cpu(); | ||
45 | |||
46 | if (ret) | ||
47 | printk(KERN_ERR "PTRACE_GET_THREAD_AREA failed, err = %d, " | ||
48 | "index = %d\n", ret, info->entry_number); | ||
49 | |||
50 | return ret; | ||
51 | } | ||
52 | |||
53 | /* | ||
54 | * sys_get_thread_area: get a yet unused TLS descriptor index. | ||
55 | * XXX: Consider leaving one free slot for glibc usage in the first place. This must | ||
56 | * be done here (and by changing GDT_ENTRY_TLS_* macros) and nowhere else. | ||
57 | * | ||
58 | * Also, this must be tested when compiling in SKAS mode with dynamic linking | ||
59 | * and running against NPTL. | ||
60 | */ | ||
61 | static int get_free_idx(struct task_struct* task) | ||
62 | { | ||
63 | struct thread_struct *t = &task->thread; | ||
64 | int idx; | ||
65 | |||
66 | if (!t->arch.tls_array) | ||
67 | return GDT_ENTRY_TLS_MIN; | ||
68 | |||
69 | for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++) | ||
70 | if (!t->arch.tls_array[idx].present) | ||
71 | return idx + GDT_ENTRY_TLS_MIN; | ||
72 | return -ESRCH; | ||
73 | } | ||
74 | |||
75 | static inline void clear_user_desc(struct user_desc* info) | ||
76 | { | ||
77 | /* Postcondition: LDT_empty(info) returns true. */ | ||
78 | memset(info, 0, sizeof(*info)); | ||
79 | |||
80 | /* | ||
81 | * Check the LDT_empty or the i386 sys_get_thread_area code - we obtain | ||
82 | * indeed an empty user_desc. | ||
83 | */ | ||
84 | info->read_exec_only = 1; | ||
85 | info->seg_not_present = 1; | ||
86 | } | ||
87 | |||
88 | #define O_FORCE 1 | ||
89 | |||
90 | static int load_TLS(int flags, struct task_struct *to) | ||
91 | { | ||
92 | int ret = 0; | ||
93 | int idx; | ||
94 | |||
95 | for (idx = GDT_ENTRY_TLS_MIN; idx < GDT_ENTRY_TLS_MAX; idx++) { | ||
96 | struct uml_tls_struct* curr = | ||
97 | &to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN]; | ||
98 | |||
99 | /* | ||
100 | * Actually, now if it wasn't flushed it gets cleared and | ||
101 | * flushed to the host, which will clear it. | ||
102 | */ | ||
103 | if (!curr->present) { | ||
104 | if (!curr->flushed) { | ||
105 | clear_user_desc(&curr->tls); | ||
106 | curr->tls.entry_number = idx; | ||
107 | } else { | ||
108 | WARN_ON(!LDT_empty(&curr->tls)); | ||
109 | continue; | ||
110 | } | ||
111 | } | ||
112 | |||
113 | if (!(flags & O_FORCE) && curr->flushed) | ||
114 | continue; | ||
115 | |||
116 | ret = do_set_thread_area(&curr->tls); | ||
117 | if (ret) | ||
118 | goto out; | ||
119 | |||
120 | curr->flushed = 1; | ||
121 | } | ||
122 | out: | ||
123 | return ret; | ||
124 | } | ||
125 | |||
126 | /* | ||
127 | * Check whether we need to do a flush for the new process, i.e. whether there | ||
128 | * are any present descriptors that haven't been flushed yet. | ||
129 | */ | ||
130 | static inline int needs_TLS_update(struct task_struct *task) | ||
131 | { | ||
132 | int i; | ||
133 | int ret = 0; | ||
134 | |||
135 | for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) { | ||
136 | struct uml_tls_struct* curr = | ||
137 | &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN]; | ||
138 | |||
139 | /* | ||
140 | * Can't test curr->present, we may need to clear a descriptor | ||
141 | * which had a value. | ||
142 | */ | ||
143 | if (curr->flushed) | ||
144 | continue; | ||
145 | ret = 1; | ||
146 | break; | ||
147 | } | ||
148 | return ret; | ||
149 | } | ||
150 | |||
151 | /* | ||
152 | * On a newly forked process, the TLS descriptors haven't yet been flushed. So | ||
153 | * we mark them as such and the first switch_to will do the job. | ||
154 | */ | ||
155 | void clear_flushed_tls(struct task_struct *task) | ||
156 | { | ||
157 | int i; | ||
158 | |||
159 | for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) { | ||
160 | struct uml_tls_struct* curr = | ||
161 | &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN]; | ||
162 | |||
163 | /* | ||
164 | * Still correct to do this, if it wasn't present on the host it | ||
165 | * will remain as flushed as it was. | ||
166 | */ | ||
167 | if (!curr->present) | ||
168 | continue; | ||
169 | |||
170 | curr->flushed = 0; | ||
171 | } | ||
172 | } | ||
173 | |||
174 | /* | ||
175 | * In SKAS0 mode, currently, multiple guest threads sharing the same ->mm have a | ||
176 | * common host process. So this is needed in SKAS0 too. | ||
177 | * | ||
178 | * However, if each thread had a different host process (and this was discussed | ||
179 | * for SMP support) this won't be needed. | ||
180 | * | ||
181 | * And this will not need to be used when (and if) we add support to the host | ||
182 | * SKAS patch. | ||
183 | */ | ||
184 | |||
185 | int arch_switch_tls(struct task_struct *to) | ||
186 | { | ||
187 | if (!host_supports_tls) | ||
188 | return 0; | ||
189 | |||
190 | /* | ||
191 | * We have no need whatsoever to switch TLS for kernel threads; beyond | ||
192 | * that, that would also result in us calling os_set_thread_area with | ||
193 | * userspace_pid[cpu] == 0, which gives an error. | ||
194 | */ | ||
195 | if (likely(to->mm)) | ||
196 | return load_TLS(O_FORCE, to); | ||
197 | |||
198 | return 0; | ||
199 | } | ||
200 | |||
201 | static int set_tls_entry(struct task_struct* task, struct user_desc *info, | ||
202 | int idx, int flushed) | ||
203 | { | ||
204 | struct thread_struct *t = &task->thread; | ||
205 | |||
206 | if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) | ||
207 | return -EINVAL; | ||
208 | |||
209 | t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info; | ||
210 | t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1; | ||
211 | t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed; | ||
212 | |||
213 | return 0; | ||
214 | } | ||
215 | |||
216 | int arch_copy_tls(struct task_struct *new) | ||
217 | { | ||
218 | struct user_desc info; | ||
219 | int idx, ret = -EFAULT; | ||
220 | |||
221 | if (copy_from_user(&info, | ||
222 | (void __user *) UPT_ESI(&new->thread.regs.regs), | ||
223 | sizeof(info))) | ||
224 | goto out; | ||
225 | |||
226 | ret = -EINVAL; | ||
227 | if (LDT_empty(&info)) | ||
228 | goto out; | ||
229 | |||
230 | idx = info.entry_number; | ||
231 | |||
232 | ret = set_tls_entry(new, &info, idx, 0); | ||
233 | out: | ||
234 | return ret; | ||
235 | } | ||
236 | |||
237 | /* XXX: use do_get_thread_area to read the host value? I'm not at all sure! */ | ||
238 | static int get_tls_entry(struct task_struct *task, struct user_desc *info, | ||
239 | int idx) | ||
240 | { | ||
241 | struct thread_struct *t = &task->thread; | ||
242 | |||
243 | if (!t->arch.tls_array) | ||
244 | goto clear; | ||
245 | |||
246 | if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) | ||
247 | return -EINVAL; | ||
248 | |||
249 | if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present) | ||
250 | goto clear; | ||
251 | |||
252 | *info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls; | ||
253 | |||
254 | out: | ||
255 | /* | ||
256 | * Temporary debugging check, to make sure that things have been | ||
257 | * flushed. This could be triggered if load_TLS() failed. | ||
258 | */ | ||
259 | if (unlikely(task == current && | ||
260 | !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) { | ||
261 | printk(KERN_ERR "get_tls_entry: task with pid %d got here " | ||
262 | "without flushed TLS.", current->pid); | ||
263 | } | ||
264 | |||
265 | return 0; | ||
266 | clear: | ||
267 | /* | ||
268 | * When the TLS entry has not been set, the values returned to user space | ||
269 | * from the tls_array are 0 (because it's cleared at boot, see | ||
270 | * arch/i386/kernel/head.S:cpu_gdt_table). Emulate that. | ||
271 | */ | ||
272 | clear_user_desc(info); | ||
273 | info->entry_number = idx; | ||
274 | goto out; | ||
275 | } | ||
276 | |||
277 | int sys_set_thread_area(struct user_desc __user *user_desc) | ||
278 | { | ||
279 | struct user_desc info; | ||
280 | int idx, ret; | ||
281 | |||
282 | if (!host_supports_tls) | ||
283 | return -ENOSYS; | ||
284 | |||
285 | if (copy_from_user(&info, user_desc, sizeof(info))) | ||
286 | return -EFAULT; | ||
287 | |||
288 | idx = info.entry_number; | ||
289 | |||
290 | if (idx == -1) { | ||
291 | idx = get_free_idx(current); | ||
292 | if (idx < 0) | ||
293 | return idx; | ||
294 | info.entry_number = idx; | ||
295 | /* Tell the user which slot we chose for him. */ | ||
296 | if (put_user(idx, &user_desc->entry_number)) | ||
297 | return -EFAULT; | ||
298 | } | ||
299 | |||
300 | ret = do_set_thread_area(&info); | ||
301 | if (ret) | ||
302 | return ret; | ||
303 | return set_tls_entry(current, &info, idx, 1); | ||
304 | } | ||
305 | |||
306 | /* | ||
307 | * Perform set_thread_area on behalf of the traced child. | ||
308 | * Note: error handling is not done on the deferred load, and this differs from | ||
309 | * i386. However, the only possible errors are caused by bugs. | ||
310 | */ | ||
311 | int ptrace_set_thread_area(struct task_struct *child, int idx, | ||
312 | struct user_desc __user *user_desc) | ||
313 | { | ||
314 | struct user_desc info; | ||
315 | |||
316 | if (!host_supports_tls) | ||
317 | return -EIO; | ||
318 | |||
319 | if (copy_from_user(&info, user_desc, sizeof(info))) | ||
320 | return -EFAULT; | ||
321 | |||
322 | return set_tls_entry(child, &info, idx, 0); | ||
323 | } | ||
324 | |||
325 | int sys_get_thread_area(struct user_desc __user *user_desc) | ||
326 | { | ||
327 | struct user_desc info; | ||
328 | int idx, ret; | ||
329 | |||
330 | if (!host_supports_tls) | ||
331 | return -ENOSYS; | ||
332 | |||
333 | if (get_user(idx, &user_desc->entry_number)) | ||
334 | return -EFAULT; | ||
335 | |||
336 | ret = get_tls_entry(current, &info, idx); | ||
337 | if (ret < 0) | ||
338 | goto out; | ||
339 | |||
340 | if (copy_to_user(user_desc, &info, sizeof(info))) | ||
341 | ret = -EFAULT; | ||
342 | |||
343 | out: | ||
344 | return ret; | ||
345 | } | ||
346 | |||
347 | /* | ||
348 | * Perform get_thread_area on behalf of the traced child. | ||
349 | */ | ||
350 | int ptrace_get_thread_area(struct task_struct *child, int idx, | ||
351 | struct user_desc __user *user_desc) | ||
352 | { | ||
353 | struct user_desc info; | ||
354 | int ret; | ||
355 | |||
356 | if (!host_supports_tls) | ||
357 | return -EIO; | ||
358 | |||
359 | ret = get_tls_entry(child, &info, idx); | ||
360 | if (ret < 0) | ||
361 | goto out; | ||
362 | |||
363 | if (copy_to_user(user_desc, &info, sizeof(info))) | ||
364 | ret = -EFAULT; | ||
365 | out: | ||
366 | return ret; | ||
367 | } | ||
368 | |||
369 | /* | ||
370 | * This code is really i386-only, but it detects and logs x86_64 GDT indexes | ||
371 | * if a 32-bit UML is running on a 64-bit host. | ||
372 | */ | ||
373 | static int __init __setup_host_supports_tls(void) | ||
374 | { | ||
375 | check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min); | ||
376 | if (host_supports_tls) { | ||
377 | printk(KERN_INFO "Host TLS support detected\n"); | ||
378 | printk(KERN_INFO "Detected host type: "); | ||
379 | switch (host_gdt_entry_tls_min) { | ||
380 | case GDT_ENTRY_TLS_MIN_I386: | ||
381 | printk(KERN_CONT "i386"); | ||
382 | break; | ||
383 | case GDT_ENTRY_TLS_MIN_X86_64: | ||
384 | printk(KERN_CONT "x86_64"); | ||
385 | break; | ||
386 | } | ||
387 | printk(KERN_CONT " (GDT indexes %d to %d)\n", | ||
388 | host_gdt_entry_tls_min, | ||
389 | host_gdt_entry_tls_min + GDT_ENTRY_TLS_ENTRIES); | ||
390 | } else | ||
391 | printk(KERN_ERR " Host TLS support NOT detected! " | ||
392 | "TLS support inside UML will not work\n"); | ||
393 | return 0; | ||
394 | } | ||
395 | |||
396 | __initcall(__setup_host_supports_tls); | ||
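The entry_number == -1 convention handled in sys_set_thread_area() above comes straight from i386: the kernel picks a free GDT slot and writes the chosen index back into the user_desc. A hedged userspace sketch for a 32-bit build; all field values are illustrative:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>

int main(void)
{
	static unsigned char tls_block[256];
	struct user_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.entry_number = -1;			/* let the kernel choose a slot */
	desc.base_addr = (unsigned long) tls_block;
	desc.limit = sizeof(tls_block) - 1;
	desc.seg_32bit = 1;
	desc.useable = 1;

	if (syscall(SYS_set_thread_area, &desc) == 0)
		printf("TLS descriptor installed in GDT slot %u\n",
		       desc.entry_number);
	return 0;
}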
diff --git a/arch/x86/um/tls_64.c b/arch/x86/um/tls_64.c new file mode 100644 index 000000000000..f7ba46200ecd --- /dev/null +++ b/arch/x86/um/tls_64.c | |||
@@ -0,0 +1,17 @@ | |||
1 | #include "linux/sched.h" | ||
2 | |||
3 | void clear_flushed_tls(struct task_struct *task) | ||
4 | { | ||
5 | } | ||
6 | |||
7 | int arch_copy_tls(struct task_struct *t) | ||
8 | { | ||
9 | /* | ||
10 | * If CLONE_SETTLS is set, we need to save the thread id | ||
11 | * (which is argument 5, child_tid, of clone) so it can be set | ||
12 | * during context switches. | ||
13 | */ | ||
14 | t->thread.arch.fs = t->thread.regs.regs.gp[R8 / sizeof(long)]; | ||
15 | |||
16 | return 0; | ||
17 | } | ||
diff --git a/arch/x86/um/user-offsets.c b/arch/x86/um/user-offsets.c new file mode 100644 index 000000000000..ca49be8ddd0c --- /dev/null +++ b/arch/x86/um/user-offsets.c | |||
@@ -0,0 +1,80 @@ | |||
1 | #include <stdio.h> | ||
2 | #include <stddef.h> | ||
3 | #include <signal.h> | ||
4 | #include <sys/poll.h> | ||
5 | #include <sys/mman.h> | ||
6 | #include <sys/user.h> | ||
7 | #define __FRAME_OFFSETS | ||
8 | #include <asm/ptrace.h> | ||
9 | #include <asm/types.h> | ||
10 | |||
11 | #define DEFINE(sym, val) \ | ||
12 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
13 | |||
14 | #define DEFINE_LONGS(sym, val) \ | ||
15 | asm volatile("\n->" #sym " %0 " #val : : "i" (val/sizeof(unsigned long))) | ||
16 | |||
17 | void foo(void) | ||
18 | { | ||
19 | #ifdef __i386__ | ||
20 | DEFINE_LONGS(HOST_FP_SIZE, sizeof(struct user_fpregs_struct)); | ||
21 | DEFINE_LONGS(HOST_FPX_SIZE, sizeof(struct user_fpxregs_struct)); | ||
22 | |||
23 | DEFINE(HOST_IP, EIP); | ||
24 | DEFINE(HOST_SP, UESP); | ||
25 | DEFINE(HOST_EFLAGS, EFL); | ||
26 | DEFINE(HOST_AX, EAX); | ||
27 | DEFINE(HOST_BX, EBX); | ||
28 | DEFINE(HOST_CX, ECX); | ||
29 | DEFINE(HOST_DX, EDX); | ||
30 | DEFINE(HOST_SI, ESI); | ||
31 | DEFINE(HOST_DI, EDI); | ||
32 | DEFINE(HOST_BP, EBP); | ||
33 | DEFINE(HOST_CS, CS); | ||
34 | DEFINE(HOST_SS, SS); | ||
35 | DEFINE(HOST_DS, DS); | ||
36 | DEFINE(HOST_FS, FS); | ||
37 | DEFINE(HOST_ES, ES); | ||
38 | DEFINE(HOST_GS, GS); | ||
39 | DEFINE(HOST_ORIG_AX, ORIG_EAX); | ||
40 | #else | ||
41 | DEFINE(HOST_FP_SIZE, sizeof(struct _fpstate) / sizeof(unsigned long)); | ||
42 | DEFINE_LONGS(HOST_BX, RBX); | ||
43 | DEFINE_LONGS(HOST_CX, RCX); | ||
44 | DEFINE_LONGS(HOST_DI, RDI); | ||
45 | DEFINE_LONGS(HOST_SI, RSI); | ||
46 | DEFINE_LONGS(HOST_DX, RDX); | ||
47 | DEFINE_LONGS(HOST_BP, RBP); | ||
48 | DEFINE_LONGS(HOST_AX, RAX); | ||
49 | DEFINE_LONGS(HOST_R8, R8); | ||
50 | DEFINE_LONGS(HOST_R9, R9); | ||
51 | DEFINE_LONGS(HOST_R10, R10); | ||
52 | DEFINE_LONGS(HOST_R11, R11); | ||
53 | DEFINE_LONGS(HOST_R12, R12); | ||
54 | DEFINE_LONGS(HOST_R13, R13); | ||
55 | DEFINE_LONGS(HOST_R14, R14); | ||
56 | DEFINE_LONGS(HOST_R15, R15); | ||
57 | DEFINE_LONGS(HOST_ORIG_AX, ORIG_RAX); | ||
58 | DEFINE_LONGS(HOST_CS, CS); | ||
59 | DEFINE_LONGS(HOST_SS, SS); | ||
60 | DEFINE_LONGS(HOST_EFLAGS, EFLAGS); | ||
61 | #if 0 | ||
62 | DEFINE_LONGS(HOST_FS, FS); | ||
63 | DEFINE_LONGS(HOST_GS, GS); | ||
64 | DEFINE_LONGS(HOST_DS, DS); | ||
65 | DEFINE_LONGS(HOST_ES, ES); | ||
66 | #endif | ||
67 | |||
68 | DEFINE_LONGS(HOST_IP, RIP); | ||
69 | DEFINE_LONGS(HOST_SP, RSP); | ||
70 | #endif | ||
71 | |||
72 | DEFINE(UM_FRAME_SIZE, sizeof(struct user_regs_struct)); | ||
73 | DEFINE(UM_POLLIN, POLLIN); | ||
74 | DEFINE(UM_POLLPRI, POLLPRI); | ||
75 | DEFINE(UM_POLLOUT, POLLOUT); | ||
76 | |||
77 | DEFINE(UM_PROT_READ, PROT_READ); | ||
78 | DEFINE(UM_PROT_WRITE, PROT_WRITE); | ||
79 | DEFINE(UM_PROT_EXEC, PROT_EXEC); | ||
80 | } | ||
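The DEFINE()/DEFINE_LONGS() macros above use the standard asm-offsets trick: each value is emitted as a "->SYM value" marker in the generated assembly, which the build can then post-process (typically with sed) into a header of constants. A stand-alone sketch of the same technique with made-up struct and symbol names; compile with -S only, since the marker lines are meant to be post-processed, not assembled:

#include <stddef.h>

struct example {
	int  a;
	long b;
};

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

void foo(void)
{
	/* Leaves a "->EXAMPLE_OFF_B ..." marker line in the .s output. */
	DEFINE(EXAMPLE_OFF_B, offsetof(struct example, b));
}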
diff --git a/arch/x86/um/vdso/Makefile b/arch/x86/um/vdso/Makefile new file mode 100644 index 000000000000..6c803ca49b5d --- /dev/null +++ b/arch/x86/um/vdso/Makefile | |||
@@ -0,0 +1,90 @@ | |||
1 | # | ||
2 | # Building vDSO images for x86. | ||
3 | # | ||
4 | |||
5 | VDSO64-y := y | ||
6 | |||
7 | vdso-install-$(VDSO64-y) += vdso.so | ||
8 | |||
9 | |||
10 | # files to link into the vdso | ||
11 | vobjs-y := vdso-note.o um_vdso.o | ||
12 | |||
13 | # files to link into kernel | ||
14 | obj-$(VDSO64-y) += vdso.o vma.o | ||
15 | |||
16 | vobjs := $(foreach F,$(vobjs-y),$(obj)/$F) | ||
17 | |||
18 | $(obj)/vdso.o: $(obj)/vdso.so | ||
19 | |||
20 | targets += vdso.so vdso.so.dbg vdso.lds $(vobjs-y) | ||
21 | |||
22 | export CPPFLAGS_vdso.lds += -P -C | ||
23 | |||
24 | VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \ | ||
25 | -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 | ||
26 | |||
27 | $(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so | ||
28 | |||
29 | $(obj)/vdso.so.dbg: $(src)/vdso.lds $(vobjs) FORCE | ||
30 | $(call if_changed,vdso) | ||
31 | |||
32 | $(obj)/%.so: OBJCOPYFLAGS := -S | ||
33 | $(obj)/%.so: $(obj)/%.so.dbg FORCE | ||
34 | $(call if_changed,objcopy) | ||
35 | |||
36 | # | ||
37 | # Don't omit frame pointers for ease of userspace debugging, but do | ||
38 | # optimize sibling calls. | ||
39 | # | ||
40 | CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \ | ||
41 | $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \ | ||
42 | -fno-omit-frame-pointer -foptimize-sibling-calls | ||
43 | |||
44 | $(vobjs): KBUILD_CFLAGS += $(CFL) | ||
45 | |||
46 | # | ||
47 | # vDSO code runs in userspace and -pg doesn't help with profiling anyway. | ||
48 | # | ||
49 | CFLAGS_REMOVE_vdso-note.o = -pg -fprofile-arcs -ftest-coverage | ||
50 | CFLAGS_REMOVE_um_vdso.o = -pg -fprofile-arcs -ftest-coverage | ||
51 | |||
52 | targets += vdso-syms.lds | ||
53 | obj-$(VDSO64-y) += vdso-syms.lds | ||
54 | |||
55 | # | ||
56 | # Match symbols in the DSO that look like VDSO*; produce a file of constants. | ||
57 | # | ||
58 | sed-vdsosym := -e 's/^00*/0/' \ | ||
59 | -e 's/^\([0-9a-fA-F]*\) . \(VDSO[a-zA-Z0-9_]*\)$$/\2 = 0x\1;/p' | ||
60 | quiet_cmd_vdsosym = VDSOSYM $@ | ||
61 | define cmd_vdsosym | ||
62 | $(NM) $< | LC_ALL=C sed -n $(sed-vdsosym) | LC_ALL=C sort > $@ | ||
63 | endef | ||
64 | |||
65 | $(obj)/%-syms.lds: $(obj)/%.so.dbg FORCE | ||
66 | $(call if_changed,vdsosym) | ||
67 | |||
68 | # | ||
69 | # The DSO images are built using a special linker script. | ||
70 | # | ||
71 | quiet_cmd_vdso = VDSO $@ | ||
72 | cmd_vdso = $(CC) -nostdlib -o $@ \ | ||
73 | $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \ | ||
74 | -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \ | ||
75 | sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@' | ||
76 | |||
77 | VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) | ||
78 | GCOV_PROFILE := n | ||
79 | |||
80 | # | ||
81 | # Install the unstripped copy of vdso*.so listed in $(vdso-install-y). | ||
82 | # | ||
83 | quiet_cmd_vdso_install = INSTALL $@ | ||
84 | cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@ | ||
85 | $(vdso-install-y): %.so: $(obj)/%.so.dbg FORCE | ||
86 | @mkdir -p $(MODLIB)/vdso | ||
87 | $(call cmd,vdso_install) | ||
88 | |||
89 | PHONY += vdso_install $(vdso-install-y) | ||
90 | vdso_install: $(vdso-install-y) | ||
diff --git a/arch/x86/um/vdso/checkundef.sh b/arch/x86/um/vdso/checkundef.sh new file mode 100644 index 000000000000..7ee90a9b549d --- /dev/null +++ b/arch/x86/um/vdso/checkundef.sh | |||
@@ -0,0 +1,10 @@ | |||
1 | #!/bin/sh | ||
2 | nm="$1" | ||
3 | file="$2" | ||
4 | $nm "$file" | grep '^ *U' > /dev/null 2>&1 | ||
5 | if [ $? -eq 1 ]; then | ||
6 | exit 0 | ||
7 | else | ||
8 | echo "$file: undefined symbols found" >&2 | ||
9 | exit 1 | ||
10 | fi | ||
diff --git a/arch/x86/um/vdso/um_vdso.c b/arch/x86/um/vdso/um_vdso.c new file mode 100644 index 000000000000..7c441b59d375 --- /dev/null +++ b/arch/x86/um/vdso/um_vdso.c | |||
@@ -0,0 +1,71 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2011 Richard Weinberger <richrd@nod.at> | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * This vDSO turns all calls into a syscall so that UML can trap them. | ||
9 | */ | ||
10 | |||
11 | |||
12 | /* Disable profiling for userspace code */ | ||
13 | #define DISABLE_BRANCH_PROFILING | ||
14 | |||
15 | #include <linux/time.h> | ||
16 | #include <linux/getcpu.h> | ||
17 | #include <asm/unistd.h> | ||
18 | |||
19 | int __vdso_clock_gettime(clockid_t clock, struct timespec *ts) | ||
20 | { | ||
21 | long ret; | ||
22 | |||
23 | asm("syscall" : "=a" (ret) : | ||
24 | "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory"); | ||
25 | |||
26 | return ret; | ||
27 | } | ||
28 | int clock_gettime(clockid_t, struct timespec *) | ||
29 | __attribute__((weak, alias("__vdso_clock_gettime"))); | ||
30 | |||
31 | int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz) | ||
32 | { | ||
33 | long ret; | ||
34 | |||
35 | asm("syscall" : "=a" (ret) : | ||
36 | "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory"); | ||
37 | |||
38 | return ret; | ||
39 | } | ||
40 | int gettimeofday(struct timeval *, struct timezone *) | ||
41 | __attribute__((weak, alias("__vdso_gettimeofday"))); | ||
42 | |||
43 | time_t __vdso_time(time_t *t) | ||
44 | { | ||
45 | long secs; | ||
46 | |||
47 | asm volatile("syscall" | ||
48 | : "=a" (secs) | ||
49 | : "0" (__NR_time), "D" (t) : "cc", "r11", "cx", "memory"); | ||
50 | |||
51 | return secs; | ||
52 | } | ||
53 | int time(time_t *t) __attribute__((weak, alias("__vdso_time"))); | ||
54 | |||
55 | long | ||
56 | __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused) | ||
57 | { | ||
58 | /* | ||
59 | * UML does not support SMP, we can cheat here. :) | ||
60 | */ | ||
61 | |||
62 | if (cpu) | ||
63 | *cpu = 0; | ||
64 | if (node) | ||
65 | *node = 0; | ||
66 | |||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | long getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache) | ||
71 | __attribute__((weak, alias("__vdso_getcpu"))); | ||
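The weak-alias pattern used for every entry point above is what lets the plain names (clock_gettime, gettimeofday, time, getcpu) resolve to the __vdso_ implementations while still being overridable by a strong definition elsewhere. A stand-alone sketch of the same pattern with made-up names:

#include <stdio.h>

/* The "real" implementation carries the internal name. */
int __impl_answer(void)
{
	return 42;
}

/* The public name is only a weak alias, as in um_vdso.c above. */
int answer(void) __attribute__((weak, alias("__impl_answer")));

int main(void)
{
	printf("%d\n", answer());	/* prints 42 unless a strong answer() overrides it */
	return 0;
}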
diff --git a/arch/x86/um/vdso/vdso-layout.lds.S b/arch/x86/um/vdso/vdso-layout.lds.S new file mode 100644 index 000000000000..634a2cf62046 --- /dev/null +++ b/arch/x86/um/vdso/vdso-layout.lds.S | |||
@@ -0,0 +1,64 @@ | |||
1 | /* | ||
2 | * Linker script for vDSO. This is an ELF shared object prelinked to | ||
3 | * its virtual address, and with only one read-only segment. | ||
4 | * This script controls its layout. | ||
5 | */ | ||
6 | |||
7 | SECTIONS | ||
8 | { | ||
9 | . = VDSO_PRELINK + SIZEOF_HEADERS; | ||
10 | |||
11 | .hash : { *(.hash) } :text | ||
12 | .gnu.hash : { *(.gnu.hash) } | ||
13 | .dynsym : { *(.dynsym) } | ||
14 | .dynstr : { *(.dynstr) } | ||
15 | .gnu.version : { *(.gnu.version) } | ||
16 | .gnu.version_d : { *(.gnu.version_d) } | ||
17 | .gnu.version_r : { *(.gnu.version_r) } | ||
18 | |||
19 | .note : { *(.note.*) } :text :note | ||
20 | |||
21 | .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr | ||
22 | .eh_frame : { KEEP (*(.eh_frame)) } :text | ||
23 | |||
24 | .dynamic : { *(.dynamic) } :text :dynamic | ||
25 | |||
26 | .rodata : { *(.rodata*) } :text | ||
27 | .data : { | ||
28 | *(.data*) | ||
29 | *(.sdata*) | ||
30 | *(.got.plt) *(.got) | ||
31 | *(.gnu.linkonce.d.*) | ||
32 | *(.bss*) | ||
33 | *(.dynbss*) | ||
34 | *(.gnu.linkonce.b.*) | ||
35 | } | ||
36 | |||
37 | .altinstructions : { *(.altinstructions) } | ||
38 | .altinstr_replacement : { *(.altinstr_replacement) } | ||
39 | |||
40 | /* | ||
41 | * Align the actual code well away from the non-instruction data. | ||
42 | * This is the best thing for the I-cache. | ||
43 | */ | ||
44 | . = ALIGN(0x100); | ||
45 | |||
46 | .text : { *(.text*) } :text =0x90909090 | ||
47 | } | ||
48 | |||
49 | /* | ||
50 | * Very old versions of ld do not recognize this name token; use the constant. | ||
51 | */ | ||
52 | #define PT_GNU_EH_FRAME 0x6474e550 | ||
53 | |||
54 | /* | ||
55 | * We must supply the ELF program headers explicitly to get just one | ||
56 | * PT_LOAD segment, and set the flags explicitly to make segments read-only. | ||
57 | */ | ||
58 | PHDRS | ||
59 | { | ||
60 | text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */ | ||
61 | dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ | ||
62 | note PT_NOTE FLAGS(4); /* PF_R */ | ||
63 | eh_frame_hdr PT_GNU_EH_FRAME; | ||
64 | } | ||
diff --git a/arch/x86/um/vdso/vdso-note.S b/arch/x86/um/vdso/vdso-note.S new file mode 100644 index 000000000000..79a071e4357e --- /dev/null +++ b/arch/x86/um/vdso/vdso-note.S | |||
@@ -0,0 +1,12 @@ | |||
1 | /* | ||
2 | * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text. | ||
3 | * Here we can supply some information useful to userland. | ||
4 | */ | ||
5 | |||
6 | #include <linux/uts.h> | ||
7 | #include <linux/version.h> | ||
8 | #include <linux/elfnote.h> | ||
9 | |||
10 | ELFNOTE_START(Linux, 0, "a") | ||
11 | .long LINUX_VERSION_CODE | ||
12 | ELFNOTE_END | ||
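The note payload is the raw LINUX_VERSION_CODE word, which packs the kernel version as KERNEL_VERSION(a, b, c) = (a << 16) + (b << 8) + c. A small illustrative decoder, not part of the patch:

#include <stdio.h>

/* Decode a LINUX_VERSION_CODE value back into a dotted version string. */
static void print_linux_version(unsigned int code)
{
        printf("%u.%u.%u\n", (code >> 16) & 0xff, (code >> 8) & 0xff, code & 0xff);
}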
diff --git a/arch/x86/um/vdso/vdso.S b/arch/x86/um/vdso/vdso.S
new file mode 100644
index 000000000000..1cb468adacbb
--- /dev/null
+++ b/arch/x86/um/vdso/vdso.S
@@ -0,0 +1,10 @@ | |||
1 | #include <linux/init.h> | ||
2 | |||
3 | __INITDATA | ||
4 | |||
5 | .globl vdso_start, vdso_end | ||
6 | vdso_start: | ||
7 | .incbin "arch/x86/um/vdso/vdso.so" | ||
8 | vdso_end: | ||
9 | |||
10 | __FINIT | ||
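The .incbin directive embeds the prelinked vdso.so verbatim into the kernel's init data, bracketed by the vdso_start/vdso_end labels. A sketch of how other kernel code can size the blob from those labels (the helper name is made up for illustration; vma.c below does the equivalent arithmetic in its BUG_ON and copy_page):

/* vdso_start/vdso_end are the labels emitted by vdso.S above. */
extern char vdso_start[], vdso_end[];

/* Size of the embedded vDSO image; it must fit in a single page. */
static inline unsigned long um_vdso_size(void)
{
        return (unsigned long)(vdso_end - vdso_start);
}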
diff --git a/arch/x86/um/vdso/vdso.lds.S b/arch/x86/um/vdso/vdso.lds.S
new file mode 100644
index 000000000000..b96b2677cad8
--- /dev/null
+++ b/arch/x86/um/vdso/vdso.lds.S
@@ -0,0 +1,32 @@ | |||
1 | /* | ||
2 | * Linker script for 64-bit vDSO. | ||
3 | * We #include the file to define the layout details. | ||
4 | * Here we only choose the prelinked virtual address. | ||
5 | * | ||
6 | * This file defines the version script giving the user-exported symbols in | ||
7 | * the DSO. We can define local symbols here called VDSO* to make their | ||
8 | * values visible using the asm-x86/vdso.h macros from the kernel proper. | ||
9 | */ | ||
10 | |||
11 | #define VDSO_PRELINK 0xffffffffff700000 | ||
12 | #include "vdso-layout.lds.S" | ||
13 | |||
14 | /* | ||
15 | * This controls what userland symbols we export from the vDSO. | ||
16 | */ | ||
17 | VERSION { | ||
18 | LINUX_2.6 { | ||
19 | global: | ||
20 | clock_gettime; | ||
21 | __vdso_clock_gettime; | ||
22 | gettimeofday; | ||
23 | __vdso_gettimeofday; | ||
24 | getcpu; | ||
25 | __vdso_getcpu; | ||
26 | time; | ||
27 | __vdso_time; | ||
28 | local: *; | ||
29 | }; | ||
30 | } | ||
31 | |||
32 | VDSO64_PRELINK = VDSO_PRELINK; | ||
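User space normally finds the symbols exported by this version script by walking the ELF image whose address the kernel passes in the AT_SYSINFO_EHDR auxiliary vector entry. A minimal sketch to confirm that a vDSO was handed to the process at all, assuming glibc 2.16 or later for getauxval(); not part of the patch:

#include <stdio.h>
#include <elf.h>
#include <sys/auxv.h>

int main(void)
{
        unsigned long ehdr = getauxval(AT_SYSINFO_EHDR);

        if (ehdr)
                printf("vDSO ELF header mapped at 0x%lx\n", ehdr);
        else
                printf("kernel provided no vDSO\n");

        return 0;
}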
diff --git a/arch/x86/um/vdso/vma.c b/arch/x86/um/vdso/vma.c
new file mode 100644
index 000000000000..91f4ec9a0a56
--- /dev/null
+++ b/arch/x86/um/vdso/vma.c
@@ -0,0 +1,74 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2011 Richard Weinberger <richrd@nod.at> | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | #include <linux/slab.h> | ||
10 | #include <linux/sched.h> | ||
11 | #include <linux/mm.h> | ||
12 | #include <asm/page.h> | ||
13 | #include <linux/init.h> | ||
14 | |||
15 | unsigned int __read_mostly vdso_enabled = 1; | ||
16 | unsigned long um_vdso_addr; | ||
17 | |||
18 | extern unsigned long task_size; | ||
19 | extern char vdso_start[], vdso_end[]; | ||
20 | |||
21 | static struct page **vdsop; | ||
22 | |||
23 | static int __init init_vdso(void) | ||
24 | { | ||
25 | struct page *um_vdso; | ||
26 | |||
27 | BUG_ON(vdso_end - vdso_start > PAGE_SIZE); | ||
28 | |||
29 | um_vdso_addr = task_size - PAGE_SIZE; | ||
30 | |||
31 | vdsop = kmalloc(sizeof(struct page *), GFP_KERNEL); | ||
32 | if (!vdsop) | ||
33 | goto oom; | ||
34 | |||
35 | um_vdso = alloc_page(GFP_KERNEL); | ||
36 | if (!um_vdso) { | ||
37 | kfree(vdsop); | ||
38 | |||
39 | goto oom; | ||
40 | } | ||
41 | |||
42 | copy_page(page_address(um_vdso), vdso_start); | ||
43 | *vdsop = um_vdso; | ||
44 | |||
45 | return 0; | ||
46 | |||
47 | oom: | ||
48 | printk(KERN_ERR "Cannot allocate vdso\n"); | ||
49 | vdso_enabled = 0; | ||
50 | |||
51 | return -ENOMEM; | ||
52 | } | ||
53 | subsys_initcall(init_vdso); | ||
54 | |||
55 | int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | ||
56 | { | ||
57 | int err; | ||
58 | struct mm_struct *mm = current->mm; | ||
59 | |||
60 | if (!vdso_enabled) | ||
61 | return 0; | ||
62 | |||
63 | down_write(&mm->mmap_sem); | ||
64 | |||
65 | err = install_special_mapping(mm, um_vdso_addr, PAGE_SIZE, | ||
66 | VM_READ|VM_EXEC| | ||
67 | VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| | ||
68 | VM_ALWAYSDUMP, | ||
69 | vdsop); | ||
70 | |||
71 | up_write(&mm->mmap_sem); | ||
72 | |||
73 | return err; | ||
74 | } | ||
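install_special_mapping() places the single vDSO page at um_vdso_addr, one page below task_size. A quick user-space sketch for eyeballing that mapping via /proc/self/maps; whether the region is labelled "[vdso]" depends on the kernel naming it (for example through arch_vma_name()), so on kernels that do not, it simply appears as an anonymous one-page mapping near the top of the address space:

#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[256];
        FILE *maps = fopen("/proc/self/maps", "r");

        if (!maps)
                return 1;

        /* Print any mapping the kernel reports as the vDSO. */
        while (fgets(line, sizeof(line), maps))
                if (strstr(line, "[vdso]"))
                        fputs(line, stdout);

        fclose(maps);
        return 0;
}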