aboutsummaryrefslogtreecommitdiffstats
path: root/arch/um/sys-x86
diff options
context:
space:
mode:
authorAl Viro <viro@ftp.linux.org.uk>2011-08-18 15:06:39 -0400
committerRichard Weinberger <richard@nod.at>2011-11-02 09:15:05 -0400
commit5c48b108ecbf6505d929e64d50dace13ac2bdf34 (patch)
tree016904f84fbe05aa301c5cdfe712d90f6bb828fe /arch/um/sys-x86
parent7bbe7204e93734fe79d8aac3e08a7cb4624b5004 (diff)
um: take arch/um/sys-x86 to arch/x86/um
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk> Signed-off-by: Richard Weinberger <richard@nod.at>
Diffstat (limited to 'arch/um/sys-x86')
-rw-r--r--arch/um/sys-x86/Makefile45
-rw-r--r--arch/um/sys-x86/asm/arch_hweight.h6
-rw-r--r--arch/um/sys-x86/asm/archparam.h20
-rw-r--r--arch/um/sys-x86/asm/checksum.h10
-rw-r--r--arch/um/sys-x86/asm/checksum_32.h201
-rw-r--r--arch/um/sys-x86/asm/checksum_64.h144
-rw-r--r--arch/um/sys-x86/asm/elf.h221
-rw-r--r--arch/um/sys-x86/asm/module.h23
-rw-r--r--arch/um/sys-x86/asm/processor.h15
-rw-r--r--arch/um/sys-x86/asm/processor_32.h73
-rw-r--r--arch/um/sys-x86/asm/processor_64.h51
-rw-r--r--arch/um/sys-x86/asm/ptrace.h5
-rw-r--r--arch/um/sys-x86/asm/ptrace_32.h51
-rw-r--r--arch/um/sys-x86/asm/ptrace_64.h72
-rw-r--r--arch/um/sys-x86/asm/system.h133
-rw-r--r--arch/um/sys-x86/asm/vm-flags.h25
-rw-r--r--arch/um/sys-x86/bug.c21
-rw-r--r--arch/um/sys-x86/bugs_32.c74
-rw-r--r--arch/um/sys-x86/bugs_64.c15
-rw-r--r--arch/um/sys-x86/checksum_32.S458
-rw-r--r--arch/um/sys-x86/delay_32.c60
-rw-r--r--arch/um/sys-x86/delay_64.c60
-rw-r--r--arch/um/sys-x86/elfcore.c83
-rw-r--r--arch/um/sys-x86/fault.c28
-rw-r--r--arch/um/sys-x86/ksyms.c13
-rw-r--r--arch/um/sys-x86/ldt.c502
-rw-r--r--arch/um/sys-x86/mem_32.c62
-rw-r--r--arch/um/sys-x86/mem_64.c26
-rw-r--r--arch/um/sys-x86/ptrace_32.c273
-rw-r--r--arch/um/sys-x86/ptrace_64.c271
-rw-r--r--arch/um/sys-x86/ptrace_user.c21
-rw-r--r--arch/um/sys-x86/setjmp_32.S58
-rw-r--r--arch/um/sys-x86/setjmp_64.S54
-rw-r--r--arch/um/sys-x86/shared/sysdep/archsetjmp.h5
-rw-r--r--arch/um/sys-x86/shared/sysdep/archsetjmp_32.h22
-rw-r--r--arch/um/sys-x86/shared/sysdep/archsetjmp_64.h24
-rw-r--r--arch/um/sys-x86/shared/sysdep/faultinfo.h5
-rw-r--r--arch/um/sys-x86/shared/sysdep/faultinfo_32.h35
-rw-r--r--arch/um/sys-x86/shared/sysdep/faultinfo_64.h35
-rw-r--r--arch/um/sys-x86/shared/sysdep/host_ldt.h5
-rw-r--r--arch/um/sys-x86/shared/sysdep/host_ldt_32.h34
-rw-r--r--arch/um/sys-x86/shared/sysdep/host_ldt_64.h38
-rw-r--r--arch/um/sys-x86/shared/sysdep/kernel-offsets.h21
-rw-r--r--arch/um/sys-x86/shared/sysdep/mcontext.h31
-rw-r--r--arch/um/sys-x86/shared/sysdep/ptrace.h5
-rw-r--r--arch/um/sys-x86/shared/sysdep/ptrace_32.h117
-rw-r--r--arch/um/sys-x86/shared/sysdep/ptrace_64.h160
-rw-r--r--arch/um/sys-x86/shared/sysdep/ptrace_user.h5
-rw-r--r--arch/um/sys-x86/shared/sysdep/ptrace_user_32.h26
-rw-r--r--arch/um/sys-x86/shared/sysdep/ptrace_user_64.h38
-rw-r--r--arch/um/sys-x86/shared/sysdep/skas_ptrace.h22
-rw-r--r--arch/um/sys-x86/shared/sysdep/stub.h14
-rw-r--r--arch/um/sys-x86/shared/sysdep/stub_32.h93
-rw-r--r--arch/um/sys-x86/shared/sysdep/stub_64.h99
-rw-r--r--arch/um/sys-x86/shared/sysdep/syscalls.h5
-rw-r--r--arch/um/sys-x86/shared/sysdep/syscalls_32.h20
-rw-r--r--arch/um/sys-x86/shared/sysdep/syscalls_64.h32
-rw-r--r--arch/um/sys-x86/shared/sysdep/tls.h5
-rw-r--r--arch/um/sys-x86/shared/sysdep/tls_32.h32
-rw-r--r--arch/um/sys-x86/shared/sysdep/tls_64.h29
-rw-r--r--arch/um/sys-x86/signal_32.c498
-rw-r--r--arch/um/sys-x86/signal_64.c255
-rw-r--r--arch/um/sys-x86/stub_32.S51
-rw-r--r--arch/um/sys-x86/stub_64.S66
-rw-r--r--arch/um/sys-x86/stub_segv.c19
-rw-r--r--arch/um/sys-x86/sys_call_table_32.S28
-rw-r--r--arch/um/sys-x86/sys_call_table_64.c64
-rw-r--r--arch/um/sys-x86/syscalls_32.c66
-rw-r--r--arch/um/sys-x86/syscalls_64.c102
-rw-r--r--arch/um/sys-x86/sysrq_32.c101
-rw-r--r--arch/um/sys-x86/sysrq_64.c41
-rw-r--r--arch/um/sys-x86/tls_32.c396
-rw-r--r--arch/um/sys-x86/tls_64.c17
-rw-r--r--arch/um/sys-x86/user-offsets.c79
-rw-r--r--arch/um/sys-x86/vdso/Makefile90
-rw-r--r--arch/um/sys-x86/vdso/checkundef.sh10
-rw-r--r--arch/um/sys-x86/vdso/um_vdso.c71
-rw-r--r--arch/um/sys-x86/vdso/vdso-layout.lds.S64
-rw-r--r--arch/um/sys-x86/vdso/vdso-note.S12
-rw-r--r--arch/um/sys-x86/vdso/vdso.S10
-rw-r--r--arch/um/sys-x86/vdso/vdso.lds.S32
-rw-r--r--arch/um/sys-x86/vdso/vma.c74
82 files changed, 0 insertions, 6277 deletions
diff --git a/arch/um/sys-x86/Makefile b/arch/um/sys-x86/Makefile
deleted file mode 100644
index 81ab3484a430..000000000000
--- a/arch/um/sys-x86/Makefile
+++ /dev/null
@@ -1,45 +0,0 @@
1#
2# Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3#
4
5ifeq ($(CONFIG_X86_32),y)
6 BITS := 32
7else
8 BITS := 64
9endif
10
11obj-y = bug.o bugs_$(BITS).o delay_$(BITS).o fault.o ksyms.o ldt.o \
12 ptrace_$(BITS).o ptrace_user.o setjmp_$(BITS).o signal_$(BITS).o \
13 stub_$(BITS).o stub_segv.o syscalls_$(BITS).o \
14 sys_call_table_$(BITS).o sysrq_$(BITS).o tls_$(BITS).o mem_$(BITS).o
15
16ifeq ($(CONFIG_X86_32),y)
17
18obj-y += checksum_32.o
19obj-$(CONFIG_BINFMT_ELF) += elfcore.o
20
21subarch-obj-y = lib/string_32.o lib/atomic64_32.o lib/atomic64_cx8_32.o
22subarch-obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += lib/rwsem.o
23subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem_32.o
24subarch-obj-$(CONFIG_MODULES) += kernel/module.o
25
26else
27
28obj-y += vdso/
29
30subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o \
31 lib/rwsem.o
32
33endif
34
35subarch-obj-$(CONFIG_MODULES) += kernel/module.o
36
37USER_OBJS := bugs_$(BITS).o ptrace_user.o fault.o
38
39extra-y += user-offsets.s
40$(obj)/user-offsets.s: c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS)
41
42UNPROFILE_OBJS := stub_segv.o
43CFLAGS_stub_segv.o := $(CFLAGS_NO_HARDENING)
44
45include arch/um/scripts/Makefile.rules
diff --git a/arch/um/sys-x86/asm/arch_hweight.h b/arch/um/sys-x86/asm/arch_hweight.h
deleted file mode 100644
index c656cf443f4a..000000000000
--- a/arch/um/sys-x86/asm/arch_hweight.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _ASM_UM_HWEIGHT_H
2#define _ASM_UM_HWEIGHT_H
3
4#include <asm-generic/bitops/arch_hweight.h>
5
6#endif
diff --git a/arch/um/sys-x86/asm/archparam.h b/arch/um/sys-x86/asm/archparam.h
deleted file mode 100644
index c17cf68dda0f..000000000000
--- a/arch/um/sys-x86/asm/archparam.h
+++ /dev/null
@@ -1,20 +0,0 @@
1/*
2 * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
3 * Copyright 2003 PathScale, Inc.
4 * Licensed under the GPL
5 */
6
7#ifndef __UM_ARCHPARAM_H
8#define __UM_ARCHPARAM_H
9
10#ifdef CONFIG_X86_32
11
12#ifdef CONFIG_X86_PAE
13#define LAST_PKMAP 512
14#else
15#define LAST_PKMAP 1024
16#endif
17
18#endif
19
20#endif
diff --git a/arch/um/sys-x86/asm/checksum.h b/arch/um/sys-x86/asm/checksum.h
deleted file mode 100644
index b6efe2381b5d..000000000000
--- a/arch/um/sys-x86/asm/checksum.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef __UM_CHECKSUM_H
2#define __UM_CHECKSUM_H
3
4#ifdef CONFIG_X86_32
5# include "checksum_32.h"
6#else
7# include "checksum_64.h"
8#endif
9
10#endif
diff --git a/arch/um/sys-x86/asm/checksum_32.h b/arch/um/sys-x86/asm/checksum_32.h
deleted file mode 100644
index caab74252e27..000000000000
--- a/arch/um/sys-x86/asm/checksum_32.h
+++ /dev/null
@@ -1,201 +0,0 @@
1/*
2 * Licensed under the GPL
3 */
4
5#ifndef __UM_SYSDEP_CHECKSUM_H
6#define __UM_SYSDEP_CHECKSUM_H
7
8#include "linux/in6.h"
9#include "linux/string.h"
10
11/*
12 * computes the checksum of a memory block at buff, length len,
13 * and adds in "sum" (32-bit)
14 *
15 * returns a 32-bit number suitable for feeding into itself
16 * or csum_tcpudp_magic
17 *
18 * this function must be called with even lengths, except
19 * for the last fragment, which may be odd
20 *
21 * it's best to have buff aligned on a 32-bit boundary
22 */
23__wsum csum_partial(const void *buff, int len, __wsum sum);
24
25/*
26 * Note: when you get a NULL pointer exception here this means someone
27 * passed in an incorrect kernel address to one of these functions.
28 *
29 * If you use these functions directly please don't forget the
30 * access_ok().
31 */
32
33static __inline__
34__wsum csum_partial_copy_nocheck(const void *src, void *dst,
35 int len, __wsum sum)
36{
37 memcpy(dst, src, len);
38 return csum_partial(dst, len, sum);
39}
40
41/*
42 * the same as csum_partial, but copies from src while it
43 * checksums, and handles user-space pointer exceptions correctly, when needed.
44 *
45 * here even more important to align src and dst on a 32-bit (or even
46 * better 64-bit) boundary
47 */
48
49static __inline__
50__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
51 int len, __wsum sum, int *err_ptr)
52{
53 if (copy_from_user(dst, src, len)) {
54 *err_ptr = -EFAULT;
55 return (__force __wsum)-1;
56 }
57
58 return csum_partial(dst, len, sum);
59}
60
61/*
62 * This is a version of ip_compute_csum() optimized for IP headers,
63 * which always checksum on 4 octet boundaries.
64 *
65 * By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
66 * Arnt Gulbrandsen.
67 */
68static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
69{
70 unsigned int sum;
71
72 __asm__ __volatile__(
73 "movl (%1), %0 ;\n"
74 "subl $4, %2 ;\n"
75 "jbe 2f ;\n"
76 "addl 4(%1), %0 ;\n"
77 "adcl 8(%1), %0 ;\n"
78 "adcl 12(%1), %0 ;\n"
79"1: adcl 16(%1), %0 ;\n"
80 "lea 4(%1), %1 ;\n"
81 "decl %2 ;\n"
82 "jne 1b ;\n"
83 "adcl $0, %0 ;\n"
84 "movl %0, %2 ;\n"
85 "shrl $16, %0 ;\n"
86 "addw %w2, %w0 ;\n"
87 "adcl $0, %0 ;\n"
88 "notl %0 ;\n"
89"2: ;\n"
90 /* Since the input registers which are loaded with iph and ipl
91 are modified, we must also specify them as outputs, or gcc
92 will assume they contain their original values. */
93 : "=r" (sum), "=r" (iph), "=r" (ihl)
94 : "1" (iph), "2" (ihl)
95 : "memory");
96 return (__force __sum16)sum;
97}
98
99/*
100 * Fold a partial checksum
101 */
102
103static inline __sum16 csum_fold(__wsum sum)
104{
105 __asm__(
106 "addl %1, %0 ;\n"
107 "adcl $0xffff, %0 ;\n"
108 : "=r" (sum)
109 : "r" ((__force u32)sum << 16),
110 "0" ((__force u32)sum & 0xffff0000)
111 );
112 return (__force __sum16)(~(__force u32)sum >> 16);
113}
114
115static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
116 unsigned short len,
117 unsigned short proto,
118 __wsum sum)
119{
120 __asm__(
121 "addl %1, %0 ;\n"
122 "adcl %2, %0 ;\n"
123 "adcl %3, %0 ;\n"
124 "adcl $0, %0 ;\n"
125 : "=r" (sum)
126 : "g" (daddr), "g"(saddr), "g"((len + proto) << 8), "0"(sum));
127 return sum;
128}
129
130/*
131 * computes the checksum of the TCP/UDP pseudo-header
132 * returns a 16-bit checksum, already complemented
133 */
134static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
135 unsigned short len,
136 unsigned short proto,
137 __wsum sum)
138{
139 return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
140}
141
142/*
143 * this routine is used for miscellaneous IP-like checksums, mainly
144 * in icmp.c
145 */
146
147static inline __sum16 ip_compute_csum(const void *buff, int len)
148{
149 return csum_fold (csum_partial(buff, len, 0));
150}
151
152#define _HAVE_ARCH_IPV6_CSUM
153static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
154 const struct in6_addr *daddr,
155 __u32 len, unsigned short proto,
156 __wsum sum)
157{
158 __asm__(
159 "addl 0(%1), %0 ;\n"
160 "adcl 4(%1), %0 ;\n"
161 "adcl 8(%1), %0 ;\n"
162 "adcl 12(%1), %0 ;\n"
163 "adcl 0(%2), %0 ;\n"
164 "adcl 4(%2), %0 ;\n"
165 "adcl 8(%2), %0 ;\n"
166 "adcl 12(%2), %0 ;\n"
167 "adcl %3, %0 ;\n"
168 "adcl %4, %0 ;\n"
169 "adcl $0, %0 ;\n"
170 : "=&r" (sum)
171 : "r" (saddr), "r" (daddr),
172 "r"(htonl(len)), "r"(htonl(proto)), "0"(sum));
173
174 return csum_fold(sum);
175}
176
177/*
178 * Copy and checksum to user
179 */
180#define HAVE_CSUM_COPY_USER
181static __inline__ __wsum csum_and_copy_to_user(const void *src,
182 void __user *dst,
183 int len, __wsum sum, int *err_ptr)
184{
185 if (access_ok(VERIFY_WRITE, dst, len)) {
186 if (copy_to_user(dst, src, len)) {
187 *err_ptr = -EFAULT;
188 return (__force __wsum)-1;
189 }
190
191 return csum_partial(src, len, sum);
192 }
193
194 if (len)
195 *err_ptr = -EFAULT;
196
197 return (__force __wsum)-1; /* invalid checksum */
198}
199
200#endif
201
diff --git a/arch/um/sys-x86/asm/checksum_64.h b/arch/um/sys-x86/asm/checksum_64.h
deleted file mode 100644
index a5be9031ea85..000000000000
--- a/arch/um/sys-x86/asm/checksum_64.h
+++ /dev/null
@@ -1,144 +0,0 @@
1/*
2 * Licensed under the GPL
3 */
4
5#ifndef __UM_SYSDEP_CHECKSUM_H
6#define __UM_SYSDEP_CHECKSUM_H
7
8#include "linux/string.h"
9#include "linux/in6.h"
10#include "asm/uaccess.h"
11
12extern __wsum csum_partial(const void *buff, int len, __wsum sum);
13
14/*
15 * Note: when you get a NULL pointer exception here this means someone
16 * passed in an incorrect kernel address to one of these functions.
17 *
18 * If you use these functions directly please don't forget the
19 * access_ok().
20 */
21
22static __inline__
23__wsum csum_partial_copy_nocheck(const void *src, void *dst,
24 int len, __wsum sum)
25{
26 memcpy(dst, src, len);
27 return(csum_partial(dst, len, sum));
28}
29
30static __inline__
31__wsum csum_partial_copy_from_user(const void __user *src,
32 void *dst, int len, __wsum sum,
33 int *err_ptr)
34{
35 if (copy_from_user(dst, src, len)) {
36 *err_ptr = -EFAULT;
37 return (__force __wsum)-1;
38 }
39 return csum_partial(dst, len, sum);
40}
41
42/**
43 * csum_fold - Fold and invert a 32bit checksum.
44 * sum: 32bit unfolded sum
45 *
46 * Fold a 32bit running checksum to 16bit and invert it. This is usually
47 * the last step before putting a checksum into a packet.
48 * Make sure not to mix with 64bit checksums.
49 */
50static inline __sum16 csum_fold(__wsum sum)
51{
52 __asm__(
53 " addl %1,%0\n"
54 " adcl $0xffff,%0"
55 : "=r" (sum)
56 : "r" ((__force u32)sum << 16),
57 "0" ((__force u32)sum & 0xffff0000)
58 );
59 return (__force __sum16)(~(__force u32)sum >> 16);
60}
61
62/**
63 * csum_tcpup_nofold - Compute an IPv4 pseudo header checksum.
64 * @saddr: source address
65 * @daddr: destination address
66 * @len: length of packet
67 * @proto: ip protocol of packet
68 * @sum: initial sum to be added in (32bit unfolded)
69 *
70 * Returns the pseudo header checksum the input data. Result is
71 * 32bit unfolded.
72 */
73static inline __wsum
74csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
75 unsigned short proto, __wsum sum)
76{
77 asm(" addl %1, %0\n"
78 " adcl %2, %0\n"
79 " adcl %3, %0\n"
80 " adcl $0, %0\n"
81 : "=r" (sum)
82 : "g" (daddr), "g" (saddr), "g" ((len + proto) << 8), "0" (sum));
83 return sum;
84}
85
86/*
87 * computes the checksum of the TCP/UDP pseudo-header
88 * returns a 16-bit checksum, already complemented
89 */
90static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
91 unsigned short len,
92 unsigned short proto,
93 __wsum sum)
94{
95 return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
96}
97
98/**
99 * ip_fast_csum - Compute the IPv4 header checksum efficiently.
100 * iph: ipv4 header
101 * ihl: length of header / 4
102 */
103static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
104{
105 unsigned int sum;
106
107 asm( " movl (%1), %0\n"
108 " subl $4, %2\n"
109 " jbe 2f\n"
110 " addl 4(%1), %0\n"
111 " adcl 8(%1), %0\n"
112 " adcl 12(%1), %0\n"
113 "1: adcl 16(%1), %0\n"
114 " lea 4(%1), %1\n"
115 " decl %2\n"
116 " jne 1b\n"
117 " adcl $0, %0\n"
118 " movl %0, %2\n"
119 " shrl $16, %0\n"
120 " addw %w2, %w0\n"
121 " adcl $0, %0\n"
122 " notl %0\n"
123 "2:"
124 /* Since the input registers which are loaded with iph and ipl
125 are modified, we must also specify them as outputs, or gcc
126 will assume they contain their original values. */
127 : "=r" (sum), "=r" (iph), "=r" (ihl)
128 : "1" (iph), "2" (ihl)
129 : "memory");
130 return (__force __sum16)sum;
131}
132
133static inline unsigned add32_with_carry(unsigned a, unsigned b)
134{
135 asm("addl %2,%0\n\t"
136 "adcl $0,%0"
137 : "=r" (a)
138 : "0" (a), "r" (b));
139 return a;
140}
141
142extern __sum16 ip_compute_csum(const void *buff, int len);
143
144#endif
diff --git a/arch/um/sys-x86/asm/elf.h b/arch/um/sys-x86/asm/elf.h
deleted file mode 100644
index f3b0633b69a1..000000000000
--- a/arch/um/sys-x86/asm/elf.h
+++ /dev/null
@@ -1,221 +0,0 @@
1/*
2 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5#ifndef __UM_ELF_X86_H
6#define __UM_ELF_X86_H
7
8#include <asm/user.h>
9#include "skas.h"
10
11#ifdef CONFIG_X86_32
12
13#define R_386_NONE 0
14#define R_386_32 1
15#define R_386_PC32 2
16#define R_386_GOT32 3
17#define R_386_PLT32 4
18#define R_386_COPY 5
19#define R_386_GLOB_DAT 6
20#define R_386_JMP_SLOT 7
21#define R_386_RELATIVE 8
22#define R_386_GOTOFF 9
23#define R_386_GOTPC 10
24#define R_386_NUM 11
25
26/*
27 * This is used to ensure we don't load something for the wrong architecture.
28 */
29#define elf_check_arch(x) \
30 (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))
31
32#define ELF_CLASS ELFCLASS32
33#define ELF_DATA ELFDATA2LSB
34#define ELF_ARCH EM_386
35
36#define ELF_PLAT_INIT(regs, load_addr) do { \
37 PT_REGS_EBX(regs) = 0; \
38 PT_REGS_ECX(regs) = 0; \
39 PT_REGS_EDX(regs) = 0; \
40 PT_REGS_ESI(regs) = 0; \
41 PT_REGS_EDI(regs) = 0; \
42 PT_REGS_EBP(regs) = 0; \
43 PT_REGS_EAX(regs) = 0; \
44} while (0)
45
46/* Shamelessly stolen from include/asm-i386/elf.h */
47
48#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \
49 pr_reg[0] = PT_REGS_EBX(regs); \
50 pr_reg[1] = PT_REGS_ECX(regs); \
51 pr_reg[2] = PT_REGS_EDX(regs); \
52 pr_reg[3] = PT_REGS_ESI(regs); \
53 pr_reg[4] = PT_REGS_EDI(regs); \
54 pr_reg[5] = PT_REGS_EBP(regs); \
55 pr_reg[6] = PT_REGS_EAX(regs); \
56 pr_reg[7] = PT_REGS_DS(regs); \
57 pr_reg[8] = PT_REGS_ES(regs); \
58 /* fake once used fs and gs selectors? */ \
59 pr_reg[9] = PT_REGS_DS(regs); \
60 pr_reg[10] = PT_REGS_DS(regs); \
61 pr_reg[11] = PT_REGS_SYSCALL_NR(regs); \
62 pr_reg[12] = PT_REGS_IP(regs); \
63 pr_reg[13] = PT_REGS_CS(regs); \
64 pr_reg[14] = PT_REGS_EFLAGS(regs); \
65 pr_reg[15] = PT_REGS_SP(regs); \
66 pr_reg[16] = PT_REGS_SS(regs); \
67} while (0);
68
69extern char * elf_aux_platform;
70#define ELF_PLATFORM (elf_aux_platform)
71
72extern unsigned long vsyscall_ehdr;
73extern unsigned long vsyscall_end;
74extern unsigned long __kernel_vsyscall;
75
76/*
77 * This is the range that is readable by user mode, and things
78 * acting like user mode such as get_user_pages.
79 */
80#define FIXADDR_USER_START vsyscall_ehdr
81#define FIXADDR_USER_END vsyscall_end
82
83
84/*
85 * Architecture-neutral AT_ values in 0-17, leave some room
86 * for more of them, start the x86-specific ones at 32.
87 */
88#define AT_SYSINFO 32
89#define AT_SYSINFO_EHDR 33
90
91#define ARCH_DLINFO \
92do { \
93 if ( vsyscall_ehdr ) { \
94 NEW_AUX_ENT(AT_SYSINFO, __kernel_vsyscall); \
95 NEW_AUX_ENT(AT_SYSINFO_EHDR, vsyscall_ehdr); \
96 } \
97} while (0)
98
99#else
100
101/* x86-64 relocation types, taken from asm-x86_64/elf.h */
102#define R_X86_64_NONE 0 /* No reloc */
103#define R_X86_64_64 1 /* Direct 64 bit */
104#define R_X86_64_PC32 2 /* PC relative 32 bit signed */
105#define R_X86_64_GOT32 3 /* 32 bit GOT entry */
106#define R_X86_64_PLT32 4 /* 32 bit PLT address */
107#define R_X86_64_COPY 5 /* Copy symbol at runtime */
108#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */
109#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */
110#define R_X86_64_RELATIVE 8 /* Adjust by program base */
111#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative
112 offset to GOT */
113#define R_X86_64_32 10 /* Direct 32 bit zero extended */
114#define R_X86_64_32S 11 /* Direct 32 bit sign extended */
115#define R_X86_64_16 12 /* Direct 16 bit zero extended */
116#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
117#define R_X86_64_8 14 /* Direct 8 bit sign extended */
118#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
119
120#define R_X86_64_NUM 16
121
122/*
123 * This is used to ensure we don't load something for the wrong architecture.
124 */
125#define elf_check_arch(x) \
126 ((x)->e_machine == EM_X86_64)
127
128#define ELF_CLASS ELFCLASS64
129#define ELF_DATA ELFDATA2LSB
130#define ELF_ARCH EM_X86_64
131
132#define ELF_PLAT_INIT(regs, load_addr) do { \
133 PT_REGS_RBX(regs) = 0; \
134 PT_REGS_RCX(regs) = 0; \
135 PT_REGS_RDX(regs) = 0; \
136 PT_REGS_RSI(regs) = 0; \
137 PT_REGS_RDI(regs) = 0; \
138 PT_REGS_RBP(regs) = 0; \
139 PT_REGS_RAX(regs) = 0; \
140 PT_REGS_R8(regs) = 0; \
141 PT_REGS_R9(regs) = 0; \
142 PT_REGS_R10(regs) = 0; \
143 PT_REGS_R11(regs) = 0; \
144 PT_REGS_R12(regs) = 0; \
145 PT_REGS_R13(regs) = 0; \
146 PT_REGS_R14(regs) = 0; \
147 PT_REGS_R15(regs) = 0; \
148} while (0)
149
150#define ELF_CORE_COPY_REGS(pr_reg, _regs) \
151 (pr_reg)[0] = (_regs)->regs.gp[0]; \
152 (pr_reg)[1] = (_regs)->regs.gp[1]; \
153 (pr_reg)[2] = (_regs)->regs.gp[2]; \
154 (pr_reg)[3] = (_regs)->regs.gp[3]; \
155 (pr_reg)[4] = (_regs)->regs.gp[4]; \
156 (pr_reg)[5] = (_regs)->regs.gp[5]; \
157 (pr_reg)[6] = (_regs)->regs.gp[6]; \
158 (pr_reg)[7] = (_regs)->regs.gp[7]; \
159 (pr_reg)[8] = (_regs)->regs.gp[8]; \
160 (pr_reg)[9] = (_regs)->regs.gp[9]; \
161 (pr_reg)[10] = (_regs)->regs.gp[10]; \
162 (pr_reg)[11] = (_regs)->regs.gp[11]; \
163 (pr_reg)[12] = (_regs)->regs.gp[12]; \
164 (pr_reg)[13] = (_regs)->regs.gp[13]; \
165 (pr_reg)[14] = (_regs)->regs.gp[14]; \
166 (pr_reg)[15] = (_regs)->regs.gp[15]; \
167 (pr_reg)[16] = (_regs)->regs.gp[16]; \
168 (pr_reg)[17] = (_regs)->regs.gp[17]; \
169 (pr_reg)[18] = (_regs)->regs.gp[18]; \
170 (pr_reg)[19] = (_regs)->regs.gp[19]; \
171 (pr_reg)[20] = (_regs)->regs.gp[20]; \
172 (pr_reg)[21] = current->thread.arch.fs; \
173 (pr_reg)[22] = 0; \
174 (pr_reg)[23] = 0; \
175 (pr_reg)[24] = 0; \
176 (pr_reg)[25] = 0; \
177 (pr_reg)[26] = 0;
178
179#define ELF_PLATFORM "x86_64"
180
181/* No user-accessible fixmap addresses, i.e. vsyscall */
182#define FIXADDR_USER_START 0
183#define FIXADDR_USER_END 0
184
185#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
186struct linux_binprm;
187extern int arch_setup_additional_pages(struct linux_binprm *bprm,
188 int uses_interp);
189
190extern unsigned long um_vdso_addr;
191#define AT_SYSINFO_EHDR 33
192#define ARCH_DLINFO NEW_AUX_ENT(AT_SYSINFO_EHDR, um_vdso_addr)
193
194#endif
195
196typedef unsigned long elf_greg_t;
197
198#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
199typedef elf_greg_t elf_gregset_t[ELF_NGREG];
200
201typedef struct user_i387_struct elf_fpregset_t;
202
203#define task_pt_regs(t) (&(t)->thread.regs)
204
205struct task_struct;
206
207extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
208
209#define ELF_CORE_COPY_FPREGS(t, fpu) elf_core_copy_fpregs(t, fpu)
210
211#define ELF_EXEC_PAGESIZE 4096
212
213#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
214
215extern long elf_aux_hwcap;
216#define ELF_HWCAP (elf_aux_hwcap)
217
218#define SET_PERSONALITY(ex) do ; while(0)
219#define __HAVE_ARCH_GATE_AREA 1
220
221#endif
diff --git a/arch/um/sys-x86/asm/module.h b/arch/um/sys-x86/asm/module.h
deleted file mode 100644
index 61af80e932eb..000000000000
--- a/arch/um/sys-x86/asm/module.h
+++ /dev/null
@@ -1,23 +0,0 @@
1#ifndef __UM_MODULE_H
2#define __UM_MODULE_H
3
4/* UML is simple */
5struct mod_arch_specific
6{
7};
8
9#ifdef CONFIG_X86_32
10
11#define Elf_Shdr Elf32_Shdr
12#define Elf_Sym Elf32_Sym
13#define Elf_Ehdr Elf32_Ehdr
14
15#else
16
17#define Elf_Shdr Elf64_Shdr
18#define Elf_Sym Elf64_Sym
19#define Elf_Ehdr Elf64_Ehdr
20
21#endif
22
23#endif
diff --git a/arch/um/sys-x86/asm/processor.h b/arch/um/sys-x86/asm/processor.h
deleted file mode 100644
index d3ac1cecf0f4..000000000000
--- a/arch/um/sys-x86/asm/processor.h
+++ /dev/null
@@ -1,15 +0,0 @@
1#ifndef __UM_PROCESSOR_H
2#define __UM_PROCESSOR_H
3
4/* include faultinfo structure */
5#include <sysdep/faultinfo.h>
6
7#ifdef CONFIG_X86_32
8# include "processor_32.h"
9#else
10# include "processor_64.h"
11#endif
12
13#include <asm/processor-generic.h>
14
15#endif
diff --git a/arch/um/sys-x86/asm/processor_32.h b/arch/um/sys-x86/asm/processor_32.h
deleted file mode 100644
index ae0d189aafcf..000000000000
--- a/arch/um/sys-x86/asm/processor_32.h
+++ /dev/null
@@ -1,73 +0,0 @@
1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __UM_PROCESSOR_I386_H
7#define __UM_PROCESSOR_I386_H
8
9#include <linux/string.h>
10#include <sysdep/host_ldt.h>
11#include <asm/segment.h>
12
13extern int host_has_cmov;
14
15struct uml_tls_struct {
16 struct user_desc tls;
17 unsigned flushed:1;
18 unsigned present:1;
19};
20
21struct arch_thread {
22 struct uml_tls_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
23 unsigned long debugregs[8];
24 int debugregs_seq;
25 struct faultinfo faultinfo;
26};
27
28#define INIT_ARCH_THREAD { \
29 .tls_array = { [ 0 ... GDT_ENTRY_TLS_ENTRIES - 1 ] = \
30 { .present = 0, .flushed = 0 } }, \
31 .debugregs = { [ 0 ... 7 ] = 0 }, \
32 .debugregs_seq = 0, \
33 .faultinfo = { 0, 0, 0 } \
34}
35
36static inline void arch_flush_thread(struct arch_thread *thread)
37{
38 /* Clear any TLS still hanging */
39 memset(&thread->tls_array, 0, sizeof(thread->tls_array));
40}
41
42static inline void arch_copy_thread(struct arch_thread *from,
43 struct arch_thread *to)
44{
45 memcpy(&to->tls_array, &from->tls_array, sizeof(from->tls_array));
46}
47
48#include <asm/user.h>
49
50/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
51static inline void rep_nop(void)
52{
53 __asm__ __volatile__("rep;nop": : :"memory");
54}
55
56#define cpu_relax() rep_nop()
57
58/*
59 * Default implementation of macro that returns current
60 * instruction pointer ("program counter"). Stolen
61 * from asm-i386/processor.h
62 */
63#define current_text_addr() \
64 ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
65
66#define ARCH_IS_STACKGROW(address) \
67 (address + 32 >= UPT_SP(&current->thread.regs.regs))
68
69#define KSTK_EIP(tsk) KSTK_REG(tsk, EIP)
70#define KSTK_ESP(tsk) KSTK_REG(tsk, UESP)
71#define KSTK_EBP(tsk) KSTK_REG(tsk, EBP)
72
73#endif
diff --git a/arch/um/sys-x86/asm/processor_64.h b/arch/um/sys-x86/asm/processor_64.h
deleted file mode 100644
index 6db812b24f48..000000000000
--- a/arch/um/sys-x86/asm/processor_64.h
+++ /dev/null
@@ -1,51 +0,0 @@
1/*
2 * Copyright 2003 PathScale, Inc.
3 *
4 * Licensed under the GPL
5 */
6
7#ifndef __UM_PROCESSOR_X86_64_H
8#define __UM_PROCESSOR_X86_64_H
9
10struct arch_thread {
11 unsigned long debugregs[8];
12 int debugregs_seq;
13 unsigned long fs;
14 struct faultinfo faultinfo;
15};
16
17/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
18static inline void rep_nop(void)
19{
20 __asm__ __volatile__("rep;nop": : :"memory");
21}
22
23#define cpu_relax() rep_nop()
24
25#define INIT_ARCH_THREAD { .debugregs = { [ 0 ... 7 ] = 0 }, \
26 .debugregs_seq = 0, \
27 .fs = 0, \
28 .faultinfo = { 0, 0, 0 } }
29
30static inline void arch_flush_thread(struct arch_thread *thread)
31{
32}
33
34static inline void arch_copy_thread(struct arch_thread *from,
35 struct arch_thread *to)
36{
37 to->fs = from->fs;
38}
39
40#include <asm/user.h>
41
42#define current_text_addr() \
43 ({ void *pc; __asm__("movq $1f,%0\n1:":"=g" (pc)); pc; })
44
45#define ARCH_IS_STACKGROW(address) \
46 (address + 128 >= UPT_SP(&current->thread.regs.regs))
47
48#define KSTK_EIP(tsk) KSTK_REG(tsk, RIP)
49#define KSTK_ESP(tsk) KSTK_REG(tsk, RSP)
50
51#endif
diff --git a/arch/um/sys-x86/asm/ptrace.h b/arch/um/sys-x86/asm/ptrace.h
deleted file mode 100644
index c8aca8c501b0..000000000000
--- a/arch/um/sys-x86/asm/ptrace.h
+++ /dev/null
@@ -1,5 +0,0 @@
1#ifdef CONFIG_X86_32
2# include "ptrace_32.h"
3#else
4# include "ptrace_64.h"
5#endif
diff --git a/arch/um/sys-x86/asm/ptrace_32.h b/arch/um/sys-x86/asm/ptrace_32.h
deleted file mode 100644
index 5d2a59112537..000000000000
--- a/arch/um/sys-x86/asm/ptrace_32.h
+++ /dev/null
@@ -1,51 +0,0 @@
1/*
2 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __UM_PTRACE_I386_H
7#define __UM_PTRACE_I386_H
8
9#define HOST_AUDIT_ARCH AUDIT_ARCH_I386
10
11#include "linux/compiler.h"
12#include "asm/ptrace-generic.h"
13
14#define PT_REGS_EAX(r) UPT_EAX(&(r)->regs)
15#define PT_REGS_EBX(r) UPT_EBX(&(r)->regs)
16#define PT_REGS_ECX(r) UPT_ECX(&(r)->regs)
17#define PT_REGS_EDX(r) UPT_EDX(&(r)->regs)
18#define PT_REGS_ESI(r) UPT_ESI(&(r)->regs)
19#define PT_REGS_EDI(r) UPT_EDI(&(r)->regs)
20#define PT_REGS_EBP(r) UPT_EBP(&(r)->regs)
21
22#define PT_REGS_CS(r) UPT_CS(&(r)->regs)
23#define PT_REGS_SS(r) UPT_SS(&(r)->regs)
24#define PT_REGS_DS(r) UPT_DS(&(r)->regs)
25#define PT_REGS_ES(r) UPT_ES(&(r)->regs)
26#define PT_REGS_FS(r) UPT_FS(&(r)->regs)
27#define PT_REGS_GS(r) UPT_GS(&(r)->regs)
28
29#define PT_REGS_EFLAGS(r) UPT_EFLAGS(&(r)->regs)
30
31#define PT_REGS_ORIG_SYSCALL(r) PT_REGS_EAX(r)
32#define PT_REGS_SYSCALL_RET(r) PT_REGS_EAX(r)
33#define PT_FIX_EXEC_STACK(sp) do ; while(0)
34
35#define profile_pc(regs) PT_REGS_IP(regs)
36
37#define user_mode(r) UPT_IS_USER(&(r)->regs)
38
39/*
40 * Forward declaration to avoid including sysdep/tls.h, which causes a
41 * circular include, and compilation failures.
42 */
43struct user_desc;
44
45extern int ptrace_get_thread_area(struct task_struct *child, int idx,
46 struct user_desc __user *user_desc);
47
48extern int ptrace_set_thread_area(struct task_struct *child, int idx,
49 struct user_desc __user *user_desc);
50
51#endif
diff --git a/arch/um/sys-x86/asm/ptrace_64.h b/arch/um/sys-x86/asm/ptrace_64.h
deleted file mode 100644
index 83d8c473b905..000000000000
--- a/arch/um/sys-x86/asm/ptrace_64.h
+++ /dev/null
@@ -1,72 +0,0 @@
1/*
2 * Copyright 2003 PathScale, Inc.
3 *
4 * Licensed under the GPL
5 */
6
7#ifndef __UM_PTRACE_X86_64_H
8#define __UM_PTRACE_X86_64_H
9
10#include "linux/compiler.h"
11#include "asm/errno.h"
12
13#define __FRAME_OFFSETS /* Needed to get the R* macros */
14#include "asm/ptrace-generic.h"
15
16#define HOST_AUDIT_ARCH AUDIT_ARCH_X86_64
17
18#define PT_REGS_RBX(r) UPT_RBX(&(r)->regs)
19#define PT_REGS_RCX(r) UPT_RCX(&(r)->regs)
20#define PT_REGS_RDX(r) UPT_RDX(&(r)->regs)
21#define PT_REGS_RSI(r) UPT_RSI(&(r)->regs)
22#define PT_REGS_RDI(r) UPT_RDI(&(r)->regs)
23#define PT_REGS_RBP(r) UPT_RBP(&(r)->regs)
24#define PT_REGS_RAX(r) UPT_RAX(&(r)->regs)
25#define PT_REGS_R8(r) UPT_R8(&(r)->regs)
26#define PT_REGS_R9(r) UPT_R9(&(r)->regs)
27#define PT_REGS_R10(r) UPT_R10(&(r)->regs)
28#define PT_REGS_R11(r) UPT_R11(&(r)->regs)
29#define PT_REGS_R12(r) UPT_R12(&(r)->regs)
30#define PT_REGS_R13(r) UPT_R13(&(r)->regs)
31#define PT_REGS_R14(r) UPT_R14(&(r)->regs)
32#define PT_REGS_R15(r) UPT_R15(&(r)->regs)
33
34#define PT_REGS_FS(r) UPT_FS(&(r)->regs)
35#define PT_REGS_GS(r) UPT_GS(&(r)->regs)
36#define PT_REGS_DS(r) UPT_DS(&(r)->regs)
37#define PT_REGS_ES(r) UPT_ES(&(r)->regs)
38#define PT_REGS_SS(r) UPT_SS(&(r)->regs)
39#define PT_REGS_CS(r) UPT_CS(&(r)->regs)
40
41#define PT_REGS_ORIG_RAX(r) UPT_ORIG_RAX(&(r)->regs)
42#define PT_REGS_RIP(r) UPT_IP(&(r)->regs)
43#define PT_REGS_RSP(r) UPT_SP(&(r)->regs)
44
45#define PT_REGS_EFLAGS(r) UPT_EFLAGS(&(r)->regs)
46
47/* XXX */
48#define user_mode(r) UPT_IS_USER(&(r)->regs)
49#define PT_REGS_ORIG_SYSCALL(r) PT_REGS_RAX(r)
50#define PT_REGS_SYSCALL_RET(r) PT_REGS_RAX(r)
51
52#define PT_FIX_EXEC_STACK(sp) do ; while(0)
53
54#define profile_pc(regs) PT_REGS_IP(regs)
55
56struct user_desc;
57
58static inline int ptrace_get_thread_area(struct task_struct *child, int idx,
59 struct user_desc __user *user_desc)
60{
61 return -ENOSYS;
62}
63
64static inline int ptrace_set_thread_area(struct task_struct *child, int idx,
65 struct user_desc __user *user_desc)
66{
67 return -ENOSYS;
68}
69
70extern long arch_prctl(struct task_struct *task, int code,
71 unsigned long __user *addr);
72#endif
diff --git a/arch/um/sys-x86/asm/system.h b/arch/um/sys-x86/asm/system.h
deleted file mode 100644
index a89113bc74f2..000000000000
--- a/arch/um/sys-x86/asm/system.h
+++ /dev/null
@@ -1,133 +0,0 @@
1#ifndef _ASM_X86_SYSTEM_H_
2#define _ASM_X86_SYSTEM_H_
3
4#include <asm/asm.h>
5#include <asm/segment.h>
6#include <asm/cpufeature.h>
7#include <asm/cmpxchg.h>
8#include <asm/nops.h>
9#include <asm/system-um.h>
10
11#include <linux/kernel.h>
12#include <linux/irqflags.h>
13
14/* entries in ARCH_DLINFO: */
15#ifdef CONFIG_IA32_EMULATION
16# define AT_VECTOR_SIZE_ARCH 2
17#else
18# define AT_VECTOR_SIZE_ARCH 1
19#endif
20
21extern unsigned long arch_align_stack(unsigned long sp);
22
23void default_idle(void);
24
25/*
26 * Force strict CPU ordering.
27 * And yes, this is required on UP too when we're talking
28 * to devices.
29 */
30#ifdef CONFIG_X86_32
31/*
32 * Some non-Intel clones support out of order store. wmb() ceases to be a
33 * nop for these.
34 */
35#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
36#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
37#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
38#else
39#define mb() asm volatile("mfence":::"memory")
40#define rmb() asm volatile("lfence":::"memory")
41#define wmb() asm volatile("sfence" ::: "memory")
42#endif
43
44/**
45 * read_barrier_depends - Flush all pending reads that subsequents reads
46 * depend on.
47 *
48 * No data-dependent reads from memory-like regions are ever reordered
49 * over this barrier. All reads preceding this primitive are guaranteed
50 * to access memory (but not necessarily other CPUs' caches) before any
51 * reads following this primitive that depend on the data return by
52 * any of the preceding reads. This primitive is much lighter weight than
53 * rmb() on most CPUs, and is never heavier weight than is
54 * rmb().
55 *
56 * These ordering constraints are respected by both the local CPU
57 * and the compiler.
58 *
59 * Ordering is not guaranteed by anything other than these primitives,
60 * not even by data dependencies. See the documentation for
61 * memory_barrier() for examples and URLs to more information.
62 *
63 * For example, the following code would force ordering (the initial
64 * value of "a" is zero, "b" is one, and "p" is "&a"):
65 *
66 * <programlisting>
67 * CPU 0 CPU 1
68 *
69 * b = 2;
70 * memory_barrier();
71 * p = &b; q = p;
72 * read_barrier_depends();
73 * d = *q;
74 * </programlisting>
75 *
76 * because the read of "*q" depends on the read of "p" and these
77 * two reads are separated by a read_barrier_depends(). However,
78 * the following code, with the same initial values for "a" and "b":
79 *
80 * <programlisting>
81 * CPU 0 CPU 1
82 *
83 * a = 2;
84 * memory_barrier();
85 * b = 3; y = b;
86 * read_barrier_depends();
87 * x = a;
88 * </programlisting>
89 *
90 * does not enforce ordering, since there is no data dependency between
91 * the read of "a" and the read of "b". Therefore, on some CPUs, such
92 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
93 * in cases like this where there are no data dependencies.
94 **/
95
96#define read_barrier_depends() do { } while (0)
97
98#ifdef CONFIG_SMP
99#define smp_mb() mb()
100#ifdef CONFIG_X86_PPRO_FENCE
101# define smp_rmb() rmb()
102#else
103# define smp_rmb() barrier()
104#endif
105#ifdef CONFIG_X86_OOSTORE
106# define smp_wmb() wmb()
107#else
108# define smp_wmb() barrier()
109#endif
110#define smp_read_barrier_depends() read_barrier_depends()
111#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
112#else
113#define smp_mb() barrier()
114#define smp_rmb() barrier()
115#define smp_wmb() barrier()
116#define smp_read_barrier_depends() do { } while (0)
117#define set_mb(var, value) do { var = value; barrier(); } while (0)
118#endif
119
120/*
121 * Stop RDTSC speculation. This is needed when you need to use RDTSC
122 * (or get_cycles or vread that possibly accesses the TSC) in a defined
123 * code region.
124 *
125 * (Could use an alternative three way for this if there was one.)
126 */
127static inline void rdtsc_barrier(void)
128{
129 alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
130 alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
131}
132
133#endif
diff --git a/arch/um/sys-x86/asm/vm-flags.h b/arch/um/sys-x86/asm/vm-flags.h
deleted file mode 100644
index 7c297e9e2413..000000000000
--- a/arch/um/sys-x86/asm/vm-flags.h
+++ /dev/null
@@ -1,25 +0,0 @@
1/*
2 * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
3 * Copyright 2003 PathScale, Inc.
4 * Licensed under the GPL
5 */
6
7#ifndef __VM_FLAGS_X86_H
8#define __VM_FLAGS_X86_H
9
10#ifdef CONFIG_X86_32
11
12#define VM_DATA_DEFAULT_FLAGS \
13 (VM_READ | VM_WRITE | \
14 ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
15 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
16
17#else
18
19#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
20 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
21#define VM_STACK_DEFAULT_FLAGS (VM_GROWSDOWN | VM_READ | VM_WRITE | \
22 VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
23
24#endif
25#endif
diff --git a/arch/um/sys-x86/bug.c b/arch/um/sys-x86/bug.c
deleted file mode 100644
index e8034e363d83..000000000000
--- a/arch/um/sys-x86/bug.c
+++ /dev/null
@@ -1,21 +0,0 @@
1/*
2 * Copyright (C) 2006 Jeff Dike (jdike@addtoit.com)
3 * Licensed under the GPL V2
4 */
5
6#include <linux/uaccess.h>
7
8/*
9 * Mostly copied from i386/x86_86 - eliminated the eip < PAGE_OFFSET because
10 * that's not relevant in skas mode.
11 */
12
13int is_valid_bugaddr(unsigned long eip)
14{
15 unsigned short ud2;
16
17 if (probe_kernel_address((unsigned short __user *)eip, ud2))
18 return 0;
19
20 return ud2 == 0x0b0f;
21}
diff --git a/arch/um/sys-x86/bugs_32.c b/arch/um/sys-x86/bugs_32.c
deleted file mode 100644
index 7058e1fa903b..000000000000
--- a/arch/um/sys-x86/bugs_32.c
+++ /dev/null
@@ -1,74 +0,0 @@
1/*
2 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#include <signal.h>
7#include "kern_util.h"
8#include "longjmp.h"
9#include "task.h"
10#include "sysdep/ptrace.h"
11
12/* Set during early boot */
13static int host_has_cmov = 1;
14static jmp_buf cmov_test_return;
15
16static void cmov_sigill_test_handler(int sig)
17{
18 host_has_cmov = 0;
19 longjmp(cmov_test_return, 1);
20}
21
22void arch_check_bugs(void)
23{
24 struct sigaction old, new;
25
26 printk(UM_KERN_INFO "Checking for host processor cmov support...");
27 new.sa_handler = cmov_sigill_test_handler;
28
29 /* Make sure that SIGILL is enabled after the handler longjmps back */
30 new.sa_flags = SA_NODEFER;
31 sigemptyset(&new.sa_mask);
32 sigaction(SIGILL, &new, &old);
33
34 if (setjmp(cmov_test_return) == 0) {
35 unsigned long foo = 0;
36 __asm__ __volatile__("cmovz %0, %1" : "=r" (foo) : "0" (foo));
37 printk(UM_KERN_CONT "Yes\n");
38 } else
39 printk(UM_KERN_CONT "No\n");
40
41 sigaction(SIGILL, &old, &new);
42}
43
44void arch_examine_signal(int sig, struct uml_pt_regs *regs)
45{
46 unsigned char tmp[2];
47
48 /*
49 * This is testing for a cmov (0x0f 0x4x) instruction causing a
50 * SIGILL in init.
51 */
52 if ((sig != SIGILL) || (TASK_PID(get_current()) != 1))
53 return;
54
55 if (copy_from_user_proc(tmp, (void *) UPT_IP(regs), 2)) {
56 printk(UM_KERN_ERR "SIGILL in init, could not read "
57 "instructions!\n");
58 return;
59 }
60
61 if ((tmp[0] != 0x0f) || ((tmp[1] & 0xf0) != 0x40))
62 return;
63
64 if (host_has_cmov == 0)
65 printk(UM_KERN_ERR "SIGILL caused by cmov, which this "
66 "processor doesn't implement. Boot a filesystem "
67 "compiled for older processors");
68 else if (host_has_cmov == 1)
69 printk(UM_KERN_ERR "SIGILL caused by cmov, which this "
70 "processor claims to implement");
71 else
72 printk(UM_KERN_ERR "Bad value for host_has_cmov (%d)",
73 host_has_cmov);
74}
diff --git a/arch/um/sys-x86/bugs_64.c b/arch/um/sys-x86/bugs_64.c
deleted file mode 100644
index 44e02ba2a265..000000000000
--- a/arch/um/sys-x86/bugs_64.c
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Copyright 2003 PathScale, Inc.
3 *
4 * Licensed under the GPL
5 */
6
7#include "sysdep/ptrace.h"
8
9void arch_check_bugs(void)
10{
11}
12
13void arch_examine_signal(int sig, struct uml_pt_regs *regs)
14{
15}
diff --git a/arch/um/sys-x86/checksum_32.S b/arch/um/sys-x86/checksum_32.S
deleted file mode 100644
index f058d2f82e18..000000000000
--- a/arch/um/sys-x86/checksum_32.S
+++ /dev/null
@@ -1,458 +0,0 @@
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * IP/TCP/UDP checksumming routines
7 *
8 * Authors: Jorge Cwik, <jorge@laser.satlink.net>
9 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
10 * Tom May, <ftom@netcom.com>
11 * Pentium Pro/II routines:
12 * Alexander Kjeldaas <astor@guardian.no>
13 * Finn Arne Gangstad <finnag@guardian.no>
14 * Lots of code moved from tcp.c and ip.c; see those files
15 * for more names.
16 *
17 * Changes: Ingo Molnar, converted csum_partial_copy() to 2.1 exception
18 * handling.
19 * Andi Kleen, add zeroing on error
20 * converted to pure assembler
21 *
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation; either version
25 * 2 of the License, or (at your option) any later version.
26 */
27
28#include <asm/errno.h>
29
30/*
31 * computes a partial checksum, e.g. for TCP/UDP fragments
32 */
33
34/*
35unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
36 */
37
38.text
39.align 4
40.globl csum_partial
41
42#ifndef CONFIG_X86_USE_PPRO_CHECKSUM
43
44 /*
45 * Experiments with Ethernet and SLIP connections show that buff
46 * is aligned on either a 2-byte or 4-byte boundary. We get at
47 * least a twofold speedup on 486 and Pentium if it is 4-byte aligned.
48 * Fortunately, it is easy to convert 2-byte alignment to 4-byte
49 * alignment for the unrolled loop.
50 */
51csum_partial:
52 pushl %esi
53 pushl %ebx
54 movl 20(%esp),%eax # Function arg: unsigned int sum
55 movl 16(%esp),%ecx # Function arg: int len
56 movl 12(%esp),%esi # Function arg: unsigned char *buff
57 testl $2, %esi # Check alignment.
58 jz 2f # Jump if alignment is ok.
59 subl $2, %ecx # Alignment uses up two bytes.
60 jae 1f # Jump if we had at least two bytes.
61 addl $2, %ecx # ecx was < 2. Deal with it.
62 jmp 4f
631: movw (%esi), %bx
64 addl $2, %esi
65 addw %bx, %ax
66 adcl $0, %eax
672:
68 movl %ecx, %edx
69 shrl $5, %ecx
70 jz 2f
71 testl %esi, %esi
721: movl (%esi), %ebx
73 adcl %ebx, %eax
74 movl 4(%esi), %ebx
75 adcl %ebx, %eax
76 movl 8(%esi), %ebx
77 adcl %ebx, %eax
78 movl 12(%esi), %ebx
79 adcl %ebx, %eax
80 movl 16(%esi), %ebx
81 adcl %ebx, %eax
82 movl 20(%esi), %ebx
83 adcl %ebx, %eax
84 movl 24(%esi), %ebx
85 adcl %ebx, %eax
86 movl 28(%esi), %ebx
87 adcl %ebx, %eax
88 lea 32(%esi), %esi
89 dec %ecx
90 jne 1b
91 adcl $0, %eax
922: movl %edx, %ecx
93 andl $0x1c, %edx
94 je 4f
95 shrl $2, %edx # This clears CF
963: adcl (%esi), %eax
97 lea 4(%esi), %esi
98 dec %edx
99 jne 3b
100 adcl $0, %eax
1014: andl $3, %ecx
102 jz 7f
103 cmpl $2, %ecx
104 jb 5f
105 movw (%esi),%cx
106 leal 2(%esi),%esi
107 je 6f
108 shll $16,%ecx
1095: movb (%esi),%cl
1106: addl %ecx,%eax
111 adcl $0, %eax
1127:
113 popl %ebx
114 popl %esi
115 ret
116
117#else
118
119/* Version for PentiumII/PPro */
120
121csum_partial:
122 pushl %esi
123 pushl %ebx
124 movl 20(%esp),%eax # Function arg: unsigned int sum
125 movl 16(%esp),%ecx # Function arg: int len
126 movl 12(%esp),%esi # Function arg: const unsigned char *buf
127
128 testl $2, %esi
129 jnz 30f
13010:
131 movl %ecx, %edx
132 movl %ecx, %ebx
133 andl $0x7c, %ebx
134 shrl $7, %ecx
135 addl %ebx,%esi
136 shrl $2, %ebx
137 negl %ebx
138 lea 45f(%ebx,%ebx,2), %ebx
139 testl %esi, %esi
140 jmp *%ebx
141
142 # Handle 2-byte-aligned regions
14320: addw (%esi), %ax
144 lea 2(%esi), %esi
145 adcl $0, %eax
146 jmp 10b
147
14830: subl $2, %ecx
149 ja 20b
150 je 32f
151 movzbl (%esi),%ebx # csumming 1 byte, 2-aligned
152 addl %ebx, %eax
153 adcl $0, %eax
154 jmp 80f
15532:
156 addw (%esi), %ax # csumming 2 bytes, 2-aligned
157 adcl $0, %eax
158 jmp 80f
159
16040:
161 addl -128(%esi), %eax
162 adcl -124(%esi), %eax
163 adcl -120(%esi), %eax
164 adcl -116(%esi), %eax
165 adcl -112(%esi), %eax
166 adcl -108(%esi), %eax
167 adcl -104(%esi), %eax
168 adcl -100(%esi), %eax
169 adcl -96(%esi), %eax
170 adcl -92(%esi), %eax
171 adcl -88(%esi), %eax
172 adcl -84(%esi), %eax
173 adcl -80(%esi), %eax
174 adcl -76(%esi), %eax
175 adcl -72(%esi), %eax
176 adcl -68(%esi), %eax
177 adcl -64(%esi), %eax
178 adcl -60(%esi), %eax
179 adcl -56(%esi), %eax
180 adcl -52(%esi), %eax
181 adcl -48(%esi), %eax
182 adcl -44(%esi), %eax
183 adcl -40(%esi), %eax
184 adcl -36(%esi), %eax
185 adcl -32(%esi), %eax
186 adcl -28(%esi), %eax
187 adcl -24(%esi), %eax
188 adcl -20(%esi), %eax
189 adcl -16(%esi), %eax
190 adcl -12(%esi), %eax
191 adcl -8(%esi), %eax
192 adcl -4(%esi), %eax
19345:
194 lea 128(%esi), %esi
195 adcl $0, %eax
196 dec %ecx
197 jge 40b
198 movl %edx, %ecx
19950: andl $3, %ecx
200 jz 80f
201
202 # Handle the last 1-3 bytes without jumping
203 notl %ecx # 1->2, 2->1, 3->0, higher bits are masked
204 movl $0xffffff,%ebx # by the shll and shrl instructions
205 shll $3,%ecx
206 shrl %cl,%ebx
207 andl -128(%esi),%ebx # esi is 4-aligned so should be ok
208 addl %ebx,%eax
209 adcl $0,%eax
21080:
211 popl %ebx
212 popl %esi
213 ret
214
215#endif
216
217/*
218unsigned int csum_partial_copy_generic (const char *src, char *dst,
219 int len, int sum, int *src_err_ptr, int *dst_err_ptr)
220 */
221
222/*
223 * Copy from ds while checksumming, otherwise like csum_partial
224 *
225 * The macros SRC and DST specify the type of access for the instruction.
226 * thus we can call a custom exception handler for all access types.
227 *
228 * FIXME: could someone double-check whether I haven't mixed up some SRC and
229 * DST definitions? It's damn hard to trigger all cases. I hope I got
230 * them all but there's no guarantee.
231 */
232
233#define SRC(y...) \
234 9999: y; \
235 .section __ex_table, "a"; \
236 .long 9999b, 6001f ; \
237 .previous
238
239#define DST(y...) \
240 9999: y; \
241 .section __ex_table, "a"; \
242 .long 9999b, 6002f ; \
243 .previous
244
245.align 4
246
247#ifndef CONFIG_X86_USE_PPRO_CHECKSUM
248
249#define ARGBASE 16
250#define FP 12
251
252csum_partial_copy_generic_i386:
253 subl $4,%esp
254 pushl %edi
255 pushl %esi
256 pushl %ebx
257 movl ARGBASE+16(%esp),%eax # sum
258 movl ARGBASE+12(%esp),%ecx # len
259 movl ARGBASE+4(%esp),%esi # src
260 movl ARGBASE+8(%esp),%edi # dst
261
262 testl $2, %edi # Check alignment.
263 jz 2f # Jump if alignment is ok.
264 subl $2, %ecx # Alignment uses up two bytes.
265 jae 1f # Jump if we had at least two bytes.
266 addl $2, %ecx # ecx was < 2. Deal with it.
267 jmp 4f
268SRC(1: movw (%esi), %bx )
269 addl $2, %esi
270DST( movw %bx, (%edi) )
271 addl $2, %edi
272 addw %bx, %ax
273 adcl $0, %eax
2742:
275 movl %ecx, FP(%esp)
276 shrl $5, %ecx
277 jz 2f
278 testl %esi, %esi
279SRC(1: movl (%esi), %ebx )
280SRC( movl 4(%esi), %edx )
281 adcl %ebx, %eax
282DST( movl %ebx, (%edi) )
283 adcl %edx, %eax
284DST( movl %edx, 4(%edi) )
285
286SRC( movl 8(%esi), %ebx )
287SRC( movl 12(%esi), %edx )
288 adcl %ebx, %eax
289DST( movl %ebx, 8(%edi) )
290 adcl %edx, %eax
291DST( movl %edx, 12(%edi) )
292
293SRC( movl 16(%esi), %ebx )
294SRC( movl 20(%esi), %edx )
295 adcl %ebx, %eax
296DST( movl %ebx, 16(%edi) )
297 adcl %edx, %eax
298DST( movl %edx, 20(%edi) )
299
300SRC( movl 24(%esi), %ebx )
301SRC( movl 28(%esi), %edx )
302 adcl %ebx, %eax
303DST( movl %ebx, 24(%edi) )
304 adcl %edx, %eax
305DST( movl %edx, 28(%edi) )
306
307 lea 32(%esi), %esi
308 lea 32(%edi), %edi
309 dec %ecx
310 jne 1b
311 adcl $0, %eax
3122: movl FP(%esp), %edx
313 movl %edx, %ecx
314 andl $0x1c, %edx
315 je 4f
316 shrl $2, %edx # This clears CF
317SRC(3: movl (%esi), %ebx )
318 adcl %ebx, %eax
319DST( movl %ebx, (%edi) )
320 lea 4(%esi), %esi
321 lea 4(%edi), %edi
322 dec %edx
323 jne 3b
324 adcl $0, %eax
3254: andl $3, %ecx
326 jz 7f
327 cmpl $2, %ecx
328 jb 5f
329SRC( movw (%esi), %cx )
330 leal 2(%esi), %esi
331DST( movw %cx, (%edi) )
332 leal 2(%edi), %edi
333 je 6f
334 shll $16,%ecx
335SRC(5: movb (%esi), %cl )
336DST( movb %cl, (%edi) )
3376: addl %ecx, %eax
338 adcl $0, %eax
3397:
3405000:
341
342# Exception handler:
343.section .fixup, "ax"
344
3456001:
346 movl ARGBASE+20(%esp), %ebx # src_err_ptr
347 movl $-EFAULT, (%ebx)
348
349 # zero the complete destination - computing the rest
350 # is too much work
351 movl ARGBASE+8(%esp), %edi # dst
352 movl ARGBASE+12(%esp), %ecx # len
353 xorl %eax,%eax
354 rep ; stosb
355
356 jmp 5000b
357
3586002:
359 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
360 movl $-EFAULT,(%ebx)
361 jmp 5000b
362
363.previous
364
365 popl %ebx
366 popl %esi
367 popl %edi
368 popl %ecx # equivalent to addl $4,%esp
369 ret
370
371#else
372
373/* Version for PentiumII/PPro */
374
375#define ROUND1(x) \
376 SRC(movl x(%esi), %ebx ) ; \
377 addl %ebx, %eax ; \
378 DST(movl %ebx, x(%edi) ) ;
379
380#define ROUND(x) \
381 SRC(movl x(%esi), %ebx ) ; \
382 adcl %ebx, %eax ; \
383 DST(movl %ebx, x(%edi) ) ;
384
385#define ARGBASE 12
386
387csum_partial_copy_generic_i386:
388 pushl %ebx
389 pushl %edi
390 pushl %esi
391 movl ARGBASE+4(%esp),%esi #src
392 movl ARGBASE+8(%esp),%edi #dst
393 movl ARGBASE+12(%esp),%ecx #len
394 movl ARGBASE+16(%esp),%eax #sum
395# movl %ecx, %edx
396 movl %ecx, %ebx
397 movl %esi, %edx
398 shrl $6, %ecx
399 andl $0x3c, %ebx
400 negl %ebx
401 subl %ebx, %esi
402 subl %ebx, %edi
403 lea -1(%esi),%edx
404 andl $-32,%edx
405 lea 3f(%ebx,%ebx), %ebx
406 testl %esi, %esi
407 jmp *%ebx
4081: addl $64,%esi
409 addl $64,%edi
410 SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl)
411 ROUND1(-64) ROUND(-60) ROUND(-56) ROUND(-52)
412 ROUND (-48) ROUND(-44) ROUND(-40) ROUND(-36)
413 ROUND (-32) ROUND(-28) ROUND(-24) ROUND(-20)
414 ROUND (-16) ROUND(-12) ROUND(-8) ROUND(-4)
4153: adcl $0,%eax
416 addl $64, %edx
417 dec %ecx
418 jge 1b
4194: movl ARGBASE+12(%esp),%edx #len
420 andl $3, %edx
421 jz 7f
422 cmpl $2, %edx
423 jb 5f
424SRC( movw (%esi), %dx )
425 leal 2(%esi), %esi
426DST( movw %dx, (%edi) )
427 leal 2(%edi), %edi
428 je 6f
429 shll $16,%edx
4305:
431SRC( movb (%esi), %dl )
432DST( movb %dl, (%edi) )
4336: addl %edx, %eax
434 adcl $0, %eax
4357:
436.section .fixup, "ax"
4376001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
438 movl $-EFAULT, (%ebx)
439 # zero the complete destination (computing the rest is too much work)
440 movl ARGBASE+8(%esp),%edi # dst
441 movl ARGBASE+12(%esp),%ecx # len
442 xorl %eax,%eax
443 rep; stosb
444 jmp 7b
4456002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
446 movl $-EFAULT, (%ebx)
447 jmp 7b
448.previous
449
450 popl %esi
451 popl %edi
452 popl %ebx
453 ret
454
455#undef ROUND
456#undef ROUND1
457
458#endif
diff --git a/arch/um/sys-x86/delay_32.c b/arch/um/sys-x86/delay_32.c
deleted file mode 100644
index f3fe1a688f7e..000000000000
--- a/arch/um/sys-x86/delay_32.c
+++ /dev/null
@@ -1,60 +0,0 @@
1/*
2 * Copyright (C) 2011 Richard Weinberger <richrd@nod.at>
3 * Mostly copied from arch/x86/lib/delay.c
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/delay.h>
13#include <asm/param.h>
14
15void __delay(unsigned long loops)
16{
17 asm volatile(
18 "test %0,%0\n"
19 "jz 3f\n"
20 "jmp 1f\n"
21
22 ".align 16\n"
23 "1: jmp 2f\n"
24
25 ".align 16\n"
26 "2: dec %0\n"
27 " jnz 2b\n"
28 "3: dec %0\n"
29
30 : /* we don't need output */
31 : "a" (loops)
32 );
33}
34EXPORT_SYMBOL(__delay);
35
36inline void __const_udelay(unsigned long xloops)
37{
38 int d0;
39
40 xloops *= 4;
41 asm("mull %%edx"
42 : "=d" (xloops), "=&a" (d0)
43 : "1" (xloops), "0"
44 (loops_per_jiffy * (HZ/4)));
45
46 __delay(++xloops);
47}
48EXPORT_SYMBOL(__const_udelay);
49
50void __udelay(unsigned long usecs)
51{
52 __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
53}
54EXPORT_SYMBOL(__udelay);
55
56void __ndelay(unsigned long nsecs)
57{
58 __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
59}
60EXPORT_SYMBOL(__ndelay);
diff --git a/arch/um/sys-x86/delay_64.c b/arch/um/sys-x86/delay_64.c
deleted file mode 100644
index f3fe1a688f7e..000000000000
--- a/arch/um/sys-x86/delay_64.c
+++ /dev/null
@@ -1,60 +0,0 @@
1/*
2 * Copyright (C) 2011 Richard Weinberger <richrd@nod.at>
3 * Mostly copied from arch/x86/lib/delay.c
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/delay.h>
13#include <asm/param.h>
14
15void __delay(unsigned long loops)
16{
17 asm volatile(
18 "test %0,%0\n"
19 "jz 3f\n"
20 "jmp 1f\n"
21
22 ".align 16\n"
23 "1: jmp 2f\n"
24
25 ".align 16\n"
26 "2: dec %0\n"
27 " jnz 2b\n"
28 "3: dec %0\n"
29
30 : /* we don't need output */
31 : "a" (loops)
32 );
33}
34EXPORT_SYMBOL(__delay);
35
36inline void __const_udelay(unsigned long xloops)
37{
38 int d0;
39
40 xloops *= 4;
41 asm("mull %%edx"
42 : "=d" (xloops), "=&a" (d0)
43 : "1" (xloops), "0"
44 (loops_per_jiffy * (HZ/4)));
45
46 __delay(++xloops);
47}
48EXPORT_SYMBOL(__const_udelay);
49
50void __udelay(unsigned long usecs)
51{
52 __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
53}
54EXPORT_SYMBOL(__udelay);
55
56void __ndelay(unsigned long nsecs)
57{
58 __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
59}
60EXPORT_SYMBOL(__ndelay);
diff --git a/arch/um/sys-x86/elfcore.c b/arch/um/sys-x86/elfcore.c
deleted file mode 100644
index 6bb49b687c97..000000000000
--- a/arch/um/sys-x86/elfcore.c
+++ /dev/null
@@ -1,83 +0,0 @@
1#include <linux/elf.h>
2#include <linux/coredump.h>
3#include <linux/fs.h>
4#include <linux/mm.h>
5
6#include <asm/elf.h>
7
8
9Elf32_Half elf_core_extra_phdrs(void)
10{
11 return vsyscall_ehdr ? (((struct elfhdr *)vsyscall_ehdr)->e_phnum) : 0;
12}
13
14int elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size,
15 unsigned long limit)
16{
17 if ( vsyscall_ehdr ) {
18 const struct elfhdr *const ehdrp =
19 (struct elfhdr *) vsyscall_ehdr;
20 const struct elf_phdr *const phdrp =
21 (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff);
22 int i;
23 Elf32_Off ofs = 0;
24
25 for (i = 0; i < ehdrp->e_phnum; ++i) {
26 struct elf_phdr phdr = phdrp[i];
27
28 if (phdr.p_type == PT_LOAD) {
29 ofs = phdr.p_offset = offset;
30 offset += phdr.p_filesz;
31 } else {
32 phdr.p_offset += ofs;
33 }
34 phdr.p_paddr = 0; /* match other core phdrs */
35 *size += sizeof(phdr);
36 if (*size > limit
37 || !dump_write(file, &phdr, sizeof(phdr)))
38 return 0;
39 }
40 }
41 return 1;
42}
43
44int elf_core_write_extra_data(struct file *file, size_t *size,
45 unsigned long limit)
46{
47 if ( vsyscall_ehdr ) {
48 const struct elfhdr *const ehdrp =
49 (struct elfhdr *) vsyscall_ehdr;
50 const struct elf_phdr *const phdrp =
51 (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff);
52 int i;
53
54 for (i = 0; i < ehdrp->e_phnum; ++i) {
55 if (phdrp[i].p_type == PT_LOAD) {
56 void *addr = (void *) phdrp[i].p_vaddr;
57 size_t filesz = phdrp[i].p_filesz;
58
59 *size += filesz;
60 if (*size > limit
61 || !dump_write(file, addr, filesz))
62 return 0;
63 }
64 }
65 }
66 return 1;
67}
68
69size_t elf_core_extra_data_size(void)
70{
71 if ( vsyscall_ehdr ) {
72 const struct elfhdr *const ehdrp =
73 (struct elfhdr *)vsyscall_ehdr;
74 const struct elf_phdr *const phdrp =
75 (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff);
76 int i;
77
78 for (i = 0; i < ehdrp->e_phnum; ++i)
79 if (phdrp[i].p_type == PT_LOAD)
80 return (size_t) phdrp[i].p_filesz;
81 }
82 return 0;
83}
diff --git a/arch/um/sys-x86/fault.c b/arch/um/sys-x86/fault.c
deleted file mode 100644
index d670f68532f4..000000000000
--- a/arch/um/sys-x86/fault.c
+++ /dev/null
@@ -1,28 +0,0 @@
1/*
2 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#include "sysdep/ptrace.h"
7
8/* These two are from asm-um/uaccess.h and linux/module.h, check them. */
9struct exception_table_entry
10{
11 unsigned long insn;
12 unsigned long fixup;
13};
14
15const struct exception_table_entry *search_exception_tables(unsigned long add);
16
17/* Compare this to arch/i386/mm/extable.c:fixup_exception() */
18int arch_fixup(unsigned long address, struct uml_pt_regs *regs)
19{
20 const struct exception_table_entry *fixup;
21
22 fixup = search_exception_tables(address);
23 if (fixup != 0) {
24 UPT_IP(regs) = fixup->fixup;
25 return 1;
26 }
27 return 0;
28}
diff --git a/arch/um/sys-x86/ksyms.c b/arch/um/sys-x86/ksyms.c
deleted file mode 100644
index 2e8f43ec6214..000000000000
--- a/arch/um/sys-x86/ksyms.c
+++ /dev/null
@@ -1,13 +0,0 @@
1#include <linux/module.h>
2#include <asm/string.h>
3#include <asm/checksum.h>
4
5#ifndef CONFIG_X86_32
6/*XXX: we need them because they would be exported by x86_64 */
7#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
8EXPORT_SYMBOL(memcpy);
9#else
10EXPORT_SYMBOL(__memcpy);
11#endif
12#endif
13EXPORT_SYMBOL(csum_partial);
diff --git a/arch/um/sys-x86/ldt.c b/arch/um/sys-x86/ldt.c
deleted file mode 100644
index 3f2bf208d884..000000000000
--- a/arch/um/sys-x86/ldt.c
+++ /dev/null
@@ -1,502 +0,0 @@
1/*
2 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#include <linux/mm.h>
7#include <linux/sched.h>
8#include <linux/slab.h>
9#include <asm/unistd.h>
10#include "os.h"
11#include "proc_mm.h"
12#include "skas.h"
13#include "skas_ptrace.h"
14#include "sysdep/tls.h"
15
16extern int modify_ldt(int func, void *ptr, unsigned long bytecount);
17
18static long write_ldt_entry(struct mm_id *mm_idp, int func,
19 struct user_desc *desc, void **addr, int done)
20{
21 long res;
22
23 if (proc_mm) {
24 /*
25 * This is a special handling for the case, that the mm to
26 * modify isn't current->active_mm.
27 * If this is called directly by modify_ldt,
28 * (current->active_mm->context.skas.u == mm_idp)
29 * will be true. So no call to __switch_mm(mm_idp) is done.
30 * If this is called in case of init_new_ldt or PTRACE_LDT,
31 * mm_idp won't belong to current->active_mm, but child->mm.
32 * So we need to switch child's mm into our userspace, then
33 * later switch back.
34 *
35 * Note: I'm unsure: should interrupts be disabled here?
36 */
37 if (!current->active_mm || current->active_mm == &init_mm ||
38 mm_idp != &current->active_mm->context.id)
39 __switch_mm(mm_idp);
40 }
41
42 if (ptrace_ldt) {
43 struct ptrace_ldt ldt_op = (struct ptrace_ldt) {
44 .func = func,
45 .ptr = desc,
46 .bytecount = sizeof(*desc)};
47 u32 cpu;
48 int pid;
49
50 if (!proc_mm)
51 pid = mm_idp->u.pid;
52 else {
53 cpu = get_cpu();
54 pid = userspace_pid[cpu];
55 }
56
57 res = os_ptrace_ldt(pid, 0, (unsigned long) &ldt_op);
58
59 if (proc_mm)
60 put_cpu();
61 }
62 else {
63 void *stub_addr;
64 res = syscall_stub_data(mm_idp, (unsigned long *)desc,
65 (sizeof(*desc) + sizeof(long) - 1) &
66 ~(sizeof(long) - 1),
67 addr, &stub_addr);
68 if (!res) {
69 unsigned long args[] = { func,
70 (unsigned long)stub_addr,
71 sizeof(*desc),
72 0, 0, 0 };
73 res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
74 0, addr, done);
75 }
76 }
77
78 if (proc_mm) {
79 /*
80 * This is the second part of special handling, that makes
81 * PTRACE_LDT possible to implement.
82 */
83 if (current->active_mm && current->active_mm != &init_mm &&
84 mm_idp != &current->active_mm->context.id)
85 __switch_mm(&current->active_mm->context.id);
86 }
87
88 return res;
89}
90
91static long read_ldt_from_host(void __user * ptr, unsigned long bytecount)
92{
93 int res, n;
94 struct ptrace_ldt ptrace_ldt = (struct ptrace_ldt) {
95 .func = 0,
96 .bytecount = bytecount,
97 .ptr = kmalloc(bytecount, GFP_KERNEL)};
98 u32 cpu;
99
100 if (ptrace_ldt.ptr == NULL)
101 return -ENOMEM;
102
103 /*
104 * This is called from sys_modify_ldt only, so userspace_pid gives
105 * us the right number
106 */
107
108 cpu = get_cpu();
109 res = os_ptrace_ldt(userspace_pid[cpu], 0, (unsigned long) &ptrace_ldt);
110 put_cpu();
111 if (res < 0)
112 goto out;
113
114 n = copy_to_user(ptr, ptrace_ldt.ptr, res);
115 if (n != 0)
116 res = -EFAULT;
117
118 out:
119 kfree(ptrace_ldt.ptr);
120
121 return res;
122}
123
124/*
125 * In skas mode, we hold our own ldt data in UML.
126 * Thus, the code implementing sys_modify_ldt_skas
127 * is very similar to (and mostly stolen from) sys_modify_ldt
128 * for arch/i386/kernel/ldt.c
129 * The routines copied and modified in part are:
130 * - read_ldt
131 * - read_default_ldt
132 * - write_ldt
133 * - sys_modify_ldt_skas
134 */
135
136static int read_ldt(void __user * ptr, unsigned long bytecount)
137{
138 int i, err = 0;
139 unsigned long size;
140 uml_ldt_t * ldt = &current->mm->context.ldt;
141
142 if (!ldt->entry_count)
143 goto out;
144 if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
145 bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
146 err = bytecount;
147
148 if (ptrace_ldt)
149 return read_ldt_from_host(ptr, bytecount);
150
151 mutex_lock(&ldt->lock);
152 if (ldt->entry_count <= LDT_DIRECT_ENTRIES) {
153 size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
154 if (size > bytecount)
155 size = bytecount;
156 if (copy_to_user(ptr, ldt->u.entries, size))
157 err = -EFAULT;
158 bytecount -= size;
159 ptr += size;
160 }
161 else {
162 for (i=0; i<ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount;
163 i++) {
164 size = PAGE_SIZE;
165 if (size > bytecount)
166 size = bytecount;
167 if (copy_to_user(ptr, ldt->u.pages[i], size)) {
168 err = -EFAULT;
169 break;
170 }
171 bytecount -= size;
172 ptr += size;
173 }
174 }
175 mutex_unlock(&ldt->lock);
176
177 if (bytecount == 0 || err == -EFAULT)
178 goto out;
179
180 if (clear_user(ptr, bytecount))
181 err = -EFAULT;
182
183out:
184 return err;
185}
186
187static int read_default_ldt(void __user * ptr, unsigned long bytecount)
188{
189 int err;
190
191 if (bytecount > 5*LDT_ENTRY_SIZE)
192 bytecount = 5*LDT_ENTRY_SIZE;
193
194 err = bytecount;
195 /*
196 * UML doesn't support lcall7 and lcall27.
197 * So, we don't really have a default ldt, but emulate
198 * an empty ldt of common host default ldt size.
199 */
200 if (clear_user(ptr, bytecount))
201 err = -EFAULT;
202
203 return err;
204}
205
/*
 * Install or clear one LDT entry, both on the host (via
 * write_ldt_entry()) and in the local shadow copy used to answer
 * modify_ldt() reads.  func is the modify_ldt() function code
 * (1 or 0x11 = write).  Returns 0 or a negative errno.
 */
static int write_ldt(void __user * ptr, unsigned long bytecount, int func)
{
	uml_ldt_t * ldt = &current->mm->context.ldt;
	struct mm_id * mm_idp = &current->mm->context.id;
	int i, err;
	struct user_desc ldt_info;
	struct ldt_entry entry0, *ldt_p;
	void *addr = NULL;

	/* The caller must pass exactly one struct user_desc. */
	err = -EINVAL;
	if (bytecount != sizeof(ldt_info))
		goto out;
	err = -EFAULT;
	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
		goto out;

	err = -EINVAL;
	if (ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
	if (ldt_info.contents == 3) {
		/* contents == 3 (call gates) only allowed for oldmode
		 * clears, i.e. not-present entries */
		if (func == 1)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}

	if (!ptrace_ldt)
		mutex_lock(&ldt->lock);

	/* Update the host side first; on failure nothing local changed. */
	err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
	if (err)
		goto out_unlock;
	else if (ptrace_ldt) {
		/* With PTRACE_LDT available, this is used as a flag only */
		ldt->entry_count = 1;
		goto out;
	}
	/*
	 * NOTE(review): if ptrace_ldt is set and write_ldt_entry() fails,
	 * the goto out_unlock above unlocks a mutex that was never taken
	 * - confirm whether that path can actually be reached.
	 */

	/* Grow the shadow copy from the small direct array to the
	 * page-indirect representation, if this entry needs it. */
	if (ldt_info.entry_number >= ldt->entry_count &&
	    ldt_info.entry_number >= LDT_DIRECT_ENTRIES) {
		for (i=ldt->entry_count/LDT_ENTRIES_PER_PAGE;
		     i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number;
		     i++) {
			/* Entry 0 is overlaid by the pages[] pointer array;
			 * save it before the union is reused. */
			if (i == 0)
				memcpy(&entry0, ldt->u.entries,
				       sizeof(entry0));
			ldt->u.pages[i] = (struct ldt_entry *)
				__get_free_page(GFP_KERNEL|__GFP_ZERO);
			if (!ldt->u.pages[i]) {
				err = -ENOMEM;
				/* Undo the change in host */
				memset(&ldt_info, 0, sizeof(ldt_info));
				write_ldt_entry(mm_idp, 1, &ldt_info, &addr, 1);
				goto out_unlock;
			}
			/* Migrate the saved direct entries to page 0. */
			if (i == 0) {
				memcpy(ldt->u.pages[0], &entry0,
				       sizeof(entry0));
				memcpy(ldt->u.pages[0]+1, ldt->u.entries+1,
				       sizeof(entry0)*(LDT_DIRECT_ENTRIES-1));
			}
			ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE;
		}
	}
	if (ldt->entry_count <= ldt_info.entry_number)
		ldt->entry_count = ldt_info.entry_number + 1;

	/* Locate the shadow slot for this entry. */
	if (ldt->entry_count <= LDT_DIRECT_ENTRIES)
		ldt_p = ldt->u.entries + ldt_info.entry_number;
	else
		ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] +
			ldt_info.entry_number%LDT_ENTRIES_PER_PAGE;

	/* An all-zero/empty descriptor clears the slot; otherwise encode
	 * the user_desc into the two descriptor words. */
	if (ldt_info.base_addr == 0 && ldt_info.limit == 0 &&
	   (func == 1 || LDT_empty(&ldt_info))) {
		ldt_p->a = 0;
		ldt_p->b = 0;
	}
	else{
		if (func == 1)
			ldt_info.useable = 0;
		ldt_p->a = LDT_entry_a(&ldt_info);
		ldt_p->b = LDT_entry_b(&ldt_info);
	}
	err = 0;

out_unlock:
	mutex_unlock(&ldt->lock);
out:
	return err;
}
297
298static long do_modify_ldt_skas(int func, void __user *ptr,
299 unsigned long bytecount)
300{
301 int ret = -ENOSYS;
302
303 switch (func) {
304 case 0:
305 ret = read_ldt(ptr, bytecount);
306 break;
307 case 1:
308 case 0x11:
309 ret = write_ldt(ptr, bytecount, func);
310 break;
311 case 2:
312 ret = read_default_ldt(ptr, bytecount);
313 break;
314 }
315 return ret;
316}
317
/* Protects the one-time probe of the host LDT below. */
static DEFINE_SPINLOCK(host_ldt_lock);
/* Static fallback list: up to 8 entry numbers plus a -1 terminator. */
static short dummy_list[9] = {0, -1};
/* -1 terminated list of host LDT entry numbers in use; NULL until the
 * host has been probed by ldt_get_host_info(). */
static short * host_ldt_entries = NULL;
321
/*
 * Probe the host's LDT once and cache which entries are populated.
 * On return host_ldt_entries points to a -1 terminated list of entry
 * numbers (dummy_list for the common small/empty cases).
 */
static void ldt_get_host_info(void)
{
	long ret;
	struct ldt_entry * ldt;
	short *tmp;
	int i, size, k, order;

	spin_lock(&host_ldt_lock);

	/* Already probed by an earlier caller. */
	if (host_ldt_entries != NULL) {
		spin_unlock(&host_ldt_lock);
		return;
	}
	/*
	 * Claim the probe before dropping the lock; dummy_list+1 points
	 * at the -1 terminator, i.e. an empty list.
	 * NOTE(review): a concurrent caller can observe this empty list
	 * while the probe below is still running - confirm callers
	 * tolerate that.
	 */
	host_ldt_entries = dummy_list+1;

	spin_unlock(&host_ldt_lock);

	/* order = number of page-order doublings covering LDT_PAGES_MAX */
	for (i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++)
		;

	ldt = (struct ldt_entry *)
		__get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
	if (ldt == NULL) {
		printk(KERN_ERR "ldt_get_host_info: couldn't allocate buffer "
		       "for host ldt\n");
		return;
	}

	/* modify_ldt(0, ...) reads the host LDT; ret = bytes returned. */
	ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE);
	if (ret < 0) {
		printk(KERN_ERR "ldt_get_host_info: couldn't read host ldt\n");
		goto out_free;
	}
	if (ret == 0) {
		/* default_ldt is active, simply write an empty entry 0 */
		host_ldt_entries = dummy_list;
		goto out_free;
	}

	/* Count entries that are actually in use (non-zero words). */
	for (i=0, size=0; i<ret/LDT_ENTRY_SIZE; i++) {
		if (ldt[i].a != 0 || ldt[i].b != 0)
			size++;
	}

	/* Small lists fit the static buffer; larger ones are kmalloced
	 * with one extra slot for the -1 terminator. */
	if (size < ARRAY_SIZE(dummy_list))
		host_ldt_entries = dummy_list;
	else {
		size = (size + 1) * sizeof(dummy_list[0]);
		tmp = kmalloc(size, GFP_KERNEL);
		if (tmp == NULL) {
			printk(KERN_ERR "ldt_get_host_info: couldn't allocate "
			       "host ldt list\n");
			goto out_free;
		}
		host_ldt_entries = tmp;
	}

	/* Record the entry numbers of all populated descriptors. */
	for (i=0, k=0; i<ret/LDT_ENTRY_SIZE; i++) {
		if (ldt[i].a != 0 || ldt[i].b != 0)
			host_ldt_entries[k++] = i;
	}
	host_ldt_entries[k] = -1;

out_free:
	free_pages((unsigned long)ldt, order);
}
388
/*
 * Set up the LDT state of a freshly created mm.  Without a parent
 * (from_mm == NULL) any host-inherited entries are cleared; with a
 * parent, its LDT is duplicated into new_mm - on the host side (via
 * /proc/mm when available) and, without PTRACE_LDT, in the local
 * shadow copy as well.  Returns 0 or a negative errno.
 */
long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm)
{
	struct user_desc desc;
	short * num_p;
	int i;
	long page, err=0;
	void *addr = NULL;
	struct proc_mm_op copy;


	if (!ptrace_ldt)
		mutex_init(&new_mm->ldt.lock);

	if (!from_mm) {
		/* desc stays all-zero: writing it clears an entry. */
		memset(&desc, 0, sizeof(desc));
		/*
		 * We have to initialize a clean ldt.
		 */
		if (proc_mm) {
			/*
			 * If the new mm was created using proc_mm, host's
			 * default-ldt currently is assigned, which normally
			 * contains the call-gates for lcall7 and lcall27.
			 * To remove these gates, we simply write an empty
			 * entry as number 0 to the host.
			 */
			err = write_ldt_entry(&new_mm->id, 1, &desc, &addr, 1);
		}
		else{
			/*
			 * Now we try to retrieve info about the ldt, we
			 * inherited from the host. All ldt-entries found
			 * will be reset in the following loop
			 */
			ldt_get_host_info();
			for (num_p=host_ldt_entries; *num_p != -1; num_p++) {
				desc.entry_number = *num_p;
				/* last_op flag set on the final entry */
				err = write_ldt_entry(&new_mm->id, 1, &desc,
						      &addr, *(num_p + 1) == -1);
				if (err)
					break;
			}
		}
		new_mm->ldt.entry_count = 0;

		goto out;
	}

	if (proc_mm) {
		/*
		 * We have a valid from_mm, so we now have to copy the LDT of
		 * from_mm to new_mm, because using proc_mm an new mm with
		 * an empty/default LDT was created in new_mm()
		 */
		copy = ((struct proc_mm_op) { .op = MM_COPY_SEGMENTS,
					      .u =
					      { .copy_segments =
							from_mm->id.u.mm_fd } } );
		i = os_write_file(new_mm->id.u.mm_fd, &copy, sizeof(copy));
		if (i != sizeof(copy))
			printk(KERN_ERR "new_mm : /proc/mm copy_segments "
			       "failed, err = %d\n", -i);
	}

	if (!ptrace_ldt) {
		/*
		 * Our local LDT is used to supply the data for
		 * modify_ldt(READLDT), if PTRACE_LDT isn't available,
		 * i.e., we have to use the stub for modify_ldt, which
		 * can't handle the big read buffer of up to 64kB.
		 */
		mutex_lock(&from_mm->ldt.lock);
		if (from_mm->ldt.entry_count <= LDT_DIRECT_ENTRIES)
			memcpy(new_mm->ldt.u.entries, from_mm->ldt.u.entries,
			       sizeof(new_mm->ldt.u.entries));
		else {
			/* Duplicate the indirect page array page by page. */
			i = from_mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE;
			while (i-->0) {
				page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
				if (!page) {
					err = -ENOMEM;
					break;
				}
				new_mm->ldt.u.pages[i] =
					(struct ldt_entry *) page;
				memcpy(new_mm->ldt.u.pages[i],
				       from_mm->ldt.u.pages[i], PAGE_SIZE);
			}
		}
		new_mm->ldt.entry_count = from_mm->ldt.entry_count;
		mutex_unlock(&from_mm->ldt.lock);
	}

	out:
	return err;
}
485
486
487void free_ldt(struct mm_context *mm)
488{
489 int i;
490
491 if (!ptrace_ldt && mm->ldt.entry_count > LDT_DIRECT_ENTRIES) {
492 i = mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE;
493 while (i-- > 0)
494 free_page((long) mm->ldt.u.pages[i]);
495 }
496 mm->ldt.entry_count = 0;
497}
498
/* modify_ldt(2) entry point; all work happens in do_modify_ldt_skas(). */
int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
{
	return do_modify_ldt_skas(func, ptr, bytecount);
}
diff --git a/arch/um/sys-x86/mem_32.c b/arch/um/sys-x86/mem_32.c
deleted file mode 100644
index 639900a6fde9..000000000000
--- a/arch/um/sys-x86/mem_32.c
+++ /dev/null
@@ -1,62 +0,0 @@
1/*
2 * Copyright (C) 2011 Richard Weinberger <richrd@nod.at>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/mm.h>
10#include <asm/page.h>
11#include <asm/mman.h>
12
/* Synthetic VMA describing the fixmap-based gate (vDSO) page. */
static struct vm_area_struct gate_vma;

/* One-time setup of gate_vma; a zero FIXADDR_USER_START means this
 * configuration has no user fixmap area, so nothing is initialized. */
static int __init gate_vma_init(void)
{
	if (!FIXADDR_USER_START)
		return 0;

	gate_vma.vm_mm = NULL;
	gate_vma.vm_start = FIXADDR_USER_START;
	gate_vma.vm_end = FIXADDR_USER_END;
	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
	gate_vma.vm_page_prot = __P101;	/* read + execute */

	/*
	 * Make sure the vDSO gets into every core dump.
	 * Dumping its contents makes post-mortem fully interpretable later
	 * without matching up the same kernel and hardware config to see
	 * what PC values meant.
	 */
	gate_vma.vm_flags |= VM_ALWAYSDUMP;

	return 0;
}
__initcall(gate_vma_init);
37
38struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
39{
40 return FIXADDR_USER_START ? &gate_vma : NULL;
41}
42
43int in_gate_area_no_mm(unsigned long addr)
44{
45 if (!FIXADDR_USER_START)
46 return 0;
47
48 if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
49 return 1;
50
51 return 0;
52}
53
54int in_gate_area(struct mm_struct *mm, unsigned long addr)
55{
56 struct vm_area_struct *vma = get_gate_vma(mm);
57
58 if (!vma)
59 return 0;
60
61 return (addr >= vma->vm_start) && (addr < vma->vm_end);
62}
diff --git a/arch/um/sys-x86/mem_64.c b/arch/um/sys-x86/mem_64.c
deleted file mode 100644
index 546518727a73..000000000000
--- a/arch/um/sys-x86/mem_64.c
+++ /dev/null
@@ -1,26 +0,0 @@
1#include "linux/mm.h"
2#include "asm/page.h"
3#include "asm/mman.h"
4
5const char *arch_vma_name(struct vm_area_struct *vma)
6{
7 if (vma->vm_mm && vma->vm_start == um_vdso_addr)
8 return "[vdso]";
9
10 return NULL;
11}
12
/* 64-bit UML has no gate area: the vDSO is an ordinary mapping, so all
 * three gate-area queries report "none". */
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return 0;
}

int in_gate_area_no_mm(unsigned long addr)
{
	return 0;
}
diff --git a/arch/um/sys-x86/ptrace_32.c b/arch/um/sys-x86/ptrace_32.c
deleted file mode 100644
index a174fde2531c..000000000000
--- a/arch/um/sys-x86/ptrace_32.c
+++ /dev/null
@@ -1,273 +0,0 @@
1/*
2 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#include "linux/mm.h"
7#include "linux/sched.h"
8#include "asm/uaccess.h"
9#include "skas.h"
10
11extern int arch_switch_tls(struct task_struct *to);
12
13void arch_switch_to(struct task_struct *to)
14{
15 int err = arch_switch_tls(to);
16 if (!err)
17 return;
18
19 if (err != -EINVAL)
20 printk(KERN_WARNING "arch_switch_tls failed, errno %d, "
21 "not EINVAL\n", -err);
22 else
23 printk(KERN_WARNING "arch_switch_tls failed, errno = EINVAL\n");
24}
25
/*
 * Return nonzero if the two bytes at addr form an i386 system call
 * instruction.  Used during singlestepping.
 */
int is_syscall(unsigned long addr)
{
	unsigned short instr;
	int n;

	n = copy_from_user(&instr, (void __user *) addr, sizeof(instr));
	if (n) {
		/* access_process_vm() grants access to vsyscall and stub,
		 * while copy_from_user doesn't. Maybe access_process_vm is
		 * slow, but that doesn't matter, since it will be called only
		 * in case of singlestepping, if copy_from_user failed.
		 */
		n = access_process_vm(current, addr, &instr, sizeof(instr), 0);
		if (n != sizeof(instr)) {
			/* Unreadable - err on the side of "is a syscall". */
			printk(KERN_ERR "is_syscall : failed to read "
			       "instruction from 0x%lx\n", addr);
			return 1;
		}
	}
	/* int 0x80 (0xcd 0x80) or sysenter (0x0f 0x34), little-endian */
	return (instr == 0x80cd) || (instr == 0x340f);
}
48
/* determines which flags the user has access to. */
/* 1 = access 0 = no access */
#define FLAG_MASK 0x00044dd5

/* Map struct user register indices (EBX..SS) to the slots of the UML
 * saved-register array (HOST_*). */
static const int reg_offsets[] = {
	[EBX] = HOST_EBX,
	[ECX] = HOST_ECX,
	[EDX] = HOST_EDX,
	[ESI] = HOST_ESI,
	[EDI] = HOST_EDI,
	[EBP] = HOST_EBP,
	[EAX] = HOST_EAX,
	[DS] = HOST_DS,
	[ES] = HOST_ES,
	[FS] = HOST_FS,
	[GS] = HOST_GS,
	[EIP] = HOST_IP,
	[CS] = HOST_CS,
	[EFL] = HOST_EFLAGS,
	[UESP] = HOST_SP,
	[SS] = HOST_SS,
};
71
72int putreg(struct task_struct *child, int regno, unsigned long value)
73{
74 regno >>= 2;
75 switch (regno) {
76 case EBX:
77 case ECX:
78 case EDX:
79 case ESI:
80 case EDI:
81 case EBP:
82 case EAX:
83 case EIP:
84 case UESP:
85 break;
86 case FS:
87 if (value && (value & 3) != 3)
88 return -EIO;
89 break;
90 case GS:
91 if (value && (value & 3) != 3)
92 return -EIO;
93 break;
94 case DS:
95 case ES:
96 if (value && (value & 3) != 3)
97 return -EIO;
98 value &= 0xffff;
99 break;
100 case SS:
101 case CS:
102 if ((value & 3) != 3)
103 return -EIO;
104 value &= 0xffff;
105 break;
106 case EFL:
107 value &= FLAG_MASK;
108 child->thread.regs.regs.gp[HOST_EFLAGS] |= value;
109 return 0;
110 case ORIG_EAX:
111 child->thread.regs.regs.syscall = value;
112 return 0;
113 default :
114 panic("Bad register in putreg() : %d\n", regno);
115 }
116 child->thread.regs.regs.gp[reg_offsets[regno]] = value;
117 return 0;
118}
119
120int poke_user(struct task_struct *child, long addr, long data)
121{
122 if ((addr & 3) || addr < 0)
123 return -EIO;
124
125 if (addr < MAX_REG_OFFSET)
126 return putreg(child, addr, data);
127 else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
128 (addr <= offsetof(struct user, u_debugreg[7]))) {
129 addr -= offsetof(struct user, u_debugreg[0]);
130 addr = addr >> 2;
131 if ((addr == 4) || (addr == 5))
132 return -EIO;
133 child->thread.arch.debugregs[addr] = data;
134 return 0;
135 }
136 return -EIO;
137}
138
/*
 * Read one register from the traced child's saved register file.
 * regno is a byte offset into struct user; segment registers are
 * truncated to their 16 selector bits.
 */
unsigned long getreg(struct task_struct *child, int regno)
{
	unsigned long mask = ~0UL;

	regno >>= 2;	/* byte offset -> register index */
	switch (regno) {
	case ORIG_EAX:
		/* The original syscall number is stored separately. */
		return child->thread.regs.regs.syscall;
	case FS:
	case GS:
	case DS:
	case ES:
	case SS:
	case CS:
		mask = 0xffff;	/* selectors are 16 bit */
		break;
	case EIP:
	case UESP:
	case EAX:
	case EBX:
	case ECX:
	case EDX:
	case ESI:
	case EDI:
	case EBP:
	case EFL:
		break;
	default:
		panic("Bad register in getreg() : %d\n", regno);
	}
	return mask & child->thread.regs.regs.gp[reg_offsets[regno]];
}
171
172/* read the word at location addr in the USER area. */
173int peek_user(struct task_struct *child, long addr, long data)
174{
175 unsigned long tmp;
176
177 if ((addr & 3) || addr < 0)
178 return -EIO;
179
180 tmp = 0; /* Default return condition */
181 if (addr < MAX_REG_OFFSET) {
182 tmp = getreg(child, addr);
183 }
184 else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
185 (addr <= offsetof(struct user, u_debugreg[7]))) {
186 addr -= offsetof(struct user, u_debugreg[0]);
187 addr = addr >> 2;
188 tmp = child->thread.arch.debugregs[addr];
189 }
190 return put_user(tmp, (unsigned long __user *) data);
191}
192
193static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
194{
195 int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
196 struct user_i387_struct fpregs;
197
198 err = save_fp_registers(userspace_pid[cpu], (unsigned long *) &fpregs);
199 if (err)
200 return err;
201
202 n = copy_to_user(buf, &fpregs, sizeof(fpregs));
203 if(n > 0)
204 return -EFAULT;
205
206 return n;
207}
208
209static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
210{
211 int n, cpu = ((struct thread_info *) child->stack)->cpu;
212 struct user_i387_struct fpregs;
213
214 n = copy_from_user(&fpregs, buf, sizeof(fpregs));
215 if (n > 0)
216 return -EFAULT;
217
218 return restore_fp_registers(userspace_pid[cpu],
219 (unsigned long *) &fpregs);
220}
221
222static int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
223{
224 int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
225 struct user_fxsr_struct fpregs;
226
227 err = save_fpx_registers(userspace_pid[cpu], (unsigned long *) &fpregs);
228 if (err)
229 return err;
230
231 n = copy_to_user(buf, &fpregs, sizeof(fpregs));
232 if(n > 0)
233 return -EFAULT;
234
235 return n;
236}
237
238static int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
239{
240 int n, cpu = ((struct thread_info *) child->stack)->cpu;
241 struct user_fxsr_struct fpregs;
242
243 n = copy_from_user(&fpregs, buf, sizeof(fpregs));
244 if (n > 0)
245 return -EFAULT;
246
247 return restore_fpx_registers(userspace_pid[cpu],
248 (unsigned long *) &fpregs);
249}
250
251long subarch_ptrace(struct task_struct *child, long request,
252 unsigned long addr, unsigned long data)
253{
254 int ret = -EIO;
255 void __user *datap = (void __user *) data;
256 switch (request) {
257 case PTRACE_GETFPREGS: /* Get the child FPU state. */
258 ret = get_fpregs(datap, child);
259 break;
260 case PTRACE_SETFPREGS: /* Set the child FPU state. */
261 ret = set_fpregs(datap, child);
262 break;
263 case PTRACE_GETFPXREGS: /* Get the child FPU state. */
264 ret = get_fpxregs(datap, child);
265 break;
266 case PTRACE_SETFPXREGS: /* Set the child FPU state. */
267 ret = set_fpxregs(datap, child);
268 break;
269 default:
270 ret = -EIO;
271 }
272 return ret;
273}
diff --git a/arch/um/sys-x86/ptrace_64.c b/arch/um/sys-x86/ptrace_64.c
deleted file mode 100644
index 44e68e0c0d10..000000000000
--- a/arch/um/sys-x86/ptrace_64.c
+++ /dev/null
@@ -1,271 +0,0 @@
1/*
2 * Copyright 2003 PathScale, Inc.
3 * Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
4 *
5 * Licensed under the GPL
6 */
7
8#include <linux/mm.h>
9#include <linux/sched.h>
10#include <linux/errno.h>
11#define __FRAME_OFFSETS
12#include <asm/ptrace.h>
13#include <asm/uaccess.h>
14
/*
 * determines which flags the user has access to.
 * 1 = access 0 = no access
 */
#define FLAG_MASK 0x44dd5UL

/* Map struct user byte offsets (>> 3 = index) to the slots of the UML
 * saved-register array (HOST_*). */
static const int reg_offsets[] =
{
	[R8 >> 3] = HOST_R8,
	[R9 >> 3] = HOST_R9,
	[R10 >> 3] = HOST_R10,
	[R11 >> 3] = HOST_R11,
	[R12 >> 3] = HOST_R12,
	[R13 >> 3] = HOST_R13,
	[R14 >> 3] = HOST_R14,
	[R15 >> 3] = HOST_R15,
	[RIP >> 3] = HOST_IP,
	[RSP >> 3] = HOST_SP,
	[RAX >> 3] = HOST_RAX,
	[RBX >> 3] = HOST_RBX,
	[RCX >> 3] = HOST_RCX,
	[RDX >> 3] = HOST_RDX,
	[RSI >> 3] = HOST_RSI,
	[RDI >> 3] = HOST_RDI,
	[RBP >> 3] = HOST_RBP,
	[CS >> 3] = HOST_CS,
	[SS >> 3] = HOST_SS,
	[FS_BASE >> 3] = HOST_FS_BASE,
	[GS_BASE >> 3] = HOST_GS_BASE,
	[DS >> 3] = HOST_DS,
	[ES >> 3] = HOST_ES,
	[FS >> 3] = HOST_FS,
	[GS >> 3] = HOST_GS,
	[EFLAGS >> 3] = HOST_EFLAGS,
	[ORIG_RAX >> 3] = HOST_ORIG_RAX,
};
51
52int putreg(struct task_struct *child, int regno, unsigned long value)
53{
54#ifdef TIF_IA32
55 /*
56 * Some code in the 64bit emulation may not be 64bit clean.
57 * Don't take any chances.
58 */
59 if (test_tsk_thread_flag(child, TIF_IA32))
60 value &= 0xffffffff;
61#endif
62 switch (regno) {
63 case R8:
64 case R9:
65 case R10:
66 case R11:
67 case R12:
68 case R13:
69 case R14:
70 case R15:
71 case RIP:
72 case RSP:
73 case RAX:
74 case RBX:
75 case RCX:
76 case RDX:
77 case RSI:
78 case RDI:
79 case RBP:
80 case ORIG_RAX:
81 break;
82
83 case FS:
84 case GS:
85 case DS:
86 case ES:
87 case SS:
88 case CS:
89 if (value && (value & 3) != 3)
90 return -EIO;
91 value &= 0xffff;
92 break;
93
94 case FS_BASE:
95 case GS_BASE:
96 if (!((value >> 48) == 0 || (value >> 48) == 0xffff))
97 return -EIO;
98 break;
99
100 case EFLAGS:
101 value &= FLAG_MASK;
102 child->thread.regs.regs.gp[HOST_EFLAGS] |= value;
103 return 0;
104
105 default:
106 panic("Bad register in putreg(): %d\n", regno);
107 }
108
109 child->thread.regs.regs.gp[reg_offsets[regno >> 3]] = value;
110 return 0;
111}
112
113int poke_user(struct task_struct *child, long addr, long data)
114{
115 if ((addr & 3) || addr < 0)
116 return -EIO;
117
118 if (addr < MAX_REG_OFFSET)
119 return putreg(child, addr, data);
120 else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
121 (addr <= offsetof(struct user, u_debugreg[7]))) {
122 addr -= offsetof(struct user, u_debugreg[0]);
123 addr = addr >> 2;
124 if ((addr == 4) || (addr == 5))
125 return -EIO;
126 child->thread.arch.debugregs[addr] = data;
127 return 0;
128 }
129 return -EIO;
130}
131
/*
 * Read one register from the traced child's saved register file.
 * regno is a byte offset into struct user; segment registers are
 * truncated to their 16 selector bits.
 */
unsigned long getreg(struct task_struct *child, int regno)
{
	unsigned long mask = ~0UL;
#ifdef TIF_IA32
	/* 32-bit tracees only see the low halves. */
	if (test_tsk_thread_flag(child, TIF_IA32))
		mask = 0xffffffff;
#endif
	switch (regno) {
	case R8:
	case R9:
	case R10:
	case R11:
	case R12:
	case R13:
	case R14:
	case R15:
	case RIP:
	case RSP:
	case RAX:
	case RBX:
	case RCX:
	case RDX:
	case RSI:
	case RDI:
	case RBP:
	case ORIG_RAX:
	case EFLAGS:
	case FS_BASE:
	case GS_BASE:
		break;
	case FS:
	case GS:
	case DS:
	case ES:
	case SS:
	case CS:
		mask = 0xffff;	/* selectors are 16 bit */
		break;
	default:
		panic("Bad register in getreg: %d\n", regno);
	}
	return mask & child->thread.regs.regs.gp[reg_offsets[regno >> 3]];
}
175
176int peek_user(struct task_struct *child, long addr, long data)
177{
178 /* read the word at location addr in the USER area. */
179 unsigned long tmp;
180
181 if ((addr & 3) || addr < 0)
182 return -EIO;
183
184 tmp = 0; /* Default return condition */
185 if (addr < MAX_REG_OFFSET)
186 tmp = getreg(child, addr);
187 else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
188 (addr <= offsetof(struct user, u_debugreg[7]))) {
189 addr -= offsetof(struct user, u_debugreg[0]);
190 addr = addr >> 2;
191 tmp = child->thread.arch.debugregs[addr];
192 }
193 return put_user(tmp, (unsigned long *) data);
194}
195
196/* XXX Mostly copied from sys-i386 */
197int is_syscall(unsigned long addr)
198{
199 unsigned short instr;
200 int n;
201
202 n = copy_from_user(&instr, (void __user *) addr, sizeof(instr));
203 if (n) {
204 /*
205 * access_process_vm() grants access to vsyscall and stub,
206 * while copy_from_user doesn't. Maybe access_process_vm is
207 * slow, but that doesn't matter, since it will be called only
208 * in case of singlestepping, if copy_from_user failed.
209 */
210 n = access_process_vm(current, addr, &instr, sizeof(instr), 0);
211 if (n != sizeof(instr)) {
212 printk("is_syscall : failed to read instruction from "
213 "0x%lx\n", addr);
214 return 1;
215 }
216 }
217 /* sysenter */
218 return instr == 0x050f;
219}
220
221static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
222{
223 int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
224 long fpregs[HOST_FP_SIZE];
225
226 BUG_ON(sizeof(*buf) != sizeof(fpregs));
227 err = save_fp_registers(userspace_pid[cpu], fpregs);
228 if (err)
229 return err;
230
231 n = copy_to_user(buf, fpregs, sizeof(fpregs));
232 if (n > 0)
233 return -EFAULT;
234
235 return n;
236}
237
238static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
239{
240 int n, cpu = ((struct thread_info *) child->stack)->cpu;
241 long fpregs[HOST_FP_SIZE];
242
243 BUG_ON(sizeof(*buf) != sizeof(fpregs));
244 n = copy_from_user(fpregs, buf, sizeof(fpregs));
245 if (n > 0)
246 return -EFAULT;
247
248 return restore_fp_registers(userspace_pid[cpu], fpregs);
249}
250
251long subarch_ptrace(struct task_struct *child, long request,
252 unsigned long addr, unsigned long data)
253{
254 int ret = -EIO;
255 void __user *datap = (void __user *) data;
256
257 switch (request) {
258 case PTRACE_GETFPREGS: /* Get the child FPU state. */
259 ret = get_fpregs(datap, child);
260 break;
261 case PTRACE_SETFPREGS: /* Set the child FPU state. */
262 ret = set_fpregs(datap, child);
263 break;
264 case PTRACE_ARCH_PRCTL:
265 /* XXX Calls ptrace on the host - needs some SMP thinking */
266 ret = arch_prctl(child, data, (void __user *) addr);
267 break;
268 }
269
270 return ret;
271}
diff --git a/arch/um/sys-x86/ptrace_user.c b/arch/um/sys-x86/ptrace_user.c
deleted file mode 100644
index 3960ca1dd35a..000000000000
--- a/arch/um/sys-x86/ptrace_user.c
+++ /dev/null
@@ -1,21 +0,0 @@
1/*
2 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#include <errno.h>
7#include "ptrace_user.h"
8
/* Fetch the general registers of process pid into regs_out.
 * Returns 0 on success, -errno on failure. */
int ptrace_getregs(long pid, unsigned long *regs_out)
{
	long rc = ptrace(PTRACE_GETREGS, pid, 0, regs_out);

	return rc < 0 ? -errno : 0;
}
15
/* Install the general registers of process pid from regs.
 * Returns 0 on success, -errno on failure. */
int ptrace_setregs(long pid, unsigned long *regs)
{
	long rc = ptrace(PTRACE_SETREGS, pid, 0, regs);

	return rc < 0 ? -errno : 0;
}
diff --git a/arch/um/sys-x86/setjmp_32.S b/arch/um/sys-x86/setjmp_32.S
deleted file mode 100644
index b766792c9933..000000000000
--- a/arch/um/sys-x86/setjmp_32.S
+++ /dev/null
@@ -1,58 +0,0 @@
#
# arch/i386/setjmp.S
#
# setjmp/longjmp for the i386 architecture
#

#
# The jmp_buf is assumed to contain the following, in order:
# %ebx
# %esp
# %ebp
# %esi
# %edi
# <return address>
#
# (The same order as struct __jmp_buf in sysdep/archsetjmp_32.h.)
#

	.text
	.align 4
	.globl setjmp
	.type setjmp, @function
setjmp:
#ifdef _REGPARM
	movl %eax,%edx		# regparm ABI: jmp_buf arrives in %eax
#else
	movl 4(%esp),%edx	# stack ABI: first argument is the jmp_buf
#endif
	popl %ecx		# Return address, and adjust the stack
	xorl %eax,%eax		# Return value
	movl %ebx,(%edx)
	movl %esp,4(%edx)	# Post-return %esp!
	pushl %ecx		# Make the call/return stack happy
	movl %ebp,8(%edx)
	movl %esi,12(%edx)
	movl %edi,16(%edx)
	movl %ecx,20(%edx)	# Return address
	ret

	.size setjmp,.-setjmp

	.text
	.align 4
	.globl longjmp
	.type longjmp, @function
longjmp:
#ifdef _REGPARM
	xchgl %eax,%edx		# %eax = return value, %edx = jmp_buf
#else
	movl 4(%esp),%edx	# jmp_ptr address
	movl 8(%esp),%eax	# Return value
#endif
	movl (%edx),%ebx
	movl 4(%edx),%esp	# switch to the saved stack
	movl 8(%edx),%ebp
	movl 12(%edx),%esi
	movl 16(%edx),%edi
	jmp *20(%edx)		# resume at the saved return address

	.size longjmp,.-longjmp
diff --git a/arch/um/sys-x86/setjmp_64.S b/arch/um/sys-x86/setjmp_64.S
deleted file mode 100644
index 45f547b4043e..000000000000
--- a/arch/um/sys-x86/setjmp_64.S
+++ /dev/null
@@ -1,54 +0,0 @@
#
# arch/x86_64/setjmp.S
#
# setjmp/longjmp for the x86-64 architecture
#

#
# The jmp_buf is assumed to contain the following, in order:
# %rbx
# %rsp (post-return)
# %rbp
# %r12
# %r13
# %r14
# %r15
# <return address>
#
# (The same order as struct __jmp_buf in sysdep/archsetjmp_64.h.
# The jmp_buf pointer arrives in %rdi per the SysV AMD64 ABI.)
#

	.text
	.align 4
	.globl setjmp
	.type setjmp, @function
setjmp:
	pop  %rsi		# Return address, and adjust the stack
	xorl %eax,%eax		# Return value
	movq %rbx,(%rdi)
	movq %rsp,8(%rdi)	# Post-return %rsp!
	push %rsi		# Make the call/return stack happy
	movq %rbp,16(%rdi)
	movq %r12,24(%rdi)
	movq %r13,32(%rdi)
	movq %r14,40(%rdi)
	movq %r15,48(%rdi)
	movq %rsi,56(%rdi)	# Return address
	ret

	.size setjmp,.-setjmp

	.text
	.align 4
	.globl longjmp
	.type longjmp, @function
longjmp:
	movl %esi,%eax		# Return value (int)
	movq (%rdi),%rbx
	movq 8(%rdi),%rsp	# switch to the saved stack
	movq 16(%rdi),%rbp
	movq 24(%rdi),%r12
	movq 32(%rdi),%r13
	movq 40(%rdi),%r14
	movq 48(%rdi),%r15
	jmp *56(%rdi)		# resume at the saved return address

	.size longjmp,.-longjmp
diff --git a/arch/um/sys-x86/shared/sysdep/archsetjmp.h b/arch/um/sys-x86/shared/sysdep/archsetjmp.h
deleted file mode 100644
index ff7766d28226..000000000000
--- a/arch/um/sys-x86/shared/sysdep/archsetjmp.h
+++ /dev/null
@@ -1,5 +0,0 @@
/* Select the jmp_buf layout matching the host word size. */
#ifdef __i386__
#include "archsetjmp_32.h"
#else
#include "archsetjmp_64.h"
#endif
diff --git a/arch/um/sys-x86/shared/sysdep/archsetjmp_32.h b/arch/um/sys-x86/shared/sysdep/archsetjmp_32.h
deleted file mode 100644
index 0f312085ce1d..000000000000
--- a/arch/um/sys-x86/shared/sysdep/archsetjmp_32.h
+++ /dev/null
@@ -1,22 +0,0 @@
/*
 * arch/um/include/sysdep-i386/archsetjmp.h
 *
 * jmp_buf layout for i386; the field order matches the stores
 * performed by setjmp in setjmp_32.S.
 */

#ifndef _KLIBC_ARCHSETJMP_H
#define _KLIBC_ARCHSETJMP_H

struct __jmp_buf {
	unsigned int __ebx;
	unsigned int __esp;	/* post-return stack pointer */
	unsigned int __ebp;
	unsigned int __esi;
	unsigned int __edi;
	unsigned int __eip;	/* saved return address */
};

typedef struct __jmp_buf jmp_buf[1];

/* Field names used by generic code to inspect/craft a jump buffer. */
#define JB_IP __eip
#define JB_SP __esp

#endif /* _KLIBC_ARCHSETJMP_H */
diff --git a/arch/um/sys-x86/shared/sysdep/archsetjmp_64.h b/arch/um/sys-x86/shared/sysdep/archsetjmp_64.h
deleted file mode 100644
index 2af8f12ca161..000000000000
--- a/arch/um/sys-x86/shared/sysdep/archsetjmp_64.h
+++ /dev/null
@@ -1,24 +0,0 @@
/*
 * arch/um/include/sysdep-x86_64/archsetjmp.h
 *
 * jmp_buf layout for x86_64; the field order matches the stores
 * performed by setjmp in setjmp_64.S.
 */

#ifndef _KLIBC_ARCHSETJMP_H
#define _KLIBC_ARCHSETJMP_H

struct __jmp_buf {
	unsigned long __rbx;
	unsigned long __rsp;	/* post-return stack pointer */
	unsigned long __rbp;
	unsigned long __r12;
	unsigned long __r13;
	unsigned long __r14;
	unsigned long __r15;
	unsigned long __rip;	/* saved return address */
};

typedef struct __jmp_buf jmp_buf[1];

/* Field names used by generic code to inspect/craft a jump buffer. */
#define JB_IP __rip
#define JB_SP __rsp

#endif /* _KLIBC_ARCHSETJMP_H */
diff --git a/arch/um/sys-x86/shared/sysdep/faultinfo.h b/arch/um/sys-x86/shared/sysdep/faultinfo.h
deleted file mode 100644
index 862ecb1c7781..000000000000
--- a/arch/um/sys-x86/shared/sysdep/faultinfo.h
+++ /dev/null
@@ -1,5 +0,0 @@
/* Select the width-specific fault information definitions. */
#ifdef __i386__
#include "faultinfo_32.h"
#else
#include "faultinfo_64.h"
#endif
diff --git a/arch/um/sys-x86/shared/sysdep/faultinfo_32.h b/arch/um/sys-x86/shared/sysdep/faultinfo_32.h
deleted file mode 100644
index a26086b8a800..000000000000
--- a/arch/um/sys-x86/shared/sysdep/faultinfo_32.h
+++ /dev/null
@@ -1,35 +0,0 @@
/*
 * Copyright (C) 2004 Fujitsu Siemens Computers GmbH
 * Author: Bodo Stroesser <bstroesser@fujitsu-siemens.com>
 * Licensed under the GPL
 */

#ifndef __FAULTINFO_I386_H
#define __FAULTINFO_I386_H

/* this structure contains the full arch-specific faultinfo
 * from the traps.
 * On i386, ptrace_faultinfo unfortunately doesn't provide
 * all the info, since trap_no is missing.
 * All common elements are defined at the same position in
 * both structures, thus making it easy to copy the
 * contents without knowledge about the structure elements.
 */
struct faultinfo {
        int error_code; /* in ptrace_faultinfo misleadingly called is_write */
        unsigned long cr2; /* in ptrace_faultinfo called addr */
        int trap_no; /* missing in ptrace_faultinfo */
};

/* Bit 1 of the page-fault error code is set for write accesses. */
#define FAULT_WRITE(fi) ((fi).error_code & 2)
#define FAULT_ADDRESS(fi) ((fi).cr2)

/* This is Page Fault */
#define SEGV_IS_FIXABLE(fi) ((fi)->trap_no == 14)

/* SKAS3 has no trap_no on i386, but get_skas_faultinfo() sets it to 0. */
#define SEGV_MAYBE_FIXABLE(fi) ((fi)->trap_no == 0 && ptrace_faultinfo)

#define PTRACE_FULL_FAULTINFO 0

#endif
diff --git a/arch/um/sys-x86/shared/sysdep/faultinfo_64.h b/arch/um/sys-x86/shared/sysdep/faultinfo_64.h
deleted file mode 100644
index f811cbe15d62..000000000000
--- a/arch/um/sys-x86/shared/sysdep/faultinfo_64.h
+++ /dev/null
@@ -1,35 +0,0 @@
/*
 * Copyright (C) 2004 Fujitsu Siemens Computers GmbH
 * Author: Bodo Stroesser <bstroesser@fujitsu-siemens.com>
 * Licensed under the GPL
 */

#ifndef __FAULTINFO_X86_64_H
#define __FAULTINFO_X86_64_H

/* this structure contains the full arch-specific faultinfo
 * from the traps.
 * On i386, ptrace_faultinfo unfortunately doesn't provide
 * all the info, since trap_no is missing.
 * All common elements are defined at the same position in
 * both structures, thus making it easy to copy the
 * contents without knowledge about the structure elements.
 */
struct faultinfo {
        int error_code; /* in ptrace_faultinfo misleadingly called is_write */
        unsigned long cr2; /* in ptrace_faultinfo called addr */
        int trap_no; /* missing in ptrace_faultinfo */
};

/* Bit 1 of the page-fault error code is set for write accesses. */
#define FAULT_WRITE(fi) ((fi).error_code & 2)
#define FAULT_ADDRESS(fi) ((fi).cr2)

/* This is Page Fault */
#define SEGV_IS_FIXABLE(fi) ((fi)->trap_no == 14)

/* No broken SKAS API, which doesn't pass trap_no, here. */
#define SEGV_MAYBE_FIXABLE(fi) 0

#define PTRACE_FULL_FAULTINFO 1

#endif
diff --git a/arch/um/sys-x86/shared/sysdep/host_ldt.h b/arch/um/sys-x86/shared/sysdep/host_ldt.h
deleted file mode 100644
index 94518b3e0da5..000000000000
--- a/arch/um/sys-x86/shared/sysdep/host_ldt.h
+++ /dev/null
@@ -1,5 +0,0 @@
/* Select the width-specific host LDT descriptor macros. */
#ifdef __i386__
#include "host_ldt_32.h"
#else
#include "host_ldt_64.h"
#endif
diff --git a/arch/um/sys-x86/shared/sysdep/host_ldt_32.h b/arch/um/sys-x86/shared/sysdep/host_ldt_32.h
deleted file mode 100644
index 0953cc4df652..000000000000
--- a/arch/um/sys-x86/shared/sysdep/host_ldt_32.h
+++ /dev/null
@@ -1,34 +0,0 @@
#ifndef __ASM_HOST_LDT_I386_H
#define __ASM_HOST_LDT_I386_H

#include <asm/ldt.h>

/*
 * macros stolen from include/asm-i386/desc.h
 */
/* Low descriptor word: base[15:0] in the high half, limit[15:0] low. */
#define LDT_entry_a(info) \
	((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))

/* High descriptor word: base[31:24]/[23:16], limit[19:16] and the
 * type/present/granularity bits derived from the user_desc fields. */
#define LDT_entry_b(info) \
	(((info)->base_addr & 0xff000000) | \
	 (((info)->base_addr & 0x00ff0000) >> 16) | \
	 ((info)->limit & 0xf0000) | \
	 (((info)->read_exec_only ^ 1) << 9) | \
	 ((info)->contents << 10) | \
	 (((info)->seg_not_present ^ 1) << 15) | \
	 ((info)->seg_32bit << 22) | \
	 ((info)->limit_in_pages << 23) | \
	 ((info)->useable << 20) | \
	 0x7000)

/* True for the "empty" user_desc pattern: all fields clear except
 * read_exec_only and seg_not_present. */
#define LDT_empty(info) (\
	(info)->base_addr == 0 && \
	(info)->limit == 0 && \
	(info)->contents == 0 && \
	(info)->read_exec_only == 1 && \
	(info)->seg_32bit == 0 && \
	(info)->limit_in_pages == 0 && \
	(info)->seg_not_present == 1 && \
	(info)->useable == 0 )

#endif
diff --git a/arch/um/sys-x86/shared/sysdep/host_ldt_64.h b/arch/um/sys-x86/shared/sysdep/host_ldt_64.h
deleted file mode 100644
index e8b1be1e154f..000000000000
--- a/arch/um/sys-x86/shared/sysdep/host_ldt_64.h
+++ /dev/null
@@ -1,38 +0,0 @@
1#ifndef __ASM_HOST_LDT_X86_64_H
2#define __ASM_HOST_LDT_X86_64_H
3
4#include <asm/ldt.h>
5
6/*
7 * macros stolen from include/asm-x86_64/desc.h
8 */
9#define LDT_entry_a(info) \
10 ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
11
12/* Don't allow setting of the lm bit. It is useless anyways because
13 * 64bit system calls require __USER_CS. */
14#define LDT_entry_b(info) \
15 (((info)->base_addr & 0xff000000) | \
16 (((info)->base_addr & 0x00ff0000) >> 16) | \
17 ((info)->limit & 0xf0000) | \
18 (((info)->read_exec_only ^ 1) << 9) | \
19 ((info)->contents << 10) | \
20 (((info)->seg_not_present ^ 1) << 15) | \
21 ((info)->seg_32bit << 22) | \
22 ((info)->limit_in_pages << 23) | \
23 ((info)->useable << 20) | \
24 /* ((info)->lm << 21) | */ \
25 0x7000)
26
27#define LDT_empty(info) (\
28 (info)->base_addr == 0 && \
29 (info)->limit == 0 && \
30 (info)->contents == 0 && \
31 (info)->read_exec_only == 1 && \
32 (info)->seg_32bit == 0 && \
33 (info)->limit_in_pages == 0 && \
34 (info)->seg_not_present == 1 && \
35 (info)->useable == 0 && \
36 (info)->lm == 0)
37
38#endif
diff --git a/arch/um/sys-x86/shared/sysdep/kernel-offsets.h b/arch/um/sys-x86/shared/sysdep/kernel-offsets.h
deleted file mode 100644
index 5868526b5eef..000000000000
--- a/arch/um/sys-x86/shared/sysdep/kernel-offsets.h
+++ /dev/null
@@ -1,21 +0,0 @@
1#include <linux/stddef.h>
2#include <linux/sched.h>
3#include <linux/elf.h>
4#include <linux/crypto.h>
5#include <asm/mman.h>
6
7#define DEFINE(sym, val) \
8 asm volatile("\n->" #sym " %0 " #val : : "i" (val))
9
10#define STR(x) #x
11#define DEFINE_STR(sym, val) asm volatile("\n->" #sym " " STR(val) " " #val: : )
12
13#define BLANK() asm volatile("\n->" : : )
14
15#define OFFSET(sym, str, mem) \
16 DEFINE(sym, offsetof(struct str, mem));
17
18void foo(void)
19{
20#include <common-offsets.h>
21}
diff --git a/arch/um/sys-x86/shared/sysdep/mcontext.h b/arch/um/sys-x86/shared/sysdep/mcontext.h
deleted file mode 100644
index b724c54da316..000000000000
--- a/arch/um/sys-x86/shared/sysdep/mcontext.h
+++ /dev/null
@@ -1,31 +0,0 @@
1/*
2 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __SYS_SIGCONTEXT_X86_H
7#define __SYS_SIGCONTEXT_X86_H
8
9extern void get_regs_from_mc(struct uml_pt_regs *, mcontext_t *);
10
11#ifdef __i386__
12
13#define GET_FAULTINFO_FROM_MC(fi, mc) \
14 { \
15 (fi).cr2 = (mc)->cr2; \
16 (fi).error_code = (mc)->gregs[REG_ERR]; \
17 (fi).trap_no = (mc)->gregs[REG_TRAPNO]; \
18 }
19
20#else
21
22#define GET_FAULTINFO_FROM_MC(fi, mc) \
23 { \
24 (fi).cr2 = (mc)->gregs[REG_CR2]; \
25 (fi).error_code = (mc)->gregs[REG_ERR]; \
26 (fi).trap_no = (mc)->gregs[REG_TRAPNO]; \
27 }
28
29#endif
30
31#endif
diff --git a/arch/um/sys-x86/shared/sysdep/ptrace.h b/arch/um/sys-x86/shared/sysdep/ptrace.h
deleted file mode 100644
index 711b1621747f..000000000000
--- a/arch/um/sys-x86/shared/sysdep/ptrace.h
+++ /dev/null
@@ -1,5 +0,0 @@
1#ifdef __i386__
2#include "ptrace_32.h"
3#else
4#include "ptrace_64.h"
5#endif
diff --git a/arch/um/sys-x86/shared/sysdep/ptrace_32.h b/arch/um/sys-x86/shared/sysdep/ptrace_32.h
deleted file mode 100644
index ce77fa1e2a15..000000000000
--- a/arch/um/sys-x86/shared/sysdep/ptrace_32.h
+++ /dev/null
@@ -1,117 +0,0 @@
1/*
2 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __SYSDEP_I386_PTRACE_H
7#define __SYSDEP_I386_PTRACE_H
8
9#include <generated/user_constants.h>
10#include "sysdep/faultinfo.h"
11
12#define MAX_REG_NR (UM_FRAME_SIZE / sizeof(unsigned long))
13#define MAX_REG_OFFSET (UM_FRAME_SIZE)
14
15static inline void update_debugregs(int seq) {}
16
17/* syscall emulation path in ptrace */
18
19#ifndef PTRACE_SYSEMU
20#define PTRACE_SYSEMU 31
21#endif
22
23void set_using_sysemu(int value);
24int get_using_sysemu(void);
25extern int sysemu_supported;
26
27#define REGS_IP(r) ((r)[HOST_IP])
28#define REGS_SP(r) ((r)[HOST_SP])
29#define REGS_EFLAGS(r) ((r)[HOST_EFLAGS])
30#define REGS_EAX(r) ((r)[HOST_EAX])
31#define REGS_EBX(r) ((r)[HOST_EBX])
32#define REGS_ECX(r) ((r)[HOST_ECX])
33#define REGS_EDX(r) ((r)[HOST_EDX])
34#define REGS_ESI(r) ((r)[HOST_ESI])
35#define REGS_EDI(r) ((r)[HOST_EDI])
36#define REGS_EBP(r) ((r)[HOST_EBP])
37#define REGS_CS(r) ((r)[HOST_CS])
38#define REGS_SS(r) ((r)[HOST_SS])
39#define REGS_DS(r) ((r)[HOST_DS])
40#define REGS_ES(r) ((r)[HOST_ES])
41#define REGS_FS(r) ((r)[HOST_FS])
42#define REGS_GS(r) ((r)[HOST_GS])
43
44#define REGS_SET_SYSCALL_RETURN(r, res) REGS_EAX(r) = (res)
45
46#define IP_RESTART_SYSCALL(ip) ((ip) -= 2)
47#define REGS_RESTART_SYSCALL(r) IP_RESTART_SYSCALL(REGS_IP(r))
48
49#ifndef PTRACE_SYSEMU_SINGLESTEP
50#define PTRACE_SYSEMU_SINGLESTEP 32
51#endif
52
53struct uml_pt_regs {
54 unsigned long gp[MAX_REG_NR];
55 unsigned long fp[HOST_FPX_SIZE];
56 struct faultinfo faultinfo;
57 long syscall;
58 int is_user;
59};
60
61#define EMPTY_UML_PT_REGS { }
62
63#define UPT_IP(r) REGS_IP((r)->gp)
64#define UPT_SP(r) REGS_SP((r)->gp)
65#define UPT_EFLAGS(r) REGS_EFLAGS((r)->gp)
66#define UPT_EAX(r) REGS_EAX((r)->gp)
67#define UPT_EBX(r) REGS_EBX((r)->gp)
68#define UPT_ECX(r) REGS_ECX((r)->gp)
69#define UPT_EDX(r) REGS_EDX((r)->gp)
70#define UPT_ESI(r) REGS_ESI((r)->gp)
71#define UPT_EDI(r) REGS_EDI((r)->gp)
72#define UPT_EBP(r) REGS_EBP((r)->gp)
73#define UPT_ORIG_EAX(r) ((r)->syscall)
74#define UPT_CS(r) REGS_CS((r)->gp)
75#define UPT_SS(r) REGS_SS((r)->gp)
76#define UPT_DS(r) REGS_DS((r)->gp)
77#define UPT_ES(r) REGS_ES((r)->gp)
78#define UPT_FS(r) REGS_FS((r)->gp)
79#define UPT_GS(r) REGS_GS((r)->gp)
80
81#define UPT_SYSCALL_ARG1(r) UPT_EBX(r)
82#define UPT_SYSCALL_ARG2(r) UPT_ECX(r)
83#define UPT_SYSCALL_ARG3(r) UPT_EDX(r)
84#define UPT_SYSCALL_ARG4(r) UPT_ESI(r)
85#define UPT_SYSCALL_ARG5(r) UPT_EDI(r)
86#define UPT_SYSCALL_ARG6(r) UPT_EBP(r)
87
88extern int user_context(unsigned long sp);
89
90#define UPT_IS_USER(r) ((r)->is_user)
91
92struct syscall_args {
93 unsigned long args[6];
94};
95
96#define SYSCALL_ARGS(r) ((struct syscall_args) \
97 { .args = { UPT_SYSCALL_ARG1(r), \
98 UPT_SYSCALL_ARG2(r), \
99 UPT_SYSCALL_ARG3(r), \
100 UPT_SYSCALL_ARG4(r), \
101 UPT_SYSCALL_ARG5(r), \
102 UPT_SYSCALL_ARG6(r) } } )
103
104#define UPT_SET_SYSCALL_RETURN(r, res) \
105 REGS_SET_SYSCALL_RETURN((r)->regs, (res))
106
107#define UPT_RESTART_SYSCALL(r) REGS_RESTART_SYSCALL((r)->gp)
108
109#define UPT_ORIG_SYSCALL(r) UPT_EAX(r)
110#define UPT_SYSCALL_NR(r) UPT_ORIG_EAX(r)
111#define UPT_SYSCALL_RET(r) UPT_EAX(r)
112
113#define UPT_FAULTINFO(r) (&(r)->faultinfo)
114
115extern void arch_init_registers(int pid);
116
117#endif
diff --git a/arch/um/sys-x86/shared/sysdep/ptrace_64.h b/arch/um/sys-x86/shared/sysdep/ptrace_64.h
deleted file mode 100644
index 866fe7e47369..000000000000
--- a/arch/um/sys-x86/shared/sysdep/ptrace_64.h
+++ /dev/null
@@ -1,160 +0,0 @@
1/*
2 * Copyright 2003 PathScale, Inc.
3 * Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
4 *
5 * Licensed under the GPL
6 */
7
8#ifndef __SYSDEP_X86_64_PTRACE_H
9#define __SYSDEP_X86_64_PTRACE_H
10
11#include <generated/user_constants.h>
12#include "sysdep/faultinfo.h"
13
14#define MAX_REG_OFFSET (UM_FRAME_SIZE)
15#define MAX_REG_NR ((MAX_REG_OFFSET) / sizeof(unsigned long))
16
17#define REGS_IP(r) ((r)[HOST_IP])
18#define REGS_SP(r) ((r)[HOST_SP])
19
20#define REGS_RBX(r) ((r)[HOST_RBX])
21#define REGS_RCX(r) ((r)[HOST_RCX])
22#define REGS_RDX(r) ((r)[HOST_RDX])
23#define REGS_RSI(r) ((r)[HOST_RSI])
24#define REGS_RDI(r) ((r)[HOST_RDI])
25#define REGS_RBP(r) ((r)[HOST_RBP])
26#define REGS_RAX(r) ((r)[HOST_RAX])
27#define REGS_R8(r) ((r)[HOST_R8])
28#define REGS_R9(r) ((r)[HOST_R9])
29#define REGS_R10(r) ((r)[HOST_R10])
30#define REGS_R11(r) ((r)[HOST_R11])
31#define REGS_R12(r) ((r)[HOST_R12])
32#define REGS_R13(r) ((r)[HOST_R13])
33#define REGS_R14(r) ((r)[HOST_R14])
34#define REGS_R15(r) ((r)[HOST_R15])
35#define REGS_CS(r) ((r)[HOST_CS])
36#define REGS_EFLAGS(r) ((r)[HOST_EFLAGS])
37#define REGS_SS(r) ((r)[HOST_SS])
38
39#define HOST_FS_BASE 21
40#define HOST_GS_BASE 22
41#define HOST_DS 23
42#define HOST_ES 24
43#define HOST_FS 25
44#define HOST_GS 26
45
46/* Also defined in asm/ptrace-x86_64.h, but not in libc headers. So, these
47 * are already defined for kernel code, but not for userspace code.
48 */
49#ifndef FS_BASE
50/* These aren't defined in ptrace.h, but exist in struct user_regs_struct,
51 * which is what x86_64 ptrace actually uses.
52 */
53#define FS_BASE (HOST_FS_BASE * sizeof(long))
54#define GS_BASE (HOST_GS_BASE * sizeof(long))
55#define DS (HOST_DS * sizeof(long))
56#define ES (HOST_ES * sizeof(long))
57#define FS (HOST_FS * sizeof(long))
58#define GS (HOST_GS * sizeof(long))
59#endif
60
61#define REGS_FS_BASE(r) ((r)[HOST_FS_BASE])
62#define REGS_GS_BASE(r) ((r)[HOST_GS_BASE])
63#define REGS_DS(r) ((r)[HOST_DS])
64#define REGS_ES(r) ((r)[HOST_ES])
65#define REGS_FS(r) ((r)[HOST_FS])
66#define REGS_GS(r) ((r)[HOST_GS])
67
68#define REGS_ORIG_RAX(r) ((r)[HOST_ORIG_RAX])
69
70#define REGS_SET_SYSCALL_RETURN(r, res) REGS_RAX(r) = (res)
71
72#define IP_RESTART_SYSCALL(ip) ((ip) -= 2)
73#define REGS_RESTART_SYSCALL(r) IP_RESTART_SYSCALL(REGS_IP(r))
74
75#define REGS_FAULT_ADDR(r) ((r)->fault_addr)
76
77#define REGS_FAULT_WRITE(r) FAULT_WRITE((r)->fault_type)
78
79#define REGS_TRAP(r) ((r)->trap_type)
80
81#define REGS_ERR(r) ((r)->fault_type)
82
83struct uml_pt_regs {
84 unsigned long gp[MAX_REG_NR];
85 unsigned long fp[HOST_FP_SIZE];
86 struct faultinfo faultinfo;
87 long syscall;
88 int is_user;
89};
90
91#define EMPTY_UML_PT_REGS { }
92
93#define UPT_RBX(r) REGS_RBX((r)->gp)
94#define UPT_RCX(r) REGS_RCX((r)->gp)
95#define UPT_RDX(r) REGS_RDX((r)->gp)
96#define UPT_RSI(r) REGS_RSI((r)->gp)
97#define UPT_RDI(r) REGS_RDI((r)->gp)
98#define UPT_RBP(r) REGS_RBP((r)->gp)
99#define UPT_RAX(r) REGS_RAX((r)->gp)
100#define UPT_R8(r) REGS_R8((r)->gp)
101#define UPT_R9(r) REGS_R9((r)->gp)
102#define UPT_R10(r) REGS_R10((r)->gp)
103#define UPT_R11(r) REGS_R11((r)->gp)
104#define UPT_R12(r) REGS_R12((r)->gp)
105#define UPT_R13(r) REGS_R13((r)->gp)
106#define UPT_R14(r) REGS_R14((r)->gp)
107#define UPT_R15(r) REGS_R15((r)->gp)
108#define UPT_CS(r) REGS_CS((r)->gp)
109#define UPT_FS_BASE(r) REGS_FS_BASE((r)->gp)
110#define UPT_FS(r) REGS_FS((r)->gp)
111#define UPT_GS_BASE(r) REGS_GS_BASE((r)->gp)
112#define UPT_GS(r) REGS_GS((r)->gp)
113#define UPT_DS(r) REGS_DS((r)->gp)
114#define UPT_ES(r) REGS_ES((r)->gp)
115#define UPT_CS(r) REGS_CS((r)->gp)
116#define UPT_SS(r) REGS_SS((r)->gp)
117#define UPT_ORIG_RAX(r) REGS_ORIG_RAX((r)->gp)
118
119#define UPT_IP(r) REGS_IP((r)->gp)
120#define UPT_SP(r) REGS_SP((r)->gp)
121
122#define UPT_EFLAGS(r) REGS_EFLAGS((r)->gp)
123#define UPT_SYSCALL_NR(r) ((r)->syscall)
124#define UPT_SYSCALL_RET(r) UPT_RAX(r)
125
126extern int user_context(unsigned long sp);
127
128#define UPT_IS_USER(r) ((r)->is_user)
129
130#define UPT_SYSCALL_ARG1(r) UPT_RDI(r)
131#define UPT_SYSCALL_ARG2(r) UPT_RSI(r)
132#define UPT_SYSCALL_ARG3(r) UPT_RDX(r)
133#define UPT_SYSCALL_ARG4(r) UPT_R10(r)
134#define UPT_SYSCALL_ARG5(r) UPT_R8(r)
135#define UPT_SYSCALL_ARG6(r) UPT_R9(r)
136
137struct syscall_args {
138 unsigned long args[6];
139};
140
141#define SYSCALL_ARGS(r) ((struct syscall_args) \
142 { .args = { UPT_SYSCALL_ARG1(r), \
143 UPT_SYSCALL_ARG2(r), \
144 UPT_SYSCALL_ARG3(r), \
145 UPT_SYSCALL_ARG4(r), \
146 UPT_SYSCALL_ARG5(r), \
147 UPT_SYSCALL_ARG6(r) } } )
148
149#define UPT_SET_SYSCALL_RETURN(r, res) \
150 REGS_SET_SYSCALL_RETURN((r)->regs, (res))
151
152#define UPT_RESTART_SYSCALL(r) REGS_RESTART_SYSCALL((r)->gp)
153
154#define UPT_FAULTINFO(r) (&(r)->faultinfo)
155
156static inline void arch_init_registers(int pid)
157{
158}
159
160#endif
diff --git a/arch/um/sys-x86/shared/sysdep/ptrace_user.h b/arch/um/sys-x86/shared/sysdep/ptrace_user.h
deleted file mode 100644
index a92f883264ed..000000000000
--- a/arch/um/sys-x86/shared/sysdep/ptrace_user.h
+++ /dev/null
@@ -1,5 +0,0 @@
1#ifdef __i386__
2#include "ptrace_user_32.h"
3#else
4#include "ptrace_user_64.h"
5#endif
diff --git a/arch/um/sys-x86/shared/sysdep/ptrace_user_32.h b/arch/um/sys-x86/shared/sysdep/ptrace_user_32.h
deleted file mode 100644
index 9d88a79a138b..000000000000
--- a/arch/um/sys-x86/shared/sysdep/ptrace_user_32.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __SYSDEP_I386_PTRACE_USER_H__
7#define __SYSDEP_I386_PTRACE_USER_H__
8
9#include <sys/ptrace.h>
10#include <linux/ptrace.h>
11#include <asm/ptrace.h>
12#include <generated/user_constants.h>
13
14#define PT_OFFSET(r) ((r) * sizeof(long))
15
16#define PT_SYSCALL_NR(regs) ((regs)[ORIG_EAX])
17#define PT_SYSCALL_NR_OFFSET PT_OFFSET(ORIG_EAX)
18
19#define PT_SYSCALL_RET_OFFSET PT_OFFSET(EAX)
20
21#define REGS_IP_INDEX EIP
22#define REGS_SP_INDEX UESP
23
24#define FP_SIZE ((HOST_FPX_SIZE > HOST_FP_SIZE) ? HOST_FPX_SIZE : HOST_FP_SIZE)
25
26#endif
diff --git a/arch/um/sys-x86/shared/sysdep/ptrace_user_64.h b/arch/um/sys-x86/shared/sysdep/ptrace_user_64.h
deleted file mode 100644
index 2f1b6e33d590..000000000000
--- a/arch/um/sys-x86/shared/sysdep/ptrace_user_64.h
+++ /dev/null
@@ -1,38 +0,0 @@
1/*
2 * Copyright 2003 PathScale, Inc.
3 *
4 * Licensed under the GPL
5 */
6
7#ifndef __SYSDEP_X86_64_PTRACE_USER_H__
8#define __SYSDEP_X86_64_PTRACE_USER_H__
9
10#define __FRAME_OFFSETS
11#include <sys/ptrace.h>
12#include <linux/ptrace.h>
13#include <asm/ptrace.h>
14#undef __FRAME_OFFSETS
15#include <generated/user_constants.h>
16
17#define PT_INDEX(off) ((off) / sizeof(unsigned long))
18
19#define PT_SYSCALL_NR(regs) ((regs)[PT_INDEX(ORIG_RAX)])
20#define PT_SYSCALL_NR_OFFSET (ORIG_RAX)
21
22#define PT_SYSCALL_RET_OFFSET (RAX)
23
24/*
25 * x86_64 FC3 doesn't define this in /usr/include/linux/ptrace.h even though
26 * it's defined in the kernel's include/linux/ptrace.h. Additionally, use the
27 * 2.4 name and value for 2.4 host compatibility.
28 */
29#ifndef PTRACE_OLDSETOPTIONS
30#define PTRACE_OLDSETOPTIONS 21
31#endif
32
33#define REGS_IP_INDEX PT_INDEX(RIP)
34#define REGS_SP_INDEX PT_INDEX(RSP)
35
36#define FP_SIZE (HOST_FP_SIZE)
37
38#endif
diff --git a/arch/um/sys-x86/shared/sysdep/skas_ptrace.h b/arch/um/sys-x86/shared/sysdep/skas_ptrace.h
deleted file mode 100644
index 453febe98993..000000000000
--- a/arch/um/sys-x86/shared/sysdep/skas_ptrace.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/*
2 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __SYSDEP_X86_SKAS_PTRACE_H
7#define __SYSDEP_X86_SKAS_PTRACE_H
8
9struct ptrace_faultinfo {
10 int is_write;
11 unsigned long addr;
12};
13
14struct ptrace_ldt {
15 int func;
16 void *ptr;
17 unsigned long bytecount;
18};
19
20#define PTRACE_LDT 54
21
22#endif
diff --git a/arch/um/sys-x86/shared/sysdep/stub.h b/arch/um/sys-x86/shared/sysdep/stub.h
deleted file mode 100644
index bd161e300102..000000000000
--- a/arch/um/sys-x86/shared/sysdep/stub.h
+++ /dev/null
@@ -1,14 +0,0 @@
1#include <asm/unistd.h>
2#include <sys/mman.h>
3#include <signal.h>
4#include "as-layout.h"
5#include "stub-data.h"
6
7#ifdef __i386__
8#include "stub_32.h"
9#else
10#include "stub_64.h"
11#endif
12
13extern void stub_segv_handler(int, siginfo_t *, void *);
14extern void stub_clone_handler(void);
diff --git a/arch/um/sys-x86/shared/sysdep/stub_32.h b/arch/um/sys-x86/shared/sysdep/stub_32.h
deleted file mode 100644
index 51fd256c75f0..000000000000
--- a/arch/um/sys-x86/shared/sysdep/stub_32.h
+++ /dev/null
@@ -1,93 +0,0 @@
1/*
2 * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __SYSDEP_STUB_H
7#define __SYSDEP_STUB_H
8
9#include <asm/ptrace.h>
10
11#define STUB_SYSCALL_RET EAX
12#define STUB_MMAP_NR __NR_mmap2
13#define MMAP_OFFSET(o) ((o) >> UM_KERN_PAGE_SHIFT)
14
15static inline long stub_syscall0(long syscall)
16{
17 long ret;
18
19 __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall));
20
21 return ret;
22}
23
24static inline long stub_syscall1(long syscall, long arg1)
25{
26 long ret;
27
28 __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1));
29
30 return ret;
31}
32
33static inline long stub_syscall2(long syscall, long arg1, long arg2)
34{
35 long ret;
36
37 __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
38 "c" (arg2));
39
40 return ret;
41}
42
43static inline long stub_syscall3(long syscall, long arg1, long arg2, long arg3)
44{
45 long ret;
46
47 __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
48 "c" (arg2), "d" (arg3));
49
50 return ret;
51}
52
53static inline long stub_syscall4(long syscall, long arg1, long arg2, long arg3,
54 long arg4)
55{
56 long ret;
57
58 __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
59 "c" (arg2), "d" (arg3), "S" (arg4));
60
61 return ret;
62}
63
64static inline long stub_syscall5(long syscall, long arg1, long arg2, long arg3,
65 long arg4, long arg5)
66{
67 long ret;
68
69 __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
70 "c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5));
71
72 return ret;
73}
74
75static inline void trap_myself(void)
76{
77 __asm("int3");
78}
79
80static inline void remap_stack(int fd, unsigned long offset)
81{
82 __asm__ volatile ("movl %%eax,%%ebp ; movl %0,%%eax ; int $0x80 ;"
83 "movl %7, %%ebx ; movl %%eax, (%%ebx)"
84 : : "g" (STUB_MMAP_NR), "b" (STUB_DATA),
85 "c" (UM_KERN_PAGE_SIZE),
86 "d" (PROT_READ | PROT_WRITE),
87 "S" (MAP_FIXED | MAP_SHARED), "D" (fd),
88 "a" (offset),
89 "i" (&((struct stub_data *) STUB_DATA)->err)
90 : "memory");
91}
92
93#endif
diff --git a/arch/um/sys-x86/shared/sysdep/stub_64.h b/arch/um/sys-x86/shared/sysdep/stub_64.h
deleted file mode 100644
index 994df93c5ed3..000000000000
--- a/arch/um/sys-x86/shared/sysdep/stub_64.h
+++ /dev/null
@@ -1,99 +0,0 @@
1/*
2 * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __SYSDEP_STUB_H
7#define __SYSDEP_STUB_H
8
9#include <sysdep/ptrace_user.h>
10
11#define STUB_SYSCALL_RET PT_INDEX(RAX)
12#define STUB_MMAP_NR __NR_mmap
13#define MMAP_OFFSET(o) (o)
14
15#define __syscall_clobber "r11","rcx","memory"
16#define __syscall "syscall"
17
18static inline long stub_syscall0(long syscall)
19{
20 long ret;
21
22 __asm__ volatile (__syscall
23 : "=a" (ret)
24 : "0" (syscall) : __syscall_clobber );
25
26 return ret;
27}
28
29static inline long stub_syscall2(long syscall, long arg1, long arg2)
30{
31 long ret;
32
33 __asm__ volatile (__syscall
34 : "=a" (ret)
35 : "0" (syscall), "D" (arg1), "S" (arg2) : __syscall_clobber );
36
37 return ret;
38}
39
40static inline long stub_syscall3(long syscall, long arg1, long arg2, long arg3)
41{
42 long ret;
43
44 __asm__ volatile (__syscall
45 : "=a" (ret)
46 : "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3)
47 : __syscall_clobber );
48
49 return ret;
50}
51
52static inline long stub_syscall4(long syscall, long arg1, long arg2, long arg3,
53 long arg4)
54{
55 long ret;
56
57 __asm__ volatile ("movq %5,%%r10 ; " __syscall
58 : "=a" (ret)
59 : "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3),
60 "g" (arg4)
61 : __syscall_clobber, "r10" );
62
63 return ret;
64}
65
66static inline long stub_syscall5(long syscall, long arg1, long arg2, long arg3,
67 long arg4, long arg5)
68{
69 long ret;
70
71 __asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; " __syscall
72 : "=a" (ret)
73 : "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3),
74 "g" (arg4), "g" (arg5)
75 : __syscall_clobber, "r10", "r8" );
76
77 return ret;
78}
79
80static inline void trap_myself(void)
81{
82 __asm("int3");
83}
84
85static inline void remap_stack(long fd, unsigned long offset)
86{
87 __asm__ volatile ("movq %4,%%r10 ; movq %5,%%r8 ; "
88 "movq %6, %%r9; " __syscall "; movq %7, %%rbx ; "
89 "movq %%rax, (%%rbx)":
90 : "a" (STUB_MMAP_NR), "D" (STUB_DATA),
91 "S" (UM_KERN_PAGE_SIZE),
92 "d" (PROT_READ | PROT_WRITE),
93 "g" (MAP_FIXED | MAP_SHARED), "g" (fd),
94 "g" (offset),
95 "i" (&((struct stub_data *) STUB_DATA)->err)
96 : __syscall_clobber, "r10", "r8", "r9" );
97}
98
99#endif
diff --git a/arch/um/sys-x86/shared/sysdep/syscalls.h b/arch/um/sys-x86/shared/sysdep/syscalls.h
deleted file mode 100644
index bd9a89b67e41..000000000000
--- a/arch/um/sys-x86/shared/sysdep/syscalls.h
+++ /dev/null
@@ -1,5 +0,0 @@
1#ifdef __i386__
2#include "syscalls_32.h"
3#else
4#include "syscalls_64.h"
5#endif
diff --git a/arch/um/sys-x86/shared/sysdep/syscalls_32.h b/arch/um/sys-x86/shared/sysdep/syscalls_32.h
deleted file mode 100644
index 05cb796aecb5..000000000000
--- a/arch/um/sys-x86/shared/sysdep/syscalls_32.h
+++ /dev/null
@@ -1,20 +0,0 @@
1/*
2 * Copyright (C) 2000 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#include "asm/unistd.h"
7#include "sysdep/ptrace.h"
8
9typedef long syscall_handler_t(struct pt_regs);
10
11/* Not declared on x86, incompatible declarations on x86_64, so these have
12 * to go here rather than in sys_call_table.c
13 */
14extern syscall_handler_t sys_rt_sigaction;
15
16extern syscall_handler_t *sys_call_table[];
17
18#define EXECUTE_SYSCALL(syscall, regs) \
19 ((long (*)(struct syscall_args)) \
20 (*sys_call_table[syscall]))(SYSCALL_ARGS(&regs->regs))
diff --git a/arch/um/sys-x86/shared/sysdep/syscalls_64.h b/arch/um/sys-x86/shared/sysdep/syscalls_64.h
deleted file mode 100644
index 8a7d5e1da98e..000000000000
--- a/arch/um/sys-x86/shared/sysdep/syscalls_64.h
+++ /dev/null
@@ -1,32 +0,0 @@
1/*
2 * Copyright 2003 PathScale, Inc.
3 *
4 * Licensed under the GPL
5 */
6
7#ifndef __SYSDEP_X86_64_SYSCALLS_H__
8#define __SYSDEP_X86_64_SYSCALLS_H__
9
10#include <linux/msg.h>
11#include <linux/shm.h>
12
13typedef long syscall_handler_t(void);
14
15extern syscall_handler_t *sys_call_table[];
16
17#define EXECUTE_SYSCALL(syscall, regs) \
18 (((long (*)(long, long, long, long, long, long)) \
19 (*sys_call_table[syscall]))(UPT_SYSCALL_ARG1(&regs->regs), \
20 UPT_SYSCALL_ARG2(&regs->regs), \
21 UPT_SYSCALL_ARG3(&regs->regs), \
22 UPT_SYSCALL_ARG4(&regs->regs), \
23 UPT_SYSCALL_ARG5(&regs->regs), \
24 UPT_SYSCALL_ARG6(&regs->regs)))
25
26extern long old_mmap(unsigned long addr, unsigned long len,
27 unsigned long prot, unsigned long flags,
28 unsigned long fd, unsigned long pgoff);
29extern syscall_handler_t sys_modify_ldt;
30extern syscall_handler_t sys_arch_prctl;
31
32#endif
diff --git a/arch/um/sys-x86/shared/sysdep/tls.h b/arch/um/sys-x86/shared/sysdep/tls.h
deleted file mode 100644
index 4d8f75262370..000000000000
--- a/arch/um/sys-x86/shared/sysdep/tls.h
+++ /dev/null
@@ -1,5 +0,0 @@
1#ifdef __i386__
2#include "tls_32.h"
3#else
4#include "tls_64.h"
5#endif
diff --git a/arch/um/sys-x86/shared/sysdep/tls_32.h b/arch/um/sys-x86/shared/sysdep/tls_32.h
deleted file mode 100644
index 34550755b2a1..000000000000
--- a/arch/um/sys-x86/shared/sysdep/tls_32.h
+++ /dev/null
@@ -1,32 +0,0 @@
1#ifndef _SYSDEP_TLS_H
2#define _SYSDEP_TLS_H
3
4# ifndef __KERNEL__
5
6/* Change name to avoid conflicts with the original one from <asm/ldt.h>, which
7 * may be named user_desc (but in 2.4 and in header matching its API was named
8 * modify_ldt_ldt_s). */
9
10typedef struct um_dup_user_desc {
11 unsigned int entry_number;
12 unsigned int base_addr;
13 unsigned int limit;
14 unsigned int seg_32bit:1;
15 unsigned int contents:2;
16 unsigned int read_exec_only:1;
17 unsigned int limit_in_pages:1;
18 unsigned int seg_not_present:1;
19 unsigned int useable:1;
20} user_desc_t;
21
22# else /* __KERNEL__ */
23
24# include <ldt.h>
25typedef struct user_desc user_desc_t;
26
27# endif /* __KERNEL__ */
28
29#define GDT_ENTRY_TLS_MIN_I386 6
30#define GDT_ENTRY_TLS_MIN_X86_64 12
31
32#endif /* _SYSDEP_TLS_H */
diff --git a/arch/um/sys-x86/shared/sysdep/tls_64.h b/arch/um/sys-x86/shared/sysdep/tls_64.h
deleted file mode 100644
index 18c000d0357a..000000000000
--- a/arch/um/sys-x86/shared/sysdep/tls_64.h
+++ /dev/null
@@ -1,29 +0,0 @@
1#ifndef _SYSDEP_TLS_H
2#define _SYSDEP_TLS_H
3
4# ifndef __KERNEL__
5
6/* Change name to avoid conflicts with the original one from <asm/ldt.h>, which
7 * may be named user_desc (but in 2.4 and in header matching its API was named
8 * modify_ldt_ldt_s). */
9
10typedef struct um_dup_user_desc {
11 unsigned int entry_number;
12 unsigned int base_addr;
13 unsigned int limit;
14 unsigned int seg_32bit:1;
15 unsigned int contents:2;
16 unsigned int read_exec_only:1;
17 unsigned int limit_in_pages:1;
18 unsigned int seg_not_present:1;
19 unsigned int useable:1;
20 unsigned int lm:1;
21} user_desc_t;
22
23# else /* __KERNEL__ */
24
25# include <ldt.h>
26typedef struct user_desc user_desc_t;
27
28# endif /* __KERNEL__ */
29#endif /* _SYSDEP_TLS_H */
diff --git a/arch/um/sys-x86/signal_32.c b/arch/um/sys-x86/signal_32.c
deleted file mode 100644
index bcbfb0d64813..000000000000
--- a/arch/um/sys-x86/signal_32.c
+++ /dev/null
@@ -1,498 +0,0 @@
1/*
2 * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#include <linux/ptrace.h>
7#include <asm/unistd.h>
8#include <asm/uaccess.h>
9#include <asm/ucontext.h>
10#include "frame_kern.h"
11#include "skas.h"
12
13/*
14 * FPU tag word conversions.
15 */
16
17static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
18{
19 unsigned int tmp; /* to avoid 16 bit prefixes in the code */
20
21 /* Transform each pair of bits into 01 (valid) or 00 (empty) */
22 tmp = ~twd;
23 tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
24 /* and move the valid bits to the lower byte. */
25 tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
26 tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
27 tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
28 return tmp;
29}
30
31static inline unsigned long twd_fxsr_to_i387(struct user_fxsr_struct *fxsave)
32{
33 struct _fpxreg *st = NULL;
34 unsigned long twd = (unsigned long) fxsave->twd;
35 unsigned long tag;
36 unsigned long ret = 0xffff0000;
37 int i;
38
39#define FPREG_ADDR(f, n) ((char *)&(f)->st_space + (n) * 16)
40
41 for (i = 0; i < 8; i++) {
42 if (twd & 0x1) {
43 st = (struct _fpxreg *) FPREG_ADDR(fxsave, i);
44
45 switch (st->exponent & 0x7fff) {
46 case 0x7fff:
47 tag = 2; /* Special */
48 break;
49 case 0x0000:
50 if ( !st->significand[0] &&
51 !st->significand[1] &&
52 !st->significand[2] &&
53 !st->significand[3] ) {
54 tag = 1; /* Zero */
55 } else {
56 tag = 2; /* Special */
57 }
58 break;
59 default:
60 if (st->significand[3] & 0x8000) {
61 tag = 0; /* Valid */
62 } else {
63 tag = 2; /* Special */
64 }
65 break;
66 }
67 } else {
68 tag = 3; /* Empty */
69 }
70 ret |= (tag << (2 * i));
71 twd = twd >> 1;
72 }
73 return ret;
74}
75
76static int convert_fxsr_to_user(struct _fpstate __user *buf,
77 struct user_fxsr_struct *fxsave)
78{
79 unsigned long env[7];
80 struct _fpreg __user *to;
81 struct _fpxreg *from;
82 int i;
83
84 env[0] = (unsigned long)fxsave->cwd | 0xffff0000ul;
85 env[1] = (unsigned long)fxsave->swd | 0xffff0000ul;
86 env[2] = twd_fxsr_to_i387(fxsave);
87 env[3] = fxsave->fip;
88 env[4] = fxsave->fcs | ((unsigned long)fxsave->fop << 16);
89 env[5] = fxsave->foo;
90 env[6] = fxsave->fos;
91
92 if (__copy_to_user(buf, env, 7 * sizeof(unsigned long)))
93 return 1;
94
95 to = &buf->_st[0];
96 from = (struct _fpxreg *) &fxsave->st_space[0];
97 for (i = 0; i < 8; i++, to++, from++) {
98 unsigned long __user *t = (unsigned long __user *)to;
99 unsigned long *f = (unsigned long *)from;
100
101 if (__put_user(*f, t) ||
102 __put_user(*(f + 1), t + 1) ||
103 __put_user(from->exponent, &to->exponent))
104 return 1;
105 }
106 return 0;
107}
108
109static int convert_fxsr_from_user(struct user_fxsr_struct *fxsave,
110 struct _fpstate __user *buf)
111{
112 unsigned long env[7];
113 struct _fpxreg *to;
114 struct _fpreg __user *from;
115 int i;
116
117 if (copy_from_user( env, buf, 7 * sizeof(long)))
118 return 1;
119
120 fxsave->cwd = (unsigned short)(env[0] & 0xffff);
121 fxsave->swd = (unsigned short)(env[1] & 0xffff);
122 fxsave->twd = twd_i387_to_fxsr((unsigned short)(env[2] & 0xffff));
123 fxsave->fip = env[3];
124 fxsave->fop = (unsigned short)((env[4] & 0xffff0000ul) >> 16);
125 fxsave->fcs = (env[4] & 0xffff);
126 fxsave->foo = env[5];
127 fxsave->fos = env[6];
128
129 to = (struct _fpxreg *) &fxsave->st_space[0];
130 from = &buf->_st[0];
131 for (i = 0; i < 8; i++, to++, from++) {
132 unsigned long *t = (unsigned long *)to;
133 unsigned long __user *f = (unsigned long __user *)from;
134
135 if (__get_user(*t, f) ||
136 __get_user(*(t + 1), f + 1) ||
137 __get_user(to->exponent, &from->exponent))
138 return 1;
139 }
140 return 0;
141}
142
143extern int have_fpx_regs;
144
145static int copy_sc_from_user(struct pt_regs *regs,
146 struct sigcontext __user *from)
147{
148 struct sigcontext sc;
149 int err, pid;
150
151 err = copy_from_user(&sc, from, sizeof(sc));
152 if (err)
153 return err;
154
155 pid = userspace_pid[current_thread_info()->cpu];
156
157#define GETREG(regno, regname) regs->regs.gp[HOST_##regno] = sc.regname
158
159 GETREG(GS, gs);
160 GETREG(FS, fs);
161 GETREG(ES, es);
162 GETREG(DS, ds);
163 GETREG(EDI, di);
164 GETREG(ESI, si);
165 GETREG(EBP, bp);
166 GETREG(SP, sp);
167 GETREG(EBX, bx);
168 GETREG(EDX, dx);
169 GETREG(ECX, cx);
170 GETREG(EAX, ax);
171 GETREG(IP, ip);
172 GETREG(CS, cs);
173 GETREG(EFLAGS, flags);
174 GETREG(SS, ss);
175
176#undef GETREG
177 if (have_fpx_regs) {
178 struct user_fxsr_struct fpx;
179
180 err = copy_from_user(&fpx,
181 &((struct _fpstate __user *)sc.fpstate)->_fxsr_env[0],
182 sizeof(struct user_fxsr_struct));
183 if (err)
184 return 1;
185
186 err = convert_fxsr_from_user(&fpx, sc.fpstate);
187 if (err)
188 return 1;
189
190 err = restore_fpx_registers(pid, (unsigned long *) &fpx);
191 if (err < 0) {
192 printk(KERN_ERR "copy_sc_from_user - "
193 "restore_fpx_registers failed, errno = %d\n",
194 -err);
195 return 1;
196 }
197 } else {
198 struct user_i387_struct fp;
199
200 err = copy_from_user(&fp, sc.fpstate,
201 sizeof(struct user_i387_struct));
202 if (err)
203 return 1;
204
205 err = restore_fp_registers(pid, (unsigned long *) &fp);
206 if (err < 0) {
207 printk(KERN_ERR "copy_sc_from_user - "
208 "restore_fp_registers failed, errno = %d\n",
209 -err);
210 return 1;
211 }
212 }
213
214 return 0;
215}
216
217static int copy_sc_to_user(struct sigcontext __user *to,
218 struct _fpstate __user *to_fp, struct pt_regs *regs,
219 unsigned long sp)
220{
221 struct sigcontext sc;
222 struct faultinfo * fi = &current->thread.arch.faultinfo;
223 int err, pid;
224 memset(&sc, 0, sizeof(struct sigcontext));
225
226 sc.gs = REGS_GS(regs->regs.gp);
227 sc.fs = REGS_FS(regs->regs.gp);
228 sc.es = REGS_ES(regs->regs.gp);
229 sc.ds = REGS_DS(regs->regs.gp);
230 sc.di = REGS_EDI(regs->regs.gp);
231 sc.si = REGS_ESI(regs->regs.gp);
232 sc.bp = REGS_EBP(regs->regs.gp);
233 sc.sp = sp;
234 sc.bx = REGS_EBX(regs->regs.gp);
235 sc.dx = REGS_EDX(regs->regs.gp);
236 sc.cx = REGS_ECX(regs->regs.gp);
237 sc.ax = REGS_EAX(regs->regs.gp);
238 sc.ip = REGS_IP(regs->regs.gp);
239 sc.cs = REGS_CS(regs->regs.gp);
240 sc.flags = REGS_EFLAGS(regs->regs.gp);
241 sc.sp_at_signal = regs->regs.gp[UESP];
242 sc.ss = regs->regs.gp[SS];
243 sc.cr2 = fi->cr2;
244 sc.err = fi->error_code;
245 sc.trapno = fi->trap_no;
246
247 to_fp = (to_fp ? to_fp : (struct _fpstate __user *) (to + 1));
248 sc.fpstate = to_fp;
249
250 pid = userspace_pid[current_thread_info()->cpu];
251 if (have_fpx_regs) {
252 struct user_fxsr_struct fpx;
253
254 err = save_fpx_registers(pid, (unsigned long *) &fpx);
255 if (err < 0){
256 printk(KERN_ERR "copy_sc_to_user - save_fpx_registers "
257 "failed, errno = %d\n", err);
258 return 1;
259 }
260
261 err = convert_fxsr_to_user(to_fp, &fpx);
262 if (err)
263 return 1;
264
265 err |= __put_user(fpx.swd, &to_fp->status);
266 err |= __put_user(X86_FXSR_MAGIC, &to_fp->magic);
267 if (err)
268 return 1;
269
270 if (copy_to_user(&to_fp->_fxsr_env[0], &fpx,
271 sizeof(struct user_fxsr_struct)))
272 return 1;
273 }
274 else {
275 struct user_i387_struct fp;
276
277 err = save_fp_registers(pid, (unsigned long *) &fp);
278 if (copy_to_user(to_fp, &fp, sizeof(struct user_i387_struct)))
279 return 1;
280 }
281
282 return copy_to_user(to, &sc, sizeof(sc));
283}
284
285static int copy_ucontext_to_user(struct ucontext __user *uc,
286 struct _fpstate __user *fp, sigset_t *set,
287 unsigned long sp)
288{
289 int err = 0;
290
291 err |= put_user(current->sas_ss_sp, &uc->uc_stack.ss_sp);
292 err |= put_user(sas_ss_flags(sp), &uc->uc_stack.ss_flags);
293 err |= put_user(current->sas_ss_size, &uc->uc_stack.ss_size);
294 err |= copy_sc_to_user(&uc->uc_mcontext, fp, &current->thread.regs, sp);
295 err |= copy_to_user(&uc->uc_sigmask, set, sizeof(*set));
296 return err;
297}
298
299struct sigframe
300{
301 char __user *pretcode;
302 int sig;
303 struct sigcontext sc;
304 struct _fpstate fpstate;
305 unsigned long extramask[_NSIG_WORDS-1];
306 char retcode[8];
307};
308
309struct rt_sigframe
310{
311 char __user *pretcode;
312 int sig;
313 struct siginfo __user *pinfo;
314 void __user *puc;
315 struct siginfo info;
316 struct ucontext uc;
317 struct _fpstate fpstate;
318 char retcode[8];
319};
320
321int setup_signal_stack_sc(unsigned long stack_top, int sig,
322 struct k_sigaction *ka, struct pt_regs *regs,
323 sigset_t *mask)
324{
325 struct sigframe __user *frame;
326 void __user *restorer;
327 unsigned long save_sp = PT_REGS_SP(regs);
328 int err = 0;
329
330 /* This is the same calculation as i386 - ((sp + 4) & 15) == 0 */
331 stack_top = ((stack_top + 4) & -16UL) - 4;
332 frame = (struct sigframe __user *) stack_top - 1;
333 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
334 return 1;
335
336 restorer = frame->retcode;
337 if (ka->sa.sa_flags & SA_RESTORER)
338 restorer = ka->sa.sa_restorer;
339
340 /* Update SP now because the page fault handler refuses to extend
341 * the stack if the faulting address is too far below the current
342 * SP, which frame now certainly is. If there's an error, the original
343 * value is restored on the way out.
344 * When writing the sigcontext to the stack, we have to write the
345 * original value, so that's passed to copy_sc_to_user, which does
346 * the right thing with it.
347 */
348 PT_REGS_SP(regs) = (unsigned long) frame;
349
350 err |= __put_user(restorer, &frame->pretcode);
351 err |= __put_user(sig, &frame->sig);
352 err |= copy_sc_to_user(&frame->sc, NULL, regs, save_sp);
353 err |= __put_user(mask->sig[0], &frame->sc.oldmask);
354 if (_NSIG_WORDS > 1)
355 err |= __copy_to_user(&frame->extramask, &mask->sig[1],
356 sizeof(frame->extramask));
357
358 /*
359 * This is popl %eax ; movl $,%eax ; int $0x80
360 *
361 * WE DO NOT USE IT ANY MORE! It's only left here for historical
362 * reasons and because gdb uses it as a signature to notice
363 * signal handler stack frames.
364 */
365 err |= __put_user(0xb858, (short __user *)(frame->retcode+0));
366 err |= __put_user(__NR_sigreturn, (int __user *)(frame->retcode+2));
367 err |= __put_user(0x80cd, (short __user *)(frame->retcode+6));
368
369 if (err)
370 goto err;
371
372 PT_REGS_SP(regs) = (unsigned long) frame;
373 PT_REGS_IP(regs) = (unsigned long) ka->sa.sa_handler;
374 PT_REGS_EAX(regs) = (unsigned long) sig;
375 PT_REGS_EDX(regs) = (unsigned long) 0;
376 PT_REGS_ECX(regs) = (unsigned long) 0;
377
378 if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
379 ptrace_notify(SIGTRAP);
380 return 0;
381
382err:
383 PT_REGS_SP(regs) = save_sp;
384 return err;
385}
386
387int setup_signal_stack_si(unsigned long stack_top, int sig,
388 struct k_sigaction *ka, struct pt_regs *regs,
389 siginfo_t *info, sigset_t *mask)
390{
391 struct rt_sigframe __user *frame;
392 void __user *restorer;
393 unsigned long save_sp = PT_REGS_SP(regs);
394 int err = 0;
395
396 stack_top &= -8UL;
397 frame = (struct rt_sigframe __user *) stack_top - 1;
398 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
399 return 1;
400
401 restorer = frame->retcode;
402 if (ka->sa.sa_flags & SA_RESTORER)
403 restorer = ka->sa.sa_restorer;
404
405 /* See comment above about why this is here */
406 PT_REGS_SP(regs) = (unsigned long) frame;
407
408 err |= __put_user(restorer, &frame->pretcode);
409 err |= __put_user(sig, &frame->sig);
410 err |= __put_user(&frame->info, &frame->pinfo);
411 err |= __put_user(&frame->uc, &frame->puc);
412 err |= copy_siginfo_to_user(&frame->info, info);
413 err |= copy_ucontext_to_user(&frame->uc, &frame->fpstate, mask,
414 save_sp);
415
416 /*
417 * This is movl $,%eax ; int $0x80
418 *
419 * WE DO NOT USE IT ANY MORE! It's only left here for historical
420 * reasons and because gdb uses it as a signature to notice
421 * signal handler stack frames.
422 */
423 err |= __put_user(0xb8, (char __user *)(frame->retcode+0));
424 err |= __put_user(__NR_rt_sigreturn, (int __user *)(frame->retcode+1));
425 err |= __put_user(0x80cd, (short __user *)(frame->retcode+5));
426
427 if (err)
428 goto err;
429
430 PT_REGS_IP(regs) = (unsigned long) ka->sa.sa_handler;
431 PT_REGS_EAX(regs) = (unsigned long) sig;
432 PT_REGS_EDX(regs) = (unsigned long) &frame->info;
433 PT_REGS_ECX(regs) = (unsigned long) &frame->uc;
434
435 if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
436 ptrace_notify(SIGTRAP);
437 return 0;
438
439err:
440 PT_REGS_SP(regs) = save_sp;
441 return err;
442}
443
444long sys_sigreturn(struct pt_regs regs)
445{
446 unsigned long sp = PT_REGS_SP(&current->thread.regs);
447 struct sigframe __user *frame = (struct sigframe __user *)(sp - 8);
448 sigset_t set;
449 struct sigcontext __user *sc = &frame->sc;
450 unsigned long __user *oldmask = &sc->oldmask;
451 unsigned long __user *extramask = frame->extramask;
452 int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long);
453
454 if (copy_from_user(&set.sig[0], oldmask, sizeof(set.sig[0])) ||
455 copy_from_user(&set.sig[1], extramask, sig_size))
456 goto segfault;
457
458 sigdelsetmask(&set, ~_BLOCKABLE);
459 set_current_blocked(&set);
460
461 if (copy_sc_from_user(&current->thread.regs, sc))
462 goto segfault;
463
464 /* Avoid ERESTART handling */
465 PT_REGS_SYSCALL_NR(&current->thread.regs) = -1;
466 return PT_REGS_SYSCALL_RET(&current->thread.regs);
467
468 segfault:
469 force_sig(SIGSEGV, current);
470 return 0;
471}
472
473long sys_rt_sigreturn(struct pt_regs regs)
474{
475 unsigned long sp = PT_REGS_SP(&current->thread.regs);
476 struct rt_sigframe __user *frame =
477 (struct rt_sigframe __user *) (sp - 4);
478 sigset_t set;
479 struct ucontext __user *uc = &frame->uc;
480 int sig_size = _NSIG_WORDS * sizeof(unsigned long);
481
482 if (copy_from_user(&set, &uc->uc_sigmask, sig_size))
483 goto segfault;
484
485 sigdelsetmask(&set, ~_BLOCKABLE);
486 set_current_blocked(&set);
487
488 if (copy_sc_from_user(&current->thread.regs, &uc->uc_mcontext))
489 goto segfault;
490
491 /* Avoid ERESTART handling */
492 PT_REGS_SYSCALL_NR(&current->thread.regs) = -1;
493 return PT_REGS_SYSCALL_RET(&current->thread.regs);
494
495 segfault:
496 force_sig(SIGSEGV, current);
497 return 0;
498}
diff --git a/arch/um/sys-x86/signal_64.c b/arch/um/sys-x86/signal_64.c
deleted file mode 100644
index 255b2ca0ce67..000000000000
--- a/arch/um/sys-x86/signal_64.c
+++ /dev/null
@@ -1,255 +0,0 @@
1/*
2 * Copyright (C) 2003 PathScale, Inc.
3 * Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
4 * Licensed under the GPL
5 */
6
7#include <linux/personality.h>
8#include <linux/ptrace.h>
9#include <linux/kernel.h>
10#include <asm/unistd.h>
11#include <asm/uaccess.h>
12#include <asm/ucontext.h>
13#include "frame_kern.h"
14#include "skas.h"
15
16static int copy_sc_from_user(struct pt_regs *regs,
17 struct sigcontext __user *from)
18{
19 struct sigcontext sc;
20 struct user_i387_struct fp;
21 void __user *buf;
22 int err;
23
24 err = copy_from_user(&sc, from, sizeof(sc));
25 if (err)
26 return err;
27
28#define GETREG(regno, regname) regs->regs.gp[HOST_##regno] = sc.regname
29
30 GETREG(R8, r8);
31 GETREG(R9, r9);
32 GETREG(R10, r10);
33 GETREG(R11, r11);
34 GETREG(R12, r12);
35 GETREG(R13, r13);
36 GETREG(R14, r14);
37 GETREG(R15, r15);
38 GETREG(RDI, di);
39 GETREG(RSI, si);
40 GETREG(RBP, bp);
41 GETREG(RBX, bx);
42 GETREG(RDX, dx);
43 GETREG(RAX, ax);
44 GETREG(RCX, cx);
45 GETREG(SP, sp);
46 GETREG(IP, ip);
47 GETREG(EFLAGS, flags);
48 GETREG(CS, cs);
49#undef GETREG
50
51 buf = sc.fpstate;
52
53 err = copy_from_user(&fp, buf, sizeof(struct user_i387_struct));
54 if (err)
55 return 1;
56
57 err = restore_fp_registers(userspace_pid[current_thread_info()->cpu],
58 (unsigned long *) &fp);
59 if (err < 0) {
60 printk(KERN_ERR "copy_sc_from_user - "
61 "restore_fp_registers failed, errno = %d\n",
62 -err);
63 return 1;
64 }
65
66 return 0;
67}
68
69static int copy_sc_to_user(struct sigcontext __user *to,
70 struct _fpstate __user *to_fp, struct pt_regs *regs,
71 unsigned long mask, unsigned long sp)
72{
73 struct faultinfo * fi = &current->thread.arch.faultinfo;
74 struct sigcontext sc;
75 struct user_i387_struct fp;
76 int err = 0;
77 memset(&sc, 0, sizeof(struct sigcontext));
78
79#define PUTREG(regno, regname) sc.regname = regs->regs.gp[HOST_##regno]
80
81 PUTREG(RDI, di);
82 PUTREG(RSI, si);
83 PUTREG(RBP, bp);
84 /*
85 * Must use original RSP, which is passed in, rather than what's in
86 * signal frame.
87 */
88 sc.sp = sp;
89 PUTREG(RBX, bx);
90 PUTREG(RDX, dx);
91 PUTREG(RCX, cx);
92 PUTREG(RAX, ax);
93 PUTREG(R8, r8);
94 PUTREG(R9, r9);
95 PUTREG(R10, r10);
96 PUTREG(R11, r11);
97 PUTREG(R12, r12);
98 PUTREG(R13, r13);
99 PUTREG(R14, r14);
100 PUTREG(R15, r15);
101 PUTREG(CS, cs); /* XXX x86_64 doesn't do this */
102
103 sc.cr2 = fi->cr2;
104 sc.err = fi->error_code;
105 sc.trapno = fi->trap_no;
106
107 PUTREG(IP, ip);
108 PUTREG(EFLAGS, flags);
109#undef PUTREG
110
111 sc.oldmask = mask;
112
113 err = copy_to_user(to, &sc, sizeof(struct sigcontext));
114 if (err)
115 return 1;
116
117 err = save_fp_registers(userspace_pid[current_thread_info()->cpu],
118 (unsigned long *) &fp);
119 if (err < 0) {
120 printk(KERN_ERR "copy_sc_from_user - restore_fp_registers "
121 "failed, errno = %d\n", -err);
122 return 1;
123 }
124
125 if (copy_to_user(to_fp, &fp, sizeof(struct user_i387_struct)))
126 return 1;
127
128 return err;
129}
130
131struct rt_sigframe
132{
133 char __user *pretcode;
134 struct ucontext uc;
135 struct siginfo info;
136 struct _fpstate fpstate;
137};
138
139int setup_signal_stack_si(unsigned long stack_top, int sig,
140 struct k_sigaction *ka, struct pt_regs * regs,
141 siginfo_t *info, sigset_t *set)
142{
143 struct rt_sigframe __user *frame;
144 unsigned long save_sp = PT_REGS_RSP(regs);
145 int err = 0;
146 struct task_struct *me = current;
147
148 frame = (struct rt_sigframe __user *)
149 round_down(stack_top - sizeof(struct rt_sigframe), 16);
150 /* Subtract 128 for a red zone and 8 for proper alignment */
151 frame = (struct rt_sigframe __user *) ((unsigned long) frame - 128 - 8);
152
153 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
154 goto out;
155
156 if (ka->sa.sa_flags & SA_SIGINFO) {
157 err |= copy_siginfo_to_user(&frame->info, info);
158 if (err)
159 goto out;
160 }
161
162 /*
163 * Update SP now because the page fault handler refuses to extend
164 * the stack if the faulting address is too far below the current
165 * SP, which frame now certainly is. If there's an error, the original
166 * value is restored on the way out.
167 * When writing the sigcontext to the stack, we have to write the
168 * original value, so that's passed to copy_sc_to_user, which does
169 * the right thing with it.
170 */
171 PT_REGS_RSP(regs) = (unsigned long) frame;
172
173 /* Create the ucontext. */
174 err |= __put_user(0, &frame->uc.uc_flags);
175 err |= __put_user(0, &frame->uc.uc_link);
176 err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
177 err |= __put_user(sas_ss_flags(save_sp),
178 &frame->uc.uc_stack.ss_flags);
179 err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
180 err |= copy_sc_to_user(&frame->uc.uc_mcontext, &frame->fpstate, regs,
181 set->sig[0], save_sp);
182 err |= __put_user(&frame->fpstate, &frame->uc.uc_mcontext.fpstate);
183 if (sizeof(*set) == 16) {
184 __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
185 __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);
186 }
187 else
188 err |= __copy_to_user(&frame->uc.uc_sigmask, set,
189 sizeof(*set));
190
191 /*
192 * Set up to return from userspace. If provided, use a stub
193 * already in userspace.
194 */
195 /* x86-64 should always use SA_RESTORER. */
196 if (ka->sa.sa_flags & SA_RESTORER)
197 err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
198 else
199 /* could use a vstub here */
200 goto restore_sp;
201
202 if (err)
203 goto restore_sp;
204
205 /* Set up registers for signal handler */
206 {
207 struct exec_domain *ed = current_thread_info()->exec_domain;
208 if (unlikely(ed && ed->signal_invmap && sig < 32))
209 sig = ed->signal_invmap[sig];
210 }
211
212 PT_REGS_RDI(regs) = sig;
213 /* In case the signal handler was declared without prototypes */
214 PT_REGS_RAX(regs) = 0;
215
216 /*
217 * This also works for non SA_SIGINFO handlers because they expect the
218 * next argument after the signal number on the stack.
219 */
220 PT_REGS_RSI(regs) = (unsigned long) &frame->info;
221 PT_REGS_RDX(regs) = (unsigned long) &frame->uc;
222 PT_REGS_RIP(regs) = (unsigned long) ka->sa.sa_handler;
223 out:
224 return err;
225
226restore_sp:
227 PT_REGS_RSP(regs) = save_sp;
228 return err;
229}
230
231long sys_rt_sigreturn(struct pt_regs *regs)
232{
233 unsigned long sp = PT_REGS_SP(&current->thread.regs);
234 struct rt_sigframe __user *frame =
235 (struct rt_sigframe __user *)(sp - 8);
236 struct ucontext __user *uc = &frame->uc;
237 sigset_t set;
238
239 if (copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
240 goto segfault;
241
242 sigdelsetmask(&set, ~_BLOCKABLE);
243 set_current_blocked(&set);
244
245 if (copy_sc_from_user(&current->thread.regs, &uc->uc_mcontext))
246 goto segfault;
247
248 /* Avoid ERESTART handling */
249 PT_REGS_SYSCALL_NR(&current->thread.regs) = -1;
250 return PT_REGS_SYSCALL_RET(&current->thread.regs);
251
252 segfault:
253 force_sig(SIGSEGV, current);
254 return 0;
255}
diff --git a/arch/um/sys-x86/stub_32.S b/arch/um/sys-x86/stub_32.S
deleted file mode 100644
index 54a36ec20cb7..000000000000
--- a/arch/um/sys-x86/stub_32.S
+++ /dev/null
@@ -1,51 +0,0 @@
1#include "as-layout.h"
2
3 .globl syscall_stub
4.section .__syscall_stub, "ax"
5
6 .globl batch_syscall_stub
7batch_syscall_stub:
8 /* load pointer to first operation */
9 mov $(STUB_DATA+8), %esp
10
11again:
12 /* load length of additional data */
13 mov 0x0(%esp), %eax
14
15 /* if(length == 0) : end of list */
16 /* write possible 0 to header */
17 mov %eax, STUB_DATA+4
18 cmpl $0, %eax
19 jz done
20
21 /* save current pointer */
22 mov %esp, STUB_DATA+4
23
24 /* skip additional data */
25 add %eax, %esp
26
27 /* load syscall-# */
28 pop %eax
29
30 /* load syscall params */
31 pop %ebx
32 pop %ecx
33 pop %edx
34 pop %esi
35 pop %edi
36 pop %ebp
37
38 /* execute syscall */
39 int $0x80
40
41 /* check return value */
42 pop %ebx
43 cmp %ebx, %eax
44 je again
45
46done:
47 /* save return value */
48 mov %eax, STUB_DATA
49
50 /* stop */
51 int3
diff --git a/arch/um/sys-x86/stub_64.S b/arch/um/sys-x86/stub_64.S
deleted file mode 100644
index 20e4a96a6dcb..000000000000
--- a/arch/um/sys-x86/stub_64.S
+++ /dev/null
@@ -1,66 +0,0 @@
1#include "as-layout.h"
2
3 .globl syscall_stub
4.section .__syscall_stub, "ax"
5syscall_stub:
6 syscall
7 /* We don't have 64-bit constants, so this constructs the address
8 * we need.
9 */
10 movq $(STUB_DATA >> 32), %rbx
11 salq $32, %rbx
12 movq $(STUB_DATA & 0xffffffff), %rcx
13 or %rcx, %rbx
14 movq %rax, (%rbx)
15 int3
16
17 .globl batch_syscall_stub
18batch_syscall_stub:
19 mov $(STUB_DATA >> 32), %rbx
20 sal $32, %rbx
21 mov $(STUB_DATA & 0xffffffff), %rax
22 or %rax, %rbx
23 /* load pointer to first operation */
24 mov %rbx, %rsp
25 add $0x10, %rsp
26again:
27 /* load length of additional data */
28 mov 0x0(%rsp), %rax
29
30 /* if(length == 0) : end of list */
31 /* write possible 0 to header */
32 mov %rax, 8(%rbx)
33 cmp $0, %rax
34 jz done
35
36 /* save current pointer */
37 mov %rsp, 8(%rbx)
38
39 /* skip additional data */
40 add %rax, %rsp
41
42 /* load syscall-# */
43 pop %rax
44
45 /* load syscall params */
46 pop %rdi
47 pop %rsi
48 pop %rdx
49 pop %r10
50 pop %r8
51 pop %r9
52
53 /* execute syscall */
54 syscall
55
56 /* check return value */
57 pop %rcx
58 cmp %rcx, %rax
59 je again
60
61done:
62 /* save return value */
63 mov %rax, (%rbx)
64
65 /* stop */
66 int3
diff --git a/arch/um/sys-x86/stub_segv.c b/arch/um/sys-x86/stub_segv.c
deleted file mode 100644
index b7450bd22e7d..000000000000
--- a/arch/um/sys-x86/stub_segv.c
+++ /dev/null
@@ -1,19 +0,0 @@
1/*
2 * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#include "sysdep/stub.h"
7#include "sysdep/faultinfo.h"
8#include "sysdep/mcontext.h"
9
10void __attribute__ ((__section__ (".__syscall_stub")))
11stub_segv_handler(int sig, siginfo_t *info, void *p)
12{
13 struct ucontext *uc = p;
14
15 GET_FAULTINFO_FROM_MC(*((struct faultinfo *) STUB_DATA),
16 &uc->uc_mcontext);
17 trap_myself();
18}
19
diff --git a/arch/um/sys-x86/sys_call_table_32.S b/arch/um/sys-x86/sys_call_table_32.S
deleted file mode 100644
index de274071455d..000000000000
--- a/arch/um/sys-x86/sys_call_table_32.S
+++ /dev/null
@@ -1,28 +0,0 @@
1#include <linux/linkage.h>
2/* Steal i386 syscall table for our purposes, but with some slight changes.*/
3
4#define sys_iopl sys_ni_syscall
5#define sys_ioperm sys_ni_syscall
6
7#define sys_vm86old sys_ni_syscall
8#define sys_vm86 sys_ni_syscall
9
10#define old_mmap sys_old_mmap
11
12#define ptregs_fork sys_fork
13#define ptregs_execve sys_execve
14#define ptregs_iopl sys_iopl
15#define ptregs_vm86old sys_vm86old
16#define ptregs_sigreturn sys_sigreturn
17#define ptregs_clone sys_clone
18#define ptregs_vm86 sys_vm86
19#define ptregs_rt_sigreturn sys_rt_sigreturn
20#define ptregs_sigaltstack sys_sigaltstack
21#define ptregs_vfork sys_vfork
22
23.section .rodata,"a"
24
25#include "../../x86/kernel/syscall_table_32.S"
26
27ENTRY(syscall_table_size)
28.long .-sys_call_table
diff --git a/arch/um/sys-x86/sys_call_table_64.c b/arch/um/sys-x86/sys_call_table_64.c
deleted file mode 100644
index f46de82d675c..000000000000
--- a/arch/um/sys-x86/sys_call_table_64.c
+++ /dev/null
@@ -1,64 +0,0 @@
1/*
2 * System call table for UML/x86-64, copied from arch/x86_64/kernel/syscall.c
3 * with some changes for UML.
4 */
5
6#include <linux/linkage.h>
7#include <linux/sys.h>
8#include <linux/cache.h>
9
10#define __NO_STUBS
11
12/*
13 * Below you can see, in terms of #define's, the differences between the x86-64
14 * and the UML syscall table.
15 */
16
17/* Not going to be implemented by UML, since we have no hardware. */
18#define stub_iopl sys_ni_syscall
19#define sys_ioperm sys_ni_syscall
20
21/*
22 * The UML TLS problem. Note that x86_64 does not implement this, so the below
23 * is needed only for the ia32 compatibility.
24 */
25
26/* On UML we call it this way ("old" means it's not mmap2) */
27#define sys_mmap old_mmap
28
29#define stub_clone sys_clone
30#define stub_fork sys_fork
31#define stub_vfork sys_vfork
32#define stub_execve sys_execve
33#define stub_rt_sigsuspend sys_rt_sigsuspend
34#define stub_sigaltstack sys_sigaltstack
35#define stub_rt_sigreturn sys_rt_sigreturn
36
37#define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ;
38#undef _ASM_X86_UNISTD_64_H
39#include "../../x86/include/asm/unistd_64.h"
40
41#undef __SYSCALL
42#define __SYSCALL(nr, sym) [ nr ] = sym,
43#undef _ASM_X86_UNISTD_64_H
44
45typedef void (*sys_call_ptr_t)(void);
46
47extern void sys_ni_syscall(void);
48
49/*
50 * We used to have a trick here which made sure that holes in the
51 * x86_64 table were filled in with sys_ni_syscall, but a comment in
52 * unistd_64.h says that holes aren't allowed, so the trick was
53 * removed.
54 * The trick looked like this
55 * [0 ... UM_NR_syscall_max] = &sys_ni_syscall
56 * before including unistd_64.h - the later initializations overwrote
57 * the sys_ni_syscall filler.
58 */
59
60sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
61#include "../../x86/include/asm/unistd_64.h"
62};
63
64int syscall_table_size = sizeof(sys_call_table);
diff --git a/arch/um/sys-x86/syscalls_32.c b/arch/um/sys-x86/syscalls_32.c
deleted file mode 100644
index 70ca357393b8..000000000000
--- a/arch/um/sys-x86/syscalls_32.c
+++ /dev/null
@@ -1,66 +0,0 @@
1/*
2 * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
3 * Licensed under the GPL
4 */
5
6#include "linux/sched.h"
7#include "linux/shm.h"
8#include "linux/ipc.h"
9#include "linux/syscalls.h"
10#include "asm/mman.h"
11#include "asm/uaccess.h"
12#include "asm/unistd.h"
13
14/*
15 * The prototype on i386 is:
16 *
17 * int clone(int flags, void * child_stack, int * parent_tidptr, struct user_desc * newtls, int * child_tidptr)
18 *
19 * and the "newtls" arg. on i386 is read by copy_thread directly from the
20 * register saved on the stack.
21 */
22long sys_clone(unsigned long clone_flags, unsigned long newsp,
23 int __user *parent_tid, void *newtls, int __user *child_tid)
24{
25 long ret;
26
27 if (!newsp)
28 newsp = UPT_SP(&current->thread.regs.regs);
29
30 current->thread.forking = 1;
31 ret = do_fork(clone_flags, newsp, &current->thread.regs, 0, parent_tid,
32 child_tid);
33 current->thread.forking = 0;
34 return ret;
35}
36
37long sys_sigaction(int sig, const struct old_sigaction __user *act,
38 struct old_sigaction __user *oact)
39{
40 struct k_sigaction new_ka, old_ka;
41 int ret;
42
43 if (act) {
44 old_sigset_t mask;
45 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
46 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
47 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
48 return -EFAULT;
49 __get_user(new_ka.sa.sa_flags, &act->sa_flags);
50 __get_user(mask, &act->sa_mask);
51 siginitset(&new_ka.sa.sa_mask, mask);
52 }
53
54 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
55
56 if (!ret && oact) {
57 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
58 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
59 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
60 return -EFAULT;
61 __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
62 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
63 }
64
65 return ret;
66}
diff --git a/arch/um/sys-x86/syscalls_64.c b/arch/um/sys-x86/syscalls_64.c
deleted file mode 100644
index f3d82bb6e15a..000000000000
--- a/arch/um/sys-x86/syscalls_64.c
+++ /dev/null
@@ -1,102 +0,0 @@
1/*
2 * Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Copyright 2003 PathScale, Inc.
4 *
5 * Licensed under the GPL
6 */
7
8#include "linux/linkage.h"
9#include "linux/personality.h"
10#include "linux/utsname.h"
11#include "asm/prctl.h" /* XXX This should get the constants from libc */
12#include "asm/uaccess.h"
13#include "os.h"
14
15long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr)
16{
17 unsigned long *ptr = addr, tmp;
18 long ret;
19 int pid = task->mm->context.id.u.pid;
20
21 /*
22 * With ARCH_SET_FS (and ARCH_SET_GS is treated similarly to
23 * be safe), we need to call arch_prctl on the host because
24 * setting %fs may result in something else happening (like a
25 * GDT or thread.fs being set instead). So, we let the host
26 * fiddle the registers and thread struct and restore the
27 * registers afterwards.
28 *
29 * So, the saved registers are stored to the process (this
30 * needed because a stub may have been the last thing to run),
31 * arch_prctl is run on the host, then the registers are read
32 * back.
33 */
34 switch (code) {
35 case ARCH_SET_FS:
36 case ARCH_SET_GS:
37 ret = restore_registers(pid, &current->thread.regs.regs);
38 if (ret)
39 return ret;
40 break;
41 case ARCH_GET_FS:
42 case ARCH_GET_GS:
43 /*
44 * With these two, we read to a local pointer and
45 * put_user it to the userspace pointer that we were
46 * given. If addr isn't valid (because it hasn't been
47 * faulted in or is just bogus), we want put_user to
48 * fault it in (or return -EFAULT) instead of having
49 * the host return -EFAULT.
50 */
51 ptr = &tmp;
52 }
53
54 ret = os_arch_prctl(pid, code, ptr);
55 if (ret)
56 return ret;
57
58 switch (code) {
59 case ARCH_SET_FS:
60 current->thread.arch.fs = (unsigned long) ptr;
61 ret = save_registers(pid, &current->thread.regs.regs);
62 break;
63 case ARCH_SET_GS:
64 ret = save_registers(pid, &current->thread.regs.regs);
65 break;
66 case ARCH_GET_FS:
67 ret = put_user(tmp, addr);
68 break;
69 case ARCH_GET_GS:
70 ret = put_user(tmp, addr);
71 break;
72 }
73
74 return ret;
75}
76
77long sys_arch_prctl(int code, unsigned long addr)
78{
79 return arch_prctl(current, code, (unsigned long __user *) addr);
80}
81
82long sys_clone(unsigned long clone_flags, unsigned long newsp,
83 void __user *parent_tid, void __user *child_tid)
84{
85 long ret;
86
87 if (!newsp)
88 newsp = UPT_SP(&current->thread.regs.regs);
89 current->thread.forking = 1;
90 ret = do_fork(clone_flags, newsp, &current->thread.regs, 0, parent_tid,
91 child_tid);
92 current->thread.forking = 0;
93 return ret;
94}
95
96void arch_switch_to(struct task_struct *to)
97{
98 if ((to->thread.arch.fs == 0) || (to->mm == NULL))
99 return;
100
101 arch_prctl(to, ARCH_SET_FS, (void __user *) to->thread.arch.fs);
102}
diff --git a/arch/um/sys-x86/sysrq_32.c b/arch/um/sys-x86/sysrq_32.c
deleted file mode 100644
index 171b3e9dc867..000000000000
--- a/arch/um/sys-x86/sysrq_32.c
+++ /dev/null
@@ -1,101 +0,0 @@
1/*
2 * Copyright (C) 2001 - 2003 Jeff Dike (jdike@addtoit.com)
3 * Licensed under the GPL
4 */
5
6#include "linux/kernel.h"
7#include "linux/smp.h"
8#include "linux/sched.h"
9#include "linux/kallsyms.h"
10#include "asm/ptrace.h"
11#include "sysrq.h"
12
13/* This is declared by <linux/sched.h> */
14void show_regs(struct pt_regs *regs)
15{
16 printk("\n");
17 printk("EIP: %04lx:[<%08lx>] CPU: %d %s",
18 0xffff & PT_REGS_CS(regs), PT_REGS_IP(regs),
19 smp_processor_id(), print_tainted());
20 if (PT_REGS_CS(regs) & 3)
21 printk(" ESP: %04lx:%08lx", 0xffff & PT_REGS_SS(regs),
22 PT_REGS_SP(regs));
23 printk(" EFLAGS: %08lx\n %s\n", PT_REGS_EFLAGS(regs),
24 print_tainted());
25 printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
26 PT_REGS_EAX(regs), PT_REGS_EBX(regs),
27 PT_REGS_ECX(regs),
28 PT_REGS_EDX(regs));
29 printk("ESI: %08lx EDI: %08lx EBP: %08lx",
30 PT_REGS_ESI(regs), PT_REGS_EDI(regs),
31 PT_REGS_EBP(regs));
32 printk(" DS: %04lx ES: %04lx\n",
33 0xffff & PT_REGS_DS(regs),
34 0xffff & PT_REGS_ES(regs));
35
36 show_trace(NULL, (unsigned long *) &regs);
37}
38
39/* Copied from i386. */
40static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
41{
42 return p > (void *)tinfo &&
43 p < (void *)tinfo + THREAD_SIZE - 3;
44}
45
46/* Adapted from i386 (we also print the address we read from). */
47static inline unsigned long print_context_stack(struct thread_info *tinfo,
48 unsigned long *stack, unsigned long ebp)
49{
50 unsigned long addr;
51
52#ifdef CONFIG_FRAME_POINTER
53 while (valid_stack_ptr(tinfo, (void *)ebp)) {
54 addr = *(unsigned long *)(ebp + 4);
55 printk("%08lx: [<%08lx>]", ebp + 4, addr);
56 print_symbol(" %s", addr);
57 printk("\n");
58 ebp = *(unsigned long *)ebp;
59 }
60#else
61 while (valid_stack_ptr(tinfo, stack)) {
62 addr = *stack;
63 if (__kernel_text_address(addr)) {
64 printk("%08lx: [<%08lx>]", (unsigned long) stack, addr);
65 print_symbol(" %s", addr);
66 printk("\n");
67 }
68 stack++;
69 }
70#endif
71 return ebp;
72}
73
74void show_trace(struct task_struct* task, unsigned long * stack)
75{
76 unsigned long ebp;
77 struct thread_info *context;
78
79 /* Turn this into BUG_ON if possible. */
80 if (!stack) {
81 stack = (unsigned long*) &stack;
82 printk("show_trace: got NULL stack, implicit assumption task == current");
83 WARN_ON(1);
84 }
85
86 if (!task)
87 task = current;
88
89 if (task != current) {
90 ebp = (unsigned long) KSTK_EBP(task);
91 } else {
92 asm ("movl %%ebp, %0" : "=r" (ebp) : );
93 }
94
95 context = (struct thread_info *)
96 ((unsigned long)stack & (~(THREAD_SIZE - 1)));
97 print_context_stack(context, stack, ebp);
98
99 printk("\n");
100}
101
diff --git a/arch/um/sys-x86/sysrq_64.c b/arch/um/sys-x86/sysrq_64.c
deleted file mode 100644
index f4f82beb3508..000000000000
--- a/arch/um/sys-x86/sysrq_64.c
+++ /dev/null
@@ -1,41 +0,0 @@
1/*
2 * Copyright 2003 PathScale, Inc.
3 *
4 * Licensed under the GPL
5 */
6
7#include <linux/kernel.h>
8#include <linux/module.h>
9#include <linux/sched.h>
10#include <linux/utsname.h>
11#include <asm/current.h>
12#include <asm/ptrace.h>
13#include "sysrq.h"
14
15void __show_regs(struct pt_regs *regs)
16{
17 printk("\n");
18 print_modules();
19 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s\n", task_pid_nr(current),
20 current->comm, print_tainted(), init_utsname()->release);
21 printk(KERN_INFO "RIP: %04lx:[<%016lx>]\n", PT_REGS_CS(regs) & 0xffff,
22 PT_REGS_RIP(regs));
23 printk(KERN_INFO "RSP: %016lx EFLAGS: %08lx\n", PT_REGS_RSP(regs),
24 PT_REGS_EFLAGS(regs));
25 printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n",
26 PT_REGS_RAX(regs), PT_REGS_RBX(regs), PT_REGS_RCX(regs));
27 printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n",
28 PT_REGS_RDX(regs), PT_REGS_RSI(regs), PT_REGS_RDI(regs));
29 printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n",
30 PT_REGS_RBP(regs), PT_REGS_R8(regs), PT_REGS_R9(regs));
31 printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n",
32 PT_REGS_R10(regs), PT_REGS_R11(regs), PT_REGS_R12(regs));
33 printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n",
34 PT_REGS_R13(regs), PT_REGS_R14(regs), PT_REGS_R15(regs));
35}
36
37void show_regs(struct pt_regs *regs)
38{
39 __show_regs(regs);
40 show_trace(current, (unsigned long *) &regs);
41}
diff --git a/arch/um/sys-x86/tls_32.c b/arch/um/sys-x86/tls_32.c
deleted file mode 100644
index c6c7131e563b..000000000000
--- a/arch/um/sys-x86/tls_32.c
+++ /dev/null
@@ -1,396 +0,0 @@
1/*
2 * Copyright (C) 2005 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
3 * Licensed under the GPL
4 */
5
6#include "linux/percpu.h"
7#include "linux/sched.h"
8#include "asm/uaccess.h"
9#include "os.h"
10#include "skas.h"
11#include "sysdep/tls.h"
12
13/*
14 * If needed we can detect when it's uninitialized.
15 *
16 * These are initialized in an initcall and unchanged thereafter.
17 */
18static int host_supports_tls = -1;
19int host_gdt_entry_tls_min;
20
21int do_set_thread_area(struct user_desc *info)
22{
23 int ret;
24 u32 cpu;
25
26 cpu = get_cpu();
27 ret = os_set_thread_area(info, userspace_pid[cpu]);
28 put_cpu();
29
30 if (ret)
31 printk(KERN_ERR "PTRACE_SET_THREAD_AREA failed, err = %d, "
32 "index = %d\n", ret, info->entry_number);
33
34 return ret;
35}
36
37int do_get_thread_area(struct user_desc *info)
38{
39 int ret;
40 u32 cpu;
41
42 cpu = get_cpu();
43 ret = os_get_thread_area(info, userspace_pid[cpu]);
44 put_cpu();
45
46 if (ret)
47 printk(KERN_ERR "PTRACE_GET_THREAD_AREA failed, err = %d, "
48 "index = %d\n", ret, info->entry_number);
49
50 return ret;
51}
52
53/*
54 * sys_get_thread_area: get a yet unused TLS descriptor index.
55 * XXX: Consider leaving one free slot for glibc usage at first place. This must
56 * be done here (and by changing GDT_ENTRY_TLS_* macros) and nowhere else.
57 *
58 * Also, this must be tested when compiling in SKAS mode with dynamic linking
59 * and running against NPTL.
60 */
61static int get_free_idx(struct task_struct* task)
62{
63 struct thread_struct *t = &task->thread;
64 int idx;
65
66 if (!t->arch.tls_array)
67 return GDT_ENTRY_TLS_MIN;
68
69 for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
70 if (!t->arch.tls_array[idx].present)
71 return idx + GDT_ENTRY_TLS_MIN;
72 return -ESRCH;
73}
74
75static inline void clear_user_desc(struct user_desc* info)
76{
77 /* Postcondition: LDT_empty(info) returns true. */
78 memset(info, 0, sizeof(*info));
79
80 /*
81 * Check the LDT_empty or the i386 sys_get_thread_area code - we obtain
82 * indeed an empty user_desc.
83 */
84 info->read_exec_only = 1;
85 info->seg_not_present = 1;
86}
87
88#define O_FORCE 1
89
/*
 * Flush @to's cached TLS array to the host process.
 *
 * Non-present entries that were never flushed are rewritten as empty
 * descriptors so the host clears them; already-flushed entries are
 * skipped unless O_FORCE is set.  Returns the first error from
 * do_set_thread_area(), or 0.
 *
 * NOTE(review): the loop bound is `idx < GDT_ENTRY_TLS_MAX` while
 * set_tls_entry() accepts idx == GDT_ENTRY_TLS_MAX inclusive, so the
 * last TLS slot appears never to be flushed here — confirm whether
 * this off-by-one is intentional.
 */
static int load_TLS(int flags, struct task_struct *to)
{
	int ret = 0;
	int idx;

	for (idx = GDT_ENTRY_TLS_MIN; idx < GDT_ENTRY_TLS_MAX; idx++) {
		struct uml_tls_struct* curr =
			&to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN];

		/*
		 * Actually, now if it wasn't flushed it gets cleared and
		 * flushed to the host, which will clear it.
		 */
		if (!curr->present) {
			if (!curr->flushed) {
				clear_user_desc(&curr->tls);
				curr->tls.entry_number = idx;
			} else {
				/* Already empty on the host; nothing to do. */
				WARN_ON(!LDT_empty(&curr->tls));
				continue;
			}
		}

		/* Skip entries already mirrored on the host unless forced. */
		if (!(flags & O_FORCE) && curr->flushed)
			continue;

		ret = do_set_thread_area(&curr->tls);
		if (ret)
			goto out;

		curr->flushed = 1;
	}
out:
	return ret;
}
125
126/*
127 * Verify if we need to do a flush for the new process, i.e. if there are any
128 * present desc's, only if they haven't been flushed.
129 */
130static inline int needs_TLS_update(struct task_struct *task)
131{
132 int i;
133 int ret = 0;
134
135 for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
136 struct uml_tls_struct* curr =
137 &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];
138
139 /*
140 * Can't test curr->present, we may need to clear a descriptor
141 * which had a value.
142 */
143 if (curr->flushed)
144 continue;
145 ret = 1;
146 break;
147 }
148 return ret;
149}
150
151/*
152 * On a newly forked process, the TLS descriptors haven't yet been flushed. So
153 * we mark them as such and the first switch_to will do the job.
154 */
155void clear_flushed_tls(struct task_struct *task)
156{
157 int i;
158
159 for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
160 struct uml_tls_struct* curr =
161 &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];
162
163 /*
164 * Still correct to do this, if it wasn't present on the host it
165 * will remain as flushed as it was.
166 */
167 if (!curr->present)
168 continue;
169
170 curr->flushed = 0;
171 }
172}
173
174/*
175 * In SKAS0 mode, currently, multiple guest threads sharing the same ->mm have a
176 * common host process. So this is needed in SKAS0 too.
177 *
178 * However, if each thread had a different host process (and this was discussed
179 * for SMP support) this won't be needed.
180 *
181 * And this will not need be used when (and if) we'll add support to the host
182 * SKAS patch.
183 */
184
185int arch_switch_tls(struct task_struct *to)
186{
187 if (!host_supports_tls)
188 return 0;
189
190 /*
191 * We have no need whatsoever to switch TLS for kernel threads; beyond
192 * that, that would also result in us calling os_set_thread_area with
193 * userspace_pid[cpu] == 0, which gives an error.
194 */
195 if (likely(to->mm))
196 return load_TLS(O_FORCE, to);
197
198 return 0;
199}
200
201static int set_tls_entry(struct task_struct* task, struct user_desc *info,
202 int idx, int flushed)
203{
204 struct thread_struct *t = &task->thread;
205
206 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
207 return -EINVAL;
208
209 t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info;
210 t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1;
211 t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed;
212
213 return 0;
214}
215
216int arch_copy_tls(struct task_struct *new)
217{
218 struct user_desc info;
219 int idx, ret = -EFAULT;
220
221 if (copy_from_user(&info,
222 (void __user *) UPT_ESI(&new->thread.regs.regs),
223 sizeof(info)))
224 goto out;
225
226 ret = -EINVAL;
227 if (LDT_empty(&info))
228 goto out;
229
230 idx = info.entry_number;
231
232 ret = set_tls_entry(new, &info, idx, 0);
233out:
234 return ret;
235}
236
237/* XXX: use do_get_thread_area to read the host value? I'm not at all sure! */
/*
 * Read back the cached TLS descriptor @idx of @task into @info.
 * Entries that were never set are reported as empty descriptors
 * (matching what the i386 boot GDT contains).  Returns 0, or -EINVAL
 * for an out-of-range index.
 */
static int get_tls_entry(struct task_struct *task, struct user_desc *info,
			 int idx)
{
	struct thread_struct *t = &task->thread;

	/*
	 * NOTE(review): tls_array appears to be an embedded array, so its
	 * address can never be NULL and this test looks dead — presumably
	 * a leftover from when it was a pointer.  If it ever fired, the
	 * 'out:' path below would index tls_array with an idx that has not
	 * been range-checked — confirm before relying on this path.
	 */
	if (!t->arch.tls_array)
		goto clear;

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present)
		goto clear;

	*info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls;

out:
	/*
	 * Temporary debugging check, to make sure that things have been
	 * flushed. This could be triggered if load_TLS() failed.
	 */
	if (unlikely(task == current &&
		     !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
		printk(KERN_ERR "get_tls_entry: task with pid %d got here "
				"without flushed TLS.", current->pid);
	}

	return 0;
clear:
	/*
	 * When the TLS entry has not been set, the values read to user in the
	 * tls_array are 0 (because it's cleared at boot, see
	 * arch/i386/kernel/head.S:cpu_gdt_table). Emulate that.
	 */
	clear_user_desc(info);
	info->entry_number = idx;
	goto out;
}
276
277int sys_set_thread_area(struct user_desc __user *user_desc)
278{
279 struct user_desc info;
280 int idx, ret;
281
282 if (!host_supports_tls)
283 return -ENOSYS;
284
285 if (copy_from_user(&info, user_desc, sizeof(info)))
286 return -EFAULT;
287
288 idx = info.entry_number;
289
290 if (idx == -1) {
291 idx = get_free_idx(current);
292 if (idx < 0)
293 return idx;
294 info.entry_number = idx;
295 /* Tell the user which slot we chose for him.*/
296 if (put_user(idx, &user_desc->entry_number))
297 return -EFAULT;
298 }
299
300 ret = do_set_thread_area(&info);
301 if (ret)
302 return ret;
303 return set_tls_entry(current, &info, idx, 1);
304}
305
306/*
307 * Perform set_thread_area on behalf of the traced child.
308 * Note: error handling is not done on the deferred load, and this differ from
309 * i386. However the only possible error are caused by bugs.
310 */
311int ptrace_set_thread_area(struct task_struct *child, int idx,
312 struct user_desc __user *user_desc)
313{
314 struct user_desc info;
315
316 if (!host_supports_tls)
317 return -EIO;
318
319 if (copy_from_user(&info, user_desc, sizeof(info)))
320 return -EFAULT;
321
322 return set_tls_entry(child, &info, idx, 0);
323}
324
325int sys_get_thread_area(struct user_desc __user *user_desc)
326{
327 struct user_desc info;
328 int idx, ret;
329
330 if (!host_supports_tls)
331 return -ENOSYS;
332
333 if (get_user(idx, &user_desc->entry_number))
334 return -EFAULT;
335
336 ret = get_tls_entry(current, &info, idx);
337 if (ret < 0)
338 goto out;
339
340 if (copy_to_user(user_desc, &info, sizeof(info)))
341 ret = -EFAULT;
342
343out:
344 return ret;
345}
346
347/*
348 * Perform get_thread_area on behalf of the traced child.
349 */
350int ptrace_get_thread_area(struct task_struct *child, int idx,
351 struct user_desc __user *user_desc)
352{
353 struct user_desc info;
354 int ret;
355
356 if (!host_supports_tls)
357 return -EIO;
358
359 ret = get_tls_entry(child, &info, idx);
360 if (ret < 0)
361 goto out;
362
363 if (copy_to_user(user_desc, &info, sizeof(info)))
364 ret = -EFAULT;
365out:
366 return ret;
367}
368
369/*
370 * This code is really i386-only, but it detects and logs x86_64 GDT indexes
371 * if a 32-bit UML is running on a 64-bit host.
372 */
/*
 * Boot-time probe: ask the host whether PTRACE_{GET,SET}_THREAD_AREA
 * works and which GDT slot range it uses, then log the result.  The
 * TLS syscalls above return -ENOSYS/-EIO when this probe fails.
 */
static int __init __setup_host_supports_tls(void)
{
	check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min);
	if (host_supports_tls) {
		printk(KERN_INFO "Host TLS support detected\n");
		printk(KERN_INFO "Detected host type: ");
		switch (host_gdt_entry_tls_min) {
		case GDT_ENTRY_TLS_MIN_I386:
			printk(KERN_CONT "i386");
			break;
		case GDT_ENTRY_TLS_MIN_X86_64:
			printk(KERN_CONT "x86_64");
			break;
		}
		/*
		 * NOTE(review): the upper bound printed here looks one past
		 * the last TLS index (min + GDT_ENTRY_TLS_ENTRIES - 1 would
		 * be the last slot).  Cosmetic only — confirm.
		 */
		printk(KERN_CONT " (GDT indexes %d to %d)\n",
		       host_gdt_entry_tls_min,
		       host_gdt_entry_tls_min + GDT_ENTRY_TLS_ENTRIES);
	} else
		printk(KERN_ERR "  Host TLS support NOT detected! "
		       "TLS support inside UML will not work\n");
	return 0;
}
395
396__initcall(__setup_host_supports_tls);
diff --git a/arch/um/sys-x86/tls_64.c b/arch/um/sys-x86/tls_64.c
deleted file mode 100644
index f7ba46200ecd..000000000000
--- a/arch/um/sys-x86/tls_64.c
+++ /dev/null
@@ -1,17 +0,0 @@
1#include "linux/sched.h"
2
/*
 * x86_64 keeps TLS in the FS base (see arch_copy_tls below) rather
 * than in per-thread GDT descriptors, so a fork leaves nothing to
 * invalidate here.
 */
void clear_flushed_tls(struct task_struct *task)
{
}
6
int arch_copy_tls(struct task_struct *t)
{
	/*
	 * If CLONE_SETTLS is set, we need to save the thread id
	 * (which is argument 5, child_tid, of clone) so it can be set
	 * during context switches.
	 */
	/*
	 * NOTE(review): the value is read from the saved %r8 slot of the
	 * child's register file and stored as its FS base; on x86_64 %r8
	 * carries clone's sixth syscall argument, which is tls rather
	 * than child_tid — the comment above may be stale.  Confirm
	 * against the clone(2) ABI before relying on it.
	 */
	t->thread.arch.fs = t->thread.regs.regs.gp[R8 / sizeof(long)];

	return 0;
}
diff --git a/arch/um/sys-x86/user-offsets.c b/arch/um/sys-x86/user-offsets.c
deleted file mode 100644
index 3c19c48a1d48..000000000000
--- a/arch/um/sys-x86/user-offsets.c
+++ /dev/null
@@ -1,79 +0,0 @@
1#include <stdio.h>
2#include <stddef.h>
3#include <signal.h>
4#include <sys/poll.h>
5#include <sys/mman.h>
6#include <sys/user.h>
7#define __FRAME_OFFSETS
8#include <asm/ptrace.h>
9#include <asm/types.h>
10
11#define DEFINE(sym, val) \
12 asm volatile("\n->" #sym " %0 " #val : : "i" (val))
13
14#define DEFINE_LONGS(sym, val) \
15 asm volatile("\n->" #sym " %0 " #val : : "i" (val/sizeof(unsigned long)))
16
/*
 * Offset generator: emits "->SYM value" assembler markers that kbuild
 * turns into a header of host constants (register-frame indices, sizes,
 * poll/mmap flags) for whichever of i386/x86_64 this is built as.
 * Never executed — only compiled and scanned.
 */
void foo(void)
{
#ifdef __i386__
	/* Host FP register image sizes, in longs. */
	DEFINE_LONGS(HOST_FP_SIZE, sizeof(struct user_fpregs_struct));
	DEFINE_LONGS(HOST_FPX_SIZE, sizeof(struct user_fpxregs_struct));

	/* Indices of each register within the host ptrace frame. */
	DEFINE(HOST_IP, EIP);
	DEFINE(HOST_SP, UESP);
	DEFINE(HOST_EFLAGS, EFL);
	DEFINE(HOST_EAX, EAX);
	DEFINE(HOST_EBX, EBX);
	DEFINE(HOST_ECX, ECX);
	DEFINE(HOST_EDX, EDX);
	DEFINE(HOST_ESI, ESI);
	DEFINE(HOST_EDI, EDI);
	DEFINE(HOST_EBP, EBP);
	DEFINE(HOST_CS, CS);
	DEFINE(HOST_SS, SS);
	DEFINE(HOST_DS, DS);
	DEFINE(HOST_FS, FS);
	DEFINE(HOST_ES, ES);
	DEFINE(HOST_GS, GS);
#else
	DEFINE(HOST_FP_SIZE, sizeof(struct _fpstate) / sizeof(unsigned long));
	/* x86_64 frame offsets are byte-based; scale them to longs. */
	DEFINE_LONGS(HOST_RBX, RBX);
	DEFINE_LONGS(HOST_RCX, RCX);
	DEFINE_LONGS(HOST_RDI, RDI);
	DEFINE_LONGS(HOST_RSI, RSI);
	DEFINE_LONGS(HOST_RDX, RDX);
	DEFINE_LONGS(HOST_RBP, RBP);
	DEFINE_LONGS(HOST_RAX, RAX);
	DEFINE_LONGS(HOST_R8, R8);
	DEFINE_LONGS(HOST_R9, R9);
	DEFINE_LONGS(HOST_R10, R10);
	DEFINE_LONGS(HOST_R11, R11);
	DEFINE_LONGS(HOST_R12, R12);
	DEFINE_LONGS(HOST_R13, R13);
	DEFINE_LONGS(HOST_R14, R14);
	DEFINE_LONGS(HOST_R15, R15);
	DEFINE_LONGS(HOST_ORIG_RAX, ORIG_RAX);
	DEFINE_LONGS(HOST_CS, CS);
	DEFINE_LONGS(HOST_SS, SS);
	DEFINE_LONGS(HOST_EFLAGS, EFLAGS);
#if 0
	/* Segment bases are unused on x86_64 UML; kept for reference. */
	DEFINE_LONGS(HOST_FS, FS);
	DEFINE_LONGS(HOST_GS, GS);
	DEFINE_LONGS(HOST_DS, DS);
	DEFINE_LONGS(HOST_ES, ES);
#endif

	DEFINE_LONGS(HOST_IP, RIP);
	DEFINE_LONGS(HOST_SP, RSP);
#endif

	/* Host constants UML mirrors verbatim. */
	DEFINE(UM_FRAME_SIZE, sizeof(struct user_regs_struct));
	DEFINE(UM_POLLIN, POLLIN);
	DEFINE(UM_POLLPRI, POLLPRI);
	DEFINE(UM_POLLOUT, POLLOUT);

	DEFINE(UM_PROT_READ, PROT_READ);
	DEFINE(UM_PROT_WRITE, PROT_WRITE);
	DEFINE(UM_PROT_EXEC, PROT_EXEC);
}
diff --git a/arch/um/sys-x86/vdso/Makefile b/arch/um/sys-x86/vdso/Makefile
deleted file mode 100644
index 5dffe6d46686..000000000000
--- a/arch/um/sys-x86/vdso/Makefile
+++ /dev/null
@@ -1,90 +0,0 @@
#
# Building vDSO images for x86.
#

VDSO64-y := y

vdso-install-$(VDSO64-y) += vdso.so


# files to link into the vdso
vobjs-y := vdso-note.o um_vdso.o

# files to link into kernel
obj-$(VDSO64-y) += vdso.o vma.o

vobjs := $(foreach F,$(vobjs-y),$(obj)/$F)

# vdso.o embeds the stripped .so via .incbin, so it depends on it.
$(obj)/vdso.o: $(obj)/vdso.so

targets += vdso.so vdso.so.dbg vdso.lds $(vobjs-y)

export CPPFLAGS_vdso.lds += -P -C

VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
	-Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096

$(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so

# Link the debug (unstripped) DSO from the preprocessed linker script.
$(obj)/vdso.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
	$(call if_changed,vdso)

# The installed/embedded .so is the .dbg image with symbols stripped.
$(obj)/%.so: OBJCOPYFLAGS := -S
$(obj)/%.so: $(obj)/%.so.dbg FORCE
	$(call if_changed,objcopy)

#
# Don't omit frame pointers for ease of userspace debugging, but do
# optimize sibling calls.
#
CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
	$(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
	-fno-omit-frame-pointer -foptimize-sibling-calls

$(vobjs): KBUILD_CFLAGS += $(CFL)

#
# vDSO code runs in userspace and -pg doesn't help with profiling anyway.
#
CFLAGS_REMOVE_vdso-note.o = -pg
CFLAGS_REMOVE_um_vdso.o = -pg

targets += vdso-syms.lds
obj-$(VDSO64-y) += vdso-syms.lds

#
# Match symbols in the DSO that look like VDSO*; produce a file of constants.
#
sed-vdsosym := -e 's/^00*/0/' \
	-e 's/^\([0-9a-fA-F]*\) . \(VDSO[a-zA-Z0-9_]*\)$$/\2 = 0x\1;/p'
quiet_cmd_vdsosym = VDSOSYM $@
define cmd_vdsosym
	$(NM) $< | LC_ALL=C sed -n $(sed-vdsosym) | LC_ALL=C sort > $@
endef

$(obj)/%-syms.lds: $(obj)/%.so.dbg FORCE
	$(call if_changed,vdsosym)

#
# The DSO images are built using a special linker script.
#
quiet_cmd_vdso = VDSO    $@
      cmd_vdso = $(CC) -nostdlib -o $@ \
		       $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
		       -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
		 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'

VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
GCOV_PROFILE := n

#
# Install the unstripped copy of vdso*.so listed in $(vdso-install-y).
#
quiet_cmd_vdso_install = INSTALL $@
      cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
$(vdso-install-y): %.so: $(obj)/%.so.dbg FORCE
	@mkdir -p $(MODLIB)/vdso
	$(call cmd,vdso_install)

PHONY += vdso_install $(vdso-install-y)
vdso_install: $(vdso-install-y)
diff --git a/arch/um/sys-x86/vdso/checkundef.sh b/arch/um/sys-x86/vdso/checkundef.sh
deleted file mode 100644
index 7ee90a9b549d..000000000000
--- a/arch/um/sys-x86/vdso/checkundef.sh
+++ /dev/null
@@ -1,10 +0,0 @@
#!/bin/sh
# Fail the vDSO link if the DSO contains any undefined symbols:
# nothing may be left to resolve at runtime.
# Usage: checkundef.sh <nm> <file>
nm="$1"
file="$2"

$nm "$file" | grep '^ *U' > /dev/null 2>&1
status=$?

# grep exits 1 only when no undefined-symbol lines were found.
if [ "$status" -ne 1 ]; then
    echo "$file: undefined symbols found" >&2
    exit 1
fi
exit 0
diff --git a/arch/um/sys-x86/vdso/um_vdso.c b/arch/um/sys-x86/vdso/um_vdso.c
deleted file mode 100644
index 7c441b59d375..000000000000
--- a/arch/um/sys-x86/vdso/um_vdso.c
+++ /dev/null
@@ -1,71 +0,0 @@
1/*
2 * Copyright (C) 2011 Richard Weinberger <richrd@nod.at>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This vDSO turns all calls into a syscall so that UML can trap them.
9 */
10
11
12/* Disable profiling for userspace code */
13#define DISABLE_BRANCH_PROFILING
14
15#include <linux/time.h>
16#include <linux/getcpu.h>
17#include <asm/unistd.h>
18
19int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
20{
21 long ret;
22
23 asm("syscall" : "=a" (ret) :
24 "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
25
26 return ret;
27}
28int clock_gettime(clockid_t, struct timespec *)
29 __attribute__((weak, alias("__vdso_clock_gettime")));
30
31int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
32{
33 long ret;
34
35 asm("syscall" : "=a" (ret) :
36 "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
37
38 return ret;
39}
40int gettimeofday(struct timeval *, struct timezone *)
41 __attribute__((weak, alias("__vdso_gettimeofday")));
42
/*
 * vDSO time(): forwarded to the time(2) syscall so UML can trap it.
 * The clobber list covers what SYSCALL destroys: flags ("cc"), %r11
 * (saved RFLAGS) and %rcx ("cx", return RIP).
 */
time_t __vdso_time(time_t *t)
{
	long secs;

	asm volatile("syscall"
		: "=a" (secs)
		: "0" (__NR_time), "D" (t) : "cc", "r11", "cx", "memory");

	return secs;
}
int time(time_t *t) __attribute__((weak, alias("__vdso_time")));
54
55long
56__vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
57{
58 /*
59 * UML does not support SMP, we can cheat here. :)
60 */
61
62 if (cpu)
63 *cpu = 0;
64 if (node)
65 *node = 0;
66
67 return 0;
68}
69
70long getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
71 __attribute__((weak, alias("__vdso_getcpu")));
diff --git a/arch/um/sys-x86/vdso/vdso-layout.lds.S b/arch/um/sys-x86/vdso/vdso-layout.lds.S
deleted file mode 100644
index 634a2cf62046..000000000000
--- a/arch/um/sys-x86/vdso/vdso-layout.lds.S
+++ /dev/null
@@ -1,64 +0,0 @@
/*
 * Linker script for vDSO. This is an ELF shared object prelinked to
 * its virtual address, and with only one read-only segment.
 * This script controls its layout.
 */

SECTIONS
{
	. = VDSO_PRELINK + SIZEOF_HEADERS;

	/* Dynamic-linking metadata, all placed in the single text segment. */
	.hash		: { *(.hash) }			:text
	.gnu.hash	: { *(.gnu.hash) }
	.dynsym		: { *(.dynsym) }
	.dynstr		: { *(.dynstr) }
	.gnu.version	: { *(.gnu.version) }
	.gnu.version_d	: { *(.gnu.version_d) }
	.gnu.version_r	: { *(.gnu.version_r) }

	.note		: { *(.note.*) }		:text	:note

	.eh_frame_hdr	: { *(.eh_frame_hdr) }		:text	:eh_frame_hdr
	.eh_frame	: { KEEP (*(.eh_frame)) }	:text

	.dynamic	: { *(.dynamic) }		:text	:dynamic

	.rodata		: { *(.rodata*) }		:text
	/*
	 * Writable-looking input sections also land in the read-only
	 * segment: the vDSO must not need any runtime relocation or
	 * writable data.
	 */
	.data		: {
	      *(.data*)
	      *(.sdata*)
	      *(.got.plt) *(.got)
	      *(.gnu.linkonce.d.*)
	      *(.bss*)
	      *(.dynbss*)
	      *(.gnu.linkonce.b.*)
	}

	.altinstructions	: { *(.altinstructions) }
	.altinstr_replacement	: { *(.altinstr_replacement) }

	/*
	 * Align the actual code well away from the non-instruction data.
	 * This is the best thing for the I-cache.
	 */
	. = ALIGN(0x100);

	/* Pad with 0x90 (x86 NOP) between input sections. */
	.text		: { *(.text*) }			:text	=0x90909090
}

/*
 * Very old versions of ld do not recognize this name token; use the constant.
 */
#define PT_GNU_EH_FRAME	0x6474e550

/*
 * We must supply the ELF program headers explicitly to get just one
 * PT_LOAD segment, and set the flags explicitly to make segments read-only.
 */
PHDRS
{
	text		PT_LOAD		FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
	dynamic		PT_DYNAMIC	FLAGS(4);		/* PF_R */
	note		PT_NOTE		FLAGS(4);		/* PF_R */
	eh_frame_hdr	PT_GNU_EH_FRAME;
}
diff --git a/arch/um/sys-x86/vdso/vdso-note.S b/arch/um/sys-x86/vdso/vdso-note.S
deleted file mode 100644
index 79a071e4357e..000000000000
--- a/arch/um/sys-x86/vdso/vdso-note.S
+++ /dev/null
@@ -1,12 +0,0 @@
/*
 * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
 * Here we can supply some information useful to userland.
 */

#include <linux/uts.h>
#include <linux/version.h>
#include <linux/elfnote.h>

/* A "Linux" ELF note carrying the kernel version this vDSO was built for. */
ELFNOTE_START(Linux, 0, "a")
	.long LINUX_VERSION_CODE
ELFNOTE_END
diff --git a/arch/um/sys-x86/vdso/vdso.S b/arch/um/sys-x86/vdso/vdso.S
deleted file mode 100644
index 03b053283f86..000000000000
--- a/arch/um/sys-x86/vdso/vdso.S
+++ /dev/null
@@ -1,10 +0,0 @@
#include <linux/init.h>

/*
 * Embed the prelinked vDSO image in the kernel as init data;
 * vdso_start/vdso_end bracket the raw file bytes, which init_vdso()
 * (vdso/vma.c) copies into a page at boot.
 *
 * NOTE(review): the .incbin path names the pre-move directory
 * (arch/um/sys-x86); it must track the rename to arch/x86/um or the
 * build will not find vdso.so — confirm against the new Makefile.
 */
__INITDATA

	.globl vdso_start, vdso_end
vdso_start:
	.incbin "arch/um/sys-x86/vdso/vdso.so"
vdso_end:

__FINIT
diff --git a/arch/um/sys-x86/vdso/vdso.lds.S b/arch/um/sys-x86/vdso/vdso.lds.S
deleted file mode 100644
index b96b2677cad8..000000000000
--- a/arch/um/sys-x86/vdso/vdso.lds.S
+++ /dev/null
@@ -1,32 +0,0 @@
/*
 * Linker script for 64-bit vDSO.
 * We #include the file to define the layout details.
 * Here we only choose the prelinked virtual address.
 *
 * This file defines the version script giving the user-exported symbols in
 * the DSO. We can define local symbols here called VDSO* to make their
 * values visible using the asm-x86/vdso.h macros from the kernel proper.
 */

#define VDSO_PRELINK 0xffffffffff700000
#include "vdso-layout.lds.S"

/*
 * This controls what userland symbols we export from the vDSO.
 * Each libc-visible name and its __vdso_-prefixed twin (see um_vdso.c)
 * are exported under the LINUX_2.6 version; everything else is local.
 */
VERSION {
	LINUX_2.6 {
	global:
		clock_gettime;
		__vdso_clock_gettime;
		gettimeofday;
		__vdso_gettimeofday;
		getcpu;
		__vdso_getcpu;
		time;
		__vdso_time;
	local: *;
	};
}

VDSO64_PRELINK = VDSO_PRELINK;
diff --git a/arch/um/sys-x86/vdso/vma.c b/arch/um/sys-x86/vdso/vma.c
deleted file mode 100644
index 9495c8d0ce37..000000000000
--- a/arch/um/sys-x86/vdso/vma.c
+++ /dev/null
@@ -1,74 +0,0 @@
1/*
2 * Copyright (C) 2011 Richard Weinberger <richrd@nod.at>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/slab.h>
10#include <linux/sched.h>
11#include <linux/mm.h>
12#include <asm/page.h>
13#include <linux/init.h>
14
15unsigned int __read_mostly vdso_enabled = 1;
16unsigned long um_vdso_addr;
17
18extern unsigned long task_size;
19extern char vdso_start[], vdso_end[];
20
21static struct page **vdsop;
22
23static int __init init_vdso(void)
24{
25 struct page *um_vdso;
26
27 BUG_ON(vdso_end - vdso_start > PAGE_SIZE);
28
29 um_vdso_addr = task_size - PAGE_SIZE;
30
31 vdsop = kmalloc(GFP_KERNEL, sizeof(struct page *));
32 if (!vdsop)
33 goto oom;
34
35 um_vdso = alloc_page(GFP_KERNEL);
36 if (!um_vdso) {
37 kfree(vdsop);
38
39 goto oom;
40 }
41
42 copy_page(page_address(um_vdso), vdso_start);
43 *vdsop = um_vdso;
44
45 return 0;
46
47oom:
48 printk(KERN_ERR "Cannot allocate vdso\n");
49 vdso_enabled = 0;
50
51 return -ENOMEM;
52}
53subsys_initcall(init_vdso);
54
/*
 * Map the vDSO page into a new process at exec time: installs a single
 * read/exec special mapping at um_vdso_addr (set up by init_vdso()
 * above).  Returns 0 when the vDSO is disabled, otherwise the result
 * of install_special_mapping().
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	int err;
	struct mm_struct *mm = current->mm;

	/* init_vdso() clears vdso_enabled when it could not allocate. */
	if (!vdso_enabled)
		return 0;

	down_write(&mm->mmap_sem);

	err = install_special_mapping(mm, um_vdso_addr, PAGE_SIZE,
		VM_READ|VM_EXEC|
		VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
		VM_ALWAYSDUMP,
		vdsop);

	up_write(&mm->mmap_sem);

	return err;
}