author		Al Viro <viro@ftp.linux.org.uk>		2011-08-18 15:06:39 -0400
committer	Richard Weinberger <richard@nod.at>	2011-11-02 09:15:05 -0400
commit		5c48b108ecbf6505d929e64d50dace13ac2bdf34
tree		016904f84fbe05aa301c5cdfe712d90f6bb828fe /arch/x86/um/asm
parent		7bbe7204e93734fe79d8aac3e08a7cb4624b5004
um: take arch/um/sys-x86 to arch/x86/um
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Richard Weinberger <richard@nod.at>
Diffstat (limited to 'arch/x86/um/asm')
-rw-r--r--	arch/x86/um/asm/arch_hweight.h	  6
-rw-r--r--	arch/x86/um/asm/archparam.h	 20
-rw-r--r--	arch/x86/um/asm/checksum.h	 10
-rw-r--r--	arch/x86/um/asm/checksum_32.h	201
-rw-r--r--	arch/x86/um/asm/checksum_64.h	144
-rw-r--r--	arch/x86/um/asm/elf.h		221
-rw-r--r--	arch/x86/um/asm/module.h	 23
-rw-r--r--	arch/x86/um/asm/processor.h	 15
-rw-r--r--	arch/x86/um/asm/processor_32.h	 73
-rw-r--r--	arch/x86/um/asm/processor_64.h	 51
-rw-r--r--	arch/x86/um/asm/ptrace.h	  5
-rw-r--r--	arch/x86/um/asm/ptrace_32.h	 51
-rw-r--r--	arch/x86/um/asm/ptrace_64.h	 72
-rw-r--r--	arch/x86/um/asm/system.h	133
-rw-r--r--	arch/x86/um/asm/vm-flags.h	 25
15 files changed, 1101 insertions, 0 deletions
diff --git a/arch/x86/um/asm/arch_hweight.h b/arch/x86/um/asm/arch_hweight.h
new file mode 100644
index 000000000000..c656cf443f4a
--- /dev/null
+++ b/arch/x86/um/asm/arch_hweight.h
@@ -0,0 +1,6 @@
1#ifndef _ASM_UM_HWEIGHT_H
2#define _ASM_UM_HWEIGHT_H
3
4#include <asm-generic/bitops/arch_hweight.h>
5
6#endif
diff --git a/arch/x86/um/asm/archparam.h b/arch/x86/um/asm/archparam.h
new file mode 100644
index 000000000000..c17cf68dda0f
--- /dev/null
+++ b/arch/x86/um/asm/archparam.h
@@ -0,0 +1,20 @@
1/*
2 * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
3 * Copyright 2003 PathScale, Inc.
4 * Licensed under the GPL
5 */
6
7#ifndef __UM_ARCHPARAM_H
8#define __UM_ARCHPARAM_H
9
10#ifdef CONFIG_X86_32
11
12#ifdef CONFIG_X86_PAE
13#define LAST_PKMAP 512
14#else
15#define LAST_PKMAP 1024
16#endif
17
18#endif
19
20#endif
diff --git a/arch/x86/um/asm/checksum.h b/arch/x86/um/asm/checksum.h
new file mode 100644
index 000000000000..b6efe2381b5d
--- /dev/null
+++ b/arch/x86/um/asm/checksum.h
@@ -0,0 +1,10 @@
1#ifndef __UM_CHECKSUM_H
2#define __UM_CHECKSUM_H
3
4#ifdef CONFIG_X86_32
5# include "checksum_32.h"
6#else
7# include "checksum_64.h"
8#endif
9
10#endif
diff --git a/arch/x86/um/asm/checksum_32.h b/arch/x86/um/asm/checksum_32.h
new file mode 100644
index 000000000000..caab74252e27
--- /dev/null
+++ b/arch/x86/um/asm/checksum_32.h
@@ -0,0 +1,201 @@
1/*
2 * Licensed under the GPL
3 */
4
5#ifndef __UM_SYSDEP_CHECKSUM_H
6#define __UM_SYSDEP_CHECKSUM_H
7
8#include "linux/in6.h"
9#include "linux/string.h"
10
11/*
12 * computes the checksum of a memory block at buff, length len,
13 * and adds in "sum" (32-bit)
14 *
15 * returns a 32-bit number suitable for feeding into itself
16 * or csum_tcpudp_magic
17 *
18 * this function must be called with even lengths, except
19 * for the last fragment, which may be odd
20 *
21 * it's best to have buff aligned on a 32-bit boundary
22 */
23__wsum csum_partial(const void *buff, int len, __wsum sum);
24
25/*
26 * Note: when you get a NULL pointer exception here this means someone
27 * passed in an incorrect kernel address to one of these functions.
28 *
29 * If you use these functions directly please don't forget the
30 * access_ok().
31 */
32
33static __inline__
34__wsum csum_partial_copy_nocheck(const void *src, void *dst,
35 int len, __wsum sum)
36{
37 memcpy(dst, src, len);
38 return csum_partial(dst, len, sum);
39}
40
41/*
42 * the same as csum_partial, but copies from src while it
43 * checksums, and handles user-space pointer exceptions correctly, when needed.
44 *
45 * here even more important to align src and dst on a 32-bit (or even
46 * better 64-bit) boundary
47 */
48
49static __inline__
50__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
51 int len, __wsum sum, int *err_ptr)
52{
53 if (copy_from_user(dst, src, len)) {
54 *err_ptr = -EFAULT;
55 return (__force __wsum)-1;
56 }
57
58 return csum_partial(dst, len, sum);
59}
60
61/*
62 * This is a version of ip_compute_csum() optimized for IP headers,
63 * which always checksum on 4 octet boundaries.
64 *
65 * By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
66 * Arnt Gulbrandsen.
67 */
68static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
69{
70 unsigned int sum;
71
72 __asm__ __volatile__(
73 "movl (%1), %0 ;\n"
74 "subl $4, %2 ;\n"
75 "jbe 2f ;\n"
76 "addl 4(%1), %0 ;\n"
77 "adcl 8(%1), %0 ;\n"
78 "adcl 12(%1), %0 ;\n"
79"1: adcl 16(%1), %0 ;\n"
80 "lea 4(%1), %1 ;\n"
81 "decl %2 ;\n"
82 "jne 1b ;\n"
83 "adcl $0, %0 ;\n"
84 "movl %0, %2 ;\n"
85 "shrl $16, %0 ;\n"
86 "addw %w2, %w0 ;\n"
87 "adcl $0, %0 ;\n"
88 "notl %0 ;\n"
89"2: ;\n"
90 /* Since the input registers which are loaded with iph and ipl
91 are modified, we must also specify them as outputs, or gcc
92 will assume they contain their original values. */
93 : "=r" (sum), "=r" (iph), "=r" (ihl)
94 : "1" (iph), "2" (ihl)
95 : "memory");
96 return (__force __sum16)sum;
97}
98
99/*
100 * Fold a partial checksum
101 */
102
103static inline __sum16 csum_fold(__wsum sum)
104{
105 __asm__(
106 "addl %1, %0 ;\n"
107 "adcl $0xffff, %0 ;\n"
108 : "=r" (sum)
109 : "r" ((__force u32)sum << 16),
110 "0" ((__force u32)sum & 0xffff0000)
111 );
112 return (__force __sum16)(~(__force u32)sum >> 16);
113}
114
115static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
116 unsigned short len,
117 unsigned short proto,
118 __wsum sum)
119{
120 __asm__(
121 "addl %1, %0 ;\n"
122 "adcl %2, %0 ;\n"
123 "adcl %3, %0 ;\n"
124 "adcl $0, %0 ;\n"
125 : "=r" (sum)
126 : "g" (daddr), "g"(saddr), "g"((len + proto) << 8), "0"(sum));
127 return sum;
128}
129
130/*
131 * computes the checksum of the TCP/UDP pseudo-header
132 * returns a 16-bit checksum, already complemented
133 */
134static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
135 unsigned short len,
136 unsigned short proto,
137 __wsum sum)
138{
139 return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
140}
141
142/*
143 * this routine is used for miscellaneous IP-like checksums, mainly
144 * in icmp.c
145 */
146
147static inline __sum16 ip_compute_csum(const void *buff, int len)
148{
149 return csum_fold (csum_partial(buff, len, 0));
150}
151
152#define _HAVE_ARCH_IPV6_CSUM
153static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
154 const struct in6_addr *daddr,
155 __u32 len, unsigned short proto,
156 __wsum sum)
157{
158 __asm__(
159 "addl 0(%1), %0 ;\n"
160 "adcl 4(%1), %0 ;\n"
161 "adcl 8(%1), %0 ;\n"
162 "adcl 12(%1), %0 ;\n"
163 "adcl 0(%2), %0 ;\n"
164 "adcl 4(%2), %0 ;\n"
165 "adcl 8(%2), %0 ;\n"
166 "adcl 12(%2), %0 ;\n"
167 "adcl %3, %0 ;\n"
168 "adcl %4, %0 ;\n"
169 "adcl $0, %0 ;\n"
170 : "=&r" (sum)
171 : "r" (saddr), "r" (daddr),
172 "r"(htonl(len)), "r"(htonl(proto)), "0"(sum));
173
174 return csum_fold(sum);
175}
176
177/*
178 * Copy and checksum to user
179 */
180#define HAVE_CSUM_COPY_USER
181static __inline__ __wsum csum_and_copy_to_user(const void *src,
182 void __user *dst,
183 int len, __wsum sum, int *err_ptr)
184{
185 if (access_ok(VERIFY_WRITE, dst, len)) {
186 if (copy_to_user(dst, src, len)) {
187 *err_ptr = -EFAULT;
188 return (__force __wsum)-1;
189 }
190
191 return csum_partial(src, len, sum);
192 }
193
194 if (len)
195 *err_ptr = -EFAULT;
196
197 return (__force __wsum)-1; /* invalid checksum */
198}
199
200#endif
201
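For readers following the adcl chains above: they implement the standard Internet (ones' complement) checksum. A rough plain-C model of what csum_partial() and csum_fold() compute -- illustration only, not part of this patch, and ignoring the kernel's __wsum/__sum16 byte-order annotations -- looks like this:

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit running sum to 16 bits and invert it (what csum_fold does). */
static uint16_t fold_csum(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);	/* second pass absorbs the carry */
	return (uint16_t)~sum;
}

/* Sum 16-bit words with end-around carry (what csum_partial computes). */
static uint32_t partial_csum(const void *buff, int len, uint32_t sum)
{
	const uint16_t *p = buff;

	while (len > 1) {
		sum += *p++;
		len -= 2;
	}
	if (len)				/* odd trailing byte */
		sum += *(const uint8_t *)p;
	while (sum >> 16)			/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

int main(void)
{
	uint8_t hdr[] = { 0x45, 0x00, 0x00, 0x54 };

	printf("0x%04x\n", fold_csum(partial_csum(hdr, sizeof(hdr), 0)));
	return 0;
}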
diff --git a/arch/x86/um/asm/checksum_64.h b/arch/x86/um/asm/checksum_64.h
new file mode 100644
index 000000000000..a5be9031ea85
--- /dev/null
+++ b/arch/x86/um/asm/checksum_64.h
@@ -0,0 +1,144 @@
1/*
2 * Licensed under the GPL
3 */
4
5#ifndef __UM_SYSDEP_CHECKSUM_H
6#define __UM_SYSDEP_CHECKSUM_H
7
8#include "linux/string.h"
9#include "linux/in6.h"
10#include "asm/uaccess.h"
11
12extern __wsum csum_partial(const void *buff, int len, __wsum sum);
13
14/*
15 * Note: when you get a NULL pointer exception here this means someone
16 * passed in an incorrect kernel address to one of these functions.
17 *
18 * If you use these functions directly please don't forget the
19 * access_ok().
20 */
21
22static __inline__
23__wsum csum_partial_copy_nocheck(const void *src, void *dst,
24 int len, __wsum sum)
25{
26 memcpy(dst, src, len);
27 return(csum_partial(dst, len, sum));
28}
29
30static __inline__
31__wsum csum_partial_copy_from_user(const void __user *src,
32 void *dst, int len, __wsum sum,
33 int *err_ptr)
34{
35 if (copy_from_user(dst, src, len)) {
36 *err_ptr = -EFAULT;
37 return (__force __wsum)-1;
38 }
39 return csum_partial(dst, len, sum);
40}
41
42/**
43 * csum_fold - Fold and invert a 32bit checksum.
44 * sum: 32bit unfolded sum
45 *
46 * Fold a 32bit running checksum to 16bit and invert it. This is usually
47 * the last step before putting a checksum into a packet.
48 * Make sure not to mix with 64bit checksums.
49 */
50static inline __sum16 csum_fold(__wsum sum)
51{
52 __asm__(
53 " addl %1,%0\n"
54 " adcl $0xffff,%0"
55 : "=r" (sum)
56 : "r" ((__force u32)sum << 16),
57 "0" ((__force u32)sum & 0xffff0000)
58 );
59 return (__force __sum16)(~(__force u32)sum >> 16);
60}
61
62/**
63 * csum_tcpup_nofold - Compute an IPv4 pseudo header checksum.
64 * @saddr: source address
65 * @daddr: destination address
66 * @len: length of packet
67 * @proto: ip protocol of packet
68 * @sum: initial sum to be added in (32bit unfolded)
69 *
70 * Returns the pseudo header checksum the input data. Result is
71 * 32bit unfolded.
72 */
73static inline __wsum
74csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
75 unsigned short proto, __wsum sum)
76{
77 asm(" addl %1, %0\n"
78 " adcl %2, %0\n"
79 " adcl %3, %0\n"
80 " adcl $0, %0\n"
81 : "=r" (sum)
82 : "g" (daddr), "g" (saddr), "g" ((len + proto) << 8), "0" (sum));
83 return sum;
84}
85
86/*
87 * computes the checksum of the TCP/UDP pseudo-header
88 * returns a 16-bit checksum, already complemented
89 */
90static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
91 unsigned short len,
92 unsigned short proto,
93 __wsum sum)
94{
95 return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
96}
97
98/**
99 * ip_fast_csum - Compute the IPv4 header checksum efficiently.
100 * iph: ipv4 header
101 * ihl: length of header / 4
102 */
103static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
104{
105 unsigned int sum;
106
107 asm( " movl (%1), %0\n"
108 " subl $4, %2\n"
109 " jbe 2f\n"
110 " addl 4(%1), %0\n"
111 " adcl 8(%1), %0\n"
112 " adcl 12(%1), %0\n"
113 "1: adcl 16(%1), %0\n"
114 " lea 4(%1), %1\n"
115 " decl %2\n"
116 " jne 1b\n"
117 " adcl $0, %0\n"
118 " movl %0, %2\n"
119 " shrl $16, %0\n"
120 " addw %w2, %w0\n"
121 " adcl $0, %0\n"
122 " notl %0\n"
123 "2:"
124 /* Since the input registers which are loaded with iph and ipl
125 are modified, we must also specify them as outputs, or gcc
126 will assume they contain their original values. */
127 : "=r" (sum), "=r" (iph), "=r" (ihl)
128 : "1" (iph), "2" (ihl)
129 : "memory");
130 return (__force __sum16)sum;
131}
132
133static inline unsigned add32_with_carry(unsigned a, unsigned b)
134{
135 asm("addl %2,%0\n\t"
136 "adcl $0,%0"
137 : "=r" (a)
138 : "0" (a), "r" (b));
139 return a;
140}
141
142extern __sum16 ip_compute_csum(const void *buff, int len);
143
144#endif
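The 64-bit variant documents its helpers more thoroughly; for orientation, a plain-C model of csum_tcpudp_nofold() followed by csum_fold() -- illustration only, with byte-order handling deliberately glossed over -- is:

#include <stdint.h>
#include <stdio.h>

/* Fold an accumulated sum down to the final 16-bit, inverted checksum. */
static uint16_t fold(uint64_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

/* Accumulate the IPv4 pseudo-header fields, as the adcl chain does. */
static uint16_t pseudo_hdr_csum(uint32_t saddr, uint32_t daddr,
				uint16_t len, uint8_t proto, uint32_t sum)
{
	uint64_t acc = sum;

	acc += saddr;
	acc += daddr;
	acc += (uint32_t)(len + proto) << 8;	/* same operand the asm adds */
	return fold(acc);
}

int main(void)
{
	/* 10.0.0.1 -> 10.0.0.2, 40-byte TCP (protocol 6) segment. */
	printf("0x%04x\n",
	       pseudo_hdr_csum(0x0a000001, 0x0a000002, 40, 6, 0));
	return 0;
}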
diff --git a/arch/x86/um/asm/elf.h b/arch/x86/um/asm/elf.h
new file mode 100644
index 000000000000..f3b0633b69a1
--- /dev/null
+++ b/arch/x86/um/asm/elf.h
@@ -0,0 +1,221 @@
1/*
2 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5#ifndef __UM_ELF_X86_H
6#define __UM_ELF_X86_H
7
8#include <asm/user.h>
9#include "skas.h"
10
11#ifdef CONFIG_X86_32
12
13#define R_386_NONE 0
14#define R_386_32 1
15#define R_386_PC32 2
16#define R_386_GOT32 3
17#define R_386_PLT32 4
18#define R_386_COPY 5
19#define R_386_GLOB_DAT 6
20#define R_386_JMP_SLOT 7
21#define R_386_RELATIVE 8
22#define R_386_GOTOFF 9
23#define R_386_GOTPC 10
24#define R_386_NUM 11
25
26/*
27 * This is used to ensure we don't load something for the wrong architecture.
28 */
29#define elf_check_arch(x) \
30 (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))
31
32#define ELF_CLASS ELFCLASS32
33#define ELF_DATA ELFDATA2LSB
34#define ELF_ARCH EM_386
35
36#define ELF_PLAT_INIT(regs, load_addr) do { \
37 PT_REGS_EBX(regs) = 0; \
38 PT_REGS_ECX(regs) = 0; \
39 PT_REGS_EDX(regs) = 0; \
40 PT_REGS_ESI(regs) = 0; \
41 PT_REGS_EDI(regs) = 0; \
42 PT_REGS_EBP(regs) = 0; \
43 PT_REGS_EAX(regs) = 0; \
44} while (0)
45
46/* Shamelessly stolen from include/asm-i386/elf.h */
47
48#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \
49 pr_reg[0] = PT_REGS_EBX(regs); \
50 pr_reg[1] = PT_REGS_ECX(regs); \
51 pr_reg[2] = PT_REGS_EDX(regs); \
52 pr_reg[3] = PT_REGS_ESI(regs); \
53 pr_reg[4] = PT_REGS_EDI(regs); \
54 pr_reg[5] = PT_REGS_EBP(regs); \
55 pr_reg[6] = PT_REGS_EAX(regs); \
56 pr_reg[7] = PT_REGS_DS(regs); \
57 pr_reg[8] = PT_REGS_ES(regs); \
58 /* fake once used fs and gs selectors? */ \
59 pr_reg[9] = PT_REGS_DS(regs); \
60 pr_reg[10] = PT_REGS_DS(regs); \
61 pr_reg[11] = PT_REGS_SYSCALL_NR(regs); \
62 pr_reg[12] = PT_REGS_IP(regs); \
63 pr_reg[13] = PT_REGS_CS(regs); \
64 pr_reg[14] = PT_REGS_EFLAGS(regs); \
65 pr_reg[15] = PT_REGS_SP(regs); \
66 pr_reg[16] = PT_REGS_SS(regs); \
67} while (0);
68
69extern char * elf_aux_platform;
70#define ELF_PLATFORM (elf_aux_platform)
71
72extern unsigned long vsyscall_ehdr;
73extern unsigned long vsyscall_end;
74extern unsigned long __kernel_vsyscall;
75
76/*
77 * This is the range that is readable by user mode, and things
78 * acting like user mode such as get_user_pages.
79 */
80#define FIXADDR_USER_START vsyscall_ehdr
81#define FIXADDR_USER_END vsyscall_end
82
83
84/*
85 * Architecture-neutral AT_ values in 0-17, leave some room
86 * for more of them, start the x86-specific ones at 32.
87 */
88#define AT_SYSINFO 32
89#define AT_SYSINFO_EHDR 33
90
91#define ARCH_DLINFO \
92do { \
93 if ( vsyscall_ehdr ) { \
94 NEW_AUX_ENT(AT_SYSINFO, __kernel_vsyscall); \
95 NEW_AUX_ENT(AT_SYSINFO_EHDR, vsyscall_ehdr); \
96 } \
97} while (0)
98
99#else
100
101/* x86-64 relocation types, taken from asm-x86_64/elf.h */
102#define R_X86_64_NONE 0 /* No reloc */
103#define R_X86_64_64 1 /* Direct 64 bit */
104#define R_X86_64_PC32 2 /* PC relative 32 bit signed */
105#define R_X86_64_GOT32 3 /* 32 bit GOT entry */
106#define R_X86_64_PLT32 4 /* 32 bit PLT address */
107#define R_X86_64_COPY 5 /* Copy symbol at runtime */
108#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */
109#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */
110#define R_X86_64_RELATIVE 8 /* Adjust by program base */
111#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative
112 offset to GOT */
113#define R_X86_64_32 10 /* Direct 32 bit zero extended */
114#define R_X86_64_32S 11 /* Direct 32 bit sign extended */
115#define R_X86_64_16 12 /* Direct 16 bit zero extended */
116#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
117#define R_X86_64_8 14 /* Direct 8 bit sign extended */
118#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
119
120#define R_X86_64_NUM 16
121
122/*
123 * This is used to ensure we don't load something for the wrong architecture.
124 */
125#define elf_check_arch(x) \
126 ((x)->e_machine == EM_X86_64)
127
128#define ELF_CLASS ELFCLASS64
129#define ELF_DATA ELFDATA2LSB
130#define ELF_ARCH EM_X86_64
131
132#define ELF_PLAT_INIT(regs, load_addr) do { \
133 PT_REGS_RBX(regs) = 0; \
134 PT_REGS_RCX(regs) = 0; \
135 PT_REGS_RDX(regs) = 0; \
136 PT_REGS_RSI(regs) = 0; \
137 PT_REGS_RDI(regs) = 0; \
138 PT_REGS_RBP(regs) = 0; \
139 PT_REGS_RAX(regs) = 0; \
140 PT_REGS_R8(regs) = 0; \
141 PT_REGS_R9(regs) = 0; \
142 PT_REGS_R10(regs) = 0; \
143 PT_REGS_R11(regs) = 0; \
144 PT_REGS_R12(regs) = 0; \
145 PT_REGS_R13(regs) = 0; \
146 PT_REGS_R14(regs) = 0; \
147 PT_REGS_R15(regs) = 0; \
148} while (0)
149
150#define ELF_CORE_COPY_REGS(pr_reg, _regs) \
151 (pr_reg)[0] = (_regs)->regs.gp[0]; \
152 (pr_reg)[1] = (_regs)->regs.gp[1]; \
153 (pr_reg)[2] = (_regs)->regs.gp[2]; \
154 (pr_reg)[3] = (_regs)->regs.gp[3]; \
155 (pr_reg)[4] = (_regs)->regs.gp[4]; \
156 (pr_reg)[5] = (_regs)->regs.gp[5]; \
157 (pr_reg)[6] = (_regs)->regs.gp[6]; \
158 (pr_reg)[7] = (_regs)->regs.gp[7]; \
159 (pr_reg)[8] = (_regs)->regs.gp[8]; \
160 (pr_reg)[9] = (_regs)->regs.gp[9]; \
161 (pr_reg)[10] = (_regs)->regs.gp[10]; \
162 (pr_reg)[11] = (_regs)->regs.gp[11]; \
163 (pr_reg)[12] = (_regs)->regs.gp[12]; \
164 (pr_reg)[13] = (_regs)->regs.gp[13]; \
165 (pr_reg)[14] = (_regs)->regs.gp[14]; \
166 (pr_reg)[15] = (_regs)->regs.gp[15]; \
167 (pr_reg)[16] = (_regs)->regs.gp[16]; \
168 (pr_reg)[17] = (_regs)->regs.gp[17]; \
169 (pr_reg)[18] = (_regs)->regs.gp[18]; \
170 (pr_reg)[19] = (_regs)->regs.gp[19]; \
171 (pr_reg)[20] = (_regs)->regs.gp[20]; \
172 (pr_reg)[21] = current->thread.arch.fs; \
173 (pr_reg)[22] = 0; \
174 (pr_reg)[23] = 0; \
175 (pr_reg)[24] = 0; \
176 (pr_reg)[25] = 0; \
177 (pr_reg)[26] = 0;
178
179#define ELF_PLATFORM "x86_64"
180
181/* No user-accessible fixmap addresses, i.e. vsyscall */
182#define FIXADDR_USER_START 0
183#define FIXADDR_USER_END 0
184
185#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
186struct linux_binprm;
187extern int arch_setup_additional_pages(struct linux_binprm *bprm,
188 int uses_interp);
189
190extern unsigned long um_vdso_addr;
191#define AT_SYSINFO_EHDR 33
192#define ARCH_DLINFO NEW_AUX_ENT(AT_SYSINFO_EHDR, um_vdso_addr)
193
194#endif
195
196typedef unsigned long elf_greg_t;
197
198#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
199typedef elf_greg_t elf_gregset_t[ELF_NGREG];
200
201typedef struct user_i387_struct elf_fpregset_t;
202
203#define task_pt_regs(t) (&(t)->thread.regs)
204
205struct task_struct;
206
207extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
208
209#define ELF_CORE_COPY_FPREGS(t, fpu) elf_core_copy_fpregs(t, fpu)
210
211#define ELF_EXEC_PAGESIZE 4096
212
213#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
214
215extern long elf_aux_hwcap;
216#define ELF_HWCAP (elf_aux_hwcap)
217
218#define SET_PERSONALITY(ex) do ; while(0)
219#define __HAVE_ARCH_GATE_AREA 1
220
221#endif
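The ARCH_DLINFO blocks above are what make the vDSO (and, on 32-bit, the vsyscall entry) visible to user space through the auxiliary vector. A small userspace check -- illustration only, using glibc's getauxval() rather than anything in this patch -- would be:

#include <stdio.h>
#include <elf.h>		/* AT_SYSINFO_EHDR, AT_SYSINFO */
#include <sys/auxv.h>		/* getauxval() */

int main(void)
{
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

	if (vdso)
		printf("vdso ELF header mapped at 0x%lx\n", vdso);
	else
		printf("no AT_SYSINFO_EHDR entry\n");

#ifdef AT_SYSINFO		/* only populated on 32-bit */
	printf("AT_SYSINFO: 0x%lx\n", getauxval(AT_SYSINFO));
#endif
	return 0;
}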
diff --git a/arch/x86/um/asm/module.h b/arch/x86/um/asm/module.h
new file mode 100644
index 000000000000..61af80e932eb
--- /dev/null
+++ b/arch/x86/um/asm/module.h
@@ -0,0 +1,23 @@
1#ifndef __UM_MODULE_H
2#define __UM_MODULE_H
3
4/* UML is simple */
5struct mod_arch_specific
6{
7};
8
9#ifdef CONFIG_X86_32
10
11#define Elf_Shdr Elf32_Shdr
12#define Elf_Sym Elf32_Sym
13#define Elf_Ehdr Elf32_Ehdr
14
15#else
16
17#define Elf_Shdr Elf64_Shdr
18#define Elf_Sym Elf64_Sym
19#define Elf_Ehdr Elf64_Ehdr
20
21#endif
22
23#endif
diff --git a/arch/x86/um/asm/processor.h b/arch/x86/um/asm/processor.h
new file mode 100644
index 000000000000..d3ac1cecf0f4
--- /dev/null
+++ b/arch/x86/um/asm/processor.h
@@ -0,0 +1,15 @@
1#ifndef __UM_PROCESSOR_H
2#define __UM_PROCESSOR_H
3
4/* include faultinfo structure */
5#include <sysdep/faultinfo.h>
6
7#ifdef CONFIG_X86_32
8# include "processor_32.h"
9#else
10# include "processor_64.h"
11#endif
12
13#include <asm/processor-generic.h>
14
15#endif
diff --git a/arch/x86/um/asm/processor_32.h b/arch/x86/um/asm/processor_32.h
new file mode 100644
index 000000000000..ae0d189aafcf
--- /dev/null
+++ b/arch/x86/um/asm/processor_32.h
@@ -0,0 +1,73 @@
1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __UM_PROCESSOR_I386_H
7#define __UM_PROCESSOR_I386_H
8
9#include <linux/string.h>
10#include <sysdep/host_ldt.h>
11#include <asm/segment.h>
12
13extern int host_has_cmov;
14
15struct uml_tls_struct {
16 struct user_desc tls;
17 unsigned flushed:1;
18 unsigned present:1;
19};
20
21struct arch_thread {
22 struct uml_tls_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
23 unsigned long debugregs[8];
24 int debugregs_seq;
25 struct faultinfo faultinfo;
26};
27
28#define INIT_ARCH_THREAD { \
29 .tls_array = { [ 0 ... GDT_ENTRY_TLS_ENTRIES - 1 ] = \
30 { .present = 0, .flushed = 0 } }, \
31 .debugregs = { [ 0 ... 7 ] = 0 }, \
32 .debugregs_seq = 0, \
33 .faultinfo = { 0, 0, 0 } \
34}
35
36static inline void arch_flush_thread(struct arch_thread *thread)
37{
38 /* Clear any TLS still hanging */
39 memset(&thread->tls_array, 0, sizeof(thread->tls_array));
40}
41
42static inline void arch_copy_thread(struct arch_thread *from,
43 struct arch_thread *to)
44{
45 memcpy(&to->tls_array, &from->tls_array, sizeof(from->tls_array));
46}
47
48#include <asm/user.h>
49
50/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
51static inline void rep_nop(void)
52{
53 __asm__ __volatile__("rep;nop": : :"memory");
54}
55
56#define cpu_relax() rep_nop()
57
58/*
59 * Default implementation of macro that returns current
60 * instruction pointer ("program counter"). Stolen
61 * from asm-i386/processor.h
62 */
63#define current_text_addr() \
64 ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
65
66#define ARCH_IS_STACKGROW(address) \
67 (address + 32 >= UPT_SP(&current->thread.regs.regs))
68
69#define KSTK_EIP(tsk) KSTK_REG(tsk, EIP)
70#define KSTK_ESP(tsk) KSTK_REG(tsk, UESP)
71#define KSTK_EBP(tsk) KSTK_REG(tsk, EBP)
72
73#endif
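rep_nop()/cpu_relax() above boil down to the x86 PAUSE instruction; the point is to make spin loops friendlier to the sibling hyperthread and the memory system. A minimal userspace sketch of the pattern, with the flag and function names invented for the example:

#include <stdatomic.h>

static atomic_int ready;

/* Same instruction the header emits: PAUSE, spelled "rep; nop". */
static inline void relax(void)
{
	__asm__ __volatile__("rep; nop" ::: "memory");
}

/* Busy-wait until another thread stores 1 to `ready`. */
void wait_for_ready(void)
{
	while (!atomic_load_explicit(&ready, memory_order_acquire))
		relax();
}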
diff --git a/arch/x86/um/asm/processor_64.h b/arch/x86/um/asm/processor_64.h
new file mode 100644
index 000000000000..6db812b24f48
--- /dev/null
+++ b/arch/x86/um/asm/processor_64.h
@@ -0,0 +1,51 @@
1/*
2 * Copyright 2003 PathScale, Inc.
3 *
4 * Licensed under the GPL
5 */
6
7#ifndef __UM_PROCESSOR_X86_64_H
8#define __UM_PROCESSOR_X86_64_H
9
10struct arch_thread {
11 unsigned long debugregs[8];
12 int debugregs_seq;
13 unsigned long fs;
14 struct faultinfo faultinfo;
15};
16
17/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
18static inline void rep_nop(void)
19{
20 __asm__ __volatile__("rep;nop": : :"memory");
21}
22
23#define cpu_relax() rep_nop()
24
25#define INIT_ARCH_THREAD { .debugregs = { [ 0 ... 7 ] = 0 }, \
26 .debugregs_seq = 0, \
27 .fs = 0, \
28 .faultinfo = { 0, 0, 0 } }
29
30static inline void arch_flush_thread(struct arch_thread *thread)
31{
32}
33
34static inline void arch_copy_thread(struct arch_thread *from,
35 struct arch_thread *to)
36{
37 to->fs = from->fs;
38}
39
40#include <asm/user.h>
41
42#define current_text_addr() \
43 ({ void *pc; __asm__("movq $1f,%0\n1:":"=g" (pc)); pc; })
44
45#define ARCH_IS_STACKGROW(address) \
46 (address + 128 >= UPT_SP(&current->thread.regs.regs))
47
48#define KSTK_EIP(tsk) KSTK_REG(tsk, RIP)
49#define KSTK_ESP(tsk) KSTK_REG(tsk, RSP)
50
51#endif
diff --git a/arch/x86/um/asm/ptrace.h b/arch/x86/um/asm/ptrace.h
new file mode 100644
index 000000000000..c8aca8c501b0
--- /dev/null
+++ b/arch/x86/um/asm/ptrace.h
@@ -0,0 +1,5 @@
1#ifdef CONFIG_X86_32
2# include "ptrace_32.h"
3#else
4# include "ptrace_64.h"
5#endif
diff --git a/arch/x86/um/asm/ptrace_32.h b/arch/x86/um/asm/ptrace_32.h
new file mode 100644
index 000000000000..5d2a59112537
--- /dev/null
+++ b/arch/x86/um/asm/ptrace_32.h
@@ -0,0 +1,51 @@
1/*
2 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __UM_PTRACE_I386_H
7#define __UM_PTRACE_I386_H
8
9#define HOST_AUDIT_ARCH AUDIT_ARCH_I386
10
11#include "linux/compiler.h"
12#include "asm/ptrace-generic.h"
13
14#define PT_REGS_EAX(r) UPT_EAX(&(r)->regs)
15#define PT_REGS_EBX(r) UPT_EBX(&(r)->regs)
16#define PT_REGS_ECX(r) UPT_ECX(&(r)->regs)
17#define PT_REGS_EDX(r) UPT_EDX(&(r)->regs)
18#define PT_REGS_ESI(r) UPT_ESI(&(r)->regs)
19#define PT_REGS_EDI(r) UPT_EDI(&(r)->regs)
20#define PT_REGS_EBP(r) UPT_EBP(&(r)->regs)
21
22#define PT_REGS_CS(r) UPT_CS(&(r)->regs)
23#define PT_REGS_SS(r) UPT_SS(&(r)->regs)
24#define PT_REGS_DS(r) UPT_DS(&(r)->regs)
25#define PT_REGS_ES(r) UPT_ES(&(r)->regs)
26#define PT_REGS_FS(r) UPT_FS(&(r)->regs)
27#define PT_REGS_GS(r) UPT_GS(&(r)->regs)
28
29#define PT_REGS_EFLAGS(r) UPT_EFLAGS(&(r)->regs)
30
31#define PT_REGS_ORIG_SYSCALL(r) PT_REGS_EAX(r)
32#define PT_REGS_SYSCALL_RET(r) PT_REGS_EAX(r)
33#define PT_FIX_EXEC_STACK(sp) do ; while(0)
34
35#define profile_pc(regs) PT_REGS_IP(regs)
36
37#define user_mode(r) UPT_IS_USER(&(r)->regs)
38
39/*
40 * Forward declaration to avoid including sysdep/tls.h, which causes a
41 * circular include, and compilation failures.
42 */
43struct user_desc;
44
45extern int ptrace_get_thread_area(struct task_struct *child, int idx,
46 struct user_desc __user *user_desc);
47
48extern int ptrace_set_thread_area(struct task_struct *child, int idx,
49 struct user_desc __user *user_desc);
50
51#endif
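ptrace_get_thread_area()/ptrace_set_thread_area() declared above back the PTRACE_GET_THREAD_AREA / PTRACE_SET_THREAD_AREA requests a tracer can issue. A sketch of the tracer side -- illustration only; the tracee must already be ptrace-attached and stopped, and the TLS entry index is an assumption for the example:

#include <stdio.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <asm/ldt.h>		/* struct user_desc */

int dump_tls_entry(pid_t pid, int idx)
{
	struct user_desc desc;

	if (ptrace(PTRACE_GET_THREAD_AREA, pid,
		   (void *)(long)idx, &desc) == -1) {
		perror("PTRACE_GET_THREAD_AREA");
		return -1;
	}
	printf("entry %u: base=0x%x limit=0x%x\n",
	       desc.entry_number, desc.base_addr, desc.limit);
	return 0;
}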
diff --git a/arch/x86/um/asm/ptrace_64.h b/arch/x86/um/asm/ptrace_64.h
new file mode 100644
index 000000000000..83d8c473b905
--- /dev/null
+++ b/arch/x86/um/asm/ptrace_64.h
@@ -0,0 +1,72 @@
1/*
2 * Copyright 2003 PathScale, Inc.
3 *
4 * Licensed under the GPL
5 */
6
7#ifndef __UM_PTRACE_X86_64_H
8#define __UM_PTRACE_X86_64_H
9
10#include "linux/compiler.h"
11#include "asm/errno.h"
12
13#define __FRAME_OFFSETS /* Needed to get the R* macros */
14#include "asm/ptrace-generic.h"
15
16#define HOST_AUDIT_ARCH AUDIT_ARCH_X86_64
17
18#define PT_REGS_RBX(r) UPT_RBX(&(r)->regs)
19#define PT_REGS_RCX(r) UPT_RCX(&(r)->regs)
20#define PT_REGS_RDX(r) UPT_RDX(&(r)->regs)
21#define PT_REGS_RSI(r) UPT_RSI(&(r)->regs)
22#define PT_REGS_RDI(r) UPT_RDI(&(r)->regs)
23#define PT_REGS_RBP(r) UPT_RBP(&(r)->regs)
24#define PT_REGS_RAX(r) UPT_RAX(&(r)->regs)
25#define PT_REGS_R8(r) UPT_R8(&(r)->regs)
26#define PT_REGS_R9(r) UPT_R9(&(r)->regs)
27#define PT_REGS_R10(r) UPT_R10(&(r)->regs)
28#define PT_REGS_R11(r) UPT_R11(&(r)->regs)
29#define PT_REGS_R12(r) UPT_R12(&(r)->regs)
30#define PT_REGS_R13(r) UPT_R13(&(r)->regs)
31#define PT_REGS_R14(r) UPT_R14(&(r)->regs)
32#define PT_REGS_R15(r) UPT_R15(&(r)->regs)
33
34#define PT_REGS_FS(r) UPT_FS(&(r)->regs)
35#define PT_REGS_GS(r) UPT_GS(&(r)->regs)
36#define PT_REGS_DS(r) UPT_DS(&(r)->regs)
37#define PT_REGS_ES(r) UPT_ES(&(r)->regs)
38#define PT_REGS_SS(r) UPT_SS(&(r)->regs)
39#define PT_REGS_CS(r) UPT_CS(&(r)->regs)
40
41#define PT_REGS_ORIG_RAX(r) UPT_ORIG_RAX(&(r)->regs)
42#define PT_REGS_RIP(r) UPT_IP(&(r)->regs)
43#define PT_REGS_RSP(r) UPT_SP(&(r)->regs)
44
45#define PT_REGS_EFLAGS(r) UPT_EFLAGS(&(r)->regs)
46
47/* XXX */
48#define user_mode(r) UPT_IS_USER(&(r)->regs)
49#define PT_REGS_ORIG_SYSCALL(r) PT_REGS_RAX(r)
50#define PT_REGS_SYSCALL_RET(r) PT_REGS_RAX(r)
51
52#define PT_FIX_EXEC_STACK(sp) do ; while(0)
53
54#define profile_pc(regs) PT_REGS_IP(regs)
55
56struct user_desc;
57
58static inline int ptrace_get_thread_area(struct task_struct *child, int idx,
59 struct user_desc __user *user_desc)
60{
61 return -ENOSYS;
62}
63
64static inline int ptrace_set_thread_area(struct task_struct *child, int idx,
65 struct user_desc __user *user_desc)
66{
67 return -ENOSYS;
68}
69
70extern long arch_prctl(struct task_struct *task, int code,
71 unsigned long __user *addr);
72#endif
diff --git a/arch/x86/um/asm/system.h b/arch/x86/um/asm/system.h
new file mode 100644
index 000000000000..a89113bc74f2
--- /dev/null
+++ b/arch/x86/um/asm/system.h
@@ -0,0 +1,133 @@
1#ifndef _ASM_X86_SYSTEM_H_
2#define _ASM_X86_SYSTEM_H_
3
4#include <asm/asm.h>
5#include <asm/segment.h>
6#include <asm/cpufeature.h>
7#include <asm/cmpxchg.h>
8#include <asm/nops.h>
9#include <asm/system-um.h>
10
11#include <linux/kernel.h>
12#include <linux/irqflags.h>
13
14/* entries in ARCH_DLINFO: */
15#ifdef CONFIG_IA32_EMULATION
16# define AT_VECTOR_SIZE_ARCH 2
17#else
18# define AT_VECTOR_SIZE_ARCH 1
19#endif
20
21extern unsigned long arch_align_stack(unsigned long sp);
22
23void default_idle(void);
24
25/*
26 * Force strict CPU ordering.
27 * And yes, this is required on UP too when we're talking
28 * to devices.
29 */
30#ifdef CONFIG_X86_32
31/*
32 * Some non-Intel clones support out of order store. wmb() ceases to be a
33 * nop for these.
34 */
35#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
36#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
37#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
38#else
39#define mb() asm volatile("mfence":::"memory")
40#define rmb() asm volatile("lfence":::"memory")
41#define wmb() asm volatile("sfence" ::: "memory")
42#endif
43
44/**
45 * read_barrier_depends - Flush all pending reads that subsequents reads
46 * depend on.
47 *
48 * No data-dependent reads from memory-like regions are ever reordered
49 * over this barrier. All reads preceding this primitive are guaranteed
50 * to access memory (but not necessarily other CPUs' caches) before any
51 * reads following this primitive that depend on the data return by
52 * any of the preceding reads. This primitive is much lighter weight than
53 * rmb() on most CPUs, and is never heavier weight than is
54 * rmb().
55 *
56 * These ordering constraints are respected by both the local CPU
57 * and the compiler.
58 *
59 * Ordering is not guaranteed by anything other than these primitives,
60 * not even by data dependencies. See the documentation for
61 * memory_barrier() for examples and URLs to more information.
62 *
63 * For example, the following code would force ordering (the initial
64 * value of "a" is zero, "b" is one, and "p" is "&a"):
65 *
66 * <programlisting>
67 * CPU 0 CPU 1
68 *
69 * b = 2;
70 * memory_barrier();
71 * p = &b; q = p;
72 * read_barrier_depends();
73 * d = *q;
74 * </programlisting>
75 *
76 * because the read of "*q" depends on the read of "p" and these
77 * two reads are separated by a read_barrier_depends(). However,
78 * the following code, with the same initial values for "a" and "b":
79 *
80 * <programlisting>
81 * CPU 0 CPU 1
82 *
83 * a = 2;
84 * memory_barrier();
85 * b = 3; y = b;
86 * read_barrier_depends();
87 * x = a;
88 * </programlisting>
89 *
90 * does not enforce ordering, since there is no data dependency between
91 * the read of "a" and the read of "b". Therefore, on some CPUs, such
92 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
93 * in cases like this where there are no data dependencies.
94 **/
95
96#define read_barrier_depends() do { } while (0)
97
98#ifdef CONFIG_SMP
99#define smp_mb() mb()
100#ifdef CONFIG_X86_PPRO_FENCE
101# define smp_rmb() rmb()
102#else
103# define smp_rmb() barrier()
104#endif
105#ifdef CONFIG_X86_OOSTORE
106# define smp_wmb() wmb()
107#else
108# define smp_wmb() barrier()
109#endif
110#define smp_read_barrier_depends() read_barrier_depends()
111#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
112#else
113#define smp_mb() barrier()
114#define smp_rmb() barrier()
115#define smp_wmb() barrier()
116#define smp_read_barrier_depends() do { } while (0)
117#define set_mb(var, value) do { var = value; barrier(); } while (0)
118#endif
119
120/*
121 * Stop RDTSC speculation. This is needed when you need to use RDTSC
122 * (or get_cycles or vread that possibly accesses the TSC) in a defined
123 * code region.
124 *
125 * (Could use an alternative three way for this if there was one.)
126 */
127static inline void rdtsc_barrier(void)
128{
129 alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
130 alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
131}
132
133#endif
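rdtsc_barrier() above exists because RDTSC can execute speculatively ahead of earlier instructions; the kernel patches in MFENCE or LFENCE depending on the CPU. A userspace analogue of the same idea -- illustration only, using compiler intrinsics instead of the alternative() machinery:

#include <stdint.h>
#include <x86intrin.h>		/* __rdtsc(), _mm_lfence() */

/* Keep the timestamp read from drifting ahead of preceding instructions. */
static inline uint64_t fenced_rdtsc(void)
{
	_mm_lfence();
	return __rdtsc();
}

/* Time a region; both edges are fenced so the reads stay put. */
uint64_t time_region(void (*fn)(void))
{
	uint64_t start = fenced_rdtsc();

	fn();
	return fenced_rdtsc() - start;
}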
diff --git a/arch/x86/um/asm/vm-flags.h b/arch/x86/um/asm/vm-flags.h
new file mode 100644
index 000000000000..7c297e9e2413
--- /dev/null
+++ b/arch/x86/um/asm/vm-flags.h
@@ -0,0 +1,25 @@
1/*
2 * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
3 * Copyright 2003 PathScale, Inc.
4 * Licensed under the GPL
5 */
6
7#ifndef __VM_FLAGS_X86_H
8#define __VM_FLAGS_X86_H
9
10#ifdef CONFIG_X86_32
11
12#define VM_DATA_DEFAULT_FLAGS \
13 (VM_READ | VM_WRITE | \
14 ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
15 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
16
17#else
18
19#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
20 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
21#define VM_STACK_DEFAULT_FLAGS (VM_GROWSDOWN | VM_READ | VM_WRITE | \
22 VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
23
24#endif
25#endif