aboutsummaryrefslogtreecommitdiffstats
path: root/arch/um/sys-i386
diff options
context:
space:
mode:
authorAl Viro <viro@zeniv.linux.org.uk>2008-08-18 21:44:32 -0400
committerH. Peter Anvin <hpa@zytor.com>2008-10-23 01:55:21 -0400
commitf5ad6a42b700d9687bb97cf461e7f2506e3006d6 (patch)
tree1bd92ee745ce1bdf459147dd9cbdb440c72cd9f8 /arch/um/sys-i386
parent17dcf75d3ea11d7e26110ba85677cfadbccecf45 (diff)
x86, um: get rid of sysdep symlink
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'arch/um/sys-i386')
-rw-r--r--arch/um/sys-i386/sysdep/archsetjmp.h22
-rw-r--r--arch/um/sys-i386/sysdep/barrier.h9
-rw-r--r--arch/um/sys-i386/sysdep/checksum.h211
-rw-r--r--arch/um/sys-i386/sysdep/faultinfo.h29
-rw-r--r--arch/um/sys-i386/sysdep/host_ldt.h34
-rw-r--r--arch/um/sys-i386/sysdep/kernel-offsets.h21
-rw-r--r--arch/um/sys-i386/sysdep/ptrace.h171
-rw-r--r--arch/um/sys-i386/sysdep/ptrace_user.h50
-rw-r--r--arch/um/sys-i386/sysdep/sc.h44
-rw-r--r--arch/um/sys-i386/sysdep/sigcontext.h26
-rw-r--r--arch/um/sys-i386/sysdep/skas_ptrace.h22
-rw-r--r--arch/um/sys-i386/sysdep/stub.h102
-rw-r--r--arch/um/sys-i386/sysdep/syscalls.h26
-rw-r--r--arch/um/sys-i386/sysdep/system.h132
-rw-r--r--arch/um/sys-i386/sysdep/tls.h32
-rw-r--r--arch/um/sys-i386/sysdep/vm-flags.h14
16 files changed, 945 insertions, 0 deletions
diff --git a/arch/um/sys-i386/sysdep/archsetjmp.h b/arch/um/sys-i386/sysdep/archsetjmp.h
new file mode 100644
index 00000000000..0f312085ce1
--- /dev/null
+++ b/arch/um/sys-i386/sysdep/archsetjmp.h
@@ -0,0 +1,22 @@
/*
 * arch/um/include/sysdep-i386/archsetjmp.h
 *
 * Register save area used by UML's klibc-derived setjmp/longjmp on
 * i386: the callee-saved registers plus the stack and instruction
 * pointers.
 */

#ifndef _KLIBC_ARCHSETJMP_H
#define _KLIBC_ARCHSETJMP_H

struct __jmp_buf {
	unsigned int __ebx;
	unsigned int __esp;
	unsigned int __ebp;
	unsigned int __esi;
	unsigned int __edi;
	unsigned int __eip;
};

typedef struct __jmp_buf jmp_buf[1];

/* Field names generic code uses to reach the saved IP and SP. */
#define JB_IP __eip
#define JB_SP __esp

/* Fixed: the closing comment used to say _SETJMP_H, which is not the
 * guard defined above. */
#endif /* _KLIBC_ARCHSETJMP_H */
diff --git a/arch/um/sys-i386/sysdep/barrier.h b/arch/um/sys-i386/sysdep/barrier.h
new file mode 100644
index 00000000000..b58d52c5b2f
--- /dev/null
+++ b/arch/um/sys-i386/sysdep/barrier.h
@@ -0,0 +1,9 @@
#ifndef __SYSDEP_I386_BARRIER_H
#define __SYSDEP_I386_BARRIER_H

/*
 * Full memory barrier for userspace code, copied from include/asm-i386.
 * i386 has the option of using mfence where the CPU supports it, but a
 * locked add to the top of the stack orders memory accesses on every
 * x86, so that is used unconditionally for now.
 */
#define mb() asm volatile("lock; addl $0,0(%esp)")

#endif
diff --git a/arch/um/sys-i386/sysdep/checksum.h b/arch/um/sys-i386/sysdep/checksum.h
new file mode 100644
index 00000000000..0cb4645cbeb
--- /dev/null
+++ b/arch/um/sys-i386/sysdep/checksum.h
@@ -0,0 +1,211 @@
/*
 * Licensed under the GPL
 */

#ifndef __UM_SYSDEP_CHECKSUM_H
#define __UM_SYSDEP_CHECKSUM_H

#include "linux/in6.h"
#include "linux/string.h"

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 *
 * Declaration only; the implementation lives outside this header.
 */
__wsum csum_partial(const void *buff, int len, __wsum sum);
24
25/*
26 * Note: when you get a NULL pointer exception here this means someone
27 * passed in an incorrect kernel address to one of these functions.
28 *
29 * If you use these functions directly please don't forget the
30 * access_ok().
31 */
32
33static __inline__
34__wsum csum_partial_copy_nocheck(const void *src, void *dst,
35 int len, __wsum sum)
36{
37 memcpy(dst, src, len);
38 return csum_partial(dst, len, sum);
39}
40
41/*
42 * the same as csum_partial, but copies from src while it
43 * checksums, and handles user-space pointer exceptions correctly, when needed.
44 *
45 * here even more important to align src and dst on a 32-bit (or even
46 * better 64-bit) boundary
47 */
48
49static __inline__
50__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
51 int len, __wsum sum, int *err_ptr)
52{
53 if (copy_from_user(dst, src, len)) {
54 *err_ptr = -EFAULT;
55 return (__force __wsum)-1;
56 }
57
58 return csum_partial(dst, len, sum);
59}
60
/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.
 *
 * By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
 * Arnt Gulbrandsen.
 *
 * ihl is the header length in 32-bit words (at least 5 for a valid IP
 * header).  The words are summed with add-with-carry, the 32-bit sum is
 * folded to 16 bits, and the one's complement is returned.
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	unsigned int sum;

	__asm__ __volatile__(
	    "movl (%1), %0	;\n"		/* sum = first header word */
	    "subl $4, %2	;\n"		/* words left beyond the first four */
	    "jbe 2f		;\n"		/* ihl <= 4: bogus header, bail out */
	    "addl 4(%1), %0	;\n"
	    "adcl 8(%1), %0	;\n"
	    "adcl 12(%1), %0	;\n"
"1:	    adcl 16(%1), %0	;\n"		/* one extra word per loop pass */
	    "lea 4(%1), %1	;\n"
	    "decl %2	;\n"
	    "jne 1b		;\n"
	    "adcl $0, %0	;\n"		/* absorb the final carry */
	    "movl %0, %2	;\n"		/* fold the 32-bit sum to 16 bits */
	    "shrl $16, %0	;\n"
	    "addw %w2, %w0	;\n"
	    "adcl $0, %0	;\n"
	    "notl %0	;\n"			/* one's complement */
"2:	    ;\n"
	/* Since the input registers which are loaded with iph and ipl
	   are modified, we must also specify them as outputs, or gcc
	   will assume they contain their original values. */
	    : "=r" (sum), "=r" (iph), "=r" (ihl)
	    : "1" (iph), "2" (ihl)
	    : "memory");
	return (__force __sum16)sum;
}
98
/*
 * Fold a partial checksum
 */

static inline __sum16 csum_fold(__wsum sum)
{
	/*
	 * Add the two 16-bit halves of the 32-bit sum; the adcl with
	 * 0xffff absorbs the carry.  The folded checksum is the
	 * complement of the high half of the result.
	 */
	__asm__(
		"addl %1, %0		;\n"
		"adcl $0xffff, %0	;\n"
		: "=r" (sum)
		: "r" ((__force u32)sum << 16),
		  "0" ((__force u32)sum & 0xffff0000)
	);
	return (__force __sum16)(~(__force u32)sum >> 16);
}
114
/*
 * Accumulate the TCP/UDP pseudo-header (source/destination address,
 * length and protocol) into sum with add-with-carry, folding the final
 * carry back in.  len and proto are packed into a single 32-bit term;
 * the << 8 places them as they appear on the wire.
 */
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
					unsigned short len,
					unsigned short proto,
					__wsum sum)
{
	__asm__(
		"addl %1, %0	;\n"
		"adcl %2, %0	;\n"
		"adcl %3, %0	;\n"
		"adcl $0, %0	;\n"
		: "=r" (sum)
		: "g" (daddr), "g"(saddr), "g"((len + proto) << 8), "0"(sum));
	return sum;
}
129
130/*
131 * computes the checksum of the TCP/UDP pseudo-header
132 * returns a 16-bit checksum, already complemented
133 */
134static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
135 unsigned short len,
136 unsigned short proto,
137 __wsum sum)
138{
139 return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
140}
141
142/*
143 * this routine is used for miscellaneous IP-like checksums, mainly
144 * in icmp.c
145 */
146
147static inline __sum16 ip_compute_csum(const void *buff, int len)
148{
149 return csum_fold (csum_partial(buff, len, 0));
150}
151
#define _HAVE_ARCH_IPV6_CSUM
/*
 * IPv6 pseudo-header checksum: accumulate the four 32-bit words of each
 * 128-bit address, then the length and protocol (converted to network
 * byte order), absorbing the final carry before the 16-bit fold.
 */
static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
					  const struct in6_addr *daddr,
					  __u32 len, unsigned short proto,
					  __wsum sum)
{
	__asm__(
		"addl 0(%1), %0		;\n"
		"adcl 4(%1), %0		;\n"
		"adcl 8(%1), %0		;\n"
		"adcl 12(%1), %0	;\n"
		"adcl 0(%2), %0		;\n"
		"adcl 4(%2), %0		;\n"
		"adcl 8(%2), %0		;\n"
		"adcl 12(%2), %0	;\n"
		"adcl %3, %0	;\n"
		"adcl %4, %0	;\n"
		"adcl $0, %0	;\n"
		: "=&r" (sum)
		: "r" (saddr), "r" (daddr),
		  "r"(htonl(len)), "r"(htonl(proto)), "0"(sum));

	return csum_fold(sum);
}
176
177/*
178 * Copy and checksum to user
179 */
180#define HAVE_CSUM_COPY_USER
181static __inline__ __wsum csum_and_copy_to_user(const void *src,
182 void __user *dst,
183 int len, __wsum sum, int *err_ptr)
184{
185 if (access_ok(VERIFY_WRITE, dst, len)) {
186 if (copy_to_user(dst, src, len)) {
187 *err_ptr = -EFAULT;
188 return (__force __wsum)-1;
189 }
190
191 return csum_partial(src, len, sum);
192 }
193
194 if (len)
195 *err_ptr = -EFAULT;
196
197 return (__force __wsum)-1; /* invalid checksum */
198}
199
200#endif
201
202/*
203 * Overrides for Emacs so that we follow Linus's tabbing style.
204 * Emacs will notice this stuff at the end of the file and automatically
205 * adjust the settings for this buffer only. This must remain at the end
206 * of the file.
207 * ---------------------------------------------------------------------------
208 * Local variables:
209 * c-file-style: "linux"
210 * End:
211 */
diff --git a/arch/um/sys-i386/sysdep/faultinfo.h b/arch/um/sys-i386/sysdep/faultinfo.h
new file mode 100644
index 00000000000..db437cc373b
--- /dev/null
+++ b/arch/um/sys-i386/sysdep/faultinfo.h
@@ -0,0 +1,29 @@
/*
 * Copyright (C) 2004 Fujitsu Siemens Computers GmbH
 * Author: Bodo Stroesser <bstroesser@fujitsu-siemens.com>
 * Licensed under the GPL
 */

#ifndef __FAULTINFO_I386_H
#define __FAULTINFO_I386_H

/* this structure contains the full arch-specific faultinfo
 * from the traps.
 * On i386, ptrace_faultinfo unfortunately doesn't provide
 * all the info, since trap_no is missing.
 * All common elements are defined at the same position in
 * both structures, thus making it easy to copy the
 * contents without knowledge about the structure elements.
 */
struct faultinfo {
	int error_code;		/* in ptrace_faultinfo misleadingly called is_write */
	unsigned long cr2;	/* in ptrace_faultinfo called addr */
	int trap_no;		/* missing in ptrace_faultinfo */
};

/* Bit 1 of the x86 page-fault error code is set for write accesses. */
#define FAULT_WRITE(fi) ((fi).error_code & 2)
#define FAULT_ADDRESS(fi) ((fi).cr2)

/* i386 ptrace cannot report the full fault info (trap_no is missing). */
#define PTRACE_FULL_FAULTINFO 0

#endif
diff --git a/arch/um/sys-i386/sysdep/host_ldt.h b/arch/um/sys-i386/sysdep/host_ldt.h
new file mode 100644
index 00000000000..0953cc4df65
--- /dev/null
+++ b/arch/um/sys-i386/sysdep/host_ldt.h
@@ -0,0 +1,34 @@
#ifndef __ASM_HOST_LDT_I386_H
#define __ASM_HOST_LDT_I386_H

#include <asm/ldt.h>

/*
 * macros stolen from include/asm-i386/desc.h
 *
 * They pack a user_desc-style LDT entry description into the low
 * (entry_a) and high (entry_b) 32-bit halves of an i386 segment
 * descriptor.
 */
#define LDT_entry_a(info) \
	((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))

#define LDT_entry_b(info) \
	(((info)->base_addr & 0xff000000) | \
	(((info)->base_addr & 0x00ff0000) >> 16) | \
	((info)->limit & 0xf0000) | \
	(((info)->read_exec_only ^ 1) << 9) | \
	((info)->contents << 10) | \
	(((info)->seg_not_present ^ 1) << 15) | \
	((info)->seg_32bit << 22) | \
	((info)->limit_in_pages << 23) | \
	((info)->useable << 20) | \
	0x7000)

/* True when the description denotes an empty (cleared) LDT entry. */
#define LDT_empty(info) (\
	(info)->base_addr == 0 && \
	(info)->limit == 0 && \
	(info)->contents == 0 && \
	(info)->read_exec_only == 1 && \
	(info)->seg_32bit == 0 && \
	(info)->limit_in_pages == 0 && \
	(info)->seg_not_present == 1 && \
	(info)->useable == 0 )

#endif
diff --git a/arch/um/sys-i386/sysdep/kernel-offsets.h b/arch/um/sys-i386/sysdep/kernel-offsets.h
new file mode 100644
index 00000000000..5868526b5ee
--- /dev/null
+++ b/arch/um/sys-i386/sysdep/kernel-offsets.h
@@ -0,0 +1,21 @@
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/elf.h>
#include <linux/crypto.h>
#include <asm/mman.h>

/*
 * Emit "->sym value" markers into the generated assembly; the build
 * scrapes them out of the .s file to produce a constants header (the
 * usual kernel asm-offsets technique).
 */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

#define STR(x) #x
#define DEFINE_STR(sym, val) asm volatile("\n->" #sym " " STR(val) " " #val: : )

/* Emit an empty marker line, used for visual grouping in the output. */
#define BLANK() asm volatile("\n->" : : )

#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem));

/* Gives the DEFINE() asm statements a function body to live in;
 * common-offsets.h supplies the actual entries. */
void foo(void)
{
#include <common-offsets.h>
}
diff --git a/arch/um/sys-i386/sysdep/ptrace.h b/arch/um/sys-i386/sysdep/ptrace.h
new file mode 100644
index 00000000000..11c08969d13
--- /dev/null
+++ b/arch/um/sys-i386/sysdep/ptrace.h
@@ -0,0 +1,171 @@
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#ifndef __SYSDEP_I386_PTRACE_H
#define __SYSDEP_I386_PTRACE_H

#include "uml-config.h"
#include "user_constants.h"
#include "sysdep/faultinfo.h"

/* Number of general registers in a UM_FRAME_SIZE-byte register frame. */
#define MAX_REG_NR (UM_FRAME_SIZE / sizeof(unsigned long))
#define MAX_REG_OFFSET (UM_FRAME_SIZE)

/* Empty on i386; kept so generic code can call it unconditionally. */
static inline void update_debugregs(int seq) {}

/* syscall emulation path in ptrace */

#ifndef PTRACE_SYSEMU
#define PTRACE_SYSEMU 31
#endif

void set_using_sysemu(int value);
int get_using_sysemu(void);
extern int sysemu_supported;

#include "skas_ptregs.h"

/* Accessors for single registers in a raw host register array; the
 * HOST_* indices come from skas_ptregs.h. */
#define REGS_IP(r) ((r)[HOST_IP])
#define REGS_SP(r) ((r)[HOST_SP])
#define REGS_EFLAGS(r) ((r)[HOST_EFLAGS])
#define REGS_EAX(r) ((r)[HOST_EAX])
#define REGS_EBX(r) ((r)[HOST_EBX])
#define REGS_ECX(r) ((r)[HOST_ECX])
#define REGS_EDX(r) ((r)[HOST_EDX])
#define REGS_ESI(r) ((r)[HOST_ESI])
#define REGS_EDI(r) ((r)[HOST_EDI])
#define REGS_EBP(r) ((r)[HOST_EBP])
#define REGS_CS(r) ((r)[HOST_CS])
#define REGS_SS(r) ((r)[HOST_SS])
#define REGS_DS(r) ((r)[HOST_DS])
#define REGS_ES(r) ((r)[HOST_ES])
#define REGS_FS(r) ((r)[HOST_FS])
#define REGS_GS(r) ((r)[HOST_GS])

/* On i386 a syscall's return value goes in EAX. */
#define REGS_SET_SYSCALL_RETURN(r, res) REGS_EAX(r) = (res)

#define REGS_RESTART_SYSCALL(r) IP_RESTART_SYSCALL(REGS_IP(r))

#ifndef PTRACE_SYSEMU_SINGLESTEP
#define PTRACE_SYSEMU_SINGLESTEP 32
#endif
54
/* UML's saved process state: the raw host registers plus the fault
 * information and syscall number captured at trap time. */
struct uml_pt_regs {
	unsigned long gp[MAX_REG_NR];	/* general-purpose registers, HOST_* indexed */
	struct faultinfo faultinfo;	/* see sysdep/faultinfo.h */
	long syscall;			/* syscall number (ORIG_EAX equivalent) */
	int is_user;			/* nonzero if saved from userspace */
};

#define EMPTY_UML_PT_REGS { }
63
/* UPT_* accessors operate on a struct uml_pt_regs, as opposed to the
 * REGS_* macros above, which take the raw register array. */
#define UPT_IP(r) REGS_IP((r)->gp)
#define UPT_SP(r) REGS_SP((r)->gp)
#define UPT_EFLAGS(r) REGS_EFLAGS((r)->gp)
#define UPT_EAX(r) REGS_EAX((r)->gp)
#define UPT_EBX(r) REGS_EBX((r)->gp)
#define UPT_ECX(r) REGS_ECX((r)->gp)
#define UPT_EDX(r) REGS_EDX((r)->gp)
#define UPT_ESI(r) REGS_ESI((r)->gp)
#define UPT_EDI(r) REGS_EDI((r)->gp)
#define UPT_EBP(r) REGS_EBP((r)->gp)
#define UPT_ORIG_EAX(r) ((r)->syscall)
#define UPT_CS(r) REGS_CS((r)->gp)
#define UPT_SS(r) REGS_SS((r)->gp)
#define UPT_DS(r) REGS_DS((r)->gp)
#define UPT_ES(r) REGS_ES((r)->gp)
#define UPT_FS(r) REGS_FS((r)->gp)
#define UPT_GS(r) REGS_GS((r)->gp)

/* i386 syscall arguments arrive in EBX, ECX, EDX, ESI, EDI, EBP. */
#define UPT_SYSCALL_ARG1(r) UPT_EBX(r)
#define UPT_SYSCALL_ARG2(r) UPT_ECX(r)
#define UPT_SYSCALL_ARG3(r) UPT_EDX(r)
#define UPT_SYSCALL_ARG4(r) UPT_ESI(r)
#define UPT_SYSCALL_ARG5(r) UPT_EDI(r)
#define UPT_SYSCALL_ARG6(r) UPT_EBP(r)

extern int user_context(unsigned long sp);

#define UPT_IS_USER(r) ((r)->is_user)

struct syscall_args {
	unsigned long args[6];
};

/* Gather all six syscall arguments into a struct syscall_args value. */
#define SYSCALL_ARGS(r) ((struct syscall_args) \
		{ .args = { UPT_SYSCALL_ARG1(r), \
			    UPT_SYSCALL_ARG2(r), \
			    UPT_SYSCALL_ARG3(r), \
			    UPT_SYSCALL_ARG4(r), \
			    UPT_SYSCALL_ARG5(r), \
			    UPT_SYSCALL_ARG6(r) } } )
104
/* Read one saved register, selected by its ptrace register index;
 * panics on an unrecognized index. */
#define UPT_REG(regs, reg) \
	({	unsigned long val; \
		switch(reg){ \
		case EIP: val = UPT_IP(regs); break; \
		case UESP: val = UPT_SP(regs); break; \
		case EAX: val = UPT_EAX(regs); break; \
		case EBX: val = UPT_EBX(regs); break; \
		case ECX: val = UPT_ECX(regs); break; \
		case EDX: val = UPT_EDX(regs); break; \
		case ESI: val = UPT_ESI(regs); break; \
		case EDI: val = UPT_EDI(regs); break; \
		case EBP: val = UPT_EBP(regs); break; \
		case ORIG_EAX: val = UPT_ORIG_EAX(regs); break; \
		case CS: val = UPT_CS(regs); break; \
		case SS: val = UPT_SS(regs); break; \
		case DS: val = UPT_DS(regs); break; \
		case ES: val = UPT_ES(regs); break; \
		case FS: val = UPT_FS(regs); break; \
		case GS: val = UPT_GS(regs); break; \
		case EFL: val = UPT_EFLAGS(regs); break; \
		default :  \
			panic("Bad register in UPT_REG : %d\n", reg);  \
			val = -1; \
		} \
	        val; \
	})
131
/* Write one saved register, selected by its ptrace register index;
 * panics on an unrecognized index. */
#define UPT_SET(regs, reg, val) \
	do { \
		switch(reg){ \
		case EIP: UPT_IP(regs) = val; break; \
		case UESP: UPT_SP(regs) = val; break; \
		case EAX: UPT_EAX(regs) = val; break; \
		case EBX: UPT_EBX(regs) = val; break; \
		case ECX: UPT_ECX(regs) = val; break; \
		case EDX: UPT_EDX(regs) = val; break; \
		case ESI: UPT_ESI(regs) = val; break; \
		case EDI: UPT_EDI(regs) = val; break; \
		case EBP: UPT_EBP(regs) = val; break; \
		case ORIG_EAX: UPT_ORIG_EAX(regs) = val; break; \
		case CS: UPT_CS(regs) = val; break; \
		case SS: UPT_SS(regs) = val; break; \
		case DS: UPT_DS(regs) = val; break; \
		case ES: UPT_ES(regs) = val; break; \
		case FS: UPT_FS(regs) = val; break; \
		case GS: UPT_GS(regs) = val; break; \
		case EFL: UPT_EFLAGS(regs) = val; break; \
		default :  \
			panic("Bad register in UPT_SET : %d\n", reg);  \
			break; \
		} \
	} while (0)
157
158#define UPT_SET_SYSCALL_RETURN(r, res) \
159 REGS_SET_SYSCALL_RETURN((r)->regs, (res))
160
161#define UPT_RESTART_SYSCALL(r) REGS_RESTART_SYSCALL((r)->gp)
162
163#define UPT_ORIG_SYSCALL(r) UPT_EAX(r)
164#define UPT_SYSCALL_NR(r) UPT_ORIG_EAX(r)
165#define UPT_SYSCALL_RET(r) UPT_EAX(r)
166
167#define UPT_FAULTINFO(r) (&(r)->faultinfo)
168
169extern void arch_init_registers(int pid);
170
171#endif
diff --git a/arch/um/sys-i386/sysdep/ptrace_user.h b/arch/um/sys-i386/sysdep/ptrace_user.h
new file mode 100644
index 00000000000..ef56247e414
--- /dev/null
+++ b/arch/um/sys-i386/sysdep/ptrace_user.h
@@ -0,0 +1,50 @@
/*
 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#ifndef __SYSDEP_I386_PTRACE_USER_H__
#define __SYSDEP_I386_PTRACE_USER_H__

#include <sys/ptrace.h>
#include <linux/ptrace.h>
#include <asm/ptrace.h>
#include "user_constants.h"

/* Byte offset of ptrace register index r within the register frame. */
#define PT_OFFSET(r) ((r) * sizeof(long))

/* The syscall number is read from ORIG_EAX, since EAX will carry the
 * return value (see PT_SYSCALL_RET_OFFSET below). */
#define PT_SYSCALL_NR(regs) ((regs)[ORIG_EAX])
#define PT_SYSCALL_NR_OFFSET PT_OFFSET(ORIG_EAX)

/* i386 syscall argument registers, as byte offsets into the frame. */
#define PT_SYSCALL_ARG1_OFFSET PT_OFFSET(EBX)
#define PT_SYSCALL_ARG2_OFFSET PT_OFFSET(ECX)
#define PT_SYSCALL_ARG3_OFFSET PT_OFFSET(EDX)
#define PT_SYSCALL_ARG4_OFFSET PT_OFFSET(ESI)
#define PT_SYSCALL_ARG5_OFFSET PT_OFFSET(EDI)
#define PT_SYSCALL_ARG6_OFFSET PT_OFFSET(EBP)

#define PT_SYSCALL_RET_OFFSET PT_OFFSET(EAX)

#define REGS_SYSCALL_NR EAX /* This is used before a system call */
#define REGS_SYSCALL_ARG1 EBX
#define REGS_SYSCALL_ARG2 ECX
#define REGS_SYSCALL_ARG3 EDX
#define REGS_SYSCALL_ARG4 ESI
#define REGS_SYSCALL_ARG5 EDI
#define REGS_SYSCALL_ARG6 EBP

#define REGS_IP_INDEX EIP
#define REGS_SP_INDEX UESP

#define PT_IP_OFFSET PT_OFFSET(EIP)
#define PT_IP(regs) ((regs)[EIP])
#define PT_SP_OFFSET PT_OFFSET(UESP)
#define PT_SP(regs) ((regs)[UESP])

/* The FP save buffer must hold whichever of the FXSR and classic FP
 * layouts is larger. */
#define FP_SIZE ((HOST_FPX_SIZE > HOST_FP_SIZE) ? HOST_FPX_SIZE : HOST_FP_SIZE)

#ifndef FRAME_SIZE
#define FRAME_SIZE (17)
#endif

#endif
diff --git a/arch/um/sys-i386/sysdep/sc.h b/arch/um/sys-i386/sysdep/sc.h
new file mode 100644
index 00000000000..c57d1780ad3
--- /dev/null
+++ b/arch/um/sys-i386/sysdep/sc.h
@@ -0,0 +1,44 @@
#ifndef __SYSDEP_I386_SC_H
#define __SYSDEP_I386_SC_H

#include <user_constants.h>

/* Access a sigcontext field as an unsigned long lvalue, given its byte
 * offset constant (HOST_SC_*) from user_constants.h.  SC_FP_OFFSET does
 * the same within the FP state block the sigcontext points to, and
 * SC_FP_OFFSET_PTR yields a typed pointer into it instead. */
#define SC_OFFSET(sc, field) \
	*((unsigned long *) &(((char *) (sc))[HOST_##field]))
#define SC_FP_OFFSET(sc, field) \
	*((unsigned long *) &(((char *) (SC_FPSTATE(sc)))[HOST_##field]))
#define SC_FP_OFFSET_PTR(sc, field, type) \
	((type *) &(((char *) (SC_FPSTATE(sc)))[HOST_##field]))

#define SC_IP(sc) SC_OFFSET(sc, SC_IP)
#define SC_SP(sc) SC_OFFSET(sc, SC_SP)
#define SC_FS(sc) SC_OFFSET(sc, SC_FS)
#define SC_GS(sc) SC_OFFSET(sc, SC_GS)
#define SC_DS(sc) SC_OFFSET(sc, SC_DS)
#define SC_ES(sc) SC_OFFSET(sc, SC_ES)
#define SC_SS(sc) SC_OFFSET(sc, SC_SS)
#define SC_CS(sc) SC_OFFSET(sc, SC_CS)
#define SC_EFLAGS(sc) SC_OFFSET(sc, SC_EFLAGS)
#define SC_EAX(sc) SC_OFFSET(sc, SC_EAX)
#define SC_EBX(sc) SC_OFFSET(sc, SC_EBX)
#define SC_ECX(sc) SC_OFFSET(sc, SC_ECX)
#define SC_EDX(sc) SC_OFFSET(sc, SC_EDX)
#define SC_EDI(sc) SC_OFFSET(sc, SC_EDI)
#define SC_ESI(sc) SC_OFFSET(sc, SC_ESI)
#define SC_EBP(sc) SC_OFFSET(sc, SC_EBP)
#define SC_TRAPNO(sc) SC_OFFSET(sc, SC_TRAPNO)
#define SC_ERR(sc) SC_OFFSET(sc, SC_ERR)
#define SC_CR2(sc) SC_OFFSET(sc, SC_CR2)
#define SC_FPSTATE(sc) SC_OFFSET(sc, SC_FPSTATE)
#define SC_SIGMASK(sc) SC_OFFSET(sc, SC_SIGMASK)
#define SC_FP_CW(sc) SC_FP_OFFSET(sc, SC_FP_CW)
#define SC_FP_SW(sc) SC_FP_OFFSET(sc, SC_FP_SW)
#define SC_FP_TAG(sc) SC_FP_OFFSET(sc, SC_FP_TAG)
#define SC_FP_IPOFF(sc) SC_FP_OFFSET(sc, SC_FP_IPOFF)
#define SC_FP_CSSEL(sc) SC_FP_OFFSET(sc, SC_FP_CSSEL)
#define SC_FP_DATAOFF(sc) SC_FP_OFFSET(sc, SC_FP_DATAOFF)
#define SC_FP_DATASEL(sc) SC_FP_OFFSET(sc, SC_FP_DATASEL)
#define SC_FP_ST(sc) SC_FP_OFFSET_PTR(sc, SC_FP_ST, struct _fpstate)
#define SC_FXSR_ENV(sc) SC_FP_OFFSET_PTR(sc, SC_FXSR_ENV, void)

#endif
diff --git a/arch/um/sys-i386/sysdep/sigcontext.h b/arch/um/sys-i386/sysdep/sigcontext.h
new file mode 100644
index 00000000000..f583c87111a
--- /dev/null
+++ b/arch/um/sys-i386/sysdep/sigcontext.h
@@ -0,0 +1,26 @@
1/*
2 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __SYS_SIGCONTEXT_I386_H
7#define __SYS_SIGCONTEXT_I386_H
8
9#include "sysdep/sc.h"
10
11#define IP_RESTART_SYSCALL(ip) ((ip) -= 2)
12
13#define GET_FAULTINFO_FROM_SC(fi, sc) \
14 { \
15 (fi).cr2 = SC_CR2(sc); \
16 (fi).error_code = SC_ERR(sc); \
17 (fi).trap_no = SC_TRAPNO(sc); \
18 }
19
20/* This is Page Fault */
21#define SEGV_IS_FIXABLE(fi) ((fi)->trap_no == 14)
22
23/* SKAS3 has no trap_no on i386, but get_skas_faultinfo() sets it to 0. */
24#define SEGV_MAYBE_FIXABLE(fi) ((fi)->trap_no == 0 && ptrace_faultinfo)
25
26#endif
diff --git a/arch/um/sys-i386/sysdep/skas_ptrace.h b/arch/um/sys-i386/sysdep/skas_ptrace.h
new file mode 100644
index 00000000000..e27b8a79177
--- /dev/null
+++ b/arch/um/sys-i386/sysdep/skas_ptrace.h
@@ -0,0 +1,22 @@
/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#ifndef __SYSDEP_I386_SKAS_PTRACE_H
#define __SYSDEP_I386_SKAS_PTRACE_H

/* Fault description returned by the SKAS host-ptrace extension; the
 * fields line up with the start of struct faultinfo (see faultinfo.h). */
struct ptrace_faultinfo {
	int is_write;
	unsigned long addr;
};

/* Argument block for the PTRACE_LDT request defined below. */
struct ptrace_ldt {
	int func;
	void *ptr;
	unsigned long bytecount;
};

#define PTRACE_LDT 54

#endif
diff --git a/arch/um/sys-i386/sysdep/stub.h b/arch/um/sys-i386/sysdep/stub.h
new file mode 100644
index 00000000000..8c097b87fca
--- /dev/null
+++ b/arch/um/sys-i386/sysdep/stub.h
@@ -0,0 +1,102 @@
/*
 * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
 * Licensed under the GPL
 */

#ifndef __SYSDEP_STUB_H
#define __SYSDEP_STUB_H

#include <sys/mman.h>
#include <asm/ptrace.h>
#include <asm/unistd.h>
#include "as-layout.h"
#include "stub-data.h"
#include "kern_constants.h"
#include "uml-config.h"

extern void stub_segv_handler(int sig);
extern void stub_clone_handler(void);

/* Host syscall results come back in EAX; mmap is done through mmap2,
 * whose offset argument counts pages, hence the page-shift below. */
#define STUB_SYSCALL_RET EAX
#define STUB_MMAP_NR __NR_mmap2
#define MMAP_OFFSET(o) ((o) >> UM_KERN_PAGE_SHIFT)
23
/* Raw int $0x80 host syscall with no arguments; the syscall number goes
 * in EAX and the raw result comes back in EAX. */
static inline long stub_syscall0(long syscall)
{
	long ret;

	__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall));

	return ret;
}

/* One-argument host syscall: argument in EBX, per the i386 syscall ABI. */
static inline long stub_syscall1(long syscall, long arg1)
{
	long ret;

	__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1));

	return ret;
}

/* Two arguments: EBX, ECX. */
static inline long stub_syscall2(long syscall, long arg1, long arg2)
{
	long ret;

	__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
			  "c" (arg2));

	return ret;
}

/* Three arguments: EBX, ECX, EDX. */
static inline long stub_syscall3(long syscall, long arg1, long arg2, long arg3)
{
	long ret;

	__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
			  "c" (arg2), "d" (arg3));

	return ret;
}

/* Four arguments: EBX, ECX, EDX, ESI. */
static inline long stub_syscall4(long syscall, long arg1, long arg2, long arg3,
				 long arg4)
{
	long ret;

	__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
			  "c" (arg2), "d" (arg3), "S" (arg4));

	return ret;
}

/* Five arguments: EBX, ECX, EDX, ESI, EDI. */
static inline long stub_syscall5(long syscall, long arg1, long arg2, long arg3,
				 long arg4, long arg5)
{
	long ret;

	__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
			  "c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5));

	return ret;
}
83
/* Raise a breakpoint trap (int3) in the current process. */
static inline void trap_myself(void)
{
	__asm("int3");
}
88
/* Remap the stub data page from fd at the given page offset via mmap2,
 * then store the syscall result into the stub_data err field so it can
 * be inspected afterwards.  The asm first moves the offset from EAX to
 * EBP (mmap2's sixth argument register) and loads the syscall number
 * into EAX before issuing int $0x80. */
static inline void remap_stack(int fd, unsigned long offset)
{
	__asm__ volatile ("movl %%eax,%%ebp ; movl %0,%%eax ; int $0x80 ;"
			  "movl %7, %%ebx ; movl %%eax, (%%ebx)"
			  : : "g" (STUB_MMAP_NR), "b" (STUB_DATA),
			    "c" (UM_KERN_PAGE_SIZE),
			    "d" (PROT_READ | PROT_WRITE),
			    "S" (MAP_FIXED | MAP_SHARED), "D" (fd),
			    "a" (offset),
			    "i" (&((struct stub_data *) STUB_DATA)->err)
			  : "memory");
}

#endif
diff --git a/arch/um/sys-i386/sysdep/syscalls.h b/arch/um/sys-i386/sysdep/syscalls.h
new file mode 100644
index 00000000000..905698197e3
--- /dev/null
+++ b/arch/um/sys-i386/sysdep/syscalls.h
@@ -0,0 +1,26 @@
/*
 * Copyright (C) 2000 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include "asm/unistd.h"
#include "sysdep/ptrace.h"

/* Handlers are typed here as taking a struct pt_regs by value. */
typedef long syscall_handler_t(struct pt_regs);

/* Not declared on x86, incompatible declarations on x86_64, so these have
 * to go here rather than in sys_call_table.c
 */
extern syscall_handler_t sys_rt_sigaction;

extern syscall_handler_t old_mmap_i386;

extern syscall_handler_t *sys_call_table[];

/* Dispatch a syscall: look the handler up in sys_call_table and call it
 * with the six arguments gathered from the saved registers. */
#define EXECUTE_SYSCALL(syscall, regs) \
	((long (*)(struct syscall_args)) \
	 (*sys_call_table[syscall]))(SYSCALL_ARGS(&regs->regs))

extern long sys_mmap2(unsigned long addr, unsigned long len,
		      unsigned long prot, unsigned long flags,
		      unsigned long fd, unsigned long pgoff);
diff --git a/arch/um/sys-i386/sysdep/system.h b/arch/um/sys-i386/sysdep/system.h
new file mode 100644
index 00000000000..d1b93c43620
--- /dev/null
+++ b/arch/um/sys-i386/sysdep/system.h
@@ -0,0 +1,132 @@
#ifndef _ASM_X86_SYSTEM_H_
#define _ASM_X86_SYSTEM_H_

#include <asm/asm.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/cmpxchg.h>
#include <asm/nops.h>

#include <linux/kernel.h>
#include <linux/irqflags.h>

/* entries in ARCH_DLINFO: */
#ifdef CONFIG_IA32_EMULATION
# define AT_VECTOR_SIZE_ARCH 2
#else
# define AT_VECTOR_SIZE_ARCH 1
#endif

extern unsigned long arch_align_stack(unsigned long sp);

void default_idle(void);

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#ifdef CONFIG_X86_32
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.  alternative() patches in the fence instruction when
 * the corresponding CPU feature is present.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
/* 64-bit CPUs always have the fence instructions. */
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
#define wmb() asm volatile("sfence" ::: "memory")
#endif
42
/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier. All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads. This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies. See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends(). However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b". Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
 * in cases like this where there are no data dependencies.
 **/

/* x86 honors data dependencies in hardware, so this is a no-op here. */
#define read_barrier_depends()	do { } while (0)
96
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb()	rmb()
#else
# define smp_rmb()	barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb() 	wmb()
#else
# define smp_wmb()	barrier()
#endif
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else
/* On UP builds the smp_* barriers only need to constrain the compiler. */
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
118
/*
 * Stop RDTSC speculation. This is needed when you need to use RDTSC
 * (or get_cycles or vread that possibly accesses the TSC) in a defined
 * code region.
 *
 * (Could use an alternative three way for this if there was one.)
 */
static inline void rdtsc_barrier(void)
{
	/* alternative() replaces the nops with whichever fence the
	 * running CPU advertises for serializing RDTSC. */
	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}

#endif
diff --git a/arch/um/sys-i386/sysdep/tls.h b/arch/um/sys-i386/sysdep/tls.h
new file mode 100644
index 00000000000..34550755b2a
--- /dev/null
+++ b/arch/um/sys-i386/sysdep/tls.h
@@ -0,0 +1,32 @@
#ifndef _SYSDEP_TLS_H
#define _SYSDEP_TLS_H

# ifndef __KERNEL__

/* Change name to avoid conflicts with the original one from <asm/ldt.h>, which
 * may be named user_desc (but in 2.4 and in header matching its API was named
 * modify_ldt_ldt_s). */

typedef struct um_dup_user_desc {
	unsigned int entry_number;
	unsigned int base_addr;
	unsigned int limit;
	unsigned int seg_32bit:1;
	unsigned int contents:2;
	unsigned int read_exec_only:1;
	unsigned int limit_in_pages:1;
	unsigned int seg_not_present:1;
	unsigned int useable:1;
} user_desc_t;

# else /* __KERNEL__ */

#  include <ldt.h>
typedef struct user_desc user_desc_t;

# endif /* __KERNEL__ */

/* First GDT entry usable for TLS segments on each host flavor. */
#define GDT_ENTRY_TLS_MIN_I386 6
#define GDT_ENTRY_TLS_MIN_X86_64 12

#endif /* _SYSDEP_TLS_H */
diff --git a/arch/um/sys-i386/sysdep/vm-flags.h b/arch/um/sys-i386/sysdep/vm-flags.h
new file mode 100644
index 00000000000..e0d24c568db
--- /dev/null
+++ b/arch/um/sys-i386/sysdep/vm-flags.h
@@ -0,0 +1,14 @@
/*
 * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
 * Licensed under the GPL
 */

#ifndef __VM_FLAGS_I386_H
#define __VM_FLAGS_I386_H

/* Default protection for data mappings: readable and writable always,
 * executable only when the personality requests READ_IMPLIES_EXEC. */
#define VM_DATA_DEFAULT_FLAGS \
	(VM_READ | VM_WRITE | \
	 ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
	 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#endif