author     Thomas Gleixner <tglx@linutronix.de>  2007-10-11 05:17:21 -0400
committer  Thomas Gleixner <tglx@linutronix.de>  2007-10-11 05:17:21 -0400
commit     2db55d344e529492545cb3b755c7e9ba8e4fa94e (patch)
tree       fea163affb8eec3d927acb498cb3aa1b13c053b8 /arch/x86/ia32
parent     33fc6d51336046bd6e8c7d1a42faff881fa6fb45 (diff)
x86_64: move ia32
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/ia32')
-rw-r--r--  arch/x86/ia32/Makefile              |  35
-rw-r--r--  arch/x86/ia32/audit.c               |  42
-rw-r--r--  arch/x86/ia32/fpu32.c               | 183
-rw-r--r--  arch/x86/ia32/ia32_aout.c           | 528
-rw-r--r--  arch/x86/ia32/ia32_binfmt.c         | 320
-rw-r--r--  arch/x86/ia32/ia32_signal.c         | 617
-rw-r--r--  arch/x86/ia32/ia32entry.S           | 736
-rw-r--r--  arch/x86/ia32/ipc32.c               |  57
-rw-r--r--  arch/x86/ia32/mmap32.c              |  79
-rw-r--r--  arch/x86/ia32/ptrace32.c            | 404
-rw-r--r--  arch/x86/ia32/sys_ia32.c            | 889
-rw-r--r--  arch/x86/ia32/syscall32.c           |  83
-rw-r--r--  arch/x86/ia32/syscall32_syscall.S   |  17
-rw-r--r--  arch/x86/ia32/tls32.c               | 163
-rw-r--r--  arch/x86/ia32/vsyscall-sigreturn.S  | 143
-rw-r--r--  arch/x86/ia32/vsyscall-syscall.S    |  69
-rw-r--r--  arch/x86/ia32/vsyscall-sysenter.S   |  95
-rw-r--r--  arch/x86/ia32/vsyscall.lds          |  80
18 files changed, 4540 insertions, 0 deletions
diff --git a/arch/x86/ia32/Makefile b/arch/x86/ia32/Makefile
new file mode 100644
index 000000000000..cdae36435e21
--- /dev/null
+++ b/arch/x86/ia32/Makefile
@@ -0,0 +1,35 @@
1#
2# Makefile for the ia32 kernel emulation subsystem.
3#
4
5obj-$(CONFIG_IA32_EMULATION) := ia32entry.o sys_ia32.o ia32_signal.o tls32.o \
6 ia32_binfmt.o fpu32.o ptrace32.o syscall32.o syscall32_syscall.o \
7 mmap32.o
8
9sysv-$(CONFIG_SYSVIPC) := ipc32.o
10obj-$(CONFIG_IA32_EMULATION) += $(sysv-y)
11
12obj-$(CONFIG_IA32_AOUT) += ia32_aout.o
13
14audit-class-$(CONFIG_AUDIT) := audit.o
15obj-$(CONFIG_IA32_EMULATION) += $(audit-class-y)
16
17$(obj)/syscall32_syscall.o: \
18 $(foreach F,sysenter syscall,$(obj)/vsyscall-$F.so)
19
20# Teach kbuild about targets
21targets := $(foreach F,sysenter syscall,vsyscall-$F.o vsyscall-$F.so)
22
23# The DSO images are built using a special linker script
24quiet_cmd_syscall = SYSCALL $@
25 cmd_syscall = $(CC) -m32 -nostdlib -shared -s \
26 $(call ld-option, -Wl$(comma)--hash-style=sysv) \
27 -Wl,-soname=linux-gate.so.1 -o $@ \
28 -Wl,-T,$(filter-out FORCE,$^)
29
30$(obj)/vsyscall-sysenter.so $(obj)/vsyscall-syscall.so: \
31$(obj)/vsyscall-%.so: $(src)/vsyscall.lds $(obj)/vsyscall-%.o FORCE
32 $(call if_changed,syscall)
33
34AFLAGS_vsyscall-sysenter.o = -m32 -Wa,-32
35AFLAGS_vsyscall-syscall.o = -m32 -Wa,-32
diff --git a/arch/x86/ia32/audit.c b/arch/x86/ia32/audit.c
new file mode 100644
index 000000000000..8850fe40ea34
--- /dev/null
+++ b/arch/x86/ia32/audit.c
@@ -0,0 +1,42 @@
1#include <asm-i386/unistd.h>
2
3unsigned ia32_dir_class[] = {
4#include <asm-generic/audit_dir_write.h>
5~0U
6};
7
8unsigned ia32_chattr_class[] = {
9#include <asm-generic/audit_change_attr.h>
10~0U
11};
12
13unsigned ia32_write_class[] = {
14#include <asm-generic/audit_write.h>
15~0U
16};
17
18unsigned ia32_read_class[] = {
19#include <asm-generic/audit_read.h>
20~0U
21};
22
23unsigned ia32_signal_class[] = {
24#include <asm-generic/audit_signal.h>
25~0U
26};
27
28int ia32_classify_syscall(unsigned syscall)
29{
30 switch(syscall) {
31 case __NR_open:
32 return 2;
33 case __NR_openat:
34 return 3;
35 case __NR_socketcall:
36 return 4;
37 case __NR_execve:
38 return 5;
39 default:
40 return 1;
41 }
42}
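
The tables above are open-ended arrays terminated by ~0U, and ia32_classify_syscall() only maps a 32-bit syscall number to a class index. As a rough illustration of how a ~0U-terminated class table can be consumed, here is a minimal user-space sketch; the helper in_class() and the sample numbers are made up for this example and are not part of the patch.

#include <stdio.h>

/* Hypothetical helper (not in the patch): returns 1 if nr occurs in a
 * ~0U-terminated class table like ia32_dir_class[] above. */
static int in_class(const unsigned *class, unsigned nr)
{
	while (*class != ~0U) {
		if (*class == nr)
			return 1;
		class++;
	}
	return 0;
}

int main(void)
{
	unsigned demo_class[] = { 5, 8, 295, ~0U };	/* made-up syscall numbers */

	printf("%d %d\n", in_class(demo_class, 8), in_class(demo_class, 42));
	return 0;
}
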
diff --git a/arch/x86/ia32/fpu32.c b/arch/x86/ia32/fpu32.c
new file mode 100644
index 000000000000..2c8209a3605a
--- /dev/null
+++ b/arch/x86/ia32/fpu32.c
@@ -0,0 +1,183 @@
1/*
2 * Copyright 2002 Andi Kleen, SuSE Labs.
3 * FXSAVE<->i387 conversion support. Based on code by Gareth Hughes.
4 * This is used for ptrace, signals and coredumps in 32bit emulation.
5 */
6
7#include <linux/sched.h>
8#include <asm/sigcontext32.h>
9#include <asm/processor.h>
10#include <asm/uaccess.h>
11#include <asm/i387.h>
12
13static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
14{
15 unsigned int tmp; /* to avoid 16 bit prefixes in the code */
16
17 /* Transform each pair of bits into 01 (valid) or 00 (empty) */
18 tmp = ~twd;
19 tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
20 /* and move the valid bits to the lower byte. */
21 tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
22 tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
23 tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
24 return tmp;
25}
26
27static inline unsigned long twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
28{
29 struct _fpxreg *st = NULL;
30 unsigned long tos = (fxsave->swd >> 11) & 7;
31 unsigned long twd = (unsigned long) fxsave->twd;
32 unsigned long tag;
33 unsigned long ret = 0xffff0000;
34 int i;
35
36#define FPREG_ADDR(f, n) ((void *)&(f)->st_space + (n) * 16);
37
38 for (i = 0 ; i < 8 ; i++) {
39 if (twd & 0x1) {
40 st = FPREG_ADDR( fxsave, (i - tos) & 7 );
41
42 switch (st->exponent & 0x7fff) {
43 case 0x7fff:
44 tag = 2; /* Special */
45 break;
46 case 0x0000:
47 if ( !st->significand[0] &&
48 !st->significand[1] &&
49 !st->significand[2] &&
50 !st->significand[3] ) {
51 tag = 1; /* Zero */
52 } else {
53 tag = 2; /* Special */
54 }
55 break;
56 default:
57 if (st->significand[3] & 0x8000) {
58 tag = 0; /* Valid */
59 } else {
60 tag = 2; /* Special */
61 }
62 break;
63 }
64 } else {
65 tag = 3; /* Empty */
66 }
67 ret |= (tag << (2 * i));
68 twd = twd >> 1;
69 }
70 return ret;
71}
72
73
74static inline int convert_fxsr_from_user(struct i387_fxsave_struct *fxsave,
75 struct _fpstate_ia32 __user *buf)
76{
77 struct _fpxreg *to;
78 struct _fpreg __user *from;
79 int i;
80 u32 v;
81 int err = 0;
82
83#define G(num,val) err |= __get_user(val, num + (u32 __user *)buf)
84 G(0, fxsave->cwd);
85 G(1, fxsave->swd);
86 G(2, fxsave->twd);
87 fxsave->twd = twd_i387_to_fxsr(fxsave->twd);
88 G(3, fxsave->rip);
89 G(4, v);
90 fxsave->fop = v>>16; /* cs ignored */
91 G(5, fxsave->rdp);
92 /* 6: ds ignored */
93#undef G
94 if (err)
95 return -1;
96
97 to = (struct _fpxreg *)&fxsave->st_space[0];
98 from = &buf->_st[0];
99 for (i = 0 ; i < 8 ; i++, to++, from++) {
100 if (__copy_from_user(to, from, sizeof(*from)))
101 return -1;
102 }
103 return 0;
104}
105
106
107static inline int convert_fxsr_to_user(struct _fpstate_ia32 __user *buf,
108 struct i387_fxsave_struct *fxsave,
109 struct pt_regs *regs,
110 struct task_struct *tsk)
111{
112 struct _fpreg __user *to;
113 struct _fpxreg *from;
114 int i;
115 u16 cs,ds;
116 int err = 0;
117
118 if (tsk == current) {
119 /* should be actually ds/cs at fpu exception time,
120 but that information is not available in 64bit mode. */
121 asm("movw %%ds,%0 " : "=r" (ds));
122 asm("movw %%cs,%0 " : "=r" (cs));
123 } else { /* ptrace. task has stopped. */
124 ds = tsk->thread.ds;
125 cs = regs->cs;
126 }
127
128#define P(num,val) err |= __put_user(val, num + (u32 __user *)buf)
129 P(0, (u32)fxsave->cwd | 0xffff0000);
130 P(1, (u32)fxsave->swd | 0xffff0000);
131 P(2, twd_fxsr_to_i387(fxsave));
132 P(3, (u32)fxsave->rip);
133 P(4, cs | ((u32)fxsave->fop) << 16);
134 P(5, fxsave->rdp);
135 P(6, 0xffff0000 | ds);
136#undef P
137
138 if (err)
139 return -1;
140
141 to = &buf->_st[0];
142 from = (struct _fpxreg *) &fxsave->st_space[0];
143 for ( i = 0 ; i < 8 ; i++, to++, from++ ) {
144 if (__copy_to_user(to, from, sizeof(*to)))
145 return -1;
146 }
147 return 0;
148}
149
150int restore_i387_ia32(struct task_struct *tsk, struct _fpstate_ia32 __user *buf, int fsave)
151{
152 clear_fpu(tsk);
153 if (!fsave) {
154 if (__copy_from_user(&tsk->thread.i387.fxsave,
155 &buf->_fxsr_env[0],
156 sizeof(struct i387_fxsave_struct)))
157 return -1;
158 tsk->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
159 set_stopped_child_used_math(tsk);
160 }
161 return convert_fxsr_from_user(&tsk->thread.i387.fxsave, buf);
162}
163
164int save_i387_ia32(struct task_struct *tsk,
165 struct _fpstate_ia32 __user *buf,
166 struct pt_regs *regs,
167 int fsave)
168{
169 int err = 0;
170
171 init_fpu(tsk);
172 if (convert_fxsr_to_user(buf, &tsk->thread.i387.fxsave, regs, tsk))
173 return -1;
174 if (fsave)
175 return 0;
176 err |= __put_user(tsk->thread.i387.fxsave.swd, &buf->status);
177 if (fsave)
178 return err ? -1 : 1;
179 err |= __put_user(X86_FXSR_MAGIC, &buf->magic);
180 err |= __copy_to_user(&buf->_fxsr_env[0], &tsk->thread.i387.fxsave,
181 sizeof(struct i387_fxsave_struct));
182 return err ? -1 : 1;
183}
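
For readers unfamiliar with the two tag-word formats: the legacy i387 FSAVE image keeps two tag bits per FP register (11 meaning empty), while FXSAVE keeps a single valid bit per register. The stand-alone sketch below reproduces the bit-twiddling of twd_i387_to_fxsr() above so the conversion can be tried in user space; the sample tag word is arbitrary and no kernel headers are needed.

#include <stdio.h>

/* Same bit-twiddling as twd_i387_to_fxsr() in fpu32.c above. */
static unsigned short twd_i387_to_fxsr(unsigned short twd)
{
	unsigned int tmp;

	tmp = ~twd;
	tmp = (tmp | (tmp >> 1)) & 0x5555;	/* 0V0V0V0V0V0V0V0V */
	tmp = (tmp | (tmp >> 1)) & 0x3333;	/* 00VV00VV00VV00VV */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f;	/* 0000VVVV0000VVVV */
	tmp = (tmp | (tmp >> 4)) & 0x00ff;	/* 00000000VVVVVVVV */
	return tmp;
}

int main(void)
{
	/* Sample i387 tag word: registers 0 and 3 tagged 00 (valid),
	 * all others tagged 11 (empty). */
	unsigned short twd = 0xffff & ~((3 << 0) | (3 << 6));

	printf("i387 twd 0x%04x -> fxsr twd 0x%02x\n",
	       (unsigned)twd, (unsigned)twd_i387_to_fxsr(twd));
	return 0;
}
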
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
new file mode 100644
index 000000000000..08781370256d
--- /dev/null
+++ b/arch/x86/ia32/ia32_aout.c
@@ -0,0 +1,528 @@
1/*
2 * a.out loader for x86-64
3 *
4 * Copyright (C) 1991, 1992, 1996 Linus Torvalds
5 * Hacked together by Andi Kleen
6 */
7
8#include <linux/module.h>
9
10#include <linux/time.h>
11#include <linux/kernel.h>
12#include <linux/mm.h>
13#include <linux/mman.h>
14#include <linux/a.out.h>
15#include <linux/errno.h>
16#include <linux/signal.h>
17#include <linux/string.h>
18#include <linux/fs.h>
19#include <linux/file.h>
20#include <linux/stat.h>
21#include <linux/fcntl.h>
22#include <linux/ptrace.h>
23#include <linux/user.h>
24#include <linux/slab.h>
25#include <linux/binfmts.h>
26#include <linux/personality.h>
27#include <linux/init.h>
28
29#include <asm/system.h>
30#include <asm/uaccess.h>
31#include <asm/pgalloc.h>
32#include <asm/cacheflush.h>
33#include <asm/user32.h>
34#include <asm/ia32.h>
35
36#undef WARN_OLD
37#undef CORE_DUMP /* probably broken */
38
39static int load_aout_binary(struct linux_binprm *, struct pt_regs * regs);
40static int load_aout_library(struct file*);
41
42#ifdef CORE_DUMP
43static int aout_core_dump(long signr, struct pt_regs * regs, struct file *file);
44
45/*
46 * fill in the user structure for a core dump..
47 */
48static void dump_thread32(struct pt_regs * regs, struct user32 * dump)
49{
50 u32 fs,gs;
51
52/* changed the size calculations - should hopefully work better. lbt */
53 dump->magic = CMAGIC;
54 dump->start_code = 0;
55 dump->start_stack = regs->rsp & ~(PAGE_SIZE - 1);
56 dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
57 dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
58 dump->u_dsize -= dump->u_tsize;
59 dump->u_ssize = 0;
60 dump->u_debugreg[0] = current->thread.debugreg0;
61 dump->u_debugreg[1] = current->thread.debugreg1;
62 dump->u_debugreg[2] = current->thread.debugreg2;
63 dump->u_debugreg[3] = current->thread.debugreg3;
64 dump->u_debugreg[4] = 0;
65 dump->u_debugreg[5] = 0;
66 dump->u_debugreg[6] = current->thread.debugreg6;
67 dump->u_debugreg[7] = current->thread.debugreg7;
68
69 if (dump->start_stack < 0xc0000000)
70 dump->u_ssize = ((unsigned long) (0xc0000000 - dump->start_stack)) >> PAGE_SHIFT;
71
72 dump->regs.ebx = regs->rbx;
73 dump->regs.ecx = regs->rcx;
74 dump->regs.edx = regs->rdx;
75 dump->regs.esi = regs->rsi;
76 dump->regs.edi = regs->rdi;
77 dump->regs.ebp = regs->rbp;
78 dump->regs.eax = regs->rax;
79 dump->regs.ds = current->thread.ds;
80 dump->regs.es = current->thread.es;
81 asm("movl %%fs,%0" : "=r" (fs)); dump->regs.fs = fs;
82 asm("movl %%gs,%0" : "=r" (gs)); dump->regs.gs = gs;
83 dump->regs.orig_eax = regs->orig_rax;
84 dump->regs.eip = regs->rip;
85 dump->regs.cs = regs->cs;
86 dump->regs.eflags = regs->eflags;
87 dump->regs.esp = regs->rsp;
88 dump->regs.ss = regs->ss;
89
90#if 1 /* FIXME */
91 dump->u_fpvalid = 0;
92#else
93 dump->u_fpvalid = dump_fpu (regs, &dump->i387);
94#endif
95}
96
97#endif
98
99static struct linux_binfmt aout_format = {
100 .module = THIS_MODULE,
101 .load_binary = load_aout_binary,
102 .load_shlib = load_aout_library,
103#ifdef CORE_DUMP
104 .core_dump = aout_core_dump,
105#endif
106 .min_coredump = PAGE_SIZE
107};
108
109static void set_brk(unsigned long start, unsigned long end)
110{
111 start = PAGE_ALIGN(start);
112 end = PAGE_ALIGN(end);
113 if (end <= start)
114 return;
115 down_write(&current->mm->mmap_sem);
116 do_brk(start, end - start);
117 up_write(&current->mm->mmap_sem);
118}
119
120#ifdef CORE_DUMP
121/*
122 * These are the only things you should do on a core-file: use only these
123 * macros to write out all the necessary info.
124 */
125
126static int dump_write(struct file *file, const void *addr, int nr)
127{
128 return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
129}
130
131#define DUMP_WRITE(addr, nr) \
132 if (!dump_write(file, (void *)(addr), (nr))) \
133 goto end_coredump;
134
135#define DUMP_SEEK(offset) \
136if (file->f_op->llseek) { \
137 if (file->f_op->llseek(file,(offset),0) != (offset)) \
138 goto end_coredump; \
139} else file->f_pos = (offset)
140
141/*
142 * Routine writes a core dump image in the current directory.
143 * Currently only a stub-function.
144 *
145 * Note that setuid/setgid files won't make a core-dump if the uid/gid
146 * changed due to the set[u|g]id. It's enforced by the "current->mm->dumpable"
147 * field, which also makes sure the core-dumps won't be recursive if the
148 * dumping of the process results in another error..
149 */
150
151static int aout_core_dump(long signr, struct pt_regs * regs, struct file *file)
152{
153 mm_segment_t fs;
154 int has_dumped = 0;
155 unsigned long dump_start, dump_size;
156 struct user32 dump;
157# define START_DATA(u) (u.u_tsize << PAGE_SHIFT)
158# define START_STACK(u) (u.start_stack)
159
160 fs = get_fs();
161 set_fs(KERNEL_DS);
162 has_dumped = 1;
163 current->flags |= PF_DUMPCORE;
164 strncpy(dump.u_comm, current->comm, sizeof(current->comm));
165 dump.u_ar0 = (u32)(((unsigned long)(&dump.regs)) - ((unsigned long)(&dump)));
166 dump.signal = signr;
167 dump_thread32(regs, &dump);
168
169/* If the size of the dump file exceeds the rlimit, then see what would happen
170 if we wrote the stack, but not the data area. */
171 if ((dump.u_dsize+dump.u_ssize+1) * PAGE_SIZE >
172 current->signal->rlim[RLIMIT_CORE].rlim_cur)
173 dump.u_dsize = 0;
174
175/* Make sure we have enough room to write the stack and data areas. */
176 if ((dump.u_ssize+1) * PAGE_SIZE >
177 current->signal->rlim[RLIMIT_CORE].rlim_cur)
178 dump.u_ssize = 0;
179
180/* make sure we actually have a data and stack area to dump */
181 set_fs(USER_DS);
182 if (!access_ok(VERIFY_READ, (void *) (unsigned long)START_DATA(dump), dump.u_dsize << PAGE_SHIFT))
183 dump.u_dsize = 0;
184 if (!access_ok(VERIFY_READ, (void *) (unsigned long)START_STACK(dump), dump.u_ssize << PAGE_SHIFT))
185 dump.u_ssize = 0;
186
187 set_fs(KERNEL_DS);
188/* struct user */
189 DUMP_WRITE(&dump,sizeof(dump));
190/* Now dump all of the user data. Include malloced stuff as well */
191 DUMP_SEEK(PAGE_SIZE);
192/* now we start writing out the user space info */
193 set_fs(USER_DS);
194/* Dump the data area */
195 if (dump.u_dsize != 0) {
196 dump_start = START_DATA(dump);
197 dump_size = dump.u_dsize << PAGE_SHIFT;
198 DUMP_WRITE(dump_start,dump_size);
199 }
200/* Now prepare to dump the stack area */
201 if (dump.u_ssize != 0) {
202 dump_start = START_STACK(dump);
203 dump_size = dump.u_ssize << PAGE_SHIFT;
204 DUMP_WRITE(dump_start,dump_size);
205 }
206/* Finally dump the task struct. Not be used by gdb, but could be useful */
207 set_fs(KERNEL_DS);
208 DUMP_WRITE(current,sizeof(*current));
209end_coredump:
210 set_fs(fs);
211 return has_dumped;
212}
213#endif
214
215/*
216 * create_aout_tables() parses the env- and arg-strings in new user
217 * memory and creates the pointer tables from them, and puts their
218 * addresses on the "stack", returning the new stack pointer value.
219 */
220static u32 __user *create_aout_tables(char __user *p, struct linux_binprm *bprm)
221{
222 u32 __user *argv;
223 u32 __user *envp;
224 u32 __user *sp;
225 int argc = bprm->argc;
226 int envc = bprm->envc;
227
228 sp = (u32 __user *) ((-(unsigned long)sizeof(u32)) & (unsigned long) p);
229 sp -= envc+1;
230 envp = sp;
231 sp -= argc+1;
232 argv = sp;
233 put_user((unsigned long) envp,--sp);
234 put_user((unsigned long) argv,--sp);
235 put_user(argc,--sp);
236 current->mm->arg_start = (unsigned long) p;
237 while (argc-->0) {
238 char c;
239 put_user((u32)(unsigned long)p,argv++);
240 do {
241 get_user(c,p++);
242 } while (c);
243 }
244 put_user(0, argv);
245 current->mm->arg_end = current->mm->env_start = (unsigned long) p;
246 while (envc-->0) {
247 char c;
248 put_user((u32)(unsigned long)p,envp++);
249 do {
250 get_user(c,p++);
251 } while (c);
252 }
253 put_user(0, envp);
254 current->mm->env_end = (unsigned long) p;
255 return sp;
256}
257
258/*
259 * These are the functions used to load a.out style executables and shared
260 * libraries. There is no binary dependent code anywhere else.
261 */
262
263static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
264{
265 struct exec ex;
266 unsigned long error;
267 unsigned long fd_offset;
268 unsigned long rlim;
269 int retval;
270
271 ex = *((struct exec *) bprm->buf); /* exec-header */
272 if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != OMAGIC &&
273 N_MAGIC(ex) != QMAGIC && N_MAGIC(ex) != NMAGIC) ||
274 N_TRSIZE(ex) || N_DRSIZE(ex) ||
275 i_size_read(bprm->file->f_path.dentry->d_inode) < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
276 return -ENOEXEC;
277 }
278
279 fd_offset = N_TXTOFF(ex);
280
281 /* Check initial limits. This avoids letting people circumvent
282 * size limits imposed on them by creating programs with large
283 * arrays in the data or bss.
284 */
285 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
286 if (rlim >= RLIM_INFINITY)
287 rlim = ~0;
288 if (ex.a_data + ex.a_bss > rlim)
289 return -ENOMEM;
290
291 /* Flush all traces of the currently running executable */
292 retval = flush_old_exec(bprm);
293 if (retval)
294 return retval;
295
296 regs->cs = __USER32_CS;
297 regs->r8 = regs->r9 = regs->r10 = regs->r11 = regs->r12 =
298 regs->r13 = regs->r14 = regs->r15 = 0;
299
300 /* OK, This is the point of no return */
301 set_personality(PER_LINUX);
302 set_thread_flag(TIF_IA32);
303 clear_thread_flag(TIF_ABI_PENDING);
304
305 current->mm->end_code = ex.a_text +
306 (current->mm->start_code = N_TXTADDR(ex));
307 current->mm->end_data = ex.a_data +
308 (current->mm->start_data = N_DATADDR(ex));
309 current->mm->brk = ex.a_bss +
310 (current->mm->start_brk = N_BSSADDR(ex));
311 current->mm->free_area_cache = TASK_UNMAPPED_BASE;
312 current->mm->cached_hole_size = 0;
313
314 current->mm->mmap = NULL;
315 compute_creds(bprm);
316 current->flags &= ~PF_FORKNOEXEC;
317
318 if (N_MAGIC(ex) == OMAGIC) {
319 unsigned long text_addr, map_size;
320 loff_t pos;
321
322 text_addr = N_TXTADDR(ex);
323
324 pos = 32;
325 map_size = ex.a_text+ex.a_data;
326
327 down_write(&current->mm->mmap_sem);
328 error = do_brk(text_addr & PAGE_MASK, map_size);
329 up_write(&current->mm->mmap_sem);
330
331 if (error != (text_addr & PAGE_MASK)) {
332 send_sig(SIGKILL, current, 0);
333 return error;
334 }
335
336 error = bprm->file->f_op->read(bprm->file,
337 (char __user *)text_addr,
338 ex.a_text+ex.a_data, &pos);
339 if ((signed long)error < 0) {
340 send_sig(SIGKILL, current, 0);
341 return error;
342 }
343
344 flush_icache_range(text_addr, text_addr+ex.a_text+ex.a_data);
345 } else {
346#ifdef WARN_OLD
347 static unsigned long error_time, error_time2;
348 if ((ex.a_text & 0xfff || ex.a_data & 0xfff) &&
349 (N_MAGIC(ex) != NMAGIC) && (jiffies-error_time2) > 5*HZ)
350 {
351 printk(KERN_NOTICE "executable not page aligned\n");
352 error_time2 = jiffies;
353 }
354
355 if ((fd_offset & ~PAGE_MASK) != 0 &&
356 (jiffies-error_time) > 5*HZ)
357 {
358 printk(KERN_WARNING
359 "fd_offset is not page aligned. Please convert program: %s\n",
360 bprm->file->f_path.dentry->d_name.name);
361 error_time = jiffies;
362 }
363#endif
364
365 if (!bprm->file->f_op->mmap||((fd_offset & ~PAGE_MASK) != 0)) {
366 loff_t pos = fd_offset;
367 down_write(&current->mm->mmap_sem);
368 do_brk(N_TXTADDR(ex), ex.a_text+ex.a_data);
369 up_write(&current->mm->mmap_sem);
370 bprm->file->f_op->read(bprm->file,
371 (char __user *)N_TXTADDR(ex),
372 ex.a_text+ex.a_data, &pos);
373 flush_icache_range((unsigned long) N_TXTADDR(ex),
374 (unsigned long) N_TXTADDR(ex) +
375 ex.a_text+ex.a_data);
376 goto beyond_if;
377 }
378
379 down_write(&current->mm->mmap_sem);
380 error = do_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
381 PROT_READ | PROT_EXEC,
382 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE | MAP_32BIT,
383 fd_offset);
384 up_write(&current->mm->mmap_sem);
385
386 if (error != N_TXTADDR(ex)) {
387 send_sig(SIGKILL, current, 0);
388 return error;
389 }
390
391 down_write(&current->mm->mmap_sem);
392 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
393 PROT_READ | PROT_WRITE | PROT_EXEC,
394 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE | MAP_32BIT,
395 fd_offset + ex.a_text);
396 up_write(&current->mm->mmap_sem);
397 if (error != N_DATADDR(ex)) {
398 send_sig(SIGKILL, current, 0);
399 return error;
400 }
401 }
402beyond_if:
403 set_binfmt(&aout_format);
404
405 set_brk(current->mm->start_brk, current->mm->brk);
406
407 retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
408 if (retval < 0) {
409 /* Someone check-me: is this error path enough? */
410 send_sig(SIGKILL, current, 0);
411 return retval;
412 }
413
414 current->mm->start_stack =
415 (unsigned long)create_aout_tables((char __user *)bprm->p, bprm);
416 /* start thread */
417 asm volatile("movl %0,%%fs" :: "r" (0)); \
418 asm volatile("movl %0,%%es; movl %0,%%ds": :"r" (__USER32_DS));
419 load_gs_index(0);
420 (regs)->rip = ex.a_entry;
421 (regs)->rsp = current->mm->start_stack;
422 (regs)->eflags = 0x200;
423 (regs)->cs = __USER32_CS;
424 (regs)->ss = __USER32_DS;
425 set_fs(USER_DS);
426 if (unlikely(current->ptrace & PT_PTRACED)) {
427 if (current->ptrace & PT_TRACE_EXEC)
428 ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
429 else
430 send_sig(SIGTRAP, current, 0);
431 }
432 return 0;
433}
434
435static int load_aout_library(struct file *file)
436{
437 struct inode * inode;
438 unsigned long bss, start_addr, len;
439 unsigned long error;
440 int retval;
441 struct exec ex;
442
443 inode = file->f_path.dentry->d_inode;
444
445 retval = -ENOEXEC;
446 error = kernel_read(file, 0, (char *) &ex, sizeof(ex));
447 if (error != sizeof(ex))
448 goto out;
449
450 /* We come in here for the regular a.out style of shared libraries */
451 if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != QMAGIC) || N_TRSIZE(ex) ||
452 N_DRSIZE(ex) || ((ex.a_entry & 0xfff) && N_MAGIC(ex) == ZMAGIC) ||
453 i_size_read(inode) < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
454 goto out;
455 }
456
457 if (N_FLAGS(ex))
458 goto out;
459
460 /* For QMAGIC, the starting address is 0x20 into the page. We mask
461 this off to get the starting address for the page */
462
463 start_addr = ex.a_entry & 0xfffff000;
464
465 if ((N_TXTOFF(ex) & ~PAGE_MASK) != 0) {
466 loff_t pos = N_TXTOFF(ex);
467
468#ifdef WARN_OLD
469 static unsigned long error_time;
470 if ((jiffies-error_time) > 5*HZ)
471 {
472 printk(KERN_WARNING
473 "N_TXTOFF is not page aligned. Please convert library: %s\n",
474 file->f_path.dentry->d_name.name);
475 error_time = jiffies;
476 }
477#endif
478 down_write(&current->mm->mmap_sem);
479 do_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);
480 up_write(&current->mm->mmap_sem);
481
482 file->f_op->read(file, (char __user *)start_addr,
483 ex.a_text + ex.a_data, &pos);
484 flush_icache_range((unsigned long) start_addr,
485 (unsigned long) start_addr + ex.a_text + ex.a_data);
486
487 retval = 0;
488 goto out;
489 }
490 /* Now use mmap to map the library into memory. */
491 down_write(&current->mm->mmap_sem);
492 error = do_mmap(file, start_addr, ex.a_text + ex.a_data,
493 PROT_READ | PROT_WRITE | PROT_EXEC,
494 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_32BIT,
495 N_TXTOFF(ex));
496 up_write(&current->mm->mmap_sem);
497 retval = error;
498 if (error != start_addr)
499 goto out;
500
501 len = PAGE_ALIGN(ex.a_text + ex.a_data);
502 bss = ex.a_text + ex.a_data + ex.a_bss;
503 if (bss > len) {
504 down_write(&current->mm->mmap_sem);
505 error = do_brk(start_addr + len, bss - len);
506 up_write(&current->mm->mmap_sem);
507 retval = error;
508 if (error != start_addr + len)
509 goto out;
510 }
511 retval = 0;
512out:
513 return retval;
514}
515
516static int __init init_aout_binfmt(void)
517{
518 return register_binfmt(&aout_format);
519}
520
521static void __exit exit_aout_binfmt(void)
522{
523 unregister_binfmt(&aout_format);
524}
525
526module_init(init_aout_binfmt);
527module_exit(exit_aout_binfmt);
528MODULE_LICENSE("GPL");
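
load_aout_binary() and load_aout_library() above both start by validating the a.out magic number in the exec header. A minimal sketch of that check follows; the magic values are the historical octal constants from <linux/a.out.h>, written out here as assumptions rather than pulled from that header.

#include <stdio.h>
#include <stdint.h>

#define OMAGIC 0407	/* object file or impure executable */
#define NMAGIC 0410	/* pure executable */
#define ZMAGIC 0413	/* demand-paged executable */
#define QMAGIC 0314	/* demand-paged, header inside text */

static int aout_magic_ok(uint32_t a_info)
{
	uint16_t magic = a_info & 0xffff;	/* N_MAGIC() keeps the low 16 bits */

	return magic == OMAGIC || magic == NMAGIC ||
	       magic == ZMAGIC || magic == QMAGIC;
}

int main(void)
{
	printf("ZMAGIC accepted: %d\n", aout_magic_ok(ZMAGIC));
	printf("garbage accepted: %d\n", aout_magic_ok(0x7f454c46));	/* "\x7fELF" as a 32-bit value */
	return 0;
}
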
diff --git a/arch/x86/ia32/ia32_binfmt.c b/arch/x86/ia32/ia32_binfmt.c
new file mode 100644
index 000000000000..dffd2ac72747
--- /dev/null
+++ b/arch/x86/ia32/ia32_binfmt.c
@@ -0,0 +1,320 @@
1/*
2 * Written 2000,2002 by Andi Kleen.
3 *
4 * Loosely based on the sparc64 and IA64 32bit emulation loaders.
5 * This tricks binfmt_elf.c into loading 32bit binaries using lots
6 * of ugly preprocessor tricks. Talk about very very poor man's inheritance.
7 */
8#define __ASM_X86_64_ELF_H 1
9
10#undef ELF_CLASS
11#define ELF_CLASS ELFCLASS32
12
13#include <linux/types.h>
14#include <linux/stddef.h>
15#include <linux/rwsem.h>
16#include <linux/sched.h>
17#include <linux/compat.h>
18#include <linux/string.h>
19#include <linux/binfmts.h>
20#include <linux/mm.h>
21#include <linux/security.h>
22
23#include <asm/segment.h>
24#include <asm/ptrace.h>
25#include <asm/processor.h>
26#include <asm/user32.h>
27#include <asm/sigcontext32.h>
28#include <asm/fpu32.h>
29#include <asm/i387.h>
30#include <asm/uaccess.h>
31#include <asm/ia32.h>
32#include <asm/vsyscall32.h>
33
34#define ELF_NAME "elf/i386"
35
36#define AT_SYSINFO 32
37#define AT_SYSINFO_EHDR 33
38
39int sysctl_vsyscall32 = 1;
40
41#undef ARCH_DLINFO
42#define ARCH_DLINFO do { \
43 if (sysctl_vsyscall32) { \
44 current->mm->context.vdso = (void *)VSYSCALL32_BASE; \
45 NEW_AUX_ENT(AT_SYSINFO, (u32)(u64)VSYSCALL32_VSYSCALL); \
46 NEW_AUX_ENT(AT_SYSINFO_EHDR, VSYSCALL32_BASE); \
47 } \
48} while(0)
49
50struct file;
51struct elf_phdr;
52
53#define IA32_EMULATOR 1
54
55#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
56
57#undef ELF_ARCH
58#define ELF_ARCH EM_386
59
60#define ELF_DATA ELFDATA2LSB
61
62#define USE_ELF_CORE_DUMP 1
63
64/* Override elfcore.h */
65#define _LINUX_ELFCORE_H 1
66typedef unsigned int elf_greg_t;
67
68#define ELF_NGREG (sizeof (struct user_regs_struct32) / sizeof(elf_greg_t))
69typedef elf_greg_t elf_gregset_t[ELF_NGREG];
70
71struct elf_siginfo
72{
73 int si_signo; /* signal number */
74 int si_code; /* extra code */
75 int si_errno; /* errno */
76};
77
78#define jiffies_to_timeval(a,b) do { (b)->tv_usec = 0; (b)->tv_sec = (a)/HZ; }while(0)
79
80struct elf_prstatus
81{
82 struct elf_siginfo pr_info; /* Info associated with signal */
83 short pr_cursig; /* Current signal */
84 unsigned int pr_sigpend; /* Set of pending signals */
85 unsigned int pr_sighold; /* Set of held signals */
86 pid_t pr_pid;
87 pid_t pr_ppid;
88 pid_t pr_pgrp;
89 pid_t pr_sid;
90 struct compat_timeval pr_utime; /* User time */
91 struct compat_timeval pr_stime; /* System time */
92 struct compat_timeval pr_cutime; /* Cumulative user time */
93 struct compat_timeval pr_cstime; /* Cumulative system time */
94 elf_gregset_t pr_reg; /* GP registers */
95 int pr_fpvalid; /* True if math co-processor being used. */
96};
97
98#define ELF_PRARGSZ (80) /* Number of chars for args */
99
100struct elf_prpsinfo
101{
102 char pr_state; /* numeric process state */
103 char pr_sname; /* char for pr_state */
104 char pr_zomb; /* zombie */
105 char pr_nice; /* nice val */
106 unsigned int pr_flag; /* flags */
107 __u16 pr_uid;
108 __u16 pr_gid;
109 pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
110 /* Lots missing */
111 char pr_fname[16]; /* filename of executable */
112 char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
113};
114
115#define __STR(x) #x
116#define STR(x) __STR(x)
117
118#define _GET_SEG(x) \
119 ({ __u32 seg; asm("movl %%" STR(x) ",%0" : "=r"(seg)); seg; })
120
121/* Assumes current==process to be dumped */
122#define ELF_CORE_COPY_REGS(pr_reg, regs) \
123 pr_reg[0] = regs->rbx; \
124 pr_reg[1] = regs->rcx; \
125 pr_reg[2] = regs->rdx; \
126 pr_reg[3] = regs->rsi; \
127 pr_reg[4] = regs->rdi; \
128 pr_reg[5] = regs->rbp; \
129 pr_reg[6] = regs->rax; \
130 pr_reg[7] = _GET_SEG(ds); \
131 pr_reg[8] = _GET_SEG(es); \
132 pr_reg[9] = _GET_SEG(fs); \
133 pr_reg[10] = _GET_SEG(gs); \
134 pr_reg[11] = regs->orig_rax; \
135 pr_reg[12] = regs->rip; \
136 pr_reg[13] = regs->cs; \
137 pr_reg[14] = regs->eflags; \
138 pr_reg[15] = regs->rsp; \
139 pr_reg[16] = regs->ss;
140
141#define user user32
142
143#undef elf_read_implies_exec
144#define elf_read_implies_exec(ex, executable_stack) (executable_stack != EXSTACK_DISABLE_X)
145//#include <asm/ia32.h>
146#include <linux/elf.h>
147
148typedef struct user_i387_ia32_struct elf_fpregset_t;
149typedef struct user32_fxsr_struct elf_fpxregset_t;
150
151
152static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *regs)
153{
154 ELF_CORE_COPY_REGS((*elfregs), regs)
155}
156
157static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t* elfregs)
158{
159 struct pt_regs *pp = task_pt_regs(t);
160 ELF_CORE_COPY_REGS((*elfregs), pp);
161 /* fix wrong segments */
162 (*elfregs)[7] = t->thread.ds;
163 (*elfregs)[9] = t->thread.fsindex;
164 (*elfregs)[10] = t->thread.gsindex;
165 (*elfregs)[8] = t->thread.es;
166 return 1;
167}
168
169static inline int
170elf_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs, elf_fpregset_t *fpu)
171{
172 struct _fpstate_ia32 *fpstate = (void*)fpu;
173 mm_segment_t oldfs = get_fs();
174
175 if (!tsk_used_math(tsk))
176 return 0;
177 if (!regs)
178 regs = task_pt_regs(tsk);
179 if (tsk == current)
180 unlazy_fpu(tsk);
181 set_fs(KERNEL_DS);
182 save_i387_ia32(tsk, fpstate, regs, 1);
183 /* Correct for i386 bug. It puts the fop into the upper 16bits of
184 the tag word (like FXSAVE), not into the fcs*/
185 fpstate->cssel |= fpstate->tag & 0xffff0000;
186 set_fs(oldfs);
187 return 1;
188}
189
190#define ELF_CORE_COPY_XFPREGS 1
191static inline int
192elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu)
193{
194 struct pt_regs *regs = task_pt_regs(t);
195 if (!tsk_used_math(t))
196 return 0;
197 if (t == current)
198 unlazy_fpu(t);
199 memcpy(xfpu, &t->thread.i387.fxsave, sizeof(elf_fpxregset_t));
200 xfpu->fcs = regs->cs;
201 xfpu->fos = t->thread.ds; /* right? */
202 return 1;
203}
204
205#undef elf_check_arch
206#define elf_check_arch(x) \
207 ((x)->e_machine == EM_386)
208
209extern int force_personality32;
210
211#define ELF_EXEC_PAGESIZE PAGE_SIZE
212#define ELF_HWCAP (boot_cpu_data.x86_capability[0])
213#define ELF_PLATFORM ("i686")
214#define SET_PERSONALITY(ex, ibcs2) \
215do { \
216 unsigned long new_flags = 0; \
217 if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
218 new_flags = _TIF_IA32; \
219 if ((current_thread_info()->flags & _TIF_IA32) \
220 != new_flags) \
221 set_thread_flag(TIF_ABI_PENDING); \
222 else \
223 clear_thread_flag(TIF_ABI_PENDING); \
224 /* XXX This overwrites the user set personality */ \
225 current->personality |= force_personality32; \
226} while (0)
227
228/* Override some function names */
229#define elf_format elf32_format
230
231#define init_elf_binfmt init_elf32_binfmt
232#define exit_elf_binfmt exit_elf32_binfmt
233
234#define load_elf_binary load_elf32_binary
235
236#define ELF_PLAT_INIT(r, load_addr) elf32_init(r)
237
238#undef start_thread
239#define start_thread(regs,new_rip,new_rsp) do { \
240 asm volatile("movl %0,%%fs" :: "r" (0)); \
241 asm volatile("movl %0,%%es; movl %0,%%ds": :"r" (__USER32_DS)); \
242 load_gs_index(0); \
243 (regs)->rip = (new_rip); \
244 (regs)->rsp = (new_rsp); \
245 (regs)->eflags = 0x200; \
246 (regs)->cs = __USER32_CS; \
247 (regs)->ss = __USER32_DS; \
248 set_fs(USER_DS); \
249} while(0)
250
251
252#include <linux/module.h>
253
254MODULE_DESCRIPTION("Binary format loader for compatibility with IA32 ELF binaries.");
255MODULE_AUTHOR("Eric Youngdale, Andi Kleen");
256
257#undef MODULE_DESCRIPTION
258#undef MODULE_AUTHOR
259
260static void elf32_init(struct pt_regs *);
261
262#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
263#define arch_setup_additional_pages syscall32_setup_pages
264extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
265
266#include "../../../fs/binfmt_elf.c"
267
268static void elf32_init(struct pt_regs *regs)
269{
270 struct task_struct *me = current;
271 regs->rdi = 0;
272 regs->rsi = 0;
273 regs->rdx = 0;
274 regs->rcx = 0;
275 regs->rax = 0;
276 regs->rbx = 0;
277 regs->rbp = 0;
278 regs->r8 = regs->r9 = regs->r10 = regs->r11 = regs->r12 =
279 regs->r13 = regs->r14 = regs->r15 = 0;
280 me->thread.fs = 0;
281 me->thread.gs = 0;
282 me->thread.fsindex = 0;
283 me->thread.gsindex = 0;
284 me->thread.ds = __USER_DS;
285 me->thread.es = __USER_DS;
286}
287
288#ifdef CONFIG_SYSCTL
289/* Register vsyscall32 into the ABI table */
290#include <linux/sysctl.h>
291
292static ctl_table abi_table2[] = {
293 {
294 .ctl_name = 99,
295 .procname = "vsyscall32",
296 .data = &sysctl_vsyscall32,
297 .maxlen = sizeof(int),
298 .mode = 0644,
299 .proc_handler = proc_dointvec
300 },
301 {}
302};
303
304static ctl_table abi_root_table2[] = {
305 {
306 .ctl_name = CTL_ABI,
307 .procname = "abi",
308 .mode = 0555,
309 .child = abi_table2
310 },
311 {}
312};
313
314static __init int ia32_binfmt_init(void)
315{
316 register_sysctl_table(abi_root_table2);
317 return 0;
318}
319__initcall(ia32_binfmt_init);
320#endif
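
ARCH_DLINFO above advertises the 32-bit vsyscall page to the new process through two auxiliary-vector entries, AT_SYSINFO and AT_SYSINFO_EHDR. As a small user-space sketch (not part of the patch, and assuming a glibc with getauxval()), a process can read the same kind of entry back to locate its vDSO:

#include <stdio.h>
#include <elf.h>
#include <sys/auxv.h>

int main(void)
{
	/* AT_SYSINFO_EHDR is the auxv slot that ARCH_DLINFO fills with the
	 * vDSO base (VSYSCALL32_BASE for ia32 tasks in this patch). */
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

	printf("vDSO ELF header mapped at %#lx\n", vdso);
	return 0;
}
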
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
new file mode 100644
index 000000000000..6ea19c25f90d
--- /dev/null
+++ b/arch/x86/ia32/ia32_signal.c
@@ -0,0 +1,617 @@
1/*
2 * linux/arch/x86_64/ia32/ia32_signal.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
7 * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes
8 * 2000-12-* x86-64 compatibility mode signal handling by Andi Kleen
9 */
10
11#include <linux/sched.h>
12#include <linux/mm.h>
13#include <linux/smp.h>
14#include <linux/kernel.h>
15#include <linux/signal.h>
16#include <linux/errno.h>
17#include <linux/wait.h>
18#include <linux/ptrace.h>
19#include <linux/unistd.h>
20#include <linux/stddef.h>
21#include <linux/personality.h>
22#include <linux/compat.h>
23#include <linux/binfmts.h>
24#include <asm/ucontext.h>
25#include <asm/uaccess.h>
26#include <asm/i387.h>
27#include <asm/ia32.h>
28#include <asm/ptrace.h>
29#include <asm/ia32_unistd.h>
30#include <asm/user32.h>
31#include <asm/sigcontext32.h>
32#include <asm/fpu32.h>
33#include <asm/proto.h>
34#include <asm/vsyscall32.h>
35
36#define DEBUG_SIG 0
37
38#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
39
40asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
41void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
42
43int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
44{
45 int err;
46 if (!access_ok (VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
47 return -EFAULT;
48
49 /* If you change siginfo_t structure, please make sure that
50 this code is fixed accordingly.
51 It should never copy any pad contained in the structure
52 to avoid security leaks, but must copy the generic
53 3 ints plus the relevant union member. */
54 err = __put_user(from->si_signo, &to->si_signo);
55 err |= __put_user(from->si_errno, &to->si_errno);
56 err |= __put_user((short)from->si_code, &to->si_code);
57
58 if (from->si_code < 0) {
59 err |= __put_user(from->si_pid, &to->si_pid);
60 err |= __put_user(from->si_uid, &to->si_uid);
61 err |= __put_user(ptr_to_compat(from->si_ptr), &to->si_ptr);
62 } else {
63 /* First 32bits of unions are always present:
64 * si_pid === si_band === si_tid === si_addr(LS half) */
65 err |= __put_user(from->_sifields._pad[0], &to->_sifields._pad[0]);
66 switch (from->si_code >> 16) {
67 case __SI_FAULT >> 16:
68 break;
69 case __SI_CHLD >> 16:
70 err |= __put_user(from->si_utime, &to->si_utime);
71 err |= __put_user(from->si_stime, &to->si_stime);
72 err |= __put_user(from->si_status, &to->si_status);
73 /* FALL THROUGH */
74 default:
75 case __SI_KILL >> 16:
76 err |= __put_user(from->si_uid, &to->si_uid);
77 break;
78 case __SI_POLL >> 16:
79 err |= __put_user(from->si_fd, &to->si_fd);
80 break;
81 case __SI_TIMER >> 16:
82 err |= __put_user(from->si_overrun, &to->si_overrun);
83 err |= __put_user(ptr_to_compat(from->si_ptr),
84 &to->si_ptr);
85 break;
86 case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
87 case __SI_MESGQ >> 16:
88 err |= __put_user(from->si_uid, &to->si_uid);
89 err |= __put_user(from->si_int, &to->si_int);
90 break;
91 }
92 }
93 return err;
94}
95
96int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
97{
98 int err;
99 u32 ptr32;
100 if (!access_ok (VERIFY_READ, from, sizeof(compat_siginfo_t)))
101 return -EFAULT;
102
103 err = __get_user(to->si_signo, &from->si_signo);
104 err |= __get_user(to->si_errno, &from->si_errno);
105 err |= __get_user(to->si_code, &from->si_code);
106
107 err |= __get_user(to->si_pid, &from->si_pid);
108 err |= __get_user(to->si_uid, &from->si_uid);
109 err |= __get_user(ptr32, &from->si_ptr);
110 to->si_ptr = compat_ptr(ptr32);
111
112 return err;
113}
114
115asmlinkage long
116sys32_sigsuspend(int history0, int history1, old_sigset_t mask)
117{
118 mask &= _BLOCKABLE;
119 spin_lock_irq(&current->sighand->siglock);
120 current->saved_sigmask = current->blocked;
121 siginitset(&current->blocked, mask);
122 recalc_sigpending();
123 spin_unlock_irq(&current->sighand->siglock);
124
125 current->state = TASK_INTERRUPTIBLE;
126 schedule();
127 set_thread_flag(TIF_RESTORE_SIGMASK);
128 return -ERESTARTNOHAND;
129}
130
131asmlinkage long
132sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
133 stack_ia32_t __user *uoss_ptr,
134 struct pt_regs *regs)
135{
136 stack_t uss,uoss;
137 int ret;
138 mm_segment_t seg;
139 if (uss_ptr) {
140 u32 ptr;
141 memset(&uss,0,sizeof(stack_t));
142 if (!access_ok(VERIFY_READ,uss_ptr,sizeof(stack_ia32_t)) ||
143 __get_user(ptr, &uss_ptr->ss_sp) ||
144 __get_user(uss.ss_flags, &uss_ptr->ss_flags) ||
145 __get_user(uss.ss_size, &uss_ptr->ss_size))
146 return -EFAULT;
147 uss.ss_sp = compat_ptr(ptr);
148 }
149 seg = get_fs();
150 set_fs(KERNEL_DS);
151 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->rsp);
152 set_fs(seg);
153 if (ret >= 0 && uoss_ptr) {
154 if (!access_ok(VERIFY_WRITE,uoss_ptr,sizeof(stack_ia32_t)) ||
155 __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
156 __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
157 __put_user(uoss.ss_size, &uoss_ptr->ss_size))
158 ret = -EFAULT;
159 }
160 return ret;
161}
162
163/*
164 * Do a signal return; undo the signal stack.
165 */
166
167struct sigframe
168{
169 u32 pretcode;
170 int sig;
171 struct sigcontext_ia32 sc;
172 struct _fpstate_ia32 fpstate;
173 unsigned int extramask[_COMPAT_NSIG_WORDS-1];
174 char retcode[8];
175};
176
177struct rt_sigframe
178{
179 u32 pretcode;
180 int sig;
181 u32 pinfo;
182 u32 puc;
183 compat_siginfo_t info;
184 struct ucontext_ia32 uc;
185 struct _fpstate_ia32 fpstate;
186 char retcode[8];
187};
188
189static int
190ia32_restore_sigcontext(struct pt_regs *regs, struct sigcontext_ia32 __user *sc, unsigned int *peax)
191{
192 unsigned int err = 0;
193
194 /* Always make any pending restarted system calls return -EINTR */
195 current_thread_info()->restart_block.fn = do_no_restart_syscall;
196
197#if DEBUG_SIG
198 printk("SIG restore_sigcontext: sc=%p err(%x) eip(%x) cs(%x) flg(%x)\n",
199 sc, sc->err, sc->eip, sc->cs, sc->eflags);
200#endif
201#define COPY(x) { \
202 unsigned int reg; \
203 err |= __get_user(reg, &sc->e ##x); \
204 regs->r ## x = reg; \
205}
206
207#define RELOAD_SEG(seg,mask) \
208 { unsigned int cur; \
209 unsigned short pre; \
210 err |= __get_user(pre, &sc->seg); \
211 asm volatile("movl %%" #seg ",%0" : "=r" (cur)); \
212 pre |= mask; \
213 if (pre != cur) loadsegment(seg,pre); }
214
215 /* Reload fs and gs if they have changed in the signal handler.
216 This does not handle long fs/gs base changes in the handler, but
217 does not clobber them at least in the normal case. */
218
219 {
220 unsigned gs, oldgs;
221 err |= __get_user(gs, &sc->gs);
222 gs |= 3;
223 asm("movl %%gs,%0" : "=r" (oldgs));
224 if (gs != oldgs)
225 load_gs_index(gs);
226 }
227 RELOAD_SEG(fs,3);
228 RELOAD_SEG(ds,3);
229 RELOAD_SEG(es,3);
230
231 COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
232 COPY(dx); COPY(cx); COPY(ip);
233 /* Don't touch extended registers */
234
235 err |= __get_user(regs->cs, &sc->cs);
236 regs->cs |= 3;
237 err |= __get_user(regs->ss, &sc->ss);
238 regs->ss |= 3;
239
240 {
241 unsigned int tmpflags;
242 err |= __get_user(tmpflags, &sc->eflags);
243 regs->eflags = (regs->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
244 regs->orig_rax = -1; /* disable syscall checks */
245 }
246
247 {
248 u32 tmp;
249 struct _fpstate_ia32 __user * buf;
250 err |= __get_user(tmp, &sc->fpstate);
251 buf = compat_ptr(tmp);
252 if (buf) {
253 if (!access_ok(VERIFY_READ, buf, sizeof(*buf)))
254 goto badframe;
255 err |= restore_i387_ia32(current, buf, 0);
256 } else {
257 struct task_struct *me = current;
258 if (used_math()) {
259 clear_fpu(me);
260 clear_used_math();
261 }
262 }
263 }
264
265 {
266 u32 tmp;
267 err |= __get_user(tmp, &sc->eax);
268 *peax = tmp;
269 }
270 return err;
271
272badframe:
273 return 1;
274}
275
276asmlinkage long sys32_sigreturn(struct pt_regs *regs)
277{
278 struct sigframe __user *frame = (struct sigframe __user *)(regs->rsp-8);
279 sigset_t set;
280 unsigned int eax;
281
282 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
283 goto badframe;
284 if (__get_user(set.sig[0], &frame->sc.oldmask)
285 || (_COMPAT_NSIG_WORDS > 1
286 && __copy_from_user((((char *) &set.sig) + 4), &frame->extramask,
287 sizeof(frame->extramask))))
288 goto badframe;
289
290 sigdelsetmask(&set, ~_BLOCKABLE);
291 spin_lock_irq(&current->sighand->siglock);
292 current->blocked = set;
293 recalc_sigpending();
294 spin_unlock_irq(&current->sighand->siglock);
295
296 if (ia32_restore_sigcontext(regs, &frame->sc, &eax))
297 goto badframe;
298 return eax;
299
300badframe:
301 signal_fault(regs, frame, "32bit sigreturn");
302 return 0;
303}
304
305asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
306{
307 struct rt_sigframe __user *frame;
308 sigset_t set;
309 unsigned int eax;
310 struct pt_regs tregs;
311
312 frame = (struct rt_sigframe __user *)(regs->rsp - 4);
313
314 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
315 goto badframe;
316 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
317 goto badframe;
318
319 sigdelsetmask(&set, ~_BLOCKABLE);
320 spin_lock_irq(&current->sighand->siglock);
321 current->blocked = set;
322 recalc_sigpending();
323 spin_unlock_irq(&current->sighand->siglock);
324
325 if (ia32_restore_sigcontext(regs, &frame->uc.uc_mcontext, &eax))
326 goto badframe;
327
328 tregs = *regs;
329 if (sys32_sigaltstack(&frame->uc.uc_stack, NULL, &tregs) == -EFAULT)
330 goto badframe;
331
332 return eax;
333
334badframe:
335 signal_fault(regs,frame,"32bit rt sigreturn");
336 return 0;
337}
338
339/*
340 * Set up a signal frame.
341 */
342
343static int
344ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc, struct _fpstate_ia32 __user *fpstate,
345 struct pt_regs *regs, unsigned int mask)
346{
347 int tmp, err = 0;
348
349 tmp = 0;
350 __asm__("movl %%gs,%0" : "=r"(tmp): "0"(tmp));
351 err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
352 __asm__("movl %%fs,%0" : "=r"(tmp): "0"(tmp));
353 err |= __put_user(tmp, (unsigned int __user *)&sc->fs);
354 __asm__("movl %%ds,%0" : "=r"(tmp): "0"(tmp));
355 err |= __put_user(tmp, (unsigned int __user *)&sc->ds);
356 __asm__("movl %%es,%0" : "=r"(tmp): "0"(tmp));
357 err |= __put_user(tmp, (unsigned int __user *)&sc->es);
358
359 err |= __put_user((u32)regs->rdi, &sc->edi);
360 err |= __put_user((u32)regs->rsi, &sc->esi);
361 err |= __put_user((u32)regs->rbp, &sc->ebp);
362 err |= __put_user((u32)regs->rsp, &sc->esp);
363 err |= __put_user((u32)regs->rbx, &sc->ebx);
364 err |= __put_user((u32)regs->rdx, &sc->edx);
365 err |= __put_user((u32)regs->rcx, &sc->ecx);
366 err |= __put_user((u32)regs->rax, &sc->eax);
367 err |= __put_user((u32)regs->cs, &sc->cs);
368 err |= __put_user((u32)regs->ss, &sc->ss);
369 err |= __put_user(current->thread.trap_no, &sc->trapno);
370 err |= __put_user(current->thread.error_code, &sc->err);
371 err |= __put_user((u32)regs->rip, &sc->eip);
372 err |= __put_user((u32)regs->eflags, &sc->eflags);
373 err |= __put_user((u32)regs->rsp, &sc->esp_at_signal);
374
375 tmp = save_i387_ia32(current, fpstate, regs, 0);
376 if (tmp < 0)
377 err = -EFAULT;
378 else {
379 clear_used_math();
380 stts();
381 err |= __put_user(ptr_to_compat(tmp ? fpstate : NULL),
382 &sc->fpstate);
383 }
384
385 /* non-iBCS2 extensions.. */
386 err |= __put_user(mask, &sc->oldmask);
387 err |= __put_user(current->thread.cr2, &sc->cr2);
388
389 return err;
390}
391
392/*
393 * Determine which stack to use..
394 */
395static void __user *
396get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
397{
398 unsigned long rsp;
399
400 /* Default to using normal stack */
401 rsp = regs->rsp;
402
403 /* This is the X/Open sanctioned signal stack switching. */
404 if (ka->sa.sa_flags & SA_ONSTACK) {
405 if (sas_ss_flags(rsp) == 0)
406 rsp = current->sas_ss_sp + current->sas_ss_size;
407 }
408
409 /* This is the legacy signal stack switching. */
410 else if ((regs->ss & 0xffff) != __USER_DS &&
411 !(ka->sa.sa_flags & SA_RESTORER) &&
412 ka->sa.sa_restorer) {
413 rsp = (unsigned long) ka->sa.sa_restorer;
414 }
415
416 rsp -= frame_size;
417 /* Align the stack pointer according to the i386 ABI,
418 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
419 rsp = ((rsp + 4) & -16ul) - 4;
420 return (void __user *) rsp;
421}
422
423int ia32_setup_frame(int sig, struct k_sigaction *ka,
424 compat_sigset_t *set, struct pt_regs * regs)
425{
426 struct sigframe __user *frame;
427 int err = 0;
428
429 frame = get_sigframe(ka, regs, sizeof(*frame));
430
431 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
432 goto give_sigsegv;
433
434 err |= __put_user(sig, &frame->sig);
435 if (err)
436 goto give_sigsegv;
437
438 err |= ia32_setup_sigcontext(&frame->sc, &frame->fpstate, regs,
439 set->sig[0]);
440 if (err)
441 goto give_sigsegv;
442
443 if (_COMPAT_NSIG_WORDS > 1) {
444 err |= __copy_to_user(frame->extramask, &set->sig[1],
445 sizeof(frame->extramask));
446 }
447 if (err)
448 goto give_sigsegv;
449
450 /* Return stub is in 32bit vsyscall page */
451 {
452 void __user *restorer;
453 if (current->binfmt->hasvdso)
454 restorer = VSYSCALL32_SIGRETURN;
455 else
456 restorer = (void *)&frame->retcode;
457 if (ka->sa.sa_flags & SA_RESTORER)
458 restorer = ka->sa.sa_restorer;
459 err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);
460 }
461 /* These are actually not used anymore, but left because some
462 gdb versions depend on them as a marker. */
463 {
464 /* copy_to_user optimizes that into a single 8 byte store */
465 static const struct {
466 u16 poplmovl;
467 u32 val;
468 u16 int80;
469 u16 pad;
470 } __attribute__((packed)) code = {
471 0xb858, /* popl %eax ; movl $...,%eax */
472 __NR_ia32_sigreturn,
473 0x80cd, /* int $0x80 */
474 0,
475 };
476 err |= __copy_to_user(frame->retcode, &code, 8);
477 }
478 if (err)
479 goto give_sigsegv;
480
481 /* Set up registers for signal handler */
482 regs->rsp = (unsigned long) frame;
483 regs->rip = (unsigned long) ka->sa.sa_handler;
484
485 /* Make -mregparm=3 work */
486 regs->rax = sig;
487 regs->rdx = 0;
488 regs->rcx = 0;
489
490 asm volatile("movl %0,%%ds" :: "r" (__USER32_DS));
491 asm volatile("movl %0,%%es" :: "r" (__USER32_DS));
492
493 regs->cs = __USER32_CS;
494 regs->ss = __USER32_DS;
495
496 set_fs(USER_DS);
497 regs->eflags &= ~TF_MASK;
498 if (test_thread_flag(TIF_SINGLESTEP))
499 ptrace_notify(SIGTRAP);
500
501#if DEBUG_SIG
502 printk("SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n",
503 current->comm, current->pid, frame, regs->rip, frame->pretcode);
504#endif
505
506 return 0;
507
508give_sigsegv:
509 force_sigsegv(sig, current);
510 return -EFAULT;
511}
512
513int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
514 compat_sigset_t *set, struct pt_regs * regs)
515{
516 struct rt_sigframe __user *frame;
517 int err = 0;
518
519 frame = get_sigframe(ka, regs, sizeof(*frame));
520
521 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
522 goto give_sigsegv;
523
524 {
525 struct exec_domain *ed = current_thread_info()->exec_domain;
526 err |= __put_user((ed
527 && ed->signal_invmap
528 && sig < 32
529 ? ed->signal_invmap[sig]
530 : sig),
531 &frame->sig);
532 }
533 err |= __put_user(ptr_to_compat(&frame->info), &frame->pinfo);
534 err |= __put_user(ptr_to_compat(&frame->uc), &frame->puc);
535 err |= copy_siginfo_to_user32(&frame->info, info);
536 if (err)
537 goto give_sigsegv;
538
539 /* Create the ucontext. */
540 err |= __put_user(0, &frame->uc.uc_flags);
541 err |= __put_user(0, &frame->uc.uc_link);
542 err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
543 err |= __put_user(sas_ss_flags(regs->rsp),
544 &frame->uc.uc_stack.ss_flags);
545 err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
546 err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate,
547 regs, set->sig[0]);
548 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
549 if (err)
550 goto give_sigsegv;
551
552
553 {
554 void __user *restorer = VSYSCALL32_RTSIGRETURN;
555 if (ka->sa.sa_flags & SA_RESTORER)
556 restorer = ka->sa.sa_restorer;
557 err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);
558 }
559
560 /* This is movl $,%eax ; int $0x80 */
561 /* Not actually used anymore, but left because some gdb versions
562 need it. */
563 {
564 /* __copy_to_user optimizes that into a single 8 byte store */
565 static const struct {
566 u8 movl;
567 u32 val;
568 u16 int80;
569 u16 pad;
570 u8 pad2;
571 } __attribute__((packed)) code = {
572 0xb8,
573 __NR_ia32_rt_sigreturn,
574 0x80cd,
575 0,
576 };
577 err |= __copy_to_user(frame->retcode, &code, 8);
578 }
579 if (err)
580 goto give_sigsegv;
581
582 /* Set up registers for signal handler */
583 regs->rsp = (unsigned long) frame;
584 regs->rip = (unsigned long) ka->sa.sa_handler;
585
586 /* Make -mregparm=3 work */
587 regs->rax = sig;
588 regs->rdx = (unsigned long) &frame->info;
589 regs->rcx = (unsigned long) &frame->uc;
590
591 /* Make -mregparm=3 work */
592 regs->rax = sig;
593 regs->rdx = (unsigned long) &frame->info;
594 regs->rcx = (unsigned long) &frame->uc;
595
596 asm volatile("movl %0,%%ds" :: "r" (__USER32_DS));
597 asm volatile("movl %0,%%es" :: "r" (__USER32_DS));
598
599 regs->cs = __USER32_CS;
600 regs->ss = __USER32_DS;
601
602 set_fs(USER_DS);
603 regs->eflags &= ~TF_MASK;
604 if (test_thread_flag(TIF_SINGLESTEP))
605 ptrace_notify(SIGTRAP);
606
607#if DEBUG_SIG
608 printk("SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n",
609 current->comm, current->pid, frame, regs->rip, frame->pretcode);
610#endif
611
612 return 0;
613
614give_sigsegv:
615 force_sigsegv(sig, current);
616 return -EFAULT;
617}
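
Both frame-setup paths above still write a tiny sigreturn trampoline into frame->retcode even though the real return path goes through the 32-bit vsyscall page; the bytes are kept only as a marker for older gdb versions. The sketch below assembles the same 8 bytes in user space to show the encoding; the value 119 (__NR_sigreturn on i386) is an assumption spelled out here instead of the kernel's __NR_ia32_sigreturn constant.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* The 8-byte stub ia32_setup_frame() stores in frame->retcode:
 * popl %eax; movl $__NR_sigreturn,%eax; int $0x80. */
static const struct {
	uint16_t poplmovl;	/* 0x58 = popl %eax, 0xb8 = movl imm32,%eax */
	uint32_t val;		/* immediate: the syscall number (assumed 119) */
	uint16_t int80;		/* 0xcd 0x80 = int $0x80 */
} __attribute__((packed)) code = { 0xb858, 119, 0x80cd };

int main(void)
{
	unsigned char buf[sizeof(code)];
	unsigned i;

	memcpy(buf, &code, sizeof(code));
	for (i = 0; i < sizeof(buf); i++)
		printf("%02x ", buf[i]);
	printf("\n");	/* prints: 58 b8 77 00 00 00 cd 80 */
	return 0;
}
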
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
new file mode 100644
index 000000000000..18b231810908
--- /dev/null
+++ b/arch/x86/ia32/ia32entry.S
@@ -0,0 +1,736 @@
1/*
2 * Compatibility mode system call entry point for x86-64.
3 *
4 * Copyright 2000-2002 Andi Kleen, SuSE Labs.
5 */
6
7#include <asm/dwarf2.h>
8#include <asm/calling.h>
9#include <asm/asm-offsets.h>
10#include <asm/current.h>
11#include <asm/errno.h>
12#include <asm/ia32_unistd.h>
13#include <asm/thread_info.h>
14#include <asm/segment.h>
15#include <asm/vsyscall32.h>
16#include <asm/irqflags.h>
17#include <linux/linkage.h>
18
19#define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8)
20
21 .macro IA32_ARG_FIXUP noebp=0
22 movl %edi,%r8d
23 .if \noebp
24 .else
25 movl %ebp,%r9d
26 .endif
27 xchg %ecx,%esi
28 movl %ebx,%edi
29 movl %edx,%edx /* zero extension */
30 .endm
31
32 /* clobbers %eax */
33 .macro CLEAR_RREGS
34 xorl %eax,%eax
35 movq %rax,R11(%rsp)
36 movq %rax,R10(%rsp)
37 movq %rax,R9(%rsp)
38 movq %rax,R8(%rsp)
39 .endm
40
41 .macro LOAD_ARGS32 offset
42 movl \offset(%rsp),%r11d
43 movl \offset+8(%rsp),%r10d
44 movl \offset+16(%rsp),%r9d
45 movl \offset+24(%rsp),%r8d
46 movl \offset+40(%rsp),%ecx
47 movl \offset+48(%rsp),%edx
48 movl \offset+56(%rsp),%esi
49 movl \offset+64(%rsp),%edi
50 movl \offset+72(%rsp),%eax
51 .endm
52
53 .macro CFI_STARTPROC32 simple
54 CFI_STARTPROC \simple
55 CFI_UNDEFINED r8
56 CFI_UNDEFINED r9
57 CFI_UNDEFINED r10
58 CFI_UNDEFINED r11
59 CFI_UNDEFINED r12
60 CFI_UNDEFINED r13
61 CFI_UNDEFINED r14
62 CFI_UNDEFINED r15
63 .endm
64
65/*
66 * 32bit SYSENTER instruction entry.
67 *
68 * Arguments:
69 * %eax System call number.
70 * %ebx Arg1
71 * %ecx Arg2
72 * %edx Arg3
73 * %esi Arg4
74 * %edi Arg5
75 * %ebp user stack
76 * 0(%ebp) Arg6
77 *
78 * Interrupts off.
79 *
80 * This is purely a fast path. For anything complicated we use the int 0x80
81 * path below. Set up a complete hardware stack frame to share code
82 * with the int 0x80 path.
83 */
84ENTRY(ia32_sysenter_target)
85 CFI_STARTPROC32 simple
86 CFI_SIGNAL_FRAME
87 CFI_DEF_CFA rsp,0
88 CFI_REGISTER rsp,rbp
89 swapgs
90 movq %gs:pda_kernelstack, %rsp
91 addq $(PDA_STACKOFFSET),%rsp
92 /*
93 * No need to follow this irqs on/off section: the syscall
94 * disabled irqs, here we enable it straight after entry:
95 */
96 sti
97 movl %ebp,%ebp /* zero extension */
98 pushq $__USER32_DS
99 CFI_ADJUST_CFA_OFFSET 8
100 /*CFI_REL_OFFSET ss,0*/
101 pushq %rbp
102 CFI_ADJUST_CFA_OFFSET 8
103 CFI_REL_OFFSET rsp,0
104 pushfq
105 CFI_ADJUST_CFA_OFFSET 8
106 /*CFI_REL_OFFSET rflags,0*/
107 movl $VSYSCALL32_SYSEXIT, %r10d
108 CFI_REGISTER rip,r10
109 pushq $__USER32_CS
110 CFI_ADJUST_CFA_OFFSET 8
111 /*CFI_REL_OFFSET cs,0*/
112 movl %eax, %eax
113 pushq %r10
114 CFI_ADJUST_CFA_OFFSET 8
115 CFI_REL_OFFSET rip,0
116 pushq %rax
117 CFI_ADJUST_CFA_OFFSET 8
118 cld
119 SAVE_ARGS 0,0,1
120 /* no need to do an access_ok check here because rbp has been
121 32bit zero extended */
1221: movl (%rbp),%r9d
123 .section __ex_table,"a"
124 .quad 1b,ia32_badarg
125 .previous
126 GET_THREAD_INFO(%r10)
127 orl $TS_COMPAT,threadinfo_status(%r10)
128 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
129 CFI_REMEMBER_STATE
130 jnz sysenter_tracesys
131sysenter_do_call:
132 cmpl $(IA32_NR_syscalls-1),%eax
133 ja ia32_badsys
134 IA32_ARG_FIXUP 1
135 call *ia32_sys_call_table(,%rax,8)
136 movq %rax,RAX-ARGOFFSET(%rsp)
137 GET_THREAD_INFO(%r10)
138 cli
139 TRACE_IRQS_OFF
140 testl $_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
141 jnz int_ret_from_sys_call
142 andl $~TS_COMPAT,threadinfo_status(%r10)
143 /* clear IF, that popfq doesn't enable interrupts early */
144 andl $~0x200,EFLAGS-R11(%rsp)
145 RESTORE_ARGS 1,24,1,1,1,1
146 popfq
147 CFI_ADJUST_CFA_OFFSET -8
148 /*CFI_RESTORE rflags*/
149 popq %rcx /* User %esp */
150 CFI_ADJUST_CFA_OFFSET -8
151 CFI_REGISTER rsp,rcx
152 movl $VSYSCALL32_SYSEXIT,%edx /* User %eip */
153 CFI_REGISTER rip,rdx
154 TRACE_IRQS_ON
155 swapgs
156 sti /* sti only takes effect after the next instruction */
157 /* sysexit */
158 .byte 0xf, 0x35
159
160sysenter_tracesys:
161 CFI_RESTORE_STATE
162 SAVE_REST
163 CLEAR_RREGS
164 movq $-ENOSYS,RAX(%rsp) /* really needed? */
165 movq %rsp,%rdi /* &pt_regs -> arg1 */
166 call syscall_trace_enter
167 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
168 RESTORE_REST
169 movl %ebp, %ebp
170 /* no need to do an access_ok check here because rbp has been
171 32bit zero extended */
1721: movl (%rbp),%r9d
173 .section __ex_table,"a"
174 .quad 1b,ia32_badarg
175 .previous
176 jmp sysenter_do_call
177 CFI_ENDPROC
178ENDPROC(ia32_sysenter_target)
179
180/*
181 * 32bit SYSCALL instruction entry.
182 *
183 * Arguments:
184 * %eax System call number.
185 * %ebx Arg1
186 * %ecx return EIP
187 * %edx Arg3
188 * %esi Arg4
189 * %edi Arg5
190 * %ebp Arg2 [note: not saved in the stack frame, should not be touched]
191 * %esp user stack
192 * 0(%esp) Arg6
193 *
194 * Interrupts off.
195 *
196 * This is purely a fast path. For anything complicated we use the int 0x80
197 * path below. Set up a complete hardware stack frame to share code
198 * with the int 0x80 path.
199 */
200ENTRY(ia32_cstar_target)
201 CFI_STARTPROC32 simple
202 CFI_SIGNAL_FRAME
203 CFI_DEF_CFA rsp,PDA_STACKOFFSET
204 CFI_REGISTER rip,rcx
205 /*CFI_REGISTER rflags,r11*/
206 swapgs
207 movl %esp,%r8d
208 CFI_REGISTER rsp,r8
209 movq %gs:pda_kernelstack,%rsp
210 /*
211 * No need to follow this irqs on/off section: the syscall
212 * disabled irqs and here we enable them straight after entry:
213 */
214 sti
215 SAVE_ARGS 8,1,1
216 movl %eax,%eax /* zero extension */
217 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
218 movq %rcx,RIP-ARGOFFSET(%rsp)
219 CFI_REL_OFFSET rip,RIP-ARGOFFSET
220 movq %rbp,RCX-ARGOFFSET(%rsp) /* this lies slightly to ptrace */
221 movl %ebp,%ecx
222 movq $__USER32_CS,CS-ARGOFFSET(%rsp)
223 movq $__USER32_DS,SS-ARGOFFSET(%rsp)
224 movq %r11,EFLAGS-ARGOFFSET(%rsp)
225 /*CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
226 movq %r8,RSP-ARGOFFSET(%rsp)
227 CFI_REL_OFFSET rsp,RSP-ARGOFFSET
228 /* no need to do an access_ok check here because r8 has been
229 32bit zero extended */
230 /* hardware stack frame is complete now */
2311: movl (%r8),%r9d
232 .section __ex_table,"a"
233 .quad 1b,ia32_badarg
234 .previous
235 GET_THREAD_INFO(%r10)
236 orl $TS_COMPAT,threadinfo_status(%r10)
237 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
238 CFI_REMEMBER_STATE
239 jnz cstar_tracesys
240cstar_do_call:
241 cmpl $IA32_NR_syscalls-1,%eax
242 ja ia32_badsys
243 IA32_ARG_FIXUP 1
244 call *ia32_sys_call_table(,%rax,8)
245 movq %rax,RAX-ARGOFFSET(%rsp)
246 GET_THREAD_INFO(%r10)
247 cli
248 TRACE_IRQS_OFF
249 testl $_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
250 jnz int_ret_from_sys_call
251 andl $~TS_COMPAT,threadinfo_status(%r10)
252 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
253 movl RIP-ARGOFFSET(%rsp),%ecx
254 CFI_REGISTER rip,rcx
255 movl EFLAGS-ARGOFFSET(%rsp),%r11d
256 /*CFI_REGISTER rflags,r11*/
257 TRACE_IRQS_ON
258 movl RSP-ARGOFFSET(%rsp),%esp
259 CFI_RESTORE rsp
260 swapgs
261 sysretl
262
263cstar_tracesys:
264 CFI_RESTORE_STATE
265 SAVE_REST
266 CLEAR_RREGS
267 movq $-ENOSYS,RAX(%rsp) /* really needed? */
268 movq %rsp,%rdi /* &pt_regs -> arg1 */
269 call syscall_trace_enter
270 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
271 RESTORE_REST
272 movl RSP-ARGOFFSET(%rsp), %r8d
273 /* no need to do an access_ok check here because r8 has been
274 32bit zero extended */
2751: movl (%r8),%r9d
276 .section __ex_table,"a"
277 .quad 1b,ia32_badarg
278 .previous
279 jmp cstar_do_call
280END(ia32_cstar_target)
281
282ia32_badarg:
283 movq $-EFAULT,%rax
284 jmp ia32_sysret
285 CFI_ENDPROC
286
287/*
288 * Emulated IA32 system calls via int 0x80.
289 *
290 * Arguments:
291 * %eax System call number.
292 * %ebx Arg1
293 * %ecx Arg2
294 * %edx Arg3
295 * %esi Arg4
296 * %edi Arg5
297 * %ebp Arg6 [note: not saved in the stack frame, should not be touched]
298 *
299 * Notes:
300 * Uses the same stack frame as the x86-64 version.
301 * All registers except %eax must be saved (but ptrace may violate that)
302 * Arguments are zero extended. For system calls that want sign extension and
303 * take long arguments a wrapper is needed. Most calls can just be called
304 * directly.
305 * Assumes it is only called from user space and entered with interrupts off.
306 */
307
308ENTRY(ia32_syscall)
309 CFI_STARTPROC32 simple
310 CFI_SIGNAL_FRAME
311 CFI_DEF_CFA rsp,SS+8-RIP
312 /*CFI_REL_OFFSET ss,SS-RIP*/
313 CFI_REL_OFFSET rsp,RSP-RIP
314 /*CFI_REL_OFFSET rflags,EFLAGS-RIP*/
315 /*CFI_REL_OFFSET cs,CS-RIP*/
316 CFI_REL_OFFSET rip,RIP-RIP
317 swapgs
318 /*
319 * No need to follow this irqs on/off section: the syscall
320 * disabled irqs and here we enable them straight after entry:
321 */
322 sti
323 movl %eax,%eax
324 pushq %rax
325 CFI_ADJUST_CFA_OFFSET 8
326 cld
327 /* note: the registers are not zero-extended into the stack frame;
328 this could be a problem. */
329 SAVE_ARGS 0,0,1
330 GET_THREAD_INFO(%r10)
331 orl $TS_COMPAT,threadinfo_status(%r10)
332 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
333 jnz ia32_tracesys
334ia32_do_syscall:
335 cmpl $(IA32_NR_syscalls-1),%eax
336 ja ia32_badsys
337 IA32_ARG_FIXUP
338 call *ia32_sys_call_table(,%rax,8) # xxx: rip relative
339ia32_sysret:
340 movq %rax,RAX-ARGOFFSET(%rsp)
341 jmp int_ret_from_sys_call
342
343ia32_tracesys:
344 SAVE_REST
345 CLEAR_RREGS
346 movq $-ENOSYS,RAX(%rsp) /* really needed? */
347 movq %rsp,%rdi /* &pt_regs -> arg1 */
348 call syscall_trace_enter
349 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
350 RESTORE_REST
351 jmp ia32_do_syscall
352END(ia32_syscall)
353
354ia32_badsys:
355 movq $0,ORIG_RAX-ARGOFFSET(%rsp)
356 movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
357 jmp int_ret_from_sys_call
358
359quiet_ni_syscall:
360 movq $-ENOSYS,%rax
361 ret
362 CFI_ENDPROC
363
364 .macro PTREGSCALL label, func, arg
365 .globl \label
366\label:
367 leaq \func(%rip),%rax
368 leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
369 jmp ia32_ptregs_common
370 .endm
371
372 CFI_STARTPROC32
373
374 PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn, %rdi
375 PTREGSCALL stub32_sigreturn, sys32_sigreturn, %rdi
376 PTREGSCALL stub32_sigaltstack, sys32_sigaltstack, %rdx
377 PTREGSCALL stub32_sigsuspend, sys32_sigsuspend, %rcx
378 PTREGSCALL stub32_execve, sys32_execve, %rcx
379 PTREGSCALL stub32_fork, sys_fork, %rdi
380 PTREGSCALL stub32_clone, sys32_clone, %rdx
381 PTREGSCALL stub32_vfork, sys_vfork, %rdi
382 PTREGSCALL stub32_iopl, sys_iopl, %rsi
383 PTREGSCALL stub32_rt_sigsuspend, sys_rt_sigsuspend, %rdx
384
385ENTRY(ia32_ptregs_common)
386 popq %r11
387 CFI_ENDPROC
388 CFI_STARTPROC32 simple
389 CFI_SIGNAL_FRAME
390 CFI_DEF_CFA rsp,SS+8-ARGOFFSET
391 CFI_REL_OFFSET rax,RAX-ARGOFFSET
392 CFI_REL_OFFSET rcx,RCX-ARGOFFSET
393 CFI_REL_OFFSET rdx,RDX-ARGOFFSET
394 CFI_REL_OFFSET rsi,RSI-ARGOFFSET
395 CFI_REL_OFFSET rdi,RDI-ARGOFFSET
396 CFI_REL_OFFSET rip,RIP-ARGOFFSET
397/* CFI_REL_OFFSET cs,CS-ARGOFFSET*/
398/* CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
399 CFI_REL_OFFSET rsp,RSP-ARGOFFSET
400/* CFI_REL_OFFSET ss,SS-ARGOFFSET*/
401 SAVE_REST
402 call *%rax
403 RESTORE_REST
404 jmp ia32_sysret /* misbalances the return cache */
405 CFI_ENDPROC
406END(ia32_ptregs_common)
407
408 .section .rodata,"a"
409 .align 8
410ia32_sys_call_table:
411 .quad sys_restart_syscall
412 .quad sys_exit
413 .quad stub32_fork
414 .quad sys_read
415 .quad sys_write
416 .quad compat_sys_open /* 5 */
417 .quad sys_close
418 .quad sys32_waitpid
419 .quad sys_creat
420 .quad sys_link
421 .quad sys_unlink /* 10 */
422 .quad stub32_execve
423 .quad sys_chdir
424 .quad compat_sys_time
425 .quad sys_mknod
426 .quad sys_chmod /* 15 */
427 .quad sys_lchown16
428 .quad quiet_ni_syscall /* old break syscall holder */
429 .quad sys_stat
430 .quad sys32_lseek
431 .quad sys_getpid /* 20 */
432 .quad compat_sys_mount /* mount */
433 .quad sys_oldumount /* old_umount */
434 .quad sys_setuid16
435 .quad sys_getuid16
436 .quad compat_sys_stime /* stime */ /* 25 */
437 .quad sys32_ptrace /* ptrace */
438 .quad sys_alarm
439 .quad sys_fstat /* (old)fstat */
440 .quad sys_pause
441 .quad compat_sys_utime /* 30 */
442 .quad quiet_ni_syscall /* old stty syscall holder */
443 .quad quiet_ni_syscall /* old gtty syscall holder */
444 .quad sys_access
445 .quad sys_nice
446 .quad quiet_ni_syscall /* 35 */ /* old ftime syscall holder */
447 .quad sys_sync
448 .quad sys32_kill
449 .quad sys_rename
450 .quad sys_mkdir
451 .quad sys_rmdir /* 40 */
452 .quad sys_dup
453 .quad sys32_pipe
454 .quad compat_sys_times
455 .quad quiet_ni_syscall /* old prof syscall holder */
456 .quad sys_brk /* 45 */
457 .quad sys_setgid16
458 .quad sys_getgid16
459 .quad sys_signal
460 .quad sys_geteuid16
461 .quad sys_getegid16 /* 50 */
462 .quad sys_acct
463 .quad sys_umount /* new_umount */
464 .quad quiet_ni_syscall /* old lock syscall holder */
465 .quad compat_sys_ioctl
466 .quad compat_sys_fcntl64 /* 55 */
467 .quad quiet_ni_syscall /* old mpx syscall holder */
468 .quad sys_setpgid
469 .quad quiet_ni_syscall /* old ulimit syscall holder */
470 .quad sys32_olduname
471 .quad sys_umask /* 60 */
472 .quad sys_chroot
473 .quad sys32_ustat
474 .quad sys_dup2
475 .quad sys_getppid
476 .quad sys_getpgrp /* 65 */
477 .quad sys_setsid
478 .quad sys32_sigaction
479 .quad sys_sgetmask
480 .quad sys_ssetmask
481 .quad sys_setreuid16 /* 70 */
482 .quad sys_setregid16
483 .quad stub32_sigsuspend
484 .quad compat_sys_sigpending
485 .quad sys_sethostname
486 .quad compat_sys_setrlimit /* 75 */
487 .quad compat_sys_old_getrlimit /* old_getrlimit */
488 .quad compat_sys_getrusage
489 .quad sys32_gettimeofday
490 .quad sys32_settimeofday
491 .quad sys_getgroups16 /* 80 */
492 .quad sys_setgroups16
493 .quad sys32_old_select
494 .quad sys_symlink
495 .quad sys_lstat
496 .quad sys_readlink /* 85 */
497 .quad sys_uselib
498 .quad sys_swapon
499 .quad sys_reboot
500 .quad compat_sys_old_readdir
501 .quad sys32_mmap /* 90 */
502 .quad sys_munmap
503 .quad sys_truncate
504 .quad sys_ftruncate
505 .quad sys_fchmod
506 .quad sys_fchown16 /* 95 */
507 .quad sys_getpriority
508 .quad sys_setpriority
509 .quad quiet_ni_syscall /* old profil syscall holder */
510 .quad compat_sys_statfs
511 .quad compat_sys_fstatfs /* 100 */
512 .quad sys_ioperm
513 .quad compat_sys_socketcall
514 .quad sys_syslog
515 .quad compat_sys_setitimer
516 .quad compat_sys_getitimer /* 105 */
517 .quad compat_sys_newstat
518 .quad compat_sys_newlstat
519 .quad compat_sys_newfstat
520 .quad sys32_uname
521 .quad stub32_iopl /* 110 */
522 .quad sys_vhangup
523 .quad quiet_ni_syscall /* old "idle" system call */
524 .quad sys32_vm86_warning /* vm86old */
525 .quad compat_sys_wait4
526 .quad sys_swapoff /* 115 */
527 .quad compat_sys_sysinfo
528 .quad sys32_ipc
529 .quad sys_fsync
530 .quad stub32_sigreturn
531 .quad stub32_clone /* 120 */
532 .quad sys_setdomainname
533 .quad sys_uname
534 .quad sys_modify_ldt
535 .quad compat_sys_adjtimex
536 .quad sys32_mprotect /* 125 */
537 .quad compat_sys_sigprocmask
538 .quad quiet_ni_syscall /* create_module */
539 .quad sys_init_module
540 .quad sys_delete_module
541 .quad quiet_ni_syscall /* 130 get_kernel_syms */
542 .quad sys32_quotactl
543 .quad sys_getpgid
544 .quad sys_fchdir
545 .quad quiet_ni_syscall /* bdflush */
546 .quad sys_sysfs /* 135 */
547 .quad sys_personality
548 .quad quiet_ni_syscall /* for afs_syscall */
549 .quad sys_setfsuid16
550 .quad sys_setfsgid16
551 .quad sys_llseek /* 140 */
552 .quad compat_sys_getdents
553 .quad compat_sys_select
554 .quad sys_flock
555 .quad sys_msync
556 .quad compat_sys_readv /* 145 */
557 .quad compat_sys_writev
558 .quad sys_getsid
559 .quad sys_fdatasync
560 .quad sys32_sysctl /* sysctl */
561 .quad sys_mlock /* 150 */
562 .quad sys_munlock
563 .quad sys_mlockall
564 .quad sys_munlockall
565 .quad sys_sched_setparam
566 .quad sys_sched_getparam /* 155 */
567 .quad sys_sched_setscheduler
568 .quad sys_sched_getscheduler
569 .quad sys_sched_yield
570 .quad sys_sched_get_priority_max
571 .quad sys_sched_get_priority_min /* 160 */
572 .quad sys32_sched_rr_get_interval
573 .quad compat_sys_nanosleep
574 .quad sys_mremap
575 .quad sys_setresuid16
576 .quad sys_getresuid16 /* 165 */
577 .quad sys32_vm86_warning /* vm86 */
578 .quad quiet_ni_syscall /* query_module */
579 .quad sys_poll
580 .quad compat_sys_nfsservctl
581 .quad sys_setresgid16 /* 170 */
582 .quad sys_getresgid16
583 .quad sys_prctl
584 .quad stub32_rt_sigreturn
585 .quad sys32_rt_sigaction
586 .quad sys32_rt_sigprocmask /* 175 */
587 .quad sys32_rt_sigpending
588 .quad compat_sys_rt_sigtimedwait
589 .quad sys32_rt_sigqueueinfo
590 .quad stub32_rt_sigsuspend
591 .quad sys32_pread /* 180 */
592 .quad sys32_pwrite
593 .quad sys_chown16
594 .quad sys_getcwd
595 .quad sys_capget
596 .quad sys_capset
597 .quad stub32_sigaltstack
598 .quad sys32_sendfile
599 .quad quiet_ni_syscall /* streams1 */
600 .quad quiet_ni_syscall /* streams2 */
601 .quad stub32_vfork /* 190 */
602 .quad compat_sys_getrlimit
603 .quad sys32_mmap2
604 .quad sys32_truncate64
605 .quad sys32_ftruncate64
606 .quad sys32_stat64 /* 195 */
607 .quad sys32_lstat64
608 .quad sys32_fstat64
609 .quad sys_lchown
610 .quad sys_getuid
611 .quad sys_getgid /* 200 */
612 .quad sys_geteuid
613 .quad sys_getegid
614 .quad sys_setreuid
615 .quad sys_setregid
616 .quad sys_getgroups /* 205 */
617 .quad sys_setgroups
618 .quad sys_fchown
619 .quad sys_setresuid
620 .quad sys_getresuid
621 .quad sys_setresgid /* 210 */
622 .quad sys_getresgid
623 .quad sys_chown
624 .quad sys_setuid
625 .quad sys_setgid
626 .quad sys_setfsuid /* 215 */
627 .quad sys_setfsgid
628 .quad sys_pivot_root
629 .quad sys_mincore
630 .quad sys_madvise
631 .quad compat_sys_getdents64 /* 220 getdents64 */
632 .quad compat_sys_fcntl64
633 .quad quiet_ni_syscall /* tux */
634 .quad quiet_ni_syscall /* security */
635 .quad sys_gettid
636 .quad sys32_readahead /* 225 */
637 .quad sys_setxattr
638 .quad sys_lsetxattr
639 .quad sys_fsetxattr
640 .quad sys_getxattr
641 .quad sys_lgetxattr /* 230 */
642 .quad sys_fgetxattr
643 .quad sys_listxattr
644 .quad sys_llistxattr
645 .quad sys_flistxattr
646 .quad sys_removexattr /* 235 */
647 .quad sys_lremovexattr
648 .quad sys_fremovexattr
649 .quad sys_tkill
650 .quad sys_sendfile64
651 .quad compat_sys_futex /* 240 */
652 .quad compat_sys_sched_setaffinity
653 .quad compat_sys_sched_getaffinity
654 .quad sys32_set_thread_area
655 .quad sys32_get_thread_area
656 .quad compat_sys_io_setup /* 245 */
657 .quad sys_io_destroy
658 .quad compat_sys_io_getevents
659 .quad compat_sys_io_submit
660 .quad sys_io_cancel
661 .quad sys32_fadvise64 /* 250 */
662 .quad quiet_ni_syscall /* free_huge_pages */
663 .quad sys_exit_group
664 .quad sys32_lookup_dcookie
665 .quad sys_epoll_create
666 .quad sys_epoll_ctl /* 255 */
667 .quad sys_epoll_wait
668 .quad sys_remap_file_pages
669 .quad sys_set_tid_address
670 .quad compat_sys_timer_create
671 .quad compat_sys_timer_settime /* 260 */
672 .quad compat_sys_timer_gettime
673 .quad sys_timer_getoverrun
674 .quad sys_timer_delete
675 .quad compat_sys_clock_settime
676 .quad compat_sys_clock_gettime /* 265 */
677 .quad compat_sys_clock_getres
678 .quad compat_sys_clock_nanosleep
679 .quad compat_sys_statfs64
680 .quad compat_sys_fstatfs64
681 .quad sys_tgkill /* 270 */
682 .quad compat_sys_utimes
683 .quad sys32_fadvise64_64
684 .quad quiet_ni_syscall /* sys_vserver */
685 .quad sys_mbind
686 .quad compat_sys_get_mempolicy /* 275 */
687 .quad sys_set_mempolicy
688 .quad compat_sys_mq_open
689 .quad sys_mq_unlink
690 .quad compat_sys_mq_timedsend
691 .quad compat_sys_mq_timedreceive /* 280 */
692 .quad compat_sys_mq_notify
693 .quad compat_sys_mq_getsetattr
694 .quad compat_sys_kexec_load /* reserved for kexec */
695 .quad compat_sys_waitid
696 .quad quiet_ni_syscall /* 285: sys_altroot */
697 .quad sys_add_key
698 .quad sys_request_key
699 .quad sys_keyctl
700 .quad sys_ioprio_set
701 .quad sys_ioprio_get /* 290 */
702 .quad sys_inotify_init
703 .quad sys_inotify_add_watch
704 .quad sys_inotify_rm_watch
705 .quad sys_migrate_pages
706 .quad compat_sys_openat /* 295 */
707 .quad sys_mkdirat
708 .quad sys_mknodat
709 .quad sys_fchownat
710 .quad compat_sys_futimesat
711 .quad sys32_fstatat /* 300 */
712 .quad sys_unlinkat
713 .quad sys_renameat
714 .quad sys_linkat
715 .quad sys_symlinkat
716 .quad sys_readlinkat /* 305 */
717 .quad sys_fchmodat
718 .quad sys_faccessat
719 .quad compat_sys_pselect6
720 .quad compat_sys_ppoll
721 .quad sys_unshare /* 310 */
722 .quad compat_sys_set_robust_list
723 .quad compat_sys_get_robust_list
724 .quad sys_splice
725 .quad sys32_sync_file_range
726 .quad sys_tee /* 315 */
727 .quad compat_sys_vmsplice
728 .quad compat_sys_move_pages
729 .quad sys_getcpu
730 .quad sys_epoll_pwait
731 .quad compat_sys_utimensat /* 320 */
732 .quad compat_sys_signalfd
733 .quad compat_sys_timerfd
734 .quad sys_eventfd
735 .quad sys32_fallocate
736ia32_syscall_end:
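The register convention documented above ia32_syscall (syscall number in %eax, arguments in %ebx..%ebp, result returned in %eax) is the one 32-bit user code relies on when it bypasses the vsyscall page. A minimal userspace sketch, assuming a 32-bit build (gcc -m32); the number 20 for getpid matches the table above:

#include <stdio.h>

/* one-argument int $0x80 wrapper: number in %eax, arg1 in %ebx */
static long int80_syscall1(long nr, long arg1)
{
        long ret;

        asm volatile("int $0x80"
                     : "=a" (ret)
                     : "a" (nr), "b" (arg1)
                     : "memory");
        return ret;
}

int main(void)
{
        long pid = int80_syscall1(20 /* getpid, entry 20 in the table above */, 0);

        printf("getpid() via int $0x80 -> %ld\n", pid);
        return 0;
}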
diff --git a/arch/x86/ia32/ipc32.c b/arch/x86/ia32/ipc32.c
new file mode 100644
index 000000000000..369151dc3213
--- /dev/null
+++ b/arch/x86/ia32/ipc32.c
@@ -0,0 +1,57 @@
1#include <linux/kernel.h>
2#include <linux/spinlock.h>
3#include <linux/list.h>
4#include <linux/syscalls.h>
5#include <linux/time.h>
6#include <linux/sem.h>
7#include <linux/msg.h>
8#include <linux/shm.h>
9#include <linux/ipc.h>
10#include <linux/compat.h>
11
12#include <asm-i386/ipc.h>
13
14asmlinkage long
15sys32_ipc(u32 call, int first, int second, int third,
16 compat_uptr_t ptr, u32 fifth)
17{
18 int version;
19
20 version = call >> 16; /* hack for backward compatibility */
21 call &= 0xffff;
22
23 switch (call) {
24 case SEMOP:
25 /* struct sembuf is the same on 32 and 64bit :)) */
26 return sys_semtimedop(first, compat_ptr(ptr), second, NULL);
27 case SEMTIMEDOP:
28 return compat_sys_semtimedop(first, compat_ptr(ptr), second,
29 compat_ptr(fifth));
30 case SEMGET:
31 return sys_semget(first, second, third);
32 case SEMCTL:
33 return compat_sys_semctl(first, second, third, compat_ptr(ptr));
34
35 case MSGSND:
36 return compat_sys_msgsnd(first, second, third, compat_ptr(ptr));
37 case MSGRCV:
38 return compat_sys_msgrcv(first, second, fifth, third,
39 version, compat_ptr(ptr));
40 case MSGGET:
41 return sys_msgget((key_t) first, second);
42 case MSGCTL:
43 return compat_sys_msgctl(first, second, compat_ptr(ptr));
44
45 case SHMAT:
46 return compat_sys_shmat(first, second, third, version,
47 compat_ptr(ptr));
48 break;
49 case SHMDT:
50 return sys_shmdt(compat_ptr(ptr));
51 case SHMGET:
52 return sys_shmget(first, (unsigned)second, third);
53 case SHMCTL:
54 return compat_sys_shmctl(first, second, compat_ptr(ptr));
55 }
56 return -ENOSYS;
57}
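sys32_ipc() above demultiplexes the single ipc(2) entry that 32-bit libc uses for all SysV IPC operations: the low 16 bits of call select the operation and the high 16 bits carry the old version hack. A hedged userspace sketch of the caller side, assuming a 32-bit build; the operation code 2 for SEMGET is taken from linux/ipc.h:

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/syscall.h>
#include <unistd.h>

#define SEMGET_OP 2     /* SEMGET from <linux/ipc.h>, assumed here */

int main(void)
{
        /* equivalent to semget(IPC_PRIVATE, 1, IPC_CREAT | 0600) */
        long id = syscall(117 /* sys32_ipc, entry 117 in the table above */,
                          SEMGET_OP, IPC_PRIVATE, 1, IPC_CREAT | 0600, NULL);

        printf("semget via the multiplexed ipc call -> %ld\n", id);
        return 0;
}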
diff --git a/arch/x86/ia32/mmap32.c b/arch/x86/ia32/mmap32.c
new file mode 100644
index 000000000000..e4b84b4a417a
--- /dev/null
+++ b/arch/x86/ia32/mmap32.c
@@ -0,0 +1,79 @@
1/*
2 * linux/arch/x86/ia32/mmap32.c
3 *
4 * flexible mmap layout support
5 *
6 * Based on the i386 version which was
7 *
8 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
9 * All Rights Reserved.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 *
25 *
26 * Started by Ingo Molnar <mingo@elte.hu>
27 */
28
29#include <linux/personality.h>
30#include <linux/mm.h>
31#include <linux/random.h>
32#include <linux/sched.h>
33
34/*
35 * Top of mmap area (just below the process stack).
36 *
37 * Leave at least a ~128 MB hole.
38 */
39#define MIN_GAP (128*1024*1024)
40#define MAX_GAP (TASK_SIZE/6*5)
41
42static inline unsigned long mmap_base(struct mm_struct *mm)
43{
44 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
45 unsigned long random_factor = 0;
46
47 if (current->flags & PF_RANDOMIZE)
48 random_factor = get_random_int() % (1024*1024);
49
50 if (gap < MIN_GAP)
51 gap = MIN_GAP;
52 else if (gap > MAX_GAP)
53 gap = MAX_GAP;
54
55 return PAGE_ALIGN(TASK_SIZE - gap - random_factor);
56}
57
58/*
59 * This function, called very early during the creation of a new
60 * process VM image, sets up which VM layout function to use:
61 */
62void ia32_pick_mmap_layout(struct mm_struct *mm)
63{
64 /*
65 * Fall back to the standard layout if the personality
66 * bit is set, or if the expected stack growth is unlimited:
67 */
68 if (sysctl_legacy_va_layout ||
69 (current->personality & ADDR_COMPAT_LAYOUT) ||
70 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY) {
71 mm->mmap_base = TASK_UNMAPPED_BASE;
72 mm->get_unmapped_area = arch_get_unmapped_area;
73 mm->unmap_area = arch_unmap_area;
74 } else {
75 mm->mmap_base = mmap_base(mm);
76 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
77 mm->unmap_area = arch_unmap_area_topdown;
78 }
79}
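mmap_base() above clamps the stack gap between MIN_GAP and MAX_GAP and subtracts it (plus an optional random offset) from the top of the 32-bit task. A standalone sketch of just that arithmetic, with an assumed 3 GB compat TASK_SIZE and a simplified page-rounding macro:

#include <stdio.h>

#define MIN_GAP         (128UL * 1024 * 1024)
#define TASK_SIZE_32    0xc0000000UL            /* assumed compat task size */
#define MAX_GAP         (TASK_SIZE_32 / 6 * 5)
/* round up to a 4 KB boundary, like the kernel's PAGE_ALIGN() */
#define PAGE_ALIGN(x)   (((x) + 0xfffUL) & ~0xfffUL)

static unsigned long mmap_base_demo(unsigned long stack_rlimit,
                                    unsigned long random_factor)
{
        unsigned long gap = stack_rlimit;

        if (gap < MIN_GAP)
                gap = MIN_GAP;
        else if (gap > MAX_GAP)
                gap = MAX_GAP;

        return PAGE_ALIGN(TASK_SIZE_32 - gap - random_factor);
}

int main(void)
{
        /* an 8 MB stack rlimit is below MIN_GAP, so the gap becomes 128 MB */
        printf("mmap_base = %#lx\n", mmap_base_demo(8UL << 20, 0));
        return 0;
}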
diff --git a/arch/x86/ia32/ptrace32.c b/arch/x86/ia32/ptrace32.c
new file mode 100644
index 000000000000..4a233ad6269c
--- /dev/null
+++ b/arch/x86/ia32/ptrace32.c
@@ -0,0 +1,404 @@
1/*
2 * 32bit ptrace for x86-64.
3 *
4 * Copyright 2001,2002 Andi Kleen, SuSE Labs.
5 * Some parts copied from arch/i386/kernel/ptrace.c. See that file for earlier
6 * copyright.
7 *
8 * This allows access to 64-bit processes too, but there is no way to see the
9 * extended register contents.
10 */
11
12#include <linux/kernel.h>
13#include <linux/stddef.h>
14#include <linux/sched.h>
15#include <linux/syscalls.h>
16#include <linux/unistd.h>
17#include <linux/mm.h>
18#include <linux/err.h>
19#include <linux/ptrace.h>
20#include <asm/ptrace.h>
21#include <asm/compat.h>
22#include <asm/uaccess.h>
23#include <asm/user32.h>
24#include <asm/user.h>
25#include <asm/errno.h>
26#include <asm/debugreg.h>
27#include <asm/i387.h>
28#include <asm/fpu32.h>
29#include <asm/ia32.h>
30
31/*
32 * Determines which flags the user has access to [1 = access, 0 = no access].
33 * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), IOPL(12-13), IF(9).
34 * Also masks reserved bits (31-22, 15, 5, 3, 1).
35 */
36#define FLAG_MASK 0x54dd5UL
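A quick way to check that the mask matches the comment: the permitted bits are exactly CF, PF, AF, ZF, SF, TF, DF, OF, NT, RF and AC, at their architectural EFLAGS positions. A small verification program in plain userspace C:

#include <stdio.h>

#define BIT(n)  (1UL << (n))

int main(void)
{
        unsigned long mask = BIT(0)  /* CF */ | BIT(2)  /* PF */ |
                             BIT(4)  /* AF */ | BIT(6)  /* ZF */ |
                             BIT(7)  /* SF */ | BIT(8)  /* TF */ |
                             BIT(10) /* DF */ | BIT(11) /* OF */ |
                             BIT(14) /* NT */ | BIT(16) /* RF */ |
                             BIT(18) /* AC */;

        printf("%#lx (expected 0x54dd5)\n", mask);
        return mask == 0x54dd5UL ? 0 : 1;
}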
37
38#define R32(l,q) \
39 case offsetof(struct user32, regs.l): stack[offsetof(struct pt_regs, q)/8] = val; break
40
41static int putreg32(struct task_struct *child, unsigned regno, u32 val)
42{
43 int i;
44 __u64 *stack = (__u64 *)task_pt_regs(child);
45
46 switch (regno) {
47 case offsetof(struct user32, regs.fs):
48 if (val && (val & 3) != 3) return -EIO;
49 child->thread.fsindex = val & 0xffff;
50 break;
51 case offsetof(struct user32, regs.gs):
52 if (val && (val & 3) != 3) return -EIO;
53 child->thread.gsindex = val & 0xffff;
54 break;
55 case offsetof(struct user32, regs.ds):
56 if (val && (val & 3) != 3) return -EIO;
57 child->thread.ds = val & 0xffff;
58 break;
59 case offsetof(struct user32, regs.es):
60 child->thread.es = val & 0xffff;
61 break;
62 case offsetof(struct user32, regs.ss):
63 if ((val & 3) != 3) return -EIO;
64 stack[offsetof(struct pt_regs, ss)/8] = val & 0xffff;
65 break;
66 case offsetof(struct user32, regs.cs):
67 if ((val & 3) != 3) return -EIO;
68 stack[offsetof(struct pt_regs, cs)/8] = val & 0xffff;
69 break;
70
71 R32(ebx, rbx);
72 R32(ecx, rcx);
73 R32(edx, rdx);
74 R32(edi, rdi);
75 R32(esi, rsi);
76 R32(ebp, rbp);
77 R32(eax, rax);
78 R32(orig_eax, orig_rax);
79 R32(eip, rip);
80 R32(esp, rsp);
81
82 case offsetof(struct user32, regs.eflags): {
83 __u64 *flags = &stack[offsetof(struct pt_regs, eflags)/8];
84 val &= FLAG_MASK;
85 *flags = val | (*flags & ~FLAG_MASK);
86 break;
87 }
88
89 case offsetof(struct user32, u_debugreg[4]):
90 case offsetof(struct user32, u_debugreg[5]):
91 return -EIO;
92
93 case offsetof(struct user32, u_debugreg[0]):
94 child->thread.debugreg0 = val;
95 break;
96
97 case offsetof(struct user32, u_debugreg[1]):
98 child->thread.debugreg1 = val;
99 break;
100
101 case offsetof(struct user32, u_debugreg[2]):
102 child->thread.debugreg2 = val;
103 break;
104
105 case offsetof(struct user32, u_debugreg[3]):
106 child->thread.debugreg3 = val;
107 break;
108
109 case offsetof(struct user32, u_debugreg[6]):
110 child->thread.debugreg6 = val;
111 break;
112
113 case offsetof(struct user32, u_debugreg[7]):
114 val &= ~DR_CONTROL_RESERVED;
115 /* See arch/i386/kernel/ptrace.c for an explanation of
116 * this awkward check. */
117 for(i=0; i<4; i++)
118 if ((0x5454 >> ((val >> (16 + 4*i)) & 0xf)) & 1)
119 return -EIO;
120 child->thread.debugreg7 = val;
121 if (val)
122 set_tsk_thread_flag(child, TIF_DEBUG);
123 else
124 clear_tsk_thread_flag(child, TIF_DEBUG);
125 break;
126
127 default:
128 if (regno > sizeof(struct user32) || (regno & 3))
129 return -EIO;
130
131 /* Other dummy fields in the virtual user structure are ignored */
132 break;
133 }
134 return 0;
135}
136
137#undef R32
138
139#define R32(l,q) \
140 case offsetof(struct user32, regs.l): *val = stack[offsetof(struct pt_regs, q)/8]; break
141
142static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
143{
144 __u64 *stack = (__u64 *)task_pt_regs(child);
145
146 switch (regno) {
147 case offsetof(struct user32, regs.fs):
148 *val = child->thread.fsindex;
149 break;
150 case offsetof(struct user32, regs.gs):
151 *val = child->thread.gsindex;
152 break;
153 case offsetof(struct user32, regs.ds):
154 *val = child->thread.ds;
155 break;
156 case offsetof(struct user32, regs.es):
157 *val = child->thread.es;
158 break;
159
160 R32(cs, cs);
161 R32(ss, ss);
162 R32(ebx, rbx);
163 R32(ecx, rcx);
164 R32(edx, rdx);
165 R32(edi, rdi);
166 R32(esi, rsi);
167 R32(ebp, rbp);
168 R32(eax, rax);
169 R32(orig_eax, orig_rax);
170 R32(eip, rip);
171 R32(eflags, eflags);
172 R32(esp, rsp);
173
174 case offsetof(struct user32, u_debugreg[0]):
175 *val = child->thread.debugreg0;
176 break;
177 case offsetof(struct user32, u_debugreg[1]):
178 *val = child->thread.debugreg1;
179 break;
180 case offsetof(struct user32, u_debugreg[2]):
181 *val = child->thread.debugreg2;
182 break;
183 case offsetof(struct user32, u_debugreg[3]):
184 *val = child->thread.debugreg3;
185 break;
186 case offsetof(struct user32, u_debugreg[6]):
187 *val = child->thread.debugreg6;
188 break;
189 case offsetof(struct user32, u_debugreg[7]):
190 *val = child->thread.debugreg7;
191 break;
192
193 default:
194 if (regno > sizeof(struct user32) || (regno & 3))
195 return -EIO;
196
197 /* Other dummy fields in the virtual user structure are ignored */
198 *val = 0;
199 break;
200 }
201 return 0;
202}
203
204#undef R32
205
206static long ptrace32_siginfo(unsigned request, u32 pid, u32 addr, u32 data)
207{
208 int ret;
209 compat_siginfo_t __user *si32 = compat_ptr(data);
210 siginfo_t ssi;
211 siginfo_t __user *si = compat_alloc_user_space(sizeof(siginfo_t));
212 if (request == PTRACE_SETSIGINFO) {
213 memset(&ssi, 0, sizeof(siginfo_t));
214 ret = copy_siginfo_from_user32(&ssi, si32);
215 if (ret)
216 return ret;
217 if (copy_to_user(si, &ssi, sizeof(siginfo_t)))
218 return -EFAULT;
219 }
220 ret = sys_ptrace(request, pid, addr, (unsigned long)si);
221 if (ret)
222 return ret;
223 if (request == PTRACE_GETSIGINFO) {
224 if (copy_from_user(&ssi, si, sizeof(siginfo_t)))
225 return -EFAULT;
226 ret = copy_siginfo_to_user32(si32, &ssi);
227 }
228 return ret;
229}
230
231asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
232{
233 struct task_struct *child;
234 struct pt_regs *childregs;
235 void __user *datap = compat_ptr(data);
236 int ret;
237 __u32 val;
238
239 switch (request) {
240 case PTRACE_TRACEME:
241 case PTRACE_ATTACH:
242 case PTRACE_KILL:
243 case PTRACE_CONT:
244 case PTRACE_SINGLESTEP:
245 case PTRACE_DETACH:
246 case PTRACE_SYSCALL:
247 case PTRACE_OLDSETOPTIONS:
248 case PTRACE_SETOPTIONS:
249 case PTRACE_SET_THREAD_AREA:
250 case PTRACE_GET_THREAD_AREA:
251 return sys_ptrace(request, pid, addr, data);
252
253 default:
254 return -EINVAL;
255
256 case PTRACE_PEEKTEXT:
257 case PTRACE_PEEKDATA:
258 case PTRACE_POKEDATA:
259 case PTRACE_POKETEXT:
260 case PTRACE_POKEUSR:
261 case PTRACE_PEEKUSR:
262 case PTRACE_GETREGS:
263 case PTRACE_SETREGS:
264 case PTRACE_SETFPREGS:
265 case PTRACE_GETFPREGS:
266 case PTRACE_SETFPXREGS:
267 case PTRACE_GETFPXREGS:
268 case PTRACE_GETEVENTMSG:
269 break;
270
271 case PTRACE_SETSIGINFO:
272 case PTRACE_GETSIGINFO:
273 return ptrace32_siginfo(request, pid, addr, data);
274 }
275
276 child = ptrace_get_task_struct(pid);
277 if (IS_ERR(child))
278 return PTR_ERR(child);
279
280 ret = ptrace_check_attach(child, request == PTRACE_KILL);
281 if (ret < 0)
282 goto out;
283
284 childregs = task_pt_regs(child);
285
286 switch (request) {
287 case PTRACE_PEEKDATA:
288 case PTRACE_PEEKTEXT:
289 ret = 0;
290 if (access_process_vm(child, addr, &val, sizeof(u32), 0)!=sizeof(u32))
291 ret = -EIO;
292 else
293 ret = put_user(val, (unsigned int __user *)datap);
294 break;
295
296 case PTRACE_POKEDATA:
297 case PTRACE_POKETEXT:
298 ret = 0;
299 if (access_process_vm(child, addr, &data, sizeof(u32), 1)!=sizeof(u32))
300 ret = -EIO;
301 break;
302
303 case PTRACE_PEEKUSR:
304 ret = getreg32(child, addr, &val);
305 if (ret == 0)
306 ret = put_user(val, (__u32 __user *)datap);
307 break;
308
309 case PTRACE_POKEUSR:
310 ret = putreg32(child, addr, data);
311 break;
312
313 case PTRACE_GETREGS: { /* Get all gp regs from the child. */
314 int i;
315 if (!access_ok(VERIFY_WRITE, datap, 16*4)) {
316 ret = -EIO;
317 break;
318 }
319 ret = 0;
320 for ( i = 0; i <= 16*4 ; i += sizeof(__u32) ) {
321 getreg32(child, i, &val);
322 ret |= __put_user(val,(u32 __user *)datap);
323 datap += sizeof(u32);
324 }
325 break;
326 }
327
328 case PTRACE_SETREGS: { /* Set all gp regs in the child. */
329 unsigned long tmp;
330 int i;
331 if (!access_ok(VERIFY_READ, datap, 16*4)) {
332 ret = -EIO;
333 break;
334 }
335 ret = 0;
336 for ( i = 0; i <= 16*4; i += sizeof(u32) ) {
337 ret |= __get_user(tmp, (u32 __user *)datap);
338 putreg32(child, i, tmp);
339 datap += sizeof(u32);
340 }
341 break;
342 }
343
344 case PTRACE_GETFPREGS:
345 ret = -EIO;
346 if (!access_ok(VERIFY_WRITE, compat_ptr(data),
347 sizeof(struct user_i387_struct)))
348 break;
349 save_i387_ia32(child, datap, childregs, 1);
350 ret = 0;
351 break;
352
353 case PTRACE_SETFPREGS:
354 ret = -EIO;
355 if (!access_ok(VERIFY_READ, datap,
356 sizeof(struct user_i387_struct)))
357 break;
358 ret = 0;
359 /* don't check EFAULT to be bug-to-bug compatible with i386 */
360 restore_i387_ia32(child, datap, 1);
361 break;
362
363 case PTRACE_GETFPXREGS: {
364 struct user32_fxsr_struct __user *u = datap;
365 init_fpu(child);
366 ret = -EIO;
367 if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
368 break;
369 ret = -EFAULT;
370 if (__copy_to_user(u, &child->thread.i387.fxsave, sizeof(*u)))
371 break;
372 ret = __put_user(childregs->cs, &u->fcs);
373 ret |= __put_user(child->thread.ds, &u->fos);
374 break;
375 }
376 case PTRACE_SETFPXREGS: {
377 struct user32_fxsr_struct __user *u = datap;
378 unlazy_fpu(child);
379 ret = -EIO;
380 if (!access_ok(VERIFY_READ, u, sizeof(*u)))
381 break;
382 /* no checking to be bug-to-bug compatible with i386. */
383 /* but silence warning */
384 if (__copy_from_user(&child->thread.i387.fxsave, u, sizeof(*u)))
385 ;
386 set_stopped_child_used_math(child);
387 child->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
388 ret = 0;
389 break;
390 }
391
392 case PTRACE_GETEVENTMSG:
393 ret = put_user(child->ptrace_message,(unsigned int __user *)compat_ptr(data));
394 break;
395
396 default:
397 BUG();
398 }
399
400 out:
401 put_task_struct(child);
402 return ret;
403}
404
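getreg32() and putreg32() above serve the PTRACE_PEEKUSR/POKEUSR requests that 32-bit debuggers issue against offsets into struct user. A hedged sketch of the consumer side, assuming a 32-bit build (-m32) so struct user has the i386 layout; error handling is deliberately minimal:

#include <stdio.h>
#include <stddef.h>
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t child = fork();

        if (child == 0) {
                ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                raise(SIGSTOP);                 /* let the parent inspect us */
                _exit(0);
        }
        waitpid(child, NULL, 0);

        /* on the 64-bit side this lands in getreg32(child, regs.eip, ...) */
        long eip = ptrace(PTRACE_PEEKUSER, child,
                          (void *)offsetof(struct user, regs.eip), NULL);
        printf("child eip = %#lx\n", eip);

        ptrace(PTRACE_CONT, child, NULL, NULL);
        waitpid(child, NULL, 0);
        return 0;
}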
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
new file mode 100644
index 000000000000..bee96d614432
--- /dev/null
+++ b/arch/x86/ia32/sys_ia32.c
@@ -0,0 +1,889 @@
1/*
2 * sys_ia32.c: Conversion between 32bit and 64bit native syscalls. Based on
3 * sys_sparc32
4 *
5 * Copyright (C) 2000 VA Linux Co
6 * Copyright (C) 2000 Don Dugger <n0ano@valinux.com>
7 * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
8 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
9 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
10 * Copyright (C) 2000 Hewlett-Packard Co.
11 * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
12 * Copyright (C) 2000,2001,2002 Andi Kleen, SuSE Labs (x86-64 port)
13 *
14 * These routines maintain argument size conversion between 32bit and 64bit
15 * environments. In 2.5 most of this should be moved to a generic directory.
16 *
17 * This file assumes that there is a hole at the end of user address space.
18 *
19 * Some of the functions are currently little-endian specific; these are hopefully all marked.
20 * This should be fixed.
21 */
22
23#include <linux/kernel.h>
24#include <linux/sched.h>
25#include <linux/fs.h>
26#include <linux/file.h>
27#include <linux/signal.h>
28#include <linux/syscalls.h>
29#include <linux/resource.h>
30#include <linux/times.h>
31#include <linux/utsname.h>
32#include <linux/smp.h>
33#include <linux/smp_lock.h>
34#include <linux/sem.h>
35#include <linux/msg.h>
36#include <linux/mm.h>
37#include <linux/shm.h>
38#include <linux/slab.h>
39#include <linux/uio.h>
40#include <linux/nfs_fs.h>
41#include <linux/quota.h>
42#include <linux/module.h>
43#include <linux/sunrpc/svc.h>
44#include <linux/nfsd/nfsd.h>
45#include <linux/nfsd/cache.h>
46#include <linux/nfsd/xdr.h>
47#include <linux/nfsd/syscall.h>
48#include <linux/poll.h>
49#include <linux/personality.h>
50#include <linux/stat.h>
51#include <linux/ipc.h>
52#include <linux/rwsem.h>
53#include <linux/binfmts.h>
54#include <linux/init.h>
55#include <linux/aio_abi.h>
56#include <linux/aio.h>
57#include <linux/compat.h>
58#include <linux/vfs.h>
59#include <linux/ptrace.h>
60#include <linux/highuid.h>
61#include <linux/vmalloc.h>
62#include <linux/fsnotify.h>
63#include <linux/sysctl.h>
64#include <asm/mman.h>
65#include <asm/types.h>
66#include <asm/uaccess.h>
67#include <asm/semaphore.h>
68#include <asm/atomic.h>
69#include <asm/ldt.h>
70
71#include <net/scm.h>
72#include <net/sock.h>
73#include <asm/ia32.h>
74
75#define AA(__x) ((unsigned long)(__x))
76
77int cp_compat_stat(struct kstat *kbuf, struct compat_stat __user *ubuf)
78{
79 compat_ino_t ino;
80
81 typeof(ubuf->st_uid) uid = 0;
82 typeof(ubuf->st_gid) gid = 0;
83 SET_UID(uid, kbuf->uid);
84 SET_GID(gid, kbuf->gid);
85 if (!old_valid_dev(kbuf->dev) || !old_valid_dev(kbuf->rdev))
86 return -EOVERFLOW;
87 if (kbuf->size >= 0x7fffffff)
88 return -EOVERFLOW;
89 ino = kbuf->ino;
90 if (sizeof(ino) < sizeof(kbuf->ino) && ino != kbuf->ino)
91 return -EOVERFLOW;
92 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct compat_stat)) ||
93 __put_user (old_encode_dev(kbuf->dev), &ubuf->st_dev) ||
94 __put_user (ino, &ubuf->st_ino) ||
95 __put_user (kbuf->mode, &ubuf->st_mode) ||
96 __put_user (kbuf->nlink, &ubuf->st_nlink) ||
97 __put_user (uid, &ubuf->st_uid) ||
98 __put_user (gid, &ubuf->st_gid) ||
99 __put_user (old_encode_dev(kbuf->rdev), &ubuf->st_rdev) ||
100 __put_user (kbuf->size, &ubuf->st_size) ||
101 __put_user (kbuf->atime.tv_sec, &ubuf->st_atime) ||
102 __put_user (kbuf->atime.tv_nsec, &ubuf->st_atime_nsec) ||
103 __put_user (kbuf->mtime.tv_sec, &ubuf->st_mtime) ||
104 __put_user (kbuf->mtime.tv_nsec, &ubuf->st_mtime_nsec) ||
105 __put_user (kbuf->ctime.tv_sec, &ubuf->st_ctime) ||
106 __put_user (kbuf->ctime.tv_nsec, &ubuf->st_ctime_nsec) ||
107 __put_user (kbuf->blksize, &ubuf->st_blksize) ||
108 __put_user (kbuf->blocks, &ubuf->st_blocks))
109 return -EFAULT;
110 return 0;
111}
112
113asmlinkage long
114sys32_truncate64(char __user * filename, unsigned long offset_low, unsigned long offset_high)
115{
116 return sys_truncate(filename, ((loff_t) offset_high << 32) | offset_low);
117}
118
119asmlinkage long
120sys32_ftruncate64(unsigned int fd, unsigned long offset_low, unsigned long offset_high)
121{
122 return sys_ftruncate(fd, ((loff_t) offset_high << 32) | offset_low);
123}
124
125/* Another set for IA32/LFS -- x86_64 struct stat is different due to
126 support for 64bit inode numbers. */
127
128static int
129cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
130{
131 typeof(ubuf->st_uid) uid = 0;
132 typeof(ubuf->st_gid) gid = 0;
133 SET_UID(uid, stat->uid);
134 SET_GID(gid, stat->gid);
135 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
136 __put_user(huge_encode_dev(stat->dev), &ubuf->st_dev) ||
137 __put_user (stat->ino, &ubuf->__st_ino) ||
138 __put_user (stat->ino, &ubuf->st_ino) ||
139 __put_user (stat->mode, &ubuf->st_mode) ||
140 __put_user (stat->nlink, &ubuf->st_nlink) ||
141 __put_user (uid, &ubuf->st_uid) ||
142 __put_user (gid, &ubuf->st_gid) ||
143 __put_user (huge_encode_dev(stat->rdev), &ubuf->st_rdev) ||
144 __put_user (stat->size, &ubuf->st_size) ||
145 __put_user (stat->atime.tv_sec, &ubuf->st_atime) ||
146 __put_user (stat->atime.tv_nsec, &ubuf->st_atime_nsec) ||
147 __put_user (stat->mtime.tv_sec, &ubuf->st_mtime) ||
148 __put_user (stat->mtime.tv_nsec, &ubuf->st_mtime_nsec) ||
149 __put_user (stat->ctime.tv_sec, &ubuf->st_ctime) ||
150 __put_user (stat->ctime.tv_nsec, &ubuf->st_ctime_nsec) ||
151 __put_user (stat->blksize, &ubuf->st_blksize) ||
152 __put_user (stat->blocks, &ubuf->st_blocks))
153 return -EFAULT;
154 return 0;
155}
156
157asmlinkage long
158sys32_stat64(char __user * filename, struct stat64 __user *statbuf)
159{
160 struct kstat stat;
161 int ret = vfs_stat(filename, &stat);
162 if (!ret)
163 ret = cp_stat64(statbuf, &stat);
164 return ret;
165}
166
167asmlinkage long
168sys32_lstat64(char __user * filename, struct stat64 __user *statbuf)
169{
170 struct kstat stat;
171 int ret = vfs_lstat(filename, &stat);
172 if (!ret)
173 ret = cp_stat64(statbuf, &stat);
174 return ret;
175}
176
177asmlinkage long
178sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf)
179{
180 struct kstat stat;
181 int ret = vfs_fstat(fd, &stat);
182 if (!ret)
183 ret = cp_stat64(statbuf, &stat);
184 return ret;
185}
186
187asmlinkage long
188sys32_fstatat(unsigned int dfd, char __user *filename,
189 struct stat64 __user* statbuf, int flag)
190{
191 struct kstat stat;
192 int error = -EINVAL;
193
194 if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
195 goto out;
196
197 if (flag & AT_SYMLINK_NOFOLLOW)
198 error = vfs_lstat_fd(dfd, filename, &stat);
199 else
200 error = vfs_stat_fd(dfd, filename, &stat);
201
202 if (!error)
203 error = cp_stat64(statbuf, &stat);
204
205out:
206 return error;
207}
208
209/*
210 * Linux/i386 didn't use to be able to handle more than
211 * 4 system call parameters, so these system calls used a memory
212 * block for parameter passing.
213 */
214
215struct mmap_arg_struct {
216 unsigned int addr;
217 unsigned int len;
218 unsigned int prot;
219 unsigned int flags;
220 unsigned int fd;
221 unsigned int offset;
222};
223
224asmlinkage long
225sys32_mmap(struct mmap_arg_struct __user *arg)
226{
227 struct mmap_arg_struct a;
228 struct file *file = NULL;
229 unsigned long retval;
230 struct mm_struct *mm;
231
232 if (copy_from_user(&a, arg, sizeof(a)))
233 return -EFAULT;
234
235 if (a.offset & ~PAGE_MASK)
236 return -EINVAL;
237
238 if (!(a.flags & MAP_ANONYMOUS)) {
239 file = fget(a.fd);
240 if (!file)
241 return -EBADF;
242 }
243
244 mm = current->mm;
245 down_write(&mm->mmap_sem);
246 retval = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, a.offset>>PAGE_SHIFT);
247 if (file)
248 fput(file);
249
250 up_write(&mm->mmap_sem);
251
252 return retval;
253}
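The comment above explains why old mmap takes a single pointer: all six parameters travel in one user-memory block. A hedged 32-bit userspace sketch of such a caller; the struct mirrors mmap_arg_struct and the number 90 matches the table above:

#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

struct old_mmap_args {                  /* same layout as mmap_arg_struct */
        unsigned int addr, len, prot, flags, fd, offset;
};

int main(void)
{
        struct old_mmap_args a = {
                .addr   = 0,
                .len    = 4096,
                .prot   = PROT_READ | PROT_WRITE,
                .flags  = MAP_PRIVATE | MAP_ANONYMOUS,
                .fd     = (unsigned int)-1,
                .offset = 0,
        };
        void *p = (void *)syscall(90 /* old mmap, entry 90 in the table above */, &a);

        printf("old mmap -> %p\n", p);
        return 0;
}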
254
255asmlinkage long
256sys32_mprotect(unsigned long start, size_t len, unsigned long prot)
257{
258 return sys_mprotect(start,len,prot);
259}
260
261asmlinkage long
262sys32_pipe(int __user *fd)
263{
264 int retval;
265 int fds[2];
266
267 retval = do_pipe(fds);
268 if (retval)
269 goto out;
270 if (copy_to_user(fd, fds, sizeof(fds)))
271 retval = -EFAULT;
272 out:
273 return retval;
274}
275
276asmlinkage long
277sys32_rt_sigaction(int sig, struct sigaction32 __user *act,
278 struct sigaction32 __user *oact, unsigned int sigsetsize)
279{
280 struct k_sigaction new_ka, old_ka;
281 int ret;
282 compat_sigset_t set32;
283
284 /* XXX: Don't preclude handling different sized sigset_t's. */
285 if (sigsetsize != sizeof(compat_sigset_t))
286 return -EINVAL;
287
288 if (act) {
289 compat_uptr_t handler, restorer;
290
291 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
292 __get_user(handler, &act->sa_handler) ||
293 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
294 __get_user(restorer, &act->sa_restorer)||
295 __copy_from_user(&set32, &act->sa_mask, sizeof(compat_sigset_t)))
296 return -EFAULT;
297 new_ka.sa.sa_handler = compat_ptr(handler);
298 new_ka.sa.sa_restorer = compat_ptr(restorer);
299 /* FIXME: here we rely on _COMPAT_NSIG_WORDS being >= _NSIG_WORDS << 1 */
300 switch (_NSIG_WORDS) {
301 case 4: new_ka.sa.sa_mask.sig[3] = set32.sig[6]
302 | (((long)set32.sig[7]) << 32);
303 case 3: new_ka.sa.sa_mask.sig[2] = set32.sig[4]
304 | (((long)set32.sig[5]) << 32);
305 case 2: new_ka.sa.sa_mask.sig[1] = set32.sig[2]
306 | (((long)set32.sig[3]) << 32);
307 case 1: new_ka.sa.sa_mask.sig[0] = set32.sig[0]
308 | (((long)set32.sig[1]) << 32);
309 }
310 }
311
312 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
313
314 if (!ret && oact) {
315 /* FIXME: here we rely on _COMPAT_NSIG_WORDS being >= _NSIG_WORDS << 1 */
316 switch (_NSIG_WORDS) {
317 case 4:
318 set32.sig[7] = (old_ka.sa.sa_mask.sig[3] >> 32);
319 set32.sig[6] = old_ka.sa.sa_mask.sig[3];
320 case 3:
321 set32.sig[5] = (old_ka.sa.sa_mask.sig[2] >> 32);
322 set32.sig[4] = old_ka.sa.sa_mask.sig[2];
323 case 2:
324 set32.sig[3] = (old_ka.sa.sa_mask.sig[1] >> 32);
325 set32.sig[2] = old_ka.sa.sa_mask.sig[1];
326 case 1:
327 set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32);
328 set32.sig[0] = old_ka.sa.sa_mask.sig[0];
329 }
330 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
331 __put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler) ||
332 __put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer) ||
333 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
334 __copy_to_user(&oact->sa_mask, &set32, sizeof(compat_sigset_t)))
335 return -EFAULT;
336 }
337
338 return ret;
339}
340
341asmlinkage long
342sys32_sigaction (int sig, struct old_sigaction32 __user *act, struct old_sigaction32 __user *oact)
343{
344 struct k_sigaction new_ka, old_ka;
345 int ret;
346
347 if (act) {
348 compat_old_sigset_t mask;
349 compat_uptr_t handler, restorer;
350
351 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
352 __get_user(handler, &act->sa_handler) ||
353 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
354 __get_user(restorer, &act->sa_restorer) ||
355 __get_user(mask, &act->sa_mask))
356 return -EFAULT;
357
358 new_ka.sa.sa_handler = compat_ptr(handler);
359 new_ka.sa.sa_restorer = compat_ptr(restorer);
360
361 siginitset(&new_ka.sa.sa_mask, mask);
362 }
363
364 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
365
366 if (!ret && oact) {
367 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
368 __put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler) ||
369 __put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer) ||
370 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
371 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
372 return -EFAULT;
373 }
374
375 return ret;
376}
377
378asmlinkage long
379sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
380 compat_sigset_t __user *oset, unsigned int sigsetsize)
381{
382 sigset_t s;
383 compat_sigset_t s32;
384 int ret;
385 mm_segment_t old_fs = get_fs();
386
387 if (set) {
388 if (copy_from_user (&s32, set, sizeof(compat_sigset_t)))
389 return -EFAULT;
390 switch (_NSIG_WORDS) {
391 case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
392 case 3: s.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
393 case 2: s.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
394 case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
395 }
396 }
397 set_fs (KERNEL_DS);
398 ret = sys_rt_sigprocmask(how,
399 set ? (sigset_t __user *)&s : NULL,
400 oset ? (sigset_t __user *)&s : NULL,
401 sigsetsize);
402 set_fs (old_fs);
403 if (ret) return ret;
404 if (oset) {
405 switch (_NSIG_WORDS) {
406 case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
407 case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
408 case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
409 case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
410 }
411 if (copy_to_user (oset, &s32, sizeof(compat_sigset_t)))
412 return -EFAULT;
413 }
414 return 0;
415}
416
417static inline long
418get_tv32(struct timeval *o, struct compat_timeval __user *i)
419{
420 int err = -EFAULT;
421 if (access_ok(VERIFY_READ, i, sizeof(*i))) {
422 err = __get_user(o->tv_sec, &i->tv_sec);
423 err |= __get_user(o->tv_usec, &i->tv_usec);
424 }
425 return err;
426}
427
428static inline long
429put_tv32(struct compat_timeval __user *o, struct timeval *i)
430{
431 int err = -EFAULT;
432 if (access_ok(VERIFY_WRITE, o, sizeof(*o))) {
433 err = __put_user(i->tv_sec, &o->tv_sec);
434 err |= __put_user(i->tv_usec, &o->tv_usec);
435 }
436 return err;
437}
438
439extern unsigned int alarm_setitimer(unsigned int seconds);
440
441asmlinkage long
442sys32_alarm(unsigned int seconds)
443{
444 return alarm_setitimer(seconds);
445}
446
447/* Translations due to time_t size differences, which affect all
448 sorts of things, like timeval and itimerval. */
449
450extern struct timezone sys_tz;
451
452asmlinkage long
453sys32_gettimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
454{
455 if (tv) {
456 struct timeval ktv;
457 do_gettimeofday(&ktv);
458 if (put_tv32(tv, &ktv))
459 return -EFAULT;
460 }
461 if (tz) {
462 if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
463 return -EFAULT;
464 }
465 return 0;
466}
467
468asmlinkage long
469sys32_settimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
470{
471 struct timeval ktv;
472 struct timespec kts;
473 struct timezone ktz;
474
475 if (tv) {
476 if (get_tv32(&ktv, tv))
477 return -EFAULT;
478 kts.tv_sec = ktv.tv_sec;
479 kts.tv_nsec = ktv.tv_usec * NSEC_PER_USEC;
480 }
481 if (tz) {
482 if (copy_from_user(&ktz, tz, sizeof(ktz)))
483 return -EFAULT;
484 }
485
486 return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
487}
488
489struct sel_arg_struct {
490 unsigned int n;
491 unsigned int inp;
492 unsigned int outp;
493 unsigned int exp;
494 unsigned int tvp;
495};
496
497asmlinkage long
498sys32_old_select(struct sel_arg_struct __user *arg)
499{
500 struct sel_arg_struct a;
501
502 if (copy_from_user(&a, arg, sizeof(a)))
503 return -EFAULT;
504 return compat_sys_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp),
505 compat_ptr(a.exp), compat_ptr(a.tvp));
506}
507
508extern asmlinkage long
509compat_sys_wait4(compat_pid_t pid, compat_uint_t * stat_addr, int options,
510 struct compat_rusage *ru);
511
512asmlinkage long
513sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr, int options)
514{
515 return compat_sys_wait4(pid, stat_addr, options, NULL);
516}
517
518/* 32-bit timeval and related flotsam. */
519
520asmlinkage long
521sys32_sysfs(int option, u32 arg1, u32 arg2)
522{
523 return sys_sysfs(option, arg1, arg2);
524}
525
526asmlinkage long
527sys32_sched_rr_get_interval(compat_pid_t pid, struct compat_timespec __user *interval)
528{
529 struct timespec t;
530 int ret;
531 mm_segment_t old_fs = get_fs ();
532
533 set_fs (KERNEL_DS);
534 ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
535 set_fs (old_fs);
536 if (put_compat_timespec(&t, interval))
537 return -EFAULT;
538 return ret;
539}
540
541asmlinkage long
542sys32_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize)
543{
544 sigset_t s;
545 compat_sigset_t s32;
546 int ret;
547 mm_segment_t old_fs = get_fs();
548
549 set_fs (KERNEL_DS);
550 ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
551 set_fs (old_fs);
552 if (!ret) {
553 switch (_NSIG_WORDS) {
554 case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
555 case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
556 case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
557 case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
558 }
559 if (copy_to_user (set, &s32, sizeof(compat_sigset_t)))
560 return -EFAULT;
561 }
562 return ret;
563}
564
565asmlinkage long
566sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo)
567{
568 siginfo_t info;
569 int ret;
570 mm_segment_t old_fs = get_fs();
571
572 if (copy_siginfo_from_user32(&info, uinfo))
573 return -EFAULT;
574 set_fs (KERNEL_DS);
575 ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
576 set_fs (old_fs);
577 return ret;
578}
579
580/* This is here just in case some old ia32 binary calls it. */
581asmlinkage long
582sys32_pause(void)
583{
584 current->state = TASK_INTERRUPTIBLE;
585 schedule();
586 return -ERESTARTNOHAND;
587}
588
589
590#ifdef CONFIG_SYSCTL_SYSCALL
591struct sysctl_ia32 {
592 unsigned int name;
593 int nlen;
594 unsigned int oldval;
595 unsigned int oldlenp;
596 unsigned int newval;
597 unsigned int newlen;
598 unsigned int __unused[4];
599};
600
601
602asmlinkage long
603sys32_sysctl(struct sysctl_ia32 __user *args32)
604{
605 struct sysctl_ia32 a32;
606 mm_segment_t old_fs = get_fs ();
607 void __user *oldvalp, *newvalp;
608 size_t oldlen;
609 int __user *namep;
610 long ret;
611
612 if (copy_from_user(&a32, args32, sizeof (a32)))
613 return -EFAULT;
614
615 /*
616 * We need to pre-validate these because we have to disable address checking
617 * before calling do_sysctl() (because of OLDLEN), but we can't run the risk of
618 * the user specifying bad addresses here. Since we're dealing with 32-bit
619 * addresses, we KNOW that access_ok() will always succeed, so this is an
620 * expensive NOP, but so what...
621 */
622 namep = compat_ptr(a32.name);
623 oldvalp = compat_ptr(a32.oldval);
624 newvalp = compat_ptr(a32.newval);
625
626 if ((oldvalp && get_user(oldlen, (int __user *)compat_ptr(a32.oldlenp)))
627 || !access_ok(VERIFY_WRITE, namep, 0)
628 || !access_ok(VERIFY_WRITE, oldvalp, 0)
629 || !access_ok(VERIFY_WRITE, newvalp, 0))
630 return -EFAULT;
631
632 set_fs(KERNEL_DS);
633 lock_kernel();
634 ret = do_sysctl(namep, a32.nlen, oldvalp, (size_t __user *)&oldlen,
635 newvalp, (size_t) a32.newlen);
636 unlock_kernel();
637 set_fs(old_fs);
638
639 if (oldvalp && put_user (oldlen, (int __user *)compat_ptr(a32.oldlenp)))
640 return -EFAULT;
641
642 return ret;
643}
644#endif
645
646/* warning: next two assume little endian */
647asmlinkage long
648sys32_pread(unsigned int fd, char __user *ubuf, u32 count, u32 poslo, u32 poshi)
649{
650 return sys_pread64(fd, ubuf, count,
651 ((loff_t)AA(poshi) << 32) | AA(poslo));
652}
653
654asmlinkage long
655sys32_pwrite(unsigned int fd, char __user *ubuf, u32 count, u32 poslo, u32 poshi)
656{
657 return sys_pwrite64(fd, ubuf, count,
658 ((loff_t)AA(poshi) << 32) | AA(poslo));
659}
660
661
662asmlinkage long
663sys32_personality(unsigned long personality)
664{
665 int ret;
666 if (personality(current->personality) == PER_LINUX32 &&
667 personality == PER_LINUX)
668 personality = PER_LINUX32;
669 ret = sys_personality(personality);
670 if (ret == PER_LINUX32)
671 ret = PER_LINUX;
672 return ret;
673}
674
675asmlinkage long
676sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, s32 count)
677{
678 mm_segment_t old_fs = get_fs();
679 int ret;
680 off_t of;
681
682 if (offset && get_user(of, offset))
683 return -EFAULT;
684
685 set_fs(KERNEL_DS);
686 ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
687 count);
688 set_fs(old_fs);
689
690 if (offset && put_user(of, offset))
691 return -EFAULT;
692
693 return ret;
694}
695
696asmlinkage long sys32_mmap2(unsigned long addr, unsigned long len,
697 unsigned long prot, unsigned long flags,
698 unsigned long fd, unsigned long pgoff)
699{
700 struct mm_struct *mm = current->mm;
701 unsigned long error;
702 struct file * file = NULL;
703
704 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
705 if (!(flags & MAP_ANONYMOUS)) {
706 file = fget(fd);
707 if (!file)
708 return -EBADF;
709 }
710
711 down_write(&mm->mmap_sem);
712 error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
713 up_write(&mm->mmap_sem);
714
715 if (file)
716 fput(file);
717 return error;
718}
719
720asmlinkage long sys32_olduname(struct oldold_utsname __user * name)
721{
722 int err;
723
724 if (!name)
725 return -EFAULT;
726 if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
727 return -EFAULT;
728
729 down_read(&uts_sem);
730
731 err = __copy_to_user(&name->sysname,&utsname()->sysname,
732 __OLD_UTS_LEN);
733 err |= __put_user(0,name->sysname+__OLD_UTS_LEN);
734 err |= __copy_to_user(&name->nodename,&utsname()->nodename,
735 __OLD_UTS_LEN);
736 err |= __put_user(0,name->nodename+__OLD_UTS_LEN);
737 err |= __copy_to_user(&name->release,&utsname()->release,
738 __OLD_UTS_LEN);
739 err |= __put_user(0,name->release+__OLD_UTS_LEN);
740 err |= __copy_to_user(&name->version,&utsname()->version,
741 __OLD_UTS_LEN);
742 err |= __put_user(0,name->version+__OLD_UTS_LEN);
743 {
744 char *arch = "x86_64";
745 if (personality(current->personality) == PER_LINUX32)
746 arch = "i686";
747
748 err |= __copy_to_user(&name->machine, arch, strlen(arch)+1);
749 }
750
751 up_read(&uts_sem);
752
753 err = err ? -EFAULT : 0;
754
755 return err;
756}
757
758long sys32_uname(struct old_utsname __user * name)
759{
760 int err;
761 if (!name)
762 return -EFAULT;
763 down_read(&uts_sem);
764 err = copy_to_user(name, utsname(), sizeof (*name));
765 up_read(&uts_sem);
766 if (personality(current->personality) == PER_LINUX32)
767 err |= copy_to_user(&name->machine, "i686", 5);
768 return err?-EFAULT:0;
769}
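The PER_LINUX32 checks in sys32_olduname() and sys32_uname() above are what make the reported machine string flip to "i686" for tasks running with the 32-bit personality (the mechanism behind the linux32/setarch wrappers). A small illustration, assuming a 32-bit build running on an x86-64 kernel:

#include <stdio.h>
#include <sys/personality.h>
#include <sys/utsname.h>

int main(void)
{
        struct utsname u;

        personality(PER_LINUX32);       /* request the 32-bit personality */
        uname(&u);
        printf("machine: %s\n", u.machine);     /* expected: i686 */
        return 0;
}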
770
771long sys32_ustat(unsigned dev, struct ustat32 __user *u32p)
772{
773 struct ustat u;
774 mm_segment_t seg;
775 int ret;
776
777 seg = get_fs();
778 set_fs(KERNEL_DS);
779 ret = sys_ustat(dev, (struct ustat __user *)&u);
780 set_fs(seg);
781 if (ret >= 0) {
782 if (!access_ok(VERIFY_WRITE,u32p,sizeof(struct ustat32)) ||
783 __put_user((__u32) u.f_tfree, &u32p->f_tfree) ||
784 __put_user((__u32) u.f_tinode, &u32p->f_tinode) ||
785 __copy_to_user(&u32p->f_fname, u.f_fname, sizeof(u.f_fname)) ||
786 __copy_to_user(&u32p->f_fpack, u.f_fpack, sizeof(u.f_fpack)))
787 ret = -EFAULT;
788 }
789 return ret;
790}
791
792asmlinkage long sys32_execve(char __user *name, compat_uptr_t __user *argv,
793 compat_uptr_t __user *envp, struct pt_regs *regs)
794{
795 long error;
796 char * filename;
797
798 filename = getname(name);
799 error = PTR_ERR(filename);
800 if (IS_ERR(filename))
801 return error;
802 error = compat_do_execve(filename, argv, envp, regs);
803 if (error == 0) {
804 task_lock(current);
805 current->ptrace &= ~PT_DTRACE;
806 task_unlock(current);
807 }
808 putname(filename);
809 return error;
810}
811
812asmlinkage long sys32_clone(unsigned int clone_flags, unsigned int newsp,
813 struct pt_regs *regs)
814{
815 void __user *parent_tid = (void __user *)regs->rdx;
816 void __user *child_tid = (void __user *)regs->rdi;
817 if (!newsp)
818 newsp = regs->rsp;
819 return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
820}
821
822/*
823 * Some system calls need sign-extended arguments. This could be done by a generic wrapper.
824 */
825
826long sys32_lseek (unsigned int fd, int offset, unsigned int whence)
827{
828 return sys_lseek(fd, offset, whence);
829}
830
831long sys32_kill(int pid, int sig)
832{
833 return sys_kill(pid, sig);
834}
835
836long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
837 __u32 len_low, __u32 len_high, int advice)
838{
839 return sys_fadvise64_64(fd,
840 (((u64)offset_high)<<32) | offset_low,
841 (((u64)len_high)<<32) | len_low,
842 advice);
843}
844
845long sys32_vm86_warning(void)
846{
847 struct task_struct *me = current;
848 static char lastcomm[sizeof(me->comm)];
849 if (strncmp(lastcomm, me->comm, sizeof(lastcomm))) {
850 compat_printk(KERN_INFO "%s: vm86 mode not supported on 64 bit kernel\n",
851 me->comm);
852 strncpy(lastcomm, me->comm, sizeof(lastcomm));
853 }
854 return -ENOSYS;
855}
856
857long sys32_lookup_dcookie(u32 addr_low, u32 addr_high,
858 char __user * buf, size_t len)
859{
860 return sys_lookup_dcookie(((u64)addr_high << 32) | addr_low, buf, len);
861}
862
863asmlinkage ssize_t sys32_readahead(int fd, unsigned off_lo, unsigned off_hi, size_t count)
864{
865 return sys_readahead(fd, ((u64)off_hi << 32) | off_lo, count);
866}
867
868asmlinkage long sys32_sync_file_range(int fd, unsigned off_low, unsigned off_hi,
869 unsigned n_low, unsigned n_hi, int flags)
870{
871 return sys_sync_file_range(fd,
872 ((u64)off_hi << 32) | off_low,
873 ((u64)n_hi << 32) | n_low, flags);
874}
875
876asmlinkage long sys32_fadvise64(int fd, unsigned offset_lo, unsigned offset_hi, size_t len,
877 int advice)
878{
879 return sys_fadvise64_64(fd, ((u64)offset_hi << 32) | offset_lo,
880 len, advice);
881}
882
883asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_lo,
884 unsigned offset_hi, unsigned len_lo,
885 unsigned len_hi)
886{
887 return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo,
888 ((u64)len_hi << 32) | len_lo);
889}
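Several of the wrappers above (sys32_truncate64, sys32_pread/sys32_pwrite, sys32_fadvise64_64, sys32_fallocate, and the sigset conversions) do little more than reassemble a 64-bit value from two 32-bit halves passed in separate registers. The arithmetic in isolation, with an arbitrary test value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t offset  = 0x123456789ULL;      /* needs more than 32 bits */
        uint32_t lo      = (uint32_t)offset;
        uint32_t hi      = (uint32_t)(offset >> 32);
        uint64_t rebuilt = ((uint64_t)hi << 32) | lo;

        printf("lo=%#x hi=%#x rebuilt=%#llx\n",
               lo, hi, (unsigned long long)rebuilt);
        return 0;
}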
diff --git a/arch/x86/ia32/syscall32.c b/arch/x86/ia32/syscall32.c
new file mode 100644
index 000000000000..15013bac181c
--- /dev/null
+++ b/arch/x86/ia32/syscall32.c
@@ -0,0 +1,83 @@
1/* Copyright 2002,2003 Andi Kleen, SuSE Labs */
2
3/* vsyscall handling for 32-bit processes: map a stub page into their address
4 space on demand because 32-bit code cannot reach the kernel's fixmaps */
5
6#include <linux/mm.h>
7#include <linux/string.h>
8#include <linux/kernel.h>
9#include <linux/gfp.h>
10#include <linux/init.h>
11#include <linux/stringify.h>
12#include <linux/security.h>
13#include <asm/proto.h>
14#include <asm/tlbflush.h>
15#include <asm/ia32_unistd.h>
16#include <asm/vsyscall32.h>
17
18extern unsigned char syscall32_syscall[], syscall32_syscall_end[];
19extern unsigned char syscall32_sysenter[], syscall32_sysenter_end[];
20extern int sysctl_vsyscall32;
21
22static struct page *syscall32_pages[1];
23static int use_sysenter = -1;
24
25struct linux_binprm;
26
27/* Setup a VMA at program startup for the vsyscall page */
28int syscall32_setup_pages(struct linux_binprm *bprm, int exstack)
29{
30 struct mm_struct *mm = current->mm;
31 int ret;
32
33 down_write(&mm->mmap_sem);
34 /*
35 * MAYWRITE to allow gdb to COW and set breakpoints
36 *
37 * Make sure the vDSO gets into every core dump.
38 * Dumping its contents makes post-mortem fully interpretable later
39 * without matching up the same kernel and hardware config to see
40 * what PC values meant.
41 */
42 /* Could randomize here */
43 ret = install_special_mapping(mm, VSYSCALL32_BASE, PAGE_SIZE,
44 VM_READ|VM_EXEC|
45 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
46 VM_ALWAYSDUMP,
47 syscall32_pages);
48 up_write(&mm->mmap_sem);
49 return ret;
50}
51
52static int __init init_syscall32(void)
53{
54 char *syscall32_page = (void *)get_zeroed_page(GFP_KERNEL);
55 if (!syscall32_page)
56 panic("Cannot allocate syscall32 page");
57 syscall32_pages[0] = virt_to_page(syscall32_page);
58 if (use_sysenter > 0) {
59 memcpy(syscall32_page, syscall32_sysenter,
60 syscall32_sysenter_end - syscall32_sysenter);
61 } else {
62 memcpy(syscall32_page, syscall32_syscall,
63 syscall32_syscall_end - syscall32_syscall);
64 }
65 return 0;
66}
67
68__initcall(init_syscall32);
69
70/* May not be __init: called during resume */
71void syscall32_cpu_init(void)
72{
73 if (use_sysenter < 0)
74 use_sysenter = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL);
75
76 /* Load these always in case some future AMD CPU supports
77 SYSENTER from compat mode too. */
78 checking_wrmsrl(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
79 checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL);
80 checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
81
82 wrmsrl(MSR_CSTAR, ia32_cstar_target);
83}
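
For context, a hedged userspace sketch of how a 32-bit process can locate the stub page that syscall32_setup_pages() installs: the entry address is assumed to be advertised through the AT_SYSINFO auxiliary-vector entry, as the ia32 binfmt setup does. Build with -m32; getauxval() needs glibc 2.16 or later.

/*
 * Sketch only: print the vsyscall entry advertised to a 32-bit process.
 * Assumes AT_SYSINFO is present in the auxiliary vector.
 */
#include <stdio.h>
#include <sys/auxv.h>

#ifndef AT_SYSINFO
#define AT_SYSINFO 32		/* entry point of the vsyscall page */
#endif

int main(void)
{
	unsigned long entry = getauxval(AT_SYSINFO);

	if (entry)
		printf("__kernel_vsyscall at %#lx\n", entry);
	else
		printf("no AT_SYSINFO entry (vsyscall page not advertised)\n");
	return 0;
}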
diff --git a/arch/x86/ia32/syscall32_syscall.S b/arch/x86/ia32/syscall32_syscall.S
new file mode 100644
index 000000000000..933f0f08b1cf
--- /dev/null
+++ b/arch/x86/ia32/syscall32_syscall.S
@@ -0,0 +1,17 @@
1/* 32bit VDSOs mapped into user space. */
2
3 .section ".init.data","aw"
4
5 .globl syscall32_syscall
6 .globl syscall32_syscall_end
7
8syscall32_syscall:
9 .incbin "arch/x86/ia32/vsyscall-syscall.so"
10syscall32_syscall_end:
11
12 .globl syscall32_sysenter
13 .globl syscall32_sysenter_end
14
15syscall32_sysenter:
16 .incbin "arch/x86/ia32/vsyscall-sysenter.so"
17syscall32_sysenter_end:
diff --git a/arch/x86/ia32/tls32.c b/arch/x86/ia32/tls32.c
new file mode 100644
index 000000000000..1cc4340de3ca
--- /dev/null
+++ b/arch/x86/ia32/tls32.c
@@ -0,0 +1,163 @@
1#include <linux/kernel.h>
2#include <linux/errno.h>
3#include <linux/sched.h>
4#include <linux/user.h>
5
6#include <asm/uaccess.h>
7#include <asm/desc.h>
8#include <asm/system.h>
9#include <asm/ldt.h>
10#include <asm/processor.h>
11#include <asm/proto.h>
12
13/*
14 * get_free_idx: find a yet-unused TLS descriptor index for set_thread_area().
15 */
16static int get_free_idx(void)
17{
18 struct thread_struct *t = &current->thread;
19 int idx;
20
21 for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
22 if (desc_empty((struct n_desc_struct *)(t->tls_array) + idx))
23 return idx + GDT_ENTRY_TLS_MIN;
24 return -ESRCH;
25}
26
27/*
28 * Set a given TLS descriptor:
29 * When you want addresses > 32bit use arch_prctl()
30 */
31int do_set_thread_area(struct thread_struct *t, struct user_desc __user *u_info)
32{
33 struct user_desc info;
34 struct n_desc_struct *desc;
35 int cpu, idx;
36
37 if (copy_from_user(&info, u_info, sizeof(info)))
38 return -EFAULT;
39
40 idx = info.entry_number;
41
42 /*
43 * index -1 means the kernel should try to find and
44 * allocate an empty descriptor:
45 */
46 if (idx == -1) {
47 idx = get_free_idx();
48 if (idx < 0)
49 return idx;
50 if (put_user(idx, &u_info->entry_number))
51 return -EFAULT;
52 }
53
54 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
55 return -EINVAL;
56
57 desc = ((struct n_desc_struct *)t->tls_array) + idx - GDT_ENTRY_TLS_MIN;
58
59 /*
60 * We must not get preempted while modifying the TLS.
61 */
62 cpu = get_cpu();
63
64 if (LDT_empty(&info)) {
65 desc->a = 0;
66 desc->b = 0;
67 } else {
68 desc->a = LDT_entry_a(&info);
69 desc->b = LDT_entry_b(&info);
70 }
71 if (t == &current->thread)
72 load_TLS(t, cpu);
73
74 put_cpu();
75 return 0;
76}
77
78asmlinkage long sys32_set_thread_area(struct user_desc __user *u_info)
79{
80 return do_set_thread_area(&current->thread, u_info);
81}
82
83
84/*
85 * Get the current Thread-Local Storage area:
86 */
87
88#define GET_BASE(desc) ( \
89 (((desc)->a >> 16) & 0x0000ffff) | \
90 (((desc)->b << 16) & 0x00ff0000) | \
91 ( (desc)->b & 0xff000000) )
92
93#define GET_LIMIT(desc) ( \
94 ((desc)->a & 0x0ffff) | \
95 ((desc)->b & 0xf0000) )
96
97#define GET_32BIT(desc) (((desc)->b >> 22) & 1)
98#define GET_CONTENTS(desc) (((desc)->b >> 10) & 3)
99#define GET_WRITABLE(desc) (((desc)->b >> 9) & 1)
100#define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1)
101#define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
102#define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
103#define GET_LONGMODE(desc) (((desc)->b >> 21) & 1)
104
105int do_get_thread_area(struct thread_struct *t, struct user_desc __user *u_info)
106{
107 struct user_desc info;
108 struct n_desc_struct *desc;
109 int idx;
110
111 if (get_user(idx, &u_info->entry_number))
112 return -EFAULT;
113 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
114 return -EINVAL;
115
116 desc = ((struct n_desc_struct *)t->tls_array) + idx - GDT_ENTRY_TLS_MIN;
117
118 memset(&info, 0, sizeof(struct user_desc));
119 info.entry_number = idx;
120 info.base_addr = GET_BASE(desc);
121 info.limit = GET_LIMIT(desc);
122 info.seg_32bit = GET_32BIT(desc);
123 info.contents = GET_CONTENTS(desc);
124 info.read_exec_only = !GET_WRITABLE(desc);
125 info.limit_in_pages = GET_LIMIT_PAGES(desc);
126 info.seg_not_present = !GET_PRESENT(desc);
127 info.useable = GET_USEABLE(desc);
128 info.lm = GET_LONGMODE(desc);
129
130 if (copy_to_user(u_info, &info, sizeof(info)))
131 return -EFAULT;
132 return 0;
133}
134
135asmlinkage long sys32_get_thread_area(struct user_desc __user *u_info)
136{
137 return do_get_thread_area(&current->thread, u_info);
138}
139
140
141int ia32_child_tls(struct task_struct *p, struct pt_regs *childregs)
142{
143 struct n_desc_struct *desc;
144 struct user_desc info;
145 struct user_desc __user *cp;
146 int idx;
147
148 cp = (void __user *)childregs->rsi;
149 if (copy_from_user(&info, cp, sizeof(info)))
150 return -EFAULT;
151 if (LDT_empty(&info))
152 return -EINVAL;
153
154 idx = info.entry_number;
155 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
156 return -EINVAL;
157
158 desc = (struct n_desc_struct *)(p->thread.tls_array) + idx - GDT_ENTRY_TLS_MIN;
159 desc->a = LDT_entry_a(&info);
160 desc->b = LDT_entry_b(&info);
161
162 return 0;
163}
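
A minimal sketch, assuming a 32-bit build (-m32), of the syscall round trip that do_set_thread_area() above services: passing entry_number == -1 asks the kernel to pick a free GDT TLS slot via get_free_idx(). The base address here is an arbitrary demo value, and the program stops short of actually loading the segment register.

/*
 * Userspace sketch (build with -m32): exercise set_thread_area with
 * entry_number == -1 and let the kernel report the slot it chose.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>		/* struct user_desc */

int main(void)
{
	struct user_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.entry_number = -1;		/* let the kernel choose a slot */
	desc.base_addr = 0x1000;	/* arbitrary demo base, never dereferenced */
	desc.limit = 0xfffff;
	desc.seg_32bit = 1;
	desc.limit_in_pages = 1;
	desc.useable = 1;

	if (syscall(SYS_set_thread_area, &desc) != 0) {
		perror("set_thread_area");
		return 1;
	}
	printf("kernel assigned GDT entry %u\n", desc.entry_number);
	return 0;
}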
diff --git a/arch/x86/ia32/vsyscall-sigreturn.S b/arch/x86/ia32/vsyscall-sigreturn.S
new file mode 100644
index 000000000000..b383be00baec
--- /dev/null
+++ b/arch/x86/ia32/vsyscall-sigreturn.S
@@ -0,0 +1,143 @@
1/*
2 * Common code for the sigreturn entry points on the vsyscall page.
3 * This code uses SYSCALL_ENTER_KERNEL (either syscall or int $0x80)
4 * to enter the kernel.
5 * This file is #include'd by vsyscall-*.S to define them after the
6 * vsyscall entry point. The addresses we get for these entry points
7 * by doing ".balign 32" must match in both versions of the page.
8 */
9
10 .code32
11 .section .text.sigreturn,"ax"
12 .balign 32
13 .globl __kernel_sigreturn
14 .type __kernel_sigreturn,@function
15__kernel_sigreturn:
16.LSTART_sigreturn:
17 popl %eax
18 movl $__NR_ia32_sigreturn, %eax
19 SYSCALL_ENTER_KERNEL
20.LEND_sigreturn:
21 .size __kernel_sigreturn,.-.LSTART_sigreturn
22
23 .section .text.rtsigreturn,"ax"
24 .balign 32
25 .globl __kernel_rt_sigreturn
26 .type __kernel_rt_sigreturn,@function
27__kernel_rt_sigreturn:
28.LSTART_rt_sigreturn:
29 movl $__NR_ia32_rt_sigreturn, %eax
30 SYSCALL_ENTER_KERNEL
31.LEND_rt_sigreturn:
32 .size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn
33
34 .section .eh_frame,"a",@progbits
35.LSTARTFRAMES:
36 .long .LENDCIES-.LSTARTCIES
37.LSTARTCIES:
38 .long 0 /* CIE ID */
39 .byte 1 /* Version number */
40 .string "zRS" /* NUL-terminated augmentation string */
41 .uleb128 1 /* Code alignment factor */
42 .sleb128 -4 /* Data alignment factor */
43 .byte 8 /* Return address register column */
44 .uleb128 1 /* Augmentation value length */
45 .byte 0x1b /* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
46 .byte 0x0c /* DW_CFA_def_cfa */
47 .uleb128 4
48 .uleb128 4
49 .byte 0x88 /* DW_CFA_offset, column 0x8 */
50 .uleb128 1
51 .align 4
52.LENDCIES:
53
54 .long .LENDFDE2-.LSTARTFDE2 /* Length FDE */
55.LSTARTFDE2:
56 .long .LSTARTFDE2-.LSTARTFRAMES /* CIE pointer */
57 /* HACK: The dwarf2 unwind routines will subtract 1 from the
58 return address to get an address in the middle of the
59 presumed call instruction. Since we didn't get here via
60 a call, we need to include the nop before the real start
61 to make up for it. */
62 .long .LSTART_sigreturn-1-. /* PC-relative start address */
63 .long .LEND_sigreturn-.LSTART_sigreturn+1
64 .uleb128 0 /* Augmentation length */
65 /* What follows are the instructions for the table generation.
66 We record the locations of each register saved. This is
67 complicated by the fact that the "CFA" is always assumed to
68 be the value of the stack pointer in the caller. This means
69 that we must define the CFA of this body of code to be the
70 saved value of the stack pointer in the sigcontext. Which
71 also means that there is no fixed relation to the other
72 saved registers, which means that we must use DW_CFA_expression
73 to compute their addresses. It also means that when we
74 adjust the stack with the popl, we have to do it all over again. */
75
76#define do_cfa_expr(offset) \
77 .byte 0x0f; /* DW_CFA_def_cfa_expression */ \
78 .uleb128 1f-0f; /* length */ \
790: .byte 0x74; /* DW_OP_breg4 */ \
80 .sleb128 offset; /* offset */ \
81 .byte 0x06; /* DW_OP_deref */ \
821:
83
84#define do_expr(regno, offset) \
85 .byte 0x10; /* DW_CFA_expression */ \
86 .uleb128 regno; /* regno */ \
87 .uleb128 1f-0f; /* length */ \
880: .byte 0x74; /* DW_OP_breg4 */ \
89 .sleb128 offset; /* offset */ \
901:
91
92 do_cfa_expr(IA32_SIGCONTEXT_esp+4)
93 do_expr(0, IA32_SIGCONTEXT_eax+4)
94 do_expr(1, IA32_SIGCONTEXT_ecx+4)
95 do_expr(2, IA32_SIGCONTEXT_edx+4)
96 do_expr(3, IA32_SIGCONTEXT_ebx+4)
97 do_expr(5, IA32_SIGCONTEXT_ebp+4)
98 do_expr(6, IA32_SIGCONTEXT_esi+4)
99 do_expr(7, IA32_SIGCONTEXT_edi+4)
100 do_expr(8, IA32_SIGCONTEXT_eip+4)
101
102 .byte 0x42 /* DW_CFA_advance_loc 2 -- nop; popl eax. */
103
104 do_cfa_expr(IA32_SIGCONTEXT_esp)
105 do_expr(0, IA32_SIGCONTEXT_eax)
106 do_expr(1, IA32_SIGCONTEXT_ecx)
107 do_expr(2, IA32_SIGCONTEXT_edx)
108 do_expr(3, IA32_SIGCONTEXT_ebx)
109 do_expr(5, IA32_SIGCONTEXT_ebp)
110 do_expr(6, IA32_SIGCONTEXT_esi)
111 do_expr(7, IA32_SIGCONTEXT_edi)
112 do_expr(8, IA32_SIGCONTEXT_eip)
113
114 .align 4
115.LENDFDE2:
116
117 .long .LENDFDE3-.LSTARTFDE3 /* Length FDE */
118.LSTARTFDE3:
119 .long .LSTARTFDE3-.LSTARTFRAMES /* CIE pointer */
120 /* HACK: See above wrt unwind library assumptions. */
121 .long .LSTART_rt_sigreturn-1-. /* PC-relative start address */
122 .long .LEND_rt_sigreturn-.LSTART_rt_sigreturn+1
123 .uleb128 0 /* Augmentation */
124 /* What follows are the instructions for the table generation.
125 We record the locations of each register saved. This is
126 slightly less complicated than the above, since we don't
127 modify the stack pointer in the process. */
128
129 do_cfa_expr(IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_esp)
130 do_expr(0, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_eax)
131 do_expr(1, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_ecx)
132 do_expr(2, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_edx)
133 do_expr(3, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_ebx)
134 do_expr(5, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_ebp)
135 do_expr(6, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_esi)
136 do_expr(7, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_edi)
137 do_expr(8, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_eip)
138
139 .align 4
140.LENDFDE3:
141
142#include "../../x86/kernel/vsyscall-note_32.S"
143
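To make the do_cfa_expr()/do_expr() encoding above concrete, here is a toy evaluation of the two-operation DWARF expression they emit (DW_OP_breg4 <offset>; DW_OP_deref): register 4 is %esp in 32-bit DWARF numbering, so the consumer reads the saved value out of the sigcontext at esp + offset. This is not a real unwinder; the fake frame layout and the 28-byte offset are invented for the example.

/*
 * Toy illustration of evaluating: DW_OP_breg4 <offset>; DW_OP_deref
 * against a fabricated 32-bit stack image.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t eval_cfa_expr(uint32_t esp, int32_t offset,
			      const uint32_t *fake_memory, uint32_t mem_base)
{
	uint32_t addr = esp + offset;			/* DW_OP_breg4 offset */
	return fake_memory[(addr - mem_base) / 4];	/* DW_OP_deref */
}

int main(void)
{
	/* Pretend this array is the memory starting at address 0x1000. */
	uint32_t fake_stack[16] = { 0 };
	uint32_t sigcontext_esp_off = 28;	/* hypothetical layout */

	fake_stack[sigcontext_esp_off / 4] = 0xbfff1234;	/* saved ESP */

	uint32_t cfa = eval_cfa_expr(0x1000, sigcontext_esp_off,
				     fake_stack, 0x1000);
	printf("recovered CFA = %#x\n", cfa);	/* 0xbfff1234 */
	return 0;
}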
diff --git a/arch/x86/ia32/vsyscall-syscall.S b/arch/x86/ia32/vsyscall-syscall.S
new file mode 100644
index 000000000000..cf9ef678de3e
--- /dev/null
+++ b/arch/x86/ia32/vsyscall-syscall.S
@@ -0,0 +1,69 @@
1/*
2 * Code for the vsyscall page. This version uses the syscall instruction.
3 */
4
5#include <asm/ia32_unistd.h>
6#include <asm/asm-offsets.h>
7#include <asm/segment.h>
8
9 .code32
10 .text
11 .section .text.vsyscall,"ax"
12 .globl __kernel_vsyscall
13 .type __kernel_vsyscall,@function
14__kernel_vsyscall:
15.LSTART_vsyscall:
16 push %ebp
17.Lpush_ebp:
18 movl %ecx, %ebp
19 syscall
20 movl $__USER32_DS, %ecx
21 movl %ecx, %ss
22 movl %ebp, %ecx
23 popl %ebp
24.Lpop_ebp:
25 ret
26.LEND_vsyscall:
27 .size __kernel_vsyscall,.-.LSTART_vsyscall
28
29 .section .eh_frame,"a",@progbits
30.LSTARTFRAME:
31 .long .LENDCIE-.LSTARTCIE
32.LSTARTCIE:
33 .long 0 /* CIE ID */
34 .byte 1 /* Version number */
35 .string "zR" /* NUL-terminated augmentation string */
36 .uleb128 1 /* Code alignment factor */
37 .sleb128 -4 /* Data alignment factor */
38 .byte 8 /* Return address register column */
39 .uleb128 1 /* Augmentation value length */
40 .byte 0x1b /* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
41 .byte 0x0c /* DW_CFA_def_cfa */
42 .uleb128 4
43 .uleb128 4
44 .byte 0x88 /* DW_CFA_offset, column 0x8 */
45 .uleb128 1
46 .align 4
47.LENDCIE:
48
49 .long .LENDFDE1-.LSTARTFDE1 /* Length FDE */
50.LSTARTFDE1:
51 .long .LSTARTFDE1-.LSTARTFRAME /* CIE pointer */
52 .long .LSTART_vsyscall-. /* PC-relative start address */
53 .long .LEND_vsyscall-.LSTART_vsyscall
54 .uleb128 0 /* Augmentation length */
55 /* What follows are the instructions for the table generation.
56 We have to record all changes of the stack pointer. */
57 .byte 0x40 + .Lpush_ebp-.LSTART_vsyscall /* DW_CFA_advance_loc */
58 .byte 0x0e /* DW_CFA_def_cfa_offset */
59 .uleb128 8
60 .byte 0x85, 0x02 /* DW_CFA_offset %ebp -8 */
61 .byte 0x40 + .Lpop_ebp-.Lpush_ebp /* DW_CFA_advance_loc */
62 .byte 0xc5 /* DW_CFA_restore %ebp */
63 .byte 0x0e /* DW_CFA_def_cfa_offset */
64 .uleb128 4
65 .align 4
66.LENDFDE1:
67
68#define SYSCALL_ENTER_KERNEL syscall
69#include "vsyscall-sigreturn.S"
diff --git a/arch/x86/ia32/vsyscall-sysenter.S b/arch/x86/ia32/vsyscall-sysenter.S
new file mode 100644
index 000000000000..ae056e553d13
--- /dev/null
+++ b/arch/x86/ia32/vsyscall-sysenter.S
@@ -0,0 +1,95 @@
1/*
2 * Code for the vsyscall page. This version uses the sysenter instruction.
3 */
4
5#include <asm/ia32_unistd.h>
6#include <asm/asm-offsets.h>
7
8 .code32
9 .text
10 .section .text.vsyscall,"ax"
11 .globl __kernel_vsyscall
12 .type __kernel_vsyscall,@function
13__kernel_vsyscall:
14.LSTART_vsyscall:
15 push %ecx
16.Lpush_ecx:
17 push %edx
18.Lpush_edx:
19 push %ebp
20.Lenter_kernel:
21 movl %esp,%ebp
22 sysenter
23 .space 7,0x90
24 jmp .Lenter_kernel
25 /* 16: System call normal return point is here! */
26 pop %ebp
27.Lpop_ebp:
28 pop %edx
29.Lpop_edx:
30 pop %ecx
31.Lpop_ecx:
32 ret
33.LEND_vsyscall:
34 .size __kernel_vsyscall,.-.LSTART_vsyscall
35
36 .section .eh_frame,"a",@progbits
37.LSTARTFRAME:
38 .long .LENDCIE-.LSTARTCIE
39.LSTARTCIE:
40 .long 0 /* CIE ID */
41 .byte 1 /* Version number */
42 .string "zR" /* NUL-terminated augmentation string */
43 .uleb128 1 /* Code alignment factor */
44 .sleb128 -4 /* Data alignment factor */
45 .byte 8 /* Return address register column */
46 .uleb128 1 /* Augmentation value length */
47 .byte 0x1b /* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
48 .byte 0x0c /* DW_CFA_def_cfa */
49 .uleb128 4
50 .uleb128 4
51 .byte 0x88 /* DW_CFA_offset, column 0x8 */
52 .uleb128 1
53 .align 4
54.LENDCIE:
55
56 .long .LENDFDE1-.LSTARTFDE1 /* Length FDE */
57.LSTARTFDE1:
58 .long .LSTARTFDE1-.LSTARTFRAME /* CIE pointer */
59 .long .LSTART_vsyscall-. /* PC-relative start address */
60 .long .LEND_vsyscall-.LSTART_vsyscall
61 .uleb128 0 /* Augmentation length */
62 /* What follows are the instructions for the table generation.
63 We have to record all changes of the stack pointer. */
64 .byte 0x04 /* DW_CFA_advance_loc4 */
65 .long .Lpush_ecx-.LSTART_vsyscall
66 .byte 0x0e /* DW_CFA_def_cfa_offset */
67 .byte 0x08 /* RA at offset 8 now */
68 .byte 0x04 /* DW_CFA_advance_loc4 */
69 .long .Lpush_edx-.Lpush_ecx
70 .byte 0x0e /* DW_CFA_def_cfa_offset */
71 .byte 0x0c /* RA at offset 12 now */
72 .byte 0x04 /* DW_CFA_advance_loc4 */
73 .long .Lenter_kernel-.Lpush_edx
74 .byte 0x0e /* DW_CFA_def_cfa_offset */
75 .byte 0x10 /* RA at offset 16 now */
76 .byte 0x85, 0x04 /* DW_CFA_offset %ebp -16 */
77 /* Finally the epilogue. */
78 .byte 0x04 /* DW_CFA_advance_loc4 */
79 .long .Lpop_ebp-.Lenter_kernel
80 .byte 0x0e /* DW_CFA_def_cfa_offset */
81 .byte 0x0c /* RA at offset 12 now */
82 .byte 0xc5 /* DW_CFA_restore %ebp */
83 .byte 0x04 /* DW_CFA_advance_loc4 */
84 .long .Lpop_edx-.Lpop_ebp
85 .byte 0x0e /* DW_CFA_def_cfa_offset */
86 .byte 0x08 /* RA at offset 8 now */
87 .byte 0x04 /* DW_CFA_advance_loc4 */
88 .long .Lpop_ecx-.Lpop_edx
89 .byte 0x0e /* DW_CFA_def_cfa_offset */
90 .byte 0x04 /* RA at offset 4 now */
91 .align 4
92.LENDFDE1:
93
94#define SYSCALL_ENTER_KERNEL int $0x80
95#include "vsyscall-sigreturn.S"
diff --git a/arch/x86/ia32/vsyscall.lds b/arch/x86/ia32/vsyscall.lds
new file mode 100644
index 000000000000..1dc86ff5bcb9
--- /dev/null
+++ b/arch/x86/ia32/vsyscall.lds
@@ -0,0 +1,80 @@
1/*
2 * Linker script for vsyscall DSO. The vsyscall page is an ELF shared
3 * object prelinked to its virtual address. This script controls its layout.
4 */
5
6/* This must match <asm/fixmap.h>. */
7VSYSCALL_BASE = 0xffffe000;
8
9SECTIONS
10{
11 . = VSYSCALL_BASE + SIZEOF_HEADERS;
12
13 .hash : { *(.hash) } :text
14 .gnu.hash : { *(.gnu.hash) }
15 .dynsym : { *(.dynsym) }
16 .dynstr : { *(.dynstr) }
17 .gnu.version : { *(.gnu.version) }
18 .gnu.version_d : { *(.gnu.version_d) }
19 .gnu.version_r : { *(.gnu.version_r) }
20
21 /* This linker script is used both with -r and with -shared.
22 For the layouts to match, we need to skip more than enough
23 space for the dynamic symbol table et al. If this amount
24 is insufficient, ld -shared will barf. Just increase it here. */
25 . = VSYSCALL_BASE + 0x400;
26
27 .text.vsyscall : { *(.text.vsyscall) } :text =0x90909090
28
29 /* This is a 32bit object and we cannot easily get the offsets
30 into the 64bit kernel. Just hardcode them here. This assumes
31 that all the stubs don't need more than 0x100 bytes. */
32 . = VSYSCALL_BASE + 0x500;
33
34 .text.sigreturn : { *(.text.sigreturn) } :text =0x90909090
35
36 . = VSYSCALL_BASE + 0x600;
37
38 .text.rtsigreturn : { *(.text.rtsigreturn) } :text =0x90909090
39
40 .note : { *(.note.*) } :text :note
41 .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
42 .eh_frame : { KEEP (*(.eh_frame)) } :text
43 .dynamic : { *(.dynamic) } :text :dynamic
44 .useless : {
45 *(.got.plt) *(.got)
46 *(.data .data.* .gnu.linkonce.d.*)
47 *(.dynbss)
48 *(.bss .bss.* .gnu.linkonce.b.*)
49 } :text
50}
51
52/*
53 * We must supply the ELF program headers explicitly to get just one
54 * PT_LOAD segment, and set the flags explicitly to make segments read-only.
55 */
56PHDRS
57{
58 text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */
59 dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
60 note PT_NOTE FLAGS(4); /* PF_R */
61 eh_frame_hdr 0x6474e550; /* PT_GNU_EH_FRAME, but ld doesn't match the name */
62}
63
64/*
65 * This controls what symbols we export from the DSO.
66 */
67VERSION
68{
69 LINUX_2.5 {
70 global:
71 __kernel_vsyscall;
72 __kernel_sigreturn;
73 __kernel_rt_sigreturn;
74
75 local: *;
76 };
77}
78
79/* The ELF entry point can be used to set the AT_SYSINFO value. */
80ENTRY(__kernel_vsyscall);
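
A hedged sketch of checking the property the ENTRY() line relies on: the DSO's ELF entry point is meant to coincide with __kernel_vsyscall, so e_entry should land inside .text.vsyscall (0xffffe400 with the layout above). The build-artifact path below is an assumption about the object tree.

/*
 * Sketch: read e_entry from the built vsyscall DSO and print it.
 */
#include <elf.h>
#include <stdio.h>

int main(void)
{
	const char *path = "arch/x86/ia32/vsyscall-sysenter.so";	/* assumed artifact */
	Elf32_Ehdr ehdr;
	FILE *f = fopen(path, "rb");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fread(&ehdr, sizeof(ehdr), 1, f) != 1) {
		fprintf(stderr, "%s: short read\n", path);
		fclose(f);
		return 1;
	}
	/* Expect an address inside .text.vsyscall, e.g. 0xffffe400. */
	printf("e_entry = %#x\n", (unsigned int)ehdr.e_entry);
	fclose(f);
	return 0;
}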