aboutsummaryrefslogtreecommitdiffstats
path: root/arch/ia64/ia32
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
committerLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
commit1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/ia64/ia32
Linux-2.6.12-rc2v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/ia64/ia32')
-rw-r--r--arch/ia64/ia32/Makefile12
-rw-r--r--arch/ia64/ia32/binfmt_elf32.c294
-rw-r--r--arch/ia64/ia32/elfcore32.h138
-rw-r--r--arch/ia64/ia32/ia32_entry.S500
-rw-r--r--arch/ia64/ia32/ia32_ioctl.c48
-rw-r--r--arch/ia64/ia32/ia32_ldt.c147
-rw-r--r--arch/ia64/ia32/ia32_signal.c1036
-rw-r--r--arch/ia64/ia32/ia32_support.c264
-rw-r--r--arch/ia64/ia32/ia32_traps.c156
-rw-r--r--arch/ia64/ia32/ia32priv.h544
-rw-r--r--arch/ia64/ia32/sys_ia32.c2747
11 files changed, 5886 insertions, 0 deletions
diff --git a/arch/ia64/ia32/Makefile b/arch/ia64/ia32/Makefile
new file mode 100644
index 000000000000..2ed90da81166
--- /dev/null
+++ b/arch/ia64/ia32/Makefile
@@ -0,0 +1,12 @@
1#
2# Makefile for the ia32 kernel emulation subsystem.
3#
4
5obj-y := ia32_entry.o sys_ia32.o ia32_ioctl.o ia32_signal.o \
6	  ia32_support.o ia32_traps.o binfmt_elf32.o ia32_ldt.o
7
8CFLAGS_ia32_ioctl.o += -Ifs/
9
10# Don't let GCC use f16-f31, so that save_ia32_fpstate_live() and
11# restore_ia32_fpstate_live() can be sure the live registers contain user-level state.
12CFLAGS_ia32_signal.o += -mfixed-range=f16-f31
diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
new file mode 100644
index 000000000000..31de70b7c67f
--- /dev/null
+++ b/arch/ia64/ia32/binfmt_elf32.c
@@ -0,0 +1,294 @@
1/*
2 * IA-32 ELF support.
3 *
4 * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
5 * Copyright (C) 2001 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 *
8 * 06/16/00 A. Mallick initialize csd/ssd/tssd/cflg for ia32_load_state
9 * 04/13/01 D. Mosberger dropped saving tssd in ar.k1---it's not needed
10 * 09/14/01 D. Mosberger fixed memory management for gdt/tss page
11 */
12#include <linux/config.h>
13
14#include <linux/types.h>
15#include <linux/mm.h>
16#include <linux/security.h>
17
18#include <asm/param.h>
19#include <asm/signal.h>
20
21#include "ia32priv.h"
22#include "elfcore32.h"
23
24/* Override some function names */
25#undef start_thread
26#define start_thread ia32_start_thread
27#define elf_format elf32_format
28#define init_elf_binfmt init_elf32_binfmt
29#define exit_elf_binfmt exit_elf32_binfmt
30
31#undef CLOCKS_PER_SEC
32#define CLOCKS_PER_SEC IA32_CLOCKS_PER_SEC
33
34extern void ia64_elf32_init (struct pt_regs *regs);
35
36static void elf32_set_personality (void);
37
38#define setup_arg_pages(bprm,tos,exec) ia32_setup_arg_pages(bprm,exec)
39#define elf_map elf32_map
40
41#undef SET_PERSONALITY
42#define SET_PERSONALITY(ex, ibcs2) elf32_set_personality()
43
44#define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
45
46/* Ugly but avoids duplication */
47#include "../../../fs/binfmt_elf.c"
48
49extern struct page *ia32_shared_page[];
50extern unsigned long *ia32_gdt;
51extern struct page *ia32_gate_page;
52
/*
 * nopage fault handler for the per-CPU IA-32 shared page (backs the fixed
 * GDT mapping created in ia64_elf32_init()).  The page always exists, so
 * just take an extra reference on the current CPU's page and report a
 * minor fault.
 */
53struct page *
54ia32_install_shared_page (struct vm_area_struct *vma, unsigned long address, int *type)
55{
56	struct page *pg = ia32_shared_page[smp_processor_id()];
57	get_page(pg);		/* extra ref for the mapping being installed */
58	if (type)
59		*type = VM_FAULT_MINOR;
60	return pg;
61}
62
/*
 * nopage fault handler for the single global IA-32 gate page, which holds
 * the sigreturn trampoline code (see the comment in ia64_elf32_init()).
 * Return it with an extra reference; the fault is always minor.
 */
63struct page *
64ia32_install_gate_page (struct vm_area_struct *vma, unsigned long address, int *type)
65{
66	struct page *pg = ia32_gate_page;
67	get_page(pg);
68	if (type)
69		*type = VM_FAULT_MINOR;
70	return pg;
71}
72
73
/*
 * Fault handlers that back the fixed GDT and sigreturn-gate mappings with
 * the extern ia32_shared_page[] / ia32_gate_page pages declared above.
 */
74static struct vm_operations_struct ia32_shared_page_vm_ops = {
75	.nopage = ia32_install_shared_page
76};
77
78static struct vm_operations_struct ia32_gate_page_vm_ops = {
79	.nopage = ia32_install_gate_page
80};
81
/*
 * Prepare a freshly exec'd IA-32 task: map the GDT, the sigreturn gate
 * page and the LDT at their fixed 32-bit addresses, clear the IA-32
 * general registers and FP control state, and load the task's segment
 * descriptors and IA-32 machine state.
 */
82void
83ia64_elf32_init (struct pt_regs *regs)
84{
85	struct vm_area_struct *vma;
86
87	/*
88	 * Map GDT below 4GB, where the processor can find it.  We need to map
89	 * it with privilege level 3 because the IVE uses non-privileged accesses to these
90	 * tables.  IA-32 segmentation is used to protect against IA-32 accesses to them.
91	 */
92	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
93	if (vma) {
94		memset(vma, 0, sizeof(*vma));
95		vma->vm_mm = current->mm;
96		vma->vm_start = IA32_GDT_OFFSET;
97		vma->vm_end = vma->vm_start + PAGE_SIZE;
98		vma->vm_page_prot = PAGE_SHARED;
99		vma->vm_flags = VM_READ|VM_MAYREAD|VM_RESERVED;
100		vma->vm_ops = &ia32_shared_page_vm_ops;
101		down_write(&current->mm->mmap_sem);
102		{
			/* on failure the vma is freed before BUG(); no double unlock */
103			if (insert_vm_struct(current->mm, vma)) {
104				kmem_cache_free(vm_area_cachep, vma);
105				up_write(&current->mm->mmap_sem);
106				BUG();
107			}
108		}
109		up_write(&current->mm->mmap_sem);
110	}
111
112	/*
113	 * When the user stack is not executable, pushing the sigreturn code onto
114	 * the stack would raise a segmentation fault when returning to the kernel.
115	 * So the sigreturn code is instead kept in a dedicated gate page, which
116	 * pretcode points at when setup_frame_ia32() builds a signal frame.
117	 */
118	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
119	if (vma) {
120		memset(vma, 0, sizeof(*vma));
121		vma->vm_mm = current->mm;
122		vma->vm_start = IA32_GATE_OFFSET;
123		vma->vm_end = vma->vm_start + PAGE_SIZE;
124		vma->vm_page_prot = PAGE_COPY_EXEC;
125		vma->vm_flags = VM_READ | VM_MAYREAD | VM_EXEC
126			| VM_MAYEXEC | VM_RESERVED;
127		vma->vm_ops = &ia32_gate_page_vm_ops;
128		down_write(&current->mm->mmap_sem);
129		{
130			if (insert_vm_struct(current->mm, vma)) {
131				kmem_cache_free(vm_area_cachep, vma);
132				up_write(&current->mm->mmap_sem);
133				BUG();
134			}
135		}
136		up_write(&current->mm->mmap_sem);
137	}
138
139	/*
140	 * Install LDT as anonymous memory.  This gives us all-zero segment descriptors
141	 * until a task modifies them via modify_ldt().
142	 */
143	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
144	if (vma) {
145		memset(vma, 0, sizeof(*vma));
146		vma->vm_mm = current->mm;
147		vma->vm_start = IA32_LDT_OFFSET;
148		vma->vm_end = vma->vm_start + PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);
149		vma->vm_page_prot = PAGE_SHARED;
150		vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE;
151		down_write(&current->mm->mmap_sem);
152		{
153			if (insert_vm_struct(current->mm, vma)) {
154				kmem_cache_free(vm_area_cachep, vma);
155				up_write(&current->mm->mmap_sem);
156				BUG();
157			}
158		}
159		up_write(&current->mm->mmap_sem);
160	}
161
162	ia64_psr(regs)->ac = 0;		/* turn off alignment checking */
163	regs->loadrs = 0;
164	/*
165	 *  According to the ABI %edx points to an `atexit' handler.  Since we don't have
166	 *  one we'll set it to 0 and initialize all the other registers just to make
167	 *  things more deterministic, ala the i386 implementation.
168	 */
169	regs->r8 = 0;	/* %eax */
170	regs->r11 = 0;	/* %ebx */
171	regs->r9 = 0;	/* %ecx */
172	regs->r10 = 0;	/* %edx */
173	regs->r13 = 0;	/* %ebp */
174	regs->r14 = 0;	/* %esi */
175	regs->r15 = 0;	/* %edi */
176
177	current->thread.eflag = IA32_EFLAG;
178	current->thread.fsr = IA32_FSR_DEFAULT;
179	current->thread.fcr = IA32_FCR_DEFAULT;
180	current->thread.fir = 0;
181	current->thread.fdr = 0;
182
183	/*
184	 * Setup GDTD.  Note: GDTD is the descrambled version of the pseudo-descriptor
185	 * format defined by Figure 3-11 "Pseudo-Descriptor Format" in the IA-32
186	 * architecture manual.  Also note that the only fields that are not ignored are
187	 * `base', `limit', 'G', `P' (must be 1) and `S' (must be 0).
188	 */
189	regs->r31 = IA32_SEG_UNSCRAMBLE(IA32_SEG_DESCRIPTOR(IA32_GDT_OFFSET, IA32_PAGE_SIZE - 1,
190							    0, 0, 0, 1, 0, 0, 0));
191	/* Setup the segment selectors */
192	regs->r16 = (__USER_DS << 16) | __USER_DS; /* ES == DS, GS, FS are zero */
193	regs->r17 = (__USER_DS << 16) | __USER_CS; /* SS, CS; ia32_load_state() sets TSS and LDT */
194
195	ia32_load_segment_descriptors(current);
196	ia32_load_state(current);
197}
198
/*
 * Build the 32-bit stack VMA just below IA32_STACK_TOP and move the pages
 * holding the exec arguments into it.  Substituted for setup_arg_pages()
 * in the shared fs/binfmt_elf.c loader via the #define above.
 * Returns 0 on success or a negative errno.
 */
199int
200ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack)
201{
202	unsigned long stack_base;
203	struct vm_area_struct *mpnt;
204	struct mm_struct *mm = current->mm;
205	int i, ret;
206
	/* relocate bprm's arg pointers from their temporary home to the IA-32 stack */
207	stack_base = IA32_STACK_TOP - MAX_ARG_PAGES*PAGE_SIZE;
208	mm->arg_start = bprm->p + stack_base;
209
210	bprm->p += stack_base;
211	if (bprm->loader)
212		bprm->loader += stack_base;
213	bprm->exec += stack_base;
214
215	mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
216	if (!mpnt)
217		return -ENOMEM;
218
	/* charge the pages of the new stack VMA against the VM accounting limits */
219	if (security_vm_enough_memory((IA32_STACK_TOP - (PAGE_MASK & (unsigned long) bprm->p))
220				      >> PAGE_SHIFT)) {
221		kmem_cache_free(vm_area_cachep, mpnt);
222		return -ENOMEM;
223	}
224
225	memset(mpnt, 0, sizeof(*mpnt));
226
227	down_write(&current->mm->mmap_sem);
228	{
229		mpnt->vm_mm = current->mm;
230		mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p;
231		mpnt->vm_end = IA32_STACK_TOP;
		/* honor the PT_GNU_STACK request for an executable/non-exec stack */
232		if (executable_stack == EXSTACK_ENABLE_X)
233			mpnt->vm_flags = VM_STACK_FLAGS |  VM_EXEC;
234		else if (executable_stack == EXSTACK_DISABLE_X)
235			mpnt->vm_flags = VM_STACK_FLAGS & ~VM_EXEC;
236		else
237			mpnt->vm_flags = VM_STACK_FLAGS;
238		mpnt->vm_page_prot = (mpnt->vm_flags & VM_EXEC)?
239				PAGE_COPY_EXEC: PAGE_COPY;
240		if ((ret = insert_vm_struct(current->mm, mpnt))) {
241			up_write(&current->mm->mmap_sem);
242			kmem_cache_free(vm_area_cachep, mpnt);
243			return ret;
244		}
245		current->mm->stack_vm = current->mm->total_vm = vma_pages(mpnt);
246	}
247
	/* hand each populated argument page over to the new stack VMA */
248	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
249		struct page *page = bprm->page[i];
250		if (page) {
251			bprm->page[i] = NULL;
252			install_arg_page(mpnt, page, stack_base);
253		}
254		stack_base += PAGE_SIZE;
255	}
256	up_write(&current->mm->mmap_sem);
257
258	/* Can't do it in ia64_elf32_init().  Needs to be done before calls to
259	   elf32_map() */
260	current->thread.ppl = ia32_init_pp_list();
261
262	return 0;
263}
264
/*
 * SET_PERSONALITY() hook for 32-bit binaries (see #define above): switch
 * the task to the PER_LINUX32 personality and to the IA-32 address-space
 * layout and limits.
 */
265static void
266elf32_set_personality (void)
267{
268	set_personality(PER_LINUX32);
269	current->thread.map_base  = IA32_PAGE_OFFSET/3;
270	current->thread.task_size = IA32_PAGE_OFFSET;	/* use what Linux/x86 uses... */
271	set_fs(USER_DS);				/* set addr limit for new TASK_SIZE */
272}
273
/*
 * elf_map() replacement for 32-bit binaries (see #define above): map one
 * program-header segment with IA-32 page granularity.  The segment's
 * sub-page offset (pgoff) is folded into the length and file offset so
 * the mapping itself starts page-aligned.
 */
274static unsigned long
275elf32_map (struct file *filep, unsigned long addr, struct elf_phdr *eppnt, int prot, int type)
276{
277	unsigned long pgoff = (eppnt->p_vaddr) & ~IA32_PAGE_MASK;
278
279	return ia32_do_mmap(filep, (addr & IA32_PAGE_MASK), eppnt->p_filesz + pgoff, prot, type,
280			    eppnt->p_offset - pgoff);
281}
282
283#define cpu_uses_ia32el()	(local_cpu_data->family > 0x1f)
284
/*
 * If this CPU runs IA-32 code through the IA-32 Execution Layer (judging
 * by the macro above, presumably processor families above 0x1f -- confirm
 * against the IA-64 model list), unregister the native elf32 loader so
 * IA-32 binaries are not handled here.
 */
285static int __init check_elf32_binfmt(void)
286{
287	if (cpu_uses_ia32el()) {
288		printk("Please use IA-32 EL for executing IA-32 binaries\n");
289		return unregister_binfmt(&elf_format);
290	}
291	return 0;
292}
293
294module_init(check_elf32_binfmt)
diff --git a/arch/ia64/ia32/elfcore32.h b/arch/ia64/ia32/elfcore32.h
new file mode 100644
index 000000000000..b73b8b6b10c1
--- /dev/null
+++ b/arch/ia64/ia32/elfcore32.h
@@ -0,0 +1,138 @@
1/*
2 * IA-32 ELF core dump support.
3 *
4 * Copyright (C) 2003 Arun Sharma <arun.sharma@intel.com>
5 *
6 * Derived from the x86_64 version
7 */
8#ifndef _ELFCORE32_H_
9#define _ELFCORE32_H_
10
11#include <asm/intrinsics.h>
12#include <asm/uaccess.h>
13
14#define USE_ELF_CORE_DUMP 1
15
16/* Override elfcore.h */
17#define _LINUX_ELFCORE_H 1
18typedef unsigned int elf_greg_t;
19
20#define ELF_NGREG (sizeof (struct user_regs_struct32) / sizeof(elf_greg_t))
21typedef elf_greg_t elf_gregset_t[ELF_NGREG];
22
23typedef struct ia32_user_i387_struct elf_fpregset_t;
24typedef struct ia32_user_fxsr_struct elf_fpxregset_t;
25
26struct elf_siginfo
27{
28 int si_signo; /* signal number */
29 int si_code; /* extra code */
30 int si_errno; /* errno */
31};
32
33#define jiffies_to_timeval(a,b) do { (b)->tv_usec = 0; (b)->tv_sec = (a)/HZ; }while(0)
34
35struct elf_prstatus
36{
37 struct elf_siginfo pr_info; /* Info associated with signal */
38 short pr_cursig; /* Current signal */
39 unsigned int pr_sigpend; /* Set of pending signals */
40 unsigned int pr_sighold; /* Set of held signals */
41 pid_t pr_pid;
42 pid_t pr_ppid;
43 pid_t pr_pgrp;
44 pid_t pr_sid;
45 struct compat_timeval pr_utime; /* User time */
46 struct compat_timeval pr_stime; /* System time */
47 struct compat_timeval pr_cutime; /* Cumulative user time */
48 struct compat_timeval pr_cstime; /* Cumulative system time */
49 elf_gregset_t pr_reg; /* GP registers */
50 int pr_fpvalid; /* True if math co-processor being used. */
51};
52
53#define ELF_PRARGSZ (80) /* Number of chars for args */
54
55struct elf_prpsinfo
56{
57 char pr_state; /* numeric process state */
58 char pr_sname; /* char for pr_state */
59 char pr_zomb; /* zombie */
60 char pr_nice; /* nice val */
61 unsigned int pr_flag; /* flags */
62 __u16 pr_uid;
63 __u16 pr_gid;
64 pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
65 /* Lots missing */
66 char pr_fname[16]; /* filename of executable */
67 char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
68};
69
70#define ELF_CORE_COPY_REGS(pr_reg, regs) \
71 pr_reg[0] = regs->r11; \
72 pr_reg[1] = regs->r9; \
73 pr_reg[2] = regs->r10; \
74 pr_reg[3] = regs->r14; \
75 pr_reg[4] = regs->r15; \
76 pr_reg[5] = regs->r13; \
77 pr_reg[6] = regs->r8; \
78 pr_reg[7] = regs->r16 & 0xffff; \
79 pr_reg[8] = (regs->r16 >> 16) & 0xffff; \
80 pr_reg[9] = (regs->r16 >> 32) & 0xffff; \
81 pr_reg[10] = (regs->r16 >> 48) & 0xffff; \
82 pr_reg[11] = regs->r1; \
83 pr_reg[12] = regs->cr_iip; \
84 pr_reg[13] = regs->r17 & 0xffff; \
85 pr_reg[14] = ia64_getreg(_IA64_REG_AR_EFLAG); \
86 pr_reg[15] = regs->r12; \
87 pr_reg[16] = (regs->r17 >> 16) & 0xffff;
88
/* Fill a 32-bit elf_gregset_t from the given pt_regs (see ELF_CORE_COPY_REGS). */
89static inline void elf_core_copy_regs(elf_gregset_t *elfregs,
90				      struct pt_regs *regs)
91{
92	ELF_CORE_COPY_REGS((*elfregs), regs)
93}
94
/*
 * Copy task t's saved user-level registers into elfregs for a core dump.
 * Always returns 1 (registers available).
 */
95static inline int elf_core_copy_task_regs(struct task_struct *t,
96					  elf_gregset_t* elfregs)
97{
98	struct pt_regs *pp = ia64_task_regs(t);
99	ELF_CORE_COPY_REGS((*elfregs), pp);
100	return 1;
101}
102
/*
 * Dump tsk's IA-32 i387 FP state into *fpu for a core dump.  Returns 0 if
 * the task never used the FPU (nothing to dump), 1 otherwise.
 */
103static inline int
104elf_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs, elf_fpregset_t *fpu)
105{
106	struct ia32_user_i387_struct *fpstate = (void*)fpu;
107	mm_segment_t old_fs;
108
109	if (!tsk_used_math(tsk))
110		return 0;
111
	/*
	 * save_ia32_fpstate() takes a __user pointer; temporarily widen the
	 * address limit so it may write into this kernel-space buffer.
	 */
112	old_fs = get_fs();
113	set_fs(KERNEL_DS);
114	save_ia32_fpstate(tsk, (struct ia32_user_i387_struct __user *) fpstate);
115	set_fs(old_fs);
116
117	return 1;
118}
119
120#define ELF_CORE_COPY_XFPREGS 1
/*
 * Dump tsk's IA-32 extended (fxsr) FP state into *xfpu for a core dump.
 * Same set_fs() trick as elf_core_copy_task_fpregs() above.  Returns 0 if
 * the task never used the FPU, 1 otherwise.
 */
121static inline int
122elf_core_copy_task_xfpregs(struct task_struct *tsk, elf_fpxregset_t *xfpu)
123{
124	struct ia32_user_fxsr_struct *fpxstate = (void*) xfpu;
125	mm_segment_t old_fs;
126
127	if (!tsk_used_math(tsk))
128		return 0;
129
130	old_fs = get_fs();
131	set_fs(KERNEL_DS);
132	save_ia32_fpxstate(tsk, (struct ia32_user_fxsr_struct __user *) fpxstate);
133	set_fs(old_fs);
134
135	return 1;
136}
137
138#endif /* _ELFCORE32_H_ */
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
new file mode 100644
index 000000000000..829a6d80711c
--- /dev/null
+++ b/arch/ia64/ia32/ia32_entry.S
@@ -0,0 +1,500 @@
1#include <asm/asmmacro.h>
2#include <asm/ia32.h>
3#include <asm/offsets.h>
4#include <asm/signal.h>
5#include <asm/thread_info.h>
6
7#include "../kernel/minstate.h"
8
9 /*
10 * execve() is special because in case of success, we need to
11 * setup a null register window frame (in case an IA-32 process
12 * is exec'ing an IA-64 program).
13 */
14ENTRY(ia32_execve)
15 .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(3)
16 alloc loc1=ar.pfs,3,2,4,0
17 mov loc0=rp
18 .body
19 zxt4 out0=in0 // filename
20 ;; // stop bit between alloc and call
21 zxt4 out1=in1 // argv
22 zxt4 out2=in2 // envp
23 add out3=16,sp // regs
24 br.call.sptk.few rp=sys32_execve
251: cmp.ge p6,p0=r8,r0
26 mov ar.pfs=loc1 // restore ar.pfs
27 ;;
28(p6) mov ar.pfs=r0 // clear ar.pfs in case of success
29 sxt4 r8=r8 // return 64-bit result
30 mov rp=loc0
31 br.ret.sptk.few rp
32END(ia32_execve)
33
34ENTRY(ia32_clone)
35 .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
36 alloc r16=ar.pfs,5,2,6,0
37 DO_SAVE_SWITCH_STACK
38 mov loc0=rp
39 mov loc1=r16 // save ar.pfs across do_fork
40 .body
41 zxt4 out1=in1 // newsp
42 mov out3=16 // stacksize (compensates for 16-byte scratch area)
43 adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = &regs
44 mov out0=in0 // out0 = clone_flags
45 zxt4 out4=in2 // out4 = parent_tidptr
46 zxt4 out5=in4 // out5 = child_tidptr
47 br.call.sptk.many rp=do_fork
48.ret0: .restore sp
49 adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack
50 mov ar.pfs=loc1
51 mov rp=loc0
52 br.ret.sptk.many rp
53END(ia32_clone)
54
55ENTRY(sys32_rt_sigsuspend)
56 .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
57 alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs
58 mov loc0=rp
59 mov out0=in0 // mask
60 mov out1=in1 // sigsetsize
61 mov out2=sp // out2 = &sigscratch
62 .fframe 16
63 adds sp=-16,sp // allocate dummy "sigscratch"
64 ;;
65 .body
66 br.call.sptk.many rp=ia32_rt_sigsuspend
671: .restore sp
68 adds sp=16,sp
69 mov rp=loc0
70 mov ar.pfs=loc1
71 br.ret.sptk.many rp
72END(sys32_rt_sigsuspend)
73
74ENTRY(sys32_sigsuspend)
75 .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
76 alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs
77 mov loc0=rp
78 mov out0=in2 // mask (first two args are ignored)
79 ;;
80 mov out1=sp // out1 = &sigscratch
81 .fframe 16
82 adds sp=-16,sp // allocate dummy "sigscratch"
83 .body
84 br.call.sptk.many rp=ia32_sigsuspend
851: .restore sp
86 adds sp=16,sp
87 mov rp=loc0
88 mov ar.pfs=loc1
89 br.ret.sptk.many rp
90END(sys32_sigsuspend)
91
92GLOBAL_ENTRY(ia32_ret_from_clone)
93 PT_REGS_UNWIND_INFO(0)
94{ /*
95 * Some versions of gas generate bad unwind info if the first instruction of a
96 * procedure doesn't go into the first slot of a bundle. This is a workaround.
97 */
98 nop.m 0
99 nop.i 0
100 /*
101 * We need to call schedule_tail() to complete the scheduling process.
102 * Called by ia64_switch_to after do_fork()->copy_thread(). r8 contains the
103 * address of the previously executing task.
104 */
105 br.call.sptk.many rp=ia64_invoke_schedule_tail
106}
107.ret1:
108 adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
109 ;;
110 ld4 r2=[r2]
111 ;;
112 mov r8=0
113 and r2=_TIF_SYSCALL_TRACEAUDIT,r2
114 ;;
115 cmp.ne p6,p0=r2,r0
116(p6) br.cond.spnt .ia32_strace_check_retval
117 ;; // prevent RAW on r8
118END(ia32_ret_from_clone)
119	 // fall through
// Common IA-32 syscall return path: store r8 (the syscall return value,
// which maps to the task's %eax -- see ia64_elf32_init) into pt_regs so it
// is restored on the way out, then leave the kernel.
120GLOBAL_ENTRY(ia32_ret_from_syscall)
121	PT_REGS_UNWIND_INFO(0)
122
123	cmp.ge p6,p7=r8,r0		// syscall executed successfully?
124	adds r2=IA64_PT_REGS_R8_OFFSET+16,sp	// r2 = &pt_regs.r8
125	;;
126	alloc r3=ar.pfs,0,0,0,0		// drop the syscall argument frame
127	st8 [r2]=r8			// store return value in slot for r8
128	br.cond.sptk.many ia64_leave_kernel
129END(ia32_ret_from_syscall)
130
131 //
132 // Invoke a system call, but do some tracing before and after the call.
133 // We MUST preserve the current register frame throughout this routine
134 // because some system calls (such as ia64_execve) directly
135 // manipulate ar.pfs.
136 //
137 // Input:
138 // r8 = syscall number
139 // b6 = syscall entry point
140 //
141GLOBAL_ENTRY(ia32_trace_syscall)
142 PT_REGS_UNWIND_INFO(0)
143 mov r3=-38
144 adds r2=IA64_PT_REGS_R8_OFFSET+16,sp
145 ;;
146 st8 [r2]=r3 // initialize return code to -ENOSYS
147 br.call.sptk.few rp=syscall_trace_enter // give parent a chance to catch syscall args
148.ret2: // Need to reload arguments (they may be changed by the tracing process)
149 adds r2=IA64_PT_REGS_R1_OFFSET+16,sp // r2 = &pt_regs.r1
150 adds r3=IA64_PT_REGS_R13_OFFSET+16,sp // r3 = &pt_regs.r13
151 mov r15=IA32_NR_syscalls
152 ;;
153 ld4 r8=[r2],IA64_PT_REGS_R9_OFFSET-IA64_PT_REGS_R1_OFFSET
154 movl r16=ia32_syscall_table
155 ;;
156 ld4 r33=[r2],8 // r9 == ecx
157 ld4 r37=[r3],16 // r13 == ebp
158 cmp.ltu.unc p6,p7=r8,r15
159 ;;
160 ld4 r34=[r2],8 // r10 == edx
161 ld4 r36=[r3],8 // r15 == edi
162(p6) shladd r16=r8,3,r16 // force ni_syscall if not valid syscall number
163 ;;
164 ld8 r16=[r16]
165 ;;
166 ld4 r32=[r2],8 // r11 == ebx
167 mov b6=r16
168 ld4 r35=[r3],8 // r14 == esi
169 br.call.sptk.few rp=b6 // do the syscall
170.ia32_strace_check_retval:
171 cmp.lt p6,p0=r8,r0 // syscall failed?
172 adds r2=IA64_PT_REGS_R8_OFFSET+16,sp // r2 = &pt_regs.r8
173 ;;
174 st8.spill [r2]=r8 // store return value in slot for r8
175 br.call.sptk.few rp=syscall_trace_leave // give parent a chance to catch return value
176.ret4: alloc r2=ar.pfs,0,0,0,0 // drop the syscall argument frame
177 br.cond.sptk.many ia64_leave_kernel
178END(ia32_trace_syscall)
179
// vfork(2) for IA-32 tasks: load the vfork clone flags, then share the
// common do_fork() path in sys32_fork (label .fork1 below).
180GLOBAL_ENTRY(sys32_vfork)
181	alloc r16=ar.pfs,2,2,4,0;;
182	mov out0=IA64_CLONE_VFORK|IA64_CLONE_VM|SIGCHLD	// out0 = clone_flags
183	br.cond.sptk.few .fork1			// do the work
184END(sys32_vfork)
185
186GLOBAL_ENTRY(sys32_fork)
187 .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
188 alloc r16=ar.pfs,2,2,4,0
189 mov out0=SIGCHLD // out0 = clone_flags
190 ;;
191.fork1:
192 mov loc0=rp
193 mov loc1=r16 // save ar.pfs across do_fork
194 DO_SAVE_SWITCH_STACK
195
196 .body
197
198 mov out1=0
199 mov out3=0
200 adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = &regs
201 br.call.sptk.few rp=do_fork
202.ret5: .restore sp
203 adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack
204 mov ar.pfs=loc1
205 mov rp=loc0
206 br.ret.sptk.many rp
207END(sys32_fork)
208
209 .rodata
210 .align 8
211 .globl ia32_syscall_table
212ia32_syscall_table:
213 data8 sys_ni_syscall /* 0 - old "setup(" system call*/
214 data8 sys_exit
215 data8 sys32_fork
216 data8 sys_read
217 data8 sys_write
218 data8 sys32_open /* 5 */
219 data8 sys_close
220 data8 sys32_waitpid
221 data8 sys_creat
222 data8 sys_link
223 data8 sys_unlink /* 10 */
224 data8 ia32_execve
225 data8 sys_chdir
226 data8 compat_sys_time
227 data8 sys_mknod
228 data8 sys_chmod /* 15 */
229 data8 sys_lchown /* 16-bit version */
230 data8 sys_ni_syscall /* old break syscall holder */
231 data8 sys_ni_syscall
232 data8 sys32_lseek
233 data8 sys_getpid /* 20 */
234 data8 compat_sys_mount
235 data8 sys_oldumount
236 data8 sys_setuid /* 16-bit version */
237 data8 sys_getuid /* 16-bit version */
238 data8 compat_sys_stime /* 25 */
239 data8 sys32_ptrace
240 data8 sys32_alarm
241 data8 sys_ni_syscall
242 data8 sys32_pause
243 data8 compat_sys_utime /* 30 */
244 data8 sys_ni_syscall /* old stty syscall holder */
245 data8 sys_ni_syscall /* old gtty syscall holder */
246 data8 sys_access
247 data8 sys_nice
248 data8 sys_ni_syscall /* 35 */ /* old ftime syscall holder */
249 data8 sys_sync
250 data8 sys_kill
251 data8 sys_rename
252 data8 sys_mkdir
253 data8 sys_rmdir /* 40 */
254 data8 sys_dup
255 data8 sys32_pipe
256 data8 compat_sys_times
257 data8 sys_ni_syscall /* old prof syscall holder */
258 data8 sys32_brk /* 45 */
259 data8 sys_setgid /* 16-bit version */
260 data8 sys_getgid /* 16-bit version */
261 data8 sys32_signal
262 data8 sys_geteuid /* 16-bit version */
263 data8 sys_getegid /* 16-bit version */ /* 50 */
264 data8 sys_acct
265 data8 sys_umount /* recycled never used phys( */
266 data8 sys_ni_syscall /* old lock syscall holder */
267 data8 compat_sys_ioctl
268 data8 compat_sys_fcntl /* 55 */
269 data8 sys_ni_syscall /* old mpx syscall holder */
270 data8 sys_setpgid
271 data8 sys_ni_syscall /* old ulimit syscall holder */
272 data8 sys_ni_syscall
273 data8 sys_umask /* 60 */
274 data8 sys_chroot
275 data8 sys_ustat
276 data8 sys_dup2
277 data8 sys_getppid
278 data8 sys_getpgrp /* 65 */
279 data8 sys_setsid
280 data8 sys32_sigaction
281 data8 sys_ni_syscall
282 data8 sys_ni_syscall
283 data8 sys_setreuid /* 16-bit version */ /* 70 */
284 data8 sys_setregid /* 16-bit version */
285 data8 sys32_sigsuspend
286 data8 compat_sys_sigpending
287 data8 sys_sethostname
288 data8 compat_sys_setrlimit /* 75 */
289 data8 compat_sys_old_getrlimit
290 data8 compat_sys_getrusage
291 data8 sys32_gettimeofday
292 data8 sys32_settimeofday
293 data8 sys32_getgroups16 /* 80 */
294 data8 sys32_setgroups16
295 data8 sys32_old_select
296 data8 sys_symlink
297 data8 sys_ni_syscall
298 data8 sys_readlink /* 85 */
299 data8 sys_uselib
300 data8 sys_swapon
301 data8 sys_reboot
302 data8 sys32_readdir
303 data8 sys32_mmap /* 90 */
304 data8 sys32_munmap
305 data8 sys_truncate
306 data8 sys_ftruncate
307 data8 sys_fchmod
308 data8 sys_fchown /* 16-bit version */ /* 95 */
309 data8 sys_getpriority
310 data8 sys_setpriority
311 data8 sys_ni_syscall /* old profil syscall holder */
312 data8 compat_sys_statfs
313 data8 compat_sys_fstatfs /* 100 */
314 data8 sys_ni_syscall /* ioperm */
315 data8 compat_sys_socketcall
316 data8 sys_syslog
317 data8 compat_sys_setitimer
318 data8 compat_sys_getitimer /* 105 */
319 data8 compat_sys_newstat
320 data8 compat_sys_newlstat
321 data8 compat_sys_newfstat
322 data8 sys_ni_syscall
323 data8 sys_ni_syscall /* iopl */ /* 110 */
324 data8 sys_vhangup
325 data8 sys_ni_syscall /* used to be sys_idle */
326 data8 sys_ni_syscall
327 data8 compat_sys_wait4
328 data8 sys_swapoff /* 115 */
329 data8 sys32_sysinfo
330 data8 sys32_ipc
331 data8 sys_fsync
332 data8 sys32_sigreturn
333 data8 ia32_clone /* 120 */
334 data8 sys_setdomainname
335 data8 sys32_newuname
336 data8 sys32_modify_ldt
337 data8 sys_ni_syscall /* adjtimex */
338 data8 sys32_mprotect /* 125 */
339 data8 compat_sys_sigprocmask
340 data8 sys_ni_syscall /* create_module */
341 data8 sys_ni_syscall /* init_module */
342 data8 sys_ni_syscall /* delete_module */
343 data8 sys_ni_syscall /* get_kernel_syms */ /* 130 */
344 data8 sys_quotactl
345 data8 sys_getpgid
346 data8 sys_fchdir
347 data8 sys_ni_syscall /* sys_bdflush */
348 data8 sys_sysfs /* 135 */
349 data8 sys32_personality
350 data8 sys_ni_syscall /* for afs_syscall */
351 data8 sys_setfsuid /* 16-bit version */
352 data8 sys_setfsgid /* 16-bit version */
353 data8 sys_llseek /* 140 */
354 data8 compat_sys_getdents
355 data8 compat_sys_select
356 data8 sys_flock
357 data8 sys32_msync
358 data8 compat_sys_readv /* 145 */
359 data8 compat_sys_writev
360 data8 sys_getsid
361 data8 sys_fdatasync
362 data8 sys32_sysctl
363 data8 sys_mlock /* 150 */
364 data8 sys_munlock
365 data8 sys_mlockall
366 data8 sys_munlockall
367 data8 sys_sched_setparam
368 data8 sys_sched_getparam /* 155 */
369 data8 sys_sched_setscheduler
370 data8 sys_sched_getscheduler
371 data8 sys_sched_yield
372 data8 sys_sched_get_priority_max
373 data8 sys_sched_get_priority_min /* 160 */
374 data8 sys32_sched_rr_get_interval
375 data8 compat_sys_nanosleep
376 data8 sys32_mremap
377 data8 sys_setresuid /* 16-bit version */
378 data8 sys32_getresuid16 /* 16-bit version */ /* 165 */
379 data8 sys_ni_syscall /* vm86 */
380 data8 sys_ni_syscall /* sys_query_module */
381 data8 sys_poll
382 data8 sys_ni_syscall /* nfsservctl */
383 data8 sys_setresgid /* 170 */
384 data8 sys32_getresgid16
385 data8 sys_prctl
386 data8 sys32_rt_sigreturn
387 data8 sys32_rt_sigaction
388 data8 sys32_rt_sigprocmask /* 175 */
389 data8 sys_rt_sigpending
390 data8 compat_sys_rt_sigtimedwait
391 data8 sys32_rt_sigqueueinfo
392 data8 sys32_rt_sigsuspend
393 data8 sys32_pread /* 180 */
394 data8 sys32_pwrite
395 data8 sys_chown /* 16-bit version */
396 data8 sys_getcwd
397 data8 sys_capget
398 data8 sys_capset /* 185 */
399 data8 sys32_sigaltstack
400 data8 sys32_sendfile
401 data8 sys_ni_syscall /* streams1 */
402 data8 sys_ni_syscall /* streams2 */
403 data8 sys32_vfork /* 190 */
404 data8 compat_sys_getrlimit
405 data8 sys32_mmap2
406 data8 sys32_truncate64
407 data8 sys32_ftruncate64
408 data8 sys32_stat64 /* 195 */
409 data8 sys32_lstat64
410 data8 sys32_fstat64
411 data8 sys_lchown
412 data8 sys_getuid
413 data8 sys_getgid /* 200 */
414 data8 sys_geteuid
415 data8 sys_getegid
416 data8 sys_setreuid
417 data8 sys_setregid
418 data8 sys_getgroups /* 205 */
419 data8 sys_setgroups
420 data8 sys_fchown
421 data8 sys_setresuid
422 data8 sys_getresuid
423 data8 sys_setresgid /* 210 */
424 data8 sys_getresgid
425 data8 sys_chown
426 data8 sys_setuid
427 data8 sys_setgid
428 data8 sys_setfsuid /* 215 */
429 data8 sys_setfsgid
430 data8 sys_pivot_root
431 data8 sys_mincore
432 data8 sys_madvise
433 data8 compat_sys_getdents64 /* 220 */
434 data8 compat_sys_fcntl64
435 data8 sys_ni_syscall /* reserved for TUX */
436 data8 sys_ni_syscall /* reserved for Security */
437 data8 sys_gettid
438 data8 sys_readahead /* 225 */
439 data8 sys_setxattr
440 data8 sys_lsetxattr
441 data8 sys_fsetxattr
442 data8 sys_getxattr
443 data8 sys_lgetxattr /* 230 */
444 data8 sys_fgetxattr
445 data8 sys_listxattr
446 data8 sys_llistxattr
447 data8 sys_flistxattr
448 data8 sys_removexattr /* 235 */
449 data8 sys_lremovexattr
450 data8 sys_fremovexattr
451 data8 sys_tkill
452 data8 sys_sendfile64
453 data8 compat_sys_futex /* 240 */
454 data8 compat_sys_sched_setaffinity
455 data8 compat_sys_sched_getaffinity
456 data8 sys32_set_thread_area
457 data8 sys32_get_thread_area
458 data8 compat_sys_io_setup /* 245 */
459 data8 sys_io_destroy
460 data8 compat_sys_io_getevents
461 data8 compat_sys_io_submit
462 data8 sys_io_cancel
463 data8 sys_fadvise64 /* 250 */
464 data8 sys_ni_syscall
465 data8 sys_exit_group
466 data8 sys_lookup_dcookie
467 data8 sys_epoll_create
468 data8 sys32_epoll_ctl /* 255 */
469 data8 sys32_epoll_wait
470 data8 sys_remap_file_pages
471 data8 sys_set_tid_address
472 data8 sys32_timer_create
473 data8 compat_sys_timer_settime /* 260 */
474 data8 compat_sys_timer_gettime
475 data8 sys_timer_getoverrun
476 data8 sys_timer_delete
477 data8 compat_sys_clock_settime
478 data8 compat_sys_clock_gettime /* 265 */
479 data8 compat_sys_clock_getres
480 data8 compat_sys_clock_nanosleep
481 data8 compat_sys_statfs64
482 data8 compat_sys_fstatfs64
483 data8 sys_tgkill /* 270 */
484 data8 compat_sys_utimes
485 data8 sys32_fadvise64_64
486 data8 sys_ni_syscall
487 data8 sys_ni_syscall
488 data8 sys_ni_syscall /* 275 */
489 data8 sys_ni_syscall
490 data8 compat_sys_mq_open
491 data8 sys_mq_unlink
492 data8 compat_sys_mq_timedsend
493 data8 compat_sys_mq_timedreceive /* 280 */
494 data8 compat_sys_mq_notify
495 data8 compat_sys_mq_getsetattr
496 data8 sys_ni_syscall /* reserved for kexec */
497 data8 compat_sys_waitid
498
499 // guard against failures to increase IA32_NR_syscalls
500 .org ia32_syscall_table + 8*IA32_NR_syscalls
diff --git a/arch/ia64/ia32/ia32_ioctl.c b/arch/ia64/ia32/ia32_ioctl.c
new file mode 100644
index 000000000000..9845dabe2613
--- /dev/null
+++ b/arch/ia64/ia32/ia32_ioctl.c
@@ -0,0 +1,48 @@
1/*
2 * IA32 Architecture-specific ioctl shim code
3 *
4 * Copyright (C) 2000 VA Linux Co
5 * Copyright (C) 2000 Don Dugger <n0ano@valinux.com>
6 * Copyright (C) 2001-2003 Hewlett-Packard Co
7 * David Mosberger-Tang <davidm@hpl.hp.com>
8 */
9
10#include <linux/signal.h> /* argh, msdos_fs.h isn't self-contained... */
11#include <linux/syscalls.h>
12#include "ia32priv.h"
13
14#define INCLUDES
15#include "compat_ioctl.c"
16#include <asm/ioctl32.h>
17
18#define IOCTL_NR(a) ((a) & ~(_IOC_SIZEMASK << _IOC_SIZESHIFT))
19
20#define DO_IOCTL(fd, cmd, arg) ({ \
21 int _ret; \
22 mm_segment_t _old_fs = get_fs(); \
23 \
24 set_fs(KERNEL_DS); \
25 _ret = sys_ioctl(fd, cmd, (unsigned long)arg); \
26 set_fs(_old_fs); \
27 _ret; \
28})
29
30#define CODE
31#include "compat_ioctl.c"
32
33typedef int (* ioctl32_handler_t)(unsigned int, unsigned int, unsigned long, struct file *);
34
35#define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL((cmd),sys_ioctl)
36#define HANDLE_IOCTL(cmd,handler) { (cmd), (ioctl32_handler_t)(handler), NULL },
37#define IOCTL_TABLE_START \
38 struct ioctl_trans ioctl_start[] = {
39#define IOCTL_TABLE_END \
40 };
41
42IOCTL_TABLE_START
43#define DECLARES
44#include "compat_ioctl.c"
45#include <linux/compat_ioctl.h>
46IOCTL_TABLE_END
47
48int ioctl_table_size = ARRAY_SIZE(ioctl_start);
diff --git a/arch/ia64/ia32/ia32_ldt.c b/arch/ia64/ia32/ia32_ldt.c
new file mode 100644
index 000000000000..a152738c7d0d
--- /dev/null
+++ b/arch/ia64/ia32/ia32_ldt.c
@@ -0,0 +1,147 @@
1/*
2 * Copyright (C) 2001, 2004 Hewlett-Packard Co
3 * David Mosberger-Tang <davidm@hpl.hp.com>
4 *
5 * Adapted from arch/i386/kernel/ldt.c
6 */
7
8#include <linux/errno.h>
9#include <linux/sched.h>
10#include <linux/string.h>
11#include <linux/mm.h>
12#include <linux/smp.h>
13#include <linux/smp_lock.h>
14#include <linux/vmalloc.h>
15
16#include <asm/uaccess.h>
17
18#include "ia32priv.h"
19
20/*
21 * read_ldt() is not really atomic - this is not a problem since synchronization of reads
22 * and writes done to the LDT has to be assured by user-space anyway. Writes are atomic,
23 * to protect the security checks done on new descriptors.
24 */
25static int
26read_ldt (void __user *ptr, unsigned long bytecount)
27{
28 unsigned long bytes_left, n;
29 char __user *src, *dst;
30 char buf[256]; /* temporary buffer (don't overflow kernel stack!) */
31
32 if (bytecount > IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE)
33 bytecount = IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE;
34
35 bytes_left = bytecount;
36
37 src = (void __user *) IA32_LDT_OFFSET;
38 dst = ptr;
39
40 while (bytes_left) {
41 n = sizeof(buf);
42 if (n > bytes_left)
43 n = bytes_left;
44
45 /*
46 * We know we're reading valid memory, but we still must guard against
47 * running out of memory.
48 */
49 if (__copy_from_user(buf, src, n))
50 return -EFAULT;
51
52 if (copy_to_user(dst, buf, n))
53 return -EFAULT;
54
55 src += n;
56 dst += n;
57 bytes_left -= n;
58 }
59 return bytecount;
60}
61
62static int
63read_default_ldt (void __user * ptr, unsigned long bytecount)
64{
65 unsigned long size;
66 int err;
67
68 /* XXX fix me: should return equivalent of default_ldt[0] */
69 err = 0;
70 size = 8;
71 if (size > bytecount)
72 size = bytecount;
73
74 err = size;
75 if (clear_user(ptr, size))
76 err = -EFAULT;
77
78 return err;
79}
80
/*
 * Validate one ia32_user_desc from user space, encode it as an 8-byte i386
 * segment descriptor and install it into the user-mapped LDT.  @oldmode
 * selects the legacy modify_ldt(1) semantics.  Returns 0 or -EINVAL/-EFAULT.
 */
81static int
82write_ldt (void __user * ptr, unsigned long bytecount, int oldmode)
83{
84	struct ia32_user_desc ldt_info;
85	__u64 entry;
86	int ret;
87
	/* The i386 ABI passes exactly one ia32_user_desc per call. */
88	if (bytecount != sizeof(ldt_info))
89		return -EINVAL;
90	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
91		return -EFAULT;
92
93	if (ldt_info.entry_number >= IA32_LDT_ENTRIES)
94		return -EINVAL;
	/* contents==3 is only legal in the new interface and only when the
	   segment is marked not-present (mirrors i386 write_ldt()). */
95	if (ldt_info.contents == 3) {
96		if (oldmode)
97			return -EINVAL;
98		if (ldt_info.seg_not_present == 0)
99			return -EINVAL;
100	}
101
102	if (ldt_info.base_addr == 0 && ldt_info.limit == 0
103	    && (oldmode || (ldt_info.contents == 0 && ldt_info.read_exec_only == 1
104			    && ldt_info.seg_32bit == 0 && ldt_info.limit_in_pages == 0
105			    && ldt_info.seg_not_present == 1 && ldt_info.useable == 0)))
106		/* allow LDTs to be cleared by the user */
107		entry = 0;
108	else
109		/* we must set the "Accessed" bit as IVE doesn't emulate it */
110		entry = IA32_SEG_DESCRIPTOR(ldt_info.base_addr, ldt_info.limit,
111					    (((ldt_info.read_exec_only ^ 1) << 1)
112					     | (ldt_info.contents << 2)) | 1,
113					    1, 3, ldt_info.seg_not_present ^ 1,
114					    (oldmode ? 0 : ldt_info.useable),
115					    ldt_info.seg_32bit,
116					    ldt_info.limit_in_pages);
117	/*
118	 * Install the new entry.  We know we're accessing valid (mapped) user-level
119	 * memory, but we still need to guard against out-of-memory, hence we must use
120	 * put_user().
121	 */
122	ret = __put_user(entry, (__u64 __user *) IA32_LDT_OFFSET + ldt_info.entry_number);
123	ia32_load_segment_descriptors(current);
124	return ret;
125}
126
127asmlinkage int
128sys32_modify_ldt (int func, unsigned int ptr, unsigned int bytecount)
129{
130 int ret = -ENOSYS;
131
132 switch (func) {
133 case 0:
134 ret = read_ldt(compat_ptr(ptr), bytecount);
135 break;
136 case 1:
137 ret = write_ldt(compat_ptr(ptr), bytecount, 1);
138 break;
139 case 2:
140 ret = read_default_ldt(compat_ptr(ptr), bytecount);
141 break;
142 case 0x11:
143 ret = write_ldt(compat_ptr(ptr), bytecount, 0);
144 break;
145 }
146 return ret;
147}
diff --git a/arch/ia64/ia32/ia32_signal.c b/arch/ia64/ia32/ia32_signal.c
new file mode 100644
index 000000000000..19b02adce68c
--- /dev/null
+++ b/arch/ia64/ia32/ia32_signal.c
@@ -0,0 +1,1036 @@
1/*
2 * IA32 Architecture-specific signal handling support.
3 *
4 * Copyright (C) 1999, 2001-2002, 2005 Hewlett-Packard Co
5 * David Mosberger-Tang <davidm@hpl.hp.com>
6 * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
7 * Copyright (C) 2000 VA Linux Co
8 * Copyright (C) 2000 Don Dugger <n0ano@valinux.com>
9 *
10 * Derived from i386 and Alpha versions.
11 */
12
13#include <linux/errno.h>
14#include <linux/kernel.h>
15#include <linux/mm.h>
16#include <linux/personality.h>
17#include <linux/ptrace.h>
18#include <linux/sched.h>
19#include <linux/signal.h>
20#include <linux/smp.h>
21#include <linux/smp_lock.h>
22#include <linux/stddef.h>
23#include <linux/syscalls.h>
24#include <linux/unistd.h>
25#include <linux/wait.h>
26#include <linux/compat.h>
27
28#include <asm/intrinsics.h>
29#include <asm/uaccess.h>
30#include <asm/rse.h>
31#include <asm/sigcontext.h>
32#include <asm/segment.h>
33
34#include "ia32priv.h"
35
36#include "../kernel/sigframe.h"
37
38#define A(__x) ((unsigned long)(__x))
39
40#define DEBUG_SIG 0
41#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
42
43#define __IA32_NR_sigreturn 119
44#define __IA32_NR_rt_sigreturn 173
45
/*
 * Non-RT ia32 signal frame pushed on the user stack by setup_frame_ia32().
 * Layout must match i386 exactly; all pointers are 32-bit.
 */
46struct sigframe_ia32
47{
48       int pretcode;	/* return address: sa_restorer or sigreturn stub in gate page */
49       int sig;	/* (possibly exec-domain-translated) signal number */
50       struct sigcontext_ia32 sc;	/* saved ia32 register state */
51       struct _fpstate_ia32 fpstate;	/* saved ia32 FP/SSE state */
52       unsigned int extramask[_COMPAT_NSIG_WORDS-1];	/* high words of blocked mask */
53       char retcode[8];	/* "popl %eax; movl $__IA32_NR_sigreturn,%eax; int $0x80" */
54};
55
/*
 * RT ia32 signal frame pushed on the user stack by setup_rt_frame_ia32().
 * Layout must match i386 exactly; all pointers are 32-bit.
 */
56struct rt_sigframe_ia32
57{
58       int pretcode;	/* return address: sa_restorer or rt stub in gate page */
59       int sig;	/* (possibly exec-domain-translated) signal number */
60       int pinfo;	/* 32-bit pointer to info below */
61       int puc;	/* 32-bit pointer to uc below */
62       compat_siginfo_t info;
63       struct ucontext_ia32 uc;
64       struct _fpstate_ia32 fpstate;
65       char retcode[8];	/* "movl $__IA32_NR_rt_sigreturn,%eax; int $0x80" */
66};
67
/*
 * Convert a 32-bit compat_siginfo_t from user space into a native 64-bit
 * siginfo_t, copying only the union members relevant to si_code's class.
 * Returns 0 on success, non-zero/-EFAULT on a faulting access.
 */
68int
69copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from)
70{
71	unsigned long tmp;
72	int err;
73
74	if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t)))
75		return -EFAULT;
76
77	err = __get_user(to->si_signo, &from->si_signo);
78	err |= __get_user(to->si_errno, &from->si_errno);
79	err |= __get_user(to->si_code, &from->si_code);
80
	/* Negative si_code: user-generated signal, raw union copy suffices. */
81	if (to->si_code < 0)
82		err |= __copy_from_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
83	else {
		/* Dispatch on the siginfo class encoded in the top half of si_code.
		   Note: __SI_CHLD deliberately falls through into default: so it
		   also picks up si_pid/si_uid, and unknown classes get pid/uid
		   only -- same structure as the i386 version. */
84		switch (to->si_code >> 16) {
85		      case __SI_CHLD >> 16:
86			err |= __get_user(to->si_utime, &from->si_utime);
87			err |= __get_user(to->si_stime, &from->si_stime);
88			err |= __get_user(to->si_status, &from->si_status);
			/* fallthrough */
89		      default:
90			err |= __get_user(to->si_pid, &from->si_pid);
91			err |= __get_user(to->si_uid, &from->si_uid);
92			break;
93		      case __SI_FAULT >> 16:
94			err |= __get_user(tmp, &from->si_addr);
95			to->si_addr = (void __user *) tmp;
96			break;
97		      case __SI_POLL >> 16:
98			err |= __get_user(to->si_band, &from->si_band);
99			err |= __get_user(to->si_fd, &from->si_fd);
100			break;
101		      case __SI_RT >> 16:	/* This is not generated by the kernel as of now.  */
102		      case __SI_MESGQ >> 16:
103			err |= __get_user(to->si_pid, &from->si_pid);
104			err |= __get_user(to->si_uid, &from->si_uid);
105			err |= __get_user(to->si_int, &from->si_int);
106			break;
107		}
108	}
109	return err;
110}
111
/*
 * Convert a native 64-bit siginfo_t into a 32-bit compat_siginfo_t in user
 * space, narrowing pointers to 32 bits.  Returns 0 on success, non-zero or
 * -EFAULT on a faulting access.
 */
112int
113copy_siginfo_to_user32 (compat_siginfo_t __user *to, siginfo_t *from)
114{
115	unsigned int addr;
116	int err;
117
118	if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
119		return -EFAULT;
120
121	/* If you change siginfo_t structure, please be sure
122	   this code is fixed accordingly.
123	   It should never copy any pad contained in the structure
124	   to avoid security leaks, but must copy the generic
125	   3 ints plus the relevant union member.
126	   This routine must convert siginfo from 64bit to 32bit as well
127	   at the same time.  */
128	err = __put_user(from->si_signo, &to->si_signo);
129	err |= __put_user(from->si_errno, &to->si_errno);
130	err |= __put_user((short)from->si_code, &to->si_code);
131	if (from->si_code < 0)
132		err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
133	else {
		/* __SI_CHLD deliberately falls through into default: to also
		   emit si_pid/si_uid -- mirrors copy_siginfo_from_user32(). */
134		switch (from->si_code >> 16) {
135		      case __SI_CHLD >> 16:
136			err |= __put_user(from->si_utime, &to->si_utime);
137			err |= __put_user(from->si_stime, &to->si_stime);
138			err |= __put_user(from->si_status, &to->si_status);
			/* fallthrough */
139		      default:
140			err |= __put_user(from->si_pid, &to->si_pid);
141			err |= __put_user(from->si_uid, &to->si_uid);
142			break;
143		      case __SI_FAULT >> 16:
144			/* avoid type-checking warnings by copying _pad[0] in lieu of si_addr... */
145			err |= __put_user(from->_sifields._pad[0], &to->si_addr);
146			break;
147		      case __SI_POLL >> 16:
148			err |= __put_user(from->si_band, &to->si_band);
149			err |= __put_user(from->si_fd, &to->si_fd);
150			break;
151		      case __SI_TIMER >> 16:
152			err |= __put_user(from->si_tid, &to->si_tid);
153			err |= __put_user(from->si_overrun, &to->si_overrun);
			/* si_ptr is narrowed to the low 32 bits for the compat layout */
154			addr = (unsigned long) from->si_ptr;
155			err |= __put_user(addr, &to->si_ptr);
156			break;
157		      case __SI_RT >> 16:	/* Not generated by the kernel as of now.  */
158		      case __SI_MESGQ >> 16:
159			err |= __put_user(from->si_uid, &to->si_uid);
160			err |= __put_user(from->si_pid, &to->si_pid);
161			addr = (unsigned long) from->si_ptr;
162			err |= __put_user(addr, &to->si_ptr);
163			break;
164		}
165	}
166	return err;
167}
168
169
170/*
171 * SAVE and RESTORE of ia32 fpstate info, from ia64 current state
172 * Used in exception handler to pass the fpstate to the user, and restore
173 * the fpstate while returning from the exception handler.
174 *
175 * fpstate info and their mapping to IA64 regs:
176 * fpstate REG(BITS) Attribute Comments
177 * cw ar.fcr(0:12) with bits 7 and 6 not used
178 * sw ar.fsr(0:15)
179 * tag ar.fsr(16:31) with odd numbered bits not used
180 * (read returns 0, writes ignored)
181 * ipoff ar.fir(0:31)
182 * cssel ar.fir(32:47)
183 * dataoff ar.fdr(0:31)
184 * datasel ar.fdr(32:47)
185 *
186 * _st[(0+TOS)%8] f8
187 * _st[(1+TOS)%8] f9
188 * _st[(2+TOS)%8] f10
189 * _st[(3+TOS)%8] f11 (f8..f11 from ptregs)
190 * : : : (f12..f15 from live reg)
191 * : : :
192 * _st[(7+TOS)%8] f15 TOS=sw.top(bits11:13)
193 *
194 * status Same as sw RO
195 * magic 0 as X86_FXSR_MAGIC in ia32
196 * mxcsr Bits(7:15)=ar.fcr(39:47)
197 * Bits(0:5) =ar.fsr(32:37) with bit 6 reserved
198 * _xmm[0..7] f16..f31 (live registers)
199 * with _xmm[0]
200 * Bit(64:127)=f17(0:63)
201 * Bit(0:63)=f16(0:63)
202 * All other fields unused...
203 */
204
/*
 * Dump the live ia32 FP/SSE state (held in ia64 ar.fsr/fcr/fir/fdr and
 * f8-f31 -- see the mapping table above) into the user-level _fpstate_ia32
 * at @save, then clear the pending FP exception state (fnclex-style).
 *
 * The build keeps f16-f31 out of compiler use (-mfixed-range, see Makefile)
 * so the live registers really hold user state here.
 *
 * NOTE(review): after the initial access_ok() the individual __put_user/
 * copy_to_user results are ignored and the function returns 0 regardless;
 * a copy could still fail under OOM -- confirm this is acceptable here.
 */
205static int
206save_ia32_fpstate_live (struct _fpstate_ia32 __user *save)
207{
208	struct task_struct *tsk = current;
209	struct pt_regs *ptp;
210	struct _fpreg_ia32 *fpregp;
211	char buf[32];
212	unsigned long fsr, fcr, fir, fdr;
213	unsigned long new_fsr;
214	unsigned long num128[2];
215	unsigned long mxcsr=0;
216	int fp_tos, fr8_st_map;
217
218	if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
219		return -EFAULT;
220
221	/* Read in fsr, fcr, fir, fdr and copy onto fpstate */
222	fsr = ia64_getreg(_IA64_REG_AR_FSR);
223	fcr = ia64_getreg(_IA64_REG_AR_FCR);
224	fir = ia64_getreg(_IA64_REG_AR_FIR);
225	fdr = ia64_getreg(_IA64_REG_AR_FDR);
226
227	/*
228	 * We need to clear the exception state before calling the signal handler. Clear
229	 * the bits 15, bits 0-7 in fp status word. Similar to the functionality of fnclex
230	 * instruction.
231	 */
232	new_fsr = fsr & ~0x80ff;
233	ia64_setreg(_IA64_REG_AR_FSR, new_fsr);
234
235	__put_user(fcr & 0xffff, &save->cw);
236	__put_user(fsr & 0xffff, &save->sw);
237	__put_user((fsr>>16) & 0xffff, &save->tag);
238	__put_user(fir, &save->ipoff);
239	__put_user((fir>>32) & 0xffff, &save->cssel);
240	__put_user(fdr, &save->dataoff);
241	__put_user((fdr>>32) & 0xffff, &save->datasel);
242	__put_user(fsr & 0xffff, &save->status);
243
	/* mxcsr is split across fcr(39:47) and fsr(32:37) -- see table above */
244	mxcsr = ((fcr>>32) & 0xff80) | ((fsr>>32) & 0x3f);
245	__put_user(mxcsr & 0xffff, &save->mxcsr);
246	__put_user( 0, &save->magic); //#define X86_FXSR_MAGIC   0x0000
247
248	/*
249	 * save f8..f11  from pt_regs
250	 * save f12..f15 from live register set
251	 */
252	/*
253	 *  Find the location where f8 has to go in fp reg stack.  This depends on
254	 *  TOP(11:13) field of sw. Other f reg continue sequentially from where f8 maps
255	 *  to.
256	 */
257	fp_tos = (fsr>>11)&0x7;
258	fr8_st_map = (8-fp_tos)&0x7;
259	ptp = ia64_task_regs(tsk);
	/* 16-byte-align the bounce buffer for the stfe stores below */
260	fpregp = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
261	ia64f2ia32f(fpregp, &ptp->f8);
262	copy_to_user(&save->_st[(0+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
263	ia64f2ia32f(fpregp, &ptp->f9);
264	copy_to_user(&save->_st[(1+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
265	ia64f2ia32f(fpregp, &ptp->f10);
266	copy_to_user(&save->_st[(2+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
267	ia64f2ia32f(fpregp, &ptp->f11);
268	copy_to_user(&save->_st[(3+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
269
	/* f12-f15 are still live; the register number must be a literal constant
	   for the stfe intrinsic, hence the unrolled sequence */
270	ia64_stfe(fpregp, 12);
271	copy_to_user(&save->_st[(4+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
272	ia64_stfe(fpregp, 13);
273	copy_to_user(&save->_st[(5+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
274	ia64_stfe(fpregp, 14);
275	copy_to_user(&save->_st[(6+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
276	ia64_stfe(fpregp, 15);
277	copy_to_user(&save->_st[(7+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
278
	/* each 128-bit xmm register lives in a pair of ia64 FP registers */
279	ia64_stf8(&num128[0], 16);
280	ia64_stf8(&num128[1], 17);
281	copy_to_user(&save->_xmm[0], num128, sizeof(struct _xmmreg_ia32));
282
283	ia64_stf8(&num128[0], 18);
284	ia64_stf8(&num128[1], 19);
285	copy_to_user(&save->_xmm[1], num128, sizeof(struct _xmmreg_ia32));
286
287	ia64_stf8(&num128[0], 20);
288	ia64_stf8(&num128[1], 21);
289	copy_to_user(&save->_xmm[2], num128, sizeof(struct _xmmreg_ia32));
290
291	ia64_stf8(&num128[0], 22);
292	ia64_stf8(&num128[1], 23);
293	copy_to_user(&save->_xmm[3], num128, sizeof(struct _xmmreg_ia32));
294
295	ia64_stf8(&num128[0], 24);
296	ia64_stf8(&num128[1], 25);
297	copy_to_user(&save->_xmm[4], num128, sizeof(struct _xmmreg_ia32));
298
299	ia64_stf8(&num128[0], 26);
300	ia64_stf8(&num128[1], 27);
301	copy_to_user(&save->_xmm[5], num128, sizeof(struct _xmmreg_ia32));
302
303	ia64_stf8(&num128[0], 28);
304	ia64_stf8(&num128[1], 29);
305	copy_to_user(&save->_xmm[6], num128, sizeof(struct _xmmreg_ia32));
306
307	ia64_stf8(&num128[0], 30);
308	ia64_stf8(&num128[1], 31);
309	copy_to_user(&save->_xmm[7], num128, sizeof(struct _xmmreg_ia32));
310	return 0;
311}
312
/*
 * Inverse of save_ia32_fpstate_live(): reload ia32 FP/SSE state from the
 * user-level _fpstate_ia32 at @save into ar.fsr/fcr/fir/fdr and f8-f31,
 * merging only the architecturally-defined fields into the ar registers.
 *
 * NOTE(review): as in the save path, copy_from_user results after the
 * initial access_ok() are ignored; a faulting copy would silently load
 * stale buffer contents -- confirm this is acceptable here.
 */
313static int
314restore_ia32_fpstate_live (struct _fpstate_ia32 __user *save)
315{
316	struct task_struct *tsk = current;
317	struct pt_regs *ptp;
318	unsigned int lo, hi;
319	unsigned long num128[2];
320	unsigned long num64, mxcsr;
321	struct _fpreg_ia32 *fpregp;
322	char buf[32];
323	unsigned long fsr, fcr, fir, fdr;
324	int fp_tos, fr8_st_map;
325
326	if (!access_ok(VERIFY_READ, save, sizeof(*save)))
327		return(-EFAULT);
328
329	/*
330	 * Updating fsr, fcr, fir, fdr.
331	 * Just a bit more complicated than save.
332	 * - Need to make sure that we don't write any value other than the
333	 *   specific fpstate info
334	 * - Need to make sure that the untouched part of frs, fdr, fir, fcr
335	 *   should remain same while writing.
336	 * So, we do a read, change specific fields and write.
337	 */
338	fsr = ia64_getreg(_IA64_REG_AR_FSR);
339	fcr = ia64_getreg(_IA64_REG_AR_FCR);
340	fir = ia64_getreg(_IA64_REG_AR_FIR);
341	fdr = ia64_getreg(_IA64_REG_AR_FDR);
342
343	__get_user(mxcsr, (unsigned int __user *)&save->mxcsr);
344	/* setting bits 0..5 8..12 with cw and 39..47 from mxcsr */
345	__get_user(lo, (unsigned int __user *)&save->cw);
346	num64 = mxcsr & 0xff10;
347	num64 = (num64 << 32) | (lo & 0x1f3f);
348	fcr = (fcr & (~0xff1000001f3fUL)) | num64;
349
350	/* setting bits 0..31 with sw and tag and 32..37 from mxcsr */
351	__get_user(lo, (unsigned int __user *)&save->sw);
352	/* set bits 15,7 (fsw.b, fsw.es) to reflect the current error status */
353	if ( !(lo & 0x7f) )
354		lo &= (~0x8080);
355	__get_user(hi, (unsigned int __user *)&save->tag);
356	num64 = mxcsr & 0x3f;
357	num64 = (num64 << 16) | (hi & 0xffff);
358	num64 = (num64 << 16) | (lo & 0xffff);
359	fsr = (fsr & (~0x3fffffffffUL)) | num64;
360
361	/* setting bits 0..47 with cssel and ipoff */
362	__get_user(lo, (unsigned int __user *)&save->ipoff);
363	__get_user(hi, (unsigned int __user *)&save->cssel);
364	num64 = hi & 0xffff;
365	num64 = (num64 << 32) | lo;
366	fir = (fir & (~0xffffffffffffUL)) | num64;
367
368	/* setting bits 0..47 with datasel and dataoff */
369	__get_user(lo, (unsigned int __user *)&save->dataoff);
370	__get_user(hi, (unsigned int __user *)&save->datasel);
371	num64 = hi & 0xffff;
372	num64 = (num64 << 32) | lo;
373	fdr = (fdr & (~0xffffffffffffUL)) | num64;
374
375	ia64_setreg(_IA64_REG_AR_FSR, fsr);
376	ia64_setreg(_IA64_REG_AR_FCR, fcr);
377	ia64_setreg(_IA64_REG_AR_FIR, fir);
378	ia64_setreg(_IA64_REG_AR_FDR, fdr);
379
380	/*
381	 * restore f8..f11 onto pt_regs
382	 * restore f12..f15 onto live registers
383	 */
384	/*
385	 *  Find the location where f8 has to go in fp reg stack.  This depends on
386	 *  TOP(11:13) field of sw. Other f reg continue sequentially from where f8 maps
387	 *  to.
388	 */
389	fp_tos = (fsr>>11)&0x7;
390	fr8_st_map = (8-fp_tos)&0x7;
	/* 16-byte-align the bounce buffer for the ldfe loads below */
391	fpregp = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
392
393	ptp = ia64_task_regs(tsk);
394	copy_from_user(fpregp, &save->_st[(0+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
395	ia32f2ia64f(&ptp->f8, fpregp);
396	copy_from_user(fpregp, &save->_st[(1+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
397	ia32f2ia64f(&ptp->f9, fpregp);
398	copy_from_user(fpregp, &save->_st[(2+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
399	ia32f2ia64f(&ptp->f10, fpregp);
400	copy_from_user(fpregp, &save->_st[(3+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
401	ia32f2ia64f(&ptp->f11, fpregp);
402
	/* f12-f15 go straight to the live registers; register numbers must be
	   literal constants for the ldfe intrinsic, hence the unrolled code */
403	copy_from_user(fpregp, &save->_st[(4+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
404	ia64_ldfe(12, fpregp);
405	copy_from_user(fpregp, &save->_st[(5+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
406	ia64_ldfe(13, fpregp);
407	copy_from_user(fpregp, &save->_st[(6+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
408	ia64_ldfe(14, fpregp);
409	copy_from_user(fpregp, &save->_st[(7+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
410	ia64_ldfe(15, fpregp);
411
	/* each 128-bit xmm register is reloaded into a pair of ia64 FP registers */
412	copy_from_user(num128, &save->_xmm[0], sizeof(struct _xmmreg_ia32));
413	ia64_ldf8(16, &num128[0]);
414	ia64_ldf8(17, &num128[1]);
415
416	copy_from_user(num128, &save->_xmm[1], sizeof(struct _xmmreg_ia32));
417	ia64_ldf8(18, &num128[0]);
418	ia64_ldf8(19, &num128[1]);
419
420	copy_from_user(num128, &save->_xmm[2], sizeof(struct _xmmreg_ia32));
421	ia64_ldf8(20, &num128[0]);
422	ia64_ldf8(21, &num128[1]);
423
424	copy_from_user(num128, &save->_xmm[3], sizeof(struct _xmmreg_ia32));
425	ia64_ldf8(22, &num128[0]);
426	ia64_ldf8(23, &num128[1]);
427
428	copy_from_user(num128, &save->_xmm[4], sizeof(struct _xmmreg_ia32));
429	ia64_ldf8(24, &num128[0]);
430	ia64_ldf8(25, &num128[1]);
431
432	copy_from_user(num128, &save->_xmm[5], sizeof(struct _xmmreg_ia32));
433	ia64_ldf8(26, &num128[0]);
434	ia64_ldf8(27, &num128[1]);
435
436	copy_from_user(num128, &save->_xmm[6], sizeof(struct _xmmreg_ia32));
437	ia64_ldf8(28, &num128[0]);
438	ia64_ldf8(29, &num128[1]);
439
440	copy_from_user(num128, &save->_xmm[7], sizeof(struct _xmmreg_ia32));
441	ia64_ldf8(30, &num128[0]);
442	ia64_ldf8(31, &num128[1]);
443	return 0;
444}
445
446static inline void
447sigact_set_handler (struct k_sigaction *sa, unsigned int handler, unsigned int restorer)
448{
449 if (handler + 1 <= 2)
450 /* SIG_DFL, SIG_IGN, or SIG_ERR: must sign-extend to 64-bits */
451 sa->sa.sa_handler = (__sighandler_t) A((int) handler);
452 else
453 sa->sa.sa_handler = (__sighandler_t) (((unsigned long) restorer << 32) | handler);
454}
455
456long
457__ia32_rt_sigsuspend (compat_sigset_t *sset, unsigned int sigsetsize, struct sigscratch *scr)
458{
459 extern long ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall);
460 sigset_t oldset, set;
461
462 scr->scratch_unat = 0; /* avoid leaking kernel bits to user level */
463 memset(&set, 0, sizeof(&set));
464
465 if (memcpy(&set.sig, &sset->sig, sigsetsize))
466 return -EFAULT;
467
468 sigdelsetmask(&set, ~_BLOCKABLE);
469
470 spin_lock_irq(&current->sighand->siglock);
471 {
472 oldset = current->blocked;
473 current->blocked = set;
474 recalc_sigpending();
475 }
476 spin_unlock_irq(&current->sighand->siglock);
477
478 /*
479 * The return below usually returns to the signal handler. We need to pre-set the
480 * correct error code here to ensure that the right values get saved in sigcontext
481 * by ia64_do_signal.
482 */
483 scr->pt.r8 = -EINTR;
484 while (1) {
485 current->state = TASK_INTERRUPTIBLE;
486 schedule();
487 if (ia64_do_signal(&oldset, scr, 1))
488 return -EINTR;
489 }
490}
491
492asmlinkage long
493ia32_rt_sigsuspend (compat_sigset_t __user *uset, unsigned int sigsetsize, struct sigscratch *scr)
494{
495 compat_sigset_t set;
496
497 if (sigsetsize > sizeof(compat_sigset_t))
498 return -EINVAL;
499
500 if (copy_from_user(&set.sig, &uset->sig, sigsetsize))
501 return -EFAULT;
502
503 return __ia32_rt_sigsuspend(&set, sigsetsize, scr);
504}
505
/*
 * Old-style ia32 sigsuspend(2): the mask arrives by value, so only the first
 * word of a compat_sigset_t is meaningful; reuse the rt path with a size of
 * sizeof(mask) so no more than that word is read.
 */
506asmlinkage long
507ia32_sigsuspend (unsigned int mask, struct sigscratch *scr)
508{
509	return __ia32_rt_sigsuspend((compat_sigset_t *) &mask, sizeof(mask), scr);
510}
511
512asmlinkage long
513sys32_signal (int sig, unsigned int handler)
514{
515 struct k_sigaction new_sa, old_sa;
516 int ret;
517
518 sigact_set_handler(&new_sa, handler, 0);
519 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
520
521 ret = do_sigaction(sig, &new_sa, &old_sa);
522
523 return ret ? ret : IA32_SA_HANDLER(&old_sa);
524}
525
526asmlinkage long
527sys32_rt_sigaction (int sig, struct sigaction32 __user *act,
528 struct sigaction32 __user *oact, unsigned int sigsetsize)
529{
530 struct k_sigaction new_ka, old_ka;
531 unsigned int handler, restorer;
532 int ret;
533
534 /* XXX: Don't preclude handling different sized sigset_t's. */
535 if (sigsetsize != sizeof(compat_sigset_t))
536 return -EINVAL;
537
538 if (act) {
539 ret = get_user(handler, &act->sa_handler);
540 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
541 ret |= get_user(restorer, &act->sa_restorer);
542 ret |= copy_from_user(&new_ka.sa.sa_mask, &act->sa_mask, sizeof(compat_sigset_t));
543 if (ret)
544 return -EFAULT;
545
546 sigact_set_handler(&new_ka, handler, restorer);
547 }
548
549 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
550
551 if (!ret && oact) {
552 ret = put_user(IA32_SA_HANDLER(&old_ka), &oact->sa_handler);
553 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
554 ret |= put_user(IA32_SA_RESTORER(&old_ka), &oact->sa_restorer);
555 ret |= copy_to_user(&oact->sa_mask, &old_ka.sa.sa_mask, sizeof(compat_sigset_t));
556 }
557 return ret;
558}
559
560
/*
 * ia32 rt_sigprocmask(2): widen the compat sigset into a native one, call
 * sys_rt_sigprocmask() under KERNEL_DS so it accepts our kernel-resident
 * copy, and narrow the old set back to the caller.
 */
561asmlinkage long
562sys32_rt_sigprocmask (int how, compat_sigset_t __user *set, compat_sigset_t __user *oset,
563		      unsigned int sigsetsize)
564{
565	mm_segment_t old_fs = get_fs();
566	sigset_t s;
567	long ret;
568
569	if (sigsetsize > sizeof(s))
570		return -EINVAL;
571
572	if (set) {
		/* zero first: the native set may be wider than sigsetsize */
573		memset(&s, 0, sizeof(s));
574		if (copy_from_user(&s.sig, set, sigsetsize))
575			return -EFAULT;
576	}
	/* temporarily lift the address limit so &s passes the user checks */
577	set_fs(KERNEL_DS);
578	ret = sys_rt_sigprocmask(how,
579				 set ? (sigset_t __user *) &s : NULL,
580				 oset ? (sigset_t __user *) &s : NULL, sizeof(s));
581	set_fs(old_fs);
582	if (ret)
583		return ret;
584	if (oset) {
585		if (copy_to_user(oset, &s.sig, sigsetsize))
586			return -EFAULT;
587	}
588	return 0;
589}
590
591asmlinkage long
592sys32_rt_sigqueueinfo (int pid, int sig, compat_siginfo_t __user *uinfo)
593{
594 mm_segment_t old_fs = get_fs();
595 siginfo_t info;
596 int ret;
597
598 if (copy_siginfo_from_user32(&info, uinfo))
599 return -EFAULT;
600 set_fs(KERNEL_DS);
601 ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *) &info);
602 set_fs(old_fs);
603 return ret;
604}
605
/*
 * Old-style ia32 sigaction(2): like sys32_rt_sigaction() but the blocked
 * mask is a single 32-bit word (compat_old_sigset_t) rather than a full
 * compat sigset.
 */
606asmlinkage long
607sys32_sigaction (int sig, struct old_sigaction32 __user *act, struct old_sigaction32 __user *oact)
608{
609	struct k_sigaction new_ka, old_ka;
610	unsigned int handler, restorer;
611	int ret;
612
613	if (act) {
614		compat_old_sigset_t mask;
615
616		ret = get_user(handler, &act->sa_handler);
617		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
618		ret |= get_user(restorer, &act->sa_restorer);
619		ret |= get_user(mask, &act->sa_mask);
620		if (ret)
621			return ret;
622
		/* pack handler+restorer into the 64-bit sa_handler slot */
623		sigact_set_handler(&new_ka, handler, restorer);
624		siginitset(&new_ka.sa.sa_mask, mask);
625	}
626
627	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
628
629	if (!ret && oact) {
630		ret = put_user(IA32_SA_HANDLER(&old_ka), &oact->sa_handler);
631		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
632		ret |= put_user(IA32_SA_RESTORER(&old_ka), &oact->sa_restorer);
		/* only the first word of the old mask fits the old ABI */
633		ret |= put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
634	}
635
636	return ret;
637}
638
/*
 * Fill the user-level sigcontext_ia32 at @sc from the current register state.
 * The ia32 general registers live in ia64 pt_regs slots (r8-r15); the four
 * data segment selectors are packed 16 bits apiece into r16 (ds/es/fs/gs)
 * and cs/ss into r17; eflags lives in the ar.eflag application register.
 * @mask is the blocked-signal word saved as oldmask.  Returns 0 or an error.
 */
639static int
640setup_sigcontext_ia32 (struct sigcontext_ia32 __user *sc, struct _fpstate_ia32 __user *fpstate,
641		       struct pt_regs *regs, unsigned long mask)
642{
643	int  err = 0;
644	unsigned long flag;
645
646	if (!access_ok(VERIFY_WRITE, sc, sizeof(*sc)))
647		return -EFAULT;
648
	/* unpack the segment selectors from r16: ds | es<<16 | fs<<32 | gs<<48 */
649	err |= __put_user((regs->r16 >> 32) & 0xffff, (unsigned int __user *)&sc->fs);
650	err |= __put_user((regs->r16 >> 48) & 0xffff, (unsigned int __user *)&sc->gs);
651	err |= __put_user((regs->r16 >> 16) & 0xffff, (unsigned int __user *)&sc->es);
652	err |= __put_user(regs->r16 & 0xffff, (unsigned int __user *)&sc->ds);
	/* ia32 GPRs are carried in ia64 scratch registers r8-r15 */
653	err |= __put_user(regs->r15, &sc->edi);
654	err |= __put_user(regs->r14, &sc->esi);
655	err |= __put_user(regs->r13, &sc->ebp);
656	err |= __put_user(regs->r12, &sc->esp);
657	err |= __put_user(regs->r11, &sc->ebx);
658	err |= __put_user(regs->r10, &sc->edx);
659	err |= __put_user(regs->r9, &sc->ecx);
660	err |= __put_user(regs->r8, &sc->eax);
661#if 0
662	err |= __put_user(current->tss.trap_no, &sc->trapno);
663	err |= __put_user(current->tss.error_code, &sc->err);
664#endif
665	err |= __put_user(regs->cr_iip, &sc->eip);
666	err |= __put_user(regs->r17 & 0xffff, (unsigned int __user *)&sc->cs);
667	/*
668	 *  `eflags' is in an ar register for this context
669	 */
670	flag = ia64_getreg(_IA64_REG_AR_EFLAG);
671	err |= __put_user((unsigned int)flag, &sc->eflags);
672	err |= __put_user(regs->r12, &sc->esp_at_signal);
673	err |= __put_user((regs->r17 >> 16) & 0xffff, (unsigned int __user *)&sc->ss);
674
675	if ( save_ia32_fpstate_live(fpstate) < 0 )
676		err = -EFAULT;
677	else
		/* store the 32-bit user address of the FP save area */
678		err |= __put_user((u32)(u64)fpstate, &sc->fpstate);
679
680#if 0
681	tmp = save_i387(fpstate);
682	if (tmp < 0)
683		err = 1;
684	else
685		err |= __put_user(tmp ? fpstate : NULL, &sc->fpstate);
686
687	/* non-iBCS2 extensions.. */
688#endif
689	err |= __put_user(mask, &sc->oldmask);
690#if 0
691	err |= __put_user(current->tss.cr2, &sc->cr2);
692#endif
693	return err;
694}
695
/*
 * Inverse of setup_sigcontext_ia32(): reload register state from the
 * user-level sigcontext at @sc on sigreturn.  Segment selectors are packed
 * back into r16 (ds/es/fs/gs) and r17 (cs/ss); eflags is merged into
 * ar.eflag; the saved eax is returned separately through @peax so the
 * sigreturn path can make it the syscall result.  Returns 0 or an error.
 */
696static int
697restore_sigcontext_ia32 (struct pt_regs *regs, struct sigcontext_ia32 __user *sc, int *peax)
698{
699	unsigned int err = 0;
700
701	/* Always make any pending restarted system calls return -EINTR */
702	current_thread_info()->restart_block.fn = do_no_restart_syscall;
703
704	if (!access_ok(VERIFY_READ, sc, sizeof(*sc)))
705		return(-EFAULT);
706
707#define COPY(ia64x, ia32x)	err |= __get_user(regs->ia64x, &sc->ia32x)
708
	/* selector packing: r16 = ds | es<<16 | fs<<32 | gs<<48; r17 = cs | ss<<16 */
709#define copyseg_gs(tmp)		(regs->r16 |= (unsigned long) (tmp) << 48)
710#define copyseg_fs(tmp)		(regs->r16 |= (unsigned long) (tmp) << 32)
711#define copyseg_cs(tmp)		(regs->r17 |= tmp)
712#define copyseg_ss(tmp)		(regs->r17 |= (unsigned long) (tmp) << 16)
713#define copyseg_es(tmp)		(regs->r16 |= (unsigned long) (tmp) << 16)
714#define copyseg_ds(tmp)		(regs->r16 |= tmp)
715
716#define COPY_SEG(seg)					\
717	{						\
718		unsigned short tmp;			\
719		err |= __get_user(tmp, &sc->seg);	\
720		copyseg_##seg(tmp);			\
721	}
	/* STRICT variant forces RPL 3 so the user cannot smuggle in ring-0 selectors */
722#define COPY_SEG_STRICT(seg)				\
723	{						\
724		unsigned short tmp;			\
725		err |= __get_user(tmp, &sc->seg);	\
726		copyseg_##seg(tmp|3);			\
727	}
728
729	/* To make COPY_SEGs easier, we zero r16, r17 */
730	regs->r16 = 0;
731	regs->r17 = 0;
732
733	COPY_SEG(gs);
734	COPY_SEG(fs);
735	COPY_SEG(es);
736	COPY_SEG(ds);
737	COPY(r15, edi);
738	COPY(r14, esi);
739	COPY(r13, ebp);
740	COPY(r12, esp);
741	COPY(r11, ebx);
742	COPY(r10, edx);
743	COPY(r9, ecx);
744	COPY(cr_iip, eip);
745	COPY_SEG_STRICT(cs);
746	COPY_SEG_STRICT(ss);
747	ia32_load_segment_descriptors(current);
748	{
749		unsigned int tmpflags;
750		unsigned long flag;
751
752		/*
753		 * IA32 `eflags' is not part of `pt_regs', it's in an ar register which
754		 * is part of the thread context.  Fortunately, we are executing in the
755		 * IA32 process's context.
756		 */
757		err |= __get_user(tmpflags, &sc->eflags);
758		flag = ia64_getreg(_IA64_REG_AR_EFLAG);
		/* only let the user restore the non-privileged eflags bits */
759		flag &= ~0x40DD5;
760		flag |= (tmpflags & 0x40DD5);
761		ia64_setreg(_IA64_REG_AR_EFLAG, flag);
762
763		regs->r1 = -1;	/* disable syscall checks, r1 is orig_eax */
764	}
765
766	{
767		struct _fpstate_ia32 __user *buf = NULL;
768		u32 fpstate_ptr;
769		err |= get_user(fpstate_ptr, &(sc->fpstate));
770		buf = compat_ptr(fpstate_ptr);
771		if (buf) {
772			err |= restore_ia32_fpstate_live(buf);
773		}
774	}
775
776#if 0
777	{
778		struct _fpstate * buf;
779		err |= __get_user(buf, &sc->fpstate);
780		if (buf) {
781			if (!access_ok(VERIFY_READ, buf, sizeof(*buf)))
782				goto badframe;
783			err |= restore_i387(buf);
784		}
785	}
786#endif
787
788	err |= __get_user(*peax, &sc->eax);
789	return err;
790
791#if 0
792  badframe:
793	return 1;
794#endif
795}
796
797/*
798 * Determine which stack to use..
799 */
800static inline void __user *
801get_sigframe (struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
802{
803 unsigned long esp;
804
805 /* Default to using normal stack (truncate off sign-extension of bit 31: */
806 esp = (unsigned int) regs->r12;
807
808 /* This is the X/Open sanctioned signal stack switching. */
809 if (ka->sa.sa_flags & SA_ONSTACK) {
810 if (!on_sig_stack(esp))
811 esp = current->sas_ss_sp + current->sas_ss_size;
812 }
813 /* Legacy stack switching not supported */
814
815 return (void __user *)((esp - frame_size) & -8ul);
816}
817
/*
 * Build a non-RT ia32 signal frame on the user stack and point the task at
 * the handler: r12 (esp) gets the frame, cr_iip (eip) the handler address.
 * Returns 1 on success; on any fault, forces SIGSEGV and returns 0.
 */
818static int
819setup_frame_ia32 (int sig, struct k_sigaction *ka, sigset_t *set, struct pt_regs * regs)
820{
821	struct exec_domain *ed = current_thread_info()->exec_domain;
822	struct sigframe_ia32 __user *frame;
823	int err = 0;
824
825	frame = get_sigframe(ka, regs, sizeof(*frame));
826
827	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
828		goto give_sigsegv;
829
	/* exec domains may remap signal numbers for foreign personalities */
830	err |= __put_user((ed && ed->signal_invmap && sig < 32
831			   ? (int)(ed->signal_invmap[sig]) : sig), &frame->sig);
832
833	err |= setup_sigcontext_ia32(&frame->sc, &frame->fpstate, regs, set->sig[0]);
834
	/* words 2..n of the blocked mask travel in extramask[] */
835	if (_COMPAT_NSIG_WORDS > 1)
836		err |= __copy_to_user(frame->extramask, (char *) &set->sig + 4,
837				      sizeof(frame->extramask));
838
839	/* Set up to return from userspace.  If provided, use a stub
840	   already in userspace.  */
841	if (ka->sa.sa_flags & SA_RESTORER) {
842		unsigned int restorer = IA32_SA_RESTORER(ka);
843		err |= __put_user(restorer, &frame->pretcode);
844	} else {
845		/* Pointing to restorer in ia32 gate page */
846		err |= __put_user(IA32_GATE_OFFSET, &frame->pretcode);
847	}
848
849	/* This is popl %eax ; movl $,%eax ; int $0x80
850	 * and is there for historical reasons only.
851	 * See arch/i386/kernel/signal.c
852	 */
853
	/* 0xb858 = bytes 58 b8: "popl %eax; movl $imm32,%eax" (little-endian) */
854	err |= __put_user(0xb858, (short __user *)(frame->retcode+0));
855	err |= __put_user(__IA32_NR_sigreturn, (int __user *)(frame->retcode+2));
856	err |= __put_user(0x80cd, (short __user *)(frame->retcode+6));
857
858	if (err)
859		goto give_sigsegv;
860
861	/* Set up registers for signal handler */
862	regs->r12 = (unsigned long) frame;
863	regs->cr_iip = IA32_SA_HANDLER(ka);
864
865	set_fs(USER_DS);
866
867#if 0
868	regs->eflags &= ~TF_MASK;
869#endif
870
871#if 0
872	printk("SIG deliver (%s:%d): sig=%d sp=%p pc=%lx ra=%x\n",
873               current->comm, current->pid, sig, (void *) frame, regs->cr_iip, frame->pretcode);
874#endif
875
876	return 1;
877
878  give_sigsegv:
879	force_sigsegv(sig, current);
880	return 0;
881}
882
/*
 * Build an IA-32 rt (SA_SIGINFO) signal frame on the user stack and
 * redirect the task to the signal handler.  Returns 1 on success and
 * 0 after forcing SIGSEGV on any failure.
 */
static int
setup_rt_frame_ia32 (int sig, struct k_sigaction *ka, siginfo_t *info,
		     sigset_t *set, struct pt_regs * regs)
{
	struct exec_domain *ed = current_thread_info()->exec_domain;
	compat_uptr_t pinfo, puc;
	struct rt_sigframe_ia32 __user *frame;
	int err = 0;

	frame = get_sigframe(ka, regs, sizeof(*frame));

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto give_sigsegv;

	/* Translate the signal number through the exec domain's inverse
	   map (only the first 32 signals are remappable). */
	err |= __put_user((ed && ed->signal_invmap
			   && sig < 32 ? ed->signal_invmap[sig] : sig), &frame->sig);

	/* 32-bit user pointers to the siginfo and ucontext inside the frame. */
	pinfo = (long __user) &frame->info;
	puc = (long __user) &frame->uc;
	err |= __put_user(pinfo, &frame->pinfo);
	err |= __put_user(puc, &frame->puc);
	err |= copy_siginfo_to_user32(&frame->info, info);

	/* Create the ucontext.  */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(0, &frame->uc.uc_link);
	err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->r12), &frame->uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
	err |= setup_sigcontext_ia32(&frame->uc.uc_mcontext, &frame->fpstate, regs, set->sig[0]);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
	if (err)
		goto give_sigsegv;

	/* Set up to return from userspace.  If provided, use a stub
	   already in userspace.  */
	if (ka->sa.sa_flags & SA_RESTORER) {
		unsigned int restorer = IA32_SA_RESTORER(ka);
		err |= __put_user(restorer, &frame->pretcode);
	} else {
		/* Pointing to rt_restorer in ia32 gate page */
		err |= __put_user(IA32_GATE_OFFSET + 8, &frame->pretcode);
	}

	/* This is movl $,%eax ; int $0x80
	 * and there for historical reasons only.
	 * See arch/i386/kernel/signal.c
	 */

	err |= __put_user(0xb8, (char __user *)(frame->retcode+0));
	err |= __put_user(__IA32_NR_rt_sigreturn, (int __user *)(frame->retcode+1));
	err |= __put_user(0x80cd, (short __user *)(frame->retcode+5));

	if (err)
		goto give_sigsegv;

	/* Set up registers for signal handler */
	regs->r12 = (unsigned long) frame;	/* r12 carries the IA-32 esp */
	regs->cr_iip = IA32_SA_HANDLER(ka);	/* resume execution at the handler */

	set_fs(USER_DS);

#if 0
	regs->eflags &= ~TF_MASK;
#endif

#if 0
	printk("SIG deliver (%s:%d): sp=%p pc=%lx ra=%x\n",
	       current->comm, current->pid, (void *) frame, regs->cr_iip, frame->pretcode);
#endif

	return 1;

give_sigsegv:
	force_sigsegv(sig, current);
	return 0;
}
960
961int
962ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info,
963 sigset_t *set, struct pt_regs *regs)
964{
965 /* Set up the stack frame */
966 if (ka->sa.sa_flags & SA_SIGINFO)
967 return setup_rt_frame_ia32(sig, ka, info, set, regs);
968 else
969 return setup_frame_ia32(sig, ka, set, regs);
970}
971
/*
 * Return path for signals delivered via a classic (non-rt) IA-32
 * sigframe: restore the blocked-signal mask saved in the frame and the
 * user register state from the frame's sigcontext.
 *
 * The eight dummy int arguments skip over the syscall argument
 * registers so that the by-value struct pt_regs copy lines up with the
 * stack layout set up by the entry path (see ia32_entry.S).
 */
asmlinkage long
sys32_sigreturn (int arg0, int arg1, int arg2, int arg3, int arg4, int arg5,
		 int arg6, int arg7, struct pt_regs regs)
{
	unsigned long esp = (unsigned int) regs.r12;	/* r12 carries the IA-32 esp */
	/* NOTE(review): the -8 presumably undoes the return address and
	   sig argument popped by the handler's return -- confirm against
	   the frame layout in setup_frame_ia32(). */
	struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(esp - 8);
	sigset_t set;
	int eax;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;

	/* First word of the mask lives in the sigcontext; the remaining
	   words (if any) are in the frame's extramask[]. */
	if (__get_user(set.sig[0], &frame->sc.oldmask)
	    || (_COMPAT_NSIG_WORDS > 1 && __copy_from_user((char *) &set.sig + 4, &frame->extramask,
							   sizeof(frame->extramask))))
		goto badframe;

	/* Never allow SIGKILL/SIGSTOP to become blocked. */
	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext_ia32(&regs, &frame->sc, &eax))
		goto badframe;
	/* Return the restored eax so the syscall exit path preserves the
	   interrupted context's return value. */
	return eax;

  badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
1003
/*
 * Return path for rt (SA_SIGINFO) IA-32 signal frames: restore the
 * blocked-signal mask, the register state from the ucontext and the
 * alternate-signal-stack settings.
 *
 * The eight dummy int arguments skip over the syscall argument
 * registers so that the by-value struct pt_regs copy lines up with the
 * stack layout set up by the entry path (see ia32_entry.S).
 */
asmlinkage long
sys32_rt_sigreturn (int arg0, int arg1, int arg2, int arg3, int arg4,
		    int arg5, int arg6, int arg7, struct pt_regs regs)
{
	unsigned long esp = (unsigned int) regs.r12;	/* r12 carries the IA-32 esp */
	struct rt_sigframe_ia32 __user *frame = (struct rt_sigframe_ia32 __user *)(esp - 4);
	sigset_t set;
	int eax;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	/* Never allow SIGKILL/SIGSTOP to become blocked. */
	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext_ia32(&regs, &frame->uc.uc_mcontext, &eax))
		goto badframe;

	/* It is more difficult to avoid calling this function than to
	   call it and ignore errors.  */
	do_sigaltstack((stack_t __user *) &frame->uc.uc_stack, NULL, esp);

	return eax;

  badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
diff --git a/arch/ia64/ia32/ia32_support.c b/arch/ia64/ia32/ia32_support.c
new file mode 100644
index 000000000000..4f630043b3ae
--- /dev/null
+++ b/arch/ia64/ia32/ia32_support.c
@@ -0,0 +1,264 @@
1/*
2 * IA32 helper functions
3 *
4 * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
5 * Copyright (C) 2000 Asit K. Mallick <asit.k.mallick@intel.com>
6 * Copyright (C) 2001-2002 Hewlett-Packard Co
7 * David Mosberger-Tang <davidm@hpl.hp.com>
8 *
9 * 06/16/00 A. Mallick added csd/ssd/tssd for ia32 thread context
10 * 02/19/01 D. Mosberger dropped tssd; it's not needed
11 * 09/14/01 D. Mosberger fixed memory management for gdt/tss page
12 * 09/29/01 D. Mosberger added ia32_load_segment_descriptors()
13 */
14
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/mm.h>
18#include <linux/personality.h>
19#include <linux/sched.h>
20
21#include <asm/intrinsics.h>
22#include <asm/page.h>
23#include <asm/pgtable.h>
24#include <asm/system.h>
25#include <asm/processor.h>
26#include <asm/uaccess.h>
27
28#include "ia32priv.h"
29
/* Kernel die helper; defined in the ia64 trap-handling code. */
extern void die_if_kernel (char *str, struct pt_regs *regs, long err);

/* Exec domain registered for 32-bit (PER_LINUX32) processes. */
struct exec_domain ia32_exec_domain;
/* Per-CPU page holding that CPU's IA-32 GDT; slot 0 is the boot CPU's
 * copy, which serves as the template cloned by ia32_gdt_init(). */
struct page *ia32_shared_page[NR_CPUS];
unsigned long *ia32_boot_gdt;		/* boot CPU's GDT (page_address of ia32_shared_page[0]) */
unsigned long *cpu_gdt_table[NR_CPUS];	/* per-CPU GDT pointers */
struct page *ia32_gate_page;		/* gate page with the sigreturn trampolines */
37
38static unsigned long
39load_desc (u16 selector)
40{
41 unsigned long *table, limit, index;
42
43 if (!selector)
44 return 0;
45 if (selector & IA32_SEGSEL_TI) {
46 table = (unsigned long *) IA32_LDT_OFFSET;
47 limit = IA32_LDT_ENTRIES;
48 } else {
49 table = cpu_gdt_table[smp_processor_id()];
50 limit = IA32_PAGE_SIZE / sizeof(ia32_boot_gdt[0]);
51 }
52 index = selector >> IA32_SEGSEL_INDEX_SHIFT;
53 if (index >= limit)
54 return 0;
55 return IA32_SEG_UNSCRAMBLE(table[index]);
56}
57
58void
59ia32_load_segment_descriptors (struct task_struct *task)
60{
61 struct pt_regs *regs = ia64_task_regs(task);
62
63 /* Setup the segment descriptors */
64 regs->r24 = load_desc(regs->r16 >> 16); /* ESD */
65 regs->r27 = load_desc(regs->r16 >> 0); /* DSD */
66 regs->r28 = load_desc(regs->r16 >> 32); /* FSD */
67 regs->r29 = load_desc(regs->r16 >> 48); /* GSD */
68 regs->ar_csd = load_desc(regs->r17 >> 0); /* CSD */
69 regs->ar_ssd = load_desc(regs->r17 >> 16); /* SSD */
70}
71
72int
73ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs)
74{
75 struct desc_struct *desc;
76 struct ia32_user_desc info;
77 int idx;
78
79 if (copy_from_user(&info, (void __user *)(childregs->r14 & 0xffffffff), sizeof(info)))
80 return -EFAULT;
81 if (LDT_empty(&info))
82 return -EINVAL;
83
84 idx = info.entry_number;
85 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
86 return -EINVAL;
87
88 desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
89 desc->a = LDT_entry_a(&info);
90 desc->b = LDT_entry_b(&info);
91
92 /* XXX: can this be done in a cleaner way ? */
93 load_TLS(&child->thread, smp_processor_id());
94 ia32_load_segment_descriptors(child);
95 load_TLS(&current->thread, smp_processor_id());
96
97 return 0;
98}
99
100void
101ia32_save_state (struct task_struct *t)
102{
103 t->thread.eflag = ia64_getreg(_IA64_REG_AR_EFLAG);
104 t->thread.fsr = ia64_getreg(_IA64_REG_AR_FSR);
105 t->thread.fcr = ia64_getreg(_IA64_REG_AR_FCR);
106 t->thread.fir = ia64_getreg(_IA64_REG_AR_FIR);
107 t->thread.fdr = ia64_getreg(_IA64_REG_AR_FDR);
108 ia64_set_kr(IA64_KR_IO_BASE, t->thread.old_iob);
109 ia64_set_kr(IA64_KR_TSSD, t->thread.old_k1);
110}
111
/*
 * Context-switch helper: install task t's IA-32 application-register
 * state.  The current KR.IO_BASE/KR.TSSD values are saved into
 * current->thread *before* being overwritten so ia32_save_state() can
 * restore them later -- do not reorder.
 */
void
ia32_load_state (struct task_struct *t)
{
	unsigned long eflag, fsr, fcr, fir, fdr, tssd;
	struct pt_regs *regs = ia64_task_regs(t);

	eflag = t->thread.eflag;
	fsr = t->thread.fsr;
	fcr = t->thread.fcr;
	fir = t->thread.fir;
	fdr = t->thread.fdr;
	tssd = load_desc(_TSS);					/* TSSD */

	ia64_setreg(_IA64_REG_AR_EFLAG, eflag);
	ia64_setreg(_IA64_REG_AR_FSR, fsr);
	ia64_setreg(_IA64_REG_AR_FCR, fcr);
	ia64_setreg(_IA64_REG_AR_FIR, fir);
	ia64_setreg(_IA64_REG_AR_FDR, fdr);
	/* Remember the kernel's values for ia32_save_state(). */
	current->thread.old_iob = ia64_get_kr(IA64_KR_IO_BASE);
	current->thread.old_k1 = ia64_get_kr(IA64_KR_TSSD);
	ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
	ia64_set_kr(IA64_KR_TSSD, tssd);

	/* Pack the TSS and LDT selectors into the high half of r17,
	   keeping the cs/ss selectors already in the low 32 bits. */
	regs->r17 = (_TSS << 48) | (_LDT << 32) | (__u32) regs->r17;
	regs->r30 = load_desc(_LDT);				/* LDTD */
	load_TLS(&t->thread, smp_processor_id());
}
139
140/*
141 * Setup IA32 GDT and TSS
142 */
143void
144ia32_gdt_init (void)
145{
146 int cpu = smp_processor_id();
147
148 ia32_shared_page[cpu] = alloc_page(GFP_KERNEL);
149 if (!ia32_shared_page[cpu])
150 panic("failed to allocate ia32_shared_page[%d]\n", cpu);
151
152 cpu_gdt_table[cpu] = page_address(ia32_shared_page[cpu]);
153
154 /* Copy from the boot cpu's GDT */
155 memcpy(cpu_gdt_table[cpu], ia32_boot_gdt, PAGE_SIZE);
156}
157
158
159/*
160 * Setup IA32 GDT and TSS
161 */
/*
 * Set up the boot CPU's IA-32 GDT: flat user code/data segments
 * covering the 32-bit address space up to the gate page, plus the TSS
 * and LDT system descriptors.  Panics on allocation failure.
 */
static void
ia32_boot_gdt_init (void)
{
	unsigned long ldt_size;

	ia32_shared_page[0] = alloc_page(GFP_KERNEL);
	if (!ia32_shared_page[0])
		panic("failed to allocate ia32_shared_page[0]\n");

	ia32_boot_gdt = page_address(ia32_shared_page[0]);
	cpu_gdt_table[0] = ia32_boot_gdt;

	/* CS descriptor in IA-32 (scrambled) format */
	ia32_boot_gdt[__USER_CS >> 3]
		= IA32_SEG_DESCRIPTOR(0, (IA32_GATE_END-1) >> IA32_PAGE_SHIFT,
				      0xb, 1, 3, 1, 1, 1, 1);

	/* DS descriptor in IA-32 (scrambled) format */
	ia32_boot_gdt[__USER_DS >> 3]
		= IA32_SEG_DESCRIPTOR(0, (IA32_GATE_END-1) >> IA32_PAGE_SHIFT,
				      0x3, 1, 3, 1, 1, 1, 1);

	ldt_size = PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);
	/* NOTE(review): the hard-coded TSS limit of 235 looks like the
	   size of an IA-32 TSS without the I/O bitmap -- confirm before
	   changing. */
	ia32_boot_gdt[TSS_ENTRY] = IA32_SEG_DESCRIPTOR(IA32_TSS_OFFSET, 235,
						       0xb, 0, 3, 1, 1, 1, 0);
	ia32_boot_gdt[LDT_ENTRY] = IA32_SEG_DESCRIPTOR(IA32_LDT_OFFSET, ldt_size - 1,
						       0x2, 0, 3, 1, 1, 1, 0);
}
190
191static void
192ia32_gate_page_init(void)
193{
194 unsigned long *sr;
195
196 ia32_gate_page = alloc_page(GFP_KERNEL);
197 sr = page_address(ia32_gate_page);
198 /* This is popl %eax ; movl $,%eax ; int $0x80 */
199 *sr++ = 0xb858 | (__IA32_NR_sigreturn << 16) | (0x80cdUL << 48);
200
201 /* This is movl $,%eax ; int $0x80 */
202 *sr = 0xb8 | (__IA32_NR_rt_sigreturn << 8) | (0x80cdUL << 40);
203}
204
/*
 * One-time boot initialization of the IA-32 emulation memory areas:
 * first the boot GDT, then the gate (trampoline) page.
 */
void
ia32_mem_init(void)
{
	ia32_boot_gdt_init();
	ia32_gate_page_init();
}
211
212/*
213 * Handle bad IA32 interrupt via syscall
214 */
215void
216ia32_bad_interrupt (unsigned long int_num, struct pt_regs *regs)
217{
218 siginfo_t siginfo;
219
220 die_if_kernel("Bad IA-32 interrupt", regs, int_num);
221
222 siginfo.si_signo = SIGTRAP;
223 siginfo.si_errno = int_num; /* XXX is it OK to abuse si_errno like this? */
224 siginfo.si_flags = 0;
225 siginfo.si_isr = 0;
226 siginfo.si_addr = NULL;
227 siginfo.si_imm = 0;
228 siginfo.si_code = TRAP_BRKPT;
229 force_sig_info(SIGTRAP, &siginfo, current);
230}
231
232void
233ia32_cpu_init (void)
234{
235 /* initialize global ia32 state - CR0 and CR4 */
236 ia64_setreg(_IA64_REG_AR_CFLAG, (((ulong) IA32_CR4 << 32) | IA32_CR0));
237}
238
/*
 * Boot-time registration of the IA-32 exec domain and, when the native
 * page size exceeds the IA-32 4KB page, creation of the SLAB cache
 * used for partial-page bookkeeping.  Runs once via __initcall.
 */
static int __init
ia32_init (void)
{
	ia32_exec_domain.name = "Linux/x86";
	ia32_exec_domain.handler = NULL;
	ia32_exec_domain.pers_low = PER_LINUX32;
	ia32_exec_domain.pers_high = PER_LINUX32;
	/* IA-32 signal numbering matches the default map. */
	ia32_exec_domain.signal_map = default_exec_domain.signal_map;
	ia32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
	register_exec_domain(&ia32_exec_domain);

#if PAGE_SHIFT > IA32_PAGE_SHIFT
	{
		extern kmem_cache_t *partial_page_cachep;

		partial_page_cachep = kmem_cache_create("partial_page_cache",
							sizeof(struct partial_page), 0, 0,
							NULL, NULL);
		if (!partial_page_cachep)
			panic("Cannot create partial page SLAB cache");
	}
#endif
	return 0;
}

__initcall(ia32_init);
diff --git a/arch/ia64/ia32/ia32_traps.c b/arch/ia64/ia32/ia32_traps.c
new file mode 100644
index 000000000000..e486042672f1
--- /dev/null
+++ b/arch/ia64/ia32/ia32_traps.c
@@ -0,0 +1,156 @@
1/*
2 * IA-32 exception handlers
3 *
4 * Copyright (C) 2000 Asit K. Mallick <asit.k.mallick@intel.com>
5 * Copyright (C) 2001-2002 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 *
8 * 06/16/00 A. Mallick added siginfo for most cases (close to IA32)
9 * 09/29/00 D. Mosberger added ia32_intercept()
10 */
11
12#include <linux/kernel.h>
13#include <linux/sched.h>
14
15#include "ia32priv.h"
16
17#include <asm/intrinsics.h>
18#include <asm/ptrace.h>
19
20int
21ia32_intercept (struct pt_regs *regs, unsigned long isr)
22{
23 switch ((isr >> 16) & 0xff) {
24 case 0: /* Instruction intercept fault */
25 case 4: /* Locked Data reference fault */
26 case 1: /* Gate intercept trap */
27 return -1;
28
29 case 2: /* System flag trap */
30 if (((isr >> 14) & 0x3) >= 2) {
31 /* MOV SS, POP SS instructions */
32 ia64_psr(regs)->id = 1;
33 return 0;
34 } else
35 return -1;
36 }
37 return -1;
38}
39
/*
 * Translate an IA-32 exception (vector in bits 16-23 of the ISR) into
 * the corresponding Linux signal and deliver it to current.  Returns 0
 * when a signal was sent, -1 for vectors this handler does not know.
 */
int
ia32_exception (struct pt_regs *regs, unsigned long isr)
{
	struct siginfo siginfo;

	/* initialize these fields to avoid leaking kernel bits to user space: */
	siginfo.si_errno = 0;
	siginfo.si_flags = 0;
	siginfo.si_isr = 0;
	siginfo.si_imm = 0;
	switch ((isr >> 16) & 0xff) {
	case 1:		/* debug trap/fault */
	case 2:
		siginfo.si_signo = SIGTRAP;
		if (isr == 0)
			siginfo.si_code = TRAP_TRACE;
		else if (isr & 0x4)
			siginfo.si_code = TRAP_BRANCH;
		else
			siginfo.si_code = TRAP_BRKPT;
		break;

	case 3:		/* int3 breakpoint */
		siginfo.si_signo = SIGTRAP;
		siginfo.si_code = TRAP_BRKPT;
		break;

	case 0:		/* Divide fault */
		siginfo.si_signo = SIGFPE;
		siginfo.si_code = FPE_INTDIV;
		break;

	case 4:		/* Overflow */
	case 5:		/* Bounds fault */
		siginfo.si_signo = SIGFPE;
		siginfo.si_code = 0;
		break;

	case 6:		/* Invalid Op-code */
		siginfo.si_signo = SIGILL;
		siginfo.si_code = ILL_ILLOPN;
		break;

	case 7:		/* FP DNA */
	case 8:		/* Double Fault */
	case 9:		/* Invalid TSS */
	case 11:	/* Segment not present */
	case 12:	/* Stack fault */
	case 13:	/* General Protection Fault */
		siginfo.si_signo = SIGSEGV;
		siginfo.si_code = 0;
		break;

	case 16:	/* Pending FP error */
	{
		unsigned long fsr, fcr;

		fsr = ia64_getreg(_IA64_REG_AR_FSR);
		fcr = ia64_getreg(_IA64_REG_AR_FCR);

		siginfo.si_signo = SIGFPE;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status.  0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit.  We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception
		 */
		siginfo.si_isr = isr;
		siginfo.si_flags = __ISR_VALID;
		switch (((~fcr) & (fsr & 0x3f)) | (fsr & 0x240)) {
		case 0x000:
		default:
			siginfo.si_code = 0;
			break;
		case 0x001:	/* Invalid Op */
		case 0x040:	/* Stack Fault */
		case 0x240:	/* Stack Fault | Direction */
			siginfo.si_code = FPE_FLTINV;
			break;
		case 0x002:	/* Denormalize */
		case 0x010:	/* Underflow */
			siginfo.si_code = FPE_FLTUND;
			break;
		case 0x004:	/* Zero Divide */
			siginfo.si_code = FPE_FLTDIV;
			break;
		case 0x008:	/* Overflow */
			siginfo.si_code = FPE_FLTOVF;
			break;
		case 0x020:	/* Precision */
			siginfo.si_code = FPE_FLTRES;
			break;
		}

		break;
	}

	case 17:	/* Alignment check */
		/* NOTE(review): this delivers SIGSEGV with a BUS_* si_code;
		   i386 uses SIGBUS for alignment checks -- confirm intent. */
		siginfo.si_signo = SIGSEGV;
		siginfo.si_code = BUS_ADRALN;
		break;

	case 19:	/* SSE Numeric error */
		siginfo.si_signo = SIGFPE;
		siginfo.si_code = 0;
		break;

	default:
		return -1;
	}
	force_sig_info(siginfo.si_signo, &siginfo, current);
	return 0;
}
diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
new file mode 100644
index 000000000000..b2de948bdaea
--- /dev/null
+++ b/arch/ia64/ia32/ia32priv.h
@@ -0,0 +1,544 @@
1#ifndef _ASM_IA64_IA32_PRIV_H
2#define _ASM_IA64_IA32_PRIV_H
3
4#include <linux/config.h>
5
6#include <asm/ia32.h>
7
8#ifdef CONFIG_IA32_SUPPORT
9
10#include <linux/binfmts.h>
11#include <linux/compat.h>
12#include <linux/rbtree.h>
13
14#include <asm/processor.h>
15
16/*
17 * 32 bit structures for IA32 support.
18 */
19
20#define IA32_PAGE_SIZE (1UL << IA32_PAGE_SHIFT)
21#define IA32_PAGE_MASK (~(IA32_PAGE_SIZE - 1))
22#define IA32_PAGE_ALIGN(addr) (((addr) + IA32_PAGE_SIZE - 1) & IA32_PAGE_MASK)
23#define IA32_CLOCKS_PER_SEC 100 /* Cast in stone for IA32 Linux */
24
25/*
26 * partially mapped pages provide precise accounting of which 4k sub pages
27 * are mapped and which ones are not, thereby improving IA-32 compatibility.
28 */
/*
 * Bookkeeping for one partially-mapped native page, recording which
 * 4KB IA-32 sub-pages within it are currently mapped.
 */
struct partial_page {
	struct partial_page	*next;		/* linked list, sorted by address */
	struct rb_node		pp_rb;
	/* 64K is the largest "normal" page supported by ia64 ABI. So 4K*32
	 * should suffice.*/
	unsigned int		bitmap;		/* one bit per 4KB sub-page */
	unsigned int		base;		/* NOTE(review): presumably the
						 * page's base address -- confirm */
};

/* Per-address-space collection of partial pages, kept both as a sorted
 * list and as an rbtree for fast lookup. */
struct partial_page_list {
	struct partial_page	*pp_head;	/* list head, points to the lowest
						 * addressed partial page */
	struct rb_root		ppl_rb;
	struct partial_page	*pp_hint;	/* pp_hint->next is the last
						 * accessed partial page */
	atomic_t		pp_count;	/* reference count */
};
46
47#if PAGE_SHIFT > IA32_PAGE_SHIFT
48struct partial_page_list* ia32_init_pp_list (void);
49#else
50# define ia32_init_pp_list() 0
51#endif
52
53/* sigcontext.h */
54/*
55 * As documented in the iBCS2 standard..
56 *
57 * The first part of "struct _fpstate" is just the
58 * normal i387 hardware setup, the extra "status"
59 * word is used to save the coprocessor status word
60 * before entering the handler.
61 */
/* One 80-bit x87 register in its packed 10-byte memory image. */
struct _fpreg_ia32 {
	unsigned short significand[4];
	unsigned short exponent;
};

/* An x87 register as stored in the FXSR image: 10 bytes padded to 16. */
struct _fpxreg_ia32 {
	unsigned short significand[4];
	unsigned short exponent;
	unsigned short padding[3];
};

/* One 128-bit SSE register. */
struct _xmmreg_ia32 {
	unsigned int element[4];
};

/*
 * FPU state as saved in the IA-32 signal frame: the classic i387
 * environment followed by the FXSR/SSE extension area.
 */
struct _fpstate_ia32 {
	unsigned int	cw,		/* control word */
			sw,		/* status word */
			tag,		/* tag word */
			ipoff,
			cssel,
			dataoff,
			datasel;
	struct _fpreg_ia32	_st[8];
	unsigned short	status;
	unsigned short	magic;		/* 0xffff = regular FPU data only */

	/* FXSR FPU environment */
	unsigned int	_fxsr_env[6];	/* FXSR FPU env is ignored */
	unsigned int	mxcsr;
	unsigned int	reserved;
	struct _fpxreg_ia32	_fxsr_st[8];	/* FXSR FPU reg data is ignored */
	struct _xmmreg_ia32	_xmm[8];
	unsigned int	padding[56];
};
98
/*
 * IA-32 sigcontext as documented in the iBCS2 standard; this layout
 * must match what 32-bit userland expects to find in the signal frame.
 */
struct sigcontext_ia32 {
	unsigned short gs, __gsh;
	unsigned short fs, __fsh;
	unsigned short es, __esh;
	unsigned short ds, __dsh;
	unsigned int edi;
	unsigned int esi;
	unsigned int ebp;
	unsigned int esp;
	unsigned int ebx;
	unsigned int edx;
	unsigned int ecx;
	unsigned int eax;
	unsigned int trapno;
	unsigned int err;
	unsigned int eip;
	unsigned short cs, __csh;
	unsigned int eflags;
	unsigned int esp_at_signal;
	unsigned short ss, __ssh;
	unsigned int fpstate;		/* really (struct _fpstate_ia32 *) */
	unsigned int oldmask;		/* first word of the saved blocked-signal mask */
	unsigned int cr2;		/* x86 fault-address register image */
};
123
124/* user.h */
125/*
126 * IA32 (Pentium III/4) FXSR, SSE support
127 *
128 * Provide support for the GDB 5.0+ PTRACE_{GET|SET}FPXREGS requests for
129 * interacting with the FXSR-format floating point environment. Floating
130 * point data can be accessed in the regular format in the usual manner,
131 * and both the standard and SIMD floating point data can be accessed via
132 * the new ptrace requests. In either case, changes to the FPU environment
133 * will be reflected in the task's state as expected.
134 */
/* Classic i387 state layout used by IA32_PTRACE_{GET,SET}FPREGS. */
struct ia32_user_i387_struct {
	int cwd;	/* control word */
	int swd;	/* status word */
	int twd;	/* tag word */
	int fip;	/* instruction pointer offset */
	int fcs;	/* instruction pointer selector */
	int foo;	/* operand pointer offset */
	int fos;	/* operand pointer selector */
	/* 8*10 bytes for each FP-reg = 80 bytes */
	struct _fpreg_ia32	st_space[8];
};

/* FXSR-format state layout used by IA32_PTRACE_{GET,SET}FPXREGS. */
struct ia32_user_fxsr_struct {
	unsigned short	cwd;
	unsigned short	swd;
	unsigned short	twd;
	unsigned short	fop;
	int	fip;
	int	fcs;
	int	foo;
	int	fos;
	int	mxcsr;
	int	reserved;
	int	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
	int	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
	int	padding[56];
};
162
163/* signal.h */
164#define IA32_SET_SA_HANDLER(ka,handler,restorer) \
165 ((ka)->sa.sa_handler = (__sighandler_t) \
166 (((unsigned long)(restorer) << 32) \
167 | ((handler) & 0xffffffff)))
168#define IA32_SA_HANDLER(ka) ((unsigned long) (ka)->sa.sa_handler & 0xffffffff)
169#define IA32_SA_RESTORER(ka) ((unsigned long) (ka)->sa.sa_handler >> 32)
170
171#define __IA32_NR_sigreturn 119
172#define __IA32_NR_rt_sigreturn 173
173
/* 32-bit view of the new-style (rt) struct sigaction. */
struct sigaction32 {
	unsigned int sa_handler;	/* Really a pointer, but need to deal with 32 bits */
	unsigned int sa_flags;
	unsigned int sa_restorer;	/* Another 32 bit pointer */
	compat_sigset_t sa_mask;	/* A 32 bit mask */
};

/* 32-bit view of the old-style struct sigaction; note the different
 * field order from sigaction32 above. */
struct old_sigaction32 {
	unsigned int  sa_handler;	/* Really a pointer, but need to deal
					   with 32 bits */
	compat_old_sigset_t sa_mask;	/* A 32 bit mask */
	unsigned int sa_flags;
	unsigned int sa_restorer;	/* Another 32 bit pointer */
};

/* 32-bit sigaltstack descriptor, embedded in ucontext_ia32. */
typedef struct sigaltstack_ia32 {
	unsigned int	ss_sp;
	int		ss_flags;
	unsigned int	ss_size;
} stack_ia32_t;

/* 32-bit ucontext placed in the rt signal frame (see setup_rt_frame_ia32). */
struct ucontext_ia32 {
	unsigned int	  uc_flags;
	unsigned int	  uc_link;
	stack_ia32_t	  uc_stack;
	struct sigcontext_ia32 uc_mcontext;
	sigset_t	  uc_sigmask;	/* mask last for extensibility */
};
202
/* IA-32 stat64 layout returned by the 32-bit stat64 syscall family;
 * 64-bit size and inode values are split into lo/hi 32-bit halves. */
struct stat64 {
	unsigned long long	st_dev;
	unsigned char	__pad0[4];
	unsigned int	__st_ino;	/* NOTE(review): presumably the low
					 * 32 bits of st_ino -- confirm */
	unsigned int	st_mode;
	unsigned int	st_nlink;
	unsigned int	st_uid;
	unsigned int	st_gid;
	unsigned long long	st_rdev;
	unsigned char	__pad3[4];
	unsigned int	st_size_lo;
	unsigned int	st_size_hi;
	unsigned int	st_blksize;
	unsigned int	st_blocks;	/* Number 512-byte blocks allocated. */
	unsigned int	__pad4;		/* future possible st_blocks high bits */
	unsigned int	st_atime;
	unsigned int	st_atime_nsec;
	unsigned int	st_mtime;
	unsigned int	st_mtime_nsec;
	unsigned int	st_ctime;
	unsigned int	st_ctime_nsec;
	unsigned int	st_ino_lo;
	unsigned int	st_ino_hi;
};
227
/*
 * 32-bit layout of siginfo_t; must match the i386 definition so the
 * compat signal-delivery code produces what 32-bit userland expects.
 */
typedef struct compat_siginfo {
	int si_signo;
	int si_errno;
	int si_code;

	union {
		int _pad[((128/sizeof(int)) - 3)];	/* pad the union to 128 bytes total */

		/* kill() */
		struct {
			unsigned int _pid;	/* sender's pid */
			unsigned int _uid;	/* sender's uid */
		} _kill;

		/* POSIX.1b timers */
		struct {
			timer_t _tid;		/* timer id */
			int _overrun;		/* overrun count */
			char _pad[sizeof(unsigned int) - sizeof(int)];
			compat_sigval_t _sigval;	/* same as below */
			int _sys_private;	/* not to be passed to user */
		} _timer;

		/* POSIX.1b signals */
		struct {
			unsigned int _pid;	/* sender's pid */
			unsigned int _uid;	/* sender's uid */
			compat_sigval_t _sigval;
		} _rt;

		/* SIGCHLD */
		struct {
			unsigned int _pid;	/* which child */
			unsigned int _uid;	/* sender's uid */
			int _status;		/* exit code */
			compat_clock_t _utime;
			compat_clock_t _stime;
		} _sigchld;

		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
		struct {
			unsigned int _addr;	/* faulting insn/memory ref. */
		} _sigfault;

		/* SIGPOLL */
		struct {
			int _band;	/* POLL_IN, POLL_OUT, POLL_MSG */
			int _fd;
		} _sigpoll;
	} _sifields;
} compat_siginfo_t;
279
/* Record layout produced by the old 32-bit readdir interface. */
struct old_linux32_dirent {
	u32	d_ino;
	u32	d_offset;
	u16	d_namlen;
	char	d_name[1];	/* name stored inline past the header */
};
286
287/*
288 * IA-32 ELF specific definitions for IA-64.
289 */
290
291#define _ASM_IA64_ELF_H /* Don't include elf.h */
292
293#include <linux/sched.h>
294#include <asm/processor.h>
295
296/*
297 * This is used to ensure we don't load something for the wrong architecture.
298 */
299#define elf_check_arch(x) ((x)->e_machine == EM_386)
300
301/*
302 * These are used to set parameters in the core dumps.
303 */
304#define ELF_CLASS ELFCLASS32
305#define ELF_DATA ELFDATA2LSB
306#define ELF_ARCH EM_386
307
308#define IA32_PAGE_OFFSET 0xc0000000
309#define IA32_STACK_TOP IA32_PAGE_OFFSET
310#define IA32_GATE_OFFSET IA32_PAGE_OFFSET
311#define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
312
313/*
314 * The system segments (GDT, TSS, LDT) have to be mapped below 4GB so the IA-32 engine can
315 * access them.
316 */
317#define IA32_GDT_OFFSET (IA32_PAGE_OFFSET + PAGE_SIZE)
318#define IA32_TSS_OFFSET (IA32_PAGE_OFFSET + 2*PAGE_SIZE)
319#define IA32_LDT_OFFSET (IA32_PAGE_OFFSET + 3*PAGE_SIZE)
320
321#define ELF_EXEC_PAGESIZE IA32_PAGE_SIZE
322
323/*
324 * This is the location that an ET_DYN program is loaded if exec'ed.
325 * Typical use of this is to invoke "./ld.so someprog" to test out a
326 * new version of the loader. We need to make sure that it is out of
327 * the way of the program that it will "exec", and that there is
328 * sufficient room for the brk.
329 */
330#define ELF_ET_DYN_BASE (IA32_PAGE_OFFSET/3 + 0x1000000)
331
332void ia64_elf32_init(struct pt_regs *regs);
333#define ELF_PLAT_INIT(_r, load_addr) ia64_elf32_init(_r)
334
335#define elf_addr_t u32
336
337/* This macro yields a bitmask that programs can use to figure out
338 what instruction set this CPU supports. */
339#define ELF_HWCAP 0
340
341/* This macro yields a string that ld.so will use to load
342 implementation specific libraries for optimization. Not terribly
343 relevant until we have real hardware to play with... */
344#define ELF_PLATFORM NULL
345
346#ifdef __KERNEL__
347# define SET_PERSONALITY(EX,IBCS2) \
348 (current->personality = (IBCS2) ? PER_SVR4 : PER_LINUX)
349#endif
350
351#define IA32_EFLAG 0x200
352
353/*
354 * IA-32 ELF specific definitions for IA-64.
355 */
356
357#define __USER_CS 0x23
358#define __USER_DS 0x2B
359
360/*
361 * The per-cpu GDT has 32 entries: see <asm-i386/segment.h>
362 */
363#define GDT_ENTRIES 32
364
365#define GDT_SIZE (GDT_ENTRIES * 8)
366
367#define TSS_ENTRY 14
368#define LDT_ENTRY (TSS_ENTRY + 1)
369
370#define IA32_SEGSEL_RPL (0x3 << 0)
371#define IA32_SEGSEL_TI (0x1 << 2)
372#define IA32_SEGSEL_INDEX_SHIFT 3
373
374#define _TSS ((unsigned long) TSS_ENTRY << IA32_SEGSEL_INDEX_SHIFT)
375#define _LDT ((unsigned long) LDT_ENTRY << IA32_SEGSEL_INDEX_SHIFT)
376
377#define IA32_SEG_BASE 16
378#define IA32_SEG_TYPE 40
379#define IA32_SEG_SYS 44
380#define IA32_SEG_DPL 45
381#define IA32_SEG_P 47
382#define IA32_SEG_HIGH_LIMIT 48
383#define IA32_SEG_AVL 52
384#define IA32_SEG_DB 54
385#define IA32_SEG_G 55
386#define IA32_SEG_HIGH_BASE 56
387
388#define IA32_SEG_DESCRIPTOR(base, limit, segtype, nonsysseg, dpl, segpresent, avl, segdb, gran) \
389 (((limit) & 0xffff) \
390 | (((unsigned long) (base) & 0xffffff) << IA32_SEG_BASE) \
391 | ((unsigned long) (segtype) << IA32_SEG_TYPE) \
392 | ((unsigned long) (nonsysseg) << IA32_SEG_SYS) \
393 | ((unsigned long) (dpl) << IA32_SEG_DPL) \
394 | ((unsigned long) (segpresent) << IA32_SEG_P) \
395 | ((((unsigned long) (limit) >> 16) & 0xf) << IA32_SEG_HIGH_LIMIT) \
396 | ((unsigned long) (avl) << IA32_SEG_AVL) \
397 | ((unsigned long) (segdb) << IA32_SEG_DB) \
398 | ((unsigned long) (gran) << IA32_SEG_G) \
399 | ((((unsigned long) (base) >> 24) & 0xff) << IA32_SEG_HIGH_BASE))
400
401#define SEG_LIM 32
402#define SEG_TYPE 52
403#define SEG_SYS 56
404#define SEG_DPL 57
405#define SEG_P 59
406#define SEG_AVL 60
407#define SEG_DB 62
408#define SEG_G 63
409
410/* Unscramble an IA-32 segment descriptor into the IA-64 format. */
411#define IA32_SEG_UNSCRAMBLE(sd) \
412 ( (((sd) >> IA32_SEG_BASE) & 0xffffff) | ((((sd) >> IA32_SEG_HIGH_BASE) & 0xff) << 24) \
413 | ((((sd) & 0xffff) | ((((sd) >> IA32_SEG_HIGH_LIMIT) & 0xf) << 16)) << SEG_LIM) \
414 | ((((sd) >> IA32_SEG_TYPE) & 0xf) << SEG_TYPE) \
415 | ((((sd) >> IA32_SEG_SYS) & 0x1) << SEG_SYS) \
416 | ((((sd) >> IA32_SEG_DPL) & 0x3) << SEG_DPL) \
417 | ((((sd) >> IA32_SEG_P) & 0x1) << SEG_P) \
418 | ((((sd) >> IA32_SEG_AVL) & 0x1) << SEG_AVL) \
419 | ((((sd) >> IA32_SEG_DB) & 0x1) << SEG_DB) \
420 | ((((sd) >> IA32_SEG_G) & 0x1) << SEG_G))
421
422#define IA32_IOBASE 0x2000000000000000UL /* Virtual address for I/O space */
423
424#define IA32_CR0 0x80000001 /* Enable PG and PE bits */
425#define IA32_CR4 0x600 /* MMXEX and FXSR on */
426
427/*
428 * IA32 floating point control registers starting values
429 */
430
431#define IA32_FSR_DEFAULT 0x55550000 /* set all tag bits */
432#define IA32_FCR_DEFAULT 0x17800000037fUL /* extended precision, all masks */
433
434#define IA32_PTRACE_GETREGS 12
435#define IA32_PTRACE_SETREGS 13
436#define IA32_PTRACE_GETFPREGS 14
437#define IA32_PTRACE_SETFPREGS 15
438#define IA32_PTRACE_GETFPXREGS 18
439#define IA32_PTRACE_SETFPXREGS 19
440
441#define ia32_start_thread(regs,new_ip,new_sp) do { \
442 set_fs(USER_DS); \
443 ia64_psr(regs)->cpl = 3; /* set user mode */ \
444 ia64_psr(regs)->ri = 0; /* clear return slot number */ \
445 ia64_psr(regs)->is = 1; /* IA-32 instruction set */ \
446 regs->cr_iip = new_ip; \
447 regs->ar_rsc = 0xc; /* enforced lazy mode, priv. level 3 */ \
448 regs->ar_rnat = 0; \
449 regs->loadrs = 0; \
450 regs->r12 = new_sp; \
451} while (0)
452
453/*
454 * Local Descriptor Table (LDT) related declarations.
455 */
456
457#define IA32_LDT_ENTRIES 8192 /* Maximum number of LDT entries supported. */
458#define IA32_LDT_ENTRY_SIZE 8 /* The size of each LDT entry. */
459
460#define LDT_entry_a(info) \
461 ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
462
463#define LDT_entry_b(info) \
464 (((info)->base_addr & 0xff000000) | \
465 (((info)->base_addr & 0x00ff0000) >> 16) | \
466 ((info)->limit & 0xf0000) | \
467 (((info)->read_exec_only ^ 1) << 9) | \
468 ((info)->contents << 10) | \
469 (((info)->seg_not_present ^ 1) << 15) | \
470 ((info)->seg_32bit << 22) | \
471 ((info)->limit_in_pages << 23) | \
472 ((info)->useable << 20) | \
473 0x7100)
474
475#define LDT_empty(info) ( \
476 (info)->base_addr == 0 && \
477 (info)->limit == 0 && \
478 (info)->contents == 0 && \
479 (info)->read_exec_only == 1 && \
480 (info)->seg_32bit == 0 && \
481 (info)->limit_in_pages == 0 && \
482 (info)->seg_not_present == 1 && \
483 (info)->useable == 0 )
484
485static inline void
486load_TLS (struct thread_struct *t, unsigned int cpu)
487{
488 extern unsigned long *cpu_gdt_table[NR_CPUS];
489
490 memcpy(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN + 0, &t->tls_array[0], sizeof(long));
491 memcpy(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN + 1, &t->tls_array[1], sizeof(long));
492 memcpy(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN + 2, &t->tls_array[2], sizeof(long));
493}
494
/* User-supplied TLS descriptor, as consumed by ia32_clone_tls();
 * entry_number must fall in [GDT_ENTRY_TLS_MIN, GDT_ENTRY_TLS_MAX]. */
struct ia32_user_desc {
	unsigned int entry_number;	/* target GDT slot */
	unsigned int base_addr;
	unsigned int limit;
	unsigned int seg_32bit:1;
	unsigned int contents:2;
	unsigned int read_exec_only:1;
	unsigned int limit_in_pages:1;	/* limit is in 4KB pages, not bytes */
	unsigned int seg_not_present:1;
	unsigned int useable:1;
};
506
507struct linux_binprm;
508
509extern void ia32_init_addr_space (struct pt_regs *regs);
510extern int ia32_setup_arg_pages (struct linux_binprm *bprm, int exec_stack);
511extern unsigned long ia32_do_mmap (struct file *, unsigned long, unsigned long, int, int, loff_t);
512extern void ia32_load_segment_descriptors (struct task_struct *task);
513
514#define ia32f2ia64f(dst,src) \
515do { \
516 ia64_ldfe(6,src); \
517 ia64_stop(); \
518 ia64_stf_spill(dst, 6); \
519} while(0)
520
521#define ia64f2ia32f(dst,src) \
522do { \
523 ia64_ldf_fill(6, src); \
524 ia64_stop(); \
525 ia64_stfe(dst, 6); \
526} while(0)
527
/* 32-bit register-dump layout; field set mirrors the i386
 * user_regs_struct used by the IA32_PTRACE_{GET,SET}REGS requests. */
struct user_regs_struct32 {
	__u32 ebx, ecx, edx, esi, edi, ebp, eax;
	unsigned short ds, __ds, es, __es;
	unsigned short fs, __fs, gs, __gs;
	__u32 orig_eax, eip;	/* orig_eax: syscall number before clobber */
	unsigned short cs, __cs;
	__u32 eflags, esp;
	unsigned short ss, __ss;
};
537
538/* Prototypes for use in elfcore32.h */
539extern int save_ia32_fpstate (struct task_struct *, struct ia32_user_i387_struct __user *);
540extern int save_ia32_fpxstate (struct task_struct *, struct ia32_user_fxsr_struct __user *);
541
542#endif /* !CONFIG_IA32_SUPPORT */
543
544#endif /* _ASM_IA64_IA32_PRIV_H */
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
new file mode 100644
index 000000000000..247a21c64aea
--- /dev/null
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -0,0 +1,2747 @@
1/*
2 * sys_ia32.c: Conversion between 32bit and 64bit native syscalls. Derived from sys_sparc32.c.
3 *
4 * Copyright (C) 2000 VA Linux Co
5 * Copyright (C) 2000 Don Dugger <n0ano@valinux.com>
6 * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
7 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
9 * Copyright (C) 2000-2003, 2005 Hewlett-Packard Co
10 * David Mosberger-Tang <davidm@hpl.hp.com>
11 * Copyright (C) 2004 Gordon Jin <gordon.jin@intel.com>
12 *
13 * These routines maintain argument size conversion between 32bit and 64bit
14 * environment.
15 */
16
17#include <linux/config.h>
18#include <linux/kernel.h>
19#include <linux/syscalls.h>
20#include <linux/sysctl.h>
21#include <linux/sched.h>
22#include <linux/fs.h>
23#include <linux/file.h>
24#include <linux/signal.h>
25#include <linux/resource.h>
26#include <linux/times.h>
27#include <linux/utsname.h>
28#include <linux/timex.h>
29#include <linux/smp.h>
30#include <linux/smp_lock.h>
31#include <linux/sem.h>
32#include <linux/msg.h>
33#include <linux/mm.h>
34#include <linux/shm.h>
35#include <linux/slab.h>
36#include <linux/uio.h>
37#include <linux/nfs_fs.h>
38#include <linux/quota.h>
39#include <linux/sunrpc/svc.h>
40#include <linux/nfsd/nfsd.h>
41#include <linux/nfsd/cache.h>
42#include <linux/nfsd/xdr.h>
43#include <linux/nfsd/syscall.h>
44#include <linux/poll.h>
45#include <linux/eventpoll.h>
46#include <linux/personality.h>
47#include <linux/ptrace.h>
48#include <linux/stat.h>
49#include <linux/ipc.h>
50#include <linux/compat.h>
51#include <linux/vfs.h>
52#include <linux/mman.h>
53
54#include <asm/intrinsics.h>
55#include <asm/semaphore.h>
56#include <asm/types.h>
57#include <asm/uaccess.h>
58#include <asm/unistd.h>
59
60#include "ia32priv.h"
61
62#include <net/scm.h>
63#include <net/sock.h>
64
65#define DEBUG 0
66
67#if DEBUG
68# define DBG(fmt...) printk(KERN_DEBUG fmt)
69#else
70# define DBG(fmt...)
71#endif
72
/* Round x up to the next multiple of a (a must be a power of two). */
#define ROUND_UP(x,a)	((__typeof__(x))(((unsigned long)(x) + ((a) - 1)) & ~((a) - 1)))

/* Offset of a within an IA-32 (4KB) page. */
#define OFFSET4K(a)		((a) & 0xfff)
/* Start of the *native* (IA-64) page containing addr. */
#define PAGE_START(addr)	((addr) & PAGE_MASK)
#define MINSIGSTKSZ_IA32	2048

/* Clamp 32-bit ids into the legacy 16-bit range (65534 == overflow id). */
#define high2lowuid(uid) ((uid) > 65535 ? 65534 : (uid))
#define high2lowgid(gid) ((gid) > 65535 ? 65534 : (gid))

/*
 * Anything that modifies or inspects ia32 user virtual memory must hold this semaphore
 * while doing so.
 */
/* XXX make per-mm: */
static DECLARE_MUTEX(ia32_mmap_sem);
88
/*
 * 32-bit execve().  @argv/@envp are arrays of 32-bit user pointers;
 * compat_do_execve() handles the pointer widening.  Because the new
 * image may be a 64-bit binary, the 64-bit map base/task size and the
 * original I/O-port and TSSD kernel registers are installed *before*
 * the exec; on failure all of them are switched back to the saved ia32
 * values.  The statement order here is load-bearing.
 */
asmlinkage long
sys32_execve (char __user *name, compat_uptr_t __user *argv, compat_uptr_t __user *envp,
	      struct pt_regs *regs)
{
	long error;
	char *filename;
	unsigned long old_map_base, old_task_size, tssd;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;

	/* save the ia32-specific state we are about to clobber */
	old_map_base = current->thread.map_base;
	old_task_size = current->thread.task_size;
	tssd = ia64_get_kr(IA64_KR_TSSD);

	/* we may be exec'ing a 64-bit process: reset map base, task-size, and io-base: */
	current->thread.map_base = DEFAULT_MAP_BASE;
	current->thread.task_size = DEFAULT_TASK_SIZE;
	ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob);
	ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1);

	error = compat_do_execve(filename, argv, envp, regs);
	putname(filename);

	if (error < 0) {
		/* oops, execve failed, switch back to old values... */
		ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
		ia64_set_kr(IA64_KR_TSSD, tssd);
		current->thread.map_base = old_map_base;
		current->thread.task_size = old_task_size;
	}

	return error;
}
125
126int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
127{
128 int err;
129
130 if ((u64) stat->size > MAX_NON_LFS ||
131 !old_valid_dev(stat->dev) ||
132 !old_valid_dev(stat->rdev))
133 return -EOVERFLOW;
134
135 if (clear_user(ubuf, sizeof(*ubuf)))
136 return -EFAULT;
137
138 err = __put_user(old_encode_dev(stat->dev), &ubuf->st_dev);
139 err |= __put_user(stat->ino, &ubuf->st_ino);
140 err |= __put_user(stat->mode, &ubuf->st_mode);
141 err |= __put_user(stat->nlink, &ubuf->st_nlink);
142 err |= __put_user(high2lowuid(stat->uid), &ubuf->st_uid);
143 err |= __put_user(high2lowgid(stat->gid), &ubuf->st_gid);
144 err |= __put_user(old_encode_dev(stat->rdev), &ubuf->st_rdev);
145 err |= __put_user(stat->size, &ubuf->st_size);
146 err |= __put_user(stat->atime.tv_sec, &ubuf->st_atime);
147 err |= __put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec);
148 err |= __put_user(stat->mtime.tv_sec, &ubuf->st_mtime);
149 err |= __put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec);
150 err |= __put_user(stat->ctime.tv_sec, &ubuf->st_ctime);
151 err |= __put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec);
152 err |= __put_user(stat->blksize, &ubuf->st_blksize);
153 err |= __put_user(stat->blocks, &ubuf->st_blocks);
154 return err;
155}
156
157#if PAGE_SHIFT > IA32_PAGE_SHIFT
158
159
160static int
161get_page_prot (struct vm_area_struct *vma, unsigned long addr)
162{
163 int prot = 0;
164
165 if (!vma || vma->vm_start > addr)
166 return 0;
167
168 if (vma->vm_flags & VM_READ)
169 prot |= PROT_READ;
170 if (vma->vm_flags & VM_WRITE)
171 prot |= PROT_WRITE;
172 if (vma->vm_flags & VM_EXEC)
173 prot |= PROT_EXEC;
174 return prot;
175}
176
/*
 * Map a subpage by creating an anonymous page that contains the union of the old page and
 * the subpage.
 *
 * Only reached when PAGE_SIZE > IA32_PAGE_SIZE; [start,end) lies inside
 * a single native page.  The page is remapped anonymously, the old
 * contents around [start,end) are restored, and for file mappings the
 * file data is read in by hand.  Returns @start-relative result of the
 * mmap, or a negative error.
 */
static unsigned long
mmap_subpage (struct file *file, unsigned long start, unsigned long end, int prot, int flags,
	      loff_t off)
{
	void *page = NULL;
	struct inode *inode;
	unsigned long ret = 0;
	struct vm_area_struct *vma = find_vma(current->mm, start);
	int old_prot = get_page_prot(vma, start);

	DBG("mmap_subpage(file=%p,start=0x%lx,end=0x%lx,prot=%x,flags=%x,off=0x%llx)\n",
	    file, start, end, prot, flags, off);


	/* Optimize the case where the old mmap and the new mmap are both anonymous */
	/* (old_prot & PROT_WRITE) implies vma != NULL, so the deref below is safe */
	if ((old_prot & PROT_WRITE) && (flags & MAP_ANONYMOUS) && !vma->vm_file) {
		if (clear_user((void __user *) start, end - start)) {
			ret = -EFAULT;
			goto out;
		}
		goto skip_mmap;
	}

	page = (void *) get_zeroed_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	if (old_prot)
		/* NOTE(review): return value deliberately(?) ignored — a fault
		   here would restore zeroes instead of the old contents; confirm */
		copy_from_user(page, (void __user *) PAGE_START(start), PAGE_SIZE);

	down_write(&current->mm->mmap_sem);
	{
		/* PROT_WRITE is forced so the copy-back below can succeed;
		   it is dropped again at skip_mmap if not requested */
		ret = do_mmap(NULL, PAGE_START(start), PAGE_SIZE, prot | PROT_WRITE,
			      flags | MAP_FIXED | MAP_ANONYMOUS, 0);
	}
	up_write(&current->mm->mmap_sem);

	if (IS_ERR((void *) ret))
		goto out;

	if (old_prot) {
		/* copy back the old page contents. */
		if (offset_in_page(start))
			copy_to_user((void __user *) PAGE_START(start), page,
				     offset_in_page(start));
		if (offset_in_page(end))
			copy_to_user((void __user *) end, page + offset_in_page(end),
				     PAGE_SIZE - offset_in_page(end));
	}

	if (!(flags & MAP_ANONYMOUS)) {
		/* read the file contents */
		inode = file->f_dentry->d_inode;
		if (!inode->i_fop || !file->f_op->read
		    || ((*file->f_op->read)(file, (char __user *) start, end - start, &off) < 0))
		{
			ret = -EINVAL;
			goto out;
		}
	}

 skip_mmap:
	if (!(prot & PROT_WRITE))
		/* drop the temporary write permission, keep the old page's rights */
		ret = sys_mprotect(PAGE_START(start), PAGE_SIZE, prot | old_prot);
 out:
	if (page)
		free_page((unsigned long) page);
	return ret;
}
250
/* SLAB cache for partial_page structures */
/* NOTE(review): created outside this file's view — the allocators below
   assume it is valid before first use. */
kmem_cache_t *partial_page_cachep;
253
254/*
255 * init partial_page_list.
256 * return 0 means kmalloc fail.
257 */
258struct partial_page_list*
259ia32_init_pp_list(void)
260{
261 struct partial_page_list *p;
262
263 if ((p = kmalloc(sizeof(*p), GFP_KERNEL)) == NULL)
264 return p;
265 p->pp_head = NULL;
266 p->ppl_rb = RB_ROOT;
267 p->pp_hint = NULL;
268 atomic_set(&p->pp_count, 1);
269 return p;
270}
271
/*
 * Search for the partial page with @start in partial page list @ppl.
 * If finds the partial page, return the found partial page.
 * Else, return 0 and provide @pprev, @rb_link, @rb_parent to
 * be used by later __ia32_insert_pp().
 *
 * NOTE: the three output parameters are written only on a miss;
 * on a hit they are left untouched.  @pprev is the list predecessor
 * (last node with base < @start) and @rb_link/@rb_parent name the
 * rb-tree slot a new node would occupy.  pp_hint caches the last hit.
 */
static struct partial_page *
__ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
	struct partial_page **pprev, struct rb_node ***rb_link,
	struct rb_node **rb_parent)
{
	struct partial_page *pp;
	struct rb_node **__rb_link, *__rb_parent, *rb_prev;

	/* fast path: same native page as the previous lookup */
	pp = ppl->pp_hint;
	if (pp && pp->base == start)
		return pp;

	__rb_link = &ppl->ppl_rb.rb_node;
	rb_prev = __rb_parent = NULL;

	while (*__rb_link) {
		__rb_parent = *__rb_link;
		pp = rb_entry(__rb_parent, struct partial_page, pp_rb);

		if (pp->base == start) {
			ppl->pp_hint = pp;
			return pp;
		} else if (pp->base < start) {
			rb_prev = __rb_parent;
			__rb_link = &__rb_parent->rb_right;
		} else {
			__rb_link = &__rb_parent->rb_left;
		}
	}

	*rb_link = __rb_link;
	*rb_parent = __rb_parent;
	*pprev = NULL;
	if (rb_prev)
		*pprev = rb_entry(rb_prev, struct partial_page, pp_rb);
	return NULL;
}
315
/*
 * insert @pp into @ppl.
 *
 * @prev, @rb_link and @rb_parent must come from a preceding
 * __ia32_find_pp() miss for the same base address; they link @pp into
 * both the sorted singly-linked list and the rb-tree.
 */
static void
__ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
	struct partial_page *prev, struct rb_node **rb_link,
	struct rb_node *rb_parent)
{
	/* link list */
	if (prev) {
		pp->next = prev->next;
		prev->next = pp;
	} else {
		/* no predecessor: @pp becomes the new head; its successor is
		   the rb parent (the smallest node greater than @pp), if any */
		ppl->pp_head = pp;
		if (rb_parent)
			pp->next = rb_entry(rb_parent,
				struct partial_page, pp_rb);
		else
			pp->next = NULL;
	}

	/* link rb */
	rb_link_node(&pp->pp_rb, rb_parent, rb_link);
	rb_insert_color(&pp->pp_rb, &ppl->ppl_rb);

	/* remember the most recently touched node for the lookup fast path */
	ppl->pp_hint = pp;
}
343
344/*
345 * delete @pp from partial page list @ppl.
346 */
347static void
348__ia32_delete_pp(struct partial_page_list *ppl, struct partial_page *pp,
349 struct partial_page *prev)
350{
351 if (prev) {
352 prev->next = pp->next;
353 if (ppl->pp_hint == pp)
354 ppl->pp_hint = prev;
355 } else {
356 ppl->pp_head = pp->next;
357 if (ppl->pp_hint == pp)
358 ppl->pp_hint = pp->next;
359 }
360 rb_erase(&pp->pp_rb, &ppl->ppl_rb);
361 kmem_cache_free(partial_page_cachep, pp);
362}
363
364static struct partial_page *
365__pp_prev(struct partial_page *pp)
366{
367 struct rb_node *prev = rb_prev(&pp->pp_rb);
368 if (prev)
369 return rb_entry(prev, struct partial_page, pp_rb);
370 else
371 return NULL;
372}
373
/*
 * Delete partial pages with address between @start and @end.
 * @start and @end are page aligned.
 *
 * Callers hold mmap_sem for writing (see ia32_set_pp()/ia32_unset_pp()).
 */
static void
__ia32_delete_pp_range(unsigned int start, unsigned int end)
{
	struct partial_page *pp, *prev;
	struct rb_node **rb_link, *rb_parent;

	if (start >= end)
		return;

	/* position on the first partial page with base >= @start */
	pp = __ia32_find_pp(current->thread.ppl, start, &prev,
					&rb_link, &rb_parent);
	if (pp)
		prev = __pp_prev(pp);
	else {
		if (prev)
			pp = prev->next;
		else
			pp = current->thread.ppl->pp_head;
	}

	/* walk the sorted list, deleting everything below @end;
	   @prev stays valid because only nodes after it are removed */
	while (pp && pp->base < end) {
		struct partial_page *tmp = pp->next;
		__ia32_delete_pp(current->thread.ppl, pp, prev);
		pp = tmp;
	}
}
404
/*
 * Set the range between @start and @end in bitmap.
 * @start and @end should be IA32 page aligned and in the same IA64 page.
 *
 * Each set bit marks one mapped 4KB sub-page of the native page at
 * PAGE_START(start).  When the bitmap becomes completely set the
 * partial page record is dropped (the native page is fully mapped).
 * Returns 0 on success, -ENOMEM if a new record cannot be allocated.
 */
static int
__ia32_set_pp(unsigned int start, unsigned int end, int flags)
{
	struct partial_page *pp, *prev;
	struct rb_node ** rb_link, *rb_parent;
	unsigned int pstart, start_bit, end_bit, i;

	pstart = PAGE_START(start);
	start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
	end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
	if (end_bit == 0)
		/* @end sits exactly on the next native page boundary */
		end_bit = PAGE_SIZE / IA32_PAGE_SIZE;
	pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
					&rb_link, &rb_parent);
	if (pp) {
		for (i = start_bit; i < end_bit; i++)
			set_bit(i, &pp->bitmap);
		/*
		 * Check: if this partial page has been set to a full page,
		 * then delete it.
		 */
		if (find_first_zero_bit(&pp->bitmap, sizeof(pp->bitmap)*8) >=
				PAGE_SIZE/IA32_PAGE_SIZE) {
			__ia32_delete_pp(current->thread.ppl, pp, __pp_prev(pp));
		}
		return 0;
	}

	/*
	 * MAP_FIXED may lead to overlapping mmap.
	 * In this case, the requested mmap area may already mmaped as a full
	 * page. So check vma before adding a new partial page.
	 */
	if (flags & MAP_FIXED) {
		struct vm_area_struct *vma = find_vma(current->mm, pstart);
		if (vma && vma->vm_start <= pstart)
			return 0;
	}

	/* new a partial_page */
	pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	pp->base = pstart;
	pp->bitmap = 0;
	for (i=start_bit; i<end_bit; i++)
		set_bit(i, &(pp->bitmap));
	pp->next = NULL;
	__ia32_insert_pp(current->thread.ppl, pp, prev, rb_link, rb_parent);
	return 0;
}
460
/*
 * @start and @end should be IA32 page aligned, but don't need to be in the
 * same IA64 page. Split @start and @end to make sure they're in the same IA64
 * page, then call __ia32_set_pp().
 *
 * Takes mmap_sem for writing; __ia32_set_pp()'s -ENOMEM is ignored here.
 */
static void
ia32_set_pp(unsigned int start, unsigned int end, int flags)
{
	down_write(&current->mm->mmap_sem);
	if (flags & MAP_FIXED) {
		/*
		 * MAP_FIXED may lead to overlapping mmap. When this happens,
		 * a series of complete IA64 pages results in deletion of
		 * old partial pages in that range.
		 */
		__ia32_delete_pp_range(PAGE_ALIGN(start), PAGE_START(end));
	}

	if (end < PAGE_ALIGN(start)) {
		/* whole range lives inside one native page */
		__ia32_set_pp(start, end, flags);
	} else {
		/* handle the partial head and tail native pages separately;
		   fully covered native pages need no bookkeeping */
		if (offset_in_page(start))
			__ia32_set_pp(start, PAGE_ALIGN(start), flags);
		if (offset_in_page(end))
			__ia32_set_pp(PAGE_START(end), end, flags);
	}
	up_write(&current->mm->mmap_sem);
}
489
/*
 * Unset the range between @start and @end in bitmap.
 * @start and @end should be IA32 page aligned and in the same IA64 page.
 * After doing that, if the bitmap is 0, then free the page and return 1,
 * else return 0;
 * If not find the partial page in the list, then
 *	If the vma exists, then the full page is set to a partial page;
 *	Else return -ENOMEM.
 */
static int
__ia32_unset_pp(unsigned int start, unsigned int end)
{
	struct partial_page *pp, *prev;
	struct rb_node ** rb_link, *rb_parent;
	unsigned int pstart, start_bit, end_bit, i;
	struct vm_area_struct *vma;

	pstart = PAGE_START(start);
	start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
	end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
	if (end_bit == 0)
		/* @end sits exactly on the next native page boundary */
		end_bit = PAGE_SIZE / IA32_PAGE_SIZE;

	pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
					&rb_link, &rb_parent);
	if (pp) {
		for (i = start_bit; i < end_bit; i++)
			clear_bit(i, &pp->bitmap);
		if (pp->bitmap == 0) {
			/* no mapped sub-pages left: whole native page is free */
			__ia32_delete_pp(current->thread.ppl, pp, __pp_prev(pp));
			return 1;
		}
		return 0;
	}

	vma = find_vma(current->mm, pstart);
	if (!vma || vma->vm_start > pstart) {
		return -ENOMEM;
	}

	/* new a partial_page: the page was fully mapped; record everything
	   outside [start_bit, end_bit) as still mapped */
	pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	pp->base = pstart;
	pp->bitmap = 0;
	for (i = 0; i < start_bit; i++)
		set_bit(i, &(pp->bitmap));
	for (i = end_bit; i < PAGE_SIZE / IA32_PAGE_SIZE; i++)
		set_bit(i, &(pp->bitmap));
	pp->next = NULL;
	__ia32_insert_pp(current->thread.ppl, pp, prev, rb_link, rb_parent);
	return 0;
}
544
/*
 * Delete pp between PAGE_ALIGN(start) and PAGE_START(end) by calling
 * __ia32_delete_pp_range(). Unset possible partial pages by calling
 * __ia32_unset_pp().
 * The returned value see __ia32_unset_pp().
 *
 * *startp/*endp are adjusted in place to the native-page-aligned range
 * that should actually be passed to sys_munmap() (possibly empty).
 */
static int
ia32_unset_pp(unsigned int *startp, unsigned int *endp)
{
	unsigned int start = *startp, end = *endp;
	int ret = 0;

	down_write(&current->mm->mmap_sem);

	__ia32_delete_pp_range(PAGE_ALIGN(start), PAGE_START(end));

	if (end < PAGE_ALIGN(start)) {
		/* whole range inside one native page */
		ret = __ia32_unset_pp(start, end);
		if (ret == 1) {
			/* page became fully free: unmap the whole native page */
			*startp = PAGE_START(start);
			*endp = PAGE_ALIGN(end);
		}
		if (ret == 0) {
			/* to shortcut sys_munmap() in sys32_munmap() */
			*startp = PAGE_START(start);
			*endp = PAGE_START(end);
		}
	} else {
		if (offset_in_page(start)) {
			ret = __ia32_unset_pp(start, PAGE_ALIGN(start));
			if (ret == 1)
				*startp = PAGE_START(start);
			if (ret == 0)
				*startp = PAGE_ALIGN(start);
			if (ret < 0)
				goto out;
		}
		if (offset_in_page(end)) {
			ret = __ia32_unset_pp(PAGE_START(end), end);
			if (ret == 1)
				*endp = PAGE_ALIGN(end);
			if (ret == 0)
				*endp = PAGE_START(end);
		}
	}

 out:
	up_write(&current->mm->mmap_sem);
	return ret;
}
595
/*
 * Compare the range between @start and @end with bitmap in partial page.
 * @start and @end should be IA32 page aligned and in the same IA64 page.
 *
 * Return 0 when [@start,@end) exactly matches the single mapped run in
 * the bitmap, 1 when it is a proper subrange (or there is no partial
 * page / there are further runs), -ENOMEM when it reaches outside the
 * first mapped run.
 */
static int
__ia32_compare_pp(unsigned int start, unsigned int end)
{
	struct partial_page *pp, *prev;
	struct rb_node ** rb_link, *rb_parent;
	unsigned int pstart, start_bit, end_bit, size;
	unsigned int first_bit, next_zero_bit;	/* the first range in bitmap */

	pstart = PAGE_START(start);

	pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
					&rb_link, &rb_parent);
	if (!pp)
		return 1;

	start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
	end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
	size = sizeof(pp->bitmap) * 8;
	first_bit = find_first_bit(&pp->bitmap, size);
	next_zero_bit = find_next_zero_bit(&pp->bitmap, size, first_bit);
	if ((start_bit < first_bit) || (end_bit > next_zero_bit)) {
		/* exceeds the first range in bitmap */
		return -ENOMEM;
	} else if ((start_bit == first_bit) && (end_bit == next_zero_bit)) {
		first_bit = find_next_bit(&pp->bitmap, size, next_zero_bit);
		if ((next_zero_bit < first_bit) && (first_bit < size))
			return 1;	/* has next range */
		else
			return 0;	/* no next range */
	} else
		return 1;
}
632
/*
 * @start and @end should be IA32 page aligned, but don't need to be in the
 * same IA64 page. Split @start and @end to make sure they're in the same IA64
 * page, then call __ia32_compare_pp().
 *
 * Take this as example: the range is the 1st and 2nd 4K page.
 * Return 0 if they fit bitmap exactly, i.e. bitmap = 00000011;
 * Return 1 if the range doesn't cover whole bitmap, e.g. bitmap = 00001111;
 * Return -ENOMEM if the range exceeds the bitmap, e.g. bitmap = 00000001 or
 * bitmap = 00000101.
 *
 * On an exact fit (0), *startp/*endp are widened to native page bounds.
 */
static int
ia32_compare_pp(unsigned int *startp, unsigned int *endp)
{
	unsigned int start = *startp, end = *endp;
	int retval = 0;

	down_write(&current->mm->mmap_sem);

	if (end < PAGE_ALIGN(start)) {
		/* whole range inside one native page */
		retval = __ia32_compare_pp(start, end);
		if (retval == 0) {
			*startp = PAGE_START(start);
			*endp = PAGE_ALIGN(end);
		}
	} else {
		/* check the partial head and tail native pages separately */
		if (offset_in_page(start)) {
			retval = __ia32_compare_pp(start,
						   PAGE_ALIGN(start));
			if (retval == 0)
				*startp = PAGE_START(start);
			if (retval < 0)
				goto out;
		}
		if (offset_in_page(end)) {
			retval = __ia32_compare_pp(PAGE_START(end), end);
			if (retval == 0)
				*endp = PAGE_ALIGN(end);
		}
	}

 out:
	up_write(&current->mm->mmap_sem);
	return retval;
}
678
679static void
680__ia32_drop_pp_list(struct partial_page_list *ppl)
681{
682 struct partial_page *pp = ppl->pp_head;
683
684 while (pp) {
685 struct partial_page *next = pp->next;
686 kmem_cache_free(partial_page_cachep, pp);
687 pp = next;
688 }
689
690 kfree(ppl);
691}
692
693void
694ia32_drop_partial_page_list(struct task_struct *task)
695{
696 struct partial_page_list* ppl = task->thread.ppl;
697
698 if (ppl && atomic_dec_and_test(&ppl->pp_count))
699 __ia32_drop_pp_list(ppl);
700}
701
/*
 * Copy current->thread.ppl to ppl (already initialized).
 *
 * The source list is sorted ascending, so each new node is linked as
 * the right child of the previously inserted one.  NOTE(review): on
 * -ENOMEM the partially copied list is left in @ppl — presumably
 * released later via ia32_drop_partial_page_list(); confirm the
 * caller's error path.
 */
static int
__ia32_copy_pp_list(struct partial_page_list *ppl)
{
	struct partial_page *pp, *tmp, *prev;
	struct rb_node **rb_link, *rb_parent;

	ppl->pp_head = NULL;
	ppl->pp_hint = NULL;
	ppl->ppl_rb = RB_ROOT;
	rb_link = &ppl->ppl_rb.rb_node;
	rb_parent = NULL;
	prev = NULL;

	for (pp = current->thread.ppl->pp_head; pp; pp = pp->next) {
		tmp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;
		*tmp = *pp;
		__ia32_insert_pp(ppl, tmp, prev, rb_link, rb_parent);
		prev = tmp;
		rb_link = &tmp->pp_rb.rb_right;
		rb_parent = &tmp->pp_rb;
	}
	return 0;
}
730
731int
732ia32_copy_partial_page_list(struct task_struct *p, unsigned long clone_flags)
733{
734 int retval = 0;
735
736 if (clone_flags & CLONE_VM) {
737 atomic_inc(&current->thread.ppl->pp_count);
738 p->thread.ppl = current->thread.ppl;
739 } else {
740 p->thread.ppl = ia32_init_pp_list();
741 if (!p->thread.ppl)
742 return -ENOMEM;
743 down_write(&current->mm->mmap_sem);
744 {
745 retval = __ia32_copy_pp_list(p->thread.ppl);
746 }
747 up_write(&current->mm->mmap_sem);
748 }
749
750 return retval;
751}
752
/*
 * Core of the ia32 mmap emulation when the native page size exceeds the
 * ia32 (4KB) page size.  [start, start+len) is the ia32-page-aligned
 * request; the native mapping must cover [PAGE_START(start),
 * PAGE_ALIGN(end)).
 *
 * MAP_FIXED: partial head/tail native pages go through mmap_subpage()
 * and the sub-page bitmaps are updated first.  Otherwise a free area is
 * chosen and @start may be moved so that it stays congruent with @off.
 * A file mapping whose offset is not congruent with the native page
 * size cannot be mapped directly; it is emulated with an anonymous
 * mapping into which the file contents are read by hand.
 *
 * Caller holds ia32_mmap_sem (see ia32_do_mmap()).  Returns the ia32
 * start address or a negative error code.
 */
static unsigned long
emulate_mmap (struct file *file, unsigned long start, unsigned long len, int prot, int flags,
	      loff_t off)
{
	unsigned long tmp, end, pend, pstart, ret, is_congruent, fudge = 0;
	struct inode *inode;
	loff_t poff;

	end = start + len;
	pstart = PAGE_START(start);
	pend = PAGE_ALIGN(end);

	if (flags & MAP_FIXED) {
		ia32_set_pp((unsigned int)start, (unsigned int)end, flags);
		if (start > pstart) {
			/* partial head page: can't be shared */
			if (flags & MAP_SHARED)
				printk(KERN_INFO
				       "%s(%d): emulate_mmap() can't share head (addr=0x%lx)\n",
				       current->comm, current->pid, start);
			ret = mmap_subpage(file, start, min(PAGE_ALIGN(start), end), prot, flags,
					   off);
			if (IS_ERR((void *) ret))
				return ret;
			pstart += PAGE_SIZE;
			if (pstart >= pend)
				goto out;	/* done */
		}
		if (end < pend) {
			/* partial tail page: can't be shared */
			if (flags & MAP_SHARED)
				printk(KERN_INFO
				       "%s(%d): emulate_mmap() can't share tail (end=0x%lx)\n",
				       current->comm, current->pid, end);
			ret = mmap_subpage(file, max(start, PAGE_START(end)), end, prot, flags,
					   (off + len) - offset_in_page(end));
			if (IS_ERR((void *) ret))
				return ret;
			pend -= PAGE_SIZE;
			if (pstart >= pend)
				goto out;	/* done */
		}
	} else {
		/*
		 * If a start address was specified, use it if the entire rounded out area
		 * is available.
		 */
		if (start && !pstart)
			fudge = 1;	/* handle case of mapping to range (0,PAGE_SIZE) */
		tmp = arch_get_unmapped_area(file, pstart - fudge, pend - pstart, 0, flags);
		if (tmp != pstart) {
			/* hint not honored: relocate, keeping start congruent with off */
			pstart = tmp;
			start = pstart + offset_in_page(off);	/* make start congruent with off */
			end = start + len;
			pend = PAGE_ALIGN(end);
		}
	}

	poff = off + (pstart - start);	/* note: (pstart - start) may be negative */
	is_congruent = (flags & MAP_ANONYMOUS) || (offset_in_page(poff) == 0);

	if ((flags & MAP_SHARED) && !is_congruent)
		printk(KERN_INFO "%s(%d): emulate_mmap() can't share contents of incongruent mmap "
		       "(addr=0x%lx,off=0x%llx)\n", current->comm, current->pid, start, off);

	DBG("mmap_body: mapping [0x%lx-0x%lx) %s with poff 0x%llx\n", pstart, pend,
	    is_congruent ? "congruent" : "not congruent", poff);

	down_write(&current->mm->mmap_sem);
	{
		if (!(flags & MAP_ANONYMOUS) && is_congruent)
			ret = do_mmap(file, pstart, pend - pstart, prot, flags | MAP_FIXED, poff);
		else
			/* incongruent: anonymous map, file contents read in below;
			   PROT_WRITE is forced temporarily so the read can land */
			ret = do_mmap(NULL, pstart, pend - pstart,
				      prot | ((flags & MAP_ANONYMOUS) ? 0 : PROT_WRITE),
				      flags | MAP_FIXED | MAP_ANONYMOUS, 0);
	}
	up_write(&current->mm->mmap_sem);

	if (IS_ERR((void *) ret))
		return ret;

	if (!is_congruent) {
		/* read the file contents */
		inode = file->f_dentry->d_inode;
		if (!inode->i_fop || !file->f_op->read
		    || ((*file->f_op->read)(file, (char __user *) pstart, pend - pstart, &poff)
			< 0))
		{
			sys_munmap(pstart, pend - pstart);
			return -EINVAL;
		}
		/* drop the temporary write permission again */
		if (!(prot & PROT_WRITE) && sys_mprotect(pstart, pend - pstart, prot) < 0)
			return -EINVAL;
	}

	if (!(flags & MAP_FIXED))
		ia32_set_pp((unsigned int)start, (unsigned int)end, flags);
out:
	return start;
}
852
853#endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */
854
/*
 * Widen an ia32 protection mask to what an x86 CPU actually grants:
 * writable implies readable and executable, and read and execute imply
 * each other.
 */
static inline unsigned int
get_prot32 (unsigned int prot)
{
	if (prot & PROT_WRITE)
		return prot | PROT_READ | PROT_WRITE | PROT_EXEC;
	if (prot & (PROT_READ | PROT_EXEC))
		return prot | PROT_READ | PROT_EXEC;
	return prot;
}
867
/*
 * Common entry for the 32-bit mmap paths.  Validates @offset alignment
 * (must be 4KB aligned) and the ia32 address-space limits, widens @prot
 * to x86 semantics, then either emulates the mapping under
 * ia32_mmap_sem (native page size > 4KB) or hands it straight to
 * do_mmap().  Returns the mapped address or a negative error code.
 */
unsigned long
ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot, int flags,
	      loff_t offset)
{
	DBG("ia32_do_mmap(file=%p,addr=0x%lx,len=0x%lx,prot=%x,flags=%x,offset=0x%llx)\n",
	    file, addr, len, prot, flags, offset);

	if (file && (!file->f_op || !file->f_op->mmap))
		return -ENODEV;

	len = IA32_PAGE_ALIGN(len);
	if (len == 0)
		return addr;

	/* mapping must fit below the 32-bit user address-space limit */
	if (len > IA32_PAGE_OFFSET || addr > IA32_PAGE_OFFSET - len)
	{
		if (flags & MAP_FIXED)
			return -ENOMEM;
		else
			return -EINVAL;
	}

	if (OFFSET4K(offset))
		return -EINVAL;

	prot = get_prot32(prot);

#if PAGE_SHIFT > IA32_PAGE_SHIFT
	/* serialize all ia32 address-space changes */
	down(&ia32_mmap_sem);
	{
		addr = emulate_mmap(file, addr, len, prot, flags, offset);
	}
	up(&ia32_mmap_sem);
#else
	down_write(&current->mm->mmap_sem);
	{
		addr = do_mmap(file, addr, len, prot, flags, offset);
	}
	up_write(&current->mm->mmap_sem);
#endif
	DBG("ia32_do_mmap: returning 0x%lx\n", addr);
	return addr;
}
911
912/*
913 * Linux/i386 didn't use to be able to handle more than 4 system call parameters, so these
914 * system calls used a memory block for parameter passing..
915 */
916
/* Argument block for old-style mmap(): all six arguments packed into a
 * user-memory struct (see the comment above).  All fields are 32-bit. */
struct mmap_arg_struct {
	unsigned int addr;	/* requested start address */
	unsigned int len;	/* length of the mapping */
	unsigned int prot;	/* PROT_* bits */
	unsigned int flags;	/* MAP_* flags */
	unsigned int fd;	/* file descriptor (unused for MAP_ANONYMOUS) */
	unsigned int offset;	/* byte offset into the file */
};
925
926asmlinkage long
927sys32_mmap (struct mmap_arg_struct __user *arg)
928{
929 struct mmap_arg_struct a;
930 struct file *file = NULL;
931 unsigned long addr;
932 int flags;
933
934 if (copy_from_user(&a, arg, sizeof(a)))
935 return -EFAULT;
936
937 if (OFFSET4K(a.offset))
938 return -EINVAL;
939
940 flags = a.flags;
941
942 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
943 if (!(flags & MAP_ANONYMOUS)) {
944 file = fget(a.fd);
945 if (!file)
946 return -EBADF;
947 }
948
949 addr = ia32_do_mmap(file, a.addr, a.len, a.prot, flags, a.offset);
950
951 if (file)
952 fput(file);
953 return addr;
954}
955
956asmlinkage long
957sys32_mmap2 (unsigned int addr, unsigned int len, unsigned int prot, unsigned int flags,
958 unsigned int fd, unsigned int pgoff)
959{
960 struct file *file = NULL;
961 unsigned long retval;
962
963 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
964 if (!(flags & MAP_ANONYMOUS)) {
965 file = fget(fd);
966 if (!file)
967 return -EBADF;
968 }
969
970 retval = ia32_do_mmap(file, addr, len, prot, flags,
971 (unsigned long) pgoff << IA32_PAGE_SHIFT);
972
973 if (file)
974 fput(file);
975 return retval;
976}
977
/*
 * 32-bit munmap().  With equal page sizes this is a plain sys_munmap().
 * Otherwise ia32_unset_pp() clears the 4KB sub-page bits first and
 * shrinks/aligns [start,end) to the native pages that actually became
 * free — which may leave nothing to unmap at all.
 */
asmlinkage long
sys32_munmap (unsigned int start, unsigned int len)
{
	unsigned int end = start + len;
	long ret;

#if PAGE_SHIFT <= IA32_PAGE_SHIFT
	ret = sys_munmap(start, end - start);
#else
	if (OFFSET4K(start))
		return -EINVAL;

	end = IA32_PAGE_ALIGN(end);
	if (start >= end)
		return -EINVAL;

	ret = ia32_unset_pp(&start, &end);
	if (ret < 0)
		return ret;

	if (start >= end)
		return 0;	/* everything lived inside partial pages */

	/* serialize against other ia32 address-space updates */
	down(&ia32_mmap_sem);
	{
		ret = sys_munmap(start, end - start);
	}
	up(&ia32_mmap_sem);
#endif
	return ret;
}
1009
1010#if PAGE_SHIFT > IA32_PAGE_SHIFT
1011
1012/*
1013 * When mprotect()ing a partial page, we set the permission to the union of the old
1014 * settings and the new settings. In other words, it's only possible to make access to a
1015 * partial page less restrictive.
1016 */
1017static long
1018mprotect_subpage (unsigned long address, int new_prot)
1019{
1020 int old_prot;
1021 struct vm_area_struct *vma;
1022
1023 if (new_prot == PROT_NONE)
1024 return 0; /* optimize case where nothing changes... */
1025 vma = find_vma(current->mm, address);
1026 old_prot = get_page_prot(vma, address);
1027 return sys_mprotect(address, PAGE_SIZE, new_prot | old_prot);
1028}
1029
1030#endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */
1031
/*
 * 32-bit mprotect().  With equal page sizes this is a direct
 * sys_mprotect().  Otherwise the partial head/tail native pages are
 * handled by mprotect_subpage() (permissions can only be widened
 * there), and only the fully covered middle is mprotect()ed exactly.
 * ia32_compare_pp() may first widen [start,end) to native page bounds
 * when the range exactly matches a partial page's mapped run.
 */
asmlinkage long
sys32_mprotect (unsigned int start, unsigned int len, int prot)
{
	unsigned int end = start + len;
#if PAGE_SHIFT > IA32_PAGE_SHIFT
	long retval = 0;
#endif

	prot = get_prot32(prot);

#if PAGE_SHIFT <= IA32_PAGE_SHIFT
	return sys_mprotect(start, end - start, prot);
#else
	if (OFFSET4K(start))
		return -EINVAL;

	end = IA32_PAGE_ALIGN(end);
	if (end < start)
		return -EINVAL;	/* length wrapped past 4GB */

	retval = ia32_compare_pp(&start, &end);

	if (retval < 0)
		return retval;

	/* serialize against other ia32 address-space updates */
	down(&ia32_mmap_sem);
	{
		if (offset_in_page(start)) {
			/* start address is 4KB aligned but not page aligned. */
			retval = mprotect_subpage(PAGE_START(start), prot);
			if (retval < 0)
				goto out;

			start = PAGE_ALIGN(start);
			if (start >= end)
				goto out;	/* retval is already zero... */
		}

		if (offset_in_page(end)) {
			/* end address is 4KB aligned but not page aligned. */
			retval = mprotect_subpage(PAGE_START(end), prot);
			if (retval < 0)
				goto out;

			end = PAGE_START(end);
		}
		retval = sys_mprotect(start, end - start, prot);
	}
 out:
	up(&ia32_mmap_sem);
	return retval;
#endif
}
1085
/*
 * 32-bit mremap().  With equal page sizes this is a direct sys_mremap().
 * Otherwise: a shrink is done through sys32_munmap() (which handles the
 * 4KB sub-page bookkeeping); a grow rounds both lengths out to native
 * pages, remaps under ia32_mmap_sem, and records the newly mapped 4KB
 * sub-pages on success.
 */
asmlinkage long
sys32_mremap (unsigned int addr, unsigned int old_len, unsigned int new_len,
	      unsigned int flags, unsigned int new_addr)
{
	long ret;

#if PAGE_SHIFT <= IA32_PAGE_SHIFT
	ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
#else
	unsigned int old_end, new_end;

	if (OFFSET4K(addr))
		return -EINVAL;

	old_len = IA32_PAGE_ALIGN(old_len);
	new_len = IA32_PAGE_ALIGN(new_len);
	old_end = addr + old_len;
	new_end = addr + new_len;

	if (!new_len)
		return -EINVAL;

	if ((flags & MREMAP_FIXED) && (OFFSET4K(new_addr)))
		return -EINVAL;

	if (old_len >= new_len) {
		/* shrinking (or same size): just unmap the tail */
		ret = sys32_munmap(addr + new_len, old_len - new_len);
		if (ret && old_len != new_len)
			return ret;
		ret = addr;
		if (!(flags & MREMAP_FIXED) || (new_addr == addr))
			return ret;
		/* MREMAP_FIXED move: fall through with equal lengths */
		old_len = new_len;
	}

	/* round the request out to whole native pages */
	addr = PAGE_START(addr);
	old_len = PAGE_ALIGN(old_end) - addr;
	new_len = PAGE_ALIGN(new_end) - addr;

	down(&ia32_mmap_sem);
	{
		ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
	}
	up(&ia32_mmap_sem);

	if ((ret >= 0) && (old_len < new_len)) {
		/* mremap expanded successfully */
		ia32_set_pp(old_end, new_end, flags);
	}
#endif
	return ret;
}
1138
1139asmlinkage long
1140sys32_pipe (int __user *fd)
1141{
1142 int retval;
1143 int fds[2];
1144
1145 retval = do_pipe(fds);
1146 if (retval)
1147 goto out;
1148 if (copy_to_user(fd, fds, sizeof(fds)))
1149 retval = -EFAULT;
1150 out:
1151 return retval;
1152}
1153
1154static inline long
1155get_tv32 (struct timeval *o, struct compat_timeval __user *i)
1156{
1157 return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
1158 (__get_user(o->tv_sec, &i->tv_sec) | __get_user(o->tv_usec, &i->tv_usec)));
1159}
1160
/*
 * Store a kernel timeval into a 32-bit user-space timeval.
 * Returns non-zero on fault; '|' ORs the two __put_user results.
 */
static inline long
put_tv32 (struct compat_timeval __user *o, struct timeval *i)
{
	return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
		(__put_user(i->tv_sec, &o->tv_sec) | __put_user(i->tv_usec, &o->tv_usec)));
}
1167
/*
 * 32-bit alarm(): implemented on top of the ITIMER_REAL interval
 * timer.  Returns the number of whole seconds that were left on any
 * previously pending alarm, rounded up.
 */
asmlinkage unsigned long
sys32_alarm (unsigned int seconds)
{
	struct itimerval it_new, it_old;
	unsigned int oldalarm;

	/* one-shot timer: no interval, fires after 'seconds' */
	it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
	it_new.it_value.tv_sec = seconds;
	it_new.it_value.tv_usec = 0;
	do_setitimer(ITIMER_REAL, &it_new, &it_old);
	oldalarm = it_old.it_value.tv_sec;
	/* ehhh.. We can't return 0 if we have an alarm pending.. */
	/* And we'd better return too much than too little anyway */
	if (it_old.it_value.tv_usec)
		oldalarm++;
	return oldalarm;
}
1185
1186/* Translations due to time_t size differences. Which affects all
1187 sorts of things, like timeval and itimerval. */
1188
1189extern struct timezone sys_tz;
1190
1191asmlinkage long
1192sys32_gettimeofday (struct compat_timeval __user *tv, struct timezone __user *tz)
1193{
1194 if (tv) {
1195 struct timeval ktv;
1196 do_gettimeofday(&ktv);
1197 if (put_tv32(tv, &ktv))
1198 return -EFAULT;
1199 }
1200 if (tz) {
1201 if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
1202 return -EFAULT;
1203 }
1204 return 0;
1205}
1206
/*
 * 32-bit settimeofday(): convert the 32-bit timeval to a timespec and
 * pull in the timezone, then hand off to do_sys_settimeofday().
 * Either argument may be NULL; kts/ktz are only passed along when the
 * corresponding user pointer was given.
 */
asmlinkage long
sys32_settimeofday (struct compat_timeval __user *tv, struct timezone __user *tz)
{
	struct timeval ktv;
	struct timespec kts;
	struct timezone ktz;

	if (tv) {
		if (get_tv32(&ktv, tv))
			return -EFAULT;
		kts.tv_sec = ktv.tv_sec;
		kts.tv_nsec = ktv.tv_usec * 1000;
	}
	if (tz) {
		if (copy_from_user(&ktz, tz, sizeof(ktz)))
			return -EFAULT;
	}

	return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
}
1227
/* cursor state threaded through filldir32() by sys32_getdents() */
struct getdents32_callback {
	struct compat_dirent __user *current_dir;	/* where the next entry goes */
	struct compat_dirent __user *previous;		/* last entry (d_off patched later) */
	int count;					/* bytes left in the user buffer */
	int error;					/* sticky error for the caller */
};
1234
/* state for the single-entry old readdir() emulation (fillonedir32) */
struct readdir32_callback {
	struct old_linux32_dirent __user * dirent;	/* user's one-entry buffer */
	int count;					/* entries emitted so far (0 or 1) */
};
1239
/*
 * vfs_readdir() callback: append one entry, in 32-bit compat_dirent
 * layout, to the user buffer described by __buf.  The previous
 * entry's d_off is back-patched with this entry's offset.  Returns
 * -EINVAL when the buffer is full, -EFAULT on a user-copy failure.
 */
static int
filldir32 (void *__buf, const char *name, int namlen, loff_t offset, ino_t ino,
	   unsigned int d_type)
{
	struct compat_dirent __user * dirent;
	struct getdents32_callback * buf = (struct getdents32_callback *) __buf;
	/* record length: header + name + NUL, rounded to 4 bytes */
	int reclen = ROUND_UP(offsetof(struct compat_dirent, d_name) + namlen + 1, 4);

	buf->error = -EINVAL;	/* only used if we fail.. */
	if (reclen > buf->count)
		return -EINVAL;
	buf->error = -EFAULT;	/* only used if we fail.. */
	dirent = buf->previous;
	if (dirent)
		/* fix up the previous record's d_off now that we know it */
		if (put_user(offset, &dirent->d_off))
			return -EFAULT;
	dirent = buf->current_dir;
	buf->previous = dirent;
	if (put_user(ino, &dirent->d_ino)
	    || put_user(reclen, &dirent->d_reclen)
	    || copy_to_user(dirent->d_name, name, namlen)
	    || put_user(0, dirent->d_name + namlen))
		return -EFAULT;
	/* advance the cursor past this record */
	dirent = (struct compat_dirent __user *) ((char __user *) dirent + reclen);
	buf->current_dir = dirent;
	buf->count -= reclen;
	return 0;
}
1268
/*
 * 32-bit getdents(): fill 'dirent' with as many compat_dirent records
 * as fit in 'count' bytes.  Returns the number of bytes used, 0 at
 * EOF, or a negative errno.
 */
asmlinkage long
sys32_getdents (unsigned int fd, struct compat_dirent __user *dirent, unsigned int count)
{
	struct file * file;
	struct compat_dirent __user * lastdirent;
	struct getdents32_callback buf;
	int error;

	error = -EBADF;
	file = fget(fd);
	if (!file)
		goto out;

	buf.current_dir = dirent;
	buf.previous = NULL;
	buf.count = count;
	buf.error = 0;

	error = vfs_readdir(file, filldir32, &buf);
	if (error < 0)
		goto out_putf;
	error = buf.error;
	lastdirent = buf.previous;
	if (lastdirent) {
		/* terminate the last record with the current file position */
		error = -EINVAL;
		if (put_user(file->f_pos, &lastdirent->d_off))
			goto out_putf;
		error = count - buf.count;	/* bytes consumed */
	}

out_putf:
	fput(file);
out:
	return error;
}
1304
/*
 * vfs_readdir() callback for the legacy one-entry readdir(): copy a
 * single old_linux32_dirent to user space and stop (returns -EINVAL
 * on the second invocation so the walk terminates after one entry).
 */
static int
fillonedir32 (void * __buf, const char * name, int namlen, loff_t offset, ino_t ino,
	      unsigned int d_type)
{
	struct readdir32_callback * buf = (struct readdir32_callback *) __buf;
	struct old_linux32_dirent __user * dirent;

	if (buf->count)
		return -EINVAL;	/* already delivered one entry */
	buf->count++;
	dirent = buf->dirent;
	if (put_user(ino, &dirent->d_ino)
	    || put_user(offset, &dirent->d_offset)
	    || put_user(namlen, &dirent->d_namlen)
	    || copy_to_user(dirent->d_name, name, namlen)
	    || put_user(0, dirent->d_name + namlen))
		return -EFAULT;
	return 0;
}
1324
/*
 * Legacy 32-bit readdir(): returns one directory entry per call.
 * 'count' is ignored (historical interface).  Returns 1 when an entry
 * was delivered, 0 at EOF, negative errno on error.
 */
asmlinkage long
sys32_readdir (unsigned int fd, void __user *dirent, unsigned int count)
{
	int error;
	struct file * file;
	struct readdir32_callback buf;

	error = -EBADF;
	file = fget(fd);
	if (!file)
		goto out;

	buf.count = 0;
	buf.dirent = dirent;

	error = vfs_readdir(file, fillonedir32, &buf);
	if (error >= 0)
		error = buf.count;	/* 1 if an entry was written, else 0 */
	fput(file);
out:
	return error;
}
1347
/* argument block of the old single-pointer select(); all members are
 * 32-bit user addresses/values */
struct sel_arg_struct {
	unsigned int n;
	unsigned int inp;
	unsigned int outp;
	unsigned int exp;
	unsigned int tvp;
};
1355
1356asmlinkage long
1357sys32_old_select (struct sel_arg_struct __user *arg)
1358{
1359 struct sel_arg_struct a;
1360
1361 if (copy_from_user(&a, arg, sizeof(a)))
1362 return -EFAULT;
1363 return compat_sys_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp),
1364 compat_ptr(a.exp), compat_ptr(a.tvp));
1365}
1366
/* sub-call numbers demultiplexed by sys32_ipc() below (same values as
 * the ia32 sys_ipc() multiplexer) */
#define SEMOP		 1
#define SEMGET		 2
#define SEMCTL		 3
#define SEMTIMEDOP	 4
#define MSGSND		11
#define MSGRCV		12
#define MSGGET		13
#define MSGCTL		14
#define SHMAT		21
#define SHMDT		22
#define SHMGET		23
#define SHMCTL		24
1379
/*
 * 32-bit ipc() multiplexer.  The high 16 bits of 'call' carry the
 * historic interface version; the low 16 select the SysV IPC
 * operation, which is routed either to a compat_sys_* translator or
 * straight to the native syscall when the 32/64-bit layouts match.
 */
asmlinkage long
sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth)
{
	int version;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	switch (call) {
	      case SEMTIMEDOP:
		if (fifth)
			return compat_sys_semtimedop(first, compat_ptr(ptr),
				second, compat_ptr(fifth));
		/* else fall through for normal semop() */
	      case SEMOP:
		/* struct sembuf is the same on 32 and 64bit :)) */
		return sys_semtimedop(first, compat_ptr(ptr), second,
				      NULL);
	      case SEMGET:
		return sys_semget(first, second, third);
	      case SEMCTL:
		return compat_sys_semctl(first, second, third, compat_ptr(ptr));

	      case MSGSND:
		return compat_sys_msgsnd(first, second, third, compat_ptr(ptr));
	      case MSGRCV:
		return compat_sys_msgrcv(first, second, fifth, third, version, compat_ptr(ptr));
	      case MSGGET:
		return sys_msgget((key_t) first, second);
	      case MSGCTL:
		return compat_sys_msgctl(first, second, compat_ptr(ptr));

	      case SHMAT:
		return compat_sys_shmat(first, second, third, version, compat_ptr(ptr));
		break;	/* unreachable: kept from the original */
	      case SHMDT:
		return sys_shmdt(compat_ptr(ptr));
	      case SHMGET:
		return sys_shmget(first, (unsigned)second, third);
	      case SHMCTL:
		return compat_sys_shmctl(first, second, compat_ptr(ptr));

	      default:
		return -ENOSYS;
	}
	return -EINVAL;
}
1427
1428asmlinkage long
1429compat_sys_wait4 (compat_pid_t pid, compat_uint_t * stat_addr, int options,
1430 struct compat_rusage *ru);
1431
/* 32-bit waitpid() is wait4() without rusage reporting */
asmlinkage long
sys32_waitpid (int pid, unsigned int *stat_addr, int options)
{
	return compat_sys_wait4(pid, stat_addr, options, NULL);
}
1437
1438static unsigned int
1439ia32_peek (struct task_struct *child, unsigned long addr, unsigned int *val)
1440{
1441 size_t copied;
1442 unsigned int ret;
1443
1444 copied = access_process_vm(child, addr, val, sizeof(*val), 0);
1445 return (copied != sizeof(ret)) ? -EIO : 0;
1446}
1447
1448static unsigned int
1449ia32_poke (struct task_struct *child, unsigned long addr, unsigned int val)
1450{
1451
1452 if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val))
1453 return -EIO;
1454 return 0;
1455}
1456
1457/*
1458 * The order in which registers are stored in the ptrace regs structure
1459 */
1460#define PT_EBX 0
1461#define PT_ECX 1
1462#define PT_EDX 2
1463#define PT_ESI 3
1464#define PT_EDI 4
1465#define PT_EBP 5
1466#define PT_EAX 6
1467#define PT_DS 7
1468#define PT_ES 8
1469#define PT_FS 9
1470#define PT_GS 10
1471#define PT_ORIG_EAX 11
1472#define PT_EIP 12
1473#define PT_CS 13
1474#define PT_EFL 14
1475#define PT_UESP 15
1476#define PT_SS 16
1477
/*
 * Read one ia32 ptrace register of @child.  @regno is a byte offset
 * into the ia32 register layout (PT_* * sizeof(int)).  The ia32
 * registers live in specific ia64 GRs as set up by the ia32 syscall
 * entry path; segment registers are synthesized as the fixed
 * __USER_DS/__USER_CS selectors.
 */
static unsigned int
getreg (struct task_struct *child, int regno)
{
	struct pt_regs *child_regs;

	child_regs = ia64_task_regs(child);
	switch (regno / sizeof(int)) {
	      case PT_EBX: return child_regs->r11;
	      case PT_ECX: return child_regs->r9;
	      case PT_EDX: return child_regs->r10;
	      case PT_ESI: return child_regs->r14;
	      case PT_EDI: return child_regs->r15;
	      case PT_EBP: return child_regs->r13;
	      case PT_EAX: return child_regs->r8;
	      case PT_ORIG_EAX: return child_regs->r1; /* see dispatch_to_ia32_handler() */
	      case PT_EIP: return child_regs->cr_iip;
	      case PT_UESP: return child_regs->r12;
	      case PT_EFL: return child->thread.eflag;
	      case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS:
		return __USER_DS;
	      case PT_CS: return __USER_CS;
	      default:
		printk(KERN_ERR "ia32.getreg(): unknown register %d\n", regno);
		break;
	}
	return 0;
}
1505
1506static void
1507putreg (struct task_struct *child, int regno, unsigned int value)
1508{
1509 struct pt_regs *child_regs;
1510
1511 child_regs = ia64_task_regs(child);
1512 switch (regno / sizeof(int)) {
1513 case PT_EBX: child_regs->r11 = value; break;
1514 case PT_ECX: child_regs->r9 = value; break;
1515 case PT_EDX: child_regs->r10 = value; break;
1516 case PT_ESI: child_regs->r14 = value; break;
1517 case PT_EDI: child_regs->r15 = value; break;
1518 case PT_EBP: child_regs->r13 = value; break;
1519 case PT_EAX: child_regs->r8 = value; break;
1520 case PT_ORIG_EAX: child_regs->r1 = value; break;
1521 case PT_EIP: child_regs->cr_iip = value; break;
1522 case PT_UESP: child_regs->r12 = value; break;
1523 case PT_EFL: child->thread.eflag = value; break;
1524 case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS:
1525 if (value != __USER_DS)
1526 printk(KERN_ERR
1527 "ia32.putreg: attempt to set invalid segment register %d = %x\n",
1528 regno, value);
1529 break;
1530 case PT_CS:
1531 if (value != __USER_CS)
1532 printk(KERN_ERR
1533 "ia32.putreg: attempt to to set invalid segment register %d = %x\n",
1534 regno, value);
1535 break;
1536 default:
1537 printk(KERN_ERR "ia32.putreg: unknown register %d\n", regno);
1538 break;
1539 }
1540}
1541
/*
 * Copy ia32 FP stack register @regno (0..7, relative to top-of-stack
 * @tos) out to user space in 80-bit ia32 format.  st(0)..st(3) live
 * in the pt_regs f8-f11 scratch registers, st(4)..st(7) in the
 * switch_stack f12-f15.
 */
static void
put_fpreg (int regno, struct _fpreg_ia32 __user *reg, struct pt_regs *ptp,
	   struct switch_stack *swp, int tos)
{
	struct _fpreg_ia32 *f;
	char buf[32];

	/* conversion buffer, 16-byte aligned for the fp format helpers */
	f = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
	if ((regno += tos) >= 8)
		regno -= 8;	/* wrap around the 8-entry FP stack */
	switch (regno) {
	      case 0:
		ia64f2ia32f(f, &ptp->f8);
		break;
	      case 1:
		ia64f2ia32f(f, &ptp->f9);
		break;
	      case 2:
		ia64f2ia32f(f, &ptp->f10);
		break;
	      case 3:
		ia64f2ia32f(f, &ptp->f11);
		break;
	      case 4:
	      case 5:
	      case 6:
	      case 7:
		ia64f2ia32f(f, &swp->f12 + (regno - 4));
		break;
	}
	/* NOTE(review): copy_to_user result is ignored; callers have
	   access_ok'd the buffer, but a fault here is silently dropped */
	copy_to_user(reg, f, sizeof(*reg));
}
1574
/*
 * Load ia32 FP stack register @regno (0..7, relative to top-of-stack
 * @tos) from user space into the saved ia64 register image (inverse
 * of put_fpreg()).
 */
static void
get_fpreg (int regno, struct _fpreg_ia32 __user *reg, struct pt_regs *ptp,
	   struct switch_stack *swp, int tos)
{

	if ((regno += tos) >= 8)
		regno -= 8;	/* wrap around the 8-entry FP stack */
	/* NOTE(review): copy_from_user results are ignored; a faulting
	   source buffer leaves the target register unchanged */
	switch (regno) {
	      case 0:
		copy_from_user(&ptp->f8, reg, sizeof(*reg));
		break;
	      case 1:
		copy_from_user(&ptp->f9, reg, sizeof(*reg));
		break;
	      case 2:
		copy_from_user(&ptp->f10, reg, sizeof(*reg));
		break;
	      case 3:
		copy_from_user(&ptp->f11, reg, sizeof(*reg));
		break;
	      case 4:
	      case 5:
	      case 6:
	      case 7:
		copy_from_user(&swp->f12 + (regno - 4), reg, sizeof(*reg));
		break;
	}
	return;
}
1604
/*
 * Export @tsk's x87 state to user space in the legacy ia32
 * user_i387_struct layout.  Control/status/tag words and the
 * instruction/operand pointers come from the ia64 fcr/fsr/fir/fdr
 * registers; the 8 stack registers are converted via put_fpreg().
 */
int
save_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user *save)
{
	struct switch_stack *swp;
	struct pt_regs *ptp;
	int i, tos;

	if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
		return -EFAULT;

	__put_user(tsk->thread.fcr & 0xffff, &save->cwd);
	__put_user(tsk->thread.fsr & 0xffff, &save->swd);
	__put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd);
	__put_user(tsk->thread.fir, &save->fip);
	__put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs);
	__put_user(tsk->thread.fdr, &save->foo);
	__put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos);

	/*
	 *  Stack frames start with 16-bytes of temp space
	 */
	swp = (struct switch_stack *)(tsk->thread.ksp + 16);
	ptp = ia64_task_regs(tsk);
	tos = (tsk->thread.fsr >> 11) & 7;	/* x87 top-of-stack field */
	for (i = 0; i < 8; i++)
		put_fpreg(i, &save->st_space[i], ptp, swp, tos);
	return 0;
}
1633
/*
 * Import legacy ia32 x87 state from user space into @tsk's ia64
 * register image (inverse of save_ia32_fpstate()).  Only the ia32
 * visible fields of fcr/fsr/fir/fdr are replaced; the remaining
 * bits are preserved.
 */
static int
restore_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user *save)
{
	struct switch_stack *swp;
	struct pt_regs *ptp;
	int i, tos;
	unsigned int fsrlo, fsrhi, num32;

	if (!access_ok(VERIFY_READ, save, sizeof(*save)))
		return(-EFAULT);

	__get_user(num32, (unsigned int __user *)&save->cwd);
	tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f);
	__get_user(fsrlo, (unsigned int __user *)&save->swd);
	__get_user(fsrhi, (unsigned int __user *)&save->twd);
	/* status word in the low half, tag word in the high half */
	num32 = (fsrhi << 16) | fsrlo;
	tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32;
	__get_user(num32, (unsigned int __user *)&save->fip);
	tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32;
	__get_user(num32, (unsigned int __user *)&save->foo);
	tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32;

	/*
	 *  Stack frames start with 16-bytes of temp space
	 */
	swp = (struct switch_stack *)(tsk->thread.ksp + 16);
	ptp = ia64_task_regs(tsk);
	tos = (tsk->thread.fsr >> 11) & 7;	/* x87 top-of-stack field */
	for (i = 0; i < 8; i++)
		get_fpreg(i, &save->st_space[i], ptp, swp, tos);
	return 0;
}
1666
/*
 * Export @tsk's FP/SSE state in FXSR (fxsave) layout: x87 fields as
 * in save_ia32_fpstate() but with 16-byte st_space slots, plus MXCSR
 * (reassembled from bits of fcr/fsr) and the 8 XMM registers, which
 * the ia32 entry path keeps in f16-f31 of the switch_stack.
 */
int
save_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user *save)
{
	struct switch_stack *swp;
	struct pt_regs *ptp;
	int i, tos;
	unsigned long mxcsr=0;
	unsigned long num128[2];

	if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
		return -EFAULT;

	__put_user(tsk->thread.fcr & 0xffff, &save->cwd);
	__put_user(tsk->thread.fsr & 0xffff, &save->swd);
	__put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd);
	__put_user(tsk->thread.fir, &save->fip);
	__put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs);
	__put_user(tsk->thread.fdr, &save->foo);
	__put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos);

	/*
	 *  Stack frames start with 16-bytes of temp space
	 */
	swp = (struct switch_stack *)(tsk->thread.ksp + 16);
	ptp = ia64_task_regs(tsk);
	tos = (tsk->thread.fsr >> 11) & 7;	/* x87 top-of-stack field */
	for (i = 0; i < 8; i++)
		put_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);

	/* MXCSR: mask bits live in fcr, status flags in fsr */
	mxcsr = ((tsk->thread.fcr>>32) & 0xff80) | ((tsk->thread.fsr>>32) & 0x3f);
	__put_user(mxcsr & 0xffff, &save->mxcsr);
	for (i = 0; i < 8; i++) {
		/* xmm[i] = (f16+2i, f17+2i) register pair */
		memcpy(&(num128[0]), &(swp->f16) + i*2, sizeof(unsigned long));
		memcpy(&(num128[1]), &(swp->f17) + i*2, sizeof(unsigned long));
		copy_to_user(&save->xmm_space[0] + 4*i, num128, sizeof(struct _xmmreg_ia32));
	}
	return 0;
}
1705
/*
 * Import FXSR-format FP/SSE state from user space into @tsk's ia64
 * register image (inverse of save_ia32_fpxstate()).  MXCSR is split
 * back into its fcr (mask) and fsr (status) homes; XMM registers go
 * back into the f16-f31 pairs of the switch_stack.
 */
static int
restore_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user *save)
{
	struct switch_stack *swp;
	struct pt_regs *ptp;
	int i, tos;
	unsigned int fsrlo, fsrhi, num32;
	int mxcsr;
	unsigned long num64;
	unsigned long num128[2];

	if (!access_ok(VERIFY_READ, save, sizeof(*save)))
		return(-EFAULT);

	__get_user(num32, (unsigned int __user *)&save->cwd);
	tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f);
	__get_user(fsrlo, (unsigned int __user *)&save->swd);
	__get_user(fsrhi, (unsigned int __user *)&save->twd);
	num32 = (fsrhi << 16) | fsrlo;
	tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32;
	__get_user(num32, (unsigned int __user *)&save->fip);
	tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32;
	__get_user(num32, (unsigned int __user *)&save->foo);
	tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32;

	/*
	 *  Stack frames start with 16-bytes of temp space
	 */
	swp = (struct switch_stack *)(tsk->thread.ksp + 16);
	ptp = ia64_task_regs(tsk);
	tos = (tsk->thread.fsr >> 11) & 7;	/* x87 top-of-stack field */
	for (i = 0; i < 8; i++)
		get_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);

	__get_user(mxcsr, (unsigned int __user *)&save->mxcsr);
	/* mask bits -> fcr[47:32], status flags -> fsr[37:32] */
	num64 = mxcsr & 0xff10;
	tsk->thread.fcr = (tsk->thread.fcr & (~0xff1000000000UL)) | (num64<<32);
	num64 = mxcsr & 0x3f;
	tsk->thread.fsr = (tsk->thread.fsr & (~0x3f00000000UL)) | (num64<<32);

	for (i = 0; i < 8; i++) {
		copy_from_user(num128, &save->xmm_space[0] + 4*i, sizeof(struct _xmmreg_ia32));
		memcpy(&(swp->f16) + i*2, &(num128[0]), sizeof(unsigned long));
		memcpy(&(swp->f17) + i*2, &(num128[1]), sizeof(unsigned long));
	}
	return 0;
}
1753
/*
 * 32-bit ptrace().  PEEK/POKE operate on 32-bit words through
 * ia32_peek()/ia32_poke(); register accesses are translated by
 * getreg()/putreg(); FP state goes through the ia32 fp(x)state
 * helpers; anything else is forwarded to the native sys_ptrace() or
 * generic ptrace_request().  Runs under the BKL like the native path.
 */
asmlinkage long
sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data)
{
	struct task_struct *child;
	unsigned int value, tmp;
	long i, ret;

	lock_kernel();
	if (request == PTRACE_TRACEME) {
		ret = sys_ptrace(request, pid, addr, data);
		goto out;
	}

	ret = -ESRCH;
	read_lock(&tasklist_lock);
	child = find_task_by_pid(pid);
	if (child)
		get_task_struct(child);		/* pin child across the operation */
	read_unlock(&tasklist_lock);
	if (!child)
		goto out;
	ret = -EPERM;
	if (pid == 1)		/* no messing around with init! */
		goto out_tsk;

	if (request == PTRACE_ATTACH) {
		ret = sys_ptrace(request, pid, addr, data);
		goto out_tsk;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out_tsk;

	switch (request) {
	      case PTRACE_PEEKTEXT:
	      case PTRACE_PEEKDATA:	/* read word at location addr */
		ret = ia32_peek(child, addr, &value);
		if (ret == 0)
			ret = put_user(value, (unsigned int __user *) compat_ptr(data));
		else
			ret = -EIO;
		goto out_tsk;

	      case PTRACE_POKETEXT:
	      case PTRACE_POKEDATA:	/* write the word at location addr */
		ret = ia32_poke(child, addr, data);
		goto out_tsk;

	      case PTRACE_PEEKUSR:	/* read word at addr in USER area */
		ret = -EIO;
		/* NOTE(review): '>' admits addr == 17*sizeof(int), one word
		   past the last register; getreg() then logs and returns 0 */
		if ((addr & 3) || addr > 17*sizeof(int))
			break;

		tmp = getreg(child, addr);
		if (!put_user(tmp, (unsigned int __user *) compat_ptr(data)))
			ret = 0;
		break;

	      case PTRACE_POKEUSR:	/* write word at addr in USER area */
		ret = -EIO;
		if ((addr & 3) || addr > 17*sizeof(int))
			break;

		putreg(child, addr, data);
		ret = 0;
		break;

	      case IA32_PTRACE_GETREGS:
		if (!access_ok(VERIFY_WRITE, compat_ptr(data), 17*sizeof(int))) {
			ret = -EIO;
			break;
		}
		/* copy all 17 registers out, word by word */
		for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int) ) {
			put_user(getreg(child, i), (unsigned int __user *) compat_ptr(data));
			data += sizeof(int);
		}
		ret = 0;
		break;

	      case IA32_PTRACE_SETREGS:
		if (!access_ok(VERIFY_READ, compat_ptr(data), 17*sizeof(int))) {
			ret = -EIO;
			break;
		}
		for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int) ) {
			get_user(tmp, (unsigned int __user *) compat_ptr(data));
			putreg(child, i, tmp);
			data += sizeof(int);
		}
		ret = 0;
		break;

	      case IA32_PTRACE_GETFPREGS:
		ret = save_ia32_fpstate(child, (struct ia32_user_i387_struct __user *)
					compat_ptr(data));
		break;

	      case IA32_PTRACE_GETFPXREGS:
		ret = save_ia32_fpxstate(child, (struct ia32_user_fxsr_struct __user *)
					 compat_ptr(data));
		break;

	      case IA32_PTRACE_SETFPREGS:
		ret = restore_ia32_fpstate(child, (struct ia32_user_i387_struct __user *)
					   compat_ptr(data));
		break;

	      case IA32_PTRACE_SETFPXREGS:
		ret = restore_ia32_fpxstate(child, (struct ia32_user_fxsr_struct __user *)
					    compat_ptr(data));
		break;

	      case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, (unsigned int __user *) compat_ptr(data));
		break;

	      case PTRACE_SYSCALL:	/* continue, stop after next syscall */
	      case PTRACE_CONT:		/* restart after signal. */
	      case PTRACE_KILL:
	      case PTRACE_SINGLESTEP:	/* execute chile for one instruction */
	      case PTRACE_DETACH:	/* detach a process */
		ret = sys_ptrace(request, pid, addr, data);
		break;

	      default:
		ret = ptrace_request(child, request, addr, data);
		break;

	}
 out_tsk:
	put_task_struct(child);
 out:
	unlock_kernel();
	return ret;
}
1890
/* 32-bit sigaltstack descriptor (stack_t with 32-bit fields) */
typedef struct {
	unsigned int ss_sp;	/* 32-bit user address of the stack base */
	unsigned int ss_flags;
	unsigned int ss_size;
} ia32_stack_t;
1896
1897asmlinkage long
1898sys32_sigaltstack (ia32_stack_t __user *uss32, ia32_stack_t __user *uoss32,
1899 long arg2, long arg3, long arg4, long arg5, long arg6,
1900 long arg7, struct pt_regs pt)
1901{
1902 stack_t uss, uoss;
1903 ia32_stack_t buf32;
1904 int ret;
1905 mm_segment_t old_fs = get_fs();
1906
1907 if (uss32) {
1908 if (copy_from_user(&buf32, uss32, sizeof(ia32_stack_t)))
1909 return -EFAULT;
1910 uss.ss_sp = (void __user *) (long) buf32.ss_sp;
1911 uss.ss_flags = buf32.ss_flags;
1912 /* MINSIGSTKSZ is different for ia32 vs ia64. We lie here to pass the
1913 check and set it to the user requested value later */
1914 if ((buf32.ss_flags != SS_DISABLE) && (buf32.ss_size < MINSIGSTKSZ_IA32)) {
1915 ret = -ENOMEM;
1916 goto out;
1917 }
1918 uss.ss_size = MINSIGSTKSZ;
1919 }
1920 set_fs(KERNEL_DS);
1921 ret = do_sigaltstack(uss32 ? (stack_t __user *) &uss : NULL,
1922 (stack_t __user *) &uoss, pt.r12);
1923 current->sas_ss_size = buf32.ss_size;
1924 set_fs(old_fs);
1925out:
1926 if (ret < 0)
1927 return(ret);
1928 if (uoss32) {
1929 buf32.ss_sp = (long __user) uoss.ss_sp;
1930 buf32.ss_flags = uoss.ss_flags;
1931 buf32.ss_size = uoss.ss_size;
1932 if (copy_to_user(uoss32, &buf32, sizeof(ia32_stack_t)))
1933 return -EFAULT;
1934 }
1935 return ret;
1936}
1937
/* 32-bit pause(): sleep interruptibly until a signal arrives */
asmlinkage int
sys32_pause (void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	/* always restarted by the signal-delivery path */
	return -ERESTARTNOHAND;
}
1945
1946asmlinkage int
1947sys32_msync (unsigned int start, unsigned int len, int flags)
1948{
1949 unsigned int addr;
1950
1951 if (OFFSET4K(start))
1952 return -EINVAL;
1953 addr = PAGE_START(start);
1954 return sys_msync(addr, len + (start - addr), flags);
1955}
1956
/* 32-bit layout of 'struct __sysctl_args'; pointer members are 32-bit
 * user addresses */
struct sysctl32 {
	unsigned int	name;		/* int vector describing the MIB */
	int		nlen;
	unsigned int	oldval;
	unsigned int	oldlenp;
	unsigned int	newval;
	unsigned int	newlen;
	unsigned int	__unused[4];
};
1966
#ifdef CONFIG_SYSCTL
/*
 * 32-bit _sysctl(): unpack the 32-bit argument block, widen the
 * pointers, and call do_sysctl() with address checking disabled
 * (KERNEL_DS) since oldlen lives in a kernel local.  The pointers are
 * pre-validated so that the KERNEL_DS window cannot be abused.
 */
asmlinkage long
sys32_sysctl (struct sysctl32 __user *args)
{
	struct sysctl32 a32;
	mm_segment_t old_fs = get_fs ();
	void __user *oldvalp, *newvalp;
	size_t oldlen;
	int __user *namep;
	long ret;

	if (copy_from_user(&a32, args, sizeof(a32)))
		return -EFAULT;

	/*
	 * We need to pre-validate these because we have to disable address checking
	 * before calling do_sysctl() because of OLDLEN but we can't run the risk of the
	 * user specifying bad addresses here.  Well, since we're dealing with 32 bit
	 * addresses, we KNOW that access_ok() will always succeed, so this is an
	 * expensive NOP, but so what...
	 */
	namep = (int __user *) compat_ptr(a32.name);
	oldvalp = compat_ptr(a32.oldval);
	newvalp = compat_ptr(a32.newval);

	if ((oldvalp && get_user(oldlen, (int __user *) compat_ptr(a32.oldlenp)))
	    || !access_ok(VERIFY_WRITE, namep, 0)
	    || !access_ok(VERIFY_WRITE, oldvalp, 0)
	    || !access_ok(VERIFY_WRITE, newvalp, 0))
		return -EFAULT;

	set_fs(KERNEL_DS);
	lock_kernel();
	ret = do_sysctl(namep, a32.nlen, oldvalp, (size_t __user *) &oldlen,
			newvalp, (size_t) a32.newlen);
	unlock_kernel();
	set_fs(old_fs);

	/* report the (possibly updated) old-value length back to the user */
	if (oldvalp && put_user (oldlen, (int __user *) compat_ptr(a32.oldlenp)))
		return -EFAULT;

	return ret;
}
#endif
2011
/*
 * 32-bit uname(): run the native syscall, then overwrite the machine
 * field so ia32 processes see themselves on an "i686".
 */
asmlinkage long
sys32_newuname (struct new_utsname __user *name)
{
	int ret = sys_newuname(name);

	if (!ret)
		if (copy_to_user(name->machine, "i686\0\0\0", 8))
			ret = -EFAULT;
	return ret;
}
2022
2023asmlinkage long
2024sys32_getresuid16 (u16 __user *ruid, u16 __user *euid, u16 __user *suid)
2025{
2026 uid_t a, b, c;
2027 int ret;
2028 mm_segment_t old_fs = get_fs();
2029
2030 set_fs(KERNEL_DS);
2031 ret = sys_getresuid((uid_t __user *) &a, (uid_t __user *) &b, (uid_t __user *) &c);
2032 set_fs(old_fs);
2033
2034 if (put_user(a, ruid) || put_user(b, euid) || put_user(c, suid))
2035 return -EFAULT;
2036 return ret;
2037}
2038
/*
 * 16-bit getresgid(): fetch the three gids through the native syscall
 * (under KERNEL_DS since the destinations are kernel locals), then
 * narrow them to the legacy 16-bit user slots.
 */
asmlinkage long
sys32_getresgid16 (u16 __user *rgid, u16 __user *egid, u16 __user *sgid)
{
	gid_t a, b, c;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_getresgid((gid_t __user *) &a, (gid_t __user *) &b, (gid_t __user *) &c);
	set_fs(old_fs);

	if (ret)
		return ret;

	/* bitwise '|' ORs the three put_user error codes together */
	return put_user(a, rgid) | put_user(b, egid) | put_user(c, sgid);
}
2055
/*
 * 32-bit lseek(): 'offset' is declared int so a negative 32-bit
 * offset sign-extends correctly into the native 64-bit argument.
 */
asmlinkage long
sys32_lseek (unsigned int fd, int offset, unsigned int whence)
{
	/* Sign-extension of "offset" is important here... */
	return sys_lseek(fd, offset, whence);
}
2062
/*
 * Copy a kernel group_info out as an array of legacy 16-bit gids.
 * Returns 0 or -EFAULT.  Note: gids above 65535 are silently
 * truncated by the cast.
 */
static int
groups16_to_user(short __user *grouplist, struct group_info *group_info)
{
	int i;
	short group;

	for (i = 0; i < group_info->ngroups; i++) {
		group = (short)GROUP_AT(group_info, i);
		if (put_user(group, grouplist+i))
			return -EFAULT;
	}

	return 0;
}
2077
/*
 * Fill a kernel group_info from a user array of legacy 16-bit gids.
 * Returns 0 or -EFAULT.
 */
static int
groups16_from_user(struct group_info *group_info, short __user *grouplist)
{
	int i;
	short group;

	for (i = 0; i < group_info->ngroups; i++) {
		if (get_user(group, grouplist+i))
			return -EFAULT;
		GROUP_AT(group_info, i) = (gid_t)group;
	}

	return 0;
}
2092
/*
 * 16-bit getgroups(): with gidsetsize == 0, just report the group
 * count; otherwise copy the supplementary groups out as 16-bit gids.
 * current->group_info is pinned while it is being read.
 */
asmlinkage long
sys32_getgroups16 (int gidsetsize, short __user *grouplist)
{
	int i;

	if (gidsetsize < 0)
		return -EINVAL;

	get_group_info(current->group_info);
	i = current->group_info->ngroups;
	if (gidsetsize) {
		if (i > gidsetsize) {
			i = -EINVAL;
			goto out;
		}
		if (groups16_to_user(grouplist, current->group_info)) {
			i = -EFAULT;
			goto out;
		}
	}
out:
	put_group_info(current->group_info);
	return i;
}
2117
/*
 * 16-bit setgroups(): requires CAP_SETGID.  Builds a fresh group_info
 * from the user's 16-bit gid list and installs it; the local
 * reference is dropped in all paths.
 */
asmlinkage long
sys32_setgroups16 (int gidsetsize, short __user *grouplist)
{
	struct group_info *group_info;
	int retval;

	if (!capable(CAP_SETGID))
		return -EPERM;
	if ((unsigned)gidsetsize > NGROUPS_MAX)
		return -EINVAL;

	group_info = groups_alloc(gidsetsize);
	if (!group_info)
		return -ENOMEM;
	retval = groups16_from_user(group_info, grouplist);
	if (retval) {
		put_group_info(group_info);
		return retval;
	}

	retval = set_current_groups(group_info);
	put_group_info(group_info);

	return retval;
}
2143
2144asmlinkage long
2145sys32_truncate64 (unsigned int path, unsigned int len_lo, unsigned int len_hi)
2146{
2147 return sys_truncate(compat_ptr(path), ((unsigned long) len_hi << 32) | len_lo);
2148}
2149
2150asmlinkage long
2151sys32_ftruncate64 (int fd, unsigned int len_lo, unsigned int len_hi)
2152{
2153 return sys_ftruncate(fd, ((unsigned long) len_hi << 32) | len_lo);
2154}
2155
2156static int
2157putstat64 (struct stat64 __user *ubuf, struct kstat *kbuf)
2158{
2159 int err;
2160 u64 hdev;
2161
2162 if (clear_user(ubuf, sizeof(*ubuf)))
2163 return -EFAULT;
2164
2165 hdev = huge_encode_dev(kbuf->dev);
2166 err = __put_user(hdev, (u32 __user*)&ubuf->st_dev);
2167 err |= __put_user(hdev >> 32, ((u32 __user*)&ubuf->st_dev) + 1);
2168 err |= __put_user(kbuf->ino, &ubuf->__st_ino);
2169 err |= __put_user(kbuf->ino, &ubuf->st_ino_lo);
2170 err |= __put_user(kbuf->ino >> 32, &ubuf->st_ino_hi);
2171 err |= __put_user(kbuf->mode, &ubuf->st_mode);
2172 err |= __put_user(kbuf->nlink, &ubuf->st_nlink);
2173 err |= __put_user(kbuf->uid, &ubuf->st_uid);
2174 err |= __put_user(kbuf->gid, &ubuf->st_gid);
2175 hdev = huge_encode_dev(kbuf->rdev);
2176 err = __put_user(hdev, (u32 __user*)&ubuf->st_rdev);
2177 err |= __put_user(hdev >> 32, ((u32 __user*)&ubuf->st_rdev) + 1);
2178 err |= __put_user(kbuf->size, &ubuf->st_size_lo);
2179 err |= __put_user((kbuf->size >> 32), &ubuf->st_size_hi);
2180 err |= __put_user(kbuf->atime.tv_sec, &ubuf->st_atime);
2181 err |= __put_user(kbuf->atime.tv_nsec, &ubuf->st_atime_nsec);
2182 err |= __put_user(kbuf->mtime.tv_sec, &ubuf->st_mtime);
2183 err |= __put_user(kbuf->mtime.tv_nsec, &ubuf->st_mtime_nsec);
2184 err |= __put_user(kbuf->ctime.tv_sec, &ubuf->st_ctime);
2185 err |= __put_user(kbuf->ctime.tv_nsec, &ubuf->st_ctime_nsec);
2186 err |= __put_user(kbuf->blksize, &ubuf->st_blksize);
2187 err |= __put_user(kbuf->blocks, &ubuf->st_blocks);
2188 return err;
2189}
2190
2191asmlinkage long
2192sys32_stat64 (char __user *filename, struct stat64 __user *statbuf)
2193{
2194 struct kstat s;
2195 long ret = vfs_stat(filename, &s);
2196 if (!ret)
2197 ret = putstat64(statbuf, &s);
2198 return ret;
2199}
2200
2201asmlinkage long
2202sys32_lstat64 (char __user *filename, struct stat64 __user *statbuf)
2203{
2204 struct kstat s;
2205 long ret = vfs_lstat(filename, &s);
2206 if (!ret)
2207 ret = putstat64(statbuf, &s);
2208 return ret;
2209}
2210
2211asmlinkage long
2212sys32_fstat64 (unsigned int fd, struct stat64 __user *statbuf)
2213{
2214 struct kstat s;
2215 long ret = vfs_fstat(fd, &s);
2216 if (!ret)
2217 ret = putstat64(statbuf, &s);
2218 return ret;
2219}
2220
/* 32-bit layout of 'struct sysinfo' (memory values in mem_unit units) */
struct sysinfo32 {
	s32 uptime;
	u32 loads[3];
	u32 totalram;
	u32 freeram;
	u32 sharedram;
	u32 bufferram;
	u32 totalswap;
	u32 freeswap;
	u16 procs;
	u16 pad;
	u32 totalhigh;
	u32 freehigh;
	u32 mem_unit;
	char _f[8];
};
2237
/*
 * 32-bit sysinfo(): run the native syscall into a kernel struct
 * (KERNEL_DS), scale memory figures into a larger mem_unit if they
 * overflow 32 bits, then copy the narrowed struct out field by field.
 */
asmlinkage long
sys32_sysinfo (struct sysinfo32 __user *info)
{
	struct sysinfo s;
	long ret, err;
	int bitcount = 0;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_sysinfo((struct sysinfo __user *) &s);
	set_fs(old_fs);
	/* Check to see if any memory value is too large for 32-bit and
	 * scale down if needed.
	 */
	if ((s.totalram >> 32) || (s.totalswap >> 32)) {
		while (s.mem_unit < PAGE_SIZE) {
			s.mem_unit <<= 1;
			bitcount++;
		}
		s.totalram >>= bitcount;
		s.freeram >>= bitcount;
		s.sharedram >>= bitcount;
		s.bufferram >>= bitcount;
		s.totalswap >>= bitcount;
		s.freeswap >>= bitcount;
		s.totalhigh >>= bitcount;
		s.freehigh >>= bitcount;
	}

	if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
		return -EFAULT;

	err = __put_user(s.uptime, &info->uptime);
	err |= __put_user(s.loads[0], &info->loads[0]);
	err |= __put_user(s.loads[1], &info->loads[1]);
	err |= __put_user(s.loads[2], &info->loads[2]);
	err |= __put_user(s.totalram, &info->totalram);
	err |= __put_user(s.freeram, &info->freeram);
	err |= __put_user(s.sharedram, &info->sharedram);
	err |= __put_user(s.bufferram, &info->bufferram);
	err |= __put_user(s.totalswap, &info->totalswap);
	err |= __put_user(s.freeswap, &info->freeswap);
	err |= __put_user(s.procs, &info->procs);
	err |= __put_user (s.totalhigh, &info->totalhigh);
	err |= __put_user (s.freehigh, &info->freehigh);
	err |= __put_user (s.mem_unit, &info->mem_unit);
	if (err)
		return -EFAULT;
	return ret;
}
2288
/*
 * 32-bit sched_rr_get_interval(): run the native syscall against a
 * kernel timespec, then convert the result to a compat_timespec.
 */
asmlinkage long
sys32_sched_rr_get_interval (pid_t pid, struct compat_timespec __user *interval)
{
	mm_segment_t old_fs = get_fs();
	struct timespec t;
	long ret;

	set_fs(KERNEL_DS);	/* let sys_* write to the kernel 't' */
	ret = sys_sched_rr_get_interval(pid, (struct timespec __user *) &t);
	set_fs(old_fs);
	/* NOTE(review): 't' is copied out even when ret is an error, in
	 * which case it may be uninitialized — confirm this is intended. */
	if (put_compat_timespec(&t, interval))
		return -EFAULT;
	return ret;
}
2303
2304asmlinkage long
2305sys32_pread (unsigned int fd, void __user *buf, unsigned int count, u32 pos_lo, u32 pos_hi)
2306{
2307 return sys_pread64(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo);
2308}
2309
2310asmlinkage long
2311sys32_pwrite (unsigned int fd, void __user *buf, unsigned int count, u32 pos_lo, u32 pos_hi)
2312{
2313 return sys_pwrite64(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo);
2314}
2315
2316asmlinkage long
2317sys32_sendfile (int out_fd, int in_fd, int __user *offset, unsigned int count)
2318{
2319 mm_segment_t old_fs = get_fs();
2320 long ret;
2321 off_t of;
2322
2323 if (offset && get_user(of, offset))
2324 return -EFAULT;
2325
2326 set_fs(KERNEL_DS);
2327 ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *) &of : NULL, count);
2328 set_fs(old_fs);
2329
2330 if (!ret && offset && put_user(of, offset))
2331 return -EFAULT;
2332
2333 return ret;
2334}
2335
2336asmlinkage long
2337sys32_personality (unsigned int personality)
2338{
2339 long ret;
2340
2341 if (current->personality == PER_LINUX32 && personality == PER_LINUX)
2342 personality = PER_LINUX32;
2343 ret = sys_personality(personality);
2344 if (ret == PER_LINUX32)
2345 ret = PER_LINUX;
2346 return ret;
2347}
2348
/*
 * ia32 brk(): on top of the native sys_brk().  sys_brk() only unmaps
 * whole pages, so when the break moves down we additionally zero the
 * range from the new break up to the next page boundary — presumably so
 * stale data in that tail cannot reappear if the break is re-expanded
 * later (NOTE(review): confirm rationale).
 */
asmlinkage unsigned long
sys32_brk (unsigned int brk)
{
	unsigned long ret, obrk;
	struct mm_struct *mm = current->mm;

	obrk = mm->brk;		/* break before the call */
	ret = sys_brk(brk);
	if (ret < obrk)		/* the break shrank */
		clear_user(compat_ptr(ret), PAGE_ALIGN(ret) - ret);
	return ret;
}
2361
2362/*
2363 * Exactly like fs/open.c:sys_open(), except that it doesn't set the O_LARGEFILE flag.
2364 */
2365asmlinkage long
2366sys32_open (const char __user * filename, int flags, int mode)
2367{
2368 char * tmp;
2369 int fd, error;
2370
2371 tmp = getname(filename);
2372 fd = PTR_ERR(tmp);
2373 if (!IS_ERR(tmp)) {
2374 fd = get_unused_fd();
2375 if (fd >= 0) {
2376 struct file *f = filp_open(tmp, flags, mode);
2377 error = PTR_ERR(f);
2378 if (IS_ERR(f))
2379 goto out_error;
2380 fd_install(fd, f);
2381 }
2382out:
2383 putname(tmp);
2384 }
2385 return fd;
2386
2387out_error:
2388 put_unused_fd(fd);
2389 fd = error;
2390 goto out;
2391}
2392
/* Structure for ia32 emulation on ia64: 32-bit layout of struct
 * epoll_event, with the 64-bit 'data' field split into two u32 words
 * (data[0] = low word, data[1] = high word — see the conversions below). */
struct epoll_event32
{
	u32 events;
	u32 data[2];
};
2399
2400asmlinkage long
2401sys32_epoll_ctl(int epfd, int op, int fd, struct epoll_event32 __user *event)
2402{
2403 mm_segment_t old_fs = get_fs();
2404 struct epoll_event event64;
2405 int error;
2406 u32 data_halfword;
2407
2408 if (!access_ok(VERIFY_READ, event, sizeof(struct epoll_event32)))
2409 return -EFAULT;
2410
2411 __get_user(event64.events, &event->events);
2412 __get_user(data_halfword, &event->data[0]);
2413 event64.data = data_halfword;
2414 __get_user(data_halfword, &event->data[1]);
2415 event64.data |= (u64)data_halfword << 32;
2416
2417 set_fs(KERNEL_DS);
2418 error = sys_epoll_ctl(epfd, op, fd, (struct epoll_event __user *) &event64);
2419 set_fs(old_fs);
2420
2421 return error;
2422}
2423
2424asmlinkage long
2425sys32_epoll_wait(int epfd, struct epoll_event32 __user * events, int maxevents,
2426 int timeout)
2427{
2428 struct epoll_event *events64 = NULL;
2429 mm_segment_t old_fs = get_fs();
2430 int error, numevents, size;
2431 int evt_idx;
2432 int do_free_pages = 0;
2433
2434 if (maxevents <= 0) {
2435 return -EINVAL;
2436 }
2437
2438 /* Verify that the area passed by the user is writeable */
2439 if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event32)))
2440 return -EFAULT;
2441
2442 /*
2443 * Allocate space for the intermediate copy. If the space needed
2444 * is large enough to cause kmalloc to fail, then try again with
2445 * __get_free_pages.
2446 */
2447 size = maxevents * sizeof(struct epoll_event);
2448 events64 = kmalloc(size, GFP_KERNEL);
2449 if (events64 == NULL) {
2450 events64 = (struct epoll_event *)
2451 __get_free_pages(GFP_KERNEL, get_order(size));
2452 if (events64 == NULL)
2453 return -ENOMEM;
2454 do_free_pages = 1;
2455 }
2456
2457 /* Do the system call */
2458 set_fs(KERNEL_DS); /* copy_to/from_user should work on kernel mem*/
2459 numevents = sys_epoll_wait(epfd, (struct epoll_event __user *) events64,
2460 maxevents, timeout);
2461 set_fs(old_fs);
2462
2463 /* Don't modify userspace memory if we're returning an error */
2464 if (numevents > 0) {
2465 /* Translate the 64-bit structures back into the 32-bit
2466 structures */
2467 for (evt_idx = 0; evt_idx < numevents; evt_idx++) {
2468 __put_user(events64[evt_idx].events,
2469 &events[evt_idx].events);
2470 __put_user((u32)events64[evt_idx].data,
2471 &events[evt_idx].data[0]);
2472 __put_user((u32)(events64[evt_idx].data >> 32),
2473 &events[evt_idx].data[1]);
2474 }
2475 }
2476
2477 if (do_free_pages)
2478 free_pages((unsigned long) events64, get_order(size));
2479 else
2480 kfree(events64);
2481 return numevents;
2482}
2483
2484/*
2485 * Get a yet unused TLS descriptor index.
2486 */
2487static int
2488get_free_idx (void)
2489{
2490 struct thread_struct *t = &current->thread;
2491 int idx;
2492
2493 for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
2494 if (desc_empty(t->tls_array + idx))
2495 return idx + GDT_ENTRY_TLS_MIN;
2496 return -ESRCH;
2497}
2498
2499/*
2500 * Set a given TLS descriptor:
2501 */
2502asmlinkage int
2503sys32_set_thread_area (struct ia32_user_desc __user *u_info)
2504{
2505 struct thread_struct *t = &current->thread;
2506 struct ia32_user_desc info;
2507 struct desc_struct *desc;
2508 int cpu, idx;
2509
2510 if (copy_from_user(&info, u_info, sizeof(info)))
2511 return -EFAULT;
2512 idx = info.entry_number;
2513
2514 /*
2515 * index -1 means the kernel should try to find and allocate an empty descriptor:
2516 */
2517 if (idx == -1) {
2518 idx = get_free_idx();
2519 if (idx < 0)
2520 return idx;
2521 if (put_user(idx, &u_info->entry_number))
2522 return -EFAULT;
2523 }
2524
2525 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
2526 return -EINVAL;
2527
2528 desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
2529
2530 cpu = smp_processor_id();
2531
2532 if (LDT_empty(&info)) {
2533 desc->a = 0;
2534 desc->b = 0;
2535 } else {
2536 desc->a = LDT_entry_a(&info);
2537 desc->b = LDT_entry_b(&info);
2538 }
2539 load_TLS(t, cpu);
2540 return 0;
2541}
2542
2543/*
2544 * Get the current Thread-Local Storage area:
2545 */
2546
2547#define GET_BASE(desc) ( \
2548 (((desc)->a >> 16) & 0x0000ffff) | \
2549 (((desc)->b << 16) & 0x00ff0000) | \
2550 ( (desc)->b & 0xff000000) )
2551
2552#define GET_LIMIT(desc) ( \
2553 ((desc)->a & 0x0ffff) | \
2554 ((desc)->b & 0xf0000) )
2555
2556#define GET_32BIT(desc) (((desc)->b >> 22) & 1)
2557#define GET_CONTENTS(desc) (((desc)->b >> 10) & 3)
2558#define GET_WRITABLE(desc) (((desc)->b >> 9) & 1)
2559#define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1)
2560#define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
2561#define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
2562
2563asmlinkage int
2564sys32_get_thread_area (struct ia32_user_desc __user *u_info)
2565{
2566 struct ia32_user_desc info;
2567 struct desc_struct *desc;
2568 int idx;
2569
2570 if (get_user(idx, &u_info->entry_number))
2571 return -EFAULT;
2572 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
2573 return -EINVAL;
2574
2575 desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
2576
2577 info.entry_number = idx;
2578 info.base_addr = GET_BASE(desc);
2579 info.limit = GET_LIMIT(desc);
2580 info.seg_32bit = GET_32BIT(desc);
2581 info.contents = GET_CONTENTS(desc);
2582 info.read_exec_only = !GET_WRITABLE(desc);
2583 info.limit_in_pages = GET_LIMIT_PAGES(desc);
2584 info.seg_not_present = !GET_PRESENT(desc);
2585 info.useable = GET_USEABLE(desc);
2586
2587 if (copy_to_user(u_info, &info, sizeof(info)))
2588 return -EFAULT;
2589 return 0;
2590}
2591
/*
 * 32-bit timer_create(): convert the compat sigevent into the native
 * layout, run the real syscall on kernel copies, and write the
 * resulting timer id back to user space.
 */
asmlinkage long
sys32_timer_create(u32 clock, struct compat_sigevent __user *se32, timer_t __user *timer_id)
{
	struct sigevent se;
	mm_segment_t oldfs;
	timer_t t;
	long err;

	/* No sigevent: the native syscall handles NULL itself. */
	if (se32 == NULL)
		return sys_timer_create(clock, NULL, timer_id);

	if (get_compat_sigevent(&se, se32))
		return -EFAULT;

	/* Validate timer_id up front so the bare __put_user() below is safe. */
	if (!access_ok(VERIFY_WRITE,timer_id,sizeof(timer_t)))
		return -EFAULT;

	oldfs = get_fs();
	set_fs(KERNEL_DS);	/* 'se' and 't' are kernel buffers */
	err = sys_timer_create(clock, (struct sigevent __user *) &se, (timer_t __user *) &t);
	set_fs(oldfs);

	if (!err)
		err = __put_user (t, timer_id);

	return err;
}
2619
2620long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
2621 __u32 len_low, __u32 len_high, int advice)
2622{
2623 return sys_fadvise64_64(fd,
2624 (((u64)offset_high)<<32) | offset_low,
2625 (((u64)len_high)<<32) | len_low,
2626 advice);
2627}
2628
2629#ifdef NOTYET /* UNTESTED FOR IA64 FROM HERE DOWN */
2630
2631asmlinkage long sys32_setreuid(compat_uid_t ruid, compat_uid_t euid)
2632{
2633 uid_t sruid, seuid;
2634
2635 sruid = (ruid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)ruid);
2636 seuid = (euid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)euid);
2637 return sys_setreuid(sruid, seuid);
2638}
2639
2640asmlinkage long
2641sys32_setresuid(compat_uid_t ruid, compat_uid_t euid,
2642 compat_uid_t suid)
2643{
2644 uid_t sruid, seuid, ssuid;
2645
2646 sruid = (ruid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)ruid);
2647 seuid = (euid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)euid);
2648 ssuid = (suid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)suid);
2649 return sys_setresuid(sruid, seuid, ssuid);
2650}
2651
2652asmlinkage long
2653sys32_setregid(compat_gid_t rgid, compat_gid_t egid)
2654{
2655 gid_t srgid, segid;
2656
2657 srgid = (rgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)rgid);
2658 segid = (egid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)egid);
2659 return sys_setregid(srgid, segid);
2660}
2661
2662asmlinkage long
2663sys32_setresgid(compat_gid_t rgid, compat_gid_t egid,
2664 compat_gid_t sgid)
2665{
2666 gid_t srgid, segid, ssgid;
2667
2668 srgid = (rgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)rgid);
2669 segid = (egid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)egid);
2670 ssgid = (sgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)sgid);
2671 return sys_setresgid(srgid, segid, ssgid);
2672}
2673
/* Handle adjtimex compatibility. */

/* 32-bit layout of struct timex, as seen by ia32 user space. */
struct timex32 {
	u32 modes;
	s32 offset, freq, maxerror, esterror;
	s32 status, constant, precision, tolerance;
	struct compat_timeval time;
	s32 tick;
	s32 ppsfreq, jitter, shift, stabil;
	s32 jitcnt, calcnt, errcnt, stbcnt;
	/* Anonymous bit-fields: reserved padding mirroring the spare
	 * trailing fields of the native struct timex. */
	s32 :32; s32 :32; s32 :32; s32 :32;
	s32 :32; s32 :32; s32 :32; s32 :32;
	s32 :32; s32 :32; s32 :32; s32 :32;
};
2688
2689extern int do_adjtimex(struct timex *);
2690
/*
 * 32-bit adjtimex(): copy the user's timex32 field-by-field into a
 * native struct timex, run do_adjtimex(), and copy the (possibly
 * updated) fields back out.  The first get_user()/put_user() of each
 * chain performs the access check; the rest use __get_user/__put_user.
 */
asmlinkage long
sys32_adjtimex(struct timex32 *utp)
{
	struct timex txc;
	int ret;

	/* Fields not present in timex32 must be zero. */
	memset(&txc, 0, sizeof(struct timex));

	if(get_user(txc.modes, &utp->modes) ||
	   __get_user(txc.offset, &utp->offset) ||
	   __get_user(txc.freq, &utp->freq) ||
	   __get_user(txc.maxerror, &utp->maxerror) ||
	   __get_user(txc.esterror, &utp->esterror) ||
	   __get_user(txc.status, &utp->status) ||
	   __get_user(txc.constant, &utp->constant) ||
	   __get_user(txc.precision, &utp->precision) ||
	   __get_user(txc.tolerance, &utp->tolerance) ||
	   __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
	   __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
	   __get_user(txc.tick, &utp->tick) ||
	   __get_user(txc.ppsfreq, &utp->ppsfreq) ||
	   __get_user(txc.jitter, &utp->jitter) ||
	   __get_user(txc.shift, &utp->shift) ||
	   __get_user(txc.stabil, &utp->stabil) ||
	   __get_user(txc.jitcnt, &utp->jitcnt) ||
	   __get_user(txc.calcnt, &utp->calcnt) ||
	   __get_user(txc.errcnt, &utp->errcnt) ||
	   __get_user(txc.stbcnt, &utp->stbcnt))
		return -EFAULT;

	ret = do_adjtimex(&txc);

	/* Copy the results back even on error, like the native syscall. */
	if(put_user(txc.modes, &utp->modes) ||
	   __put_user(txc.offset, &utp->offset) ||
	   __put_user(txc.freq, &utp->freq) ||
	   __put_user(txc.maxerror, &utp->maxerror) ||
	   __put_user(txc.esterror, &utp->esterror) ||
	   __put_user(txc.status, &utp->status) ||
	   __put_user(txc.constant, &utp->constant) ||
	   __put_user(txc.precision, &utp->precision) ||
	   __put_user(txc.tolerance, &utp->tolerance) ||
	   __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
	   __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
	   __put_user(txc.tick, &utp->tick) ||
	   __put_user(txc.ppsfreq, &utp->ppsfreq) ||
	   __put_user(txc.jitter, &utp->jitter) ||
	   __put_user(txc.shift, &utp->shift) ||
	   __put_user(txc.stabil, &utp->stabil) ||
	   __put_user(txc.jitcnt, &utp->jitcnt) ||
	   __put_user(txc.calcnt, &utp->calcnt) ||
	   __put_user(txc.errcnt, &utp->errcnt) ||
	   __put_user(txc.stbcnt, &utp->stbcnt))
		ret = -EFAULT;

	return ret;
}
2747#endif /* NOTYET */