author    | Tony Luck <tony.luck@intel.com> | 2010-02-08 13:42:17 -0500
committer | Tony Luck <tony.luck@intel.com> | 2010-02-08 13:42:17 -0500
commit    | 32974ad4907cdde6c9de612cd1b2ee0568fb9409 (patch)
tree      | 46d883f7f4fb3f4a5cee8ec9eb2c6b4939d7ae10 /arch
parent    | 6339204ecc2aa2067a99595522de0403f0854bb8 (diff)
[IA64] Remove COMPAT_IA32 support
This has been broken since May 2008 when Al Viro killed altroot support.
Since nobody has complained, it would appear that there are no users of
this code (A plausible theory since the main OSVs that support ia64 prefer
to use the IA32-EL software emulation).
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch')
37 files changed, 20 insertions, 6321 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 2d7f56a98e0f..9a50d7dd2a0b 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -499,23 +499,6 @@ config ARCH_PROC_KCORE_TEXT
499 | def_bool y | 499 | def_bool y |
500 | depends on PROC_KCORE | 500 | depends on PROC_KCORE |
501 | 501 | ||
502 | config IA32_SUPPORT | ||
503 | bool "Support for Linux/x86 binaries" | ||
504 | help | ||
505 | IA-64 processors can execute IA-32 (X86) instructions. By | ||
506 | saying Y here, the kernel will include IA-32 system call | ||
507 | emulation support which makes it possible to transparently | ||
508 | run IA-32 Linux binaries on an IA-64 Linux system. | ||
509 | If in doubt, say Y. | ||
510 | |||
511 | config COMPAT | ||
512 | bool | ||
513 | depends on IA32_SUPPORT | ||
514 | default y | ||
515 | |||
516 | config COMPAT_FOR_U64_ALIGNMENT | ||
517 | def_bool COMPAT | ||
518 | |||
519 | config IA64_MCA_RECOVERY | 502 | config IA64_MCA_RECOVERY |
520 | tristate "MCA recovery from errors other than TLB." | 503 | tristate "MCA recovery from errors other than TLB." |
521 | 504 | ||
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 475e2725fbde..8ae0d2604ce1 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -46,7 +46,6 @@ head-y := arch/ia64/kernel/head.o arch/ia64/kernel/init_task.o
46 | 46 | ||
47 | libs-y += arch/ia64/lib/ | 47 | libs-y += arch/ia64/lib/ |
48 | core-y += arch/ia64/kernel/ arch/ia64/mm/ | 48 | core-y += arch/ia64/kernel/ arch/ia64/mm/ |
49 | core-$(CONFIG_IA32_SUPPORT) += arch/ia64/ia32/ | ||
50 | core-$(CONFIG_IA64_DIG) += arch/ia64/dig/ | 49 | core-$(CONFIG_IA64_DIG) += arch/ia64/dig/ |
51 | core-$(CONFIG_IA64_DIG_VTD) += arch/ia64/dig/ | 50 | core-$(CONFIG_IA64_DIG_VTD) += arch/ia64/dig/ |
52 | core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/ | 51 | core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/ |
diff --git a/arch/ia64/configs/bigsur_defconfig b/arch/ia64/configs/bigsur_defconfig
index ace41096b47b..312b12094a1d 100644
--- a/arch/ia64/configs/bigsur_defconfig
+++ b/arch/ia64/configs/bigsur_defconfig
@@ -131,8 +131,6 @@ CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
131 | CONFIG_ARCH_FLATMEM_ENABLE=y | 131 | CONFIG_ARCH_FLATMEM_ENABLE=y |
132 | CONFIG_ARCH_SPARSEMEM_ENABLE=y | 132 | CONFIG_ARCH_SPARSEMEM_ENABLE=y |
133 | # CONFIG_VIRTUAL_MEM_MAP is not set | 133 | # CONFIG_VIRTUAL_MEM_MAP is not set |
134 | CONFIG_IA32_SUPPORT=y | ||
135 | CONFIG_COMPAT=y | ||
136 | # CONFIG_IA64_MCA_RECOVERY is not set | 134 | # CONFIG_IA64_MCA_RECOVERY is not set |
137 | CONFIG_PERFMON=y | 135 | CONFIG_PERFMON=y |
138 | CONFIG_IA64_PALINFO=y | 136 | CONFIG_IA64_PALINFO=y |
diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig
index 75645495c2dd..6a4cc506fb5f 100644
--- a/arch/ia64/configs/generic_defconfig
+++ b/arch/ia64/configs/generic_defconfig
@@ -205,8 +205,6 @@ CONFIG_VIRTUAL_MEM_MAP=y
205 | CONFIG_HOLES_IN_ZONE=y | 205 | CONFIG_HOLES_IN_ZONE=y |
206 | CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y | 206 | CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y |
207 | CONFIG_HAVE_ARCH_NODEDATA_EXTENSION=y | 207 | CONFIG_HAVE_ARCH_NODEDATA_EXTENSION=y |
208 | CONFIG_IA32_SUPPORT=y | ||
209 | CONFIG_COMPAT=y | ||
210 | CONFIG_COMPAT_FOR_U64_ALIGNMENT=y | 208 | CONFIG_COMPAT_FOR_U64_ALIGNMENT=y |
211 | CONFIG_IA64_MCA_RECOVERY=y | 209 | CONFIG_IA64_MCA_RECOVERY=y |
212 | CONFIG_PERFMON=y | 210 | CONFIG_PERFMON=y |
diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig
index e86fbd39c795..2dc185b0f9a3 100644
--- a/arch/ia64/configs/gensparse_defconfig
+++ b/arch/ia64/configs/gensparse_defconfig
@@ -139,8 +139,6 @@ CONFIG_ARCH_SPARSEMEM_ENABLE=y
139 | CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y | 139 | CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y |
140 | CONFIG_NUMA=y | 140 | CONFIG_NUMA=y |
141 | CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y | 141 | CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y |
142 | CONFIG_IA32_SUPPORT=y | ||
143 | CONFIG_COMPAT=y | ||
144 | CONFIG_IA64_MCA_RECOVERY=y | 142 | CONFIG_IA64_MCA_RECOVERY=y |
145 | CONFIG_PERFMON=y | 143 | CONFIG_PERFMON=y |
146 | CONFIG_IA64_PALINFO=y | 144 | CONFIG_IA64_PALINFO=y |
diff --git a/arch/ia64/configs/sim_defconfig b/arch/ia64/configs/sim_defconfig
index 546a772f438e..21a23cdfd41c 100644
--- a/arch/ia64/configs/sim_defconfig
+++ b/arch/ia64/configs/sim_defconfig
@@ -130,8 +130,6 @@ CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
130 | CONFIG_ARCH_FLATMEM_ENABLE=y | 130 | CONFIG_ARCH_FLATMEM_ENABLE=y |
131 | CONFIG_ARCH_SPARSEMEM_ENABLE=y | 131 | CONFIG_ARCH_SPARSEMEM_ENABLE=y |
132 | # CONFIG_VIRTUAL_MEM_MAP is not set | 132 | # CONFIG_VIRTUAL_MEM_MAP is not set |
133 | CONFIG_IA32_SUPPORT=y | ||
134 | CONFIG_COMPAT=y | ||
135 | # CONFIG_IA64_MCA_RECOVERY is not set | 133 | # CONFIG_IA64_MCA_RECOVERY is not set |
136 | # CONFIG_PERFMON is not set | 134 | # CONFIG_PERFMON is not set |
137 | CONFIG_IA64_PALINFO=m | 135 | CONFIG_IA64_PALINFO=m |
diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig
index c522edf23c62..c5a5ea9d54ae 100644
--- a/arch/ia64/configs/tiger_defconfig
+++ b/arch/ia64/configs/tiger_defconfig
@@ -154,7 +154,6 @@ CONFIG_ARCH_SPARSEMEM_ENABLE=y
154 | CONFIG_ARCH_POPULATES_NODE_MAP=y | 154 | CONFIG_ARCH_POPULATES_NODE_MAP=y |
155 | CONFIG_VIRTUAL_MEM_MAP=y | 155 | CONFIG_VIRTUAL_MEM_MAP=y |
156 | CONFIG_HOLES_IN_ZONE=y | 156 | CONFIG_HOLES_IN_ZONE=y |
157 | # CONFIG_IA32_SUPPORT is not set | ||
158 | CONFIG_IA64_MCA_RECOVERY=y | 157 | CONFIG_IA64_MCA_RECOVERY=y |
159 | CONFIG_PERFMON=y | 158 | CONFIG_PERFMON=y |
160 | CONFIG_IA64_PALINFO=y | 159 | CONFIG_IA64_PALINFO=y |
diff --git a/arch/ia64/configs/xen_domu_defconfig b/arch/ia64/configs/xen_domu_defconfig
index 0bb0714dc19d..c67eafc4bb38 100644
--- a/arch/ia64/configs/xen_domu_defconfig
+++ b/arch/ia64/configs/xen_domu_defconfig
@@ -200,8 +200,6 @@ CONFIG_ARCH_SPARSEMEM_ENABLE=y
200 | CONFIG_ARCH_POPULATES_NODE_MAP=y | 200 | CONFIG_ARCH_POPULATES_NODE_MAP=y |
201 | CONFIG_VIRTUAL_MEM_MAP=y | 201 | CONFIG_VIRTUAL_MEM_MAP=y |
202 | CONFIG_HOLES_IN_ZONE=y | 202 | CONFIG_HOLES_IN_ZONE=y |
203 | # CONFIG_IA32_SUPPORT is not set | ||
204 | # CONFIG_COMPAT_FOR_U64_ALIGNMENT is not set | ||
205 | CONFIG_IA64_MCA_RECOVERY=y | 203 | CONFIG_IA64_MCA_RECOVERY=y |
206 | CONFIG_PERFMON=y | 204 | CONFIG_PERFMON=y |
207 | CONFIG_IA64_PALINFO=y | 205 | CONFIG_IA64_PALINFO=y |
diff --git a/arch/ia64/configs/zx1_defconfig b/arch/ia64/configs/zx1_defconfig
index 514f0635dafe..3cec65b534c2 100644
--- a/arch/ia64/configs/zx1_defconfig
+++ b/arch/ia64/configs/zx1_defconfig
@@ -150,8 +150,6 @@ CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
150 | CONFIG_ARCH_POPULATES_NODE_MAP=y | 150 | CONFIG_ARCH_POPULATES_NODE_MAP=y |
151 | CONFIG_VIRTUAL_MEM_MAP=y | 151 | CONFIG_VIRTUAL_MEM_MAP=y |
152 | CONFIG_HOLES_IN_ZONE=y | 152 | CONFIG_HOLES_IN_ZONE=y |
153 | CONFIG_IA32_SUPPORT=y | ||
154 | CONFIG_COMPAT=y | ||
155 | CONFIG_IA64_MCA_RECOVERY=y | 153 | CONFIG_IA64_MCA_RECOVERY=y |
156 | CONFIG_PERFMON=y | 154 | CONFIG_PERFMON=y |
157 | CONFIG_IA64_PALINFO=y | 155 | CONFIG_IA64_PALINFO=y |
diff --git a/arch/ia64/ia32/Makefile b/arch/ia64/ia32/Makefile
deleted file mode 100644
index baad8c7699c0..000000000000
--- a/arch/ia64/ia32/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
1 | # | ||
2 | # Makefile for the ia32 kernel emulation subsystem. | ||
3 | # | ||
4 | |||
5 | obj-y := ia32_entry.o sys_ia32.o ia32_signal.o \ | ||
6 | ia32_support.o ia32_traps.o binfmt_elf32.o ia32_ldt.o | ||
7 | obj-$(CONFIG_AUDIT) += audit.o | ||
8 | |||
9 | # Don't let GCC uses f16-f31 so that save_ia32_fpstate_live() and | ||
10 | # restore_ia32_fpstate_live() can be sure the live register contain user-level state. | ||
11 | CFLAGS_ia32_signal.o += -mfixed-range=f16-f31 | ||
diff --git a/arch/ia64/ia32/audit.c b/arch/ia64/ia32/audit.c
deleted file mode 100644
index 5c93ddd1e42d..000000000000
--- a/arch/ia64/ia32/audit.c
+++ /dev/null
@@ -1,42 +0,0 @@
1 | #include "../../x86/include/asm/unistd_32.h" | ||
2 | |||
3 | unsigned ia32_dir_class[] = { | ||
4 | #include <asm-generic/audit_dir_write.h> | ||
5 | ~0U | ||
6 | }; | ||
7 | |||
8 | unsigned ia32_chattr_class[] = { | ||
9 | #include <asm-generic/audit_change_attr.h> | ||
10 | ~0U | ||
11 | }; | ||
12 | |||
13 | unsigned ia32_write_class[] = { | ||
14 | #include <asm-generic/audit_write.h> | ||
15 | ~0U | ||
16 | }; | ||
17 | |||
18 | unsigned ia32_read_class[] = { | ||
19 | #include <asm-generic/audit_read.h> | ||
20 | ~0U | ||
21 | }; | ||
22 | |||
23 | unsigned ia32_signal_class[] = { | ||
24 | #include <asm-generic/audit_signal.h> | ||
25 | ~0U | ||
26 | }; | ||
27 | |||
28 | int ia32_classify_syscall(unsigned syscall) | ||
29 | { | ||
30 | switch(syscall) { | ||
31 | case __NR_open: | ||
32 | return 2; | ||
33 | case __NR_openat: | ||
34 | return 3; | ||
35 | case __NR_socketcall: | ||
36 | return 4; | ||
37 | case __NR_execve: | ||
38 | return 5; | ||
39 | default: | ||
40 | return 1; | ||
41 | } | ||
42 | } | ||
diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
deleted file mode 100644
index c69552bf893e..000000000000
--- a/arch/ia64/ia32/binfmt_elf32.c
+++ /dev/null
@@ -1,245 +0,0 @@
1 | /* | ||
2 | * IA-32 ELF support. | ||
3 | * | ||
4 | * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com> | ||
5 | * Copyright (C) 2001 Hewlett-Packard Co | ||
6 | * David Mosberger-Tang <davidm@hpl.hp.com> | ||
7 | * | ||
8 | * 06/16/00 A. Mallick initialize csd/ssd/tssd/cflg for ia32_load_state | ||
9 | * 04/13/01 D. Mosberger dropped saving tssd in ar.k1---it's not needed | ||
10 | * 09/14/01 D. Mosberger fixed memory management for gdt/tss page | ||
11 | */ | ||
12 | |||
13 | #include <linux/types.h> | ||
14 | #include <linux/mm.h> | ||
15 | #include <linux/security.h> | ||
16 | |||
17 | #include <asm/param.h> | ||
18 | #include <asm/signal.h> | ||
19 | |||
20 | #include "ia32priv.h" | ||
21 | #include "elfcore32.h" | ||
22 | |||
23 | /* Override some function names */ | ||
24 | #undef start_thread | ||
25 | #define start_thread ia32_start_thread | ||
26 | #define elf_format elf32_format | ||
27 | #define init_elf_binfmt init_elf32_binfmt | ||
28 | #define exit_elf_binfmt exit_elf32_binfmt | ||
29 | |||
30 | #undef CLOCKS_PER_SEC | ||
31 | #define CLOCKS_PER_SEC IA32_CLOCKS_PER_SEC | ||
32 | |||
33 | extern void ia64_elf32_init (struct pt_regs *regs); | ||
34 | |||
35 | static void elf32_set_personality (void); | ||
36 | |||
37 | static unsigned long __attribute ((unused)) | ||
38 | randomize_stack_top(unsigned long stack_top); | ||
39 | |||
40 | #define setup_arg_pages(bprm,tos,exec) ia32_setup_arg_pages(bprm,exec) | ||
41 | #define elf_map elf32_map | ||
42 | |||
43 | #undef SET_PERSONALITY | ||
44 | #define SET_PERSONALITY(ex) elf32_set_personality() | ||
45 | |||
46 | #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack)) | ||
47 | |||
48 | /* Ugly but avoids duplication */ | ||
49 | #include "../../../fs/binfmt_elf.c" | ||
50 | |||
51 | extern struct page *ia32_shared_page[]; | ||
52 | extern unsigned long *ia32_gdt; | ||
53 | extern struct page *ia32_gate_page; | ||
54 | |||
55 | int | ||
56 | ia32_install_shared_page (struct vm_area_struct *vma, struct vm_fault *vmf) | ||
57 | { | ||
58 | vmf->page = ia32_shared_page[smp_processor_id()]; | ||
59 | get_page(vmf->page); | ||
60 | return 0; | ||
61 | } | ||
62 | |||
63 | int | ||
64 | ia32_install_gate_page (struct vm_area_struct *vma, struct vm_fault *vmf) | ||
65 | { | ||
66 | vmf->page = ia32_gate_page; | ||
67 | get_page(vmf->page); | ||
68 | return 0; | ||
69 | } | ||
70 | |||
71 | |||
72 | static const struct vm_operations_struct ia32_shared_page_vm_ops = { | ||
73 | .fault = ia32_install_shared_page | ||
74 | }; | ||
75 | |||
76 | static const struct vm_operations_struct ia32_gate_page_vm_ops = { | ||
77 | .fault = ia32_install_gate_page | ||
78 | }; | ||
79 | |||
80 | void | ||
81 | ia64_elf32_init (struct pt_regs *regs) | ||
82 | { | ||
83 | struct vm_area_struct *vma; | ||
84 | |||
85 | /* | ||
86 | * Map GDT below 4GB, where the processor can find it. We need to map | ||
87 | * it with privilege level 3 because the IVE uses non-privileged accesses to these | ||
88 | * tables. IA-32 segmentation is used to protect against IA-32 accesses to them. | ||
89 | */ | ||
90 | vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); | ||
91 | if (vma) { | ||
92 | vma->vm_mm = current->mm; | ||
93 | vma->vm_start = IA32_GDT_OFFSET; | ||
94 | vma->vm_end = vma->vm_start + PAGE_SIZE; | ||
95 | vma->vm_page_prot = PAGE_SHARED; | ||
96 | vma->vm_flags = VM_READ|VM_MAYREAD|VM_RESERVED; | ||
97 | vma->vm_ops = &ia32_shared_page_vm_ops; | ||
98 | down_write(&current->mm->mmap_sem); | ||
99 | { | ||
100 | if (insert_vm_struct(current->mm, vma)) { | ||
101 | kmem_cache_free(vm_area_cachep, vma); | ||
102 | up_write(&current->mm->mmap_sem); | ||
103 | BUG(); | ||
104 | } | ||
105 | } | ||
106 | up_write(&current->mm->mmap_sem); | ||
107 | } | ||
108 | |||
109 | /* | ||
110 | * When user stack is not executable, push sigreturn code to stack makes | ||
111 | * segmentation fault raised when returning to kernel. So now sigreturn | ||
112 | * code is locked in specific gate page, which is pointed by pretcode | ||
113 | * when setup_frame_ia32 | ||
114 | */ | ||
115 | vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); | ||
116 | if (vma) { | ||
117 | vma->vm_mm = current->mm; | ||
118 | vma->vm_start = IA32_GATE_OFFSET; | ||
119 | vma->vm_end = vma->vm_start + PAGE_SIZE; | ||
120 | vma->vm_page_prot = PAGE_COPY_EXEC; | ||
121 | vma->vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | ||
122 | | VM_MAYEXEC | VM_RESERVED; | ||
123 | vma->vm_ops = &ia32_gate_page_vm_ops; | ||
124 | down_write(&current->mm->mmap_sem); | ||
125 | { | ||
126 | if (insert_vm_struct(current->mm, vma)) { | ||
127 | kmem_cache_free(vm_area_cachep, vma); | ||
128 | up_write(&current->mm->mmap_sem); | ||
129 | BUG(); | ||
130 | } | ||
131 | } | ||
132 | up_write(&current->mm->mmap_sem); | ||
133 | } | ||
134 | |||
135 | /* | ||
136 | * Install LDT as anonymous memory. This gives us all-zero segment descriptors | ||
137 | * until a task modifies them via modify_ldt(). | ||
138 | */ | ||
139 | vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); | ||
140 | if (vma) { | ||
141 | vma->vm_mm = current->mm; | ||
142 | vma->vm_start = IA32_LDT_OFFSET; | ||
143 | vma->vm_end = vma->vm_start + PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE); | ||
144 | vma->vm_page_prot = PAGE_SHARED; | ||
145 | vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE; | ||
146 | down_write(&current->mm->mmap_sem); | ||
147 | { | ||
148 | if (insert_vm_struct(current->mm, vma)) { | ||
149 | kmem_cache_free(vm_area_cachep, vma); | ||
150 | up_write(&current->mm->mmap_sem); | ||
151 | BUG(); | ||
152 | } | ||
153 | } | ||
154 | up_write(&current->mm->mmap_sem); | ||
155 | } | ||
156 | |||
157 | ia64_psr(regs)->ac = 0; /* turn off alignment checking */ | ||
158 | regs->loadrs = 0; | ||
159 | /* | ||
160 | * According to the ABI %edx points to an `atexit' handler. Since we don't have | ||
161 | * one we'll set it to 0 and initialize all the other registers just to make | ||
162 | * things more deterministic, ala the i386 implementation. | ||
163 | */ | ||
164 | regs->r8 = 0; /* %eax */ | ||
165 | regs->r11 = 0; /* %ebx */ | ||
166 | regs->r9 = 0; /* %ecx */ | ||
167 | regs->r10 = 0; /* %edx */ | ||
168 | regs->r13 = 0; /* %ebp */ | ||
169 | regs->r14 = 0; /* %esi */ | ||
170 | regs->r15 = 0; /* %edi */ | ||
171 | |||
172 | current->thread.eflag = IA32_EFLAG; | ||
173 | current->thread.fsr = IA32_FSR_DEFAULT; | ||
174 | current->thread.fcr = IA32_FCR_DEFAULT; | ||
175 | current->thread.fir = 0; | ||
176 | current->thread.fdr = 0; | ||
177 | |||
178 | /* | ||
179 | * Setup GDTD. Note: GDTD is the descrambled version of the pseudo-descriptor | ||
180 | * format defined by Figure 3-11 "Pseudo-Descriptor Format" in the IA-32 | ||
181 | * architecture manual. Also note that the only fields that are not ignored are | ||
182 | * `base', `limit', 'G', `P' (must be 1) and `S' (must be 0). | ||
183 | */ | ||
184 | regs->r31 = IA32_SEG_UNSCRAMBLE(IA32_SEG_DESCRIPTOR(IA32_GDT_OFFSET, IA32_PAGE_SIZE - 1, | ||
185 | 0, 0, 0, 1, 0, 0, 0)); | ||
186 | /* Setup the segment selectors */ | ||
187 | regs->r16 = (__USER_DS << 16) | __USER_DS; /* ES == DS, GS, FS are zero */ | ||
188 | regs->r17 = (__USER_DS << 16) | __USER_CS; /* SS, CS; ia32_load_state() sets TSS and LDT */ | ||
189 | |||
190 | ia32_load_segment_descriptors(current); | ||
191 | ia32_load_state(current); | ||
192 | } | ||
193 | |||
194 | /* | ||
195 | * Undo the override of setup_arg_pages() without this ia32_setup_arg_pages() | ||
196 | * will suffer infinite self recursion. | ||
197 | */ | ||
198 | #undef setup_arg_pages | ||
199 | |||
200 | int | ||
201 | ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack) | ||
202 | { | ||
203 | int ret; | ||
204 | |||
205 | ret = setup_arg_pages(bprm, IA32_STACK_TOP, executable_stack); | ||
206 | if (!ret) { | ||
207 | /* | ||
208 | * Can't do it in ia64_elf32_init(). Needs to be done before | ||
209 | * calls to elf32_map() | ||
210 | */ | ||
211 | current->thread.ppl = ia32_init_pp_list(); | ||
212 | } | ||
213 | |||
214 | return ret; | ||
215 | } | ||
216 | |||
217 | static void | ||
218 | elf32_set_personality (void) | ||
219 | { | ||
220 | set_personality(PER_LINUX32); | ||
221 | current->thread.map_base = IA32_PAGE_OFFSET/3; | ||
222 | } | ||
223 | |||
224 | static unsigned long | ||
225 | elf32_map(struct file *filep, unsigned long addr, struct elf_phdr *eppnt, | ||
226 | int prot, int type, unsigned long unused) | ||
227 | { | ||
228 | unsigned long pgoff = (eppnt->p_vaddr) & ~IA32_PAGE_MASK; | ||
229 | |||
230 | return ia32_do_mmap(filep, (addr & IA32_PAGE_MASK), eppnt->p_filesz + pgoff, prot, type, | ||
231 | eppnt->p_offset - pgoff); | ||
232 | } | ||
233 | |||
234 | #define cpu_uses_ia32el() (local_cpu_data->family > 0x1f) | ||
235 | |||
236 | static int __init check_elf32_binfmt(void) | ||
237 | { | ||
238 | if (cpu_uses_ia32el()) { | ||
239 | printk("Please use IA-32 EL for executing IA-32 binaries\n"); | ||
240 | unregister_binfmt(&elf_format); | ||
241 | } | ||
242 | return 0; | ||
243 | } | ||
244 | |||
245 | module_init(check_elf32_binfmt) | ||
diff --git a/arch/ia64/ia32/elfcore32.h b/arch/ia64/ia32/elfcore32.h
deleted file mode 100644
index 657725742617..000000000000
--- a/arch/ia64/ia32/elfcore32.h
+++ /dev/null
@@ -1,148 +0,0 @@
1 | /* | ||
2 | * IA-32 ELF core dump support. | ||
3 | * | ||
4 | * Copyright (C) 2003 Arun Sharma <arun.sharma@intel.com> | ||
5 | * | ||
6 | * Derived from the x86_64 version | ||
7 | */ | ||
8 | #ifndef _ELFCORE32_H_ | ||
9 | #define _ELFCORE32_H_ | ||
10 | |||
11 | #include <asm/intrinsics.h> | ||
12 | #include <asm/uaccess.h> | ||
13 | |||
14 | /* Override elfcore.h */ | ||
15 | #define _LINUX_ELFCORE_H 1 | ||
16 | typedef unsigned int elf_greg_t; | ||
17 | |||
18 | #define ELF_NGREG (sizeof (struct user_regs_struct32) / sizeof(elf_greg_t)) | ||
19 | typedef elf_greg_t elf_gregset_t[ELF_NGREG]; | ||
20 | |||
21 | typedef struct ia32_user_i387_struct elf_fpregset_t; | ||
22 | typedef struct ia32_user_fxsr_struct elf_fpxregset_t; | ||
23 | |||
24 | struct elf_siginfo | ||
25 | { | ||
26 | int si_signo; /* signal number */ | ||
27 | int si_code; /* extra code */ | ||
28 | int si_errno; /* errno */ | ||
29 | }; | ||
30 | |||
31 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
32 | /* | ||
33 | * Hacks are here since types between compat_timeval (= pair of s32) and | ||
34 | * ia64-native timeval (= pair of s64) are not compatible, at least a file | ||
35 | * arch/ia64/ia32/../../../fs/binfmt_elf.c will get warnings from compiler on | ||
36 | * use of cputime_to_timeval(), which usually an alias of jiffies_to_timeval(). | ||
37 | */ | ||
38 | #define cputime_to_timeval(a,b) \ | ||
39 | do { (b)->tv_usec = 0; (b)->tv_sec = (a)/NSEC_PER_SEC; } while(0) | ||
40 | #else | ||
41 | #define jiffies_to_timeval(a,b) \ | ||
42 | do { (b)->tv_usec = 0; (b)->tv_sec = (a)/HZ; } while(0) | ||
43 | #endif | ||
44 | |||
45 | struct elf_prstatus | ||
46 | { | ||
47 | struct elf_siginfo pr_info; /* Info associated with signal */ | ||
48 | short pr_cursig; /* Current signal */ | ||
49 | unsigned int pr_sigpend; /* Set of pending signals */ | ||
50 | unsigned int pr_sighold; /* Set of held signals */ | ||
51 | pid_t pr_pid; | ||
52 | pid_t pr_ppid; | ||
53 | pid_t pr_pgrp; | ||
54 | pid_t pr_sid; | ||
55 | struct compat_timeval pr_utime; /* User time */ | ||
56 | struct compat_timeval pr_stime; /* System time */ | ||
57 | struct compat_timeval pr_cutime; /* Cumulative user time */ | ||
58 | struct compat_timeval pr_cstime; /* Cumulative system time */ | ||
59 | elf_gregset_t pr_reg; /* GP registers */ | ||
60 | int pr_fpvalid; /* True if math co-processor being used. */ | ||
61 | }; | ||
62 | |||
63 | #define ELF_PRARGSZ (80) /* Number of chars for args */ | ||
64 | |||
65 | struct elf_prpsinfo | ||
66 | { | ||
67 | char pr_state; /* numeric process state */ | ||
68 | char pr_sname; /* char for pr_state */ | ||
69 | char pr_zomb; /* zombie */ | ||
70 | char pr_nice; /* nice val */ | ||
71 | unsigned int pr_flag; /* flags */ | ||
72 | __u16 pr_uid; | ||
73 | __u16 pr_gid; | ||
74 | pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid; | ||
75 | /* Lots missing */ | ||
76 | char pr_fname[16]; /* filename of executable */ | ||
77 | char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ | ||
78 | }; | ||
79 | |||
80 | #define ELF_CORE_COPY_REGS(pr_reg, regs) \ | ||
81 | pr_reg[0] = regs->r11; \ | ||
82 | pr_reg[1] = regs->r9; \ | ||
83 | pr_reg[2] = regs->r10; \ | ||
84 | pr_reg[3] = regs->r14; \ | ||
85 | pr_reg[4] = regs->r15; \ | ||
86 | pr_reg[5] = regs->r13; \ | ||
87 | pr_reg[6] = regs->r8; \ | ||
88 | pr_reg[7] = regs->r16 & 0xffff; \ | ||
89 | pr_reg[8] = (regs->r16 >> 16) & 0xffff; \ | ||
90 | pr_reg[9] = (regs->r16 >> 32) & 0xffff; \ | ||
91 | pr_reg[10] = (regs->r16 >> 48) & 0xffff; \ | ||
92 | pr_reg[11] = regs->r1; \ | ||
93 | pr_reg[12] = regs->cr_iip; \ | ||
94 | pr_reg[13] = regs->r17 & 0xffff; \ | ||
95 | pr_reg[14] = ia64_getreg(_IA64_REG_AR_EFLAG); \ | ||
96 | pr_reg[15] = regs->r12; \ | ||
97 | pr_reg[16] = (regs->r17 >> 16) & 0xffff; | ||
98 | |||
99 | static inline void elf_core_copy_regs(elf_gregset_t *elfregs, | ||
100 | struct pt_regs *regs) | ||
101 | { | ||
102 | ELF_CORE_COPY_REGS((*elfregs), regs) | ||
103 | } | ||
104 | |||
105 | static inline int elf_core_copy_task_regs(struct task_struct *t, | ||
106 | elf_gregset_t* elfregs) | ||
107 | { | ||
108 | ELF_CORE_COPY_REGS((*elfregs), task_pt_regs(t)); | ||
109 | return 1; | ||
110 | } | ||
111 | |||
112 | static inline int | ||
113 | elf_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs, elf_fpregset_t *fpu) | ||
114 | { | ||
115 | struct ia32_user_i387_struct *fpstate = (void*)fpu; | ||
116 | mm_segment_t old_fs; | ||
117 | |||
118 | if (!tsk_used_math(tsk)) | ||
119 | return 0; | ||
120 | |||
121 | old_fs = get_fs(); | ||
122 | set_fs(KERNEL_DS); | ||
123 | save_ia32_fpstate(tsk, (struct ia32_user_i387_struct __user *) fpstate); | ||
124 | set_fs(old_fs); | ||
125 | |||
126 | return 1; | ||
127 | } | ||
128 | |||
129 | #define ELF_CORE_COPY_XFPREGS 1 | ||
130 | #define ELF_CORE_XFPREG_TYPE NT_PRXFPREG | ||
131 | static inline int | ||
132 | elf_core_copy_task_xfpregs(struct task_struct *tsk, elf_fpxregset_t *xfpu) | ||
133 | { | ||
134 | struct ia32_user_fxsr_struct *fpxstate = (void*) xfpu; | ||
135 | mm_segment_t old_fs; | ||
136 | |||
137 | if (!tsk_used_math(tsk)) | ||
138 | return 0; | ||
139 | |||
140 | old_fs = get_fs(); | ||
141 | set_fs(KERNEL_DS); | ||
142 | save_ia32_fpxstate(tsk, (struct ia32_user_fxsr_struct __user *) fpxstate); | ||
143 | set_fs(old_fs); | ||
144 | |||
145 | return 1; | ||
146 | } | ||
147 | |||
148 | #endif /* _ELFCORE32_H_ */ | ||
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
deleted file mode 100644
index 2fd7479aa216..000000000000
--- a/arch/ia64/ia32/ia32_entry.S
+++ /dev/null
@@ -1,468 +0,0 @@
1 | #include <asm/asmmacro.h> | ||
2 | #include <asm/ia32.h> | ||
3 | #include <asm/asm-offsets.h> | ||
4 | #include <asm/signal.h> | ||
5 | #include <asm/thread_info.h> | ||
6 | |||
7 | #include "../kernel/minstate.h" | ||
8 | |||
9 | /* | ||
10 | * execve() is special because in case of success, we need to | ||
11 | * setup a null register window frame (in case an IA-32 process | ||
12 | * is exec'ing an IA-64 program). | ||
13 | */ | ||
14 | ENTRY(ia32_execve) | ||
15 | .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(3) | ||
16 | alloc loc1=ar.pfs,3,2,4,0 | ||
17 | mov loc0=rp | ||
18 | .body | ||
19 | zxt4 out0=in0 // filename | ||
20 | ;; // stop bit between alloc and call | ||
21 | zxt4 out1=in1 // argv | ||
22 | zxt4 out2=in2 // envp | ||
23 | add out3=16,sp // regs | ||
24 | br.call.sptk.few rp=sys32_execve | ||
25 | 1: cmp.ge p6,p0=r8,r0 | ||
26 | mov ar.pfs=loc1 // restore ar.pfs | ||
27 | ;; | ||
28 | (p6) mov ar.pfs=r0 // clear ar.pfs in case of success | ||
29 | sxt4 r8=r8 // return 64-bit result | ||
30 | mov rp=loc0 | ||
31 | br.ret.sptk.few rp | ||
32 | END(ia32_execve) | ||
33 | |||
34 | ENTRY(ia32_clone) | ||
35 | .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5) | ||
36 | alloc r16=ar.pfs,5,2,6,0 | ||
37 | DO_SAVE_SWITCH_STACK | ||
38 | mov loc0=rp | ||
39 | mov loc1=r16 // save ar.pfs across do_fork | ||
40 | .body | ||
41 | zxt4 out1=in1 // newsp | ||
42 | mov out3=16 // stacksize (compensates for 16-byte scratch area) | ||
43 | adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = &regs | ||
44 | mov out0=in0 // out0 = clone_flags | ||
45 | zxt4 out4=in2 // out4 = parent_tidptr | ||
46 | zxt4 out5=in4 // out5 = child_tidptr | ||
47 | br.call.sptk.many rp=do_fork | ||
48 | .ret0: .restore sp | ||
49 | adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack | ||
50 | mov ar.pfs=loc1 | ||
51 | mov rp=loc0 | ||
52 | br.ret.sptk.many rp | ||
53 | END(ia32_clone) | ||
54 | |||
55 | GLOBAL_ENTRY(ia32_ret_from_clone) | ||
56 | PT_REGS_UNWIND_INFO(0) | ||
57 | { /* | ||
58 | * Some versions of gas generate bad unwind info if the first instruction of a | ||
59 | * procedure doesn't go into the first slot of a bundle. This is a workaround. | ||
60 | */ | ||
61 | nop.m 0 | ||
62 | nop.i 0 | ||
63 | /* | ||
64 | * We need to call schedule_tail() to complete the scheduling process. | ||
65 | * Called by ia64_switch_to after do_fork()->copy_thread(). r8 contains the | ||
66 | * address of the previously executing task. | ||
67 | */ | ||
68 | br.call.sptk.many rp=ia64_invoke_schedule_tail | ||
69 | } | ||
70 | .ret1: | ||
71 | adds r2=TI_FLAGS+IA64_TASK_SIZE,r13 | ||
72 | ;; | ||
73 | ld4 r2=[r2] | ||
74 | ;; | ||
75 | mov r8=0 | ||
76 | and r2=_TIF_SYSCALL_TRACEAUDIT,r2 | ||
77 | ;; | ||
78 | cmp.ne p6,p0=r2,r0 | ||
79 | (p6) br.cond.spnt .ia32_strace_check_retval | ||
80 | ;; // prevent RAW on r8 | ||
81 | END(ia32_ret_from_clone) | ||
82 | // fall through | ||
83 | GLOBAL_ENTRY(ia32_ret_from_syscall) | ||
84 | PT_REGS_UNWIND_INFO(0) | ||
85 | |||
86 | cmp.ge p6,p7=r8,r0 // syscall executed successfully? | ||
87 | adds r2=IA64_PT_REGS_R8_OFFSET+16,sp // r2 = &pt_regs.r8 | ||
88 | ;; | ||
89 | alloc r3=ar.pfs,0,0,0,0 // drop the syscall argument frame | ||
90 | st8 [r2]=r8 // store return value in slot for r8 | ||
91 | br.cond.sptk.many ia64_leave_kernel | ||
92 | END(ia32_ret_from_syscall) | ||
93 | |||
94 | // | ||
95 | // Invoke a system call, but do some tracing before and after the call. | ||
96 | // We MUST preserve the current register frame throughout this routine | ||
97 | // because some system calls (such as ia64_execve) directly | ||
98 | // manipulate ar.pfs. | ||
99 | // | ||
100 | // Input: | ||
101 | // r8 = syscall number | ||
102 | // b6 = syscall entry point | ||
103 | // | ||
104 | GLOBAL_ENTRY(ia32_trace_syscall) | ||
105 | PT_REGS_UNWIND_INFO(0) | ||
106 | mov r3=-38 | ||
107 | adds r2=IA64_PT_REGS_R8_OFFSET+16,sp | ||
108 | ;; | ||
109 | st8 [r2]=r3 // initialize return code to -ENOSYS | ||
110 | br.call.sptk.few rp=syscall_trace_enter // give parent a chance to catch syscall args | ||
111 | cmp.lt p6,p0=r8,r0 // check tracehook | ||
112 | adds r2=IA64_PT_REGS_R8_OFFSET+16,sp // r2 = &pt_regs.r8 | ||
113 | ;; | ||
114 | (p6) st8.spill [r2]=r8 // store return value in slot for r8 | ||
115 | (p6) br.spnt.few .ret4 | ||
116 | .ret2: // Need to reload arguments (they may be changed by the tracing process) | ||
117 | adds r2=IA64_PT_REGS_R1_OFFSET+16,sp // r2 = &pt_regs.r1 | ||
118 | adds r3=IA64_PT_REGS_R13_OFFSET+16,sp // r3 = &pt_regs.r13 | ||
119 | mov r15=IA32_NR_syscalls | ||
120 | ;; | ||
121 | ld4 r8=[r2],IA64_PT_REGS_R9_OFFSET-IA64_PT_REGS_R1_OFFSET | ||
122 | movl r16=ia32_syscall_table | ||
123 | ;; | ||
124 | ld4 r33=[r2],8 // r9 == ecx | ||
125 | ld4 r37=[r3],16 // r13 == ebp | ||
126 | cmp.ltu.unc p6,p7=r8,r15 | ||
127 | ;; | ||
128 | ld4 r34=[r2],8 // r10 == edx | ||
129 | ld4 r36=[r3],8 // r15 == edi | ||
130 | (p6) shladd r16=r8,3,r16 // force ni_syscall if not valid syscall number | ||
131 | ;; | ||
132 | ld8 r16=[r16] | ||
133 | ;; | ||
134 | ld4 r32=[r2],8 // r11 == ebx | ||
135 | mov b6=r16 | ||
136 | ld4 r35=[r3],8 // r14 == esi | ||
137 | br.call.sptk.few rp=b6 // do the syscall | ||
138 | .ia32_strace_check_retval: | ||
139 | cmp.lt p6,p0=r8,r0 // syscall failed? | ||
140 | adds r2=IA64_PT_REGS_R8_OFFSET+16,sp // r2 = &pt_regs.r8 | ||
141 | ;; | ||
142 | st8.spill [r2]=r8 // store return value in slot for r8 | ||
143 | br.call.sptk.few rp=syscall_trace_leave // give parent a chance to catch return value | ||
144 | .ret4: alloc r2=ar.pfs,0,0,0,0 // drop the syscall argument frame | ||
145 | br.cond.sptk.many ia64_leave_kernel | ||
146 | END(ia32_trace_syscall) | ||
147 | |||
148 | GLOBAL_ENTRY(sys32_vfork) | ||
149 | alloc r16=ar.pfs,2,2,4,0;; | ||
150 | mov out0=IA64_CLONE_VFORK|IA64_CLONE_VM|SIGCHLD // out0 = clone_flags | ||
151 | br.cond.sptk.few .fork1 // do the work | ||
152 | END(sys32_vfork) | ||
153 | |||
154 | GLOBAL_ENTRY(sys32_fork) | ||
155 | .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2) | ||
156 | alloc r16=ar.pfs,2,2,4,0 | ||
157 | mov out0=SIGCHLD // out0 = clone_flags | ||
158 | ;; | ||
159 | .fork1: | ||
160 | mov loc0=rp | ||
161 | mov loc1=r16 // save ar.pfs across do_fork | ||
162 | DO_SAVE_SWITCH_STACK | ||
163 | |||
164 | .body | ||
165 | |||
166 | mov out1=0 | ||
167 | mov out3=0 | ||
168 | adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = &regs | ||
169 | br.call.sptk.few rp=do_fork | ||
170 | .ret5: .restore sp | ||
171 | adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack | ||
172 | mov ar.pfs=loc1 | ||
173 | mov rp=loc0 | ||
174 | br.ret.sptk.many rp | ||
175 | END(sys32_fork) | ||
176 | |||
177 | .rodata | ||
178 | .align 8 | ||
179 | .globl ia32_syscall_table | ||
180 | ia32_syscall_table: | ||
181 | data8 sys_ni_syscall /* 0 - old "setup()" system call*/ | ||
182 | data8 sys_exit | ||
183 | data8 sys32_fork | ||
184 | data8 sys_read | ||
185 | data8 sys_write | ||
186 | data8 compat_sys_open /* 5 */ | ||
187 | data8 sys_close | ||
188 | data8 sys32_waitpid | ||
189 | data8 sys_creat | ||
190 | data8 sys_link | ||
191 | data8 sys_unlink /* 10 */ | ||
192 | data8 ia32_execve | ||
193 | data8 sys_chdir | ||
194 | data8 compat_sys_time | ||
195 | data8 sys_mknod | ||
196 | data8 sys_chmod /* 15 */ | ||
197 | data8 sys_lchown /* 16-bit version */ | ||
198 | data8 sys_ni_syscall /* old break syscall holder */ | ||
199 | data8 sys_ni_syscall | ||
200 | data8 sys32_lseek | ||
201 | data8 sys_getpid /* 20 */ | ||
202 | data8 compat_sys_mount | ||
203 | data8 sys_oldumount | ||
204 | data8 sys_setuid /* 16-bit version */ | ||
205 | data8 sys_getuid /* 16-bit version */ | ||
206 | data8 compat_sys_stime /* 25 */ | ||
207 | data8 compat_sys_ptrace | ||
208 | data8 sys32_alarm | ||
209 | data8 sys_ni_syscall | ||
210 | data8 sys_pause | ||
211 | data8 compat_sys_utime /* 30 */ | ||
212 | data8 sys_ni_syscall /* old stty syscall holder */ | ||
213 | data8 sys_ni_syscall /* old gtty syscall holder */ | ||
214 | data8 sys_access | ||
215 | data8 sys_nice | ||
216 | data8 sys_ni_syscall /* 35 */ /* old ftime syscall holder */ | ||
217 | data8 sys_sync | ||
218 | data8 sys_kill | ||
219 | data8 sys_rename | ||
220 | data8 sys_mkdir | ||
221 | data8 sys_rmdir /* 40 */ | ||
222 | data8 sys_dup | ||
223 | data8 sys_ia64_pipe | ||
224 | data8 compat_sys_times | ||
225 | data8 sys_ni_syscall /* old prof syscall holder */ | ||
226 | data8 sys32_brk /* 45 */ | ||
227 | data8 sys_setgid /* 16-bit version */ | ||
228 | data8 sys_getgid /* 16-bit version */ | ||
229 | data8 sys32_signal | ||
230 | data8 sys_geteuid /* 16-bit version */ | ||
231 | data8 sys_getegid /* 16-bit version */ /* 50 */ | ||
232 | data8 sys_acct | ||
233 | data8 sys_umount /* recycled never used phys() */ | ||
234 | data8 sys_ni_syscall /* old lock syscall holder */ | ||
235 | data8 compat_sys_ioctl | ||
236 | data8 compat_sys_fcntl /* 55 */ | ||
237 | data8 sys_ni_syscall /* old mpx syscall holder */ | ||
238 | data8 sys_setpgid | ||
239 | data8 sys_ni_syscall /* old ulimit syscall holder */ | ||
240 | data8 sys_ni_syscall | ||
241 | data8 sys_umask /* 60 */ | ||
242 | data8 sys_chroot | ||
243 | data8 compat_sys_ustat | ||
244 | data8 sys_dup2 | ||
245 | data8 sys_getppid | ||
246 | data8 sys_getpgrp /* 65 */ | ||
247 | data8 sys_setsid | ||
248 | data8 sys32_sigaction | ||
249 | data8 sys_ni_syscall | ||
250 | data8 sys_ni_syscall | ||
251 | data8 sys_setreuid /* 16-bit version */ /* 70 */ | ||
252 | data8 sys_setregid /* 16-bit version */ | ||
253 | data8 sys32_sigsuspend | ||
254 | data8 compat_sys_sigpending | ||
255 | data8 sys_sethostname | ||
256 | data8 compat_sys_setrlimit /* 75 */ | ||
257 | data8 compat_sys_old_getrlimit | ||
258 | data8 compat_sys_getrusage | ||
259 | data8 compat_sys_gettimeofday | ||
260 | data8 compat_sys_settimeofday | ||
261 | data8 sys32_getgroups16 /* 80 */ | ||
262 | data8 sys32_setgroups16 | ||
263 | data8 sys32_old_select | ||
264 | data8 sys_symlink | ||
265 | data8 sys_ni_syscall | ||
266 | data8 sys_readlink /* 85 */ | ||
267 | data8 sys_uselib | ||
268 | data8 sys_swapon | ||
269 | data8 sys_reboot | ||
270 | data8 compat_sys_old_readdir | ||
271 | data8 sys32_mmap /* 90 */ | ||
272 | data8 sys32_munmap | ||
273 | data8 sys_truncate | ||
274 | data8 sys_ftruncate | ||
275 | data8 sys_fchmod | ||
276 | data8 sys_fchown /* 16-bit version */ /* 95 */ | ||
277 | data8 sys_getpriority | ||
278 | data8 sys_setpriority | ||
279 | data8 sys_ni_syscall /* old profil syscall holder */ | ||
280 | data8 compat_sys_statfs | ||
281 | data8 compat_sys_fstatfs /* 100 */ | ||
282 | data8 sys_ni_syscall /* ioperm */ | ||
283 | data8 compat_sys_socketcall | ||
284 | data8 sys_syslog | ||
285 | data8 compat_sys_setitimer | ||
286 | data8 compat_sys_getitimer /* 105 */ | ||
287 | data8 compat_sys_newstat | ||
288 | data8 compat_sys_newlstat | ||
289 | data8 compat_sys_newfstat | ||
290 | data8 sys_ni_syscall | ||
291 | data8 sys_ni_syscall /* iopl */ /* 110 */ | ||
292 | data8 sys_vhangup | ||
293 | data8 sys_ni_syscall /* used to be sys_idle */ | ||
294 | data8 sys_ni_syscall | ||
295 | data8 compat_sys_wait4 | ||
296 | data8 sys_swapoff /* 115 */ | ||
297 | data8 compat_sys_sysinfo | ||
298 | data8 sys32_ipc | ||
299 | data8 sys_fsync | ||
300 | data8 sys32_sigreturn | ||
301 | data8 ia32_clone /* 120 */ | ||
302 | data8 sys_setdomainname | ||
303 | data8 sys32_newuname | ||
304 | data8 sys32_modify_ldt | ||
305 | data8 compat_sys_adjtimex | ||
306 | data8 sys32_mprotect /* 125 */ | ||
307 | data8 compat_sys_sigprocmask | ||
308 | data8 sys_ni_syscall /* create_module */ | ||
309 | data8 sys_ni_syscall /* init_module */ | ||
310 | data8 sys_ni_syscall /* delete_module */ | ||
311 | data8 sys_ni_syscall /* get_kernel_syms */ /* 130 */ | ||
312 | data8 sys32_quotactl | ||
313 | data8 sys_getpgid | ||
314 | data8 sys_fchdir | ||
315 | data8 sys_ni_syscall /* sys_bdflush */ | ||
316 | data8 sys_sysfs /* 135 */ | ||
317 | data8 sys32_personality | ||
318 | data8 sys_ni_syscall /* for afs_syscall */ | ||
319 | data8 sys_setfsuid /* 16-bit version */ | ||
320 | data8 sys_setfsgid /* 16-bit version */ | ||
321 | data8 sys_llseek /* 140 */ | ||
322 | data8 compat_sys_getdents | ||
323 | data8 compat_sys_select | ||
324 | data8 sys_flock | ||
325 | data8 sys32_msync | ||
326 | data8 compat_sys_readv /* 145 */ | ||
327 | data8 compat_sys_writev | ||
328 | data8 sys_getsid | ||
329 | data8 sys_fdatasync | ||
330 | data8 compat_sys_sysctl | ||
331 | data8 sys_mlock /* 150 */ | ||
332 | data8 sys_munlock | ||
333 | data8 sys_mlockall | ||
334 | data8 sys_munlockall | ||
335 | data8 sys_sched_setparam | ||
336 | data8 sys_sched_getparam /* 155 */ | ||
337 | data8 sys_sched_setscheduler | ||
338 | data8 sys_sched_getscheduler | ||
339 | data8 sys_sched_yield | ||
340 | data8 sys_sched_get_priority_max | ||
341 | data8 sys_sched_get_priority_min /* 160 */ | ||
342 | data8 sys32_sched_rr_get_interval | ||
343 | data8 compat_sys_nanosleep | ||
344 | data8 sys32_mremap | ||
345 | data8 sys_setresuid /* 16-bit version */ | ||
346 | data8 sys32_getresuid16 /* 16-bit version */ /* 165 */ | ||
347 | data8 sys_ni_syscall /* vm86 */ | ||
348 | data8 sys_ni_syscall /* sys_query_module */ | ||
349 | data8 sys_poll | ||
350 | data8 sys_ni_syscall /* nfsservctl */ | ||
351 | data8 sys_setresgid /* 170 */ | ||
352 | data8 sys32_getresgid16 | ||
353 | data8 sys_prctl | ||
354 | data8 sys32_rt_sigreturn | ||
355 | data8 sys32_rt_sigaction | ||
356 | data8 sys32_rt_sigprocmask /* 175 */ | ||
357 | data8 sys_rt_sigpending | ||
358 | data8 compat_sys_rt_sigtimedwait | ||
359 | data8 sys32_rt_sigqueueinfo | ||
360 | data8 compat_sys_rt_sigsuspend | ||
361 | data8 sys32_pread /* 180 */ | ||
362 | data8 sys32_pwrite | ||
363 | data8 sys_chown /* 16-bit version */ | ||
364 | data8 sys_getcwd | ||
365 | data8 sys_capget | ||
366 | data8 sys_capset /* 185 */ | ||
367 | data8 sys32_sigaltstack | ||
368 | data8 sys32_sendfile | ||
369 | data8 sys_ni_syscall /* streams1 */ | ||
370 | data8 sys_ni_syscall /* streams2 */ | ||
371 | data8 sys32_vfork /* 190 */ | ||
372 | data8 compat_sys_getrlimit | ||
373 | data8 sys32_mmap2 | ||
374 | data8 sys32_truncate64 | ||
375 | data8 sys32_ftruncate64 | ||
376 | data8 sys32_stat64 /* 195 */ | ||
377 | data8 sys32_lstat64 | ||
378 | data8 sys32_fstat64 | ||
379 | data8 sys_lchown | ||
380 | data8 sys_getuid | ||
381 | data8 sys_getgid /* 200 */ | ||
382 | data8 sys_geteuid | ||
383 | data8 sys_getegid | ||
384 | data8 sys_setreuid | ||
385 | data8 sys_setregid | ||
386 | data8 sys_getgroups /* 205 */ | ||
387 | data8 sys_setgroups | ||
388 | data8 sys_fchown | ||
389 | data8 sys_setresuid | ||
390 | data8 sys_getresuid | ||
391 | data8 sys_setresgid /* 210 */ | ||
392 | data8 sys_getresgid | ||
393 | data8 sys_chown | ||
394 | data8 sys_setuid | ||
395 | data8 sys_setgid | ||
396 | data8 sys_setfsuid /* 215 */ | ||
397 | data8 sys_setfsgid | ||
398 | data8 sys_pivot_root | ||
399 | data8 sys_mincore | ||
400 | data8 sys_madvise | ||
401 | data8 compat_sys_getdents64 /* 220 */ | ||
402 | data8 compat_sys_fcntl64 | ||
403 | data8 sys_ni_syscall /* reserved for TUX */ | ||
404 | data8 sys_ni_syscall /* reserved for Security */ | ||
405 | data8 sys_gettid | ||
406 | data8 sys_readahead /* 225 */ | ||
407 | data8 sys_setxattr | ||
408 | data8 sys_lsetxattr | ||
409 | data8 sys_fsetxattr | ||
410 | data8 sys_getxattr | ||
411 | data8 sys_lgetxattr /* 230 */ | ||
412 | data8 sys_fgetxattr | ||
413 | data8 sys_listxattr | ||
414 | data8 sys_llistxattr | ||
415 | data8 sys_flistxattr | ||
416 | data8 sys_removexattr /* 235 */ | ||
417 | data8 sys_lremovexattr | ||
418 | data8 sys_fremovexattr | ||
419 | data8 sys_tkill | ||
420 | data8 sys_sendfile64 | ||
421 | data8 compat_sys_futex /* 240 */ | ||
422 | data8 compat_sys_sched_setaffinity | ||
423 | data8 compat_sys_sched_getaffinity | ||
424 | data8 sys32_set_thread_area | ||
425 | data8 sys32_get_thread_area | ||
426 | data8 compat_sys_io_setup /* 245 */ | ||
427 | data8 sys_io_destroy | ||
428 | data8 compat_sys_io_getevents | ||
429 | data8 compat_sys_io_submit | ||
430 | data8 sys_io_cancel | ||
431 | data8 sys_fadvise64 /* 250 */ | ||
432 | data8 sys_ni_syscall | ||
433 | data8 sys_exit_group | ||
434 | data8 sys_lookup_dcookie | ||
435 | data8 sys_epoll_create | ||
436 | data8 sys32_epoll_ctl /* 255 */ | ||
437 | data8 sys32_epoll_wait | ||
438 | data8 sys_remap_file_pages | ||
439 | data8 sys_set_tid_address | ||
440 | data8 compat_sys_timer_create | ||
441 | data8 compat_sys_timer_settime /* 260 */ | ||
442 | data8 compat_sys_timer_gettime | ||
443 | data8 sys_timer_getoverrun | ||
444 | data8 sys_timer_delete | ||
445 | data8 compat_sys_clock_settime | ||
446 | data8 compat_sys_clock_gettime /* 265 */ | ||
447 | data8 compat_sys_clock_getres | ||
448 | data8 compat_sys_clock_nanosleep | ||
449 | data8 compat_sys_statfs64 | ||
450 | data8 compat_sys_fstatfs64 | ||
451 | data8 sys_tgkill /* 270 */ | ||
452 | data8 compat_sys_utimes | ||
453 | data8 sys32_fadvise64_64 | ||
454 | data8 sys_ni_syscall | ||
455 | data8 sys_ni_syscall | ||
456 | data8 sys_ni_syscall /* 275 */ | ||
457 | data8 sys_ni_syscall | ||
458 | data8 compat_sys_mq_open | ||
459 | data8 sys_mq_unlink | ||
460 | data8 compat_sys_mq_timedsend | ||
461 | data8 compat_sys_mq_timedreceive /* 280 */ | ||
462 | data8 compat_sys_mq_notify | ||
463 | data8 compat_sys_mq_getsetattr | ||
464 | data8 sys_ni_syscall /* reserved for kexec */ | ||
465 | data8 compat_sys_waitid | ||
466 | |||
467 | // guard against failures to increase IA32_NR_syscalls | ||
468 | .org ia32_syscall_table + 8*IA32_NR_syscalls | ||
diff --git a/arch/ia64/ia32/ia32_ldt.c b/arch/ia64/ia32/ia32_ldt.c
deleted file mode 100644
index 16d51c146849..000000000000
--- a/arch/ia64/ia32/ia32_ldt.c
+++ /dev/null
@@ -1,146 +0,0 @@
1 | /* | ||
2 | * Copyright (C) 2001, 2004 Hewlett-Packard Co | ||
3 | * David Mosberger-Tang <davidm@hpl.hp.com> | ||
4 | * | ||
5 | * Adapted from arch/i386/kernel/ldt.c | ||
6 | */ | ||
7 | |||
8 | #include <linux/errno.h> | ||
9 | #include <linux/sched.h> | ||
10 | #include <linux/string.h> | ||
11 | #include <linux/mm.h> | ||
12 | #include <linux/smp.h> | ||
13 | #include <linux/vmalloc.h> | ||
14 | |||
15 | #include <asm/uaccess.h> | ||
16 | |||
17 | #include "ia32priv.h" | ||
18 | |||
19 | /* | ||
20 | * read_ldt() is not really atomic - this is not a problem since synchronization of reads | ||
21 | * and writes done to the LDT has to be assured by user-space anyway. Writes are atomic, | ||
22 | * to protect the security checks done on new descriptors. | ||
23 | */ | ||
24 | static int | ||
25 | read_ldt (void __user *ptr, unsigned long bytecount) | ||
26 | { | ||
27 | unsigned long bytes_left, n; | ||
28 | char __user *src, *dst; | ||
29 | char buf[256]; /* temporary buffer (don't overflow kernel stack!) */ | ||
30 | |||
31 | if (bytecount > IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE) | ||
32 | bytecount = IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE; | ||
33 | |||
34 | bytes_left = bytecount; | ||
35 | |||
36 | src = (void __user *) IA32_LDT_OFFSET; | ||
37 | dst = ptr; | ||
38 | |||
39 | while (bytes_left) { | ||
40 | n = sizeof(buf); | ||
41 | if (n > bytes_left) | ||
42 | n = bytes_left; | ||
43 | |||
44 | /* | ||
45 | * We know we're reading valid memory, but we still must guard against | ||
46 | * running out of memory. | ||
47 | */ | ||
48 | if (__copy_from_user(buf, src, n)) | ||
49 | return -EFAULT; | ||
50 | |||
51 | if (copy_to_user(dst, buf, n)) | ||
52 | return -EFAULT; | ||
53 | |||
54 | src += n; | ||
55 | dst += n; | ||
56 | bytes_left -= n; | ||
57 | } | ||
58 | return bytecount; | ||
59 | } | ||
60 | |||
61 | static int | ||
62 | read_default_ldt (void __user * ptr, unsigned long bytecount) | ||
63 | { | ||
64 | unsigned long size; | ||
65 | int err; | ||
66 | |||
67 | /* XXX fix me: should return equivalent of default_ldt[0] */ | ||
68 | err = 0; | ||
69 | size = 8; | ||
70 | if (size > bytecount) | ||
71 | size = bytecount; | ||
72 | |||
73 | err = size; | ||
74 | if (clear_user(ptr, size)) | ||
75 | err = -EFAULT; | ||
76 | |||
77 | return err; | ||
78 | } | ||
79 | |||
80 | static int | ||
81 | write_ldt (void __user * ptr, unsigned long bytecount, int oldmode) | ||
82 | { | ||
83 | struct ia32_user_desc ldt_info; | ||
84 | __u64 entry; | ||
85 | int ret; | ||
86 | |||
87 | if (bytecount != sizeof(ldt_info)) | ||
88 | return -EINVAL; | ||
89 | if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info))) | ||
90 | return -EFAULT; | ||
91 | |||
92 | if (ldt_info.entry_number >= IA32_LDT_ENTRIES) | ||
93 | return -EINVAL; | ||
94 | if (ldt_info.contents == 3) { | ||
95 | if (oldmode) | ||
96 | return -EINVAL; | ||
97 | if (ldt_info.seg_not_present == 0) | ||
98 | return -EINVAL; | ||
99 | } | ||
100 | |||
101 | if (ldt_info.base_addr == 0 && ldt_info.limit == 0 | ||
102 | && (oldmode || (ldt_info.contents == 0 && ldt_info.read_exec_only == 1 | ||
103 | && ldt_info.seg_32bit == 0 && ldt_info.limit_in_pages == 0 | ||
104 | && ldt_info.seg_not_present == 1 && ldt_info.useable == 0))) | ||
105 | /* allow LDTs to be cleared by the user */ | ||
106 | entry = 0; | ||
107 | else | ||
108 | /* we must set the "Accessed" bit as IVE doesn't emulate it */ | ||
109 | entry = IA32_SEG_DESCRIPTOR(ldt_info.base_addr, ldt_info.limit, | ||
110 | (((ldt_info.read_exec_only ^ 1) << 1) | ||
111 | | (ldt_info.contents << 2)) | 1, | ||
112 | 1, 3, ldt_info.seg_not_present ^ 1, | ||
113 | (oldmode ? 0 : ldt_info.useable), | ||
114 | ldt_info.seg_32bit, | ||
115 | ldt_info.limit_in_pages); | ||
116 | /* | ||
117 | * Install the new entry. We know we're accessing valid (mapped) user-level | ||
118 | * memory, but we still need to guard against out-of-memory, hence we must use | ||
119 | * put_user(). | ||
120 | */ | ||
121 | ret = __put_user(entry, (__u64 __user *) IA32_LDT_OFFSET + ldt_info.entry_number); | ||
122 | ia32_load_segment_descriptors(current); | ||
123 | return ret; | ||
124 | } | ||
125 | |||
126 | asmlinkage int | ||
127 | sys32_modify_ldt (int func, unsigned int ptr, unsigned int bytecount) | ||
128 | { | ||
129 | int ret = -ENOSYS; | ||
130 | |||
131 | switch (func) { | ||
132 | case 0: | ||
133 | ret = read_ldt(compat_ptr(ptr), bytecount); | ||
134 | break; | ||
135 | case 1: | ||
136 | ret = write_ldt(compat_ptr(ptr), bytecount, 1); | ||
137 | break; | ||
138 | case 2: | ||
139 | ret = read_default_ldt(compat_ptr(ptr), bytecount); | ||
140 | break; | ||
141 | case 0x11: | ||
142 | ret = write_ldt(compat_ptr(ptr), bytecount, 0); | ||
143 | break; | ||
144 | } | ||
145 | return ret; | ||
146 | } | ||
diff --git a/arch/ia64/ia32/ia32_signal.c b/arch/ia64/ia32/ia32_signal.c
deleted file mode 100644
index b763ca19ef17..000000000000
--- a/arch/ia64/ia32/ia32_signal.c
+++ /dev/null
@@ -1,1010 +0,0 @@
1 | /* | ||
2 | * IA32 Architecture-specific signal handling support. | ||
3 | * | ||
4 | * Copyright (C) 1999, 2001-2002, 2005 Hewlett-Packard Co | ||
5 | * David Mosberger-Tang <davidm@hpl.hp.com> | ||
6 | * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com> | ||
7 | * Copyright (C) 2000 VA Linux Co | ||
8 | * Copyright (C) 2000 Don Dugger <n0ano@valinux.com> | ||
9 | * | ||
10 | * Derived from i386 and Alpha versions. | ||
11 | */ | ||
12 | |||
13 | #include <linux/errno.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/mm.h> | ||
16 | #include <linux/personality.h> | ||
17 | #include <linux/ptrace.h> | ||
18 | #include <linux/sched.h> | ||
19 | #include <linux/signal.h> | ||
20 | #include <linux/smp.h> | ||
21 | #include <linux/stddef.h> | ||
22 | #include <linux/syscalls.h> | ||
23 | #include <linux/unistd.h> | ||
24 | #include <linux/wait.h> | ||
25 | #include <linux/compat.h> | ||
26 | |||
27 | #include <asm/intrinsics.h> | ||
28 | #include <asm/uaccess.h> | ||
29 | #include <asm/rse.h> | ||
30 | #include <asm/sigcontext.h> | ||
31 | |||
32 | #include "ia32priv.h" | ||
33 | |||
34 | #include "../kernel/sigframe.h" | ||
35 | |||
36 | #define A(__x) ((unsigned long)(__x)) | ||
37 | |||
38 | #define DEBUG_SIG 0 | ||
39 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | ||
40 | |||
41 | #define __IA32_NR_sigreturn 119 | ||
42 | #define __IA32_NR_rt_sigreturn 173 | ||
43 | |||
44 | struct sigframe_ia32 | ||
45 | { | ||
46 | int pretcode; | ||
47 | int sig; | ||
48 | struct sigcontext_ia32 sc; | ||
49 | struct _fpstate_ia32 fpstate; | ||
50 | unsigned int extramask[_COMPAT_NSIG_WORDS-1]; | ||
51 | char retcode[8]; | ||
52 | }; | ||
53 | |||
54 | struct rt_sigframe_ia32 | ||
55 | { | ||
56 | int pretcode; | ||
57 | int sig; | ||
58 | int pinfo; | ||
59 | int puc; | ||
60 | compat_siginfo_t info; | ||
61 | struct ucontext_ia32 uc; | ||
62 | struct _fpstate_ia32 fpstate; | ||
63 | char retcode[8]; | ||
64 | }; | ||
65 | |||
66 | int | ||
67 | copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from) | ||
68 | { | ||
69 | unsigned long tmp; | ||
70 | int err; | ||
71 | |||
72 | if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t))) | ||
73 | return -EFAULT; | ||
74 | |||
75 | err = __get_user(to->si_signo, &from->si_signo); | ||
76 | err |= __get_user(to->si_errno, &from->si_errno); | ||
77 | err |= __get_user(to->si_code, &from->si_code); | ||
78 | |||
79 | if (to->si_code < 0) | ||
80 | err |= __copy_from_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE); | ||
81 | else { | ||
82 | switch (to->si_code >> 16) { | ||
83 | case __SI_CHLD >> 16: | ||
84 | err |= __get_user(to->si_utime, &from->si_utime); | ||
85 | err |= __get_user(to->si_stime, &from->si_stime); | ||
86 | err |= __get_user(to->si_status, &from->si_status); | ||
87 | default: | ||
88 | err |= __get_user(to->si_pid, &from->si_pid); | ||
89 | err |= __get_user(to->si_uid, &from->si_uid); | ||
90 | break; | ||
91 | case __SI_FAULT >> 16: | ||
92 | err |= __get_user(tmp, &from->si_addr); | ||
93 | to->si_addr = (void __user *) tmp; | ||
94 | break; | ||
95 | case __SI_POLL >> 16: | ||
96 | err |= __get_user(to->si_band, &from->si_band); | ||
97 | err |= __get_user(to->si_fd, &from->si_fd); | ||
98 | break; | ||
99 | case __SI_RT >> 16: /* This is not generated by the kernel as of now. */ | ||
100 | case __SI_MESGQ >> 16: | ||
101 | err |= __get_user(to->si_pid, &from->si_pid); | ||
102 | err |= __get_user(to->si_uid, &from->si_uid); | ||
103 | err |= __get_user(to->si_int, &from->si_int); | ||
104 | break; | ||
105 | } | ||
106 | } | ||
107 | return err; | ||
108 | } | ||
109 | |||
110 | int | ||
111 | copy_siginfo_to_user32 (compat_siginfo_t __user *to, siginfo_t *from) | ||
112 | { | ||
113 | unsigned int addr; | ||
114 | int err; | ||
115 | |||
116 | if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t))) | ||
117 | return -EFAULT; | ||
118 | |||
119 | /* If you change siginfo_t structure, please be sure | ||
120 | this code is fixed accordingly. | ||
121 | It should never copy any pad contained in the structure | ||
122 | to avoid security leaks, but must copy the generic | ||
123 | 3 ints plus the relevant union member. | ||
124 | This routine must convert siginfo from 64bit to 32bit as well | ||
125 | at the same time. */ | ||
126 | err = __put_user(from->si_signo, &to->si_signo); | ||
127 | err |= __put_user(from->si_errno, &to->si_errno); | ||
128 | err |= __put_user((short)from->si_code, &to->si_code); | ||
129 | if (from->si_code < 0) | ||
130 | err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE); | ||
131 | else { | ||
132 | switch (from->si_code >> 16) { | ||
133 | case __SI_CHLD >> 16: | ||
134 | err |= __put_user(from->si_utime, &to->si_utime); | ||
135 | err |= __put_user(from->si_stime, &to->si_stime); | ||
136 | err |= __put_user(from->si_status, &to->si_status); | ||
137 | default: | ||
138 | err |= __put_user(from->si_pid, &to->si_pid); | ||
139 | err |= __put_user(from->si_uid, &to->si_uid); | ||
140 | break; | ||
141 | case __SI_FAULT >> 16: | ||
142 | /* avoid type-checking warnings by copying _pad[0] in lieu of si_addr... */ | ||
143 | err |= __put_user(from->_sifields._pad[0], &to->si_addr); | ||
144 | break; | ||
145 | case __SI_POLL >> 16: | ||
146 | err |= __put_user(from->si_band, &to->si_band); | ||
147 | err |= __put_user(from->si_fd, &to->si_fd); | ||
148 | break; | ||
149 | case __SI_TIMER >> 16: | ||
150 | err |= __put_user(from->si_tid, &to->si_tid); | ||
151 | err |= __put_user(from->si_overrun, &to->si_overrun); | ||
152 | addr = (unsigned long) from->si_ptr; | ||
153 | err |= __put_user(addr, &to->si_ptr); | ||
154 | break; | ||
155 | case __SI_RT >> 16: /* Not generated by the kernel as of now. */ | ||
156 | case __SI_MESGQ >> 16: | ||
157 | err |= __put_user(from->si_uid, &to->si_uid); | ||
158 | err |= __put_user(from->si_pid, &to->si_pid); | ||
159 | addr = (unsigned long) from->si_ptr; | ||
160 | err |= __put_user(addr, &to->si_ptr); | ||
161 | break; | ||
162 | } | ||
163 | } | ||
164 | return err; | ||
165 | } | ||
166 | |||
167 | |||
168 | /* | ||
169 | * SAVE and RESTORE of ia32 fpstate info, from ia64 current state | ||
170 | * Used in exception handler to pass the fpstate to the user, and restore | ||
171 | * the fpstate while returning from the exception handler. | ||
172 | * | ||
173 | * fpstate info and their mapping to IA64 regs: | ||
174 | * fpstate REG(BITS) Attribute Comments | ||
175 | * cw ar.fcr(0:12) with bits 7 and 6 not used | ||
176 | * sw ar.fsr(0:15) | ||
177 | * tag ar.fsr(16:31) with odd numbered bits not used | ||
178 | * (read returns 0, writes ignored) | ||
179 | * ipoff ar.fir(0:31) | ||
180 | * cssel ar.fir(32:47) | ||
181 | * dataoff ar.fdr(0:31) | ||
182 | * datasel ar.fdr(32:47) | ||
183 | * | ||
184 | * _st[(0+TOS)%8] f8 | ||
185 | * _st[(1+TOS)%8] f9 | ||
186 | * _st[(2+TOS)%8] f10 | ||
187 | * _st[(3+TOS)%8] f11 (f8..f11 from ptregs) | ||
188 | * : : : (f12..f15 from live reg) | ||
189 | * : : : | ||
190 | * _st[(7+TOS)%8] f15 TOS=sw.top(bits11:13) | ||
191 | * | ||
192 | * status Same as sw RO | ||
193 | * magic 0 as X86_FXSR_MAGIC in ia32 | ||
194 | * mxcsr Bits(7:15)=ar.fcr(39:47) | ||
195 | * Bits(0:5) =ar.fsr(32:37) with bit 6 reserved | ||
196 | * _xmm[0..7] f16..f31 (live registers) | ||
197 | * with _xmm[0] | ||
198 | * Bit(64:127)=f17(0:63) | ||
199 | * Bit(0:63)=f16(0:63) | ||
200 | * All other fields unused... | ||
201 | */ | ||
202 | |||
203 | static int | ||
204 | save_ia32_fpstate_live (struct _fpstate_ia32 __user *save) | ||
205 | { | ||
206 | struct task_struct *tsk = current; | ||
207 | struct pt_regs *ptp; | ||
208 | struct _fpreg_ia32 *fpregp; | ||
209 | char buf[32]; | ||
210 | unsigned long fsr, fcr, fir, fdr; | ||
211 | unsigned long new_fsr; | ||
212 | unsigned long num128[2]; | ||
213 | unsigned long mxcsr=0; | ||
214 | int fp_tos, fr8_st_map; | ||
215 | |||
216 | if (!access_ok(VERIFY_WRITE, save, sizeof(*save))) | ||
217 | return -EFAULT; | ||
218 | |||
219 | /* Read in fsr, fcr, fir, fdr and copy onto fpstate */ | ||
220 | fsr = ia64_getreg(_IA64_REG_AR_FSR); | ||
221 | fcr = ia64_getreg(_IA64_REG_AR_FCR); | ||
222 | fir = ia64_getreg(_IA64_REG_AR_FIR); | ||
223 | fdr = ia64_getreg(_IA64_REG_AR_FDR); | ||
224 | |||
225 | /* | ||
226 | * We need to clear the exception state before calling the signal handler. Clear | ||
227 | * the bits 15, bits 0-7 in fp status word. Similar to the functionality of fnclex | ||
228 | * instruction. | ||
229 | */ | ||
230 | new_fsr = fsr & ~0x80ff; | ||
231 | ia64_setreg(_IA64_REG_AR_FSR, new_fsr); | ||
232 | |||
233 | __put_user(fcr & 0xffff, &save->cw); | ||
234 | __put_user(fsr & 0xffff, &save->sw); | ||
235 | __put_user((fsr>>16) & 0xffff, &save->tag); | ||
236 | __put_user(fir, &save->ipoff); | ||
237 | __put_user((fir>>32) & 0xffff, &save->cssel); | ||
238 | __put_user(fdr, &save->dataoff); | ||
239 | __put_user((fdr>>32) & 0xffff, &save->datasel); | ||
240 | __put_user(fsr & 0xffff, &save->status); | ||
241 | |||
242 | mxcsr = ((fcr>>32) & 0xff80) | ((fsr>>32) & 0x3f); | ||
243 | __put_user(mxcsr & 0xffff, &save->mxcsr); | ||
244 | __put_user( 0, &save->magic); //#define X86_FXSR_MAGIC 0x0000 | ||
245 | |||
246 | /* | ||
247 | * save f8..f11 from pt_regs | ||
248 | * save f12..f15 from live register set | ||
249 | */ | ||
250 | /* | ||
251 | * Find the location where f8 has to go in fp reg stack. This depends on | ||
252 | * TOP(11:13) field of sw. The other FP registers continue sequentially from | ||
253 | * where f8 maps to. | ||
254 | */ | ||
255 | fp_tos = (fsr>>11)&0x7; | ||
256 | fr8_st_map = (8-fp_tos)&0x7; | ||
257 | ptp = task_pt_regs(tsk); | ||
258 | fpregp = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15); | ||
259 | ia64f2ia32f(fpregp, &ptp->f8); | ||
260 | copy_to_user(&save->_st[(0+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32)); | ||
261 | ia64f2ia32f(fpregp, &ptp->f9); | ||
262 | copy_to_user(&save->_st[(1+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32)); | ||
263 | ia64f2ia32f(fpregp, &ptp->f10); | ||
264 | copy_to_user(&save->_st[(2+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32)); | ||
265 | ia64f2ia32f(fpregp, &ptp->f11); | ||
266 | copy_to_user(&save->_st[(3+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32)); | ||
267 | |||
268 | ia64_stfe(fpregp, 12); | ||
269 | copy_to_user(&save->_st[(4+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32)); | ||
270 | ia64_stfe(fpregp, 13); | ||
271 | copy_to_user(&save->_st[(5+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32)); | ||
272 | ia64_stfe(fpregp, 14); | ||
273 | copy_to_user(&save->_st[(6+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32)); | ||
274 | ia64_stfe(fpregp, 15); | ||
275 | copy_to_user(&save->_st[(7+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32)); | ||
276 | |||
277 | ia64_stf8(&num128[0], 16); | ||
278 | ia64_stf8(&num128[1], 17); | ||
279 | copy_to_user(&save->_xmm[0], num128, sizeof(struct _xmmreg_ia32)); | ||
280 | |||
281 | ia64_stf8(&num128[0], 18); | ||
282 | ia64_stf8(&num128[1], 19); | ||
283 | copy_to_user(&save->_xmm[1], num128, sizeof(struct _xmmreg_ia32)); | ||
284 | |||
285 | ia64_stf8(&num128[0], 20); | ||
286 | ia64_stf8(&num128[1], 21); | ||
287 | copy_to_user(&save->_xmm[2], num128, sizeof(struct _xmmreg_ia32)); | ||
288 | |||
289 | ia64_stf8(&num128[0], 22); | ||
290 | ia64_stf8(&num128[1], 23); | ||
291 | copy_to_user(&save->_xmm[3], num128, sizeof(struct _xmmreg_ia32)); | ||
292 | |||
293 | ia64_stf8(&num128[0], 24); | ||
294 | ia64_stf8(&num128[1], 25); | ||
295 | copy_to_user(&save->_xmm[4], num128, sizeof(struct _xmmreg_ia32)); | ||
296 | |||
297 | ia64_stf8(&num128[0], 26); | ||
298 | ia64_stf8(&num128[1], 27); | ||
299 | copy_to_user(&save->_xmm[5], num128, sizeof(struct _xmmreg_ia32)); | ||
300 | |||
301 | ia64_stf8(&num128[0], 28); | ||
302 | ia64_stf8(&num128[1], 29); | ||
303 | copy_to_user(&save->_xmm[6], num128, sizeof(struct _xmmreg_ia32)); | ||
304 | |||
305 | ia64_stf8(&num128[0], 30); | ||
306 | ia64_stf8(&num128[1], 31); | ||
307 | copy_to_user(&save->_xmm[7], num128, sizeof(struct _xmmreg_ia32)); | ||
308 | return 0; | ||
309 | } | ||
310 | |||
311 | static int | ||
312 | restore_ia32_fpstate_live (struct _fpstate_ia32 __user *save) | ||
313 | { | ||
314 | struct task_struct *tsk = current; | ||
315 | struct pt_regs *ptp; | ||
316 | unsigned int lo, hi; | ||
317 | unsigned long num128[2]; | ||
318 | unsigned long num64, mxcsr; | ||
319 | struct _fpreg_ia32 *fpregp; | ||
320 | char buf[32]; | ||
321 | unsigned long fsr, fcr, fir, fdr; | ||
322 | int fp_tos, fr8_st_map; | ||
323 | |||
324 | if (!access_ok(VERIFY_READ, save, sizeof(*save))) | ||
325 | return(-EFAULT); | ||
326 | |||
327 | /* | ||
328 | * Updating fsr, fcr, fir, fdr. | ||
329 | * Just a bit more complicated than save. | ||
330 | * - Need to make sure that we don't write any value other than the | ||
331 | * specific fpstate info | ||
332 | * - Need to make sure that the untouched part of fsr, fdr, fir, fcr | ||
333 | * remains the same while writing. | ||
334 | * So, we do a read, change specific fields and write. | ||
335 | */ | ||
336 | fsr = ia64_getreg(_IA64_REG_AR_FSR); | ||
337 | fcr = ia64_getreg(_IA64_REG_AR_FCR); | ||
338 | fir = ia64_getreg(_IA64_REG_AR_FIR); | ||
339 | fdr = ia64_getreg(_IA64_REG_AR_FDR); | ||
340 | |||
341 | __get_user(mxcsr, (unsigned int __user *)&save->mxcsr); | ||
342 | /* setting bits 0..5 8..12 with cw and 39..47 from mxcsr */ | ||
343 | __get_user(lo, (unsigned int __user *)&save->cw); | ||
344 | num64 = mxcsr & 0xff10; | ||
345 | num64 = (num64 << 32) | (lo & 0x1f3f); | ||
346 | fcr = (fcr & (~0xff1000001f3fUL)) | num64; | ||
347 | |||
348 | /* setting bits 0..31 with sw and tag and 32..37 from mxcsr */ | ||
349 | __get_user(lo, (unsigned int __user *)&save->sw); | ||
350 | /* clear bits 15,7 (fsw.b, fsw.es) when no exceptions are pending, so they reflect the current error status */ | ||
351 | if ( !(lo & 0x7f) ) | ||
352 | lo &= (~0x8080); | ||
353 | __get_user(hi, (unsigned int __user *)&save->tag); | ||
354 | num64 = mxcsr & 0x3f; | ||
355 | num64 = (num64 << 16) | (hi & 0xffff); | ||
356 | num64 = (num64 << 16) | (lo & 0xffff); | ||
357 | fsr = (fsr & (~0x3fffffffffUL)) | num64; | ||
358 | |||
359 | /* setting bits 0..47 with cssel and ipoff */ | ||
360 | __get_user(lo, (unsigned int __user *)&save->ipoff); | ||
361 | __get_user(hi, (unsigned int __user *)&save->cssel); | ||
362 | num64 = hi & 0xffff; | ||
363 | num64 = (num64 << 32) | lo; | ||
364 | fir = (fir & (~0xffffffffffffUL)) | num64; | ||
365 | |||
366 | /* setting bits 0..47 with datasel and dataoff */ | ||
367 | __get_user(lo, (unsigned int __user *)&save->dataoff); | ||
368 | __get_user(hi, (unsigned int __user *)&save->datasel); | ||
369 | num64 = hi & 0xffff; | ||
370 | num64 = (num64 << 32) | lo; | ||
371 | fdr = (fdr & (~0xffffffffffffUL)) | num64; | ||
372 | |||
373 | ia64_setreg(_IA64_REG_AR_FSR, fsr); | ||
374 | ia64_setreg(_IA64_REG_AR_FCR, fcr); | ||
375 | ia64_setreg(_IA64_REG_AR_FIR, fir); | ||
376 | ia64_setreg(_IA64_REG_AR_FDR, fdr); | ||
377 | |||
378 | /* | ||
379 | * restore f8..f11 onto pt_regs | ||
380 | * restore f12..f15 onto live registers | ||
381 | */ | ||
382 | /* | ||
383 | * Find the location where f8 has to go in fp reg stack. This depends on | ||
384 | * TOP(11:13) field of sw. The other FP registers continue sequentially from | ||
385 | * where f8 maps to. | ||
386 | */ | ||
387 | fp_tos = (fsr>>11)&0x7; | ||
388 | fr8_st_map = (8-fp_tos)&0x7; | ||
389 | fpregp = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15); | ||
390 | |||
391 | ptp = task_pt_regs(tsk); | ||
392 | copy_from_user(fpregp, &save->_st[(0+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32)); | ||
393 | ia32f2ia64f(&ptp->f8, fpregp); | ||
394 | copy_from_user(fpregp, &save->_st[(1+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32)); | ||
395 | ia32f2ia64f(&ptp->f9, fpregp); | ||
396 | copy_from_user(fpregp, &save->_st[(2+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32)); | ||
397 | ia32f2ia64f(&ptp->f10, fpregp); | ||
398 | copy_from_user(fpregp, &save->_st[(3+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32)); | ||
399 | ia32f2ia64f(&ptp->f11, fpregp); | ||
400 | |||
401 | copy_from_user(fpregp, &save->_st[(4+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32)); | ||
402 | ia64_ldfe(12, fpregp); | ||
403 | copy_from_user(fpregp, &save->_st[(5+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32)); | ||
404 | ia64_ldfe(13, fpregp); | ||
405 | copy_from_user(fpregp, &save->_st[(6+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32)); | ||
406 | ia64_ldfe(14, fpregp); | ||
407 | copy_from_user(fpregp, &save->_st[(7+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32)); | ||
408 | ia64_ldfe(15, fpregp); | ||
409 | |||
410 | copy_from_user(num128, &save->_xmm[0], sizeof(struct _xmmreg_ia32)); | ||
411 | ia64_ldf8(16, &num128[0]); | ||
412 | ia64_ldf8(17, &num128[1]); | ||
413 | |||
414 | copy_from_user(num128, &save->_xmm[1], sizeof(struct _xmmreg_ia32)); | ||
415 | ia64_ldf8(18, &num128[0]); | ||
416 | ia64_ldf8(19, &num128[1]); | ||
417 | |||
418 | copy_from_user(num128, &save->_xmm[2], sizeof(struct _xmmreg_ia32)); | ||
419 | ia64_ldf8(20, &num128[0]); | ||
420 | ia64_ldf8(21, &num128[1]); | ||
421 | |||
422 | copy_from_user(num128, &save->_xmm[3], sizeof(struct _xmmreg_ia32)); | ||
423 | ia64_ldf8(22, &num128[0]); | ||
424 | ia64_ldf8(23, &num128[1]); | ||
425 | |||
426 | copy_from_user(num128, &save->_xmm[4], sizeof(struct _xmmreg_ia32)); | ||
427 | ia64_ldf8(24, &num128[0]); | ||
428 | ia64_ldf8(25, &num128[1]); | ||
429 | |||
430 | copy_from_user(num128, &save->_xmm[5], sizeof(struct _xmmreg_ia32)); | ||
431 | ia64_ldf8(26, &num128[0]); | ||
432 | ia64_ldf8(27, &num128[1]); | ||
433 | |||
434 | copy_from_user(num128, &save->_xmm[6], sizeof(struct _xmmreg_ia32)); | ||
435 | ia64_ldf8(28, &num128[0]); | ||
436 | ia64_ldf8(29, &num128[1]); | ||
437 | |||
438 | copy_from_user(num128, &save->_xmm[7], sizeof(struct _xmmreg_ia32)); | ||
439 | ia64_ldf8(30, &num128[0]); | ||
440 | ia64_ldf8(31, &num128[1]); | ||
441 | return 0; | ||
442 | } | ||
443 | |||
444 | static inline void | ||
445 | sigact_set_handler (struct k_sigaction *sa, unsigned int handler, unsigned int restorer) | ||
446 | { | ||
447 | if (handler + 1 <= 2) | ||
448 | /* SIG_DFL, SIG_IGN, or SIG_ERR: must sign-extend to 64-bits */ | ||
449 | sa->sa.sa_handler = (__sighandler_t) A((int) handler); | ||
450 | else | ||
451 | sa->sa.sa_handler = (__sighandler_t) (((unsigned long) restorer << 32) | handler); | ||
452 | } | ||
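A small user-space sketch of the same 32/32 packing, assuming example 32-bit handler and restorer addresses; the low and high halves can later be recovered the way the IA32_SA_HANDLER and IA32_SA_RESTORER macros in ia32priv.h do it. This is an illustration, not the kernel routine.

	#include <stdio.h>

	int main(void)
	{
		unsigned int handler = 0x08049000;	/* example 32-bit handler address */
		unsigned int restorer = 0x08048f00;	/* example 32-bit restorer address */
		unsigned long packed;

		if (handler + 1 <= 2)
			/* SIG_DFL (0), SIG_IGN (1) or SIG_ERR (-1): sign-extend to 64 bits */
			packed = (unsigned long) (long) (int) handler;
		else
			packed = ((unsigned long) restorer << 32) | handler;

		printf("handler  = %#lx\n", packed & 0xffffffff);
		printf("restorer = %#lx\n", packed >> 32);
		return 0;
	}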
453 | |||
454 | asmlinkage long | ||
455 | sys32_sigsuspend (int history0, int history1, old_sigset_t mask) | ||
456 | { | ||
457 | mask &= _BLOCKABLE; | ||
458 | spin_lock_irq(¤t->sighand->siglock); | ||
459 | current->saved_sigmask = current->blocked; | ||
460 | siginitset(¤t->blocked, mask); | ||
461 | recalc_sigpending(); | ||
462 | spin_unlock_irq(¤t->sighand->siglock); | ||
463 | |||
464 | current->state = TASK_INTERRUPTIBLE; | ||
465 | schedule(); | ||
466 | set_restore_sigmask(); | ||
467 | return -ERESTARTNOHAND; | ||
468 | } | ||
469 | |||
470 | asmlinkage long | ||
471 | sys32_signal (int sig, unsigned int handler) | ||
472 | { | ||
473 | struct k_sigaction new_sa, old_sa; | ||
474 | int ret; | ||
475 | |||
476 | sigact_set_handler(&new_sa, handler, 0); | ||
477 | new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK; | ||
478 | sigemptyset(&new_sa.sa.sa_mask); | ||
479 | |||
480 | ret = do_sigaction(sig, &new_sa, &old_sa); | ||
481 | |||
482 | return ret ? ret : IA32_SA_HANDLER(&old_sa); | ||
483 | } | ||
484 | |||
485 | asmlinkage long | ||
486 | sys32_rt_sigaction (int sig, struct sigaction32 __user *act, | ||
487 | struct sigaction32 __user *oact, unsigned int sigsetsize) | ||
488 | { | ||
489 | struct k_sigaction new_ka, old_ka; | ||
490 | unsigned int handler, restorer; | ||
491 | int ret; | ||
492 | |||
493 | /* XXX: Don't preclude handling different sized sigset_t's. */ | ||
494 | if (sigsetsize != sizeof(compat_sigset_t)) | ||
495 | return -EINVAL; | ||
496 | |||
497 | if (act) { | ||
498 | ret = get_user(handler, &act->sa_handler); | ||
499 | ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags); | ||
500 | ret |= get_user(restorer, &act->sa_restorer); | ||
501 | ret |= copy_from_user(&new_ka.sa.sa_mask, &act->sa_mask, sizeof(compat_sigset_t)); | ||
502 | if (ret) | ||
503 | return -EFAULT; | ||
504 | |||
505 | sigact_set_handler(&new_ka, handler, restorer); | ||
506 | } | ||
507 | |||
508 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | ||
509 | |||
510 | if (!ret && oact) { | ||
511 | ret = put_user(IA32_SA_HANDLER(&old_ka), &oact->sa_handler); | ||
512 | ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags); | ||
513 | ret |= put_user(IA32_SA_RESTORER(&old_ka), &oact->sa_restorer); | ||
514 | ret |= copy_to_user(&oact->sa_mask, &old_ka.sa.sa_mask, sizeof(compat_sigset_t)); | ||
515 | } | ||
516 | return ret; | ||
517 | } | ||
518 | |||
519 | |||
520 | asmlinkage long | ||
521 | sys32_rt_sigprocmask (int how, compat_sigset_t __user *set, compat_sigset_t __user *oset, | ||
522 | unsigned int sigsetsize) | ||
523 | { | ||
524 | mm_segment_t old_fs = get_fs(); | ||
525 | sigset_t s; | ||
526 | long ret; | ||
527 | |||
528 | if (sigsetsize > sizeof(s)) | ||
529 | return -EINVAL; | ||
530 | |||
531 | if (set) { | ||
532 | memset(&s, 0, sizeof(s)); | ||
533 | if (copy_from_user(&s.sig, set, sigsetsize)) | ||
534 | return -EFAULT; | ||
535 | } | ||
536 | set_fs(KERNEL_DS); | ||
537 | ret = sys_rt_sigprocmask(how, | ||
538 | set ? (sigset_t __user *) &s : NULL, | ||
539 | oset ? (sigset_t __user *) &s : NULL, sizeof(s)); | ||
540 | set_fs(old_fs); | ||
541 | if (ret) | ||
542 | return ret; | ||
543 | if (oset) { | ||
544 | if (copy_to_user(oset, &s.sig, sigsetsize)) | ||
545 | return -EFAULT; | ||
546 | } | ||
547 | return 0; | ||
548 | } | ||
549 | |||
550 | asmlinkage long | ||
551 | sys32_rt_sigqueueinfo (int pid, int sig, compat_siginfo_t __user *uinfo) | ||
552 | { | ||
553 | mm_segment_t old_fs = get_fs(); | ||
554 | siginfo_t info; | ||
555 | int ret; | ||
556 | |||
557 | if (copy_siginfo_from_user32(&info, uinfo)) | ||
558 | return -EFAULT; | ||
559 | set_fs(KERNEL_DS); | ||
560 | ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *) &info); | ||
561 | set_fs(old_fs); | ||
562 | return ret; | ||
563 | } | ||
564 | |||
565 | asmlinkage long | ||
566 | sys32_sigaction (int sig, struct old_sigaction32 __user *act, struct old_sigaction32 __user *oact) | ||
567 | { | ||
568 | struct k_sigaction new_ka, old_ka; | ||
569 | unsigned int handler, restorer; | ||
570 | int ret; | ||
571 | |||
572 | if (act) { | ||
573 | compat_old_sigset_t mask; | ||
574 | |||
575 | ret = get_user(handler, &act->sa_handler); | ||
576 | ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags); | ||
577 | ret |= get_user(restorer, &act->sa_restorer); | ||
578 | ret |= get_user(mask, &act->sa_mask); | ||
579 | if (ret) | ||
580 | return ret; | ||
581 | |||
582 | sigact_set_handler(&new_ka, handler, restorer); | ||
583 | siginitset(&new_ka.sa.sa_mask, mask); | ||
584 | } | ||
585 | |||
586 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | ||
587 | |||
588 | if (!ret && oact) { | ||
589 | ret = put_user(IA32_SA_HANDLER(&old_ka), &oact->sa_handler); | ||
590 | ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags); | ||
591 | ret |= put_user(IA32_SA_RESTORER(&old_ka), &oact->sa_restorer); | ||
592 | ret |= put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); | ||
593 | } | ||
594 | |||
595 | return ret; | ||
596 | } | ||
597 | |||
598 | static int | ||
599 | setup_sigcontext_ia32 (struct sigcontext_ia32 __user *sc, struct _fpstate_ia32 __user *fpstate, | ||
600 | struct pt_regs *regs, unsigned long mask) | ||
601 | { | ||
602 | int err = 0; | ||
603 | unsigned long flag; | ||
604 | |||
605 | if (!access_ok(VERIFY_WRITE, sc, sizeof(*sc))) | ||
606 | return -EFAULT; | ||
607 | |||
608 | err |= __put_user((regs->r16 >> 32) & 0xffff, (unsigned int __user *)&sc->fs); | ||
609 | err |= __put_user((regs->r16 >> 48) & 0xffff, (unsigned int __user *)&sc->gs); | ||
610 | err |= __put_user((regs->r16 >> 16) & 0xffff, (unsigned int __user *)&sc->es); | ||
611 | err |= __put_user(regs->r16 & 0xffff, (unsigned int __user *)&sc->ds); | ||
612 | err |= __put_user(regs->r15, &sc->edi); | ||
613 | err |= __put_user(regs->r14, &sc->esi); | ||
614 | err |= __put_user(regs->r13, &sc->ebp); | ||
615 | err |= __put_user(regs->r12, &sc->esp); | ||
616 | err |= __put_user(regs->r11, &sc->ebx); | ||
617 | err |= __put_user(regs->r10, &sc->edx); | ||
618 | err |= __put_user(regs->r9, &sc->ecx); | ||
619 | err |= __put_user(regs->r8, &sc->eax); | ||
620 | #if 0 | ||
621 | err |= __put_user(current->tss.trap_no, &sc->trapno); | ||
622 | err |= __put_user(current->tss.error_code, &sc->err); | ||
623 | #endif | ||
624 | err |= __put_user(regs->cr_iip, &sc->eip); | ||
625 | err |= __put_user(regs->r17 & 0xffff, (unsigned int __user *)&sc->cs); | ||
626 | /* | ||
627 | * `eflags' is in an ar register for this context | ||
628 | */ | ||
629 | flag = ia64_getreg(_IA64_REG_AR_EFLAG); | ||
630 | err |= __put_user((unsigned int)flag, &sc->eflags); | ||
631 | err |= __put_user(regs->r12, &sc->esp_at_signal); | ||
632 | err |= __put_user((regs->r17 >> 16) & 0xffff, (unsigned int __user *)&sc->ss); | ||
633 | |||
634 | if ( save_ia32_fpstate_live(fpstate) < 0 ) | ||
635 | err = -EFAULT; | ||
636 | else | ||
637 | err |= __put_user((u32)(u64)fpstate, &sc->fpstate); | ||
638 | |||
639 | #if 0 | ||
640 | tmp = save_i387(fpstate); | ||
641 | if (tmp < 0) | ||
642 | err = 1; | ||
643 | else | ||
644 | err |= __put_user(tmp ? fpstate : NULL, &sc->fpstate); | ||
645 | |||
646 | /* non-iBCS2 extensions.. */ | ||
647 | #endif | ||
648 | err |= __put_user(mask, &sc->oldmask); | ||
649 | #if 0 | ||
650 | err |= __put_user(current->tss.cr2, &sc->cr2); | ||
651 | #endif | ||
652 | return err; | ||
653 | } | ||
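The fs/gs/es/ds and cs/ss extraction above relies on the fixed layout in which ia32 segment selectors are kept in the IA-64 scratch registers r16 and r17. A tiny sketch of that layout with assumed selector values (__USER_CS = 0x23 and __USER_DS = 0x2b, as defined further down in ia32priv.h); the register contents here are made up for illustration.

	#include <stdio.h>

	int main(void)
	{
		/* ds | es<<16 | fs<<32 | gs<<48 live in r16; cs | ss<<16 in r17 */
		unsigned long r16 = 0x002b002b002b002bUL;	/* ds = es = fs = gs = 0x2b */
		unsigned long r17 = 0x002b0023UL;		/* cs = 0x23, ss = 0x2b */

		printf("ds=%#lx es=%#lx fs=%#lx gs=%#lx\n",
		       r16 & 0xffff, (r16 >> 16) & 0xffff,
		       (r16 >> 32) & 0xffff, (r16 >> 48) & 0xffff);
		printf("cs=%#lx ss=%#lx\n", r17 & 0xffff, (r17 >> 16) & 0xffff);
		return 0;
	}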
654 | |||
655 | static int | ||
656 | restore_sigcontext_ia32 (struct pt_regs *regs, struct sigcontext_ia32 __user *sc, int *peax) | ||
657 | { | ||
658 | unsigned int err = 0; | ||
659 | |||
660 | /* Always make any pending restarted system calls return -EINTR */ | ||
661 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | ||
662 | |||
663 | if (!access_ok(VERIFY_READ, sc, sizeof(*sc))) | ||
664 | return(-EFAULT); | ||
665 | |||
666 | #define COPY(ia64x, ia32x) err |= __get_user(regs->ia64x, &sc->ia32x) | ||
667 | |||
668 | #define copyseg_gs(tmp) (regs->r16 |= (unsigned long) (tmp) << 48) | ||
669 | #define copyseg_fs(tmp) (regs->r16 |= (unsigned long) (tmp) << 32) | ||
670 | #define copyseg_cs(tmp) (regs->r17 |= tmp) | ||
671 | #define copyseg_ss(tmp) (regs->r17 |= (unsigned long) (tmp) << 16) | ||
672 | #define copyseg_es(tmp) (regs->r16 |= (unsigned long) (tmp) << 16) | ||
673 | #define copyseg_ds(tmp) (regs->r16 |= tmp) | ||
674 | |||
675 | #define COPY_SEG(seg) \ | ||
676 | { \ | ||
677 | unsigned short tmp; \ | ||
678 | err |= __get_user(tmp, &sc->seg); \ | ||
679 | copyseg_##seg(tmp); \ | ||
680 | } | ||
681 | #define COPY_SEG_STRICT(seg) \ | ||
682 | { \ | ||
683 | unsigned short tmp; \ | ||
684 | err |= __get_user(tmp, &sc->seg); \ | ||
685 | copyseg_##seg(tmp|3); \ | ||
686 | } | ||
687 | |||
688 | /* To make COPY_SEGs easier, we zero r16, r17 */ | ||
689 | regs->r16 = 0; | ||
690 | regs->r17 = 0; | ||
691 | |||
692 | COPY_SEG(gs); | ||
693 | COPY_SEG(fs); | ||
694 | COPY_SEG(es); | ||
695 | COPY_SEG(ds); | ||
696 | COPY(r15, edi); | ||
697 | COPY(r14, esi); | ||
698 | COPY(r13, ebp); | ||
699 | COPY(r12, esp); | ||
700 | COPY(r11, ebx); | ||
701 | COPY(r10, edx); | ||
702 | COPY(r9, ecx); | ||
703 | COPY(cr_iip, eip); | ||
704 | COPY_SEG_STRICT(cs); | ||
705 | COPY_SEG_STRICT(ss); | ||
706 | ia32_load_segment_descriptors(current); | ||
707 | { | ||
708 | unsigned int tmpflags; | ||
709 | unsigned long flag; | ||
710 | |||
711 | /* | ||
712 | * IA32 `eflags' is not part of `pt_regs', it's in an ar register which | ||
713 | * is part of the thread context. Fortunately, we are executing in the | ||
714 | * IA32 process's context. | ||
715 | */ | ||
716 | err |= __get_user(tmpflags, &sc->eflags); | ||
717 | flag = ia64_getreg(_IA64_REG_AR_EFLAG); | ||
718 | flag &= ~0x40DD5; | ||
719 | flag |= (tmpflags & 0x40DD5); | ||
720 | ia64_setreg(_IA64_REG_AR_EFLAG, flag); | ||
721 | |||
722 | regs->r1 = -1; /* disable syscall checks, r1 is orig_eax */ | ||
723 | } | ||
724 | |||
725 | { | ||
726 | struct _fpstate_ia32 __user *buf = NULL; | ||
727 | u32 fpstate_ptr; | ||
728 | err |= get_user(fpstate_ptr, &(sc->fpstate)); | ||
729 | buf = compat_ptr(fpstate_ptr); | ||
730 | if (buf) { | ||
731 | err |= restore_ia32_fpstate_live(buf); | ||
732 | } | ||
733 | } | ||
734 | |||
735 | #if 0 | ||
736 | { | ||
737 | struct _fpstate * buf; | ||
738 | err |= __get_user(buf, &sc->fpstate); | ||
739 | if (buf) { | ||
740 | if (!access_ok(VERIFY_READ, buf, sizeof(*buf))) | ||
741 | goto badframe; | ||
742 | err |= restore_i387(buf); | ||
743 | } | ||
744 | } | ||
745 | #endif | ||
746 | |||
747 | err |= __get_user(*peax, &sc->eax); | ||
748 | return err; | ||
749 | |||
750 | #if 0 | ||
751 | badframe: | ||
752 | return 1; | ||
753 | #endif | ||
754 | } | ||
755 | |||
756 | /* | ||
757 | * Determine which stack to use.. | ||
758 | */ | ||
759 | static inline void __user * | ||
760 | get_sigframe (struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size) | ||
761 | { | ||
762 | unsigned long esp; | ||
763 | |||
764 | /* Default to using normal stack (truncate off sign-extension of bit 31): */ | ||
765 | esp = (unsigned int) regs->r12; | ||
766 | |||
767 | /* This is the X/Open sanctioned signal stack switching. */ | ||
768 | if (ka->sa.sa_flags & SA_ONSTACK) { | ||
769 | int onstack = sas_ss_flags(esp); | ||
770 | |||
771 | if (onstack == 0) | ||
772 | esp = current->sas_ss_sp + current->sas_ss_size; | ||
773 | else if (onstack == SS_ONSTACK) { | ||
774 | /* | ||
775 | * If we are on the alternate signal stack and would | ||
776 | * overflow it, don't. Return an always-bogus address | ||
777 | * instead so we will die with SIGSEGV. | ||
778 | */ | ||
779 | if (!likely(on_sig_stack(esp - frame_size))) | ||
780 | return (void __user *) -1L; | ||
781 | } | ||
782 | } | ||
783 | /* Legacy stack switching not supported */ | ||
784 | |||
785 | esp -= frame_size; | ||
786 | /* Align the stack pointer according to the i386 ABI, | ||
787 | * i.e. so that on function entry ((sp + 4) & 15) == 0. */ | ||
788 | esp = ((esp + 4) & -16ul) - 4; | ||
789 | return (void __user *) esp; | ||
790 | } | ||
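The closing alignment expression is easy to misread; a throwaway check like the following (plain user-space C with hypothetical starting values) confirms that for any esp the chosen frame address satisfies the stated ((sp + 4) & 15) == 0 entry invariant and never moves the stack pointer upward.

	#include <assert.h>

	int main(void)
	{
		unsigned long esp;

		for (esp = 0x1000; esp < 0x1040; esp++) {
			unsigned long aligned = ((esp + 4) & -16UL) - 4;

			assert(((aligned + 4) & 15) == 0);	/* i386 ABI entry alignment */
			assert(aligned <= esp);			/* frame only grows downward */
		}
		return 0;
	}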
791 | |||
792 | static int | ||
793 | setup_frame_ia32 (int sig, struct k_sigaction *ka, sigset_t *set, struct pt_regs * regs) | ||
794 | { | ||
795 | struct exec_domain *ed = current_thread_info()->exec_domain; | ||
796 | struct sigframe_ia32 __user *frame; | ||
797 | int err = 0; | ||
798 | |||
799 | frame = get_sigframe(ka, regs, sizeof(*frame)); | ||
800 | |||
801 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | ||
802 | goto give_sigsegv; | ||
803 | |||
804 | err |= __put_user((ed && ed->signal_invmap && sig < 32 | ||
805 | ? (int)(ed->signal_invmap[sig]) : sig), &frame->sig); | ||
806 | |||
807 | err |= setup_sigcontext_ia32(&frame->sc, &frame->fpstate, regs, set->sig[0]); | ||
808 | |||
809 | if (_COMPAT_NSIG_WORDS > 1) | ||
810 | err |= __copy_to_user(frame->extramask, (char *) &set->sig + 4, | ||
811 | sizeof(frame->extramask)); | ||
812 | |||
813 | /* Set up to return from userspace. If provided, use a stub | ||
814 | already in userspace. */ | ||
815 | if (ka->sa.sa_flags & SA_RESTORER) { | ||
816 | unsigned int restorer = IA32_SA_RESTORER(ka); | ||
817 | err |= __put_user(restorer, &frame->pretcode); | ||
818 | } else { | ||
819 | /* Pointing to restorer in ia32 gate page */ | ||
820 | err |= __put_user(IA32_GATE_OFFSET, &frame->pretcode); | ||
821 | } | ||
822 | |||
823 | /* This is popl %eax ; movl $,%eax ; int $0x80 | ||
824 | * and is there for historical reasons only. | ||
825 | * See arch/i386/kernel/signal.c | ||
826 | */ | ||
827 | |||
828 | err |= __put_user(0xb858, (short __user *)(frame->retcode+0)); | ||
829 | err |= __put_user(__IA32_NR_sigreturn, (int __user *)(frame->retcode+2)); | ||
830 | err |= __put_user(0x80cd, (short __user *)(frame->retcode+6)); | ||
831 | |||
832 | if (err) | ||
833 | goto give_sigsegv; | ||
834 | |||
835 | /* Set up registers for signal handler */ | ||
836 | regs->r12 = (unsigned long) frame; | ||
837 | regs->cr_iip = IA32_SA_HANDLER(ka); | ||
838 | |||
839 | set_fs(USER_DS); | ||
840 | |||
841 | #if 0 | ||
842 | regs->eflags &= ~TF_MASK; | ||
843 | #endif | ||
844 | |||
845 | #if 0 | ||
846 | printk("SIG deliver (%s:%d): sig=%d sp=%p pc=%lx ra=%x\n", | ||
847 | current->comm, current->pid, sig, (void *) frame, regs->cr_iip, frame->pretcode); | ||
848 | #endif | ||
849 | |||
850 | return 1; | ||
851 | |||
852 | give_sigsegv: | ||
853 | force_sigsegv(sig, current); | ||
854 | return 0; | ||
855 | } | ||
856 | |||
857 | static int | ||
858 | setup_rt_frame_ia32 (int sig, struct k_sigaction *ka, siginfo_t *info, | ||
859 | sigset_t *set, struct pt_regs * regs) | ||
860 | { | ||
861 | struct exec_domain *ed = current_thread_info()->exec_domain; | ||
862 | compat_uptr_t pinfo, puc; | ||
863 | struct rt_sigframe_ia32 __user *frame; | ||
864 | int err = 0; | ||
865 | |||
866 | frame = get_sigframe(ka, regs, sizeof(*frame)); | ||
867 | |||
868 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | ||
869 | goto give_sigsegv; | ||
870 | |||
871 | err |= __put_user((ed && ed->signal_invmap | ||
872 | && sig < 32 ? ed->signal_invmap[sig] : sig), &frame->sig); | ||
873 | |||
874 | pinfo = (long __user) &frame->info; | ||
875 | puc = (long __user) &frame->uc; | ||
876 | err |= __put_user(pinfo, &frame->pinfo); | ||
877 | err |= __put_user(puc, &frame->puc); | ||
878 | err |= copy_siginfo_to_user32(&frame->info, info); | ||
879 | |||
880 | /* Create the ucontext. */ | ||
881 | err |= __put_user(0, &frame->uc.uc_flags); | ||
882 | err |= __put_user(0, &frame->uc.uc_link); | ||
883 | err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); | ||
884 | err |= __put_user(sas_ss_flags(regs->r12), &frame->uc.uc_stack.ss_flags); | ||
885 | err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); | ||
886 | err |= setup_sigcontext_ia32(&frame->uc.uc_mcontext, &frame->fpstate, regs, set->sig[0]); | ||
887 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | ||
888 | if (err) | ||
889 | goto give_sigsegv; | ||
890 | |||
891 | /* Set up to return from userspace. If provided, use a stub | ||
892 | already in userspace. */ | ||
893 | if (ka->sa.sa_flags & SA_RESTORER) { | ||
894 | unsigned int restorer = IA32_SA_RESTORER(ka); | ||
895 | err |= __put_user(restorer, &frame->pretcode); | ||
896 | } else { | ||
897 | /* Pointing to rt_restorer in ia32 gate page */ | ||
898 | err |= __put_user(IA32_GATE_OFFSET + 8, &frame->pretcode); | ||
899 | } | ||
900 | |||
901 | /* This is movl $,%eax ; int $0x80 | ||
902 | * and is there for historical reasons only. | ||
903 | * See arch/i386/kernel/signal.c | ||
904 | */ | ||
905 | |||
906 | err |= __put_user(0xb8, (char __user *)(frame->retcode+0)); | ||
907 | err |= __put_user(__IA32_NR_rt_sigreturn, (int __user *)(frame->retcode+1)); | ||
908 | err |= __put_user(0x80cd, (short __user *)(frame->retcode+5)); | ||
909 | |||
910 | if (err) | ||
911 | goto give_sigsegv; | ||
912 | |||
913 | /* Set up registers for signal handler */ | ||
914 | regs->r12 = (unsigned long) frame; | ||
915 | regs->cr_iip = IA32_SA_HANDLER(ka); | ||
916 | |||
917 | set_fs(USER_DS); | ||
918 | |||
919 | #if 0 | ||
920 | regs->eflags &= ~TF_MASK; | ||
921 | #endif | ||
922 | |||
923 | #if 0 | ||
924 | printk("SIG deliver (%s:%d): sp=%p pc=%lx ra=%x\n", | ||
925 | current->comm, current->pid, (void *) frame, regs->cr_iip, frame->pretcode); | ||
926 | #endif | ||
927 | |||
928 | return 1; | ||
929 | |||
930 | give_sigsegv: | ||
931 | force_sigsegv(sig, current); | ||
932 | return 0; | ||
933 | } | ||
934 | |||
935 | int | ||
936 | ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info, | ||
937 | sigset_t *set, struct pt_regs *regs) | ||
938 | { | ||
939 | /* Set up the stack frame */ | ||
940 | if (ka->sa.sa_flags & SA_SIGINFO) | ||
941 | return setup_rt_frame_ia32(sig, ka, info, set, regs); | ||
942 | else | ||
943 | return setup_frame_ia32(sig, ka, set, regs); | ||
944 | } | ||
945 | |||
946 | asmlinkage long | ||
947 | sys32_sigreturn (int arg0, int arg1, int arg2, int arg3, int arg4, int arg5, | ||
948 | int arg6, int arg7, struct pt_regs regs) | ||
949 | { | ||
950 | unsigned long esp = (unsigned int) regs.r12; | ||
951 | struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(esp - 8); | ||
952 | sigset_t set; | ||
953 | int eax; | ||
954 | |||
955 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | ||
956 | goto badframe; | ||
957 | |||
958 | if (__get_user(set.sig[0], &frame->sc.oldmask) | ||
959 | || (_COMPAT_NSIG_WORDS > 1 && __copy_from_user((char *) &set.sig + 4, &frame->extramask, | ||
960 | sizeof(frame->extramask)))) | ||
961 | goto badframe; | ||
962 | |||
963 | sigdelsetmask(&set, ~_BLOCKABLE); | ||
964 | spin_lock_irq(¤t->sighand->siglock); | ||
965 | current->blocked = set; | ||
966 | recalc_sigpending(); | ||
967 | spin_unlock_irq(¤t->sighand->siglock); | ||
968 | |||
969 | if (restore_sigcontext_ia32(®s, &frame->sc, &eax)) | ||
970 | goto badframe; | ||
971 | return eax; | ||
972 | |||
973 | badframe: | ||
974 | force_sig(SIGSEGV, current); | ||
975 | return 0; | ||
976 | } | ||
977 | |||
978 | asmlinkage long | ||
979 | sys32_rt_sigreturn (int arg0, int arg1, int arg2, int arg3, int arg4, | ||
980 | int arg5, int arg6, int arg7, struct pt_regs regs) | ||
981 | { | ||
982 | unsigned long esp = (unsigned int) regs.r12; | ||
983 | struct rt_sigframe_ia32 __user *frame = (struct rt_sigframe_ia32 __user *)(esp - 4); | ||
984 | sigset_t set; | ||
985 | int eax; | ||
986 | |||
987 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | ||
988 | goto badframe; | ||
989 | if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) | ||
990 | goto badframe; | ||
991 | |||
992 | sigdelsetmask(&set, ~_BLOCKABLE); | ||
993 | spin_lock_irq(¤t->sighand->siglock); | ||
994 | current->blocked = set; | ||
995 | recalc_sigpending(); | ||
996 | spin_unlock_irq(¤t->sighand->siglock); | ||
997 | |||
998 | if (restore_sigcontext_ia32(®s, &frame->uc.uc_mcontext, &eax)) | ||
999 | goto badframe; | ||
1000 | |||
1001 | /* It is more difficult to avoid calling this function than to | ||
1002 | call it and ignore errors. */ | ||
1003 | do_sigaltstack((stack_t __user *) &frame->uc.uc_stack, NULL, esp); | ||
1004 | |||
1005 | return eax; | ||
1006 | |||
1007 | badframe: | ||
1008 | force_sig(SIGSEGV, current); | ||
1009 | return 0; | ||
1010 | } | ||
diff --git a/arch/ia64/ia32/ia32_support.c b/arch/ia64/ia32/ia32_support.c deleted file mode 100644 index a6965ddafc46..000000000000 --- a/arch/ia64/ia32/ia32_support.c +++ /dev/null | |||
@@ -1,253 +0,0 @@ | |||
1 | /* | ||
2 | * IA32 helper functions | ||
3 | * | ||
4 | * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com> | ||
5 | * Copyright (C) 2000 Asit K. Mallick <asit.k.mallick@intel.com> | ||
6 | * Copyright (C) 2001-2002 Hewlett-Packard Co | ||
7 | * David Mosberger-Tang <davidm@hpl.hp.com> | ||
8 | * | ||
9 | * 06/16/00 A. Mallick added csd/ssd/tssd for ia32 thread context | ||
10 | * 02/19/01 D. Mosberger dropped tssd; it's not needed | ||
11 | * 09/14/01 D. Mosberger fixed memory management for gdt/tss page | ||
12 | * 09/29/01 D. Mosberger added ia32_load_segment_descriptors() | ||
13 | */ | ||
14 | |||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/mm.h> | ||
18 | #include <linux/sched.h> | ||
19 | |||
20 | #include <asm/intrinsics.h> | ||
21 | #include <asm/page.h> | ||
22 | #include <asm/pgtable.h> | ||
23 | #include <asm/system.h> | ||
24 | #include <asm/processor.h> | ||
25 | #include <asm/uaccess.h> | ||
26 | |||
27 | #include "ia32priv.h" | ||
28 | |||
29 | extern int die_if_kernel (char *str, struct pt_regs *regs, long err); | ||
30 | |||
31 | struct page *ia32_shared_page[NR_CPUS]; | ||
32 | unsigned long *ia32_boot_gdt; | ||
33 | unsigned long *cpu_gdt_table[NR_CPUS]; | ||
34 | struct page *ia32_gate_page; | ||
35 | |||
36 | static unsigned long | ||
37 | load_desc (u16 selector) | ||
38 | { | ||
39 | unsigned long *table, limit, index; | ||
40 | |||
41 | if (!selector) | ||
42 | return 0; | ||
43 | if (selector & IA32_SEGSEL_TI) { | ||
44 | table = (unsigned long *) IA32_LDT_OFFSET; | ||
45 | limit = IA32_LDT_ENTRIES; | ||
46 | } else { | ||
47 | table = cpu_gdt_table[smp_processor_id()]; | ||
48 | limit = IA32_PAGE_SIZE / sizeof(ia32_boot_gdt[0]); | ||
49 | } | ||
50 | index = selector >> IA32_SEGSEL_INDEX_SHIFT; | ||
51 | if (index >= limit) | ||
52 | return 0; | ||
53 | return IA32_SEG_UNSCRAMBLE(table[index]); | ||
54 | } | ||
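load_desc() above splits a 16-bit selector into its table indicator and index. A self-contained sketch of that decoding, mirroring the IA32_SEGSEL_* definitions in ia32priv.h and using an example selector value; it is illustrative only.

	#include <stdio.h>

	#define SEGSEL_RPL		(0x3 << 0)	/* requested privilege level */
	#define SEGSEL_TI		(0x1 << 2)	/* table indicator: 0 = GDT, 1 = LDT */
	#define SEGSEL_INDEX_SHIFT	3

	int main(void)
	{
		unsigned int sel = 0x2b;	/* e.g. __USER_DS */

		printf("index = %u, table = %s, rpl = %u\n",
		       sel >> SEGSEL_INDEX_SHIFT,
		       (sel & SEGSEL_TI) ? "LDT" : "GDT",
		       sel & SEGSEL_RPL);
		return 0;
	}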
55 | |||
56 | void | ||
57 | ia32_load_segment_descriptors (struct task_struct *task) | ||
58 | { | ||
59 | struct pt_regs *regs = task_pt_regs(task); | ||
60 | |||
61 | /* Setup the segment descriptors */ | ||
62 | regs->r24 = load_desc(regs->r16 >> 16); /* ESD */ | ||
63 | regs->r27 = load_desc(regs->r16 >> 0); /* DSD */ | ||
64 | regs->r28 = load_desc(regs->r16 >> 32); /* FSD */ | ||
65 | regs->r29 = load_desc(regs->r16 >> 48); /* GSD */ | ||
66 | regs->ar_csd = load_desc(regs->r17 >> 0); /* CSD */ | ||
67 | regs->ar_ssd = load_desc(regs->r17 >> 16); /* SSD */ | ||
68 | } | ||
69 | |||
70 | int | ||
71 | ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs) | ||
72 | { | ||
73 | struct desc_struct *desc; | ||
74 | struct ia32_user_desc info; | ||
75 | int idx; | ||
76 | |||
77 | if (copy_from_user(&info, (void __user *)(childregs->r14 & 0xffffffff), sizeof(info))) | ||
78 | return -EFAULT; | ||
79 | if (LDT_empty(&info)) | ||
80 | return -EINVAL; | ||
81 | |||
82 | idx = info.entry_number; | ||
83 | if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) | ||
84 | return -EINVAL; | ||
85 | |||
86 | desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN; | ||
87 | desc->a = LDT_entry_a(&info); | ||
88 | desc->b = LDT_entry_b(&info); | ||
89 | |||
90 | /* XXX: can this be done in a cleaner way ? */ | ||
91 | load_TLS(&child->thread, smp_processor_id()); | ||
92 | ia32_load_segment_descriptors(child); | ||
93 | load_TLS(¤t->thread, smp_processor_id()); | ||
94 | |||
95 | return 0; | ||
96 | } | ||
97 | |||
98 | void | ||
99 | ia32_save_state (struct task_struct *t) | ||
100 | { | ||
101 | t->thread.eflag = ia64_getreg(_IA64_REG_AR_EFLAG); | ||
102 | t->thread.fsr = ia64_getreg(_IA64_REG_AR_FSR); | ||
103 | t->thread.fcr = ia64_getreg(_IA64_REG_AR_FCR); | ||
104 | t->thread.fir = ia64_getreg(_IA64_REG_AR_FIR); | ||
105 | t->thread.fdr = ia64_getreg(_IA64_REG_AR_FDR); | ||
106 | ia64_set_kr(IA64_KR_IO_BASE, t->thread.old_iob); | ||
107 | ia64_set_kr(IA64_KR_TSSD, t->thread.old_k1); | ||
108 | } | ||
109 | |||
110 | void | ||
111 | ia32_load_state (struct task_struct *t) | ||
112 | { | ||
113 | unsigned long eflag, fsr, fcr, fir, fdr, tssd; | ||
114 | struct pt_regs *regs = task_pt_regs(t); | ||
115 | |||
116 | eflag = t->thread.eflag; | ||
117 | fsr = t->thread.fsr; | ||
118 | fcr = t->thread.fcr; | ||
119 | fir = t->thread.fir; | ||
120 | fdr = t->thread.fdr; | ||
121 | tssd = load_desc(_TSS); /* TSSD */ | ||
122 | |||
123 | ia64_setreg(_IA64_REG_AR_EFLAG, eflag); | ||
124 | ia64_setreg(_IA64_REG_AR_FSR, fsr); | ||
125 | ia64_setreg(_IA64_REG_AR_FCR, fcr); | ||
126 | ia64_setreg(_IA64_REG_AR_FIR, fir); | ||
127 | ia64_setreg(_IA64_REG_AR_FDR, fdr); | ||
128 | current->thread.old_iob = ia64_get_kr(IA64_KR_IO_BASE); | ||
129 | current->thread.old_k1 = ia64_get_kr(IA64_KR_TSSD); | ||
130 | ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE); | ||
131 | ia64_set_kr(IA64_KR_TSSD, tssd); | ||
132 | |||
133 | regs->r17 = (_TSS << 48) | (_LDT << 32) | (__u32) regs->r17; | ||
134 | regs->r30 = load_desc(_LDT); /* LDTD */ | ||
135 | load_TLS(&t->thread, smp_processor_id()); | ||
136 | } | ||
137 | |||
138 | /* | ||
139 | * Setup IA32 GDT and TSS | ||
140 | */ | ||
141 | void | ||
142 | ia32_gdt_init (void) | ||
143 | { | ||
144 | int cpu = smp_processor_id(); | ||
145 | |||
146 | ia32_shared_page[cpu] = alloc_page(GFP_KERNEL); | ||
147 | if (!ia32_shared_page[cpu]) | ||
148 | panic("failed to allocate ia32_shared_page[%d]\n", cpu); | ||
149 | |||
150 | cpu_gdt_table[cpu] = page_address(ia32_shared_page[cpu]); | ||
151 | |||
152 | /* Copy from the boot cpu's GDT */ | ||
153 | memcpy(cpu_gdt_table[cpu], ia32_boot_gdt, PAGE_SIZE); | ||
154 | } | ||
155 | |||
156 | |||
157 | /* | ||
158 | * Setup IA32 GDT and TSS | ||
159 | */ | ||
160 | static void | ||
161 | ia32_boot_gdt_init (void) | ||
162 | { | ||
163 | unsigned long ldt_size; | ||
164 | |||
165 | ia32_shared_page[0] = alloc_page(GFP_KERNEL); | ||
166 | if (!ia32_shared_page[0]) | ||
167 | panic("failed to allocate ia32_shared_page[0]\n"); | ||
168 | |||
169 | ia32_boot_gdt = page_address(ia32_shared_page[0]); | ||
170 | cpu_gdt_table[0] = ia32_boot_gdt; | ||
171 | |||
172 | /* CS descriptor in IA-32 (scrambled) format */ | ||
173 | ia32_boot_gdt[__USER_CS >> 3] | ||
174 | = IA32_SEG_DESCRIPTOR(0, (IA32_GATE_END-1) >> IA32_PAGE_SHIFT, | ||
175 | 0xb, 1, 3, 1, 1, 1, 1); | ||
176 | |||
177 | /* DS descriptor in IA-32 (scrambled) format */ | ||
178 | ia32_boot_gdt[__USER_DS >> 3] | ||
179 | = IA32_SEG_DESCRIPTOR(0, (IA32_GATE_END-1) >> IA32_PAGE_SHIFT, | ||
180 | 0x3, 1, 3, 1, 1, 1, 1); | ||
181 | |||
182 | ldt_size = PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE); | ||
183 | ia32_boot_gdt[TSS_ENTRY] = IA32_SEG_DESCRIPTOR(IA32_TSS_OFFSET, 235, | ||
184 | 0xb, 0, 3, 1, 1, 1, 0); | ||
185 | ia32_boot_gdt[LDT_ENTRY] = IA32_SEG_DESCRIPTOR(IA32_LDT_OFFSET, ldt_size - 1, | ||
186 | 0x2, 0, 3, 1, 1, 1, 0); | ||
187 | } | ||
188 | |||
189 | static void | ||
190 | ia32_gate_page_init(void) | ||
191 | { | ||
192 | unsigned long *sr; | ||
193 | |||
194 | ia32_gate_page = alloc_page(GFP_KERNEL); | ||
195 | sr = page_address(ia32_gate_page); | ||
196 | /* This is popl %eax ; movl $,%eax ; int $0x80 */ | ||
197 | *sr++ = 0xb858 | (__IA32_NR_sigreturn << 16) | (0x80cdUL << 48); | ||
198 | |||
199 | /* This is movl $,%eax ; int $0x80 */ | ||
200 | *sr = 0xb8 | (__IA32_NR_rt_sigreturn << 8) | (0x80cdUL << 40); | ||
201 | } | ||
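The two 64-bit stores above pack the little-endian bytes of the sigreturn and rt_sigreturn stubs into the gate page. A quick host-side sketch (assuming a little-endian machine and the __IA32_NR_sigreturn value of 119 defined in ia32priv.h) shows the resulting byte sequence for the first stub; the rt_sigreturn word is built the same way with a one-byte opcode.

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		unsigned long nr_sigreturn = 119;	/* __IA32_NR_sigreturn */
		unsigned long word = 0xb858UL | (nr_sigreturn << 16) | (0x80cdUL << 48);
		unsigned char bytes[8];
		int i;

		memcpy(bytes, &word, sizeof(bytes));	/* little-endian host assumed */
		for (i = 0; i < 8; i++)
			printf("%02x ", bytes[i]);
		printf("\n");	/* 58 b8 77 00 00 00 cd 80: popl %eax; movl $0x77,%eax; int $0x80 */
		return 0;
	}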
202 | |||
203 | void | ||
204 | ia32_mem_init(void) | ||
205 | { | ||
206 | ia32_boot_gdt_init(); | ||
207 | ia32_gate_page_init(); | ||
208 | } | ||
209 | |||
210 | /* | ||
211 | * Handle bad IA32 interrupt via syscall | ||
212 | */ | ||
213 | void | ||
214 | ia32_bad_interrupt (unsigned long int_num, struct pt_regs *regs) | ||
215 | { | ||
216 | siginfo_t siginfo; | ||
217 | |||
218 | if (die_if_kernel("Bad IA-32 interrupt", regs, int_num)) | ||
219 | return; | ||
220 | |||
221 | siginfo.si_signo = SIGTRAP; | ||
222 | siginfo.si_errno = int_num; /* XXX is it OK to abuse si_errno like this? */ | ||
223 | siginfo.si_flags = 0; | ||
224 | siginfo.si_isr = 0; | ||
225 | siginfo.si_addr = NULL; | ||
226 | siginfo.si_imm = 0; | ||
227 | siginfo.si_code = TRAP_BRKPT; | ||
228 | force_sig_info(SIGTRAP, &siginfo, current); | ||
229 | } | ||
230 | |||
231 | void | ||
232 | ia32_cpu_init (void) | ||
233 | { | ||
234 | /* initialize global ia32 state - CR0 and CR4 */ | ||
235 | ia64_setreg(_IA64_REG_AR_CFLAG, (((ulong) IA32_CR4 << 32) | IA32_CR0)); | ||
236 | } | ||
237 | |||
238 | static int __init | ||
239 | ia32_init (void) | ||
240 | { | ||
241 | #if PAGE_SHIFT > IA32_PAGE_SHIFT | ||
242 | { | ||
243 | extern struct kmem_cache *ia64_partial_page_cachep; | ||
244 | |||
245 | ia64_partial_page_cachep = kmem_cache_create("ia64_partial_page_cache", | ||
246 | sizeof(struct ia64_partial_page), | ||
247 | 0, SLAB_PANIC, NULL); | ||
248 | } | ||
249 | #endif | ||
250 | return 0; | ||
251 | } | ||
252 | |||
253 | __initcall(ia32_init); | ||
diff --git a/arch/ia64/ia32/ia32_traps.c b/arch/ia64/ia32/ia32_traps.c deleted file mode 100644 index e486042672f1..000000000000 --- a/arch/ia64/ia32/ia32_traps.c +++ /dev/null | |||
@@ -1,156 +0,0 @@ | |||
1 | /* | ||
2 | * IA-32 exception handlers | ||
3 | * | ||
4 | * Copyright (C) 2000 Asit K. Mallick <asit.k.mallick@intel.com> | ||
5 | * Copyright (C) 2001-2002 Hewlett-Packard Co | ||
6 | * David Mosberger-Tang <davidm@hpl.hp.com> | ||
7 | * | ||
8 | * 06/16/00 A. Mallick added siginfo for most cases (close to IA32) | ||
9 | * 09/29/00 D. Mosberger added ia32_intercept() | ||
10 | */ | ||
11 | |||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/sched.h> | ||
14 | |||
15 | #include "ia32priv.h" | ||
16 | |||
17 | #include <asm/intrinsics.h> | ||
18 | #include <asm/ptrace.h> | ||
19 | |||
20 | int | ||
21 | ia32_intercept (struct pt_regs *regs, unsigned long isr) | ||
22 | { | ||
23 | switch ((isr >> 16) & 0xff) { | ||
24 | case 0: /* Instruction intercept fault */ | ||
25 | case 4: /* Locked Data reference fault */ | ||
26 | case 1: /* Gate intercept trap */ | ||
27 | return -1; | ||
28 | |||
29 | case 2: /* System flag trap */ | ||
30 | if (((isr >> 14) & 0x3) >= 2) { | ||
31 | /* MOV SS, POP SS instructions */ | ||
32 | ia64_psr(regs)->id = 1; | ||
33 | return 0; | ||
34 | } else | ||
35 | return -1; | ||
36 | } | ||
37 | return -1; | ||
38 | } | ||
39 | |||
40 | int | ||
41 | ia32_exception (struct pt_regs *regs, unsigned long isr) | ||
42 | { | ||
43 | struct siginfo siginfo; | ||
44 | |||
45 | /* initialize these fields to avoid leaking kernel bits to user space: */ | ||
46 | siginfo.si_errno = 0; | ||
47 | siginfo.si_flags = 0; | ||
48 | siginfo.si_isr = 0; | ||
49 | siginfo.si_imm = 0; | ||
50 | switch ((isr >> 16) & 0xff) { | ||
51 | case 1: | ||
52 | case 2: | ||
53 | siginfo.si_signo = SIGTRAP; | ||
54 | if (isr == 0) | ||
55 | siginfo.si_code = TRAP_TRACE; | ||
56 | else if (isr & 0x4) | ||
57 | siginfo.si_code = TRAP_BRANCH; | ||
58 | else | ||
59 | siginfo.si_code = TRAP_BRKPT; | ||
60 | break; | ||
61 | |||
62 | case 3: | ||
63 | siginfo.si_signo = SIGTRAP; | ||
64 | siginfo.si_code = TRAP_BRKPT; | ||
65 | break; | ||
66 | |||
67 | case 0: /* Divide fault */ | ||
68 | siginfo.si_signo = SIGFPE; | ||
69 | siginfo.si_code = FPE_INTDIV; | ||
70 | break; | ||
71 | |||
72 | case 4: /* Overflow */ | ||
73 | case 5: /* Bounds fault */ | ||
74 | siginfo.si_signo = SIGFPE; | ||
75 | siginfo.si_code = 0; | ||
76 | break; | ||
77 | |||
78 | case 6: /* Invalid Op-code */ | ||
79 | siginfo.si_signo = SIGILL; | ||
80 | siginfo.si_code = ILL_ILLOPN; | ||
81 | break; | ||
82 | |||
83 | case 7: /* FP DNA */ | ||
84 | case 8: /* Double Fault */ | ||
85 | case 9: /* Invalid TSS */ | ||
86 | case 11: /* Segment not present */ | ||
87 | case 12: /* Stack fault */ | ||
88 | case 13: /* General Protection Fault */ | ||
89 | siginfo.si_signo = SIGSEGV; | ||
90 | siginfo.si_code = 0; | ||
91 | break; | ||
92 | |||
93 | case 16: /* Pending FP error */ | ||
94 | { | ||
95 | unsigned long fsr, fcr; | ||
96 | |||
97 | fsr = ia64_getreg(_IA64_REG_AR_FSR); | ||
98 | fcr = ia64_getreg(_IA64_REG_AR_FCR); | ||
99 | |||
100 | siginfo.si_signo = SIGFPE; | ||
101 | /* | ||
102 | * (~cwd & swd) will mask out exceptions that are not set to unmasked | ||
103 | * status. 0x3f covers the exception bits in these regs, 0x200 is the | ||
104 | * C1 bit you need in case of a stack fault, 0x040 is the stack | ||
105 | * fault bit. We should only be taking one exception at a time, | ||
106 | * so if this combination doesn't produce any single exception, | ||
107 | * then we have a bad program that isn't synchronizing its FPU usage | ||
108 | * and it will suffer the consequences since we won't be able to | ||
109 | * fully reproduce the context of the exception | ||
110 | */ | ||
111 | siginfo.si_isr = isr; | ||
112 | siginfo.si_flags = __ISR_VALID; | ||
113 | switch(((~fcr) & (fsr & 0x3f)) | (fsr & 0x240)) { | ||
114 | case 0x000: | ||
115 | default: | ||
116 | siginfo.si_code = 0; | ||
117 | break; | ||
118 | case 0x001: /* Invalid Op */ | ||
119 | case 0x040: /* Stack Fault */ | ||
120 | case 0x240: /* Stack Fault | Direction */ | ||
121 | siginfo.si_code = FPE_FLTINV; | ||
122 | break; | ||
123 | case 0x002: /* Denormalize */ | ||
124 | case 0x010: /* Underflow */ | ||
125 | siginfo.si_code = FPE_FLTUND; | ||
126 | break; | ||
127 | case 0x004: /* Zero Divide */ | ||
128 | siginfo.si_code = FPE_FLTDIV; | ||
129 | break; | ||
130 | case 0x008: /* Overflow */ | ||
131 | siginfo.si_code = FPE_FLTOVF; | ||
132 | break; | ||
133 | case 0x020: /* Precision */ | ||
134 | siginfo.si_code = FPE_FLTRES; | ||
135 | break; | ||
136 | } | ||
137 | |||
138 | break; | ||
139 | } | ||
140 | |||
141 | case 17: /* Alignment check */ | ||
142 | siginfo.si_signo = SIGSEGV; | ||
143 | siginfo.si_code = BUS_ADRALN; | ||
144 | break; | ||
145 | |||
146 | case 19: /* SSE Numeric error */ | ||
147 | siginfo.si_signo = SIGFPE; | ||
148 | siginfo.si_code = 0; | ||
149 | break; | ||
150 | |||
151 | default: | ||
152 | return -1; | ||
153 | } | ||
154 | force_sig_info(siginfo.si_signo, &siginfo, current); | ||
155 | return 0; | ||
156 | } | ||
diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h deleted file mode 100644 index 0f15349c3c6b..000000000000 --- a/arch/ia64/ia32/ia32priv.h +++ /dev/null | |||
@@ -1,532 +0,0 @@ | |||
1 | #ifndef _ASM_IA64_IA32_PRIV_H | ||
2 | #define _ASM_IA64_IA32_PRIV_H | ||
3 | |||
4 | |||
5 | #include <asm/ia32.h> | ||
6 | |||
7 | #ifdef CONFIG_IA32_SUPPORT | ||
8 | |||
9 | #include <linux/binfmts.h> | ||
10 | #include <linux/compat.h> | ||
11 | #include <linux/rbtree.h> | ||
12 | |||
13 | #include <asm/processor.h> | ||
14 | |||
15 | /* | ||
16 | * 32 bit structures for IA32 support. | ||
17 | */ | ||
18 | |||
19 | #define IA32_PAGE_SIZE (1UL << IA32_PAGE_SHIFT) | ||
20 | #define IA32_PAGE_MASK (~(IA32_PAGE_SIZE - 1)) | ||
21 | #define IA32_PAGE_ALIGN(addr) (((addr) + IA32_PAGE_SIZE - 1) & IA32_PAGE_MASK) | ||
22 | #define IA32_CLOCKS_PER_SEC 100 /* Cast in stone for IA32 Linux */ | ||
23 | |||
24 | /* | ||
25 | * partially mapped pages provide precise accounting of which 4k sub pages | ||
26 | * are mapped and which ones are not, thereby improving IA-32 compatibility. | ||
27 | */ | ||
28 | struct ia64_partial_page { | ||
29 | struct ia64_partial_page *next; /* linked list, sorted by address */ | ||
30 | struct rb_node pp_rb; | ||
31 | /* 64K is the largest "normal" page supported by ia64 ABI. So 4K*64 | ||
32 | * should suffice. */ | ||
33 | unsigned long bitmap; | ||
34 | unsigned int base; | ||
35 | }; | ||
36 | |||
37 | struct ia64_partial_page_list { | ||
38 | struct ia64_partial_page *pp_head; /* list head, points to the lowest | ||
39 | * addressed partial page */ | ||
40 | struct rb_root ppl_rb; | ||
41 | struct ia64_partial_page *pp_hint; /* pp_hint->next is the last | ||
42 | * accessed partial page */ | ||
43 | atomic_t pp_count; /* reference count */ | ||
44 | }; | ||
45 | |||
46 | #if PAGE_SHIFT > IA32_PAGE_SHIFT | ||
47 | struct ia64_partial_page_list* ia32_init_pp_list (void); | ||
48 | #else | ||
49 | # define ia32_init_pp_list() 0 | ||
50 | #endif | ||
51 | |||
52 | /* sigcontext.h */ | ||
53 | /* | ||
54 | * As documented in the iBCS2 standard.. | ||
55 | * | ||
56 | * The first part of "struct _fpstate" is just the | ||
57 | * normal i387 hardware setup, the extra "status" | ||
58 | * word is used to save the coprocessor status word | ||
59 | * before entering the handler. | ||
60 | */ | ||
61 | struct _fpreg_ia32 { | ||
62 | unsigned short significand[4]; | ||
63 | unsigned short exponent; | ||
64 | }; | ||
65 | |||
66 | struct _fpxreg_ia32 { | ||
67 | unsigned short significand[4]; | ||
68 | unsigned short exponent; | ||
69 | unsigned short padding[3]; | ||
70 | }; | ||
71 | |||
72 | struct _xmmreg_ia32 { | ||
73 | unsigned int element[4]; | ||
74 | }; | ||
75 | |||
76 | |||
77 | struct _fpstate_ia32 { | ||
78 | unsigned int cw, | ||
79 | sw, | ||
80 | tag, | ||
81 | ipoff, | ||
82 | cssel, | ||
83 | dataoff, | ||
84 | datasel; | ||
85 | struct _fpreg_ia32 _st[8]; | ||
86 | unsigned short status; | ||
87 | unsigned short magic; /* 0xffff = regular FPU data only */ | ||
88 | |||
89 | /* FXSR FPU environment */ | ||
90 | unsigned int _fxsr_env[6]; /* FXSR FPU env is ignored */ | ||
91 | unsigned int mxcsr; | ||
92 | unsigned int reserved; | ||
93 | struct _fpxreg_ia32 _fxsr_st[8]; /* FXSR FPU reg data is ignored */ | ||
94 | struct _xmmreg_ia32 _xmm[8]; | ||
95 | unsigned int padding[56]; | ||
96 | }; | ||
97 | |||
98 | struct sigcontext_ia32 { | ||
99 | unsigned short gs, __gsh; | ||
100 | unsigned short fs, __fsh; | ||
101 | unsigned short es, __esh; | ||
102 | unsigned short ds, __dsh; | ||
103 | unsigned int edi; | ||
104 | unsigned int esi; | ||
105 | unsigned int ebp; | ||
106 | unsigned int esp; | ||
107 | unsigned int ebx; | ||
108 | unsigned int edx; | ||
109 | unsigned int ecx; | ||
110 | unsigned int eax; | ||
111 | unsigned int trapno; | ||
112 | unsigned int err; | ||
113 | unsigned int eip; | ||
114 | unsigned short cs, __csh; | ||
115 | unsigned int eflags; | ||
116 | unsigned int esp_at_signal; | ||
117 | unsigned short ss, __ssh; | ||
118 | unsigned int fpstate; /* really (struct _fpstate_ia32 *) */ | ||
119 | unsigned int oldmask; | ||
120 | unsigned int cr2; | ||
121 | }; | ||
122 | |||
123 | /* user.h */ | ||
124 | /* | ||
125 | * IA32 (Pentium III/4) FXSR, SSE support | ||
126 | * | ||
127 | * Provide support for the GDB 5.0+ PTRACE_{GET|SET}FPXREGS requests for | ||
128 | * interacting with the FXSR-format floating point environment. Floating | ||
129 | * point data can be accessed in the regular format in the usual manner, | ||
130 | * and both the standard and SIMD floating point data can be accessed via | ||
131 | * the new ptrace requests. In either case, changes to the FPU environment | ||
132 | * will be reflected in the task's state as expected. | ||
133 | */ | ||
134 | struct ia32_user_i387_struct { | ||
135 | int cwd; | ||
136 | int swd; | ||
137 | int twd; | ||
138 | int fip; | ||
139 | int fcs; | ||
140 | int foo; | ||
141 | int fos; | ||
142 | /* 8*10 bytes for each FP-reg = 80 bytes */ | ||
143 | struct _fpreg_ia32 st_space[8]; | ||
144 | }; | ||
145 | |||
146 | struct ia32_user_fxsr_struct { | ||
147 | unsigned short cwd; | ||
148 | unsigned short swd; | ||
149 | unsigned short twd; | ||
150 | unsigned short fop; | ||
151 | int fip; | ||
152 | int fcs; | ||
153 | int foo; | ||
154 | int fos; | ||
155 | int mxcsr; | ||
156 | int reserved; | ||
157 | int st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ | ||
158 | int xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */ | ||
159 | int padding[56]; | ||
160 | }; | ||
161 | |||
162 | /* signal.h */ | ||
163 | #define IA32_SET_SA_HANDLER(ka,handler,restorer) \ | ||
164 | ((ka)->sa.sa_handler = (__sighandler_t) \ | ||
165 | (((unsigned long)(restorer) << 32) \ | ||
166 | | ((handler) & 0xffffffff))) | ||
167 | #define IA32_SA_HANDLER(ka) ((unsigned long) (ka)->sa.sa_handler & 0xffffffff) | ||
168 | #define IA32_SA_RESTORER(ka) ((unsigned long) (ka)->sa.sa_handler >> 32) | ||
169 | |||
170 | #define __IA32_NR_sigreturn 119 | ||
171 | #define __IA32_NR_rt_sigreturn 173 | ||
172 | |||
173 | struct sigaction32 { | ||
174 | unsigned int sa_handler; /* Really a pointer, but need to deal with 32 bits */ | ||
175 | unsigned int sa_flags; | ||
176 | unsigned int sa_restorer; /* Another 32 bit pointer */ | ||
177 | compat_sigset_t sa_mask; /* A 32 bit mask */ | ||
178 | }; | ||
179 | |||
180 | struct old_sigaction32 { | ||
181 | unsigned int sa_handler; /* Really a pointer, but need to deal | ||
182 | with 32 bits */ | ||
183 | compat_old_sigset_t sa_mask; /* A 32 bit mask */ | ||
184 | unsigned int sa_flags; | ||
185 | unsigned int sa_restorer; /* Another 32 bit pointer */ | ||
186 | }; | ||
187 | |||
188 | typedef struct sigaltstack_ia32 { | ||
189 | unsigned int ss_sp; | ||
190 | int ss_flags; | ||
191 | unsigned int ss_size; | ||
192 | } stack_ia32_t; | ||
193 | |||
194 | struct ucontext_ia32 { | ||
195 | unsigned int uc_flags; | ||
196 | unsigned int uc_link; | ||
197 | stack_ia32_t uc_stack; | ||
198 | struct sigcontext_ia32 uc_mcontext; | ||
199 | sigset_t uc_sigmask; /* mask last for extensibility */ | ||
200 | }; | ||
201 | |||
202 | struct stat64 { | ||
203 | unsigned long long st_dev; | ||
204 | unsigned char __pad0[4]; | ||
205 | unsigned int __st_ino; | ||
206 | unsigned int st_mode; | ||
207 | unsigned int st_nlink; | ||
208 | unsigned int st_uid; | ||
209 | unsigned int st_gid; | ||
210 | unsigned long long st_rdev; | ||
211 | unsigned char __pad3[4]; | ||
212 | unsigned int st_size_lo; | ||
213 | unsigned int st_size_hi; | ||
214 | unsigned int st_blksize; | ||
215 | unsigned int st_blocks; /* Number of 512-byte blocks allocated. */ | ||
216 | unsigned int __pad4; /* future possible st_blocks high bits */ | ||
217 | unsigned int st_atime; | ||
218 | unsigned int st_atime_nsec; | ||
219 | unsigned int st_mtime; | ||
220 | unsigned int st_mtime_nsec; | ||
221 | unsigned int st_ctime; | ||
222 | unsigned int st_ctime_nsec; | ||
223 | unsigned int st_ino_lo; | ||
224 | unsigned int st_ino_hi; | ||
225 | }; | ||
226 | |||
227 | typedef struct compat_siginfo { | ||
228 | int si_signo; | ||
229 | int si_errno; | ||
230 | int si_code; | ||
231 | |||
232 | union { | ||
233 | int _pad[((128/sizeof(int)) - 3)]; | ||
234 | |||
235 | /* kill() */ | ||
236 | struct { | ||
237 | unsigned int _pid; /* sender's pid */ | ||
238 | unsigned int _uid; /* sender's uid */ | ||
239 | } _kill; | ||
240 | |||
241 | /* POSIX.1b timers */ | ||
242 | struct { | ||
243 | compat_timer_t _tid; /* timer id */ | ||
244 | int _overrun; /* overrun count */ | ||
245 | char _pad[sizeof(unsigned int) - sizeof(int)]; | ||
246 | compat_sigval_t _sigval; /* same as below */ | ||
247 | int _sys_private; /* not to be passed to user */ | ||
248 | } _timer; | ||
249 | |||
250 | /* POSIX.1b signals */ | ||
251 | struct { | ||
252 | unsigned int _pid; /* sender's pid */ | ||
253 | unsigned int _uid; /* sender's uid */ | ||
254 | compat_sigval_t _sigval; | ||
255 | } _rt; | ||
256 | |||
257 | /* SIGCHLD */ | ||
258 | struct { | ||
259 | unsigned int _pid; /* which child */ | ||
260 | unsigned int _uid; /* sender's uid */ | ||
261 | int _status; /* exit code */ | ||
262 | compat_clock_t _utime; | ||
263 | compat_clock_t _stime; | ||
264 | } _sigchld; | ||
265 | |||
266 | /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */ | ||
267 | struct { | ||
268 | unsigned int _addr; /* faulting insn/memory ref. */ | ||
269 | } _sigfault; | ||
270 | |||
271 | /* SIGPOLL */ | ||
272 | struct { | ||
273 | int _band; /* POLL_IN, POLL_OUT, POLL_MSG */ | ||
274 | int _fd; | ||
275 | } _sigpoll; | ||
276 | } _sifields; | ||
277 | } compat_siginfo_t; | ||
278 | |||
279 | /* | ||
280 | * IA-32 ELF specific definitions for IA-64. | ||
281 | */ | ||
282 | |||
283 | #define _ASM_IA64_ELF_H /* Don't include elf.h */ | ||
284 | |||
285 | #include <linux/sched.h> | ||
286 | |||
287 | /* | ||
288 | * This is used to ensure we don't load something for the wrong architecture. | ||
289 | */ | ||
290 | #define elf_check_arch(x) ((x)->e_machine == EM_386) | ||
291 | |||
292 | /* | ||
293 | * These are used to set parameters in the core dumps. | ||
294 | */ | ||
295 | #define ELF_CLASS ELFCLASS32 | ||
296 | #define ELF_DATA ELFDATA2LSB | ||
297 | #define ELF_ARCH EM_386 | ||
298 | |||
299 | #define IA32_STACK_TOP IA32_PAGE_OFFSET | ||
300 | #define IA32_GATE_OFFSET IA32_PAGE_OFFSET | ||
301 | #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE | ||
302 | |||
303 | /* | ||
304 | * The system segments (GDT, TSS, LDT) have to be mapped below 4GB so the IA-32 engine can | ||
305 | * access them. | ||
306 | */ | ||
307 | #define IA32_GDT_OFFSET (IA32_PAGE_OFFSET + PAGE_SIZE) | ||
308 | #define IA32_TSS_OFFSET (IA32_PAGE_OFFSET + 2*PAGE_SIZE) | ||
309 | #define IA32_LDT_OFFSET (IA32_PAGE_OFFSET + 3*PAGE_SIZE) | ||
310 | |||
311 | #define ELF_EXEC_PAGESIZE IA32_PAGE_SIZE | ||
312 | |||
313 | /* | ||
314 | * This is the location that an ET_DYN program is loaded if exec'ed. | ||
315 | * Typical use of this is to invoke "./ld.so someprog" to test out a | ||
316 | * new version of the loader. We need to make sure that it is out of | ||
317 | * the way of the program that it will "exec", and that there is | ||
318 | * sufficient room for the brk. | ||
319 | */ | ||
320 | #define ELF_ET_DYN_BASE (IA32_PAGE_OFFSET/3 + 0x1000000) | ||
321 | |||
322 | void ia64_elf32_init(struct pt_regs *regs); | ||
323 | #define ELF_PLAT_INIT(_r, load_addr) ia64_elf32_init(_r) | ||
324 | |||
325 | /* This macro yields a bitmask that programs can use to figure out | ||
326 | what instruction set this CPU supports. */ | ||
327 | #define ELF_HWCAP 0 | ||
328 | |||
329 | /* This macro yields a string that ld.so will use to load | ||
330 | implementation specific libraries for optimization. Not terribly | ||
331 | relevant until we have real hardware to play with... */ | ||
332 | #define ELF_PLATFORM NULL | ||
333 | |||
334 | #ifdef __KERNEL__ | ||
335 | # define SET_PERSONALITY(EX) \ | ||
336 | (current->personality = PER_LINUX) | ||
337 | #endif | ||
338 | |||
339 | #define IA32_EFLAG 0x200 | ||
340 | |||
341 | /* | ||
342 | * IA-32 ELF specific definitions for IA-64. | ||
343 | */ | ||
344 | |||
345 | #define __USER_CS 0x23 | ||
346 | #define __USER_DS 0x2B | ||
347 | |||
348 | /* | ||
349 | * The per-cpu GDT has 32 entries: see <asm-i386/segment.h> | ||
350 | */ | ||
351 | #define GDT_ENTRIES 32 | ||
352 | |||
353 | #define GDT_SIZE (GDT_ENTRIES * 8) | ||
354 | |||
355 | #define TSS_ENTRY 14 | ||
356 | #define LDT_ENTRY (TSS_ENTRY + 1) | ||
357 | |||
358 | #define IA32_SEGSEL_RPL (0x3 << 0) | ||
359 | #define IA32_SEGSEL_TI (0x1 << 2) | ||
360 | #define IA32_SEGSEL_INDEX_SHIFT 3 | ||
361 | |||
362 | #define _TSS ((unsigned long) TSS_ENTRY << IA32_SEGSEL_INDEX_SHIFT) | ||
363 | #define _LDT ((unsigned long) LDT_ENTRY << IA32_SEGSEL_INDEX_SHIFT) | ||
364 | |||
365 | #define IA32_SEG_BASE 16 | ||
366 | #define IA32_SEG_TYPE 40 | ||
367 | #define IA32_SEG_SYS 44 | ||
368 | #define IA32_SEG_DPL 45 | ||
369 | #define IA32_SEG_P 47 | ||
370 | #define IA32_SEG_HIGH_LIMIT 48 | ||
371 | #define IA32_SEG_AVL 52 | ||
372 | #define IA32_SEG_DB 54 | ||
373 | #define IA32_SEG_G 55 | ||
374 | #define IA32_SEG_HIGH_BASE 56 | ||
375 | |||
376 | #define IA32_SEG_DESCRIPTOR(base, limit, segtype, nonsysseg, dpl, segpresent, avl, segdb, gran) \ | ||
377 | (((limit) & 0xffff) \ | ||
378 | | (((unsigned long) (base) & 0xffffff) << IA32_SEG_BASE) \ | ||
379 | | ((unsigned long) (segtype) << IA32_SEG_TYPE) \ | ||
380 | | ((unsigned long) (nonsysseg) << IA32_SEG_SYS) \ | ||
381 | | ((unsigned long) (dpl) << IA32_SEG_DPL) \ | ||
382 | | ((unsigned long) (segpresent) << IA32_SEG_P) \ | ||
383 | | ((((unsigned long) (limit) >> 16) & 0xf) << IA32_SEG_HIGH_LIMIT) \ | ||
384 | | ((unsigned long) (avl) << IA32_SEG_AVL) \ | ||
385 | | ((unsigned long) (segdb) << IA32_SEG_DB) \ | ||
386 | | ((unsigned long) (gran) << IA32_SEG_G) \ | ||
387 | | ((((unsigned long) (base) >> 24) & 0xff) << IA32_SEG_HIGH_BASE)) | ||
388 | |||
389 | #define SEG_LIM 32 | ||
390 | #define SEG_TYPE 52 | ||
391 | #define SEG_SYS 56 | ||
392 | #define SEG_DPL 57 | ||
393 | #define SEG_P 59 | ||
394 | #define SEG_AVL 60 | ||
395 | #define SEG_DB 62 | ||
396 | #define SEG_G 63 | ||
397 | |||
398 | /* Unscramble an IA-32 segment descriptor into the IA-64 format. */ | ||
399 | #define IA32_SEG_UNSCRAMBLE(sd) \ | ||
400 | ( (((sd) >> IA32_SEG_BASE) & 0xffffff) | ((((sd) >> IA32_SEG_HIGH_BASE) & 0xff) << 24) \ | ||
401 | | ((((sd) & 0xffff) | ((((sd) >> IA32_SEG_HIGH_LIMIT) & 0xf) << 16)) << SEG_LIM) \ | ||
402 | | ((((sd) >> IA32_SEG_TYPE) & 0xf) << SEG_TYPE) \ | ||
403 | | ((((sd) >> IA32_SEG_SYS) & 0x1) << SEG_SYS) \ | ||
404 | | ((((sd) >> IA32_SEG_DPL) & 0x3) << SEG_DPL) \ | ||
405 | | ((((sd) >> IA32_SEG_P) & 0x1) << SEG_P) \ | ||
406 | | ((((sd) >> IA32_SEG_AVL) & 0x1) << SEG_AVL) \ | ||
407 | | ((((sd) >> IA32_SEG_DB) & 0x1) << SEG_DB) \ | ||
408 | | ((((sd) >> IA32_SEG_G) & 0x1) << SEG_G)) | ||
409 | |||
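The descriptor-packing macro above is easier to follow with a worked value. Below is a stand-alone sketch (not kernel code; the bit positions and the macro body are copied from this header, the segment values are hypothetical) that packs the classic flat DPL-3 code segment:

    /* Stand-alone sketch, not kernel code: bit positions and macro copied from
     * this header; the segment values are hypothetical. */
    #include <stdio.h>

    #define IA32_SEG_BASE           16
    #define IA32_SEG_TYPE           40
    #define IA32_SEG_SYS            44
    #define IA32_SEG_DPL            45
    #define IA32_SEG_P              47
    #define IA32_SEG_HIGH_LIMIT     48
    #define IA32_SEG_AVL            52
    #define IA32_SEG_DB             54
    #define IA32_SEG_G              55
    #define IA32_SEG_HIGH_BASE      56

    #define IA32_SEG_DESCRIPTOR(base, limit, segtype, nonsysseg, dpl, segpresent, avl, segdb, gran) \
            (((limit) & 0xffff)                                                 \
             | (((unsigned long) (base) & 0xffffff) << IA32_SEG_BASE)           \
             | ((unsigned long) (segtype) << IA32_SEG_TYPE)                     \
             | ((unsigned long) (nonsysseg) << IA32_SEG_SYS)                    \
             | ((unsigned long) (dpl) << IA32_SEG_DPL)                          \
             | ((unsigned long) (segpresent) << IA32_SEG_P)                     \
             | ((((unsigned long) (limit) >> 16) & 0xf) << IA32_SEG_HIGH_LIMIT) \
             | ((unsigned long) (avl) << IA32_SEG_AVL)                          \
             | ((unsigned long) (segdb) << IA32_SEG_DB)                         \
             | ((unsigned long) (gran) << IA32_SEG_G)                           \
             | ((((unsigned long) (base) >> 24) & 0xff) << IA32_SEG_HIGH_BASE))

    int main(void)
    {
            /* base 0, 20-bit limit 0xfffff with 4KB granularity, type 0xb
             * (code, readable, accessed), non-system, DPL 3, present, 32-bit */
            unsigned long cs = IA32_SEG_DESCRIPTOR(0, 0xfffff, 0xb, 1, 3, 1, 0, 1, 1);

            printf("%#018lx\n", cs);    /* prints 0x00cffb000000ffff */
            return 0;
    }

The printed value is the standard x86 encoding of a flat DPL-3 code segment, so a GDT assembled from these macros has the same byte layout a native IA-32 GDT would.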
410 | #define IA32_IOBASE 0x2000000000000000UL /* Virtual address for I/O space */ | ||
411 | |||
412 | #define IA32_CR0 0x80000001 /* Enable PG and PE bits */ | ||
413 | #define IA32_CR4 0x600 /* MMXEX and FXSR on */ | ||
414 | |||
415 | /* | ||
416 | * IA32 floating point control registers starting values | ||
417 | */ | ||
418 | |||
419 | #define IA32_FSR_DEFAULT 0x55550000 /* set all tag bits */ | ||
420 | #define IA32_FCR_DEFAULT 0x17800000037fUL /* extended precision, all masks */ | ||
421 | |||
422 | #define IA32_PTRACE_GETREGS 12 | ||
423 | #define IA32_PTRACE_SETREGS 13 | ||
424 | #define IA32_PTRACE_GETFPREGS 14 | ||
425 | #define IA32_PTRACE_SETFPREGS 15 | ||
426 | #define IA32_PTRACE_GETFPXREGS 18 | ||
427 | #define IA32_PTRACE_SETFPXREGS 19 | ||
428 | |||
429 | #define ia32_start_thread(regs,new_ip,new_sp) do { \ | ||
430 | set_fs(USER_DS); \ | ||
431 | ia64_psr(regs)->cpl = 3; /* set user mode */ \ | ||
432 | ia64_psr(regs)->ri = 0; /* clear return slot number */ \ | ||
433 | ia64_psr(regs)->is = 1; /* IA-32 instruction set */ \ | ||
434 | regs->cr_iip = new_ip; \ | ||
435 | regs->ar_rsc = 0xc; /* enforced lazy mode, priv. level 3 */ \ | ||
436 | regs->ar_rnat = 0; \ | ||
437 | regs->loadrs = 0; \ | ||
438 | regs->r12 = new_sp; \ | ||
439 | } while (0) | ||
440 | |||
441 | /* | ||
442 | * Local Descriptor Table (LDT) related declarations. | ||
443 | */ | ||
444 | |||
445 | #define IA32_LDT_ENTRIES 8192 /* Maximum number of LDT entries supported. */ | ||
446 | #define IA32_LDT_ENTRY_SIZE 8 /* The size of each LDT entry. */ | ||
447 | |||
448 | #define LDT_entry_a(info) \ | ||
449 | ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff)) | ||
450 | |||
451 | #define LDT_entry_b(info) \ | ||
452 | (((info)->base_addr & 0xff000000) | \ | ||
453 | (((info)->base_addr & 0x00ff0000) >> 16) | \ | ||
454 | ((info)->limit & 0xf0000) | \ | ||
455 | (((info)->read_exec_only ^ 1) << 9) | \ | ||
456 | ((info)->contents << 10) | \ | ||
457 | (((info)->seg_not_present ^ 1) << 15) | \ | ||
458 | ((info)->seg_32bit << 22) | \ | ||
459 | ((info)->limit_in_pages << 23) | \ | ||
460 | ((info)->useable << 20) | \ | ||
461 | 0x7100) | ||
462 | |||
463 | #define LDT_empty(info) ( \ | ||
464 | (info)->base_addr == 0 && \ | ||
465 | (info)->limit == 0 && \ | ||
466 | (info)->contents == 0 && \ | ||
467 | (info)->read_exec_only == 1 && \ | ||
468 | (info)->seg_32bit == 0 && \ | ||
469 | (info)->limit_in_pages == 0 && \ | ||
470 | (info)->seg_not_present == 1 && \ | ||
471 | (info)->useable == 0 ) | ||
472 | |||
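As a concrete illustration of the packing done by LDT_entry_a()/LDT_entry_b(), here is a stand-alone sketch (not kernel code; the macros and struct ia32_user_desc are copied from this header, and the descriptor contents are hypothetical, roughly what a set_thread_area()-style TLS entry would carry):

    /* Stand-alone sketch, not kernel code: macros and struct copied from this
     * header; the descriptor contents are hypothetical. */
    #include <assert.h>

    struct ia32_user_desc {
            unsigned int entry_number;
            unsigned int base_addr;
            unsigned int limit;
            unsigned int seg_32bit:1;
            unsigned int contents:2;
            unsigned int read_exec_only:1;
            unsigned int limit_in_pages:1;
            unsigned int seg_not_present:1;
            unsigned int useable:1;
    };

    #define LDT_entry_a(info) \
            ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))

    #define LDT_entry_b(info)                           \
            (((info)->base_addr & 0xff000000) |         \
            (((info)->base_addr & 0x00ff0000) >> 16) |  \
            ((info)->limit & 0xf0000) |                 \
            (((info)->read_exec_only ^ 1) << 9) |       \
            ((info)->contents << 10) |                  \
            (((info)->seg_not_present ^ 1) << 15) |     \
            ((info)->seg_32bit << 22) |                 \
            ((info)->limit_in_pages << 23) |            \
            ((info)->useable << 20) |                   \
            0x7100)

    int main(void)
    {
            /* hypothetical 32-bit read/write data segment at 1GB, 4GB limit */
            struct ia32_user_desc d = {
                    .entry_number = 6, .base_addr = 0x40000000, .limit = 0xfffff,
                    .seg_32bit = 1, .contents = 0, .read_exec_only = 0,
                    .limit_in_pages = 1, .seg_not_present = 0, .useable = 1,
            };

            /* word a: base[15:0] and limit[15:0]; word b: the attribute bits */
            assert(LDT_entry_a(&d) == 0x0000ffffu);
            assert(LDT_entry_b(&d) == 0x40dff300u);
            return 0;
    }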
473 | static inline void | ||
474 | load_TLS (struct thread_struct *t, unsigned int cpu) | ||
475 | { | ||
476 | extern unsigned long *cpu_gdt_table[NR_CPUS]; | ||
477 | |||
478 | memcpy(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN + 0, &t->tls_array[0], sizeof(long)); | ||
479 | memcpy(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN + 1, &t->tls_array[1], sizeof(long)); | ||
480 | memcpy(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN + 2, &t->tls_array[2], sizeof(long)); | ||
481 | } | ||
482 | |||
483 | struct ia32_user_desc { | ||
484 | unsigned int entry_number; | ||
485 | unsigned int base_addr; | ||
486 | unsigned int limit; | ||
487 | unsigned int seg_32bit:1; | ||
488 | unsigned int contents:2; | ||
489 | unsigned int read_exec_only:1; | ||
490 | unsigned int limit_in_pages:1; | ||
491 | unsigned int seg_not_present:1; | ||
492 | unsigned int useable:1; | ||
493 | }; | ||
494 | |||
495 | struct linux_binprm; | ||
496 | |||
497 | extern void ia32_init_addr_space (struct pt_regs *regs); | ||
498 | extern int ia32_setup_arg_pages (struct linux_binprm *bprm, int exec_stack); | ||
499 | extern unsigned long ia32_do_mmap (struct file *, unsigned long, unsigned long, int, int, loff_t); | ||
500 | extern void ia32_load_segment_descriptors (struct task_struct *task); | ||
501 | |||
502 | #define ia32f2ia64f(dst,src) \ | ||
503 | do { \ | ||
504 | ia64_ldfe(6,src); \ | ||
505 | ia64_stop(); \ | ||
506 | ia64_stf_spill(dst, 6); \ | ||
507 | } while(0) | ||
508 | |||
509 | #define ia64f2ia32f(dst,src) \ | ||
510 | do { \ | ||
511 | ia64_ldf_fill(6, src); \ | ||
512 | ia64_stop(); \ | ||
513 | ia64_stfe(dst, 6); \ | ||
514 | } while(0) | ||
515 | |||
516 | struct user_regs_struct32 { | ||
517 | __u32 ebx, ecx, edx, esi, edi, ebp, eax; | ||
518 | unsigned short ds, __ds, es, __es; | ||
519 | unsigned short fs, __fs, gs, __gs; | ||
520 | __u32 orig_eax, eip; | ||
521 | unsigned short cs, __cs; | ||
522 | __u32 eflags, esp; | ||
523 | unsigned short ss, __ss; | ||
524 | }; | ||
525 | |||
526 | /* Prototypes for use in elfcore32.h */ | ||
527 | extern int save_ia32_fpstate (struct task_struct *, struct ia32_user_i387_struct __user *); | ||
528 | extern int save_ia32_fpxstate (struct task_struct *, struct ia32_user_fxsr_struct __user *); | ||
529 | |||
530 | #endif /* !CONFIG_IA32_SUPPORT */ | ||
531 | |||
532 | #endif /* _ASM_IA64_IA32_PRIV_H */ | ||
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c deleted file mode 100644 index 045b746b9808..000000000000 --- a/arch/ia64/ia32/sys_ia32.c +++ /dev/null | |||
@@ -1,2765 +0,0 @@ | |||
1 | /* | ||
2 | * sys_ia32.c: Conversion between 32bit and 64bit native syscalls. Derived from sys_sparc32.c. | ||
3 | * | ||
4 | * Copyright (C) 2000 VA Linux Co | ||
5 | * Copyright (C) 2000 Don Dugger <n0ano@valinux.com> | ||
6 | * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com> | ||
7 | * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | ||
8 | * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) | ||
9 | * Copyright (C) 2000-2003, 2005 Hewlett-Packard Co | ||
10 | * David Mosberger-Tang <davidm@hpl.hp.com> | ||
11 | * Copyright (C) 2004 Gordon Jin <gordon.jin@intel.com> | ||
12 | * | ||
13 |  * These routines handle argument-size conversion between the 32-bit and | ||
14 |  * 64-bit environments. | ||
15 | */ | ||
16 | |||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/syscalls.h> | ||
19 | #include <linux/sysctl.h> | ||
20 | #include <linux/sched.h> | ||
21 | #include <linux/fs.h> | ||
22 | #include <linux/file.h> | ||
23 | #include <linux/signal.h> | ||
24 | #include <linux/resource.h> | ||
25 | #include <linux/times.h> | ||
26 | #include <linux/utsname.h> | ||
27 | #include <linux/smp.h> | ||
28 | #include <linux/smp_lock.h> | ||
29 | #include <linux/sem.h> | ||
30 | #include <linux/msg.h> | ||
31 | #include <linux/mm.h> | ||
32 | #include <linux/shm.h> | ||
33 | #include <linux/slab.h> | ||
34 | #include <linux/uio.h> | ||
35 | #include <linux/socket.h> | ||
36 | #include <linux/quota.h> | ||
37 | #include <linux/poll.h> | ||
38 | #include <linux/eventpoll.h> | ||
39 | #include <linux/personality.h> | ||
40 | #include <linux/ptrace.h> | ||
41 | #include <linux/regset.h> | ||
42 | #include <linux/stat.h> | ||
43 | #include <linux/ipc.h> | ||
44 | #include <linux/capability.h> | ||
45 | #include <linux/compat.h> | ||
46 | #include <linux/vfs.h> | ||
47 | #include <linux/mman.h> | ||
48 | #include <linux/mutex.h> | ||
49 | |||
50 | #include <asm/intrinsics.h> | ||
51 | #include <asm/types.h> | ||
52 | #include <asm/uaccess.h> | ||
53 | #include <asm/unistd.h> | ||
54 | |||
55 | #include "ia32priv.h" | ||
56 | |||
57 | #include <net/scm.h> | ||
58 | #include <net/sock.h> | ||
59 | |||
60 | #define DEBUG 0 | ||
61 | |||
62 | #if DEBUG | ||
63 | # define DBG(fmt...) printk(KERN_DEBUG fmt) | ||
64 | #else | ||
65 | # define DBG(fmt...) | ||
66 | #endif | ||
67 | |||
68 | #define ROUND_UP(x,a) ((__typeof__(x))(((unsigned long)(x) + ((a) - 1)) & ~((a) - 1))) | ||
69 | |||
70 | #define OFFSET4K(a) ((a) & 0xfff) | ||
71 | #define PAGE_START(addr) ((addr) & PAGE_MASK) | ||
72 | #define MINSIGSTKSZ_IA32 2048 | ||
73 | |||
74 | #define high2lowuid(uid) ((uid) > 65535 ? 65534 : (uid)) | ||
75 | #define high2lowgid(gid) ((gid) > 65535 ? 65534 : (gid)) | ||
76 | |||
77 | /* | ||
78 |  * Anything that modifies or inspects ia32 user virtual memory must hold this mutex | ||
79 | * while doing so. | ||
80 | */ | ||
81 | /* XXX make per-mm: */ | ||
82 | static DEFINE_MUTEX(ia32_mmap_mutex); | ||
83 | |||
84 | asmlinkage long | ||
85 | sys32_execve (char __user *name, compat_uptr_t __user *argv, compat_uptr_t __user *envp, | ||
86 | struct pt_regs *regs) | ||
87 | { | ||
88 | long error; | ||
89 | char *filename; | ||
90 | unsigned long old_map_base, old_task_size, tssd; | ||
91 | |||
92 | filename = getname(name); | ||
93 | error = PTR_ERR(filename); | ||
94 | if (IS_ERR(filename)) | ||
95 | return error; | ||
96 | |||
97 | old_map_base = current->thread.map_base; | ||
98 | old_task_size = current->thread.task_size; | ||
99 | tssd = ia64_get_kr(IA64_KR_TSSD); | ||
100 | |||
101 | /* we may be exec'ing a 64-bit process: reset map base, task-size, and io-base: */ | ||
102 | current->thread.map_base = DEFAULT_MAP_BASE; | ||
103 | current->thread.task_size = DEFAULT_TASK_SIZE; | ||
104 | ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob); | ||
105 | ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1); | ||
106 | |||
107 | error = compat_do_execve(filename, argv, envp, regs); | ||
108 | putname(filename); | ||
109 | |||
110 | if (error < 0) { | ||
111 | /* oops, execve failed, switch back to old values... */ | ||
112 | ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE); | ||
113 | ia64_set_kr(IA64_KR_TSSD, tssd); | ||
114 | current->thread.map_base = old_map_base; | ||
115 | current->thread.task_size = old_task_size; | ||
116 | } | ||
117 | |||
118 | return error; | ||
119 | } | ||
120 | |||
121 | |||
122 | #if PAGE_SHIFT > IA32_PAGE_SHIFT | ||
123 | |||
124 | |||
125 | static int | ||
126 | get_page_prot (struct vm_area_struct *vma, unsigned long addr) | ||
127 | { | ||
128 | int prot = 0; | ||
129 | |||
130 | if (!vma || vma->vm_start > addr) | ||
131 | return 0; | ||
132 | |||
133 | if (vma->vm_flags & VM_READ) | ||
134 | prot |= PROT_READ; | ||
135 | if (vma->vm_flags & VM_WRITE) | ||
136 | prot |= PROT_WRITE; | ||
137 | if (vma->vm_flags & VM_EXEC) | ||
138 | prot |= PROT_EXEC; | ||
139 | return prot; | ||
140 | } | ||
141 | |||
142 | /* | ||
143 | * Map a subpage by creating an anonymous page that contains the union of the old page and | ||
144 | * the subpage. | ||
145 | */ | ||
146 | static unsigned long | ||
147 | mmap_subpage (struct file *file, unsigned long start, unsigned long end, int prot, int flags, | ||
148 | loff_t off) | ||
149 | { | ||
150 | void *page = NULL; | ||
151 | struct inode *inode; | ||
152 | unsigned long ret = 0; | ||
153 | struct vm_area_struct *vma = find_vma(current->mm, start); | ||
154 | int old_prot = get_page_prot(vma, start); | ||
155 | |||
156 | DBG("mmap_subpage(file=%p,start=0x%lx,end=0x%lx,prot=%x,flags=%x,off=0x%llx)\n", | ||
157 | file, start, end, prot, flags, off); | ||
158 | |||
159 | |||
160 | /* Optimize the case where the old mmap and the new mmap are both anonymous */ | ||
161 | if ((old_prot & PROT_WRITE) && (flags & MAP_ANONYMOUS) && !vma->vm_file) { | ||
162 | if (clear_user((void __user *) start, end - start)) { | ||
163 | ret = -EFAULT; | ||
164 | goto out; | ||
165 | } | ||
166 | goto skip_mmap; | ||
167 | } | ||
168 | |||
169 | page = (void *) get_zeroed_page(GFP_KERNEL); | ||
170 | if (!page) | ||
171 | return -ENOMEM; | ||
172 | |||
173 | if (old_prot) | ||
174 | copy_from_user(page, (void __user *) PAGE_START(start), PAGE_SIZE); | ||
175 | |||
176 | down_write(¤t->mm->mmap_sem); | ||
177 | { | ||
178 | ret = do_mmap(NULL, PAGE_START(start), PAGE_SIZE, prot | PROT_WRITE, | ||
179 | flags | MAP_FIXED | MAP_ANONYMOUS, 0); | ||
180 | } | ||
181 | up_write(¤t->mm->mmap_sem); | ||
182 | |||
183 | if (IS_ERR((void *) ret)) | ||
184 | goto out; | ||
185 | |||
186 | if (old_prot) { | ||
187 | /* copy back the old page contents. */ | ||
188 | if (offset_in_page(start)) | ||
189 | copy_to_user((void __user *) PAGE_START(start), page, | ||
190 | offset_in_page(start)); | ||
191 | if (offset_in_page(end)) | ||
192 | copy_to_user((void __user *) end, page + offset_in_page(end), | ||
193 | PAGE_SIZE - offset_in_page(end)); | ||
194 | } | ||
195 | |||
196 | if (!(flags & MAP_ANONYMOUS)) { | ||
197 | /* read the file contents */ | ||
198 | inode = file->f_path.dentry->d_inode; | ||
199 | if (!inode->i_fop || !file->f_op->read | ||
200 | || ((*file->f_op->read)(file, (char __user *) start, end - start, &off) < 0)) | ||
201 | { | ||
202 | ret = -EINVAL; | ||
203 | goto out; | ||
204 | } | ||
205 | } | ||
206 | |||
207 | skip_mmap: | ||
208 | if (!(prot & PROT_WRITE)) | ||
209 | ret = sys_mprotect(PAGE_START(start), PAGE_SIZE, prot | old_prot); | ||
210 | out: | ||
211 | if (page) | ||
212 | free_page((unsigned long) page); | ||
213 | return ret; | ||
214 | } | ||
215 | |||
216 | /* SLAB cache for ia64_partial_page structures */ | ||
217 | struct kmem_cache *ia64_partial_page_cachep; | ||
218 | |||
219 | /* | ||
220 | * init ia64_partial_page_list. | ||
221 |  * Returns NULL if the allocation fails. | ||
222 | */ | ||
223 | struct ia64_partial_page_list* | ||
224 | ia32_init_pp_list(void) | ||
225 | { | ||
226 | struct ia64_partial_page_list *p; | ||
227 | |||
228 | if ((p = kmalloc(sizeof(*p), GFP_KERNEL)) == NULL) | ||
229 | return p; | ||
230 | p->pp_head = NULL; | ||
231 | p->ppl_rb = RB_ROOT; | ||
232 | p->pp_hint = NULL; | ||
233 | atomic_set(&p->pp_count, 1); | ||
234 | return p; | ||
235 | } | ||
236 | |||
237 | /* | ||
238 | * Search for the partial page with @start in partial page list @ppl. | ||
240 |  * If the partial page is found, return it. | ||
241 |  * Otherwise, return NULL and set @pprev, @rb_link and @rb_parent for | ||
242 |  * use by a later __ia32_insert_pp(). | ||
242 | */ | ||
243 | static struct ia64_partial_page * | ||
244 | __ia32_find_pp(struct ia64_partial_page_list *ppl, unsigned int start, | ||
245 | struct ia64_partial_page **pprev, struct rb_node ***rb_link, | ||
246 | struct rb_node **rb_parent) | ||
247 | { | ||
248 | struct ia64_partial_page *pp; | ||
249 | struct rb_node **__rb_link, *__rb_parent, *rb_prev; | ||
250 | |||
251 | pp = ppl->pp_hint; | ||
252 | if (pp && pp->base == start) | ||
253 | return pp; | ||
254 | |||
255 | __rb_link = &ppl->ppl_rb.rb_node; | ||
256 | rb_prev = __rb_parent = NULL; | ||
257 | |||
258 | while (*__rb_link) { | ||
259 | __rb_parent = *__rb_link; | ||
260 | pp = rb_entry(__rb_parent, struct ia64_partial_page, pp_rb); | ||
261 | |||
262 | if (pp->base == start) { | ||
263 | ppl->pp_hint = pp; | ||
264 | return pp; | ||
265 | } else if (pp->base < start) { | ||
266 | rb_prev = __rb_parent; | ||
267 | __rb_link = &__rb_parent->rb_right; | ||
268 | } else { | ||
269 | __rb_link = &__rb_parent->rb_left; | ||
270 | } | ||
271 | } | ||
272 | |||
273 | *rb_link = __rb_link; | ||
274 | *rb_parent = __rb_parent; | ||
275 | *pprev = NULL; | ||
276 | if (rb_prev) | ||
277 | *pprev = rb_entry(rb_prev, struct ia64_partial_page, pp_rb); | ||
278 | return NULL; | ||
279 | } | ||
280 | |||
281 | /* | ||
282 | * insert @pp into @ppl. | ||
283 | */ | ||
284 | static void | ||
285 | __ia32_insert_pp(struct ia64_partial_page_list *ppl, | ||
286 | struct ia64_partial_page *pp, struct ia64_partial_page *prev, | ||
287 | struct rb_node **rb_link, struct rb_node *rb_parent) | ||
288 | { | ||
289 | /* link list */ | ||
290 | if (prev) { | ||
291 | pp->next = prev->next; | ||
292 | prev->next = pp; | ||
293 | } else { | ||
294 | ppl->pp_head = pp; | ||
295 | if (rb_parent) | ||
296 | pp->next = rb_entry(rb_parent, | ||
297 | struct ia64_partial_page, pp_rb); | ||
298 | else | ||
299 | pp->next = NULL; | ||
300 | } | ||
301 | |||
302 | /* link rb */ | ||
303 | rb_link_node(&pp->pp_rb, rb_parent, rb_link); | ||
304 | rb_insert_color(&pp->pp_rb, &ppl->ppl_rb); | ||
305 | |||
306 | ppl->pp_hint = pp; | ||
307 | } | ||
308 | |||
309 | /* | ||
310 | * delete @pp from partial page list @ppl. | ||
311 | */ | ||
312 | static void | ||
313 | __ia32_delete_pp(struct ia64_partial_page_list *ppl, | ||
314 | struct ia64_partial_page *pp, struct ia64_partial_page *prev) | ||
315 | { | ||
316 | if (prev) { | ||
317 | prev->next = pp->next; | ||
318 | if (ppl->pp_hint == pp) | ||
319 | ppl->pp_hint = prev; | ||
320 | } else { | ||
321 | ppl->pp_head = pp->next; | ||
322 | if (ppl->pp_hint == pp) | ||
323 | ppl->pp_hint = pp->next; | ||
324 | } | ||
325 | rb_erase(&pp->pp_rb, &ppl->ppl_rb); | ||
326 | kmem_cache_free(ia64_partial_page_cachep, pp); | ||
327 | } | ||
328 | |||
329 | static struct ia64_partial_page * | ||
330 | __pp_prev(struct ia64_partial_page *pp) | ||
331 | { | ||
332 | struct rb_node *prev = rb_prev(&pp->pp_rb); | ||
333 | if (prev) | ||
334 | return rb_entry(prev, struct ia64_partial_page, pp_rb); | ||
335 | else | ||
336 | return NULL; | ||
337 | } | ||
338 | |||
339 | /* | ||
340 | * Delete partial pages with address between @start and @end. | ||
341 | * @start and @end are page aligned. | ||
342 | */ | ||
343 | static void | ||
344 | __ia32_delete_pp_range(unsigned int start, unsigned int end) | ||
345 | { | ||
346 | struct ia64_partial_page *pp, *prev; | ||
347 | struct rb_node **rb_link, *rb_parent; | ||
348 | |||
349 | if (start >= end) | ||
350 | return; | ||
351 | |||
352 | pp = __ia32_find_pp(current->thread.ppl, start, &prev, | ||
353 | &rb_link, &rb_parent); | ||
354 | if (pp) | ||
355 | prev = __pp_prev(pp); | ||
356 | else { | ||
357 | if (prev) | ||
358 | pp = prev->next; | ||
359 | else | ||
360 | pp = current->thread.ppl->pp_head; | ||
361 | } | ||
362 | |||
363 | while (pp && pp->base < end) { | ||
364 | struct ia64_partial_page *tmp = pp->next; | ||
365 | __ia32_delete_pp(current->thread.ppl, pp, prev); | ||
366 | pp = tmp; | ||
367 | } | ||
368 | } | ||
369 | |||
370 | /* | ||
371 | * Set the range between @start and @end in bitmap. | ||
372 | * @start and @end should be IA32 page aligned and in the same IA64 page. | ||
373 | */ | ||
374 | static int | ||
375 | __ia32_set_pp(unsigned int start, unsigned int end, int flags) | ||
376 | { | ||
377 | struct ia64_partial_page *pp, *prev; | ||
378 | struct rb_node ** rb_link, *rb_parent; | ||
379 | unsigned int pstart, start_bit, end_bit, i; | ||
380 | |||
381 | pstart = PAGE_START(start); | ||
382 | start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE; | ||
383 | end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE; | ||
384 | if (end_bit == 0) | ||
385 | end_bit = PAGE_SIZE / IA32_PAGE_SIZE; | ||
386 | pp = __ia32_find_pp(current->thread.ppl, pstart, &prev, | ||
387 | &rb_link, &rb_parent); | ||
388 | if (pp) { | ||
389 | for (i = start_bit; i < end_bit; i++) | ||
390 | set_bit(i, &pp->bitmap); | ||
391 | /* | ||
392 |  * Check: if this partial page has now become a full page, | ||
393 |  * then delete it. | ||
394 | */ | ||
395 | if (find_first_zero_bit(&pp->bitmap, sizeof(pp->bitmap)*8) >= | ||
396 | PAGE_SIZE/IA32_PAGE_SIZE) { | ||
397 | __ia32_delete_pp(current->thread.ppl, pp, __pp_prev(pp)); | ||
398 | } | ||
399 | return 0; | ||
400 | } | ||
401 | |||
402 | /* | ||
403 | * MAP_FIXED may lead to overlapping mmap. | ||
404 |  * In this case, the requested area may already be mapped as a full | ||
405 |  * page, so check the vma before adding a new partial page. | ||
406 | */ | ||
407 | if (flags & MAP_FIXED) { | ||
408 | struct vm_area_struct *vma = find_vma(current->mm, pstart); | ||
409 | if (vma && vma->vm_start <= pstart) | ||
410 | return 0; | ||
411 | } | ||
412 | |||
413 | 	/* allocate a new ia64_partial_page */ | ||
414 | pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL); | ||
415 | if (!pp) | ||
416 | return -ENOMEM; | ||
417 | pp->base = pstart; | ||
418 | pp->bitmap = 0; | ||
419 | for (i=start_bit; i<end_bit; i++) | ||
420 | set_bit(i, &(pp->bitmap)); | ||
421 | pp->next = NULL; | ||
422 | __ia32_insert_pp(current->thread.ppl, pp, prev, rb_link, rb_parent); | ||
423 | return 0; | ||
424 | } | ||
425 | |||
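The start_bit/end_bit arithmetic used by the partial-page helpers is easier to see with concrete numbers. A stand-alone sketch, not kernel code, assuming 16KB IA-64 pages and 4KB IA-32 pages; the addresses are made up:

    /* Stand-alone sketch, not kernel code: assumes 16KB IA-64 pages and
     * 4KB IA-32 pages; the addresses are made up. */
    #include <stdio.h>

    #define PAGE_SIZE       (16 * 1024)
    #define IA32_PAGE_SIZE  (4 * 1024)
    #define PAGE_START(a)   ((a) & ~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned int start = 0x08001000;        /* 2nd 4K sub-page */
            unsigned int end   = 0x08003000;        /* up to (not incl.) the 4th */
            unsigned int start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
            unsigned int end_bit   = (end % PAGE_SIZE) / IA32_PAGE_SIZE;

            if (end_bit == 0)
                    end_bit = PAGE_SIZE / IA32_PAGE_SIZE;

            /* bits 1 and 2 of the 4-bit bitmap get set: bitmap = 0b0110 */
            printf("base=%#x, bits [%u,%u)\n", PAGE_START(start), start_bit, end_bit);
            return 0;
    }

Once every bit of a page's bitmap is set, the page is no longer partial, which is why __ia32_set_pp() above deletes the node in that case.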
426 | /* | ||
427 | * @start and @end should be IA32 page aligned, but don't need to be in the | ||
428 | * same IA64 page. Split @start and @end to make sure they're in the same IA64 | ||
429 | * page, then call __ia32_set_pp(). | ||
430 | */ | ||
431 | static void | ||
432 | ia32_set_pp(unsigned int start, unsigned int end, int flags) | ||
433 | { | ||
434 | down_write(¤t->mm->mmap_sem); | ||
435 | if (flags & MAP_FIXED) { | ||
436 | /* | ||
437 | * MAP_FIXED may lead to overlapping mmap. When this happens, | ||
438 | * a series of complete IA64 pages results in deletion of | ||
439 | * old partial pages in that range. | ||
440 | */ | ||
441 | __ia32_delete_pp_range(PAGE_ALIGN(start), PAGE_START(end)); | ||
442 | } | ||
443 | |||
444 | if (end < PAGE_ALIGN(start)) { | ||
445 | __ia32_set_pp(start, end, flags); | ||
446 | } else { | ||
447 | if (offset_in_page(start)) | ||
448 | __ia32_set_pp(start, PAGE_ALIGN(start), flags); | ||
449 | if (offset_in_page(end)) | ||
450 | __ia32_set_pp(PAGE_START(end), end, flags); | ||
451 | } | ||
452 | up_write(¤t->mm->mmap_sem); | ||
453 | } | ||
454 | |||
455 | /* | ||
456 |  * Clear the range between @start and @end in the bitmap. | ||
457 |  * @start and @end should be IA32 page aligned and in the same IA64 page. | ||
458 |  * After doing that, if the bitmap is all zero, free the partial page and | ||
459 |  * return 1; otherwise return 0. | ||
460 |  * If the partial page is not found in the list, then: | ||
461 |  * if a vma covers the page, turn the full page into a partial page; | ||
462 |  * otherwise return -ENOMEM. | ||
463 | */ | ||
464 | static int | ||
465 | __ia32_unset_pp(unsigned int start, unsigned int end) | ||
466 | { | ||
467 | struct ia64_partial_page *pp, *prev; | ||
468 | struct rb_node ** rb_link, *rb_parent; | ||
469 | unsigned int pstart, start_bit, end_bit, i; | ||
470 | struct vm_area_struct *vma; | ||
471 | |||
472 | pstart = PAGE_START(start); | ||
473 | start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE; | ||
474 | end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE; | ||
475 | if (end_bit == 0) | ||
476 | end_bit = PAGE_SIZE / IA32_PAGE_SIZE; | ||
477 | |||
478 | pp = __ia32_find_pp(current->thread.ppl, pstart, &prev, | ||
479 | &rb_link, &rb_parent); | ||
480 | if (pp) { | ||
481 | for (i = start_bit; i < end_bit; i++) | ||
482 | clear_bit(i, &pp->bitmap); | ||
483 | if (pp->bitmap == 0) { | ||
484 | __ia32_delete_pp(current->thread.ppl, pp, __pp_prev(pp)); | ||
485 | return 1; | ||
486 | } | ||
487 | return 0; | ||
488 | } | ||
489 | |||
490 | vma = find_vma(current->mm, pstart); | ||
491 | if (!vma || vma->vm_start > pstart) { | ||
492 | return -ENOMEM; | ||
493 | } | ||
494 | |||
495 | 	/* allocate a new ia64_partial_page */ | ||
496 | pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL); | ||
497 | if (!pp) | ||
498 | return -ENOMEM; | ||
499 | pp->base = pstart; | ||
500 | pp->bitmap = 0; | ||
501 | for (i = 0; i < start_bit; i++) | ||
502 | set_bit(i, &(pp->bitmap)); | ||
503 | for (i = end_bit; i < PAGE_SIZE / IA32_PAGE_SIZE; i++) | ||
504 | set_bit(i, &(pp->bitmap)); | ||
505 | pp->next = NULL; | ||
506 | __ia32_insert_pp(current->thread.ppl, pp, prev, rb_link, rb_parent); | ||
507 | return 0; | ||
508 | } | ||
509 | |||
510 | /* | ||
511 | * Delete pp between PAGE_ALIGN(start) and PAGE_START(end) by calling | ||
512 | * __ia32_delete_pp_range(). Unset possible partial pages by calling | ||
513 | * __ia32_unset_pp(). | ||
514 |  * For the return value, see __ia32_unset_pp(). | ||
515 | */ | ||
516 | static int | ||
517 | ia32_unset_pp(unsigned int *startp, unsigned int *endp) | ||
518 | { | ||
519 | unsigned int start = *startp, end = *endp; | ||
520 | int ret = 0; | ||
521 | |||
522 | down_write(¤t->mm->mmap_sem); | ||
523 | |||
524 | __ia32_delete_pp_range(PAGE_ALIGN(start), PAGE_START(end)); | ||
525 | |||
526 | if (end < PAGE_ALIGN(start)) { | ||
527 | ret = __ia32_unset_pp(start, end); | ||
528 | if (ret == 1) { | ||
529 | *startp = PAGE_START(start); | ||
530 | *endp = PAGE_ALIGN(end); | ||
531 | } | ||
532 | if (ret == 0) { | ||
533 | /* to shortcut sys_munmap() in sys32_munmap() */ | ||
534 | *startp = PAGE_START(start); | ||
535 | *endp = PAGE_START(end); | ||
536 | } | ||
537 | } else { | ||
538 | if (offset_in_page(start)) { | ||
539 | ret = __ia32_unset_pp(start, PAGE_ALIGN(start)); | ||
540 | if (ret == 1) | ||
541 | *startp = PAGE_START(start); | ||
542 | if (ret == 0) | ||
543 | *startp = PAGE_ALIGN(start); | ||
544 | if (ret < 0) | ||
545 | goto out; | ||
546 | } | ||
547 | if (offset_in_page(end)) { | ||
548 | ret = __ia32_unset_pp(PAGE_START(end), end); | ||
549 | if (ret == 1) | ||
550 | *endp = PAGE_ALIGN(end); | ||
551 | if (ret == 0) | ||
552 | *endp = PAGE_START(end); | ||
553 | } | ||
554 | } | ||
555 | |||
556 | out: | ||
557 | up_write(¤t->mm->mmap_sem); | ||
558 | return ret; | ||
559 | } | ||
560 | |||
561 | /* | ||
562 |  * Compare the range between @start and @end with the bitmap in the partial page. | ||
563 | * @start and @end should be IA32 page aligned and in the same IA64 page. | ||
564 | */ | ||
565 | static int | ||
566 | __ia32_compare_pp(unsigned int start, unsigned int end) | ||
567 | { | ||
568 | struct ia64_partial_page *pp, *prev; | ||
569 | struct rb_node ** rb_link, *rb_parent; | ||
570 | unsigned int pstart, start_bit, end_bit, size; | ||
571 | unsigned int first_bit, next_zero_bit; /* the first range in bitmap */ | ||
572 | |||
573 | pstart = PAGE_START(start); | ||
574 | |||
575 | pp = __ia32_find_pp(current->thread.ppl, pstart, &prev, | ||
576 | &rb_link, &rb_parent); | ||
577 | if (!pp) | ||
578 | return 1; | ||
579 | |||
580 | start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE; | ||
581 | end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE; | ||
582 | size = sizeof(pp->bitmap) * 8; | ||
583 | first_bit = find_first_bit(&pp->bitmap, size); | ||
584 | next_zero_bit = find_next_zero_bit(&pp->bitmap, size, first_bit); | ||
585 | if ((start_bit < first_bit) || (end_bit > next_zero_bit)) { | ||
586 | /* exceeds the first range in bitmap */ | ||
587 | return -ENOMEM; | ||
588 | } else if ((start_bit == first_bit) && (end_bit == next_zero_bit)) { | ||
589 | first_bit = find_next_bit(&pp->bitmap, size, next_zero_bit); | ||
590 | if ((next_zero_bit < first_bit) && (first_bit < size)) | ||
591 | return 1; /* has next range */ | ||
592 | else | ||
593 | return 0; /* no next range */ | ||
594 | } else | ||
595 | return 1; | ||
596 | } | ||
597 | |||
598 | /* | ||
599 | * @start and @end should be IA32 page aligned, but don't need to be in the | ||
600 | * same IA64 page. Split @start and @end to make sure they're in the same IA64 | ||
601 | * page, then call __ia32_compare_pp(). | ||
602 | * | ||
603 |  * For example, suppose the range covers the 1st and 2nd 4K pages. | ||
604 |  * Return 0 if the range matches the bitmap exactly, i.e. bitmap = 00000011; | ||
605 |  * Return 1 if the range doesn't cover the whole bitmap, e.g. bitmap = 00001111; | ||
606 | * Return -ENOMEM if the range exceeds the bitmap, e.g. bitmap = 00000001 or | ||
607 | * bitmap = 00000101. | ||
608 | */ | ||
609 | static int | ||
610 | ia32_compare_pp(unsigned int *startp, unsigned int *endp) | ||
611 | { | ||
612 | unsigned int start = *startp, end = *endp; | ||
613 | int retval = 0; | ||
614 | |||
615 | down_write(¤t->mm->mmap_sem); | ||
616 | |||
617 | if (end < PAGE_ALIGN(start)) { | ||
618 | retval = __ia32_compare_pp(start, end); | ||
619 | if (retval == 0) { | ||
620 | *startp = PAGE_START(start); | ||
621 | *endp = PAGE_ALIGN(end); | ||
622 | } | ||
623 | } else { | ||
624 | if (offset_in_page(start)) { | ||
625 | retval = __ia32_compare_pp(start, | ||
626 | PAGE_ALIGN(start)); | ||
627 | if (retval == 0) | ||
628 | *startp = PAGE_START(start); | ||
629 | if (retval < 0) | ||
630 | goto out; | ||
631 | } | ||
632 | if (offset_in_page(end)) { | ||
633 | retval = __ia32_compare_pp(PAGE_START(end), end); | ||
634 | if (retval == 0) | ||
635 | *endp = PAGE_ALIGN(end); | ||
636 | } | ||
637 | } | ||
638 | |||
639 | out: | ||
640 | up_write(¤t->mm->mmap_sem); | ||
641 | return retval; | ||
642 | } | ||
643 | |||
644 | static void | ||
645 | __ia32_drop_pp_list(struct ia64_partial_page_list *ppl) | ||
646 | { | ||
647 | struct ia64_partial_page *pp = ppl->pp_head; | ||
648 | |||
649 | while (pp) { | ||
650 | struct ia64_partial_page *next = pp->next; | ||
651 | kmem_cache_free(ia64_partial_page_cachep, pp); | ||
652 | pp = next; | ||
653 | } | ||
654 | |||
655 | kfree(ppl); | ||
656 | } | ||
657 | |||
658 | void | ||
659 | ia32_drop_ia64_partial_page_list(struct task_struct *task) | ||
660 | { | ||
661 | struct ia64_partial_page_list* ppl = task->thread.ppl; | ||
662 | |||
663 | if (ppl && atomic_dec_and_test(&ppl->pp_count)) | ||
664 | __ia32_drop_pp_list(ppl); | ||
665 | } | ||
666 | |||
667 | /* | ||
668 | * Copy current->thread.ppl to ppl (already initialized). | ||
669 | */ | ||
670 | static int | ||
671 | __ia32_copy_pp_list(struct ia64_partial_page_list *ppl) | ||
672 | { | ||
673 | struct ia64_partial_page *pp, *tmp, *prev; | ||
674 | struct rb_node **rb_link, *rb_parent; | ||
675 | |||
676 | ppl->pp_head = NULL; | ||
677 | ppl->pp_hint = NULL; | ||
678 | ppl->ppl_rb = RB_ROOT; | ||
679 | rb_link = &ppl->ppl_rb.rb_node; | ||
680 | rb_parent = NULL; | ||
681 | prev = NULL; | ||
682 | |||
683 | for (pp = current->thread.ppl->pp_head; pp; pp = pp->next) { | ||
684 | tmp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL); | ||
685 | if (!tmp) | ||
686 | return -ENOMEM; | ||
687 | *tmp = *pp; | ||
688 | __ia32_insert_pp(ppl, tmp, prev, rb_link, rb_parent); | ||
689 | prev = tmp; | ||
690 | rb_link = &tmp->pp_rb.rb_right; | ||
691 | rb_parent = &tmp->pp_rb; | ||
692 | } | ||
693 | return 0; | ||
694 | } | ||
695 | |||
696 | int | ||
697 | ia32_copy_ia64_partial_page_list(struct task_struct *p, | ||
698 | unsigned long clone_flags) | ||
699 | { | ||
700 | int retval = 0; | ||
701 | |||
702 | if (clone_flags & CLONE_VM) { | ||
703 | atomic_inc(¤t->thread.ppl->pp_count); | ||
704 | p->thread.ppl = current->thread.ppl; | ||
705 | } else { | ||
706 | p->thread.ppl = ia32_init_pp_list(); | ||
707 | if (!p->thread.ppl) | ||
708 | return -ENOMEM; | ||
709 | down_write(¤t->mm->mmap_sem); | ||
710 | { | ||
711 | retval = __ia32_copy_pp_list(p->thread.ppl); | ||
712 | } | ||
713 | up_write(¤t->mm->mmap_sem); | ||
714 | } | ||
715 | |||
716 | return retval; | ||
717 | } | ||
718 | |||
719 | static unsigned long | ||
720 | emulate_mmap (struct file *file, unsigned long start, unsigned long len, int prot, int flags, | ||
721 | loff_t off) | ||
722 | { | ||
723 | unsigned long tmp, end, pend, pstart, ret, is_congruent, fudge = 0; | ||
724 | struct inode *inode; | ||
725 | loff_t poff; | ||
726 | |||
727 | end = start + len; | ||
728 | pstart = PAGE_START(start); | ||
729 | pend = PAGE_ALIGN(end); | ||
730 | |||
731 | if (flags & MAP_FIXED) { | ||
732 | ia32_set_pp((unsigned int)start, (unsigned int)end, flags); | ||
733 | if (start > pstart) { | ||
734 | if (flags & MAP_SHARED) | ||
735 | printk(KERN_INFO | ||
736 | "%s(%d): emulate_mmap() can't share head (addr=0x%lx)\n", | ||
737 | current->comm, task_pid_nr(current), start); | ||
738 | ret = mmap_subpage(file, start, min(PAGE_ALIGN(start), end), prot, flags, | ||
739 | off); | ||
740 | if (IS_ERR((void *) ret)) | ||
741 | return ret; | ||
742 | pstart += PAGE_SIZE; | ||
743 | if (pstart >= pend) | ||
744 | goto out; /* done */ | ||
745 | } | ||
746 | if (end < pend) { | ||
747 | if (flags & MAP_SHARED) | ||
748 | printk(KERN_INFO | ||
749 | "%s(%d): emulate_mmap() can't share tail (end=0x%lx)\n", | ||
750 | current->comm, task_pid_nr(current), end); | ||
751 | ret = mmap_subpage(file, max(start, PAGE_START(end)), end, prot, flags, | ||
752 | (off + len) - offset_in_page(end)); | ||
753 | if (IS_ERR((void *) ret)) | ||
754 | return ret; | ||
755 | pend -= PAGE_SIZE; | ||
756 | if (pstart >= pend) | ||
757 | goto out; /* done */ | ||
758 | } | ||
759 | } else { | ||
760 | /* | ||
761 | * If a start address was specified, use it if the entire rounded out area | ||
762 | * is available. | ||
763 | */ | ||
764 | if (start && !pstart) | ||
765 | fudge = 1; /* handle case of mapping to range (0,PAGE_SIZE) */ | ||
766 | tmp = arch_get_unmapped_area(file, pstart - fudge, pend - pstart, 0, flags); | ||
767 | if (tmp != pstart) { | ||
768 | pstart = tmp; | ||
769 | start = pstart + offset_in_page(off); /* make start congruent with off */ | ||
770 | end = start + len; | ||
771 | pend = PAGE_ALIGN(end); | ||
772 | } | ||
773 | } | ||
774 | |||
775 | poff = off + (pstart - start); /* note: (pstart - start) may be negative */ | ||
776 | is_congruent = (flags & MAP_ANONYMOUS) || (offset_in_page(poff) == 0); | ||
777 | |||
778 | if ((flags & MAP_SHARED) && !is_congruent) | ||
779 | printk(KERN_INFO "%s(%d): emulate_mmap() can't share contents of incongruent mmap " | ||
780 | "(addr=0x%lx,off=0x%llx)\n", current->comm, task_pid_nr(current), start, off); | ||
781 | |||
782 | DBG("mmap_body: mapping [0x%lx-0x%lx) %s with poff 0x%llx\n", pstart, pend, | ||
783 | is_congruent ? "congruent" : "not congruent", poff); | ||
784 | |||
785 | down_write(¤t->mm->mmap_sem); | ||
786 | { | ||
787 | if (!(flags & MAP_ANONYMOUS) && is_congruent) | ||
788 | ret = do_mmap(file, pstart, pend - pstart, prot, flags | MAP_FIXED, poff); | ||
789 | else | ||
790 | ret = do_mmap(NULL, pstart, pend - pstart, | ||
791 | prot | ((flags & MAP_ANONYMOUS) ? 0 : PROT_WRITE), | ||
792 | flags | MAP_FIXED | MAP_ANONYMOUS, 0); | ||
793 | } | ||
794 | up_write(¤t->mm->mmap_sem); | ||
795 | |||
796 | if (IS_ERR((void *) ret)) | ||
797 | return ret; | ||
798 | |||
799 | if (!is_congruent) { | ||
800 | /* read the file contents */ | ||
801 | inode = file->f_path.dentry->d_inode; | ||
802 | if (!inode->i_fop || !file->f_op->read | ||
803 | || ((*file->f_op->read)(file, (char __user *) pstart, pend - pstart, &poff) | ||
804 | < 0)) | ||
805 | { | ||
806 | sys_munmap(pstart, pend - pstart); | ||
807 | return -EINVAL; | ||
808 | } | ||
809 | if (!(prot & PROT_WRITE) && sys_mprotect(pstart, pend - pstart, prot) < 0) | ||
810 | return -EINVAL; | ||
811 | } | ||
812 | |||
813 | if (!(flags & MAP_FIXED)) | ||
814 | ia32_set_pp((unsigned int)start, (unsigned int)end, flags); | ||
815 | out: | ||
816 | return start; | ||
817 | } | ||
818 | |||
819 | #endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */ | ||
820 | |||
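The key decision in emulate_mmap() above is the congruence test: a file-backed mapping can be handed straight to do_mmap() only when the file offset and the user address are misaligned by the same amount within a kernel page; otherwise the range is backed by anonymous pages and filled in with f_op->read(). A stand-alone sketch of that test, not kernel code, assuming 16KB kernel pages and made-up numbers:

    /* Stand-alone sketch, not kernel code: assumes 16KB kernel pages; the
     * address and offset are made up. */
    #include <stdio.h>

    #define PAGE_SIZE          (16 * 1024UL)
    #define PAGE_START(a)      ((unsigned long)(a) & ~(PAGE_SIZE - 1))
    #define offset_in_page(a)  ((unsigned long)(a) & (PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long start = 0x10004000UL;     /* user address, 4KB aligned */
            long long off = 0x4000;                 /* file offset in bytes */
            unsigned long pstart = PAGE_START(start);

            /* equivalent to the kernel's poff = off + (pstart - start) */
            long long poff = off - (long long)(start - pstart);

            /* both are 0x4000 into their 16KB page, so poff is page aligned
             * and the mapping is congruent */
            printf("congruent=%d\n", offset_in_page(poff) == 0);
            return 0;
    }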
821 | static inline unsigned int | ||
822 | get_prot32 (unsigned int prot) | ||
823 | { | ||
824 | if (prot & PROT_WRITE) | ||
825 | 		/* on x86, PROT_WRITE implies PROT_READ which implies PROT_EXEC */ | ||
826 | prot |= PROT_READ | PROT_WRITE | PROT_EXEC; | ||
827 | else if (prot & (PROT_READ | PROT_EXEC)) | ||
828 | /* on x86, there is no distinction between PROT_READ and PROT_EXEC */ | ||
829 | prot |= (PROT_READ | PROT_EXEC); | ||
830 | |||
831 | return prot; | ||
832 | } | ||
833 | |||
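get_prot32() simply widens the protection bits the way IA-32 paging behaves: writable implies readable and executable, and readable is indistinguishable from executable. A stand-alone check of that mapping, not kernel code, using the host's <sys/mman.h> constants:

    /* Stand-alone check, not kernel code. */
    #include <assert.h>
    #include <sys/mman.h>

    static unsigned int get_prot32(unsigned int prot)
    {
            if (prot & PROT_WRITE)
                    prot |= PROT_READ | PROT_WRITE | PROT_EXEC;
            else if (prot & (PROT_READ | PROT_EXEC))
                    prot |= PROT_READ | PROT_EXEC;
            return prot;
    }

    int main(void)
    {
            assert(get_prot32(PROT_READ)  == (PROT_READ | PROT_EXEC));
            assert(get_prot32(PROT_EXEC)  == (PROT_READ | PROT_EXEC));
            assert(get_prot32(PROT_WRITE) == (PROT_READ | PROT_WRITE | PROT_EXEC));
            assert(get_prot32(PROT_NONE)  == PROT_NONE);
            return 0;
    }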
834 | unsigned long | ||
835 | ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot, int flags, | ||
836 | loff_t offset) | ||
837 | { | ||
838 | DBG("ia32_do_mmap(file=%p,addr=0x%lx,len=0x%lx,prot=%x,flags=%x,offset=0x%llx)\n", | ||
839 | file, addr, len, prot, flags, offset); | ||
840 | |||
841 | if (file && (!file->f_op || !file->f_op->mmap)) | ||
842 | return -ENODEV; | ||
843 | |||
844 | len = IA32_PAGE_ALIGN(len); | ||
845 | if (len == 0) | ||
846 | return addr; | ||
847 | |||
848 | if (len > IA32_PAGE_OFFSET || addr > IA32_PAGE_OFFSET - len) | ||
849 | { | ||
850 | if (flags & MAP_FIXED) | ||
851 | return -ENOMEM; | ||
852 | else | ||
853 | return -EINVAL; | ||
854 | } | ||
855 | |||
856 | if (OFFSET4K(offset)) | ||
857 | return -EINVAL; | ||
858 | |||
859 | prot = get_prot32(prot); | ||
860 | |||
861 | if (flags & MAP_HUGETLB) | ||
862 | return -ENOMEM; | ||
863 | |||
864 | #if PAGE_SHIFT > IA32_PAGE_SHIFT | ||
865 | mutex_lock(&ia32_mmap_mutex); | ||
866 | { | ||
867 | addr = emulate_mmap(file, addr, len, prot, flags, offset); | ||
868 | } | ||
869 | mutex_unlock(&ia32_mmap_mutex); | ||
870 | #else | ||
871 | down_write(¤t->mm->mmap_sem); | ||
872 | { | ||
873 | addr = do_mmap(file, addr, len, prot, flags, offset); | ||
874 | } | ||
875 | up_write(¤t->mm->mmap_sem); | ||
876 | #endif | ||
877 | DBG("ia32_do_mmap: returning 0x%lx\n", addr); | ||
878 | return addr; | ||
879 | } | ||
880 | |||
881 | /* | ||
882 |  * Linux/i386 historically could not handle more than 4 system call parameters, so these | ||
883 |  * system calls used a memory block for parameter passing. | ||
884 | */ | ||
885 | |||
886 | struct mmap_arg_struct { | ||
887 | unsigned int addr; | ||
888 | unsigned int len; | ||
889 | unsigned int prot; | ||
890 | unsigned int flags; | ||
891 | unsigned int fd; | ||
892 | unsigned int offset; | ||
893 | }; | ||
894 | |||
895 | asmlinkage long | ||
896 | sys32_mmap (struct mmap_arg_struct __user *arg) | ||
897 | { | ||
898 | struct mmap_arg_struct a; | ||
899 | struct file *file = NULL; | ||
900 | unsigned long addr; | ||
901 | int flags; | ||
902 | |||
903 | if (copy_from_user(&a, arg, sizeof(a))) | ||
904 | return -EFAULT; | ||
905 | |||
906 | if (OFFSET4K(a.offset)) | ||
907 | return -EINVAL; | ||
908 | |||
909 | flags = a.flags; | ||
910 | |||
911 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
912 | if (!(flags & MAP_ANONYMOUS)) { | ||
913 | file = fget(a.fd); | ||
914 | if (!file) | ||
915 | return -EBADF; | ||
916 | } | ||
917 | |||
918 | addr = ia32_do_mmap(file, a.addr, a.len, a.prot, flags, a.offset); | ||
919 | |||
920 | if (file) | ||
921 | fput(file); | ||
922 | return addr; | ||
923 | } | ||
924 | |||
925 | asmlinkage long | ||
926 | sys32_mmap2 (unsigned int addr, unsigned int len, unsigned int prot, unsigned int flags, | ||
927 | unsigned int fd, unsigned int pgoff) | ||
928 | { | ||
929 | struct file *file = NULL; | ||
930 | unsigned long retval; | ||
931 | |||
932 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
933 | if (!(flags & MAP_ANONYMOUS)) { | ||
934 | file = fget(fd); | ||
935 | if (!file) | ||
936 | return -EBADF; | ||
937 | } | ||
938 | |||
939 | retval = ia32_do_mmap(file, addr, len, prot, flags, | ||
940 | (unsigned long) pgoff << IA32_PAGE_SHIFT); | ||
941 | |||
942 | if (file) | ||
943 | fput(file); | ||
944 | return retval; | ||
945 | } | ||
946 | |||
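From user space the two entry points above differ only in calling convention: old_mmap() passes one pointer to a six-word argument block with a byte offset, while mmap2() passes six separate arguments with the offset in 4KB units (hence the pgoff shift in sys32_mmap2()). A hypothetical raw-syscall illustration for a 32-bit caller, not part of this patch; 90 and 192 are the usual i386 numbers for mmap and mmap2:

    /* Hypothetical 32-bit caller, not from this patch. */
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    struct mmap_arg_struct {
            unsigned int addr, len, prot, flags, fd, offset;
    };

    int main(void)
    {
            struct mmap_arg_struct a = {
                    .addr = 0, .len = 65536,
                    .prot = PROT_READ | PROT_WRITE,
                    .flags = MAP_PRIVATE | MAP_ANONYMOUS,
                    .fd = (unsigned int) -1, .offset = 0,
            };

            /* old_mmap (i386 syscall 90): everything through one pointer,
             * offset in bytes */
            void *p1 = (void *) syscall(90, &a);

            /* mmap2 (i386 syscall 192): six arguments, offset in 4KB pages */
            void *p2 = (void *) syscall(192, (void *) 0, 65536,
                                        PROT_READ | PROT_WRITE,
                                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            return (p1 != (void *) -1 && p2 != (void *) -1) ? 0 : 1;
    }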
947 | asmlinkage long | ||
948 | sys32_munmap (unsigned int start, unsigned int len) | ||
949 | { | ||
950 | unsigned int end = start + len; | ||
951 | long ret; | ||
952 | |||
953 | #if PAGE_SHIFT <= IA32_PAGE_SHIFT | ||
954 | ret = sys_munmap(start, end - start); | ||
955 | #else | ||
956 | if (OFFSET4K(start)) | ||
957 | return -EINVAL; | ||
958 | |||
959 | end = IA32_PAGE_ALIGN(end); | ||
960 | if (start >= end) | ||
961 | return -EINVAL; | ||
962 | |||
963 | ret = ia32_unset_pp(&start, &end); | ||
964 | if (ret < 0) | ||
965 | return ret; | ||
966 | |||
967 | if (start >= end) | ||
968 | return 0; | ||
969 | |||
970 | mutex_lock(&ia32_mmap_mutex); | ||
971 | ret = sys_munmap(start, end - start); | ||
972 | mutex_unlock(&ia32_mmap_mutex); | ||
973 | #endif | ||
974 | return ret; | ||
975 | } | ||
976 | |||
977 | #if PAGE_SHIFT > IA32_PAGE_SHIFT | ||
978 | |||
979 | /* | ||
980 | * When mprotect()ing a partial page, we set the permission to the union of the old | ||
981 | * settings and the new settings. In other words, it's only possible to make access to a | ||
982 | * partial page less restrictive. | ||
983 | */ | ||
984 | static long | ||
985 | mprotect_subpage (unsigned long address, int new_prot) | ||
986 | { | ||
987 | int old_prot; | ||
988 | struct vm_area_struct *vma; | ||
989 | |||
990 | if (new_prot == PROT_NONE) | ||
991 | return 0; /* optimize case where nothing changes... */ | ||
992 | vma = find_vma(current->mm, address); | ||
993 | old_prot = get_page_prot(vma, address); | ||
994 | return sys_mprotect(address, PAGE_SIZE, new_prot | old_prot); | ||
995 | } | ||
996 | |||
997 | #endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */ | ||
998 | |||
999 | asmlinkage long | ||
1000 | sys32_mprotect (unsigned int start, unsigned int len, int prot) | ||
1001 | { | ||
1002 | unsigned int end = start + len; | ||
1003 | #if PAGE_SHIFT > IA32_PAGE_SHIFT | ||
1004 | long retval = 0; | ||
1005 | #endif | ||
1006 | |||
1007 | prot = get_prot32(prot); | ||
1008 | |||
1009 | #if PAGE_SHIFT <= IA32_PAGE_SHIFT | ||
1010 | return sys_mprotect(start, end - start, prot); | ||
1011 | #else | ||
1012 | if (OFFSET4K(start)) | ||
1013 | return -EINVAL; | ||
1014 | |||
1015 | end = IA32_PAGE_ALIGN(end); | ||
1016 | if (end < start) | ||
1017 | return -EINVAL; | ||
1018 | |||
1019 | retval = ia32_compare_pp(&start, &end); | ||
1020 | |||
1021 | if (retval < 0) | ||
1022 | return retval; | ||
1023 | |||
1024 | mutex_lock(&ia32_mmap_mutex); | ||
1025 | { | ||
1026 | if (offset_in_page(start)) { | ||
1027 | /* start address is 4KB aligned but not page aligned. */ | ||
1028 | retval = mprotect_subpage(PAGE_START(start), prot); | ||
1029 | if (retval < 0) | ||
1030 | goto out; | ||
1031 | |||
1032 | start = PAGE_ALIGN(start); | ||
1033 | if (start >= end) | ||
1034 | goto out; /* retval is already zero... */ | ||
1035 | } | ||
1036 | |||
1037 | if (offset_in_page(end)) { | ||
1038 | /* end address is 4KB aligned but not page aligned. */ | ||
1039 | retval = mprotect_subpage(PAGE_START(end), prot); | ||
1040 | if (retval < 0) | ||
1041 | goto out; | ||
1042 | |||
1043 | end = PAGE_START(end); | ||
1044 | } | ||
1045 | retval = sys_mprotect(start, end - start, prot); | ||
1046 | } | ||
1047 | out: | ||
1048 | mutex_unlock(&ia32_mmap_mutex); | ||
1049 | return retval; | ||
1050 | #endif | ||
1051 | } | ||
1052 | |||
1053 | asmlinkage long | ||
1054 | sys32_mremap (unsigned int addr, unsigned int old_len, unsigned int new_len, | ||
1055 | unsigned int flags, unsigned int new_addr) | ||
1056 | { | ||
1057 | long ret; | ||
1058 | |||
1059 | #if PAGE_SHIFT <= IA32_PAGE_SHIFT | ||
1060 | ret = sys_mremap(addr, old_len, new_len, flags, new_addr); | ||
1061 | #else | ||
1062 | unsigned int old_end, new_end; | ||
1063 | |||
1064 | if (OFFSET4K(addr)) | ||
1065 | return -EINVAL; | ||
1066 | |||
1067 | old_len = IA32_PAGE_ALIGN(old_len); | ||
1068 | new_len = IA32_PAGE_ALIGN(new_len); | ||
1069 | old_end = addr + old_len; | ||
1070 | new_end = addr + new_len; | ||
1071 | |||
1072 | if (!new_len) | ||
1073 | return -EINVAL; | ||
1074 | |||
1075 | if ((flags & MREMAP_FIXED) && (OFFSET4K(new_addr))) | ||
1076 | return -EINVAL; | ||
1077 | |||
1078 | if (old_len >= new_len) { | ||
1079 | ret = sys32_munmap(addr + new_len, old_len - new_len); | ||
1080 | if (ret && old_len != new_len) | ||
1081 | return ret; | ||
1082 | ret = addr; | ||
1083 | if (!(flags & MREMAP_FIXED) || (new_addr == addr)) | ||
1084 | return ret; | ||
1085 | old_len = new_len; | ||
1086 | } | ||
1087 | |||
1088 | addr = PAGE_START(addr); | ||
1089 | old_len = PAGE_ALIGN(old_end) - addr; | ||
1090 | new_len = PAGE_ALIGN(new_end) - addr; | ||
1091 | |||
1092 | mutex_lock(&ia32_mmap_mutex); | ||
1093 | ret = sys_mremap(addr, old_len, new_len, flags, new_addr); | ||
1094 | mutex_unlock(&ia32_mmap_mutex); | ||
1095 | |||
1096 | if ((ret >= 0) && (old_len < new_len)) { | ||
1097 | /* mremap expanded successfully */ | ||
1098 | ia32_set_pp(old_end, new_end, flags); | ||
1099 | } | ||
1100 | #endif | ||
1101 | return ret; | ||
1102 | } | ||
1103 | |||
1104 | asmlinkage unsigned long | ||
1105 | sys32_alarm (unsigned int seconds) | ||
1106 | { | ||
1107 | return alarm_setitimer(seconds); | ||
1108 | } | ||
1109 | |||
1110 | struct sel_arg_struct { | ||
1111 | unsigned int n; | ||
1112 | unsigned int inp; | ||
1113 | unsigned int outp; | ||
1114 | unsigned int exp; | ||
1115 | unsigned int tvp; | ||
1116 | }; | ||
1117 | |||
1118 | asmlinkage long | ||
1119 | sys32_old_select (struct sel_arg_struct __user *arg) | ||
1120 | { | ||
1121 | struct sel_arg_struct a; | ||
1122 | |||
1123 | if (copy_from_user(&a, arg, sizeof(a))) | ||
1124 | return -EFAULT; | ||
1125 | return compat_sys_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp), | ||
1126 | compat_ptr(a.exp), compat_ptr(a.tvp)); | ||
1127 | } | ||
1128 | |||
1129 | #define SEMOP 1 | ||
1130 | #define SEMGET 2 | ||
1131 | #define SEMCTL 3 | ||
1132 | #define SEMTIMEDOP 4 | ||
1133 | #define MSGSND 11 | ||
1134 | #define MSGRCV 12 | ||
1135 | #define MSGGET 13 | ||
1136 | #define MSGCTL 14 | ||
1137 | #define SHMAT 21 | ||
1138 | #define SHMDT 22 | ||
1139 | #define SHMGET 23 | ||
1140 | #define SHMCTL 24 | ||
1141 | |||
1142 | asmlinkage long | ||
1143 | sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth) | ||
1144 | { | ||
1145 | int version; | ||
1146 | |||
1147 | version = call >> 16; /* hack for backward compatibility */ | ||
1148 | call &= 0xffff; | ||
1149 | |||
1150 | switch (call) { | ||
1151 | case SEMTIMEDOP: | ||
1152 | if (fifth) | ||
1153 | return compat_sys_semtimedop(first, compat_ptr(ptr), | ||
1154 | second, compat_ptr(fifth)); | ||
1155 | /* else fall through for normal semop() */ | ||
1156 | case SEMOP: | ||
1157 | /* struct sembuf is the same on 32 and 64bit :)) */ | ||
1158 | return sys_semtimedop(first, compat_ptr(ptr), second, | ||
1159 | NULL); | ||
1160 | case SEMGET: | ||
1161 | return sys_semget(first, second, third); | ||
1162 | case SEMCTL: | ||
1163 | return compat_sys_semctl(first, second, third, compat_ptr(ptr)); | ||
1164 | |||
1165 | case MSGSND: | ||
1166 | return compat_sys_msgsnd(first, second, third, compat_ptr(ptr)); | ||
1167 | case MSGRCV: | ||
1168 | return compat_sys_msgrcv(first, second, fifth, third, version, compat_ptr(ptr)); | ||
1169 | case MSGGET: | ||
1170 | return sys_msgget((key_t) first, second); | ||
1171 | case MSGCTL: | ||
1172 | return compat_sys_msgctl(first, second, compat_ptr(ptr)); | ||
1173 | |||
1174 | case SHMAT: | ||
1175 | return compat_sys_shmat(first, second, third, version, compat_ptr(ptr)); | ||
1176 | break; | ||
1177 | case SHMDT: | ||
1178 | return sys_shmdt(compat_ptr(ptr)); | ||
1179 | case SHMGET: | ||
1180 | return sys_shmget(first, (unsigned)second, third); | ||
1181 | case SHMCTL: | ||
1182 | return compat_sys_shmctl(first, second, compat_ptr(ptr)); | ||
1183 | |||
1184 | default: | ||
1185 | return -ENOSYS; | ||
1186 | } | ||
1187 | return -EINVAL; | ||
1188 | } | ||
1189 | |||
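sys32_ipc() exists because i386 funnels all System V IPC through a single ipc(2) multiplexer: the low 16 bits of the first argument select the operation and the upper bits carry an ABI version. A hypothetical 32-bit caller performing a semop() through it, not part of this patch; 117 is the usual i386 __NR_ipc:

    /* Hypothetical 32-bit caller, not from this patch. */
    #include <sys/sem.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #define SEMOP 1                 /* same value as in the table above */

    long ipc_semop(int semid, struct sembuf *sops, unsigned int nsops)
    {
            /* routed by the switch above to sys_semtimedop(semid, sops, nsops, NULL) */
            return syscall(117, SEMOP, semid, nsops, 0, sops, 0);
    }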
1190 | asmlinkage long | ||
1191 | compat_sys_wait4 (compat_pid_t pid, compat_uint_t * stat_addr, int options, | ||
1192 | struct compat_rusage *ru); | ||
1193 | |||
1194 | asmlinkage long | ||
1195 | sys32_waitpid (int pid, unsigned int *stat_addr, int options) | ||
1196 | { | ||
1197 | return compat_sys_wait4(pid, stat_addr, options, NULL); | ||
1198 | } | ||
1199 | |||
1200 | /* | ||
1201 | * The order in which registers are stored in the ptrace regs structure | ||
1202 | */ | ||
1203 | #define PT_EBX 0 | ||
1204 | #define PT_ECX 1 | ||
1205 | #define PT_EDX 2 | ||
1206 | #define PT_ESI 3 | ||
1207 | #define PT_EDI 4 | ||
1208 | #define PT_EBP 5 | ||
1209 | #define PT_EAX 6 | ||
1210 | #define PT_DS 7 | ||
1211 | #define PT_ES 8 | ||
1212 | #define PT_FS 9 | ||
1213 | #define PT_GS 10 | ||
1214 | #define PT_ORIG_EAX 11 | ||
1215 | #define PT_EIP 12 | ||
1216 | #define PT_CS 13 | ||
1217 | #define PT_EFL 14 | ||
1218 | #define PT_UESP 15 | ||
1219 | #define PT_SS 16 | ||
1220 | |||
1221 | static unsigned int | ||
1222 | getreg (struct task_struct *child, int regno) | ||
1223 | { | ||
1224 | struct pt_regs *child_regs; | ||
1225 | |||
1226 | child_regs = task_pt_regs(child); | ||
1227 | switch (regno / sizeof(int)) { | ||
1228 | case PT_EBX: return child_regs->r11; | ||
1229 | case PT_ECX: return child_regs->r9; | ||
1230 | case PT_EDX: return child_regs->r10; | ||
1231 | case PT_ESI: return child_regs->r14; | ||
1232 | case PT_EDI: return child_regs->r15; | ||
1233 | case PT_EBP: return child_regs->r13; | ||
1234 | case PT_EAX: return child_regs->r8; | ||
1235 | case PT_ORIG_EAX: return child_regs->r1; /* see dispatch_to_ia32_handler() */ | ||
1236 | case PT_EIP: return child_regs->cr_iip; | ||
1237 | case PT_UESP: return child_regs->r12; | ||
1238 | case PT_EFL: return child->thread.eflag; | ||
1239 | case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS: | ||
1240 | return __USER_DS; | ||
1241 | case PT_CS: return __USER_CS; | ||
1242 | default: | ||
1243 | printk(KERN_ERR "ia32.getreg(): unknown register %d\n", regno); | ||
1244 | break; | ||
1245 | } | ||
1246 | return 0; | ||
1247 | } | ||
1248 | |||
1249 | static void | ||
1250 | putreg (struct task_struct *child, int regno, unsigned int value) | ||
1251 | { | ||
1252 | struct pt_regs *child_regs; | ||
1253 | |||
1254 | child_regs = task_pt_regs(child); | ||
1255 | switch (regno / sizeof(int)) { | ||
1256 | case PT_EBX: child_regs->r11 = value; break; | ||
1257 | case PT_ECX: child_regs->r9 = value; break; | ||
1258 | case PT_EDX: child_regs->r10 = value; break; | ||
1259 | case PT_ESI: child_regs->r14 = value; break; | ||
1260 | case PT_EDI: child_regs->r15 = value; break; | ||
1261 | case PT_EBP: child_regs->r13 = value; break; | ||
1262 | case PT_EAX: child_regs->r8 = value; break; | ||
1263 | case PT_ORIG_EAX: child_regs->r1 = value; break; | ||
1264 | case PT_EIP: child_regs->cr_iip = value; break; | ||
1265 | case PT_UESP: child_regs->r12 = value; break; | ||
1266 | case PT_EFL: child->thread.eflag = value; break; | ||
1267 | case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS: | ||
1268 | if (value != __USER_DS) | ||
1269 | printk(KERN_ERR | ||
1270 | "ia32.putreg: attempt to set invalid segment register %d = %x\n", | ||
1271 | regno, value); | ||
1272 | break; | ||
1273 | case PT_CS: | ||
1274 | if (value != __USER_CS) | ||
1275 | printk(KERN_ERR | ||
1276 | "ia32.putreg: attempt to set invalid segment register %d = %x\n", | ||
1277 | regno, value); | ||
1278 | break; | ||
1279 | default: | ||
1280 | printk(KERN_ERR "ia32.putreg: unknown register %d\n", regno); | ||
1281 | break; | ||
1282 | } | ||
1283 | } | ||
1284 | |||
1285 | static void | ||
1286 | put_fpreg (int regno, struct _fpreg_ia32 __user *reg, struct pt_regs *ptp, | ||
1287 | struct switch_stack *swp, int tos) | ||
1288 | { | ||
1289 | struct _fpreg_ia32 *f; | ||
1290 | char buf[32]; | ||
1291 | |||
1292 | f = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15); | ||
1293 | if ((regno += tos) >= 8) | ||
1294 | regno -= 8; | ||
1295 | switch (regno) { | ||
1296 | case 0: | ||
1297 | ia64f2ia32f(f, &ptp->f8); | ||
1298 | break; | ||
1299 | case 1: | ||
1300 | ia64f2ia32f(f, &ptp->f9); | ||
1301 | break; | ||
1302 | case 2: | ||
1303 | ia64f2ia32f(f, &ptp->f10); | ||
1304 | break; | ||
1305 | case 3: | ||
1306 | ia64f2ia32f(f, &ptp->f11); | ||
1307 | break; | ||
1308 | case 4: | ||
1309 | case 5: | ||
1310 | case 6: | ||
1311 | case 7: | ||
1312 | ia64f2ia32f(f, &swp->f12 + (regno - 4)); | ||
1313 | break; | ||
1314 | } | ||
1315 | copy_to_user(reg, f, sizeof(*reg)); | ||
1316 | } | ||
1317 | |||
1318 | static void | ||
1319 | get_fpreg (int regno, struct _fpreg_ia32 __user *reg, struct pt_regs *ptp, | ||
1320 | struct switch_stack *swp, int tos) | ||
1321 | { | ||
1322 | |||
1323 | if ((regno += tos) >= 8) | ||
1324 | regno -= 8; | ||
1325 | switch (regno) { | ||
1326 | case 0: | ||
1327 | copy_from_user(&ptp->f8, reg, sizeof(*reg)); | ||
1328 | break; | ||
1329 | case 1: | ||
1330 | copy_from_user(&ptp->f9, reg, sizeof(*reg)); | ||
1331 | break; | ||
1332 | case 2: | ||
1333 | copy_from_user(&ptp->f10, reg, sizeof(*reg)); | ||
1334 | break; | ||
1335 | case 3: | ||
1336 | copy_from_user(&ptp->f11, reg, sizeof(*reg)); | ||
1337 | break; | ||
1338 | case 4: | ||
1339 | case 5: | ||
1340 | case 6: | ||
1341 | case 7: | ||
1342 | copy_from_user(&swp->f12 + (regno - 4), reg, sizeof(*reg)); | ||
1343 | break; | ||
1344 | } | ||
1345 | return; | ||
1346 | } | ||
1347 | |||
1348 | int | ||
1349 | save_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user *save) | ||
1350 | { | ||
1351 | struct switch_stack *swp; | ||
1352 | struct pt_regs *ptp; | ||
1353 | int i, tos; | ||
1354 | |||
1355 | if (!access_ok(VERIFY_WRITE, save, sizeof(*save))) | ||
1356 | return -EFAULT; | ||
1357 | |||
1358 | __put_user(tsk->thread.fcr & 0xffff, &save->cwd); | ||
1359 | __put_user(tsk->thread.fsr & 0xffff, &save->swd); | ||
1360 | __put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd); | ||
1361 | __put_user(tsk->thread.fir, &save->fip); | ||
1362 | __put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs); | ||
1363 | __put_user(tsk->thread.fdr, &save->foo); | ||
1364 | __put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos); | ||
1365 | |||
1366 | /* | ||
1367 | * Stack frames start with 16-bytes of temp space | ||
1368 | */ | ||
1369 | swp = (struct switch_stack *)(tsk->thread.ksp + 16); | ||
1370 | ptp = task_pt_regs(tsk); | ||
1371 | tos = (tsk->thread.fsr >> 11) & 7; | ||
1372 | for (i = 0; i < 8; i++) | ||
1373 | put_fpreg(i, &save->st_space[i], ptp, swp, tos); | ||
1374 | return 0; | ||
1375 | } | ||
1376 | |||
1377 | static int | ||
1378 | restore_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user *save) | ||
1379 | { | ||
1380 | struct switch_stack *swp; | ||
1381 | struct pt_regs *ptp; | ||
1382 | int i, tos; | ||
1383 | unsigned int fsrlo, fsrhi, num32; | ||
1384 | |||
1385 | if (!access_ok(VERIFY_READ, save, sizeof(*save))) | ||
1386 | return(-EFAULT); | ||
1387 | |||
1388 | __get_user(num32, (unsigned int __user *)&save->cwd); | ||
1389 | tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f); | ||
1390 | __get_user(fsrlo, (unsigned int __user *)&save->swd); | ||
1391 | __get_user(fsrhi, (unsigned int __user *)&save->twd); | ||
1392 | num32 = (fsrhi << 16) | fsrlo; | ||
1393 | tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32; | ||
1394 | __get_user(num32, (unsigned int __user *)&save->fip); | ||
1395 | tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32; | ||
1396 | __get_user(num32, (unsigned int __user *)&save->foo); | ||
1397 | tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32; | ||
1398 | |||
1399 | /* | ||
1400 | * Stack frames start with 16-bytes of temp space | ||
1401 | */ | ||
1402 | swp = (struct switch_stack *)(tsk->thread.ksp + 16); | ||
1403 | ptp = task_pt_regs(tsk); | ||
1404 | tos = (tsk->thread.fsr >> 11) & 7; | ||
1405 | for (i = 0; i < 8; i++) | ||
1406 | get_fpreg(i, &save->st_space[i], ptp, swp, tos); | ||
1407 | return 0; | ||
1408 | } | ||
1409 | |||
1410 | int | ||
1411 | save_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user *save) | ||
1412 | { | ||
1413 | struct switch_stack *swp; | ||
1414 | struct pt_regs *ptp; | ||
1415 | int i, tos; | ||
1416 | unsigned long mxcsr=0; | ||
1417 | unsigned long num128[2]; | ||
1418 | |||
1419 | if (!access_ok(VERIFY_WRITE, save, sizeof(*save))) | ||
1420 | return -EFAULT; | ||
1421 | |||
1422 | __put_user(tsk->thread.fcr & 0xffff, &save->cwd); | ||
1423 | __put_user(tsk->thread.fsr & 0xffff, &save->swd); | ||
1424 | __put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd); | ||
1425 | __put_user(tsk->thread.fir, &save->fip); | ||
1426 | __put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs); | ||
1427 | __put_user(tsk->thread.fdr, &save->foo); | ||
1428 | __put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos); | ||
1429 | |||
1430 | /* | ||
1431 | * Stack frames start with 16-bytes of temp space | ||
1432 | */ | ||
1433 | swp = (struct switch_stack *)(tsk->thread.ksp + 16); | ||
1434 | ptp = task_pt_regs(tsk); | ||
1435 | tos = (tsk->thread.fsr >> 11) & 7; | ||
1436 | for (i = 0; i < 8; i++) | ||
1437 | put_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos); | ||
1438 | |||
1439 | mxcsr = ((tsk->thread.fcr>>32) & 0xff80) | ((tsk->thread.fsr>>32) & 0x3f); | ||
1440 | __put_user(mxcsr & 0xffff, &save->mxcsr); | ||
1441 | for (i = 0; i < 8; i++) { | ||
1442 | memcpy(&(num128[0]), &(swp->f16) + i*2, sizeof(unsigned long)); | ||
1443 | memcpy(&(num128[1]), &(swp->f17) + i*2, sizeof(unsigned long)); | ||
1444 | copy_to_user(&save->xmm_space[0] + 4*i, num128, sizeof(struct _xmmreg_ia32)); | ||
1445 | } | ||
1446 | return 0; | ||
1447 | } | ||
1448 | |||
1449 | static int | ||
1450 | restore_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user *save) | ||
1451 | { | ||
1452 | struct switch_stack *swp; | ||
1453 | struct pt_regs *ptp; | ||
1454 | int i, tos; | ||
1455 | unsigned int fsrlo, fsrhi, num32; | ||
1456 | int mxcsr; | ||
1457 | unsigned long num64; | ||
1458 | unsigned long num128[2]; | ||
1459 | |||
1460 | if (!access_ok(VERIFY_READ, save, sizeof(*save))) | ||
1461 | return(-EFAULT); | ||
1462 | |||
1463 | __get_user(num32, (unsigned int __user *)&save->cwd); | ||
1464 | tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f); | ||
1465 | __get_user(fsrlo, (unsigned int __user *)&save->swd); | ||
1466 | __get_user(fsrhi, (unsigned int __user *)&save->twd); | ||
1467 | num32 = (fsrhi << 16) | fsrlo; | ||
1468 | tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32; | ||
1469 | __get_user(num32, (unsigned int __user *)&save->fip); | ||
1470 | tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32; | ||
1471 | __get_user(num32, (unsigned int __user *)&save->foo); | ||
1472 | tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32; | ||
1473 | |||
1474 | /* | ||
1475 | * Stack frames start with 16-bytes of temp space | ||
1476 | */ | ||
1477 | swp = (struct switch_stack *)(tsk->thread.ksp + 16); | ||
1478 | ptp = task_pt_regs(tsk); | ||
1479 | tos = (tsk->thread.fsr >> 11) & 7; | ||
1480 | for (i = 0; i < 8; i++) | ||
1481 | get_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos); | ||
1482 | |||
1483 | __get_user(mxcsr, (unsigned int __user *)&save->mxcsr); | ||
1484 | num64 = mxcsr & 0xff10; | ||
1485 | tsk->thread.fcr = (tsk->thread.fcr & (~0xff1000000000UL)) | (num64<<32); | ||
1486 | num64 = mxcsr & 0x3f; | ||
1487 | tsk->thread.fsr = (tsk->thread.fsr & (~0x3f00000000UL)) | (num64<<32); | ||
1488 | |||
1489 | for (i = 0; i < 8; i++) { | ||
1490 | copy_from_user(num128, &save->xmm_space[0] + 4*i, sizeof(struct _xmmreg_ia32)); | ||
1491 | memcpy(&(swp->f16) + i*2, &(num128[0]), sizeof(unsigned long)); | ||
1492 | memcpy(&(swp->f17) + i*2, &(num128[1]), sizeof(unsigned long)); | ||
1493 | } | ||
1494 | return 0; | ||
1495 | } | ||
1496 | |||
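The fxsr save/restore paths above scatter the IA-32 MXCSR register across two 64-bit shadow registers: the control/mask bits live in the upper half of thread.fcr and the exception flags in the upper half of thread.fsr. A minimal stand-alone sketch of the same bit arithmetic, with the masks copied verbatim from the code above (note the save path masks 0xff80 while the restore path masks 0xff10); the helper names are illustrative only:

#include <stdint.h>

/* Rebuild a 16-bit MXCSR image from the fcr/fsr shadows (cf. save_ia32_fpxstate). */
static uint32_t mxcsr_from_shadow(uint64_t fcr, uint64_t fsr)
{
        return ((fcr >> 32) & 0xff80) | ((fsr >> 32) & 0x3f);
}

/* Scatter a 16-bit MXCSR back into the shadows (cf. restore_ia32_fpxstate). */
static void mxcsr_to_shadow(uint32_t mxcsr, uint64_t *fcr, uint64_t *fsr)
{
        *fcr = (*fcr & ~0xff1000000000ULL) | ((uint64_t)(mxcsr & 0xff10) << 32);
        *fsr = (*fsr & ~0x3f00000000ULL)   | ((uint64_t)(mxcsr & 0x3f)   << 32);
}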
1497 | long compat_arch_ptrace(struct task_struct *child, compat_long_t request, | ||
1498 | compat_ulong_t caddr, compat_ulong_t cdata) | ||
1499 | { | ||
1500 | unsigned long addr = caddr; | ||
1501 | unsigned long data = cdata; | ||
1502 | unsigned int tmp; | ||
1503 | long i, ret; | ||
1504 | |||
1505 | switch (request) { | ||
1506 | case PTRACE_PEEKUSR: /* read word at addr in USER area */ | ||
1507 | ret = -EIO; | ||
1508 | if ((addr & 3) || addr > 17*sizeof(int)) | ||
1509 | break; | ||
1510 | |||
1511 | tmp = getreg(child, addr); | ||
1512 | if (!put_user(tmp, (unsigned int __user *) compat_ptr(data))) | ||
1513 | ret = 0; | ||
1514 | break; | ||
1515 | |||
1516 | case PTRACE_POKEUSR: /* write word at addr in USER area */ | ||
1517 | ret = -EIO; | ||
1518 | if ((addr & 3) || addr > 17*sizeof(int)) | ||
1519 | break; | ||
1520 | |||
1521 | putreg(child, addr, data); | ||
1522 | ret = 0; | ||
1523 | break; | ||
1524 | |||
1525 | case IA32_PTRACE_GETREGS: | ||
1526 | if (!access_ok(VERIFY_WRITE, compat_ptr(data), 17*sizeof(int))) { | ||
1527 | ret = -EIO; | ||
1528 | break; | ||
1529 | } | ||
1530 | for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int) ) { | ||
1531 | put_user(getreg(child, i), (unsigned int __user *) compat_ptr(data)); | ||
1532 | data += sizeof(int); | ||
1533 | } | ||
1534 | ret = 0; | ||
1535 | break; | ||
1536 | |||
1537 | case IA32_PTRACE_SETREGS: | ||
1538 | if (!access_ok(VERIFY_READ, compat_ptr(data), 17*sizeof(int))) { | ||
1539 | ret = -EIO; | ||
1540 | break; | ||
1541 | } | ||
1542 | for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int) ) { | ||
1543 | get_user(tmp, (unsigned int __user *) compat_ptr(data)); | ||
1544 | putreg(child, i, tmp); | ||
1545 | data += sizeof(int); | ||
1546 | } | ||
1547 | ret = 0; | ||
1548 | break; | ||
1549 | |||
1550 | case IA32_PTRACE_GETFPREGS: | ||
1551 | ret = save_ia32_fpstate(child, (struct ia32_user_i387_struct __user *) | ||
1552 | compat_ptr(data)); | ||
1553 | break; | ||
1554 | |||
1555 | case IA32_PTRACE_GETFPXREGS: | ||
1556 | ret = save_ia32_fpxstate(child, (struct ia32_user_fxsr_struct __user *) | ||
1557 | compat_ptr(data)); | ||
1558 | break; | ||
1559 | |||
1560 | case IA32_PTRACE_SETFPREGS: | ||
1561 | ret = restore_ia32_fpstate(child, (struct ia32_user_i387_struct __user *) | ||
1562 | compat_ptr(data)); | ||
1563 | break; | ||
1564 | |||
1565 | case IA32_PTRACE_SETFPXREGS: | ||
1566 | ret = restore_ia32_fpxstate(child, (struct ia32_user_fxsr_struct __user *) | ||
1567 | compat_ptr(data)); | ||
1568 | break; | ||
1569 | |||
1570 | default: | ||
1571 | return compat_ptrace_request(child, request, caddr, cdata); | ||
1572 | } | ||
1573 | return ret; | ||
1574 | } | ||
1575 | |||
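The PTRACE_PEEKUSR/POKEUSR cases above only accept 4-byte-aligned offsets inside the 17-word IA-32 USER register area. A hedged user-space sketch of how a 32-bit debugger might walk that area on a stopped tracee (glibc spells the request PTRACE_PEEKUSER; which register each offset denotes is decided by getreg(), which is not shown here):

#include <errno.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

/* Dump all 17 words of the IA-32 USER register area of a stopped tracee. */
static int dump_ia32_user_area(pid_t pid)
{
        for (int off = 0; off < 17 * 4; off += 4) {
                errno = 0;
                long val = ptrace(PTRACE_PEEKUSER, pid, (void *)(long)off, NULL);
                if (val == -1 && errno)
                        return -1;
                printf("user[%2d] = 0x%08lx\n", off / 4, val & 0xffffffffUL);
        }
        return 0;
}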
1576 | typedef struct { | ||
1577 | unsigned int ss_sp; | ||
1578 | unsigned int ss_flags; | ||
1579 | unsigned int ss_size; | ||
1580 | } ia32_stack_t; | ||
1581 | |||
1582 | asmlinkage long | ||
1583 | sys32_sigaltstack (ia32_stack_t __user *uss32, ia32_stack_t __user *uoss32, | ||
1584 | long arg2, long arg3, long arg4, long arg5, long arg6, | ||
1585 | long arg7, struct pt_regs pt) | ||
1586 | { | ||
1587 | stack_t uss, uoss; | ||
1588 | ia32_stack_t buf32; | ||
1589 | int ret; | ||
1590 | mm_segment_t old_fs = get_fs(); | ||
1591 | |||
1592 | if (uss32) { | ||
1593 | if (copy_from_user(&buf32, uss32, sizeof(ia32_stack_t))) | ||
1594 | return -EFAULT; | ||
1595 | uss.ss_sp = (void __user *) (long) buf32.ss_sp; | ||
1596 | uss.ss_flags = buf32.ss_flags; | ||
1597 | /* MINSIGSTKSZ is different for ia32 vs ia64. We lie here to pass the | ||
1598 | check and set it to the user requested value later */ | ||
1599 | if ((buf32.ss_flags != SS_DISABLE) && (buf32.ss_size < MINSIGSTKSZ_IA32)) { | ||
1600 | ret = -ENOMEM; | ||
1601 | goto out; | ||
1602 | } | ||
1603 | uss.ss_size = MINSIGSTKSZ; | ||
1604 | } | ||
1605 | set_fs(KERNEL_DS); | ||
1606 | ret = do_sigaltstack(uss32 ? (stack_t __user *) &uss : NULL, | ||
1607 | (stack_t __user *) &uoss, pt.r12); | ||
1608 | current->sas_ss_size = buf32.ss_size; | ||
1609 | set_fs(old_fs); | ||
1610 | out: | ||
1611 | if (ret < 0) | ||
1612 | return(ret); | ||
1613 | if (uoss32) { | ||
1614 | buf32.ss_sp = (long __user) uoss.ss_sp; | ||
1615 | buf32.ss_flags = uoss.ss_flags; | ||
1616 | buf32.ss_size = uoss.ss_size; | ||
1617 | if (copy_to_user(uoss32, &buf32, sizeof(ia32_stack_t))) | ||
1618 | return -EFAULT; | ||
1619 | } | ||
1620 | return ret; | ||
1621 | } | ||
1622 | |||
1623 | asmlinkage int | ||
1624 | sys32_msync (unsigned int start, unsigned int len, int flags) | ||
1625 | { | ||
1626 | unsigned int addr; | ||
1627 | |||
1628 | if (OFFSET4K(start)) | ||
1629 | return -EINVAL; | ||
1630 | addr = PAGE_START(start); | ||
1631 | return sys_msync(addr, len + (start - addr), flags); | ||
1632 | } | ||
1633 | |||
1634 | asmlinkage long | ||
1635 | sys32_newuname (struct new_utsname __user *name) | ||
1636 | { | ||
1637 | int ret = sys_newuname(name); | ||
1638 | |||
1639 | if (!ret) | ||
1640 | if (copy_to_user(name->machine, "i686\0\0\0", 8)) | ||
1641 | ret = -EFAULT; | ||
1642 | return ret; | ||
1643 | } | ||
1644 | |||
1645 | asmlinkage long | ||
1646 | sys32_getresuid16 (u16 __user *ruid, u16 __user *euid, u16 __user *suid) | ||
1647 | { | ||
1648 | uid_t a, b, c; | ||
1649 | int ret; | ||
1650 | mm_segment_t old_fs = get_fs(); | ||
1651 | |||
1652 | set_fs(KERNEL_DS); | ||
1653 | ret = sys_getresuid((uid_t __user *) &a, (uid_t __user *) &b, (uid_t __user *) &c); | ||
1654 | set_fs(old_fs); | ||
1655 | |||
1656 | if (put_user(a, ruid) || put_user(b, euid) || put_user(c, suid)) | ||
1657 | return -EFAULT; | ||
1658 | return ret; | ||
1659 | } | ||
1660 | |||
1661 | asmlinkage long | ||
1662 | sys32_getresgid16 (u16 __user *rgid, u16 __user *egid, u16 __user *sgid) | ||
1663 | { | ||
1664 | gid_t a, b, c; | ||
1665 | int ret; | ||
1666 | mm_segment_t old_fs = get_fs(); | ||
1667 | |||
1668 | set_fs(KERNEL_DS); | ||
1669 | ret = sys_getresgid((gid_t __user *) &a, (gid_t __user *) &b, (gid_t __user *) &c); | ||
1670 | set_fs(old_fs); | ||
1671 | |||
1672 | if (ret) | ||
1673 | return ret; | ||
1674 | |||
1675 | return put_user(a, rgid) | put_user(b, egid) | put_user(c, sgid); | ||
1676 | } | ||
1677 | |||
1678 | asmlinkage long | ||
1679 | sys32_lseek (unsigned int fd, int offset, unsigned int whence) | ||
1680 | { | ||
1681 | /* Sign-extension of "offset" is important here... */ | ||
1682 | return sys_lseek(fd, offset, whence); | ||
1683 | } | ||
1684 | |||
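The sign-extension comment matters because a negative 32-bit offset (say, lseek(fd, -4, SEEK_CUR)) must become a negative 64-bit value, not a number just under 4 GiB. A tiny illustration of the difference, relying only on standard C integer conversion:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int32_t off32 = -4;
        int64_t good  = off32;                  /* sign-extended:  -4 */
        int64_t bad   = (uint32_t)off32;        /* zero-extended:  4294967292 */

        printf("signed parameter: %lld, unsigned parameter: %lld\n",
               (long long)good, (long long)bad);
        return 0;
}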
1685 | static int | ||
1686 | groups16_to_user(short __user *grouplist, struct group_info *group_info) | ||
1687 | { | ||
1688 | int i; | ||
1689 | short group; | ||
1690 | |||
1691 | for (i = 0; i < group_info->ngroups; i++) { | ||
1692 | group = (short)GROUP_AT(group_info, i); | ||
1693 | if (put_user(group, grouplist+i)) | ||
1694 | return -EFAULT; | ||
1695 | } | ||
1696 | |||
1697 | return 0; | ||
1698 | } | ||
1699 | |||
1700 | static int | ||
1701 | groups16_from_user(struct group_info *group_info, short __user *grouplist) | ||
1702 | { | ||
1703 | int i; | ||
1704 | short group; | ||
1705 | |||
1706 | for (i = 0; i < group_info->ngroups; i++) { | ||
1707 | if (get_user(group, grouplist+i)) | ||
1708 | return -EFAULT; | ||
1709 | GROUP_AT(group_info, i) = (gid_t)group; | ||
1710 | } | ||
1711 | |||
1712 | return 0; | ||
1713 | } | ||
1714 | |||
1715 | asmlinkage long | ||
1716 | sys32_getgroups16 (int gidsetsize, short __user *grouplist) | ||
1717 | { | ||
1718 | const struct cred *cred = current_cred(); | ||
1719 | int i; | ||
1720 | |||
1721 | if (gidsetsize < 0) | ||
1722 | return -EINVAL; | ||
1723 | |||
1724 | i = cred->group_info->ngroups; | ||
1725 | if (gidsetsize) { | ||
1726 | if (i > gidsetsize) { | ||
1727 | i = -EINVAL; | ||
1728 | goto out; | ||
1729 | } | ||
1730 | if (groups16_to_user(grouplist, cred->group_info)) { | ||
1731 | i = -EFAULT; | ||
1732 | goto out; | ||
1733 | } | ||
1734 | } | ||
1735 | out: | ||
1736 | return i; | ||
1737 | } | ||
1738 | |||
1739 | asmlinkage long | ||
1740 | sys32_setgroups16 (int gidsetsize, short __user *grouplist) | ||
1741 | { | ||
1742 | struct group_info *group_info; | ||
1743 | int retval; | ||
1744 | |||
1745 | if (!capable(CAP_SETGID)) | ||
1746 | return -EPERM; | ||
1747 | if ((unsigned)gidsetsize > NGROUPS_MAX) | ||
1748 | return -EINVAL; | ||
1749 | |||
1750 | group_info = groups_alloc(gidsetsize); | ||
1751 | if (!group_info) | ||
1752 | return -ENOMEM; | ||
1753 | retval = groups16_from_user(group_info, grouplist); | ||
1754 | if (retval) { | ||
1755 | put_group_info(group_info); | ||
1756 | return retval; | ||
1757 | } | ||
1758 | |||
1759 | retval = set_current_groups(group_info); | ||
1760 | put_group_info(group_info); | ||
1761 | |||
1762 | return retval; | ||
1763 | } | ||
1764 | |||
1765 | asmlinkage long | ||
1766 | sys32_truncate64 (unsigned int path, unsigned int len_lo, unsigned int len_hi) | ||
1767 | { | ||
1768 | return sys_truncate(compat_ptr(path), ((unsigned long) len_hi << 32) | len_lo); | ||
1769 | } | ||
1770 | |||
1771 | asmlinkage long | ||
1772 | sys32_ftruncate64 (int fd, unsigned int len_lo, unsigned int len_hi) | ||
1773 | { | ||
1774 | return sys_ftruncate(fd, ((unsigned long) len_hi << 32) | len_lo); | ||
1775 | } | ||
1776 | |||
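Both truncate wrappers, like sys32_pread/sys32_pwrite further down, rebuild a 64-bit quantity from a low/high pair of 32-bit syscall arguments. A stand-alone sketch of the recombination (the helper name is made up):

#include <stdint.h>

/* Join the split 64-bit length/offset passed as two 32-bit words. */
static inline uint64_t join_u32_pair(uint32_t lo, uint32_t hi)
{
        return ((uint64_t)hi << 32) | lo;
}

/* e.g. join_u32_pair(0, 1) == 4294967296 (4 GiB) */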
1777 | static int | ||
1778 | putstat64 (struct stat64 __user *ubuf, struct kstat *kbuf) | ||
1779 | { | ||
1780 | int err; | ||
1781 | u64 hdev; | ||
1782 | |||
1783 | if (clear_user(ubuf, sizeof(*ubuf))) | ||
1784 | return -EFAULT; | ||
1785 | |||
1786 | hdev = huge_encode_dev(kbuf->dev); | ||
1787 | err = __put_user(hdev, (u32 __user*)&ubuf->st_dev); | ||
1788 | err |= __put_user(hdev >> 32, ((u32 __user*)&ubuf->st_dev) + 1); | ||
1789 | err |= __put_user(kbuf->ino, &ubuf->__st_ino); | ||
1790 | err |= __put_user(kbuf->ino, &ubuf->st_ino_lo); | ||
1791 | err |= __put_user(kbuf->ino >> 32, &ubuf->st_ino_hi); | ||
1792 | err |= __put_user(kbuf->mode, &ubuf->st_mode); | ||
1793 | err |= __put_user(kbuf->nlink, &ubuf->st_nlink); | ||
1794 | err |= __put_user(kbuf->uid, &ubuf->st_uid); | ||
1795 | err |= __put_user(kbuf->gid, &ubuf->st_gid); | ||
1796 | hdev = huge_encode_dev(kbuf->rdev); | ||
1797 | err = __put_user(hdev, (u32 __user*)&ubuf->st_rdev); | ||
1798 | err |= __put_user(hdev >> 32, ((u32 __user*)&ubuf->st_rdev) + 1); | ||
1799 | err |= __put_user(kbuf->size, &ubuf->st_size_lo); | ||
1800 | err |= __put_user((kbuf->size >> 32), &ubuf->st_size_hi); | ||
1801 | err |= __put_user(kbuf->atime.tv_sec, &ubuf->st_atime); | ||
1802 | err |= __put_user(kbuf->atime.tv_nsec, &ubuf->st_atime_nsec); | ||
1803 | err |= __put_user(kbuf->mtime.tv_sec, &ubuf->st_mtime); | ||
1804 | err |= __put_user(kbuf->mtime.tv_nsec, &ubuf->st_mtime_nsec); | ||
1805 | err |= __put_user(kbuf->ctime.tv_sec, &ubuf->st_ctime); | ||
1806 | err |= __put_user(kbuf->ctime.tv_nsec, &ubuf->st_ctime_nsec); | ||
1807 | err |= __put_user(kbuf->blksize, &ubuf->st_blksize); | ||
1808 | err |= __put_user(kbuf->blocks, &ubuf->st_blocks); | ||
1809 | return err; | ||
1810 | } | ||
1811 | |||
1812 | asmlinkage long | ||
1813 | sys32_stat64 (char __user *filename, struct stat64 __user *statbuf) | ||
1814 | { | ||
1815 | struct kstat s; | ||
1816 | long ret = vfs_stat(filename, &s); | ||
1817 | if (!ret) | ||
1818 | ret = putstat64(statbuf, &s); | ||
1819 | return ret; | ||
1820 | } | ||
1821 | |||
1822 | asmlinkage long | ||
1823 | sys32_lstat64 (char __user *filename, struct stat64 __user *statbuf) | ||
1824 | { | ||
1825 | struct kstat s; | ||
1826 | long ret = vfs_lstat(filename, &s); | ||
1827 | if (!ret) | ||
1828 | ret = putstat64(statbuf, &s); | ||
1829 | return ret; | ||
1830 | } | ||
1831 | |||
1832 | asmlinkage long | ||
1833 | sys32_fstat64 (unsigned int fd, struct stat64 __user *statbuf) | ||
1834 | { | ||
1835 | struct kstat s; | ||
1836 | long ret = vfs_fstat(fd, &s); | ||
1837 | if (!ret) | ||
1838 | ret = putstat64(statbuf, &s); | ||
1839 | return ret; | ||
1840 | } | ||
1841 | |||
1842 | asmlinkage long | ||
1843 | sys32_sched_rr_get_interval (pid_t pid, struct compat_timespec __user *interval) | ||
1844 | { | ||
1845 | mm_segment_t old_fs = get_fs(); | ||
1846 | struct timespec t; | ||
1847 | long ret; | ||
1848 | |||
1849 | set_fs(KERNEL_DS); | ||
1850 | ret = sys_sched_rr_get_interval(pid, (struct timespec __user *) &t); | ||
1851 | set_fs(old_fs); | ||
1852 | if (put_compat_timespec(&t, interval)) | ||
1853 | return -EFAULT; | ||
1854 | return ret; | ||
1855 | } | ||
1856 | |||
1857 | asmlinkage long | ||
1858 | sys32_pread (unsigned int fd, void __user *buf, unsigned int count, u32 pos_lo, u32 pos_hi) | ||
1859 | { | ||
1860 | return sys_pread64(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo); | ||
1861 | } | ||
1862 | |||
1863 | asmlinkage long | ||
1864 | sys32_pwrite (unsigned int fd, void __user *buf, unsigned int count, u32 pos_lo, u32 pos_hi) | ||
1865 | { | ||
1866 | return sys_pwrite64(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo); | ||
1867 | } | ||
1868 | |||
1869 | asmlinkage long | ||
1870 | sys32_sendfile (int out_fd, int in_fd, int __user *offset, unsigned int count) | ||
1871 | { | ||
1872 | mm_segment_t old_fs = get_fs(); | ||
1873 | long ret; | ||
1874 | off_t of; | ||
1875 | |||
1876 | if (offset && get_user(of, offset)) | ||
1877 | return -EFAULT; | ||
1878 | |||
1879 | set_fs(KERNEL_DS); | ||
1880 | ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *) &of : NULL, count); | ||
1881 | set_fs(old_fs); | ||
1882 | |||
1883 | if (offset && put_user(of, offset)) | ||
1884 | return -EFAULT; | ||
1885 | |||
1886 | return ret; | ||
1887 | } | ||
1888 | |||
1889 | asmlinkage long | ||
1890 | sys32_personality (unsigned int personality) | ||
1891 | { | ||
1892 | long ret; | ||
1893 | |||
1894 | if (current->personality == PER_LINUX32 && personality == PER_LINUX) | ||
1895 | personality = PER_LINUX32; | ||
1896 | ret = sys_personality(personality); | ||
1897 | if (ret == PER_LINUX32) | ||
1898 | ret = PER_LINUX; | ||
1899 | return ret; | ||
1900 | } | ||
1901 | |||
1902 | asmlinkage unsigned long | ||
1903 | sys32_brk (unsigned int brk) | ||
1904 | { | ||
1905 | unsigned long ret, obrk; | ||
1906 | struct mm_struct *mm = current->mm; | ||
1907 | |||
1908 | obrk = mm->brk; | ||
1909 | ret = sys_brk(brk); | ||
1910 | if (ret < obrk) | ||
1911 | clear_user(compat_ptr(ret), PAGE_ALIGN(ret) - ret); | ||
1912 | return ret; | ||
1913 | } | ||
1914 | |||
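When sys_brk() comes back with a break below the old one, the wrapper zeroes from the new break up to the next page boundary. The arithmetic, assuming 4 KiB pages and a made-up break value for illustration (the kernel's PAGE_ALIGN actually uses the native IA-64 page size):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ          4096u
#define PAGE_ALIGN_UP(x) (((x) + PAGE_SZ - 1) & ~(PAGE_SZ - 1))

int main(void)
{
        uint32_t new_brk = 0x0804c100;  /* hypothetical return value of sys_brk() */

        /* 0x0804d000 - 0x0804c100 = 0xf00 bytes cleared */
        printf("clear %u bytes at 0x%08x\n",
               PAGE_ALIGN_UP(new_brk) - new_brk, new_brk);
        return 0;
}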
1915 | /* Structure for ia32 emulation on ia64 */ | ||
1916 | struct epoll_event32 | ||
1917 | { | ||
1918 | u32 events; | ||
1919 | u32 data[2]; | ||
1920 | }; | ||
1921 | |||
1922 | asmlinkage long | ||
1923 | sys32_epoll_ctl(int epfd, int op, int fd, struct epoll_event32 __user *event) | ||
1924 | { | ||
1925 | mm_segment_t old_fs = get_fs(); | ||
1926 | struct epoll_event event64; | ||
1927 | int error; | ||
1928 | u32 data_halfword; | ||
1929 | |||
1930 | if (!access_ok(VERIFY_READ, event, sizeof(struct epoll_event32))) | ||
1931 | return -EFAULT; | ||
1932 | |||
1933 | __get_user(event64.events, &event->events); | ||
1934 | __get_user(data_halfword, &event->data[0]); | ||
1935 | event64.data = data_halfword; | ||
1936 | __get_user(data_halfword, &event->data[1]); | ||
1937 | event64.data |= (u64)data_halfword << 32; | ||
1938 | |||
1939 | set_fs(KERNEL_DS); | ||
1940 | error = sys_epoll_ctl(epfd, op, fd, (struct epoll_event __user *) &event64); | ||
1941 | set_fs(old_fs); | ||
1942 | |||
1943 | return error; | ||
1944 | } | ||
1945 | |||
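struct epoll_event32 stores the 64-bit data member as two u32 words, so sys32_epoll_ctl() above (and sys32_epoll_wait() below) joins and splits it around the native call. A minimal illustration of that conversion, with hypothetical helper names:

#include <stdint.h>

struct epoll_event32_sketch {
        uint32_t events;
        uint32_t data[2];       /* data[0] = low word, data[1] = high word */
};

static uint64_t data_from_ia32(const struct epoll_event32_sketch *e)
{
        return (uint64_t)e->data[0] | ((uint64_t)e->data[1] << 32);
}

static void data_to_ia32(struct epoll_event32_sketch *e, uint64_t data)
{
        e->data[0] = (uint32_t)data;
        e->data[1] = (uint32_t)(data >> 32);
}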
1946 | asmlinkage long | ||
1947 | sys32_epoll_wait(int epfd, struct epoll_event32 __user * events, int maxevents, | ||
1948 | int timeout) | ||
1949 | { | ||
1950 | struct epoll_event *events64 = NULL; | ||
1951 | mm_segment_t old_fs = get_fs(); | ||
1952 | int numevents, size; | ||
1953 | int evt_idx; | ||
1954 | int do_free_pages = 0; | ||
1955 | |||
1956 | if (maxevents <= 0) { | ||
1957 | return -EINVAL; | ||
1958 | } | ||
1959 | |||
1960 | /* Verify that the area passed by the user is writeable */ | ||
1961 | if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event32))) | ||
1962 | return -EFAULT; | ||
1963 | |||
1964 | /* | ||
1965 | * Allocate space for the intermediate copy. If the space needed | ||
1966 | * is large enough to cause kmalloc to fail, then try again with | ||
1967 | * __get_free_pages. | ||
1968 | */ | ||
1969 | size = maxevents * sizeof(struct epoll_event); | ||
1970 | events64 = kmalloc(size, GFP_KERNEL); | ||
1971 | if (events64 == NULL) { | ||
1972 | events64 = (struct epoll_event *) | ||
1973 | __get_free_pages(GFP_KERNEL, get_order(size)); | ||
1974 | if (events64 == NULL) | ||
1975 | return -ENOMEM; | ||
1976 | do_free_pages = 1; | ||
1977 | } | ||
1978 | |||
1979 | /* Do the system call */ | ||
1980 | set_fs(KERNEL_DS); /* copy_to/from_user should work on kernel mem*/ | ||
1981 | numevents = sys_epoll_wait(epfd, (struct epoll_event __user *) events64, | ||
1982 | maxevents, timeout); | ||
1983 | set_fs(old_fs); | ||
1984 | |||
1985 | /* Don't modify userspace memory if we're returning an error */ | ||
1986 | if (numevents > 0) { | ||
1987 | /* Translate the 64-bit structures back into the 32-bit | ||
1988 | structures */ | ||
1989 | for (evt_idx = 0; evt_idx < numevents; evt_idx++) { | ||
1990 | __put_user(events64[evt_idx].events, | ||
1991 | &events[evt_idx].events); | ||
1992 | __put_user((u32)events64[evt_idx].data, | ||
1993 | &events[evt_idx].data[0]); | ||
1994 | __put_user((u32)(events64[evt_idx].data >> 32), | ||
1995 | &events[evt_idx].data[1]); | ||
1996 | } | ||
1997 | } | ||
1998 | |||
1999 | if (do_free_pages) | ||
2000 | free_pages((unsigned long) events64, get_order(size)); | ||
2001 | else | ||
2002 | kfree(events64); | ||
2003 | return numevents; | ||
2004 | } | ||
2005 | |||
2006 | /* | ||
2007 | * Get a yet unused TLS descriptor index. | ||
2008 | */ | ||
2009 | static int | ||
2010 | get_free_idx (void) | ||
2011 | { | ||
2012 | struct thread_struct *t = &current->thread; | ||
2013 | int idx; | ||
2014 | |||
2015 | for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++) | ||
2016 | if (desc_empty(t->tls_array + idx)) | ||
2017 | return idx + GDT_ENTRY_TLS_MIN; | ||
2018 | return -ESRCH; | ||
2019 | } | ||
2020 | |||
2021 | static void set_tls_desc(struct task_struct *p, int idx, | ||
2022 | const struct ia32_user_desc *info, int n) | ||
2023 | { | ||
2024 | struct thread_struct *t = &p->thread; | ||
2025 | struct desc_struct *desc = &t->tls_array[idx - GDT_ENTRY_TLS_MIN]; | ||
2026 | int cpu; | ||
2027 | |||
2028 | /* | ||
2029 | * We must not get preempted while modifying the TLS. | ||
2030 | */ | ||
2031 | cpu = get_cpu(); | ||
2032 | |||
2033 | while (n-- > 0) { | ||
2034 | if (LDT_empty(info)) { | ||
2035 | desc->a = 0; | ||
2036 | desc->b = 0; | ||
2037 | } else { | ||
2038 | desc->a = LDT_entry_a(info); | ||
2039 | desc->b = LDT_entry_b(info); | ||
2040 | } | ||
2041 | |||
2042 | ++info; | ||
2043 | ++desc; | ||
2044 | } | ||
2045 | |||
2046 | if (t == &current->thread) | ||
2047 | load_TLS(t, cpu); | ||
2048 | |||
2049 | put_cpu(); | ||
2050 | } | ||
2051 | |||
2052 | /* | ||
2053 | * Set a given TLS descriptor: | ||
2054 | */ | ||
2055 | asmlinkage int | ||
2056 | sys32_set_thread_area (struct ia32_user_desc __user *u_info) | ||
2057 | { | ||
2058 | struct ia32_user_desc info; | ||
2059 | int idx; | ||
2060 | |||
2061 | if (copy_from_user(&info, u_info, sizeof(info))) | ||
2062 | return -EFAULT; | ||
2063 | idx = info.entry_number; | ||
2064 | |||
2065 | /* | ||
2066 | * index -1 means the kernel should try to find and allocate an empty descriptor: | ||
2067 | */ | ||
2068 | if (idx == -1) { | ||
2069 | idx = get_free_idx(); | ||
2070 | if (idx < 0) | ||
2071 | return idx; | ||
2072 | if (put_user(idx, &u_info->entry_number)) | ||
2073 | return -EFAULT; | ||
2074 | } | ||
2075 | |||
2076 | if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) | ||
2077 | return -EINVAL; | ||
2078 | |||
2079 | set_tls_desc(current, idx, &info, 1); | ||
2080 | return 0; | ||
2081 | } | ||
2082 | |||
2083 | /* | ||
2084 | * Get the current Thread-Local Storage area: | ||
2085 | */ | ||
2086 | |||
2087 | #define GET_BASE(desc) ( \ | ||
2088 | (((desc)->a >> 16) & 0x0000ffff) | \ | ||
2089 | (((desc)->b << 16) & 0x00ff0000) | \ | ||
2090 | ( (desc)->b & 0xff000000) ) | ||
2091 | |||
2092 | #define GET_LIMIT(desc) ( \ | ||
2093 | ((desc)->a & 0x0ffff) | \ | ||
2094 | ((desc)->b & 0xf0000) ) | ||
2095 | |||
2096 | #define GET_32BIT(desc) (((desc)->b >> 22) & 1) | ||
2097 | #define GET_CONTENTS(desc) (((desc)->b >> 10) & 3) | ||
2098 | #define GET_WRITABLE(desc) (((desc)->b >> 9) & 1) | ||
2099 | #define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1) | ||
2100 | #define GET_PRESENT(desc) (((desc)->b >> 15) & 1) | ||
2101 | #define GET_USEABLE(desc) (((desc)->b >> 20) & 1) | ||
2102 | |||
2103 | static void fill_user_desc(struct ia32_user_desc *info, int idx, | ||
2104 | const struct desc_struct *desc) | ||
2105 | { | ||
2106 | info->entry_number = idx; | ||
2107 | info->base_addr = GET_BASE(desc); | ||
2108 | info->limit = GET_LIMIT(desc); | ||
2109 | info->seg_32bit = GET_32BIT(desc); | ||
2110 | info->contents = GET_CONTENTS(desc); | ||
2111 | info->read_exec_only = !GET_WRITABLE(desc); | ||
2112 | info->limit_in_pages = GET_LIMIT_PAGES(desc); | ||
2113 | info->seg_not_present = !GET_PRESENT(desc); | ||
2114 | info->useable = GET_USEABLE(desc); | ||
2115 | } | ||
2116 | |||
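The GET_* macros recover the segment base and limit that set_tls_desc() packed into the two 32-bit descriptor words. A small worked example of the same decoding outside the kernel; the descriptor value is invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Same bit extraction as GET_BASE/GET_LIMIT above. */
static uint32_t desc_base(uint32_t a, uint32_t b)
{
        return ((a >> 16) & 0x0000ffff) | ((b << 16) & 0x00ff0000) | (b & 0xff000000);
}

static uint32_t desc_limit(uint32_t a, uint32_t b)
{
        return (a & 0x0ffff) | (b & 0xf0000);
}

int main(void)
{
        uint32_t a = 0x5678ffff, b = 0x12cf9a34;        /* made-up descriptor words */

        printf("base  = 0x%08x\n", desc_base(a, b));    /* 0x12345678 */
        printf("limit = 0x%05x\n", desc_limit(a, b));   /* 0xfffff    */
        return 0;
}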
2117 | asmlinkage int | ||
2118 | sys32_get_thread_area (struct ia32_user_desc __user *u_info) | ||
2119 | { | ||
2120 | struct ia32_user_desc info; | ||
2121 | struct desc_struct *desc; | ||
2122 | int idx; | ||
2123 | |||
2124 | if (get_user(idx, &u_info->entry_number)) | ||
2125 | return -EFAULT; | ||
2126 | if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) | ||
2127 | return -EINVAL; | ||
2128 | |||
2129 | desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN; | ||
2130 | fill_user_desc(&info, idx, desc); | ||
2131 | |||
2132 | if (copy_to_user(u_info, &info, sizeof(info))) | ||
2133 | return -EFAULT; | ||
2134 | return 0; | ||
2135 | } | ||
2136 | |||
2137 | struct regset_get { | ||
2138 | void *kbuf; | ||
2139 | void __user *ubuf; | ||
2140 | }; | ||
2141 | |||
2142 | struct regset_set { | ||
2143 | const void *kbuf; | ||
2144 | const void __user *ubuf; | ||
2145 | }; | ||
2146 | |||
2147 | struct regset_getset { | ||
2148 | struct task_struct *target; | ||
2149 | const struct user_regset *regset; | ||
2150 | union { | ||
2151 | struct regset_get get; | ||
2152 | struct regset_set set; | ||
2153 | } u; | ||
2154 | unsigned int pos; | ||
2155 | unsigned int count; | ||
2156 | int ret; | ||
2157 | }; | ||
2158 | |||
2159 | static void getfpreg(struct task_struct *task, int regno, int *val) | ||
2160 | { | ||
2161 | switch (regno / sizeof(int)) { | ||
2162 | case 0: | ||
2163 | *val = task->thread.fcr & 0xffff; | ||
2164 | break; | ||
2165 | case 1: | ||
2166 | *val = task->thread.fsr & 0xffff; | ||
2167 | break; | ||
2168 | case 2: | ||
2169 | *val = (task->thread.fsr>>16) & 0xffff; | ||
2170 | break; | ||
2171 | case 3: | ||
2172 | *val = task->thread.fir; | ||
2173 | break; | ||
2174 | case 4: | ||
2175 | *val = (task->thread.fir>>32) & 0xffff; | ||
2176 | break; | ||
2177 | case 5: | ||
2178 | *val = task->thread.fdr; | ||
2179 | break; | ||
2180 | case 6: | ||
2181 | *val = (task->thread.fdr >> 32) & 0xffff; | ||
2182 | break; | ||
2183 | } | ||
2184 | } | ||
2185 | |||
2186 | static void setfpreg(struct task_struct *task, int regno, int val) | ||
2187 | { | ||
2188 | switch (regno / sizeof(int)) { | ||
2189 | case 0: | ||
2190 | task->thread.fcr = (task->thread.fcr & (~0x1f3f)) | ||
2191 | | (val & 0x1f3f); | ||
2192 | break; | ||
2193 | case 1: | ||
2194 | task->thread.fsr = (task->thread.fsr & (~0xffff)) | val; | ||
2195 | break; | ||
2196 | case 2: | ||
2197 | task->thread.fsr = (task->thread.fsr & (~0xffff0000)) | ||
2198 | | (val << 16); | ||
2199 | break; | ||
2200 | case 3: | ||
2201 | task->thread.fir = (task->thread.fir & (~0xffffffff)) | val; | ||
2202 | break; | ||
2203 | case 5: | ||
2204 | task->thread.fdr = (task->thread.fdr & (~0xffffffff)) | val; | ||
2205 | break; | ||
2206 | } | ||
2207 | } | ||
2208 | |||
2209 | static void access_fpreg_ia32(int regno, void *reg, | ||
2210 | struct pt_regs *pt, struct switch_stack *sw, | ||
2211 | int tos, int write) | ||
2212 | { | ||
2213 | void *f; | ||
2214 | |||
2215 | if ((regno += tos) >= 8) | ||
2216 | regno -= 8; | ||
2217 | if (regno < 4) | ||
2218 | f = &pt->f8 + regno; | ||
2219 | else if (regno <= 7) | ||
2220 | f = &sw->f12 + (regno - 4); | ||
2221 | else { | ||
2222 | printk(KERN_ERR "regno must be less than 8\n"); | ||
2223 | return; | ||
2224 | } | ||
2225 | |||
2226 | if (write) | ||
2227 | memcpy(f, reg, sizeof(struct _fpreg_ia32)); | ||
2228 | else | ||
2229 | memcpy(reg, f, sizeof(struct _fpreg_ia32)); | ||
2230 | } | ||
2231 | |||
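access_fpreg_ia32(), like put_fpreg()/get_fpreg() earlier in the file, maps a stack-relative ST(i) index onto the physical f8-f15 slots by rotating through the x87 top-of-stack field, which every caller extracts as (fsr >> 11) & 7. A tiny stand-alone sketch of that rotation, using a made-up fsr value:

#include <stdio.h>

/* ST(i) -> physical slot 0..7 (slot n lives in f8+n), given the top-of-stack. */
static int st_to_slot(int st, int tos)
{
        int slot = st + tos;
        return slot >= 8 ? slot - 8 : slot;
}

int main(void)
{
        unsigned long fsr = 0x3800;             /* hypothetical fsr, TOS field = 7 */
        int tos = (fsr >> 11) & 7;

        printf("ST(0) -> f%d\n", 8 + st_to_slot(0, tos));       /* f15 */
        printf("ST(3) -> f%d\n", 8 + st_to_slot(3, tos));       /* f10 */
        return 0;
}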
2232 | static void do_fpregs_get(struct unw_frame_info *info, void *arg) | ||
2233 | { | ||
2234 | struct regset_getset *dst = arg; | ||
2235 | struct task_struct *task = dst->target; | ||
2236 | struct pt_regs *pt; | ||
2237 | int start, end, tos; | ||
2238 | char buf[80]; | ||
2239 | |||
2240 | if (dst->count == 0 || unw_unwind_to_user(info) < 0) | ||
2241 | return; | ||
2242 | if (dst->pos < 7 * sizeof(int)) { | ||
2243 | end = min((dst->pos + dst->count), | ||
2244 | (unsigned int)(7 * sizeof(int))); | ||
2245 | for (start = dst->pos; start < end; start += sizeof(int)) | ||
2246 | getfpreg(task, start, (int *)(buf + start)); | ||
2247 | dst->ret = user_regset_copyout(&dst->pos, &dst->count, | ||
2248 | &dst->u.get.kbuf, &dst->u.get.ubuf, buf, | ||
2249 | 0, 7 * sizeof(int)); | ||
2250 | if (dst->ret || dst->count == 0) | ||
2251 | return; | ||
2252 | } | ||
2253 | if (dst->pos < sizeof(struct ia32_user_i387_struct)) { | ||
2254 | pt = task_pt_regs(task); | ||
2255 | tos = (task->thread.fsr >> 11) & 7; | ||
2256 | end = min(dst->pos + dst->count, | ||
2257 | (unsigned int)(sizeof(struct ia32_user_i387_struct))); | ||
2258 | start = (dst->pos - 7 * sizeof(int)) / | ||
2259 | sizeof(struct _fpreg_ia32); | ||
2260 | end = (end - 7 * sizeof(int)) / sizeof(struct _fpreg_ia32); | ||
2261 | for (; start < end; start++) | ||
2262 | access_fpreg_ia32(start, | ||
2263 | (struct _fpreg_ia32 *)buf + start, | ||
2264 | pt, info->sw, tos, 0); | ||
2265 | dst->ret = user_regset_copyout(&dst->pos, &dst->count, | ||
2266 | &dst->u.get.kbuf, &dst->u.get.ubuf, | ||
2267 | buf, 7 * sizeof(int), | ||
2268 | sizeof(struct ia32_user_i387_struct)); | ||
2269 | if (dst->ret || dst->count == 0) | ||
2270 | return; | ||
2271 | } | ||
2272 | } | ||
2273 | |||
2274 | static void do_fpregs_set(struct unw_frame_info *info, void *arg) | ||
2275 | { | ||
2276 | struct regset_getset *dst = arg; | ||
2277 | struct task_struct *task = dst->target; | ||
2278 | struct pt_regs *pt; | ||
2279 | char buf[80]; | ||
2280 | int end, start, tos; | ||
2281 | |||
2282 | if (dst->count == 0 || unw_unwind_to_user(info) < 0) | ||
2283 | return; | ||
2284 | |||
2285 | if (dst->pos < 7 * sizeof(int)) { | ||
2286 | start = dst->pos; | ||
2287 | dst->ret = user_regset_copyin(&dst->pos, &dst->count, | ||
2288 | &dst->u.set.kbuf, &dst->u.set.ubuf, buf, | ||
2289 | 0, 7 * sizeof(int)); | ||
2290 | if (dst->ret) | ||
2291 | return; | ||
2292 | for (; start < dst->pos; start += sizeof(int)) | ||
2293 | setfpreg(task, start, *((int *)(buf + start))); | ||
2294 | if (dst->count == 0) | ||
2295 | return; | ||
2296 | } | ||
2297 | if (dst->pos < sizeof(struct ia32_user_i387_struct)) { | ||
2298 | start = (dst->pos - 7 * sizeof(int)) / | ||
2299 | sizeof(struct _fpreg_ia32); | ||
2300 | dst->ret = user_regset_copyin(&dst->pos, &dst->count, | ||
2301 | &dst->u.set.kbuf, &dst->u.set.ubuf, | ||
2302 | buf, 7 * sizeof(int), | ||
2303 | sizeof(struct ia32_user_i387_struct)); | ||
2304 | if (dst->ret) | ||
2305 | return; | ||
2306 | pt = task_pt_regs(task); | ||
2307 | tos = (task->thread.fsr >> 11) & 7; | ||
2308 | end = (dst->pos - 7 * sizeof(int)) / sizeof(struct _fpreg_ia32); | ||
2309 | for (; start < end; start++) | ||
2310 | access_fpreg_ia32(start, | ||
2311 | (struct _fpreg_ia32 *)buf + start, | ||
2312 | pt, info->sw, tos, 1); | ||
2313 | if (dst->count == 0) | ||
2314 | return; | ||
2315 | } | ||
2316 | } | ||
2317 | |||
2318 | #define OFFSET(member) ((int)(offsetof(struct ia32_user_fxsr_struct, member))) | ||
2319 | static void getfpxreg(struct task_struct *task, int start, int end, char *buf) | ||
2320 | { | ||
2321 | int min_val; | ||
2322 | |||
2323 | min_val = min(end, OFFSET(fop)); | ||
2324 | while (start < min_val) { | ||
2325 | if (start == OFFSET(cwd)) | ||
2326 | *((short *)buf) = task->thread.fcr & 0xffff; | ||
2327 | else if (start == OFFSET(swd)) | ||
2328 | *((short *)buf) = task->thread.fsr & 0xffff; | ||
2329 | else if (start == OFFSET(twd)) | ||
2330 | *((short *)buf) = (task->thread.fsr>>16) & 0xffff; | ||
2331 | buf += 2; | ||
2332 | start += 2; | ||
2333 | } | ||
2334 | /* skip fop element */ | ||
2335 | if (start == OFFSET(fop)) { | ||
2336 | start += 2; | ||
2337 | buf += 2; | ||
2338 | } | ||
2339 | while (start < end) { | ||
2340 | if (start == OFFSET(fip)) | ||
2341 | *((int *)buf) = task->thread.fir; | ||
2342 | else if (start == OFFSET(fcs)) | ||
2343 | *((int *)buf) = (task->thread.fir>>32) & 0xffff; | ||
2344 | else if (start == OFFSET(foo)) | ||
2345 | *((int *)buf) = task->thread.fdr; | ||
2346 | else if (start == OFFSET(fos)) | ||
2347 | *((int *)buf) = (task->thread.fdr>>32) & 0xffff; | ||
2348 | else if (start == OFFSET(mxcsr)) | ||
2349 | *((int *)buf) = ((task->thread.fcr>>32) & 0xff80) | ||
2350 | | ((task->thread.fsr>>32) & 0x3f); | ||
2351 | buf += 4; | ||
2352 | start += 4; | ||
2353 | } | ||
2354 | } | ||
2355 | |||
2356 | static void setfpxreg(struct task_struct *task, int start, int end, char *buf) | ||
2357 | { | ||
2358 | int min_val, num32; | ||
2359 | short num; | ||
2360 | unsigned long num64; | ||
2361 | |||
2362 | min_val = min(end, OFFSET(fop)); | ||
2363 | while (start < min_val) { | ||
2364 | num = *((short *)buf); | ||
2365 | if (start == OFFSET(cwd)) { | ||
2366 | task->thread.fcr = (task->thread.fcr & (~0x1f3f)) | ||
2367 | | (num & 0x1f3f); | ||
2368 | } else if (start == OFFSET(swd)) { | ||
2369 | task->thread.fsr = (task->thread.fsr & (~0xffff)) | num; | ||
2370 | } else if (start == OFFSET(twd)) { | ||
2371 | task->thread.fsr = (task->thread.fsr & (~0xffff0000)) | ||
2372 | | (((int)num) << 16); | ||
2373 | } | ||
2374 | buf += 2; | ||
2375 | start += 2; | ||
2376 | } | ||
2377 | /* skip fop element */ | ||
2378 | if (start == OFFSET(fop)) { | ||
2379 | start += 2; | ||
2380 | buf += 2; | ||
2381 | } | ||
2382 | while (start < end) { | ||
2383 | num32 = *((int *)buf); | ||
2384 | if (start == OFFSET(fip)) | ||
2385 | task->thread.fir = (task->thread.fir & (~0xffffffff)) | ||
2386 | | num32; | ||
2387 | else if (start == OFFSET(foo)) | ||
2388 | task->thread.fdr = (task->thread.fdr & (~0xffffffff)) | ||
2389 | | num32; | ||
2390 | else if (start == OFFSET(mxcsr)) { | ||
2391 | num64 = num32 & 0xff10; | ||
2392 | task->thread.fcr = (task->thread.fcr & | ||
2393 | (~0xff1000000000UL)) | (num64<<32); | ||
2394 | num64 = num32 & 0x3f; | ||
2395 | task->thread.fsr = (task->thread.fsr & | ||
2396 | (~0x3f00000000UL)) | (num64<<32); | ||
2397 | } | ||
2398 | buf += 4; | ||
2399 | start += 4; | ||
2400 | } | ||
2401 | } | ||
2402 | |||
2403 | static void do_fpxregs_get(struct unw_frame_info *info, void *arg) | ||
2404 | { | ||
2405 | struct regset_getset *dst = arg; | ||
2406 | struct task_struct *task = dst->target; | ||
2407 | struct pt_regs *pt; | ||
2408 | char buf[128]; | ||
2409 | int start, end, tos; | ||
2410 | |||
2411 | if (dst->count == 0 || unw_unwind_to_user(info) < 0) | ||
2412 | return; | ||
2413 | if (dst->pos < OFFSET(st_space[0])) { | ||
2414 | end = min(dst->pos + dst->count, (unsigned int)32); | ||
2415 | getfpxreg(task, dst->pos, end, buf); | ||
2416 | dst->ret = user_regset_copyout(&dst->pos, &dst->count, | ||
2417 | &dst->u.get.kbuf, &dst->u.get.ubuf, buf, | ||
2418 | 0, OFFSET(st_space[0])); | ||
2419 | if (dst->ret || dst->count == 0) | ||
2420 | return; | ||
2421 | } | ||
2422 | if (dst->pos < OFFSET(xmm_space[0])) { | ||
2423 | pt = task_pt_regs(task); | ||
2424 | tos = (task->thread.fsr >> 11) & 7; | ||
2425 | end = min(dst->pos + dst->count, | ||
2426 | (unsigned int)OFFSET(xmm_space[0])); | ||
2427 | start = (dst->pos - OFFSET(st_space[0])) / 16; | ||
2428 | end = (end - OFFSET(st_space[0])) / 16; | ||
2429 | for (; start < end; start++) | ||
2430 | access_fpreg_ia32(start, buf + 16 * start, pt, | ||
2431 | info->sw, tos, 0); | ||
2432 | dst->ret = user_regset_copyout(&dst->pos, &dst->count, | ||
2433 | &dst->u.get.kbuf, &dst->u.get.ubuf, | ||
2434 | buf, OFFSET(st_space[0]), OFFSET(xmm_space[0])); | ||
2435 | if (dst->ret || dst->count == 0) | ||
2436 | return; | ||
2437 | } | ||
2438 | if (dst->pos < OFFSET(padding[0])) | ||
2439 | dst->ret = user_regset_copyout(&dst->pos, &dst->count, | ||
2440 | &dst->u.get.kbuf, &dst->u.get.ubuf, | ||
2441 | &info->sw->f16, OFFSET(xmm_space[0]), | ||
2442 | OFFSET(padding[0])); | ||
2443 | } | ||
2444 | |||
2445 | static void do_fpxregs_set(struct unw_frame_info *info, void *arg) | ||
2446 | { | ||
2447 | struct regset_getset *dst = arg; | ||
2448 | struct task_struct *task = dst->target; | ||
2449 | char buf[128]; | ||
2450 | int start, end; | ||
2451 | |||
2452 | if (dst->count == 0 || unw_unwind_to_user(info) < 0) | ||
2453 | return; | ||
2454 | |||
2455 | if (dst->pos < OFFSET(st_space[0])) { | ||
2456 | start = dst->pos; | ||
2457 | dst->ret = user_regset_copyin(&dst->pos, &dst->count, | ||
2458 | &dst->u.set.kbuf, &dst->u.set.ubuf, | ||
2459 | buf, 0, OFFSET(st_space[0])); | ||
2460 | if (dst->ret) | ||
2461 | return; | ||
2462 | setfpxreg(task, start, dst->pos, buf); | ||
2463 | if (dst->count == 0) | ||
2464 | return; | ||
2465 | } | ||
2466 | if (dst->pos < OFFSET(xmm_space[0])) { | ||
2467 | struct pt_regs *pt; | ||
2468 | int tos; | ||
2469 | pt = task_pt_regs(task); | ||
2470 | tos = (task->thread.fsr >> 11) & 7; | ||
2471 | start = (dst->pos - OFFSET(st_space[0])) / 16; | ||
2472 | dst->ret = user_regset_copyin(&dst->pos, &dst->count, | ||
2473 | &dst->u.set.kbuf, &dst->u.set.ubuf, | ||
2474 | buf, OFFSET(st_space[0]), OFFSET(xmm_space[0])); | ||
2475 | if (dst->ret) | ||
2476 | return; | ||
2477 | end = (dst->pos - OFFSET(st_space[0])) / 16; | ||
2478 | for (; start < end; start++) | ||
2479 | access_fpreg_ia32(start, buf + 16 * start, pt, info->sw, | ||
2480 | tos, 1); | ||
2481 | if (dst->count == 0) | ||
2482 | return; | ||
2483 | } | ||
2484 | if (dst->pos < OFFSET(padding[0])) | ||
2485 | dst->ret = user_regset_copyin(&dst->pos, &dst->count, | ||
2486 | &dst->u.set.kbuf, &dst->u.set.ubuf, | ||
2487 | &info->sw->f16, OFFSET(xmm_space[0]), | ||
2488 | OFFSET(padding[0])); | ||
2489 | } | ||
2490 | #undef OFFSET | ||
2491 | |||
2492 | static int do_regset_call(void (*call)(struct unw_frame_info *, void *), | ||
2493 | struct task_struct *target, | ||
2494 | const struct user_regset *regset, | ||
2495 | unsigned int pos, unsigned int count, | ||
2496 | const void *kbuf, const void __user *ubuf) | ||
2497 | { | ||
2498 | struct regset_getset info = { .target = target, .regset = regset, | ||
2499 | .pos = pos, .count = count, | ||
2500 | .u.set = { .kbuf = kbuf, .ubuf = ubuf }, | ||
2501 | .ret = 0 }; | ||
2502 | |||
2503 | if (target == current) | ||
2504 | unw_init_running(call, &info); | ||
2505 | else { | ||
2506 | struct unw_frame_info ufi; | ||
2507 | memset(&ufi, 0, sizeof(ufi)); | ||
2508 | unw_init_from_blocked_task(&ufi, target); | ||
2509 | (*call)(&ufi, &info); | ||
2510 | } | ||
2511 | |||
2512 | return info.ret; | ||
2513 | } | ||
2514 | |||
2515 | static int ia32_fpregs_get(struct task_struct *target, | ||
2516 | const struct user_regset *regset, | ||
2517 | unsigned int pos, unsigned int count, | ||
2518 | void *kbuf, void __user *ubuf) | ||
2519 | { | ||
2520 | return do_regset_call(do_fpregs_get, target, regset, pos, count, | ||
2521 | kbuf, ubuf); | ||
2522 | } | ||
2523 | |||
2524 | static int ia32_fpregs_set(struct task_struct *target, | ||
2525 | const struct user_regset *regset, | ||
2526 | unsigned int pos, unsigned int count, | ||
2527 | const void *kbuf, const void __user *ubuf) | ||
2528 | { | ||
2529 | return do_regset_call(do_fpregs_set, target, regset, pos, count, | ||
2530 | kbuf, ubuf); | ||
2531 | } | ||
2532 | |||
2533 | static int ia32_fpxregs_get(struct task_struct *target, | ||
2534 | const struct user_regset *regset, | ||
2535 | unsigned int pos, unsigned int count, | ||
2536 | void *kbuf, void __user *ubuf) | ||
2537 | { | ||
2538 | return do_regset_call(do_fpxregs_get, target, regset, pos, count, | ||
2539 | kbuf, ubuf); | ||
2540 | } | ||
2541 | |||
2542 | static int ia32_fpxregs_set(struct task_struct *target, | ||
2543 | const struct user_regset *regset, | ||
2544 | unsigned int pos, unsigned int count, | ||
2545 | const void *kbuf, const void __user *ubuf) | ||
2546 | { | ||
2547 | return do_regset_call(do_fpxregs_set, target, regset, pos, count, | ||
2548 | kbuf, ubuf); | ||
2549 | } | ||
2550 | |||
2551 | static int ia32_genregs_get(struct task_struct *target, | ||
2552 | const struct user_regset *regset, | ||
2553 | unsigned int pos, unsigned int count, | ||
2554 | void *kbuf, void __user *ubuf) | ||
2555 | { | ||
2556 | if (kbuf) { | ||
2557 | u32 *kp = kbuf; | ||
2558 | while (count > 0) { | ||
2559 | *kp++ = getreg(target, pos); | ||
2560 | pos += 4; | ||
2561 | count -= 4; | ||
2562 | } | ||
2563 | } else { | ||
2564 | u32 __user *up = ubuf; | ||
2565 | while (count > 0) { | ||
2566 | if (__put_user(getreg(target, pos), up++)) | ||
2567 | return -EFAULT; | ||
2568 | pos += 4; | ||
2569 | count -= 4; | ||
2570 | } | ||
2571 | } | ||
2572 | return 0; | ||
2573 | } | ||
2574 | |||
2575 | static int ia32_genregs_set(struct task_struct *target, | ||
2576 | const struct user_regset *regset, | ||
2577 | unsigned int pos, unsigned int count, | ||
2578 | const void *kbuf, const void __user *ubuf) | ||
2579 | { | ||
2580 | int ret = 0; | ||
2581 | |||
2582 | if (kbuf) { | ||
2583 | const u32 *kp = kbuf; | ||
2584 | while (!ret && count > 0) { | ||
2585 | putreg(target, pos, *kp++); | ||
2586 | pos += 4; | ||
2587 | count -= 4; | ||
2588 | } | ||
2589 | } else { | ||
2590 | const u32 __user *up = ubuf; | ||
2591 | u32 val; | ||
2592 | while (!ret && count > 0) { | ||
2593 | ret = __get_user(val, up++); | ||
2594 | if (!ret) | ||
2595 | putreg(target, pos, val); | ||
2596 | pos += 4; | ||
2597 | count -= 4; | ||
2598 | } | ||
2599 | } | ||
2600 | return ret; | ||
2601 | } | ||
2602 | |||
2603 | static int ia32_tls_active(struct task_struct *target, | ||
2604 | const struct user_regset *regset) | ||
2605 | { | ||
2606 | struct thread_struct *t = &target->thread; | ||
2607 | int n = GDT_ENTRY_TLS_ENTRIES; | ||
2608 | while (n > 0 && desc_empty(&t->tls_array[n -1])) | ||
2609 | --n; | ||
2610 | return n; | ||
2611 | } | ||
2612 | |||
2613 | static int ia32_tls_get(struct task_struct *target, | ||
2614 | const struct user_regset *regset, unsigned int pos, | ||
2615 | unsigned int count, void *kbuf, void __user *ubuf) | ||
2616 | { | ||
2617 | const struct desc_struct *tls; | ||
2618 | |||
2619 | if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct ia32_user_desc) || | ||
2620 | (pos % sizeof(struct ia32_user_desc)) != 0 || | ||
2621 | (count % sizeof(struct ia32_user_desc)) != 0) | ||
2622 | return -EINVAL; | ||
2623 | |||
2624 | pos /= sizeof(struct ia32_user_desc); | ||
2625 | count /= sizeof(struct ia32_user_desc); | ||
2626 | |||
2627 | tls = &target->thread.tls_array[pos]; | ||
2628 | |||
2629 | if (kbuf) { | ||
2630 | struct ia32_user_desc *info = kbuf; | ||
2631 | while (count-- > 0) | ||
2632 | fill_user_desc(info++, GDT_ENTRY_TLS_MIN + pos++, | ||
2633 | tls++); | ||
2634 | } else { | ||
2635 | struct ia32_user_desc __user *u_info = ubuf; | ||
2636 | while (count-- > 0) { | ||
2637 | struct ia32_user_desc info; | ||
2638 | fill_user_desc(&info, GDT_ENTRY_TLS_MIN + pos++, tls++); | ||
2639 | if (__copy_to_user(u_info++, &info, sizeof(info))) | ||
2640 | return -EFAULT; | ||
2641 | } | ||
2642 | } | ||
2643 | |||
2644 | return 0; | ||
2645 | } | ||
2646 | |||
2647 | static int ia32_tls_set(struct task_struct *target, | ||
2648 | const struct user_regset *regset, unsigned int pos, | ||
2649 | unsigned int count, const void *kbuf, const void __user *ubuf) | ||
2650 | { | ||
2651 | struct ia32_user_desc infobuf[GDT_ENTRY_TLS_ENTRIES]; | ||
2652 | const struct ia32_user_desc *info; | ||
2653 | |||
2654 | if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct ia32_user_desc) || | ||
2655 | (pos % sizeof(struct ia32_user_desc)) != 0 || | ||
2656 | (count % sizeof(struct ia32_user_desc)) != 0) | ||
2657 | return -EINVAL; | ||
2658 | |||
2659 | if (kbuf) | ||
2660 | info = kbuf; | ||
2661 | else if (__copy_from_user(infobuf, ubuf, count)) | ||
2662 | return -EFAULT; | ||
2663 | else | ||
2664 | info = infobuf; | ||
2665 | |||
2666 | set_tls_desc(target, | ||
2667 | GDT_ENTRY_TLS_MIN + (pos / sizeof(struct ia32_user_desc)), | ||
2668 | info, count / sizeof(struct ia32_user_desc)); | ||
2669 | |||
2670 | return 0; | ||
2671 | } | ||
2672 | |||
2673 | /* | ||
2674 | * This should match arch/i386/kernel/ptrace.c:native_regsets. | ||
2675 | * XXX ioperm? vm86? | ||
2676 | */ | ||
2677 | static const struct user_regset ia32_regsets[] = { | ||
2678 | { | ||
2679 | .core_note_type = NT_PRSTATUS, | ||
2680 | .n = sizeof(struct user_regs_struct32)/4, | ||
2681 | .size = 4, .align = 4, | ||
2682 | .get = ia32_genregs_get, .set = ia32_genregs_set | ||
2683 | }, | ||
2684 | { | ||
2685 | .core_note_type = NT_PRFPREG, | ||
2686 | .n = sizeof(struct ia32_user_i387_struct) / 4, | ||
2687 | .size = 4, .align = 4, | ||
2688 | .get = ia32_fpregs_get, .set = ia32_fpregs_set | ||
2689 | }, | ||
2690 | { | ||
2691 | .core_note_type = NT_PRXFPREG, | ||
2692 | .n = sizeof(struct ia32_user_fxsr_struct) / 4, | ||
2693 | .size = 4, .align = 4, | ||
2694 | .get = ia32_fpxregs_get, .set = ia32_fpxregs_set | ||
2695 | }, | ||
2696 | { | ||
2697 | .core_note_type = NT_386_TLS, | ||
2698 | .n = GDT_ENTRY_TLS_ENTRIES, | ||
2699 | .bias = GDT_ENTRY_TLS_MIN, | ||
2700 | .size = sizeof(struct ia32_user_desc), | ||
2701 | .align = sizeof(struct ia32_user_desc), | ||
2702 | .active = ia32_tls_active, | ||
2703 | .get = ia32_tls_get, .set = ia32_tls_set, | ||
2704 | }, | ||
2705 | }; | ||
2706 | |||
2707 | const struct user_regset_view user_ia32_view = { | ||
2708 | .name = "i386", .e_machine = EM_386, | ||
2709 | .regsets = ia32_regsets, .n = ARRAY_SIZE(ia32_regsets) | ||
2710 | }; | ||
2711 | |||
2712 | long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high, | ||
2713 | __u32 len_low, __u32 len_high, int advice) | ||
2714 | { | ||
2715 | return sys_fadvise64_64(fd, | ||
2716 | (((u64)offset_high)<<32) | offset_low, | ||
2717 | (((u64)len_high)<<32) | len_low, | ||
2718 | advice); | ||
2719 | } | ||
2720 | |||
2721 | #ifdef NOTYET /* UNTESTED FOR IA64 FROM HERE DOWN */ | ||
2722 | |||
2723 | asmlinkage long sys32_setreuid(compat_uid_t ruid, compat_uid_t euid) | ||
2724 | { | ||
2725 | uid_t sruid, seuid; | ||
2726 | |||
2727 | sruid = (ruid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)ruid); | ||
2728 | seuid = (euid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)euid); | ||
2729 | return sys_setreuid(sruid, seuid); | ||
2730 | } | ||
2731 | |||
2732 | asmlinkage long | ||
2733 | sys32_setresuid(compat_uid_t ruid, compat_uid_t euid, | ||
2734 | compat_uid_t suid) | ||
2735 | { | ||
2736 | uid_t sruid, seuid, ssuid; | ||
2737 | |||
2738 | sruid = (ruid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)ruid); | ||
2739 | seuid = (euid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)euid); | ||
2740 | ssuid = (suid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)suid); | ||
2741 | return sys_setresuid(sruid, seuid, ssuid); | ||
2742 | } | ||
2743 | |||
2744 | asmlinkage long | ||
2745 | sys32_setregid(compat_gid_t rgid, compat_gid_t egid) | ||
2746 | { | ||
2747 | gid_t srgid, segid; | ||
2748 | |||
2749 | srgid = (rgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)rgid); | ||
2750 | segid = (egid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)egid); | ||
2751 | return sys_setregid(srgid, segid); | ||
2752 | } | ||
2753 | |||
2754 | asmlinkage long | ||
2755 | sys32_setresgid(compat_gid_t rgid, compat_gid_t egid, | ||
2756 | compat_gid_t sgid) | ||
2757 | { | ||
2758 | gid_t srgid, segid, ssgid; | ||
2759 | |||
2760 | srgid = (rgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)rgid); | ||
2761 | segid = (egid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)egid); | ||
2762 | ssgid = (sgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)sgid); | ||
2763 | return sys_setresgid(srgid, segid, ssgid); | ||
2764 | } | ||
2765 | #endif /* NOTYET */ | ||
diff --git a/arch/ia64/include/asm/ia32.h b/arch/ia64/include/asm/ia32.h deleted file mode 100644 index 2390ee145aa1..000000000000 --- a/arch/ia64/include/asm/ia32.h +++ /dev/null | |||
@@ -1,40 +0,0 @@ | |||
1 | #ifndef _ASM_IA64_IA32_H | ||
2 | #define _ASM_IA64_IA32_H | ||
3 | |||
4 | |||
5 | #include <asm/ptrace.h> | ||
6 | #include <asm/signal.h> | ||
7 | |||
8 | #define IA32_NR_syscalls 285 /* length of syscall table */ | ||
9 | #define IA32_PAGE_SHIFT 12 /* 4KB pages */ | ||
10 | |||
11 | #ifndef __ASSEMBLY__ | ||
12 | |||
13 | # ifdef CONFIG_IA32_SUPPORT | ||
14 | |||
15 | #define IA32_PAGE_OFFSET 0xc0000000 | ||
16 | |||
17 | extern void ia32_cpu_init (void); | ||
18 | extern void ia32_mem_init (void); | ||
19 | extern void ia32_gdt_init (void); | ||
20 | extern int ia32_exception (struct pt_regs *regs, unsigned long isr); | ||
21 | extern int ia32_intercept (struct pt_regs *regs, unsigned long isr); | ||
22 | extern int ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs); | ||
23 | |||
24 | # endif /* !CONFIG_IA32_SUPPORT */ | ||
25 | |||
26 | /* Declare this unconditionally, so we don't get warnings for unreachable code. */ | ||
27 | extern int ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info, | ||
28 | sigset_t *set, struct pt_regs *regs); | ||
29 | #if PAGE_SHIFT > IA32_PAGE_SHIFT | ||
30 | extern int ia32_copy_ia64_partial_page_list(struct task_struct *, | ||
31 | unsigned long); | ||
32 | extern void ia32_drop_ia64_partial_page_list(struct task_struct *); | ||
33 | #else | ||
34 | # define ia32_copy_ia64_partial_page_list(a1, a2) 0 | ||
35 | # define ia32_drop_ia64_partial_page_list(a1) do { ; } while (0) | ||
36 | #endif | ||
37 | |||
38 | #endif /* !__ASSEMBLY__ */ | ||
39 | |||
40 | #endif /* _ASM_IA64_IA32_H */ | ||
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h index 7fa90f73f6be..348e44d08ce3 100644 --- a/arch/ia64/include/asm/processor.h +++ b/arch/ia64/include/asm/processor.h | |||
@@ -270,23 +270,6 @@ typedef struct { | |||
270 | (int __user *) (addr)); \ | 270 | (int __user *) (addr)); \ |
271 | }) | 271 | }) |
272 | 272 | ||
273 | #ifdef CONFIG_IA32_SUPPORT | ||
274 | struct desc_struct { | ||
275 | unsigned int a, b; | ||
276 | }; | ||
277 | |||
278 | #define desc_empty(desc) (!((desc)->a | (desc)->b)) | ||
279 | #define desc_equal(desc1, desc2) (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b)) | ||
280 | |||
281 | #define GDT_ENTRY_TLS_ENTRIES 3 | ||
282 | #define GDT_ENTRY_TLS_MIN 6 | ||
283 | #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1) | ||
284 | |||
285 | #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) | ||
286 | |||
287 | struct ia64_partial_page_list; | ||
288 | #endif | ||
289 | |||
290 | struct thread_struct { | 273 | struct thread_struct { |
291 | __u32 flags; /* various thread flags (see IA64_THREAD_*) */ | 274 | __u32 flags; /* various thread flags (see IA64_THREAD_*) */ |
292 | /* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */ | 275 | /* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */ |
@@ -298,29 +281,6 @@ struct thread_struct { | |||
298 | __u64 rbs_bot; /* the base address for the RBS */ | 281 | __u64 rbs_bot; /* the base address for the RBS */ |
299 | int last_fph_cpu; /* CPU that may hold the contents of f32-f127 */ | 282 | int last_fph_cpu; /* CPU that may hold the contents of f32-f127 */ |
300 | 283 | ||
301 | #ifdef CONFIG_IA32_SUPPORT | ||
302 | __u64 eflag; /* IA32 EFLAGS reg */ | ||
303 | __u64 fsr; /* IA32 floating pt status reg */ | ||
304 | __u64 fcr; /* IA32 floating pt control reg */ | ||
305 | __u64 fir; /* IA32 fp except. instr. reg */ | ||
306 | __u64 fdr; /* IA32 fp except. data reg */ | ||
307 | __u64 old_k1; /* old value of ar.k1 */ | ||
308 | __u64 old_iob; /* old IOBase value */ | ||
309 | struct ia64_partial_page_list *ppl; /* partial page list for 4K page size issue */ | ||
310 | /* cached TLS descriptors. */ | ||
311 | struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; | ||
312 | |||
313 | # define INIT_THREAD_IA32 .eflag = 0, \ | ||
314 | .fsr = 0, \ | ||
315 | .fcr = 0x17800000037fULL, \ | ||
316 | .fir = 0, \ | ||
317 | .fdr = 0, \ | ||
318 | .old_k1 = 0, \ | ||
319 | .old_iob = 0, \ | ||
320 | .ppl = NULL, | ||
321 | #else | ||
322 | # define INIT_THREAD_IA32 | ||
323 | #endif /* CONFIG_IA32_SUPPORT */ | ||
324 | #ifdef CONFIG_PERFMON | 284 | #ifdef CONFIG_PERFMON |
325 | void *pfm_context; /* pointer to detailed PMU context */ | 285 | void *pfm_context; /* pointer to detailed PMU context */ |
326 | unsigned long pfm_needs_checking; /* when >0, pending perfmon work on kernel exit */ | 286 | unsigned long pfm_needs_checking; /* when >0, pending perfmon work on kernel exit */ |
@@ -342,7 +302,6 @@ struct thread_struct { | |||
342 | .rbs_bot = STACK_TOP - DEFAULT_USER_STACK_SIZE, \ | 302 | .rbs_bot = STACK_TOP - DEFAULT_USER_STACK_SIZE, \ |
343 | .task_size = DEFAULT_TASK_SIZE, \ | 303 | .task_size = DEFAULT_TASK_SIZE, \ |
344 | .last_fph_cpu = -1, \ | 304 | .last_fph_cpu = -1, \ |
345 | INIT_THREAD_IA32 \ | ||
346 | INIT_THREAD_PM \ | 305 | INIT_THREAD_PM \ |
347 | .dbr = {0, }, \ | 306 | .dbr = {0, }, \ |
348 | .ibr = {0, }, \ | 307 | .ibr = {0, }, \ |
@@ -485,11 +444,6 @@ extern void __ia64_load_fpu (struct ia64_fpreg *fph); | |||
485 | extern void ia64_save_debug_regs (unsigned long *save_area); | 444 | extern void ia64_save_debug_regs (unsigned long *save_area); |
486 | extern void ia64_load_debug_regs (unsigned long *save_area); | 445 | extern void ia64_load_debug_regs (unsigned long *save_area); |
487 | 446 | ||
488 | #ifdef CONFIG_IA32_SUPPORT | ||
489 | extern void ia32_save_state (struct task_struct *task); | ||
490 | extern void ia32_load_state (struct task_struct *task); | ||
491 | #endif | ||
492 | |||
493 | #define ia64_fph_enable() do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0) | 447 | #define ia64_fph_enable() do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0) |
494 | #define ia64_fph_disable() do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0) | 448 | #define ia64_fph_disable() do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0) |
495 | 449 | ||
diff --git a/arch/ia64/include/asm/syscall.h b/arch/ia64/include/asm/syscall.h index 2f758a42f94b..a7ff1c6ab068 100644 --- a/arch/ia64/include/asm/syscall.h +++ b/arch/ia64/include/asm/syscall.h | |||
@@ -22,33 +22,18 @@ static inline long syscall_get_nr(struct task_struct *task, | |||
22 | if ((long)regs->cr_ifs < 0) /* Not a syscall */ | 22 | if ((long)regs->cr_ifs < 0) /* Not a syscall */ |
23 | return -1; | 23 | return -1; |
24 | 24 | ||
25 | #ifdef CONFIG_IA32_SUPPORT | ||
26 | if (IS_IA32_PROCESS(regs)) | ||
27 | return regs->r1; | ||
28 | #endif | ||
29 | |||
30 | return regs->r15; | 25 | return regs->r15; |
31 | } | 26 | } |
32 | 27 | ||
33 | static inline void syscall_rollback(struct task_struct *task, | 28 | static inline void syscall_rollback(struct task_struct *task, |
34 | struct pt_regs *regs) | 29 | struct pt_regs *regs) |
35 | { | 30 | { |
36 | #ifdef CONFIG_IA32_SUPPORT | ||
37 | if (IS_IA32_PROCESS(regs)) | ||
38 | regs->r8 = regs->r1; | ||
39 | #endif | ||
40 | |||
41 | /* do nothing */ | 31 | /* do nothing */ |
42 | } | 32 | } |
43 | 33 | ||
44 | static inline long syscall_get_error(struct task_struct *task, | 34 | static inline long syscall_get_error(struct task_struct *task, |
45 | struct pt_regs *regs) | 35 | struct pt_regs *regs) |
46 | { | 36 | { |
47 | #ifdef CONFIG_IA32_SUPPORT | ||
48 | if (IS_IA32_PROCESS(regs)) | ||
49 | return regs->r8; | ||
50 | #endif | ||
51 | |||
52 | return regs->r10 == -1 ? regs->r8:0; | 37 | return regs->r10 == -1 ? regs->r8:0; |
53 | } | 38 | } |
54 | 39 | ||
@@ -62,13 +47,6 @@ static inline void syscall_set_return_value(struct task_struct *task, | |||
62 | struct pt_regs *regs, | 47 | struct pt_regs *regs, |
63 | int error, long val) | 48 | int error, long val) |
64 | { | 49 | { |
65 | #ifdef CONFIG_IA32_SUPPORT | ||
66 | if (IS_IA32_PROCESS(regs)) { | ||
67 | regs->r8 = (long) error ? error : val; | ||
68 | return; | ||
69 | } | ||
70 | #endif | ||
71 | |||
72 | if (error) { | 50 | if (error) { |
73 | /* error < 0, but ia64 uses > 0 return value */ | 51 | /* error < 0, but ia64 uses > 0 return value */ |
74 | regs->r8 = -error; | 52 | regs->r8 = -error; |
@@ -89,37 +67,6 @@ static inline void syscall_get_arguments(struct task_struct *task, | |||
89 | { | 67 | { |
90 | BUG_ON(i + n > 6); | 68 | BUG_ON(i + n > 6); |
91 | 69 | ||
92 | #ifdef CONFIG_IA32_SUPPORT | ||
93 | if (IS_IA32_PROCESS(regs)) { | ||
94 | switch (i + n) { | ||
95 | case 6: | ||
96 | if (!n--) break; | ||
97 | *args++ = regs->r13; | ||
98 | case 5: | ||
99 | if (!n--) break; | ||
100 | *args++ = regs->r15; | ||
101 | case 4: | ||
102 | if (!n--) break; | ||
103 | *args++ = regs->r14; | ||
104 | case 3: | ||
105 | if (!n--) break; | ||
106 | *args++ = regs->r10; | ||
107 | case 2: | ||
108 | if (!n--) break; | ||
109 | *args++ = regs->r9; | ||
110 | case 1: | ||
111 | if (!n--) break; | ||
112 | *args++ = regs->r11; | ||
113 | case 0: | ||
114 | if (!n--) break; | ||
115 | default: | ||
116 | BUG(); | ||
117 | break; | ||
118 | } | ||
119 | |||
120 | return; | ||
121 | } | ||
122 | #endif | ||
123 | ia64_syscall_get_set_arguments(task, regs, i, n, args, 0); | 70 | ia64_syscall_get_set_arguments(task, regs, i, n, args, 0); |
124 | } | 71 | } |
125 | 72 | ||
@@ -130,34 +77,6 @@ static inline void syscall_set_arguments(struct task_struct *task, | |||
130 | { | 77 | { |
131 | BUG_ON(i + n > 6); | 78 | BUG_ON(i + n > 6); |
132 | 79 | ||
133 | #ifdef CONFIG_IA32_SUPPORT | ||
134 | if (IS_IA32_PROCESS(regs)) { | ||
135 | switch (i + n) { | ||
136 | case 6: | ||
137 | if (!n--) break; | ||
138 | regs->r13 = *args++; | ||
139 | case 5: | ||
140 | if (!n--) break; | ||
141 | regs->r15 = *args++; | ||
142 | case 4: | ||
143 | if (!n--) break; | ||
144 | regs->r14 = *args++; | ||
145 | case 3: | ||
146 | if (!n--) break; | ||
147 | regs->r10 = *args++; | ||
148 | case 2: | ||
149 | if (!n--) break; | ||
150 | regs->r9 = *args++; | ||
151 | case 1: | ||
152 | if (!n--) break; | ||
153 | regs->r11 = *args++; | ||
154 | case 0: | ||
155 | if (!n--) break; | ||
156 | } | ||
157 | |||
158 | return; | ||
159 | } | ||
160 | #endif | ||
161 | ia64_syscall_get_set_arguments(task, regs, i, n, args, 1); | 80 | ia64_syscall_get_set_arguments(task, regs, i, n, args, 1); |
162 | } | 81 | } |
163 | #endif /* _ASM_SYSCALL_H */ | 82 | #endif /* _ASM_SYSCALL_H */ |
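With the IA-32 branches gone, syscall_get_error() and syscall_set_return_value() are left with only the native ia64 convention: r10 is -1 when the call failed and r8 then carries a positive error number, otherwise r8 holds the return value. The following user-space sketch merely models that convention as it appears in the hunks above; the struct, the helper names, and the success branch of the setter (which the hunk truncates) are illustrative assumptions, not kernel code.

#include <stdio.h>

/* Stand-in for the two pt_regs fields the convention uses. */
struct fake_regs {
	long r8;	/* return value, or positive error number on failure */
	long r10;	/* -1 on failure, 0 on success */
};

/* Mirrors the simplified syscall_get_error(): nonzero only when r10 == -1. */
static long sketch_get_error(const struct fake_regs *regs)
{
	return regs->r10 == -1 ? regs->r8 : 0;
}

/* Mirrors the simplified syscall_set_return_value(); the error argument is a
 * negative errno, flipped to the positive value ia64 reports in r8.  The
 * success branch is assumed from the usual convention, not shown in the hunk. */
static void sketch_set_return_value(struct fake_regs *regs, int error, long val)
{
	if (error) {
		regs->r8 = -error;
		regs->r10 = -1;
	} else {
		regs->r8 = val;
		regs->r10 = 0;
	}
}

int main(void)
{
	struct fake_regs regs;

	sketch_set_return_value(&regs, -2, 0);		/* e.g. -ENOENT */
	printf("error=%ld\n", sketch_get_error(&regs));	/* prints error=2 */

	sketch_set_return_value(&regs, 0, 42);
	printf("error=%ld value=%ld\n", sketch_get_error(&regs), regs.r8);
	return 0;
}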
diff --git a/arch/ia64/include/asm/system.h b/arch/ia64/include/asm/system.h index 927a381c20ca..9f342a574ce8 100644 --- a/arch/ia64/include/asm/system.h +++ b/arch/ia64/include/asm/system.h | |||
@@ -191,15 +191,6 @@ do { \ | |||
191 | 191 | ||
192 | #ifdef __KERNEL__ | 192 | #ifdef __KERNEL__ |
193 | 193 | ||
194 | #ifdef CONFIG_IA32_SUPPORT | ||
195 | # define IS_IA32_PROCESS(regs) (ia64_psr(regs)->is != 0) | ||
196 | #else | ||
197 | # define IS_IA32_PROCESS(regs) 0 | ||
198 | struct task_struct; | ||
199 | static inline void ia32_save_state(struct task_struct *t __attribute__((unused))){} | ||
200 | static inline void ia32_load_state(struct task_struct *t __attribute__((unused))){} | ||
201 | #endif | ||
202 | |||
203 | /* | 194 | /* |
204 | * Context switch from one thread to another. If the two threads have | 195 | * Context switch from one thread to another. If the two threads have |
205 | * different address spaces, schedule() has already taken care of | 196 | * different address spaces, schedule() has already taken care of |
@@ -233,7 +224,7 @@ extern void ia64_account_on_switch (struct task_struct *prev, struct task_struct | |||
233 | 224 | ||
234 | #define IA64_HAS_EXTRA_STATE(t) \ | 225 | #define IA64_HAS_EXTRA_STATE(t) \ |
235 | ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID) \ | 226 | ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID) \ |
236 | || IS_IA32_PROCESS(task_pt_regs(t)) || PERFMON_IS_SYSWIDE()) | 227 | || PERFMON_IS_SYSWIDE()) |
237 | 228 | ||
238 | #define __switch_to(prev,next,last) do { \ | 229 | #define __switch_to(prev,next,last) do { \ |
239 | IA64_ACCOUNT_ON_SWITCH(prev, next); \ | 230 | IA64_ACCOUNT_ON_SWITCH(prev, next); \ |
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h index 10a8f21ca9e3..bb8b0fff32b3 100644 --- a/arch/ia64/include/asm/unistd.h +++ b/arch/ia64/include/asm/unistd.h | |||
@@ -335,20 +335,6 @@ | |||
335 | #define __ARCH_WANT_SYS_RT_SIGACTION | 335 | #define __ARCH_WANT_SYS_RT_SIGACTION |
336 | #define __ARCH_WANT_SYS_RT_SIGSUSPEND | 336 | #define __ARCH_WANT_SYS_RT_SIGSUSPEND |
337 | 337 | ||
338 | #ifdef CONFIG_IA32_SUPPORT | ||
339 | # define __ARCH_WANT_SYS_FADVISE64 | ||
340 | # define __ARCH_WANT_SYS_GETPGRP | ||
341 | # define __ARCH_WANT_SYS_LLSEEK | ||
342 | # define __ARCH_WANT_SYS_NICE | ||
343 | # define __ARCH_WANT_SYS_OLD_GETRLIMIT | ||
344 | # define __ARCH_WANT_SYS_OLDUMOUNT | ||
345 | # define __ARCH_WANT_SYS_PAUSE | ||
346 | # define __ARCH_WANT_SYS_SIGPENDING | ||
347 | # define __ARCH_WANT_SYS_SIGPROCMASK | ||
348 | # define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND | ||
349 | # define __ARCH_WANT_COMPAT_SYS_TIME | ||
350 | #endif | ||
351 | |||
352 | #if !defined(__ASSEMBLY__) && !defined(ASSEMBLER) | 338 | #if !defined(__ASSEMBLY__) && !defined(ASSEMBLER) |
353 | 339 | ||
354 | #include <linux/types.h> | 340 | #include <linux/types.h> |
diff --git a/arch/ia64/kernel/audit.c b/arch/ia64/kernel/audit.c index f3802ae89b10..96a9d18ff4c4 100644 --- a/arch/ia64/kernel/audit.c +++ b/arch/ia64/kernel/audit.c | |||
@@ -30,20 +30,11 @@ static unsigned signal_class[] = { | |||
30 | 30 | ||
31 | int audit_classify_arch(int arch) | 31 | int audit_classify_arch(int arch) |
32 | { | 32 | { |
33 | #ifdef CONFIG_IA32_SUPPORT | ||
34 | if (arch == AUDIT_ARCH_I386) | ||
35 | return 1; | ||
36 | #endif | ||
37 | return 0; | 33 | return 0; |
38 | } | 34 | } |
39 | 35 | ||
40 | int audit_classify_syscall(int abi, unsigned syscall) | 36 | int audit_classify_syscall(int abi, unsigned syscall) |
41 | { | 37 | { |
42 | #ifdef CONFIG_IA32_SUPPORT | ||
43 | extern int ia32_classify_syscall(unsigned); | ||
44 | if (abi == AUDIT_ARCH_I386) | ||
45 | return ia32_classify_syscall(syscall); | ||
46 | #endif | ||
47 | switch(syscall) { | 38 | switch(syscall) { |
48 | case __NR_open: | 39 | case __NR_open: |
49 | return 2; | 40 | return 2; |
@@ -58,18 +49,6 @@ int audit_classify_syscall(int abi, unsigned syscall) | |||
58 | 49 | ||
59 | static int __init audit_classes_init(void) | 50 | static int __init audit_classes_init(void) |
60 | { | 51 | { |
61 | #ifdef CONFIG_IA32_SUPPORT | ||
62 | extern __u32 ia32_dir_class[]; | ||
63 | extern __u32 ia32_write_class[]; | ||
64 | extern __u32 ia32_read_class[]; | ||
65 | extern __u32 ia32_chattr_class[]; | ||
66 | extern __u32 ia32_signal_class[]; | ||
67 | audit_register_class(AUDIT_CLASS_WRITE_32, ia32_write_class); | ||
68 | audit_register_class(AUDIT_CLASS_READ_32, ia32_read_class); | ||
69 | audit_register_class(AUDIT_CLASS_DIR_WRITE_32, ia32_dir_class); | ||
70 | audit_register_class(AUDIT_CLASS_CHATTR_32, ia32_chattr_class); | ||
71 | audit_register_class(AUDIT_CLASS_SIGNAL_32, ia32_signal_class); | ||
72 | #endif | ||
73 | audit_register_class(AUDIT_CLASS_WRITE, write_class); | 52 | audit_register_class(AUDIT_CLASS_WRITE, write_class); |
74 | audit_register_class(AUDIT_CLASS_READ, read_class); | 53 | audit_register_class(AUDIT_CLASS_READ, read_class); |
75 | audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class); | 54 | audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class); |
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index d75b872ca4dc..9a260b317d8d 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S | |||
@@ -71,15 +71,6 @@ ENTRY(ia64_execve) | |||
71 | add out3=16,sp // regs | 71 | add out3=16,sp // regs |
72 | br.call.sptk.many rp=sys_execve | 72 | br.call.sptk.many rp=sys_execve |
73 | .ret0: | 73 | .ret0: |
74 | #ifdef CONFIG_IA32_SUPPORT | ||
75 | /* | ||
76 | * Check if we're returning to ia32 mode. If so, we need to restore ia32 registers | ||
77 | * from pt_regs. | ||
78 | */ | ||
79 | adds r16=PT(CR_IPSR)+16,sp | ||
80 | ;; | ||
81 | ld8 r16=[r16] | ||
82 | #endif | ||
83 | cmp4.ge p6,p7=r8,r0 | 74 | cmp4.ge p6,p7=r8,r0 |
84 | mov ar.pfs=loc1 // restore ar.pfs | 75 | mov ar.pfs=loc1 // restore ar.pfs |
85 | sxt4 r8=r8 // return 64-bit result | 76 | sxt4 r8=r8 // return 64-bit result |
@@ -108,12 +99,6 @@ ENTRY(ia64_execve) | |||
108 | ldf.fill f23=[sp]; ldf.fill f24=[sp]; mov f25=f0 | 99 | ldf.fill f23=[sp]; ldf.fill f24=[sp]; mov f25=f0 |
109 | ldf.fill f26=[sp]; ldf.fill f27=[sp]; mov f28=f0 | 100 | ldf.fill f26=[sp]; ldf.fill f27=[sp]; mov f28=f0 |
110 | ldf.fill f29=[sp]; ldf.fill f30=[sp]; mov f31=f0 | 101 | ldf.fill f29=[sp]; ldf.fill f30=[sp]; mov f31=f0 |
111 | #ifdef CONFIG_IA32_SUPPORT | ||
112 | tbit.nz p6,p0=r16, IA64_PSR_IS_BIT | ||
113 | movl loc0=ia64_ret_from_ia32_execve | ||
114 | ;; | ||
115 | (p6) mov rp=loc0 | ||
116 | #endif | ||
117 | br.ret.sptk.many rp | 102 | br.ret.sptk.many rp |
118 | END(ia64_execve) | 103 | END(ia64_execve) |
119 | 104 | ||
@@ -848,30 +833,6 @@ __paravirt_work_processed_syscall: | |||
848 | br.cond.sptk.many rbs_switch // B | 833 | br.cond.sptk.many rbs_switch // B |
849 | END(__paravirt_leave_syscall) | 834 | END(__paravirt_leave_syscall) |
850 | 835 | ||
851 | #ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE | ||
852 | #ifdef CONFIG_IA32_SUPPORT | ||
853 | GLOBAL_ENTRY(ia64_ret_from_ia32_execve) | ||
854 | PT_REGS_UNWIND_INFO(0) | ||
855 | adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8 | ||
856 | adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10 | ||
857 | ;; | ||
858 | .mem.offset 0,0 | ||
859 | st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit | ||
860 | .mem.offset 8,0 | ||
861 | st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit | ||
862 | #ifdef CONFIG_PARAVIRT | ||
863 | ;; | ||
864 | // don't fall through, ia64_leave_kernel may be #define'd | ||
865 | br.cond.sptk.few ia64_leave_kernel | ||
866 | ;; | ||
867 | #endif /* CONFIG_PARAVIRT */ | ||
868 | END(ia64_ret_from_ia32_execve) | ||
869 | #ifndef CONFIG_PARAVIRT | ||
870 | // fall through | ||
871 | #endif | ||
872 | #endif /* CONFIG_IA32_SUPPORT */ | ||
873 | #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ | ||
874 | |||
875 | GLOBAL_ENTRY(__paravirt_leave_kernel) | 836 | GLOBAL_ENTRY(__paravirt_leave_kernel) |
876 | PT_REGS_UNWIND_INFO(0) | 837 | PT_REGS_UNWIND_INFO(0) |
877 | /* | 838 | /* |
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S index ec9a5fdfa1b9..179fd122e837 100644 --- a/arch/ia64/kernel/ivt.S +++ b/arch/ia64/kernel/ivt.S | |||
@@ -49,7 +49,6 @@ | |||
49 | 49 | ||
50 | #include <asm/asmmacro.h> | 50 | #include <asm/asmmacro.h> |
51 | #include <asm/break.h> | 51 | #include <asm/break.h> |
52 | #include <asm/ia32.h> | ||
53 | #include <asm/kregs.h> | 52 | #include <asm/kregs.h> |
54 | #include <asm/asm-offsets.h> | 53 | #include <asm/asm-offsets.h> |
55 | #include <asm/pgtable.h> | 54 | #include <asm/pgtable.h> |
@@ -1386,28 +1385,6 @@ END(ia32_exception) | |||
1386 | // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71) | 1385 | // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71) |
1387 | ENTRY(ia32_intercept) | 1386 | ENTRY(ia32_intercept) |
1388 | DBG_FAULT(46) | 1387 | DBG_FAULT(46) |
1389 | #ifdef CONFIG_IA32_SUPPORT | ||
1390 | mov r31=pr | ||
1391 | MOV_FROM_ISR(r16) | ||
1392 | ;; | ||
1393 | extr.u r17=r16,16,8 // get ISR.code | ||
1394 | mov r18=ar.eflag | ||
1395 | MOV_FROM_IIM(r19) // old eflag value | ||
1396 | ;; | ||
1397 | cmp.ne p6,p0=2,r17 | ||
1398 | (p6) br.cond.spnt 1f // not a system flag fault | ||
1399 | xor r16=r18,r19 | ||
1400 | ;; | ||
1401 | extr.u r17=r16,18,1 // get the eflags.ac bit | ||
1402 | ;; | ||
1403 | cmp.eq p6,p0=0,r17 | ||
1404 | (p6) br.cond.spnt 1f // eflags.ac bit didn't change | ||
1405 | ;; | ||
1406 | mov pr=r31,-1 // restore predicate registers | ||
1407 | RFI | ||
1408 | |||
1409 | 1: | ||
1410 | #endif // CONFIG_IA32_SUPPORT | ||
1411 | FAULT(46) | 1388 | FAULT(46) |
1412 | END(ia32_intercept) | 1389 | END(ia32_intercept) |
1413 | 1390 | ||
@@ -1416,12 +1393,7 @@ END(ia32_intercept) | |||
1416 | // 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74) | 1393 | // 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74) |
1417 | ENTRY(ia32_interrupt) | 1394 | ENTRY(ia32_interrupt) |
1418 | DBG_FAULT(47) | 1395 | DBG_FAULT(47) |
1419 | #ifdef CONFIG_IA32_SUPPORT | ||
1420 | mov r31=pr | ||
1421 | br.sptk.many dispatch_to_ia32_handler | ||
1422 | #else | ||
1423 | FAULT(47) | 1396 | FAULT(47) |
1424 | #endif | ||
1425 | END(ia32_interrupt) | 1397 | END(ia32_interrupt) |
1426 | 1398 | ||
1427 | .org ia64_ivt+0x6c00 | 1399 | .org ia64_ivt+0x6c00 |
@@ -1715,89 +1687,3 @@ ENTRY(dispatch_illegal_op_fault) | |||
1715 | (p6) br.call.dpnt.many b6=b6 // call returns to ia64_leave_kernel | 1687 | (p6) br.call.dpnt.many b6=b6 // call returns to ia64_leave_kernel |
1716 | br.sptk.many ia64_leave_kernel | 1688 | br.sptk.many ia64_leave_kernel |
1717 | END(dispatch_illegal_op_fault) | 1689 | END(dispatch_illegal_op_fault) |
1718 | |||
1719 | #ifdef CONFIG_IA32_SUPPORT | ||
1720 | |||
1721 | /* | ||
1722 | * There is no particular reason for this code to be here, other than that | ||
1723 | * there happens to be space here that would go unused otherwise. If this | ||
1724 | * fault ever gets "unreserved", simply moved the following code to a more | ||
1725 | * suitable spot... | ||
1726 | */ | ||
1727 | |||
1728 | // IA32 interrupt entry point | ||
1729 | |||
1730 | ENTRY(dispatch_to_ia32_handler) | ||
1731 | SAVE_MIN | ||
1732 | ;; | ||
1733 | MOV_FROM_ISR(r14) | ||
1734 | SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24) | ||
1735 | // guarantee that interruption collection is on | ||
1736 | ;; | ||
1737 | SSM_PSR_I(p15, p15, r3) | ||
1738 | adds r3=8,r2 // Base pointer for SAVE_REST | ||
1739 | ;; | ||
1740 | SAVE_REST | ||
1741 | ;; | ||
1742 | mov r15=0x80 | ||
1743 | shr r14=r14,16 // Get interrupt number | ||
1744 | ;; | ||
1745 | cmp.ne p6,p0=r14,r15 | ||
1746 | (p6) br.call.dpnt.many b6=non_ia32_syscall | ||
1747 | |||
1748 | adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp // 16 byte hole per SW conventions | ||
1749 | adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp | ||
1750 | ;; | ||
1751 | cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0 | ||
1752 | ld8 r8=[r14] // get r8 | ||
1753 | ;; | ||
1754 | st8 [r15]=r8 // save original EAX in r1 (IA32 procs don't use the GP) | ||
1755 | ;; | ||
1756 | alloc r15=ar.pfs,0,0,6,0 // must first in an insn group | ||
1757 | ;; | ||
1758 | ld4 r8=[r14],8 // r8 == eax (syscall number) | ||
1759 | mov r15=IA32_NR_syscalls | ||
1760 | ;; | ||
1761 | cmp.ltu.unc p6,p7=r8,r15 | ||
1762 | ld4 out1=[r14],8 // r9 == ecx | ||
1763 | ;; | ||
1764 | ld4 out2=[r14],8 // r10 == edx | ||
1765 | ;; | ||
1766 | ld4 out0=[r14] // r11 == ebx | ||
1767 | adds r14=(IA64_PT_REGS_R13_OFFSET) + 16,sp | ||
1768 | ;; | ||
1769 | ld4 out5=[r14],PT(R14)-PT(R13) // r13 == ebp | ||
1770 | ;; | ||
1771 | ld4 out3=[r14],PT(R15)-PT(R14) // r14 == esi | ||
1772 | adds r2=TI_FLAGS+IA64_TASK_SIZE,r13 | ||
1773 | ;; | ||
1774 | ld4 out4=[r14] // r15 == edi | ||
1775 | movl r16=ia32_syscall_table | ||
1776 | ;; | ||
1777 | (p6) shladd r16=r8,3,r16 // force ni_syscall if not valid syscall number | ||
1778 | ld4 r2=[r2] // r2 = current_thread_info()->flags | ||
1779 | ;; | ||
1780 | ld8 r16=[r16] | ||
1781 | and r2=_TIF_SYSCALL_TRACEAUDIT,r2 // mask trace or audit | ||
1782 | ;; | ||
1783 | mov b6=r16 | ||
1784 | movl r15=ia32_ret_from_syscall | ||
1785 | cmp.eq p8,p0=r2,r0 | ||
1786 | ;; | ||
1787 | mov rp=r15 | ||
1788 | (p8) br.call.sptk.many b6=b6 | ||
1789 | br.cond.sptk ia32_trace_syscall | ||
1790 | |||
1791 | non_ia32_syscall: | ||
1792 | alloc r15=ar.pfs,0,0,2,0 | ||
1793 | mov out0=r14 // interrupt # | ||
1794 | add out1=16,sp // pointer to pt_regs | ||
1795 | ;; // avoid WAW on CFM | ||
1796 | br.call.sptk.many rp=ia32_bad_interrupt | ||
1797 | .ret1: movl r15=ia64_leave_kernel | ||
1798 | ;; | ||
1799 | mov rp=r15 | ||
1800 | br.ret.sptk.many rp | ||
1801 | END(dispatch_to_ia32_handler) | ||
1802 | |||
1803 | #endif /* CONFIG_IA32_SUPPORT */ | ||
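For context, the deleted dispatch_to_ia32_handler relied on the IA-32 register aliasing spelled out in its own comments: eax lands in r8 (the syscall number), and the out0..out5 loads pick up ebx from r11, ecx from r9, edx from r10, esi from r14, edi from r15 and ebp from r13. The table below only restates those comments as a quick C reference; the array and its field names are invented for illustration and are not kernel identifiers.

#include <stdio.h>

/* IA-32 register -> ia64 pt_regs slot used by the removed IA-32 syscall
 * dispatcher, taken from the comments in dispatch_to_ia32_handler above. */
static const struct {
	const char *x86;
	const char *ia64;
	const char *role;
} ia32_reg_map[] = {
	{ "eax", "r8",  "syscall number" },
	{ "ebx", "r11", "arg0 (out0)" },
	{ "ecx", "r9",  "arg1 (out1)" },
	{ "edx", "r10", "arg2 (out2)" },
	{ "esi", "r14", "arg3 (out3)" },
	{ "edi", "r15", "arg4 (out4)" },
	{ "ebp", "r13", "arg5 (out5)" },
};

int main(void)
{
	for (size_t i = 0; i < sizeof(ia32_reg_map) / sizeof(ia32_reg_map[0]); i++)
		printf("%s -> %s (%s)\n", ia32_reg_map[i].x86,
		       ia32_reg_map[i].ia64, ia32_reg_map[i].role);
	return 0;
}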
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 9bcec9945c12..883ecc9cfef5 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c | |||
@@ -33,7 +33,6 @@ | |||
33 | #include <asm/cpu.h> | 33 | #include <asm/cpu.h> |
34 | #include <asm/delay.h> | 34 | #include <asm/delay.h> |
35 | #include <asm/elf.h> | 35 | #include <asm/elf.h> |
36 | #include <asm/ia32.h> | ||
37 | #include <asm/irq.h> | 36 | #include <asm/irq.h> |
38 | #include <asm/kexec.h> | 37 | #include <asm/kexec.h> |
39 | #include <asm/pgalloc.h> | 38 | #include <asm/pgalloc.h> |
@@ -358,11 +357,6 @@ ia64_save_extra (struct task_struct *task) | |||
358 | if (info & PFM_CPUINFO_SYST_WIDE) | 357 | if (info & PFM_CPUINFO_SYST_WIDE) |
359 | pfm_syst_wide_update_task(task, info, 0); | 358 | pfm_syst_wide_update_task(task, info, 0); |
360 | #endif | 359 | #endif |
361 | |||
362 | #ifdef CONFIG_IA32_SUPPORT | ||
363 | if (IS_IA32_PROCESS(task_pt_regs(task))) | ||
364 | ia32_save_state(task); | ||
365 | #endif | ||
366 | } | 360 | } |
367 | 361 | ||
368 | void | 362 | void |
@@ -383,11 +377,6 @@ ia64_load_extra (struct task_struct *task) | |||
383 | if (info & PFM_CPUINFO_SYST_WIDE) | 377 | if (info & PFM_CPUINFO_SYST_WIDE) |
384 | pfm_syst_wide_update_task(task, info, 1); | 378 | pfm_syst_wide_update_task(task, info, 1); |
385 | #endif | 379 | #endif |
386 | |||
387 | #ifdef CONFIG_IA32_SUPPORT | ||
388 | if (IS_IA32_PROCESS(task_pt_regs(task))) | ||
389 | ia32_load_state(task); | ||
390 | #endif | ||
391 | } | 380 | } |
392 | 381 | ||
393 | /* | 382 | /* |
@@ -426,7 +415,7 @@ copy_thread(unsigned long clone_flags, | |||
426 | unsigned long user_stack_base, unsigned long user_stack_size, | 415 | unsigned long user_stack_base, unsigned long user_stack_size, |
427 | struct task_struct *p, struct pt_regs *regs) | 416 | struct task_struct *p, struct pt_regs *regs) |
428 | { | 417 | { |
429 | extern char ia64_ret_from_clone, ia32_ret_from_clone; | 418 | extern char ia64_ret_from_clone; |
430 | struct switch_stack *child_stack, *stack; | 419 | struct switch_stack *child_stack, *stack; |
431 | unsigned long rbs, child_rbs, rbs_size; | 420 | unsigned long rbs, child_rbs, rbs_size; |
432 | struct pt_regs *child_ptregs; | 421 | struct pt_regs *child_ptregs; |
@@ -457,7 +446,7 @@ copy_thread(unsigned long clone_flags, | |||
457 | memcpy((void *) child_rbs, (void *) rbs, rbs_size); | 446 | memcpy((void *) child_rbs, (void *) rbs, rbs_size); |
458 | 447 | ||
459 | if (likely(user_mode(child_ptregs))) { | 448 | if (likely(user_mode(child_ptregs))) { |
460 | if ((clone_flags & CLONE_SETTLS) && !IS_IA32_PROCESS(regs)) | 449 | if (clone_flags & CLONE_SETTLS) |
461 | child_ptregs->r13 = regs->r16; /* see sys_clone2() in entry.S */ | 450 | child_ptregs->r13 = regs->r16; /* see sys_clone2() in entry.S */ |
462 | if (user_stack_base) { | 451 | if (user_stack_base) { |
463 | child_ptregs->r12 = user_stack_base + user_stack_size - 16; | 452 | child_ptregs->r12 = user_stack_base + user_stack_size - 16; |
@@ -477,10 +466,7 @@ copy_thread(unsigned long clone_flags, | |||
477 | child_ptregs->r13 = (unsigned long) p; /* set `current' pointer */ | 466 | child_ptregs->r13 = (unsigned long) p; /* set `current' pointer */ |
478 | } | 467 | } |
479 | child_stack->ar_bspstore = child_rbs + rbs_size; | 468 | child_stack->ar_bspstore = child_rbs + rbs_size; |
480 | if (IS_IA32_PROCESS(regs)) | 469 | child_stack->b0 = (unsigned long) &ia64_ret_from_clone; |
481 | child_stack->b0 = (unsigned long) &ia32_ret_from_clone; | ||
482 | else | ||
483 | child_stack->b0 = (unsigned long) &ia64_ret_from_clone; | ||
484 | 470 | ||
485 | /* copy parts of thread_struct: */ | 471 | /* copy parts of thread_struct: */ |
486 | p->thread.ksp = (unsigned long) child_stack - 16; | 472 | p->thread.ksp = (unsigned long) child_stack - 16; |
@@ -515,22 +501,6 @@ copy_thread(unsigned long clone_flags, | |||
515 | p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR) | 501 | p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR) |
516 | | THREAD_FLAGS_TO_SET); | 502 | | THREAD_FLAGS_TO_SET); |
517 | ia64_drop_fpu(p); /* don't pick up stale state from a CPU's fph */ | 503 | ia64_drop_fpu(p); /* don't pick up stale state from a CPU's fph */ |
518 | #ifdef CONFIG_IA32_SUPPORT | ||
519 | /* | ||
520 | * If we're cloning an IA32 task then save the IA32 extra | ||
521 | * state from the current task to the new task | ||
522 | */ | ||
523 | if (IS_IA32_PROCESS(task_pt_regs(current))) { | ||
524 | ia32_save_state(p); | ||
525 | if (clone_flags & CLONE_SETTLS) | ||
526 | retval = ia32_clone_tls(p, child_ptregs); | ||
527 | |||
528 | /* Copy partially mapped page list */ | ||
529 | if (!retval) | ||
530 | retval = ia32_copy_ia64_partial_page_list(p, | ||
531 | clone_flags); | ||
532 | } | ||
533 | #endif | ||
534 | 504 | ||
535 | #ifdef CONFIG_PERFMON | 505 | #ifdef CONFIG_PERFMON |
536 | if (current->thread.pfm_context) | 506 | if (current->thread.pfm_context) |
@@ -704,15 +674,6 @@ EXPORT_SYMBOL(kernel_thread); | |||
704 | int | 674 | int |
705 | kernel_thread_helper (int (*fn)(void *), void *arg) | 675 | kernel_thread_helper (int (*fn)(void *), void *arg) |
706 | { | 676 | { |
707 | #ifdef CONFIG_IA32_SUPPORT | ||
708 | if (IS_IA32_PROCESS(task_pt_regs(current))) { | ||
709 | /* A kernel thread is always a 64-bit process. */ | ||
710 | current->thread.map_base = DEFAULT_MAP_BASE; | ||
711 | current->thread.task_size = DEFAULT_TASK_SIZE; | ||
712 | ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob); | ||
713 | ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1); | ||
714 | } | ||
715 | #endif | ||
716 | return (*fn)(arg); | 677 | return (*fn)(arg); |
717 | } | 678 | } |
718 | 679 | ||
@@ -725,14 +686,6 @@ flush_thread (void) | |||
725 | /* drop floating-point and debug-register state if it exists: */ | 686 | /* drop floating-point and debug-register state if it exists: */ |
726 | current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID); | 687 | current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID); |
727 | ia64_drop_fpu(current); | 688 | ia64_drop_fpu(current); |
728 | #ifdef CONFIG_IA32_SUPPORT | ||
729 | if (IS_IA32_PROCESS(task_pt_regs(current))) { | ||
730 | ia32_drop_ia64_partial_page_list(current); | ||
731 | current->thread.task_size = IA32_PAGE_OFFSET; | ||
732 | set_fs(USER_DS); | ||
733 | memset(current->thread.tls_array, 0, sizeof(current->thread.tls_array)); | ||
734 | } | ||
735 | #endif | ||
736 | } | 689 | } |
737 | 690 | ||
738 | /* | 691 | /* |
@@ -753,8 +706,6 @@ exit_thread (void) | |||
753 | if (current->thread.flags & IA64_THREAD_DBG_VALID) | 706 | if (current->thread.flags & IA64_THREAD_DBG_VALID) |
754 | pfm_release_debug_registers(current); | 707 | pfm_release_debug_registers(current); |
755 | #endif | 708 | #endif |
756 | if (IS_IA32_PROCESS(task_pt_regs(current))) | ||
757 | ia32_drop_ia64_partial_page_list(current); | ||
758 | } | 709 | } |
759 | 710 | ||
760 | unsigned long | 711 | unsigned long |
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index 9daa87fdb018..b61afbbe076f 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c | |||
@@ -1250,13 +1250,8 @@ syscall_trace_enter (long arg0, long arg1, long arg2, long arg3, | |||
1250 | long syscall; | 1250 | long syscall; |
1251 | int arch; | 1251 | int arch; |
1252 | 1252 | ||
1253 | if (IS_IA32_PROCESS(®s)) { | 1253 | syscall = regs.r15; |
1254 | syscall = regs.r1; | 1254 | arch = AUDIT_ARCH_IA64; |
1255 | arch = AUDIT_ARCH_I386; | ||
1256 | } else { | ||
1257 | syscall = regs.r15; | ||
1258 | arch = AUDIT_ARCH_IA64; | ||
1259 | } | ||
1260 | 1255 | ||
1261 | audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3); | 1256 | audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3); |
1262 | } | 1257 | } |
@@ -2172,11 +2167,6 @@ static const struct user_regset_view user_ia64_view = { | |||
2172 | 2167 | ||
2173 | const struct user_regset_view *task_user_regset_view(struct task_struct *tsk) | 2168 | const struct user_regset_view *task_user_regset_view(struct task_struct *tsk) |
2174 | { | 2169 | { |
2175 | #ifdef CONFIG_IA32_SUPPORT | ||
2176 | extern const struct user_regset_view user_ia32_view; | ||
2177 | if (IS_IA32_PROCESS(task_pt_regs(tsk))) | ||
2178 | return &user_ia32_view; | ||
2179 | #endif | ||
2180 | return &user_ia64_view; | 2170 | return &user_ia64_view; |
2181 | } | 2171 | } |
2182 | 2172 | ||
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index a1ea87919777..41ae6a596b50 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c | |||
@@ -46,7 +46,6 @@ | |||
46 | #include <linux/kexec.h> | 46 | #include <linux/kexec.h> |
47 | #include <linux/crash_dump.h> | 47 | #include <linux/crash_dump.h> |
48 | 48 | ||
49 | #include <asm/ia32.h> | ||
50 | #include <asm/machvec.h> | 49 | #include <asm/machvec.h> |
51 | #include <asm/mca.h> | 50 | #include <asm/mca.h> |
52 | #include <asm/meminit.h> | 51 | #include <asm/meminit.h> |
@@ -1016,10 +1015,6 @@ cpu_init (void) | |||
1016 | ia64_mmu_init(ia64_imva(cpu_data)); | 1015 | ia64_mmu_init(ia64_imva(cpu_data)); |
1017 | ia64_mca_cpu_init(ia64_imva(cpu_data)); | 1016 | ia64_mca_cpu_init(ia64_imva(cpu_data)); |
1018 | 1017 | ||
1019 | #ifdef CONFIG_IA32_SUPPORT | ||
1020 | ia32_cpu_init(); | ||
1021 | #endif | ||
1022 | |||
1023 | /* Clear ITC to eliminate sched_clock() overflows in human time. */ | 1018 | /* Clear ITC to eliminate sched_clock() overflows in human time. */ |
1024 | ia64_set_itc(0); | 1019 | ia64_set_itc(0); |
1025 | 1020 | ||
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index e1821ca4c7df..7bdafc8788bd 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c | |||
@@ -21,7 +21,6 @@ | |||
21 | #include <linux/unistd.h> | 21 | #include <linux/unistd.h> |
22 | #include <linux/wait.h> | 22 | #include <linux/wait.h> |
23 | 23 | ||
24 | #include <asm/ia32.h> | ||
25 | #include <asm/intrinsics.h> | 24 | #include <asm/intrinsics.h> |
26 | #include <asm/uaccess.h> | 25 | #include <asm/uaccess.h> |
27 | #include <asm/rse.h> | 26 | #include <asm/rse.h> |
@@ -425,14 +424,8 @@ static long | |||
425 | handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, | 424 | handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, |
426 | struct sigscratch *scr) | 425 | struct sigscratch *scr) |
427 | { | 426 | { |
428 | if (IS_IA32_PROCESS(&scr->pt)) { | 427 | if (!setup_frame(sig, ka, info, oldset, scr)) |
429 | /* send signal to IA-32 process */ | 428 | return 0; |
430 | if (!ia32_setup_frame1(sig, ka, info, oldset, &scr->pt)) | ||
431 | return 0; | ||
432 | } else | ||
433 | /* send signal to IA-64 process */ | ||
434 | if (!setup_frame(sig, ka, info, oldset, scr)) | ||
435 | return 0; | ||
436 | 429 | ||
437 | spin_lock_irq(¤t->sighand->siglock); | 430 | spin_lock_irq(¤t->sighand->siglock); |
438 | sigorsets(¤t->blocked, ¤t->blocked, &ka->sa.sa_mask); | 431 | sigorsets(¤t->blocked, ¤t->blocked, &ka->sa.sa_mask); |
@@ -462,7 +455,6 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall) | |||
462 | siginfo_t info; | 455 | siginfo_t info; |
463 | long restart = in_syscall; | 456 | long restart = in_syscall; |
464 | long errno = scr->pt.r8; | 457 | long errno = scr->pt.r8; |
465 | # define ERR_CODE(c) (IS_IA32_PROCESS(&scr->pt) ? -(c) : (c)) | ||
466 | 458 | ||
467 | /* | 459 | /* |
468 | * In the ia64_leave_kernel code path, we want the common case to go fast, which | 460 | * In the ia64_leave_kernel code path, we want the common case to go fast, which |
@@ -490,14 +482,7 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall) | |||
490 | * inferior call), thus it's important to check for restarting _after_ | 482 | * inferior call), thus it's important to check for restarting _after_ |
491 | * get_signal_to_deliver(). | 483 | * get_signal_to_deliver(). |
492 | */ | 484 | */ |
493 | if (IS_IA32_PROCESS(&scr->pt)) { | 485 | if ((long) scr->pt.r10 != -1) |
494 | if (in_syscall) { | ||
495 | if (errno >= 0) | ||
496 | restart = 0; | ||
497 | else | ||
498 | errno = -errno; | ||
499 | } | ||
500 | } else if ((long) scr->pt.r10 != -1) | ||
501 | /* | 486 | /* |
502 | * A system calls has to be restarted only if one of the error codes | 487 | * A system calls has to be restarted only if one of the error codes |
503 | * ERESTARTNOHAND, ERESTARTSYS, or ERESTARTNOINTR is returned. If r10 | 488 | * ERESTARTNOHAND, ERESTARTSYS, or ERESTARTNOINTR is returned. If r10 |
@@ -513,22 +498,18 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall) | |||
513 | switch (errno) { | 498 | switch (errno) { |
514 | case ERESTART_RESTARTBLOCK: | 499 | case ERESTART_RESTARTBLOCK: |
515 | case ERESTARTNOHAND: | 500 | case ERESTARTNOHAND: |
516 | scr->pt.r8 = ERR_CODE(EINTR); | 501 | scr->pt.r8 = EINTR; |
517 | /* note: scr->pt.r10 is already -1 */ | 502 | /* note: scr->pt.r10 is already -1 */ |
518 | break; | 503 | break; |
519 | 504 | ||
520 | case ERESTARTSYS: | 505 | case ERESTARTSYS: |
521 | if ((ka.sa.sa_flags & SA_RESTART) == 0) { | 506 | if ((ka.sa.sa_flags & SA_RESTART) == 0) { |
522 | scr->pt.r8 = ERR_CODE(EINTR); | 507 | scr->pt.r8 = EINTR; |
523 | /* note: scr->pt.r10 is already -1 */ | 508 | /* note: scr->pt.r10 is already -1 */ |
524 | break; | 509 | break; |
525 | } | 510 | } |
526 | case ERESTARTNOINTR: | 511 | case ERESTARTNOINTR: |
527 | if (IS_IA32_PROCESS(&scr->pt)) { | 512 | ia64_decrement_ip(&scr->pt); |
528 | scr->pt.r8 = scr->pt.r1; | ||
529 | scr->pt.cr_iip -= 2; | ||
530 | } else | ||
531 | ia64_decrement_ip(&scr->pt); | ||
532 | restart = 0; /* don't restart twice if handle_signal() fails... */ | 513 | restart = 0; /* don't restart twice if handle_signal() fails... */ |
533 | } | 514 | } |
534 | } | 515 | } |
@@ -555,21 +536,14 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall) | |||
555 | if (errno == ERESTARTNOHAND || errno == ERESTARTSYS || errno == ERESTARTNOINTR | 536 | if (errno == ERESTARTNOHAND || errno == ERESTARTSYS || errno == ERESTARTNOINTR |
556 | || errno == ERESTART_RESTARTBLOCK) | 537 | || errno == ERESTART_RESTARTBLOCK) |
557 | { | 538 | { |
558 | if (IS_IA32_PROCESS(&scr->pt)) { | 539 | /* |
559 | scr->pt.r8 = scr->pt.r1; | 540 | * Note: the syscall number is in r15 which is saved in |
560 | scr->pt.cr_iip -= 2; | 541 | * pt_regs so all we need to do here is adjust ip so that |
561 | if (errno == ERESTART_RESTARTBLOCK) | 542 | * the "break" instruction gets re-executed. |
562 | scr->pt.r8 = 0; /* x86 version of __NR_restart_syscall */ | 543 | */ |
563 | } else { | 544 | ia64_decrement_ip(&scr->pt); |
564 | /* | 545 | if (errno == ERESTART_RESTARTBLOCK) |
565 | * Note: the syscall number is in r15 which is saved in | 546 | scr->pt.r15 = __NR_restart_syscall; |
566 | * pt_regs so all we need to do here is adjust ip so that | ||
567 | * the "break" instruction gets re-executed. | ||
568 | */ | ||
569 | ia64_decrement_ip(&scr->pt); | ||
570 | if (errno == ERESTART_RESTARTBLOCK) | ||
571 | scr->pt.r15 = __NR_restart_syscall; | ||
572 | } | ||
573 | } | 547 | } |
574 | } | 548 | } |
575 | 549 | ||
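After the IA-32 special cases are removed, the restart handling left in ia64_do_signal() for a task interrupted inside a restartable syscall follows a single rule set: when a handler is about to run, ERESTART_RESTARTBLOCK and ERESTARTNOHAND turn into EINTR, ERESTARTSYS turns into EINTR unless SA_RESTART is set, and the remaining cases back the instruction pointer up so the break instruction re-executes; when no handler runs, any of the four restart codes backs the ip up, and ERESTART_RESTARTBLOCK additionally loads __NR_restart_syscall into r15. The sketch below restates that decision table as stand-alone C under those assumptions; the enums and the classify() helper are invented for illustration only.

#include <stdbool.h>
#include <stdio.h>

enum restart_action {
	RESTART_NONE,		/* not a restartable error: leave r8/r10 alone */
	RESTART_EINTR,		/* report EINTR to the interrupted caller */
	RESTART_REDO,		/* back up ip so the "break" re-executes */
	RESTART_BLOCK,		/* back up ip and load __NR_restart_syscall into r15 */
};

/* Stand-ins for the kernel's ERESTART* error codes. */
enum fake_errno {
	E_RESTARTBLOCK, E_RESTARTNOHAND, E_RESTARTSYS, E_RESTARTNOINTR, E_OTHER,
};

static enum restart_action classify(enum fake_errno err, bool has_handler,
				    bool sa_restart)
{
	if (!has_handler) {
		/* no handler: always re-issue the interrupted syscall */
		if (err == E_RESTARTBLOCK)
			return RESTART_BLOCK;
		if (err == E_RESTARTNOHAND || err == E_RESTARTSYS ||
		    err == E_RESTARTNOINTR)
			return RESTART_REDO;
		return RESTART_NONE;
	}
	/* a handler will run: only transparent restarts survive */
	switch (err) {
	case E_RESTARTBLOCK:
	case E_RESTARTNOHAND:
		return RESTART_EINTR;
	case E_RESTARTSYS:
		return sa_restart ? RESTART_REDO : RESTART_EINTR;
	case E_RESTARTNOINTR:
		return RESTART_REDO;
	default:
		return RESTART_NONE;
	}
}

int main(void)
{
	printf("%d\n", classify(E_RESTARTSYS, true, false));	/* RESTART_EINTR */
	printf("%d\n", classify(E_RESTARTBLOCK, false, false));	/* RESTART_BLOCK */
	return 0;
}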
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index de100aa7ff03..e5230b2ff2c5 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c | |||
@@ -44,7 +44,6 @@ | |||
44 | #include <asm/cache.h> | 44 | #include <asm/cache.h> |
45 | #include <asm/current.h> | 45 | #include <asm/current.h> |
46 | #include <asm/delay.h> | 46 | #include <asm/delay.h> |
47 | #include <asm/ia32.h> | ||
48 | #include <asm/io.h> | 47 | #include <asm/io.h> |
49 | #include <asm/irq.h> | 48 | #include <asm/irq.h> |
50 | #include <asm/machvec.h> | 49 | #include <asm/machvec.h> |
@@ -443,10 +442,6 @@ smp_callin (void) | |||
443 | calibrate_delay(); | 442 | calibrate_delay(); |
444 | local_cpu_data->loops_per_jiffy = loops_per_jiffy; | 443 | local_cpu_data->loops_per_jiffy = loops_per_jiffy; |
445 | 444 | ||
446 | #ifdef CONFIG_IA32_SUPPORT | ||
447 | ia32_gdt_init(); | ||
448 | #endif | ||
449 | |||
450 | /* | 445 | /* |
451 | * Allow the master to continue. | 446 | * Allow the master to continue. |
452 | */ | 447 | */ |
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c index f0cda765e681..fd80e70018a9 100644 --- a/arch/ia64/kernel/traps.c +++ b/arch/ia64/kernel/traps.c | |||
@@ -19,7 +19,6 @@ | |||
19 | #include <linux/kdebug.h> | 19 | #include <linux/kdebug.h> |
20 | 20 | ||
21 | #include <asm/fpswa.h> | 21 | #include <asm/fpswa.h> |
22 | #include <asm/ia32.h> | ||
23 | #include <asm/intrinsics.h> | 22 | #include <asm/intrinsics.h> |
24 | #include <asm/processor.h> | 23 | #include <asm/processor.h> |
25 | #include <asm/uaccess.h> | 24 | #include <asm/uaccess.h> |
@@ -626,10 +625,6 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, | |||
626 | break; | 625 | break; |
627 | 626 | ||
628 | case 45: | 627 | case 45: |
629 | #ifdef CONFIG_IA32_SUPPORT | ||
630 | if (ia32_exception(®s, isr) == 0) | ||
631 | return; | ||
632 | #endif | ||
633 | printk(KERN_ERR "Unexpected IA-32 exception (Trap 45)\n"); | 628 | printk(KERN_ERR "Unexpected IA-32 exception (Trap 45)\n"); |
634 | printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n", | 629 | printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n", |
635 | iip, ifa, isr); | 630 | iip, ifa, isr); |
@@ -637,10 +632,6 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, | |||
637 | break; | 632 | break; |
638 | 633 | ||
639 | case 46: | 634 | case 46: |
640 | #ifdef CONFIG_IA32_SUPPORT | ||
641 | if (ia32_intercept(®s, isr) == 0) | ||
642 | return; | ||
643 | #endif | ||
644 | printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n"); | 635 | printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n"); |
645 | printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx, iim - 0x%lx\n", | 636 | printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx, iim - 0x%lx\n", |
646 | iip, ifa, isr, iim); | 637 | iip, ifa, isr, iim); |
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 7c0d4814a68d..ca3335ea56cc 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/kexec.h> | 22 | #include <linux/kexec.h> |
23 | 23 | ||
24 | #include <asm/dma.h> | 24 | #include <asm/dma.h> |
25 | #include <asm/ia32.h> | ||
26 | #include <asm/io.h> | 25 | #include <asm/io.h> |
27 | #include <asm/machvec.h> | 26 | #include <asm/machvec.h> |
28 | #include <asm/numa.h> | 27 | #include <asm/numa.h> |
@@ -668,10 +667,6 @@ mem_init (void) | |||
668 | fsyscall_table[i] = sys_call_table[i] | 1; | 667 | fsyscall_table[i] = sys_call_table[i] | 1; |
669 | } | 668 | } |
670 | setup_gate(); | 669 | setup_gate(); |
671 | |||
672 | #ifdef CONFIG_IA32_SUPPORT | ||
673 | ia32_mem_init(); | ||
674 | #endif | ||
675 | } | 670 | } |
676 | 671 | ||
677 | #ifdef CONFIG_MEMORY_HOTPLUG | 672 | #ifdef CONFIG_MEMORY_HOTPLUG |
diff --git a/arch/ia64/xen/hypercall.S b/arch/ia64/xen/hypercall.S index e32dae444dd6..08847aa12583 100644 --- a/arch/ia64/xen/hypercall.S +++ b/arch/ia64/xen/hypercall.S | |||
@@ -58,11 +58,6 @@ __HCALL2(xen_ptcga, HYPERPRIVOP_PTC_GA) | |||
58 | __HCALL2(xen_set_rr, HYPERPRIVOP_SET_RR) | 58 | __HCALL2(xen_set_rr, HYPERPRIVOP_SET_RR) |
59 | __HCALL2(xen_set_kr, HYPERPRIVOP_SET_KR) | 59 | __HCALL2(xen_set_kr, HYPERPRIVOP_SET_KR) |
60 | 60 | ||
61 | #ifdef CONFIG_IA32_SUPPORT | ||
62 | __HCALL0(xen_get_eflag, HYPERPRIVOP_GET_EFLAG) | ||
63 | __HCALL1(xen_set_eflag, HYPERPRIVOP_SET_EFLAG) // refer SDM vol1 3.1.8 | ||
64 | #endif /* CONFIG_IA32_SUPPORT */ | ||
65 | |||
66 | GLOBAL_ENTRY(xen_set_rr0_to_rr4) | 61 | GLOBAL_ENTRY(xen_set_rr0_to_rr4) |
67 | mov r8=r32 | 62 | mov r8=r32 |
68 | mov r9=r33 | 63 | mov r9=r33 |
diff --git a/arch/ia64/xen/xen_pv_ops.c b/arch/ia64/xen/xen_pv_ops.c index 5e2270a999fa..8adc6a14272a 100644 --- a/arch/ia64/xen/xen_pv_ops.c +++ b/arch/ia64/xen/xen_pv_ops.c | |||
@@ -301,11 +301,6 @@ static void xen_setreg(int regnum, unsigned long val) | |||
301 | case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7: | 301 | case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7: |
302 | xen_set_kr(regnum - _IA64_REG_AR_KR0, val); | 302 | xen_set_kr(regnum - _IA64_REG_AR_KR0, val); |
303 | break; | 303 | break; |
304 | #ifdef CONFIG_IA32_SUPPORT | ||
305 | case _IA64_REG_AR_EFLAG: | ||
306 | xen_set_eflag(val); | ||
307 | break; | ||
308 | #endif | ||
309 | case _IA64_REG_AR_ITC: | 304 | case _IA64_REG_AR_ITC: |
310 | xen_set_itc(val); | 305 | xen_set_itc(val); |
311 | break; | 306 | break; |
@@ -332,11 +327,6 @@ static unsigned long xen_getreg(int regnum) | |||
332 | case _IA64_REG_PSR: | 327 | case _IA64_REG_PSR: |
333 | res = xen_get_psr(); | 328 | res = xen_get_psr(); |
334 | break; | 329 | break; |
335 | #ifdef CONFIG_IA32_SUPPORT | ||
336 | case _IA64_REG_AR_EFLAG: | ||
337 | res = xen_get_eflag(); | ||
338 | break; | ||
339 | #endif | ||
340 | case _IA64_REG_AR_ITC: | 330 | case _IA64_REG_AR_ITC: |
341 | res = xen_get_itc(); | 331 | res = xen_get_itc(); |
342 | break; | 332 | break; |
@@ -710,9 +700,6 @@ extern unsigned long xen_getreg(int regnum); | |||
710 | 700 | ||
711 | __DEFINE_FUNC(getreg, | 701 | __DEFINE_FUNC(getreg, |
712 | __DEFINE_GET_REG(PSR, PSR) | 702 | __DEFINE_GET_REG(PSR, PSR) |
713 | #ifdef CONFIG_IA32_SUPPORT | ||
714 | __DEFINE_GET_REG(AR_EFLAG, EFLAG) | ||
715 | #endif | ||
716 | 703 | ||
717 | /* get_itc */ | 704 | /* get_itc */ |
718 | "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n" | 705 | "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n" |
@@ -789,9 +776,6 @@ __DEFINE_FUNC(setreg, | |||
789 | ";;\n" | 776 | ";;\n" |
790 | "(p6) br.cond.spnt xen_set_itc\n" | 777 | "(p6) br.cond.spnt xen_set_itc\n" |
791 | 778 | ||
792 | #ifdef CONFIG_IA32_SUPPORT | ||
793 | __DEFINE_SET_REG(AR_EFLAG, SET_EFLAG) | ||
794 | #endif | ||
795 | __DEFINE_SET_REG(CR_TPR, SET_TPR) | 779 | __DEFINE_SET_REG(CR_TPR, SET_TPR) |
796 | __DEFINE_SET_REG(CR_EOI, EOI) | 780 | __DEFINE_SET_REG(CR_EOI, EOI) |
797 | 781 | ||