author    Ingo Molnar <mingo@elte.hu>  2012-03-13 00:54:39 -0400
committer Ingo Molnar <mingo@elte.hu>  2012-03-13 00:54:41 -0400
commit    e898c6706869fdcbd68b1e7fb0ac7461d98710fe (patch)
tree      28054da88ddda324f78f27083b14328113941fbc /arch/x86/kernel
parent    c94082656dac74257f63e91f78d5d458ac781fa5 (diff)
parent    55283e2537714f9370c4ab847d170acf223daf90 (diff)

Merge branch 'x86/x32' into x86/cleanups

Merge reason: We are going to merge a dependent patch.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/asm-offsets_64.c      6
-rw-r--r--  arch/x86/kernel/cpu/mtrr/if.c        10
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c      4
-rw-r--r--  arch/x86/kernel/entry_64.S           44
-rw-r--r--  arch/x86/kernel/process_64.c         27
-rw-r--r--  arch/x86/kernel/ptrace.c             99
-rw-r--r--  arch/x86/kernel/signal.c            138
-rw-r--r--  arch/x86/kernel/sys_x86_64.c          6
-rw-r--r--  arch/x86/kernel/syscall_64.c          8
9 files changed, 312 insertions(+), 30 deletions(-)
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 834e897b1e2..1b4754f82ba 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -1,6 +1,12 @@
 #include <asm/ia32.h>
 
 #define __SYSCALL_64(nr, sym, compat) [nr] = 1,
+#define __SYSCALL_COMMON(nr, sym, compat) [nr] = 1,
+#ifdef CONFIG_X86_X32_ABI
+# define __SYSCALL_X32(nr, sym, compat) [nr] = 1,
+#else
+# define __SYSCALL_X32(nr, sym, compat) /* nothing */
+#endif
 static char syscalls_64[] = {
 #include <asm/syscalls_64.h>
 };
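
Note on the hunk above: asm/syscalls_64.h is an X-macro style generated header, and each __SYSCALL_* definition here turns a table entry into a one-byte array element, so common and (when CONFIG_X86_X32_ABI is set) x32 entries are now counted alongside the 64-bit-only ones; the rest of asm-offsets_64.c (outside this hunk) derives the maximum syscall number from the size of the resulting array. The sketch below is illustrative only and not part of the patch; the entries are hypothetical stand-ins for the generated header.

/* Illustrative sketch, not kernel code: how the definitions above expand
 * a miniature stand-in for asm/syscalls_64.h into a sized array. */
#define __SYSCALL_64(nr, sym, compat)     [nr] = 1,
#define __SYSCALL_COMMON(nr, sym, compat) [nr] = 1,
#define __SYSCALL_X32(nr, sym, compat)    /* nothing when X32 is off */

static char syscalls_64[] = {
	/* the generated header contains lines shaped like these: */
	__SYSCALL_COMMON(0, sys_read, sys_read)
	__SYSCALL_COMMON(1, sys_write, sys_write)
	__SYSCALL_X32(512, compat_sys_rt_sigaction, compat_sys_rt_sigaction)
};
/* sizeof(syscalls_64) now reflects the highest counted slot plus one. */

With X32 disabled, the last entry expands to nothing, so x32-only syscall numbers do not inflate the array.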
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index 79289632cb2..a041e094b8b 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -167,6 +167,7 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
 {
 	int err = 0;
 	mtrr_type type;
+	unsigned long base;
 	unsigned long size;
 	struct mtrr_sentry sentry;
 	struct mtrr_gentry gentry;
@@ -267,14 +268,14 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
 #endif
 		if (gentry.regnum >= num_var_ranges)
 			return -EINVAL;
-		mtrr_if->get(gentry.regnum, &gentry.base, &size, &type);
+		mtrr_if->get(gentry.regnum, &base, &size, &type);
 
 		/* Hide entries that go above 4GB */
-		if (gentry.base + size - 1 >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT))
+		if (base + size - 1 >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT))
 		    || size >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT)))
 			gentry.base = gentry.size = gentry.type = 0;
 		else {
-			gentry.base <<= PAGE_SHIFT;
+			gentry.base = base << PAGE_SHIFT;
 			gentry.size = size << PAGE_SHIFT;
 			gentry.type = type;
 		}
@@ -321,11 +322,12 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
 #endif
 		if (gentry.regnum >= num_var_ranges)
 			return -EINVAL;
-		mtrr_if->get(gentry.regnum, &gentry.base, &size, &type);
+		mtrr_if->get(gentry.regnum, &base, &size, &type);
 		/* Hide entries that would overflow */
 		if (size != (__typeof__(gentry.size))size)
 			gentry.base = gentry.size = gentry.type = 0;
 		else {
+			gentry.base = base;
 			gentry.size = size;
 			gentry.type = type;
 		}
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 5adce1040b1..63c0e058a40 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -28,7 +28,6 @@
 #include <asm/apic.h>
 #include <asm/stacktrace.h>
 #include <asm/nmi.h>
-#include <asm/compat.h>
 #include <asm/smp.h>
 #include <asm/alternative.h>
 
@@ -1595,6 +1594,9 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 }
 
 #ifdef CONFIG_COMPAT
+
+#include <asm/compat.h>
+
 static inline int
 perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 {
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 1333d985177..2925e14fb1d 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -482,7 +482,12 @@ GLOBAL(system_call_after_swapgs)
 	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	jnz tracesys
 system_call_fastpath:
+#if __SYSCALL_MASK == ~0
 	cmpq $__NR_syscall_max,%rax
+#else
+	andl $__SYSCALL_MASK,%eax
+	cmpl $__NR_syscall_max,%eax
+#endif
 	ja badsys
 	movq %r10,%rcx
 	call *sys_call_table(,%rax,8)  # XXX:	 rip relative
@@ -596,7 +601,12 @@ tracesys:
 	 */
 	LOAD_ARGS ARGOFFSET, 1
 	RESTORE_REST
+#if __SYSCALL_MASK == ~0
 	cmpq $__NR_syscall_max,%rax
+#else
+	andl $__SYSCALL_MASK,%eax
+	cmpl $__NR_syscall_max,%eax
+#endif
 	ja int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
 	movq %r10,%rcx	/* fixup for C */
 	call *sys_call_table(,%rax,8)
@@ -736,6 +746,40 @@ ENTRY(stub_rt_sigreturn)
 	CFI_ENDPROC
 END(stub_rt_sigreturn)
 
+#ifdef CONFIG_X86_X32_ABI
+	PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx
+
+ENTRY(stub_x32_rt_sigreturn)
+	CFI_STARTPROC
+	addq $8, %rsp
+	PARTIAL_FRAME 0
+	SAVE_REST
+	movq %rsp,%rdi
+	FIXUP_TOP_OF_STACK %r11
+	call sys32_x32_rt_sigreturn
+	movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
+	RESTORE_REST
+	jmp int_ret_from_sys_call
+	CFI_ENDPROC
+END(stub_x32_rt_sigreturn)
+
+ENTRY(stub_x32_execve)
+	CFI_STARTPROC
+	addq $8, %rsp
+	PARTIAL_FRAME 0
+	SAVE_REST
+	FIXUP_TOP_OF_STACK %r11
+	movq %rsp, %rcx
+	call sys32_execve
+	RESTORE_TOP_OF_STACK %r11
+	movq %rax,RAX(%rsp)
+	RESTORE_REST
+	jmp int_ret_from_sys_call
+	CFI_ENDPROC
+END(stub_x32_execve)
+
+#endif
+
 /*
  * Build the entry stubs and pointer table with some assembler magic.
  * We pack 7 stubs into a single 32-byte chunk, which will fit in a
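
For reference, a minimal C sketch of what the #else branch in the two hunks above computes when CONFIG_X86_X32_ABI is enabled: x32 system calls arrive with a high marker bit set in the call number, and __SYSCALL_MASK is chosen so that the andl strips that bit before the range check and the sys_call_table lookup. The constants and helper below are illustrative stand-ins (the marker bit corresponds to __X32_SYSCALL_BIT in this series' headers); this is not kernel code.

#include <stdio.h>

#define X32_SYSCALL_BIT  0x40000000u          /* assumed value of __X32_SYSCALL_BIT */
#define SYSCALL_MASK     (~X32_SYSCALL_BIT)   /* __SYSCALL_MASK with x32 enabled */
#define NR_SYSCALL_MAX   547u                 /* placeholder for __NR_syscall_max */

static unsigned int table_index(unsigned int rax)
{
	unsigned int nr = rax & SYSCALL_MASK;   /* andl $__SYSCALL_MASK,%eax */
	if (nr > NR_SYSCALL_MAX)                /* cmpl ... ; ja badsys */
		return ~0u;                     /* would branch to badsys */
	return nr;                              /* index into sys_call_table */
}

int main(void)
{
	/* a native 64-bit call and the same slot reached via the x32 marker bit */
	printf("%u\n", table_index(1));
	printf("%u\n", table_index(1 | X32_SYSCALL_BIT));
	return 0;
}

When x32 is disabled, __SYSCALL_MASK stays ~0 and the original single cmpq path is assembled instead, so the fast path does not pay for the masking.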
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index cfa5c90c01d..550e77b1b94 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -365,7 +365,9 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
 {
 	start_thread_common(regs, new_ip, new_sp,
-			    __USER32_CS, __USER32_DS, __USER32_DS);
+			    test_thread_flag(TIF_X32)
+			    ? __USER_CS : __USER32_CS,
+			    __USER_DS, __USER_DS);
 }
 #endif
 
@@ -488,6 +490,8 @@ void set_personality_64bit(void)
 
 	/* Make sure to be in 64bit mode */
 	clear_thread_flag(TIF_IA32);
+	clear_thread_flag(TIF_ADDR32);
+	clear_thread_flag(TIF_X32);
 
 	/* Ensure the corresponding mm is not marked. */
 	if (current->mm)
@@ -500,20 +504,31 @@ void set_personality_64bit(void)
 	current->personality &= ~READ_IMPLIES_EXEC;
 }
 
-void set_personality_ia32(void)
+void set_personality_ia32(bool x32)
 {
 	/* inherit personality from parent */
 
 	/* Make sure to be in 32bit mode */
-	set_thread_flag(TIF_IA32);
-	current->personality |= force_personality32;
+	set_thread_flag(TIF_ADDR32);
 
 	/* Mark the associated mm as containing 32-bit tasks. */
 	if (current->mm)
 		current->mm->context.ia32_compat = 1;
 
-	/* Prepare the first "return" to user space */
-	current_thread_info()->status |= TS_COMPAT;
+	if (x32) {
+		clear_thread_flag(TIF_IA32);
+		set_thread_flag(TIF_X32);
+		current->personality &= ~READ_IMPLIES_EXEC;
+		/* is_compat_task() uses the presence of the x32
+		   syscall bit flag to determine compat status */
+		current_thread_info()->status &= ~TS_COMPAT;
+	} else {
+		set_thread_flag(TIF_IA32);
+		clear_thread_flag(TIF_X32);
+		current->personality |= force_personality32;
+		/* Prepare the first "return" to user space */
+		current_thread_info()->status |= TS_COMPAT;
+	}
 }
 
 unsigned long get_wchan(struct task_struct *p)
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 50267386b76..93e7877a19c 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -1130,6 +1130,100 @@ static int genregs32_set(struct task_struct *target,
 	return ret;
 }
 
+#ifdef CONFIG_X86_X32_ABI
+static long x32_arch_ptrace(struct task_struct *child,
+			    compat_long_t request, compat_ulong_t caddr,
+			    compat_ulong_t cdata)
+{
+	unsigned long addr = caddr;
+	unsigned long data = cdata;
+	void __user *datap = compat_ptr(data);
+	int ret;
+
+	switch (request) {
+	/* Read 32bits at location addr in the USER area.  Only allow
+	   to return the lower 32bits of segment and debug registers.  */
+	case PTRACE_PEEKUSR: {
+		u32 tmp;
+
+		ret = -EIO;
+		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
+		    addr < offsetof(struct user_regs_struct, cs))
+			break;
+
+		tmp = 0;  /* Default return condition */
+		if (addr < sizeof(struct user_regs_struct))
+			tmp = getreg(child, addr);
+		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
+			 addr <= offsetof(struct user, u_debugreg[7])) {
+			addr -= offsetof(struct user, u_debugreg[0]);
+			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
+		}
+		ret = put_user(tmp, (__u32 __user *)datap);
+		break;
+	}
+
+	/* Write the word at location addr in the USER area.  Only allow
+	   to update segment and debug registers with the upper 32bits
+	   zero-extended. */
+	case PTRACE_POKEUSR:
+		ret = -EIO;
+		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
+		    addr < offsetof(struct user_regs_struct, cs))
+			break;
+
+		if (addr < sizeof(struct user_regs_struct))
+			ret = putreg(child, addr, data);
+		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
+			 addr <= offsetof(struct user, u_debugreg[7])) {
+			addr -= offsetof(struct user, u_debugreg[0]);
+			ret = ptrace_set_debugreg(child,
+						  addr / sizeof(data), data);
+		}
+		break;
+
+	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
+		return copy_regset_to_user(child,
+					   task_user_regset_view(current),
+					   REGSET_GENERAL,
+					   0, sizeof(struct user_regs_struct),
+					   datap);
+
+	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
+		return copy_regset_from_user(child,
+					     task_user_regset_view(current),
+					     REGSET_GENERAL,
+					     0, sizeof(struct user_regs_struct),
+					     datap);
+
+	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
+		return copy_regset_to_user(child,
+					   task_user_regset_view(current),
+					   REGSET_FP,
+					   0, sizeof(struct user_i387_struct),
+					   datap);
+
+	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
+		return copy_regset_from_user(child,
+					     task_user_regset_view(current),
+					     REGSET_FP,
+					     0, sizeof(struct user_i387_struct),
+					     datap);
+
+	/* normal 64bit interface to access TLS data.
+	   Works just like arch_prctl, except that the arguments
+	   are reversed. */
+	case PTRACE_ARCH_PRCTL:
+		return do_arch_prctl(child, data, addr);
+
+	default:
+		return compat_ptrace_request(child, request, addr, data);
+	}
+
+	return ret;
+}
+#endif
+
 long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 			compat_ulong_t caddr, compat_ulong_t cdata)
 {
@@ -1139,6 +1233,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 	int ret;
 	__u32 val;
 
+#ifdef CONFIG_X86_X32_ABI
+	if (!is_ia32_task())
+		return x32_arch_ptrace(child, request, caddr, cdata);
+#endif
+
 	switch (request) {
 	case PTRACE_PEEKUSR:
 		ret = getreg32(child, addr, &val);
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 46a01bdc27e..c3846b6fb72 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -10,10 +10,8 @@
 #include <linux/mm.h>
 #include <linux/smp.h>
 #include <linux/kernel.h>
-#include <linux/signal.h>
 #include <linux/errno.h>
 #include <linux/wait.h>
-#include <linux/ptrace.h>
 #include <linux/tracehook.h>
 #include <linux/unistd.h>
 #include <linux/stddef.h>
@@ -26,10 +24,12 @@
 #include <asm/i387.h>
 #include <asm/vdso.h>
 #include <asm/mce.h>
+#include <asm/sighandling.h>
 
 #ifdef CONFIG_X86_64
 #include <asm/proto.h>
 #include <asm/ia32_unistd.h>
+#include <asm/sys_ia32.h>
 #endif /* CONFIG_X86_64 */
 
 #include <asm/syscall.h>
@@ -37,13 +37,6 @@
 
 #include <asm/sigframe.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
-#define __FIX_EFLAGS	(X86_EFLAGS_AC | X86_EFLAGS_OF | \
-			 X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \
-			 X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \
-			 X86_EFLAGS_CF)
-
 #ifdef CONFIG_X86_32
 # define FIX_EFLAGS	(__FIX_EFLAGS | X86_EFLAGS_RF)
 #else
@@ -68,9 +61,8 @@
 	regs->seg = GET_SEG(seg) | 3;			\
 } while (0)
 
-static int
-restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
-		   unsigned long *pax)
+int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
+		       unsigned long *pax)
 {
 	void __user *buf;
 	unsigned int tmpflags;
@@ -125,9 +117,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
 	return err;
 }
 
-static int
-setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
-		 struct pt_regs *regs, unsigned long mask)
+int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
+		     struct pt_regs *regs, unsigned long mask)
 {
 	int err = 0;
 
@@ -642,6 +633,16 @@ static int signr_convert(int sig)
 #define is_ia32	0
 #endif /* CONFIG_IA32_EMULATION */
 
+#ifdef CONFIG_X86_X32_ABI
+#define is_x32	test_thread_flag(TIF_X32)
+
+static int x32_setup_rt_frame(int sig, struct k_sigaction *ka,
+			      siginfo_t *info, compat_sigset_t *set,
+			      struct pt_regs *regs);
+#else /* !CONFIG_X86_X32_ABI */
+#define is_x32	0
+#endif /* CONFIG_X86_X32_ABI */
+
 int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		sigset_t *set, struct pt_regs *regs);
 int ia32_setup_frame(int sig, struct k_sigaction *ka,
@@ -666,8 +667,14 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 			ret = ia32_setup_rt_frame(usig, ka, info, set, regs);
 		else
 			ret = ia32_setup_frame(usig, ka, set, regs);
-	} else
+#ifdef CONFIG_X86_X32_ABI
+	} else if (is_x32) {
+		ret = x32_setup_rt_frame(usig, ka, info,
+					 (compat_sigset_t *)set, regs);
+#endif
+	} else {
 		ret = __setup_rt_frame(sig, ka, info, set, regs);
+	}
 
 	if (ret) {
 		force_sigsegv(sig, current);
@@ -850,3 +857,102 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
 
 	force_sig(SIGSEGV, me);
 }
+
+#ifdef CONFIG_X86_X32_ABI
+static int x32_setup_rt_frame(int sig, struct k_sigaction *ka,
+			      siginfo_t *info, compat_sigset_t *set,
+			      struct pt_regs *regs)
+{
+	struct rt_sigframe_x32 __user *frame;
+	void __user *restorer;
+	int err = 0;
+	void __user *fpstate = NULL;
+
+	frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
+
+	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+		return -EFAULT;
+
+	if (ka->sa.sa_flags & SA_SIGINFO) {
+		if (copy_siginfo_to_user32(&frame->info, info))
+			return -EFAULT;
+	}
+
+	put_user_try {
+		/* Create the ucontext.  */
+		if (cpu_has_xsave)
+			put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
+		else
+			put_user_ex(0, &frame->uc.uc_flags);
+		put_user_ex(0, &frame->uc.uc_link);
+		put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+		put_user_ex(sas_ss_flags(regs->sp),
+			    &frame->uc.uc_stack.ss_flags);
+		put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+		put_user_ex(0, &frame->uc.uc__pad0);
+		err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
+					regs, set->sig[0]);
+		err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+		if (ka->sa.sa_flags & SA_RESTORER) {
+			restorer = ka->sa.sa_restorer;
+		} else {
+			/* could use a vstub here */
+			restorer = NULL;
+			err |= -EFAULT;
+		}
+		put_user_ex(restorer, &frame->pretcode);
+	} put_user_catch(err);
+
+	if (err)
+		return -EFAULT;
+
+	/* Set up registers for signal handler */
+	regs->sp = (unsigned long) frame;
+	regs->ip = (unsigned long) ka->sa.sa_handler;
+
+	/* We use the x32 calling convention here... */
+	regs->di = sig;
+	regs->si = (unsigned long) &frame->info;
+	regs->dx = (unsigned long) &frame->uc;
+
+	loadsegment(ds, __USER_DS);
+	loadsegment(es, __USER_DS);
+
+	regs->cs = __USER_CS;
+	regs->ss = __USER_DS;
+
+	return 0;
+}
+
+asmlinkage long sys32_x32_rt_sigreturn(struct pt_regs *regs)
+{
+	struct rt_sigframe_x32 __user *frame;
+	sigset_t set;
+	unsigned long ax;
+	struct pt_regs tregs;
+
+	frame = (struct rt_sigframe_x32 __user *)(regs->sp - 8);
+
+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+		goto badframe;
+	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+		goto badframe;
+
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	set_current_blocked(&set);
+
+	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
+		goto badframe;
+
+	tregs = *regs;
+	if (sys32_sigaltstack(&frame->uc.uc_stack, NULL, &tregs) == -EFAULT)
+		goto badframe;
+
+	return ax;
+
+badframe:
+	signal_fault(regs, frame, "x32 rt_sigreturn");
+	return 0;
+}
+#endif
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 051489082d5..f921df8c209 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -98,7 +98,7 @@ out:
 static void find_start_end(unsigned long flags, unsigned long *begin,
 			   unsigned long *end)
 {
-	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
+	if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
 		unsigned long new_begin;
 		/* This is usually used needed to map code in small
 		   model, so it needs to be in the first 31bit. Limit
@@ -144,7 +144,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
-	if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
+	if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
 	    && len <= mm->cached_hole_size) {
 		mm->cached_hole_size = 0;
 		mm->free_area_cache = begin;
@@ -205,7 +205,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		return addr;
 
 	/* for MAP_32BIT mappings we force the legact mmap base */
-	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
+	if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
 		goto bottomup;
 
 	/* requesting a specific address */
diff --git a/arch/x86/kernel/syscall_64.c b/arch/x86/kernel/syscall_64.c
index 7ac7943be02..5c7f8c20da7 100644
--- a/arch/x86/kernel/syscall_64.c
+++ b/arch/x86/kernel/syscall_64.c
@@ -5,6 +5,14 @@
 #include <linux/cache.h>
 #include <asm/asm-offsets.h>
 
+#define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
+
+#ifdef CONFIG_X86_X32_ABI
+# define __SYSCALL_X32(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
+#else
+# define __SYSCALL_X32(nr, sym, compat) /* nothing */
+#endif
+
 #define __SYSCALL_64(nr, sym, compat) extern asmlinkage void sym(void) ;
 #include <asm/syscalls_64.h>
 #undef __SYSCALL_64
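
The defines added in this last hunk route common and x32 entries through __SYSCALL_64, so the single generated header (asm/syscalls_64.h) can be expanded more than once with a different per-entry macro each time. Below is a minimal, self-contained sketch of that multi-pass X-macro pattern; the table contents and names are hypothetical stand-ins, not the kernel's.

#include <stdio.h>

/* hypothetical miniature stand-in for the generated syscall list */
#define SYSCALL_TABLE(X)	\
	X(0, hypo_read)		\
	X(1, hypo_write)

/* pass 1: emit one handler per entry (stubs here, externs in the real file) */
#define DEFINE_STUB(nr, sym) static long sym(void) { return nr; }
SYSCALL_TABLE(DEFINE_STUB)
#undef DEFINE_STUB

typedef long (*sys_call_ptr_t)(void);
static long ni_syscall(void) { return -38; }	/* stand-in for sys_ni_syscall (-ENOSYS) */

/* pass 2: emit one table slot per entry; unused slots fall back to ni_syscall */
#define TABLE_ENTRY(nr, sym) [nr] = sym,
static const sys_call_ptr_t call_table[4] = {
	[0 ... 3] = ni_syscall,		/* GNU range designator, as the real table uses */
	SYSCALL_TABLE(TABLE_ENTRY)
};
#undef TABLE_ENTRY

int main(void)
{
	printf("%ld %ld\n", call_table[1](), call_table[3]());	/* prints: 1 -38 */
	return 0;
}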