Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/asm-offsets_64.c      6
-rw-r--r--  arch/x86/kernel/cpu/mtrr/if.c        10
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c      4
-rw-r--r--  arch/x86/kernel/entry_64.S           44
-rw-r--r--  arch/x86/kernel/process_64.c         27
-rw-r--r--  arch/x86/kernel/ptrace.c             99
-rw-r--r--  arch/x86/kernel/signal.c            138
-rw-r--r--  arch/x86/kernel/sys_x86_64.c          6
-rw-r--r--  arch/x86/kernel/syscall_64.c          8
9 files changed, 312 insertions(+), 30 deletions(-)
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 834e897b1e25..1b4754f82ba7 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -1,6 +1,12 @@
 #include <asm/ia32.h>
 
 #define __SYSCALL_64(nr, sym, compat) [nr] = 1,
+#define __SYSCALL_COMMON(nr, sym, compat) [nr] = 1,
+#ifdef CONFIG_X86_X32_ABI
+# define __SYSCALL_X32(nr, sym, compat) [nr] = 1,
+#else
+# define __SYSCALL_X32(nr, sym, compat) /* nothing */
+#endif
 static char syscalls_64[] = {
 #include <asm/syscalls_64.h>
 };
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index 79289632cb27..a041e094b8b9 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -167,6 +167,7 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
 {
 	int err = 0;
 	mtrr_type type;
+	unsigned long base;
 	unsigned long size;
 	struct mtrr_sentry sentry;
 	struct mtrr_gentry gentry;
@@ -267,14 +268,14 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
 #endif
 		if (gentry.regnum >= num_var_ranges)
 			return -EINVAL;
-		mtrr_if->get(gentry.regnum, &gentry.base, &size, &type);
+		mtrr_if->get(gentry.regnum, &base, &size, &type);
 
 		/* Hide entries that go above 4GB */
-		if (gentry.base + size - 1 >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT))
+		if (base + size - 1 >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT))
 		    || size >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT)))
 			gentry.base = gentry.size = gentry.type = 0;
 		else {
-			gentry.base <<= PAGE_SHIFT;
+			gentry.base = base << PAGE_SHIFT;
 			gentry.size = size << PAGE_SHIFT;
 			gentry.type = type;
 		}
@@ -321,11 +322,12 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
 #endif
 		if (gentry.regnum >= num_var_ranges)
 			return -EINVAL;
-		mtrr_if->get(gentry.regnum, &gentry.base, &size, &type);
+		mtrr_if->get(gentry.regnum, &base, &size, &type);
 		/* Hide entries that would overflow */
 		if (size != (__typeof__(gentry.size))size)
 			gentry.base = gentry.size = gentry.type = 0;
 		else {
+			gentry.base = base;
 			gentry.size = size;
 			gentry.type = type;
 		}
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index fa2900c0e398..40883ffe2da9 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -29,7 +29,6 @@
 #include <asm/apic.h>
 #include <asm/stacktrace.h>
 #include <asm/nmi.h>
-#include <asm/compat.h>
 #include <asm/smp.h>
 #include <asm/alternative.h>
 #include <asm/timer.h>
@@ -1748,6 +1747,9 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 }
 
 #ifdef CONFIG_COMPAT
+
+#include <asm/compat.h>
+
 static inline int
 perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 {
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 734ebd1d3caa..cdc79b5cfcd9 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -481,7 +481,12 @@ GLOBAL(system_call_after_swapgs)
 	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	jnz tracesys
 system_call_fastpath:
+#if __SYSCALL_MASK == ~0
 	cmpq $__NR_syscall_max,%rax
+#else
+	andl $__SYSCALL_MASK,%eax
+	cmpl $__NR_syscall_max,%eax
+#endif
 	ja badsys
 	movq %r10,%rcx
 	call *sys_call_table(,%rax,8)  # XXX:	 rip relative
@@ -595,7 +600,12 @@ tracesys:
 	 */
 	LOAD_ARGS ARGOFFSET, 1
 	RESTORE_REST
+#if __SYSCALL_MASK == ~0
 	cmpq $__NR_syscall_max,%rax
+#else
+	andl $__SYSCALL_MASK,%eax
+	cmpl $__NR_syscall_max,%eax
+#endif
 	ja int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
 	movq %r10,%rcx	/* fixup for C */
 	call *sys_call_table(,%rax,8)
@@ -735,6 +745,40 @@ ENTRY(stub_rt_sigreturn)
 	CFI_ENDPROC
 END(stub_rt_sigreturn)
 
+#ifdef CONFIG_X86_X32_ABI
+	PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx
+
+ENTRY(stub_x32_rt_sigreturn)
+	CFI_STARTPROC
+	addq $8, %rsp
+	PARTIAL_FRAME 0
+	SAVE_REST
+	movq %rsp,%rdi
+	FIXUP_TOP_OF_STACK %r11
+	call sys32_x32_rt_sigreturn
+	movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
+	RESTORE_REST
+	jmp int_ret_from_sys_call
+	CFI_ENDPROC
+END(stub_x32_rt_sigreturn)
+
+ENTRY(stub_x32_execve)
+	CFI_STARTPROC
+	addq $8, %rsp
+	PARTIAL_FRAME 0
+	SAVE_REST
+	FIXUP_TOP_OF_STACK %r11
+	movq %rsp, %rcx
+	call sys32_execve
+	RESTORE_TOP_OF_STACK %r11
+	movq %rax,RAX(%rsp)
+	RESTORE_REST
+	jmp int_ret_from_sys_call
+	CFI_ENDPROC
+END(stub_x32_execve)
+
+#endif
+
 /*
  * Build the entry stubs and pointer table with some assembler magic.
  * We pack 7 stubs into a single 32-byte chunk, which will fit in a
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 2b154da0b6d3..733ca39f367e 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -258,7 +258,9 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
 {
 	start_thread_common(regs, new_ip, new_sp,
-			    __USER32_CS, __USER32_DS, __USER32_DS);
+			    test_thread_flag(TIF_X32)
+			    ? __USER_CS : __USER32_CS,
+			    __USER_DS, __USER_DS);
 }
 #endif
 
@@ -381,6 +383,8 @@ void set_personality_64bit(void)
 
 	/* Make sure to be in 64bit mode */
 	clear_thread_flag(TIF_IA32);
+	clear_thread_flag(TIF_ADDR32);
+	clear_thread_flag(TIF_X32);
 
 	/* Ensure the corresponding mm is not marked. */
 	if (current->mm)
@@ -393,20 +397,31 @@ void set_personality_64bit(void)
 	current->personality &= ~READ_IMPLIES_EXEC;
 }
 
-void set_personality_ia32(void)
+void set_personality_ia32(bool x32)
 {
 	/* inherit personality from parent */
 
 	/* Make sure to be in 32bit mode */
-	set_thread_flag(TIF_IA32);
-	current->personality |= force_personality32;
+	set_thread_flag(TIF_ADDR32);
 
 	/* Mark the associated mm as containing 32-bit tasks. */
 	if (current->mm)
 		current->mm->context.ia32_compat = 1;
 
-	/* Prepare the first "return" to user space */
-	current_thread_info()->status |= TS_COMPAT;
+	if (x32) {
+		clear_thread_flag(TIF_IA32);
+		set_thread_flag(TIF_X32);
+		current->personality &= ~READ_IMPLIES_EXEC;
+		/* is_compat_task() uses the presence of the x32
+		   syscall bit flag to determine compat status */
+		current_thread_info()->status &= ~TS_COMPAT;
+	} else {
+		set_thread_flag(TIF_IA32);
+		clear_thread_flag(TIF_X32);
+		current->personality |= force_personality32;
+		/* Prepare the first "return" to user space */
+		current_thread_info()->status |= TS_COMPAT;
+	}
 }
 
 unsigned long get_wchan(struct task_struct *p)
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 8a634c887652..284c35ae60e4 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -1130,6 +1130,100 @@ static int genregs32_set(struct task_struct *target,
 	return ret;
 }
 
+#ifdef CONFIG_X86_X32_ABI
+static long x32_arch_ptrace(struct task_struct *child,
+			    compat_long_t request, compat_ulong_t caddr,
+			    compat_ulong_t cdata)
+{
+	unsigned long addr = caddr;
+	unsigned long data = cdata;
+	void __user *datap = compat_ptr(data);
+	int ret;
+
+	switch (request) {
+	/* Read 32bits at location addr in the USER area.  Only allow
+	   to return the lower 32bits of segment and debug registers.  */
+	case PTRACE_PEEKUSR: {
+		u32 tmp;
+
+		ret = -EIO;
+		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
+		    addr < offsetof(struct user_regs_struct, cs))
+			break;
+
+		tmp = 0;  /* Default return condition */
+		if (addr < sizeof(struct user_regs_struct))
+			tmp = getreg(child, addr);
+		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
+			 addr <= offsetof(struct user, u_debugreg[7])) {
+			addr -= offsetof(struct user, u_debugreg[0]);
+			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
+		}
+		ret = put_user(tmp, (__u32 __user *)datap);
+		break;
+	}
+
+	/* Write the word at location addr in the USER area.  Only allow
+	   to update segment and debug registers with the upper 32bits
+	   zero-extended. */
+	case PTRACE_POKEUSR:
+		ret = -EIO;
+		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
+		    addr < offsetof(struct user_regs_struct, cs))
+			break;
+
+		if (addr < sizeof(struct user_regs_struct))
+			ret = putreg(child, addr, data);
+		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
+			 addr <= offsetof(struct user, u_debugreg[7])) {
+			addr -= offsetof(struct user, u_debugreg[0]);
+			ret = ptrace_set_debugreg(child,
+						  addr / sizeof(data), data);
+		}
+		break;
+
+	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
+		return copy_regset_to_user(child,
+					   task_user_regset_view(current),
+					   REGSET_GENERAL,
+					   0, sizeof(struct user_regs_struct),
+					   datap);
+
+	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
+		return copy_regset_from_user(child,
+					     task_user_regset_view(current),
+					     REGSET_GENERAL,
+					     0, sizeof(struct user_regs_struct),
+					     datap);
+
+	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
+		return copy_regset_to_user(child,
+					   task_user_regset_view(current),
+					   REGSET_FP,
+					   0, sizeof(struct user_i387_struct),
+					   datap);
+
+	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
+		return copy_regset_from_user(child,
+					     task_user_regset_view(current),
+					     REGSET_FP,
+					     0, sizeof(struct user_i387_struct),
+					     datap);
+
+	/* normal 64bit interface to access TLS data.
+	   Works just like arch_prctl, except that the arguments
+	   are reversed. */
+	case PTRACE_ARCH_PRCTL:
+		return do_arch_prctl(child, data, addr);
+
+	default:
+		return compat_ptrace_request(child, request, addr, data);
+	}
+
+	return ret;
+}
+#endif
+
 long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 			compat_ulong_t caddr, compat_ulong_t cdata)
 {
@@ -1139,6 +1233,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 	int ret;
 	__u32 val;
 
+#ifdef CONFIG_X86_X32_ABI
+	if (!is_ia32_task())
+		return x32_arch_ptrace(child, request, caddr, cdata);
+#endif
+
 	switch (request) {
 	case PTRACE_PEEKUSR:
 		ret = getreg32(child, addr, &val);
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 25edcfc9ba5b..5134e17855f0 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -10,10 +10,8 @@
 #include <linux/mm.h>
 #include <linux/smp.h>
 #include <linux/kernel.h>
-#include <linux/signal.h>
 #include <linux/errno.h>
 #include <linux/wait.h>
-#include <linux/ptrace.h>
 #include <linux/tracehook.h>
 #include <linux/unistd.h>
 #include <linux/stddef.h>
@@ -27,10 +25,12 @@
 #include <asm/fpu-internal.h>
 #include <asm/vdso.h>
 #include <asm/mce.h>
+#include <asm/sighandling.h>
 
 #ifdef CONFIG_X86_64
 #include <asm/proto.h>
 #include <asm/ia32_unistd.h>
+#include <asm/sys_ia32.h>
 #endif /* CONFIG_X86_64 */
 
 #include <asm/syscall.h>
@@ -38,13 +38,6 @@
 
 #include <asm/sigframe.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
-#define __FIX_EFLAGS	(X86_EFLAGS_AC | X86_EFLAGS_OF | \
-			 X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \
-			 X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \
-			 X86_EFLAGS_CF)
-
 #ifdef CONFIG_X86_32
 # define FIX_EFLAGS	(__FIX_EFLAGS | X86_EFLAGS_RF)
 #else
@@ -69,9 +62,8 @@
 	regs->seg = GET_SEG(seg) | 3;			\
 } while (0)
 
-static int
-restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
-		   unsigned long *pax)
+int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
+		       unsigned long *pax)
 {
 	void __user *buf;
 	unsigned int tmpflags;
@@ -126,9 +118,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
 	return err;
 }
 
-static int
-setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
-		 struct pt_regs *regs, unsigned long mask)
+int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
+		     struct pt_regs *regs, unsigned long mask)
 {
 	int err = 0;
 
@@ -643,6 +634,16 @@ static int signr_convert(int sig)
 #define is_ia32	0
 #endif /* CONFIG_IA32_EMULATION */
 
+#ifdef CONFIG_X86_X32_ABI
+#define is_x32	test_thread_flag(TIF_X32)
+
+static int x32_setup_rt_frame(int sig, struct k_sigaction *ka,
+			      siginfo_t *info, compat_sigset_t *set,
+			      struct pt_regs *regs);
+#else /* !CONFIG_X86_X32_ABI */
+#define is_x32	0
+#endif /* CONFIG_X86_X32_ABI */
+
 int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		sigset_t *set, struct pt_regs *regs);
 int ia32_setup_frame(int sig, struct k_sigaction *ka,
@@ -667,8 +668,14 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 			ret = ia32_setup_rt_frame(usig, ka, info, set, regs);
 		else
 			ret = ia32_setup_frame(usig, ka, set, regs);
-	} else
+#ifdef CONFIG_X86_X32_ABI
+	} else if (is_x32) {
+		ret = x32_setup_rt_frame(usig, ka, info,
+					 (compat_sigset_t *)set, regs);
+#endif
+	} else {
 		ret = __setup_rt_frame(sig, ka, info, set, regs);
+	}
 
 	if (ret) {
 		force_sigsegv(sig, current);
@@ -851,3 +858,102 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
 
 	force_sig(SIGSEGV, me);
 }
+
+#ifdef CONFIG_X86_X32_ABI
+static int x32_setup_rt_frame(int sig, struct k_sigaction *ka,
+			      siginfo_t *info, compat_sigset_t *set,
+			      struct pt_regs *regs)
+{
+	struct rt_sigframe_x32 __user *frame;
+	void __user *restorer;
+	int err = 0;
+	void __user *fpstate = NULL;
+
+	frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
+
+	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+		return -EFAULT;
+
+	if (ka->sa.sa_flags & SA_SIGINFO) {
+		if (copy_siginfo_to_user32(&frame->info, info))
+			return -EFAULT;
+	}
+
+	put_user_try {
+		/* Create the ucontext.  */
+		if (cpu_has_xsave)
+			put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
+		else
+			put_user_ex(0, &frame->uc.uc_flags);
+		put_user_ex(0, &frame->uc.uc_link);
+		put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+		put_user_ex(sas_ss_flags(regs->sp),
+			    &frame->uc.uc_stack.ss_flags);
+		put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+		put_user_ex(0, &frame->uc.uc__pad0);
+		err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
+					regs, set->sig[0]);
+		err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+		if (ka->sa.sa_flags & SA_RESTORER) {
+			restorer = ka->sa.sa_restorer;
+		} else {
+			/* could use a vstub here */
+			restorer = NULL;
+			err |= -EFAULT;
+		}
+		put_user_ex(restorer, &frame->pretcode);
+	} put_user_catch(err);
+
+	if (err)
+		return -EFAULT;
+
+	/* Set up registers for signal handler */
+	regs->sp = (unsigned long) frame;
+	regs->ip = (unsigned long) ka->sa.sa_handler;
+
+	/* We use the x32 calling convention here... */
+	regs->di = sig;
+	regs->si = (unsigned long) &frame->info;
+	regs->dx = (unsigned long) &frame->uc;
+
+	loadsegment(ds, __USER_DS);
+	loadsegment(es, __USER_DS);
+
+	regs->cs = __USER_CS;
+	regs->ss = __USER_DS;
+
+	return 0;
+}
+
+asmlinkage long sys32_x32_rt_sigreturn(struct pt_regs *regs)
+{
+	struct rt_sigframe_x32 __user *frame;
+	sigset_t set;
+	unsigned long ax;
+	struct pt_regs tregs;
+
+	frame = (struct rt_sigframe_x32 __user *)(regs->sp - 8);
+
+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+		goto badframe;
+	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+		goto badframe;
+
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	set_current_blocked(&set);
+
+	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
+		goto badframe;
+
+	tregs = *regs;
+	if (sys32_sigaltstack(&frame->uc.uc_stack, NULL, &tregs) == -EFAULT)
+		goto badframe;
+
+	return ax;
+
+badframe:
+	signal_fault(regs, frame, "x32 rt_sigreturn");
+	return 0;
+}
+#endif
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index ef59642ff1bf..b4d3c3927dd8 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -98,7 +98,7 @@ out:
 static void find_start_end(unsigned long flags, unsigned long *begin,
 			   unsigned long *end)
 {
-	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
+	if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
 		unsigned long new_begin;
 		/* This is usually used needed to map code in small
 		   model, so it needs to be in the first 31bit. Limit
@@ -144,7 +144,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
-	if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
+	if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
 	    && len <= mm->cached_hole_size) {
 		mm->cached_hole_size = 0;
 		mm->free_area_cache = begin;
@@ -205,7 +205,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		return addr;
 
 	/* for MAP_32BIT mappings we force the legact mmap base */
-	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
+	if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
 		goto bottomup;
 
 	/* requesting a specific address */
diff --git a/arch/x86/kernel/syscall_64.c b/arch/x86/kernel/syscall_64.c
index 7ac7943be02c..5c7f8c20da74 100644
--- a/arch/x86/kernel/syscall_64.c
+++ b/arch/x86/kernel/syscall_64.c
@@ -5,6 +5,14 @@
 #include <linux/cache.h>
 #include <asm/asm-offsets.h>
 
+#define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
+
+#ifdef CONFIG_X86_X32_ABI
+# define __SYSCALL_X32(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
+#else
+# define __SYSCALL_X32(nr, sym, compat) /* nothing */
+#endif
+
 #define __SYSCALL_64(nr, sym, compat) extern asmlinkage void sym(void) ;
 #include <asm/syscalls_64.h>
 #undef __SYSCALL_64