author	Ingo Molnar <mingo@elte.hu>	2012-03-13 11:32:54 -0400
committer	Ingo Molnar <mingo@elte.hu>	2012-03-13 11:33:03 -0400
commit	ef15eda98217f5183f457e7a2de8b79555ef908b (patch)
tree	f8f22b48f7bb237c9aa6646175f3e17eeac4af0e /arch/x86/kernel
parent	5cb4ac3a583d4ee18c8682ab857e093c4a0d0895 (diff)
parent	ef334a20d84f52407a8a2afd02ddeaecbef0ad3d (diff)

Merge branch 'x86/cleanups' into perf/uprobes

Merge reason: We want to merge a dependent patch.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/asm-offsets_64.c       |   6
-rw-r--r--  arch/x86/kernel/cpu/common.c           |   5
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c  |  44
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd.c   |   2
-rw-r--r--  arch/x86/kernel/cpu/mtrr/if.c          |  10
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c       |   7
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h       |   8
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd.c   |  37
-rw-r--r--  arch/x86/kernel/dumpstack.c            |   2
-rw-r--r--  arch/x86/kernel/entry_64.S             |  53
-rw-r--r--  arch/x86/kernel/irqinit.c              |   2
-rw-r--r--  arch/x86/kernel/microcode_amd.c        |   1
-rw-r--r--  arch/x86/kernel/process_32.c           |  26
-rw-r--r--  arch/x86/kernel/process_64.c           |  57
-rw-r--r--  arch/x86/kernel/ptrace.c               | 102
-rw-r--r--  arch/x86/kernel/signal.c               | 140
-rw-r--r--  arch/x86/kernel/sys_x86_64.c           |   6
-rw-r--r--  arch/x86/kernel/syscall_64.c           |   8
-rw-r--r--  arch/x86/kernel/traps.c                | 176
-rw-r--r--  arch/x86/kernel/vm86_32.c              |   2
-rw-r--r--  arch/x86/kernel/vsyscall_64.c          |   2
-rw-r--r--  arch/x86/kernel/xsave.c                |  12
22 files changed, 512 insertions(+), 196 deletions(-)
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 834e897b1e2..1b4754f82ba 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -1,6 +1,12 @@
 #include <asm/ia32.h>
 
 #define __SYSCALL_64(nr, sym, compat) [nr] = 1,
+#define __SYSCALL_COMMON(nr, sym, compat) [nr] = 1,
+#ifdef CONFIG_X86_X32_ABI
+# define __SYSCALL_X32(nr, sym, compat) [nr] = 1,
+#else
+# define __SYSCALL_X32(nr, sym, compat) /* nothing */
+#endif
 static char syscalls_64[] = {
 #include <asm/syscalls_64.h>
 };
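
A note on the hunk above: the `[nr] = 1` designated initializers are a sizing trick. Every __SYSCALL_* line contributes one array slot, so sizeof(syscalls_64) comes out as the highest defined syscall number plus one, which the asm-offsets machinery can then export as __NR_syscall_max. A minimal standalone sketch of the same idea (the table contents here are invented, not the real syscall list):

/* sketch.c - a designated initializer sizes an array by its largest index */
#include <stdio.h>

#define SYSCALL(nr, sym) [nr] = 1,

static char syscalls[] = {
        SYSCALL(0, sys_read)
        SYSCALL(1, sys_write)
        SYSCALL(511, sys_hypothetical_last)     /* largest index wins */
};

int main(void)
{
        printf("%zu\n", sizeof(syscalls));      /* prints 512 */
        return 0;
}
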
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index d43cad74f16..c0f7d68d318 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1044,6 +1044,9 @@ DEFINE_PER_CPU(char *, irq_stack_ptr) =
 
 DEFINE_PER_CPU(unsigned int, irq_count) = -1;
 
+DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
+EXPORT_PER_CPU_SYMBOL(fpu_owner_task);
+
 /*
  * Special IST stacks which the CPU switches to when it calls
  * an IST-marked descriptor entry. Up to 7 stacks (hardware
@@ -1111,6 +1114,8 @@ void debug_stack_reset(void)
 
 DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
 EXPORT_PER_CPU_SYMBOL(current_task);
+DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
+EXPORT_PER_CPU_SYMBOL(fpu_owner_task);
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 6b45e5e7a90..73d08ed98a6 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -326,8 +326,7 @@ static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
         l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
 }
 
-static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf,
-                                        int index)
+static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
 {
         int node;
 
@@ -725,14 +724,16 @@ static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
 #define CPUID4_INFO_IDX(x, y)   (&((per_cpu(ici_cpuid4_info, x))[y]))
 
 #ifdef CONFIG_SMP
-static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
+
+static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
 {
-        struct _cpuid4_info *this_leaf, *sibling_leaf;
-        unsigned long num_threads_sharing;
-        int index_msb, i, sibling;
+        struct _cpuid4_info *this_leaf;
+        int ret, i, sibling;
         struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-        if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
+        ret = 0;
+        if (index == 3) {
+                ret = 1;
                 for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
                         if (!per_cpu(ici_cpuid4_info, i))
                                 continue;
@@ -743,8 +744,35 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
                                 set_bit(sibling, this_leaf->shared_cpu_map);
                         }
                 }
-                return;
+        } else if ((c->x86 == 0x15) && ((index == 1) || (index == 2))) {
+                ret = 1;
+                for_each_cpu(i, cpu_sibling_mask(cpu)) {
+                        if (!per_cpu(ici_cpuid4_info, i))
+                                continue;
+                        this_leaf = CPUID4_INFO_IDX(i, index);
+                        for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
+                                if (!cpu_online(sibling))
+                                        continue;
+                                set_bit(sibling, this_leaf->shared_cpu_map);
+                        }
+                }
         }
+
+        return ret;
+}
+
+static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
+{
+        struct _cpuid4_info *this_leaf, *sibling_leaf;
+        unsigned long num_threads_sharing;
+        int index_msb, i;
+        struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+        if (c->x86_vendor == X86_VENDOR_AMD) {
+                if (cache_shared_amd_cpu_map_setup(cpu, index))
+                        return;
+        }
+
         this_leaf = CPUID4_INFO_IDX(cpu, index);
         num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 786e76a8632..e4eeaaf58a4 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -528,6 +528,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
         sprintf(name, "threshold_bank%i", bank);
 
+#ifdef CONFIG_SMP
         if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {   /* symlink */
                 i = cpumask_first(cpu_llc_shared_mask(cpu));
 
@@ -553,6 +554,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
                 goto out;
         }
+#endif
 
         b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
         if (!b) {
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index 79289632cb2..a041e094b8b 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -167,6 +167,7 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
 {
         int err = 0;
         mtrr_type type;
+        unsigned long base;
         unsigned long size;
         struct mtrr_sentry sentry;
         struct mtrr_gentry gentry;
@@ -267,14 +268,14 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
 #endif
                 if (gentry.regnum >= num_var_ranges)
                         return -EINVAL;
-                mtrr_if->get(gentry.regnum, &gentry.base, &size, &type);
+                mtrr_if->get(gentry.regnum, &base, &size, &type);
 
                 /* Hide entries that go above 4GB */
-                if (gentry.base + size - 1 >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT))
+                if (base + size - 1 >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT))
                     || size >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT)))
                         gentry.base = gentry.size = gentry.type = 0;
                 else {
-                        gentry.base <<= PAGE_SHIFT;
+                        gentry.base = base << PAGE_SHIFT;
                         gentry.size = size << PAGE_SHIFT;
                         gentry.type = type;
                 }
@@ -321,11 +322,12 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
 #endif
                 if (gentry.regnum >= num_var_ranges)
                         return -EINVAL;
-                mtrr_if->get(gentry.regnum, &gentry.base, &size, &type);
+                mtrr_if->get(gentry.regnum, &base, &size, &type);
                 /* Hide entries that would overflow */
                 if (size != (__typeof__(gentry.size))size)
                         gentry.base = gentry.size = gentry.type = 0;
                 else {
+                        gentry.base = base;
                         gentry.size = size;
                         gentry.type = type;
                 }
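
For reference, the 4GB guard above works because the ioctl's gentry.size field is narrower than the kernel's page-frame arithmetic: assuming a 32-bit field and PAGE_SHIFT == 12 (both stated here as assumptions about the surrounding definitions), anything at or beyond 1 << 20 page frames cannot be expressed in bytes in the 32-bit struct. A small worked example of that bound:

/* bound.c - why entries reaching 4GB are hidden */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
        /* 1 << (32 - 12) == 1 << 20 pages */
        uint64_t limit_pages = 1ULL << (8 * sizeof(uint32_t) - PAGE_SHIFT);

        /* 2^20 pages of 4KiB each == 4GiB == one past UINT32_MAX bytes */
        printf("limit: %llu pages, %llu bytes\n",
               (unsigned long long)limit_pages,
               (unsigned long long)(limit_pages << PAGE_SHIFT));
        return 0;
}
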
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 3c44b712380..1c52bdbb9b8 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -29,7 +29,6 @@
 #include <asm/apic.h>
 #include <asm/stacktrace.h>
 #include <asm/nmi.h>
-#include <asm/compat.h>
 #include <asm/smp.h>
 #include <asm/alternative.h>
 #include <asm/timer.h>
@@ -988,6 +987,9 @@ static void x86_pmu_start(struct perf_event *event, int flags)
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
         int idx = event->hw.idx;
 
+        if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+                return;
+
         if (WARN_ON_ONCE(idx == -1))
                 return;
 
@@ -1674,6 +1676,9 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 }
 
 #ifdef CONFIG_COMPAT
+
+#include <asm/compat.h>
+
 static inline int
 perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 {
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 513d617b93c..82db83b5c3b 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -147,7 +147,9 @@ struct cpu_hw_events {
         /*
          * AMD specific bits
          */
         struct amd_nb           *amd_nb;
+        /* Inverted mask of bits to clear in the perf_ctr ctrl registers */
+        u64                     perf_ctr_virt_mask;
 
         void                    *kfree_on_online;
 };
@@ -425,9 +427,11 @@ void x86_pmu_disable_all(void);
 static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
                                           u64 enable_mask)
 {
+        u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
+
         if (hwc->extra_reg.reg)
                 wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
-        wrmsrl(hwc->config_base, hwc->config | enable_mask);
+        wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
 }
 
 void x86_pmu_enable_all(int added);
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 0397b23be8e..67250a52430 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -1,4 +1,5 @@
 #include <linux/perf_event.h>
+#include <linux/export.h>
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -357,7 +358,9 @@ static void amd_pmu_cpu_starting(int cpu)
         struct amd_nb *nb;
         int i, nb_id;
 
-        if (boot_cpu_data.x86_max_cores < 2)
+        cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
+
+        if (boot_cpu_data.x86_max_cores < 2 || boot_cpu_data.x86 == 0x15)
                 return;
 
         nb_id = amd_get_nb_id(cpu);
@@ -587,9 +590,9 @@ static __initconst const struct x86_pmu amd_pmu_f15h = {
         .put_event_constraints  = amd_put_event_constraints,
 
         .cpu_prepare            = amd_pmu_cpu_prepare,
-        .cpu_starting           = amd_pmu_cpu_starting,
         .cpu_dead               = amd_pmu_cpu_dead,
 #endif
+        .cpu_starting           = amd_pmu_cpu_starting,
 };
 
 __init int amd_pmu_init(void)
@@ -621,3 +624,33 @@ __init int amd_pmu_init(void)
 
         return 0;
 }
+
+void amd_pmu_enable_virt(void)
+{
+        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+        cpuc->perf_ctr_virt_mask = 0;
+
+        /* Reload all events */
+        x86_pmu_disable_all();
+        x86_pmu_enable_all(0);
+}
+EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
+
+void amd_pmu_disable_virt(void)
+{
+        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+        /*
+         * We only mask out the Host-only bit so that host-only counting works
+         * when SVM is disabled. If someone sets up a guest-only counter when
+         * SVM is disabled the Guest-only bits still gets set and the counter
+         * will not count anything.
+         */
+        cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
+
+        /* Reload all events */
+        x86_pmu_disable_all();
+        x86_pmu_enable_all(0);
+}
+EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
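
The host-only machinery above hinges on one detail: AMD_PERFMON_EVENTSEL_HOSTONLY is a bit in the PERF_CTL event-select MSR (bit 41 on AMD parts, stated here as an assumption since the header defining it is not part of this diff), and __x86_pmu_enable_event() clears whatever perf_ctr_virt_mask holds before the MSR write. A simplified standalone model of that interaction:

/* virtmask.c - simplified model of perf_ctr_virt_mask (bit values assumed) */
#include <stdio.h>
#include <stdint.h>

#define EVENTSEL_ENABLE   (1ULL << 22)
#define EVENTSEL_HOSTONLY (1ULL << 41)  /* assumed bit position */

static uint64_t perf_ctr_virt_mask;     /* per-CPU in the kernel */

static uint64_t msr_value(uint64_t config)
{
        /* mirrors: (hwc->config | enable_mask) & ~disable_mask */
        return (config | EVENTSEL_ENABLE) & ~perf_ctr_virt_mask;
}

int main(void)
{
        perf_ctr_virt_mask = EVENTSEL_HOSTONLY;  /* SVM disabled */
        /* a host-only request still counts: the bit is stripped */
        printf("%#llx\n", (unsigned long long)msr_value(EVENTSEL_HOSTONLY));

        perf_ctr_virt_mask = 0;                  /* amd_pmu_enable_virt() */
        printf("%#llx\n", (unsigned long long)msr_value(EVENTSEL_HOSTONLY));
        return 0;
}
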
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 4025fe4f928..28f98706b08 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -265,7 +265,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
 #endif
         printk("\n");
         if (notify_die(DIE_OOPS, str, regs, err,
-                        current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
+                        current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
                 return 1;
 
         show_registers(regs);
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 3fe8239fd8f..2925e14fb1d 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -482,7 +482,12 @@ GLOBAL(system_call_after_swapgs)
         testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
         jnz tracesys
 system_call_fastpath:
+#if __SYSCALL_MASK == ~0
         cmpq $__NR_syscall_max,%rax
+#else
+        andl $__SYSCALL_MASK,%eax
+        cmpl $__NR_syscall_max,%eax
+#endif
         ja badsys
         movq %r10,%rcx
         call *sys_call_table(,%rax,8)  # XXX:    rip relative
@@ -596,7 +601,12 @@ tracesys:
          */
         LOAD_ARGS ARGOFFSET, 1
         RESTORE_REST
+#if __SYSCALL_MASK == ~0
         cmpq $__NR_syscall_max,%rax
+#else
+        andl $__SYSCALL_MASK,%eax
+        cmpl $__NR_syscall_max,%eax
+#endif
         ja   int_ret_from_sys_call      /* RAX(%rsp) set to -ENOSYS above */
         movq %r10,%rcx  /* fixup for C */
         call *sys_call_table(,%rax,8)
@@ -736,6 +746,40 @@ ENTRY(stub_rt_sigreturn)
         CFI_ENDPROC
 END(stub_rt_sigreturn)
 
+#ifdef CONFIG_X86_X32_ABI
+        PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx
+
+ENTRY(stub_x32_rt_sigreturn)
+        CFI_STARTPROC
+        addq $8, %rsp
+        PARTIAL_FRAME 0
+        SAVE_REST
+        movq %rsp,%rdi
+        FIXUP_TOP_OF_STACK %r11
+        call sys32_x32_rt_sigreturn
+        movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
+        RESTORE_REST
+        jmp int_ret_from_sys_call
+        CFI_ENDPROC
+END(stub_x32_rt_sigreturn)
+
+ENTRY(stub_x32_execve)
+        CFI_STARTPROC
+        addq $8, %rsp
+        PARTIAL_FRAME 0
+        SAVE_REST
+        FIXUP_TOP_OF_STACK %r11
+        movq %rsp, %rcx
+        call sys32_execve
+        RESTORE_TOP_OF_STACK %r11
+        movq %rax,RAX(%rsp)
+        RESTORE_REST
+        jmp int_ret_from_sys_call
+        CFI_ENDPROC
+END(stub_x32_execve)
+
+#endif
+
 /*
  * Build the entry stubs and pointer table with some assembler magic.
  * We pack 7 stubs into a single 32-byte chunk, which will fit in a
@@ -1532,10 +1576,17 @@ ENTRY(nmi)
         pushq_cfi %rdx
 
         /*
+         * If %cs was not the kernel segment, then the NMI triggered in user
+         * space, which means it is definitely not nested.
+         */
+        cmpl $__KERNEL_CS, 16(%rsp)
+        jne first_nmi
+
+        /*
          * Check the special variable on the stack to see if NMIs are
          * executing.
          */
-        cmp $1, -8(%rsp)
+        cmpl $1, -8(%rsp)
         je nested_nmi
 
         /*
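
The __SYSCALL_MASK conditionals earlier in this file exist because x32 binaries invoke system calls with a marker bit set in %rax (0x40000000 by the x32 convention; treat the exact value as an assumption here, since unistd.h is not part of this diff). Masking strips that bit so native and x32 calls index the same table. In C terms, the fastpath does roughly this:

/* dispatch.c - C model of the masked fastpath (values illustrative) */
#include <stdio.h>

#define X32_SYSCALL_BIT 0x40000000UL
#define SYSCALL_MASK    (~X32_SYSCALL_BIT)
#define NR_SYSCALL_MAX  511UL

static long dispatch(unsigned long nr)
{
        nr &= SYSCALL_MASK;             /* andl $__SYSCALL_MASK,%eax */
        if (nr > NR_SYSCALL_MAX)        /* cmpl $__NR_syscall_max,%eax; ja badsys */
                return -38;             /* -ENOSYS */
        return (long)nr;                /* call *sys_call_table(,%rax,8) */
}

int main(void)
{
        printf("%ld\n", dispatch(1));                           /* native */
        printf("%ld\n", dispatch(X32_SYSCALL_BIT | 1));         /* same slot, x32 */
        return 0;
}
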
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 313fb5cddbc..7b77062dea1 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -61,7 +61,7 @@ static irqreturn_t math_error_irq(int cpl, void *dev_id)
         outb(0, 0xF0);
         if (ignore_fpu_irq || !boot_cpu_data.hard_math)
                 return IRQ_NONE;
-        math_error(get_irq_regs(), 0, 16);
+        math_error(get_irq_regs(), 0, X86_TRAP_MF);
         return IRQ_HANDLED;
 }
 
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index ac0417be913..73465aab28f 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -360,7 +360,6 @@ out:
 static enum ucode_state
 request_microcode_user(int cpu, const void __user *buf, size_t size)
 {
-        pr_info("AMD microcode update via /dev/cpu/microcode not supported\n");
         return UCODE_ERROR;
 }
 
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 485204f58cd..c08d1ff12b7 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -214,6 +214,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 
         task_user_gs(p) = get_user_gs(regs);
 
+        p->fpu_counter = 0;
         p->thread.io_bitmap_ptr = NULL;
         tsk = current;
         err = -ENOMEM;
@@ -299,22 +300,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
                                  *next = &next_p->thread;
         int cpu = smp_processor_id();
         struct tss_struct *tss = &per_cpu(init_tss, cpu);
-        bool preload_fpu;
+        fpu_switch_t fpu;
 
         /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
-        /*
-         * If the task has used fpu the last 5 timeslices, just do a full
-         * restore of the math state immediately to avoid the trap; the
-         * chances of needing FPU soon are obviously high now
-         */
-        preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
-
-        __unlazy_fpu(prev_p);
-
-        /* we're going to use this soon, after a few expensive things */
-        if (preload_fpu)
-                prefetch(next->fpu.state);
+        fpu = switch_fpu_prepare(prev_p, next_p, cpu);
 
         /*
          * Reload esp0.
@@ -354,11 +344,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
                        task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
                 __switch_to_xtra(prev_p, next_p, tss);
 
-        /* If we're going to preload the fpu context, make sure clts
-           is run while we're batching the cpu state updates. */
-        if (preload_fpu)
-                clts();
-
         /*
          * Leave lazy mode, flushing any hypercalls made here.
          * This must be done before restoring TLS segments so
@@ -368,15 +353,14 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
          */
         arch_end_context_switch(next_p);
 
-        if (preload_fpu)
-                __math_state_restore();
-
         /*
          * Restore %gs if needed (which is common)
          */
         if (prev->gs | next->gs)
                 lazy_load_gs(next->gs);
 
+        switch_fpu_finish(next_p, fpu);
+
         percpu_write(current_task, next_p);
 
         return prev_p;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 9b9fe4a85c8..550e77b1b94 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -286,6 +286,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 
         set_tsk_thread_flag(p, TIF_FORK);
 
+        p->fpu_counter = 0;
         p->thread.io_bitmap_ptr = NULL;
 
         savesegment(gs, p->thread.gsindex);
@@ -364,7 +365,9 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
 {
         start_thread_common(regs, new_ip, new_sp,
-                            __USER32_CS, __USER32_DS, __USER32_DS);
+                            test_thread_flag(TIF_X32)
+                            ? __USER_CS : __USER32_CS,
+                            __USER_DS, __USER_DS);
 }
 #endif
 
@@ -386,18 +389,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         int cpu = smp_processor_id();
         struct tss_struct *tss = &per_cpu(init_tss, cpu);
         unsigned fsindex, gsindex;
-        bool preload_fpu;
+        fpu_switch_t fpu;
 
-        /*
-         * If the task has used fpu the last 5 timeslices, just do a full
-         * restore of the math state immediately to avoid the trap; the
-         * chances of needing FPU soon are obviously high now
-         */
-        preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
-
-        /* we're going to use this soon, after a few expensive things */
-        if (preload_fpu)
-                prefetch(next->fpu.state);
+        fpu = switch_fpu_prepare(prev_p, next_p, cpu);
 
         /*
          * Reload esp0, LDT and the page table pointer:
@@ -427,13 +421,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
         load_TLS(next, cpu);
 
-        /* Must be after DS reload */
-        __unlazy_fpu(prev_p);
-
-        /* Make sure cpu is ready for new context */
-        if (preload_fpu)
-                clts();
-
         /*
          * Leave lazy mode, flushing any hypercalls made here.
          * This must be done before restoring TLS segments so
@@ -474,6 +461,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
                 wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
         prev->gsindex = gsindex;
 
+        switch_fpu_finish(next_p, fpu);
+
         /*
          * Switch the PDA and FPU contexts.
          */
@@ -492,13 +481,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
                      task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
                 __switch_to_xtra(prev_p, next_p, tss);
 
-        /*
-         * Preload the FPU context, now that we've determined that the
-         * task is likely to be using it.
-         */
-        if (preload_fpu)
-                __math_state_restore();
-
         return prev_p;
 }
 
@@ -508,6 +490,8 @@ void set_personality_64bit(void)
 
         /* Make sure to be in 64bit mode */
         clear_thread_flag(TIF_IA32);
+        clear_thread_flag(TIF_ADDR32);
+        clear_thread_flag(TIF_X32);
 
         /* Ensure the corresponding mm is not marked. */
         if (current->mm)
@@ -520,20 +504,31 @@ void set_personality_64bit(void)
         current->personality &= ~READ_IMPLIES_EXEC;
 }
 
-void set_personality_ia32(void)
+void set_personality_ia32(bool x32)
 {
         /* inherit personality from parent */
 
         /* Make sure to be in 32bit mode */
-        set_thread_flag(TIF_IA32);
-        current->personality |= force_personality32;
+        set_thread_flag(TIF_ADDR32);
 
         /* Mark the associated mm as containing 32-bit tasks. */
         if (current->mm)
                 current->mm->context.ia32_compat = 1;
 
-        /* Prepare the first "return" to user space */
-        current_thread_info()->status |= TS_COMPAT;
+        if (x32) {
+                clear_thread_flag(TIF_IA32);
+                set_thread_flag(TIF_X32);
+                current->personality &= ~READ_IMPLIES_EXEC;
+                /* is_compat_task() uses the presence of the x32
+                   syscall bit flag to determine compat status */
+                current_thread_info()->status &= ~TS_COMPAT;
+        } else {
+                set_thread_flag(TIF_IA32);
+                clear_thread_flag(TIF_X32);
+                current->personality |= force_personality32;
+                /* Prepare the first "return" to user space */
+                current_thread_info()->status |= TS_COMPAT;
+        }
 }
 
 unsigned long get_wchan(struct task_struct *p)
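
Both __switch_to() rewrites above (32-bit and 64-bit) follow the same shape: switch_fpu_prepare() runs early to save the outgoing task's state and decide whether the incoming task is a frequent-enough FPU user to preload, and switch_fpu_finish() does the actual restore once segments and TLS are in place. The real helpers live in a header not shown in this diff; the toy below only models the control flow, with all types invented:

/* fpuswitch.c - toy model of the prepare/finish split (not kernel code) */
#include <stdio.h>

struct task {
        int used_math;
        unsigned int fpu_counter;
        double fpu_state;               /* stand-in for the FPU register file */
};

typedef struct { int preload; } fpu_switch_t;

static double cpu_fpu;                  /* stand-in for the hardware registers */

static fpu_switch_t switch_fpu_prepare(struct task *prev, struct task *next)
{
        fpu_switch_t fpu;

        prev->fpu_state = cpu_fpu;      /* the old __unlazy_fpu() work */
        /* preload only for tasks that used the FPU > 5 timeslices */
        fpu.preload = next->used_math && next->fpu_counter > 5;
        return fpu;
}

static void switch_fpu_finish(struct task *next, fpu_switch_t fpu)
{
        if (fpu.preload) {
                cpu_fpu = next->fpu_state;      /* old __math_state_restore() */
                next->fpu_counter++;
        }
}

int main(void)
{
        struct task heavy = { 1, 9, 3.14 }, light = { 1, 1, 2.71 };
        fpu_switch_t fpu = switch_fpu_prepare(&light, &heavy);
        switch_fpu_finish(&heavy, fpu);
        printf("cpu fpu state now %.2f\n", cpu_fpu);
        return 0;
}
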
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 50267386b76..6fb330adc7c 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -33,6 +33,7 @@
 #include <asm/prctl.h>
 #include <asm/proto.h>
 #include <asm/hw_breakpoint.h>
+#include <asm/traps.h>
 
 #include "tls.h"
 
@@ -1130,6 +1131,100 @@ static int genregs32_set(struct task_struct *target,
         return ret;
 }
 
+#ifdef CONFIG_X86_X32_ABI
+static long x32_arch_ptrace(struct task_struct *child,
+                            compat_long_t request, compat_ulong_t caddr,
+                            compat_ulong_t cdata)
+{
+        unsigned long addr = caddr;
+        unsigned long data = cdata;
+        void __user *datap = compat_ptr(data);
+        int ret;
+
+        switch (request) {
+        /* Read 32bits at location addr in the USER area.  Only allow
+           to return the lower 32bits of segment and debug registers.  */
+        case PTRACE_PEEKUSR: {
+                u32 tmp;
+
+                ret = -EIO;
+                if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
+                    addr < offsetof(struct user_regs_struct, cs))
+                        break;
+
+                tmp = 0;  /* Default return condition */
+                if (addr < sizeof(struct user_regs_struct))
+                        tmp = getreg(child, addr);
+                else if (addr >= offsetof(struct user, u_debugreg[0]) &&
+                         addr <= offsetof(struct user, u_debugreg[7])) {
+                        addr -= offsetof(struct user, u_debugreg[0]);
+                        tmp = ptrace_get_debugreg(child, addr / sizeof(data));
+                }
+                ret = put_user(tmp, (__u32 __user *)datap);
+                break;
+        }
+
+        /* Write the word at location addr in the USER area.  Only allow
+           to update segment and debug registers with the upper 32bits
+           zero-extended. */
+        case PTRACE_POKEUSR:
+                ret = -EIO;
+                if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
+                    addr < offsetof(struct user_regs_struct, cs))
+                        break;
+
+                if (addr < sizeof(struct user_regs_struct))
+                        ret = putreg(child, addr, data);
+                else if (addr >= offsetof(struct user, u_debugreg[0]) &&
+                         addr <= offsetof(struct user, u_debugreg[7])) {
+                        addr -= offsetof(struct user, u_debugreg[0]);
+                        ret = ptrace_set_debugreg(child,
+                                                  addr / sizeof(data), data);
+                }
+                break;
+
+        case PTRACE_GETREGS:    /* Get all gp regs from the child. */
+                return copy_regset_to_user(child,
+                                           task_user_regset_view(current),
+                                           REGSET_GENERAL,
+                                           0, sizeof(struct user_regs_struct),
+                                           datap);
+
+        case PTRACE_SETREGS:    /* Set all gp regs in the child. */
+                return copy_regset_from_user(child,
+                                             task_user_regset_view(current),
+                                             REGSET_GENERAL,
+                                             0, sizeof(struct user_regs_struct),
+                                             datap);
+
+        case PTRACE_GETFPREGS:  /* Get the child FPU state. */
+                return copy_regset_to_user(child,
+                                           task_user_regset_view(current),
+                                           REGSET_FP,
+                                           0, sizeof(struct user_i387_struct),
+                                           datap);
+
+        case PTRACE_SETFPREGS:  /* Set the child FPU state. */
+                return copy_regset_from_user(child,
+                                             task_user_regset_view(current),
+                                             REGSET_FP,
+                                             0, sizeof(struct user_i387_struct),
+                                             datap);
+
+        /* normal 64bit interface to access TLS data.
+           Works just like arch_prctl, except that the arguments
+           are reversed. */
+        case PTRACE_ARCH_PRCTL:
+                return do_arch_prctl(child, data, addr);
+
+        default:
+                return compat_ptrace_request(child, request, addr, data);
+        }
+
+        return ret;
+}
+#endif
+
 long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                         compat_ulong_t caddr, compat_ulong_t cdata)
 {
@@ -1139,6 +1234,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
         int ret;
         __u32 val;
 
+#ifdef CONFIG_X86_X32_ABI
+        if (!is_ia32_task())
+                return x32_arch_ptrace(child, request, caddr, cdata);
+#endif
+
         switch (request) {
         case PTRACE_PEEKUSR:
                 ret = getreg32(child, addr, &val);
@@ -1326,7 +1426,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
                               int error_code, int si_code,
                               struct siginfo *info)
 {
-        tsk->thread.trap_no = 1;
+        tsk->thread.trap_nr = X86_TRAP_DB;
         tsk->thread.error_code = error_code;
 
         memset(info, 0, sizeof(*info));
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 46a01bdc27e..9c73acc1c86 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -10,10 +10,8 @@
 #include <linux/mm.h>
 #include <linux/smp.h>
 #include <linux/kernel.h>
-#include <linux/signal.h>
 #include <linux/errno.h>
 #include <linux/wait.h>
-#include <linux/ptrace.h>
 #include <linux/tracehook.h>
 #include <linux/unistd.h>
 #include <linux/stddef.h>
@@ -26,10 +24,12 @@
 #include <asm/i387.h>
 #include <asm/vdso.h>
 #include <asm/mce.h>
+#include <asm/sighandling.h>
 
 #ifdef CONFIG_X86_64
 #include <asm/proto.h>
 #include <asm/ia32_unistd.h>
+#include <asm/sys_ia32.h>
 #endif /* CONFIG_X86_64 */
 
 #include <asm/syscall.h>
@@ -37,13 +37,6 @@
 
 #include <asm/sigframe.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
-#define __FIX_EFLAGS    (X86_EFLAGS_AC | X86_EFLAGS_OF | \
-                         X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \
-                         X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \
-                         X86_EFLAGS_CF)
-
 #ifdef CONFIG_X86_32
 # define FIX_EFLAGS     (__FIX_EFLAGS | X86_EFLAGS_RF)
 #else
@@ -68,9 +61,8 @@
         regs->seg = GET_SEG(seg) | 3;                   \
 } while (0)
 
-static int
-restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
-                   unsigned long *pax)
+int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
+                       unsigned long *pax)
 {
         void __user *buf;
         unsigned int tmpflags;
@@ -125,9 +117,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
         return err;
 }
 
-static int
-setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
-                 struct pt_regs *regs, unsigned long mask)
+int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
+                     struct pt_regs *regs, unsigned long mask)
 {
         int err = 0;
 
@@ -159,7 +150,7 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
                 put_user_ex(regs->r15, &sc->r15);
 #endif /* CONFIG_X86_64 */
 
-                put_user_ex(current->thread.trap_no, &sc->trapno);
+                put_user_ex(current->thread.trap_nr, &sc->trapno);
                 put_user_ex(current->thread.error_code, &sc->err);
                 put_user_ex(regs->ip, &sc->ip);
 #ifdef CONFIG_X86_32
@@ -642,6 +633,16 @@ static int signr_convert(int sig)
 #define is_ia32 0
 #endif /* CONFIG_IA32_EMULATION */
 
+#ifdef CONFIG_X86_X32_ABI
+#define is_x32  test_thread_flag(TIF_X32)
+
+static int x32_setup_rt_frame(int sig, struct k_sigaction *ka,
+                              siginfo_t *info, compat_sigset_t *set,
+                              struct pt_regs *regs);
+#else /* !CONFIG_X86_X32_ABI */
+#define is_x32  0
+#endif /* CONFIG_X86_X32_ABI */
+
 int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
                 sigset_t *set, struct pt_regs *regs);
 int ia32_setup_frame(int sig, struct k_sigaction *ka,
@@ -666,8 +667,14 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
                         ret = ia32_setup_rt_frame(usig, ka, info, set, regs);
                 else
                         ret = ia32_setup_frame(usig, ka, set, regs);
-        } else
+#ifdef CONFIG_X86_X32_ABI
+        } else if (is_x32) {
+                ret = x32_setup_rt_frame(usig, ka, info,
+                                         (compat_sigset_t *)set, regs);
+#endif
+        } else {
                 ret = __setup_rt_frame(sig, ka, info, set, regs);
+        }
 
         if (ret) {
                 force_sigsegv(sig, current);
@@ -850,3 +857,102 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
 
         force_sig(SIGSEGV, me);
 }
+
+#ifdef CONFIG_X86_X32_ABI
+static int x32_setup_rt_frame(int sig, struct k_sigaction *ka,
+                              siginfo_t *info, compat_sigset_t *set,
+                              struct pt_regs *regs)
+{
+        struct rt_sigframe_x32 __user *frame;
+        void __user *restorer;
+        int err = 0;
+        void __user *fpstate = NULL;
+
+        frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
+
+        if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+                return -EFAULT;
+
+        if (ka->sa.sa_flags & SA_SIGINFO) {
+                if (copy_siginfo_to_user32(&frame->info, info))
+                        return -EFAULT;
+        }
+
+        put_user_try {
+                /* Create the ucontext.  */
+                if (cpu_has_xsave)
+                        put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
+                else
+                        put_user_ex(0, &frame->uc.uc_flags);
+                put_user_ex(0, &frame->uc.uc_link);
+                put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+                put_user_ex(sas_ss_flags(regs->sp),
+                            &frame->uc.uc_stack.ss_flags);
+                put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+                put_user_ex(0, &frame->uc.uc__pad0);
+                err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
+                                        regs, set->sig[0]);
+                err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+                if (ka->sa.sa_flags & SA_RESTORER) {
+                        restorer = ka->sa.sa_restorer;
+                } else {
+                        /* could use a vstub here */
+                        restorer = NULL;
+                        err |= -EFAULT;
+                }
+                put_user_ex(restorer, &frame->pretcode);
+        } put_user_catch(err);
+
+        if (err)
+                return -EFAULT;
+
+        /* Set up registers for signal handler */
+        regs->sp = (unsigned long) frame;
+        regs->ip = (unsigned long) ka->sa.sa_handler;
+
+        /* We use the x32 calling convention here... */
+        regs->di = sig;
+        regs->si = (unsigned long) &frame->info;
+        regs->dx = (unsigned long) &frame->uc;
+
+        loadsegment(ds, __USER_DS);
+        loadsegment(es, __USER_DS);
+
+        regs->cs = __USER_CS;
+        regs->ss = __USER_DS;
+
+        return 0;
+}
+
+asmlinkage long sys32_x32_rt_sigreturn(struct pt_regs *regs)
+{
+        struct rt_sigframe_x32 __user *frame;
+        sigset_t set;
+        unsigned long ax;
+        struct pt_regs tregs;
+
+        frame = (struct rt_sigframe_x32 __user *)(regs->sp - 8);
+
+        if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+                goto badframe;
+        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+                goto badframe;
+
+        sigdelsetmask(&set, ~_BLOCKABLE);
+        set_current_blocked(&set);
+
+        if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
+                goto badframe;
+
+        tregs = *regs;
+        if (sys32_sigaltstack(&frame->uc.uc_stack, NULL, &tregs) == -EFAULT)
+                goto badframe;
+
+        return ax;
+
+badframe:
+        signal_fault(regs, frame, "x32 rt_sigreturn");
+        return 0;
+}
+#endif
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 051489082d5..f921df8c209 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -98,7 +98,7 @@ out:
 static void find_start_end(unsigned long flags, unsigned long *begin,
                            unsigned long *end)
 {
-        if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
+        if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
                 unsigned long new_begin;
                 /* This is usually used needed to map code in small
                    model, so it needs to be in the first 31bit. Limit
@@ -144,7 +144,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
                     (!vma || addr + len <= vma->vm_start))
                         return addr;
         }
-        if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
+        if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
             && len <= mm->cached_hole_size) {
                 mm->cached_hole_size = 0;
                 mm->free_area_cache = begin;
@@ -205,7 +205,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                 return addr;
 
         /* for MAP_32BIT mappings we force the legact mmap base */
-        if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
+        if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
                 goto bottomup;
 
         /* requesting a specific address */
diff --git a/arch/x86/kernel/syscall_64.c b/arch/x86/kernel/syscall_64.c
index 7ac7943be02..5c7f8c20da7 100644
--- a/arch/x86/kernel/syscall_64.c
+++ b/arch/x86/kernel/syscall_64.c
@@ -5,6 +5,14 @@
 #include <linux/cache.h>
 #include <asm/asm-offsets.h>
 
+#define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
+
+#ifdef CONFIG_X86_X32_ABI
+# define __SYSCALL_X32(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
+#else
+# define __SYSCALL_X32(nr, sym, compat) /* nothing */
+#endif
+
 #define __SYSCALL_64(nr, sym, compat) extern asmlinkage void sym(void) ;
 #include <asm/syscalls_64.h>
 #undef __SYSCALL_64
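
The macro dance above is a two-pass X-macro pattern: the generated syscall header is included once with __SYSCALL_64 expanding to extern declarations, then again (further down in this file, outside this hunk) with it expanding to table entries, while __SYSCALL_COMMON and __SYSCALL_X32 simply forward to it or expand to nothing. A self-contained toy of the same pattern, with invented names:

/* xmacro.c - two-pass X-macro table, same shape as syscall_64.c */
#include <stdio.h>

#define SYSCALL_LIST \
        SYSCALL(0, toy_read) \
        SYSCALL(1, toy_write)

/* pass 1: declare the handlers */
#define SYSCALL(nr, sym) void sym(void);
SYSCALL_LIST
#undef SYSCALL

/* pass 2: fill the function-pointer table */
#define SYSCALL(nr, sym) [nr] = sym,
static void (*table[])(void) = {
        SYSCALL_LIST
};
#undef SYSCALL

void toy_read(void)  { puts("read");  }
void toy_write(void) { puts("write"); }

int main(void) { table[1](); return 0; }   /* prints "write" */
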
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 482ec3af206..c6d17ad59b8 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -119,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
          * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
          * On nmi (interrupt 2), do_trap should not be called.
          */
-        if (trapnr < 6)
+        if (trapnr < X86_TRAP_UD)
                 goto vm86_trap;
         goto trap_signal;
 }
@@ -132,7 +132,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
 trap_signal:
 #endif
         /*
-         * We want error_code and trap_no set for userspace faults and
+         * We want error_code and trap_nr set for userspace faults and
          * kernelspace faults which result in die(), but not
          * kernelspace faults which are fixed up.  die() gives the
          * process no chance to handle the signal and notice the
@@ -141,7 +141,7 @@ trap_signal:
          * delivered, faults.  See also do_general_protection below.
          */
         tsk->thread.error_code = error_code;
-        tsk->thread.trap_no = trapnr;
+        tsk->thread.trap_nr = trapnr;
 
 #ifdef CONFIG_X86_64
         if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
@@ -164,7 +164,7 @@ trap_signal:
 kernel_trap:
         if (!fixup_exception(regs)) {
                 tsk->thread.error_code = error_code;
-                tsk->thread.trap_no = trapnr;
+                tsk->thread.trap_nr = trapnr;
                 die(str, regs, error_code);
         }
         return;
@@ -203,27 +203,31 @@ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \
                 do_trap(trapnr, signr, str, regs, error_code, &info);   \
 }
 
-DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
-DO_ERROR(4, SIGSEGV, "overflow", overflow)
-DO_ERROR(5, SIGSEGV, "bounds", bounds)
-DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
-DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
-DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
-DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
+DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV,
+                regs->ip)
+DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow)
+DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds)
+DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN,
+                regs->ip)
+DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",
+                coprocessor_segment_overrun)
+DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
+DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
 #ifdef CONFIG_X86_32
-DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
+DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
 #endif
-DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
+DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check,
+                BUS_ADRALN, 0)
 
 #ifdef CONFIG_X86_64
 /* Runs on IST stack */
 dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
 {
         if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
-                       12, SIGBUS) == NOTIFY_STOP)
+                        X86_TRAP_SS, SIGBUS) == NOTIFY_STOP)
                 return;
         preempt_conditional_sti(regs);
-        do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
+        do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
         preempt_conditional_cli(regs);
 }
 
@@ -233,10 +237,10 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
         struct task_struct *tsk = current;
 
         /* Return not checked because double check cannot be ignored */
-        notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);
+        notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
 
         tsk->thread.error_code = error_code;
-        tsk->thread.trap_no = 8;
+        tsk->thread.trap_nr = X86_TRAP_DF;
 
         /*
          * This is always a kernel trap and never fixable (and thus must
@@ -264,7 +268,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
                 goto gp_in_kernel;
 
         tsk->thread.error_code = error_code;
-        tsk->thread.trap_no = 13;
+        tsk->thread.trap_nr = X86_TRAP_GP;
 
         if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
                         printk_ratelimit()) {
@@ -291,9 +295,9 @@ gp_in_kernel:
                 return;
 
         tsk->thread.error_code = error_code;
-        tsk->thread.trap_no = 13;
-        if (notify_die(DIE_GPF, "general protection fault", regs,
-                                error_code, 13, SIGSEGV) == NOTIFY_STOP)
+        tsk->thread.trap_nr = X86_TRAP_GP;
+        if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
+                        X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
                 return;
         die("general protection fault", regs, error_code);
 }
@@ -302,13 +306,13 @@ gp_in_kernel:
 dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
 {
 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
-        if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
-                        == NOTIFY_STOP)
+        if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
+                                SIGTRAP) == NOTIFY_STOP)
                 return;
 #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
 
-        if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
-                        == NOTIFY_STOP)
+        if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
+                        SIGTRAP) == NOTIFY_STOP)
                 return;
 
         /*
@@ -317,7 +321,7 @@ dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
          */
         debug_stack_usage_inc();
         preempt_conditional_sti(regs);
-        do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
+        do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
         preempt_conditional_cli(regs);
         debug_stack_usage_dec();
 }
@@ -422,8 +426,8 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
         preempt_conditional_sti(regs);
 
         if (regs->flags & X86_VM_MASK) {
-                handle_vm86_trap((struct kernel_vm86_regs *) regs,
-                                error_code, 1);
+                handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
+                                X86_TRAP_DB);
                 preempt_conditional_cli(regs);
                 debug_stack_usage_dec();
                 return;
@@ -460,7 +464,8 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
         struct task_struct *task = current;
         siginfo_t info;
         unsigned short err;
-        char *str = (trapnr == 16) ? "fpu exception" : "simd exception";
+        char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
+                                                "simd exception";
 
         if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
                 return;
@@ -470,7 +475,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
         {
                 if (!fixup_exception(regs)) {
                         task->thread.error_code = error_code;
-                        task->thread.trap_no = trapnr;
+                        task->thread.trap_nr = trapnr;
                         die(str, regs, error_code);
                 }
                 return;
@@ -480,12 +485,12 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
          * Save the info for the exception handler and clear the error.
          */
         save_init_fpu(task);
-        task->thread.trap_no = trapnr;
+        task->thread.trap_nr = trapnr;
         task->thread.error_code = error_code;
         info.si_signo = SIGFPE;
         info.si_errno = 0;
         info.si_addr = (void __user *)regs->ip;
-        if (trapnr == 16) {
+        if (trapnr == X86_TRAP_MF) {
                 unsigned short cwd, swd;
                 /*
                  * (~cwd & swd) will mask out exceptions that are not set to unmasked
@@ -529,10 +534,11 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
                 info.si_code = FPE_FLTRES;
         } else {
                 /*
-                 * If we're using IRQ 13, or supposedly even some trap 16
-                 * implementations, it's possible we get a spurious trap...
+                 * If we're using IRQ 13, or supposedly even some trap
+                 * X86_TRAP_MF implementations, it's possible
+                 * we get a spurious trap, which is not an error.
                  */
-                return;         /* Spurious trap, no error */
+                return;
         }
         force_sig_info(SIGFPE, &info, task);
 }
@@ -543,13 +549,13 @@ dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
         ignore_fpu_irq = 1;
 #endif
 
-        math_error(regs, error_code, 16);
+        math_error(regs, error_code, X86_TRAP_MF);
 }
 
 dotraplinkage void
 do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
 {
-        math_error(regs, error_code, 19);
+        math_error(regs, error_code, X86_TRAP_XF);
 }
 
 dotraplinkage void
@@ -571,41 +577,18 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
 }
 
 /*
- * __math_state_restore assumes that cr0.TS is already clear and the
- * fpu state is all ready for use.  Used during context switch.
- */
-void __math_state_restore(void)
-{
-        struct thread_info *thread = current_thread_info();
-        struct task_struct *tsk = thread->task;
-
-        /*
-         * Paranoid restore. send a SIGSEGV if we fail to restore the state.
-         */
-        if (unlikely(restore_fpu_checking(tsk))) {
-                stts();
-                force_sig(SIGSEGV, tsk);
-                return;
-        }
-
-        thread->status |= TS_USEDFPU;   /* So we fnsave on switch_to() */
-        tsk->fpu_counter++;
-}
-
-/*
  * 'math_state_restore()' saves the current math information in the
  * old math state array, and gets the new ones from the current task
  *
  * Careful.. There are problems with IBM-designed IRQ13 behaviour.
  * Don't touch unless you *really* know how it works.
  *
- * Must be called with kernel preemption disabled (in this case,
- * local interrupts are disabled at the call-site in entry.S).
+ * Must be called with kernel preemption disabled (eg with local
+ * local interrupts as in the case of do_device_not_available).
  */
-asmlinkage void math_state_restore(void)
+void math_state_restore(void)
 {
-        struct thread_info *thread = current_thread_info();
-        struct task_struct *tsk = thread->task;
+        struct task_struct *tsk = current;
 
         if (!tsk_used_math(tsk)) {
                 local_irq_enable();
@@ -622,9 +605,17 @@ asmlinkage void math_state_restore(void)
622 local_irq_disable(); 605 local_irq_disable();
623 } 606 }
624 607
625 clts(); /* Allow maths ops (or we recurse) */ 608 __thread_fpu_begin(tsk);
609 /*
610 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
611 */
612 if (unlikely(restore_fpu_checking(tsk))) {
613 __thread_fpu_end(tsk);
614 force_sig(SIGSEGV, tsk);
615 return;
616 }
626 617
627 __math_state_restore(); 618 tsk->fpu_counter++;
628} 619}
629EXPORT_SYMBOL_GPL(math_state_restore); 620EXPORT_SYMBOL_GPL(math_state_restore);
630 621
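Pieced together, math_state_restore() after this hunk reads roughly as below. The lines the hunk context skips (new lines 595-604, the init_fpu() allocation path) are reconstructed from the surrounding kernel source, so treat this as a reading aid rather than a verbatim quote:

void math_state_restore(void)
{
	struct task_struct *tsk = current;

	if (!tsk_used_math(tsk)) {
		local_irq_enable();
		/* (elided in the hunk) init_fpu() can sleep in a slab
		 * allocation; the group is killed if it fails */
		if (init_fpu(tsk))
			do_group_exit(SIGKILL);
		local_irq_disable();
	}

	__thread_fpu_begin(tsk);	/* clears cr0.TS, claims the FPU */

	/* Paranoid restore: SIGSEGV the task if the state is corrupt */
	if (unlikely(restore_fpu_checking(tsk))) {
		__thread_fpu_end(tsk);
		force_sig(SIGSEGV, tsk);
		return;
	}

	tsk->fpu_counter++;		/* bias toward eager restore */
}

One visible effect of the restructuring: the old code only marked the FPU as in use (TS_USEDFPU in __math_state_restore) after a successful restore, while the new code claims ownership first and explicitly releases it on failure, so ownership and cr0.TS stay in sync on both paths.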
@@ -658,20 +649,21 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
658 info.si_errno = 0; 649 info.si_errno = 0;
659 info.si_code = ILL_BADSTK; 650 info.si_code = ILL_BADSTK;
660 info.si_addr = NULL; 651 info.si_addr = NULL;
661 if (notify_die(DIE_TRAP, "iret exception", 652 if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
662 regs, error_code, 32, SIGILL) == NOTIFY_STOP) 653 X86_TRAP_IRET, SIGILL) == NOTIFY_STOP)
663 return; 654 return;
664 do_trap(32, SIGILL, "iret exception", regs, error_code, &info); 655 do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
656 &info);
665} 657}
666#endif 658#endif
667 659
668/* Set of traps needed for early debugging. */ 660/* Set of traps needed for early debugging. */
669void __init early_trap_init(void) 661void __init early_trap_init(void)
670{ 662{
671 set_intr_gate_ist(1, &debug, DEBUG_STACK); 663 set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
672 /* int3 can be called from all */ 664 /* int3 can be called from all */
673 set_system_intr_gate_ist(3, &int3, DEBUG_STACK); 665 set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
674 set_intr_gate(14, &page_fault); 666 set_intr_gate(X86_TRAP_PF, &page_fault);
675 load_idt(&idt_descr); 667 load_idt(&idt_descr);
676} 668}
677 669
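The two setters used in early_trap_init() differ only in the gate's descriptor privilege level, which is why int3 ("can be called from all") gets the system variant: DPL 3 allows a software int $3 from ring 3 to enter the gate, while the DPL-0 debug and page-fault gates are reachable only by hardware events or the kernel. Paraphrased from asm/desc.h (BUG_ON range checks omitted):

static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
{
	_set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);	/* DPL 0 */
}

static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
{
	_set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);	/* DPL 3 */
}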
@@ -687,30 +679,30 @@ void __init trap_init(void)
687 early_iounmap(p, 4); 679 early_iounmap(p, 4);
688#endif 680#endif
689 681
690 set_intr_gate(0, &divide_error); 682 set_intr_gate(X86_TRAP_DE, &divide_error);
691 set_intr_gate_ist(2, &nmi, NMI_STACK); 683 set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
692 /* int4 can be called from all */ 684 /* int4 can be called from all */
693 set_system_intr_gate(4, &overflow); 685 set_system_intr_gate(X86_TRAP_OF, &overflow);
694 set_intr_gate(5, &bounds); 686 set_intr_gate(X86_TRAP_BR, &bounds);
695 set_intr_gate(6, &invalid_op); 687 set_intr_gate(X86_TRAP_UD, &invalid_op);
696 set_intr_gate(7, &device_not_available); 688 set_intr_gate(X86_TRAP_NM, &device_not_available);
697#ifdef CONFIG_X86_32 689#ifdef CONFIG_X86_32
698 set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS); 690 set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
699#else 691#else
700 set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK); 692 set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
701#endif 693#endif
702 set_intr_gate(9, &coprocessor_segment_overrun); 694 set_intr_gate(X86_TRAP_OLD_MF, &coprocessor_segment_overrun);
703 set_intr_gate(10, &invalid_TSS); 695 set_intr_gate(X86_TRAP_TS, &invalid_TSS);
704 set_intr_gate(11, &segment_not_present); 696 set_intr_gate(X86_TRAP_NP, &segment_not_present);
705 set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK); 697 set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK);
706 set_intr_gate(13, &general_protection); 698 set_intr_gate(X86_TRAP_GP, &general_protection);
707 set_intr_gate(15, &spurious_interrupt_bug); 699 set_intr_gate(X86_TRAP_SPURIOUS, &spurious_interrupt_bug);
708 set_intr_gate(16, &coprocessor_error); 700 set_intr_gate(X86_TRAP_MF, &coprocessor_error);
709 set_intr_gate(17, &alignment_check); 701 set_intr_gate(X86_TRAP_AC, &alignment_check);
710#ifdef CONFIG_X86_MCE 702#ifdef CONFIG_X86_MCE
711 set_intr_gate_ist(18, &machine_check, MCE_STACK); 703 set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
712#endif 704#endif
713 set_intr_gate(19, &simd_coprocessor_error); 705 set_intr_gate(X86_TRAP_XF, &simd_coprocessor_error);
714 706
715 /* Reserve all the builtin and the syscall vector: */ 707 /* Reserve all the builtin and the syscall vector: */
716 for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) 708 for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
@@ -735,7 +727,7 @@ void __init trap_init(void)
735 727
736#ifdef CONFIG_X86_64 728#ifdef CONFIG_X86_64
737 memcpy(&nmi_idt_table, &idt_table, IDT_ENTRIES * 16); 729 memcpy(&nmi_idt_table, &idt_table, IDT_ENTRIES * 16);
738 set_nmi_gate(1, &debug); 730 set_nmi_gate(X86_TRAP_DB, &debug);
739 set_nmi_gate(3, &int3); 731 set_nmi_gate(X86_TRAP_BP, &int3);
740#endif 732#endif
741} 733}
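One aside on the context line above: the literal 16 in the nmi_idt_table memcpy is the size of a 64-bit gate descriptor, so the length works out to sizeof(idt_table). A hypothetical, self-documenting spelling (not part of this patch):

	memcpy(&nmi_idt_table, &idt_table, sizeof(idt_table));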
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index b466cab5ba1..a1315ab2d6b 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -567,7 +567,7 @@ int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
567 } 567 }
568 if (trapno != 1) 568 if (trapno != 1)
 569 return 1; /* we let this be handled by the calling routine */ 569 return 1; /* we let this be handled by the calling routine */
570 current->thread.trap_no = trapno; 570 current->thread.trap_nr = trapno;
571 current->thread.error_code = error_code; 571 current->thread.error_code = error_code;
572 force_sig(SIGTRAP, current); 572 force_sig(SIGTRAP, current);
573 return 0; 573 return 0;
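The return convention here: 1 means "not consumed, let the generic trap path handle it", 0 means the vm86 layer already queued the signal. A simplified, hypothetical caller-side sketch of that contract as do_debug() might use it (the real call site also saves DR6 and juggles preemption):

	if (v8086_mode(regs)) {
		if (!handle_vm86_trap((struct kernel_vm86_regs *)regs,
				      error_code, X86_TRAP_DB))
			return;		/* SIGTRAP already queued */
		/* nonzero: fall through to the generic #DB handling */
	}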
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index b07ba939356..327509b95e0 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -153,7 +153,7 @@ static bool write_ok_or_segv(unsigned long ptr, size_t size)
153 153
154 thread->error_code = 6; /* user fault, no page, write */ 154 thread->error_code = 6; /* user fault, no page, write */
155 thread->cr2 = ptr; 155 thread->cr2 = ptr;
156 thread->trap_no = 14; 156 thread->trap_nr = X86_TRAP_PF;
157 157
158 memset(&info, 0, sizeof(info)); 158 memset(&info, 0, sizeof(info));
159 info.si_signo = SIGSEGV; 159 info.si_signo = SIGSEGV;
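The hard-coded 6 stored in thread->error_code is a synthesized page-fault error code. Decoding it against the architectural #PF bits (the names mirror the PF_* enum in arch/x86/mm/fault.c):

#define PF_PROT		0x1	/* set: protection violation; clear: not present */
#define PF_WRITE	0x2	/* set: the access was a write */
#define PF_USER		0x4	/* set: the fault came from user mode */

/* error_code 6 == PF_WRITE | PF_USER: user-mode write, page not present */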
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index a3911343976..71109111411 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -47,7 +47,7 @@ void __sanitize_i387_state(struct task_struct *tsk)
47 if (!fx) 47 if (!fx)
48 return; 48 return;
49 49
50 BUG_ON(task_thread_info(tsk)->status & TS_USEDFPU); 50 BUG_ON(__thread_has_fpu(tsk));
51 51
52 xstate_bv = tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv; 52 xstate_bv = tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv;
53 53
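xstate_bv is the state-component bitmap in the XSAVE header: a clear bit means the hardware skipped writing that component, so __sanitize_i387_state() must force it back to its init value before the buffer reaches user space. The low bit assignments, matching the XSTATE_* constants in asm/xsave.h of this era:

#define XSTATE_FP	0x1	/* bit 0: legacy x87 state */
#define XSTATE_SSE	0x2	/* bit 1: XMM registers and MXCSR */
#define XSTATE_YMM	0x4	/* bit 2: upper halves of the AVX registers */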
@@ -168,7 +168,7 @@ int save_i387_xstate(void __user *buf)
168 if (!used_math()) 168 if (!used_math())
169 return 0; 169 return 0;
170 170
171 if (task_thread_info(tsk)->status & TS_USEDFPU) { 171 if (user_has_fpu()) {
172 if (use_xsave()) 172 if (use_xsave())
173 err = xsave_user(buf); 173 err = xsave_user(buf);
174 else 174 else
@@ -176,8 +176,7 @@ int save_i387_xstate(void __user *buf)
176 176
177 if (err) 177 if (err)
178 return err; 178 return err;
179 task_thread_info(tsk)->status &= ~TS_USEDFPU; 179 user_fpu_end();
180 stts();
181 } else { 180 } else {
182 sanitize_i387_state(tsk); 181 sanitize_i387_state(tsk);
183 if (__copy_to_user(buf, &tsk->thread.fpu.state->fxsave, 182 if (__copy_to_user(buf, &tsk->thread.fpu.state->fxsave,
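Net effect of the hunk above: the open-coded TS_USEDFPU test and the trailing stts() collapse into the user_has_fpu()/user_fpu_end() pair. A condensed sketch of the resulting save path (error handling trimmed; fxsave_user() is the non-xsave leg in the real function):

	if (user_has_fpu()) {		/* state is live in the CPU */
		err = use_xsave() ? xsave_user(buf) : fxsave_user(buf);
		if (err)
			return err;
		user_fpu_end();		/* registers saved, drop ownership */
	} else {			/* state already in memory */
		sanitize_i387_state(tsk);
		if (__copy_to_user(buf, &tsk->thread.fpu.state->fxsave,
				   xstate_size))
			return -1;
	}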
@@ -292,10 +291,7 @@ int restore_i387_xstate(void __user *buf)
292 return err; 291 return err;
293 } 292 }
294 293
295 if (!(task_thread_info(current)->status & TS_USEDFPU)) { 294 user_fpu_begin();
296 clts();
297 task_thread_info(current)->status |= TS_USEDFPU;
298 }
299 if (use_xsave()) 295 if (use_xsave())
300 err = restore_user_xstate(buf); 296 err = restore_user_xstate(buf);
301 else 297 else
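And the mirror image on the restore side: the clts()-plus-TS_USEDFPU open coding becomes a single user_fpu_begin(). Paraphrased from the asm/i387.h helper this series introduces (from memory, may not be verbatim):

static inline void user_fpu_begin(void)
{
	preempt_disable();
	if (!user_has_fpu())
		__thread_fpu_begin(current);	/* clts() + take ownership */
	preempt_enable();
}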