author		Linus Torvalds <torvalds@linux-foundation.org>	2012-10-01 16:59:17 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-01 16:59:17 -0400
commit		15385dfe7e0fa6866b204dd0d14aec2cc48fc0a7
tree		3ddcb000ec3b82f672fa892e8e44b1be4a5ebb33
parent		a57d985e378ca69f430b85852e4187db3698a89e
parent		b2cc2a074de75671bbed5e2dda67a9252ef353ea
Merge branch 'x86-smap-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86/smap support from Ingo Molnar:
 "This adds support for the SMAP (Supervisor Mode Access Prevention) CPU
  feature on Intel CPUs: a hardware feature that prevents unintended
  user-space data access from kernel privileged code.  It's turned on
  automatically when possible.

  This, in combination with SMEP, makes it even harder to exploit kernel
  bugs such as NULL pointer dereferences."

Fix up trivial conflict in arch/x86/kernel/entry_64.S due to newly added
includes right next to each other.

* 'x86-smap-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, smep, smap: Make the switching functions one-way
  x86, suspend: On wakeup always initialize cr4 and EFER
  x86-32: Start out eflags and cr4 clean
  x86, smap: Do not abuse the [f][x]rstor_checking() functions for user space
  x86-32, smap: Add STAC/CLAC instructions to 32-bit kernel entry
  x86, smap: Reduce the SMAP overhead for signal handling
  x86, smap: A page fault due to SMAP is an oops
  x86, smap: Turn on Supervisor Mode Access Prevention
  x86, smap: Add STAC and CLAC instructions to control user space access
  x86, uaccess: Merge prototypes for clear_user/__clear_user
  x86, smap: Add a header file with macros for STAC/CLAC
  x86, alternative: Add header guards to <asm/alternative-asm.h>
  x86, alternative: Use .pushsection/.popsection
  x86, smap: Add CR4 bit for SMAP
  x86-32, mm: The WP test should be done on a kernel page
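A note on the mechanism, for readers new to SMAP: with CR4.SMAP set, any user-space access from kernel mode faults unless EFLAGS.AC is set, so every *intentional* user access must be bracketed by STAC (set AC) and CLAC (clear AC). A minimal sketch of the C-side helpers this series introduces (simplified from <asm/smap.h>; the real macros emit raw opcode bytes for the benefit of old assemblers, and alternative() patches them in only on SMAP-capable CPUs, so everything else executes NOPs):

#include <asm/alternative.h>	/* alternative(): boot-time code patching */
#include <asm/cpufeature.h>	/* X86_FEATURE_SMAP */

/* Open the user-access window: the kernel may now touch user memory. */
static __always_inline void stac(void)
{
	alternative("", "stac", X86_FEATURE_SMAP);
}

/* Close the window: stray user-space dereferences fault again. */
static __always_inline void clac(void)
{
	alternative("", "clac", X86_FEATURE_SMAP);
}

The uaccess primitives (get_user(), copy_to_user(), and friends) call these around their user accesses; the ASM_STAC/ASM_CLAC macros seen in the entry-code diffs below are the assembler equivalents.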
Diffstat (limited to 'arch/x86/kernel')
 arch/x86/kernel/acpi/sleep.c | 15
 arch/x86/kernel/cpu/common.c | 44
 arch/x86/kernel/entry_32.S   | 26
 arch/x86/kernel/entry_64.S   | 11
 arch/x86/kernel/head_32.S    | 31
 arch/x86/kernel/signal.c     | 24
 arch/x86/kernel/xsave.c      |  6
 7 files changed, 108 insertions(+), 49 deletions(-)
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 1b8e5a03d942..11676cf65aee 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -43,17 +43,22 @@ int acpi_suspend_lowlevel(void)
 
 	header->video_mode = saved_video_mode;
 
+	header->pmode_behavior = 0;
+
 #ifndef CONFIG_64BIT
 	store_gdt((struct desc_ptr *)&header->pmode_gdt);
 
-	if (rdmsr_safe(MSR_EFER, &header->pmode_efer_low,
-		       &header->pmode_efer_high))
-		header->pmode_efer_low = header->pmode_efer_high = 0;
+	if (!rdmsr_safe(MSR_EFER,
+			&header->pmode_efer_low,
+			&header->pmode_efer_high))
+		header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_EFER);
 #endif /* !CONFIG_64BIT */
 
 	header->pmode_cr0 = read_cr0();
-	header->pmode_cr4 = read_cr4_safe();
-	header->pmode_behavior = 0;
+	if (__this_cpu_read(cpu_info.cpuid_level) >= 0) {
+		header->pmode_cr4 = read_cr4();
+		header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_CR4);
+	}
 	if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
 			&header->pmode_misc_en_low,
 			&header->pmode_misc_en_high))
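The WAKEUP_BEHAVIOR_* bits saved here are tested on the resume path, so a register that could not be read at suspend time is never restored from garbage. An illustrative sketch of the consumer side (assumed shape, not the literal realmode wakeup code):

/* Illustrative consumer of header->pmode_behavior (hypothetical helper):
 * restore only the state that was safely captured at suspend time. */
static void wakeup_restore_sketch(struct wakeup_header *header)
{
	if (header->pmode_behavior & (1 << WAKEUP_BEHAVIOR_RESTORE_EFER))
		wrmsr(MSR_EFER, header->pmode_efer_low,
		      header->pmode_efer_high);

	if (header->pmode_behavior & (1 << WAKEUP_BEHAVIOR_RESTORE_CR4))
		write_cr4(header->pmode_cr4);
}

This matters for SMAP because CR4 is now always reinitialized on wakeup rather than inherited: a CR4.SMAP bit set at boot must not silently disappear across suspend/resume.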
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 532691b6c8fe..7505f7b13e71 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -259,23 +259,36 @@ static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 }
 #endif
 
-static int disable_smep __cpuinitdata;
 static __init int setup_disable_smep(char *arg)
 {
-	disable_smep = 1;
+	setup_clear_cpu_cap(X86_FEATURE_SMEP);
 	return 1;
 }
 __setup("nosmep", setup_disable_smep);
 
-static __cpuinit void setup_smep(struct cpuinfo_x86 *c)
+static __always_inline void setup_smep(struct cpuinfo_x86 *c)
 {
-	if (cpu_has(c, X86_FEATURE_SMEP)) {
-		if (unlikely(disable_smep)) {
-			setup_clear_cpu_cap(X86_FEATURE_SMEP);
-			clear_in_cr4(X86_CR4_SMEP);
-		} else
-			set_in_cr4(X86_CR4_SMEP);
-	}
+	if (cpu_has(c, X86_FEATURE_SMEP))
+		set_in_cr4(X86_CR4_SMEP);
+}
+
+static __init int setup_disable_smap(char *arg)
+{
+	setup_clear_cpu_cap(X86_FEATURE_SMAP);
+	return 1;
+}
+__setup("nosmap", setup_disable_smap);
+
+static __always_inline void setup_smap(struct cpuinfo_x86 *c)
+{
+	unsigned long eflags;
+
+	/* This should have been cleared long ago */
+	raw_local_save_flags(eflags);
+	BUG_ON(eflags & X86_EFLAGS_AC);
+
+	if (cpu_has(c, X86_FEATURE_SMAP))
+		set_in_cr4(X86_CR4_SMAP);
 }
 
 /*
@@ -712,8 +725,6 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 	c->cpu_index = 0;
 	filter_cpuid_features(c, false);
 
-	setup_smep(c);
-
 	if (this_cpu->c_bsp_init)
 		this_cpu->c_bsp_init(c);
 }
@@ -798,8 +809,6 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 		c->phys_proc_id = c->initial_apicid;
 	}
 
-	setup_smep(c);
-
 	get_model_name(c); /* Default name */
 
 	detect_nopl(c);
@@ -864,6 +873,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	/* Disable the PN if appropriate */
 	squash_the_stupid_serial_number(c);
 
+	/* Set up SMEP/SMAP */
+	setup_smep(c);
+	setup_smap(c);
+
 	/*
	 * The vendor-specific functions might have changed features.
	 * Now we do "generic changes."
@@ -1114,7 +1127,8 @@ void syscall_init(void)
 
 	/* Flags to clear on syscall */
 	wrmsrl(MSR_SYSCALL_MASK,
-	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
+	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
+	       X86_EFLAGS_IOPL|X86_EFLAGS_AC);
 }
 
 /*
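Note that setup_smep()/setup_smap() are now one-way: either the feature is enabled, or its capability bit was already cleared at boot (e.g. by the nosmep/nosmap command-line parameters), so no clear_in_cr4() path remains. set_in_cr4() also records the bit for later CPUs; roughly (a paraphrased sketch of the era's <asm/processor.h>, not a verbatim copy):

/* Paraphrased sketch of set_in_cr4(): remember the bit in
 * mmu_cr4_features so secondary CPUs and the resume path inherit it,
 * then set it in the live register. */
static inline void set_in_cr4(unsigned long mask)
{
	unsigned long cr4;

	mmu_cr4_features |= mask;
	cr4 = read_cr4();
	cr4 |= mask;
	write_cr4(cr4);
}

This is also why the head_32.S change below loads pa(mmu_cr4_features) unconditionally on CPUs that have CR4: secondary CPUs pick up exactly the bits the boot CPU decided on.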
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index f438a44bf8f9..0750e3ba87c0 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -57,6 +57,7 @@
 #include <asm/cpufeature.h>
 #include <asm/alternative-asm.h>
 #include <asm/asm.h>
+#include <asm/smap.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
 #include <linux/elf-em.h>
@@ -407,7 +408,9 @@ sysenter_past_esp:
 	 */
 	cmpl $__PAGE_OFFSET-3,%ebp
 	jae syscall_fault
+	ASM_STAC
 1:	movl (%ebp),%ebp
+	ASM_CLAC
 	movl %ebp,PT_EBP(%esp)
 	_ASM_EXTABLE(1b,syscall_fault)
 
@@ -488,6 +491,7 @@ ENDPROC(ia32_sysenter_target)
 	# system call handler stub
 ENTRY(system_call)
 	RING0_INT_FRAME			# can't unwind into user space anyway
+	ASM_CLAC
 	pushl_cfi %eax			# save orig_eax
 	SAVE_ALL
 	GET_THREAD_INFO(%ebp)
@@ -670,6 +674,7 @@ END(syscall_exit_work)
 
 	RING0_INT_FRAME			# can't unwind into user space anyway
 syscall_fault:
+	ASM_CLAC
 	GET_THREAD_INFO(%ebp)
 	movl $-EFAULT,PT_EAX(%esp)
 	jmp resume_userspace
@@ -825,6 +830,7 @@ END(interrupt)
 	 */
 	.p2align CONFIG_X86_L1_CACHE_SHIFT
 common_interrupt:
+	ASM_CLAC
 	addl $-0x80,(%esp)	/* Adjust vector into the [-256,-1] range */
 	SAVE_ALL
 	TRACE_IRQS_OFF
@@ -841,6 +847,7 @@ ENDPROC(common_interrupt)
 #define BUILD_INTERRUPT3(name, nr, fn)	\
 ENTRY(name)				\
 	RING0_INT_FRAME;		\
+	ASM_CLAC;			\
 	pushl_cfi $~(nr);		\
 	SAVE_ALL;			\
 	TRACE_IRQS_OFF			\
@@ -857,6 +864,7 @@ ENDPROC(name)
 
 ENTRY(coprocessor_error)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0
 	pushl_cfi $do_coprocessor_error
 	jmp error_code
@@ -865,6 +873,7 @@ END(coprocessor_error)
 
 ENTRY(simd_coprocessor_error)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0
 #ifdef CONFIG_X86_INVD_BUG
 	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
@@ -886,6 +895,7 @@ END(simd_coprocessor_error)
 
 ENTRY(device_not_available)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $-1			# mark this as an int
 	pushl_cfi $do_device_not_available
 	jmp error_code
@@ -906,6 +916,7 @@ END(native_irq_enable_sysexit)
 
 ENTRY(overflow)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0
 	pushl_cfi $do_overflow
 	jmp error_code
@@ -914,6 +925,7 @@ END(overflow)
 
 ENTRY(bounds)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0
 	pushl_cfi $do_bounds
 	jmp error_code
@@ -922,6 +934,7 @@ END(bounds)
 
 ENTRY(invalid_op)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0
 	pushl_cfi $do_invalid_op
 	jmp error_code
@@ -930,6 +943,7 @@ END(invalid_op)
 
 ENTRY(coprocessor_segment_overrun)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0
 	pushl_cfi $do_coprocessor_segment_overrun
 	jmp error_code
@@ -938,6 +952,7 @@ END(coprocessor_segment_overrun)
 
 ENTRY(invalid_TSS)
 	RING0_EC_FRAME
+	ASM_CLAC
 	pushl_cfi $do_invalid_TSS
 	jmp error_code
 	CFI_ENDPROC
@@ -945,6 +960,7 @@ END(invalid_TSS)
 
 ENTRY(segment_not_present)
 	RING0_EC_FRAME
+	ASM_CLAC
 	pushl_cfi $do_segment_not_present
 	jmp error_code
 	CFI_ENDPROC
@@ -952,6 +968,7 @@ END(segment_not_present)
 
 ENTRY(stack_segment)
 	RING0_EC_FRAME
+	ASM_CLAC
 	pushl_cfi $do_stack_segment
 	jmp error_code
 	CFI_ENDPROC
@@ -959,6 +976,7 @@ END(stack_segment)
 
 ENTRY(alignment_check)
 	RING0_EC_FRAME
+	ASM_CLAC
 	pushl_cfi $do_alignment_check
 	jmp error_code
 	CFI_ENDPROC
@@ -966,6 +984,7 @@ END(alignment_check)
 
 ENTRY(divide_error)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0			# no error code
 	pushl_cfi $do_divide_error
 	jmp error_code
@@ -975,6 +994,7 @@ END(divide_error)
 #ifdef CONFIG_X86_MCE
 ENTRY(machine_check)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0
 	pushl_cfi machine_check_vector
 	jmp error_code
@@ -984,6 +1004,7 @@ END(machine_check)
 
 ENTRY(spurious_interrupt_bug)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0
 	pushl_cfi $do_spurious_interrupt_bug
 	jmp error_code
@@ -1273,6 +1294,7 @@ return_to_handler:
 
 ENTRY(page_fault)
 	RING0_EC_FRAME
+	ASM_CLAC
 	pushl_cfi $do_page_fault
 	ALIGN
 error_code:
@@ -1345,6 +1367,7 @@ END(page_fault)
 
 ENTRY(debug)
 	RING0_INT_FRAME
+	ASM_CLAC
 	cmpl $ia32_sysenter_target,(%esp)
 	jne debug_stack_correct
 	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
@@ -1369,6 +1392,7 @@ END(debug)
 	 */
 ENTRY(nmi)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi %eax
 	movl %ss, %eax
 	cmpw $__ESPFIX_SS, %ax
@@ -1439,6 +1463,7 @@ END(nmi)
 
 ENTRY(int3)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $-1			# mark this as an int
 	SAVE_ALL
 	TRACE_IRQS_OFF
@@ -1459,6 +1484,7 @@ END(general_protection)
 #ifdef CONFIG_KVM_GUEST
 ENTRY(async_page_fault)
 	RING0_EC_FRAME
+	ASM_CLAC
 	pushl_cfi $do_async_page_fault
 	jmp error_code
 	CFI_ENDPROC
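All of these entry stubs gain ASM_CLAC as their first real instruction because the IDT does not clear EFLAGS.AC: an interrupt or exception arriving inside a STAC window would otherwise run the entire kernel path with user access left open. With AC guaranteed clear outside uaccess regions, the page-fault code can treat an AC-clear kernel access to user memory as a bug. A paraphrased sketch of that policy check (from the "page fault due to SMAP is an oops" patch; the real function lives in arch/x86/mm/fault.c, outside this diffstat):

/* Paraphrased sketch: a kernel-mode fault on a user address with
 * EFLAGS.AC clear on SMAP hardware is a kernel bug -> oops, not a
 * fixable uaccess fault.  (PF_USER = fault raised from user mode.) */
static int smap_violation(int error_code, struct pt_regs *regs)
{
	if (error_code & PF_USER)
		return 0;	/* user-mode fault: not a SMAP violation */
	if (regs->flags & X86_EFLAGS_AC)
		return 0;	/* inside a legitimate STAC window */
	return 1;		/* kernel touched user memory unannounced */
}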
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 066334be7b74..44531acd9a81 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -57,6 +57,7 @@
 #include <asm/percpu.h>
 #include <asm/asm.h>
 #include <asm/rcu.h>
+#include <asm/smap.h>
 #include <linux/err.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
@@ -568,7 +569,8 @@ END(ret_from_fork)
  * System call entry. Up to 6 arguments in registers are supported.
  *
  * SYSCALL does not save anything on the stack and does not change the
- * stack pointer.
+ * stack pointer.  However, it does mask the flags register for us, so
+ * CLD and CLAC are not needed.
  */
 
 /*
@@ -987,6 +989,7 @@ END(interrupt)
 	 */
 	.p2align CONFIG_X86_L1_CACHE_SHIFT
 common_interrupt:
+	ASM_CLAC
 	XCPT_FRAME
 	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
 	interrupt do_IRQ
@@ -1126,6 +1129,7 @@ END(common_interrupt)
  */
 .macro apicinterrupt num sym do_sym
 ENTRY(\sym)
+	ASM_CLAC
 	INTR_FRAME
 	pushq_cfi $~(\num)
 .Lcommon_\sym:
@@ -1180,6 +1184,7 @@ apicinterrupt IRQ_WORK_VECTOR \
  */
 .macro zeroentry sym do_sym
 ENTRY(\sym)
+	ASM_CLAC
 	INTR_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
@@ -1197,6 +1202,7 @@ END(\sym)
 
 .macro paranoidzeroentry sym do_sym
 ENTRY(\sym)
+	ASM_CLAC
 	INTR_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
@@ -1215,6 +1221,7 @@ END(\sym)
 #define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
 .macro paranoidzeroentry_ist sym do_sym ist
 ENTRY(\sym)
+	ASM_CLAC
 	INTR_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
@@ -1234,6 +1241,7 @@ END(\sym)
 
 .macro errorentry sym do_sym
 ENTRY(\sym)
+	ASM_CLAC
 	XCPT_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	subq $ORIG_RAX-R15, %rsp
@@ -1252,6 +1260,7 @@ END(\sym)
 	/* error code is on the stack already */
 .macro paranoiderrorentry sym do_sym
 ENTRY(\sym)
+	ASM_CLAC
 	XCPT_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	subq $ORIG_RAX-R15, %rsp
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index d42ab17b7397..957a47aec64e 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -287,27 +287,28 @@ ENTRY(startup_32_smp)
 	leal -__PAGE_OFFSET(%ecx),%esp
 
 default_entry:
-
 /*
  * New page tables may be in 4Mbyte page mode and may
  * be using the global pages.
  *
  * NOTE! If we are on a 486 we may have no cr4 at all!
- * So we do not try to touch it unless we really have
- * some bits in it to set.  This won't work if the BSP
- * implements cr4 but this AP does not -- very unlikely
- * but be warned!  The same applies to the pse feature
- * if not equally supported. --macro
- *
- * NOTE! We have to correct for the fact that we're
- * not yet offset PAGE_OFFSET..
+ * Specifically, cr4 exists if and only if CPUID exists,
+ * which in turn exists if and only if EFLAGS.ID exists.
  */
-#define cr4_bits pa(mmu_cr4_features)
-	movl cr4_bits,%edx
-	andl %edx,%edx
-	jz 6f
-	movl %cr4,%eax		# Turn on paging options (PSE,PAE,..)
-	orl %edx,%eax
+	movl $X86_EFLAGS_ID,%ecx
+	pushl %ecx
+	popfl
+	pushfl
+	popl %eax
+	pushl $0
+	popfl
+	pushfl
+	popl %edx
+	xorl %edx,%eax
+	testl %ecx,%eax
+	jz 6f			# No ID flag = no CPUID = no CR4
+
+	movl pa(mmu_cr4_features),%eax
 	movl %eax,%cr4
 
 	testb $X86_CR4_PAE, %al		# check if PAE is enabled
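The pushfl/popfl dance above is the classic CPUID-detection probe: CR4 exists if and only if CPUID does, and CPUID exists if and only if EFLAGS.ID can be toggled. The kernel carries the same test in C as flag_is_changeable_p() in cpu/common.c; a trimmed sketch (32-bit only, hence pushfl/popfl):

/* Trimmed sketch of flag_is_changeable_p(): try to flip an EFLAGS bit
 * and report whether the change sticks.  Called with X86_EFLAGS_ID to
 * decide whether the CPUID instruction is available. */
static int flag_is_changeable_p(unsigned long flag)
{
	unsigned long f1, f2;

	asm volatile("pushfl\n\t"
		     "pushfl\n\t"
		     "popl %0\n\t"	/* f1 = current EFLAGS		*/
		     "movl %0, %1\n\t"	/* f2 = unmodified copy		*/
		     "xorl %2, %0\n\t"	/* toggle the requested bit	*/
		     "pushl %0\n\t"
		     "popfl\n\t"	/* try to write it back		*/
		     "pushfl\n\t"
		     "popl %0\n\t"	/* f1 = what actually stuck	*/
		     "popfl"		/* restore original EFLAGS	*/
		     : "=&r" (f1), "=&r" (f2)
		     : "ir" (flag));

	return ((f1 ^ f2) & flag) != 0;
}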
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 3160c26db5e7..b33144c8b309 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -114,11 +114,12 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
 		regs->orig_ax = -1;		/* disable syscall checks */
 
 		get_user_ex(buf, &sc->fpstate);
-		err |= restore_xstate_sig(buf, config_enabled(CONFIG_X86_32));
 
 		get_user_ex(*pax, &sc->ax);
 	} get_user_catch(err);
 
+	err |= restore_xstate_sig(buf, config_enabled(CONFIG_X86_32));
+
 	return err;
 }
 
@@ -355,7 +356,6 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		put_user_ex(sig, &frame->sig);
 		put_user_ex(&frame->info, &frame->pinfo);
 		put_user_ex(&frame->uc, &frame->puc);
-		err |= copy_siginfo_to_user(&frame->info, info);
 
 		/* Create the ucontext.  */
 		if (cpu_has_xsave)
@@ -367,9 +367,6 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		put_user_ex(sas_ss_flags(regs->sp),
 			    &frame->uc.uc_stack.ss_flags);
 		put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
-		err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
-					regs, set->sig[0]);
-		err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
 
 		/* Set up to return from userspace.  */
 		restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
@@ -386,6 +383,11 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		 */
 		put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
 	} put_user_catch(err);
+
+	err |= copy_siginfo_to_user(&frame->info, info);
+	err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
+				regs, set->sig[0]);
+	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
 
 	if (err)
 		return -EFAULT;
@@ -434,8 +436,6 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		put_user_ex(sas_ss_flags(regs->sp),
 			    &frame->uc.uc_stack.ss_flags);
 		put_user_ex(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
-		err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]);
-		err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
 
 		/* Set up to return from userspace.  If provided, use a stub
 		   already in userspace.  */
@@ -448,6 +448,9 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		}
 	} put_user_catch(err);
 
+	err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]);
+	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
 	if (err)
 		return -EFAULT;
 
@@ -504,9 +507,6 @@ static int x32_setup_rt_frame(int sig, struct k_sigaction *ka,
 			    &frame->uc.uc_stack.ss_flags);
 		put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
 		put_user_ex(0, &frame->uc.uc__pad0);
-		err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
-					regs, set->sig[0]);
-		err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
 
 		if (ka->sa.sa_flags & SA_RESTORER) {
 			restorer = ka->sa.sa_restorer;
@@ -518,6 +518,10 @@ static int x32_setup_rt_frame(int sig, struct k_sigaction *ka,
 			put_user_ex(restorer, &frame->pretcode);
 		} put_user_catch(err);
 
+		err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
+					regs, set->sig[0]);
+		err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
 		if (err)
 			return -EFAULT;
 
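All four frame-setup paths get the same treatment: the bulk helpers (copy_siginfo_to_user(), setup_sigcontext(), __copy_to_user()) move out of the put_user_try/put_user_catch region. With SMAP, the try/catch region is bracketed by a single STAC/CLAC pair covering the scalar put_user_ex() stores, while each bulk helper brackets its own accesses internally; this is the "Reduce the SMAP overhead for signal handling" patch. A condensed illustration (hypothetical helper, not literal kernel code):

/* Condensed illustration of the reordering: scalar stores share one
 * STAC/CLAC pair inside the try/catch; bulk copies run afterwards and
 * do their own user-access bracketing. */
static int setup_frame_sketch(int sig, siginfo_t *info, sigset_t *set,
			      struct rt_sigframe __user *frame)
{
	int err = 0;

	put_user_try {
		put_user_ex(sig, &frame->sig);
		put_user_ex(&frame->info, &frame->pinfo);
	} put_user_catch(err);

	err |= copy_siginfo_to_user(&frame->info, info);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

	return err ? -EFAULT : 0;
}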
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 4e89b3dd408d..ada87a329edc 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -315,7 +315,7 @@ static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
 		if ((unsigned long)buf % 64 || fx_only) {
 			u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
 			xrstor_state(init_xstate_buf, init_bv);
-			return fxrstor_checking((__force void *) buf);
+			return fxrstor_user(buf);
 		} else {
 			u64 init_bv = pcntxt_mask & ~xbv;
 			if (unlikely(init_bv))
@@ -323,9 +323,9 @@ static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
 			return xrestore_user(buf, xbv);
 		}
 	} else if (use_fxsr()) {
-		return fxrstor_checking((__force void *) buf);
+		return fxrstor_user(buf);
 	} else
-		return frstor_checking((__force void *) buf);
+		return frstor_user(buf);
 }
 
 int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
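The *_checking() helpers operate on kernel buffers; the *_user() replacements take a __user pointer and wrap the restore instruction in user-access bracketing plus an exception-table fixup, which is what "Do not abuse the [f][x]rstor_checking() functions for user space" is about. A simplified sketch of the shape of such a helper (assumed form; the real ones are built from a user_insn() macro in <asm/fpu-internal.h>):

/* Simplified sketch of a *_user() restore: the instruction runs inside
 * a STAC/CLAC window, and a faulting user pointer returns an error
 * through the exception table instead of oopsing. */
static inline int frstor_user(struct i387_fsave_struct __user *fx)
{
	int err = 0;

	asm volatile(ASM_STAC "\n"
		     "1:	frstor %[fx]\n"
		     "2:	" ASM_CLAC "\n"
		     ".section .fixup,\"ax\"\n"
		     "3:	movl $-1, %[err]\n"
		     "	jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "+r" (err)
		     : [fx] "m" (*fx));

	return err;
}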