author      Linus Torvalds <torvalds@linux-foundation.org>   2014-01-20 17:46:32 -0500
committer   Linus Torvalds <torvalds@linux-foundation.org>   2014-01-20 17:46:32 -0500
commit      c9cdd9a6ae49704f12a289706551536ec842693f (patch)
tree        21a79d8731065352e924f2bcecf257a4247c2f64 /arch/x86
parent      f4bcd8ccddb02833340652e9f46f5127828eb79d (diff)
parent      741e3902cd89a7fbc04ae53f29a7ca0da452aa8e (diff)
Merge branch 'x86/mpx' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 cpufeature and mpx updates from Peter Anvin:
 "This includes the basic infrastructure for MPX (Memory Protection
  Extensions) support, but does not include MPX support itself.  It is,
  however, a prerequisite for KVM support for MPX, which I believe will
  be pushed later this merge window by the KVM team.

  This includes moving the functionality in futex_atomic_cmpxchg_inatomic()
  into a new function in uaccess.h so it can be reused - this will be used
  by the final MPX patches.

  The actual MPX functionality (map management and so on) will be pushed
  in a future merge window, when ready"

* 'x86/mpx' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/intel/mpx: Remove unused LWP structure
  x86, mpx: Add MPX related opcodes to the x86 opcode map
  x86: replace futex_atomic_cmpxchg_inatomic() with user_atomic_cmpxchg_inatomic
  x86: add user_atomic_cmpxchg_inatomic at uaccess.h
  x86, xsave: Support eager-only xsave features, add MPX support
  x86, cpufeature: Define the Intel MPX feature flag
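To make the shape of the new uaccess helper concrete, here is a brief, hedged sketch of a caller. bump_user_counter() below is a hypothetical example written for this summary, not code from the series; only the user_atomic_cmpxchg_inatomic(uval, ptr, old, new) signature and its 0 / -EFAULT return convention are taken from the uaccess.h hunk further down.

/*
 * Hypothetical illustration only: atomically increment a u32 that lives
 * in user memory.  Returns 0 on success, -EFAULT on a faulting access.
 */
static int bump_user_counter(u32 __user *uaddr)
{
        u32 cur, seen;
        int ret;

        do {
                if (get_user(cur, uaddr))       /* snapshot the current value */
                        return -EFAULT;

                /* try to install cur + 1; 'seen' receives the value found there */
                ret = user_atomic_cmpxchg_inatomic(&seen, uaddr, cur, cur + 1);
                if (ret)
                        return ret;
        } while (seen != cur);                  /* lost a race with another writer: retry */

        return 0;
}

The futex.h change below is the first in-tree user of the helper: futex_atomic_cmpxchg_inatomic() now simply forwards to it.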
Diffstat (limited to 'arch/x86')
-rw-r--r--   arch/x86/include/asm/cpufeature.h    1
-rw-r--r--   arch/x86/include/asm/futex.h        21
-rw-r--r--   arch/x86/include/asm/processor.h    17
-rw-r--r--   arch/x86/include/asm/uaccess.h      92
-rw-r--r--   arch/x86/include/asm/xsave.h        14
-rw-r--r--   arch/x86/kernel/xsave.c             10
-rw-r--r--   arch/x86/lib/x86-opcode-map.txt      4
7 files changed, 133 insertions, 26 deletions
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 89270b4318db..e099f9502ace 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -216,6 +216,7 @@
 #define X86_FEATURE_ERMS	(9*32+ 9) /* Enhanced REP MOVSB/STOSB */
 #define X86_FEATURE_INVPCID	(9*32+10) /* Invalidate Processor Context ID */
 #define X86_FEATURE_RTM	(9*32+11) /* Restricted Transactional Memory */
+#define X86_FEATURE_MPX	(9*32+14) /* Memory Protection Extension */
 #define X86_FEATURE_RDSEED	(9*32+18) /* The RDSEED instruction */
 #define X86_FEATURE_ADX	(9*32+19) /* The ADCX and ADOX instructions */
 #define X86_FEATURE_SMAP	(9*32+20) /* Supervisor Mode Access Prevention */
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
index be27ba1e947a..b4c1f5453436 100644
--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -110,26 +110,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 						u32 oldval, u32 newval)
 {
-	int ret = 0;
-
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
-		return -EFAULT;
-
-	asm volatile("\t" ASM_STAC "\n"
-		     "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
-		     "2:\t" ASM_CLAC "\n"
-		     "\t.section .fixup, \"ax\"\n"
-		     "3:\tmov %3, %0\n"
-		     "\tjmp 2b\n"
-		     "\t.previous\n"
-		     _ASM_EXTABLE(1b, 3b)
-		     : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
-		     : "i" (-EFAULT), "r" (newval), "1" (oldval)
-		     : "memory"
-	);
-
-	*uval = oldval;
-	return ret;
+	return user_atomic_cmpxchg_inatomic(uval, uaddr, oldval, newval);
 }
 
 #endif
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index a61b0717da32..fdedd38fd0fc 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -370,6 +370,20 @@ struct ymmh_struct {
 	u32 ymmh_space[64];
 };
 
+/* We don't support LWP yet: */
+struct lwp_struct {
+	u8 reserved[128];
+};
+
+struct bndregs_struct {
+	u64 bndregs[8];
+} __packed;
+
+struct bndcsr_struct {
+	u64 cfg_reg_u;
+	u64 status_reg;
+} __packed;
+
 struct xsave_hdr_struct {
 	u64 xstate_bv;
 	u64 reserved1[2];
@@ -380,6 +394,9 @@ struct xsave_struct {
 	struct i387_fxsave_struct i387;
 	struct xsave_hdr_struct xsave_hdr;
 	struct ymmh_struct ymmh;
+	struct lwp_struct lwp;
+	struct bndregs_struct bndregs;
+	struct bndcsr_struct bndcsr;
 	/* new processor state extensions will go here */
 } __attribute__ ((packed, aligned (64)));
 
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 6f1bb74d547b..0d592e0a5b84 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -533,6 +533,98 @@ extern __must_check long strnlen_user(const char __user *str, long n);
 unsigned long __must_check clear_user(void __user *mem, unsigned long len);
 unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
 
+extern void __cmpxchg_wrong_size(void)
+	__compiletime_error("Bad argument size for cmpxchg");
+
+#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)	\
+({									\
+	int __ret = 0;							\
+	__typeof__(ptr) __uval = (uval);				\
+	__typeof__(*(ptr)) __old = (old);				\
+	__typeof__(*(ptr)) __new = (new);				\
+	switch (size) {							\
+	case 1:								\
+	{								\
+		asm volatile("\t" ASM_STAC "\n"				\
+			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"		\
+			"2:\t" ASM_CLAC "\n"				\
+			"\t.section .fixup, \"ax\"\n"			\
+			"3:\tmov %3, %0\n"				\
+			"\tjmp 2b\n"					\
+			"\t.previous\n"					\
+			_ASM_EXTABLE(1b, 3b)				\
+			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
+			: "i" (-EFAULT), "q" (__new), "1" (__old)	\
+			: "memory"					\
+		);							\
+		break;							\
+	}								\
+	case 2:								\
+	{								\
+		asm volatile("\t" ASM_STAC "\n"				\
+			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"		\
+			"2:\t" ASM_CLAC "\n"				\
+			"\t.section .fixup, \"ax\"\n"			\
+			"3:\tmov %3, %0\n"				\
+			"\tjmp 2b\n"					\
+			"\t.previous\n"					\
+			_ASM_EXTABLE(1b, 3b)				\
+			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
+			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
+			: "memory"					\
+		);							\
+		break;							\
+	}								\
+	case 4:								\
+	{								\
+		asm volatile("\t" ASM_STAC "\n"				\
+			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"		\
+			"2:\t" ASM_CLAC "\n"				\
+			"\t.section .fixup, \"ax\"\n"			\
+			"3:\tmov %3, %0\n"				\
+			"\tjmp 2b\n"					\
+			"\t.previous\n"					\
+			_ASM_EXTABLE(1b, 3b)				\
+			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
+			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
+			: "memory"					\
+		);							\
+		break;							\
+	}								\
+	case 8:								\
+	{								\
+		if (!IS_ENABLED(CONFIG_X86_64))				\
+			__cmpxchg_wrong_size();				\
+									\
+		asm volatile("\t" ASM_STAC "\n"				\
+			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"		\
+			"2:\t" ASM_CLAC "\n"				\
+			"\t.section .fixup, \"ax\"\n"			\
+			"3:\tmov %3, %0\n"				\
+			"\tjmp 2b\n"					\
+			"\t.previous\n"					\
+			_ASM_EXTABLE(1b, 3b)				\
+			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
+			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
+			: "memory"					\
+		);							\
+		break;							\
+	}								\
+	default:							\
+		__cmpxchg_wrong_size();					\
+	}								\
+	*__uval = __old;						\
+	__ret;								\
+})
+
+#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)		\
+({									\
+	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ?		\
+		__user_atomic_cmpxchg_inatomic((uval), (ptr),		\
+				(old), (new), sizeof(*(ptr))) :		\
+		-EFAULT;						\
+})
+
 /*
  * movsl can be slow when source and dest are not both 8-byte aligned
  */
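A usage note on the hunk above, offered as a hedged illustration rather than as part of the series: __cmpxchg_wrong_size() is only declared, never defined, and carries __compiletime_error("Bad argument size for cmpxchg"). Any operand size outside 1, 2, 4 or 8 bytes therefore fails the build, as does an 8-byte operand on a 32-bit kernel, where the IS_ENABLED(CONFIG_X86_64) branch constant-folds away and the surviving call triggers the error. Roughly:

/* Hypothetical call sites, illustrating the size dispatch in the macro above. */
static int size_dispatch_demo(u32 __user *p32, u64 __user *p64)
{
        u32 seen32;
        u64 seen64;
        int ret;

        /* sizeof(*(ptr)) == 4: expands to the cmpxchgl case on any x86 kernel */
        ret = user_atomic_cmpxchg_inatomic(&seen32, p32, 0U, 1U);
        if (ret)
                return ret;

        /*
         * sizeof(*(ptr)) == 8: cmpxchgq on x86-64; on a 32-bit build the
         * surviving call to __cmpxchg_wrong_size() fails the build.
         */
        return user_atomic_cmpxchg_inatomic(&seen64, p64, 0ULL, 1ULL);
}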
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 0415cdabb5a6..554738963b28 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -9,6 +9,8 @@
 #define XSTATE_FP	0x1
 #define XSTATE_SSE	0x2
 #define XSTATE_YMM	0x4
+#define XSTATE_BNDREGS	0x8
+#define XSTATE_BNDCSR	0x10
 
 #define XSTATE_FPSSE	(XSTATE_FP | XSTATE_SSE)
 
@@ -20,10 +22,14 @@
 #define XSAVE_YMM_SIZE	    256
 #define XSAVE_YMM_OFFSET    (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
 
-/*
- * These are the features that the OS can handle currently.
- */
-#define XCNTXT_MASK	(XSTATE_FP | XSTATE_SSE | XSTATE_YMM)
+/* Supported features which support lazy state saving */
+#define XSTATE_LAZY	(XSTATE_FP | XSTATE_SSE | XSTATE_YMM)
+
+/* Supported features which require eager state saving */
+#define XSTATE_EAGER	(XSTATE_BNDREGS | XSTATE_BNDCSR)
+
+/* All currently supported features */
+#define XCNTXT_MASK	(XSTATE_LAZY | XSTATE_EAGER)
 
 #ifdef CONFIG_X86_64
 #define REX_PREFIX	"0x48, "
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 422fd8223470..a4b451c6addf 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -562,6 +562,16 @@ static void __init xstate_enable_boot_cpu(void)
 	if (cpu_has_xsaveopt && eagerfpu != DISABLE)
 		eagerfpu = ENABLE;
 
+	if (pcntxt_mask & XSTATE_EAGER) {
+		if (eagerfpu == DISABLE) {
+			pr_err("eagerfpu not present, disabling some xstate features: 0x%llx\n",
+					pcntxt_mask & XSTATE_EAGER);
+			pcntxt_mask &= ~XSTATE_EAGER;
+		} else {
+			eagerfpu = ENABLE;
+		}
+	}
+
 	pr_info("enabled xstate_bv 0x%llx, cntxt size 0x%x\n",
 		pcntxt_mask, xstate_size);
 }
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index 533a85e3a07e..1a2be7c6895d 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -346,8 +346,8 @@ AVXcode: 1
 17: vmovhps Mq,Vq (v1) | vmovhpd Mq,Vq (66),(v1)
 18: Grp16 (1A)
 19:
-1a:
-1b:
+1a: BNDCL Ev,Gv | BNDCU Ev,Gv | BNDMOV Gv,Ev | BNDLDX Gv,Ev,Gv
+1b: BNDCN Ev,Gv | BNDMOV Ev,Gv | BNDMK Gv,Ev | BNDSTX Ev,GV,Gv
 1c:
 1d:
 1e: