aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDave Hansen <dave.hansen@linux.intel.com>2015-09-02 19:31:26 -0400
committerIngo Molnar <mingo@kernel.org>2015-09-14 06:21:46 -0400
commitd91cab78133d33b1dfd3d3fa7167fcbf74fb5f99 (patch)
tree7c4367804ee4e24bf2ed4aa031ef826a679930a4
parent75933433d666c2ab13a7a93f4ec1e6f000a94ffc (diff)
x86/fpu: Rename XSAVE macros
There are two concepts that have some confusing naming:

 1. Extended State Component numbers (currently called XFEATURE_BIT_*)
 2. Extended State Component masks (currently called XSTATE_*)

The numbers are (currently) from 0-9.  State component 3 is the bounds registers for MPX, for instance.

But when we want to enable "state component 3", we go set a bit in XCR0.  The bit we set is 1<<3.  We can check to see if a state component feature is enabled by looking at its bit.

The current 'xfeature_bit's are at best xfeature bit _numbers_. Calling them bits is at best inconsistent with ending the enum list with 'XFEATURES_NR_MAX'.

This patch renames the enum to be 'xfeature'.  These also happen to be what the Intel documentation calls a "state component".

We also want to differentiate these from the "XSTATE_*" macros. The "XSTATE_*" macros are a mask, and we rename them to match.

These macros are reasonably widely used so this patch is a wee bit big, but this really is just a rename.

The only non-mechanical part of this is the

	s/XSTATE_EXTEND_MASK/XFEATURE_MASK_EXTEND/

We need a better name for it, but that's another patch.

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: dave@sr71.net
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/20150902233126.38653250@viggo.jf.intel.com
[ Ported to v4.3-rc1. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--arch/x86/crypto/camellia_aesni_avx2_glue.c3
-rw-r--r--arch/x86/crypto/camellia_aesni_avx_glue.c3
-rw-r--r--arch/x86/crypto/cast5_avx_glue.c3
-rw-r--r--arch/x86/crypto/cast6_avx_glue.c3
-rw-r--r--arch/x86/crypto/chacha20_glue.c2
-rw-r--r--arch/x86/crypto/poly1305_glue.c2
-rw-r--r--arch/x86/crypto/serpent_avx2_glue.c3
-rw-r--r--arch/x86/crypto/serpent_avx_glue.c3
-rw-r--r--arch/x86/crypto/sha1_ssse3_glue.c2
-rw-r--r--arch/x86/crypto/sha256_ssse3_glue.c2
-rw-r--r--arch/x86/crypto/sha512_ssse3_glue.c2
-rw-r--r--arch/x86/crypto/twofish_avx_glue.c2
-rw-r--r--arch/x86/include/asm/fpu/types.h44
-rw-r--r--arch/x86/include/asm/fpu/xstate.h14
-rw-r--r--arch/x86/kernel/fpu/init.c6
-rw-r--r--arch/x86/kernel/fpu/regset.c4
-rw-r--r--arch/x86/kernel/fpu/signal.c6
-rw-r--r--arch/x86/kernel/fpu/xstate.c36
-rw-r--r--arch/x86/kernel/traps.c2
-rw-r--r--arch/x86/kvm/cpuid.c4
-rw-r--r--arch/x86/kvm/x86.c27
-rw-r--r--arch/x86/kvm/x86.h6
-rw-r--r--arch/x86/mm/mpx.c6
23 files changed, 103 insertions, 82 deletions
diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c
index 4c65c70e628b..d84456924563 100644
--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c
@@ -567,7 +567,8 @@ static int __init camellia_aesni_init(void)
567 return -ENODEV; 567 return -ENODEV;
568 } 568 }
569 569
570 if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) { 570 if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
571 &feature_name)) {
571 pr_info("CPU feature '%s' is not supported.\n", feature_name); 572 pr_info("CPU feature '%s' is not supported.\n", feature_name);
572 return -ENODEV; 573 return -ENODEV;
573 } 574 }
diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
index 80a0e4389c9a..12e729bfe71b 100644
--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
@@ -554,7 +554,8 @@ static int __init camellia_aesni_init(void)
554{ 554{
555 const char *feature_name; 555 const char *feature_name;
556 556
557 if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) { 557 if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
558 &feature_name)) {
558 pr_info("CPU feature '%s' is not supported.\n", feature_name); 559 pr_info("CPU feature '%s' is not supported.\n", feature_name);
559 return -ENODEV; 560 return -ENODEV;
560 } 561 }
diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
index be00aa48b2b5..8648158f3916 100644
--- a/arch/x86/crypto/cast5_avx_glue.c
+++ b/arch/x86/crypto/cast5_avx_glue.c
@@ -469,7 +469,8 @@ static int __init cast5_init(void)
469{ 469{
470 const char *feature_name; 470 const char *feature_name;
471 471
472 if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) { 472 if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
473 &feature_name)) {
473 pr_info("CPU feature '%s' is not supported.\n", feature_name); 474 pr_info("CPU feature '%s' is not supported.\n", feature_name);
474 return -ENODEV; 475 return -ENODEV;
475 } 476 }
diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c
index 5dbba7224221..fca459578c35 100644
--- a/arch/x86/crypto/cast6_avx_glue.c
+++ b/arch/x86/crypto/cast6_avx_glue.c
@@ -591,7 +591,8 @@ static int __init cast6_init(void)
591{ 591{
592 const char *feature_name; 592 const char *feature_name;
593 593
594 if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) { 594 if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
595 &feature_name)) {
595 pr_info("CPU feature '%s' is not supported.\n", feature_name); 596 pr_info("CPU feature '%s' is not supported.\n", feature_name);
596 return -ENODEV; 597 return -ENODEV;
597 } 598 }
diff --git a/arch/x86/crypto/chacha20_glue.c b/arch/x86/crypto/chacha20_glue.c
index effe2160b7c5..722bacea040e 100644
--- a/arch/x86/crypto/chacha20_glue.c
+++ b/arch/x86/crypto/chacha20_glue.c
@@ -130,7 +130,7 @@ static int __init chacha20_simd_mod_init(void)
130 130
131#ifdef CONFIG_AS_AVX2 131#ifdef CONFIG_AS_AVX2
132 chacha20_use_avx2 = cpu_has_avx && cpu_has_avx2 && 132 chacha20_use_avx2 = cpu_has_avx && cpu_has_avx2 &&
133 cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL); 133 cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL);
134#endif 134#endif
135 return crypto_register_alg(&alg); 135 return crypto_register_alg(&alg);
136} 136}
diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
index f7170d764f32..4264a3d59589 100644
--- a/arch/x86/crypto/poly1305_glue.c
+++ b/arch/x86/crypto/poly1305_glue.c
@@ -184,7 +184,7 @@ static int __init poly1305_simd_mod_init(void)
184 184
185#ifdef CONFIG_AS_AVX2 185#ifdef CONFIG_AS_AVX2
186 poly1305_use_avx2 = cpu_has_avx && cpu_has_avx2 && 186 poly1305_use_avx2 = cpu_has_avx && cpu_has_avx2 &&
187 cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL); 187 cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL);
188 alg.descsize = sizeof(struct poly1305_simd_desc_ctx); 188 alg.descsize = sizeof(struct poly1305_simd_desc_ctx);
189 if (poly1305_use_avx2) 189 if (poly1305_use_avx2)
190 alg.descsize += 10 * sizeof(u32); 190 alg.descsize += 10 * sizeof(u32);
diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c
index 7d838dc4d888..6d198342e2de 100644
--- a/arch/x86/crypto/serpent_avx2_glue.c
+++ b/arch/x86/crypto/serpent_avx2_glue.c
@@ -542,7 +542,8 @@ static int __init init(void)
542 pr_info("AVX2 instructions are not detected.\n"); 542 pr_info("AVX2 instructions are not detected.\n");
543 return -ENODEV; 543 return -ENODEV;
544 } 544 }
545 if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) { 545 if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
546 &feature_name)) {
546 pr_info("CPU feature '%s' is not supported.\n", feature_name); 547 pr_info("CPU feature '%s' is not supported.\n", feature_name);
547 return -ENODEV; 548 return -ENODEV;
548 } 549 }
diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c
index da7dafc9b16d..5dc37026c7ce 100644
--- a/arch/x86/crypto/serpent_avx_glue.c
+++ b/arch/x86/crypto/serpent_avx_glue.c
@@ -597,7 +597,8 @@ static int __init serpent_init(void)
597{ 597{
598 const char *feature_name; 598 const char *feature_name;
599 599
600 if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) { 600 if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
601 &feature_name)) {
601 pr_info("CPU feature '%s' is not supported.\n", feature_name); 602 pr_info("CPU feature '%s' is not supported.\n", feature_name);
602 return -ENODEV; 603 return -ENODEV;
603 } 604 }
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index 7c48e8b20848..00212c32d4db 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -121,7 +121,7 @@ static struct shash_alg alg = {
121#ifdef CONFIG_AS_AVX 121#ifdef CONFIG_AS_AVX
122static bool __init avx_usable(void) 122static bool __init avx_usable(void)
123{ 123{
124 if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL)) { 124 if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
125 if (cpu_has_avx) 125 if (cpu_has_avx)
126 pr_info("AVX detected but unusable.\n"); 126 pr_info("AVX detected but unusable.\n");
127 return false; 127 return false;
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
index f8097fc0d1d1..0e0e85aea634 100644
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
@@ -130,7 +130,7 @@ static struct shash_alg algs[] = { {
130#ifdef CONFIG_AS_AVX 130#ifdef CONFIG_AS_AVX
131static bool __init avx_usable(void) 131static bool __init avx_usable(void)
132{ 132{
133 if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL)) { 133 if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
134 if (cpu_has_avx) 134 if (cpu_has_avx)
135 pr_info("AVX detected but unusable.\n"); 135 pr_info("AVX detected but unusable.\n");
136 return false; 136 return false;
diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
index 2edad7b81870..0c8c38c101ac 100644
--- a/arch/x86/crypto/sha512_ssse3_glue.c
+++ b/arch/x86/crypto/sha512_ssse3_glue.c
@@ -129,7 +129,7 @@ static struct shash_alg algs[] = { {
129#ifdef CONFIG_AS_AVX 129#ifdef CONFIG_AS_AVX
130static bool __init avx_usable(void) 130static bool __init avx_usable(void)
131{ 131{
132 if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL)) { 132 if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
133 if (cpu_has_avx) 133 if (cpu_has_avx)
134 pr_info("AVX detected but unusable.\n"); 134 pr_info("AVX detected but unusable.\n");
135 return false; 135 return false;
diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c
index c2bd0ce718ee..6f3738ced95e 100644
--- a/arch/x86/crypto/twofish_avx_glue.c
+++ b/arch/x86/crypto/twofish_avx_glue.c
@@ -558,7 +558,7 @@ static int __init twofish_init(void)
558{ 558{
559 const char *feature_name; 559 const char *feature_name;
560 560
561 if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) { 561 if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, &feature_name)) {
562 pr_info("CPU feature '%s' is not supported.\n", feature_name); 562 pr_info("CPU feature '%s' is not supported.\n", feature_name);
563 return -ENODEV; 563 return -ENODEV;
564 } 564 }
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index 5dc1a18ef11c..9f20d10af3b1 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -95,30 +95,36 @@ struct swregs_state {
95/* 95/*
96 * List of XSAVE features Linux knows about: 96 * List of XSAVE features Linux knows about:
97 */ 97 */
98enum xfeature_bit { 98enum xfeature {
99 XSTATE_BIT_FP, 99 XFEATURE_FP,
100 XSTATE_BIT_SSE, 100 XFEATURE_SSE,
101 XSTATE_BIT_YMM, 101 /*
102 XSTATE_BIT_BNDREGS, 102 * Values above here are "legacy states".
103 XSTATE_BIT_BNDCSR, 103 * Those below are "extended states".
104 XSTATE_BIT_OPMASK, 104 */
105 XSTATE_BIT_ZMM_Hi256, 105 XFEATURE_YMM,
106 XSTATE_BIT_Hi16_ZMM, 106 XFEATURE_BNDREGS,
107 XFEATURE_BNDCSR,
108 XFEATURE_OPMASK,
109 XFEATURE_ZMM_Hi256,
110 XFEATURE_Hi16_ZMM,
107 111
108 XFEATURES_NR_MAX, 112 XFEATURES_NR_MAX,
109}; 113};
110 114
111#define XSTATE_FP (1 << XSTATE_BIT_FP) 115#define XFEATURE_MASK_FP (1 << XFEATURE_FP)
112#define XSTATE_SSE (1 << XSTATE_BIT_SSE) 116#define XFEATURE_MASK_SSE (1 << XFEATURE_SSE)
113#define XSTATE_YMM (1 << XSTATE_BIT_YMM) 117#define XFEATURE_MASK_YMM (1 << XFEATURE_YMM)
114#define XSTATE_BNDREGS (1 << XSTATE_BIT_BNDREGS) 118#define XFEATURE_MASK_BNDREGS (1 << XFEATURE_BNDREGS)
115#define XSTATE_BNDCSR (1 << XSTATE_BIT_BNDCSR) 119#define XFEATURE_MASK_BNDCSR (1 << XFEATURE_BNDCSR)
116#define XSTATE_OPMASK (1 << XSTATE_BIT_OPMASK) 120#define XFEATURE_MASK_OPMASK (1 << XFEATURE_OPMASK)
117#define XSTATE_ZMM_Hi256 (1 << XSTATE_BIT_ZMM_Hi256) 121#define XFEATURE_MASK_ZMM_Hi256 (1 << XFEATURE_ZMM_Hi256)
118#define XSTATE_Hi16_ZMM (1 << XSTATE_BIT_Hi16_ZMM) 122#define XFEATURE_MASK_Hi16_ZMM (1 << XFEATURE_Hi16_ZMM)
119 123
120#define XSTATE_FPSSE (XSTATE_FP | XSTATE_SSE) 124#define XFEATURE_MASK_FPSSE (XFEATURE_MASK_FP | XFEATURE_MASK_SSE)
121#define XSTATE_AVX512 (XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM) 125#define XFEATURE_MASK_AVX512 (XFEATURE_MASK_OPMASK \
126 | XFEATURE_MASK_ZMM_Hi256 \
127 | XFEATURE_MASK_Hi16_ZMM)
122 128
123/* 129/*
124 * There are 16x 256-bit AVX registers named YMM0-YMM15. 130 * There are 16x 256-bit AVX registers named YMM0-YMM15.
diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h
index d5a9b736553c..3a6c89b70307 100644
--- a/arch/x86/include/asm/fpu/xstate.h
+++ b/arch/x86/include/asm/fpu/xstate.h
@@ -6,7 +6,7 @@
6#include <linux/uaccess.h> 6#include <linux/uaccess.h>
7 7
8/* Bit 63 of XCR0 is reserved for future expansion */ 8/* Bit 63 of XCR0 is reserved for future expansion */
9#define XSTATE_EXTEND_MASK (~(XSTATE_FPSSE | (1ULL << 63))) 9#define XFEATURE_MASK_EXTEND (~(XFEATURE_MASK_FPSSE | (1ULL << 63)))
10 10
11#define XSTATE_CPUID 0x0000000d 11#define XSTATE_CPUID 0x0000000d
12 12
@@ -19,14 +19,18 @@
19#define XSAVE_YMM_OFFSET (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET) 19#define XSAVE_YMM_OFFSET (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
20 20
21/* Supported features which support lazy state saving */ 21/* Supported features which support lazy state saving */
22#define XSTATE_LAZY (XSTATE_FP | XSTATE_SSE | XSTATE_YMM \ 22#define XFEATURE_MASK_LAZY (XFEATURE_MASK_FP | \
23 | XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM) 23 XFEATURE_MASK_SSE | \
24 XFEATURE_MASK_YMM | \
25 XFEATURE_MASK_OPMASK | \
26 XFEATURE_MASK_ZMM_Hi256 | \
27 XFEATURE_MASK_Hi16_ZMM)
24 28
25/* Supported features which require eager state saving */ 29/* Supported features which require eager state saving */
26#define XSTATE_EAGER (XSTATE_BNDREGS | XSTATE_BNDCSR) 30#define XFEATURE_MASK_EAGER (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR)
27 31
28/* All currently supported features */ 32/* All currently supported features */
29#define XCNTXT_MASK (XSTATE_LAZY | XSTATE_EAGER) 33#define XCNTXT_MASK (XFEATURE_MASK_LAZY | XFEATURE_MASK_EAGER)
30 34
31#ifdef CONFIG_X86_64 35#ifdef CONFIG_X86_64
32#define REX_PREFIX "0x48, " 36#define REX_PREFIX "0x48, "
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 0a250afc6cdf..be39b5fde4b9 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -290,11 +290,11 @@ static void __init fpu__init_system_ctx_switch(void)
290 if (cpu_has_xsaveopt && eagerfpu != DISABLE) 290 if (cpu_has_xsaveopt && eagerfpu != DISABLE)
291 eagerfpu = ENABLE; 291 eagerfpu = ENABLE;
292 292
293 if (xfeatures_mask & XSTATE_EAGER) { 293 if (xfeatures_mask & XFEATURE_MASK_EAGER) {
294 if (eagerfpu == DISABLE) { 294 if (eagerfpu == DISABLE) {
295 pr_err("x86/fpu: eagerfpu switching disabled, disabling the following xstate features: 0x%llx.\n", 295 pr_err("x86/fpu: eagerfpu switching disabled, disabling the following xstate features: 0x%llx.\n",
296 xfeatures_mask & XSTATE_EAGER); 296 xfeatures_mask & XFEATURE_MASK_EAGER);
297 xfeatures_mask &= ~XSTATE_EAGER; 297 xfeatures_mask &= ~XFEATURE_MASK_EAGER;
298 } else { 298 } else {
299 eagerfpu = ENABLE; 299 eagerfpu = ENABLE;
300 } 300 }
diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c
index dc60810c1c74..0bc3490420c5 100644
--- a/arch/x86/kernel/fpu/regset.c
+++ b/arch/x86/kernel/fpu/regset.c
@@ -66,7 +66,7 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
66 * presence of FP and SSE state. 66 * presence of FP and SSE state.
67 */ 67 */
68 if (cpu_has_xsave) 68 if (cpu_has_xsave)
69 fpu->state.xsave.header.xfeatures |= XSTATE_FPSSE; 69 fpu->state.xsave.header.xfeatures |= XFEATURE_MASK_FPSSE;
70 70
71 return ret; 71 return ret;
72} 72}
@@ -326,7 +326,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
326 * presence of FP. 326 * presence of FP.
327 */ 327 */
328 if (cpu_has_xsave) 328 if (cpu_has_xsave)
329 fpu->state.xsave.header.xfeatures |= XSTATE_FP; 329 fpu->state.xsave.header.xfeatures |= XFEATURE_MASK_FP;
330 return ret; 330 return ret;
331} 331}
332 332
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 50ec9af1bd51..eb032677f939 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -107,7 +107,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
107 * header as well as change any contents in the memory layout. 107 * header as well as change any contents in the memory layout.
108 * xrestore as part of sigreturn will capture all the changes. 108 * xrestore as part of sigreturn will capture all the changes.
109 */ 109 */
110 xfeatures |= XSTATE_FPSSE; 110 xfeatures |= XFEATURE_MASK_FPSSE;
111 111
112 err |= __put_user(xfeatures, (__u32 *)&x->header.xfeatures); 112 err |= __put_user(xfeatures, (__u32 *)&x->header.xfeatures);
113 113
@@ -207,7 +207,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
207 * layout and not enabled by the OS. 207 * layout and not enabled by the OS.
208 */ 208 */
209 if (fx_only) 209 if (fx_only)
210 header->xfeatures = XSTATE_FPSSE; 210 header->xfeatures = XFEATURE_MASK_FPSSE;
211 else 211 else
212 header->xfeatures &= (xfeatures_mask & xfeatures); 212 header->xfeatures &= (xfeatures_mask & xfeatures);
213 } 213 }
@@ -230,7 +230,7 @@ static inline int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_
230{ 230{
231 if (use_xsave()) { 231 if (use_xsave()) {
232 if ((unsigned long)buf % 64 || fx_only) { 232 if ((unsigned long)buf % 64 || fx_only) {
233 u64 init_bv = xfeatures_mask & ~XSTATE_FPSSE; 233 u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
234 copy_kernel_to_xregs(&init_fpstate.xsave, init_bv); 234 copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
235 return copy_user_to_fxregs(buf); 235 return copy_user_to_fxregs(buf);
236 } else { 236 } else {
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 769603abae63..d6f0be9c2f5a 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -72,7 +72,7 @@ int cpu_has_xfeatures(u64 xfeatures_needed, const char **feature_name)
72 /* 72 /*
73 * So we use FLS here to be able to print the most advanced 73 * So we use FLS here to be able to print the most advanced
74 * feature that was requested but is missing. So if a driver 74 * feature that was requested but is missing. So if a driver
75 * asks about "XSTATE_SSE | XSTATE_YMM" we'll print the 75 * asks about "XFEATURE_MASK_SSE | XFEATURE_MASK_YMM" we'll print the
76 * missing AVX feature - this is the most informative message 76 * missing AVX feature - this is the most informative message
77 * to users: 77 * to users:
78 */ 78 */
@@ -131,7 +131,7 @@ void fpstate_sanitize_xstate(struct fpu *fpu)
131 /* 131 /*
132 * FP is in init state 132 * FP is in init state
133 */ 133 */
134 if (!(xfeatures & XSTATE_FP)) { 134 if (!(xfeatures & XFEATURE_MASK_FP)) {
135 fx->cwd = 0x37f; 135 fx->cwd = 0x37f;
136 fx->swd = 0; 136 fx->swd = 0;
137 fx->twd = 0; 137 fx->twd = 0;
@@ -144,7 +144,7 @@ void fpstate_sanitize_xstate(struct fpu *fpu)
144 /* 144 /*
145 * SSE is in init state 145 * SSE is in init state
146 */ 146 */
147 if (!(xfeatures & XSTATE_SSE)) 147 if (!(xfeatures & XFEATURE_MASK_SSE))
148 memset(&fx->xmm_space[0], 0, 256); 148 memset(&fx->xmm_space[0], 0, 256);
149 149
150 /* 150 /*
@@ -223,14 +223,14 @@ static void __init print_xstate_feature(u64 xstate_mask)
223 */ 223 */
224static void __init print_xstate_features(void) 224static void __init print_xstate_features(void)
225{ 225{
226 print_xstate_feature(XSTATE_FP); 226 print_xstate_feature(XFEATURE_MASK_FP);
227 print_xstate_feature(XSTATE_SSE); 227 print_xstate_feature(XFEATURE_MASK_SSE);
228 print_xstate_feature(XSTATE_YMM); 228 print_xstate_feature(XFEATURE_MASK_YMM);
229 print_xstate_feature(XSTATE_BNDREGS); 229 print_xstate_feature(XFEATURE_MASK_BNDREGS);
230 print_xstate_feature(XSTATE_BNDCSR); 230 print_xstate_feature(XFEATURE_MASK_BNDCSR);
231 print_xstate_feature(XSTATE_OPMASK); 231 print_xstate_feature(XFEATURE_MASK_OPMASK);
232 print_xstate_feature(XSTATE_ZMM_Hi256); 232 print_xstate_feature(XFEATURE_MASK_ZMM_Hi256);
233 print_xstate_feature(XSTATE_Hi16_ZMM); 233 print_xstate_feature(XFEATURE_MASK_Hi16_ZMM);
234} 234}
235 235
236/* 236/*
@@ -365,7 +365,11 @@ static int init_xstate_size(void)
365 return 0; 365 return 0;
366} 366}
367 367
368void fpu__init_disable_system_xstate(void) 368/*
369 * We enabled the XSAVE hardware, but something went wrong and
370 * we can not use it. Disable it.
371 */
372static void fpu__init_disable_system_xstate(void)
369{ 373{
370 xfeatures_mask = 0; 374 xfeatures_mask = 0;
371 cr4_clear_bits(X86_CR4_OSXSAVE); 375 cr4_clear_bits(X86_CR4_OSXSAVE);
@@ -398,7 +402,7 @@ void __init fpu__init_system_xstate(void)
398 cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx); 402 cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
399 xfeatures_mask = eax + ((u64)edx << 32); 403 xfeatures_mask = eax + ((u64)edx << 32);
400 404
401 if ((xfeatures_mask & XSTATE_FPSSE) != XSTATE_FPSSE) { 405 if ((xfeatures_mask & XFEATURE_MASK_FPSSE) != XFEATURE_MASK_FPSSE) {
402 pr_err("x86/fpu: FP/SSE not present amongst the CPU's xstate features: 0x%llx.\n", xfeatures_mask); 406 pr_err("x86/fpu: FP/SSE not present amongst the CPU's xstate features: 0x%llx.\n", xfeatures_mask);
403 BUG(); 407 BUG();
404 } 408 }
@@ -451,7 +455,7 @@ void fpu__resume_cpu(void)
451 * Inputs: 455 * Inputs:
452 * xstate: the thread's storage area for all FPU data 456 * xstate: the thread's storage area for all FPU data
453 * xstate_feature: state which is defined in xsave.h (e.g. 457 * xstate_feature: state which is defined in xsave.h (e.g.
454 * XSTATE_FP, XSTATE_SSE, etc...) 458 * XFEATURE_MASK_FP, XFEATURE_MASK_SSE, etc...)
455 * Output: 459 * Output:
456 * address of the state in the xsave area, or NULL if the 460 * address of the state in the xsave area, or NULL if the
457 * field is not present in the xsave buffer. 461 * field is not present in the xsave buffer.
@@ -502,8 +506,8 @@ EXPORT_SYMBOL_GPL(get_xsave_addr);
502 * Note that this only works on the current task. 506 * Note that this only works on the current task.
503 * 507 *
504 * Inputs: 508 * Inputs:
505 * @xsave_state: state which is defined in xsave.h (e.g. XSTATE_FP, 509 * @xsave_state: state which is defined in xsave.h (e.g. XFEATURE_MASK_FP,
506 * XSTATE_SSE, etc...) 510 * XFEATURE_MASK_SSE, etc...)
507 * Output: 511 * Output:
508 * address of the state in the xsave area or NULL if the state 512 * address of the state in the xsave area or NULL if the state
509 * is not present or is in its 'init state'. 513 * is not present or is in its 'init state'.
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 346eec73f7db..0cd2ac5c0f28 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -384,7 +384,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
384 * which is all zeros which indicates MPX was not 384 * which is all zeros which indicates MPX was not
385 * responsible for the exception. 385 * responsible for the exception.
386 */ 386 */
387 bndcsr = get_xsave_field_ptr(XSTATE_BNDCSR); 387 bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
388 if (!bndcsr) 388 if (!bndcsr)
389 goto exit_trap; 389 goto exit_trap;
390 390
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 2fbea2544f24..156441bcaac8 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -30,7 +30,7 @@ static u32 xstate_required_size(u64 xstate_bv, bool compacted)
30 int feature_bit = 0; 30 int feature_bit = 0;
31 u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; 31 u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
32 32
33 xstate_bv &= XSTATE_EXTEND_MASK; 33 xstate_bv &= XFEATURE_MASK_EXTEND;
34 while (xstate_bv) { 34 while (xstate_bv) {
35 if (xstate_bv & 0x1) { 35 if (xstate_bv & 0x1) {
36 u32 eax, ebx, ecx, edx, offset; 36 u32 eax, ebx, ecx, edx, offset;
@@ -51,7 +51,7 @@ u64 kvm_supported_xcr0(void)
51 u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0; 51 u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;
52 52
53 if (!kvm_x86_ops->mpx_supported()) 53 if (!kvm_x86_ops->mpx_supported())
54 xcr0 &= ~(XSTATE_BNDREGS | XSTATE_BNDCSR); 54 xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
55 55
56 return xcr0; 56 return xcr0;
57} 57}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a60bdbccff51..2d4e54db49af 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -662,9 +662,9 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
662 /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */ 662 /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */
663 if (index != XCR_XFEATURE_ENABLED_MASK) 663 if (index != XCR_XFEATURE_ENABLED_MASK)
664 return 1; 664 return 1;
665 if (!(xcr0 & XSTATE_FP)) 665 if (!(xcr0 & XFEATURE_MASK_FP))
666 return 1; 666 return 1;
667 if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE)) 667 if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE))
668 return 1; 668 return 1;
669 669
670 /* 670 /*
@@ -672,23 +672,24 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
672 * saving. However, xcr0 bit 0 is always set, even if the 672 * saving. However, xcr0 bit 0 is always set, even if the
673 * emulated CPU does not support XSAVE (see fx_init). 673 * emulated CPU does not support XSAVE (see fx_init).
674 */ 674 */
675 valid_bits = vcpu->arch.guest_supported_xcr0 | XSTATE_FP; 675 valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
676 if (xcr0 & ~valid_bits) 676 if (xcr0 & ~valid_bits)
677 return 1; 677 return 1;
678 678
679 if ((!(xcr0 & XSTATE_BNDREGS)) != (!(xcr0 & XSTATE_BNDCSR))) 679 if ((!(xcr0 & XFEATURE_MASK_BNDREGS)) !=
680 (!(xcr0 & XFEATURE_MASK_BNDCSR)))
680 return 1; 681 return 1;
681 682
682 if (xcr0 & XSTATE_AVX512) { 683 if (xcr0 & XFEATURE_MASK_AVX512) {
683 if (!(xcr0 & XSTATE_YMM)) 684 if (!(xcr0 & XFEATURE_MASK_YMM))
684 return 1; 685 return 1;
685 if ((xcr0 & XSTATE_AVX512) != XSTATE_AVX512) 686 if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
686 return 1; 687 return 1;
687 } 688 }
688 kvm_put_guest_xcr0(vcpu); 689 kvm_put_guest_xcr0(vcpu);
689 vcpu->arch.xcr0 = xcr0; 690 vcpu->arch.xcr0 = xcr0;
690 691
691 if ((xcr0 ^ old_xcr0) & XSTATE_EXTEND_MASK) 692 if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
692 kvm_update_cpuid(vcpu); 693 kvm_update_cpuid(vcpu);
693 return 0; 694 return 0;
694} 695}
@@ -2906,7 +2907,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
2906 * Copy each region from the possibly compacted offset to the 2907 * Copy each region from the possibly compacted offset to the
2907 * non-compacted offset. 2908 * non-compacted offset.
2908 */ 2909 */
2909 valid = xstate_bv & ~XSTATE_FPSSE; 2910 valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
2910 while (valid) { 2911 while (valid) {
2911 u64 feature = valid & -valid; 2912 u64 feature = valid & -valid;
2912 int index = fls64(feature) - 1; 2913 int index = fls64(feature) - 1;
@@ -2944,7 +2945,7 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
2944 * Copy each region from the non-compacted offset to the 2945 * Copy each region from the non-compacted offset to the
2945 * possibly compacted offset. 2946 * possibly compacted offset.
2946 */ 2947 */
2947 valid = xstate_bv & ~XSTATE_FPSSE; 2948 valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
2948 while (valid) { 2949 while (valid) {
2949 u64 feature = valid & -valid; 2950 u64 feature = valid & -valid;
2950 int index = fls64(feature) - 1; 2951 int index = fls64(feature) - 1;
@@ -2972,7 +2973,7 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
2972 &vcpu->arch.guest_fpu.state.fxsave, 2973 &vcpu->arch.guest_fpu.state.fxsave,
2973 sizeof(struct fxregs_state)); 2974 sizeof(struct fxregs_state));
2974 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] = 2975 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
2975 XSTATE_FPSSE; 2976 XFEATURE_MASK_FPSSE;
2976 } 2977 }
2977} 2978}
2978 2979
@@ -2992,7 +2993,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
2992 return -EINVAL; 2993 return -EINVAL;
2993 load_xsave(vcpu, (u8 *)guest_xsave->region); 2994 load_xsave(vcpu, (u8 *)guest_xsave->region);
2994 } else { 2995 } else {
2995 if (xstate_bv & ~XSTATE_FPSSE) 2996 if (xstate_bv & ~XFEATURE_MASK_FPSSE)
2996 return -EINVAL; 2997 return -EINVAL;
2997 memcpy(&vcpu->arch.guest_fpu.state.fxsave, 2998 memcpy(&vcpu->arch.guest_fpu.state.fxsave,
2998 guest_xsave->region, sizeof(struct fxregs_state)); 2999 guest_xsave->region, sizeof(struct fxregs_state));
@@ -7001,7 +7002,7 @@ static void fx_init(struct kvm_vcpu *vcpu)
7001 /* 7002 /*
7002 * Ensure guest xcr0 is valid for loading 7003 * Ensure guest xcr0 is valid for loading
7003 */ 7004 */
7004 vcpu->arch.xcr0 = XSTATE_FP; 7005 vcpu->arch.xcr0 = XFEATURE_MASK_FP;
7005 7006
7006 vcpu->arch.cr0 |= X86_CR0_ET; 7007 vcpu->arch.cr0 |= X86_CR0_ET;
7007} 7008}
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 2f822cd886c2..f2afa5fe48a6 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -180,9 +180,9 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
180bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, 180bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
181 int page_num); 181 int page_num);
182 182
183#define KVM_SUPPORTED_XCR0 (XSTATE_FP | XSTATE_SSE | XSTATE_YMM \ 183#define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
184 | XSTATE_BNDREGS | XSTATE_BNDCSR \ 184 | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
185 | XSTATE_AVX512) 185 | XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512)
186extern u64 host_xcr0; 186extern u64 host_xcr0;
187 187
188extern u64 kvm_supported_xcr0(void); 188extern u64 kvm_supported_xcr0(void);
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 134948b0926f..f35fc9c6ed50 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -258,7 +258,7 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
258 goto err_out; 258 goto err_out;
259 } 259 }
260 /* get bndregs field from current task's xsave area */ 260 /* get bndregs field from current task's xsave area */
261 bndregs = get_xsave_field_ptr(XSTATE_BNDREGS); 261 bndregs = get_xsave_field_ptr(XFEATURE_MASK_BNDREGS);
262 if (!bndregs) { 262 if (!bndregs) {
263 err = -EINVAL; 263 err = -EINVAL;
264 goto err_out; 264 goto err_out;
@@ -315,7 +315,7 @@ static __user void *mpx_get_bounds_dir(void)
315 * The bounds directory pointer is stored in a register 315 * The bounds directory pointer is stored in a register
316 * only accessible if we first do an xsave. 316 * only accessible if we first do an xsave.
317 */ 317 */
318 bndcsr = get_xsave_field_ptr(XSTATE_BNDCSR); 318 bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
319 if (!bndcsr) 319 if (!bndcsr)
320 return MPX_INVALID_BOUNDS_DIR; 320 return MPX_INVALID_BOUNDS_DIR;
321 321
@@ -492,7 +492,7 @@ static int do_mpx_bt_fault(void)
492 const struct bndcsr *bndcsr; 492 const struct bndcsr *bndcsr;
493 struct mm_struct *mm = current->mm; 493 struct mm_struct *mm = current->mm;
494 494
495 bndcsr = get_xsave_field_ptr(XSTATE_BNDCSR); 495 bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
496 if (!bndcsr) 496 if (!bndcsr)
497 return -EINVAL; 497 return -EINVAL;
498 /* 498 /*