diff options
author    : Suresh Siddha <suresh.b.siddha@intel.com>  (2010-07-19 19:05:52 -0400)
committer : H. Peter Anvin <hpa@linux.intel.com>       (2010-07-19 20:52:24 -0400)
commit    : 6bad06b768920e278c7cedfdda56a0b4c6a35ee9 (patch)
tree      : d84dd7d1f0f9afcf4880cea734bf3a32e8e9804c /arch/x86/include
parent    : 29104e101d710dd152f807978884643a52eca8b7 (diff)
x86, xsave: Use xsaveopt in context-switch path when supported
xsaveopt is a more optimized form of xsave, specifically designed
for the context-switch usage. xsaveopt doesn't save state that has not
been modified since the prior xrstor. And if a specific feature's state
gets modified back to the init state, then xsaveopt just updates the
header bit in the xsave memory layout without updating the corresponding
state area.
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
LKML-Reference: <20100719230205.604014179@sbs-t61.sc.intel.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86/include')
 arch/x86/include/asm/i387.h  | 2 +-
 arch/x86/include/asm/xsave.h | 9 ++++++---
 2 files changed, 7 insertions(+), 4 deletions(-)
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index bb370fd0a1c2..59bd93ac7fef 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -60,7 +60,7 @@ extern int restore_i387_xstate_ia32(void __user *buf);
 
 static __always_inline __pure bool use_xsaveopt(void)
 {
-	return 0;
+	return static_cpu_has(X86_FEATURE_XSAVEOPT);
 }
 
 static __always_inline __pure bool use_xsave(void)
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 0c72adc0cb15..ec86c5fd6a6e 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -125,8 +125,11 @@ static inline void fpu_xsave(struct fpu *fpu)
 {
 	/* This, however, we can work around by forcing the compiler to select
 	   an addressing mode that doesn't require extended registers. */
-	__asm__ __volatile__(".byte " REX_PREFIX "0x0f,0xae,0x27"
-			     : : "D" (&(fpu->state->xsave)),
-				 "a" (-1), "d"(-1) : "memory");
+	alternative_input(
+		".byte " REX_PREFIX "0x0f,0xae,0x27",
+		".byte " REX_PREFIX "0x0f,0xae,0x37",
+		X86_FEATURE_XSAVEOPT,
+		[fx] "D" (&fpu->state->xsave), "a" (-1), "d" (-1) :
+		"memory");
 }
 #endif