about summary refs log tree commit diff stats
path: root/arch/x86
diff options
context:
space:
mode:
author: Suresh Siddha <suresh.b.siddha@intel.com> 2010-07-19 19:05:52 -0400
committer: H. Peter Anvin <hpa@linux.intel.com> 2010-07-19 20:52:24 -0400
commit 6bad06b768920e278c7cedfdda56a0b4c6a35ee9 (patch)
tree d84dd7d1f0f9afcf4880cea734bf3a32e8e9804c /arch/x86
parent 29104e101d710dd152f807978884643a52eca8b7 (diff)
x86, xsave: Use xsaveopt in context-switch path when supported
xsaveopt is a more optimized form of xsave specifically designed for the context switch usage. xsaveopt doesn't save the state that's not modified from the prior xrstor. And if a specific feature state gets modified to the init state, then xsaveopt just updates the header bit in the xsave memory layout without updating the corresponding memory layout. Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com> LKML-Reference: <20100719230205.604014179@sbs-t61.sc.intel.com> Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86')
-rw-r--r-- arch/x86/include/asm/i387.h  | 2
-rw-r--r-- arch/x86/include/asm/xsave.h | 9
-rw-r--r-- arch/x86/kernel/cpu/common.c | 8
3 files changed, 15 insertions, 4 deletions
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index bb370fd0a1c2..59bd93ac7fef 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -60,7 +60,7 @@ extern int restore_i387_xstate_ia32(void __user *buf);
60 60
61static __always_inline __pure bool use_xsaveopt(void) 61static __always_inline __pure bool use_xsaveopt(void)
62{ 62{
63 return 0; 63 return static_cpu_has(X86_FEATURE_XSAVEOPT);
64} 64}
65 65
66static __always_inline __pure bool use_xsave(void) 66static __always_inline __pure bool use_xsave(void)
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 0c72adc0cb15..ec86c5fd6a6e 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -125,8 +125,11 @@ static inline void fpu_xsave(struct fpu *fpu)
125{ 125{
126 /* This, however, we can work around by forcing the compiler to select 126 /* This, however, we can work around by forcing the compiler to select
127 an addressing mode that doesn't require extended registers. */ 127 an addressing mode that doesn't require extended registers. */
128 __asm__ __volatile__(".byte " REX_PREFIX "0x0f,0xae,0x27" 128 alternative_input(
129 : : "D" (&(fpu->state->xsave)), 129 ".byte " REX_PREFIX "0x0f,0xae,0x27",
130 "a" (-1), "d"(-1) : "memory"); 130 ".byte " REX_PREFIX "0x0f,0xae,0x37",
131 X86_FEATURE_XSAVEOPT,
132 [fx] "D" (&fpu->state->xsave), "a" (-1), "d" (-1) :
133 "memory");
131} 134}
132#endif 135#endif
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c7358303d8cd..3f715efc594d 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -140,10 +140,18 @@ EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
140static int __init x86_xsave_setup(char *s) 140static int __init x86_xsave_setup(char *s)
141{ 141{
142 setup_clear_cpu_cap(X86_FEATURE_XSAVE); 142 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
143 setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
143 return 1; 144 return 1;
144} 145}
145__setup("noxsave", x86_xsave_setup); 146__setup("noxsave", x86_xsave_setup);
146 147
148static int __init x86_xsaveopt_setup(char *s)
149{
150 setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
151 return 1;
152}
153__setup("noxsaveopt", x86_xsaveopt_setup);
154
147#ifdef CONFIG_X86_32 155#ifdef CONFIG_X86_32
148static int cachesize_override __cpuinitdata = -1; 156static int cachesize_override __cpuinitdata = -1;
149static int disable_x86_serial_nr __cpuinitdata = 1; 157static int disable_x86_serial_nr __cpuinitdata = 1;