author		Suresh Siddha <suresh.b.siddha@intel.com>	2010-07-19 19:05:49 -0400
committer	H. Peter Anvin <hpa@linux.intel.com>	2010-07-19 20:51:30 -0400
commit		29104e101d710dd152f807978884643a52eca8b7 (patch)
tree		03ab625528d91ad13d60a817c7885591fea197b3 /arch/x86/include
parent		a1488f8bf4d72ad724700f6e982469a1240e4264 (diff)
x86, xsave: Sync xsave memory layout with its header for user handling
With xsaveopt, if a processor implementation discerns that a processor state component is in its initialized state, it may clear the corresponding bit in xsave_hdr.xstate_bv to '0' without modifying the corresponding memory layout. Hence, while presenting the xstate information to the user, we always ensure that the memory layout of a feature will be in the init state if the corresponding header bit is zero. This ensures consistency and avoids the user seeing stale state in the memory layout during signal handling, debugging, etc.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
LKML-Reference: <20100719230205.351459480@sbs-t61.sc.intel.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
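The situation the patch handles can be sketched in a few lines of C (a simplified illustration, not the kernel's __sanitize_i387_state(), which is implemented outside this diff; treating the init state as all zeros is an assumption that holds for most extended states but not for the x87 legacy area):

/*
 * Simplified sketch of the sanitization idea: if xsaveopt left a
 * feature's xstate_bv bit clear, the bytes backing that feature in the
 * xsave area may be stale, so force them to the init state before the
 * buffer is shown to user space.
 */
#include <stdint.h>
#include <string.h>

struct xsave_hdr_sketch {
	uint64_t xstate_bv;		/* which components hold live state */
};

static void sanitize_component(struct xsave_hdr_sketch *hdr, void *area,
			       uint64_t feature_bit,
			       size_t offset, size_t size)
{
	if (hdr->xstate_bv & feature_bit)
		return;			/* memory layout already valid */
	/* Header bit is 0: user must see init state, not stale data. */
	memset((uint8_t *)area + offset, 0, size);
}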
Diffstat (limited to 'arch/x86/include')
-rw-r--r--	arch/x86/include/asm/i387.h	14
-rw-r--r--	arch/x86/include/asm/xsave.h	10
2 files changed, 24 insertions, 0 deletions
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index c991b3a7b904..bb370fd0a1c2 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -58,11 +58,25 @@ extern int restore_i387_xstate_ia32(void __user *buf);
 
 #define X87_FSW_ES (1 << 7)	/* Exception Summary */
 
+static __always_inline __pure bool use_xsaveopt(void)
+{
+	return 0;
+}
+
 static __always_inline __pure bool use_xsave(void)
 {
 	return static_cpu_has(X86_FEATURE_XSAVE);
 }
 
+extern void __sanitize_i387_state(struct task_struct *);
+
+static inline void sanitize_i387_state(struct task_struct *tsk)
+{
+	if (!use_xsaveopt())
+		return;
+	__sanitize_i387_state(tsk);
+}
+
 #ifdef CONFIG_X86_64
 
 /* Ignore delayed exceptions from user space */
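Note that use_xsaveopt() is introduced here as a hard-coded 0, so sanitize_i387_state() compiles away until xsaveopt detection is wired up later in the series; the wrapper exists so that paths exposing xstate to user space can call it unconditionally. A hypothetical call site (a sketch only; the function name and fpu field paths are approximations for this kernel era, not the patch's actual ptrace code) could look like:

/*
 * Hypothetical caller, illustrative only: sanitize the task's xstate
 * before copying it out, so a cleared xstate_bv bit can never leak
 * stale register contents to user space.
 */
static int xstateregs_get_sketch(struct task_struct *tsk,
				 void __user *ubuf, unsigned int size)
{
	sanitize_i387_state(tsk);	/* no-op while use_xsaveopt() == 0 */
	if (copy_to_user(ubuf, &tsk->thread.fpu.state->xsave, size))
		return -EFAULT;
	return 0;
}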
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 2c4390cae228..0c72adc0cb15 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -111,6 +111,16 @@ static inline void xrstor_state(struct xsave_struct *fx, u64 mask)
 		     :   "memory");
 }
 
+static inline void xsave_state(struct xsave_struct *fx, u64 mask)
+{
+	u32 lmask = mask;
+	u32 hmask = mask >> 32;
+
+	asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x27\n\t"
+		     : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+		     :   "memory");
+}
+
 static inline void fpu_xsave(struct fpu *fpu)
 {
 	/* This, however, we can work around by forcing the compiler to select
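The .byte sequence exists because assemblers of the era did not all know the xsave mnemonic. REX_PREFIX is defined earlier in this header ("0x48, " on 64-bit builds), and 0x0f,0xae with ModRM byte 0x27 encodes xsave with opcode extension /4 and (%rdi)/(%edi) as the memory operand; that is why the "D" constraint pins fx to that register, while edx:eax carry the 64-bit feature mask. With a newer assembler, an equivalent sketch (not the patch's code) would be:

/*
 * Equivalent formulation using the mnemonic (a sketch; the kernel keeps
 * the .byte encoding for old binutils).  The "+m" operand lets the
 * compiler pick the addressing mode instead of hard-coding (%rdi),
 * while edx:eax still supply the 64-bit mask.
 */
static inline void xsave_state_mnemonic(struct xsave_struct *fx, u64 mask)
{
	u32 lmask = mask;		/* low 32 bits  -> eax */
	u32 hmask = mask >> 32;		/* high 32 bits -> edx */

	asm volatile("xsave %0"
		     : "+m" (*fx)
		     : "a" (lmask), "d" (hmask)
		     : "memory");
}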