aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorFenghua Yu <fenghua.yu@intel.com>2014-05-29 14:12:38 -0400
committerH. Peter Anvin <hpa@linux.intel.com>2014-05-29 17:32:57 -0400
commitfacbf4d91ae64f84ef93a00e4037135cd9f4b2ab (patch)
treeb2f8cdf1e2ecfd04cf901421f03fd340581a3d1f /arch
parentf9de314b340f4816671f037e79ed01f685ac9787 (diff)
x86/xsaves: Use xsave/xrstor for saving and restoring user space context
We use legacy xsave/xrstor to save and restore standard form of xsave area in user space context. No xsaveopt or xsaves is used here for two reasons. First, we don't want to use modified optimization which is implemented in xsaveopt and xsaves because xrstor/xrstors might track a wrong user space application. Secondly, we don't use compacted format of xsave area for backward compatibility because legacy user space applications don't understand the compacted format of the xsave area. Using the standard form of the xsave area may allocate more memory for user context than the compacted form, but preserves compatibility with legacy applications. Furthermore, even with holes, the relevant cache lines don't get touched and thus the performance impact is limited. Signed-off-by: Fenghua Yu <fenghua.yu@intel.com> Link: http://lkml.kernel.org/r/1401387164-43416-11-git-send-email-fenghua.yu@intel.com Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/include/asm/xsave.h33
1 files changed, 18 insertions, 15 deletions
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 8b75824e41dd..0d1523146545 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -145,6 +145,16 @@ static inline int fpu_xrstor_checking(struct xsave_struct *fx)
145 return xrstor_state(fx, -1); 145 return xrstor_state(fx, -1);
146} 146}
147 147
148/*
149 * Save xstate to user space xsave area.
150 *
151 * We don't use modified optimization because xrstor/xrstors might track
152 * a different application.
153 *
154 * We don't use compacted format xsave area for
155 * backward compatibility for old applications which don't understand
156 * compacted format of xsave area.
157 */
148static inline int xsave_user(struct xsave_struct __user *buf) 158static inline int xsave_user(struct xsave_struct __user *buf)
149{ 159{
150 int err; 160 int err;
@@ -158,35 +168,28 @@ static inline int xsave_user(struct xsave_struct __user *buf)
158 return -EFAULT; 168 return -EFAULT;
159 169
160 __asm__ __volatile__(ASM_STAC "\n" 170 __asm__ __volatile__(ASM_STAC "\n"
161 "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n" 171 "1:"XSAVE"\n"
162 "2: " ASM_CLAC "\n" 172 "2: " ASM_CLAC "\n"
163 ".section .fixup,\"ax\"\n" 173 xstate_fault
164 "3: movl $-1,%[err]\n"
165 " jmp 2b\n"
166 ".previous\n"
167 _ASM_EXTABLE(1b,3b)
168 : [err] "=r" (err)
169 : "D" (buf), "a" (-1), "d" (-1), "0" (0) 174 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
170 : "memory"); 175 : "memory");
171 return err; 176 return err;
172} 177}
173 178
179/*
180 * Restore xstate from user space xsave area.
181 */
174static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask) 182static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
175{ 183{
176 int err; 184 int err = 0;
177 struct xsave_struct *xstate = ((__force struct xsave_struct *)buf); 185 struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
178 u32 lmask = mask; 186 u32 lmask = mask;
179 u32 hmask = mask >> 32; 187 u32 hmask = mask >> 32;
180 188
181 __asm__ __volatile__(ASM_STAC "\n" 189 __asm__ __volatile__(ASM_STAC "\n"
182 "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n" 190 "1:"XRSTOR"\n"
183 "2: " ASM_CLAC "\n" 191 "2: " ASM_CLAC "\n"
184 ".section .fixup,\"ax\"\n" 192 xstate_fault
185 "3: movl $-1,%[err]\n"
186 " jmp 2b\n"
187 ".previous\n"
188 _ASM_EXTABLE(1b,3b)
189 : [err] "=r" (err)
190 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0) 193 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
191 : "memory"); /* memory required? */ 194 : "memory"); /* memory required? */
192 return err; 195 return err;