author     Suresh Siddha <suresh.b.siddha@intel.com>    2008-07-29 13:29:20 -0400
committer  Ingo Molnar <mingo@elte.hu>                  2008-07-30 13:49:24 -0400
commit     b359e8a434cc3d09847010fc4aeccf48d69740e4 (patch)
tree       8911c299dc1768c78d5452a1e7e0efd2fc8d5abb /include/asm-x86/xsave.h
parent     dc1e35c6e95e8923cf1d3510438b63c600fee1e2 (diff)
x86, xsave: context switch support using xsave/xrstor
Use xsave/xrstor (instead of the traditional fxsave/fxrstor) in the context switch when available. Introduce the TS_XSAVE flag, which determines whether to use the xsave/xrstor instructions or the legacy fxsave/fxrstor instructions during the context switch. The thread-synchronous status word is already in the L1 cache during this code path, which minimizes the performance penalty compared to (cpu_has_xsave) checks.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
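The TS_XSAVE dispatch itself lives in the i387 context-switch code rather than in this header; the hunk below only adds the xsave/xrstor primitives that path calls. As a rough, self-contained illustration of the flag-based selection described above (not code from this patch; the flag value, types and function names are invented for the example):

/*
 * Illustrative sketch only: a per-thread status bit, set once when the
 * CPU is found to support XSAVE, selects the save routine at context
 * switch time without re-evaluating a CPU feature check.
 */
#include <stdio.h>

#define TS_XSAVE	0x0010		/* hypothetical flag bit */

struct thread_status {
	unsigned int status;		/* thread-synchronous status word */
};

static void do_fxsave(void) { puts("legacy fxsave/fxrstor path"); }
static void do_xsave(void)  { puts("xsave/xrstor path"); }

static void save_fpu_state(const struct thread_status *ts)
{
	/* Hot path: a single test on a word that is already in L1 cache. */
	if (ts->status & TS_XSAVE)
		do_xsave();
	else
		do_fxsave();
}

int main(void)
{
	struct thread_status ts = { .status = TS_XSAVE };

	save_fpu_state(&ts);	/* takes the xsave path */
	ts.status = 0;
	save_fpu_state(&ts);	/* falls back to fxsave */
	return 0;
}

Keeping the decision in the thread-synchronous status word means the branch tests data the context-switch code touches anyway, which is the performance argument the commit message makes.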
Diffstat (limited to 'include/asm-x86/xsave.h')
-rw-r--r--	include/asm-x86/xsave.h	35
1 file changed, 34 insertions(+), 1 deletion(-)
diff --git a/include/asm-x86/xsave.h b/include/asm-x86/xsave.h
index 6d70e62c6bdc..e835a917ee19 100644
--- a/include/asm-x86/xsave.h
+++ b/include/asm-x86/xsave.h
@@ -17,10 +17,43 @@
 #define XCNTXT_LMASK	(XSTATE_FP | XSTATE_SSE)
 #define XCNTXT_HMASK	0x0
 
+#ifdef CONFIG_X86_64
+#define REX_PREFIX	"0x48, "
+#else
+#define REX_PREFIX
+#endif
+
 extern unsigned int xstate_size, pcntxt_hmask, pcntxt_lmask;
 extern struct xsave_struct *init_xstate_buf;
 
 extern void xsave_cntxt_init(void);
 extern void xsave_init(void);
-
+extern int init_fpu(struct task_struct *child);
+
+static inline int xrstor_checking(struct xsave_struct *fx)
+{
+	int err;
+
+	asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
+		     "2:\n"
+		     ".section .fixup,\"ax\"\n"
+		     "3:  movl $-1,%[err]\n"
+		     "    jmp  2b\n"
+		     ".previous\n"
+		     _ASM_EXTABLE(1b, 3b)
+		     : [err] "=r" (err)
+		     : "D" (fx), "m" (*fx), "a" (-1), "d" (-1), "0" (0)
+		     : "memory");
+
+	return err;
+}
+
+static inline void xsave(struct task_struct *tsk)
+{
+	/* This, however, we can work around by forcing the compiler to select
+	   an addressing mode that doesn't require extended registers. */
+	__asm__ __volatile__(".byte " REX_PREFIX "0x0f,0xae,0x27"
+			     : : "D" (&(tsk->thread.xstate->xsave)),
+			         "a" (-1), "d" (-1) : "memory");
+}
 #endif
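A note on the encodings above: the instructions are emitted as raw .byte sequences because the assemblers in common use at the time did not know the xsave/xrstor mnemonics. 0x0f,0xae,0x27 is XSAVE (/4) and 0x0f,0xae,0x2f is XRSTOR (/5), each with a ModRM byte that addresses memory through (r|e)di, which is why the pointer is pinned with the "D" constraint; the "a" and "d" operands pass the low and high halves of the all-ones component mask in EDX:EAX, and REX_PREFIX (0x48, REX.W) selects the 64-bit form on x86_64. The .fixup/_ASM_EXTABLE pair turns a faulting XRSTOR into err = -1 instead of an oops. On a toolchain that does understand the mnemonics, equivalent helpers could be written without hand-coded bytes; a minimal sketch, assuming such an assembler and the same struct xsave_struct as this header (the *_mnemonic names are invented here):

/*
 * Sketch only: same semantics as xsave()/xrstor_checking() above, but
 * relying on the assembler to encode the instructions.  Assumes a
 * 64-byte aligned save area and requests all components (mask -1).
 */
static inline void xsave_mnemonic(struct xsave_struct *fx)
{
	/* XSAVE writes (parts of) the save area, so it is an output. */
	asm volatile("xsave %0"
		     : "+m" (*fx)
		     : "a" (-1), "d" (-1));
}

static inline void xrstor_mnemonic(struct xsave_struct *fx)
{
	/* XRSTOR only reads the save area; the "memory" clobber acts as a
	 * compiler barrier, matching xrstor_checking() above. */
	asm volatile("xrstor %0"
		     : : "m" (*fx), "a" (-1), "d" (-1)
		     : "memory");
}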