author    Fenghua Yu <fenghua.yu@intel.com>        2014-05-29 14:12:36 -0400
committer H. Peter Anvin <hpa@linux.intel.com>     2014-05-29 17:31:21 -0400
commit    f31a9f7c71691569359fa7fb8b0acaa44bce0324 (patch)
tree      d645ef9b40bb233a177bcebf9be1cb9ef77823d6 /arch
parent    b84e70552e5aad71a1c14536e6ffcfe7934b73e4 (diff)
x86/xsaves: Use xsaves/xrstors to save and restore xsave area
If xsaves is enabled, use xsaves/xrstors instructions to save and restore
xstate. xsaves and xrstors support compacted format, init optimization,
modified optimization, and supervisor states.

Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Link: http://lkml.kernel.org/r/1401387164-43416-9-git-send-email-fenghua.yu@intel.com
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
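For illustration only (not part of this patch), a minimal sketch of how the
two helpers added by the diff below might be called; the wrapper name, the
caller-supplied buffer, and the all-ones mask are assumptions made for the
example, not taken from the kernel:

	/*
	 * Hypothetical caller: save every enabled xstate component into an
	 * xsave area and restore it again.  xsave_state()/xrstor_state()
	 * pick xsaves/xsaveopt/xsave (resp. xrstors/xrstor) via alternatives.
	 */
	static int xstate_roundtrip(struct xsave_struct *buf)
	{
		u64 mask = -1ULL;	/* illustrative: request all components */
		int err;

		err = xsave_state(buf, mask);
		if (err)
			return err;

		return xrstor_state(buf, mask);	/* 0 on success */
	}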
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/xsave.h  84
1 file changed, 64 insertions, 20 deletions
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 76c2459188c8..f9177a2a97e9 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -65,6 +65,70 @@ extern int init_fpu(struct task_struct *child);
 		     _ASM_EXTABLE(1b, 3b)	\
 		     : [err] "=r" (err)
 
+/*
+ * Save processor xstate to xsave area.
+ */
+static inline int xsave_state(struct xsave_struct *fx, u64 mask)
+{
+	u32 lmask = mask;
+	u32 hmask = mask >> 32;
+	int err = 0;
+
+	/*
+	 * If xsaves is enabled, xsaves replaces xsaveopt because
+	 * it supports compact format and supervisor states in addition to
+	 * modified optimization in xsaveopt.
+	 *
+	 * Otherwise, if xsaveopt is enabled, xsaveopt replaces xsave
+	 * because xsaveopt supports modified optimization which is not
+	 * supported by xsave.
+	 *
+	 * If none of xsaves and xsaveopt is enabled, use xsave.
+	 */
+	alternative_input_2(
+		"1:"XSAVE,
+		"1:"XSAVEOPT,
+		X86_FEATURE_XSAVEOPT,
+		"1:"XSAVES,
+		X86_FEATURE_XSAVES,
+		[fx] "D" (fx), "a" (lmask), "d" (hmask) :
+		"memory");
+	asm volatile("2:\n\t"
+		     xstate_fault
+		     : "0" (0)
+		     : "memory");
+
+	return err;
+}
+
+/*
+ * Restore processor xstate from xsave area.
+ */
+static inline int xrstor_state(struct xsave_struct *fx, u64 mask)
+{
+	int err = 0;
+	u32 lmask = mask;
+	u32 hmask = mask >> 32;
+
+	/*
+	 * Use xrstors to restore context if it is enabled. xrstors supports
+	 * compacted format of xsave area which is not supported by xrstor.
+	 */
+	alternative_input(
+		"1: " XRSTOR,
+		"1: " XRSTORS,
+		X86_FEATURE_XSAVES,
+		"D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+		: "memory");
+
+	asm volatile("2:\n"
+		     xstate_fault
+		     : "0" (0)
+		     : "memory");
+
+	return err;
+}
+
 static inline int fpu_xrstor_checking(struct xsave_struct *fx)
 {
 	int err;
@@ -130,26 +194,6 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
 	return err;
 }
 
-static inline void xrstor_state(struct xsave_struct *fx, u64 mask)
-{
-	u32 lmask = mask;
-	u32 hmask = mask >> 32;
-
-	asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
-		     : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
-		     : "memory");
-}
-
-static inline void xsave_state(struct xsave_struct *fx, u64 mask)
-{
-	u32 lmask = mask;
-	u32 hmask = mask >> 32;
-
-	asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x27\n\t"
-		     : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
-		     : "memory");
-}
-
 static inline void fpu_xsave(struct fpu *fpu)
 {
 	/* This, however, we can work around by forcing the compiler to select
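For reference (not part of the patch), a user-space sketch of the CPUID bits
behind the X86_FEATURE_XSAVEOPT and X86_FEATURE_XSAVES flags used in the
alternatives above. It assumes a GCC/clang toolchain that provides <cpuid.h>
with __get_cpuid_count(); the program is purely illustrative:

	#include <stdio.h>
	#include <cpuid.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* CPUID.(EAX=0DH, ECX=1):EAX enumerates the xsave extensions. */
		if (!__get_cpuid_count(0x0d, 1, &eax, &ebx, &ecx, &edx))
			return 1;	/* leaf 0xD not supported */

		printf("xsaveopt: %s\n", (eax & (1u << 0)) ? "yes" : "no");
		printf("xsaves:   %s\n", (eax & (1u << 3)) ? "yes" : "no");
		return 0;
	}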