Diffstat (limited to 'arch/x86/include/asm/xsave.h')
 arch/x86/include/asm/xsave.h | 223 +++++++++++++++++++++++++++++++----------
 1 file changed, 167 insertions(+), 56 deletions(-)
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index d949ef28c48b..7e7a79ada658 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -52,24 +52,170 @@ extern void xsave_init(void);
 extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);
 extern int init_fpu(struct task_struct *child);
 
-static inline int fpu_xrstor_checking(struct xsave_struct *fx)
+/* These macros all use (%edi)/(%rdi) as the single memory argument. */
+#define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"
+#define XSAVEOPT	".byte " REX_PREFIX "0x0f,0xae,0x37"
+#define XSAVES		".byte " REX_PREFIX "0x0f,0xc7,0x2f"
+#define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
+#define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"
+
+#define xstate_fault	".section .fixup,\"ax\"\n" \
+			"3: movl $-1,%[err]\n" \
+			"   jmp 2b\n" \
+			".previous\n" \
+			_ASM_EXTABLE(1b, 3b) \
+			: [err] "=r" (err)
+
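
The xstate_fault macro factors out the exception-fixup boilerplate that every
save/restore helper below would otherwise repeat: the caller labels the
potentially faulting instruction "1:" and the resume point "2:", and the macro
supplies the .fixup stub "3:" (which stores -1 in err and jumps back to "2:"),
the extable entry mapping "1:" to "3:", and the [err] output operand. Written
out by hand for XSAVE, the pattern the macro encodes is essentially the
pre-patch code:

	int err = 0;
	asm volatile("1:" XSAVE "\n\t"
		     "2:\n\t"
		     ".section .fixup,\"ax\"\n"
		     "3: movl $-1,%[err]\n"
		     "   jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err)
		     : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask), "0" (0)
		     : "memory");
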
+/*
+ * This function is called only during boot, before the x86 caps are set
+ * up and therefore before alternatives can be used.
+ */
+static inline int xsave_state_booting(struct xsave_struct *fx, u64 mask)
 {
-	int err;
+	u32 lmask = mask;
+	u32 hmask = mask >> 32;
+	int err = 0;
+
+	WARN_ON(system_state != SYSTEM_BOOTING);
+
+	if (boot_cpu_has(X86_FEATURE_XSAVES))
+		asm volatile("1:"XSAVES"\n\t"
+			"2:\n\t"
+			: : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+			: "memory");
+	else
+		asm volatile("1:"XSAVE"\n\t"
+			"2:\n\t"
+			: : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+			: "memory");
+
+	asm volatile(xstate_fault
+		     : "0" (0)
+		     : "memory");
+
+	return err;
+}
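
An aside on the mask split (illustrative, not part of the patch): the
XSAVE-family instructions take the requested-feature bitmap in edx:eax, which
is why the 64-bit mask is broken into lmask ("a") and hmask ("d") above. Using
the XSTATE_* component bits this header defines:

	u64 mask  = XSTATE_FP | XSTATE_SSE | XSTATE_YMM; /* FP, SSE, AVX */
	u32 lmask = mask;	/* low 32 bits, passed in %eax ("a") */
	u32 hmask = mask >> 32;	/* high 32 bits, passed in %edx ("d") */

The booting variants branch on boot_cpu_has() directly because they run before
alternatives patching, which the non-boot versions below rely on.
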
+
+/*
+ * This function is called only during boot, before the x86 caps are set
+ * up and therefore before alternatives can be used.
+ */
+static inline int xrstor_state_booting(struct xsave_struct *fx, u64 mask)
+{
+	u32 lmask = mask;
+	u32 hmask = mask >> 32;
+	int err = 0;
+
+	WARN_ON(system_state != SYSTEM_BOOTING);
 
-	asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
-		     "2:\n"
-		     ".section .fixup,\"ax\"\n"
-		     "3: movl $-1,%[err]\n"
-		     "   jmp 2b\n"
-		     ".previous\n"
-		     _ASM_EXTABLE(1b, 3b)
-		     : [err] "=r" (err)
-		     : "D" (fx), "m" (*fx), "a" (-1), "d" (-1), "0" (0)
+	if (boot_cpu_has(X86_FEATURE_XSAVES))
+		asm volatile("1:"XRSTORS"\n\t"
+			"2:\n\t"
+			: : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+			: "memory");
+	else
+		asm volatile("1:"XRSTOR"\n\t"
+			"2:\n\t"
+			: : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+			: "memory");
+
+	asm volatile(xstate_fault
+		     : "0" (0)
+		     : "memory");
+
+	return err;
+}
+
+/*
+ * Save processor xstate to the xsave area.
+ */
+static inline int xsave_state(struct xsave_struct *fx, u64 mask)
+{
+	u32 lmask = mask;
+	u32 hmask = mask >> 32;
+	int err = 0;
+
+	/*
+	 * If xsaves is enabled, xsaves replaces xsaveopt because it supports
+	 * the compact format and supervisor states in addition to xsaveopt's
+	 * modified optimization.
+	 *
+	 * Otherwise, if xsaveopt is enabled, xsaveopt replaces xsave because
+	 * xsaveopt supports the modified optimization, which xsave does not.
+	 *
+	 * If neither xsaves nor xsaveopt is enabled, use xsave.
+	 */
+	alternative_input_2(
+		"1:"XSAVE,
+		"1:"XSAVEOPT,
+		X86_FEATURE_XSAVEOPT,
+		"1:"XSAVES,
+		X86_FEATURE_XSAVES,
+		[fx] "D" (fx), "a" (lmask), "d" (hmask) :
+		"memory");
+	asm volatile("2:\n\t"
+		     xstate_fault
+		     : "0" (0)
 		     : "memory");
 
 	return err;
 }
 
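
The alternative_input_2() above emits the plain XSAVE byte sequence and lets
boot-time alternatives patching rewrite it in place to XSAVEOPT or XSAVES once
the feature flags are known, so the hot path carries no branch. For flavor, a
hedged user-space analogue of the same dispatch idea (not kernel code; xsaves
is supervisor-only, so only the xsaveopt upgrade is shown; build with
-mxsave -mxsaveopt, buf must be 64-byte aligned and sized per CPUID.0DH, and
the OS must have set OSXSAVE):

	#include <cpuid.h>
	#include <immintrin.h>

	static void save_xstate(void *buf, unsigned long long mask)
	{
		unsigned int eax, ebx, ecx, edx;

		/* CPUID.(EAX=0DH,ECX=1):EAX bit 0 advertises XSAVEOPT */
		if (__get_cpuid_count(0xd, 1, &eax, &ebx, &ecx, &edx) &&
		    (eax & 1))
			_xsaveopt(buf, mask);	/* modified optimization */
		else
			_xsave(buf, mask);
	}
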
+/*
+ * Restore processor xstate from the xsave area.
+ */
+static inline int xrstor_state(struct xsave_struct *fx, u64 mask)
+{
+	int err = 0;
+	u32 lmask = mask;
+	u32 hmask = mask >> 32;
+
+	/*
+	 * Use xrstors to restore context if it is enabled. xrstors supports
+	 * the compacted format of the xsave area, which xrstor does not.
+	 */
+	alternative_input(
+		"1: " XRSTOR,
+		"1: " XRSTORS,
+		X86_FEATURE_XSAVES,
+		"D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+		: "memory");
+
+	asm volatile("2:\n"
+		     xstate_fault
+		     : "0" (0)
+		     : "memory");
+
+	return err;
+}
+
+/*
+ * Save the old task's xstate context during a context switch.
+ */
+static inline void fpu_xsave(struct fpu *fpu)
+{
+	xsave_state(&fpu->state->xsave, -1);
+}
+
+/*
+ * Restore the new task's xstate context during a context switch.
+ */
+static inline int fpu_xrstor_checking(struct xsave_struct *fx)
+{
+	return xrstor_state(fx, -1);
+}
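
Both wrappers pass a mask of -1 (all ones): the hardware ANDs the requested
bitmap with XCR0, so asking for everything simply saves or restores every
enabled component. Hypothetical call sites, with task and field names assumed
for illustration only:

	/* switching out: capture the old task's extended state */
	fpu_xsave(&prev->thread.fpu);

	/* switching in: a non-zero return means the restore faulted
	   and the FPU state must be reinitialized */
	if (fpu_xrstor_checking(&next->thread.fpu.state->xsave))
		/* ... */;
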
+
+/*
+ * Save xstate to the user-space xsave area.
+ *
+ * We don't use the modified optimization because xrstor/xrstors might
+ * track a different application.
+ *
+ * We don't use the compacted format of the xsave area, for backward
+ * compatibility with old applications that don't understand it.
+ */
 static inline int xsave_user(struct xsave_struct __user *buf)
 {
 	int err;
@@ -83,69 +229,34 @@ static inline int xsave_user(struct xsave_struct __user *buf)
 		return -EFAULT;
 
 	__asm__ __volatile__(ASM_STAC "\n"
-			     "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
+			     "1:"XSAVE"\n"
 			     "2: " ASM_CLAC "\n"
-			     ".section .fixup,\"ax\"\n"
-			     "3: movl $-1,%[err]\n"
-			     "   jmp 2b\n"
-			     ".previous\n"
-			     _ASM_EXTABLE(1b,3b)
-			     : [err] "=r" (err)
+			     xstate_fault
			     : "D" (buf), "a" (-1), "d" (-1), "0" (0)
			     : "memory");
	return err;
 }
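
The compatibility concern in the comment above is one of layout: user space
(signal handlers, debuggers) expects the standard, non-compacted format, which
begins with the legacy FXSAVE image and a fixed header, followed by extended
components at CPUID-reported offsets. A rough sketch of that expectation
(illustrative; not the kernel's actual declaration):

	struct xsave_area_std {		/* hypothetical name */
		u8 legacy_fxsave[512];	/* x87/SSE, same as FXSAVE */
		u8 xsave_header[64];	/* XSTATE_BV + reserved bytes */
		u8 extended[];		/* AVX etc. at CPUID.0DH offsets */
	};

The compacted format written by XSAVES packs the present components densely
instead, which old binaries cannot parse; hence plain XSAVE here.
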
 
+/*
+ * Restore xstate from the user-space xsave area.
+ */
 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
 {
-	int err;
+	int err = 0;
 	struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
 
 	__asm__ __volatile__(ASM_STAC "\n"
-			     "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
+			     "1:"XRSTOR"\n"
 			     "2: " ASM_CLAC "\n"
-			     ".section .fixup,\"ax\"\n"
-			     "3: movl $-1,%[err]\n"
-			     "   jmp 2b\n"
-			     ".previous\n"
-			     _ASM_EXTABLE(1b,3b)
-			     : [err] "=r" (err)
			     : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
			     : "memory");	/* memory required? */
	return err;
 }
 
-static inline void xrstor_state(struct xsave_struct *fx, u64 mask)
-{
-	u32 lmask = mask;
-	u32 hmask = mask >> 32;
-
-	asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
-		     : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
-		     : "memory");
-}
-
-static inline void xsave_state(struct xsave_struct *fx, u64 mask)
-{
-	u32 lmask = mask;
-	u32 hmask = mask >> 32;
+void *get_xsave_addr(struct xsave_struct *xsave, int xstate);
+void setup_xstate_comp(void);
 
-	asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x27\n\t"
-		     : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
-		     : "memory");
-}
-
-static inline void fpu_xsave(struct fpu *fpu)
-{
-	/* This, however, we can work around by forcing the compiler to select
-	   an addressing mode that doesn't require extended registers. */
-	alternative_input(
-		".byte " REX_PREFIX "0x0f,0xae,0x27",
-		".byte " REX_PREFIX "0x0f,0xae,0x37",
-		X86_FEATURE_XSAVEOPT,
-		[fx] "D" (&fpu->state->xsave), "a" (-1), "d" (-1) :
-		"memory");
-}
 #endif
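
The two declarations added at the bottom are the compaction helpers:
setup_xstate_comp() precomputes per-component offsets for the compacted
format, and get_xsave_addr() resolves a component's address inside an xsave
area. A hedged usage sketch (task and field names illustrative; the helper is
expected to return NULL when the component is not present in the buffer):

	/* locate the AVX high halves inside a task's xsave buffer */
	struct ymmh_struct *ymmh =
		get_xsave_addr(&tsk->thread.fpu.state->xsave, XSTATE_YMM);
	if (!ymmh)
		return;	/* AVX state not saved in this area */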