author	Linus Torvalds <torvalds@linux-foundation.org>	2016-01-11 19:56:38 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-01-11 19:56:38 -0500
commit	6896d9f7e7ee98d772224a539b7581a1e6dd6b2c (patch)
tree	a9c491e55e36437fbe1d358f7cd5f6db222fd1d6
parent	671d5532aaad777782b66eff71bc4dfad25f942d (diff)
parent	e49a449b869afb2b8bf282427c8355bc3a2fad56 (diff)
Merge branch 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fpu updates from Ingo Molnar:
"This cleans up the FPU fault handling methods to be more robust, and
moves eligible variables to .init.data"
* 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/fpu: Put a few variables in .init.data
x86/fpu: Get rid of xstate_fault()
x86/fpu: Add an XSTATE_OP() macro
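The cleanup is easiest to follow with the instruction itself in hand. As a point of reference only (not code from this pull), here is a minimal user-space sketch of XSAVE, the operation all of the kernel macros below wrap. The 4096-byte buffer size is an assumption that covers the legacy region, the header and AVX state on most CPUs; a real caller would size the area from CPUID leaf 0xD. Build with gcc -O2 -mxsave on an XSAVE-capable x86-64 machine.

#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	/* XSAVE requires a 64-byte-aligned save area. */
	unsigned char *buf = aligned_alloc(64, 4096);
	if (!buf)
		return 1;
	memset(buf, 0, 4096);

	/* Ask for every component, exactly like lmask = hmask = -1 in
	 * the kernel helpers; the CPU intersects this with XCR0. */
	_xsave(buf, ~0ULL);

	/* Bytes 512..519 are XSTATE_BV: the components actually saved. */
	uint64_t xstate_bv;
	memcpy(&xstate_bv, buf + 512, sizeof(xstate_bv));
	printf("XSTATE_BV = %#llx\n", (unsigned long long)xstate_bv);

	free(buf);
	return 0;
}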
-rw-r--r--  arch/x86/include/asm/fpu/internal.h  173
-rw-r--r--  arch/x86/kernel/fpu/init.c             4
-rw-r--r--  arch/x86/kernel/fpu/xstate.c           4
3 files changed, 87 insertions(+), 94 deletions(-)
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 3c3550c3a4a3..eadcdd5bb946 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -224,18 +224,67 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
 #define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
 #define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"
 
-/* xstate instruction fault handler: */
-#define xstate_fault(__err)		\
-					\
-	".section .fixup,\"ax\"\n"	\
-					\
-	"3:  movl $-2,%[_err]\n"	\
-	"    jmp  2b\n"			\
-					\
-	".previous\n"			\
-					\
-	_ASM_EXTABLE(1b, 3b)		\
-	: [_err] "=r" (__err)
+#define XSTATE_OP(op, st, lmask, hmask, err)				\
+	asm volatile("1:" op "\n\t"					\
+		     "xor %[err], %[err]\n"				\
+		     "2:\n\t"						\
+		     ".pushsection .fixup,\"ax\"\n\t"			\
+		     "3: movl $-2,%[err]\n\t"				\
+		     "jmp 2b\n\t"					\
+		     ".popsection\n\t"					\
+		     _ASM_EXTABLE(1b, 3b)				\
+		     : [err] "=r" (err)					\
+		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
+		     : "memory")
+
+/*
+ * If XSAVES is enabled, it replaces XSAVEOPT because it supports a compact
+ * format and supervisor states in addition to modified optimization in
+ * XSAVEOPT.
+ *
+ * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT
+ * supports modified optimization which is not supported by XSAVE.
+ *
+ * We use XSAVE as a fallback.
+ *
+ * The 661 label is defined in the ALTERNATIVE* macros as the address of the
+ * original instruction which gets replaced. We need to use it here as the
+ * address of the instruction where we might get an exception at.
+ */
+#define XSTATE_XSAVE(st, lmask, hmask, err)				\
+	asm volatile(ALTERNATIVE_2(XSAVE,				\
+				   XSAVEOPT, X86_FEATURE_XSAVEOPT,	\
+				   XSAVES,   X86_FEATURE_XSAVES)	\
+		     "\n"						\
+		     "xor %[err], %[err]\n"				\
+		     "3:\n"						\
+		     ".pushsection .fixup,\"ax\"\n"			\
+		     "4: movl $-2, %[err]\n"				\
+		     "jmp 3b\n"						\
+		     ".popsection\n"					\
+		     _ASM_EXTABLE(661b, 4b)				\
+		     : [err] "=r" (err)					\
+		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
+		     : "memory")
+
+/*
+ * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
+ * XSAVE area format.
+ */
+#define XSTATE_XRESTORE(st, lmask, hmask, err)				\
+	asm volatile(ALTERNATIVE(XRSTOR,				\
+				 XRSTORS, X86_FEATURE_XSAVES)		\
+		     "\n"						\
+		     "xor %[err], %[err]\n"				\
+		     "3:\n"						\
+		     ".pushsection .fixup,\"ax\"\n"			\
+		     "4: movl $-2, %[err]\n"				\
+		     "jmp 3b\n"						\
+		     ".popsection\n"					\
+		     _ASM_EXTABLE(661b, 4b)				\
+		     : [err] "=r" (err)					\
+		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
+		     : "memory")
 
 /*
  * This function is called only during boot time when x86 caps are not set
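The new XSTATE_XSAVE() macro leans on ALTERNATIVE_2 to patch in the best available variant once at boot, in the preference order XSAVES > XSAVEOPT > XSAVE described in the comment above. For illustration, a user-space analogue of that same selection, resolved at run time from CPUID leaf 0xD, sub-leaf 1 (assumptions: GCC/Clang's cpuid.h helper; note that XSAVES itself executes only at CPL 0, so user code could act on the first two at most):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID leaf 0xD, sub-leaf 1 enumerates the XSAVE variants. */
	if (!__get_cpuid_count(0xD, 1, &eax, &ebx, &ecx, &edx))
		return 1;

	if (eax & (1u << 3))		/* bit 3: XSAVES/XRSTORS */
		puts("XSAVES preferred (kernel-only instruction)");
	else if (eax & (1u << 0))	/* bit 0: XSAVEOPT */
		puts("XSAVEOPT preferred");
	else
		puts("plain XSAVE as the fallback");

	return 0;
}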
@@ -246,22 +295,14 @@ static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
 	u64 mask = -1;
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err = 0;
+	int err;
 
 	WARN_ON(system_state != SYSTEM_BOOTING);
 
-	if (boot_cpu_has(X86_FEATURE_XSAVES))
-		asm volatile("1:"XSAVES"\n\t"
-			     "2:\n\t"
-			     xstate_fault(err)
-			     : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
-			     : "memory");
+	if (static_cpu_has_safe(X86_FEATURE_XSAVES))
+		XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
 	else
-		asm volatile("1:"XSAVE"\n\t"
-			     "2:\n\t"
-			     xstate_fault(err)
-			     : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
-			     : "memory");
+		XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
 
 	/* We should never fault when copying to a kernel buffer: */
 	WARN_ON_FPU(err);
@@ -276,22 +317,14 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
 	u64 mask = -1;
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err = 0;
+	int err;
 
 	WARN_ON(system_state != SYSTEM_BOOTING);
 
-	if (boot_cpu_has(X86_FEATURE_XSAVES))
-		asm volatile("1:"XRSTORS"\n\t"
-			     "2:\n\t"
-			     xstate_fault(err)
-			     : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
-			     : "memory");
+	if (static_cpu_has_safe(X86_FEATURE_XSAVES))
+		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
 	else
-		asm volatile("1:"XRSTOR"\n\t"
-			     "2:\n\t"
-			     xstate_fault(err)
-			     : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
-			     : "memory");
+		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
 
 	/* We should never fault when copying from a kernel buffer: */
 	WARN_ON_FPU(err);
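Note the change from "int err = 0" to a bare "int err" in both boot-time helpers: the old xstate_fault() consumed err as a matching-constraint input ("0" (err)), so it had to be pre-initialized, whereas XSTATE_OP() writes its [err] "=r" output on every path, the xor on success and the .fixup store on fault. A standalone illustration of such an output-only operand (user-space C with x86 inline asm, not kernel code):

#include <stdio.h>

int main(void)
{
	int err;	/* deliberately uninitialized, like the patched callers */

	/* "=r" is a pure output: the asm defines err on its one path
	 * here, the way XSTATE_OP() defines it on both of its paths. */
	asm volatile("xor %[err], %[err]" : [err] "=r" (err));

	printf("err = %d\n", err);
	return 0;
}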
@@ -305,33 +338,11 @@ static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
 	u64 mask = -1;
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err = 0;
+	int err;
 
 	WARN_ON(!alternatives_patched);
 
-	/*
-	 * If xsaves is enabled, xsaves replaces xsaveopt because
-	 * it supports compact format and supervisor states in addition to
-	 * modified optimization in xsaveopt.
-	 *
-	 * Otherwise, if xsaveopt is enabled, xsaveopt replaces xsave
-	 * because xsaveopt supports modified optimization which is not
-	 * supported by xsave.
-	 *
-	 * If none of xsaves and xsaveopt is enabled, use xsave.
-	 */
-	alternative_input_2(
-		"1:"XSAVE,
-		XSAVEOPT,
-		X86_FEATURE_XSAVEOPT,
-		XSAVES,
-		X86_FEATURE_XSAVES,
-		[xstate] "D" (xstate), "a" (lmask), "d" (hmask) :
-		"memory");
-	asm volatile("2:\n\t"
-		     xstate_fault(err)
-		     : "0" (err)
-		     : "memory");
+	XSTATE_XSAVE(xstate, lmask, hmask, err);
 
 	/* We should never fault when copying to a kernel buffer: */
 	WARN_ON_FPU(err);
@@ -344,23 +355,9 @@ static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
 {
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err = 0;
+	int err;
 
-	/*
-	 * Use xrstors to restore context if it is enabled. xrstors supports
-	 * compacted format of xsave area which is not supported by xrstor.
-	 */
-	alternative_input(
-		"1: " XRSTOR,
-		XRSTORS,
-		X86_FEATURE_XSAVES,
-		"D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask)
-		: "memory");
-
-	asm volatile("2:\n"
-		     xstate_fault(err)
-		     : "0" (err)
-		     : "memory");
+	XSTATE_XRESTORE(xstate, lmask, hmask, err);
 
 	/* We should never fault when copying from a kernel buffer: */
 	WARN_ON_FPU(err);
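In all of these helpers the lmask/hmask pair is nothing more than the 64-bit component bitmap split across EAX and EDX, and the CPU silently intersects the request with XCR0, the set of components the OS has enabled. A user-space peek at XCR0 for orientation (assumption: the _xgetbv() intrinsic, available with gcc -mxsave on a CPU/OS with OSXSAVE set):

#include <immintrin.h>
#include <stdio.h>

int main(void)
{
	/* XGETBV with ECX = 0 returns XCR0, the OS-enabled component
	 * set that any lmask/hmask request is intersected with. */
	unsigned long long xcr0 = _xgetbv(0);

	printf("XCR0 = %#llx (bit 0: x87, bit 1: SSE, bit 2: AVX)\n", xcr0);
	return 0;
}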
@@ -388,12 +385,10 @@ static inline int copy_xregs_to_user(struct xregs_state __user *buf)
 	if (unlikely(err))
 		return -EFAULT;
 
-	__asm__ __volatile__(ASM_STAC "\n"
-			     "1:"XSAVE"\n"
-			     "2: " ASM_CLAC "\n"
-			     xstate_fault(err)
-			     : "D" (buf), "a" (-1), "d" (-1), "0" (err)
-			     : "memory");
+	stac();
+	XSTATE_OP(XSAVE, buf, -1, -1, err);
+	clac();
+
 	return err;
 }
 
@@ -405,14 +400,12 @@ static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
 	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err = 0;
+	int err;
 
-	__asm__ __volatile__(ASM_STAC "\n"
-			     "1:"XRSTOR"\n"
-			     "2: " ASM_CLAC "\n"
-			     xstate_fault(err)
-			     : "D" (xstate), "a" (lmask), "d" (hmask), "0" (err)
-			     : "memory");	/* memory required? */
+	stac();
+	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
+	clac();
+
 	return err;
 }
 
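The user-memory variants swap the open-coded ASM_STAC/ASM_CLAC for the stac()/clac() helpers, which bracket the one faultable instruction with EFLAGS.AC flips so SMAP permits the access to the user buffer. STAC and CLAC execute only at CPL 0, so the shape of the pattern is sketched below with hypothetical stub functions standing in for the real helpers:

#include <stdio.h>

/* Hypothetical stand-ins: the real stac()/clac() are single STAC/CLAC
 * instructions that flip EFLAGS.AC at CPL 0 and cannot run here. */
static void stac(void) { puts("  [EFLAGS.AC set: user access open]"); }
static void clac(void) { puts("  [EFLAGS.AC clear: user access closed]"); }

/* Shape of the patched copy_xregs_to_user() path: open the SMAP window,
 * run the one faultable instruction, close the window, return err. */
static int xsave_to_user_sketch(void *ubuf)
{
	int err = 0;

	stac();
	/* XSTATE_OP(XSAVE, ubuf, -1, -1, err) would sit here; a fault on
	 * the user buffer runs the .fixup branch and leaves err == -2. */
	(void)ubuf;
	clac();

	return err;	/* callers map nonzero onto -EFAULT */
}

int main(void)
{
	printf("err = %d\n", xsave_to_user_sketch(NULL));
	return 0;
}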
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 0d4e092ae1bf..7b2978ab30df 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -197,7 +197,7 @@ static void __init fpu__init_task_struct_size(void)
  */
 static void __init fpu__init_system_xstate_size_legacy(void)
 {
-	static int on_boot_cpu = 1;
+	static int on_boot_cpu __initdata = 1;
 
 	WARN_ON_FPU(!on_boot_cpu);
 	on_boot_cpu = 0;
@@ -287,7 +287,7 @@ __setup("eagerfpu=", eager_fpu_setup);
  */
 static void __init fpu__init_system_ctx_switch(void)
 {
-	static bool on_boot_cpu = 1;
+	static bool on_boot_cpu __initdata = 1;
 
 	WARN_ON_FPU(!on_boot_cpu);
 	on_boot_cpu = 0;
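__initdata expands to a section attribute that places these once-per-boot flags in .init.data, which the kernel frees wholesale after init (the familiar "Freeing unused kernel memory" boot line). A user-space sketch of the mechanism, using a made-up section name rather than the kernel's:

#include <stdio.h>

/* Hypothetical section name; the kernel's __initdata uses ".init.data". */
#define __initdata_demo __attribute__((__section__(".demo.init.data")))

static int on_boot_cpu __initdata_demo = 1;

int main(void)
{
	/* Placement changes, behavior does not. */
	printf("on_boot_cpu = %d\n", on_boot_cpu);
	on_boot_cpu = 0;
	printf("on_boot_cpu = %d\n", on_boot_cpu);
	return 0;
}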
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 70fc312221fc..40f100285984 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -297,7 +297,7 @@ static void __init setup_xstate_comp(void)
  */
 static void __init setup_init_fpu_buf(void)
 {
-	static int on_boot_cpu = 1;
+	static int on_boot_cpu __initdata = 1;
 
 	WARN_ON_FPU(!on_boot_cpu);
 	on_boot_cpu = 0;
@@ -608,7 +608,7 @@ static void fpu__init_disable_system_xstate(void)
 void __init fpu__init_system_xstate(void)
 {
 	unsigned int eax, ebx, ecx, edx;
-	static int on_boot_cpu = 1;
+	static int on_boot_cpu __initdata = 1;
 	int err;
 
 	WARN_ON_FPU(!on_boot_cpu);
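fpu__init_system_xstate() ultimately sizes the xstate buffer by enumerating CPUID leaf 0xD, sub-leaf 0. For reference, the same enumeration performed from user space (assumption: GCC/Clang's cpuid.h; these are the raw values the kernel derives its xstate buffer size from):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid_count(0xD, 0, &eax, &ebx, &ecx, &edx))
		return 1;

	/* EDX:EAX = components the CPU supports in XCR0; EBX = bytes
	 * needed for the currently enabled set; ECX = worst-case size. */
	printf("supported components = %#llx\n",
	       ((unsigned long long)edx << 32) | eax);
	printf("current size = %u bytes, max size = %u bytes\n", ebx, ecx);

	return 0;
}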