Diffstat (limited to 'arch/x86/include/asm/fpu/internal.h')

-rw-r--r--  arch/x86/include/asm/fpu/internal.h | 90 +++++--------------
1 file changed, 22 insertions(+), 68 deletions(-)
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 554cdb205d17..e3221ffa304e 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -23,11 +23,9 @@
 /*
  * High level FPU state handling functions:
  */
-extern void fpu__activate_curr(struct fpu *fpu);
-extern void fpu__activate_fpstate_read(struct fpu *fpu);
-extern void fpu__activate_fpstate_write(struct fpu *fpu);
-extern void fpu__current_fpstate_write_begin(void);
-extern void fpu__current_fpstate_write_end(void);
+extern void fpu__initialize(struct fpu *fpu);
+extern void fpu__prepare_read(struct fpu *fpu);
+extern void fpu__prepare_write(struct fpu *fpu);
 extern void fpu__save(struct fpu *fpu);
 extern void fpu__restore(struct fpu *fpu);
 extern int fpu__restore_sig(void __user *buf, int ia32_frame);
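For context on the rename: the semantics are unchanged — fpu__prepare_read() brings fpu->state up to date before the caller reads it, and fpu__prepare_write() invalidates the live registers so the caller may modify fpu->state directly. A minimal sketch of the expected calling pattern; the example_* helpers are invented for illustration and are not part of this patch:

	/* Hypothetical regset-style reader: sync state, then copy it out. */
	static void example_read_fpstate(struct fpu *fpu, struct fxregs_state *dst)
	{
		fpu__prepare_read(fpu);		/* flush live registers into fpu->state */
		memcpy(dst, &fpu->state.fxsave, sizeof(*dst));
	}

	/* Hypothetical writer: invalidate live registers, then edit the image. */
	static void example_write_fpstate(struct fpu *fpu, const struct fxregs_state *src)
	{
		fpu__prepare_write(fpu);	/* live registers are now deliberately stale */
		memcpy(&fpu->state.fxsave, src, sizeof(*src));
	}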
@@ -120,20 +118,11 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
 	err;								\
 })
 
-#define check_insn(insn, output, input...)				\
-({									\
-	int err;							\
-	asm volatile("1:" #insn "\n\t"					\
-		     "2:\n"						\
-		     ".section .fixup,\"ax\"\n"				\
-		     "3:  movl $-1,%[err]\n"				\
-		     "    jmp  2b\n"					\
-		     ".previous\n"					\
-		     _ASM_EXTABLE(1b, 3b)				\
-		     : [err] "=r" (err), output				\
-		     : "0"(0), input);					\
-	err;								\
-})
+#define kernel_insn(insn, output, input...)				\
+	asm volatile("1:" #insn "\n\t"					\
+		     "2:\n"						\
+		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_fprestore)	\
+		     : output : input)
 
 static inline int copy_fregs_to_user(struct fregs_state __user *fx)
 {
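Reviewer note: check_insn() materialized an error value through a local .fixup stub and every kernel-buffer restore site warned on it; kernel_insn() drops the error plumbing and instead routes a fault on the tagged instruction to ex_handler_fprestore via the exception table. Roughly, a use such as kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx)) expands to the following (simplified, whitespace illustrative):

	asm volatile("1: frstor %[fx]\n\t"
		     "2:\n"
		     /* a fault at 1b is handled by ex_handler_fprestore, not here */
		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_fprestore)
		     : "=m" (*fx)
		     : [fx] "m" (*fx));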
@@ -153,20 +142,16 @@ static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
 
 static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
 {
-	int err;
-
 	if (IS_ENABLED(CONFIG_X86_32)) {
-		err = check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+		kernel_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
 	} else {
 		if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) {
-			err = check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
+			kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
 		} else {
 			/* See comment in copy_fxregs_to_kernel() below. */
-			err = check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx));
+			kernel_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx));
 		}
 	}
-	/* Copying from a kernel buffer to FPU registers should never fail: */
-	WARN_ON_FPU(err);
 }
 
 static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
@@ -183,9 +168,7 @@ static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
 
 static inline void copy_kernel_to_fregs(struct fregs_state *fx)
 {
-	int err = check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
-
-	WARN_ON_FPU(err);
+	kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
 }
 
 static inline int copy_user_to_fregs(struct fregs_state __user *fx)
@@ -281,18 +264,13 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
  * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
  * XSAVE area format.
  */
-#define XSTATE_XRESTORE(st, lmask, hmask, err)				\
+#define XSTATE_XRESTORE(st, lmask, hmask)				\
 	asm volatile(ALTERNATIVE(XRSTOR,				\
 				 XRSTORS, X86_FEATURE_XSAVES)		\
 		     "\n"						\
-		     "xor %[err], %[err]\n"				\
 		     "3:\n"						\
-		     ".pushsection .fixup,\"ax\"\n"			\
-		     "4: movl $-2, %[err]\n"				\
-		     "jmp 3b\n"						\
-		     ".popsection\n"					\
-		     _ASM_EXTABLE(661b, 4b)				\
-		     : [err] "=r" (err)					\
+		     _ASM_EXTABLE_HANDLE(661b, 3b, ex_handler_fprestore)\
+		     :							\
 		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
 		     : "memory")
 
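For reference, ex_handler_fprestore comes from the companion arch/x86/mm/extable.c change: it skips the faulting restore and reloads a clean state so the task never runs with a half-restored register set. A sketch of its shape (reconstructed here; treat the details as an assumption):

	bool ex_handler_fprestore(const struct exception_table_entry *fixup,
				  struct pt_regs *regs, int trapnr)
	{
		regs->ip = ex_fixup_addr(fixup);	/* resume after the faulting insn */

		WARN_ONCE(1, "Bad FPU state detected at %pB, reinitializing FPU registers.",
			  (void *)instruction_pointer(regs));

		__copy_kernel_to_fpregs(&init_fpstate, -1);	/* fall back to clean init state */
		return true;
	}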
@@ -336,7 +314,10 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
 	else
 		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
 
-	/* We should never fault when copying from a kernel buffer: */
+	/*
+	 * We should never fault when copying from a kernel buffer, and the FPU
+	 * state we set at boot time should be valid.
+	 */
 	WARN_ON_FPU(err);
 }
 
@@ -350,7 +331,7 @@ static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
 	u32 hmask = mask >> 32;
 	int err;
 
-	WARN_ON(!alternatives_patched);
+	WARN_ON_FPU(!alternatives_patched);
 
 	XSTATE_XSAVE(xstate, lmask, hmask, err);
 
@@ -365,12 +346,8 @@ static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
 {
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err;
-
-	XSTATE_XRESTORE(xstate, lmask, hmask, err);
 
-	/* We should never fault when copying from a kernel buffer: */
-	WARN_ON_FPU(err);
+	XSTATE_XRESTORE(xstate, lmask, hmask);
 }
 
 /*
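Net effect of the hunks above: copy_kernel_to_xregs() stays void and callers have no status to check; a corrupt kernel-side image is now caught by the exception-table handler instead of a per-call-site WARN. A hypothetical call site, for illustration only:

	/* Hypothetical: restore a task's full xstate image; faults are fixed
	 * up by ex_handler_fprestore rather than reported to the caller. */
	static void example_restore_task_xstate(struct fpu *fpu)
	{
		copy_kernel_to_xregs(&fpu->state.xsave, -1);
	}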
@@ -526,38 +503,17 @@ static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
  */
 static inline void fpregs_deactivate(struct fpu *fpu)
 {
-	WARN_ON_FPU(!fpu->fpregs_active);
-
-	fpu->fpregs_active = 0;
 	this_cpu_write(fpu_fpregs_owner_ctx, NULL);
 	trace_x86_fpu_regs_deactivated(fpu);
 }
 
 static inline void fpregs_activate(struct fpu *fpu)
 {
-	WARN_ON_FPU(fpu->fpregs_active);
-
-	fpu->fpregs_active = 1;
 	this_cpu_write(fpu_fpregs_owner_ctx, fpu);
 	trace_x86_fpu_regs_activated(fpu);
 }
 
 /*
- * The question "does this thread have fpu access?"
- * is slightly racy, since preemption could come in
- * and revoke it immediately after the test.
- *
- * However, even in that very unlikely scenario,
- * we can just assume we have FPU access - typically
- * to save the FP state - we'll just take a #NM
- * fault and get the FPU access back.
- */
-static inline int fpregs_active(void)
-{
-	return current->thread.fpu.fpregs_active;
-}
-
-/*
  * FPU state switching for scheduling.
  *
  * This is a two-stage process:
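With the ->fpregs_active flag gone, register ownership is expressed solely by the per-CPU fpu_fpregs_owner_ctx pointer that these two helpers write. Illustrative only — the header's real test is fpregs_state_valid(), which additionally compares fpu->last_cpu:

	/* Hypothetical helper: ownership reduces to a per-CPU pointer compare. */
	static inline bool example_fpregs_owned_by(struct fpu *fpu)
	{
		return this_cpu_read(fpu_fpregs_owner_ctx) == fpu;
	}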
@@ -571,14 +527,13 @@ static inline int fpregs_active(void)
 static inline void
 switch_fpu_prepare(struct fpu *old_fpu, int cpu)
 {
-	if (old_fpu->fpregs_active) {
+	if (old_fpu->initialized) {
 		if (!copy_fpregs_to_fpstate(old_fpu))
 			old_fpu->last_cpu = -1;
 		else
 			old_fpu->last_cpu = cpu;
 
 		/* But leave fpu_fpregs_owner_ctx! */
-		old_fpu->fpregs_active = 0;
 		trace_x86_fpu_regs_deactivated(old_fpu);
 	} else
 		old_fpu->last_cpu = -1;
@@ -595,7 +550,7 @@ switch_fpu_prepare(struct fpu *old_fpu, int cpu)
 static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
 {
 	bool preload = static_cpu_has(X86_FEATURE_FPU) &&
-		       new_fpu->fpstate_active;
+		       new_fpu->initialized;
 
 	if (preload) {
 		if (!fpregs_state_valid(new_fpu, cpu))
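The two stages bracket the actual task switch; a simplified sketch of the scheduler-side pairing, as in __switch_to() (names and surrounding code assumed):

	struct fpu *prev_fpu = &prev_p->thread.fpu;
	struct fpu *next_fpu = &next_p->thread.fpu;
	int cpu = smp_processor_id();

	switch_fpu_prepare(prev_fpu, cpu);	/* stage 1: save outgoing registers */

	/* ... switch stacks, TLS, segment state ... */

	switch_fpu_finish(next_fpu, cpu);	/* stage 2: load incoming registers */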
@@ -617,8 +572,7 @@ static inline void user_fpu_begin(void)
 	struct fpu *fpu = &current->thread.fpu;
 
 	preempt_disable();
-	if (!fpregs_active())
-		fpregs_activate(fpu);
+	fpregs_activate(fpu);
 	preempt_enable();
 }
 
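With fpregs_active() removed, user_fpu_begin() claims the registers unconditionally; the preempt_disable()/preempt_enable() pair still keeps the owner-pointer update on one CPU. A hypothetical caller, assuming the usual pattern of loading saved state right after claiming ownership:

	/* Hypothetical: claim the registers, then load the task's saved image. */
	static void example_load_user_state(struct fpu *fpu)
	{
		user_fpu_begin();			/* current becomes register owner */
		copy_kernel_to_fpregs(&fpu->state);	/* load fpu->state into registers */
	}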