author		Ingo Molnar <mingo@kernel.org>	2015-05-25 05:27:46 -0400
committer	Ingo Molnar <mingo@kernel.org>	2015-05-27 08:11:30 -0400
commit		9ccc27a5d297503e485373b69688d038a1d8e662 (patch)
tree		316780b039d30bdc3896bc5209a0755144456327
parent		3e1bf47e5c81c2b895db4bea67f70c3ca8e5b984 (diff)
x86/fpu: Remove error return values from copy_kernel_to_*regs() functions
None of the copy_kernel_to_*regs() FPU register copying functions are
supposed to fail, and all of them have debugging checks that enforce
this.

Remove their return values and simplify their call sites, which have
redundant error checks and error handling code paths.

Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Bobby Powers <bobbypowers@gmail.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
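In other words, the restore helpers keep their internal WARN_ON_FPU(err) debug check but no longer propagate err, so callers lose their dead error-handling branches. A minimal sketch of the call-site pattern this removes (illustrative only; the real hunks are in the diff below):

	/* Before: every caller had to handle an error that cannot happen: */
	if (unlikely(copy_kernel_to_fpregs(fpu))) {
		WARN_ON_FPU(1);		/* restore from a kernel buffer "failed" */
		fpu__clear(fpu);
	}

	/* After: the helper returns void and warns internally, so callers just call it: */
	copy_kernel_to_fpregs(fpu);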
-rw-r--r--	arch/x86/include/asm/fpu/internal.h	27
-rw-r--r--	arch/x86/kernel/fpu/core.c		18
2 files changed, 14 insertions, 31 deletions
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 6193b7a9cf00..da71d41227ff 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -141,7 +141,7 @@ static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
 	return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
 }
 
-static inline int copy_kernel_to_fxregs(struct fxregs_state *fx)
+static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
 {
 	int err;
 
@@ -157,8 +157,6 @@ static inline int copy_kernel_to_fxregs(struct fxregs_state *fx)
 	}
 	/* Copying from a kernel buffer to FPU registers should never fail: */
 	WARN_ON_FPU(err);
-
-	return err;
 }
 
 static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
@@ -173,13 +171,11 @@ static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
 			  "m" (*fx));
 }
 
-static inline int copy_kernel_to_fregs(struct fregs_state *fx)
+static inline void copy_kernel_to_fregs(struct fregs_state *fx)
 {
 	int err = check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
 
 	WARN_ON_FPU(err);
-
-	return err;
 }
 
 static inline int copy_user_to_fregs(struct fregs_state __user *fx)
@@ -450,20 +446,19 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
 	return 0;
 }
 
-static inline int __copy_kernel_to_fpregs(struct fpu *fpu)
+static inline void __copy_kernel_to_fpregs(struct fpu *fpu)
 {
 	if (use_xsave()) {
 		copy_kernel_to_xregs(&fpu->state.xsave, -1);
-		return 0;
 	} else {
 		if (use_fxsr())
-			return copy_kernel_to_fxregs(&fpu->state.fxsave);
+			copy_kernel_to_fxregs(&fpu->state.fxsave);
 		else
-			return copy_kernel_to_fregs(&fpu->state.fsave);
+			copy_kernel_to_fregs(&fpu->state.fsave);
 	}
 }
 
-static inline int copy_kernel_to_fpregs(struct fpu *fpu)
+static inline void copy_kernel_to_fpregs(struct fpu *fpu)
 {
 	/*
 	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
@@ -478,7 +473,7 @@ static inline int copy_kernel_to_fpregs(struct fpu *fpu)
 			     : : [addr] "m" (fpu->fpregs_active));
 	}
 
-	return __copy_kernel_to_fpregs(fpu);
+	__copy_kernel_to_fpregs(fpu);
 }
 
 extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);
@@ -646,12 +641,8 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
  */
 static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
 {
-	if (fpu_switch.preload) {
-		if (unlikely(copy_kernel_to_fpregs(new_fpu))) {
-			WARN_ON_FPU(1);
-			fpu__clear(new_fpu);
-		}
-	}
+	if (fpu_switch.preload)
+		copy_kernel_to_fpregs(new_fpu);
 }
 
 /*
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index e0e0ee565dc3..8470df44c06d 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -126,12 +126,10 @@ void __kernel_fpu_end(void)
 {
 	struct fpu *fpu = &current->thread.fpu;
 
-	if (fpu->fpregs_active) {
-		if (WARN_ON_FPU(copy_kernel_to_fpregs(fpu)))
-			fpu__clear(fpu);
-	} else {
+	if (fpu->fpregs_active)
+		copy_kernel_to_fpregs(fpu);
+	else
 		__fpregs_deactivate_hw();
-	}
 
 	kernel_fpu_enable();
 }
@@ -370,14 +368,8 @@ void fpu__restore(struct fpu *fpu)
 	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
 	kernel_fpu_disable();
 	fpregs_activate(fpu);
-	if (unlikely(copy_kernel_to_fpregs(fpu))) {
-		/* Copying the kernel state to FPU registers should never fail: */
-		WARN_ON_FPU(1);
-		fpu__clear(fpu);
-		force_sig_info(SIGSEGV, SEND_SIG_PRIV, current);
-	} else {
-		fpu->counter++;
-	}
+	copy_kernel_to_fpregs(fpu);
+	fpu->counter++;
 	kernel_fpu_enable();
 }
 EXPORT_SYMBOL_GPL(fpu__restore);
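For quick reference, the tail of fpu__restore() as it reads with this patch applied, assembled from the '+' and context lines of the last hunk:

	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
	kernel_fpu_disable();
	fpregs_activate(fpu);
	copy_kernel_to_fpregs(fpu);
	fpu->counter++;
	kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(fpu__restore);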