author		Linus Torvalds <torvalds@linux-foundation.org>	2015-02-09 21:01:52 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-09 21:01:52 -0500
commit		c93ecedab35f5305542a9fe5cfbd37377189589e (patch)
tree		c480259a0588c1dd47f596e25f6ac805d878bf05
parent		072bc448cc796c4d2d3519795f38e13a6c2a14a5 (diff)
parent		7575637ab293861a799f3bbafe0d8c597389f4e9 (diff)
Merge branch 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fpu updates from Ingo Molnar:
 "Initial round of kernel_fpu_begin/end cleanups from Oleg Nesterov,
  plus a cleanup from Borislav Petkov"

* 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, fpu: Fix math_state_restore() race with kernel_fpu_begin()
  x86, fpu: Don't abuse has_fpu in __kernel_fpu_begin/end()
  x86, fpu: Introduce per-cpu in_kernel_fpu state
  x86/fpu: Use a symbolic name for asm operand
-rw-r--r--	arch/x86/include/asm/fpu-internal.h	10
-rw-r--r--	arch/x86/include/asm/i387.h		 6
-rw-r--r--	arch/x86/kernel/i387.c			39
-rw-r--r--	arch/x86/kernel/traps.c			12

4 files changed, 42 insertions, 25 deletions
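
For context: kernel_fpu_begin()/kernel_fpu_end() bracket any in-kernel use of
FPU/SIMD registers, and the cleanups below change what happens inside that
bracket. A minimal sketch of a typical process-context caller (the helper name
and the SIMD body are illustrative, not part of this patch):

	#include <asm/i387.h>	/* kernel_fpu_begin()/kernel_fpu_end() */

	/* Illustrative only: bracket in-kernel SIMD work. */
	static void example_simd_op(void)
	{
		kernel_fpu_begin();	/* disables preemption, saves user FPU state */
		/* ... SSE/AVX instructions may run here ... */
		kernel_fpu_end();	/* puts the FPU back, re-enables preemption */
	}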
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index e97622f57722..0dbc08282291 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -207,7 +207,7 @@ static inline void fpu_fxsave(struct fpu *fpu)
 	if (config_enabled(CONFIG_X86_32))
 		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
 	else if (config_enabled(CONFIG_AS_FXSAVEQ))
-		asm volatile("fxsaveq %0" : "=m" (fpu->state->fxsave));
+		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state->fxsave));
 	else {
 		/* Using "rex64; fxsave %0" is broken because, if the memory
 		 * operand uses any extended registers for addressing, a second
@@ -290,9 +290,11 @@ static inline int fpu_restore_checking(struct fpu *fpu)
 
 static inline int restore_fpu_checking(struct task_struct *tsk)
 {
-	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
-	   is pending. Clear the x87 state here by setting it to fixed
-	   values. "m" is a random variable that should be in L1 */
+	/*
+	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
+	 * pending. Clear the x87 state here by setting it to fixed values.
+	 * "m" is a random variable that should be in L1.
+	 */
 	if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) {
 		asm volatile(
 			"fnclex\n\t"
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index ed8089d69094..6eb6fcb83f63 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -40,8 +40,8 @@ extern void __kernel_fpu_end(void);
 
 static inline void kernel_fpu_begin(void)
 {
-	WARN_ON_ONCE(!irq_fpu_usable());
 	preempt_disable();
+	WARN_ON_ONCE(!irq_fpu_usable());
 	__kernel_fpu_begin();
 }
 
@@ -51,6 +51,10 @@ static inline void kernel_fpu_end(void)
 	preempt_enable();
 }
 
+/* Must be called with preempt disabled */
+extern void kernel_fpu_disable(void);
+extern void kernel_fpu_enable(void);
+
 /*
  * Some instructions like VIA's padlock instructions generate a spurious
  * DNA fault but don't modify SSE registers. And these instructions
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index a9a4229f6161..81049ffab2d6 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -19,6 +19,19 @@
 #include <asm/fpu-internal.h>
 #include <asm/user.h>
 
+static DEFINE_PER_CPU(bool, in_kernel_fpu);
+
+void kernel_fpu_disable(void)
+{
+	WARN_ON(this_cpu_read(in_kernel_fpu));
+	this_cpu_write(in_kernel_fpu, true);
+}
+
+void kernel_fpu_enable(void)
+{
+	this_cpu_write(in_kernel_fpu, false);
+}
+
 /*
  * Were we in an interrupt that interrupted kernel mode?
  *
@@ -33,6 +46,9 @@
  */
 static inline bool interrupted_kernel_fpu_idle(void)
 {
+	if (this_cpu_read(in_kernel_fpu))
+		return false;
+
 	if (use_eager_fpu())
 		return __thread_has_fpu(current);
 
@@ -73,10 +89,10 @@ void __kernel_fpu_begin(void)
 {
 	struct task_struct *me = current;
 
+	this_cpu_write(in_kernel_fpu, true);
+
 	if (__thread_has_fpu(me)) {
-		__thread_clear_has_fpu(me);
 		__save_init_fpu(me);
-		/* We do 'stts()' in __kernel_fpu_end() */
 	} else if (!use_eager_fpu()) {
 		this_cpu_write(fpu_owner_task, NULL);
 		clts();
@@ -86,19 +102,16 @@ EXPORT_SYMBOL(__kernel_fpu_begin);
 
 void __kernel_fpu_end(void)
 {
-	if (use_eager_fpu()) {
-		/*
-		 * For eager fpu, most the time, tsk_used_math() is true.
-		 * Restore the user math as we are done with the kernel usage.
-		 * At few instances during thread exit, signal handling etc,
-		 * tsk_used_math() is false. Those few places will take proper
-		 * actions, so we don't need to restore the math here.
-		 */
-		if (likely(tsk_used_math(current)))
-			math_state_restore();
-	} else {
+	struct task_struct *me = current;
+
+	if (__thread_has_fpu(me)) {
+		if (WARN_ON(restore_fpu_checking(me)))
+			drop_init_fpu(me);
+	} else if (!use_eager_fpu()) {
 		stts();
 	}
+
+	this_cpu_write(in_kernel_fpu, false);
 }
 EXPORT_SYMBOL(__kernel_fpu_end);
 
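
The per-cpu flag introduced above is the state that kernel_fpu_disable(),
kernel_fpu_enable() and interrupted_kernel_fpu_idle() all consult. For readers
unfamiliar with the per-cpu API, a standalone sketch with illustrative names:

	#include <linux/percpu.h>

	static DEFINE_PER_CPU(bool, example_flag);

	static void set_flag(void)
	{
		this_cpu_write(example_flag, true);
	}

	static bool flag_is_set(void)
	{
		return this_cpu_read(example_flag);
	}

On x86, this_cpu_read()/this_cpu_write() compile to single segment-prefixed
instructions, so a CPU-local flag like this can be flipped without a lock.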
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index c74f2f5652da..9d2073e2ecc9 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -859,18 +859,16 @@ void math_state_restore(void)
 		local_irq_disable();
 	}
 
+	/* Avoid __kernel_fpu_begin() right after __thread_fpu_begin() */
+	kernel_fpu_disable();
 	__thread_fpu_begin(tsk);
-
-	/*
-	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
-	 */
 	if (unlikely(restore_fpu_checking(tsk))) {
 		drop_init_fpu(tsk);
 		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
-		return;
+	} else {
+		tsk->thread.fpu_counter++;
 	}
-
-	tsk->thread.fpu_counter++;
+	kernel_fpu_enable();
 }
 EXPORT_SYMBOL_GPL(math_state_restore);
 
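
The traps.c hunk is the heart of the race fix: while in_kernel_fpu is set,
interrupted_kernel_fpu_idle() returns false, so an interrupt arriving between
__thread_fpu_begin() and restore_fpu_checking() sees irq_fpu_usable() fail and
stays off the FPU. A sketch of the interrupt-side pattern this protects (the
fallback body is illustrative):

	if (irq_fpu_usable()) {
		kernel_fpu_begin();
		/* ... SIMD fast path ... */
		kernel_fpu_end();
	} else {
		/* ... scalar fallback ... */
	}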