author		Oleg Nesterov <oleg@redhat.com>	2015-01-15 14:19:43 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2015-01-20 07:53:07 -0500
commit		14e153ef75eecae8fd0738ffb42120f4962a00cd (patch)
tree		22fb7872fddb4c639d882ca602f4240c38acfaae
parent		6ca7a8a15035add0a4f9b2fd658118d41dbeb20c (diff)
x86, fpu: Introduce per-cpu in_kernel_fpu state
interrupted_kernel_fpu_idle() tries to detect whether kernel_fpu_begin()
is safe or not. In particular, it obviously has to deny nested
kernel_fpu_begin() calls, and this logic looks very confusing.
If use_eager_fpu() is true, we rely on a) the __thread_has_fpu() check in
interrupted_kernel_fpu_idle(), and b) the fact that __kernel_fpu_begin()
does __thread_clear_has_fpu().
Otherwise we demand that the interrupted task does not own the FPU if it
is in kernel mode; this works because __kernel_fpu_begin() does clts() and
interrupted_kernel_fpu_idle() checks X86_CR0_TS.
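Concretely, the pre-patch check described above amounts to roughly the
following (a simplified sketch of the logic, not a verbatim copy of the
parent tree):

	static inline bool interrupted_kernel_fpu_idle(void)
	{
		if (use_eager_fpu())
			/* Safe only if the interrupted context still owns
			 * the FPU; inside a kernel-FPU section
			 * __kernel_fpu_begin() has already done
			 * __thread_clear_has_fpu(). */
			return __thread_has_fpu(current);

		/* Lazy FPU: safe only if the interrupted kernel context
		 * does not own the FPU and TS is still set, i.e. nobody
		 * has done clts() via __kernel_fpu_begin(). */
		return !__thread_has_fpu(current) &&
			(read_cr0() & X86_CR0_TS);
	}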
Add the per-cpu "bool in_kernel_fpu" variable, and change this code to
check/set/clear it. This allows us to do more cleanups and fixes; see the
next changes.
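For context (not part of this patch), the new flag is consulted on the
irq_fpu_usable() path, which at this point looks roughly like:

	bool irq_fpu_usable(void)
	{
		return !in_interrupt() ||
			interrupted_user_mode() ||
			interrupted_kernel_fpu_idle();
	}

An interrupt that hits a kernel-FPU section on the same CPU interrupted
kernel mode and now finds in_kernel_fpu set, so it gets "not usable"
directly instead of relying on the indirect checks sketched above.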
The patch also moves WARN_ON_ONCE() under preempt_disable(), just to make
this_cpu_read() look better; this is not really needed. In fact, I think
we should move it into __kernel_fpu_begin().
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: matt.fleming@intel.com
Cc: bp@suse.de
Cc: pbonzini@redhat.com
Cc: luto@amacapital.net
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Suresh Siddha <sbsiddha@gmail.com>
Link: http://lkml.kernel.org/r/20150115191943.GB27332@redhat.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--	arch/x86/include/asm/i387.h	2 +-
-rw-r--r--	arch/x86/kernel/i387.c		9 +++++++++
2 files changed, 10 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index ed8089d69094..5e275d31802e 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -40,8 +40,8 @@ extern void __kernel_fpu_end(void);
 
 static inline void kernel_fpu_begin(void)
 {
-	WARN_ON_ONCE(!irq_fpu_usable());
 	preempt_disable();
+	WARN_ON_ONCE(!irq_fpu_usable());
 	__kernel_fpu_begin();
 }
 
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index a9a4229f6161..a81572338243 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -19,6 +19,8 @@
 #include <asm/fpu-internal.h>
 #include <asm/user.h>
 
+static DEFINE_PER_CPU(bool, in_kernel_fpu);
+
 /*
  * Were we in an interrupt that interrupted kernel mode?
  *
@@ -33,6 +35,9 @@
  */
 static inline bool interrupted_kernel_fpu_idle(void)
 {
+	if (this_cpu_read(in_kernel_fpu))
+		return false;
+
 	if (use_eager_fpu())
 		return __thread_has_fpu(current);
 
@@ -73,6 +78,8 @@ void __kernel_fpu_begin(void)
 {
 	struct task_struct *me = current;
 
+	this_cpu_write(in_kernel_fpu, true);
+
 	if (__thread_has_fpu(me)) {
 		__thread_clear_has_fpu(me);
 		__save_init_fpu(me);
@@ -99,6 +106,8 @@ void __kernel_fpu_end(void)
 	} else {
 		stts();
 	}
+
+	this_cpu_write(in_kernel_fpu, false);
 }
 EXPORT_SYMBOL(__kernel_fpu_end);
 
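For illustration, a typical caller of this API follows the pattern below.
The function name and the work inside the section are hypothetical; only
the irq_fpu_usable() check and the kernel_fpu_begin()/kernel_fpu_end()
pairing are the real interface.

	/* Hypothetical example, not from this patch: the usual calling
	 * pattern that in_kernel_fpu protects.  If this runs from IRQ or
	 * softirq context and interrupts a kernel-FPU section, the
	 * irq_fpu_usable() check fails and the caller must take a
	 * non-FPU path instead of nesting kernel_fpu_begin(). */
	static void example_fpu_user(void)
	{
		if (!irq_fpu_usable())
			return;			/* fall back to scalar code */

		kernel_fpu_begin();		/* sets in_kernel_fpu on this CPU */
		/* ... SSE/AVX work ... */
		kernel_fpu_end();		/* clears in_kernel_fpu again */
	}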