author     Linus Torvalds <torvalds@linux-foundation.org>  2012-02-16 12:15:04 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-02-16 12:15:04 -0500
commit     15d8791cae75dca27bfda8ecfe87dca9379d6bb0 (patch)
tree       e8e2cd53544eda0644ee141b6b0414e34d96f6be /arch/x86
parent     c38e23456278e967f094b08247ffc3711b1029b2 (diff)
i387: fix x86-64 preemption-unsafe user stack save/restore

Commit 5b1cbac37798 ("i387: make irq_fpu_usable() tests more robust")
added a sanity check to the #NM handler to verify that we never cause
the "Device Not Available" exception in kernel mode.

However, that check actually pinpointed a (fundamental) race where we do
cause that exception as part of the signal stack FPU state save/restore
code.

Because we use the floating point instructions themselves to save and
restore state directly from user mode, we cannot do that atomically with
testing the TS_USEDFPU bit: the user mode access itself may cause a page
fault, which causes a task switch, which saves and restores the FP/MMX
state from the kernel buffers.

This kind of "recursive" FP state save is fine per se, but it means that
when the signal stack save/restore gets restarted, it will now take the
'#NM' exception we originally tried to avoid. With preemption this can
happen even without the page fault - but because of the user access, we
cannot just disable preemption around the save/restore instruction.
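
To make the race concrete, here is a condensed sketch of the old
save_i387_xstate() path (abridged from the pre-patch code in the
xsave.c hunk below, not a literal quote):

	if (task_thread_info(tsk)->status & TS_USEDFPU) {
		/*
		 * xsave_user() stores the FPU state directly to the
		 * user-mode signal stack and can take a page fault
		 * there ...
		 */
		err = xsave_user(buf);
		/*
		 * ... the fault (or plain preemption) schedules us
		 * away; the context switch saves the FPU state into
		 * the kernel buffer and sets CR0.TS.  When this code
		 * is restarted, the FP instruction now takes #NM in
		 * kernel mode - the case the sanity check flagged.
		 */
		task_thread_info(tsk)->status &= ~TS_USEDFPU;
		stts();
	}
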
There are various ways to solve this, including using the
pagefault_disable()/pagefault_enable() helpers to not allow page faults
at all during the sequence, and falling back to copying things by hand
without the
use of the native FP state save/restore instructions.
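
For reference, a hypothetical sketch of that first (rejected)
alternative; the fallback helper name here is made up purely for
illustration:

	pagefault_disable();
	/*
	 * With page faults disabled, a fault inside xsave_user() goes
	 * straight to the exception fixup and returns an error,
	 * instead of being handled (and rescheduling us).
	 */
	err = xsave_user(buf);
	pagefault_enable();
	if (err)
		err = save_fpu_by_hand(buf);	/* hypothetical: save to a
						 * kernel buffer, then
						 * copy_to_user() it */
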
However, the simplest thing to do is to just allow the #NM from kernel
space, but fix the race in setting and clearing CR0.TS that this all
exposed: changes to the TS bit and the TS_USEDFPU bit absolutely have to
be atomic wrt scheduling, so while the actual state save/restore can be
interrupted and restarted, the act of actually clearing/setting CR0.TS
and the TS_USEDFPU bit together must not.

Instead of just adding random "preempt_disable/enable()" calls to what
is already excessively ugly code, this introduces some helper functions
that mostly mirror the "kernel_fpu_begin/end()" functionality, just for
the user state instead.
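
Condensed from the xsave.c conversion below, the resulting pairing
looks like this (note the ordering the new comment insists on: save
the state before user_fpu_end(), call user_fpu_begin() only right
before restoring):

	/* signal save path: write the state to the user stack first ... */
	err = xsave_user(buf);
	if (err)
		return err;
	/* ... then atomically give up FPU ownership: clear TS_USEDFPU
	 * and set CR0.TS under preempt_disable() */
	user_fpu_end();

	/* signal restore path: atomically take FPU ownership first:
	 * clear CR0.TS and set TS_USEDFPU under preempt_disable() ... */
	user_fpu_begin();
	/* ... then restore; a fault here can reschedule us, and the
	 * restarted instruction just takes a (now allowed) #NM */
	err = restore_user_xstate(buf);
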
Those helper functions should probably eventually replace the other
ad-hoc CR0.TS and TS_USEDFPU tests too, but I'll need to think about it
some more: the task switching functionality in particular needs to
expose the difference between the 'prev' and 'next' threads, while the
new helper functions were intentionally written to only work with
'current'.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Diffstat (limited to 'arch/x86')
-rw-r--r-- arch/x86/include/asm/i387.h | 42
-rw-r--r-- arch/x86/kernel/traps.c     |  1
-rw-r--r-- arch/x86/kernel/xsave.c     | 10
3 files changed, 45 insertions(+), 8 deletions(-)

diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 727c1dd84899..f704be239883 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -400,6 +400,48 @@ static inline void irq_ts_restore(int TS_state)
 }
 
 /*
+ * The question "does this thread have fpu access?"
+ * is slightly racy, since preemption could come in
+ * and revoke it immediately after the test.
+ *
+ * However, even in that very unlikely scenario,
+ * we can just assume we have FPU access - typically
+ * to save the FP state - we'll just take a #NM
+ * fault and get the FPU access back.
+ *
+ * The actual user_fpu_begin/end() functions
+ * need to be preemption-safe, though.
+ *
+ * NOTE! user_fpu_end() must be used only after you
+ * have saved the FP state, and user_fpu_begin() must
+ * be used only immediately before restoring it.
+ * These functions do not do any save/restore on
+ * their own.
+ */
+static inline int user_has_fpu(void)
+{
+	return current_thread_info()->status & TS_USEDFPU;
+}
+
+static inline void user_fpu_end(void)
+{
+	preempt_disable();
+	current_thread_info()->status &= ~TS_USEDFPU;
+	stts();
+	preempt_enable();
+}
+
+static inline void user_fpu_begin(void)
+{
+	preempt_disable();
+	if (!user_has_fpu()) {
+		clts();
+		current_thread_info()->status |= TS_USEDFPU;
+	}
+	preempt_enable();
+}
+
+/*
  * These disable preemption on their own and are safe
  */
 static inline void save_init_fpu(struct task_struct *tsk)
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 8ba27dbc107a..982433b5da30 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -631,7 +631,6 @@ EXPORT_SYMBOL_GPL(math_state_restore);
 dotraplinkage void __kprobes
 do_device_not_available(struct pt_regs *regs, long error_code)
 {
-	WARN_ON_ONCE(!user_mode_vm(regs));
 #ifdef CONFIG_MATH_EMULATION
 	if (read_cr0() & X86_CR0_EM) {
 		struct math_emu_info info = { };
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index a3911343976b..86f1f09a738a 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -168,7 +168,7 @@ int save_i387_xstate(void __user *buf)
 	if (!used_math())
 		return 0;
 
-	if (task_thread_info(tsk)->status & TS_USEDFPU) {
+	if (user_has_fpu()) {
 		if (use_xsave())
 			err = xsave_user(buf);
 		else
@@ -176,8 +176,7 @@ int save_i387_xstate(void __user *buf)
 
 		if (err)
 			return err;
-		task_thread_info(tsk)->status &= ~TS_USEDFPU;
-		stts();
+		user_fpu_end();
 	} else {
 		sanitize_i387_state(tsk);
 		if (__copy_to_user(buf, &tsk->thread.fpu.state->fxsave,
@@ -292,10 +291,7 @@ int restore_i387_xstate(void __user *buf)
 		return err;
 	}
 
-	if (!(task_thread_info(current)->status & TS_USEDFPU)) {
-		clts();
-		task_thread_info(current)->status |= TS_USEDFPU;
-	}
+	user_fpu_begin();
 	if (use_xsave())
 		err = restore_user_xstate(buf);
 	else