author:    Borislav Petkov <bp@suse.de>, 2015-04-16 14:41:37 -0400
committer: Ingo Molnar <mingo@kernel.org>, 2015-04-17 04:15:47 -0400
commit:    18ecb3bfa5a9f6fffbb3eeb4369f0b9463438ec0
tree:      dd21e99bdb7e2941989207acc03a7c2d1a754134 /arch/x86
parent:    fd0f86b66425bd8c6af8985881e82b28c30fd450
x86/fpu: Load xsave pointer *after* initialization
So I was playing with gdb today and did this simple thing:
gdb /bin/ls
...
(gdb) run
Box exploded with this splat:
BUG: unable to handle kernel NULL pointer dereference at 00000000000001d0
IP: [<ffffffff8100fe5a>] xstateregs_get+0x7a/0x120
[...]
Call Trace:
ptrace_regset
ptrace_request
? wait_task_inactive
? preempt_count_sub
arch_ptrace
? ptrace_get_task_struct
SyS_ptrace
system_call_fastpath
... because we cache &target->thread.fpu.state->xsave in the local
variable xsave, but that pointer is still NULL at that point: it gets
initialized only later, in init_fpu(). See:
e7f180dcd8ab ("x86/fpu: Change xstateregs_get()/set() to use ->xsave.i387 rather than ->fxsave")
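To make the ordering bug concrete, here is a minimal userspace sketch of
the same pattern. The types and the init_fpu() helper are simplified
stand-ins, not the real kernel structures: taking &fpu.state->xsave while
fpu.state is still NULL yields an address at a small offset from NULL,
and dereferencing it gives exactly this class of NULL pointer fault.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the kernel structures involved. */
struct xsave_struct { unsigned long xstate_bv; };
struct fpu_state    { struct xsave_struct xsave; };
struct fpu          { struct fpu_state *state; };  /* NULL until first use */

/* Stand-in for init_fpu(): allocate the state buffer lazily. */
static int init_fpu(struct fpu *fpu)
{
	if (!fpu->state) {
		fpu->state = calloc(1, sizeof(*fpu->state));
		if (!fpu->state)
			return -1;
	}
	return 0;
}

int main(void)
{
	struct fpu fpu = { .state = NULL };

	/*
	 * Buggy ordering: this computes an address relative to a NULL
	 * pointer, and dereferencing it later faults:
	 *
	 *	struct xsave_struct *xsave = &fpu.state->xsave;
	 */

	/* Fixed ordering: initialize first, take the pointer after. */
	if (init_fpu(&fpu))
		return 1;

	struct xsave_struct *xsave = &fpu.state->xsave;
	printf("xsave buffer at %p\n", (void *)xsave);

	free(fpu.state);
	return 0;
}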
The fix is simple: load xsave *after* init_fpu() has run.
Also do the same in xstateregs_set(), as suggested by Oleg Nesterov.
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Oleg Nesterov <oleg@redhat.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Tavis Ormandy <taviso@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1429209697-5902-1-git-send-email-bp@alien8.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86')
 arch/x86/kernel/i387.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 367f39d35e9c..009183276bb7 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -341,7 +341,7 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
 		unsigned int pos, unsigned int count,
 		void *kbuf, void __user *ubuf)
 {
-	struct xsave_struct *xsave = &target->thread.fpu.state->xsave;
+	struct xsave_struct *xsave;
 	int ret;
 
 	if (!cpu_has_xsave)
@@ -351,6 +351,8 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
 	if (ret)
 		return ret;
 
+	xsave = &target->thread.fpu.state->xsave;
+
 	/*
 	 * Copy the 48bytes defined by the software first into the xstate
 	 * memory layout in the thread struct, so that we can copy the entire
@@ -369,7 +371,7 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
 		unsigned int pos, unsigned int count,
 		const void *kbuf, const void __user *ubuf)
 {
-	struct xsave_struct *xsave = &target->thread.fpu.state->xsave;
+	struct xsave_struct *xsave;
 	int ret;
 
 	if (!cpu_has_xsave)
@@ -379,6 +381,8 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
 	if (ret)
 		return ret;
 
+	xsave = &target->thread.fpu.state->xsave;
+
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
 	/*
 	 * mxcsr reserved bits must be masked to zero for security reasons.