author     Andy Lutomirski <luto@kernel.org>    2015-07-03 15:44:19 -0400
committer  Ingo Molnar <mingo@kernel.org>       2015-07-07 04:58:30 -0400
commit     5e99cb7c35ca0580da8e892f91c655d35ecf8798
tree       3d8c48af3e328fc6691757dbfa2adec2f596caeb
parent     5e5c684a2c78b98dcba3d6fce56773a375f63980
x86/entry/64/compat: Fix bad fast syscall arg failure path
If user code does SYSCALL32 or SYSENTER without a valid stack, then
our attempt to determine the syscall args will result in a failed
uaccess fault.  Previously, we would try to recover by jumping to the
syscall exit code, but we'd run the syscall exit work even though we
never made it to the syscall entry work.

Clean it up by treating the failure path as a non-syscall entry and
exit pair.

This fixes strace's output when running the syscall_arg_fault test.
Without this fix, strace would get out of sync and would fail to
associate syscall entries with syscall exits.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Denys Vlasenko <vda.linux@googlemail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/903010762c07a3d67df914fea2da84b52b0f8f1d.1435952415.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  arch/x86/entry/entry_64.S        |  2 +-
-rw-r--r--  arch/x86/entry/entry_64_compat.S | 35 +++++++++++++++++++++++++++++++++--
2 files changed, 34 insertions(+), 3 deletions(-)
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 3bb2c4302df1..141a5d49dddc 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -613,7 +613,7 @@ ret_from_intr:
 	testb	$3, CS(%rsp)
 	jz	retint_kernel
 	/* Interrupt came from user space */
-retint_user:
+GLOBAL(retint_user)
 	GET_THREAD_INFO(%rcx)
 
 	/* %rcx: thread info. Interrupts are off. */
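
For reference, the point of this one-line hunk is linkage: the second hunk below adds a "jmp retint_user" in entry_64_compat.S, which is a separate object file, so the label has to become a global symbol. GLOBAL() comes from arch/x86/include/asm/linkage.h, which in trees of this vintage expanded to roughly the following (shown as a reminder, not part of this patch):

	/* arch/x86/include/asm/linkage.h (approximate, contemporary tree) */
	#define GLOBAL(name)	\
		.globl name;	\
		name:
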
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index b868cfc72985..e5ebdd963a99 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -428,8 +428,39 @@ cstar_tracesys:
 END(entry_SYSCALL_compat)
 
 ia32_badarg:
-	ASM_CLAC
-	movq	$-EFAULT, RAX(%rsp)
+	/*
+	 * So far, we've entered kernel mode, set AC, turned on IRQs, and
+	 * saved C regs except r8-r11.  We haven't done any of the other
+	 * standard entry work, though.  We want to bail, but we shouldn't
+	 * treat this as a syscall entry since we don't even know what the
+	 * args are.  Instead, treat this as a non-syscall entry, finish
+	 * the entry work, and immediately exit after setting AX = -EFAULT.
+	 *
+	 * We're really just being polite here.  Killing the task outright
+	 * would be a reasonable action, too.  Given that the only valid
+	 * way to have gotten here is through the vDSO, and we already know
+	 * that the stack pointer is bad, the task isn't going to survive
+	 * for long no matter what we do.
+	 */
+
+	ASM_CLAC			/* undo STAC */
+	movq	$-EFAULT, RAX(%rsp)	/* return -EFAULT if possible */
+
+	/* Fill in the rest of pt_regs */
+	xorl	%eax, %eax
+	movq	%rax, R11(%rsp)
+	movq	%rax, R10(%rsp)
+	movq	%rax, R9(%rsp)
+	movq	%rax, R8(%rsp)
+	SAVE_EXTRA_REGS
+
+	/* Turn IRQs back off. */
+	DISABLE_INTERRUPTS(CLBR_NONE)
+	TRACE_IRQS_OFF
+
+	/* And exit again. */
+	jmp	retint_user
+
 ia32_ret_from_sys_call:
 	xorl	%eax, %eax		/* Do not leak kernel information */
 	movq	%rax, R11(%rsp)
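
For context, the failure path patched above can be exercised from user space along the lines of the kernel's syscall_arg_fault selftest (under tools/testing/selftests/x86/): trash the stack pointer, execute a 32-bit fast syscall, and check that the register state at the resulting SIGSEGV shows AX == -EFAULT. The sketch below is only an illustration of that idea, not the in-tree test. It assumes a 32-bit build (gcc -m32) running on an x86_64 kernel with compat support, and a CPU that accepts SYSENTER from compat mode (the SIGILL handler skips the test otherwise); the file and function names (badarg.c, check_eax, skip_on_sigill) are made up for the example.

	/*
	 * badarg.c - illustrative sketch, not the in-tree selftest.
	 * Build 32-bit:  gcc -m32 -O2 -o badarg badarg.c
	 *
	 * Trash %esp/%ebp and execute SYSENTER so the kernel's attempt to
	 * read the syscall args from user memory faults (the ia32_badarg
	 * path above).
	 */
	#define _GNU_SOURCE
	#include <err.h>
	#include <errno.h>
	#include <setjmp.h>
	#include <signal.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ucontext.h>

	static sigjmp_buf jmpbuf;
	static unsigned char altstack[64 * 1024];	/* %esp is garbage at signal time */

	static void check_eax(int sig, siginfo_t *info, void *ctx_void)
	{
		ucontext_t *ctx = ctx_void;

		(void)sig; (void)info;

		/* On a fixed kernel, the aborted fast syscall leaves -EFAULT in AX. */
		if (ctx->uc_mcontext.gregs[REG_EAX] == -EFAULT)
			printf("[OK]\tSIGSEGV with EAX == -EFAULT\n");
		else
			printf("[FAIL]\tEAX == 0x%x\n", ctx->uc_mcontext.gregs[REG_EAX]);

		siglongjmp(jmpbuf, 1);
	}

	static void skip_on_sigill(int sig, siginfo_t *info, void *ctx_void)
	{
		(void)sig; (void)info; (void)ctx_void;

		/* Some CPUs refuse SYSENTER from compat mode; nothing to test then. */
		printf("[SKIP]\tSYSENTER raised SIGILL\n");
		siglongjmp(jmpbuf, 1);
	}

	static void sethandler(int sig, void (*fn)(int, siginfo_t *, void *))
	{
		struct sigaction sa;

		memset(&sa, 0, sizeof(sa));
		sa.sa_sigaction = fn;
		sa.sa_flags = SA_SIGINFO | SA_ONSTACK;	/* must run on the alt stack */
		sigemptyset(&sa.sa_mask);
		if (sigaction(sig, &sa, NULL))
			err(1, "sigaction");
	}

	int main(void)
	{
		stack_t ss = { .ss_sp = altstack, .ss_size = sizeof(altstack) };

		if (sigaltstack(&ss, NULL))
			err(1, "sigaltstack");
		sethandler(SIGSEGV, check_eax);
		sethandler(SIGBUS, check_eax);
		sethandler(SIGILL, skip_on_sigill);

		if (sigsetjmp(jmpbuf, 1) == 0) {
			/*
			 * Point %ebp and %esp at garbage, then enter the kernel
			 * the "fast" 32-bit way.  Control never falls through:
			 * we either fault after the kernel returns (SIGSEGV or
			 * SIGBUS) or get SIGILL, and the handlers siglongjmp()
			 * back here with the saved registers restored.
			 */
			asm volatile ("movl $-1, %%ebp\n\t"
				      "movl $-1, %%esp\n\t"
				      "sysenter"
				      : : : "memory", "cc");
		}

		return 0;
	}

The sigaltstack/SA_ONSTACK setup is what makes this recoverable: %esp is unusable when the signal arrives, so the handler must run on the alternate stack, and siglongjmp() is what restores the trashed registers afterwards.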