author	Andy Lutomirski <luto@amacapital.net>	2014-09-05 18:13:55 -0400
committer	H. Peter Anvin <hpa@linux.intel.com>	2014-09-08 17:14:08 -0400
commit	54eea9957f5763dd1a2555d7e4cb53b4dd389cc6 (patch)
tree	65f301ae08fe3805ee92746e52b6a26139a0b0b9
parent	e0ffbaabc46db508b8717f023c0ce03b980eefac (diff)
x86_64, entry: Treat regs->ax the same in fastpath and slowpath syscalls
For slowpath syscalls, we initialize regs->ax to -ENOSYS and stick
the syscall number into regs->orig_ax prior to any possible tracing
and syscall execution. This is user-visible ABI used by ptrace
syscall emulation and seccomp.

For fastpath syscalls, there's no good reason not to do the same
thing. It's even slightly simpler than what we're currently doing.
It probably has no measurable performance impact. It should have no
user-visible effect.

The purpose of this patch is to prepare for two-phase syscall
tracing, in which the first phase might modify the saved RAX without
leaving the fast path. This change is just subtle enough that I'm
keeping it separate.

Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Link: http://lkml.kernel.org/r/01218b493f12ae2f98034b78c9ae085e38e94350.1409954077.git.luto@amacapital.net
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
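The commit message calls the -ENOSYS seeding of regs->ax user-visible
ABI for ptrace syscall emulation. As a minimal tracer sketch (not part
of the patch; x86_64 only, error handling omitted), this is what a
ptrace user sees at a syscall-entry stop:

    #define _GNU_SOURCE
    #include <signal.h>
    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/syscall.h>
    #include <sys/user.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        pid_t child = fork();
        if (child == 0) {
            ptrace(PTRACE_TRACEME, 0, NULL, NULL);
            raise(SIGSTOP);              /* hand control to the tracer */
            syscall(SYS_getpid);         /* any syscall will do */
            _exit(0);
        }

        int status;
        waitpid(child, &status, 0);                 /* SIGSTOP stop */
        ptrace(PTRACE_SYSCALL, child, NULL, NULL);  /* run to syscall entry */
        waitpid(child, &status, 0);

        struct user_regs_struct regs;
        ptrace(PTRACE_GETREGS, child, NULL, &regs);
        /* At the entry stop, orig_rax holds the syscall number and rax
           reads back as -ENOSYS -- the regs->ax/regs->orig_ax ABI this
           patch makes uniform across the fast and slow paths. */
        printf("orig_rax=%lld rax=%lld\n",
               (long long)regs.orig_rax, (long long)regs.rax);

        ptrace(PTRACE_CONT, child, NULL, NULL);
        waitpid(child, &status, 0);
        return 0;
    }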
-rw-r--r--  arch/x86/include/asm/calling.h |  6 +++++-
-rw-r--r--  arch/x86/kernel/entry_64.S     | 13 ++++---------
2 files changed, 9 insertions(+), 10 deletions(-)
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
index cb4c73bfeb48..76659b67fd11 100644
--- a/arch/x86/include/asm/calling.h
+++ b/arch/x86/include/asm/calling.h
@@ -85,7 +85,7 @@ For 32-bit we have the following conventions - kernel is built with
 #define ARGOFFSET	R11
 #define SWFRAME		ORIG_RAX
 
-	.macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1
+	.macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1, rax_enosys=0
 	subq  $9*8+\addskip, %rsp
 	CFI_ADJUST_CFA_OFFSET	9*8+\addskip
 	movq_cfi rdi, 8*8
@@ -96,7 +96,11 @@ For 32-bit we have the following conventions - kernel is built with
 	movq_cfi rcx, 5*8
 	.endif
 
+	.if \rax_enosys
+	movq $-ENOSYS, 4*8(%rsp)
+	.else
 	movq_cfi rax, 4*8
+	.endif
 
 	.if \save_r891011
 	movq_cfi r8, 3*8
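For orientation (illustrative, not kernel code): the 4*8(%rsp) slot
that the new rax_enosys branch writes is laid out by the same stores
shown above, and it is the slot later read back as regs->ax. A sketch
of the partial frame SAVE_ARGS builds, lowest offset first, assuming
addskip=0:

    /* Mirrors the movq_cfi offsets in SAVE_ARGS; this is NOT the
       kernel's struct pt_regs definition, just a map of the slots. */
    struct save_args_frame {
        unsigned long r11; /* 0*8, the ARGOFFSET base */
        unsigned long r10; /* 1*8 */
        unsigned long r9;  /* 2*8 */
        unsigned long r8;  /* 3*8 */
        unsigned long ax;  /* 4*8 <- -ENOSYS when rax_enosys=1 */
        unsigned long cx;  /* 5*8 */
        unsigned long dx;  /* 6*8 */
        unsigned long si;  /* 7*8 */
        unsigned long di;  /* 8*8 */
    };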
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 2fac1343a90b..0bd6d3c28064 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -404,8 +404,8 @@ GLOBAL(system_call_after_swapgs)
 	 * and short:
 	 */
 	ENABLE_INTERRUPTS(CLBR_NONE)
-	SAVE_ARGS 8,0
-	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp)
+	SAVE_ARGS 8, 0, rax_enosys=1
+	movq_cfi rax,(ORIG_RAX-ARGOFFSET)
 	movq  %rcx,RIP-ARGOFFSET(%rsp)
 	CFI_REL_OFFSET rip,RIP-ARGOFFSET
 	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
@@ -417,7 +417,7 @@ system_call_fastpath:
 	andl $__SYSCALL_MASK,%eax
 	cmpl $__NR_syscall_max,%eax
 #endif
-	ja badsys
+	ja ret_from_sys_call	/* and return regs->ax */
 	movq %r10,%rcx
 	call *sys_call_table(,%rax,8)  # XXX:	 rip relative
 	movq %rax,RAX-ARGOFFSET(%rsp)
@@ -476,10 +476,6 @@ sysret_signal:
 	FIXUP_TOP_OF_STACK %r11, -ARGOFFSET
 	jmp int_check_syscall_exit_work
 
-badsys:
-	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
-	jmp ret_from_sys_call
-
 #ifdef CONFIG_AUDITSYSCALL
 	/*
 	 * Fast path for syscall audit without full syscall trace.
@@ -519,7 +515,6 @@ tracesys:
 	jz auditsys
 #endif
 	SAVE_REST
-	movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
 	FIXUP_TOP_OF_STACK %rdi
 	movq %rsp,%rdi
 	call syscall_trace_enter
@@ -536,7 +531,7 @@ tracesys:
 	andl $__SYSCALL_MASK,%eax
 	cmpl $__NR_syscall_max,%eax
 #endif
-	ja int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
+	ja int_ret_from_sys_call	/* RAX(%rsp) is already set */
 	movq %r10,%rcx	/* fixup for C */
 	call *sys_call_table(,%rax,8)
 	movq %rax,RAX-ARGOFFSET(%rsp)
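The fastpath change above ("ja ret_from_sys_call") is easy to check
from userspace: an out-of-range syscall number now returns the
pre-seeded regs->ax rather than going through the removed badsys
label. A minimal check, where 100000 is just an assumed out-of-range
syscall number:

    #define _GNU_SOURCE
    #include <assert.h>
    #include <errno.h>
    #include <unistd.h>

    int main(void)
    {
        /* Assumed out-of-range syscall number: the entry code takes
           ja ret_from_sys_call and returns the -ENOSYS that SAVE_ARGS
           seeded into the saved-RAX slot. */
        long ret = syscall(100000);
        assert(ret == -1 && errno == ENOSYS);
        return 0;
    }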