author		Andy Lutomirski <luto@amacapital.net>	2014-09-05 18:13:55 -0400
committer	H. Peter Anvin <hpa@linux.intel.com>	2014-09-08 17:14:08 -0400
commit		54eea9957f5763dd1a2555d7e4cb53b4dd389cc6 (patch)
tree		65f301ae08fe3805ee92746e52b6a26139a0b0b9 /arch/x86/include/asm/calling.h
parent		e0ffbaabc46db508b8717f023c0ce03b980eefac (diff)
x86_64, entry: Treat regs->ax the same in fastpath and slowpath syscalls
For slowpath syscalls, we initialize regs->ax to -ENOSYS and stick the
syscall number into regs->orig_ax prior to any possible tracing and
syscall execution.  This is user-visible ABI used by ptrace syscall
emulation and seccomp.

For fastpath syscalls, there's no good reason not to do the same thing.
It's even slightly simpler than what we're currently doing.  It
probably has no measurable performance impact.  It should have no
user-visible effect.

The purpose of this patch is to prepare for two-phase syscall tracing,
in which the first phase might modify the saved RAX without leaving the
fast path.  This change is just subtle enough that I'm keeping it
separate.

Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Link: http://lkml.kernel.org/r/01218b493f12ae2f98034b78c9ae085e38e94350.1409954077.git.luto@amacapital.net
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
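The regs->ax / regs->orig_ax convention the message refers to is directly
observable from userspace with ptrace.  The following is a minimal,
hypothetical sketch (written for illustration, not part of this patch) of a
tracer that stops a child at syscall entry on x86_64; at that stop,
regs.orig_rax carries the syscall number and regs.rax has already been
preset to -ENOSYS by the kernel:

/* ptrace-entry-sketch.c -- illustrative only, not from the patch. */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);		/* wait for the tracer to resume us */
		getpid();		/* any syscall serves as the example */
		_exit(0);
	}

	waitpid(child, NULL, 0);			/* initial SIGSTOP */
	ptrace(PTRACE_SYSCALL, child, NULL, NULL);	/* run to syscall entry */
	waitpid(child, NULL, 0);			/* syscall-entry stop */

	struct user_regs_struct regs;
	ptrace(PTRACE_GETREGS, child, NULL, &regs);

	/* Expect: orig_rax == syscall number, rax == -ENOSYS (user-visible ABI) */
	printf("orig_rax=%lld rax=%lld (-ENOSYS=%d)\n",
	       (long long)regs.orig_rax, (long long)regs.rax, -ENOSYS);

	ptrace(PTRACE_KILL, child, NULL, NULL);
	waitpid(child, NULL, 0);
	return 0;
}

ptrace syscall emulation and seccomp both rely on this convention, which is
why the fastpath is being brought in line with the slowpath here.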
Diffstat (limited to 'arch/x86/include/asm/calling.h')
-rw-r--r--	arch/x86/include/asm/calling.h | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
index cb4c73bfeb48..76659b67fd11 100644
--- a/arch/x86/include/asm/calling.h
+++ b/arch/x86/include/asm/calling.h
@@ -85,7 +85,7 @@ For 32-bit we have the following conventions - kernel is built with
 #define ARGOFFSET	R11
 #define SWFRAME		ORIG_RAX
 
-	.macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1
+	.macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1, rax_enosys=0
 	subq  $9*8+\addskip, %rsp
 	CFI_ADJUST_CFA_OFFSET	9*8+\addskip
 	movq_cfi rdi, 8*8
@@ -96,7 +96,11 @@ For 32-bit we have the following conventions - kernel is built with
 	movq_cfi rcx, 5*8
 	.endif
 
+	.if \rax_enosys
+	movq $-ENOSYS, 4*8(%rsp)
+	.else
 	movq_cfi rax, 4*8
+	.endif
 
 	.if \save_r891011
 	movq_cfi r8, 3*8
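The new rax_enosys flag lets the 64-bit fastpath ask SAVE_ARGS to preset the
saved ax slot to -ENOSYS instead of storing the live %rax value.  The
matching entry_64.S change is outside this diffstat; as a hedged sketch, a
fastpath call site would look roughly like:

	SAVE_ARGS 8, 0, rax_enosys=1		# pt_regs->ax preset to -ENOSYS
	movq	%rax, ORIG_RAX-ARGOFFSET(%rsp)	# syscall number into pt_regs->orig_ax

(Illustrative only; the exact call-site instructions live in the follow-on
entry code, not in this header.)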