author		Andy Lutomirski <luto@amacapital.net>	2014-09-05 18:13:56 -0400
committer	H. Peter Anvin <hpa@linux.intel.com>	2014-09-08 17:14:12 -0400
commit		1dcf74f6edfc3a9acd84d83d8865dd9e2a3b1d1e (patch)
tree		21030b6f0394f5b82cd17b96fd0008375b3f254b /arch/x86
parent		54eea9957f5763dd1a2555d7e4cb53b4dd389cc6 (diff)
x86_64, entry: Use split-phase syscall_trace_enter for 64-bit syscalls
On KVM on my box, this reduces the overhead of an always-accept seccomp
filter from ~130ns to ~17ns. Most of that comes from avoiding IRET on
every syscall when seccomp is enabled.

In extremely approximate hacked-up benchmarking, just bypassing IRET
saves about 80ns of the ~113ns total, so there's another ~33ns of
savings here from simplifying the seccomp path.

The diffstat is also rather nice :)

Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Link: http://lkml.kernel.org/r/a3dbd267ee990110478d349f78cccfdac5497a84.1409954077.git.luto@amacapital.net
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
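For context, the split-phase hooks this patch calls were added to
arch/x86/kernel/ptrace.c earlier in this series (see the parent commit).
A rough sketch of the contract the assembly below relies on; the exact
prototypes live in that commit, so treat this as illustrative rather
than authoritative:

	/*
	 * Phase 1 is called early, with only the partial SYSCALL frame
	 * saved and no FIXUP_TOP_OF_STACK, so it may only do work that
	 * is safe in that context (e.g. the common seccomp checks).
	 * It returns 0 if the fast path may continue, or a nonzero
	 * cookie describing the remaining work.
	 */
	unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch);

	/*
	 * Phase 2 runs only when phase 1 returned nonzero, with the full
	 * frame saved.  It handles the heavyweight cases (ptrace, audit,
	 * tracepoints) and returns the syscall number to use for the
	 * table lookup, which is why the caller must not clobber %rax
	 * afterwards.
	 */
	long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
					unsigned long phase1_result);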
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kernel/entry_64.S	38
1 file changed, 15 insertions(+), 23 deletions(-)
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 0bd6d3c28064..df088bb03fb3 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -478,22 +478,6 @@ sysret_signal:
 
 #ifdef CONFIG_AUDITSYSCALL
 	/*
-	 * Fast path for syscall audit without full syscall trace.
-	 * We just call __audit_syscall_entry() directly, and then
-	 * jump back to the normal fast path.
-	 */
-auditsys:
-	movq %r10,%r9			/* 6th arg: 4th syscall arg */
-	movq %rdx,%r8			/* 5th arg: 3rd syscall arg */
-	movq %rsi,%rcx			/* 4th arg: 2nd syscall arg */
-	movq %rdi,%rdx			/* 3rd arg: 1st syscall arg */
-	movq %rax,%rsi			/* 2nd arg: syscall number */
-	movl $AUDIT_ARCH_X86_64,%edi	/* 1st arg: audit arch */
-	call __audit_syscall_entry
-	LOAD_ARGS 0		/* reload call-clobbered registers */
-	jmp system_call_fastpath
-
-	/*
 	 * Return fast path for syscall audit.  Call __audit_syscall_exit()
 	 * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
 	 * masked off.
@@ -510,17 +494,25 @@ sysret_audit:
 
 	/* Do syscall tracing */
 tracesys:
-#ifdef CONFIG_AUDITSYSCALL
-	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-	jz auditsys
-#endif
+	leaq -REST_SKIP(%rsp), %rdi
+	movq $AUDIT_ARCH_X86_64, %rsi
+	call syscall_trace_enter_phase1
+	test %rax, %rax
+	jnz tracesys_phase2		/* if needed, run the slow path */
+	LOAD_ARGS 0			/* else restore clobbered regs */
+	jmp system_call_fastpath	/* and return to the fast path */
+
+tracesys_phase2:
 	SAVE_REST
 	FIXUP_TOP_OF_STACK %rdi
-	movq %rsp,%rdi
-	call syscall_trace_enter
+	movq %rsp, %rdi
+	movq $AUDIT_ARCH_X86_64, %rsi
+	movq %rax,%rdx
+	call syscall_trace_enter_phase2
+
 	/*
 	 * Reload arg registers from stack in case ptrace changed them.
-	 * We don't reload %rax because syscall_trace_enter() returned
+	 * We don't reload %rax because syscall_trace_enter_phase2() returned
 	 * the value it wants us to use in the table lookup.
 	 */
 	LOAD_ARGS ARGOFFSET, 1
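Restated as C-like pseudocode, the new tracesys flow is roughly the
following (illustrative only: do_fastpath_syscall() and
do_slowpath_syscall() are hypothetical stand-ins for the assembly fast
and slow paths, not real kernel functions):

	static void tracesys_sketch(struct pt_regs *regs)
	{
		/* Cheap phase: no SAVE_REST, no FIXUP_TOP_OF_STACK. */
		unsigned long phase1 =
			syscall_trace_enter_phase1(regs, AUDIT_ARCH_X86_64);

		if (!phase1) {
			/* Nothing but e.g. an accepting seccomp filter:
			 * stay on the fast path and return via SYSRET. */
			do_fastpath_syscall(regs);
			return;
		}

		/* Rare case: build the full frame and do the slow work;
		 * phase 2 hands back the syscall number for the lookup. */
		long nr = syscall_trace_enter_phase2(regs, AUDIT_ARCH_X86_64,
						     phase1);
		do_slowpath_syscall(regs, nr);
	}

The point of the split is that the common seccomp-only case never builds
the full frame and never takes the IRET return path, which is where the
savings described in the commit message come from.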