path: root/arch/arm64/kernel/entry.S
author		Will Deacon <will.deacon@arm.com>	2015-08-19 10:57:09 -0400
committer	Will Deacon <will.deacon@arm.com>	2015-08-21 10:11:43 -0400
commit		412fcb6cebd758d080cacd5a41a0cbc656ea5fce (patch)
tree		2e62df0204e68e580b468bdf871e60ddde7b822b /arch/arm64/kernel/entry.S
parent		d8d23fa0f27f3b2942a7bbc7378c7735324ed519 (diff)
arm64: entry: always restore x0 from the stack on syscall return
We have a micro-optimisation on the fast syscall return path where we
take care to keep x0 live with the return value from the syscall so that
we can avoid restoring it from the stack. The benefit of doing this is
fairly suspect, since we will be restoring x1 from the stack anyway
(which lives adjacent in the pt_regs structure) and the only additional
cost is saving x0 back to pt_regs after the syscall handler, which could
be seen as a poor man's prefetch.

More importantly, this causes issues with the context tracking code.
The ct_user_enter macro ends up branching into C code, which is free to
use x0 as a scratch register and consequently leads to us returning junk
back to userspace as the syscall return value.

Rather than special case the context-tracking code, this patch removes
the questionable optimisation entirely.

Cc: <stable@vger.kernel.org>
Cc: Larry Bassel <larry.bassel@linaro.org>
Cc: Kevin Hilman <khilman@linaro.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Reported-by: Hanjun Guo <hanjun.guo@linaro.org>
Tested-by: Hanjun Guo <hanjun.guo@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
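For illustration, a minimal sketch of the failure mode on the old fast
path (simplified pseudo-assembly, not the verbatim entry.S sequence; the
expansion of ct_user_enter is abbreviated):

	// Old fast path: kernel_exit 0, ret = 1, with x0 deliberately
	// kept live holding the syscall return value.
	ct_user_enter			// with CONFIG_CONTEXT_TRACKING this
					// branches into C code; x0 is a
					// caller-saved scratch register under
					// AAPCS64, so it may come back
					// holding anything
	ldr	x1, [sp, #S_X1]		// only x1 is reloaded from pt_regs;
					// x0 is trusted to have survived
	eret				// userspace can observe junk in x0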
Diffstat (limited to 'arch/arm64/kernel/entry.S')
-rw-r--r--	arch/arm64/kernel/entry.S	17
1 file changed, 6 insertions(+), 11 deletions(-)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index d8a523600a4c..4306c937b1ff 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -116,7 +116,7 @@
 	 */
 	.endm
 
-	.macro	kernel_exit, el, ret = 0
+	.macro	kernel_exit, el
 	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
 	.if	\el == 0
 	ct_user_enter
@@ -143,11 +143,7 @@ alternative_endif
 	.endif
 	msr	elr_el1, x21			// set up the return data
 	msr	spsr_el1, x22
-	.if	\ret
-	ldr	x1, [sp, #S_X1]			// preserve x0 (syscall return)
-	.else
 	ldp	x0, x1, [sp, #16 * 0]
-	.endif
 	ldp	x2, x3, [sp, #16 * 1]
 	ldp	x4, x5, [sp, #16 * 2]
 	ldp	x6, x7, [sp, #16 * 3]
@@ -610,22 +606,21 @@ ENDPROC(cpu_switch_to)
 	 */
 ret_fast_syscall:
 	disable_irq				// disable interrupts
+	str	x0, [sp, #S_X0]			// returned x0
 	ldr	x1, [tsk, #TI_FLAGS]		// re-check for syscall tracing
 	and	x2, x1, #_TIF_SYSCALL_WORK
 	cbnz	x2, ret_fast_syscall_trace
 	and	x2, x1, #_TIF_WORK_MASK
-	cbnz	x2, fast_work_pending
+	cbnz	x2, work_pending
 	enable_step_tsk x1, x2
-	kernel_exit 0, ret = 1
+	kernel_exit 0
 ret_fast_syscall_trace:
 	enable_irq				// enable interrupts
-	b	__sys_trace_return
+	b	__sys_trace_return_skipped	// we already saved x0
 
 /*
  * Ok, we need to do extra processing, enter the slow path.
  */
-fast_work_pending:
-	str	x0, [sp, #S_X0]			// returned x0
 work_pending:
 	tbnz	x1, #TIF_NEED_RESCHED, work_resched
 	/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
@@ -649,7 +644,7 @@ ret_to_user:
 	cbnz	x2, work_pending
 	enable_step_tsk x1, x2
 no_work_pending:
-	kernel_exit 0, ret = 0
+	kernel_exit 0
 ENDPROC(ret_to_user)
 
 /*
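After the patch, the fast return path is, in outline (again a simplified
sketch, not verbatim entry.S):

	ret_fast_syscall:
		disable_irq
		str	x0, [sp, #S_X0]		// park the return value in pt_regs
		...
		kernel_exit 0			// restores x0 unconditionally via
						// ldp x0, x1, [sp, #16 * 0]

Because x0 is reloaded from the stack after ct_user_enter has run, any C
code reached from the context-tracking path can use x0 freely without
corrupting the value returned to userspace; the one extra str per
syscall is the "poor man's prefetch" cost noted in the commit message.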