aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kernel/entry_64.S
diff options
context:
space:
mode:
authorFrederic Weisbecker <fweisbec@gmail.com>2011-06-30 19:51:22 -0400
committerFrederic Weisbecker <fweisbec@gmail.com>2011-07-02 12:05:31 -0400
commit1871853f7abc3c727c4346539c5062cbeaf016a4 (patch)
treeb01f5f5f5cb44a11b4ae542cfcf813d82c62c8eb /arch/x86/kernel/entry_64.S
parent47ce11a2b6519f9c7843223ea8e561eb71ea5896 (diff)
x86,64: Simplify save_regs()
The save_regs function that saves the regs on low-level irq entry is complicated by the fact that it changes its stack in the middle, and also because it manipulates data allocated in the caller's frame: accesses there are calculated directly from the callee's rsp value, with the return address in the middle of the way. This complicates the static stack-offset calculations and requires more dynamic ones. It also needs a save/restore of the function's return address. To simplify and optimize this, turn save_regs() into a macro. Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Cc: Ingo Molnar <mingo@elte.hu> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: H. Peter Anvin <hpa@zytor.com> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Arnaldo Carvalho de Melo <acme@redhat.com> Cc: Jan Beulich <JBeulich@novell.com>
Diffstat (limited to 'arch/x86/kernel/entry_64.S')
-rw-r--r--arch/x86/kernel/entry_64.S44
1 file changed, 17 insertions, 27 deletions
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 8a445a0c989e..b6b2e85454cf 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -297,27 +297,22 @@ ENDPROC(native_usergs_sysret64)
297 .endm 297 .endm
298 298
299/* save partial stack frame */ 299/* save partial stack frame */
300 .pushsection .kprobes.text, "ax" 300 .macro SAVE_ARGS_IRQ
301ENTRY(save_args)
302 XCPT_FRAME
303 cld 301 cld
304 /* 302 /* start from rbp in pt_regs and jump over */
305 * start from rbp in pt_regs and jump over 303 movq_cfi rdi, RDI-RBP
306 * return address. 304 movq_cfi rsi, RSI-RBP
307 */ 305 movq_cfi rdx, RDX-RBP
308 movq_cfi rdi, RDI+8-RBP 306 movq_cfi rcx, RCX-RBP
309 movq_cfi rsi, RSI+8-RBP 307 movq_cfi rax, RAX-RBP
310 movq_cfi rdx, RDX+8-RBP 308 movq_cfi r8, R8-RBP
311 movq_cfi rcx, RCX+8-RBP 309 movq_cfi r9, R9-RBP
312 movq_cfi rax, RAX+8-RBP 310 movq_cfi r10, R10-RBP
313 movq_cfi r8, R8+8-RBP 311 movq_cfi r11, R11-RBP
314 movq_cfi r9, R9+8-RBP 312
315 movq_cfi r10, R10+8-RBP 313 leaq -RBP(%rsp),%rdi /* arg1 for handler */
316 movq_cfi r11, R11+8-RBP 314 movq_cfi rbp, 0 /* push %rbp */
317 315 movq %rsp, %rbp
318 leaq -RBP+8(%rsp),%rdi /* arg1 for handler */
319 movq_cfi rbp, 8 /* push %rbp */
320 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
321 testl $3, CS(%rdi) 316 testl $3, CS(%rdi)
322 je 1f 317 je 1f
323 SWAPGS 318 SWAPGS
@@ -329,19 +324,14 @@ ENTRY(save_args)
329 */ 324 */
3301: incl PER_CPU_VAR(irq_count) 3251: incl PER_CPU_VAR(irq_count)
331 jne 2f 326 jne 2f
332 popq_cfi %rax /* move return address... */
333 mov PER_CPU_VAR(irq_stack_ptr),%rsp 327 mov PER_CPU_VAR(irq_stack_ptr),%rsp
334 EMPTY_FRAME 0 328 EMPTY_FRAME 0
335 pushq_cfi %rbp /* backlink for unwinder */ 329 pushq_cfi %rbp /* backlink for unwinder */
336 pushq_cfi %rax /* ... to the new stack */
337 /* 330 /*
338 * We entered an interrupt context - irqs are off: 331 * We entered an interrupt context - irqs are off:
339 */ 332 */
3402: TRACE_IRQS_OFF 3332: TRACE_IRQS_OFF
341 ret 334 .endm
342 CFI_ENDPROC
343END(save_args)
344 .popsection
345 335
346ENTRY(save_rest) 336ENTRY(save_rest)
347 PARTIAL_FRAME 1 REST_SKIP+8 337 PARTIAL_FRAME 1 REST_SKIP+8
@@ -791,7 +781,7 @@ END(interrupt)
791 /* reserve pt_regs for scratch regs and rbp */ 781 /* reserve pt_regs for scratch regs and rbp */
792 subq $ORIG_RAX-RBP, %rsp 782 subq $ORIG_RAX-RBP, %rsp
793 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP 783 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
794 call save_args 784 SAVE_ARGS_IRQ
795 PARTIAL_FRAME 0 785 PARTIAL_FRAME 0
796 call \func 786 call \func
797 .endm 787 .endm