author     Frederic Weisbecker <fweisbec@gmail.com>   2011-07-02 10:52:45 -0400
committer  Frederic Weisbecker <fweisbec@gmail.com>   2011-07-02 12:06:36 -0400
commit     a2bbe75089d5eb9a3a46d50dd5c215e213790288
tree       d1322d80e7d2c048611e6c79b7211412d4629e25
parent     48ffee7d9e6df51b4957bed64115b7beed671374
x86: Don't use frame pointer to save old stack on irq entry
rbp is used in SAVE_ARGS_IRQ to save the old stack pointer so that it can be restored later in ret_from_intr. This is convenient because the value ends up in the irq regs and is easily restored with the leave instruction.

However, this abuses the frame pointer, whose role is to help unwind the kernel by chaining frames together: each saved frame pointer is followed on the stack by the return address into the previous frame. But while we break the frame by changing the stack pointer, there is no return address preceding the new frame. Using the frame pointer to link the two stacks therefore breaks the stack unwinders, which find a random stack value where they expect a return address.

No workaround covers every case. The fixup_bp_irq_link() function dereferences that abused frame pointer for non-nesting interrupts (i.e. when the stack actually changed). But it cannot fix interrupts that do not change the stack, such as a hardirq interrupting a softirq, even though the frame link is still installed unconditionally. We have no way to detect that transition, so the irq frame link is treated as a real frame pointer and its return address is dereferenced, yet that address is still spurious.

Two outcomes are possible: either the spurious return address, a random stack value, happens to fall within the kernel text, unwinding continues, and we merely get a weird entry in the stack trace; or it does not fall within the kernel text and unwinding stops there. This is why stack traces (including perf callchains) on irqs that interrupted softirqs work so poorly.

To solve this, we no longer save the old stack pointer in rbp. Instead we save it in a scratch register that we push on the new stack and pop back on irq return. This preserves the whole frame chain without spurious return addresses in the middle and drops the need for the horrid fixup_bp_irq_link() workaround. And irqs that interrupt softirqs are finally unwound sanely.

Before:

    99.81%     perf  [kernel.kallsyms]  [k] perf_pending_event
               |
               --- perf_pending_event
                   irq_work_run
                   smp_irq_work_interrupt
                   irq_work_interrupt
                   |
                   |--41.60%-- __read
                   |          |
                   |          |--99.90%-- create_worker
                   |          |          bench_sched_messaging
                   |          |          cmd_bench
                   |          |          run_builtin
                   |          |          main
                   |          |          __libc_start_main
                   |           --0.10%-- [...]

After:

     1.64%  swapper  [kernel.kallsyms]  [k] perf_pending_event
            |
            --- perf_pending_event
                irq_work_run
                smp_irq_work_interrupt
                irq_work_interrupt
                |
                |--95.00%-- arch_irq_work_raise
                |          irq_work_queue
                |          __perf_event_overflow
                |          perf_swevent_overflow
                |          perf_swevent_event
                |          perf_tp_event
                |          perf_trace_softirq
                |          __do_softirq
                |          call_softirq
                |          do_softirq
                |          irq_exit
                |          |
                |          |--73.68%-- smp_apic_timer_interrupt
                |          |          apic_timer_interrupt
                |          |          |
                |          |          |--96.43%-- amd_e400_idle
                |          |          |          cpu_idle
                |          |          |          start_secondary

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jan Beulich <JBeulich@novell.com>
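To make the breakage concrete, here is a minimal user-space sketch (illustrative only; the struct and helper names are made up, and it merely mirrors the two-word frame layout a frame-pointer unwinder relies on) of a frame walk and of how a link that is not followed by a real return address derails it:

#include <stdio.h>

/* Each frame built with a real frame pointer starts with the caller's
 * saved frame pointer, immediately followed by the return address that
 * the call instruction pushed. */
struct frame {
	struct frame *next_frame;
	unsigned long return_address;
};

/* Stand-in for the "is this address in kernel text?" check that a real
 * unwinder performs before trusting a return address. */
static int plausible_text_address(unsigned long addr)
{
	return addr != 0;
}

static void walk_frames(struct frame *fp)
{
	while (fp) {
		if (!plausible_text_address(fp->return_address)) {
			/* With the old SAVE_ARGS_IRQ, the first frame on the
			 * irq stack linked to the interrupted stack pointer,
			 * not to a real frame, so the word read here was a
			 * random stack value: the walk stops or records a
			 * bogus entry. */
			break;
		}
		printf("  [<%016lx>]\n", fp->return_address);
		fp = fp->next_frame;
	}
}

int main(void)
{
	/* A well-formed two-frame chain: walked to the end. */
	struct frame leaf = { NULL, 0xffffffff81000010UL };
	struct frame top  = { &leaf, 0xffffffff81000020UL };
	walk_frames(&top);

	/* Simulate the old irq entry: the link leads to a word that is not
	 * a text address (modelled as 0), so the walk stops early. */
	struct frame bogus = { NULL, 0 };
	struct frame irq   = { &bogus, 0xffffffff81000030UL };
	walk_frames(&irq);
	return 0;
}

With this patch applied, the rbp saved on the irq stack still points at the interrupted context's genuine frame, so a walk of this kind crosses the stack switch without ever hitting a spurious link.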
-rw-r--r--  arch/x86/kernel/dumpstack_64.c | 30
-rw-r--r--  arch/x86/kernel/entry_64.S     | 27
2 files changed, 15 insertions, 42 deletions
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 788295cbe4a7..19853ad8afc5 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -105,34 +105,6 @@ in_irq_stack(unsigned long *stack, unsigned long *irq_stack,
 }
 
 /*
- * We are returning from the irq stack and go to the previous one.
- * If the previous stack is also in the irq stack, then bp in the first
- * frame of the irq stack points to the previous, interrupted one.
- * Otherwise we have another level of indirection: We first save
- * the bp of the previous stack, then we switch the stack to the irq one
- * and save a new bp that links to the previous one.
- * (See save_args())
- */
-static inline unsigned long
-fixup_bp_irq_link(unsigned long bp, unsigned long *stack,
-		  unsigned long *irq_stack, unsigned long *irq_stack_end)
-{
-#ifdef CONFIG_FRAME_POINTER
-	struct stack_frame *frame = (struct stack_frame *)bp;
-	unsigned long next;
-
-	if (!in_irq_stack(stack, irq_stack, irq_stack_end)) {
-		if (!probe_kernel_address(&frame->next_frame, next))
-			return next;
-		else
-			WARN_ONCE(1, "Perf: bad frame pointer = %p in "
-				  "callchain\n", &frame->next_frame);
-	}
-#endif
-	return bp;
-}
-
-/*
  * x86-64 can have up to three kernel stacks:
  * process stack
  * interrupt stack
@@ -208,8 +180,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 			 * pointer (index -1 to end) in the IRQ stack:
 			 */
 			stack = (unsigned long *) (irq_stack_end[-1]);
-			bp = fixup_bp_irq_link(bp, stack, irq_stack,
-					       irq_stack_end);
 			irq_stack_end = NULL;
 			ops->stack(data, "EOI");
 			continue;
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 6131432c5afa..d656f68371a4 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -310,8 +310,11 @@ ENDPROC(native_usergs_sysret64)
 	movq_cfi r10, R10-RBP
 	movq_cfi r11, R11-RBP
 
-	movq_cfi rbp, 0		/* push %rbp */
-	movq %rsp, %rbp
+	/* Save rbp so that we can unwind from get_irq_regs() */
+	movq_cfi rbp, 0
+
+	/* Save previous stack value */
+	movq %rsp, %rsi
 
 	leaq -RBP(%rsp),%rdi	/* arg1 for handler */
 	testl $3, CS(%rdi)
@@ -327,10 +330,11 @@ ENDPROC(native_usergs_sysret64)
 	jne 2f
 	mov PER_CPU_VAR(irq_stack_ptr),%rsp
 	EMPTY_FRAME 0
-	/*
-	 * We entered an interrupt context - irqs are off:
-	 */
-2:	TRACE_IRQS_OFF
+
+2:	/* Store previous stack value */
+	pushq %rsi
+	/* We entered an interrupt context - irqs are off: */
+	TRACE_IRQS_OFF
 	.endm
 
 ENTRY(save_rest)
@@ -804,15 +808,14 @@ ret_from_intr:
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	decl PER_CPU_VAR(irq_count)
-	leaveq
 
-	CFI_RESTORE		rbp
+	/* Restore saved previous stack */
+	popq %rsi
+	leaq 16(%rsi), %rsp
+
 	CFI_DEF_CFA_REGISTER	rsp
-	CFI_ADJUST_CFA_OFFSET	-8
+	CFI_ADJUST_CFA_OFFSET	-16
 
-	/* we did not save rbx, restore only from ARGOFFSET */
-	addq $8, %rsp
-	CFI_ADJUST_CFA_OFFSET	-8
 exit_intr:
 	GET_THREAD_INFO(%rcx)
 	testl $3,CS-ARGOFFSET(%rsp)
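As a closing note on the new entry/exit bookkeeping, here is a tiny user-space model (illustrative only; the names and the array-based stack are made up, not kernel code) of what SAVE_ARGS_IRQ and ret_from_intr now do with the interrupted stack pointer: park it on the irq stack instead of in rbp.

#include <stdint.h>

#define IRQ_STACK_WORDS 64

static uintptr_t irq_stack[IRQ_STACK_WORDS];
static uintptr_t *irq_sp = irq_stack + IRQ_STACK_WORDS;	/* stack grows down */

/* Entry side: the analogue of "movq %rsp, %rsi" followed by "pushq %rsi"
 * after the stack switch -- the old stack pointer is stored on the new
 * stack, and rbp is left alone. */
static uintptr_t *irq_stack_enter(uintptr_t *old_sp)
{
	*--irq_sp = (uintptr_t)old_sp;
	return irq_sp;			/* the handler runs on the irq stack */
}

/* Exit side: the analogue of "popq %rsi" -- recover the interrupted stack
 * pointer without ever having touched the frame-pointer chain. */
static uintptr_t *irq_stack_exit(void)
{
	return (uintptr_t *)*irq_sp++;
}

The real exit path then uses leaq 16(%rsi), %rsp rather than %rsi directly: the saved value still points at the rbp slot of the partially saved register frame, and adding 16 leaves %rsp exactly where the old leaveq plus addq $8, %rsp sequence used to put it.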