author		Matt Fleming <matt@console-pimps.org>	2009-08-16 16:54:48 -0400
committer	Matt Fleming <matt@console-pimps.org>	2009-08-21 08:02:44 -0400
commit		b344e24a8e8ceda83d1285d22e3e5baf4f5e42d3 (patch)
tree		4b9500264a797736b48b59c3f0977277ace53386 /arch/sh/kernel/unwinder.c
parent		97efbbd5886e27b61c19c77d41f6491f5d96fbd0 (diff)
sh: unwinder: Introduce UNWINDER_BUG() and UNWINDER_BUG_ON()
We can't assume that the unwinder has faulted just because the unwinder
code was already running when we entered it. Two kernel threads can
legitimately invoke the unwinder at the same time and run simultaneously.

The previous approach used BUG() and BUG_ON() in the unwinder code to
detect when the current unwinder was incapable of unwinding the stack
and the next available unwinder should be used instead. A better
approach is to explicitly invoke a trap handler to switch unwinders
when the current unwinder cannot continue.

Signed-off-by: Matt Fleming <matt@console-pimps.org>
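As a rough illustration of the idea, a minimal sketch of what a trap-based
macro pair along these lines could look like on sh. The trap vector (#0x3b)
and the simplified bodies are assumptions made for this sketch only; the
actual definitions live in arch/sh/include/asm/unwinder.h and additionally
record a bug_entry so the CONFIG_BUG machinery can print file/line
information:

/*
 * Sketch only: raise a dedicated trap that the handler built with
 * BUILD_TRAP_HANDLER(unwinder) services. The vector number 0x3b is
 * illustrative, not necessarily the one the real code reserves.
 */
#define UNWINDER_BUG()					\
do {							\
	__asm__ __volatile__ ("trapa	#0x3b");	\
} while (0)

/*
 * Sketch only: trap iff the condition holds. unlikely() comes from
 * <linux/compiler.h>.
 */
#define UNWINDER_BUG_ON(x)				\
do {							\
	if (unlikely(x))				\
		UNWINDER_BUG();				\
} while (0)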
Diffstat (limited to 'arch/sh/kernel/unwinder.c')
-rw-r--r--	arch/sh/kernel/unwinder.c | 33 +++++++++++++++++++++++++++------
1 file changed, 27 insertions(+), 6 deletions(-)
diff --git a/arch/sh/kernel/unwinder.c b/arch/sh/kernel/unwinder.c
index 2b30fa28b44..b9c122abe25 100644
--- a/arch/sh/kernel/unwinder.c
+++ b/arch/sh/kernel/unwinder.c
@@ -53,8 +53,6 @@ static struct list_head unwinder_list = {
 
 static DEFINE_SPINLOCK(unwinder_lock);
 
-static atomic_t unwinder_running = ATOMIC_INIT(0);
-
 /**
  * select_unwinder - Select the best registered stack unwinder.
  *
@@ -122,6 +120,8 @@ int unwinder_register(struct unwinder *u)
 	return ret;
 }
 
+int unwinder_faulted = 0;
+
 /*
  * Unwind the call stack and pass information to the stacktrace_ops
  * functions. Also handle the case where we need to switch to a new
@@ -144,19 +144,40 @@ void unwind_stack(struct task_struct *task, struct pt_regs *regs,
 	 * Hopefully this will give us a semi-reliable stacktrace so we
	 * can diagnose why curr_unwinder->dump() faulted.
	 */
-	if (atomic_inc_return(&unwinder_running) != 1) {
+	if (unwinder_faulted) {
 		spin_lock_irqsave(&unwinder_lock, flags);
 
-		if (!list_is_singular(&unwinder_list)) {
+		/* Make sure no one beat us to changing the unwinder */
+		if (unwinder_faulted && !list_is_singular(&unwinder_list)) {
 			list_del(&curr_unwinder->list);
 			curr_unwinder = select_unwinder();
+
+			unwinder_faulted = 0;
 		}
 
 		spin_unlock_irqrestore(&unwinder_lock, flags);
-		atomic_dec(&unwinder_running);
 	}
 
 	curr_unwinder->dump(task, regs, sp, ops, data);
+}
+
+/*
+ * Trap handler for UNWINDER_BUG() statements. We must switch to the
+ * unwinder with the next highest rating.
+ */
+BUILD_TRAP_HANDLER(unwinder)
+{
+	insn_size_t insn;
+	TRAP_HANDLER_DECL;
+
+	/* Rewind */
+	regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
+	insn = *(insn_size_t *)instruction_pointer(regs);
 
-	atomic_dec(&unwinder_running);
+	/* Switch unwinders when unwind_stack() is called */
+	unwinder_faulted = 1;
+
+#ifdef CONFIG_BUG
+	handle_BUG(regs);
+#endif
 }
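With the trap handler in place, an individual unwinder's dump()
implementation can bail out with UNWINDER_BUG() or UNWINDER_BUG_ON()
instead of BUG()/BUG_ON(): the trap sets unwinder_faulted, and the next
unwind_stack() call demotes the faulting unwinder and retries with the
next highest rated one. A hypothetical example of an unwinder doing this
(the struct unwinder fields follow arch/sh/include/asm/unwinder.h, and
the x86-style ->address(data, addr, reliable) stacktrace_ops callback is
assumed; the dump() body itself is illustrative only):

#include <linux/sched.h>	/* kstack_end() */
#include <linux/kernel.h>	/* __kernel_text_address() */
#include <asm/unwinder.h>	/* struct unwinder, UNWINDER_BUG() */
#include <asm/stacktrace.h>	/* struct stacktrace_ops */

static void example_dump(struct task_struct *task, struct pt_regs *regs,
			 unsigned long *sp, const struct stacktrace_ops *ops,
			 void *data)
{
	/*
	 * No stack pointer means this unwinder cannot make progress:
	 * raise the unwinder trap so unwind_stack() switches to the
	 * next highest rated unwinder on the following call, then give
	 * up on this trace.
	 */
	if (!sp) {
		UNWINDER_BUG();
		return;
	}

	/* Naive scan: report every kernel text address on the stack */
	while (!kstack_end(sp)) {
		unsigned long addr = *sp++;

		if (__kernel_text_address(addr))
			ops->address(data, addr, 0);
	}
}

static struct unwinder example_unwinder = {
	.name	= "example",
	.dump	= example_dump,
	.rating	= 50,
};

static int __init example_unwinder_init(void)
{
	/* select_unwinder() only picks this if nothing rates higher */
	return unwinder_register(&example_unwinder);
}
early_initcall(example_unwinder_init);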