aboutsummaryrefslogtreecommitdiffstats
path: root/arch/sh/kernel/unwinder.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/sh/kernel/unwinder.c')
-rw-r--r--  arch/sh/kernel/unwinder.c | 16 ++++++++++------
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/arch/sh/kernel/unwinder.c b/arch/sh/kernel/unwinder.c
index 2b30fa28b440..468889d958f4 100644
--- a/arch/sh/kernel/unwinder.c
+++ b/arch/sh/kernel/unwinder.c
@@ -11,6 +11,7 @@
 #include <linux/errno.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
+#include <linux/module.h>
 #include <asm/unwinder.h>
 #include <asm/atomic.h>
 
@@ -53,8 +54,6 @@ static struct list_head unwinder_list = {
 
 static DEFINE_SPINLOCK(unwinder_lock);
 
-static atomic_t unwinder_running = ATOMIC_INIT(0);
-
 /**
  * select_unwinder - Select the best registered stack unwinder.
  *
@@ -122,6 +121,8 @@ int unwinder_register(struct unwinder *u)
 	return ret;
 }
 
+int unwinder_faulted = 0;
+
 /*
  * Unwind the call stack and pass information to the stacktrace_ops
  * functions. Also handle the case where we need to switch to a new
@@ -144,19 +145,20 @@ void unwind_stack(struct task_struct *task, struct pt_regs *regs,
 	 * Hopefully this will give us a semi-reliable stacktrace so we
 	 * can diagnose why curr_unwinder->dump() faulted.
 	 */
-	if (atomic_inc_return(&unwinder_running) != 1) {
+	if (unwinder_faulted) {
 		spin_lock_irqsave(&unwinder_lock, flags);
 
-		if (!list_is_singular(&unwinder_list)) {
+		/* Make sure no one beat us to changing the unwinder */
+		if (unwinder_faulted && !list_is_singular(&unwinder_list)) {
 			list_del(&curr_unwinder->list);
 			curr_unwinder = select_unwinder();
+
+			unwinder_faulted = 0;
 		}
 
 		spin_unlock_irqrestore(&unwinder_lock, flags);
-		atomic_dec(&unwinder_running);
 	}
 
 	curr_unwinder->dump(task, regs, sp, ops, data);
-
-	atomic_dec(&unwinder_running);
 }
+EXPORT_SYMBOL_GPL(unwind_stack);