author		Matt Fleming <matt@console-pimps.org>	2009-08-13 06:49:03 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2009-08-13 06:49:03 -0400
commit		bf61ad1f870be88676a07bfef69acd59ce10172e (patch)
tree		2fb8817fb76386b3d543d8d02f2cbe5877088e18 /arch
parent		4e14dfc722b8e9e07a355f97aa60a3d9f0739071 (diff)
sh: Allow multiple stack unwinders to be set up

Provide an interface for registering stack unwinders, where each unwinder
is given a rating that describes its accuracy and complexity. The more
accurate an unwinder is, the more complex it is.

If the current stack unwinder faults, then the stack unwinder with the
next highest accuracy will be used in its place (provided one is
available). For example, this allows unwinders, such as the DWARF
unwinder, to liberally sprinkle BUG()s to catch badly formed DWARF debug
info.

Signed-off-by: Matt Fleming <matt@console-pimps.org>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch')
-rw-r--r--	arch/sh/include/asm/unwinder.h	25
-rw-r--r--	arch/sh/kernel/Makefile_32	4
-rw-r--r--	arch/sh/kernel/Makefile_64	2
-rw-r--r--	arch/sh/kernel/unwinder.c	162
4 files changed, 190 insertions, 3 deletions
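
To illustrate the interface this commit introduces, here is a minimal, hypothetical sketch of how a higher-rated unwinder (for example, a DWARF-based one, as mentioned in the commit message) might register itself. The names example_dwarf_dump, example_dwarf_unwinder, the rating value of 150, and the use of early_initcall() are illustrative assumptions, not part of this patch:

	/* Hypothetical example, not part of this patch: registering a
	 * higher-rated unwinder against the interface added below. */
	#include <linux/init.h>
	#include <asm/unwinder.h>

	static void example_dwarf_dump(struct task_struct *task,
				       struct pt_regs *regs, unsigned long *sp,
				       const struct stacktrace_ops *ops,
				       void *data)
	{
		/* Walk the stack here (e.g. via DWARF CFI), calling
		 * ops->address() for each return address found. */
	}

	static struct unwinder example_dwarf_unwinder = {
		.name	= "example-dwarf-unwinder",
		.dump	= example_dwarf_dump,
		.rating	= 150,	/* higher than the basic stack reader's 50 */
	};

	static int __init example_dwarf_unwinder_init(void)
	{
		/* Insertion is sorted by rating; the highest-rated
		 * unwinder becomes curr_unwinder. */
		return unwinder_register(&example_dwarf_unwinder);
	}
	early_initcall(example_dwarf_unwinder_init);
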
diff --git a/arch/sh/include/asm/unwinder.h b/arch/sh/include/asm/unwinder.h
new file mode 100644
index 000000000000..3dc551453e28
--- /dev/null
+++ b/arch/sh/include/asm/unwinder.h
@@ -0,0 +1,25 @@
+#ifndef _LINUX_UNWINDER_H
+#define _LINUX_UNWINDER_H
+
+#include <asm/stacktrace.h>
+
+struct unwinder {
+	const char *name;
+	struct list_head list;
+	int rating;
+	void (*dump)(struct task_struct *, struct pt_regs *,
+		     unsigned long *, const struct stacktrace_ops *, void *);
+};
+
+extern int unwinder_init(void);
+extern int unwinder_register(struct unwinder *);
+
+extern void unwind_stack(struct task_struct *, struct pt_regs *,
+			 unsigned long *, const struct stacktrace_ops *,
+			 void *);
+
+extern void stack_reader_dump(struct task_struct *, struct pt_regs *,
+			      unsigned long *, const struct stacktrace_ops *,
+			      void *);
+
+#endif /* _LINUX_UNWINDER_H */
diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32
index 6b32de701a7e..37a3b7704fc6 100644
--- a/arch/sh/kernel/Makefile_32
+++ b/arch/sh/kernel/Makefile_32
@@ -11,8 +11,8 @@ endif
 
 obj-y	:= debugtraps.o dumpstack.o idle.o io.o io_generic.o irq.o	\
 	   machvec.o process_32.o ptrace_32.o setup.o signal_32.o	\
 	   sys_sh.o sys_sh32.o syscalls_32.o time.o topology.o		\
-	   traps.o traps_32.o
+	   traps.o traps_32.o unwinder.o
 
 obj-y				+= cpu/
 obj-$(CONFIG_VSYSCALL)		+= vsyscall/
diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64
index 67b9f6c6326b..00b73e7598cb 100644
--- a/arch/sh/kernel/Makefile_64
+++ b/arch/sh/kernel/Makefile_64
@@ -2,7 +2,7 @@ extra-y := head_64.o init_task.o vmlinux.lds
 
 obj-y	:= debugtraps.o idle.o io.o io_generic.o irq.o machvec.o process_64.o \
 	   ptrace_64.o setup.o signal_64.o sys_sh.o sys_sh64.o \
-	   syscalls_64.o time.o topology.o traps.o traps_64.o
+	   syscalls_64.o time.o topology.o traps.o traps_64.o unwinder.o
 
 obj-y	+= cpu/
 obj-$(CONFIG_SMP)	+= smp.o
diff --git a/arch/sh/kernel/unwinder.c b/arch/sh/kernel/unwinder.c
new file mode 100644
index 000000000000..2b30fa28b440
--- /dev/null
+++ b/arch/sh/kernel/unwinder.c
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2009 Matt Fleming
+ *
+ * Based, in part, on kernel/time/clocksource.c.
+ *
+ * This file provides arbitration code for stack unwinders.
+ *
+ * Multiple stack unwinders can be available on a system, usually with
+ * the most accurate unwinder being the currently active one.
+ */
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <asm/unwinder.h>
+#include <asm/atomic.h>
+
+/*
+ * This is the most basic stack unwinder an architecture can
+ * provide. For architectures without reliable frame pointers, e.g.
+ * RISC CPUs, it can be implemented by looking through the stack for
+ * addresses that lie within the kernel text section.
+ *
+ * Other CPUs, e.g. x86, can use their frame pointer register to
+ * construct more accurate stack traces.
+ */
+static struct list_head unwinder_list;
+static struct unwinder stack_reader = {
+	.name = "stack-reader",
+	.dump = stack_reader_dump,
+	.rating = 50,
+	.list = {
+		.next = &unwinder_list,
+		.prev = &unwinder_list,
+	},
+};
+
+/*
+ * "curr_unwinder" points to the stack unwinder currently in use. This
+ * is the unwinder with the highest rating.
+ *
+ * "unwinder_list" is a linked-list of all available unwinders, sorted
+ * by rating.
+ *
+ * All modifications of "curr_unwinder" and "unwinder_list" must be
+ * performed whilst holding "unwinder_lock".
+ */
+static struct unwinder *curr_unwinder = &stack_reader;
+
+static struct list_head unwinder_list = {
+	.next = &stack_reader.list,
+	.prev = &stack_reader.list,
+};
+
+static DEFINE_SPINLOCK(unwinder_lock);
+
+static atomic_t unwinder_running = ATOMIC_INIT(0);
+
+/**
+ * select_unwinder - Select the best registered stack unwinder.
+ *
+ * Private function. Must hold unwinder_lock when called.
+ *
+ * Select the stack unwinder with the best rating. This is useful for
+ * setting up curr_unwinder.
+ */
+static struct unwinder *select_unwinder(void)
+{
+	struct unwinder *best;
+
+	if (list_empty(&unwinder_list))
+		return NULL;
+
+	best = list_entry(unwinder_list.next, struct unwinder, list);
+	if (best == curr_unwinder)
+		return NULL;
+
+	return best;
+}
+
+/*
+ * Enqueue the stack unwinder sorted by rating.
+ */
+static int unwinder_enqueue(struct unwinder *ops)
+{
+	struct list_head *tmp, *entry = &unwinder_list;
+
+	list_for_each(tmp, &unwinder_list) {
+		struct unwinder *o;
+
+		o = list_entry(tmp, struct unwinder, list);
+		if (o == ops)
+			return -EBUSY;
+		/* Keep track of the place, where to insert */
+		if (o->rating >= ops->rating)
+			entry = tmp;
+	}
+	list_add(&ops->list, entry);
+
+	return 0;
+}
+
+/**
+ * unwinder_register - Used to install new stack unwinder
+ * @u: unwinder to be registered
+ *
+ * Install the new stack unwinder on the unwinder list, which is sorted
+ * by rating.
+ *
+ * Returns -EBUSY if registration fails, zero otherwise.
+ */
+int unwinder_register(struct unwinder *u)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&unwinder_lock, flags);
+	ret = unwinder_enqueue(u);
+	if (!ret)
+		curr_unwinder = select_unwinder();
+	spin_unlock_irqrestore(&unwinder_lock, flags);
+
+	return ret;
+}
+
+/*
+ * Unwind the call stack and pass information to the stacktrace_ops
+ * functions. Also handle the case where we need to switch to a new
+ * stack dumper because the current one faulted unexpectedly.
+ */
+void unwind_stack(struct task_struct *task, struct pt_regs *regs,
+		  unsigned long *sp, const struct stacktrace_ops *ops,
+		  void *data)
+{
+	unsigned long flags;
+
+	/*
+	 * The problem with unwinders with high ratings is that they are
+	 * inherently more complicated than the simple ones with lower
+	 * ratings. We are therefore more likely to fault in the
+	 * complicated ones, e.g. hitting BUG()s. If we fault in the
+	 * code for the current stack unwinder we try to downgrade to
+	 * one with a lower rating.
+	 *
+	 * Hopefully this will give us a semi-reliable stacktrace so we
+	 * can diagnose why curr_unwinder->dump() faulted.
+	 */
+	if (atomic_inc_return(&unwinder_running) != 1) {
+		spin_lock_irqsave(&unwinder_lock, flags);
+
+		if (!list_is_singular(&unwinder_list)) {
+			list_del(&curr_unwinder->list);
+			curr_unwinder = select_unwinder();
+		}
+
+		spin_unlock_irqrestore(&unwinder_lock, flags);
+		atomic_dec(&unwinder_running);
+	}
+
+	curr_unwinder->dump(task, regs, sp, ops, data);
+
+	atomic_dec(&unwinder_running);
+}
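
For context, a minimal sketch of how a caller (for example the arch's existing dump_trace()/show_trace() path) might invoke the arbitration entry point added above. This is an assumption about usage, not part of the patch: example_show_trace is a hypothetical helper, and print_trace_ops stands in for whatever struct stacktrace_ops the caller already has:

	/* Hypothetical caller: dispatches to curr_unwinder->dump(), i.e.
	 * whichever registered unwinder currently has the highest rating. */
	#include <asm/stacktrace.h>
	#include <asm/unwinder.h>

	static void example_show_trace(struct task_struct *task,
				       unsigned long *sp, struct pt_regs *regs)
	{
		/* print_trace_ops is assumed to be an existing
		 * stacktrace_ops instance provided by the caller. */
		unwind_stack(task, regs, sp, &print_trace_ops, NULL);
	}
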