author		Paul Mundt <lethal@linux-sh.org>	2009-08-15 00:00:02 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2009-08-15 00:00:02 -0400
commit		4b6b987969b076298485697bfb0d0e35502642a3 (patch)
tree		a8f5ebd6a0b9efbe30272012d759669b0c5ddc13 /arch/sh/kernel
parent		df47cd096c8f54a5242e3a2ffb4525c804567eda (diff)
parent		60e0a4c7adc700f2d2929cdb2d0055e519a3eb3d (diff)
Merge branch 'master' into sh/hwblk
Diffstat (limited to 'arch/sh/kernel')
-rw-r--r--	arch/sh/kernel/Makefile_32		|    7
-rw-r--r--	arch/sh/kernel/Makefile_64		|    3
-rw-r--r--	arch/sh/kernel/cpu/init.c		|   34
-rw-r--r--	arch/sh/kernel/cpu/sh2/entry.S		|    3
-rw-r--r--	arch/sh/kernel/cpu/sh2a/entry.S		|    3
-rw-r--r--	arch/sh/kernel/cpu/sh3/entry.S		|    1
-rw-r--r--	arch/sh/kernel/cpu/shmobile/sleep.S	|   70
-rw-r--r--	arch/sh/kernel/dumpstack.c		|  123
-rw-r--r--	arch/sh/kernel/dwarf.c			|  902
-rw-r--r--	arch/sh/kernel/early_printk.c		|    1
-rw-r--r--	arch/sh/kernel/entry-common.S		|    8
-rw-r--r--	arch/sh/kernel/irq.c			|    4
-rw-r--r--	arch/sh/kernel/stacktrace.c		|   98
-rw-r--r--	arch/sh/kernel/traps_32.c		|   24
-rw-r--r--	arch/sh/kernel/unwinder.c		|  162
-rw-r--r--	arch/sh/kernel/vmlinux.lds.S		|    4
16 files changed, 1387 insertions(+), 60 deletions(-)
diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32
index 94ed99b68002..f2245ebf0b31 100644
--- a/arch/sh/kernel/Makefile_32
+++ b/arch/sh/kernel/Makefile_32
@@ -9,10 +9,10 @@ ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_ftrace.o = -pg
 endif
 
-obj-y	:= debugtraps.o idle.o io.o io_generic.o irq.o	\
+obj-y	:= debugtraps.o dumpstack.o idle.o io.o io_generic.o irq.o	\
 	   machvec.o process_32.o ptrace_32.o setup.o signal_32.o	\
 	   sys_sh.o sys_sh32.o syscalls_32.o time.o topology.o	\
-	   traps.o traps_32.o
+	   traps.o traps_32.o unwinder.o
 
 obj-y				+= cpu/
 obj-$(CONFIG_VSYSCALL)		+= vsyscall/
@@ -33,6 +33,7 @@ obj-$(CONFIG_FTRACE_SYSCALLS)	+= ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER)	+= ftrace.o
 obj-$(CONFIG_DUMP_CODE)		+= disassemble.o
 obj-$(CONFIG_HIBERNATION)	+= swsusp.o
+obj-$(CONFIG_DWARF_UNWINDER)	+= dwarf.o
 
 obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o
 
diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64
index 67b9f6c6326b..639ee514266c 100644
--- a/arch/sh/kernel/Makefile_64
+++ b/arch/sh/kernel/Makefile_64
@@ -2,7 +2,7 @@ extra-y := head_64.o init_task.o vmlinux.lds
 
 obj-y	:= debugtraps.o idle.o io.o io_generic.o irq.o machvec.o process_64.o \
 	   ptrace_64.o setup.o signal_64.o sys_sh.o sys_sh64.o \
-	   syscalls_64.o time.o topology.o traps.o traps_64.o
+	   syscalls_64.o time.o topology.o traps.o traps_64.o unwinder.o
 
 obj-y				+= cpu/
 obj-$(CONFIG_SMP)		+= smp.o
@@ -13,6 +13,7 @@ obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
 obj-$(CONFIG_IO_TRAPPED)	+= io_trapped.o
 obj-$(CONFIG_GENERIC_GPIO)	+= gpio.o
+obj-$(CONFIG_DWARF_UNWINDER)	+= dwarf.o
 
 obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o
 
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index ad85421099cd..d40b9db5be03 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -3,7 +3,7 @@
 *
 * CPU init code
 *
- * Copyright (C) 2002 - 2007  Paul Mundt
+ * Copyright (C) 2002 - 2009  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
@@ -62,6 +62,37 @@ static void __init speculative_execution_init(void)
 #define speculative_execution_init()	do { } while (0)
 #endif
 
+#ifdef CONFIG_CPU_SH4A
+#define EXPMASK			0xff2f0004
+#define EXPMASK_RTEDS		(1 << 0)
+#define EXPMASK_BRDSSLP		(1 << 1)
+#define EXPMASK_MMCAW		(1 << 4)
+
+static void __init expmask_init(void)
+{
+	unsigned long expmask = __raw_readl(EXPMASK);
+
+	/*
+	 * Future proofing.
+	 *
+	 * Disable support for slottable sleep instruction
+	 * and non-nop instructions in the rte delay slot.
+	 */
+	expmask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP);
+
+	/*
+	 * Enable associative writes to the memory-mapped cache array
+	 * until the cache flush ops have been rewritten.
+	 */
+	expmask |= EXPMASK_MMCAW;
+
+	__raw_writel(expmask, EXPMASK);
+	ctrl_barrier();
+}
+#else
+#define expmask_init()	do { } while (0)
+#endif
+
 /* 2nd-level cache init */
 void __uses_jump_to_uncached __attribute__ ((weak)) l2_cache_init(void)
 {
@@ -321,4 +352,5 @@ asmlinkage void __init sh_cpu_init(void)
 #endif
 
 	speculative_execution_init();
+	expmask_init();
 }
diff --git a/arch/sh/kernel/cpu/sh2/entry.S b/arch/sh/kernel/cpu/sh2/entry.S
index becc54c45692..c8a4331d9b8d 100644
--- a/arch/sh/kernel/cpu/sh2/entry.S
+++ b/arch/sh/kernel/cpu/sh2/entry.S
@@ -227,8 +227,9 @@ ENTRY(sh_bios_handler)
 	mov.l	@r15+, r14
 	add	#8,r15
 	lds.l	@r15+, pr
+	mov.l	@r15+,r15
 	rte
-	mov.l	@r15+,r15
+	nop
 	.align	2
 1:	.long	gdb_vbr_vector
 #endif /* CONFIG_SH_STANDARD_BIOS */
diff --git a/arch/sh/kernel/cpu/sh2a/entry.S b/arch/sh/kernel/cpu/sh2a/entry.S
index ab3903eeda5c..222742ddc0d6 100644
--- a/arch/sh/kernel/cpu/sh2a/entry.S
+++ b/arch/sh/kernel/cpu/sh2a/entry.S
@@ -176,8 +176,9 @@ ENTRY(sh_bios_handler)
 	movml.l	@r15+,r14
 	add	#8,r15
 	lds.l	@r15+, pr
+	mov.l	@r15+,r15
 	rte
-	mov.l	@r15+,r15
+	nop
 	.align	2
 1:	.long	gdb_vbr_vector
 #endif /* CONFIG_SH_STANDARD_BIOS */
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index 3cb531f233f2..67ad6467c694 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -137,6 +137,7 @@ ENTRY(tlb_protection_violation_store)
 	mov	#1, r5
 
 call_dpf:
+	setup_frame_reg
 	mov.l	1f, r0
 	mov	r5, r8
 	mov.l	@r0, r6
diff --git a/arch/sh/kernel/cpu/shmobile/sleep.S b/arch/sh/kernel/cpu/shmobile/sleep.S
index 5d888ef53d82..baf2d7d46b05 100644
--- a/arch/sh/kernel/cpu/shmobile/sleep.S
+++ b/arch/sh/kernel/cpu/shmobile/sleep.S
@@ -26,8 +26,30 @@ ENTRY(sh_mobile_standby)
 
 	tst	#SUSP_SH_SF, r0
 	bt	skip_set_sf
+#ifdef CONFIG_CPU_SUBTYPE_SH7724
+	/* DBSC: put memory in self-refresh mode */
 
-	/* SDRAM: disable power down and put in self-refresh mode */
+	mov.l	dben_reg, r4
+	mov.l	dben_data0, r1
+	mov.l	r1, @r4
+
+	mov.l	dbrfpdn0_reg, r4
+	mov.l	dbrfpdn0_data0, r1
+	mov.l	r1, @r4
+
+	mov.l	dbcmdcnt_reg, r4
+	mov.l	dbcmdcnt_data0, r1
+	mov.l	r1, @r4
+
+	mov.l	dbcmdcnt_reg, r4
+	mov.l	dbcmdcnt_data1, r1
+	mov.l	r1, @r4
+
+	mov.l	dbrfpdn0_reg, r4
+	mov.l	dbrfpdn0_data1, r1
+	mov.l	r1, @r4
+#else
+	/* SBSC: disable power down and put in self-refresh mode */
 	mov.l	1f, r4
 	mov.l	2f, r1
 	mov.l	@r4, r2
@@ -35,6 +57,7 @@ ENTRY(sh_mobile_standby)
 	mov.l	3f, r3
 	and	r3, r2
 	mov.l	r2, @r4
+#endif
 
 skip_set_sf:
 	tst	#SUSP_SH_SLEEP, r0
@@ -84,7 +107,36 @@ done_sleep:
 	tst	#SUSP_SH_SF, r0
 	bt	skip_restore_sf
 
-	/* SDRAM: set auto-refresh mode */
+#ifdef CONFIG_CPU_SUBTYPE_SH7724
+	/* DBSC: put memory in auto-refresh mode */
+
+	mov.l	dbrfpdn0_reg, r4
+	mov.l	dbrfpdn0_data0, r1
+	mov.l	r1, @r4
+
+	/* sleep 140 ns */
+	nop
+	nop
+	nop
+	nop
+
+	mov.l	dbcmdcnt_reg, r4
+	mov.l	dbcmdcnt_data0, r1
+	mov.l	r1, @r4
+
+	mov.l	dbcmdcnt_reg, r4
+	mov.l	dbcmdcnt_data1, r1
+	mov.l	r1, @r4
+
+	mov.l	dben_reg, r4
+	mov.l	dben_data1, r1
+	mov.l	r1, @r4
+
+	mov.l	dbrfpdn0_reg, r4
+	mov.l	dbrfpdn0_data2, r1
+	mov.l	r1, @r4
+#else
+	/* SBSC: set auto-refresh mode */
 	mov.l	1f, r4
 	mov.l	@r4, r2
 	mov.l	4f, r3
@@ -98,15 +150,29 @@ done_sleep:
 	add	r4, r3
 	or	r2, r3
 	mov.l	r3, @r1
+#endif
 skip_restore_sf:
 	rts
 	nop
 
 	.balign 4
+#ifdef CONFIG_CPU_SUBTYPE_SH7724
+dben_reg:	.long	0xfd000010 /* DBEN */
+dben_data0:	.long	0
+dben_data1:	.long	1
+dbrfpdn0_reg:	.long	0xfd000040 /* DBRFPDN0 */
+dbrfpdn0_data0:	.long	0
+dbrfpdn0_data1:	.long	1
+dbrfpdn0_data2:	.long	0x00010000
+dbcmdcnt_reg:	.long	0xfd000014 /* DBCMDCNT */
+dbcmdcnt_data0:	.long	2
+dbcmdcnt_data1:	.long	4
+#else
1:	.long	0xfe400008 /* SDCR0 */
2:	.long	0x00000400
3:	.long	0xffff7fff
4:	.long	0xfffffbff
+#endif
5:	.long	0xa4150020 /* STBCR */
6:	.long	0xfe40001c /* RTCOR */
7:	.long	0xfe400018 /* RTCNT */
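
For readers who don't speak SH assembly, the SH7724 branch above is nothing more than a fixed sequence of 32-bit writes to three DBSC memory-controller registers, with both the addresses and the data values coming from the literal pool at the end of the file. A rough C rendering of the self-refresh entry sequence follows; this is an illustrative sketch only (it would only do anything meaningful on the real hardware), and what each data value commands the controller to do is not documented in the patch, so the comments merely restate the write order:

    #include <stdint.h>

    /* Register addresses from the literal pool above (SH7724 DBSC). */
    #define DBEN		0xfd000010u
    #define DBCMDCNT	0xfd000014u
    #define DBRFPDN0	0xfd000040u

    /* Equivalent of "mov.l r1, @r4" to a memory-mapped register. */
    static inline void reg_write(uint32_t addr, uint32_t val)
    {
    	*(volatile uint32_t *)(uintptr_t)addr = val;
    }

    /* Mirrors the "DBSC: put memory in self-refresh mode" block. */
    static void dbsc_enter_self_refresh(void)
    {
    	reg_write(DBEN, 0);		/* dben_data0 */
    	reg_write(DBRFPDN0, 0);		/* dbrfpdn0_data0 */
    	reg_write(DBCMDCNT, 2);		/* dbcmdcnt_data0 */
    	reg_write(DBCMDCNT, 4);		/* dbcmdcnt_data1 */
    	reg_write(DBRFPDN0, 1);		/* dbrfpdn0_data1 */
    }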
diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c
new file mode 100644
index 000000000000..6f5ad1513409
--- /dev/null
+++ b/arch/sh/kernel/dumpstack.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 1991, 1992  Linus Torvalds
+ * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
+ * Copyright (C) 2009  Matt Fleming
+ */
+#include <linux/kallsyms.h>
+#include <linux/ftrace.h>
+#include <linux/debug_locks.h>
+#include <asm/unwinder.h>
+#include <asm/stacktrace.h>
+
+void printk_address(unsigned long address, int reliable)
+{
+	printk(" [<%p>] %s%pS\n", (void *) address,
+	       reliable ? "" : "? ", (void *) address);
+}
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static void
+print_ftrace_graph_addr(unsigned long addr, void *data,
+			const struct stacktrace_ops *ops,
+			struct thread_info *tinfo, int *graph)
+{
+	struct task_struct *task = tinfo->task;
+	unsigned long ret_addr;
+	int index = task->curr_ret_stack;
+
+	if (addr != (unsigned long)return_to_handler)
+		return;
+
+	if (!task->ret_stack || index < *graph)
+		return;
+
+	index -= *graph;
+	ret_addr = task->ret_stack[index].ret;
+
+	ops->address(data, ret_addr, 1);
+
+	(*graph)++;
+}
+#else
+static inline void
+print_ftrace_graph_addr(unsigned long addr, void *data,
+			const struct stacktrace_ops *ops,
+			struct thread_info *tinfo, int *graph)
+{ }
+#endif
+
+void
+stack_reader_dump(struct task_struct *task, struct pt_regs *regs,
+		  unsigned long *sp, const struct stacktrace_ops *ops,
+		  void *data)
+{
+	struct thread_info *context;
+	int graph = 0;
+
+	context = (struct thread_info *)
+		((unsigned long)sp & (~(THREAD_SIZE - 1)));
+
+	while (!kstack_end(sp)) {
+		unsigned long addr = *sp++;
+
+		if (__kernel_text_address(addr)) {
+			ops->address(data, addr, 1);
+
+			print_ftrace_graph_addr(addr, data, ops,
+						context, &graph);
+		}
+	}
+}
+
+static void
+print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
+{
+	printk(data);
+	print_symbol(msg, symbol);
+	printk("\n");
+}
+
+static void print_trace_warning(void *data, char *msg)
+{
+	printk("%s%s\n", (char *)data, msg);
+}
+
+static int print_trace_stack(void *data, char *name)
+{
+	printk("%s <%s> ", (char *)data, name);
+	return 0;
+}
+
+/*
+ * Print one address/symbol entry per line.
+ */
+static void print_trace_address(void *data, unsigned long addr, int reliable)
+{
+	printk(data);
+	printk_address(addr, reliable);
+}
+
+static const struct stacktrace_ops print_trace_ops = {
+	.warning = print_trace_warning,
+	.warning_symbol = print_trace_warning_symbol,
+	.stack = print_trace_stack,
+	.address = print_trace_address,
+};
+
+void show_trace(struct task_struct *tsk, unsigned long *sp,
+		struct pt_regs *regs)
+{
+	if (regs && user_mode(regs))
+		return;
+
+	printk("\nCall trace:\n");
+
+	unwind_stack(tsk, regs, sp, &print_trace_ops, "");
+
+	printk("\n");
+
+	if (!tsk)
+		tsk = current;
+
+	debug_show_held_locks(tsk);
+}
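
The stacktrace_ops table above is what lets one stack walker serve several consumers: dumpstack.c plugs in printing callbacks, stacktrace.c (further down) plugs in save-to-buffer callbacks, and the walker never needs to know the difference. A minimal user-space sketch of the same pattern (hypothetical names throughout, nothing here is kernel API):

    #include <stdio.h>

    /* Miniature of the kernel's stacktrace_ops idea: the walker finds
     * addresses, the ops table decides what to do with each one. */
    struct trace_ops {
    	void (*address)(void *data, unsigned long addr, int reliable);
    };

    static void walk(unsigned long *sp, unsigned long *end,
    		 const struct trace_ops *ops, void *data)
    {
    	while (sp < end)
    		ops->address(data, *sp++, 1);
    }

    static void print_addr(void *data, unsigned long addr, int reliable)
    {
    	printf("%s[<%#lx>]\n", reliable ? " " : " ? ", addr);
    }

    int main(void)
    {
    	unsigned long fake_stack[] = { 0x8c0010a0, 0x8c204f30 };
    	const struct trace_ops ops = { .address = print_addr };

    	walk(fake_stack, fake_stack + 2, &ops, NULL);
    	return 0;
    }

Swapping in a different .address callback (one that appends to an array, say) changes the consumer without touching the walker, which is exactly how save_stack_trace() reuses unwind_stack() below.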
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
new file mode 100644
index 000000000000..c6c5764a8ab1
--- /dev/null
+++ b/arch/sh/kernel/dwarf.c
@@ -0,0 +1,902 @@
+/*
+ * Copyright (C) 2009 Matt Fleming <matt@console-pimps.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * This is an implementation of a DWARF unwinder. Its main purpose is
+ * for generating stacktrace information. Based on the DWARF 3
+ * specification from http://www.dwarfstd.org.
+ *
+ * TODO:
+ *	- DWARF64 doesn't work.
+ */
+
+/* #define DEBUG */
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <asm/dwarf.h>
+#include <asm/unwinder.h>
+#include <asm/sections.h>
+#include <asm/unaligned.h>
+#include <asm/dwarf.h>
+#include <asm/stacktrace.h>
+
+static LIST_HEAD(dwarf_cie_list);
+DEFINE_SPINLOCK(dwarf_cie_lock);
+
+static LIST_HEAD(dwarf_fde_list);
+DEFINE_SPINLOCK(dwarf_fde_lock);
+
+static struct dwarf_cie *cached_cie;
+
+/*
+ * Figure out whether we need to allocate some dwarf registers. If dwarf
+ * registers have already been allocated then we may need to realloc
+ * them. "reg" is a register number that we need to be able to access
+ * after this call.
+ *
+ * Register numbers start at zero, therefore we need to allocate space
+ * for "reg" + 1 registers.
+ */
+static void dwarf_frame_alloc_regs(struct dwarf_frame *frame,
+				   unsigned int reg)
+{
+	struct dwarf_reg *regs;
+	unsigned int num_regs = reg + 1;
+	size_t new_size;
+	size_t old_size;
+
+	new_size = num_regs * sizeof(*regs);
+	old_size = frame->num_regs * sizeof(*regs);
+
+	/* Fast path: don't allocate any regs if we've already got enough. */
+	if (frame->num_regs >= num_regs)
+		return;
+
+	regs = kzalloc(new_size, GFP_ATOMIC);
+	if (!regs) {
+		printk(KERN_WARNING "Unable to allocate DWARF registers\n");
+		/*
+		 * Let's just bomb hard here, we have no way to
+		 * gracefully recover.
+		 */
+		BUG();
+	}
+
+	if (frame->regs) {
+		memcpy(regs, frame->regs, old_size);
+		kfree(frame->regs);
+	}
+
+	frame->regs = regs;
+	frame->num_regs = num_regs;
+}
+
+/**
+ * dwarf_read_addr - read dwarf data
+ * @src: source address of data
+ * @dst: destination address to store the data to
+ *
+ * Read 'n' bytes from @src, where 'n' is the size of an address on
+ * the native machine, and store them at @dst. We have to be careful
+ * when reading from @src and writing to @dst, because they can be
+ * arbitrarily aligned. Return 'n', the number of bytes read, which
+ * should always be sizeof(unsigned long).
+ */
+static inline int dwarf_read_addr(unsigned long *src, unsigned long *dst)
+{
+	u32 val = get_unaligned(src);
+	put_unaligned(val, dst);
+	return sizeof(unsigned long *);
+}
+
+/**
+ * dwarf_read_uleb128 - read unsigned LEB128 data
+ * @addr: the address where the ULEB128 data is stored
+ * @ret: address to store the result
+ *
+ * Decode an unsigned LEB128 encoded datum. The algorithm is taken
+ * from Appendix C of the DWARF 3 spec. For information on the
+ * encodings refer to section "7.6 - Variable Length Data". Return
+ * the number of bytes read.
+ */
+static inline unsigned long dwarf_read_uleb128(char *addr, unsigned int *ret)
+{
+	unsigned int result;
+	unsigned char byte;
+	int shift, count;
+
+	result = 0;
+	shift = 0;
+	count = 0;
+
+	while (1) {
+		byte = __raw_readb(addr);
+		addr++;
+		count++;
+
+		result |= (byte & 0x7f) << shift;
+		shift += 7;
+
+		if (!(byte & 0x80))
+			break;
+	}
+
+	*ret = result;
+
+	return count;
+}
+
+/**
+ * dwarf_read_leb128 - read signed LEB128 data
+ * @addr: the address of the LEB128 encoded data
+ * @ret: address to store the result
+ *
+ * Decode signed LEB128 data. The algorithm is taken from Appendix
+ * C of the DWARF 3 spec. Return the number of bytes read.
+ */
+static inline unsigned long dwarf_read_leb128(char *addr, int *ret)
+{
+	unsigned char byte;
+	int result, shift;
+	int num_bits;
+	int count;
+
+	result = 0;
+	shift = 0;
+	count = 0;
+
+	while (1) {
+		byte = __raw_readb(addr);
+		addr++;
+		result |= (byte & 0x7f) << shift;
+		shift += 7;
+		count++;
+
+		if (!(byte & 0x80))
+			break;
+	}
+
+	/* The number of bits in a signed integer. */
+	num_bits = 8 * sizeof(result);
+
+	if ((shift < num_bits) && (byte & 0x40))
+		result |= (-1 << shift);
+
+	*ret = result;
+
+	return count;
+}
+
+/**
+ * dwarf_read_encoded_value - return the decoded value at @addr
+ * @addr: the address of the encoded value
+ * @val: where to write the decoded value
+ * @encoding: the encoding with which we can decode @addr
+ *
+ * GCC emits encoded address in the .eh_frame FDE entries. Decode
+ * the value at @addr using @encoding. The decoded value is written
+ * to @val and the number of bytes read is returned.
+ */
+static int dwarf_read_encoded_value(char *addr, unsigned long *val,
+				    char encoding)
+{
+	unsigned long decoded_addr = 0;
+	int count = 0;
+
+	switch (encoding & 0x70) {
+	case DW_EH_PE_absptr:
+		break;
+	case DW_EH_PE_pcrel:
+		decoded_addr = (unsigned long)addr;
+		break;
+	default:
+		pr_debug("encoding=0x%x\n", (encoding & 0x70));
+		BUG();
+	}
+
+	if ((encoding & 0x07) == 0x00)
+		encoding |= DW_EH_PE_udata4;
+
+	switch (encoding & 0x0f) {
+	case DW_EH_PE_sdata4:
+	case DW_EH_PE_udata4:
+		count += 4;
+		decoded_addr += get_unaligned((u32 *)addr);
+		__raw_writel(decoded_addr, val);
+		break;
+	default:
+		pr_debug("encoding=0x%x\n", encoding);
+		BUG();
+	}
+
+	return count;
+}
+
+/**
+ * dwarf_entry_len - return the length of an FDE or CIE
+ * @addr: the address of the entry
+ * @len: the length of the entry
+ *
+ * Read the initial_length field of the entry and store the size of
+ * the entry in @len. We return the number of bytes read. Return a
+ * count of 0 on error.
+ */
+static inline int dwarf_entry_len(char *addr, unsigned long *len)
+{
+	u32 initial_len;
+	int count;
+
+	initial_len = get_unaligned((u32 *)addr);
+	count = 4;
+
+	/*
+	 * An initial length field value in the range DW_LEN_EXT_LO -
+	 * DW_LEN_EXT_HI indicates an extension, and should not be
+	 * interpreted as a length. The only extension that we currently
+	 * understand is the use of DWARF64 addresses.
+	 */
+	if (initial_len >= DW_EXT_LO && initial_len <= DW_EXT_HI) {
+		/*
+		 * The 64-bit length field immediately follows the
+		 * compulsory 32-bit length field.
+		 */
+		if (initial_len == DW_EXT_DWARF64) {
+			*len = get_unaligned((u64 *)(addr + 4));
+			count = 12;
+		} else {
+			printk(KERN_WARNING "Unknown DWARF extension\n");
+			count = 0;
+		}
+	} else
+		*len = initial_len;
+
+	return count;
+}
+
+/**
+ * dwarf_lookup_cie - locate the cie
+ * @cie_ptr: pointer to help with lookup
+ */
+static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
+{
+	struct dwarf_cie *cie, *n;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dwarf_cie_lock, flags);
+
+	/*
+	 * We've cached the last CIE we looked up because chances are
+	 * that the FDE wants this CIE.
+	 */
+	if (cached_cie && cached_cie->cie_pointer == cie_ptr) {
+		cie = cached_cie;
+		goto out;
+	}
+
+	list_for_each_entry_safe(cie, n, &dwarf_cie_list, link) {
+		if (cie->cie_pointer == cie_ptr) {
+			cached_cie = cie;
+			break;
+		}
+	}
+
+	/* Couldn't find the entry in the list. */
+	if (&cie->link == &dwarf_cie_list)
+		cie = NULL;
+out:
+	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
+	return cie;
+}
+
+/**
+ * dwarf_lookup_fde - locate the FDE that covers pc
+ * @pc: the program counter
+ */
+struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
+{
+	unsigned long flags;
+	struct dwarf_fde *fde, *n;
+
+	spin_lock_irqsave(&dwarf_fde_lock, flags);
+	list_for_each_entry_safe(fde, n, &dwarf_fde_list, link) {
+		unsigned long start, end;
+
+		start = fde->initial_location;
+		end = fde->initial_location + fde->address_range;
+
+		if (pc >= start && pc < end)
+			break;
+	}
+
+	/* Couldn't find the entry in the list. */
+	if (&fde->link == &dwarf_fde_list)
+		fde = NULL;
+
+	spin_unlock_irqrestore(&dwarf_fde_lock, flags);
+
+	return fde;
+}
+
+/**
+ * dwarf_cfa_execute_insns - execute instructions to calculate a CFA
+ * @insn_start: address of the first instruction
+ * @insn_end: address of the last instruction
+ * @cie: the CIE for this function
+ * @fde: the FDE for this function
+ * @frame: the instructions calculate the CFA for this frame
+ * @pc: the program counter of the address we're interested in
+ * @define_ra: keep executing insns until the return addr reg is defined?
+ *
+ * Execute the Call Frame instruction sequence starting at
+ * @insn_start and ending at @insn_end. The instructions describe
+ * how to calculate the Canonical Frame Address of a stackframe.
+ * Store the results in @frame.
+ */
+static int dwarf_cfa_execute_insns(unsigned char *insn_start,
+				   unsigned char *insn_end,
+				   struct dwarf_cie *cie,
+				   struct dwarf_fde *fde,
+				   struct dwarf_frame *frame,
+				   unsigned long pc,
+				   bool define_ra)
+{
+	unsigned char insn;
+	unsigned char *current_insn;
+	unsigned int count, delta, reg, expr_len, offset;
+	bool seen_ra_reg;
+
+	current_insn = insn_start;
+
+	/*
+	 * If we're executing instructions for the dwarf_unwind_stack()
+	 * FDE we need to keep executing instructions until the value of
+	 * DWARF_ARCH_RA_REG is defined. See the comment in
+	 * dwarf_unwind_stack() for more details.
+	 */
+	if (define_ra)
+		seen_ra_reg = false;
+	else
+		seen_ra_reg = true;
+
+	while (current_insn < insn_end && (frame->pc <= pc || !seen_ra_reg)) {
+		insn = __raw_readb(current_insn++);
+
+		if (!seen_ra_reg) {
+			if (frame->num_regs >= DWARF_ARCH_RA_REG &&
+			    frame->regs[DWARF_ARCH_RA_REG].flags)
+				seen_ra_reg = true;
+		}
+
+		/*
+		 * Firstly, handle the opcodes that embed their operands
+		 * in the instructions.
+		 */
+		switch (DW_CFA_opcode(insn)) {
+		case DW_CFA_advance_loc:
+			delta = DW_CFA_operand(insn);
+			delta *= cie->code_alignment_factor;
+			frame->pc += delta;
+			continue;
+			/* NOTREACHED */
+		case DW_CFA_offset:
+			reg = DW_CFA_operand(insn);
+			count = dwarf_read_uleb128(current_insn, &offset);
+			current_insn += count;
+			offset *= cie->data_alignment_factor;
+			dwarf_frame_alloc_regs(frame, reg);
+			frame->regs[reg].addr = offset;
+			frame->regs[reg].flags |= DWARF_REG_OFFSET;
+			continue;
+			/* NOTREACHED */
+		case DW_CFA_restore:
+			reg = DW_CFA_operand(insn);
+			continue;
+			/* NOTREACHED */
+		}
+
+		/*
+		 * Secondly, handle the opcodes that don't embed their
+		 * operands in the instruction.
+		 */
+		switch (insn) {
+		case DW_CFA_nop:
+			continue;
+		case DW_CFA_advance_loc1:
+			delta = *current_insn++;
+			frame->pc += delta * cie->code_alignment_factor;
+			break;
+		case DW_CFA_advance_loc2:
+			delta = get_unaligned((u16 *)current_insn);
+			current_insn += 2;
+			frame->pc += delta * cie->code_alignment_factor;
+			break;
+		case DW_CFA_advance_loc4:
+			delta = get_unaligned((u32 *)current_insn);
+			current_insn += 4;
+			frame->pc += delta * cie->code_alignment_factor;
+			break;
+		case DW_CFA_offset_extended:
+			count = dwarf_read_uleb128(current_insn, &reg);
+			current_insn += count;
+			count = dwarf_read_uleb128(current_insn, &offset);
+			current_insn += count;
+			offset *= cie->data_alignment_factor;
+			break;
+		case DW_CFA_restore_extended:
+			count = dwarf_read_uleb128(current_insn, &reg);
+			current_insn += count;
+			break;
+		case DW_CFA_undefined:
+			count = dwarf_read_uleb128(current_insn, &reg);
+			current_insn += count;
+			break;
+		case DW_CFA_def_cfa:
+			count = dwarf_read_uleb128(current_insn,
+						   &frame->cfa_register);
+			current_insn += count;
+			count = dwarf_read_uleb128(current_insn,
+						   &frame->cfa_offset);
+			current_insn += count;
+
+			frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
+			break;
+		case DW_CFA_def_cfa_register:
+			count = dwarf_read_uleb128(current_insn,
+						   &frame->cfa_register);
+			current_insn += count;
+			frame->cfa_offset = 0;
+			frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
+			break;
+		case DW_CFA_def_cfa_offset:
+			count = dwarf_read_uleb128(current_insn, &offset);
+			current_insn += count;
+			frame->cfa_offset = offset;
+			break;
+		case DW_CFA_def_cfa_expression:
+			count = dwarf_read_uleb128(current_insn, &expr_len);
+			current_insn += count;
+
+			frame->cfa_expr = current_insn;
+			frame->cfa_expr_len = expr_len;
+			current_insn += expr_len;
+
+			frame->flags |= DWARF_FRAME_CFA_REG_EXP;
+			break;
+		case DW_CFA_offset_extended_sf:
+			count = dwarf_read_uleb128(current_insn, &reg);
+			current_insn += count;
+			count = dwarf_read_leb128(current_insn, &offset);
+			current_insn += count;
+			offset *= cie->data_alignment_factor;
+			dwarf_frame_alloc_regs(frame, reg);
+			frame->regs[reg].flags |= DWARF_REG_OFFSET;
+			frame->regs[reg].addr = offset;
+			break;
+		case DW_CFA_val_offset:
+			count = dwarf_read_uleb128(current_insn, &reg);
+			current_insn += count;
+			count = dwarf_read_leb128(current_insn, &offset);
+			offset *= cie->data_alignment_factor;
+			frame->regs[reg].flags |= DWARF_REG_OFFSET;
+			frame->regs[reg].addr = offset;
+			break;
+		default:
+			pr_debug("unhandled DWARF instruction 0x%x\n", insn);
+			break;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * dwarf_unwind_stack - recursively unwind the stack
+ * @pc: address of the function to unwind
+ * @prev: struct dwarf_frame of the previous stackframe on the callstack
+ *
+ * Return a struct dwarf_frame representing the most recent frame
+ * on the callstack. Each of the lower (older) stack frames are
+ * linked via the "prev" member.
+ */
+struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
+				       struct dwarf_frame *prev)
+{
+	struct dwarf_frame *frame;
+	struct dwarf_cie *cie;
+	struct dwarf_fde *fde;
+	unsigned long addr;
+	int i, offset;
+	bool define_ra = false;
+
+	/*
+	 * If this is the first invocation of this recursive function we
+	 * need to get the contents of a physical register to get the CFA
+	 * in order to begin the virtual unwinding of the stack.
+	 *
+	 * Setting "define_ra" to true indicates that we want
+	 * dwarf_cfa_execute_insns() to continue executing instructions
+	 * until we know how to calculate the value of DWARF_ARCH_RA_REG
+	 * (which we need in order to kick off the whole unwinding
+	 * process).
+	 *
+	 * NOTE: the return address is guaranteed to be set up by the
+	 * time this function makes its first function call.
+	 */
+	if (!pc && !prev) {
+		pc = (unsigned long)&dwarf_unwind_stack;
+		define_ra = true;
+	}
+
+	frame = kzalloc(sizeof(*frame), GFP_ATOMIC);
+	if (!frame)
+		return NULL;
+
+	frame->prev = prev;
+
+	fde = dwarf_lookup_fde(pc);
+	if (!fde) {
+		/*
+		 * This is our normal exit path - the one that stops the
+		 * recursion. There's two reasons why we might exit
+		 * here,
+		 *
+		 *	a) pc has no associated DWARF frame info and so
+		 *	we don't know how to unwind this frame. This is
+		 *	usually the case when we're trying to unwind a
+		 *	frame that was called from some assembly code
+		 *	that has no DWARF info, e.g. syscalls.
+		 *
+		 *	b) the debug info for pc is bogus. There's
+		 *	really no way to distinguish this case from the
+		 *	case above, which sucks because we could print a
+		 *	warning here.
+		 */
+		return NULL;
+	}
+
+	cie = dwarf_lookup_cie(fde->cie_pointer);
+
+	frame->pc = fde->initial_location;
+
+	/* CIE initial instructions */
+	dwarf_cfa_execute_insns(cie->initial_instructions,
+				cie->instructions_end, cie, fde,
+				frame, pc, false);
+
+	/* FDE instructions */
+	dwarf_cfa_execute_insns(fde->instructions, fde->end, cie,
+				fde, frame, pc, define_ra);
+
+	/* Calculate the CFA */
+	switch (frame->flags) {
+	case DWARF_FRAME_CFA_REG_OFFSET:
+		if (prev) {
+			BUG_ON(!prev->regs[frame->cfa_register].flags);
+
+			addr = prev->cfa;
+			addr += prev->regs[frame->cfa_register].addr;
+			frame->cfa = __raw_readl(addr);
+
+		} else {
+			/*
+			 * Again, this is the first invocation of this
+			 * recursive function. We need to physically
+			 * read the contents of a register in order to
+			 * get the Canonical Frame Address for this
+			 * function.
+			 */
+			frame->cfa = dwarf_read_arch_reg(frame->cfa_register);
+		}
+
+		frame->cfa += frame->cfa_offset;
+		break;
+	default:
+		BUG();
+	}
+
+	/* If we haven't seen the return address reg, we're screwed. */
+	BUG_ON(!frame->regs[DWARF_ARCH_RA_REG].flags);
+
+	for (i = 0; i < frame->num_regs; i++) {
+		struct dwarf_reg *reg = &frame->regs[i];
+
+		if (!reg->flags)
+			continue;
+
+		offset = reg->addr;
+		offset += frame->cfa;
+	}
+
+	addr = frame->cfa + frame->regs[DWARF_ARCH_RA_REG].addr;
+	frame->return_addr = __raw_readl(addr);
+
+	frame->next = dwarf_unwind_stack(frame->return_addr, frame);
+	return frame;
+}
+
+static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
+			   unsigned char *end)
+{
+	struct dwarf_cie *cie;
+	unsigned long flags;
+	int count;
+
+	cie = kzalloc(sizeof(*cie), GFP_KERNEL);
+	if (!cie)
+		return -ENOMEM;
+
+	cie->length = len;
+
+	/*
+	 * Record the offset into the .eh_frame section
+	 * for this CIE. It allows this CIE to be
+	 * quickly and easily looked up from the
+	 * corresponding FDE.
+	 */
+	cie->cie_pointer = (unsigned long)entry;
+
+	cie->version = *(char *)p++;
+	BUG_ON(cie->version != 1);
+
+	cie->augmentation = p;
+	p += strlen(cie->augmentation) + 1;
+
+	count = dwarf_read_uleb128(p, &cie->code_alignment_factor);
+	p += count;
+
+	count = dwarf_read_leb128(p, &cie->data_alignment_factor);
+	p += count;
+
+	/*
+	 * Which column in the rule table contains the
+	 * return address?
+	 */
+	if (cie->version == 1) {
+		cie->return_address_reg = __raw_readb(p);
+		p++;
+	} else {
+		count = dwarf_read_uleb128(p, &cie->return_address_reg);
+		p += count;
+	}
+
+	if (cie->augmentation[0] == 'z') {
+		unsigned int length, count;
+		cie->flags |= DWARF_CIE_Z_AUGMENTATION;
+
+		count = dwarf_read_uleb128(p, &length);
+		p += count;
+
+		BUG_ON((unsigned char *)p > end);
+
+		cie->initial_instructions = p + length;
+		cie->augmentation++;
+	}
+
+	while (*cie->augmentation) {
+		/*
+		 * "L" indicates a byte showing how the
+		 * LSDA pointer is encoded. Skip it.
+		 */
+		if (*cie->augmentation == 'L') {
+			p++;
+			cie->augmentation++;
+		} else if (*cie->augmentation == 'R') {
+			/*
+			 * "R" indicates a byte showing
+			 * how FDE addresses are
+			 * encoded.
+			 */
+			cie->encoding = *(char *)p++;
+			cie->augmentation++;
+		} else if (*cie->augmentation == 'P') {
+			/*
+			 * "P" indicates a personality
+			 * routine in the CIE
+			 * augmentation.
+			 */
+			BUG();
+		} else if (*cie->augmentation == 'S') {
+			BUG();
+		} else {
+			/*
+			 * Unknown augmentation. Assume
+			 * 'z' augmentation.
+			 */
+			p = cie->initial_instructions;
+			BUG_ON(!p);
+			break;
+		}
+	}
+
+	cie->initial_instructions = p;
+	cie->instructions_end = end;
+
+	/* Add to list */
+	spin_lock_irqsave(&dwarf_cie_lock, flags);
+	list_add_tail(&cie->link, &dwarf_cie_list);
+	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
+
+	return 0;
+}
+
+static int dwarf_parse_fde(void *entry, u32 entry_type,
+			   void *start, unsigned long len)
+{
+	struct dwarf_fde *fde;
+	struct dwarf_cie *cie;
+	unsigned long flags;
+	int count;
+	void *p = start;
+
+	fde = kzalloc(sizeof(*fde), GFP_KERNEL);
+	if (!fde)
+		return -ENOMEM;
+
+	fde->length = len;
+
+	/*
+	 * In a .eh_frame section the CIE pointer is the
+	 * delta between the address of the CIE-pointer field and the CIE.
+	 */
+	fde->cie_pointer = (unsigned long)(p - entry_type - 4);
+
+	cie = dwarf_lookup_cie(fde->cie_pointer);
+	fde->cie = cie;
+
+	if (cie->encoding)
+		count = dwarf_read_encoded_value(p, &fde->initial_location,
+						 cie->encoding);
+	else
+		count = dwarf_read_addr(p, &fde->initial_location);
+
+	p += count;
+
+	if (cie->encoding)
+		count = dwarf_read_encoded_value(p, &fde->address_range,
+						 cie->encoding & 0x0f);
+	else
+		count = dwarf_read_addr(p, &fde->address_range);
+
+	p += count;
+
+	if (fde->cie->flags & DWARF_CIE_Z_AUGMENTATION) {
+		unsigned int length;
+		count = dwarf_read_uleb128(p, &length);
+		p += count + length;
+	}
+
+	/* Call frame instructions. */
+	fde->instructions = p;
+	fde->end = start + len;
+
+	/* Add to list. */
+	spin_lock_irqsave(&dwarf_fde_lock, flags);
+	list_add_tail(&fde->link, &dwarf_fde_list);
+	spin_unlock_irqrestore(&dwarf_fde_lock, flags);
+
+	return 0;
+}
+
+static void dwarf_unwinder_dump(struct task_struct *task, struct pt_regs *regs,
+				unsigned long *sp,
+				const struct stacktrace_ops *ops, void *data)
+{
+	struct dwarf_frame *frame;
+
+	frame = dwarf_unwind_stack(0, NULL);
+
+	while (frame && frame->return_addr) {
+		ops->address(data, frame->return_addr, 1);
+		frame = frame->next;
+	}
+}
+
+static struct unwinder dwarf_unwinder = {
+	.name = "dwarf-unwinder",
+	.dump = dwarf_unwinder_dump,
+	.rating = 150,
+};
+
+static void dwarf_unwinder_cleanup(void)
+{
+	struct dwarf_cie *cie, *m;
+	struct dwarf_fde *fde, *n;
+	unsigned long flags;
+
+	/*
+	 * Deallocate all the memory allocated for the DWARF unwinder.
+	 * Traverse all the FDE/CIE lists and remove and free all the
+	 * memory associated with those data structures.
+	 */
+	spin_lock_irqsave(&dwarf_cie_lock, flags);
+	list_for_each_entry_safe(cie, m, &dwarf_cie_list, link)
+		kfree(cie);
+	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
+
+	spin_lock_irqsave(&dwarf_fde_lock, flags);
+	list_for_each_entry_safe(fde, n, &dwarf_fde_list, link)
+		kfree(fde);
+	spin_unlock_irqrestore(&dwarf_fde_lock, flags);
+}
+
+/**
+ * dwarf_unwinder_init - initialise the dwarf unwinder
+ *
+ * Build the data structures describing the .eh_frame section to
+ * make it easier to lookup CIE and FDE entries. Because the
+ * .eh_frame section is packed as tightly as possible it is not
+ * easy to lookup the FDE for a given PC, so we build a list of FDE
+ * and CIE entries that make it easier.
+ */
+void dwarf_unwinder_init(void)
+{
+	u32 entry_type;
+	void *p, *entry;
+	int count, err;
+	unsigned long len;
+	unsigned int c_entries, f_entries;
+	unsigned char *end;
+	INIT_LIST_HEAD(&dwarf_cie_list);
+	INIT_LIST_HEAD(&dwarf_fde_list);
+
+	c_entries = 0;
+	f_entries = 0;
+	entry = &__start_eh_frame;
+
+	while ((char *)entry < __stop_eh_frame) {
+		p = entry;
+
+		count = dwarf_entry_len(p, &len);
+		if (count == 0) {
+			/*
+			 * We read a bogus length field value. There is
+			 * nothing we can do here apart from disabling
+			 * the DWARF unwinder. We can't even skip this
+			 * entry and move to the next one because 'len'
+			 * tells us where our next entry is.
+			 */
+			goto out;
+		} else
+			p += count;
+
+		/* initial length does not include itself */
+		end = p + len;
+
+		entry_type = get_unaligned((u32 *)p);
+		p += 4;
+
+		if (entry_type == DW_EH_FRAME_CIE) {
+			err = dwarf_parse_cie(entry, p, len, end);
+			if (err < 0)
+				goto out;
+			else
+				c_entries++;
+		} else {
+			err = dwarf_parse_fde(entry, entry_type, p, len);
+			if (err < 0)
+				goto out;
+			else
+				f_entries++;
+		}
+
+		entry = (char *)entry + len + 4;
+	}
+
+	printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n",
+	       c_entries, f_entries);
+
+	err = unwinder_register(&dwarf_unwinder);
+	if (err)
+		goto out;
+
+	return;
+
+out:
+	printk(KERN_ERR "Failed to initialise DWARF unwinder: %d\n", err);
+	dwarf_unwinder_cleanup();
+}
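
dwarf_read_uleb128() and dwarf_read_leb128() above implement the variable-length integer encoding described in section "7.6 - Variable Length Data" of the DWARF 3 spec: seven payload bits per byte, low bits first, with the high bit of each byte acting as a continuation flag. Here is a self-contained sketch of the unsigned decoder that compiles and runs anywhere; the three-byte input is the worked example from the spec and decodes to 624485:

    #include <stdio.h>

    /* Decode an unsigned LEB128 value; same loop as dwarf_read_uleb128(). */
    static unsigned int uleb128_decode(const unsigned char *p, int *len)
    {
    	unsigned int result = 0;
    	int shift = 0, count = 0;
    	unsigned char byte;

    	do {
    		byte = p[count++];
    		result |= (unsigned int)(byte & 0x7f) << shift;
    		shift += 7;
    	} while (byte & 0x80);	/* high bit set: more bytes follow */

    	*len = count;
    	return result;
    }

    int main(void)
    {
    	/* 624485 encoded as ULEB128 (the DWARF spec's own example). */
    	const unsigned char buf[] = { 0xe5, 0x8e, 0x26 };
    	int len;

    	printf("%u (%d bytes)\n", uleb128_decode(buf, &len), len);
    	return 0;
    }

The signed variant differs only in the final step: if the last byte has bit 0x40 set, the result is sign-extended, which is what the "(-1 << shift)" in dwarf_read_leb128() does.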
diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c
index 64f2746baf91..81a46145ffa5 100644
--- a/arch/sh/kernel/early_printk.c
+++ b/arch/sh/kernel/early_printk.c
@@ -223,6 +223,7 @@ static int __init setup_early_printk(char *buf)
 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SH3)
 		scif_sercon_init(buf + 6);
 #endif
+#endif
 	}
 #endif
 
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index fc26ccd82789..e63178fefb9b 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -43,6 +43,7 @@
 *	syscall #
 *
 */
+#include <asm/dwarf.h>
 
 #if defined(CONFIG_PREEMPT)
 #  define preempt_stop()	cli ; TRACE_IRQS_OFF
@@ -66,6 +67,11 @@ ENTRY(exception_error)
 
 	.align	2
 ret_from_exception:
+	CFI_STARTPROC simple
+	CFI_DEF_CFA r14, 0
+	CFI_REL_OFFSET 17, 64
+	CFI_REL_OFFSET 15, 0
+	CFI_REL_OFFSET 14, 56
 	preempt_stop()
 ENTRY(ret_from_irq)
 	!
@@ -240,6 +246,7 @@ debug_trap:
 	nop
 	bra	__restore_all
 	nop
+	CFI_ENDPROC
 
 	.align	2
 1:	.long	debug_trap_table
@@ -285,6 +292,7 @@ ret_from_fork:
 * system calls and debug traps through their respective jump tables.
 */
 ENTRY(system_call)
+	setup_frame_reg
 #if !defined(CONFIG_CPU_SH2)
 	mov.l	1f, r9
 	mov.l	@r9, r8		! Read from TRA (Trap Address) Register
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 278c68c60488..2bb43dc74f22 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -14,6 +14,7 @@
 #include <asm/processor.h>
 #include <asm/machvec.h>
 #include <asm/uaccess.h>
+#include <asm/dwarf.h>
 #include <asm/thread_info.h>
 #include <cpu/mmu_context.h>
 
@@ -261,6 +262,9 @@ void __init init_IRQ(void)
 		sh_mv.mv_init_irq();
 
 	irq_ctx_init(smp_processor_id());
+
+	/* This needs to be early, but not too early.. */
+	dwarf_unwinder_init();
 }
 
 #ifdef CONFIG_SPARSE_IRQ
diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c
index 1a2a5eb76e41..c2e45c48409c 100644
--- a/arch/sh/kernel/stacktrace.c
+++ b/arch/sh/kernel/stacktrace.c
@@ -13,47 +13,93 @@
 #include <linux/stacktrace.h>
 #include <linux/thread_info.h>
 #include <linux/module.h>
+#include <asm/unwinder.h>
 #include <asm/ptrace.h>
+#include <asm/stacktrace.h>
+
+static void save_stack_warning(void *data, char *msg)
+{
+}
+
+static void
+save_stack_warning_symbol(void *data, char *msg, unsigned long symbol)
+{
+}
+
+static int save_stack_stack(void *data, char *name)
+{
+	return 0;
+}
 
 /*
  * Save stack-backtrace addresses into a stack_trace buffer.
  */
+static void save_stack_address(void *data, unsigned long addr, int reliable)
+{
+	struct stack_trace *trace = data;
+
+	if (!reliable)
+		return;
+
+	if (trace->skip > 0) {
+		trace->skip--;
+		return;
+	}
+
+	if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = addr;
+}
+
+static const struct stacktrace_ops save_stack_ops = {
+	.warning = save_stack_warning,
+	.warning_symbol = save_stack_warning_symbol,
+	.stack = save_stack_stack,
+	.address = save_stack_address,
+};
+
 void save_stack_trace(struct stack_trace *trace)
 {
 	unsigned long *sp = (unsigned long *)current_stack_pointer;
 
-	while (!kstack_end(sp)) {
-		unsigned long addr = *sp++;
-
-		if (__kernel_text_address(addr)) {
-			if (trace->skip > 0)
-				trace->skip--;
-			else
-				trace->entries[trace->nr_entries++] = addr;
-			if (trace->nr_entries >= trace->max_entries)
-				break;
-		}
-	}
+	unwind_stack(current, NULL, sp, &save_stack_ops, trace);
+	if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL_GPL(save_stack_trace);
 
+static void
+save_stack_address_nosched(void *data, unsigned long addr, int reliable)
+{
+	struct stack_trace *trace = (struct stack_trace *)data;
+
+	if (!reliable)
+		return;
+
+	if (in_sched_functions(addr))
+		return;
+
+	if (trace->skip > 0) {
+		trace->skip--;
+		return;
+	}
+
+	if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = addr;
+}
+
+static const struct stacktrace_ops save_stack_ops_nosched = {
+	.warning = save_stack_warning,
+	.warning_symbol = save_stack_warning_symbol,
+	.stack = save_stack_stack,
+	.address = save_stack_address_nosched,
+};
+
 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
 	unsigned long *sp = (unsigned long *)tsk->thread.sp;
 
-	while (!kstack_end(sp)) {
-		unsigned long addr = *sp++;
-
-		if (__kernel_text_address(addr)) {
-			if (in_sched_functions(addr))
-				break;
-			if (trace->skip > 0)
-				trace->skip--;
-			else
-				trace->entries[trace->nr_entries++] = addr;
-			if (trace->nr_entries >= trace->max_entries)
-				break;
-		}
-	}
+	unwind_stack(current, NULL, sp, &save_stack_ops_nosched, trace);
+	if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
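
For context, a sketch of how a caller of this era's stacktrace API would drive the code above: fill in a struct stack_trace with a buffer and an optional number of leading frames to skip, then hand it to save_stack_trace(). Illustrative only; the buffer size and skip count here are arbitrary choices, not anything the patch prescribes:

    #include <linux/stacktrace.h>

    static void dump_here(void)
    {
    	unsigned long entries[16];
    	struct stack_trace trace = {
    		.entries	= entries,
    		.max_entries	= 16,
    		.skip		= 1,	/* skip dump_here() itself */
    	};

    	save_stack_trace(&trace);
    	print_stack_trace(&trace, 0);
    }

The ULONG_MAX terminator appended above is the conventional end-of-trace sentinel consumers of stack_trace buffers look for.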
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index 2b772776fcda..563426487c6b 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -858,30 +858,6 @@ void __init trap_init(void)
 	per_cpu_trap_init();
 }
 
-void show_trace(struct task_struct *tsk, unsigned long *sp,
-		struct pt_regs *regs)
-{
-	unsigned long addr;
-
-	if (regs && user_mode(regs))
-		return;
-
-	printk("\nCall trace:\n");
-
-	while (!kstack_end(sp)) {
-		addr = *sp++;
-		if (kernel_text_address(addr))
-			print_ip_sym(addr);
-	}
-
-	printk("\n");
-
-	if (!tsk)
-		tsk = current;
-
-	debug_show_held_locks(tsk);
-}
-
 void show_stack(struct task_struct *tsk, unsigned long *sp)
 {
 	unsigned long stack;
diff --git a/arch/sh/kernel/unwinder.c b/arch/sh/kernel/unwinder.c
new file mode 100644
index 000000000000..2b30fa28b440
--- /dev/null
+++ b/arch/sh/kernel/unwinder.c
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2009 Matt Fleming
+ *
+ * Based, in part, on kernel/time/clocksource.c.
+ *
+ * This file provides arbitration code for stack unwinders.
+ *
+ * Multiple stack unwinders can be available on a system, usually with
+ * the most accurate unwinder being the currently active one.
+ */
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <asm/unwinder.h>
+#include <asm/atomic.h>
+
+/*
+ * This is the most basic stack unwinder an architecture can
+ * provide. For architectures without reliable frame pointers, e.g.
+ * RISC CPUs, it can be implemented by looking through the stack for
+ * addresses that lie within the kernel text section.
+ *
+ * Other CPUs, e.g. x86, can use their frame pointer register to
+ * construct more accurate stack traces.
+ */
+static struct list_head unwinder_list;
+static struct unwinder stack_reader = {
+	.name = "stack-reader",
+	.dump = stack_reader_dump,
+	.rating = 50,
+	.list = {
+		.next = &unwinder_list,
+		.prev = &unwinder_list,
+	},
+};
+
+/*
+ * "curr_unwinder" points to the stack unwinder currently in use. This
+ * is the unwinder with the highest rating.
+ *
+ * "unwinder_list" is a linked-list of all available unwinders, sorted
+ * by rating.
+ *
+ * All modifications of "curr_unwinder" and "unwinder_list" must be
+ * performed whilst holding "unwinder_lock".
+ */
+static struct unwinder *curr_unwinder = &stack_reader;
+
+static struct list_head unwinder_list = {
+	.next = &stack_reader.list,
+	.prev = &stack_reader.list,
+};
+
+static DEFINE_SPINLOCK(unwinder_lock);
+
+static atomic_t unwinder_running = ATOMIC_INIT(0);
+
+/**
+ * select_unwinder - Select the best registered stack unwinder.
+ *
+ * Private function. Must hold unwinder_lock when called.
+ *
+ * Select the stack unwinder with the best rating. This is useful for
+ * setting up curr_unwinder.
+ */
+static struct unwinder *select_unwinder(void)
+{
+	struct unwinder *best;
+
+	if (list_empty(&unwinder_list))
+		return NULL;
+
+	best = list_entry(unwinder_list.next, struct unwinder, list);
+	if (best == curr_unwinder)
+		return NULL;
+
+	return best;
+}
+
+/*
+ * Enqueue the stack unwinder sorted by rating.
+ */
+static int unwinder_enqueue(struct unwinder *ops)
+{
+	struct list_head *tmp, *entry = &unwinder_list;
+
+	list_for_each(tmp, &unwinder_list) {
+		struct unwinder *o;
+
+		o = list_entry(tmp, struct unwinder, list);
+		if (o == ops)
+			return -EBUSY;
+		/* Keep track of the place where to insert */
+		if (o->rating >= ops->rating)
+			entry = tmp;
+	}
+	list_add(&ops->list, entry);
+
+	return 0;
+}
+
+/**
+ * unwinder_register - Used to install a new stack unwinder
+ * @u: unwinder to be registered
+ *
+ * Install the new stack unwinder on the unwinder list, which is sorted
+ * by rating.
+ *
+ * Returns -EBUSY if registration fails, zero otherwise.
+ */
+int unwinder_register(struct unwinder *u)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&unwinder_lock, flags);
+	ret = unwinder_enqueue(u);
+	if (!ret)
+		curr_unwinder = select_unwinder();
+	spin_unlock_irqrestore(&unwinder_lock, flags);
+
+	return ret;
+}
+
+/*
+ * Unwind the call stack and pass information to the stacktrace_ops
+ * functions. Also handle the case where we need to switch to a new
+ * stack dumper because the current one faulted unexpectedly.
+ */
+void unwind_stack(struct task_struct *task, struct pt_regs *regs,
+		  unsigned long *sp, const struct stacktrace_ops *ops,
+		  void *data)
+{
+	unsigned long flags;
+
+	/*
+	 * The problem with unwinders with high ratings is that they are
+	 * inherently more complicated than the simple ones with lower
+	 * ratings. We are therefore more likely to fault in the
+	 * complicated ones, e.g. hitting BUG()s. If we fault in the
+	 * code for the current stack unwinder we try to downgrade to
+	 * one with a lower rating.
+	 *
+	 * Hopefully this will give us a semi-reliable stacktrace so we
+	 * can diagnose why curr_unwinder->dump() faulted.
+	 */
+	if (atomic_inc_return(&unwinder_running) != 1) {
+		spin_lock_irqsave(&unwinder_lock, flags);
+
+		if (!list_is_singular(&unwinder_list)) {
+			list_del(&curr_unwinder->list);
+			curr_unwinder = select_unwinder();
+		}
+
+		spin_unlock_irqrestore(&unwinder_lock, flags);
+		atomic_dec(&unwinder_running);
+	}
+
+	curr_unwinder->dump(task, regs, sp, ops, data);
+
+	atomic_dec(&unwinder_running);
+}
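
The arbitration scheme above is borrowed from clocksource.c: each unwinder carries a rating, the highest-rated one is used, and if the current unwinder appears to have faulted mid-dump (detected via the unwinder_running counter re-entering) it is unlinked so the next-best one takes over. A toy user-space model of that downgrade logic, using hypothetical types and data rather than the kernel's:

    #include <stdio.h>

    struct unwinder {
    	const char *name;
    	int rating;
    	int broken;	/* stands in for "faulted while dumping" */
    };

    static struct unwinder table[] = {
    	{ "dwarf-unwinder", 150, 0 },
    	{ "stack-reader",    50, 0 },
    };

    /* Pick the highest-rated unwinder that hasn't been dropped. */
    static struct unwinder *select_unwinder(void)
    {
    	struct unwinder *best = NULL;

    	for (int i = 0; i < (int)(sizeof(table) / sizeof(table[0])); i++)
    		if (!table[i].broken &&
    		    (!best || table[i].rating > best->rating))
    			best = &table[i];
    	return best;
    }

    int main(void)
    {
    	printf("using %s\n", select_unwinder()->name); /* dwarf-unwinder */

    	/* Simulate a fault in the current unwinder: downgrade. */
    	table[0].broken = 1;
    	printf("using %s\n", select_unwinder()->name); /* stack-reader */
    	return 0;
    }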
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index 80dc9f8d9412..1b7d9d541e01 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -12,7 +12,7 @@ OUTPUT_ARCH(sh)
 
 #include <asm/thread_info.h>
 #include <asm/cache.h>
-#include <asm-generic/vmlinux.lds.h>
+#include <asm/vmlinux.lds.h>
 
 ENTRY(_start)
 SECTIONS
@@ -70,6 +70,8 @@ SECTIONS
 
 	_edata = .;			/* End of data section */
 
+	DWARF_EH_FRAME
+
 	. = ALIGN(PAGE_SIZE);		/* Init code and data */
 	__init_begin = .;
 	INIT_TEXT_SECTION(PAGE_SIZE)