author     Robin Getz <robin.getz@analog.com>        2010-03-11 11:24:18 -0500
committer  Mike Frysinger <vapier@gentoo.org>        2010-05-21 09:40:17 -0400
commit     2a12c4632db1c0c548a7023e63869b27c7789a92 (patch)
tree       518ec2b9379886d5fe7301cf3d5eed959f0452ca
parent     bb84dbf69b0730fcc78c275f900ed74b2b8453a5 (diff)
Blackfin: split kernel/traps.c
The current kernel/traps.c file has grown a bit unwieldy as more debugging
functionality has been added over time, so split it up into more logical
files. There should be no functional changes here, just minor whitespace
tweaking. This should make future extensions easier to manage.
Signed-off-by: Robin Getz <robin.getz@analog.com>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
-rw-r--r--   arch/blackfin/include/asm/trace.h |   2
-rw-r--r--   arch/blackfin/kernel/Makefile     |   3
-rw-r--r--   arch/blackfin/kernel/dumpstack.c  | 181
-rw-r--r--   arch/blackfin/kernel/exception.c  |  45
-rw-r--r--   arch/blackfin/kernel/sys_bfin.c   |  23
-rw-r--r--   arch/blackfin/kernel/trace.c      | 594
-rw-r--r--   arch/blackfin/kernel/traps.c      | 875
7 files changed, 887 insertions, 836 deletions
diff --git a/arch/blackfin/include/asm/trace.h b/arch/blackfin/include/asm/trace.h
index dc0aa55ae773..395decd8bc3b 100644
--- a/arch/blackfin/include/asm/trace.h
+++ b/arch/blackfin/include/asm/trace.h
@@ -23,6 +23,8 @@
23 | #ifndef __ASSEMBLY__ | 23 | #ifndef __ASSEMBLY__ |
24 | extern unsigned long trace_buff_offset; | 24 | extern unsigned long trace_buff_offset; |
25 | extern unsigned long software_trace_buff[]; | 25 | extern unsigned long software_trace_buff[]; |
26 | extern void decode_address(char *buf, unsigned long address); | ||
27 | extern bool get_instruction(unsigned short *val, unsigned short *address); | ||
26 | 28 | ||
27 | /* Trace Macros for C files */ | 29 | /* Trace Macros for C files */ |
28 | 30 | ||
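The two prototypes added above make decode_address() and get_instruction() usable from any Blackfin kernel file that includes asm/trace.h. A minimal sketch of such a caller (my_report_pc() is a hypothetical helper, not part of this patch):

#include <linux/kernel.h>
#include <asm/trace.h>

/* Hypothetical debug helper built on the newly exported prototypes */
static void my_report_pc(unsigned long pc)
{
	char buf[150];			/* same size the in-tree callers use */
	unsigned short insn;

	decode_address(buf, pc);	/* "<0xXXXXXXXX> { symbol + 0xNN }" */
	printk(KERN_NOTICE "PC %s\n", buf);

	/* fetch the 16-bit opcode at pc only if the address checks out */
	if (get_instruction(&insn, (unsigned short *)pc))
		printk(KERN_NOTICE "opcode: 0x%04x\n", insn);
}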
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
index 346a421f1562..b32a04a95d99 100644
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -7,7 +7,8 @@ extra-y := init_task.o vmlinux.lds
7 | obj-y := \ | 7 | obj-y := \ |
8 | entry.o process.o bfin_ksyms.o ptrace.o setup.o signal.o \ | 8 | entry.o process.o bfin_ksyms.o ptrace.o setup.o signal.o \ |
9 | sys_bfin.o traps.o irqchip.o dma-mapping.o flat.o \ | 9 | sys_bfin.o traps.o irqchip.o dma-mapping.o flat.o \ |
10 | fixed_code.o reboot.o bfin_gpio.o bfin_dma_5xx.o | 10 | fixed_code.o reboot.o bfin_gpio.o bfin_dma_5xx.o \ |
11 | trace.o exception.o dumpstack.o | ||
11 | 12 | ||
12 | ifeq ($(CONFIG_GENERIC_CLOCKEVENTS),y) | 13 | ifeq ($(CONFIG_GENERIC_CLOCKEVENTS),y) |
13 | obj-y += time-ts.o | 14 | obj-y += time-ts.o |
diff --git a/arch/blackfin/kernel/dumpstack.c b/arch/blackfin/kernel/dumpstack.c
new file mode 100644
index 000000000000..e81392c9d1db
--- /dev/null
+++ b/arch/blackfin/kernel/dumpstack.c
@@ -0,0 +1,181 @@
1 | /* Provide basic stack dumping functions | ||
2 | * | ||
3 | * Copyright 2004-2009 Analog Devices Inc. | ||
4 | * | ||
5 | * Licensed under the GPL-2 or later | ||
6 | */ | ||
7 | |||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/thread_info.h> | ||
10 | #include <linux/mm.h> | ||
11 | #include <linux/uaccess.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <asm/trace.h> | ||
14 | |||
15 | /* | ||
16 | * Checks to see if the address pointed to is either a | ||
17 | * 16-bit CALL instruction, or a 32-bit CALL instruction | ||
18 | */ | ||
19 | static bool is_bfin_call(unsigned short *addr) | ||
20 | { | ||
21 | unsigned short opcode = 0, *ins_addr; | ||
22 | ins_addr = (unsigned short *)addr; | ||
23 | |||
24 | if (!get_instruction(&opcode, ins_addr)) | ||
25 | return false; | ||
26 | |||
27 | if ((opcode >= 0x0060 && opcode <= 0x0067) || | ||
28 | (opcode >= 0x0070 && opcode <= 0x0077)) | ||
29 | return true; | ||
30 | |||
31 | ins_addr--; | ||
32 | if (!get_instruction(&opcode, ins_addr)) | ||
33 | return false; | ||
34 | |||
35 | if (opcode >= 0xE300 && opcode <= 0xE3FF) | ||
36 | return true; | ||
37 | |||
38 | return false; | ||
39 | |||
40 | } | ||
41 | |||
42 | void show_stack(struct task_struct *task, unsigned long *stack) | ||
43 | { | ||
44 | #ifdef CONFIG_PRINTK | ||
45 | unsigned int *addr, *endstack, *fp = 0, *frame; | ||
46 | unsigned short *ins_addr; | ||
47 | char buf[150]; | ||
48 | unsigned int i, j, ret_addr, frame_no = 0; | ||
49 | |||
50 | /* | ||
51 | * If we have been passed a specific stack, use that one otherwise | ||
52 | * if we have been passed a task structure, use that, otherwise | ||
53 | * use the stack of where the variable "stack" exists | ||
54 | */ | ||
55 | |||
56 | if (stack == NULL) { | ||
57 | if (task) { | ||
58 | /* We know this is a kernel stack, so this is the start/end */ | ||
59 | stack = (unsigned long *)task->thread.ksp; | ||
60 | endstack = (unsigned int *)(((unsigned int)(stack) & ~(THREAD_SIZE - 1)) + THREAD_SIZE); | ||
61 | } else { | ||
62 | /* print out the existing stack info */ | ||
63 | stack = (unsigned long *)&stack; | ||
64 | endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack); | ||
65 | } | ||
66 | } else | ||
67 | endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack); | ||
68 | |||
69 | printk(KERN_NOTICE "Stack info:\n"); | ||
70 | decode_address(buf, (unsigned int)stack); | ||
71 | printk(KERN_NOTICE " SP: [0x%p] %s\n", stack, buf); | ||
72 | |||
73 | if (!access_ok(VERIFY_READ, stack, (unsigned int)endstack - (unsigned int)stack)) { | ||
74 | printk(KERN_NOTICE "Invalid stack pointer\n"); | ||
75 | return; | ||
76 | } | ||
77 | |||
78 | /* First thing is to look for a frame pointer */ | ||
79 | for (addr = (unsigned int *)((unsigned int)stack & ~0xF); addr < endstack; addr++) { | ||
80 | if (*addr & 0x1) | ||
81 | continue; | ||
82 | ins_addr = (unsigned short *)*addr; | ||
83 | ins_addr--; | ||
84 | if (is_bfin_call(ins_addr)) | ||
85 | fp = addr - 1; | ||
86 | |||
87 | if (fp) { | ||
88 | /* Let's check to see if it is a frame pointer */ | ||
89 | while (fp >= (addr - 1) && fp < endstack | ||
90 | && fp && ((unsigned int) fp & 0x3) == 0) | ||
91 | fp = (unsigned int *)*fp; | ||
92 | if (fp == 0 || fp == endstack) { | ||
93 | fp = addr - 1; | ||
94 | break; | ||
95 | } | ||
96 | fp = 0; | ||
97 | } | ||
98 | } | ||
99 | if (fp) { | ||
100 | frame = fp; | ||
101 | printk(KERN_NOTICE " FP: (0x%p)\n", fp); | ||
102 | } else | ||
103 | frame = 0; | ||
104 | |||
105 | /* | ||
106 | * Now that we think we know where things are, we | ||
107 | * walk the stack again, this time printing things out | ||
108 | * incase there is no frame pointer, we still look for | ||
109 | * valid return addresses | ||
110 | */ | ||
111 | |||
112 | /* First time print out data, next time, print out symbols */ | ||
113 | for (j = 0; j <= 1; j++) { | ||
114 | if (j) | ||
115 | printk(KERN_NOTICE "Return addresses in stack:\n"); | ||
116 | else | ||
117 | printk(KERN_NOTICE " Memory from 0x%08lx to %p", ((long unsigned int)stack & ~0xF), endstack); | ||
118 | |||
119 | fp = frame; | ||
120 | frame_no = 0; | ||
121 | |||
122 | for (addr = (unsigned int *)((unsigned int)stack & ~0xF), i = 0; | ||
123 | addr < endstack; addr++, i++) { | ||
124 | |||
125 | ret_addr = 0; | ||
126 | if (!j && i % 8 == 0) | ||
127 | printk(KERN_NOTICE "%p:", addr); | ||
128 | |||
129 | /* if it is an odd address, or zero, just skip it */ | ||
130 | if (*addr & 0x1 || !*addr) | ||
131 | goto print; | ||
132 | |||
133 | ins_addr = (unsigned short *)*addr; | ||
134 | |||
135 | /* Go back one instruction, and see if it is a CALL */ | ||
136 | ins_addr--; | ||
137 | ret_addr = is_bfin_call(ins_addr); | ||
138 | print: | ||
139 | if (!j && stack == (unsigned long *)addr) | ||
140 | printk("[%08x]", *addr); | ||
141 | else if (ret_addr) | ||
142 | if (j) { | ||
143 | decode_address(buf, (unsigned int)*addr); | ||
144 | if (frame == addr) { | ||
145 | printk(KERN_NOTICE " frame %2i : %s\n", frame_no, buf); | ||
146 | continue; | ||
147 | } | ||
148 | printk(KERN_NOTICE " address : %s\n", buf); | ||
149 | } else | ||
150 | printk("<%08x>", *addr); | ||
151 | else if (fp == addr) { | ||
152 | if (j) | ||
153 | frame = addr+1; | ||
154 | else | ||
155 | printk("(%08x)", *addr); | ||
156 | |||
157 | fp = (unsigned int *)*addr; | ||
158 | frame_no++; | ||
159 | |||
160 | } else if (!j) | ||
161 | printk(" %08x ", *addr); | ||
162 | } | ||
163 | if (!j) | ||
164 | printk("\n"); | ||
165 | } | ||
166 | #endif | ||
167 | } | ||
168 | EXPORT_SYMBOL(show_stack); | ||
169 | |||
170 | void dump_stack(void) | ||
171 | { | ||
172 | unsigned long stack; | ||
173 | #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON | ||
174 | int tflags; | ||
175 | #endif | ||
176 | trace_buffer_save(tflags); | ||
177 | dump_bfin_trace_buffer(); | ||
178 | show_stack(current, &stack); | ||
179 | trace_buffer_restore(tflags); | ||
180 | } | ||
181 | EXPORT_SYMBOL(dump_stack); | ||
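The new dumpstack.c keeps the generic entry points (show_stack(), dump_stack()) that the rest of the kernel expects. A sketch of the usual call site (driver name and message are illustrative only):

#include <linux/kernel.h>

static void my_driver_check_state(int bad)
{
	if (bad) {
		/* dump_stack() prints the hardware trace buffer first,
		 * then walks the current kernel stack via show_stack() */
		printk(KERN_ERR "my_driver: unexpected state\n");
		dump_stack();
	}
}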
diff --git a/arch/blackfin/kernel/exception.c b/arch/blackfin/kernel/exception.c
new file mode 100644
index 000000000000..9208b5fd5186
--- /dev/null
+++ b/arch/blackfin/kernel/exception.c
@@ -0,0 +1,45 @@
1 | /* Basic functions for adding/removing custom exception handlers | ||
2 | * | ||
3 | * Copyright 2004-2009 Analog Devices Inc. | ||
4 | * | ||
5 | * Licensed under the GPL-2 or later | ||
6 | */ | ||
7 | |||
8 | #include <linux/module.h> | ||
9 | #include <asm/irq_handler.h> | ||
10 | |||
11 | int bfin_request_exception(unsigned int exception, void (*handler)(void)) | ||
12 | { | ||
13 | void (*curr_handler)(void); | ||
14 | |||
15 | if (exception > 0x3F) | ||
16 | return -EINVAL; | ||
17 | |||
18 | curr_handler = ex_table[exception]; | ||
19 | |||
20 | if (curr_handler != ex_replaceable) | ||
21 | return -EBUSY; | ||
22 | |||
23 | ex_table[exception] = handler; | ||
24 | |||
25 | return 0; | ||
26 | } | ||
27 | EXPORT_SYMBOL(bfin_request_exception); | ||
28 | |||
29 | int bfin_free_exception(unsigned int exception, void (*handler)(void)) | ||
30 | { | ||
31 | void (*curr_handler)(void); | ||
32 | |||
33 | if (exception > 0x3F) | ||
34 | return -EINVAL; | ||
35 | |||
36 | curr_handler = ex_table[exception]; | ||
37 | |||
38 | if (curr_handler != handler) | ||
39 | return -EBUSY; | ||
40 | |||
41 | ex_table[exception] = ex_replaceable; | ||
42 | |||
43 | return 0; | ||
44 | } | ||
45 | EXPORT_SYMBOL(bfin_free_exception); | ||
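exception.c now owns the small API for claiming one of the replaceable exception slots. A sketch of a caller, assuming a hypothetical driver that wants EXCPT 0x5 (the handler body is only a placeholder; real handlers are normally assembly stubs):

#include <linux/module.h>
#include <asm/irq_handler.h>

static void my_excpt_handler(void)
{
	/* placeholder - a real handler must return with RTX */
}

static int my_driver_init(void)
{
	/* fails with -EINVAL for a bad vector, -EBUSY if already claimed */
	return bfin_request_exception(0x5, my_excpt_handler);
}

static void my_driver_exit(void)
{
	bfin_free_exception(0x5, my_excpt_handler);
}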
diff --git a/arch/blackfin/kernel/sys_bfin.c b/arch/blackfin/kernel/sys_bfin.c
index 2e7f8e10bf87..bdc1e2f0da32 100644
--- a/arch/blackfin/kernel/sys_bfin.c
+++ b/arch/blackfin/kernel/sys_bfin.c
@@ -47,3 +47,26 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr,
47 | } | 47 | } |
48 | EXPORT_SYMBOL(get_fb_unmapped_area); | 48 | EXPORT_SYMBOL(get_fb_unmapped_area); |
49 | #endif | 49 | #endif |
50 | |||
51 | /* Needed for legacy userspace atomic emulation */ | ||
52 | static DEFINE_SPINLOCK(bfin_spinlock_lock); | ||
53 | |||
54 | #ifdef CONFIG_SYS_BFIN_SPINLOCK_L1 | ||
55 | __attribute__((l1_text)) | ||
56 | #endif | ||
57 | asmlinkage int sys_bfin_spinlock(int *p) | ||
58 | { | ||
59 | int ret, tmp = 0; | ||
60 | |||
61 | spin_lock(&bfin_spinlock_lock); /* This would also hold kernel preemption. */ | ||
62 | ret = get_user(tmp, p); | ||
63 | if (likely(ret == 0)) { | ||
64 | if (unlikely(tmp)) | ||
65 | ret = 1; | ||
66 | else | ||
67 | put_user(1, p); | ||
68 | } | ||
69 | spin_unlock(&bfin_spinlock_lock); | ||
70 | |||
71 | return ret; | ||
72 | } | ||
diff --git a/arch/blackfin/kernel/trace.c b/arch/blackfin/kernel/trace.c
new file mode 100644
index 000000000000..3a268c3ed47e
--- /dev/null
+++ b/arch/blackfin/kernel/trace.c
@@ -0,0 +1,594 @@
1 | /* provide some functions which dump the trace buffer, in a nice way for people | ||
2 | * to read it, and understand what is going on | ||
3 | * | ||
4 | * Copyright 2004-2010 Analog Devices Inc. | ||
5 | * | ||
6 | * Licensed under the GPL-2 or later | ||
7 | */ | ||
8 | |||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/hardirq.h> | ||
11 | #include <linux/thread_info.h> | ||
12 | #include <linux/mm.h> | ||
13 | #include <linux/uaccess.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/kallsyms.h> | ||
16 | #include <linux/err.h> | ||
17 | #include <linux/fs.h> | ||
18 | #include <asm/dma.h> | ||
19 | #include <asm/trace.h> | ||
20 | #include <asm/fixed_code.h> | ||
21 | #include <asm/traps.h> | ||
22 | |||
23 | #ifdef CONFIG_DEBUG_VERBOSE | ||
24 | #define verbose_printk(fmt, arg...) \ | ||
25 | printk(fmt, ##arg) | ||
26 | #else | ||
27 | #define verbose_printk(fmt, arg...) \ | ||
28 | ({ if (0) printk(fmt, ##arg); 0; }) | ||
29 | #endif | ||
30 | |||
31 | |||
32 | void decode_address(char *buf, unsigned long address) | ||
33 | { | ||
34 | #ifdef CONFIG_DEBUG_VERBOSE | ||
35 | struct task_struct *p; | ||
36 | struct mm_struct *mm; | ||
37 | unsigned long flags, offset; | ||
38 | unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic(); | ||
39 | struct rb_node *n; | ||
40 | |||
41 | #ifdef CONFIG_KALLSYMS | ||
42 | unsigned long symsize; | ||
43 | const char *symname; | ||
44 | char *modname; | ||
45 | char *delim = ":"; | ||
46 | char namebuf[128]; | ||
47 | #endif | ||
48 | |||
49 | buf += sprintf(buf, "<0x%08lx> ", address); | ||
50 | |||
51 | #ifdef CONFIG_KALLSYMS | ||
52 | /* look up the address and see if we are in kernel space */ | ||
53 | symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf); | ||
54 | |||
55 | if (symname) { | ||
56 | /* yeah! kernel space! */ | ||
57 | if (!modname) | ||
58 | modname = delim = ""; | ||
59 | sprintf(buf, "{ %s%s%s%s + 0x%lx }", | ||
60 | delim, modname, delim, symname, | ||
61 | (unsigned long)offset); | ||
62 | return; | ||
63 | } | ||
64 | #endif | ||
65 | |||
66 | if (address >= FIXED_CODE_START && address < FIXED_CODE_END) { | ||
67 | /* Problem in fixed code section? */ | ||
68 | strcat(buf, "/* Maybe fixed code section */"); | ||
69 | return; | ||
70 | |||
71 | } else if (address < CONFIG_BOOT_LOAD) { | ||
72 | /* Problem somewhere before the kernel start address */ | ||
73 | strcat(buf, "/* Maybe null pointer? */"); | ||
74 | return; | ||
75 | |||
76 | } else if (address >= COREMMR_BASE) { | ||
77 | strcat(buf, "/* core mmrs */"); | ||
78 | return; | ||
79 | |||
80 | } else if (address >= SYSMMR_BASE) { | ||
81 | strcat(buf, "/* system mmrs */"); | ||
82 | return; | ||
83 | |||
84 | } else if (address >= L1_ROM_START && address < L1_ROM_START + L1_ROM_LENGTH) { | ||
85 | strcat(buf, "/* on-chip L1 ROM */"); | ||
86 | return; | ||
87 | } | ||
88 | |||
89 | /* | ||
90 | * Don't walk any of the vmas if we are oopsing, it has been known | ||
91 | * to cause problems - corrupt vmas (kernel crashes) cause double faults | ||
92 | */ | ||
93 | if (oops_in_progress) { | ||
94 | strcat(buf, "/* kernel dynamic memory (maybe user-space) */"); | ||
95 | return; | ||
96 | } | ||
97 | |||
98 | /* looks like we're off in user-land, so let's walk all the | ||
99 | * mappings of all our processes and see if we can't be a whee | ||
100 | * bit more specific | ||
101 | */ | ||
102 | write_lock_irqsave(&tasklist_lock, flags); | ||
103 | for_each_process(p) { | ||
104 | mm = (in_atomic ? p->mm : get_task_mm(p)); | ||
105 | if (!mm) | ||
106 | continue; | ||
107 | |||
108 | if (!down_read_trylock(&mm->mmap_sem)) { | ||
109 | if (!in_atomic) | ||
110 | mmput(mm); | ||
111 | continue; | ||
112 | } | ||
113 | |||
114 | for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) { | ||
115 | struct vm_area_struct *vma; | ||
116 | |||
117 | vma = rb_entry(n, struct vm_area_struct, vm_rb); | ||
118 | |||
119 | if (address >= vma->vm_start && address < vma->vm_end) { | ||
120 | char _tmpbuf[256]; | ||
121 | char *name = p->comm; | ||
122 | struct file *file = vma->vm_file; | ||
123 | |||
124 | if (file) { | ||
125 | char *d_name = d_path(&file->f_path, _tmpbuf, | ||
126 | sizeof(_tmpbuf)); | ||
127 | if (!IS_ERR(d_name)) | ||
128 | name = d_name; | ||
129 | } | ||
130 | |||
131 | /* FLAT does not have its text aligned to the start of | ||
132 | * the map while FDPIC ELF does ... | ||
133 | */ | ||
134 | |||
135 | /* before we can check flat/fdpic, we need to | ||
136 | * make sure current is valid | ||
137 | */ | ||
138 | if ((unsigned long)current >= FIXED_CODE_START && | ||
139 | !((unsigned long)current & 0x3)) { | ||
140 | if (current->mm && | ||
141 | (address > current->mm->start_code) && | ||
142 | (address < current->mm->end_code)) | ||
143 | offset = address - current->mm->start_code; | ||
144 | else | ||
145 | offset = (address - vma->vm_start) + | ||
146 | (vma->vm_pgoff << PAGE_SHIFT); | ||
147 | |||
148 | sprintf(buf, "[ %s + 0x%lx ]", name, offset); | ||
149 | } else | ||
150 | sprintf(buf, "[ %s vma:0x%lx-0x%lx]", | ||
151 | name, vma->vm_start, vma->vm_end); | ||
152 | |||
153 | up_read(&mm->mmap_sem); | ||
154 | if (!in_atomic) | ||
155 | mmput(mm); | ||
156 | |||
157 | if (buf[0] == '\0') | ||
158 | sprintf(buf, "[ %s ] dynamic memory", name); | ||
159 | |||
160 | goto done; | ||
161 | } | ||
162 | } | ||
163 | |||
164 | up_read(&mm->mmap_sem); | ||
165 | if (!in_atomic) | ||
166 | mmput(mm); | ||
167 | } | ||
168 | |||
169 | /* | ||
170 | * we were unable to find this address anywhere, | ||
171 | * or some MMs were skipped because they were in use. | ||
172 | */ | ||
173 | sprintf(buf, "/* kernel dynamic memory */"); | ||
174 | |||
175 | done: | ||
176 | write_unlock_irqrestore(&tasklist_lock, flags); | ||
177 | #else | ||
178 | sprintf(buf, " "); | ||
179 | #endif | ||
180 | } | ||
181 | |||
182 | #define EXPAND_LEN ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 - 1) | ||
183 | |||
184 | /* | ||
185 | * Similar to get_user, do some address checking, then dereference | ||
186 | * Return true on success, false on bad address | ||
187 | */ | ||
188 | bool get_instruction(unsigned short *val, unsigned short *address) | ||
189 | { | ||
190 | unsigned long addr = (unsigned long)address; | ||
191 | |||
192 | /* Check for odd addresses */ | ||
193 | if (addr & 0x1) | ||
194 | return false; | ||
195 | |||
196 | /* MMR region will never have instructions */ | ||
197 | if (addr >= SYSMMR_BASE) | ||
198 | return false; | ||
199 | |||
200 | switch (bfin_mem_access_type(addr, 2)) { | ||
201 | case BFIN_MEM_ACCESS_CORE: | ||
202 | case BFIN_MEM_ACCESS_CORE_ONLY: | ||
203 | *val = *address; | ||
204 | return true; | ||
205 | case BFIN_MEM_ACCESS_DMA: | ||
206 | dma_memcpy(val, address, 2); | ||
207 | return true; | ||
208 | case BFIN_MEM_ACCESS_ITEST: | ||
209 | isram_memcpy(val, address, 2); | ||
210 | return true; | ||
211 | default: /* invalid access */ | ||
212 | return false; | ||
213 | } | ||
214 | } | ||
215 | |||
216 | /* | ||
217 | * decode the instruction if we are printing out the trace, as it | ||
218 | * makes things easier to follow, without running it through objdump | ||
219 | * These are the normal instructions which cause change of flow, which | ||
220 | * would be at the source of the trace buffer | ||
221 | */ | ||
222 | #if defined(CONFIG_DEBUG_VERBOSE) && defined(CONFIG_DEBUG_BFIN_HWTRACE_ON) | ||
223 | static void decode_instruction(unsigned short *address) | ||
224 | { | ||
225 | unsigned short opcode; | ||
226 | |||
227 | if (get_instruction(&opcode, address)) { | ||
228 | if (opcode == 0x0010) | ||
229 | verbose_printk("RTS"); | ||
230 | else if (opcode == 0x0011) | ||
231 | verbose_printk("RTI"); | ||
232 | else if (opcode == 0x0012) | ||
233 | verbose_printk("RTX"); | ||
234 | else if (opcode == 0x0013) | ||
235 | verbose_printk("RTN"); | ||
236 | else if (opcode == 0x0014) | ||
237 | verbose_printk("RTE"); | ||
238 | else if (opcode == 0x0025) | ||
239 | verbose_printk("EMUEXCPT"); | ||
240 | else if (opcode >= 0x0040 && opcode <= 0x0047) | ||
241 | verbose_printk("STI R%i", opcode & 7); | ||
242 | else if (opcode >= 0x0050 && opcode <= 0x0057) | ||
243 | verbose_printk("JUMP (P%i)", opcode & 7); | ||
244 | else if (opcode >= 0x0060 && opcode <= 0x0067) | ||
245 | verbose_printk("CALL (P%i)", opcode & 7); | ||
246 | else if (opcode >= 0x0070 && opcode <= 0x0077) | ||
247 | verbose_printk("CALL (PC+P%i)", opcode & 7); | ||
248 | else if (opcode >= 0x0080 && opcode <= 0x0087) | ||
249 | verbose_printk("JUMP (PC+P%i)", opcode & 7); | ||
250 | else if (opcode >= 0x0090 && opcode <= 0x009F) | ||
251 | verbose_printk("RAISE 0x%x", opcode & 0xF); | ||
252 | else if (opcode >= 0x00A0 && opcode <= 0x00AF) | ||
253 | verbose_printk("EXCPT 0x%x", opcode & 0xF); | ||
254 | else if ((opcode >= 0x1000 && opcode <= 0x13FF) || (opcode >= 0x1800 && opcode <= 0x1BFF)) | ||
255 | verbose_printk("IF !CC JUMP"); | ||
256 | else if ((opcode >= 0x1400 && opcode <= 0x17ff) || (opcode >= 0x1c00 && opcode <= 0x1fff)) | ||
257 | verbose_printk("IF CC JUMP"); | ||
258 | else if (opcode >= 0x2000 && opcode <= 0x2fff) | ||
259 | verbose_printk("JUMP.S"); | ||
260 | else if (opcode >= 0xe080 && opcode <= 0xe0ff) | ||
261 | verbose_printk("LSETUP"); | ||
262 | else if (opcode >= 0xe200 && opcode <= 0xe2ff) | ||
263 | verbose_printk("JUMP.L"); | ||
264 | else if (opcode >= 0xe300 && opcode <= 0xe3ff) | ||
265 | verbose_printk("CALL pcrel"); | ||
266 | else | ||
267 | verbose_printk("0x%04x", opcode); | ||
268 | } | ||
269 | |||
270 | } | ||
271 | #endif | ||
272 | |||
273 | void dump_bfin_trace_buffer(void) | ||
274 | { | ||
275 | #ifdef CONFIG_DEBUG_VERBOSE | ||
276 | #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON | ||
277 | int tflags, i = 0; | ||
278 | char buf[150]; | ||
279 | unsigned short *addr; | ||
280 | #ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND | ||
281 | int j, index; | ||
282 | #endif | ||
283 | |||
284 | trace_buffer_save(tflags); | ||
285 | |||
286 | printk(KERN_NOTICE "Hardware Trace:\n"); | ||
287 | |||
288 | #ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND | ||
289 | printk(KERN_NOTICE "WARNING: Expanded trace turned on - can not trace exceptions\n"); | ||
290 | #endif | ||
291 | |||
292 | if (likely(bfin_read_TBUFSTAT() & TBUFCNT)) { | ||
293 | for (; bfin_read_TBUFSTAT() & TBUFCNT; i++) { | ||
294 | decode_address(buf, (unsigned long)bfin_read_TBUF()); | ||
295 | printk(KERN_NOTICE "%4i Target : %s\n", i, buf); | ||
296 | addr = (unsigned short *)bfin_read_TBUF(); | ||
297 | decode_address(buf, (unsigned long)addr); | ||
298 | printk(KERN_NOTICE " Source : %s ", buf); | ||
299 | decode_instruction(addr); | ||
300 | printk("\n"); | ||
301 | } | ||
302 | } | ||
303 | |||
304 | #ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND | ||
305 | if (trace_buff_offset) | ||
306 | index = trace_buff_offset / 4; | ||
307 | else | ||
308 | index = EXPAND_LEN; | ||
309 | |||
310 | j = (1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 128; | ||
311 | while (j) { | ||
312 | decode_address(buf, software_trace_buff[index]); | ||
313 | printk(KERN_NOTICE "%4i Target : %s\n", i, buf); | ||
314 | index -= 1; | ||
315 | if (index < 0) | ||
316 | index = EXPAND_LEN; | ||
317 | decode_address(buf, software_trace_buff[index]); | ||
318 | printk(KERN_NOTICE " Source : %s ", buf); | ||
319 | decode_instruction((unsigned short *)software_trace_buff[index]); | ||
320 | printk("\n"); | ||
321 | index -= 1; | ||
322 | if (index < 0) | ||
323 | index = EXPAND_LEN; | ||
324 | j--; | ||
325 | i++; | ||
326 | } | ||
327 | #endif | ||
328 | |||
329 | trace_buffer_restore(tflags); | ||
330 | #endif | ||
331 | #endif | ||
332 | } | ||
333 | EXPORT_SYMBOL(dump_bfin_trace_buffer); | ||
334 | |||
335 | void dump_bfin_process(struct pt_regs *fp) | ||
336 | { | ||
337 | #ifdef CONFIG_DEBUG_VERBOSE | ||
338 | /* We should be able to look at fp->ipend, but we don't push it on the | ||
339 | * stack all the time, so do this until we fix that */ | ||
340 | unsigned int context = bfin_read_IPEND(); | ||
341 | |||
342 | if (oops_in_progress) | ||
343 | verbose_printk(KERN_EMERG "Kernel OOPS in progress\n"); | ||
344 | |||
345 | if (context & 0x0020 && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR) | ||
346 | verbose_printk(KERN_NOTICE "HW Error context\n"); | ||
347 | else if (context & 0x0020) | ||
348 | verbose_printk(KERN_NOTICE "Deferred Exception context\n"); | ||
349 | else if (context & 0x3FC0) | ||
350 | verbose_printk(KERN_NOTICE "Interrupt context\n"); | ||
351 | else if (context & 0x4000) | ||
352 | verbose_printk(KERN_NOTICE "Deferred Interrupt context\n"); | ||
353 | else if (context & 0x8000) | ||
354 | verbose_printk(KERN_NOTICE "Kernel process context\n"); | ||
355 | |||
356 | /* Because we are crashing, and pointers could be bad, we check things | ||
357 | * pretty closely before we use them | ||
358 | */ | ||
359 | if ((unsigned long)current >= FIXED_CODE_START && | ||
360 | !((unsigned long)current & 0x3) && current->pid) { | ||
361 | verbose_printk(KERN_NOTICE "CURRENT PROCESS:\n"); | ||
362 | if (current->comm >= (char *)FIXED_CODE_START) | ||
363 | verbose_printk(KERN_NOTICE "COMM=%s PID=%d", | ||
364 | current->comm, current->pid); | ||
365 | else | ||
366 | verbose_printk(KERN_NOTICE "COMM= invalid"); | ||
367 | |||
368 | printk(KERN_CONT " CPU=%d\n", current_thread_info()->cpu); | ||
369 | if (!((unsigned long)current->mm & 0x3) && (unsigned long)current->mm >= FIXED_CODE_START) | ||
370 | verbose_printk(KERN_NOTICE | ||
371 | "TEXT = 0x%p-0x%p DATA = 0x%p-0x%p\n" | ||
372 | " BSS = 0x%p-0x%p USER-STACK = 0x%p\n\n", | ||
373 | (void *)current->mm->start_code, | ||
374 | (void *)current->mm->end_code, | ||
375 | (void *)current->mm->start_data, | ||
376 | (void *)current->mm->end_data, | ||
377 | (void *)current->mm->end_data, | ||
378 | (void *)current->mm->brk, | ||
379 | (void *)current->mm->start_stack); | ||
380 | else | ||
381 | verbose_printk(KERN_NOTICE "invalid mm\n"); | ||
382 | } else | ||
383 | verbose_printk(KERN_NOTICE | ||
384 | "No Valid process in current context\n"); | ||
385 | #endif | ||
386 | } | ||
387 | |||
388 | void dump_bfin_mem(struct pt_regs *fp) | ||
389 | { | ||
390 | #ifdef CONFIG_DEBUG_VERBOSE | ||
391 | unsigned short *addr, *erraddr, val = 0, err = 0; | ||
392 | char sti = 0, buf[6]; | ||
393 | |||
394 | erraddr = (void *)fp->pc; | ||
395 | |||
396 | verbose_printk(KERN_NOTICE "return address: [0x%p]; contents of:", erraddr); | ||
397 | |||
398 | for (addr = (unsigned short *)((unsigned long)erraddr & ~0xF) - 0x10; | ||
399 | addr < (unsigned short *)((unsigned long)erraddr & ~0xF) + 0x10; | ||
400 | addr++) { | ||
401 | if (!((unsigned long)addr & 0xF)) | ||
402 | verbose_printk(KERN_NOTICE "0x%p: ", addr); | ||
403 | |||
404 | if (!get_instruction(&val, addr)) { | ||
405 | val = 0; | ||
406 | sprintf(buf, "????"); | ||
407 | } else | ||
408 | sprintf(buf, "%04x", val); | ||
409 | |||
410 | if (addr == erraddr) { | ||
411 | verbose_printk("[%s]", buf); | ||
412 | err = val; | ||
413 | } else | ||
414 | verbose_printk(" %s ", buf); | ||
415 | |||
416 | /* Do any previous instructions turn on interrupts? */ | ||
417 | if (addr <= erraddr && /* in the past */ | ||
418 | ((val >= 0x0040 && val <= 0x0047) || /* STI instruction */ | ||
419 | val == 0x017b)) /* [SP++] = RETI */ | ||
420 | sti = 1; | ||
421 | } | ||
422 | |||
423 | verbose_printk("\n"); | ||
424 | |||
425 | /* Hardware error interrupts can be deferred */ | ||
426 | if (unlikely(sti && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR && | ||
427 | oops_in_progress)){ | ||
428 | verbose_printk(KERN_NOTICE "Looks like this was a deferred error - sorry\n"); | ||
429 | #ifndef CONFIG_DEBUG_HWERR | ||
430 | verbose_printk(KERN_NOTICE | ||
431 | "The remaining message may be meaningless\n" | ||
432 | "You should enable CONFIG_DEBUG_HWERR to get a better idea where it came from\n"); | ||
433 | #else | ||
434 | /* If we are handling only one peripheral interrupt | ||
435 | * and current mm and pid are valid, and the last error | ||
436 | * was in that user space process's text area | ||
437 | * print it out - because that is where the problem exists | ||
438 | */ | ||
439 | if ((!(((fp)->ipend & ~0x30) & (((fp)->ipend & ~0x30) - 1))) && | ||
440 | (current->pid && current->mm)) { | ||
441 | /* And the last RETI points to the current userspace context */ | ||
442 | if ((fp + 1)->pc >= current->mm->start_code && | ||
443 | (fp + 1)->pc <= current->mm->end_code) { | ||
444 | verbose_printk(KERN_NOTICE "It might be better to look around here :\n"); | ||
445 | verbose_printk(KERN_NOTICE "-------------------------------------------\n"); | ||
446 | show_regs(fp + 1); | ||
447 | verbose_printk(KERN_NOTICE "-------------------------------------------\n"); | ||
448 | } | ||
449 | } | ||
450 | #endif | ||
451 | } | ||
452 | #endif | ||
453 | } | ||
454 | |||
455 | void show_regs(struct pt_regs *fp) | ||
456 | { | ||
457 | #ifdef CONFIG_DEBUG_VERBOSE | ||
458 | char buf[150]; | ||
459 | struct irqaction *action; | ||
460 | unsigned int i; | ||
461 | unsigned long flags = 0; | ||
462 | unsigned int cpu = raw_smp_processor_id(); | ||
463 | unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic(); | ||
464 | |||
465 | verbose_printk(KERN_NOTICE "\n"); | ||
466 | if (CPUID != bfin_cpuid()) | ||
467 | verbose_printk(KERN_NOTICE "Compiled for cpu family 0x%04x (Rev %d), " | ||
468 | "but running on:0x%04x (Rev %d)\n", | ||
469 | CPUID, bfin_compiled_revid(), bfin_cpuid(), bfin_revid()); | ||
470 | |||
471 | verbose_printk(KERN_NOTICE "ADSP-%s-0.%d", | ||
472 | CPU, bfin_compiled_revid()); | ||
473 | |||
474 | if (bfin_compiled_revid() != bfin_revid()) | ||
475 | verbose_printk("(Detected 0.%d)", bfin_revid()); | ||
476 | |||
477 | verbose_printk(" %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n", | ||
478 | get_cclk()/1000000, get_sclk()/1000000, | ||
479 | #ifdef CONFIG_MPU | ||
480 | "mpu on" | ||
481 | #else | ||
482 | "mpu off" | ||
483 | #endif | ||
484 | ); | ||
485 | |||
486 | verbose_printk(KERN_NOTICE "%s", linux_banner); | ||
487 | |||
488 | verbose_printk(KERN_NOTICE "\nSEQUENCER STATUS:\t\t%s\n", print_tainted()); | ||
489 | verbose_printk(KERN_NOTICE " SEQSTAT: %08lx IPEND: %04lx IMASK: %04lx SYSCFG: %04lx\n", | ||
490 | (long)fp->seqstat, fp->ipend, cpu_pda[raw_smp_processor_id()].ex_imask, fp->syscfg); | ||
491 | if (fp->ipend & EVT_IRPTEN) | ||
492 | verbose_printk(KERN_NOTICE " Global Interrupts Disabled (IPEND[4])\n"); | ||
493 | if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG13 | EVT_IVG12 | EVT_IVG11 | | ||
494 | EVT_IVG10 | EVT_IVG9 | EVT_IVG8 | EVT_IVG7 | EVT_IVTMR))) | ||
495 | verbose_printk(KERN_NOTICE " Peripheral interrupts masked off\n"); | ||
496 | if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG15 | EVT_IVG14))) | ||
497 | verbose_printk(KERN_NOTICE " Kernel interrupts masked off\n"); | ||
498 | if ((fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR) { | ||
499 | verbose_printk(KERN_NOTICE " HWERRCAUSE: 0x%lx\n", | ||
500 | (fp->seqstat & SEQSTAT_HWERRCAUSE) >> 14); | ||
501 | #ifdef EBIU_ERRMST | ||
502 | /* If the error was from the EBIU, print it out */ | ||
503 | if (bfin_read_EBIU_ERRMST() & CORE_ERROR) { | ||
504 | verbose_printk(KERN_NOTICE " EBIU Error Reason : 0x%04x\n", | ||
505 | bfin_read_EBIU_ERRMST()); | ||
506 | verbose_printk(KERN_NOTICE " EBIU Error Address : 0x%08x\n", | ||
507 | bfin_read_EBIU_ERRADD()); | ||
508 | } | ||
509 | #endif | ||
510 | } | ||
511 | verbose_printk(KERN_NOTICE " EXCAUSE : 0x%lx\n", | ||
512 | fp->seqstat & SEQSTAT_EXCAUSE); | ||
513 | for (i = 2; i <= 15 ; i++) { | ||
514 | if (fp->ipend & (1 << i)) { | ||
515 | if (i != 4) { | ||
516 | decode_address(buf, bfin_read32(EVT0 + 4*i)); | ||
517 | verbose_printk(KERN_NOTICE " physical IVG%i asserted : %s\n", i, buf); | ||
518 | } else | ||
519 | verbose_printk(KERN_NOTICE " interrupts disabled\n"); | ||
520 | } | ||
521 | } | ||
522 | |||
523 | /* if no interrupts are going off, don't print this out */ | ||
524 | if (fp->ipend & ~0x3F) { | ||
525 | for (i = 0; i < (NR_IRQS - 1); i++) { | ||
526 | if (!in_atomic) | ||
527 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); | ||
528 | |||
529 | action = irq_desc[i].action; | ||
530 | if (!action) | ||
531 | goto unlock; | ||
532 | |||
533 | decode_address(buf, (unsigned int)action->handler); | ||
534 | verbose_printk(KERN_NOTICE " logical irq %3d mapped : %s", i, buf); | ||
535 | for (action = action->next; action; action = action->next) { | ||
536 | decode_address(buf, (unsigned int)action->handler); | ||
537 | verbose_printk(", %s", buf); | ||
538 | } | ||
539 | verbose_printk("\n"); | ||
540 | unlock: | ||
541 | if (!in_atomic) | ||
542 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); | ||
543 | } | ||
544 | } | ||
545 | |||
546 | decode_address(buf, fp->rete); | ||
547 | verbose_printk(KERN_NOTICE " RETE: %s\n", buf); | ||
548 | decode_address(buf, fp->retn); | ||
549 | verbose_printk(KERN_NOTICE " RETN: %s\n", buf); | ||
550 | decode_address(buf, fp->retx); | ||
551 | verbose_printk(KERN_NOTICE " RETX: %s\n", buf); | ||
552 | decode_address(buf, fp->rets); | ||
553 | verbose_printk(KERN_NOTICE " RETS: %s\n", buf); | ||
554 | decode_address(buf, fp->pc); | ||
555 | verbose_printk(KERN_NOTICE " PC : %s\n", buf); | ||
556 | |||
557 | if (((long)fp->seqstat & SEQSTAT_EXCAUSE) && | ||
558 | (((long)fp->seqstat & SEQSTAT_EXCAUSE) != VEC_HWERR)) { | ||
559 | decode_address(buf, cpu_pda[cpu].dcplb_fault_addr); | ||
560 | verbose_printk(KERN_NOTICE "DCPLB_FAULT_ADDR: %s\n", buf); | ||
561 | decode_address(buf, cpu_pda[cpu].icplb_fault_addr); | ||
562 | verbose_printk(KERN_NOTICE "ICPLB_FAULT_ADDR: %s\n", buf); | ||
563 | } | ||
564 | |||
565 | verbose_printk(KERN_NOTICE "PROCESSOR STATE:\n"); | ||
566 | verbose_printk(KERN_NOTICE " R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n", | ||
567 | fp->r0, fp->r1, fp->r2, fp->r3); | ||
568 | verbose_printk(KERN_NOTICE " R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n", | ||
569 | fp->r4, fp->r5, fp->r6, fp->r7); | ||
570 | verbose_printk(KERN_NOTICE " P0 : %08lx P1 : %08lx P2 : %08lx P3 : %08lx\n", | ||
571 | fp->p0, fp->p1, fp->p2, fp->p3); | ||
572 | verbose_printk(KERN_NOTICE " P4 : %08lx P5 : %08lx FP : %08lx SP : %08lx\n", | ||
573 | fp->p4, fp->p5, fp->fp, (long)fp); | ||
574 | verbose_printk(KERN_NOTICE " LB0: %08lx LT0: %08lx LC0: %08lx\n", | ||
575 | fp->lb0, fp->lt0, fp->lc0); | ||
576 | verbose_printk(KERN_NOTICE " LB1: %08lx LT1: %08lx LC1: %08lx\n", | ||
577 | fp->lb1, fp->lt1, fp->lc1); | ||
578 | verbose_printk(KERN_NOTICE " B0 : %08lx L0 : %08lx M0 : %08lx I0 : %08lx\n", | ||
579 | fp->b0, fp->l0, fp->m0, fp->i0); | ||
580 | verbose_printk(KERN_NOTICE " B1 : %08lx L1 : %08lx M1 : %08lx I1 : %08lx\n", | ||
581 | fp->b1, fp->l1, fp->m1, fp->i1); | ||
582 | verbose_printk(KERN_NOTICE " B2 : %08lx L2 : %08lx M2 : %08lx I2 : %08lx\n", | ||
583 | fp->b2, fp->l2, fp->m2, fp->i2); | ||
584 | verbose_printk(KERN_NOTICE " B3 : %08lx L3 : %08lx M3 : %08lx I3 : %08lx\n", | ||
585 | fp->b3, fp->l3, fp->m3, fp->i3); | ||
586 | verbose_printk(KERN_NOTICE "A0.w: %08lx A0.x: %08lx A1.w: %08lx A1.x: %08lx\n", | ||
587 | fp->a0w, fp->a0x, fp->a1w, fp->a1x); | ||
588 | |||
589 | verbose_printk(KERN_NOTICE "USP : %08lx ASTAT: %08lx\n", | ||
590 | rdusp(), fp->astat); | ||
591 | |||
592 | verbose_printk(KERN_NOTICE "\n"); | ||
593 | #endif | ||
594 | } | ||
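One idiom worth noting in the new trace.c is the verbose_printk() fallback: when CONFIG_DEBUG_VERBOSE is off, the call compiles away to nothing, yet the format string and arguments are still type-checked. A standalone sketch of the same pattern (my_dbg_printk() is just an illustrative name):

#include <linux/kernel.h>

#define my_dbg_printk(fmt, arg...) \
	({ if (0) printk(fmt, ##arg); 0; })	/* dead code, but still checked */

static void example(unsigned long addr)
{
	/* emits no object code here, yet a %d vs %lx mismatch would still warn */
	my_dbg_printk(KERN_DEBUG "addr = 0x%08lx\n", addr);
}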
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index ba70c4bc2699..891cc39f7eec 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -1,20 +1,16 @@
1 | /* | 1 | /* |
2 | * Copyright 2004-2009 Analog Devices Inc. | 2 | * Main exception handling logic. |
3 | * | ||
4 | * Copyright 2004-2010 Analog Devices Inc. | ||
3 | * | 5 | * |
4 | * Licensed under the GPL-2 or later | 6 | * Licensed under the GPL-2 or later |
5 | */ | 7 | */ |
6 | 8 | ||
7 | #include <linux/bug.h> | 9 | #include <linux/bug.h> |
8 | #include <linux/uaccess.h> | 10 | #include <linux/uaccess.h> |
9 | #include <linux/interrupt.h> | ||
10 | #include <linux/module.h> | 11 | #include <linux/module.h> |
11 | #include <linux/kallsyms.h> | ||
12 | #include <linux/fs.h> | ||
13 | #include <linux/rbtree.h> | ||
14 | #include <asm/traps.h> | 12 | #include <asm/traps.h> |
15 | #include <asm/cacheflush.h> | ||
16 | #include <asm/cplb.h> | 13 | #include <asm/cplb.h> |
17 | #include <asm/dma.h> | ||
18 | #include <asm/blackfin.h> | 14 | #include <asm/blackfin.h> |
19 | #include <asm/irq_handler.h> | 15 | #include <asm/irq_handler.h> |
20 | #include <linux/irq.h> | 16 | #include <linux/irq.h> |
@@ -62,194 +58,6 @@ void __init trap_init(void)
62 | CSYNC(); | 58 | CSYNC(); |
63 | } | 59 | } |
64 | 60 | ||
65 | static void decode_address(char *buf, unsigned long address) | ||
66 | { | ||
67 | #ifdef CONFIG_DEBUG_VERBOSE | ||
68 | struct task_struct *p; | ||
69 | struct mm_struct *mm; | ||
70 | unsigned long flags, offset; | ||
71 | unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic(); | ||
72 | struct rb_node *n; | ||
73 | |||
74 | #ifdef CONFIG_KALLSYMS | ||
75 | unsigned long symsize; | ||
76 | const char *symname; | ||
77 | char *modname; | ||
78 | char *delim = ":"; | ||
79 | char namebuf[128]; | ||
80 | #endif | ||
81 | |||
82 | buf += sprintf(buf, "<0x%08lx> ", address); | ||
83 | |||
84 | #ifdef CONFIG_KALLSYMS | ||
85 | /* look up the address and see if we are in kernel space */ | ||
86 | symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf); | ||
87 | |||
88 | if (symname) { | ||
89 | /* yeah! kernel space! */ | ||
90 | if (!modname) | ||
91 | modname = delim = ""; | ||
92 | sprintf(buf, "{ %s%s%s%s + 0x%lx }", | ||
93 | delim, modname, delim, symname, | ||
94 | (unsigned long)offset); | ||
95 | return; | ||
96 | } | ||
97 | #endif | ||
98 | |||
99 | if (address >= FIXED_CODE_START && address < FIXED_CODE_END) { | ||
100 | /* Problem in fixed code section? */ | ||
101 | strcat(buf, "/* Maybe fixed code section */"); | ||
102 | return; | ||
103 | |||
104 | } else if (address < CONFIG_BOOT_LOAD) { | ||
105 | /* Problem somewhere before the kernel start address */ | ||
106 | strcat(buf, "/* Maybe null pointer? */"); | ||
107 | return; | ||
108 | |||
109 | } else if (address >= COREMMR_BASE) { | ||
110 | strcat(buf, "/* core mmrs */"); | ||
111 | return; | ||
112 | |||
113 | } else if (address >= SYSMMR_BASE) { | ||
114 | strcat(buf, "/* system mmrs */"); | ||
115 | return; | ||
116 | |||
117 | } else if (address >= L1_ROM_START && address < L1_ROM_START + L1_ROM_LENGTH) { | ||
118 | strcat(buf, "/* on-chip L1 ROM */"); | ||
119 | return; | ||
120 | } | ||
121 | |||
122 | /* | ||
123 | * Don't walk any of the vmas if we are oopsing, it has been known | ||
124 | * to cause problems - corrupt vmas (kernel crashes) cause double faults | ||
125 | */ | ||
126 | if (oops_in_progress) { | ||
127 | strcat(buf, "/* kernel dynamic memory (maybe user-space) */"); | ||
128 | return; | ||
129 | } | ||
130 | |||
131 | /* looks like we're off in user-land, so let's walk all the | ||
132 | * mappings of all our processes and see if we can't be a whee | ||
133 | * bit more specific | ||
134 | */ | ||
135 | write_lock_irqsave(&tasklist_lock, flags); | ||
136 | for_each_process(p) { | ||
137 | mm = (in_atomic ? p->mm : get_task_mm(p)); | ||
138 | if (!mm) | ||
139 | continue; | ||
140 | |||
141 | if (!down_read_trylock(&mm->mmap_sem)) { | ||
142 | if (!in_atomic) | ||
143 | mmput(mm); | ||
144 | continue; | ||
145 | } | ||
146 | |||
147 | for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) { | ||
148 | struct vm_area_struct *vma; | ||
149 | |||
150 | vma = rb_entry(n, struct vm_area_struct, vm_rb); | ||
151 | |||
152 | if (address >= vma->vm_start && address < vma->vm_end) { | ||
153 | char _tmpbuf[256]; | ||
154 | char *name = p->comm; | ||
155 | struct file *file = vma->vm_file; | ||
156 | |||
157 | if (file) { | ||
158 | char *d_name = d_path(&file->f_path, _tmpbuf, | ||
159 | sizeof(_tmpbuf)); | ||
160 | if (!IS_ERR(d_name)) | ||
161 | name = d_name; | ||
162 | } | ||
163 | |||
164 | /* FLAT does not have its text aligned to the start of | ||
165 | * the map while FDPIC ELF does ... | ||
166 | */ | ||
167 | |||
168 | /* before we can check flat/fdpic, we need to | ||
169 | * make sure current is valid | ||
170 | */ | ||
171 | if ((unsigned long)current >= FIXED_CODE_START && | ||
172 | !((unsigned long)current & 0x3)) { | ||
173 | if (current->mm && | ||
174 | (address > current->mm->start_code) && | ||
175 | (address < current->mm->end_code)) | ||
176 | offset = address - current->mm->start_code; | ||
177 | else | ||
178 | offset = (address - vma->vm_start) + | ||
179 | (vma->vm_pgoff << PAGE_SHIFT); | ||
180 | |||
181 | sprintf(buf, "[ %s + 0x%lx ]", name, offset); | ||
182 | } else | ||
183 | sprintf(buf, "[ %s vma:0x%lx-0x%lx]", | ||
184 | name, vma->vm_start, vma->vm_end); | ||
185 | |||
186 | up_read(&mm->mmap_sem); | ||
187 | if (!in_atomic) | ||
188 | mmput(mm); | ||
189 | |||
190 | if (buf[0] == '\0') | ||
191 | sprintf(buf, "[ %s ] dynamic memory", name); | ||
192 | |||
193 | goto done; | ||
194 | } | ||
195 | } | ||
196 | |||
197 | up_read(&mm->mmap_sem); | ||
198 | if (!in_atomic) | ||
199 | mmput(mm); | ||
200 | } | ||
201 | |||
202 | /* | ||
203 | * we were unable to find this address anywhere, | ||
204 | * or some MMs were skipped because they were in use. | ||
205 | */ | ||
206 | sprintf(buf, "/* kernel dynamic memory */"); | ||
207 | |||
208 | done: | ||
209 | write_unlock_irqrestore(&tasklist_lock, flags); | ||
210 | #else | ||
211 | sprintf(buf, " "); | ||
212 | #endif | ||
213 | } | ||
214 | |||
215 | asmlinkage void double_fault_c(struct pt_regs *fp) | ||
216 | { | ||
217 | #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON | ||
218 | int j; | ||
219 | trace_buffer_save(j); | ||
220 | #endif | ||
221 | |||
222 | console_verbose(); | ||
223 | oops_in_progress = 1; | ||
224 | #ifdef CONFIG_DEBUG_VERBOSE | ||
225 | printk(KERN_EMERG "Double Fault\n"); | ||
226 | #ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT | ||
227 | if (((long)fp->seqstat & SEQSTAT_EXCAUSE) == VEC_UNCOV) { | ||
228 | unsigned int cpu = raw_smp_processor_id(); | ||
229 | char buf[150]; | ||
230 | decode_address(buf, cpu_pda[cpu].retx_doublefault); | ||
231 | printk(KERN_EMERG "While handling exception (EXCAUSE = 0x%x) at %s:\n", | ||
232 | (unsigned int)cpu_pda[cpu].seqstat_doublefault & SEQSTAT_EXCAUSE, buf); | ||
233 | decode_address(buf, cpu_pda[cpu].dcplb_doublefault_addr); | ||
234 | printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %s\n", buf); | ||
235 | decode_address(buf, cpu_pda[cpu].icplb_doublefault_addr); | ||
236 | printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %s\n", buf); | ||
237 | |||
238 | decode_address(buf, fp->retx); | ||
239 | printk(KERN_NOTICE "The instruction at %s caused a double exception\n", buf); | ||
240 | } else | ||
241 | #endif | ||
242 | { | ||
243 | dump_bfin_process(fp); | ||
244 | dump_bfin_mem(fp); | ||
245 | show_regs(fp); | ||
246 | dump_bfin_trace_buffer(); | ||
247 | } | ||
248 | #endif | ||
249 | panic("Double Fault - unrecoverable event"); | ||
250 | |||
251 | } | ||
252 | |||
253 | static int kernel_mode_regs(struct pt_regs *regs) | 61 | static int kernel_mode_regs(struct pt_regs *regs) |
254 | { | 62 | { |
255 | return regs->ipend & 0xffc0; | 63 | return regs->ipend & 0xffc0; |
@@ -672,659 +480,44 @@ asmlinkage notrace void trap_c(struct pt_regs *fp)
672 | trace_buffer_restore(j); | 480 | trace_buffer_restore(j); |
673 | } | 481 | } |
674 | 482 | ||
675 | /* Typical exception handling routines */ | 483 | asmlinkage void double_fault_c(struct pt_regs *fp) |
676 | |||
677 | #define EXPAND_LEN ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 - 1) | ||
678 | |||
679 | /* | ||
680 | * Similar to get_user, do some address checking, then dereference | ||
681 | * Return true on success, false on bad address | ||
682 | */ | ||
683 | static bool get_instruction(unsigned short *val, unsigned short *address) | ||
684 | { | ||
685 | unsigned long addr = (unsigned long)address; | ||
686 | |||
687 | /* Check for odd addresses */ | ||
688 | if (addr & 0x1) | ||
689 | return false; | ||
690 | |||
691 | /* MMR region will never have instructions */ | ||
692 | if (addr >= SYSMMR_BASE) | ||
693 | return false; | ||
694 | |||
695 | switch (bfin_mem_access_type(addr, 2)) { | ||
696 | case BFIN_MEM_ACCESS_CORE: | ||
697 | case BFIN_MEM_ACCESS_CORE_ONLY: | ||
698 | *val = *address; | ||
699 | return true; | ||
700 | case BFIN_MEM_ACCESS_DMA: | ||
701 | dma_memcpy(val, address, 2); | ||
702 | return true; | ||
703 | case BFIN_MEM_ACCESS_ITEST: | ||
704 | isram_memcpy(val, address, 2); | ||
705 | return true; | ||
706 | default: /* invalid access */ | ||
707 | return false; | ||
708 | } | ||
709 | } | ||
710 | |||
711 | /* | ||
712 | * decode the instruction if we are printing out the trace, as it | ||
713 | * makes things easier to follow, without running it through objdump | ||
714 | * These are the normal instructions which cause change of flow, which | ||
715 | * would be at the source of the trace buffer | ||
716 | */ | ||
717 | #if defined(CONFIG_DEBUG_VERBOSE) && defined(CONFIG_DEBUG_BFIN_HWTRACE_ON) | ||
718 | static void decode_instruction(unsigned short *address) | ||
719 | { | ||
720 | unsigned short opcode; | ||
721 | |||
722 | if (get_instruction(&opcode, address)) { | ||
723 | if (opcode == 0x0010) | ||
724 | verbose_printk("RTS"); | ||
725 | else if (opcode == 0x0011) | ||
726 | verbose_printk("RTI"); | ||
727 | else if (opcode == 0x0012) | ||
728 | verbose_printk("RTX"); | ||
729 | else if (opcode == 0x0013) | ||
730 | verbose_printk("RTN"); | ||
731 | else if (opcode == 0x0014) | ||
732 | verbose_printk("RTE"); | ||
733 | else if (opcode == 0x0025) | ||
734 | verbose_printk("EMUEXCPT"); | ||
735 | else if (opcode >= 0x0040 && opcode <= 0x0047) | ||
736 | verbose_printk("STI R%i", opcode & 7); | ||
737 | else if (opcode >= 0x0050 && opcode <= 0x0057) | ||
738 | verbose_printk("JUMP (P%i)", opcode & 7); | ||
739 | else if (opcode >= 0x0060 && opcode <= 0x0067) | ||
740 | verbose_printk("CALL (P%i)", opcode & 7); | ||
741 | else if (opcode >= 0x0070 && opcode <= 0x0077) | ||
742 | verbose_printk("CALL (PC+P%i)", opcode & 7); | ||
743 | else if (opcode >= 0x0080 && opcode <= 0x0087) | ||
744 | verbose_printk("JUMP (PC+P%i)", opcode & 7); | ||
745 | else if (opcode >= 0x0090 && opcode <= 0x009F) | ||
746 | verbose_printk("RAISE 0x%x", opcode & 0xF); | ||
747 | else if (opcode >= 0x00A0 && opcode <= 0x00AF) | ||
748 | verbose_printk("EXCPT 0x%x", opcode & 0xF); | ||
749 | else if ((opcode >= 0x1000 && opcode <= 0x13FF) || (opcode >= 0x1800 && opcode <= 0x1BFF)) | ||
750 | verbose_printk("IF !CC JUMP"); | ||
751 | else if ((opcode >= 0x1400 && opcode <= 0x17ff) || (opcode >= 0x1c00 && opcode <= 0x1fff)) | ||
752 | verbose_printk("IF CC JUMP"); | ||
753 | else if (opcode >= 0x2000 && opcode <= 0x2fff) | ||
754 | verbose_printk("JUMP.S"); | ||
755 | else if (opcode >= 0xe080 && opcode <= 0xe0ff) | ||
756 | verbose_printk("LSETUP"); | ||
757 | else if (opcode >= 0xe200 && opcode <= 0xe2ff) | ||
758 | verbose_printk("JUMP.L"); | ||
759 | else if (opcode >= 0xe300 && opcode <= 0xe3ff) | ||
760 | verbose_printk("CALL pcrel"); | ||
761 | else | ||
762 | verbose_printk("0x%04x", opcode); | ||
763 | } | ||
764 | |||
765 | } | ||
766 | #endif | ||
767 | |||
768 | void dump_bfin_trace_buffer(void) | ||
769 | { | ||
770 | #ifdef CONFIG_DEBUG_VERBOSE | ||
771 | #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON | ||
772 | int tflags, i = 0; | ||
773 | char buf[150]; | ||
774 | unsigned short *addr; | ||
775 | #ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND | ||
776 | int j, index; | ||
777 | #endif | ||
778 | |||
779 | trace_buffer_save(tflags); | ||
780 | |||
781 | printk(KERN_NOTICE "Hardware Trace:\n"); | ||
782 | |||
783 | #ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND | ||
784 | printk(KERN_NOTICE "WARNING: Expanded trace turned on - can not trace exceptions\n"); | ||
785 | #endif | ||
786 | |||
787 | if (likely(bfin_read_TBUFSTAT() & TBUFCNT)) { | ||
788 | for (; bfin_read_TBUFSTAT() & TBUFCNT; i++) { | ||
789 | decode_address(buf, (unsigned long)bfin_read_TBUF()); | ||
790 | printk(KERN_NOTICE "%4i Target : %s\n", i, buf); | ||
791 | addr = (unsigned short *)bfin_read_TBUF(); | ||
792 | decode_address(buf, (unsigned long)addr); | ||
793 | printk(KERN_NOTICE " Source : %s ", buf); | ||
794 | decode_instruction(addr); | ||
795 | printk("\n"); | ||
796 | } | ||
797 | } | ||
798 | |||
799 | #ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND | ||
800 | if (trace_buff_offset) | ||
801 | index = trace_buff_offset / 4; | ||
802 | else | ||
803 | index = EXPAND_LEN; | ||
804 | |||
805 | j = (1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 128; | ||
806 | while (j) { | ||
807 | decode_address(buf, software_trace_buff[index]); | ||
808 | printk(KERN_NOTICE "%4i Target : %s\n", i, buf); | ||
809 | index -= 1; | ||
810 | if (index < 0 ) | ||
811 | index = EXPAND_LEN; | ||
812 | decode_address(buf, software_trace_buff[index]); | ||
813 | printk(KERN_NOTICE " Source : %s ", buf); | ||
814 | decode_instruction((unsigned short *)software_trace_buff[index]); | ||
815 | printk("\n"); | ||
816 | index -= 1; | ||
817 | if (index < 0) | ||
818 | index = EXPAND_LEN; | ||
819 | j--; | ||
820 | i++; | ||
821 | } | ||
822 | #endif | ||
823 | |||
824 | trace_buffer_restore(tflags); | ||
825 | #endif | ||
826 | #endif | ||
827 | } | ||
828 | EXPORT_SYMBOL(dump_bfin_trace_buffer); | ||
829 | |||
830 | #ifdef CONFIG_BUG | ||
831 | int is_valid_bugaddr(unsigned long addr) | ||
832 | { | ||
833 | unsigned short opcode; | ||
834 | |||
835 | if (!get_instruction(&opcode, (unsigned short *)addr)) | ||
836 | return 0; | ||
837 | |||
838 | return opcode == BFIN_BUG_OPCODE; | ||
839 | } | ||
840 | #endif | ||
841 | |||
842 | /* | ||
843 | * Checks to see if the address pointed to is either a | ||
844 | * 16-bit CALL instruction, or a 32-bit CALL instruction | ||
845 | */ | ||
846 | static bool is_bfin_call(unsigned short *addr) | ||
847 | { | ||
848 | unsigned short opcode = 0, *ins_addr; | ||
849 | ins_addr = (unsigned short *)addr; | ||
850 | |||
851 | if (!get_instruction(&opcode, ins_addr)) | ||
852 | return false; | ||
853 | |||
854 | if ((opcode >= 0x0060 && opcode <= 0x0067) || | ||
855 | (opcode >= 0x0070 && opcode <= 0x0077)) | ||
856 | return true; | ||
857 | |||
858 | ins_addr--; | ||
859 | if (!get_instruction(&opcode, ins_addr)) | ||
860 | return false; | ||
861 | |||
862 | if (opcode >= 0xE300 && opcode <= 0xE3FF) | ||
863 | return true; | ||
864 | |||
865 | return false; | ||
866 | |||
867 | } | ||
868 | |||
869 | void show_stack(struct task_struct *task, unsigned long *stack) | ||
870 | { | ||
871 | #ifdef CONFIG_PRINTK | ||
872 | unsigned int *addr, *endstack, *fp = 0, *frame; | ||
873 | unsigned short *ins_addr; | ||
874 | char buf[150]; | ||
875 | unsigned int i, j, ret_addr, frame_no = 0; | ||
876 | |||
877 | /* | ||
878 | * If we have been passed a specific stack, use that one otherwise | ||
879 | * if we have been passed a task structure, use that, otherwise | ||
880 | * use the stack of where the variable "stack" exists | ||
881 | */ | ||
882 | |||
883 | if (stack == NULL) { | ||
884 | if (task) { | ||
885 | /* We know this is a kernel stack, so this is the start/end */ | ||
886 | stack = (unsigned long *)task->thread.ksp; | ||
887 | endstack = (unsigned int *)(((unsigned int)(stack) & ~(THREAD_SIZE - 1)) + THREAD_SIZE); | ||
888 | } else { | ||
889 | /* print out the existing stack info */ | ||
890 | stack = (unsigned long *)&stack; | ||
891 | endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack); | ||
892 | } | ||
893 | } else | ||
894 | endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack); | ||
895 | |||
896 | printk(KERN_NOTICE "Stack info:\n"); | ||
897 | decode_address(buf, (unsigned int)stack); | ||
898 | printk(KERN_NOTICE " SP: [0x%p] %s\n", stack, buf); | ||
899 | |||
900 | if (!access_ok(VERIFY_READ, stack, (unsigned int)endstack - (unsigned int)stack)) { | ||
901 | printk(KERN_NOTICE "Invalid stack pointer\n"); | ||
902 | return; | ||
903 | } | ||
904 | |||
905 | /* First thing is to look for a frame pointer */ | ||
906 | for (addr = (unsigned int *)((unsigned int)stack & ~0xF); addr < endstack; addr++) { | ||
907 | if (*addr & 0x1) | ||
908 | continue; | ||
909 | ins_addr = (unsigned short *)*addr; | ||
910 | ins_addr--; | ||
911 | if (is_bfin_call(ins_addr)) | ||
912 | fp = addr - 1; | ||
913 | |||
914 | if (fp) { | ||
915 | /* Let's check to see if it is a frame pointer */ | ||
916 | while (fp >= (addr - 1) && fp < endstack | ||
917 | && fp && ((unsigned int) fp & 0x3) == 0) | ||
918 | fp = (unsigned int *)*fp; | ||
919 | if (fp == 0 || fp == endstack) { | ||
920 | fp = addr - 1; | ||
921 | break; | ||
922 | } | ||
923 | fp = 0; | ||
924 | } | ||
925 | } | ||
926 | if (fp) { | ||
927 | frame = fp; | ||
928 | printk(KERN_NOTICE " FP: (0x%p)\n", fp); | ||
929 | } else | ||
930 | frame = 0; | ||
931 | |||
932 | /* | ||
933 | * Now that we think we know where things are, we | ||
934 | * walk the stack again, this time printing things out | ||
935 | * incase there is no frame pointer, we still look for | ||
936 | * valid return addresses | ||
937 | */ | ||
938 | |||
939 | /* First time print out data, next time, print out symbols */ | ||
940 | for (j = 0; j <= 1; j++) { | ||
941 | if (j) | ||
942 | printk(KERN_NOTICE "Return addresses in stack:\n"); | ||
943 | else | ||
944 | printk(KERN_NOTICE " Memory from 0x%08lx to %p", ((long unsigned int)stack & ~0xF), endstack); | ||
945 | |||
946 | fp = frame; | ||
947 | frame_no = 0; | ||
948 | |||
949 | for (addr = (unsigned int *)((unsigned int)stack & ~0xF), i = 0; | ||
950 | addr < endstack; addr++, i++) { | ||
951 | |||
952 | ret_addr = 0; | ||
953 | if (!j && i % 8 == 0) | ||
954 | printk(KERN_NOTICE "%p:",addr); | ||
955 | |||
956 | /* if it is an odd address, or zero, just skip it */ | ||
957 | if (*addr & 0x1 || !*addr) | ||
958 | goto print; | ||
959 | |||
960 | ins_addr = (unsigned short *)*addr; | ||
961 | |||
962 | /* Go back one instruction, and see if it is a CALL */ | ||
963 | ins_addr--; | ||
964 | ret_addr = is_bfin_call(ins_addr); | ||
965 | print: | ||
966 | if (!j && stack == (unsigned long *)addr) | ||
967 | printk("[%08x]", *addr); | ||
968 | else if (ret_addr) | ||
969 | if (j) { | ||
970 | decode_address(buf, (unsigned int)*addr); | ||
971 | if (frame == addr) { | ||
972 | printk(KERN_NOTICE " frame %2i : %s\n", frame_no, buf); | ||
973 | continue; | ||
974 | } | ||
975 | printk(KERN_NOTICE " address : %s\n", buf); | ||
976 | } else | ||
977 | printk("<%08x>", *addr); | ||
978 | else if (fp == addr) { | ||
979 | if (j) | ||
980 | frame = addr+1; | ||
981 | else | ||
982 | printk("(%08x)", *addr); | ||
983 | |||
984 | fp = (unsigned int *)*addr; | ||
985 | frame_no++; | ||
986 | |||
987 | } else if (!j) | ||
988 | printk(" %08x ", *addr); | ||
989 | } | ||
990 | if (!j) | ||
991 | printk("\n"); | ||
992 | } | ||
993 | #endif | ||
994 | } | ||
995 | EXPORT_SYMBOL(show_stack); | ||
996 | |||
997 | void dump_stack(void) | ||
998 | { | 484 | { |
999 | unsigned long stack; | ||
1000 | #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON | 485 | #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON |
1001 | int tflags; | 486 | int j; |
487 | trace_buffer_save(j); | ||
1002 | #endif | 488 | #endif |
1003 | trace_buffer_save(tflags); | ||
1004 | dump_bfin_trace_buffer(); | ||
1005 | show_stack(current, &stack); | ||
1006 | trace_buffer_restore(tflags); | ||
1007 | } | ||
1008 | EXPORT_SYMBOL(dump_stack); | ||
1009 | 489 | ||
1010 | void dump_bfin_process(struct pt_regs *fp) | 490 | console_verbose(); |
1011 | { | 491 | oops_in_progress = 1; |
1012 | #ifdef CONFIG_DEBUG_VERBOSE | 492 | #ifdef CONFIG_DEBUG_VERBOSE |
1013 | /* We should be able to look at fp->ipend, but we don't push it on the | 493 | printk(KERN_EMERG "Double Fault\n"); |
1014 | * stack all the time, so do this until we fix that */ | 494 | #ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT |
1015 | unsigned int context = bfin_read_IPEND(); | 495 | if (((long)fp->seqstat & SEQSTAT_EXCAUSE) == VEC_UNCOV) { |
1016 | 496 | unsigned int cpu = raw_smp_processor_id(); | |
1017 | if (oops_in_progress) | 497 | char buf[150]; |
1018 | verbose_printk(KERN_EMERG "Kernel OOPS in progress\n"); | 498 | decode_address(buf, cpu_pda[cpu].retx_doublefault); |
1019 | 499 | printk(KERN_EMERG "While handling exception (EXCAUSE = 0x%x) at %s:\n", | |
1020 | if (context & 0x0020 && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR) | 500 | (unsigned int)cpu_pda[cpu].seqstat_doublefault & SEQSTAT_EXCAUSE, buf); |
1021 | verbose_printk(KERN_NOTICE "HW Error context\n"); | 501 | decode_address(buf, cpu_pda[cpu].dcplb_doublefault_addr); |
1022 | else if (context & 0x0020) | 502 | printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %s\n", buf); |
1023 | verbose_printk(KERN_NOTICE "Deferred Exception context\n"); | 503 | decode_address(buf, cpu_pda[cpu].icplb_doublefault_addr); |
1024 | else if (context & 0x3FC0) | 504 | printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %s\n", buf); |
1025 | verbose_printk(KERN_NOTICE "Interrupt context\n"); | ||
1026 | else if (context & 0x4000) | ||
1027 | verbose_printk(KERN_NOTICE "Deferred Interrupt context\n"); | ||
1028 | else if (context & 0x8000) | ||
1029 | verbose_printk(KERN_NOTICE "Kernel process context\n"); | ||
1030 | |||
1031 | /* Because we are crashing, pointers could be bad, so we validate them | ||
1032 | * carefully before we use them. | ||
1033 | */ | ||
1034 | if ((unsigned long)current >= FIXED_CODE_START && | ||
1035 | !((unsigned long)current & 0x3) && current->pid) { | ||
1036 | verbose_printk(KERN_NOTICE "CURRENT PROCESS:\n"); | ||
1037 | if (current->comm >= (char *)FIXED_CODE_START) | ||
1038 | verbose_printk(KERN_NOTICE "COMM=%s PID=%d", | ||
1039 | current->comm, current->pid); | ||
1040 | else | ||
1041 | verbose_printk(KERN_NOTICE "COMM= invalid"); | ||
1042 | 505 | ||
1043 | printk(KERN_CONT " CPU=%d\n", current_thread_info()->cpu); | 506 | decode_address(buf, fp->retx); |
1044 | if (!((unsigned long)current->mm & 0x3) && (unsigned long)current->mm >= FIXED_CODE_START) | 507 | printk(KERN_NOTICE "The instruction at %s caused a double exception\n", buf); |
1045 | verbose_printk(KERN_NOTICE | ||
1046 | "TEXT = 0x%p-0x%p DATA = 0x%p-0x%p\n" | ||
1047 | " BSS = 0x%p-0x%p USER-STACK = 0x%p\n\n", | ||
1048 | (void *)current->mm->start_code, | ||
1049 | (void *)current->mm->end_code, | ||
1050 | (void *)current->mm->start_data, | ||
1051 | (void *)current->mm->end_data, | ||
1052 | (void *)current->mm->end_data, | ||
1053 | (void *)current->mm->brk, | ||
1054 | (void *)current->mm->start_stack); | ||
1055 | else | ||
1056 | verbose_printk(KERN_NOTICE "invalid mm\n"); | ||
1057 | } else | 508 | } else |
1058 | verbose_printk(KERN_NOTICE | ||
1059 | "No Valid process in current context\n"); | ||
1060 | #endif | ||
1061 | } | ||
1062 | |||
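
For readers following dump_bfin_process() above: the IPEND masks it tests map onto the Blackfin event levels. The helper below is a reading aid only, not part of the patch; it mirrors the if/else chain with informal names rather than identifiers from the architecture headers.

	/* Reading aid only (not from this patch): the IPEND tests from
	 * dump_bfin_process() pulled out into a standalone helper. */
	#include <stdio.h>

	static const char *classify_context(unsigned int ipend, int excause_is_hwerr)
	{
		if ((ipend & 0x0020) && excause_is_hwerr)
			return "HW Error context";		/* EVT5 with a hardware-error cause */
		if (ipend & 0x0020)
			return "Deferred Exception context";	/* EVT5 otherwise */
		if (ipend & 0x3FC0)
			return "Interrupt context";		/* EVT6..EVT13: core timer + IVG7..IVG13 */
		if (ipend & 0x4000)
			return "Deferred Interrupt context";	/* EVT14 */
		if (ipend & 0x8000)
			return "Kernel process context";	/* EVT15 */
		return "unknown";
	}

	int main(void)
	{
		printf("%s\n", classify_context(0x8000, 0));	/* prints "Kernel process context" */
		return 0;
	}
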
1063 | void dump_bfin_mem(struct pt_regs *fp) | ||
1064 | { | ||
1065 | #ifdef CONFIG_DEBUG_VERBOSE | ||
1066 | unsigned short *addr, *erraddr, val = 0, err = 0; | ||
1067 | char sti = 0, buf[6]; | ||
1068 | |||
1069 | erraddr = (void *)fp->pc; | ||
1070 | |||
1071 | verbose_printk(KERN_NOTICE "return address: [0x%p]; contents of:", erraddr); | ||
1072 | |||
1073 | for (addr = (unsigned short *)((unsigned long)erraddr & ~0xF) - 0x10; | ||
1074 | addr < (unsigned short *)((unsigned long)erraddr & ~0xF) + 0x10; | ||
1075 | addr++) { | ||
1076 | if (!((unsigned long)addr & 0xF)) | ||
1077 | verbose_printk(KERN_NOTICE "0x%p: ", addr); | ||
1078 | |||
1079 | if (!get_instruction(&val, addr)) { | ||
1080 | val = 0; | ||
1081 | sprintf(buf, "????"); | ||
1082 | } else | ||
1083 | sprintf(buf, "%04x", val); | ||
1084 | |||
1085 | if (addr == erraddr) { | ||
1086 | verbose_printk("[%s]", buf); | ||
1087 | err = val; | ||
1088 | } else | ||
1089 | verbose_printk(" %s ", buf); | ||
1090 | |||
1091 | /* Do any previous instructions turn on interrupts? */ | ||
1092 | if (addr <= erraddr && /* in the past */ | ||
1093 | ((val >= 0x0040 && val <= 0x0047) || /* STI instruction */ | ||
1094 | val == 0x017b)) /* [SP++] = RETI */ | ||
1095 | sti = 1; | ||
1096 | } | ||
1097 | |||
1098 | verbose_printk("\n"); | ||
1099 | |||
1100 | /* Hardware error interrupts can be deferred */ | ||
1101 | if (unlikely(sti && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR && | ||
1102 | oops_in_progress)) { | ||
1103 | verbose_printk(KERN_NOTICE "Looks like this was a deferred error - sorry\n"); | ||
1104 | #ifndef CONFIG_DEBUG_HWERR | ||
1105 | verbose_printk(KERN_NOTICE | ||
1106 | "The remaining message may be meaningless\n" | ||
1107 | "You should enable CONFIG_DEBUG_HWERR to get a better idea where it came from\n"); | ||
1108 | #else | ||
1109 | /* If we are handling only one peripheral interrupt, | ||
1110 | * the current mm and pid are valid, and the last error | ||
1111 | * was in that user space process's text area, | ||
1112 | * print it out, because that is where the problem exists. | ||
1113 | */ | ||
1114 | if ((!(((fp)->ipend & ~0x30) & (((fp)->ipend & ~0x30) - 1))) && | ||
1115 | (current->pid && current->mm)) { | ||
1116 | /* And the last RETI points to the current userspace context */ | ||
1117 | if ((fp + 1)->pc >= current->mm->start_code && | ||
1118 | (fp + 1)->pc <= current->mm->end_code) { | ||
1119 | verbose_printk(KERN_NOTICE "It might be better to look around here :\n"); | ||
1120 | verbose_printk(KERN_NOTICE "-------------------------------------------\n"); | ||
1121 | show_regs(fp + 1); | ||
1122 | verbose_printk(KERN_NOTICE "-------------------------------------------\n"); | ||
1123 | } | ||
1124 | } | ||
1125 | #endif | ||
1126 | } | ||
1127 | #endif | ||
1128 | } | ||
1129 | |||
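
The `(fp)->ipend & ~0x30` test near the end of dump_bfin_mem() above relies on the classic x & (x - 1) trick: for a non-zero value it clears the lowest set bit, so the negated expression is true exactly when at most one interrupt bit remains pending once the global-disable and hardware-error bits are masked off. A minimal standalone sketch of the same check (not from the patch; the helper name is made up):

	#include <stdio.h>

	/* !(x & (x - 1)) is true when x has at most one bit set, i.e. at most
	 * one peripheral interrupt pending after EVT4/EVT5 are masked out. */
	static int at_most_one_pending(unsigned int ipend)
	{
		unsigned int x = ipend & ~0x30;

		return !(x & (x - 1));
	}

	int main(void)
	{
		printf("%d\n", at_most_one_pending(0x0080));	/* 1: only IVG7 pending */
		printf("%d\n", at_most_one_pending(0x00c0));	/* 0: IVTMR and IVG7 both pending */
		return 0;
	}
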
1130 | void show_regs(struct pt_regs *fp) | ||
1131 | { | ||
1132 | #ifdef CONFIG_DEBUG_VERBOSE | ||
1133 | char buf[150]; | ||
1134 | struct irqaction *action; | ||
1135 | unsigned int i; | ||
1136 | unsigned long flags = 0; | ||
1137 | unsigned int cpu = raw_smp_processor_id(); | ||
1138 | unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic(); | ||
1139 | |||
1140 | verbose_printk(KERN_NOTICE "\n"); | ||
1141 | if (CPUID != bfin_cpuid()) | ||
1142 | verbose_printk(KERN_NOTICE "Compiled for cpu family 0x%04x (Rev %d), " | ||
1143 | "but running on:0x%04x (Rev %d)\n", | ||
1144 | CPUID, bfin_compiled_revid(), bfin_cpuid(), bfin_revid()); | ||
1145 | |||
1146 | verbose_printk(KERN_NOTICE "ADSP-%s-0.%d", | ||
1147 | CPU, bfin_compiled_revid()); | ||
1148 | |||
1149 | if (bfin_compiled_revid() != bfin_revid()) | ||
1150 | verbose_printk("(Detected 0.%d)", bfin_revid()); | ||
1151 | |||
1152 | verbose_printk(" %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n", | ||
1153 | get_cclk()/1000000, get_sclk()/1000000, | ||
1154 | #ifdef CONFIG_MPU | ||
1155 | "mpu on" | ||
1156 | #else | ||
1157 | "mpu off" | ||
1158 | #endif | 509 | #endif |
1159 | ); | 510 | { |
1160 | 511 | dump_bfin_process(fp); | |
1161 | verbose_printk(KERN_NOTICE "%s", linux_banner); | 512 | dump_bfin_mem(fp); |
1162 | 513 | show_regs(fp); | |
1163 | verbose_printk(KERN_NOTICE "\nSEQUENCER STATUS:\t\t%s\n", print_tainted()); | 514 | dump_bfin_trace_buffer(); |
1164 | verbose_printk(KERN_NOTICE " SEQSTAT: %08lx IPEND: %04lx IMASK: %04lx SYSCFG: %04lx\n", | ||
1165 | (long)fp->seqstat, fp->ipend, cpu_pda[raw_smp_processor_id()].ex_imask, fp->syscfg); | ||
1166 | if (fp->ipend & EVT_IRPTEN) | ||
1167 | verbose_printk(KERN_NOTICE " Global Interrupts Disabled (IPEND[4])\n"); | ||
1168 | if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG13 | EVT_IVG12 | EVT_IVG11 | | ||
1169 | EVT_IVG10 | EVT_IVG9 | EVT_IVG8 | EVT_IVG7 | EVT_IVTMR))) | ||
1170 | verbose_printk(KERN_NOTICE " Peripheral interrupts masked off\n"); | ||
1171 | if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG15 | EVT_IVG14))) | ||
1172 | verbose_printk(KERN_NOTICE " Kernel interrupts masked off\n"); | ||
1173 | if ((fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR) { | ||
1174 | verbose_printk(KERN_NOTICE " HWERRCAUSE: 0x%lx\n", | ||
1175 | (fp->seqstat & SEQSTAT_HWERRCAUSE) >> 14); | ||
1176 | #ifdef EBIU_ERRMST | ||
1177 | /* If the error was from the EBIU, print it out */ | ||
1178 | if (bfin_read_EBIU_ERRMST() & CORE_ERROR) { | ||
1179 | verbose_printk(KERN_NOTICE " EBIU Error Reason : 0x%04x\n", | ||
1180 | bfin_read_EBIU_ERRMST()); | ||
1181 | verbose_printk(KERN_NOTICE " EBIU Error Address : 0x%08x\n", | ||
1182 | bfin_read_EBIU_ERRADD()); | ||
1183 | } | ||
1184 | #endif | ||
1185 | } | ||
1186 | verbose_printk(KERN_NOTICE " EXCAUSE : 0x%lx\n", | ||
1187 | fp->seqstat & SEQSTAT_EXCAUSE); | ||
1188 | for (i = 2; i <= 15 ; i++) { | ||
1189 | if (fp->ipend & (1 << i)) { | ||
1190 | if (i != 4) { | ||
1191 | decode_address(buf, bfin_read32(EVT0 + 4*i)); | ||
1192 | verbose_printk(KERN_NOTICE " physical IVG%i asserted : %s\n", i, buf); | ||
1193 | } else | ||
1194 | verbose_printk(KERN_NOTICE " interrupts disabled\n"); | ||
1195 | } | ||
1196 | } | ||
1197 | |||
1198 | /* if no interrupts are going off, don't print this out */ | ||
1199 | if (fp->ipend & ~0x3F) { | ||
1200 | for (i = 0; i < (NR_IRQS - 1); i++) { | ||
1201 | if (!in_atomic) | ||
1202 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); | ||
1203 | |||
1204 | action = irq_desc[i].action; | ||
1205 | if (!action) | ||
1206 | goto unlock; | ||
1207 | |||
1208 | decode_address(buf, (unsigned int)action->handler); | ||
1209 | verbose_printk(KERN_NOTICE " logical irq %3d mapped : %s", i, buf); | ||
1210 | for (action = action->next; action; action = action->next) { | ||
1211 | decode_address(buf, (unsigned int)action->handler); | ||
1212 | verbose_printk(", %s", buf); | ||
1213 | } | ||
1214 | verbose_printk("\n"); | ||
1215 | unlock: | ||
1216 | if (!in_atomic) | ||
1217 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); | ||
1218 | } | ||
1219 | } | ||
1220 | |||
1221 | decode_address(buf, fp->rete); | ||
1222 | verbose_printk(KERN_NOTICE " RETE: %s\n", buf); | ||
1223 | decode_address(buf, fp->retn); | ||
1224 | verbose_printk(KERN_NOTICE " RETN: %s\n", buf); | ||
1225 | decode_address(buf, fp->retx); | ||
1226 | verbose_printk(KERN_NOTICE " RETX: %s\n", buf); | ||
1227 | decode_address(buf, fp->rets); | ||
1228 | verbose_printk(KERN_NOTICE " RETS: %s\n", buf); | ||
1229 | decode_address(buf, fp->pc); | ||
1230 | verbose_printk(KERN_NOTICE " PC : %s\n", buf); | ||
1231 | |||
1232 | if (((long)fp->seqstat & SEQSTAT_EXCAUSE) && | ||
1233 | (((long)fp->seqstat & SEQSTAT_EXCAUSE) != VEC_HWERR)) { | ||
1234 | decode_address(buf, cpu_pda[cpu].dcplb_fault_addr); | ||
1235 | verbose_printk(KERN_NOTICE "DCPLB_FAULT_ADDR: %s\n", buf); | ||
1236 | decode_address(buf, cpu_pda[cpu].icplb_fault_addr); | ||
1237 | verbose_printk(KERN_NOTICE "ICPLB_FAULT_ADDR: %s\n", buf); | ||
1238 | } | 515 | } |
1239 | |||
1240 | verbose_printk(KERN_NOTICE "PROCESSOR STATE:\n"); | ||
1241 | verbose_printk(KERN_NOTICE " R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n", | ||
1242 | fp->r0, fp->r1, fp->r2, fp->r3); | ||
1243 | verbose_printk(KERN_NOTICE " R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n", | ||
1244 | fp->r4, fp->r5, fp->r6, fp->r7); | ||
1245 | verbose_printk(KERN_NOTICE " P0 : %08lx P1 : %08lx P2 : %08lx P3 : %08lx\n", | ||
1246 | fp->p0, fp->p1, fp->p2, fp->p3); | ||
1247 | verbose_printk(KERN_NOTICE " P4 : %08lx P5 : %08lx FP : %08lx SP : %08lx\n", | ||
1248 | fp->p4, fp->p5, fp->fp, (long)fp); | ||
1249 | verbose_printk(KERN_NOTICE " LB0: %08lx LT0: %08lx LC0: %08lx\n", | ||
1250 | fp->lb0, fp->lt0, fp->lc0); | ||
1251 | verbose_printk(KERN_NOTICE " LB1: %08lx LT1: %08lx LC1: %08lx\n", | ||
1252 | fp->lb1, fp->lt1, fp->lc1); | ||
1253 | verbose_printk(KERN_NOTICE " B0 : %08lx L0 : %08lx M0 : %08lx I0 : %08lx\n", | ||
1254 | fp->b0, fp->l0, fp->m0, fp->i0); | ||
1255 | verbose_printk(KERN_NOTICE " B1 : %08lx L1 : %08lx M1 : %08lx I1 : %08lx\n", | ||
1256 | fp->b1, fp->l1, fp->m1, fp->i1); | ||
1257 | verbose_printk(KERN_NOTICE " B2 : %08lx L2 : %08lx M2 : %08lx I2 : %08lx\n", | ||
1258 | fp->b2, fp->l2, fp->m2, fp->i2); | ||
1259 | verbose_printk(KERN_NOTICE " B3 : %08lx L3 : %08lx M3 : %08lx I3 : %08lx\n", | ||
1260 | fp->b3, fp->l3, fp->m3, fp->i3); | ||
1261 | verbose_printk(KERN_NOTICE "A0.w: %08lx A0.x: %08lx A1.w: %08lx A1.x: %08lx\n", | ||
1262 | fp->a0w, fp->a0x, fp->a1w, fp->a1x); | ||
1263 | |||
1264 | verbose_printk(KERN_NOTICE "USP : %08lx ASTAT: %08lx\n", | ||
1265 | rdusp(), fp->astat); | ||
1266 | |||
1267 | verbose_printk(KERN_NOTICE "\n"); | ||
1268 | #endif | ||
1269 | } | ||
1270 | |||
1271 | #ifdef CONFIG_SYS_BFIN_SPINLOCK_L1 | ||
1272 | asmlinkage int sys_bfin_spinlock(int *spinlock)__attribute__((l1_text)); | ||
1273 | #endif | 516 | #endif |
517 | panic("Double Fault - unrecoverable event"); | ||
1274 | 518 | ||
1275 | static DEFINE_SPINLOCK(bfin_spinlock_lock); | ||
1276 | |||
1277 | asmlinkage int sys_bfin_spinlock(int *p) | ||
1278 | { | ||
1279 | int ret, tmp = 0; | ||
1280 | |||
1281 | spin_lock(&bfin_spinlock_lock); /* This would also hold kernel preemption. */ | ||
1282 | ret = get_user(tmp, p); | ||
1283 | if (likely(ret == 0)) { | ||
1284 | if (unlikely(tmp)) | ||
1285 | ret = 1; | ||
1286 | else | ||
1287 | put_user(1, p); | ||
1288 | } | ||
1289 | spin_unlock(&bfin_spinlock_lock); | ||
1290 | return ret; | ||
1291 | } | 519 | } |
1292 | 520 | ||
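
sys_bfin_spinlock() above is a kernel-assisted test-and-set: it reads the user's lock word under a kernel spinlock, returns non-zero if it was already set (or if the access faulted), and otherwise writes 1 and returns 0. A hypothetical user-space sketch of how such a primitive is typically wrapped; bfin_testset() stands in for whatever wrapper a C library would provide around the syscall, so the name is an assumption, not something this patch defines:

	/* Spin until the kernel-side test-and-set reports the lock as free. */
	static volatile int lock_word;

	extern int bfin_testset(int *p);	/* assumed wrapper: 0 = acquired, non-zero = already held */

	static void my_lock(void)
	{
		while (bfin_testset((int *)&lock_word) != 0)
			;	/* spin until the holder stores 0 back */
	}

	static void my_unlock(void)
	{
		lock_word = 0;	/* plain store releases the lock */
	}
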
1293 | int bfin_request_exception(unsigned int exception, void (*handler)(void)) | ||
1294 | { | ||
1295 | void (*curr_handler)(void); | ||
1296 | |||
1297 | if (exception > 0x3F) | ||
1298 | return -EINVAL; | ||
1299 | |||
1300 | curr_handler = ex_table[exception]; | ||
1301 | |||
1302 | if (curr_handler != ex_replaceable) | ||
1303 | return -EBUSY; | ||
1304 | |||
1305 | ex_table[exception] = handler; | ||
1306 | |||
1307 | return 0; | ||
1308 | } | ||
1309 | EXPORT_SYMBOL(bfin_request_exception); | ||
1310 | |||
1311 | int bfin_free_exception(unsigned int exception, void (*handler)(void)) | ||
1312 | { | ||
1313 | void (*curr_handler)(void); | ||
1314 | |||
1315 | if (exception > 0x3F) | ||
1316 | return -EINVAL; | ||
1317 | |||
1318 | curr_handler = ex_table[exception]; | ||
1319 | |||
1320 | if (curr_handler != handler) | ||
1321 | return -EBUSY; | ||
1322 | |||
1323 | ex_table[exception] = ex_replaceable; | ||
1324 | |||
1325 | return 0; | ||
1326 | } | ||
1327 | EXPORT_SYMBOL(bfin_free_exception); | ||
1328 | 521 | ||
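
bfin_request_exception()/bfin_free_exception() above let other kernel code claim one of the software-exception vectors, but only while the slot still holds the ex_replaceable placeholder. A hypothetical driver-side sketch of the pairing; the vector number 0x0e and the handler name are illustrative only, not taken from the patch:

	extern void my_excpt_handler(void);	/* low-level handler, typically written in assembly */

	static int my_driver_init(void)
	{
		int ret;

		ret = bfin_request_exception(0x0e, my_excpt_handler);
		if (ret)	/* -EINVAL for a vector > 0x3F, -EBUSY if already taken */
			return ret;
		return 0;
	}

	static void my_driver_exit(void)
	{
		/* Passing the same handler back proves this caller owns the slot. */
		bfin_free_exception(0x0e, my_excpt_handler);
	}
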
1329 | void panic_cplb_error(int cplb_panic, struct pt_regs *fp) | 522 | void panic_cplb_error(int cplb_panic, struct pt_regs *fp) |
1330 | { | 523 | { |
@@ -1349,3 +542,15 @@ void panic_cplb_error(int cplb_panic, struct pt_regs *fp) | |||
1349 | dump_stack(); | 542 | dump_stack(); |
1350 | panic("Unrecoverable event"); | 543 | panic("Unrecoverable event"); |
1351 | } | 544 | } |
545 | |||
546 | #ifdef CONFIG_BUG | ||
547 | int is_valid_bugaddr(unsigned long addr) | ||
548 | { | ||
549 | unsigned short opcode; | ||
550 | |||
551 | if (!get_instruction(&opcode, (unsigned short *)addr)) | ||
552 | return 0; | ||
553 | |||
554 | return opcode == BFIN_BUG_OPCODE; | ||
555 | } | ||
556 | #endif | ||
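
The is_valid_bugaddr() helper added above is the architecture hook that the generic BUG() machinery queries when a kernel-mode illegal-opcode trap might really be a deliberate BUG() site. A rough sketch of how a trap handler is expected to consume it (simplified and not part of this patch; the real dispatch, and the way the kernel dies afterwards, live in the trap code rather than here):

	#include <linux/bug.h>
	#include <asm/ptrace.h>

	/* Returns non-zero if the faulting opcode was a BUG()/WARN() site and
	 * has been reported through the generic machinery. */
	static int maybe_report_bug(struct pt_regs *fp)
	{
		if (!is_valid_bugaddr(fp->pc))
			return 0;			/* ordinary illegal instruction */

		return report_bug(fp->pc, fp) != BUG_TRAP_TYPE_NONE;
	}
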