path: root/arch/blackfin/kernel/traps.c
author	Robin Getz <robin.getz@analog.com>	2010-03-11 11:24:18 -0500
committer	Mike Frysinger <vapier@gentoo.org>	2010-05-21 09:40:17 -0400
commit	2a12c4632db1c0c548a7023e63869b27c7789a92 (patch)
tree	518ec2b9379886d5fe7301cf3d5eed959f0452ca /arch/blackfin/kernel/traps.c
parent	bb84dbf69b0730fcc78c275f900ed74b2b8453a5 (diff)
Blackfin: split kernel/traps.c
The current kernel/traps.c file has grown a bit unwieldy as more debugging functionality has been added over time, so split it up into more logical files. There should be no functional changes here, just minor whitespace tweaking. This should make future extensions easier to manage.

Signed-off-by: Robin Getz <robin.getz@analog.com>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
Diffstat (limited to 'arch/blackfin/kernel/traps.c')
-rw-r--r--	arch/blackfin/kernel/traps.c	875
1 file changed, 40 insertions, 835 deletions
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index ba70c4bc2699..891cc39f7eec 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -1,20 +1,16 @@
1/* 1/*
2 * Copyright 2004-2009 Analog Devices Inc. 2 * Main exception handling logic.
3 *
4 * Copyright 2004-2010 Analog Devices Inc.
3 * 5 *
4 * Licensed under the GPL-2 or later 6 * Licensed under the GPL-2 or later
5 */ 7 */
6 8
7#include <linux/bug.h> 9#include <linux/bug.h>
8#include <linux/uaccess.h> 10#include <linux/uaccess.h>
9#include <linux/interrupt.h>
10#include <linux/module.h> 11#include <linux/module.h>
11#include <linux/kallsyms.h>
12#include <linux/fs.h>
13#include <linux/rbtree.h>
14#include <asm/traps.h> 12#include <asm/traps.h>
15#include <asm/cacheflush.h>
16#include <asm/cplb.h> 13#include <asm/cplb.h>
17#include <asm/dma.h>
18#include <asm/blackfin.h> 14#include <asm/blackfin.h>
19#include <asm/irq_handler.h> 15#include <asm/irq_handler.h>
20#include <linux/irq.h> 16#include <linux/irq.h>
@@ -62,194 +58,6 @@ void __init trap_init(void)
62 CSYNC(); 58 CSYNC();
63} 59}
64 60
65static void decode_address(char *buf, unsigned long address)
66{
67#ifdef CONFIG_DEBUG_VERBOSE
68 struct task_struct *p;
69 struct mm_struct *mm;
70 unsigned long flags, offset;
71 unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
72 struct rb_node *n;
73
74#ifdef CONFIG_KALLSYMS
75 unsigned long symsize;
76 const char *symname;
77 char *modname;
78 char *delim = ":";
79 char namebuf[128];
80#endif
81
82 buf += sprintf(buf, "<0x%08lx> ", address);
83
84#ifdef CONFIG_KALLSYMS
85 /* look up the address and see if we are in kernel space */
86 symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf);
87
88 if (symname) {
89 /* yeah! kernel space! */
90 if (!modname)
91 modname = delim = "";
92 sprintf(buf, "{ %s%s%s%s + 0x%lx }",
93 delim, modname, delim, symname,
94 (unsigned long)offset);
95 return;
96 }
97#endif
98
99 if (address >= FIXED_CODE_START && address < FIXED_CODE_END) {
100 /* Problem in fixed code section? */
101 strcat(buf, "/* Maybe fixed code section */");
102 return;
103
104 } else if (address < CONFIG_BOOT_LOAD) {
105 /* Problem somewhere before the kernel start address */
106 strcat(buf, "/* Maybe null pointer? */");
107 return;
108
109 } else if (address >= COREMMR_BASE) {
110 strcat(buf, "/* core mmrs */");
111 return;
112
113 } else if (address >= SYSMMR_BASE) {
114 strcat(buf, "/* system mmrs */");
115 return;
116
117 } else if (address >= L1_ROM_START && address < L1_ROM_START + L1_ROM_LENGTH) {
118 strcat(buf, "/* on-chip L1 ROM */");
119 return;
120 }
121
122 /*
123 * Don't walk any of the vmas if we are oopsing, it has been known
124 * to cause problems - corrupt vmas (kernel crashes) cause double faults
125 */
126 if (oops_in_progress) {
127 strcat(buf, "/* kernel dynamic memory (maybe user-space) */");
128 return;
129 }
130
131 /* looks like we're off in user-land, so let's walk all the
132 * mappings of all our processes and see if we can't be a whee
133 * bit more specific
134 */
135 write_lock_irqsave(&tasklist_lock, flags);
136 for_each_process(p) {
137 mm = (in_atomic ? p->mm : get_task_mm(p));
138 if (!mm)
139 continue;
140
141 if (!down_read_trylock(&mm->mmap_sem)) {
142 if (!in_atomic)
143 mmput(mm);
144 continue;
145 }
146
147 for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
148 struct vm_area_struct *vma;
149
150 vma = rb_entry(n, struct vm_area_struct, vm_rb);
151
152 if (address >= vma->vm_start && address < vma->vm_end) {
153 char _tmpbuf[256];
154 char *name = p->comm;
155 struct file *file = vma->vm_file;
156
157 if (file) {
158 char *d_name = d_path(&file->f_path, _tmpbuf,
159 sizeof(_tmpbuf));
160 if (!IS_ERR(d_name))
161 name = d_name;
162 }
163
164 /* FLAT does not have its text aligned to the start of
165 * the map while FDPIC ELF does ...
166 */
167
168 /* before we can check flat/fdpic, we need to
169 * make sure current is valid
170 */
171 if ((unsigned long)current >= FIXED_CODE_START &&
172 !((unsigned long)current & 0x3)) {
173 if (current->mm &&
174 (address > current->mm->start_code) &&
175 (address < current->mm->end_code))
176 offset = address - current->mm->start_code;
177 else
178 offset = (address - vma->vm_start) +
179 (vma->vm_pgoff << PAGE_SHIFT);
180
181 sprintf(buf, "[ %s + 0x%lx ]", name, offset);
182 } else
183 sprintf(buf, "[ %s vma:0x%lx-0x%lx]",
184 name, vma->vm_start, vma->vm_end);
185
186 up_read(&mm->mmap_sem);
187 if (!in_atomic)
188 mmput(mm);
189
190 if (buf[0] == '\0')
191 sprintf(buf, "[ %s ] dynamic memory", name);
192
193 goto done;
194 }
195 }
196
197 up_read(&mm->mmap_sem);
198 if (!in_atomic)
199 mmput(mm);
200 }
201
202 /*
203 * we were unable to find this address anywhere,
204 * or some MMs were skipped because they were in use.
205 */
206 sprintf(buf, "/* kernel dynamic memory */");
207
208done:
209 write_unlock_irqrestore(&tasklist_lock, flags);
210#else
211 sprintf(buf, " ");
212#endif
213}
214
215asmlinkage void double_fault_c(struct pt_regs *fp)
216{
217#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
218 int j;
219 trace_buffer_save(j);
220#endif
221
222 console_verbose();
223 oops_in_progress = 1;
224#ifdef CONFIG_DEBUG_VERBOSE
225 printk(KERN_EMERG "Double Fault\n");
226#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
227 if (((long)fp->seqstat & SEQSTAT_EXCAUSE) == VEC_UNCOV) {
228 unsigned int cpu = raw_smp_processor_id();
229 char buf[150];
230 decode_address(buf, cpu_pda[cpu].retx_doublefault);
231 printk(KERN_EMERG "While handling exception (EXCAUSE = 0x%x) at %s:\n",
232 (unsigned int)cpu_pda[cpu].seqstat_doublefault & SEQSTAT_EXCAUSE, buf);
233 decode_address(buf, cpu_pda[cpu].dcplb_doublefault_addr);
234 printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %s\n", buf);
235 decode_address(buf, cpu_pda[cpu].icplb_doublefault_addr);
236 printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %s\n", buf);
237
238 decode_address(buf, fp->retx);
239 printk(KERN_NOTICE "The instruction at %s caused a double exception\n", buf);
240 } else
241#endif
242 {
243 dump_bfin_process(fp);
244 dump_bfin_mem(fp);
245 show_regs(fp);
246 dump_bfin_trace_buffer();
247 }
248#endif
249 panic("Double Fault - unrecoverable event");
250
251}
252
253static int kernel_mode_regs(struct pt_regs *regs) 61static int kernel_mode_regs(struct pt_regs *regs)
254{ 62{
255 return regs->ipend & 0xffc0; 63 return regs->ipend & 0xffc0;
@@ -672,659 +480,44 @@ asmlinkage notrace void trap_c(struct pt_regs *fp)
672 trace_buffer_restore(j); 480 trace_buffer_restore(j);
673} 481}
674 482
675/* Typical exception handling routines */ 483asmlinkage void double_fault_c(struct pt_regs *fp)
676
677#define EXPAND_LEN ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 - 1)
678
679/*
680 * Similar to get_user, do some address checking, then dereference
681 * Return true on success, false on bad address
682 */
683static bool get_instruction(unsigned short *val, unsigned short *address)
684{
685 unsigned long addr = (unsigned long)address;
686
687 /* Check for odd addresses */
688 if (addr & 0x1)
689 return false;
690
691 /* MMR region will never have instructions */
692 if (addr >= SYSMMR_BASE)
693 return false;
694
695 switch (bfin_mem_access_type(addr, 2)) {
696 case BFIN_MEM_ACCESS_CORE:
697 case BFIN_MEM_ACCESS_CORE_ONLY:
698 *val = *address;
699 return true;
700 case BFIN_MEM_ACCESS_DMA:
701 dma_memcpy(val, address, 2);
702 return true;
703 case BFIN_MEM_ACCESS_ITEST:
704 isram_memcpy(val, address, 2);
705 return true;
706 default: /* invalid access */
707 return false;
708 }
709}
710
711/*
712 * decode the instruction if we are printing out the trace, as it
713 * makes things easier to follow, without running it through objdump
714 * These are the normal instructions which cause change of flow, which
715 * would be at the source of the trace buffer
716 */
717#if defined(CONFIG_DEBUG_VERBOSE) && defined(CONFIG_DEBUG_BFIN_HWTRACE_ON)
718static void decode_instruction(unsigned short *address)
719{
720 unsigned short opcode;
721
722 if (get_instruction(&opcode, address)) {
723 if (opcode == 0x0010)
724 verbose_printk("RTS");
725 else if (opcode == 0x0011)
726 verbose_printk("RTI");
727 else if (opcode == 0x0012)
728 verbose_printk("RTX");
729 else if (opcode == 0x0013)
730 verbose_printk("RTN");
731 else if (opcode == 0x0014)
732 verbose_printk("RTE");
733 else if (opcode == 0x0025)
734 verbose_printk("EMUEXCPT");
735 else if (opcode >= 0x0040 && opcode <= 0x0047)
736 verbose_printk("STI R%i", opcode & 7);
737 else if (opcode >= 0x0050 && opcode <= 0x0057)
738 verbose_printk("JUMP (P%i)", opcode & 7);
739 else if (opcode >= 0x0060 && opcode <= 0x0067)
740 verbose_printk("CALL (P%i)", opcode & 7);
741 else if (opcode >= 0x0070 && opcode <= 0x0077)
742 verbose_printk("CALL (PC+P%i)", opcode & 7);
743 else if (opcode >= 0x0080 && opcode <= 0x0087)
744 verbose_printk("JUMP (PC+P%i)", opcode & 7);
745 else if (opcode >= 0x0090 && opcode <= 0x009F)
746 verbose_printk("RAISE 0x%x", opcode & 0xF);
747 else if (opcode >= 0x00A0 && opcode <= 0x00AF)
748 verbose_printk("EXCPT 0x%x", opcode & 0xF);
749 else if ((opcode >= 0x1000 && opcode <= 0x13FF) || (opcode >= 0x1800 && opcode <= 0x1BFF))
750 verbose_printk("IF !CC JUMP");
751 else if ((opcode >= 0x1400 && opcode <= 0x17ff) || (opcode >= 0x1c00 && opcode <= 0x1fff))
752 verbose_printk("IF CC JUMP");
753 else if (opcode >= 0x2000 && opcode <= 0x2fff)
754 verbose_printk("JUMP.S");
755 else if (opcode >= 0xe080 && opcode <= 0xe0ff)
756 verbose_printk("LSETUP");
757 else if (opcode >= 0xe200 && opcode <= 0xe2ff)
758 verbose_printk("JUMP.L");
759 else if (opcode >= 0xe300 && opcode <= 0xe3ff)
760 verbose_printk("CALL pcrel");
761 else
762 verbose_printk("0x%04x", opcode);
763 }
764
765}
766#endif
767
768void dump_bfin_trace_buffer(void)
769{
770#ifdef CONFIG_DEBUG_VERBOSE
771#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
772 int tflags, i = 0;
773 char buf[150];
774 unsigned short *addr;
775#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
776 int j, index;
777#endif
778
779 trace_buffer_save(tflags);
780
781 printk(KERN_NOTICE "Hardware Trace:\n");
782
783#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
784 printk(KERN_NOTICE "WARNING: Expanded trace turned on - can not trace exceptions\n");
785#endif
786
787 if (likely(bfin_read_TBUFSTAT() & TBUFCNT)) {
788 for (; bfin_read_TBUFSTAT() & TBUFCNT; i++) {
789 decode_address(buf, (unsigned long)bfin_read_TBUF());
790 printk(KERN_NOTICE "%4i Target : %s\n", i, buf);
791 addr = (unsigned short *)bfin_read_TBUF();
792 decode_address(buf, (unsigned long)addr);
793 printk(KERN_NOTICE " Source : %s ", buf);
794 decode_instruction(addr);
795 printk("\n");
796 }
797 }
798
799#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
800 if (trace_buff_offset)
801 index = trace_buff_offset / 4;
802 else
803 index = EXPAND_LEN;
804
805 j = (1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 128;
806 while (j) {
807 decode_address(buf, software_trace_buff[index]);
808 printk(KERN_NOTICE "%4i Target : %s\n", i, buf);
809 index -= 1;
810 if (index < 0 )
811 index = EXPAND_LEN;
812 decode_address(buf, software_trace_buff[index]);
813 printk(KERN_NOTICE " Source : %s ", buf);
814 decode_instruction((unsigned short *)software_trace_buff[index]);
815 printk("\n");
816 index -= 1;
817 if (index < 0)
818 index = EXPAND_LEN;
819 j--;
820 i++;
821 }
822#endif
823
824 trace_buffer_restore(tflags);
825#endif
826#endif
827}
828EXPORT_SYMBOL(dump_bfin_trace_buffer);
829
830#ifdef CONFIG_BUG
831int is_valid_bugaddr(unsigned long addr)
832{
833 unsigned short opcode;
834
835 if (!get_instruction(&opcode, (unsigned short *)addr))
836 return 0;
837
838 return opcode == BFIN_BUG_OPCODE;
839}
840#endif
841
842/*
843 * Checks to see if the address pointed to is either a
844 * 16-bit CALL instruction, or a 32-bit CALL instruction
845 */
846static bool is_bfin_call(unsigned short *addr)
847{
848 unsigned short opcode = 0, *ins_addr;
849 ins_addr = (unsigned short *)addr;
850
851 if (!get_instruction(&opcode, ins_addr))
852 return false;
853
854 if ((opcode >= 0x0060 && opcode <= 0x0067) ||
855 (opcode >= 0x0070 && opcode <= 0x0077))
856 return true;
857
858 ins_addr--;
859 if (!get_instruction(&opcode, ins_addr))
860 return false;
861
862 if (opcode >= 0xE300 && opcode <= 0xE3FF)
863 return true;
864
865 return false;
866
867}
868
869void show_stack(struct task_struct *task, unsigned long *stack)
870{
871#ifdef CONFIG_PRINTK
872 unsigned int *addr, *endstack, *fp = 0, *frame;
873 unsigned short *ins_addr;
874 char buf[150];
875 unsigned int i, j, ret_addr, frame_no = 0;
876
877 /*
878 * If we have been passed a specific stack, use that one otherwise
879 * if we have been passed a task structure, use that, otherwise
880 * use the stack of where the variable "stack" exists
881 */
882
883 if (stack == NULL) {
884 if (task) {
885 /* We know this is a kernel stack, so this is the start/end */
886 stack = (unsigned long *)task->thread.ksp;
887 endstack = (unsigned int *)(((unsigned int)(stack) & ~(THREAD_SIZE - 1)) + THREAD_SIZE);
888 } else {
889 /* print out the existing stack info */
890 stack = (unsigned long *)&stack;
891 endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack);
892 }
893 } else
894 endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack);
895
896 printk(KERN_NOTICE "Stack info:\n");
897 decode_address(buf, (unsigned int)stack);
898 printk(KERN_NOTICE " SP: [0x%p] %s\n", stack, buf);
899
900 if (!access_ok(VERIFY_READ, stack, (unsigned int)endstack - (unsigned int)stack)) {
901 printk(KERN_NOTICE "Invalid stack pointer\n");
902 return;
903 }
904
905 /* First thing is to look for a frame pointer */
906 for (addr = (unsigned int *)((unsigned int)stack & ~0xF); addr < endstack; addr++) {
907 if (*addr & 0x1)
908 continue;
909 ins_addr = (unsigned short *)*addr;
910 ins_addr--;
911 if (is_bfin_call(ins_addr))
912 fp = addr - 1;
913
914 if (fp) {
915 /* Let's check to see if it is a frame pointer */
916 while (fp >= (addr - 1) && fp < endstack
917 && fp && ((unsigned int) fp & 0x3) == 0)
918 fp = (unsigned int *)*fp;
919 if (fp == 0 || fp == endstack) {
920 fp = addr - 1;
921 break;
922 }
923 fp = 0;
924 }
925 }
926 if (fp) {
927 frame = fp;
928 printk(KERN_NOTICE " FP: (0x%p)\n", fp);
929 } else
930 frame = 0;
931
932 /*
933 * Now that we think we know where things are, we
934 * walk the stack again, this time printing things out
935 * incase there is no frame pointer, we still look for
936 * valid return addresses
937 */
938
939 /* First time print out data, next time, print out symbols */
940 for (j = 0; j <= 1; j++) {
941 if (j)
942 printk(KERN_NOTICE "Return addresses in stack:\n");
943 else
944 printk(KERN_NOTICE " Memory from 0x%08lx to %p", ((long unsigned int)stack & ~0xF), endstack);
945
946 fp = frame;
947 frame_no = 0;
948
949 for (addr = (unsigned int *)((unsigned int)stack & ~0xF), i = 0;
950 addr < endstack; addr++, i++) {
951
952 ret_addr = 0;
953 if (!j && i % 8 == 0)
954 printk(KERN_NOTICE "%p:",addr);
955
956 /* if it is an odd address, or zero, just skip it */
957 if (*addr & 0x1 || !*addr)
958 goto print;
959
960 ins_addr = (unsigned short *)*addr;
961
962 /* Go back one instruction, and see if it is a CALL */
963 ins_addr--;
964 ret_addr = is_bfin_call(ins_addr);
965 print:
966 if (!j && stack == (unsigned long *)addr)
967 printk("[%08x]", *addr);
968 else if (ret_addr)
969 if (j) {
970 decode_address(buf, (unsigned int)*addr);
971 if (frame == addr) {
972 printk(KERN_NOTICE " frame %2i : %s\n", frame_no, buf);
973 continue;
974 }
975 printk(KERN_NOTICE " address : %s\n", buf);
976 } else
977 printk("<%08x>", *addr);
978 else if (fp == addr) {
979 if (j)
980 frame = addr+1;
981 else
982 printk("(%08x)", *addr);
983
984 fp = (unsigned int *)*addr;
985 frame_no++;
986
987 } else if (!j)
988 printk(" %08x ", *addr);
989 }
990 if (!j)
991 printk("\n");
992 }
993#endif
994}
995EXPORT_SYMBOL(show_stack);
996
997void dump_stack(void)
998{ 484{
999 unsigned long stack;
1000#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON 485#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
1001 int tflags; 486 int j;
487 trace_buffer_save(j);
1002#endif 488#endif
1003 trace_buffer_save(tflags);
1004 dump_bfin_trace_buffer();
1005 show_stack(current, &stack);
1006 trace_buffer_restore(tflags);
1007}
1008EXPORT_SYMBOL(dump_stack);
1009 489
1010void dump_bfin_process(struct pt_regs *fp) 490 console_verbose();
1011{ 491 oops_in_progress = 1;
1012#ifdef CONFIG_DEBUG_VERBOSE 492#ifdef CONFIG_DEBUG_VERBOSE
1013 /* We should be able to look at fp->ipend, but we don't push it on the 493 printk(KERN_EMERG "Double Fault\n");
1014 * stack all the time, so do this until we fix that */ 494#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
1015 unsigned int context = bfin_read_IPEND(); 495 if (((long)fp->seqstat & SEQSTAT_EXCAUSE) == VEC_UNCOV) {
1016 496 unsigned int cpu = raw_smp_processor_id();
1017 if (oops_in_progress) 497 char buf[150];
1018 verbose_printk(KERN_EMERG "Kernel OOPS in progress\n"); 498 decode_address(buf, cpu_pda[cpu].retx_doublefault);
1019 499 printk(KERN_EMERG "While handling exception (EXCAUSE = 0x%x) at %s:\n",
1020 if (context & 0x0020 && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR) 500 (unsigned int)cpu_pda[cpu].seqstat_doublefault & SEQSTAT_EXCAUSE, buf);
1021 verbose_printk(KERN_NOTICE "HW Error context\n"); 501 decode_address(buf, cpu_pda[cpu].dcplb_doublefault_addr);
1022 else if (context & 0x0020) 502 printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %s\n", buf);
1023 verbose_printk(KERN_NOTICE "Deferred Exception context\n"); 503 decode_address(buf, cpu_pda[cpu].icplb_doublefault_addr);
1024 else if (context & 0x3FC0) 504 printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %s\n", buf);
1025 verbose_printk(KERN_NOTICE "Interrupt context\n");
1026 else if (context & 0x4000)
1027 verbose_printk(KERN_NOTICE "Deferred Interrupt context\n");
1028 else if (context & 0x8000)
1029 verbose_printk(KERN_NOTICE "Kernel process context\n");
1030
1031 /* Because we are crashing, and pointers could be bad, we check things
1032 * pretty closely before we use them
1033 */
1034 if ((unsigned long)current >= FIXED_CODE_START &&
1035 !((unsigned long)current & 0x3) && current->pid) {
1036 verbose_printk(KERN_NOTICE "CURRENT PROCESS:\n");
1037 if (current->comm >= (char *)FIXED_CODE_START)
1038 verbose_printk(KERN_NOTICE "COMM=%s PID=%d",
1039 current->comm, current->pid);
1040 else
1041 verbose_printk(KERN_NOTICE "COMM= invalid");
1042 505
1043 printk(KERN_CONT " CPU=%d\n", current_thread_info()->cpu); 506 decode_address(buf, fp->retx);
1044 if (!((unsigned long)current->mm & 0x3) && (unsigned long)current->mm >= FIXED_CODE_START) 507 printk(KERN_NOTICE "The instruction at %s caused a double exception\n", buf);
1045 verbose_printk(KERN_NOTICE
1046 "TEXT = 0x%p-0x%p DATA = 0x%p-0x%p\n"
1047 " BSS = 0x%p-0x%p USER-STACK = 0x%p\n\n",
1048 (void *)current->mm->start_code,
1049 (void *)current->mm->end_code,
1050 (void *)current->mm->start_data,
1051 (void *)current->mm->end_data,
1052 (void *)current->mm->end_data,
1053 (void *)current->mm->brk,
1054 (void *)current->mm->start_stack);
1055 else
1056 verbose_printk(KERN_NOTICE "invalid mm\n");
1057 } else 508 } else
1058 verbose_printk(KERN_NOTICE
1059 "No Valid process in current context\n");
1060#endif
1061}
1062
1063void dump_bfin_mem(struct pt_regs *fp)
1064{
1065#ifdef CONFIG_DEBUG_VERBOSE
1066 unsigned short *addr, *erraddr, val = 0, err = 0;
1067 char sti = 0, buf[6];
1068
1069 erraddr = (void *)fp->pc;
1070
1071 verbose_printk(KERN_NOTICE "return address: [0x%p]; contents of:", erraddr);
1072
1073 for (addr = (unsigned short *)((unsigned long)erraddr & ~0xF) - 0x10;
1074 addr < (unsigned short *)((unsigned long)erraddr & ~0xF) + 0x10;
1075 addr++) {
1076 if (!((unsigned long)addr & 0xF))
1077 verbose_printk(KERN_NOTICE "0x%p: ", addr);
1078
1079 if (!get_instruction(&val, addr)) {
1080 val = 0;
1081 sprintf(buf, "????");
1082 } else
1083 sprintf(buf, "%04x", val);
1084
1085 if (addr == erraddr) {
1086 verbose_printk("[%s]", buf);
1087 err = val;
1088 } else
1089 verbose_printk(" %s ", buf);
1090
1091 /* Do any previous instructions turn on interrupts? */
1092 if (addr <= erraddr && /* in the past */
1093 ((val >= 0x0040 && val <= 0x0047) || /* STI instruction */
1094 val == 0x017b)) /* [SP++] = RETI */
1095 sti = 1;
1096 }
1097
1098 verbose_printk("\n");
1099
1100 /* Hardware error interrupts can be deferred */
1101 if (unlikely(sti && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR &&
1102 oops_in_progress)){
1103 verbose_printk(KERN_NOTICE "Looks like this was a deferred error - sorry\n");
1104#ifndef CONFIG_DEBUG_HWERR
1105 verbose_printk(KERN_NOTICE
1106"The remaining message may be meaningless\n"
1107"You should enable CONFIG_DEBUG_HWERR to get a better idea where it came from\n");
1108#else
1109 /* If we are handling only one peripheral interrupt
1110 * and current mm and pid are valid, and the last error
1111 * was in that user space process's text area
1112 * print it out - because that is where the problem exists
1113 */
1114 if ((!(((fp)->ipend & ~0x30) & (((fp)->ipend & ~0x30) - 1))) &&
1115 (current->pid && current->mm)) {
1116 /* And the last RETI points to the current userspace context */
1117 if ((fp + 1)->pc >= current->mm->start_code &&
1118 (fp + 1)->pc <= current->mm->end_code) {
1119 verbose_printk(KERN_NOTICE "It might be better to look around here :\n");
1120 verbose_printk(KERN_NOTICE "-------------------------------------------\n");
1121 show_regs(fp + 1);
1122 verbose_printk(KERN_NOTICE "-------------------------------------------\n");
1123 }
1124 }
1125#endif
1126 }
1127#endif
1128}
1129
1130void show_regs(struct pt_regs *fp)
1131{
1132#ifdef CONFIG_DEBUG_VERBOSE
1133 char buf [150];
1134 struct irqaction *action;
1135 unsigned int i;
1136 unsigned long flags = 0;
1137 unsigned int cpu = raw_smp_processor_id();
1138 unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
1139
1140 verbose_printk(KERN_NOTICE "\n");
1141 if (CPUID != bfin_cpuid())
1142 verbose_printk(KERN_NOTICE "Compiled for cpu family 0x%04x (Rev %d), "
1143 "but running on:0x%04x (Rev %d)\n",
1144 CPUID, bfin_compiled_revid(), bfin_cpuid(), bfin_revid());
1145
1146 verbose_printk(KERN_NOTICE "ADSP-%s-0.%d",
1147 CPU, bfin_compiled_revid());
1148
1149 if (bfin_compiled_revid() != bfin_revid())
1150 verbose_printk("(Detected 0.%d)", bfin_revid());
1151
1152 verbose_printk(" %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n",
1153 get_cclk()/1000000, get_sclk()/1000000,
1154#ifdef CONFIG_MPU
1155 "mpu on"
1156#else
1157 "mpu off"
1158#endif 509#endif
1159 ); 510 {
1160 511 dump_bfin_process(fp);
1161 verbose_printk(KERN_NOTICE "%s", linux_banner); 512 dump_bfin_mem(fp);
1162 513 show_regs(fp);
1163 verbose_printk(KERN_NOTICE "\nSEQUENCER STATUS:\t\t%s\n", print_tainted()); 514 dump_bfin_trace_buffer();
1164 verbose_printk(KERN_NOTICE " SEQSTAT: %08lx IPEND: %04lx IMASK: %04lx SYSCFG: %04lx\n",
1165 (long)fp->seqstat, fp->ipend, cpu_pda[raw_smp_processor_id()].ex_imask, fp->syscfg);
1166 if (fp->ipend & EVT_IRPTEN)
1167 verbose_printk(KERN_NOTICE " Global Interrupts Disabled (IPEND[4])\n");
1168 if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG13 | EVT_IVG12 | EVT_IVG11 |
1169 EVT_IVG10 | EVT_IVG9 | EVT_IVG8 | EVT_IVG7 | EVT_IVTMR)))
1170 verbose_printk(KERN_NOTICE " Peripheral interrupts masked off\n");
1171 if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG15 | EVT_IVG14)))
1172 verbose_printk(KERN_NOTICE " Kernel interrupts masked off\n");
1173 if ((fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR) {
1174 verbose_printk(KERN_NOTICE " HWERRCAUSE: 0x%lx\n",
1175 (fp->seqstat & SEQSTAT_HWERRCAUSE) >> 14);
1176#ifdef EBIU_ERRMST
1177 /* If the error was from the EBIU, print it out */
1178 if (bfin_read_EBIU_ERRMST() & CORE_ERROR) {
1179 verbose_printk(KERN_NOTICE " EBIU Error Reason : 0x%04x\n",
1180 bfin_read_EBIU_ERRMST());
1181 verbose_printk(KERN_NOTICE " EBIU Error Address : 0x%08x\n",
1182 bfin_read_EBIU_ERRADD());
1183 }
1184#endif
1185 }
1186 verbose_printk(KERN_NOTICE " EXCAUSE : 0x%lx\n",
1187 fp->seqstat & SEQSTAT_EXCAUSE);
1188 for (i = 2; i <= 15 ; i++) {
1189 if (fp->ipend & (1 << i)) {
1190 if (i != 4) {
1191 decode_address(buf, bfin_read32(EVT0 + 4*i));
1192 verbose_printk(KERN_NOTICE " physical IVG%i asserted : %s\n", i, buf);
1193 } else
1194 verbose_printk(KERN_NOTICE " interrupts disabled\n");
1195 }
1196 }
1197
1198 /* if no interrupts are going off, don't print this out */
1199 if (fp->ipend & ~0x3F) {
1200 for (i = 0; i < (NR_IRQS - 1); i++) {
1201 if (!in_atomic)
1202 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
1203
1204 action = irq_desc[i].action;
1205 if (!action)
1206 goto unlock;
1207
1208 decode_address(buf, (unsigned int)action->handler);
1209 verbose_printk(KERN_NOTICE " logical irq %3d mapped : %s", i, buf);
1210 for (action = action->next; action; action = action->next) {
1211 decode_address(buf, (unsigned int)action->handler);
1212 verbose_printk(", %s", buf);
1213 }
1214 verbose_printk("\n");
1215unlock:
1216 if (!in_atomic)
1217 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
1218 }
1219 }
1220
1221 decode_address(buf, fp->rete);
1222 verbose_printk(KERN_NOTICE " RETE: %s\n", buf);
1223 decode_address(buf, fp->retn);
1224 verbose_printk(KERN_NOTICE " RETN: %s\n", buf);
1225 decode_address(buf, fp->retx);
1226 verbose_printk(KERN_NOTICE " RETX: %s\n", buf);
1227 decode_address(buf, fp->rets);
1228 verbose_printk(KERN_NOTICE " RETS: %s\n", buf);
1229 decode_address(buf, fp->pc);
1230 verbose_printk(KERN_NOTICE " PC : %s\n", buf);
1231
1232 if (((long)fp->seqstat & SEQSTAT_EXCAUSE) &&
1233 (((long)fp->seqstat & SEQSTAT_EXCAUSE) != VEC_HWERR)) {
1234 decode_address(buf, cpu_pda[cpu].dcplb_fault_addr);
1235 verbose_printk(KERN_NOTICE "DCPLB_FAULT_ADDR: %s\n", buf);
1236 decode_address(buf, cpu_pda[cpu].icplb_fault_addr);
1237 verbose_printk(KERN_NOTICE "ICPLB_FAULT_ADDR: %s\n", buf);
1238 } 515 }
1239
1240 verbose_printk(KERN_NOTICE "PROCESSOR STATE:\n");
1241 verbose_printk(KERN_NOTICE " R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n",
1242 fp->r0, fp->r1, fp->r2, fp->r3);
1243 verbose_printk(KERN_NOTICE " R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n",
1244 fp->r4, fp->r5, fp->r6, fp->r7);
1245 verbose_printk(KERN_NOTICE " P0 : %08lx P1 : %08lx P2 : %08lx P3 : %08lx\n",
1246 fp->p0, fp->p1, fp->p2, fp->p3);
1247 verbose_printk(KERN_NOTICE " P4 : %08lx P5 : %08lx FP : %08lx SP : %08lx\n",
1248 fp->p4, fp->p5, fp->fp, (long)fp);
1249 verbose_printk(KERN_NOTICE " LB0: %08lx LT0: %08lx LC0: %08lx\n",
1250 fp->lb0, fp->lt0, fp->lc0);
1251 verbose_printk(KERN_NOTICE " LB1: %08lx LT1: %08lx LC1: %08lx\n",
1252 fp->lb1, fp->lt1, fp->lc1);
1253 verbose_printk(KERN_NOTICE " B0 : %08lx L0 : %08lx M0 : %08lx I0 : %08lx\n",
1254 fp->b0, fp->l0, fp->m0, fp->i0);
1255 verbose_printk(KERN_NOTICE " B1 : %08lx L1 : %08lx M1 : %08lx I1 : %08lx\n",
1256 fp->b1, fp->l1, fp->m1, fp->i1);
1257 verbose_printk(KERN_NOTICE " B2 : %08lx L2 : %08lx M2 : %08lx I2 : %08lx\n",
1258 fp->b2, fp->l2, fp->m2, fp->i2);
1259 verbose_printk(KERN_NOTICE " B3 : %08lx L3 : %08lx M3 : %08lx I3 : %08lx\n",
1260 fp->b3, fp->l3, fp->m3, fp->i3);
1261 verbose_printk(KERN_NOTICE "A0.w: %08lx A0.x: %08lx A1.w: %08lx A1.x: %08lx\n",
1262 fp->a0w, fp->a0x, fp->a1w, fp->a1x);
1263
1264 verbose_printk(KERN_NOTICE "USP : %08lx ASTAT: %08lx\n",
1265 rdusp(), fp->astat);
1266
1267 verbose_printk(KERN_NOTICE "\n");
1268#endif
1269}
1270
1271#ifdef CONFIG_SYS_BFIN_SPINLOCK_L1
1272asmlinkage int sys_bfin_spinlock(int *spinlock)__attribute__((l1_text));
1273#endif 516#endif
517 panic("Double Fault - unrecoverable event");
1274 518
1275static DEFINE_SPINLOCK(bfin_spinlock_lock);
1276
1277asmlinkage int sys_bfin_spinlock(int *p)
1278{
1279 int ret, tmp = 0;
1280
1281 spin_lock(&bfin_spinlock_lock); /* This would also hold kernel preemption. */
1282 ret = get_user(tmp, p);
1283 if (likely(ret == 0)) {
1284 if (unlikely(tmp))
1285 ret = 1;
1286 else
1287 put_user(1, p);
1288 }
1289 spin_unlock(&bfin_spinlock_lock);
1290 return ret;
1291} 519}
1292 520
1293int bfin_request_exception(unsigned int exception, void (*handler)(void))
1294{
1295 void (*curr_handler)(void);
1296
1297 if (exception > 0x3F)
1298 return -EINVAL;
1299
1300 curr_handler = ex_table[exception];
1301
1302 if (curr_handler != ex_replaceable)
1303 return -EBUSY;
1304
1305 ex_table[exception] = handler;
1306
1307 return 0;
1308}
1309EXPORT_SYMBOL(bfin_request_exception);
1310
1311int bfin_free_exception(unsigned int exception, void (*handler)(void))
1312{
1313 void (*curr_handler)(void);
1314
1315 if (exception > 0x3F)
1316 return -EINVAL;
1317
1318 curr_handler = ex_table[exception];
1319
1320 if (curr_handler != handler)
1321 return -EBUSY;
1322
1323 ex_table[exception] = ex_replaceable;
1324
1325 return 0;
1326}
1327EXPORT_SYMBOL(bfin_free_exception);
1328 521
1329void panic_cplb_error(int cplb_panic, struct pt_regs *fp) 522void panic_cplb_error(int cplb_panic, struct pt_regs *fp)
1330{ 523{
@@ -1349,3 +542,15 @@ void panic_cplb_error(int cplb_panic, struct pt_regs *fp)
1349 dump_stack(); 542 dump_stack();
1350 panic("Unrecoverable event"); 543 panic("Unrecoverable event");
1351} 544}
545
546#ifdef CONFIG_BUG
547int is_valid_bugaddr(unsigned long addr)
548{
549 unsigned short opcode;
550
551 if (!get_instruction(&opcode, (unsigned short *)addr))
552 return 0;
553
554 return opcode == BFIN_BUG_OPCODE;
555}
556#endif