path: root/arch/blackfin/mach-common/entry.S
Diffstat (limited to 'arch/blackfin/mach-common/entry.S')
-rw-r--r--  arch/blackfin/mach-common/entry.S  |  90
1 file changed, 51 insertions(+), 39 deletions(-)
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index 21e65a339a22..31fa313e81cf 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -36,13 +36,13 @@
 #include <linux/init.h>
 #include <linux/linkage.h>
 #include <linux/unistd.h>
-#include <linux/threads.h>
 #include <asm/blackfin.h>
 #include <asm/errno.h>
 #include <asm/fixed_code.h>
 #include <asm/thread_info.h>	/* TIF_NEED_RESCHED */
 #include <asm/asm-offsets.h>
 #include <asm/trace.h>
+#include <asm/traps.h>
 
 #include <asm/context.S>
 
@@ -85,13 +85,15 @@ ENTRY(_ex_workaround_261)
 	if !cc jump _bfin_return_from_exception;
 	/* fall through */
 	R7 = P4;
-	R6 = 0x26;	/* Data CPLB Miss */
+	R6 = VEC_CPLB_M;	/* Data CPLB Miss */
 	cc = R6 == R7;
 	if cc jump _ex_dcplb_miss (BP);
-	R6 = 0x23;	/* Data CPLB Miss */
+#ifdef CONFIG_MPU
+	R6 = VEC_CPLB_VL;	/* Data CPLB Violation */
 	cc = R6 == R7;
 	if cc jump _ex_dcplb_viol (BP);
-	/* Handle 0x23 Data CPLB Protection Violation
+#endif
+	/* Handle Data CPLB Protection Violation
 	 * and Data CPLB Multiple Hits - Linux Trap Zero
 	 */
 	jump _ex_trap_c;
@@ -201,7 +203,18 @@ ENTRY(_ex_single_step)
 	cc = r7 == 0;
 	if !cc jump 1f;
 #endif
-
+#ifdef CONFIG_EXACT_HWERR
+	/* Read the ILAT, and to check to see if the process we are
+	 * single stepping caused a previous hardware error
+	 * If so, do not single step, (which lowers to IRQ5, and makes
+	 * us miss the error).
+	 */
+	p5.l = lo(ILAT);
+	p5.h = hi(ILAT);
+	r7 = [p5];
+	cc = bittst(r7, EVT_IVHW_P);
+	if cc jump 1f;
+#endif
 	/* Single stepping only a single instruction, so clear the trace
 	 * bit here. */
 	r7 = syscfg;
@@ -260,16 +273,7 @@ ENTRY(_bfin_return_from_exception)
 	r6.l = lo(SEQSTAT_EXCAUSE);
 	r6.h = hi(SEQSTAT_EXCAUSE);
 	r7 = r7 & r6;
-	r6 = 0x25;
-	CC = R7 == R6;
-	if CC JUMP _double_fault;
-
-	/* Did we cause a HW error? */
-	p5.l = lo(ILAT);
-	p5.h = hi(ILAT);
-	r6 = [p5];
-	r7 = 0x20;	/* Did I just cause anther HW error? */
-	r6 = r7 & r6;
+	r6 = VEC_UNCOV;
 	CC = R7 == R6;
 	if CC JUMP _double_fault;
 #endif
@@ -473,6 +477,16 @@ ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/
 	[--sp] = ASTAT;
 	[--sp] = (R7:6,P5:4);
 
+#ifdef CONFIG_EXACT_HWERR
+	/* Make sure all pending read/writes complete. This will ensure any
+	 * accesses which could cause hardware errors completes, and signal
+	 * the the hardware before we do something silly, like crash the
+	 * kernel. We don't need to work around anomaly 05000312, since
+	 * we are already atomic
+	 */
+	ssync;
+#endif
+
 #if ANOMALY_05000283 || ANOMALY_05000315
 	cc = r7 == r7;
 	p5.h = HI(CHIPID);
@@ -855,7 +869,7 @@ ENTRY(_ret_from_exception)
 	p1.h = _schedule_and_signal;
 	[p0] = p1;
 	csync;
-	raise 15;	/* raise evt14 to do signal or reschedule */
+	raise 15;	/* raise evt15 to do signal or reschedule */
 4:
 	r0 = syscfg;
 	bitclr(r0, 0);
@@ -916,7 +930,7 @@ ENTRY(_return_from_int)
 	p1.h = _schedule_and_signal_from_int;
 	[p0] = p1;
 	csync;
-#if ANOMALY_05000281
+#if ANOMALY_05000281 || ANOMALY_05000461
 	r0.l = lo(SAFE_USER_INSTRUCTION);
 	r0.h = hi(SAFE_USER_INSTRUCTION);
 	reti = r0;
@@ -930,18 +944,27 @@ ENTRY(_return_from_int)
 ENDPROC(_return_from_int)
 
 ENTRY(_lower_to_irq14)
-#if ANOMALY_05000281
+#if ANOMALY_05000281 || ANOMALY_05000461
 	r0.l = lo(SAFE_USER_INSTRUCTION);
 	r0.h = hi(SAFE_USER_INSTRUCTION);
 	reti = r0;
 #endif
-	r0 = 0x401f;
+
+#ifdef CONFIG_DEBUG_HWERR
+	/* enable irq14 & hwerr interrupt, until we transition to _evt14_softirq */
+	r0 = (EVT_IVG14 | EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
+#else
+	/* Only enable irq14 interrupt, until we transition to _evt14_softirq */
+	r0 = (EVT_IVG14 | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
+#endif
 	sti r0;
 	raise 14;
 	rti;
+ENDPROC(_lower_to_irq14)
+
 ENTRY(_evt14_softirq)
 #ifdef CONFIG_DEBUG_HWERR
-	r0 = 0x3f;
+	r0 = (EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
 	sti r0;
 #else
 	cli r0;
@@ -949,8 +972,9 @@ ENTRY(_evt14_softirq)
 	[--sp] = RETI;
 	SP += 4;
 	rts;
+ENDPROC(_evt14_softirq)
 
-_schedule_and_signal_from_int:
+ENTRY(_schedule_and_signal_from_int)
 	/* To end up here, vector 15 was changed - so we have to change it
 	 * back.
 	 */
@@ -983,8 +1007,9 @@ _schedule_and_signal_from_int:
 	call _finish_atomic_sections;
 	sp += 12;
 	jump.s .Lresume_userspace;
+ENDPROC(_schedule_and_signal_from_int)
 
-_schedule_and_signal:
+ENTRY(_schedule_and_signal)
 	SAVE_CONTEXT_SYSCALL
 	/* To end up here, vector 15 was changed - so we have to change it
 	 * back.
@@ -1002,7 +1027,7 @@ _schedule_and_signal:
 1:
 	RESTORE_CONTEXT
 	rti;
-ENDPROC(_lower_to_irq14)
+ENDPROC(_schedule_and_signal)
 
 /* We handle this 100% in exception space - to reduce overhead
  * Only potiential problem is if the software buffer gets swapped out of the
@@ -1581,24 +1606,11 @@ ENTRY(_sys_call_table)
 	.long _sys_dup3
 	.long _sys_pipe2
 	.long _sys_inotify_init1	/* 365 */
+	.long _sys_preadv
+	.long _sys_pwritev
+	.long _sys_rt_tgsigqueueinfo
 
 	.rept NR_syscalls-(.-_sys_call_table)/4
 	.long _sys_ni_syscall
 	.endr
 END(_sys_call_table)
-
-#ifdef CONFIG_EXCEPTION_L1_SCRATCH
-/* .section .l1.bss.scratch */
-.set _exception_stack_top, L1_SCRATCH_START + L1_SCRATCH_LENGTH
-#else
-#ifdef CONFIG_SYSCALL_TAB_L1
-.section .l1.bss
-#else
-.bss
-#endif
-ENTRY(_exception_stack)
-	.rept 1024 * NR_CPUS
-	.long 0
-	.endr
-_exception_stack_top:
-#endif