Diffstat (limited to 'arch/blackfin/mach-common/entry.S')
 -rw-r--r--  arch/blackfin/mach-common/entry.S | 76
 1 file changed, 41 insertions(+), 35 deletions(-)
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index a063a434f7e3..da0558ad1b1a 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -36,7 +36,6 @@
 #include <linux/init.h>
 #include <linux/linkage.h>
 #include <linux/unistd.h>
-#include <linux/threads.h>
 #include <asm/blackfin.h>
 #include <asm/errno.h>
 #include <asm/fixed_code.h>
@@ -201,7 +200,18 @@ ENTRY(_ex_single_step)
 	cc = r7 == 0;
 	if !cc jump 1f;
 #endif
-
+#ifdef CONFIG_EXACT_HWERR
+	/* Read the ILAT and check to see if the process we are
+	 * single stepping caused a previous hardware error.
+	 * If so, do not single step (which lowers to IRQ5 and makes
+	 * us miss the error).
+	 */
+	p5.l = lo(ILAT);
+	p5.h = hi(ILAT);
+	r7 = [p5];
+	cc = bittst(r7, EVT_IVHW_P);
+	if cc jump 1f;
+#endif
 	/* Single stepping only a single instruction, so clear the trace
 	 * bit here. */
 	r7 = syscfg;
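The new CONFIG_EXACT_HWERR block reads ILAT and skips the single step whenever the hardware-error event is still latched, since lowering to IRQ5 would mask it. A rough C sketch of the same test, assuming the standard Blackfin bit position EVT_IVHW_P = 5 (the same bit as the 0x20 mask in the check removed from _bfin_return_from_exception below) and a hypothetical ilat_read() helper standing in for the p5/r7 MMR load:

    #include <stdint.h>
    #include <stdbool.h>

    #define EVT_IVHW_P 5                /* hardware error (IVHW) bit in ILAT */

    extern uint32_t ilat_read(void);    /* hypothetical: returns the ILAT MMR */

    /* Mirror of "cc = bittst(r7, EVT_IVHW_P); if cc jump 1f;":
     * only single step when no hardware error is pending. */
    static bool single_step_safe(void)
    {
        return !(ilat_read() & (1u << EVT_IVHW_P));
    }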
@@ -263,15 +273,6 @@ ENTRY(_bfin_return_from_exception)
 	r6 = 0x25;
 	CC = R7 == R6;
 	if CC JUMP _double_fault;
-
-	/* Did we cause a HW error? */
-	p5.l = lo(ILAT);
-	p5.h = hi(ILAT);
-	r6 = [p5];
-	r7 = 0x20;	/* Did I just cause anther HW error? */
-	r6 = r7 & r6;
-	CC = R7 == R6;
-	if CC JUMP _double_fault;
 #endif
 
 	(R7:6,P5:4) = [sp++];
@@ -473,6 +474,16 @@ ENTRY(_trap)	/* Exception: 4th entry into system event table(supervisor mode)*/
 	[--sp] = ASTAT;
 	[--sp] = (R7:6,P5:4);
 
+#ifdef CONFIG_EXACT_HWERR
+	/* Make sure all pending reads/writes complete. This ensures any
+	 * access which could cause a hardware error completes, and the
+	 * hardware can signal the error, before we do something silly,
+	 * like crash the kernel. We don't need to work around anomaly
+	 * 05000312, since we are already atomic.
+	 */
+	ssync;
+#endif
+
 #if ANOMALY_05000283 || ANOMALY_05000315
 	cc = r7 == r7;
 	p5.h = HI(CHIPID);
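The ssync added here is Blackfin's system-sync instruction: it stalls until all outstanding loads and stores have completed, so a hardware error they trigger is signalled at a known point rather than later. In C code the same barrier is usually wrapped in a one-line inline-asm helper; a minimal sketch for GCC's Blackfin target (the helper name is illustrative, not the kernel's macro):

    /* Minimal sketch of a system sync from C (GCC inline asm); the "memory"
     * clobber also keeps the compiler from reordering accesses across it. */
    static inline void ssync(void)
    {
        __asm__ __volatile__("ssync;" : : : "memory");
    }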
@@ -855,7 +866,7 @@ ENTRY(_ret_from_exception)
 	p1.h = _schedule_and_signal;
 	[p0] = p1;
 	csync;
-	raise 15;	/* raise evt14 to do signal or reschedule */
+	raise 15;	/* raise evt15 to do signal or reschedule */
 4:
 	r0 = syscfg;
 	bitclr(r0, 0);
@@ -916,7 +927,7 @@ ENTRY(_return_from_int)
 	p1.h = _schedule_and_signal_from_int;
 	[p0] = p1;
 	csync;
-#if ANOMALY_05000281
+#if ANOMALY_05000281 || ANOMALY_05000461
 	r0.l = lo(SAFE_USER_INSTRUCTION);
 	r0.h = hi(SAFE_USER_INSTRUCTION);
 	reti = r0;
@@ -930,18 +941,27 @@ ENTRY(_return_from_int)
 ENDPROC(_return_from_int)
 
 ENTRY(_lower_to_irq14)
-#if ANOMALY_05000281
+#if ANOMALY_05000281 || ANOMALY_05000461
 	r0.l = lo(SAFE_USER_INSTRUCTION);
 	r0.h = hi(SAFE_USER_INSTRUCTION);
 	reti = r0;
 #endif
-	r0 = 0x401f;
+
+#ifdef CONFIG_DEBUG_HWERR
+	/* enable irq14 & hwerr interrupt, until we transition to _evt14_softirq */
+	r0 = (EVT_IVG14 | EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
+#else
+	/* Only enable irq14 interrupt, until we transition to _evt14_softirq */
+	r0 = (EVT_IVG14 | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
+#endif
 	sti r0;
 	raise 14;
 	rti;
+ENDPROC(_lower_to_irq14)
+
 ENTRY(_evt14_softirq)
 #ifdef CONFIG_DEBUG_HWERR
-	r0 = 0x3f;
+	r0 = (EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
 	sti r0;
 #else
 	cli r0;
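The named EVT_* constants make the previously magic interrupt masks self-documenting without changing their values. Assuming the standard Blackfin event bit positions (EMU = 0, RST = 1, NMI = 2, EVX = 3, IRPTEN = 4, IVHW = 5, IVG14 = 14, consistent with the 0x20 IVHW tests above), a quick C sanity check shows the new expressions equal the old literals:

    #include <assert.h>

    /* Assumed Blackfin event bit positions. */
    #define EVT_EMU    (1u << 0)
    #define EVT_RST    (1u << 1)
    #define EVT_NMI    (1u << 2)
    #define EVT_EVX    (1u << 3)
    #define EVT_IRPTEN (1u << 4)
    #define EVT_IVHW   (1u << 5)
    #define EVT_IVG14  (1u << 14)

    int main(void)
    {
        /* _lower_to_irq14 without CONFIG_DEBUG_HWERR: the old "r0 = 0x401f". */
        assert((EVT_IVG14 | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU) == 0x401f);
        /* _evt14_softirq under CONFIG_DEBUG_HWERR: the old "r0 = 0x3f". */
        assert((EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU) == 0x3f);
        /* Under CONFIG_DEBUG_HWERR, _lower_to_irq14 also keeps IVHW enabled. */
        assert((EVT_IVG14 | EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU) == 0x403f);
        return 0;
    }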
@@ -949,8 +969,9 @@ ENTRY(_evt14_softirq)
 	[--sp] = RETI;
 	SP += 4;
 	rts;
+ENDPROC(_evt14_softirq)
 
-_schedule_and_signal_from_int:
+ENTRY(_schedule_and_signal_from_int)
 	/* To end up here, vector 15 was changed - so we have to change it
 	 * back.
 	 */
@@ -983,8 +1004,9 @@ _schedule_and_signal_from_int:
 	call _finish_atomic_sections;
 	sp += 12;
 	jump.s .Lresume_userspace;
+ENDPROC(_schedule_and_signal_from_int)
 
-_schedule_and_signal:
+ENTRY(_schedule_and_signal)
 	SAVE_CONTEXT_SYSCALL
 	/* To end up here, vector 15 was changed - so we have to change it
 	 * back.
@@ -1002,7 +1024,7 @@ _schedule_and_signal:
 1:
 	RESTORE_CONTEXT
 	rti;
-ENDPROC(_lower_to_irq14)
+ENDPROC(_schedule_and_signal)
 
 /* We handle this 100% in exception space - to reduce overhead
  * Only potiential problem is if the software buffer gets swapped out of the
@@ -1588,19 +1610,3 @@ ENTRY(_sys_call_table)
 	.long _sys_ni_syscall
 	.endr
 END(_sys_call_table)
-
-#ifdef CONFIG_EXCEPTION_L1_SCRATCH
-/* .section .l1.bss.scratch */
-.set _exception_stack_top, L1_SCRATCH_START + L1_SCRATCH_LENGTH
-#else
-#ifdef CONFIG_SYSCALL_TAB_L1
-.section .l1.bss
-#else
-.bss
-#endif
-ENTRY(_exception_stack)
-.rept 1024 * NR_CPUS
-.long 0
-.endr
-_exception_stack_top:
-#endif
