author     Linus Torvalds <torvalds@linux-foundation.org>  2008-04-24 12:33:34 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-04-24 12:33:34 -0400
commit     346ad4b7fe392571f19314f153db9151dbc1d82b (patch)
tree       2d4085338c9044bca2f6472893da60387db3c96f /arch/blackfin/mach-common/entry.S
parent     845199f194306dbd69ca42d3b40a5125cdb50b89 (diff)
parent     2dc63a84b2db23b9680646aff93917211613bf1a (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/cooloney/blackfin-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/cooloney/blackfin-2.6: (85 commits)
Blackfin char driver for Blackfin on-chip OTP memory (v3)
Blackfin Serial Driver: fix bug - use mod_timer to replace only add_timer.
Blackfin Serial Driver: the uart break anomaly has been given its own number, so switch to it
Blackfin Serial Driver: use BFIN_UART_NR_PORTS to help SIR driver in uart port.
Blackfin Serial Driver: Fix bug - kernel hangs when accessing uart 0 on bf537 when booting u-boot and linux on uart 1
Blackfin Serial Driver: punt unused lsr variable
Blackfin Serial Driver: Enable IR function when user application (irattach /dev/ttyBFx -s) call TIOCSETD ioctl with line discipline N_IRDA
[Blackfin] arch: add include/boot .gitignore files
[Blackfin] arch: Functional power management support: Add support for cpu frequency scaling
[Blackfin] arch: Functional power management support: Remove broken cpu frequency scaling drivers
[Blackfin] arch: Equalize include files: Add PLL_DIV Masks
[Blackfin] arch: Add a warning about the value of CLKIN.
[Blackfin] arch: take DDR DEVWD into consideration as well for BF548
[Blackfin] arch: Remove the circular buffering mechanism for exceptions
[Blackfin] arch: lose unnecessary dependency on CONFIG_BFIN_ICACHE for MPU
[Blackfin] arch: fix bug - before assign new channel to the map register, need clear the bits first.
[Blackfin] arch: add Blackfin on-chip SIR IrDA driver support
[Blackfin] arch: BF54x memsizes are in mbits, not mbytes
[Blackfin] arch: try to remove condition that causes double fault, by checking current before it gets dereferenced
[Blackfin] arch: Update anomaly list.
...
Diffstat (limited to 'arch/blackfin/mach-common/entry.S')
-rw-r--r--  arch/blackfin/mach-common/entry.S | 128
1 file changed, 39 insertions(+), 89 deletions(-)
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index cee54cebbc65..f2fb87e9a46e 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -38,6 +38,7 @@
 #include <linux/unistd.h>
 #include <asm/blackfin.h>
 #include <asm/errno.h>
+#include <asm/fixed_code.h>
 #include <asm/thread_info.h>  /* TIF_NEED_RESCHED */
 #include <asm/asm-offsets.h>
 #include <asm/trace.h>
@@ -52,15 +53,6 @@
 # define EX_SCRATCH_REG CYCLES
 #endif
 
-#if ANOMALY_05000281
-ENTRY(_safe_speculative_execution)
-	NOP;
-	NOP;
-	NOP;
-	jump _safe_speculative_execution;
-ENDPROC(_safe_speculative_execution)
-#endif
-
 #ifdef CONFIG_EXCPT_IRQ_SYSC_L1
 .section .l1.text
 #else
@@ -121,10 +113,14 @@ ENTRY(_ex_icplb_miss)
 	(R7:6,P5:4) = [sp++];
 	ASTAT = [sp++];
 	SAVE_ALL_SYS
-	DEBUG_HWTRACE_SAVE(p5, r7)
 #ifdef CONFIG_MPU
+	/* We must load R1 here, _before_ DEBUG_HWTRACE_SAVE, since that
+	 * will change the stack pointer.  */
 	R0 = SEQSTAT;
 	R1 = SP;
+#endif
+	DEBUG_HWTRACE_SAVE(p5, r7)
+#ifdef CONFIG_MPU
 	sp += -12;
 	call _cplb_hdr;
 	sp += 12;
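Editorial note: the comment added in the hunk above states the actual constraint being fixed — under CONFIG_MPU, SEQSTAT and the stack-frame pointer for _cplb_hdr must be read before DEBUG_HWTRACE_SAVE pushes onto the stack and moves SP. A minimal C sketch of that ordering hazard (all names and values here are invented for illustration; this is not code from the patch):

    #include <stdio.h>

    static unsigned long fake_stack[16];
    static unsigned long *sp = &fake_stack[16];   /* stands in for the hardware SP */

    /* Stands in for DEBUG_HWTRACE_SAVE: it pushes a record, so SP moves. */
    static void debug_hwtrace_save(unsigned long rec)
    {
            *--sp = rec;
    }

    int main(void)
    {
            unsigned long seqstat = 0x27;   /* pretend exception cause (R0) */
            unsigned long *regs = sp;       /* R1 = SP: must be read first ... */

            debug_hwtrace_save(0xdeadbeef); /* ... because this call moves SP */

            /* Had the snapshot been taken after the call, the callee would be
             * handed a pointer below the real register frame. */
            printf("seqstat=%#lx regs=%p sp now=%p\n",
                   seqstat, (void *)regs, (void *)sp);
            return 0;
    }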
@@ -191,6 +187,7 @@ ENTRY(_bfin_return_from_exception)
 ENDPROC(_bfin_return_from_exception)
 
 ENTRY(_handle_bad_cplb)
+	DEBUG_HWTRACE_RESTORE(p5, r7)
 	/* To get here, we just tried and failed to change a CPLB
 	 * so, handle things in trap_c (C code), by lowering to
 	 * IRQ5, just like we normally do. Since this is not a
@@ -225,6 +222,26 @@ ENTRY(_ex_trap_c)
 	[p4] = p5;
 	csync;
 
+	p4.l = lo(DCPLB_FAULT_ADDR);
+	p4.h = hi(DCPLB_FAULT_ADDR);
+	r7 = [p4];
+	p5.h = _saved_dcplb_fault_addr;
+	p5.l = _saved_dcplb_fault_addr;
+	[p5] = r7;
+
+	r7 = [p4 + (ICPLB_FAULT_ADDR - DCPLB_FAULT_ADDR)];
+	p5.h = _saved_icplb_fault_addr;
+	p5.l = _saved_icplb_fault_addr;
+	[p5] = r7;
+
+	p4.l = __retx;
+	p4.h = __retx;
+	r6 = retx;
+	[p4] = r6;
+	p4.l = lo(SAFE_USER_INSTRUCTION);
+	p4.h = hi(SAFE_USER_INSTRUCTION);
+	retx = p4;
+
 	/* Disable all interrupts, but make sure level 5 is enabled so
 	 * we can switch to that level.  Save the old mask.  */
 	cli r6;
@@ -234,23 +251,6 @@ ENTRY(_ex_trap_c)
 	r6 = 0x3f;
 	sti r6;
 
-	/* Save the excause into a circular buffer, in case the instruction
-	 * which caused this excecptions causes others.
-	 */
-	P5.l = _in_ptr_excause;
-	P5.h = _in_ptr_excause;
-	R7 = [P5];
-	R7 += 4;
-	R6 = 0xF;
-	R7 = R7 & R6;
-	[P5] = R7;
-	R6.l = _excause_circ_buf;
-	R6.h = _excause_circ_buf;
-	R7 = R7 + R6;
-	p5 = R7;
-	R6 = SEQSTAT;
-	[P5] = R6;
-
 	(R7:6,P5:4) = [sp++];
 	ASTAT = [sp++];
 	SP = EX_SCRATCH_REG;
@@ -307,6 +307,11 @@ ENDPROC(_double_fault)
 ENTRY(_exception_to_level5)
 	SAVE_ALL_SYS
 
+	p4.l = __retx;
+	p4.h = __retx;
+	r6 = [p4];
+	[sp + PT_PC] = r6;
+
 	/* Restore interrupt mask.  We haven't pushed RETI, so this
 	 * doesn't enable interrupts until we return from this handler.  */
 	p4.l = _excpt_saved_imask;
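Editorial note: read together, the _ex_trap_c and _exception_to_level5 hunks above replace the per-exception bookkeeping with fixed storage — the DCPLB/ICPLB fault addresses are latched into _saved_*_fault_addr, the real RETX goes into __retx, and RETX itself is parked on SAFE_USER_INSTRUCTION until the level-5 handler copies the real PC back into the saved frame. A rough C rendering of that flow, as this editor reads it (a sketch with invented stand-in values, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    /* Invented stand-in values for the DCPLB/ICPLB fault-address MMRs. */
    static uint32_t dcplb_fault_addr_mmr = 0xff800014;
    static uint32_t icplb_fault_addr_mmr = 0xffa01230;

    /* Fixed slots mirroring _saved_dcplb_fault_addr, _saved_icplb_fault_addr
     * and __retx in entry.S. */
    static uint32_t saved_dcplb_fault_addr, saved_icplb_fault_addr, saved_retx;

    #define SAFE_USER_INSTRUCTION 0x400   /* placeholder; the real value comes from fixed_code.h */

    /* _ex_trap_c side: latch the fault state, remember the real RETX, and
     * return the parked RETX so a return from exception lands on a
     * known-safe instruction instead of re-executing the faulting one. */
    static uint32_t ex_trap_c_prologue(uint32_t retx)
    {
            saved_dcplb_fault_addr = dcplb_fault_addr_mmr;
            saved_icplb_fault_addr = icplb_fault_addr_mmr;
            saved_retx = retx;
            return SAFE_USER_INSTRUCTION;
    }

    /* _exception_to_level5 side: put the real faulting PC back into the
     * saved register frame (the new [sp + PT_PC] store) before trap_c runs. */
    static void exception_to_level5_fixup(uint32_t *pt_pc)
    {
            *pt_pc = saved_retx;
    }

    int main(void)
    {
            uint32_t pt_pc = 0;
            uint32_t retx = ex_trap_c_prologue(0xffa01234);

            exception_to_level5_fixup(&pt_pc);
            printf("retx parked at %#x, trap_c sees pc=%#x\n", retx, pt_pc);
            return 0;
    }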
@@ -328,42 +333,11 @@ ENTRY(_exception_to_level5)
 	r0 = [p2];	/* Read current IPEND */
 	[sp + PT_IPEND] = r0;	/* Store IPEND */
 
-	/* Pop the excause from the circular buffer and push it on the stack
-	 * (in the right place - if you change the location of SEQSTAT, you
-	 * must change this offset.
-	 */
-.L_excep_to_5_again:
-	P5.l = _out_ptr_excause;
-	P5.h = _out_ptr_excause;
-	R7 = [P5];
-	R7 += 4;
-	R6 = 0xF;
-	R7 = R7 & R6;
-	[P5] = R7;
-	R6.l = _excause_circ_buf;
-	R6.h = _excause_circ_buf;
-	R7 = R7 + R6;
-	P5 = R7;
-	R1 = [P5];
-	[SP + PT_SEQSTAT] = r1;
-
 	r0 = sp;	/* stack frame pt_regs pointer argument ==> r0 */
 	SP += -12;
 	call _trap_c;
 	SP += 12;
 
-	/* See if anything else is in the exception buffer
-	 * if there is, process it
-	 */
-	P5.l = _out_ptr_excause;
-	P5.h = _out_ptr_excause;
-	P4.l = _in_ptr_excause;
-	P4.h = _in_ptr_excause;
-	R6 = [P5];
-	R7 = [P4];
-	CC = R6 == R7;
-	if ! CC JUMP .L_excep_to_5_again
-
 	call _ret_from_exception;
 	RESTORE_ALL_SYS
 	rti;
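Editorial note: for readers wondering what the deleted .L_excep_to_5_again loop and its _in_ptr_excause/_out_ptr_excause bookkeeping actually did, here is a hypothetical C reconstruction of the retired mechanism (illustration only, names mirror the deleted symbols): a four-entry ring of SEQSTAT values, filled in _ex_trap_c and drained around trap_c, with byte offsets that advance by 4 and wrap via '& 0xF', exactly as the removed assembly did.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t excause_circ_buf[4];
    static uint32_t in_ptr_excause, out_ptr_excause;   /* byte offsets into the ring */

    /* Producer (was in _ex_trap_c): advance by 4 bytes, wrap with & 0xF,
     * then store SEQSTAT at the new slot. */
    static void push_excause(uint32_t seqstat)
    {
            in_ptr_excause = (in_ptr_excause + 4) & 0xF;
            excause_circ_buf[in_ptr_excause / 4] = seqstat;
    }

    /* Consumer (was the .L_excep_to_5_again loop): pop one cause, call
     * trap_c, and repeat until out_ptr catches up with in_ptr. */
    static void drain_excauses(void (*trap_c)(uint32_t seqstat))
    {
            do {
                    out_ptr_excause = (out_ptr_excause + 4) & 0xF;
                    trap_c(excause_circ_buf[out_ptr_excause / 4]);
            } while (out_ptr_excause != in_ptr_excause);
    }

    static void fake_trap_c(uint32_t seqstat)
    {
            printf("trap_c(seqstat=%#x)\n", seqstat);
    }

    int main(void)
    {
            push_excause(0x26);            /* two invented exception causes */
            push_excause(0x27);
            drain_excauses(fake_trap_c);
            return 0;
    }

After this merge the drain loop is gone: _exception_to_level5 makes a single trap_c call per level-5 entry, with the real PC restored from __retx, and the ring-buffer storage and its _init_exception_buff initializer are removed in the hunks that follow.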
@@ -727,8 +701,8 @@ ENTRY(_return_from_int)
 	[p0] = p1;
 	csync;
 #if ANOMALY_05000281
-	r0.l = _safe_speculative_execution;
-	r0.h = _safe_speculative_execution;
+	r0.l = lo(SAFE_USER_INSTRUCTION);
+	r0.h = hi(SAFE_USER_INSTRUCTION);
 	reti = r0;
 #endif
 	r0 = 0x801f (z);
@@ -741,8 +715,8 @@ ENDPROC(_return_from_int)
 
 ENTRY(_lower_to_irq14)
 #if ANOMALY_05000281
-	r0.l = _safe_speculative_execution;
-	r0.h = _safe_speculative_execution;
+	r0.l = lo(SAFE_USER_INSTRUCTION);
+	r0.h = hi(SAFE_USER_INSTRUCTION);
 	reti = r0;
 #endif
 	r0 = 0x401f;
@@ -809,20 +783,6 @@ _schedule_and_signal:
 	rti;
 ENDPROC(_lower_to_irq14)
 
-/* Make sure when we start, that the circular buffer is initialized properly
- * R0 and P0 are call clobbered, so we can use them here.
- */
-ENTRY(_init_exception_buff)
-	r0 = 0;
-	p0.h = _in_ptr_excause;
-	p0.l = _in_ptr_excause;
-	[p0] = r0;
-	p0.h = _out_ptr_excause;
-	p0.l = _out_ptr_excause;
-	[p0] = r0;
-	rts;
-ENDPROC(_init_exception_buff)
-
 /* We handle this 100% in exception space - to reduce overhead
  * Only potiential problem is if the software buffer gets swapped out of the
  * CPLB table - then double fault. - so we don't let this happen in other places
@@ -1398,17 +1358,7 @@ _exception_stack_top:
 _last_cplb_fault_retx:
 	.long 0;
 #endif
-/*
- * Single instructions can have multiple faults, which need to be
- * handled by traps.c, in irq5. We store the exception cause to ensure
- * we don't miss a double fault condition
- */
-ENTRY(_in_ptr_excause)
+/* Used to save the real RETX when temporarily storing a safe
+ * return address.  */
+__retx:
 	.long 0;
-ENTRY(_out_ptr_excause)
-	.long 0;
-ALIGN
-ENTRY(_excause_circ_buf)
-	.rept 4
-	.long 0
-	.endr