Diffstat (limited to 'arch/blackfin/mach-common/entry.S')
-rw-r--r--	arch/blackfin/mach-common/entry.S	92
1 file changed, 42 insertions(+), 50 deletions(-)
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index c6ae8442fc4e..5531f49c84e6 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -36,6 +36,7 @@
 #include <linux/init.h>
 #include <linux/linkage.h>
 #include <linux/unistd.h>
+#include <linux/threads.h>
 #include <asm/blackfin.h>
 #include <asm/errno.h>
 #include <asm/fixed_code.h>
@@ -75,11 +76,11 @@ ENTRY(_ex_workaround_261)
 	 * handle it.
 	 */
 	P4 = R7;	/* Store EXCAUSE */
-	p5.l = _last_cplb_fault_retx;
-	p5.h = _last_cplb_fault_retx;
-	r7 = [p5];
+
+	GET_PDA(p5, r7);
+	r7 = [p5 + PDA_LFRETX];
 	r6 = retx;
-	[p5] = r6;
+	[p5 + PDA_LFRETX] = r6;
 	cc = r6 == r7;
 	if !cc jump _bfin_return_from_exception;
 	/* fall through */
@@ -324,7 +325,9 @@ ENTRY(_ex_trap_c)
 	[p4] = p5;
 	csync;
 
+	GET_PDA(p5, r6);
 #ifndef CONFIG_DEBUG_DOUBLEFAULT
+
 	/*
 	 * Save these registers, as they are only valid in exception context
 	 * (where we are now - as soon as we defer to IRQ5, they can change)
@@ -335,29 +338,25 @@ ENTRY(_ex_trap_c)
 	p4.l = lo(DCPLB_FAULT_ADDR);
 	p4.h = hi(DCPLB_FAULT_ADDR);
 	r7 = [p4];
-	p5.h = _saved_dcplb_fault_addr;
-	p5.l = _saved_dcplb_fault_addr;
-	[p5] = r7;
+	[p5 + PDA_DCPLB] = r7;
 
-	r7 = [p4 + (ICPLB_FAULT_ADDR - DCPLB_FAULT_ADDR)];
-	p5.h = _saved_icplb_fault_addr;
-	p5.l = _saved_icplb_fault_addr;
-	[p5] = r7;
+	p4.l = lo(ICPLB_FAULT_ADDR);
+	p4.h = hi(ICPLB_FAULT_ADDR);
+	r6 = [p4];
+	[p5 + PDA_ICPLB] = r6;
 
 	r6 = retx;
-	p4.l = _saved_retx;
-	p4.h = _saved_retx;
-	[p4] = r6;
+	[p5 + PDA_RETX] = r6;
 #endif
 	r6 = SYSCFG;
-	[p4 + 4] = r6;
+	[p5 + PDA_SYSCFG] = r6;
 	BITCLR(r6, 0);
 	SYSCFG = r6;
 
 	/* Disable all interrupts, but make sure level 5 is enabled so
 	 * we can switch to that level. Save the old mask. */
 	cli r6;
-	[p4 + 8] = r6;
+	[p5 + PDA_EXIMASK] = r6;
 
 	p4.l = lo(SAFE_USER_INSTRUCTION);
 	p4.h = hi(SAFE_USER_INSTRUCTION);
@@ -424,17 +423,16 @@ ENDPROC(_double_fault)
 ENTRY(_exception_to_level5)
 	SAVE_ALL_SYS
 
-	p4.l = _saved_retx;
-	p4.h = _saved_retx;
-	r6 = [p4];
+	GET_PDA(p4, r7);	/* Fetch current PDA */
+	r6 = [p4 + PDA_RETX];
 	[sp + PT_PC] = r6;
 
-	r6 = [p4 + 4];
+	r6 = [p4 + PDA_SYSCFG];
 	[sp + PT_SYSCFG] = r6;
 
 	/* Restore interrupt mask. We haven't pushed RETI, so this
 	 * doesn't enable interrupts until we return from this handler. */
-	r6 = [p4 + 8];
+	r6 = [p4 + PDA_EXIMASK];
 	sti r6;
 
 	/* Restore the hardware error vector. */
@@ -478,8 +476,8 @@ ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/
 	 * scratch register (for want of a better option).
 	 */
 	EX_SCRATCH_REG = sp;
-	sp.l = _exception_stack_top;
-	sp.h = _exception_stack_top;
+	GET_PDA_SAFE(sp);
+	sp = [sp + PDA_EXSTACK]
 	/* Try to deal with syscalls quickly. */
 	[--sp] = ASTAT;
 	[--sp] = (R7:6,P5:4);
@@ -501,27 +499,22 @@ ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/
 	 * but they are not very interesting, so don't save them
 	 */
 
+	GET_PDA(p5, r7);
 	p4.l = lo(DCPLB_FAULT_ADDR);
 	p4.h = hi(DCPLB_FAULT_ADDR);
 	r7 = [p4];
-	p5.h = _saved_dcplb_fault_addr;
-	p5.l = _saved_dcplb_fault_addr;
-	[p5] = r7;
+	[p5 + PDA_DCPLB] = r7;
 
-	r7 = [p4 + (ICPLB_FAULT_ADDR - DCPLB_FAULT_ADDR)];
-	p5.h = _saved_icplb_fault_addr;
-	p5.l = _saved_icplb_fault_addr;
-	[p5] = r7;
+	p4.l = lo(ICPLB_FAULT_ADDR);
+	p4.h = hi(ICPLB_FAULT_ADDR);
+	r7 = [p4];
+	[p5 + PDA_ICPLB] = r7;
 
-	p4.l = _saved_retx;
-	p4.h = _saved_retx;
 	r6 = retx;
-	[p4] = r6;
+	[p5 + PDA_RETX] = r6;
 
 	r7 = SEQSTAT; /* reason code is in bit 5:0 */
-	p4.l = _saved_seqstat;
-	p4.h = _saved_seqstat;
-	[p4] = r7;
+	[p5 + PDA_SEQSTAT] = r7;
 #else
 	r7 = SEQSTAT; /* reason code is in bit 5:0 */
 #endif
@@ -546,11 +539,11 @@ ENTRY(_kernel_execve)
 	p0 = sp;
 	r3 = SIZEOF_PTREGS / 4;
 	r4 = 0(x);
-0:
+.Lclear_regs:
 	[p0++] = r4;
 	r3 += -1;
 	cc = r3 == 0;
-	if !cc jump 0b (bp);
+	if !cc jump .Lclear_regs (bp);
 
 	p0 = sp;
 	sp += -16;
@@ -558,7 +551,7 @@ ENTRY(_kernel_execve)
 	call _do_execve;
 	SP += 16;
 	cc = r0 == 0;
-	if ! cc jump 1f;
+	if ! cc jump .Lexecve_failed;
 	/* Success. Copy our temporary pt_regs to the top of the kernel
 	 * stack and do a normal exception return.
 	 */
@@ -574,12 +567,12 @@ ENTRY(_kernel_execve)
 	p0 = fp;
 	r4 = [p0--];
 	r3 = SIZEOF_PTREGS / 4;
-0:
+.Lcopy_regs:
 	r4 = [p0--];
 	[p1--] = r4;
 	r3 += -1;
 	cc = r3 == 0;
-	if ! cc jump 0b (bp);
+	if ! cc jump .Lcopy_regs (bp);
 
 	r0 = (KERNEL_STACK_SIZE - SIZEOF_PTREGS) (z);
 	p1 = r0;
@@ -591,7 +584,7 @@ ENTRY(_kernel_execve)
 
 	RESTORE_CONTEXT;
 	rti;
-1:
+.Lexecve_failed:
 	unlink;
 	rts;
 ENDPROC(_kernel_execve)
@@ -925,9 +918,14 @@ _schedule_and_signal_from_int:
 	p1 = rets;
 	[sp + PT_RESERVED] = p1;
 
+#ifdef CONFIG_SMP
+	GET_PDA(p0, r0); 	/* Fetch current PDA (can't migrate to other CPU here) */
+	r0 = [p0 + PDA_IRQFLAGS];
+#else
 	p0.l = _irq_flags;
 	p0.h = _irq_flags;
 	r0 = [p0];
+#endif
 	sti r0;
 
 	r0 = sp;
@@ -1539,12 +1537,6 @@ ENTRY(_sys_call_table)
 	.endr
 END(_sys_call_table)
 
-#if ANOMALY_05000261
-/* Used by the assembly entry point to work around an anomaly. */
-_last_cplb_fault_retx:
-	.long 0;
-#endif
-
 #ifdef CONFIG_EXCEPTION_L1_SCRATCH
 /* .section .l1.bss.scratch */
 .set _exception_stack_top, L1_SCRATCH_START + L1_SCRATCH_LENGTH
@@ -1554,8 +1546,8 @@ _last_cplb_fault_retx:
 #else
 .bss
 #endif
-_exception_stack:
-	.rept 1024
+ENTRY(_exception_stack)
+	.rept 1024 * NR_CPUS
 	.long 0
 	.endr
 _exception_stack_top:
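
The hunks above replace loads and stores of the former global scratch variables (_saved_retx, _saved_dcplb_fault_addr, _saved_icplb_fault_addr, _saved_seqstat, _last_cplb_fault_retx) with accesses relative to a per-CPU data area fetched by GET_PDA / GET_PDA_SAFE, so each core keeps its own exception state. As a rough sketch only, assuming the PDA_* names are byte offsets generated from a C structure (the struct and field names below are invented for illustration, not the kernel's actual definition), the per-CPU record being indexed could look roughly like this:

/*
 * Illustrative sketch only; not the authoritative PDA layout.
 * Each PDA_* offset used in the assembly above would correspond
 * to one per-CPU field along these lines.
 */
struct pda_exception_state_sketch {
	unsigned long *ex_stack;            /* PDA_EXSTACK: per-CPU exception stack      */
	unsigned long ex_imask;             /* PDA_EXIMASK: interrupt mask saved by cli  */
	unsigned long ex_syscfg;            /* PDA_SYSCFG:  SYSCFG saved on exception    */
	unsigned long dcplb_fault_addr;     /* PDA_DCPLB:   DCPLB_FAULT_ADDR snapshot    */
	unsigned long icplb_fault_addr;     /* PDA_ICPLB:   ICPLB_FAULT_ADDR snapshot    */
	unsigned long retx;                 /* PDA_RETX:    faulting return address      */
	unsigned long seqstat;              /* PDA_SEQSTAT: faulting SEQSTAT             */
	unsigned long last_cplb_fault_retx; /* PDA_LFRETX:  anomaly 05000261 tracking    */
	unsigned long imask;                /* PDA_IRQFLAGS: saved irq_flags (SMP path)  */
};

With one such record per core, and with the exception stack sized 1024 * NR_CPUS words in the final hunk, the exception paths no longer race on shared globals once CONFIG_SMP is enabled.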