Diffstat (limited to 'arch/blackfin/mach-common/entry.S')
-rw-r--r--	arch/blackfin/mach-common/entry.S	191
1 file changed, 100 insertions, 91 deletions
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index fb1795d5be2a..01af24cde362 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -301,27 +301,31 @@ ENTRY(_ex_replaceable)
 	nop;
 
 ENTRY(_ex_trap_c)
+	/* The only thing that has been saved in this context is
+	 * (R7:6,P5:4), ASTAT & SP - don't use anything else
+	 */
+
+	GET_PDA(p5, r6);
+
 	/* Make sure we are not in a double fault */
 	p4.l = lo(IPEND);
 	p4.h = hi(IPEND);
 	r7 = [p4];
 	CC = BITTST (r7, 5);
 	if CC jump _double_fault;
+	[p5 + PDA_EXIPEND] = r7;
 
 	/* Call C code (trap_c) to handle the exception, which most
 	 * likely involves sending a signal to the current process.
 	 * To avoid double faults, lower our priority to IRQ5 first.
 	 */
-	P5.h = _exception_to_level5;
-	P5.l = _exception_to_level5;
+	r7.h = _exception_to_level5;
+	r7.l = _exception_to_level5;
 	p4.l = lo(EVT5);
 	p4.h = hi(EVT5);
-	[p4] = p5;
+	[p4] = r7;
 	csync;
 
-	GET_PDA(p5, r6);
-#ifndef CONFIG_DEBUG_DOUBLEFAULT
-
 	/*
 	 * Save these registers, as they are only valid in exception context
 	 * (where we are now - as soon as we defer to IRQ5, they can change)
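A note on the reordering above: the PDA pointer is now fetched up front, and the live IPEND value is stashed in PDA_EXIPEND so that _exception_to_level5 can later recover the interrupt state. A loose C sketch of the entry check (the bfin_read_IPEND() accessor is real; the pda field name and double_fault() call are stand-ins for the asm paths):

	/* sketch only - the real code runs in exception context with
	 * almost no free registers, so this is just the intent in C */
	unsigned long ipend = bfin_read_IPEND();
	if (ipend & (1 << 5))		/* EVT5 already active: nested fault */
		double_fault();		/* stand-in for the _double_fault path */
	pda->ex_ipend = ipend;		/* saved for _exception_to_level5 */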
@@ -341,7 +345,10 @@ ENTRY(_ex_trap_c)
 
 	r6 = retx;
 	[p5 + PDA_RETX] = r6;
-#endif
+
+	r6 = SEQSTAT;
+	[p5 + PDA_SEQSTAT] = r6;
+
 	/* Save the state of single stepping */
 	r6 = SYSCFG;
 	[p5 + PDA_SYSCFG] = r6;
@@ -349,8 +356,7 @@ ENTRY(_ex_trap_c)
 	BITCLR(r6, SYSCFG_SSSTEP_P);
 	SYSCFG = r6;
 
-	/* Disable all interrupts, but make sure level 5 is enabled so
-	 * we can switch to that level. Save the old mask. */
+	/* Save the current IMASK, since we change in order to jump to level 5 */
 	cli r6;
 	[p5 + PDA_EXIMASK] = r6;
 
@@ -358,9 +364,21 @@ ENTRY(_ex_trap_c)
 	p4.h = hi(SAFE_USER_INSTRUCTION);
 	retx = p4;
 
+	/* Disable all interrupts, but make sure level 5 is enabled so
+	 * we can switch to that level.
+	 */
 	r6 = 0x3f;
 	sti r6;
 
+	/* If interrupts are disabled (IPEND[4], the global interrupt disable
+	 * bit, is set), clear it (re-enabling interrupts) by pushing RETI
+	 * onto the stack. This way we can lower ourselves to IVG5 even if
+	 * the exception was taken after the interrupt handler was called
+	 * but before it got a chance to enable global interrupts itself.
+	 */
+	[--sp] = reti;
+	sp += 4;
+
 	raise 5;
 	jump.s _bfin_return_from_exception;
 ENDPROC(_ex_trap_c)
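The pushed-RETI pair added above is the Blackfin idiom for clearing IPEND[4]: the core sets that bit on interrupt entry and clears it when RETI is saved to memory, so pushing RETI and discarding the value re-enables interrupt nesting. A minimal sketch wrapped as a C helper (the helper name and bare inline asm are illustrative, not kernel API):

	static inline void bfin_clear_ipend4(void)
	{
		/* pushing RETI clears IPEND[4]; "sp += 4" discards the
		 * value - only the side effect is wanted */
		__asm__ __volatile__("[--sp] = reti; sp += 4;");
	}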
@@ -379,8 +397,7 @@ ENTRY(_double_fault)
 
 	R5 = [P4];	/* Control Register*/
 	BITCLR(R5,ENICPLB_P);
-	SSYNC;		/* SSYNC required before writing to IMEM_CONTROL. */
-	.align 8;
+	CSYNC;		/* Disabling of CPLBs should be preceded by a CSYNC */
 	[P4] = R5;
 	SSYNC;
 
@@ -388,8 +405,7 @@ ENTRY(_double_fault)
 	P4.H = HI(DMEM_CONTROL);
 	R5 = [P4];
 	BITCLR(R5,ENDCPLB_P);
-	SSYNC;		/* SSYNC required before writing to DMEM_CONTROL. */
-	.align 8;
+	CSYNC;		/* Disabling of CPLBs should be preceded by a CSYNC */
 	[P4] = R5;
 	SSYNC;
 
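Both _double_fault hunks above replace the pre-write SSYNC (and the now-dropped .align 8 padding) with a CSYNC ahead of the MMR write. The equivalent sequence in C, using the Blackfin MMR accessors (a sketch; assumes the usual bfin_read/bfin_write helpers and the ENICPLB mask):

	unsigned long ctrl = bfin_read_IMEM_CONTROL();
	ctrl &= ~ENICPLB;	/* clear the instruction-CPLB enable bit */
	CSYNC();		/* core sync before disabling CPLBs */
	bfin_write_IMEM_CONTROL(ctrl);
	SSYNC();		/* system sync after the MMR write */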
@@ -420,47 +436,55 @@ ENDPROC(_double_fault)
 ENTRY(_exception_to_level5)
 	SAVE_ALL_SYS
 
-	GET_PDA(p4, r7);	/* Fetch current PDA */
-	r6 = [p4 + PDA_RETX];
+	GET_PDA(p5, r7);	/* Fetch current PDA */
+	r6 = [p5 + PDA_RETX];
 	[sp + PT_PC] = r6;
 
-	r6 = [p4 + PDA_SYSCFG];
+	r6 = [p5 + PDA_SYSCFG];
 	[sp + PT_SYSCFG] = r6;
 
-	/* Restore interrupt mask. We haven't pushed RETI, so this
-	 * doesn't enable interrupts until we return from this handler. */
-	r6 = [p4 + PDA_EXIMASK];
-	sti r6;
+	r6 = [p5 + PDA_SEQSTAT];	/* Read back seqstat */
+	[sp + PT_SEQSTAT] = r6;
 
 	/* Restore the hardware error vector. */
-	P5.h = _evt_ivhw;
-	P5.l = _evt_ivhw;
+	r7.h = _evt_ivhw;
+	r7.l = _evt_ivhw;
 	p4.l = lo(EVT5);
 	p4.h = hi(EVT5);
-	[p4] = p5;
+	[p4] = r7;
 	csync;
 
-	p2.l = lo(IPEND);
-	p2.h = hi(IPEND);
-	csync;
-	r0 = [p2];		/* Read current IPEND */
-	[sp + PT_IPEND] = r0;	/* Store IPEND */
+#ifdef CONFIG_DEBUG_DOUBLEFAULT
+	/* Now that we have the hardware error vector programmed properly
+	 * we can re-enable interrupts (IPEND[4]), so if the _trap_c causes
+	 * another hardware error, we can catch it (self-nesting).
+	 */
+	[--sp] = reti;
+	sp += 4;
+#endif
+
+	r7 = [p5 + PDA_EXIPEND]	/* Read the IPEND from the Exception state */
+	[sp + PT_IPEND] = r7;	/* Store IPEND onto the stack */
 
 	r0 = sp;	/* stack frame pt_regs pointer argument ==> r0 */
 	SP += -12;
 	call _trap_c;
 	SP += 12;
 
-#ifdef CONFIG_DEBUG_DOUBLEFAULT
-	/* Grab ILAT */
-	p2.l = lo(ILAT);
-	p2.h = hi(ILAT);
-	r0 = [p2];
-	r1 = 0x20;	/* Did I just cause anther HW error? */
-	r0 = r0 & r1;
-	CC = R0 == R1;
-	if CC JUMP _double_fault;
-#endif
+	/* If interrupts were off during the exception (IPEND[4] = 1), turn them off
+	 * before we return.
+	 */
+	CC = BITTST(r7, EVT_IRPTEN_P)
+	if !CC jump 1f;
+	/* this will load a random value into the reti register - but that is OK,
+	 * since we do restore it to the correct value in the 'RESTORE_ALL_SYS' macro
+	 */
+	sp += -4;
+	reti = [sp++];
+1:
+	/* restore the interrupt mask (IMASK) */
+	r6 = [p5 + PDA_EXIMASK];
+	sti r6;
 
 	call _ret_from_exception;
 	RESTORE_ALL_SYS
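The tail of _exception_to_level5 now keys off the IPEND snapshot saved at exception time: if IPEND[4] was set, interrupts are re-disabled (by popping a junk value into RETI) before IMASK is restored. The test itself in C (assuming EVT_IRPTEN_P is bit position 4, per the Blackfin headers):

	static int irqs_were_disabled(unsigned long saved_ipend)
	{
		/* mirrors "CC = BITTST(r7, EVT_IRPTEN_P)" above */
		return (saved_ipend >> 4) & 1;
	}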
@@ -474,7 +498,7 @@ ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/
 	 */
 	EX_SCRATCH_REG = sp;
 	GET_PDA_SAFE(sp);
-	sp = [sp + PDA_EXSTACK]
+	sp = [sp + PDA_EXSTACK];
 	/* Try to deal with syscalls quickly. */
 	[--sp] = ASTAT;
 	[--sp] = (R7:6,P5:4);
@@ -489,14 +513,7 @@ ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/
 	ssync;
 #endif
 
-#if ANOMALY_05000283 || ANOMALY_05000315
-	cc = r7 == r7;
-	p5.h = HI(CHIPID);
-	p5.l = LO(CHIPID);
-	if cc jump 1f;
-	r7.l = W[p5];
-1:
-#endif
+	ANOMALY_283_315_WORKAROUND(p5, r7)
 
 #ifdef CONFIG_DEBUG_DOUBLEFAULT
 	/*
@@ -510,18 +527,18 @@ ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/
 	p4.l = lo(DCPLB_FAULT_ADDR);
 	p4.h = hi(DCPLB_FAULT_ADDR);
 	r7 = [p4];
-	[p5 + PDA_DCPLB] = r7;
+	[p5 + PDA_DF_DCPLB] = r7;
 
 	p4.l = lo(ICPLB_FAULT_ADDR);
 	p4.h = hi(ICPLB_FAULT_ADDR);
 	r7 = [p4];
-	[p5 + PDA_ICPLB] = r7;
+	[p5 + PDA_DF_ICPLB] = r7;
 
-	r6 = retx;
-	[p5 + PDA_RETX] = r6;
+	r7 = retx;
+	[p5 + PDA_DF_RETX] = r7;
 
 	r7 = SEQSTAT;		/* reason code is in bit 5:0 */
-	[p5 + PDA_SEQSTAT] = r7;
+	[p5 + PDA_DF_SEQSTAT] = r7;
 #else
 	r7 = SEQSTAT;		/* reason code is in bit 5:0 */
 #endif
@@ -686,8 +703,14 @@ ENTRY(_system_call)
 #ifdef CONFIG_IPIPE
 	cc = BITTST(r7, TIF_IRQ_SYNC);
 	if !cc jump .Lsyscall_no_irqsync;
+	/*
+	 * Clear IPEND[4] manually to undo what resume_userspace_1 just did;
+	 * we need this so that high priority domain interrupts may still
+	 * preempt the current domain while the pipeline log is being played
+	 * back.
+	 */
 	[--sp] = reti;
-	r0 = [sp++];
+	SP += 4;	/* don't merge with next insn to keep the pattern obvious */
 	SP += -12;
 	call ___ipipe_sync_root;
 	SP += 12;
@@ -699,7 +722,7 @@ ENTRY(_system_call)
 
 	/* Reenable interrupts. */
 	[--sp] = reti;
-	r0 = [sp++];
+	sp += 4;
 
 	SP += -12;
 	call _schedule;
@@ -715,7 +738,7 @@ ENTRY(_system_call)
 .Lsyscall_do_signals:
 	/* Reenable interrupts. */
 	[--sp] = reti;
-	r0 = [sp++];
+	sp += 4;
 
 	r0 = sp;
 	SP += -12;
@@ -725,10 +748,6 @@ ENTRY(_system_call)
 .Lsyscall_really_exit:
 	r5 = [sp + PT_RESERVED];
 	rets = r5;
-#ifdef CONFIG_IPIPE
-	[--sp] = reti;
-	r5 = [sp++];
-#endif /* CONFIG_IPIPE */
 	rts;
 ENDPROC(_system_call)
 
@@ -816,13 +835,13 @@ ENDPROC(_resume)
 
 ENTRY(_ret_from_exception)
 #ifdef CONFIG_IPIPE
-	[--sp] = rets;
-	SP += -12;
-	call ___ipipe_check_root
-	SP += 12
-	rets = [sp++];
-	cc = r0 == 0;
-	if cc jump 4f;	/* not on behalf of Linux, get out */
+	p2.l = _per_cpu__ipipe_percpu_domain;
+	p2.h = _per_cpu__ipipe_percpu_domain;
+	r0.l = _ipipe_root;
+	r0.h = _ipipe_root;
+	r2 = [p2];
+	cc = r0 == r2;
+	if !cc jump 4f;	/* not on behalf of the root domain, get out */
 #endif /* CONFIG_IPIPE */
 	p2.l = lo(IPEND);
 	p2.h = hi(IPEND);
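The _ret_from_exception hunk drops the out-of-line ___ipipe_check_root call in favor of comparing the per-CPU current-domain pointer against &ipipe_root directly. Roughly, in C (I-pipe symbols; the real ipipe_percpu_domain is a per-CPU variable, flattened here for the sketch):

	struct ipipe_domain;				/* opaque for this sketch */
	extern struct ipipe_domain ipipe_root;		/* _ipipe_root in the asm */
	extern struct ipipe_domain *ipipe_percpu_domain;

	static inline int on_root_domain(void)
	{
		return ipipe_percpu_domain == &ipipe_root;
	}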
@@ -882,14 +901,9 @@ ENDPROC(_ret_from_exception)
 
 #ifdef CONFIG_IPIPE
 
-_sync_root_irqs:
-	[--sp] = reti;		/* Reenable interrupts */
-	r0 = [sp++];
-	jump.l ___ipipe_sync_root
-
 _resume_kernel_from_int:
-	r0.l = _sync_root_irqs
-	r0.h = _sync_root_irqs
+	r0.l = ___ipipe_sync_root;
+	r0.h = ___ipipe_sync_root;
 	[--sp] = rets;
 	[--sp] = ( r7:4, p5:3 );
 	SP += -12;
@@ -953,10 +967,10 @@ ENTRY(_lower_to_irq14)
 #endif
 
 #ifdef CONFIG_DEBUG_HWERR
-	/* enable irq14 & hwerr interrupt, until we transition to _evt14_softirq */
+	/* enable irq14 & hwerr interrupt, until we transition to _evt_evt14 */
 	r0 = (EVT_IVG14 | EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
 #else
-	/* Only enable irq14 interrupt, until we transition to _evt14_softirq */
+	/* Only enable irq14 interrupt, until we transition to _evt_evt14 */
 	r0 = (EVT_IVG14 | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
 #endif
 	sti r0;
@@ -964,7 +978,7 @@ ENTRY(_lower_to_irq14)
 	rti;
 ENDPROC(_lower_to_irq14)
 
-ENTRY(_evt14_softirq)
+ENTRY(_evt_evt14)
 #ifdef CONFIG_DEBUG_HWERR
 	r0 = (EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
 	sti r0;
@@ -974,7 +988,7 @@ ENTRY(_evt14_softirq)
 	[--sp] = RETI;
 	SP += 4;
 	rts;
-ENDPROC(_evt14_softirq)
+ENDPROC(_evt_evt14)
 
 ENTRY(_schedule_and_signal_from_int)
 	/* To end up here, vector 15 was changed - so we have to change it
@@ -1004,6 +1018,12 @@ ENTRY(_schedule_and_signal_from_int)
 #endif
 	sti r0;
 
+	/* finish the userspace "atomic" functions for it */
+	r1 = FIXED_CODE_END;
+	r2 = [sp + PT_PC];
+	cc = r1 <= r2;
+	if cc jump .Lresume_userspace (bp);
+
 	r0 = sp;
 	sp += -12;
 	call _finish_atomic_sections;
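The added fast path skips _finish_atomic_sections when the interrupted PC lies at or beyond the fixed-code region, since such a PC cannot be inside the userspace atomic stubs. The same decision in C (struct pt_regs trimmed to the one field used; the FIXED_CODE_END value here is an assumption):

	struct pt_regs { unsigned long pc; };	/* trimmed for the sketch */
	#define FIXED_CODE_END 0x560UL		/* assumed end of fixed code */

	extern void finish_atomic_sections(struct pt_regs *regs);

	static void maybe_finish_atomic(struct pt_regs *regs)
	{
		if (regs->pc < FIXED_CODE_END)
			finish_atomic_sections(regs);
	}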
@@ -1107,14 +1127,7 @@ ENTRY(_early_trap)
 	SAVE_ALL_SYS
 	trace_buffer_stop(p0,r0);
 
-#if ANOMALY_05000283 || ANOMALY_05000315
-	cc = r5 == r5;
-	p4.h = HI(CHIPID);
-	p4.l = LO(CHIPID);
-	if cc jump 1f;
-	r5.l = W[p4];
-1:
-#endif
+	ANOMALY_283_315_WORKAROUND(p4, r5)
 
 	/* Turn caches off, to ensure we don't get double exceptions */
 
@@ -1123,9 +1136,7 @@ ENTRY(_early_trap)
 
 	R5 = [P4];	/* Control Register*/
 	BITCLR(R5,ENICPLB_P);
-	CLI R1;
-	SSYNC;		/* SSYNC required before writing to IMEM_CONTROL. */
-	.align 8;
+	CSYNC;		/* Disabling of CPLBs should be preceded by a CSYNC */
 	[P4] = R5;
 	SSYNC;
 
@@ -1133,11 +1144,9 @@ ENTRY(_early_trap)
 	P4.H = HI(DMEM_CONTROL);
 	R5 = [P4];
 	BITCLR(R5,ENDCPLB_P);
-	SSYNC;		/* SSYNC required before writing to DMEM_CONTROL. */
-	.align 8;
+	CSYNC;		/* Disabling of CPLBs should be preceded by a CSYNC */
 	[P4] = R5;
 	SSYNC;
-	STI R1;
 
 	r0 = sp;	/* stack frame pt_regs pointer argument ==> r0 */
 	r1 = RETX;