path: root/arch/tile/kernel/intvec_64.S
Diffstat (limited to 'arch/tile/kernel/intvec_64.S')
-rw-r--r--  arch/tile/kernel/intvec_64.S | 80
1 file changed, 44 insertions(+), 36 deletions(-)
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index 30ae76e50c44..7c06d597ffd0 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -220,7 +220,9 @@ intvec_\vecname:
 	 * This routine saves just the first four registers, plus the
 	 * stack context so we can do proper backtracing right away,
 	 * and defers to handle_interrupt to save the rest.
-	 * The backtracer needs pc, ex1, lr, sp, r52, and faultnum.
+	 * The backtracer needs pc, ex1, lr, sp, r52, and faultnum,
+	 * and needs sp set to its final location at the bottom of
+	 * the stack frame.
 	 */
 	addli	r0, r0, PTREGS_OFFSET_LR - (PTREGS_SIZE + KSTK_PTREGS_GAP)
 	wh64	r0		/* cache line 7 */
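Note: the comment extended by this hunk spells out the minimal state the early save path must capture before handle_interrupt stores the full register set. As a rough illustration only (the struct name and grouping below are hypothetical; the kernel keeps these fields in struct pt_regs), the backtracer's input amounts to:

/* Hypothetical sketch, for illustration; not the real pt_regs layout. */
struct early_unwind_state {
	unsigned long pc;        /* interrupted program counter */
	unsigned long ex1;       /* EX_CONTEXT "ex1" word (protection level, ICS) */
	unsigned long lr;        /* link register of the interrupted code */
	unsigned long sp;        /* must already sit at the bottom of the new frame */
	unsigned long r52;       /* frame pointer */
	unsigned long faultnum;  /* which interrupt vector was taken */
};

The new wording makes explicit that sp must already hold its final value, since the backtracer walks frames relative to it.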
@@ -450,23 +452,6 @@ intvec_\vecname:
 	push_reg r5, r52
 	st	r52, r4
 
-	/* Load tp with our per-cpu offset. */
-#ifdef CONFIG_SMP
-	{
-	 mfspr	r20, SPR_SYSTEM_SAVE_K_0
-	 moveli	r21, hw2_last(__per_cpu_offset)
-	}
-	{
-	 shl16insli r21, r21, hw1(__per_cpu_offset)
-	 bfextu	r20, r20, 0, LOG2_THREAD_SIZE-1
-	}
-	shl16insli r21, r21, hw0(__per_cpu_offset)
-	shl3add	r20, r20, r21
-	ld	tp, r20
-#else
-	move	tp, zero
-#endif
-
 	/*
 	 * If we will be returning to the kernel, we will need to
 	 * reset the interrupt masks to the state they had before.
@@ -489,6 +474,44 @@ intvec_\vecname:
 	.endif
 	st	r21, r32
 
+	/*
+	 * we've captured enough state to the stack (including in
+	 * particular our EX_CONTEXT state) that we can now release
+	 * the interrupt critical section and replace it with our
+	 * standard "interrupts disabled" mask value.  This allows
+	 * synchronous interrupts (and profile interrupts) to punch
+	 * through from this point onwards.
+	 *
+	 * It's important that no code before this point touch memory
+	 * other than our own stack (to keep the invariant that this
+	 * is all that gets touched under ICS), and that no code after
+	 * this point reference any interrupt-specific SPR, in particular
+	 * the EX_CONTEXT_K_ values.
+	 */
+	.ifc \function,handle_nmi
+	IRQ_DISABLE_ALL(r20)
+	.else
+	IRQ_DISABLE(r20, r21)
+	.endif
+	mtspr	INTERRUPT_CRITICAL_SECTION, zero
+
+	/* Load tp with our per-cpu offset. */
+#ifdef CONFIG_SMP
+	{
+	 mfspr	r20, SPR_SYSTEM_SAVE_K_0
+	 moveli	r21, hw2_last(__per_cpu_offset)
+	}
+	{
+	 shl16insli r21, r21, hw1(__per_cpu_offset)
+	 bfextu	r20, r20, 0, LOG2_THREAD_SIZE-1
+	}
+	shl16insli r21, r21, hw0(__per_cpu_offset)
+	shl3add	r20, r20, r21
+	ld	tp, r20
+#else
+	move	tp, zero
+#endif
+
 #ifdef __COLLECT_LINKER_FEEDBACK__
 	/*
 	 * Notify the feedback routines that we were in the
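The block that moves into this hunk (it was deleted from the earlier, pre-ICS-release location above) derives tp, the per-cpu base pointer, from the cpu number kept in the low LOG2_THREAD_SIZE bits of SPR_SYSTEM_SAVE_K_0 and the __per_cpu_offset[] table of 8-byte entries. A rough C rendering of those instructions might look as follows; the compute_tp name and the sys_save/cpu locals are made up for illustration, while __per_cpu_offset, SPR_SYSTEM_SAVE_K_0, LOG2_THREAD_SIZE, and the __insn_mfspr intrinsic come from the tile sources:

/* Illustrative sketch only; not taken from the kernel sources. */
static unsigned long compute_tp(void)
{
	unsigned long sys_save = __insn_mfspr(SPR_SYSTEM_SAVE_K_0);     /* mfspr            */
	unsigned long cpu = sys_save & ((1UL << LOG2_THREAD_SIZE) - 1); /* bfextu bits 0..N-1 */
	return __per_cpu_offset[cpu];                                   /* shl3add + ld     */
}

Doing this load only after INTERRUPT_CRITICAL_SECTION has been cleared keeps the new comment's invariant that nothing but the current stack is touched while ICS is set.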
@@ -513,21 +536,6 @@ intvec_\vecname:
 #endif
 
 	/*
-	 * we've captured enough state to the stack (including in
-	 * particular our EX_CONTEXT state) that we can now release
-	 * the interrupt critical section and replace it with our
-	 * standard "interrupts disabled" mask value.  This allows
-	 * synchronous interrupts (and profile interrupts) to punch
-	 * through from this point onwards.
-	 */
-	.ifc \function,handle_nmi
-	IRQ_DISABLE_ALL(r20)
-	.else
-	IRQ_DISABLE(r20, r21)
-	.endif
-	mtspr	INTERRUPT_CRITICAL_SECTION, zero
-
-	/*
 	 * Prepare the first 256 stack bytes to be rapidly accessible
 	 * without having to fetch the background data.
 	 */
@@ -736,9 +744,10 @@ STD_ENTRY(interrupt_return)
 	beqzt	r30, .Lrestore_regs
 	j	3f
 2:	TRACE_IRQS_ON
+	IRQ_ENABLE_LOAD(r20, r21)
 	movei	r0, 1
 	mtspr	INTERRUPT_CRITICAL_SECTION, r0
-	IRQ_ENABLE(r20, r21)
+	IRQ_ENABLE_APPLY(r20, r21)
 	beqzt	r30, .Lrestore_regs
 3:
 
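Previously the whole IRQ_ENABLE(r20, r21) ran after INTERRUPT_CRITICAL_SECTION had been set to 1; this hunk splits it so that IRQ_ENABLE_LOAD happens before the mtspr and IRQ_ENABLE_APPLY after, presumably so the half that reads the per-cpu enabled-interrupts mask from memory no longer runs under ICS, in line with the invariant spelled out in the comment added earlier in this patch. A schematic C sketch of the reordering; set_ics(), load_enabled_mask() and apply_mask() are invented stand-ins for the mtspr and the two macro halves, not kernel functions:

/* Illustrative only; all three helpers are hypothetical. */
extern void set_ics(int on);                   /* mtspr INTERRUPT_CRITICAL_SECTION */
extern unsigned long load_enabled_mask(void);  /* per-cpu mask load (memory access) */
extern void apply_mask(unsigned long mask);    /* interrupt-mask SPR write */

static void irq_enable_old(void)   /* before: the memory load happens under ICS */
{
	set_ics(1);
	apply_mask(load_enabled_mask());
}

static void irq_enable_new(void)   /* after: only the SPR write is under ICS */
{
	unsigned long mask = load_enabled_mask();  /* IRQ_ENABLE_LOAD  */
	set_ics(1);
	apply_mask(mask);                          /* IRQ_ENABLE_APPLY */
}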
@@ -755,7 +764,6 @@ STD_ENTRY(interrupt_return)
 	 * that will save some cycles if this turns out to be a syscall.
 	 */
 .Lrestore_regs:
-	FEEDBACK_REENTER(interrupt_return)  /* called from elsewhere */
 
 	/*
 	 * Rotate so we have one high bit and one low bit to test.
@@ -1249,7 +1257,7 @@ STD_ENTRY(fill_ra_stack)
 	int_hand     INT_UNALIGN_DATA, UNALIGN_DATA, int_unalign
 	int_hand     INT_DTLB_MISS, DTLB_MISS, do_page_fault
 	int_hand     INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault
-	int_hand     INT_IDN_FIREWALL, IDN_FIREWALL, bad_intr
+	int_hand     INT_IDN_FIREWALL, IDN_FIREWALL, do_hardwall_trap
 	int_hand     INT_UDN_FIREWALL, UDN_FIREWALL, do_hardwall_trap
 	int_hand     INT_TILE_TIMER, TILE_TIMER, do_timer_interrupt
 	int_hand     INT_IDN_TIMER, IDN_TIMER, bad_intr