author	Paul Mackerras <paulus@samba.org>	2008-08-29 21:39:26 -0400
committer	Paul Mackerras <paulus@samba.org>	2008-09-15 14:08:06 -0400
commit	9a95516740c924675d52c472d7d170c62eab176c (patch)
tree	33e6a5435060526f9297076f4a6a6f442cd59847 /arch
parent	cf00085d8045cddd80a8aabad97de96fa8131793 (diff)
powerpc: Rearrange head_64.S to move interrupt handler code to the beginning
This rearranges head_64.S so that we have all the first-level exception
prologs together starting at 0x100, followed by all the second-level
handlers that are invoked from the first-level prologs, followed by
other code. This makes no functional change, but it will make the
subsequent changes for relocatable kernel support easier.
Signed-off-by: Paul Mackerras <paulus@samba.org>
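
For orientation, here is a simplified sketch of the layout head_64.S ends up with after this patch, pieced together from the hunks below. It is an illustration only: bare labels stand in for the macro-generated prologs and handlers (EXCEPTION_PROLOG_PSERIES and friends), and only a few representative symbols are shown.

/* Illustrative outline only -- not the literal contents of head_64.S. */

	/* 1. First-level exception prologs at the fixed vectors from
	 *    0x100 up, now including the FWNMI entry points, all placed
	 *    before __end_interrupts.
	 */
system_reset_fwnmi:
machine_check_fwnmi:

	.align	7
	.globl	__end_interrupts
__end_interrupts:

	/* 2. Second-level handlers invoked from the prologs above
	 *    (the *_common routines, e.g. vsx_unavailable_common),
	 *    ending at __end_handlers.
	 */
vsx_unavailable_common:

	.align	7
	.globl	__end_handlers
__end_handlers:

	/* 3. Other code: fast_exception_return, load_up_altivec,
	 *    load_up_vsx, and the rest.
	 */
fast_exception_return: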
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/kernel/head_64.S	203
1 file changed, 106 insertions(+), 97 deletions(-)
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index cc8fb474d520..27935d1ab6a1 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -325,16 +325,32 @@ do_stab_bolted_pSeries:
 	mfspr	r12,SPRN_SPRG2
 	EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
 
+#ifdef CONFIG_PPC_PSERIES
+/*
+ * Vectors for the FWNMI option.  Share common code.
+ */
+	.globl system_reset_fwnmi
+	.align 7
+system_reset_fwnmi:
+	HMT_MEDIUM
+	mtspr	SPRN_SPRG1,r13		/* save r13 */
+	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
+
+	.globl machine_check_fwnmi
+	.align 7
+machine_check_fwnmi:
+	HMT_MEDIUM
+	mtspr	SPRN_SPRG1,r13		/* save r13 */
+	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
+
+#endif /* CONFIG_PPC_PSERIES */
+
+#ifdef __DISABLED__
 /*
- * We have some room here  we use that to put
- * the peries slb miss user trampoline code so it's reasonably
- * away from slb_miss_user_common to avoid problems with rfid
- *
  * This is used for when the SLB miss handler has to go virtual,
  * which doesn't happen for now anymore but will once we re-implement
  * dynamic VSIDs for shared page tables
  */
-#ifdef __DISABLED__
 slb_miss_user_pseries:
 	std	r10,PACA_EXGEN+EX_R10(r13)
 	std	r11,PACA_EXGEN+EX_R11(r13)
@@ -357,25 +373,14 @@ slb_miss_user_pseries:
 	b	.				/* prevent spec. execution */
 #endif /* __DISABLED__ */
 
-#ifdef CONFIG_PPC_PSERIES
+	.align	7
+	.globl	__end_interrupts
+__end_interrupts:
+
 /*
- * Vectors for the FWNMI option.  Share common code.
+ * Code from here down to __end_handlers is invoked from the
+ * exception prologs above.
  */
-	.globl system_reset_fwnmi
-	.align 7
-system_reset_fwnmi:
-	HMT_MEDIUM
-	mtspr	SPRN_SPRG1,r13		/* save r13 */
-	EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(PACA_EXGEN, system_reset_common)
-
-	.globl machine_check_fwnmi
-	.align 7
-machine_check_fwnmi:
-	HMT_MEDIUM
-	mtspr	SPRN_SPRG1,r13		/* save r13 */
-	EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(PACA_EXMC, machine_check_common)
-
-#endif /* CONFIG_PPC_PSERIES */
 
 /*** Common interrupt handlers ***/
 
@@ -457,65 +462,6 @@ bad_stack:
 	b	1b
 
 /*
- * Return from an exception with minimal checks.
- * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
- * If interrupts have been enabled, or anything has been
- * done that might have changed the scheduling status of
- * any task or sent any task a signal, you should use
- * ret_from_except or ret_from_except_lite instead of this.
- */
-fast_exc_return_irq:			/* restores irq state too */
-	ld	r3,SOFTE(r1)
-	TRACE_AND_RESTORE_IRQ(r3);
-	ld	r12,_MSR(r1)
-	rldicl	r4,r12,49,63		/* get MSR_EE to LSB */
-	stb	r4,PACAHARDIRQEN(r13)	/* restore paca->hard_enabled */
-	b	1f
-
-	.globl	fast_exception_return
-fast_exception_return:
-	ld	r12,_MSR(r1)
-1:	ld	r11,_NIP(r1)
-	andi.	r3,r12,MSR_RI		/* check if RI is set */
-	beq-	unrecov_fer
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
-	andi.	r3,r12,MSR_PR
-	beq	2f
-	ACCOUNT_CPU_USER_EXIT(r3, r4)
-2:
-#endif
-
-	ld	r3,_CCR(r1)
-	ld	r4,_LINK(r1)
-	ld	r5,_CTR(r1)
-	ld	r6,_XER(r1)
-	mtcr	r3
-	mtlr	r4
-	mtctr	r5
-	mtxer	r6
-	REST_GPR(0, r1)
-	REST_8GPRS(2, r1)
-
-	mfmsr	r10
-	rldicl	r10,r10,48,1		/* clear EE */
-	rldicr	r10,r10,16,61		/* clear RI (LE is 0 already) */
-	mtmsrd	r10,1
-
-	mtspr	SPRN_SRR1,r12
-	mtspr	SPRN_SRR0,r11
-	REST_4GPRS(10, r1)
-	ld	r1,GPR1(r1)
-	rfid
-	b	.	/* prevent speculative execution */
-
-unrecov_fer:
-	bl	.save_nvgprs
-1:	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.unrecoverable_exception
-	b	1b
-
-/*
  * Here r13 points to the paca, r9 contains the saved CR,
  * SRR0 and SRR1 are saved in r11 and r12,
  * r9 - r13 are saved in paca->exgen.
@@ -766,6 +712,85 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	bl	.altivec_unavailable_exception
 	b	.ret_from_except
 
+	.align	7
+	.globl	vsx_unavailable_common
+vsx_unavailable_common:
+	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+	bne	.load_up_vsx
+1:
+END_FTR_SECTION_IFSET(CPU_FTR_VSX)
+#endif
+	bl	.save_nvgprs
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	ENABLE_INTS
+	bl	.vsx_unavailable_exception
+	b	.ret_from_except
+
+	.align	7
+	.globl	__end_handlers
+__end_handlers:
+
+/*
+ * Return from an exception with minimal checks.
+ * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
+ * If interrupts have been enabled, or anything has been
+ * done that might have changed the scheduling status of
+ * any task or sent any task a signal, you should use
+ * ret_from_except or ret_from_except_lite instead of this.
+ */
+fast_exc_return_irq:			/* restores irq state too */
+	ld	r3,SOFTE(r1)
+	TRACE_AND_RESTORE_IRQ(r3);
+	ld	r12,_MSR(r1)
+	rldicl	r4,r12,49,63		/* get MSR_EE to LSB */
+	stb	r4,PACAHARDIRQEN(r13)	/* restore paca->hard_enabled */
+	b	1f
+
+	.globl	fast_exception_return
+fast_exception_return:
+	ld	r12,_MSR(r1)
+1:	ld	r11,_NIP(r1)
+	andi.	r3,r12,MSR_RI		/* check if RI is set */
+	beq-	unrecov_fer
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	andi.	r3,r12,MSR_PR
+	beq	2f
+	ACCOUNT_CPU_USER_EXIT(r3, r4)
+2:
+#endif
+
+	ld	r3,_CCR(r1)
+	ld	r4,_LINK(r1)
+	ld	r5,_CTR(r1)
+	ld	r6,_XER(r1)
+	mtcr	r3
+	mtlr	r4
+	mtctr	r5
+	mtxer	r6
+	REST_GPR(0, r1)
+	REST_8GPRS(2, r1)
+
+	mfmsr	r10
+	rldicl	r10,r10,48,1		/* clear EE */
+	rldicr	r10,r10,16,61		/* clear RI (LE is 0 already) */
+	mtmsrd	r10,1
+
+	mtspr	SPRN_SRR1,r12
+	mtspr	SPRN_SRR0,r11
+	REST_4GPRS(10, r1)
+	ld	r1,GPR1(r1)
+	rfid
+	b	.	/* prevent speculative execution */
+
+unrecov_fer:
+	bl	.save_nvgprs
+1:	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.unrecoverable_exception
+	b	1b
+
 #ifdef CONFIG_ALTIVEC
 /*
  * load_up_altivec(unused, unused, tsk)
@@ -840,22 +865,6 @@ _STATIC(load_up_altivec)
 	blr
 #endif /* CONFIG_ALTIVEC */
 
-	.align	7
-	.globl	vsx_unavailable_common
-vsx_unavailable_common:
-	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
-	bne	.load_up_vsx
-1:
-END_FTR_SECTION_IFSET(CPU_FTR_VSX)
-#endif
-	bl	.save_nvgprs
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	ENABLE_INTS
-	bl	.vsx_unavailable_exception
-	b	.ret_from_except
-
 #ifdef CONFIG_VSX
 /*
  * load_up_vsx(unused, unused, tsk)