Diffstat (limited to 'arch/powerpc/kernel/exceptions-64s.S')
 -rw-r--r-- | arch/powerpc/kernel/exceptions-64s.S | 314
 1 file changed, 88 insertions, 226 deletions
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 15c5a4f6de01..2d0868a4e2f0 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -12,6 +12,7 @@
  *
  */
 
+#include <asm/hw_irq.h>
 #include <asm/exception-64s.h>
 #include <asm/ptrace.h>
 
@@ -19,7 +20,7 @@
  * We layout physical memory as follows:
  * 0x0000 - 0x00ff : Secondary processor spin code
  * 0x0100 - 0x2fff : pSeries Interrupt prologs
- * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
+ * 0x3000 - 0x5fff : interrupt support common interrupt prologs
  * 0x6000 - 0x6fff : Initial (CPU0) segment table
  * 0x7000 - 0x7fff : FWNMI data area
  * 0x8000 -        : Early init and support code
@@ -356,34 +357,60 @@ do_stab_bolted_pSeries:
         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
 
 /*
- * An interrupt came in while soft-disabled; clear EE in SRR1,
- * clear paca->hard_enabled and return.
+ * An interrupt came in while soft-disabled. We set paca->irq_happened,
+ * then, if it was a decrementer interrupt, we bump the dec to max and
+ * and return, else we hard disable and return. This is called with
+ * r10 containing the value to OR to the paca field.
  */
-masked_interrupt:
-        stb     r10,PACAHARDIRQEN(r13)
-        mtcrf   0x80,r9
-        ld      r9,PACA_EXGEN+EX_R9(r13)
-        mfspr   r10,SPRN_SRR1
-        rldicl  r10,r10,48,1            /* clear MSR_EE */
-        rotldi  r10,r10,16
-        mtspr   SPRN_SRR1,r10
-        ld      r10,PACA_EXGEN+EX_R10(r13)
-        GET_SCRATCH0(r13)
-        rfid
+#define MASKED_INTERRUPT(_H)                            \
+masked_##_H##interrupt:                                 \
+        std     r11,PACA_EXGEN+EX_R11(r13);             \
+        lbz     r11,PACAIRQHAPPENED(r13);               \
+        or      r11,r11,r10;                            \
+        stb     r11,PACAIRQHAPPENED(r13);               \
+        andi.   r10,r10,PACA_IRQ_DEC;                   \
+        beq     1f;                                     \
+        lis     r10,0x7fff;                             \
+        ori     r10,r10,0xffff;                         \
+        mtspr   SPRN_DEC,r10;                           \
+        b       2f;                                     \
+1:      mfspr   r10,SPRN_##_H##SRR1;                    \
+        rldicl  r10,r10,48,1; /* clear MSR_EE */        \
+        rotldi  r10,r10,16;                             \
+        mtspr   SPRN_##_H##SRR1,r10;                    \
+2:      mtcrf   0x80,r9;                                \
+        ld      r9,PACA_EXGEN+EX_R9(r13);               \
+        ld      r10,PACA_EXGEN+EX_R10(r13);             \
+        ld      r11,PACA_EXGEN+EX_R11(r13);             \
+        GET_SCRATCH0(r13);                              \
+        ##_H##rfid;                                     \
         b       .
+
+        MASKED_INTERRUPT()
+        MASKED_INTERRUPT(H)
 
-masked_Hinterrupt:
-        stb     r10,PACAHARDIRQEN(r13)
-        mtcrf   0x80,r9
-        ld      r9,PACA_EXGEN+EX_R9(r13)
-        mfspr   r10,SPRN_HSRR1
-        rldicl  r10,r10,48,1            /* clear MSR_EE */
-        rotldi  r10,r10,16
-        mtspr   SPRN_HSRR1,r10
-        ld      r10,PACA_EXGEN+EX_R10(r13)
-        GET_SCRATCH0(r13)
-        hrfid
-        b       .
+/*
+ * Called from arch_local_irq_enable when an interrupt needs
+ * to be resent. r3 contains 0x500 or 0x900 to indicate which
+ * kind of interrupt. MSR:EE is already off. We generate a
+ * stackframe like if a real interrupt had happened.
+ *
+ * Note: While MSR:EE is off, we need to make sure that _MSR
+ * in the generated frame has EE set to 1 or the exception
+ * handler will not properly re-enable them.
+ */
+_GLOBAL(__replay_interrupt)
+        /* We are going to jump to the exception common code which
+         * will retrieve various register values from the PACA which
+         * we don't give a damn about, so we don't bother storing them.
+         */
+        mfmsr   r12
+        mflr    r11
+        mfcr    r9
+        ori     r12,r12,MSR_EE
+        andi.   r3,r3,0x0800
+        bne     decrementer_common
+        b       hardware_interrupt_common
 
 #ifdef CONFIG_PPC_PSERIES
 /*
@@ -458,14 +485,15 @@ machine_check_common:
         bl      .machine_check_exception
         b       .ret_from_except
 
-        STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
+        STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
+        STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
         STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
         STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
         STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
         STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
         STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
         STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
-        STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
+        STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception)
         STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
 #ifdef CONFIG_ALTIVEC
         STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
@@ -482,6 +510,9 @@ machine_check_common:
 system_call_entry:
         b       system_call_common
 
+ppc64_runlatch_on_trampoline:
+        b       .__ppc64_runlatch_on
+
 /*
  * Here we have detected that the kernel stack pointer is bad.
  * R9 contains the saved CR, r13 points to the paca,
@@ -555,6 +586,8 @@ data_access_common:
         mfspr   r10,SPRN_DSISR
         stw     r10,PACA_EXGEN+EX_DSISR(r13)
         EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
+        DISABLE_INTS
+        ld      r12,_MSR(r1)
         ld      r3,PACA_EXGEN+EX_DAR(r13)
         lwz     r4,PACA_EXGEN+EX_DSISR(r13)
         li      r5,0x300
@@ -569,6 +602,7 @@ h_data_storage_common:
         stw     r10,PACA_EXGEN+EX_DSISR(r13)
         EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
         bl      .save_nvgprs
+        DISABLE_INTS
         addi    r3,r1,STACK_FRAME_OVERHEAD
         bl      .unknown_exception
         b       .ret_from_except
@@ -577,6 +611,8 @@ h_data_storage_common:
         .globl instruction_access_common
 instruction_access_common:
         EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
+        DISABLE_INTS
+        ld      r12,_MSR(r1)
         ld      r3,_NIP(r1)
         andis.  r4,r12,0x5820
         li      r5,0x400
@@ -672,12 +708,6 @@ _GLOBAL(slb_miss_realmode)
         ld      r10,PACA_EXSLB+EX_LR(r13)
         ld      r3,PACA_EXSLB+EX_R3(r13)
         lwz     r9,PACA_EXSLB+EX_CCR(r13)        /* get saved CR */
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
-        ld      r11,PACALPPACAPTR(r13)
-        ld      r11,LPPACASRR0(r11)              /* get SRR0 value */
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif /* CONFIG_PPC_ISERIES */
 
         mtlr    r10
 
@@ -690,12 +720,6 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
         mtcrf   0x01,r9         /* slb_allocate uses cr0 and cr7 */
 .machine pop
 
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
-        mtspr   SPRN_SRR0,r11
-        mtspr   SPRN_SRR1,r12
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif /* CONFIG_PPC_ISERIES */
         ld      r9,PACA_EXSLB+EX_R9(r13)
         ld      r10,PACA_EXSLB+EX_R10(r13)
         ld      r11,PACA_EXSLB+EX_R11(r13)
@@ -704,13 +728,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
         rfid
         b       .       /* prevent speculative execution */
 
-2:
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
-        b       unrecov_slb
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif /* CONFIG_PPC_ISERIES */
-        mfspr   r11,SPRN_SRR0
+2:      mfspr   r11,SPRN_SRR0
         ld      r10,PACAKBASE(r13)
         LOAD_HANDLER(r10,unrecov_slb)
         mtspr   SPRN_SRR0,r10
@@ -727,20 +745,6 @@ unrecov_slb:
         bl      .unrecoverable_exception
         b       1b
 
-        .align  7
-        .globl hardware_interrupt_common
-        .globl hardware_interrupt_entry
-hardware_interrupt_common:
-        EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
-        FINISH_NAP
-hardware_interrupt_entry:
-        DISABLE_INTS
-BEGIN_FTR_SECTION
-        bl      .ppc64_runlatch_on
-END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
-        addi    r3,r1,STACK_FRAME_OVERHEAD
-        bl      .do_IRQ
-        b       .ret_from_except_lite
 
 #ifdef CONFIG_PPC_970_NAP
 power4_fixup_nap:
@@ -785,8 +789,8 @@ fp_unavailable_common:
         EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
         bne     1f                      /* if from user, just load it up */
         bl      .save_nvgprs
+        DISABLE_INTS
         addi    r3,r1,STACK_FRAME_OVERHEAD
-        ENABLE_INTS
         bl      .kernel_fp_unavailable_exception
         BUG_OPCODE
 1:      bl      .load_up_fpu
@@ -805,8 +809,8 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #endif
         bl      .save_nvgprs
+        DISABLE_INTS
         addi    r3,r1,STACK_FRAME_OVERHEAD
-        ENABLE_INTS
         bl      .altivec_unavailable_exception
         b       .ret_from_except
 
@@ -816,13 +820,14 @@ vsx_unavailable_common:
         EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
 #ifdef CONFIG_VSX
 BEGIN_FTR_SECTION
-        bne     .load_up_vsx
+        beq     1f
+        b       .load_up_vsx
 1:
 END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 #endif
         bl      .save_nvgprs
+        DISABLE_INTS
         addi    r3,r1,STACK_FRAME_OVERHEAD
-        ENABLE_INTS
         bl      .vsx_unavailable_exception
         b       .ret_from_except
 
@@ -831,66 +836,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 __end_handlers:
 
 /*
- * Return from an exception with minimal checks.
- * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
- * If interrupts have been enabled, or anything has been
- * done that might have changed the scheduling status of
- * any task or sent any task a signal, you should use
- * ret_from_except or ret_from_except_lite instead of this.
- */
-fast_exc_return_irq:            /* restores irq state too */
-        ld      r3,SOFTE(r1)
-        TRACE_AND_RESTORE_IRQ(r3);
-        ld      r12,_MSR(r1)
-        rldicl  r4,r12,49,63            /* get MSR_EE to LSB */
-        stb     r4,PACAHARDIRQEN(r13)   /* restore paca->hard_enabled */
-        b       1f
-
-        .globl  fast_exception_return
-fast_exception_return:
-        ld      r12,_MSR(r1)
-1:      ld      r11,_NIP(r1)
-        andi.   r3,r12,MSR_RI           /* check if RI is set */
-        beq-    unrecov_fer
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
-        andi.   r3,r12,MSR_PR
-        beq     2f
-        ACCOUNT_CPU_USER_EXIT(r3, r4)
-2:
-#endif
-
-        ld      r3,_CCR(r1)
-        ld      r4,_LINK(r1)
-        ld      r5,_CTR(r1)
-        ld      r6,_XER(r1)
-        mtcr    r3
-        mtlr    r4
-        mtctr   r5
-        mtxer   r6
-        REST_GPR(0, r1)
-        REST_8GPRS(2, r1)
-
-        mfmsr   r10
-        rldicl  r10,r10,48,1            /* clear EE */
-        rldicr  r10,r10,16,61           /* clear RI (LE is 0 already) */
-        mtmsrd  r10,1
-
-        mtspr   SPRN_SRR1,r12
-        mtspr   SPRN_SRR0,r11
-        REST_4GPRS(10, r1)
-        ld      r1,GPR1(r1)
-        rfid
-        b       .       /* prevent speculative execution */
-
-unrecov_fer:
-        bl      .save_nvgprs
-1:      addi    r3,r1,STACK_FRAME_OVERHEAD
-        bl      .unrecoverable_exception
-        b       1b
-
-
-/*
  * Hash table stuff
  */
         .align  7
@@ -912,28 +857,6 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
         lwz     r0,TI_PREEMPT(r11)      /* If we're in an "NMI" */
         andis.  r0,r0,NMI_MASK@h        /* (i.e. an irq when soft-disabled) */
         bne     77f                     /* then don't call hash_page now */
-
-        /*
-         * On iSeries, we soft-disable interrupts here, then
-         * hard-enable interrupts so that the hash_page code can spin on
-         * the hash_table_lock without problems on a shared processor.
-         */
-        DISABLE_INTS
-
-        /*
-         * Currently, trace_hardirqs_off() will be called by DISABLE_INTS
-         * and will clobber volatile registers when irq tracing is enabled
-         * so we need to reload them. It may be possible to be smarter here
-         * and move the irq tracing elsewhere but let's keep it simple for
-         * now
-         */
-#ifdef CONFIG_TRACE_IRQFLAGS
-        ld      r3,_DAR(r1)
-        ld      r4,_DSISR(r1)
-        ld      r5,_TRAP(r1)
-        ld      r12,_MSR(r1)
-        clrrdi  r5,r5,4
-#endif /* CONFIG_TRACE_IRQFLAGS */
         /*
          * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
          * accessing a userspace segment (even from the kernel). We assume
@@ -951,62 +874,25 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
          * r4 contains the required access permissions
          * r5 contains the trap number
          *
-         * at return r3 = 0 for success
+         * at return r3 = 0 for success, 1 for page fault, negative for error
          */
         bl      .hash_page              /* build HPTE if possible */
         cmpdi   r3,0                    /* see if hash_page succeeded */
 
-BEGIN_FW_FTR_SECTION
-        /*
-         * If we had interrupts soft-enabled at the point where the
-         * DSI/ISI occurred, and an interrupt came in during hash_page,
-         * handle it now.
-         * We jump to ret_from_except_lite rather than fast_exception_return
-         * because ret_from_except_lite will check for and handle pending
-         * interrupts if necessary.
-         */
-        beq     13f
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-
-BEGIN_FW_FTR_SECTION
-        /*
-         * Here we have interrupts hard-disabled, so it is sufficient
-         * to restore paca->{soft,hard}_enable and get out.
-         */
+        /* Success */
         beq     fast_exc_return_irq     /* Return from exception on success */
-END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
-
-        /* For a hash failure, we don't bother re-enabling interrupts */
-        ble-    12f
-
-        /*
-         * hash_page couldn't handle it, set soft interrupt enable back
-         * to what it was before the trap. Note that .arch_local_irq_restore
-         * handles any interrupts pending at this point.
-         */
-        ld      r3,SOFTE(r1)
-        TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
-        bl      .arch_local_irq_restore
-        b       11f
 
-/* We have a data breakpoint exception - handle it */
-handle_dabr_fault:
-        bl      .save_nvgprs
-        ld      r4,_DAR(r1)
-        ld      r5,_DSISR(r1)
-        addi    r3,r1,STACK_FRAME_OVERHEAD
-        bl      .do_dabr
-        b       .ret_from_except_lite
+        /* Error */
+        blt-    13f
 
 /* Here we have a page fault that hash_page can't handle. */
 handle_page_fault:
-        ENABLE_INTS
 11:     ld      r4,_DAR(r1)
         ld      r5,_DSISR(r1)
         addi    r3,r1,STACK_FRAME_OVERHEAD
         bl      .do_page_fault
         cmpdi   r3,0
-        beq+    13f
+        beq+    12f
         bl      .save_nvgprs
         mr      r5,r3
         addi    r3,r1,STACK_FRAME_OVERHEAD
@@ -1014,12 +900,20 @@ handle_page_fault:
         bl      .bad_page_fault
         b       .ret_from_except
 
-13:     b       .ret_from_except_lite
+/* We have a data breakpoint exception - handle it */
+handle_dabr_fault:
+        bl      .save_nvgprs
+        ld      r4,_DAR(r1)
+        ld      r5,_DSISR(r1)
+        addi    r3,r1,STACK_FRAME_OVERHEAD
+        bl      .do_dabr
+12:     b       .ret_from_except_lite
+
 
 /* We have a page fault that hash_page could handle but HV refused
  * the PTE insertion
  */
-12:     bl      .save_nvgprs
+13:     bl      .save_nvgprs
         mr      r5,r3
         addi    r3,r1,STACK_FRAME_OVERHEAD
         ld      r4,_DAR(r1)
@@ -1141,51 +1035,19 @@ _GLOBAL(do_stab_bolted)
         . = 0x7000
         .globl fwnmi_data_area
 fwnmi_data_area:
-#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
 
-/* iSeries does not use the FWNMI stuff, so it is safe to put
- * this here, even if we later allow kernels that will boot on
- * both pSeries and iSeries */
-#ifdef CONFIG_PPC_ISERIES
-        . = LPARMAP_PHYS
-        .globl xLparMap
-xLparMap:
-        .quad   HvEsidsToMap            /* xNumberEsids */
-        .quad   HvRangesToMap           /* xNumberRanges */
-        .quad   STAB0_PAGE              /* xSegmentTableOffs */
-        .zero   40                      /* xRsvd */
-        /* xEsids (HvEsidsToMap entries of 2 quads) */
-        .quad   PAGE_OFFSET_ESID        /* xKernelEsid */
-        .quad   PAGE_OFFSET_VSID        /* xKernelVsid */
-        .quad   VMALLOC_START_ESID      /* xKernelEsid */
-        .quad   VMALLOC_START_VSID      /* xKernelVsid */
-        /* xRanges (HvRangesToMap entries of 3 quads) */
-        .quad   HvPagesToMap            /* xPages */
-        .quad   0                       /* xOffset */
-        .quad   PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT) /* xVPN */
-
-#endif /* CONFIG_PPC_ISERIES */
-
-#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
 /* pseries and powernv need to keep the whole page from
  * 0x7000 to 0x8000 free for use by the firmware
  */
         . = 0x8000
 #endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
 
-/*
- * Space for CPU0's segment table.
- *
- * On iSeries, the hypervisor must fill in at least one entry before
- * we get control (with relocate on). The address is given to the hv
- * as a page number (see xLparMap above), so this must be at a
- * fixed address (the linker can't compute (u64)&initial_stab >>
- * PAGE_SHIFT).
- */
-        . = STAB0_OFFSET        /* 0x8000 */
+/* Space for CPU0's segment table */
+        .balign 4096
         .globl initial_stab
 initial_stab:
         .space  4096
+
 #ifdef CONFIG_PPC_POWERNV
 _GLOBAL(opal_mc_secondary_handler)
         HMT_MEDIUM
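
For readers unfamiliar with the lazy (soft) interrupt masking scheme this patch moves exceptions-64s.S to, here is a minimal, freestanding C sketch of the idea: an interrupt taken while soft-disabled is only recorded in paca->irq_happened by the masked_*interrupt stubs (the decrementer is additionally pushed out to its maximum), and the pending event is replayed through __replay_interrupt into decrementer_common or hardware_interrupt_common when interrupts are re-enabled. This is an illustration only, not the actual arch/powerpc/kernel/irq.c code; PACA_IRQ_DEC and the 0x500/0x900 vectors come from the diff above, while the PACA_IRQ_EE value and every helper name below are assumptions made up for the sketch.

/* Toy model of soft-masked interrupts with replay on re-enable (illustrative only). */
#include <stdio.h>

#define PACA_IRQ_EE   0x02U   /* assumed: external interrupt (0x500) fired while masked */
#define PACA_IRQ_DEC  0x08U   /* from the diff: decrementer (0x900) fired while masked   */

static struct {
        int soft_enabled;            /* 0 = soft-disabled, as after local_irq_disable() */
        unsigned int irq_happened;   /* events taken while masked, cf. paca->irq_happened */
} paca;

/* Rough equivalent of the masked_*interrupt stubs: just record the event. */
static void masked_interrupt(unsigned int reason)
{
        paca.irq_happened |= reason;
        /* the real stubs also bump the DEC or clear EE in (H)SRR1 before returning */
}

/* Stand-in for __replay_interrupt branching to the common handlers. */
static void replay_interrupt(unsigned int vector)
{
        /* 0x900 -> decrementer_common, 0x500 -> hardware_interrupt_common */
        printf("replaying vector 0x%x\n", vector);
}

/* Sketch of what re-enabling interrupts has to do once something is pending. */
static void local_irq_enable_model(void)
{
        unsigned int happened = paca.irq_happened;

        paca.irq_happened = 0;
        paca.soft_enabled = 1;
        if (happened & PACA_IRQ_DEC)
                replay_interrupt(0x900);
        if (happened & PACA_IRQ_EE)
                replay_interrupt(0x500);
}

int main(void)
{
        paca.soft_enabled = 0;              /* critical section: soft-disable */
        masked_interrupt(PACA_IRQ_DEC);     /* timer fires while soft-disabled */
        local_irq_enable_model();           /* pending event is replayed here  */
        return 0;
}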