author     Benjamin Herrenschmidt <benh@kernel.crashing.org>   2012-03-07 00:48:45 -0500
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2012-03-08 18:55:08 -0500
commit     a546498f3bf9aac311c66f965186373aee2ca0b0 (patch)
tree       86fb9a778aba26df3810acd8e52a921a2d84489b /arch/powerpc/kernel/exceptions-64s.S
parent     1b70117924a4f254840ed70fbe3020d4519a1a9a (diff)
powerpc: Call do_page_fault() with interrupts off
We currently turn interrupts back to their previous state before
calling do_page_fault(). This can be annoying when debugging as
a bad fault will potentially have lost some processor state before
getting into the debugger.
We also end up calling some generic code, such as notify_page_fault(),
with interrupts enabled, which could be unexpected.
This changes our code to behave more like other architectures, and
makes the assembly entry code call into do_page_fault() with
interrupts disabled. Interrupts are then conditionally re-enabled
from within do_page_fault() in the same spot where x86 does it.
While there, add the might_sleep() test in the case of a successful
trylock of the mmap semaphore, again like x86.
Also fix a bug in the existing assembly where r12 (_MSR) could get
clobbered by C calls (the DTL accounting in the exception common
macro and DISABLE_INTS) in some cases.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
---
v2. Add the r12 clobber fix
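
For readers following the C side of this change, here is a minimal, hypothetical
sketch of the handler flow described above: do_page_fault() is now entered with
interrupts hard-disabled, re-enables them only when the interrupted context had
them on, and runs might_sleep() after a successful trylock of the mmap semaphore.
This is not the actual arch/powerpc/mm/fault.c change; the function name
sketch_do_page_fault() and the elided VMA/retry handling are placeholders for
illustration only.

/*
 * Illustrative sketch only -- not the real arch/powerpc/mm/fault.c code.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/irqflags.h>
#include <asm/ptrace.h>
#include <asm/reg.h>

static int sketch_do_page_fault(struct pt_regs *regs, unsigned long address,
				unsigned long error_code)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	/*
	 * The asm glue now runs DISABLE_INTS before getting here, so
	 * interrupts are off on entry; turn them back on only if the
	 * faulting context had MSR_EE set, mirroring what x86 does.
	 */
	if (regs->msr & MSR_EE)
		local_irq_enable();

	if (!down_read_trylock(&mm->mmap_sem)) {
		/* Contended: fall back to a blocking acquire. */
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * Trylock succeeded: the code below may still sleep, so
		 * warn now if we are in atomic context, like x86.
		 */
		might_sleep();
	}

	vma = find_vma(mm, address);
	if (!vma || vma->vm_start > address) {
		up_read(&mm->mmap_sem);
		return -EFAULT;		/* bad_area path, heavily simplified */
	}

	/* ... handle_mm_fault() and the usual retry logic elided ... */
	up_read(&mm->mmap_sem);
	return 0;
}

The assembly portion of the patch below mirrors this: DISABLE_INTS is done in the
exception common code before calling into C, and r12 (_MSR) is reloaded afterwards
because the C helpers reachable from DISABLE_INTS and the DTL accounting can
clobber that volatile register.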
Diffstat (limited to 'arch/powerpc/kernel/exceptions-64s.S')
-rw-r--r--   arch/powerpc/kernel/exceptions-64s.S   59
1 file changed, 17 insertions, 42 deletions
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 3af80e82830b..d8ff6d37fc4d 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -559,6 +559,8 @@ data_access_common:
 	mfspr	r10,SPRN_DSISR
 	stw	r10,PACA_EXGEN+EX_DSISR(r13)
 	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
+	DISABLE_INTS
+	ld	r12,_MSR(r1)
 	ld	r3,PACA_EXGEN+EX_DAR(r13)
 	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
 	li	r5,0x300
@@ -573,6 +575,7 @@ h_data_storage_common:
 	stw	r10,PACA_EXGEN+EX_DSISR(r13)
 	EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
 	bl	.save_nvgprs
+	DISABLE_INTS
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	.unknown_exception
 	b	.ret_from_except
@@ -581,6 +584,8 @@ h_data_storage_common:
 	.globl instruction_access_common
 instruction_access_common:
 	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
+	DISABLE_INTS
+	ld	r12,_MSR(r1)
 	ld	r3,_NIP(r1)
 	andis.	r4,r12,0x5820
 	li	r5,0x400
@@ -884,24 +889,6 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
 	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
 	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
 	bne	77f			/* then don't call hash_page now */
-
-	/* We run with interrupts both soft and hard disabled */
-	DISABLE_INTS
-
-	/*
-	 * Currently, trace_hardirqs_off() will be called by DISABLE_INTS
-	 * and will clobber volatile registers when irq tracing is enabled
-	 * so we need to reload them. It may be possible to be smarter here
-	 * and move the irq tracing elsewhere but let's keep it simple for
-	 * now
-	 */
-#ifdef CONFIG_TRACE_IRQFLAGS
-	ld	r3,_DAR(r1)
-	ld	r4,_DSISR(r1)
-	ld	r5,_TRAP(r1)
-	ld	r12,_MSR(r1)
-	clrrdi	r5,r5,4
-#endif /* CONFIG_TRACE_IRQFLAGS */
 	/*
 	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
 	 * accessing a userspace segment (even from the kernel). We assume
@@ -931,36 +918,16 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
 	beq	fast_exc_return_irq	/* Return from exception on success */
 
 	/* For a hash failure, we don't bother re-enabling interrupts */
-	ble-	12f
-
-	/*
-	 * hash_page couldn't handle it, set soft interrupt enable back
-	 * to what it was before the trap. Note that .arch_local_irq_restore
-	 * handles any interrupts pending at this point.
-	 */
-	ld	r3,SOFTE(r1)
-	TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
-	bl	.arch_local_irq_restore
-	b	11f
-
-	/* We have a data breakpoint exception - handle it */
-handle_dabr_fault:
-	bl	.save_nvgprs
-	ld	r4,_DAR(r1)
-	ld	r5,_DSISR(r1)
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.do_dabr
-	b	.ret_from_except_lite
+	ble-	13f
 
 	/* Here we have a page fault that hash_page can't handle. */
 handle_page_fault:
-	ENABLE_INTS
 11:	ld	r4,_DAR(r1)
 	ld	r5,_DSISR(r1)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	.do_page_fault
 	cmpdi	r3,0
-	beq+	13f
+	beq+	12f
 	bl	.save_nvgprs
 	mr	r5,r3
 	addi	r3,r1,STACK_FRAME_OVERHEAD
@@ -968,12 +935,20 @@ handle_page_fault:
 	bl	.bad_page_fault
 	b	.ret_from_except
 
-13:	b	.ret_from_except_lite
+/* We have a data breakpoint exception - handle it */
+handle_dabr_fault:
+	bl	.save_nvgprs
+	ld	r4,_DAR(r1)
+	ld	r5,_DSISR(r1)
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.do_dabr
+12:	b	.ret_from_except_lite
+
 
 /* We have a page fault that hash_page could handle but HV refused
  * the PTE insertion
  */
-12:	bl	.save_nvgprs
+13:	bl	.save_nvgprs
 	mr	r5,r3
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	ld	r4,_DAR(r1)