aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/include
diff options
context:
space:
mode:
authorMichael Neuling <mikey@neuling.org>2012-11-02 02:21:43 -0400
committerBenjamin Herrenschmidt <benh@kernel.crashing.org>2012-11-14 23:08:05 -0500
commitc1fb6816fb1b78dd94b673b0fdaa9a7a16e97bd1 (patch)
tree80a3f3b0fa7aa6a999b2f586f9201aea69da4ca7 /arch/powerpc/include
parent4700dfaf1e988b785bd9791064df92d3353e8b88 (diff)
powerpc: Add relocation on exception vector handlers
POWER8/v2.07 allows exceptions to be taken with the MMU still on. A new set of exception vectors is added at 0xc000_0000_0000_4xxx. When the HW takes us here, MSR IR/DR will be set already and we no longer need a costly RFID to turn the MMU back on again. The original 0x0 based exception vectors remain for when the HW can't leave the MMU on. Examples of this are when we can't trust the current MMU mappings, like when we are changing from guest to hypervisor (HV 0 -> 1) or when the MMU was off already. In these cases the HW will take us to the original 0x0 based exception vectors with the MMU off as before. This uses the new macros added previously to implement these new exception vectors at 0xc000_0000_0000_4xxx. We exit these exception vectors using mflr/blr (rather than mtspr SRR0/RFID), since we don't need the costly MMU switch anymore. This moves the __end_interrupts marker down past these new 0x4000 vectors since they will need to be copied down to 0x0 when the kernel is not at 0x0. Signed-off-by: Matt Evans <matt@ozlabs.org> Signed-off-by: Michael Neuling <mikey@neuling.org> Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--arch/powerpc/include/asm/exception-64s.h4
1 file changed, 3 insertions, 1 deletion
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 10787d3673ac..ad708dda3ba3 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -115,6 +115,7 @@
115 mfspr r10,SPRN_CFAR; \ 115 mfspr r10,SPRN_CFAR; \
116 std r10,area+EX_CFAR(r13); \ 116 std r10,area+EX_CFAR(r13); \
117 END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \ 117 END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \
118 SAVE_LR(r10, area); \
118 mfcr r9; \ 119 mfcr r9; \
119 extra(vec); \ 120 extra(vec); \
120 std r11,area+EX_R11(r13); \ 121 std r11,area+EX_R11(r13); \
@@ -215,6 +216,7 @@ do_kvm_##n: \
215 sth r1,PACA_TRAP_SAVE(r13); \ 216 sth r1,PACA_TRAP_SAVE(r13); \
216 std r3,area+EX_R3(r13); \ 217 std r3,area+EX_R3(r13); \
217 addi r3,r13,area; /* r3 -> where regs are saved*/ \ 218 addi r3,r13,area; /* r3 -> where regs are saved*/ \
219 RESTORE_LR(r1, area); \
218 b bad_stack; \ 220 b bad_stack; \
2193: std r9,_CCR(r1); /* save CR in stackframe */ \ 2213: std r9,_CCR(r1); /* save CR in stackframe */ \
220 std r11,_NIP(r1); /* save SRR0 in stackframe */ \ 222 std r11,_NIP(r1); /* save SRR0 in stackframe */ \
@@ -240,8 +242,8 @@ do_kvm_##n: \
240 ld r10,area+EX_CFAR(r13); \ 242 ld r10,area+EX_CFAR(r13); \
241 std r10,ORIG_GPR3(r1); \ 243 std r10,ORIG_GPR3(r1); \
242 END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \ 244 END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \
245 GET_LR(r9,area); /* Get LR, later save to stack */ \
243 ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \ 246 ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
244 mflr r9; /* save LR in stackframe */ \
245 std r9,_LINK(r1); \ 247 std r9,_LINK(r1); \
246 mfctr r10; /* save CTR in stackframe */ \ 248 mfctr r10; /* save CTR in stackframe */ \
247 std r10,_CTR(r1); \ 249 std r10,_CTR(r1); \