author	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2012-04-10 03:21:35 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2012-04-10 03:21:35 -0400
commit	08f1ec8a594c60bf3856e3c45b6d15fd691d90bb (patch)
tree	a30ecbe79c57eb431e0f9fdce091fd9a8eb8e07b /arch/powerpc/kernel
parent	b1a808ff436343956a6ae63178ea1810c5e5a3a1 (diff)
powerpc: Fix page fault with lockdep regression
commit a546498f3bf9aac311c66f965186373aee2ca0b0 introduced a regression on 32-bit when irq tracing is enabled, by exposing an old bug in our irq tracing code for exception entry.

The code would save and restore some GPRs around the calls to the C lockdep code; however, it tries to be too smart for its own good and restores some of the GPRs from the exception frame (as saved there on exception entry).

For page faults, though, we replace those GPRs with the arguments to do_page_fault before we call transfer_to_handler, so restoring them from the exception frame is plain wrong in this case.

This was fine as long as we didn't touch the interrupt state when taking a page fault, but when I started doing so, it would trigger the lockdep calls and hit the bug.

This fixes it by cleaning up that code a bit. The code already created a small stack frame for the sake of backtraces, so let's make it a bit bigger and use it to save and restore the registers we care about.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
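For illustration only (not part of the commit): the failure mode can be modelled in plain C. Reloading register values from a snapshot taken at exception entry discards the do_page_fault() arguments that were placed in those registers afterwards, whereas spilling the live values to a scratch area around the tracing call, which is roughly what the patch does with its 32-byte frame, preserves them. The sketch below is a hypothetical userspace analogy; none of these names exist in the kernel.

/*
 * Minimal analogy of the bug and the fix -- not kernel code.
 * "exception_frame" stands in for the pt_regs saved at exception entry,
 * r3..r5 stand in for the live GPRs holding do_page_fault() arguments.
 */
#include <stdio.h>

struct exception_frame {		/* values saved at exception entry */
	unsigned long gpr3, gpr4, gpr5;
};

static void trace_hardirqs_off_stub(void)
{
	/* stands in for the C lockdep/tracing call that clobbers GPRs */
}

/* Buggy flow: reload from the entry-time snapshot, losing the arguments. */
static void call_handler_buggy(const struct exception_frame *f,
			       unsigned long r3, unsigned long r4,
			       unsigned long r5)
{
	trace_hardirqs_off_stub();
	r3 = f->gpr3;			/* stale: overwrites do_page_fault args */
	r4 = f->gpr4;
	r5 = f->gpr5;
	printf("buggy handler args: %lu %lu %lu\n", r3, r4, r5);
}

/* Fixed flow: spill the live values first, reload them after the call. */
static void call_handler_fixed(const struct exception_frame *f,
			       unsigned long r3, unsigned long r4,
			       unsigned long r5)
{
	unsigned long scratch[3] = { r3, r4, r5 };	/* stwu/stw */

	(void)f;			/* exception frame no longer trusted */
	trace_hardirqs_off_stub();
	r3 = scratch[0];		/* lwz: live values survive the call */
	r4 = scratch[1];
	r5 = scratch[2];
	printf("fixed handler args: %lu %lu %lu\n", r3, r4, r5);
}

int main(void)
{
	struct exception_frame f = { .gpr3 = 1, .gpr4 = 2, .gpr5 = 3 };

	/* The page-fault path has since loaded fresh arguments into r3-r5. */
	call_handler_buggy(&f, 10, 11, 12);	/* prints 1 2 3   (wrong) */
	call_handler_fixed(&f, 10, 11, 12);	/* prints 10 11 12 (right) */
	return 0;
}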
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--	arch/powerpc/kernel/entry_32.S | 39
1 file changed, 21 insertions, 18 deletions
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 3e57a00b8cb..ba3aeb4bc06 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -206,40 +206,43 @@ reenable_mmu: /* re-enable mmu so we can */
 	andi.	r10,r10,MSR_EE		/* Did EE change? */
 	beq	1f
 
-	/* Save handler and return address into the 2 unused words
-	 * of the STACK_FRAME_OVERHEAD (sneak sneak sneak). Everything
-	 * else can be recovered from the pt_regs except r3 which for
-	 * normal interrupts has been set to pt_regs and for syscalls
-	 * is an argument, so we temporarily use ORIG_GPR3 to save it
-	 */
-	stw	r9,8(r1)
-	stw	r11,12(r1)
-	stw	r3,ORIG_GPR3(r1)
 	/*
 	 * The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
 	 * If from user mode there is only one stack frame on the stack, and
 	 * accessing CALLER_ADDR1 will cause oops. So we need create a dummy
 	 * stack frame to make trace_hardirqs_off happy.
+	 *
+	 * This is handy because we also need to save a bunch of GPRs,
+	 * r3 can be different from GPR3(r1) at this point, r9 and r11
+	 * contains the old MSR and handler address respectively,
+	 * r4 & r5 can contain page fault arguments that need to be passed
+	 * along as well. r12, CCR, CTR, XER etc... are left clobbered as
+	 * they aren't useful past this point (aren't syscall arguments),
+	 * the rest is restored from the exception frame.
 	 */
+	stwu	r1,-32(r1)
+	stw	r9,8(r1)
+	stw	r11,12(r1)
+	stw	r3,16(r1)
+	stw	r4,20(r1)
+	stw	r5,24(r1)
 	andi.	r12,r12,MSR_PR
-	beq	11f
-	stwu	r1,-16(r1)
+	b	11f
 	bl	trace_hardirqs_off
-	addi	r1,r1,16
 	b	12f
-
 11:
 	bl	trace_hardirqs_off
 12:
+	lwz	r5,24(r1)
+	lwz	r4,20(r1)
+	lwz	r3,16(r1)
+	lwz	r11,12(r1)
+	lwz	r9,8(r1)
+	addi	r1,r1,32
 	lwz	r0,GPR0(r1)
-	lwz	r3,ORIG_GPR3(r1)
-	lwz	r4,GPR4(r1)
-	lwz	r5,GPR5(r1)
 	lwz	r6,GPR6(r1)
 	lwz	r7,GPR7(r1)
 	lwz	r8,GPR8(r1)
-	lwz	r9,8(r1)
-	lwz	r11,12(r1)
 1:	mtctr	r11
 	mtlr	r9
 	bctr				/* jump to handler */
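As a reading aid (not part of the patch): the stw/lwz offsets in the hunk above imply the following layout for the temporary 32-byte frame pushed by stwu r1,-32(r1). The struct and field names below are made up; the first two words are the standard PPC32 back-chain and LR-save header that every stack frame carries, and the trailing word appears to be alignment padding.

/* Annotation only, not from the patch: rough layout of the scratch
 * frame used around the trace_hardirqs_off() calls, reconstructed
 * from the stw/lwz offsets above plus the usual PPC32 frame header.
 */
#include <stddef.h>
#include <stdint.h>

struct reenable_mmu_scratch_frame {	/* hypothetical name */
	uint32_t back_chain;	/*  0: previous r1, written by stwu      */
	uint32_t lr_save;	/*  4: LR save word (unused here)        */
	uint32_t r9_save;	/*  8: saved r9, the old MSR             */
	uint32_t r11_save;	/* 12: saved r11, the handler address    */
	uint32_t r3_save;	/* 16: saved r3                          */
	uint32_t r4_save;	/* 20: saved r4 (page fault argument)    */
	uint32_t r5_save;	/* 24: saved r5 (page fault argument)    */
	uint32_t pad;		/* 28: keeps the frame at 32 bytes       */
};

_Static_assert(offsetof(struct reenable_mmu_scratch_frame, r9_save) == 8,
	       "offsets mirror the stw/lwz displacements in the hunk");
_Static_assert(sizeof(struct reenable_mmu_scratch_frame) == 32,
	       "matches the stwu r1,-32(r1) frame size");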