author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-06-17 13:43:59 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-06-26 00:37:27 -0400
commit		5d38902c483881645ba16058cffaa478b81e5cfa (patch)
tree		1ed21b6a245c772a6d86cac7cd8ebe566dd970bb
parent		04a85d1234d7e1682a612565e663e6b760918643 (diff)
powerpc: Add irqtrace support for 32-bit powerpc
Based on initial work from: Dale Farnsworth <dale@farnsworth.org>

Add the low-level irq tracing hooks for 32-bit powerpc needed to enable
full lockdep functionality.

The approach taken to deal with the code in entry_32.S is that we don't
trace all the transitions of MSR:EE when we just turn it off to peek at
TI_FLAGS without races; we only trace when we are calling into C code or
returning from exceptions with a state that has changed from what lockdep
thinks it is.

There's a little bugger though: if we take an exception that keeps
interrupts enabled (such as an alignment exception) while interrupts are
enabled, we will spuriously call trace_hardirqs_on() on the way back.
Not a big deal, but getting rid of it would require remembering in
pt_regs that the exception was one of the types that kept interrupts
enabled, which we don't know at this stage. (Well, we could test all
cases of regs->trap, but that sucks too much.)

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Tested-by: Kumar Gala <galak@kernel.crashing.org>
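For context: with CONFIG_TRACE_IRQFLAGS enabled, the generic
include/linux/irqflags.h layer wraps each architecture's raw_* primitives
with lockdep's trace_hardirqs_on()/trace_hardirqs_off() hooks, roughly as
in the simplified sketch below (not the verbatim kernel header). This is
what motivates renaming the ppc32 primitives to raw_* in hw_irq.h and
adding raw_irqs_disabled_flags():

	#define local_irq_enable()				\
		do {						\
			trace_hardirqs_on();			\
			raw_local_irq_enable();			\
		} while (0)

	#define local_irq_disable()				\
		do {						\
			raw_local_irq_disable();		\
			trace_hardirqs_off();			\
		} while (0)

	#define local_irq_save(flags)				\
		do {						\
			raw_local_irq_save(flags);		\
			trace_hardirqs_off();			\
		} while (0)

	#define local_irq_restore(flags)			\
		do {						\
			if (raw_irqs_disabled_flags(flags)) {	\
				raw_local_irq_restore(flags);	\
				trace_hardirqs_off();		\
			} else {				\
				trace_hardirqs_on();		\
				raw_local_irq_restore(flags);	\
			}					\
		} while (0)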
-rw-r--r--  arch/powerpc/Kconfig               |   1
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h  |  20
-rw-r--r--  arch/powerpc/kernel/entry_32.S     | 127
-rw-r--r--  arch/powerpc/kernel/setup_32.c     |   2
4 files changed, 135 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index bf6cedfa05db..d00131ca0835 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -62,7 +62,6 @@ config HAVE_LATENCYTOP_SUPPORT
 
 config TRACE_IRQFLAGS_SUPPORT
 	bool
-	depends on PPC64
 	default y
 
 config LOCKDEP_SUPPORT
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 867ab8ed69b3..8b505eaaa38a 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -68,13 +68,13 @@ static inline int irqs_disabled_flags(unsigned long flags)
 
 #if defined(CONFIG_BOOKE)
 #define SET_MSR_EE(x)	mtmsr(x)
-#define local_irq_restore(flags)	__asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
+#define raw_local_irq_restore(flags)	__asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
 #else
 #define SET_MSR_EE(x)	mtmsr(x)
-#define local_irq_restore(flags)	mtmsr(flags)
+#define raw_local_irq_restore(flags)	mtmsr(flags)
 #endif
 
-static inline void local_irq_disable(void)
+static inline void raw_local_irq_disable(void)
 {
 #ifdef CONFIG_BOOKE
 	__asm__ __volatile__("wrteei 0": : :"memory");
@@ -86,7 +86,7 @@ static inline void local_irq_disable(void)
 #endif
 }
 
-static inline void local_irq_enable(void)
+static inline void raw_local_irq_enable(void)
 {
 #ifdef CONFIG_BOOKE
 	__asm__ __volatile__("wrteei 1": : :"memory");
@@ -98,7 +98,7 @@ static inline void local_irq_enable(void)
 #endif
 }
 
-static inline void local_irq_save_ptr(unsigned long *flags)
+static inline void raw_local_irq_save_ptr(unsigned long *flags)
 {
 	unsigned long msr;
 	msr = mfmsr();
@@ -110,12 +110,12 @@ static inline void local_irq_save_ptr(unsigned long *flags)
 #endif
 }
 
-#define local_save_flags(flags)	((flags) = mfmsr())
-#define local_irq_save(flags)	local_irq_save_ptr(&flags)
-#define irqs_disabled()		((mfmsr() & MSR_EE) == 0)
+#define raw_local_save_flags(flags)	((flags) = mfmsr())
+#define raw_local_irq_save(flags)	raw_local_irq_save_ptr(&flags)
+#define raw_irqs_disabled()		((mfmsr() & MSR_EE) == 0)
+#define raw_irqs_disabled_flags(flags)	(((flags) & MSR_EE) == 0)
 
-#define hard_irq_enable()	local_irq_enable()
-#define hard_irq_disable()	local_irq_disable()
+#define hard_irq_disable()	raw_local_irq_disable()
 
 static inline int irqs_disabled_flags(unsigned long flags)
 {
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 4dd38f129153..3cadba60a4b6 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -191,11 +191,49 @@ transfer_to_handler_cont:
 	mflr	r9
 	lwz	r11,0(r9)		/* virtual address of handler */
 	lwz	r9,4(r9)		/* where to go when done */
+#ifdef CONFIG_TRACE_IRQFLAGS
+	lis	r12,reenable_mmu@h
+	ori	r12,r12,reenable_mmu@l
+	mtspr	SPRN_SRR0,r12
+	mtspr	SPRN_SRR1,r10
+	SYNC
+	RFI
+reenable_mmu:				/* re-enable mmu so we can */
+	mfmsr	r10
+	lwz	r12,_MSR(r1)
+	xor	r10,r10,r12
+	andi.	r10,r10,MSR_EE		/* Did EE change? */
+	beq	1f
+
+	/* Save handler and return address into the 2 unused words
+	 * of the STACK_FRAME_OVERHEAD (sneak sneak sneak). Everything
+	 * else can be recovered from the pt_regs except r3 which for
+	 * normal interrupts has been set to pt_regs and for syscalls
+	 * is an argument, so we temporarily use ORIG_GPR3 to save it
+	 */
+	stw	r9,8(r1)
+	stw	r11,12(r1)
+	stw	r3,ORIG_GPR3(r1)
+	bl	trace_hardirqs_off
+	lwz	r0,GPR0(r1)
+	lwz	r3,ORIG_GPR3(r1)
+	lwz	r4,GPR4(r1)
+	lwz	r5,GPR5(r1)
+	lwz	r6,GPR6(r1)
+	lwz	r7,GPR7(r1)
+	lwz	r8,GPR8(r1)
+	lwz	r9,8(r1)
+	lwz	r11,12(r1)
+1:	mtctr	r11
+	mtlr	r9
+	bctr				/* jump to handler */
+#else /* CONFIG_TRACE_IRQFLAGS */
 	mtspr	SPRN_SRR0,r11
 	mtspr	SPRN_SRR1,r10
 	mtlr	r9
 	SYNC
 	RFI				/* jump to handler, enable MMU */
+#endif /* CONFIG_TRACE_IRQFLAGS */
 
 #if defined (CONFIG_6xx) || defined(CONFIG_E500)
 4:	rlwinm	r12,r12,0,~_TLF_NAPPING
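In C-like pseudocode, the reenable_mmu trampoline above amounts to the
following (names borrowed from the assembly; a sketch only, since the real
code must hand-save the few registers the C call would clobber):

	/* RFI lands here with the MMU back on, so calling C is safe */
	if ((mfmsr() ^ regs->msr) & MSR_EE) {
		/* EE changed vs. the interrupted state lockdep still
		 * believes in: stash r9/r11 in the two spare stack-frame
		 * words and r3 in ORIG_GPR3, then record the disable */
		trace_hardirqs_off();
		/* reload r0 and r3-r8, recover r9/r11, fall through */
	}
	/* jump to the handler, as the non-tracing path does with RFI */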
@@ -251,6 +289,31 @@ _GLOBAL(DoSyscall)
 #ifdef SHOW_SYSCALLS
 	bl	do_show_syscall
 #endif /* SHOW_SYSCALLS */
+#ifdef CONFIG_TRACE_IRQFLAGS
+	/* Return from syscalls can (and generally will) hard enable
+	 * interrupts. You aren't supposed to call a syscall with
+	 * interrupts disabled in the first place. However, to ensure
+	 * that we get it right vs. lockdep if it happens, we force
+	 * that hard enable here with appropriate tracing if we see
+	 * that we have been called with interrupts off
+	 */
+	mfmsr	r11
+	andi.	r12,r11,MSR_EE
+	bne+	1f
+	/* We came in with interrupts disabled, we enable them now */
+	bl	trace_hardirqs_on
+	mfmsr	r11
+	lwz	r0,GPR0(r1)
+	lwz	r3,GPR3(r1)
+	lwz	r4,GPR4(r1)
+	ori	r11,r11,MSR_EE
+	lwz	r5,GPR5(r1)
+	lwz	r6,GPR6(r1)
+	lwz	r7,GPR7(r1)
+	lwz	r8,GPR8(r1)
+	mtmsr	r11
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
 	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
 	lwz	r11,TI_FLAGS(r10)
 	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
@@ -275,6 +338,7 @@ ret_from_syscall:
 	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
 	/* disable interrupts so current_thread_info()->flags can't change */
 	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
+	/* Note: We don't bother telling lockdep about it */
 	SYNC
 	MTMSRD(r10)
 	lwz	r9,TI_FLAGS(r12)
@@ -288,6 +352,19 @@ ret_from_syscall:
 	oris	r11,r11,0x1000	/* Set SO bit in CR */
 	stw	r11,_CCR(r1)
 syscall_exit_cont:
+	lwz	r8,_MSR(r1)
+#ifdef CONFIG_TRACE_IRQFLAGS
+	/* If we are going to return from the syscall with interrupts
+	 * off, we trace that here. It shouldn't happen, but we want
+	 * to catch the bugger if it does, right?
+	 */
+	andi.	r10,r8,MSR_EE
+	bne+	1f
+	stw	r3,GPR3(r1)
+	bl	trace_hardirqs_off
+	lwz	r3,GPR3(r1)
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
 	/* If the process has its own DBCR0 value, load it up.  The internal
 	   debug mode bit tells us that dbcr0 should be loaded. */
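Expressed in C, the syscall-exit check above is roughly (illustrative;
r3 carries the syscall return value and is live across the call, hence
the save/reload around it):

	if (!(regs->msr & MSR_EE)) {
		/* Returning with IRQs off shouldn't happen for a
		 * syscall, but keep lockdep's view honest if it does */
		trace_hardirqs_off();
	}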
@@ -311,7 +388,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
 	mtlr	r4
 	mtcr	r5
 	lwz	r7,_NIP(r1)
-	lwz	r8,_MSR(r1)
 	FIX_SRR1(r8, r0)
 	lwz	r2,GPR2(r1)
 	lwz	r1,GPR1(r1)
@@ -394,7 +470,9 @@ syscall_exit_work:
 	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
 	beq	ret_from_except
 
-	/* Re-enable interrupts */
+	/* Re-enable interrupts. There is no need to trace that with
+	 * lockdep as we are supposed to have IRQs on at this point
+	 */
 	ori	r10,r10,MSR_EE
 	SYNC
 	MTMSRD(r10)
@@ -705,6 +783,7 @@ ret_from_except:
 	/* Hard-disable interrupts so that current_thread_info()->flags
 	 * can't change between when we test it and when we return
 	 * from the interrupt. */
+	/* Note: We don't bother telling lockdep about it */
 	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
 	SYNC			/* Some chip revs have problems here... */
 	MTMSRD(r10)		/* disable interrupts */
@@ -744,11 +823,24 @@ resume_kernel:
 	beq+	restore
 	andi.	r0,r3,MSR_EE	/* interrupts off? */
 	beq	restore		/* don't schedule if so */
+#ifdef CONFIG_TRACE_IRQFLAGS
+	/* Lockdep thinks irqs are enabled, we need to call
+	 * preempt_schedule_irq with IRQs off, so we inform lockdep
+	 * now that we -did- turn them off already
+	 */
+	bl	trace_hardirqs_off
+#endif
1:	bl	preempt_schedule_irq
 	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
 	lwz	r3,TI_FLAGS(r9)
 	andi.	r0,r3,_TIF_NEED_RESCHED
 	bne-	1b
+#ifdef CONFIG_TRACE_IRQFLAGS
+	/* And now, to properly rebalance the above, we tell lockdep they
+	 * are being turned back on, which will happen when we return
+	 */
+	bl	trace_hardirqs_on
+#endif
 #else
 resume_kernel:
 #endif /* CONFIG_PREEMPT */
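The preemption path now brackets preempt_schedule_irq() like this (an
illustrative C rendering of the assembly above):

	trace_hardirqs_off();		/* IRQs really are off; sync lockdep */
	do {
		preempt_schedule_irq();	/* must be entered with IRQs off */
	} while (test_thread_flag(TIF_NEED_RESCHED));
	trace_hardirqs_on();		/* they come back on when we return */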
@@ -765,6 +857,28 @@ restore:
 	stw	r6,icache_44x_need_flush@l(r4)
1:
 #endif  /* CONFIG_44x */
+
+	lwz	r9,_MSR(r1)
+#ifdef CONFIG_TRACE_IRQFLAGS
+	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
+	 * off in this assembly code while peeking at TI_FLAGS() and such. However
+	 * we need to inform it if the exception turned interrupts off, and we
+	 * are about to turn them back on.
+	 *
+	 * The problem here sadly is that we don't know whether the exception was
+	 * one that turned interrupts off or not. So we always tell lockdep about
+	 * turning them on here when we go back to wherever we came from with EE
+	 * on, even if that may mean some redundant calls being tracked. Maybe later
+	 * we could encode what the exception did somewhere or test the exception
+	 * type in the pt_regs but that sounds like overkill
+	 */
+	andi.	r10,r9,MSR_EE
+	beq	1f
+	bl	trace_hardirqs_on
+	lwz	r9,_MSR(r1)
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
+
 	lwz	r0,GPR0(r1)
 	lwz	r2,GPR2(r1)
 	REST_4GPRS(3, r1)
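This hunk is where the commit message's caveat bites: the exit path cannot
tell whether this particular exception disabled interrupts, so it
rebalances unconditionally whenever it returns with EE set. Roughly, in C:

	if (regs->msr & MSR_EE) {
		/* If the exception never turned IRQs off (e.g. an
		 * alignment exception taken with IRQs on), lockdep
		 * already thinks they are on and this call is the
		 * spurious-but-harmless one */
		trace_hardirqs_on();
	}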
@@ -782,7 +896,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
 	stwcx.	r0,0,r1			/* to clear the reservation */
 
 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
-	lwz	r9,_MSR(r1)
 	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
 	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */
 
@@ -805,7 +918,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
 	MTMSRD(r10)		/* clear the RI bit */
 	.globl exc_exit_restart
 exc_exit_restart:
-	lwz	r9,_MSR(r1)
 	lwz	r12,_NIP(r1)
 	FIX_SRR1(r9,r10)
 	mtspr	SPRN_SRR0,r12
@@ -1035,11 +1147,18 @@ do_work:			/* r10 contains MSR_KERNEL here */
 	beq	do_user_signal
 
do_resched:			/* r10 contains MSR_KERNEL here */
+	/* Note: We don't need to inform lockdep that we are enabling
+	 * interrupts here. As far as it knows, they are already enabled
+	 */
 	ori	r10,r10,MSR_EE
 	SYNC
 	MTMSRD(r10)		/* hard-enable interrupts */
 	bl	schedule
recheck:
+	/* Note: And we don't tell it we are disabling them again
+	 * either. Those disable/enable cycles used to peek at
+	 * TI_FLAGS aren't advertised.
+	 */
 	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
 	SYNC
 	MTMSRD(r10)		/* disable interrupts */
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 1d154248cf40..e1e3059cf34b 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -119,6 +119,8 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
  */
notrace void __init machine_init(unsigned long dt_ptr)
{
+	lockdep_init();
+
 	/* Enable early debugging if any specified (see udbg.h) */
 	udbg_early_init();
 