Diffstat (limited to 'arch/powerpc/kernel/entry_32.S')
-rw-r--r--  arch/powerpc/kernel/entry_32.S | 127
1 file changed, 123 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 4dd38f129153..3cadba60a4b6 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -191,11 +191,49 @@ transfer_to_handler_cont:
 	mflr	r9
 	lwz	r11,0(r9)		/* virtual address of handler */
 	lwz	r9,4(r9)		/* where to go when done */
+#ifdef CONFIG_TRACE_IRQFLAGS
+	lis	r12,reenable_mmu@h
+	ori	r12,r12,reenable_mmu@l
+	mtspr	SPRN_SRR0,r12
+	mtspr	SPRN_SRR1,r10
+	SYNC
+	RFI
+reenable_mmu:				/* re-enable mmu so we can */
+	mfmsr	r10
+	lwz	r12,_MSR(r1)
+	xor	r10,r10,r12
+	andi.	r10,r10,MSR_EE		/* Did EE change? */
+	beq	1f
+
+	/* Save handler and return address into the 2 unused words
+	 * of the STACK_FRAME_OVERHEAD (sneak sneak sneak). Everything
+	 * else can be recovered from the pt_regs except r3, which for
+	 * normal interrupts has been set to pt_regs and for syscalls
+	 * is an argument, so we temporarily use ORIG_GPR3 to save it
+	 */
+	stw	r9,8(r1)
+	stw	r11,12(r1)
+	stw	r3,ORIG_GPR3(r1)
+	bl	trace_hardirqs_off
+	lwz	r0,GPR0(r1)
+	lwz	r3,ORIG_GPR3(r1)
+	lwz	r4,GPR4(r1)
+	lwz	r5,GPR5(r1)
+	lwz	r6,GPR6(r1)
+	lwz	r7,GPR7(r1)
+	lwz	r8,GPR8(r1)
+	lwz	r9,8(r1)
+	lwz	r11,12(r1)
+1:	mtctr	r11
+	mtlr	r9
+	bctr				/* jump to handler */
+#else /* CONFIG_TRACE_IRQFLAGS */
 	mtspr	SPRN_SRR0,r11
 	mtspr	SPRN_SRR1,r10
 	mtlr	r9
 	SYNC
 	RFI				/* jump to handler, enable MMU */
+#endif /* CONFIG_TRACE_IRQFLAGS */
 
 #if defined (CONFIG_6xx) || defined(CONFIG_E500)
 4:	rlwinm	r12,r12,0,~_TLF_NAPPING
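
The trick in this hunk: trace_hardirqs_off() is C code and needs the MMU on, so instead of letting RFI jump straight to the handler, SRR0 is pointed at the local reenable_mmu label, the RFI re-enables translation, and the tracer is only invoked when the exception actually flipped the EE bit. The stores to 8(r1)/12(r1) and ORIG_GPR3 exist only because the C call clobbers the volatile registers holding the handler address, the return address, and r3. A minimal C model of the EE-changed test, with made-up stand-in names (reenable_mmu_model, a printf tracer) in place of the real asm and lockdep hook:

    #include <stdint.h>
    #include <stdio.h>

    #define MSR_EE 0x8000u  /* powerpc MSR external-interrupt-enable bit */

    /* stand-in for the real lockdep hook, illustration only */
    static void trace_hardirqs_off(void) { puts("lockdep: hardirqs off"); }

    /* Model of the reenable_mmu test: the exception prologue cleared EE;
     * if the interrupted context (pt_regs->msr) had EE set, lockdep's
     * view is stale and must be fixed before any traced C code runs. */
    static void reenable_mmu_model(uint32_t cur_msr, uint32_t regs_msr)
    {
        if ((cur_msr ^ regs_msr) & MSR_EE)      /* did EE change? */
            trace_hardirqs_off();
    }

    int main(void)
    {
        reenable_mmu_model(0x1000, 0x1000 | MSR_EE);  /* EE changed: traces */
        reenable_mmu_model(0x1000, 0x1000);           /* unchanged: silent */
        return 0;
    }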
@@ -251,6 +289,31 @@ _GLOBAL(DoSyscall)
 #ifdef SHOW_SYSCALLS
 	bl	do_show_syscall
 #endif /* SHOW_SYSCALLS */
+#ifdef CONFIG_TRACE_IRQFLAGS
+	/* Return from syscalls can (and generally will) hard enable
+	 * interrupts. You aren't supposed to call a syscall with
+	 * interrupts disabled in the first place. However, to ensure
+	 * that we get it right vs. lockdep if it happens, we force
+	 * that hard enable here with appropriate tracing if we see
+	 * that we have been called with interrupts off.
+	 */
+	mfmsr	r11
+	andi.	r12,r11,MSR_EE
+	bne+	1f
+	/* We came in with interrupts disabled, we enable them now */
+	bl	trace_hardirqs_on
+	mfmsr	r11
+	lwz	r0,GPR0(r1)
+	lwz	r3,GPR3(r1)
+	lwz	r4,GPR4(r1)
+	ori	r11,r11,MSR_EE
+	lwz	r5,GPR5(r1)
+	lwz	r6,GPR6(r1)
+	lwz	r7,GPR7(r1)
+	lwz	r8,GPR8(r1)
+	mtmsr	r11
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
 	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
 	lwz	r11,TI_FLAGS(r10)
 	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
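
Note the scheduling in this hunk: trace_hardirqs_on() clobbers the volatile registers, so the syscall argument registers r0 and r3-r8 are reloaded from pt_regs before mtmsr sets EE, and the tracer runs before the hardware enable so lockdep's state is never behind the hardware when an IRQ can actually fire. A C-level sketch of the policy, with stand-in mfmsr/mtmsr models (illustration only, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    #define MSR_EE 0x8000u

    static uint32_t msr = 0x1000;                   /* fake MSR, EE clear */
    static uint32_t mfmsr_model(void) { return msr; }
    static void mtmsr_model(uint32_t v) { msr = v; }
    static void trace_hardirqs_on(void) { puts("lockdep: hardirqs on"); }

    /* Syscalls should arrive with EE set; if one doesn't, tell lockdep
     * first and hard-enable afterwards, so the tracked state never lags
     * the hardware. */
    static void do_syscall_irq_fixup(void)
    {
        uint32_t m = mfmsr_model();
        if (!(m & MSR_EE)) {
            trace_hardirqs_on();        /* trace, then enable */
            mtmsr_model(m | MSR_EE);
        }
    }

    int main(void) { do_syscall_irq_fixup(); return 0; }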
@@ -275,6 +338,7 @@ ret_from_syscall:
 	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
 	/* disable interrupts so current_thread_info()->flags can't change */
 	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
+	/* Note: We don't bother telling lockdep about it */
 	SYNC
 	MTMSRD(r10)
 	lwz	r9,TI_FLAGS(r12)
@@ -288,6 +352,19 @@ ret_from_syscall:
 	oris	r11,r11,0x1000	/* Set SO bit in CR */
 	stw	r11,_CCR(r1)
 syscall_exit_cont:
+	lwz	r8,_MSR(r1)
+#ifdef CONFIG_TRACE_IRQFLAGS
+	/* If we are going to return from the syscall with interrupts
+	 * off, we trace that here. It shouldn't happen, but we want
+	 * to catch the bugger if it does.
+	 */
+	andi.	r10,r8,MSR_EE
+	bne+	1f
+	stw	r3,GPR3(r1)
+	bl	trace_hardirqs_off
+	lwz	r3,GPR3(r1)
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
 	/* If the process has its own DBCR0 value, load it up.  The internal
 	   debug mode bit tells us that dbcr0 should be loaded. */
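
This hunk also hoists the lwz r8,_MSR(r1) up from the exit path (a later hunk deletes the old load just before FIX_SRR1) so a single load feeds both the EE test and the return sequence. The added check itself reduces to the following sketch, with a stub tracer standing in for the real lockdep call:

    #include <stdint.h>
    #include <stdio.h>

    #define MSR_EE 0x8000u

    static void trace_hardirqs_off(void) { puts("lockdep: hardirqs off"); }

    /* If the saved MSR says we return with EE clear, record it. r3 holds
     * the syscall return value and is clobbered by the call, hence the
     * stw/lwz of GPR3(r1) in the asm; here 'ret' is simply passed through. */
    static long syscall_exit_check(uint32_t regs_msr, long ret)
    {
        if (!(regs_msr & MSR_EE))
            trace_hardirqs_off();
        return ret;
    }

    int main(void)
    {
        printf("ret=%ld\n", syscall_exit_check(0x1000, 42));
        return 0;
    }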
@@ -311,7 +388,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
 	mtlr	r4
 	mtcr	r5
 	lwz	r7,_NIP(r1)
-	lwz	r8,_MSR(r1)
 	FIX_SRR1(r8, r0)
 	lwz	r2,GPR2(r1)
 	lwz	r1,GPR1(r1)
@@ -394,7 +470,9 @@ syscall_exit_work:
 	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
 	beq	ret_from_except
 
-	/* Re-enable interrupts */
+	/* Re-enable interrupts. There is no need to trace that with
+	 * lockdep as we are supposed to have IRQs on at this point
+	 */
 	ori	r10,r10,MSR_EE
 	SYNC
 	MTMSRD(r10)
@@ -705,6 +783,7 @@ ret_from_except:
 	/* Hard-disable interrupts so that current_thread_info()->flags
 	 * can't change between when we test it and when we return
 	 * from the interrupt. */
+	/* Note: We don't bother telling lockdep about it */
 	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
 	SYNC			/* Some chip revs have problems here... */
 	MTMSRD(r10)		/* disable interrupts */
@@ -744,11 +823,24 @@ resume_kernel:
 	beq+	restore
 	andi.	r0,r3,MSR_EE	/* interrupts off? */
 	beq	restore		/* don't schedule if so */
+#ifdef CONFIG_TRACE_IRQFLAGS
+	/* Lockdep thinks irqs are enabled, we need to call
+	 * preempt_schedule_irq with IRQs off, so we inform lockdep
+	 * now that we -did- turn them off already
+	 */
+	bl	trace_hardirqs_off
+#endif
 1:	bl	preempt_schedule_irq
 	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
 	lwz	r3,TI_FLAGS(r9)
 	andi.	r0,r3,_TIF_NEED_RESCHED
 	bne-	1b
+#ifdef CONFIG_TRACE_IRQFLAGS
+	/* And now, to properly rebalance the above, we tell lockdep they
+	 * are being turned back on, which will happen when we return
+	 */
+	bl	trace_hardirqs_on
+#endif
 #else
 resume_kernel:
 #endif /* CONFIG_PREEMPT */
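
preempt_schedule_irq() must be entered with IRQs hard-off and, under lockdep, complains if the tracked state disagrees, hence the off/on pair bracketing the reschedule loop. Roughly, with stand-in names for the scheduler call and the TI_FLAGS polling (illustration only):

    #include <stdbool.h>
    #include <stdio.h>

    static void trace_hardirqs_off(void) { puts("lockdep: hardirqs off"); }
    static void trace_hardirqs_on(void)  { puts("lockdep: hardirqs on"); }

    static int polls = 2;                       /* fake TI_FLAGS polling */
    static bool need_resched_model(void) { return --polls > 0; }
    static void preempt_schedule_irq_model(void) { puts("preempt_schedule_irq"); }

    /* Model of the CONFIG_PREEMPT path: the asm hard-disabled IRQs
     * without telling lockdep, so rebalance its state around the loop. */
    static void resume_kernel_model(void)
    {
        trace_hardirqs_off();   /* we -did- turn them off already */
        do
            preempt_schedule_irq_model();
        while (need_resched_model());
        trace_hardirqs_on();    /* EE comes back on at the final RFI */
    }

    int main(void) { resume_kernel_model(); return 0; }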
@@ -765,6 +857,28 @@ restore:
 	stw	r6,icache_44x_need_flush@l(r4)
 1:
 #endif  /* CONFIG_44x */
+
+	lwz	r9,_MSR(r1)
+#ifdef CONFIG_TRACE_IRQFLAGS
+	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
+	 * off in this assembly code while peeking at TI_FLAGS() and such. However
+	 * we need to inform it if the exception turned interrupts off, and we
+	 * are about to turn them back on.
+	 *
+	 * The problem here sadly is that we don't know whether the exception was
+	 * one that turned interrupts off or not. So we always tell lockdep about
+	 * turning them on here when we go back to wherever we came from with EE
+	 * on, even if that may mean some redundant calls being tracked. Maybe later
+	 * we could encode what the exception did somewhere or test the exception
+	 * type in the pt_regs but that sounds overkill
+	 */
+	andi.	r10,r9,MSR_EE
+	beq	1f
+	bl	trace_hardirqs_on
+	lwz	r9,_MSR(r1)
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
+
 	lwz	r0,GPR0(r1)
 	lwz	r2,GPR2(r1)
 	REST_4GPRS(3, r1)
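
Since the entry code never recorded whether this particular exception hard-disabled interrupts, the hunk takes the conservative route the comment describes: report "on" whenever the saved MSR has EE set, accepting redundant calls. The second lwz r9,_MSR(r1) after the bl is needed because r9 is volatile across the call, and the two later hunks delete the old r9 loads this one replaces. A sketch of the decision, with an illustrative stub tracer:

    #include <stdint.h>
    #include <stdio.h>

    #define MSR_EE 0x8000u

    static void trace_hardirqs_on(void) { puts("lockdep: hardirqs on"); }

    /* Model of the restore path: with no record of whether this
     * exception disabled interrupts, conservatively report "on" whenever
     * the returned-to context has EE set; redundant on->on is harmless. */
    static void restore_model(uint32_t regs_msr)
    {
        if (regs_msr & MSR_EE)
            trace_hardirqs_on();
        /* the asm then reloads r9 (_MSR): the call clobbered it */
    }

    int main(void)
    {
        restore_model(0x1000 | MSR_EE);  /* traces */
        restore_model(0x1000);           /* silent */
        return 0;
    }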
@@ -782,7 +896,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
 	stwcx.	r0,0,r1			/* to clear the reservation */
 
 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
-	lwz	r9,_MSR(r1)
 	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
 	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */
 
@@ -805,7 +918,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
 	MTMSRD(r10)		/* clear the RI bit */
 	.globl exc_exit_restart
 exc_exit_restart:
-	lwz	r9,_MSR(r1)
 	lwz	r12,_NIP(r1)
 	FIX_SRR1(r9,r10)
 	mtspr	SPRN_SRR0,r12
@@ -1035,11 +1147,18 @@ do_work:			/* r10 contains MSR_KERNEL here */
 	beq	do_user_signal
 
 do_resched:			/* r10 contains MSR_KERNEL here */
+	/* Note: We don't need to inform lockdep that we are enabling
+	 * interrupts here. As far as it knows, they are already enabled
+	 */
 	ori	r10,r10,MSR_EE
 	SYNC
 	MTMSRD(r10)		/* hard-enable interrupts */
 	bl	schedule
 recheck:
+	/* Note: And we don't tell it we are disabling them again
+	 * either. Those disable/enable cycles used to peek at
+	 * TI_FLAGS aren't advertised.
+	 */
 	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
 	SYNC
 	MTMSRD(r10)		/* disable interrupts */
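
The two notes are the flip side of the same design: lockdep tracks the logical IRQ state, which stays "enabled" across this whole loop, so neither the hard-enable before schedule() nor the hard-disable used to peek at TI_FLAGS needs advertising. Modelled in C, with stand-in names (illustration only):

    #include <stdio.h>

    static void schedule_model(void) { puts("schedule"); }

    /* Model of do_resched/recheck: the hardware EE bit toggles around
     * schedule(), but the lockdep-visible state stays "enabled" for the
     * whole loop, so no trace_hardirqs_* calls are needed here. */
    static void do_resched_model(void)
    {
        /* hard-enable (MTMSRD with EE set): lockdep already thinks "on" */
        schedule_model();
        /* recheck: hard-disable to peek at TI_FLAGS, not advertised */
    }

    int main(void) { do_resched_model(); return 0; }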