Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--   arch/powerpc/kernel/entry_32.S   | 127
-rw-r--r--   arch/powerpc/kernel/head_32.S    |  17
-rw-r--r--   arch/powerpc/kernel/of_device.c  |   2
-rw-r--r--   arch/powerpc/kernel/process.c    |   2
-rw-r--r--   arch/powerpc/kernel/rtas.c       |  69
-rw-r--r--   arch/powerpc/kernel/setup_32.c   |   2
-rw-r--r--   arch/powerpc/kernel/smp.c        |   3
-rw-r--r--   arch/powerpc/kernel/udbg_16550.c |   2
8 files changed, 200 insertions, 24 deletions
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 4dd38f129153..3cadba60a4b6 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -191,11 +191,49 @@ transfer_to_handler_cont:
 	mflr	r9
 	lwz	r11,0(r9)		/* virtual address of handler */
 	lwz	r9,4(r9)		/* where to go when done */
+#ifdef CONFIG_TRACE_IRQFLAGS
+	lis	r12,reenable_mmu@h
+	ori	r12,r12,reenable_mmu@l
+	mtspr	SPRN_SRR0,r12
+	mtspr	SPRN_SRR1,r10
+	SYNC
+	RFI
+reenable_mmu:				/* re-enable mmu so we can */
+	mfmsr	r10
+	lwz	r12,_MSR(r1)
+	xor	r10,r10,r12
+	andi.	r10,r10,MSR_EE		/* Did EE change? */
+	beq	1f
+
+	/* Save handler and return address into the 2 unused words
+	 * of the STACK_FRAME_OVERHEAD (sneak sneak sneak). Everything
+	 * else can be recovered from the pt_regs except r3 which for
+	 * normal interrupts has been set to pt_regs and for syscalls
+	 * is an argument, so we temporarily use ORIG_GPR3 to save it
+	 */
+	stw	r9,8(r1)
+	stw	r11,12(r1)
+	stw	r3,ORIG_GPR3(r1)
+	bl	trace_hardirqs_off
+	lwz	r0,GPR0(r1)
+	lwz	r3,ORIG_GPR3(r1)
+	lwz	r4,GPR4(r1)
+	lwz	r5,GPR5(r1)
+	lwz	r6,GPR6(r1)
+	lwz	r7,GPR7(r1)
+	lwz	r8,GPR8(r1)
+	lwz	r9,8(r1)
+	lwz	r11,12(r1)
+1:	mtctr	r11
+	mtlr	r9
+	bctr				/* jump to handler */
+#else /* CONFIG_TRACE_IRQFLAGS */
 	mtspr	SPRN_SRR0,r11
 	mtspr	SPRN_SRR1,r10
 	mtlr	r9
 	SYNC
 	RFI				/* jump to handler, enable MMU */
+#endif /* CONFIG_TRACE_IRQFLAGS */
 
 #if defined (CONFIG_6xx) || defined(CONFIG_E500)
 4:	rlwinm	r12,r12,0,~_TLF_NAPPING
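The trace_hardirqs_off call above is what keeps lockdep's view of the IRQ state in sync with the hardware. For reference, it mirrors the generic C wrappers; a minimal sketch of include/linux/irqflags.h under CONFIG_TRACE_IRQFLAGS (the real header also wraps the save/restore variants):

	#define local_irq_disable() \
		do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)

	#define local_irq_enable() \
		do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)

The ordering is the point: lockdep is told while interrupts are off, and since trace_hardirqs_off() is a C function, the assembly must first RFI to reenable_mmu to get a sane MMU context before it can make the call.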
@@ -251,6 +289,31 @@ _GLOBAL(DoSyscall)
 #ifdef SHOW_SYSCALLS
 	bl	do_show_syscall
 #endif /* SHOW_SYSCALLS */
+#ifdef CONFIG_TRACE_IRQFLAGS
+	/* Return from syscalls can (and generally will) hard enable
+	 * interrupts. You aren't supposed to call a syscall with
+	 * interrupts disabled in the first place. However, to ensure
+	 * that we get it right vs. lockdep if it happens, we force
+	 * that hard enable here with appropriate tracing if we see
+	 * that we have been called with interrupts off
+	 */
+	mfmsr	r11
+	andi.	r12,r11,MSR_EE
+	bne+	1f
+	/* We came in with interrupts disabled, we enable them now */
+	bl	trace_hardirqs_on
+	mfmsr	r11
+	lwz	r0,GPR0(r1)
+	lwz	r3,GPR3(r1)
+	lwz	r4,GPR4(r1)
+	ori	r11,r11,MSR_EE
+	lwz	r5,GPR5(r1)
+	lwz	r6,GPR6(r1)
+	lwz	r7,GPR7(r1)
+	lwz	r8,GPR8(r1)
+	mtmsr	r11
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
 	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
 	lwz	r11,TI_FLAGS(r10)
 	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
@@ -275,6 +338,7 @@ ret_from_syscall:
 	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
 	/* disable interrupts so current_thread_info()->flags can't change */
 	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
+	/* Note: We don't bother telling lockdep about it */
 	SYNC
 	MTMSRD(r10)
 	lwz	r9,TI_FLAGS(r12)
@@ -288,6 +352,19 @@ ret_from_syscall:
 	oris	r11,r11,0x1000	/* Set SO bit in CR */
 	stw	r11,_CCR(r1)
 syscall_exit_cont:
+	lwz	r8,_MSR(r1)
+#ifdef CONFIG_TRACE_IRQFLAGS
+	/* If we are going to return from the syscall with interrupts
+	 * off, we trace that here. It shouldn't happen, but we
+	 * want to catch the bugger if it does.
+	 */
+	andi.	r10,r8,MSR_EE
+	bne+	1f
+	stw	r3,GPR3(r1)
+	bl	trace_hardirqs_off
+	lwz	r3,GPR3(r1)
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
 	/* If the process has its own DBCR0 value, load it up. The internal
 	   debug mode bit tells us that dbcr0 should be loaded. */
@@ -311,7 +388,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
 	mtlr	r4
 	mtcr	r5
 	lwz	r7,_NIP(r1)
-	lwz	r8,_MSR(r1)
 	FIX_SRR1(r8, r0)
 	lwz	r2,GPR2(r1)
 	lwz	r1,GPR1(r1)
@@ -394,7 +470,9 @@ syscall_exit_work:
 	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
 	beq	ret_from_except
 
-	/* Re-enable interrupts */
+	/* Re-enable interrupts. There is no need to trace that with
+	 * lockdep as we are supposed to have IRQs on at this point
+	 */
 	ori	r10,r10,MSR_EE
 	SYNC
 	MTMSRD(r10)
@@ -705,6 +783,7 @@ ret_from_except:
 	/* Hard-disable interrupts so that current_thread_info()->flags
 	 * can't change between when we test it and when we return
 	 * from the interrupt. */
+	/* Note: We don't bother telling lockdep about it */
 	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
 	SYNC			/* Some chip revs have problems here... */
 	MTMSRD(r10)		/* disable interrupts */
@@ -744,11 +823,24 @@ resume_kernel:
 	beq+	restore
 	andi.	r0,r3,MSR_EE	/* interrupts off? */
 	beq	restore		/* don't schedule if so */
+#ifdef CONFIG_TRACE_IRQFLAGS
+	/* Lockdep thinks irqs are enabled, we need to call
+	 * preempt_schedule_irq with IRQs off, so we inform lockdep
+	 * now that we -did- turn them off already
+	 */
+	bl	trace_hardirqs_off
+#endif
 1:	bl	preempt_schedule_irq
 	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
 	lwz	r3,TI_FLAGS(r9)
 	andi.	r0,r3,_TIF_NEED_RESCHED
 	bne-	1b
+#ifdef CONFIG_TRACE_IRQFLAGS
+	/* And now, to properly rebalance the above, we tell lockdep they
+	 * are being turned back on, which will happen when we return
+	 */
+	bl	trace_hardirqs_on
+#endif
 #else
 resume_kernel:
 #endif /* CONFIG_PREEMPT */
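The off/on pairing around the loop above matters because the generic scheduler entry point asserts its preconditions and does traced IRQ flips internally. A paraphrased sketch of preempt_schedule_irq() from kernel/sched.c of this era (the real function loops on need_resched(); that loop is elided here):

	asmlinkage void __sched preempt_schedule_irq(void)
	{
		/* Catch callers which need to be fixed */
		BUG_ON(preempt_count() || !irqs_disabled());

		add_preempt_count(PREEMPT_ACTIVE);
		local_irq_enable();	/* traced: lockdep must agree irqs were off */
		schedule();
		local_irq_disable();	/* traced again on the way back */
		sub_preempt_count(PREEMPT_ACTIVE);
	}

Since local_irq_enable() calls trace_hardirqs_on() internally, lockdep's state has to say "off" on entry or the bookkeeping would be unbalanced.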
@@ -765,6 +857,28 @@ restore:
 	stw	r6,icache_44x_need_flush@l(r4)
 1:
 #endif  /* CONFIG_44x */
+
+	lwz	r9,_MSR(r1)
+#ifdef CONFIG_TRACE_IRQFLAGS
+	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
+	 * off in this assembly code while peeking at TI_FLAGS() and such. However
+	 * we need to inform it if the exception turned interrupts off, and we
+	 * are about to turn them back on.
+	 *
+	 * The problem here sadly is that we don't know whether the exception was
+	 * one that turned interrupts off or not. So we always tell lockdep about
+	 * turning them on here when we go back to wherever we came from with EE
+	 * on, even if that may mean some redundant calls being tracked. Maybe later
+	 * we could encode what the exception did somewhere or test the exception
+	 * type in the pt_regs but that sounds overkill
+	 */
+	andi.	r10,r9,MSR_EE
+	beq	1f
+	bl	trace_hardirqs_on
+	lwz	r9,_MSR(r1)
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
+
 	lwz	r0,GPR0(r1)
 	lwz	r2,GPR2(r1)
 	REST_4GPRS(3, r1)
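The "redundant calls" the comment above worries about are cheap: lockdep bails out early when it already believes interrupts are on. A paraphrased sketch of that fast path (kernel/lockdep.c, trace_hardirqs_on_caller(); the real function records call sites and more state):

	void trace_hardirqs_on_caller(unsigned long ip)
	{
		if (unlikely(!debug_locks || current->lockdep_recursion))
			return;

		if (unlikely(current->hardirqs_enabled)) {
			/* redundant enable: bump a statistic and leave */
			debug_atomic_inc(&redundant_hardirqs_on);
			return;
		}

		/* otherwise record the real off->on transition */
		current->hardirqs_enabled = 1;
	}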
@@ -782,7 +896,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
 	stwcx.	r0,0,r1			/* to clear the reservation */
 
 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
-	lwz	r9,_MSR(r1)
 	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
 	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */
 
@@ -805,7 +918,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
 	MTMSRD(r10)		/* clear the RI bit */
 	.globl exc_exit_restart
 exc_exit_restart:
-	lwz	r9,_MSR(r1)
 	lwz	r12,_NIP(r1)
 	FIX_SRR1(r9,r10)
 	mtspr	SPRN_SRR0,r12
@@ -1035,11 +1147,18 @@ do_work:			/* r10 contains MSR_KERNEL here */
 	beq	do_user_signal
 
 do_resched:			/* r10 contains MSR_KERNEL here */
+	/* Note: We don't need to inform lockdep that we are enabling
+	 * interrupts here. As far as it knows, they are already enabled
+	 */
 	ori	r10,r10,MSR_EE
 	SYNC
 	MTMSRD(r10)		/* hard-enable interrupts */
 	bl	schedule
 recheck:
+	/* Note: And we don't tell it we are disabling them again
+	 * either. Those disable/enable cycles used to peek at
+	 * TI_FLAGS aren't advertised.
+	 */
 	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
 	SYNC
 	MTMSRD(r10)		/* disable interrupts */
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 48469463f89e..fc2132942754 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -1124,9 +1124,8 @@ mmu_off:
 	RFI
 
 /*
- * Use the first pair of BAT registers to map the 1st 16MB
- * of RAM to PAGE_OFFSET.  From this point on we can't safely
- * call OF any more.
+ * On 601, we use 3 BATs to map up to 24M of RAM at _PAGE_OFFSET
+ * (we keep one for debugging) and on others, we use one 256M BAT.
  */
 initial_bats:
 	lis	r11,PAGE_OFFSET@h
@@ -1136,12 +1135,16 @@ initial_bats:
 	bne	4f
 	ori	r11,r11,4		/* set up BAT registers for 601 */
 	li	r8,0x7f			/* valid, block length = 8MB */
-	oris	r9,r11,0x800000@h	/* set up BAT reg for 2nd 8M */
-	oris	r10,r8,0x800000@h	/* set up BAT reg for 2nd 8M */
 	mtspr	SPRN_IBAT0U,r11		/* N.B. 601 has valid bit in */
 	mtspr	SPRN_IBAT0L,r8		/* lower BAT register */
-	mtspr	SPRN_IBAT1U,r9
-	mtspr	SPRN_IBAT1L,r10
+	addis	r11,r11,0x800000@h
+	addis	r8,r8,0x800000@h
+	mtspr	SPRN_IBAT1U,r11
+	mtspr	SPRN_IBAT1L,r8
+	addis	r11,r11,0x800000@h
+	addis	r8,r8,0x800000@h
+	mtspr	SPRN_IBAT2U,r11
+	mtspr	SPRN_IBAT2L,r8
 	isync
 	blr
 
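A quick check of the arithmetic behind the new comment: the 601 lower BAT value 0x7f encodes a valid 8 MB block, and each addis rX,rX,0x800000@h step advances both halves of the mapping by 0x00800000 bytes, so the three IBAT pairs cover PAGE_OFFSET through PAGE_OFFSET + 24 MB. A trivial host-side sanity check:

	#include <assert.h>

	int main(void)
	{
		unsigned long block = 0x00800000;		/* 8 MB per 601 BAT */

		assert(3 * block == 24ul * 1024 * 1024);	/* IBAT0..IBAT2 = 24 MB */
		return 0;
	}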
diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c
index fa983a59c4ce..a359cb08e900 100644
--- a/arch/powerpc/kernel/of_device.c
+++ b/arch/powerpc/kernel/of_device.c
@@ -76,7 +76,7 @@ struct of_device *of_device_alloc(struct device_node *np,
 	dev->dev.archdata.of_node = np;
 
 	if (bus_id)
-		dev_set_name(&dev->dev, bus_id);
+		dev_set_name(&dev->dev, "%s", bus_id);
 	else
 		of_device_make_bus_id(dev);
 
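The one-liner above closes a classic format-string hole: dev_set_name() takes a printf-style format, so a name derived from the device tree must never be passed in the format position. An illustration (the bus_id value here is hypothetical):

	const char *bus_id = "weird%sname";	/* could come from firmware */

	dev_set_name(&dev->dev, bus_id);	/* BAD: '%s' is interpreted */
	dev_set_name(&dev->dev, "%s", bus_id);	/* OK: name is used verbatim */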
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 3e7135bbe40f..892a9f2e6d76 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -528,7 +528,7 @@ void show_regs(struct pt_regs * regs)
 
 	for (i = 0;  i < 32;  i++) {
 		if ((i % REGS_PER_LINE) == 0)
-			printk("\n" KERN_INFO "GPR%02d: ", i);
+			printk("\nGPR%02d: ", i);
 		printk(REG " ", regs->gpr[i]);
 		if (i == LAST_VOLATILE && !FULL_REGS(regs))
 			break;
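Background on the printk change: a KERN_* marker is only honored at the very start of a printk() format, where it is a literal prefix string ("<6>" for KERN_INFO in this era). Placed after an embedded "\n" it is not parsed, so the old code leaked "<6>" into the register dump:

	printk("\n" KERN_INFO "GPR00: ");	/* emits a newline, then literal "<6>GPR00: " */
	printk("\nGPR00: ");			/* emits a newline, then "GPR00: " */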
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index ee4c7609b649..c434823b8c83 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -38,9 +38,10 @@
 #include <asm/syscalls.h>
 #include <asm/smp.h>
 #include <asm/atomic.h>
+#include <asm/time.h>
 
 struct rtas_t rtas = {
-	.lock = SPIN_LOCK_UNLOCKED
+	.lock = __RAW_SPIN_LOCK_UNLOCKED
 };
 EXPORT_SYMBOL(rtas);
 
@@ -67,6 +68,28 @@ unsigned long rtas_rmo_buf;
 void (*rtas_flash_term_hook)(int);
 EXPORT_SYMBOL(rtas_flash_term_hook);
 
+/* RTAS uses home-made raw locking instead of spin_lock_irqsave()
+ * because it can be called from within really nasty contexts,
+ * such as having the timebase stopped, which would lock up with
+ * normal locks and spinlock debugging enabled
+ */
+static unsigned long lock_rtas(void)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	preempt_disable();
+	__raw_spin_lock_flags(&rtas.lock, flags);
+	return flags;
+}
+
+static void unlock_rtas(unsigned long flags)
+{
+	__raw_spin_unlock(&rtas.lock);
+	local_irq_restore(flags);
+	preempt_enable();
+}
+
 /*
  * call_rtas_display_status and call_rtas_display_status_delay
  * are designed only for very early low-level debugging, which
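Why lock_rtas() bypasses spinlock_t entirely: the debugged lock path can itself printk or read the timebase, either of which is fatal while RTAS has the timebase frozen. A side-by-side sketch with hypothetical locks, showing what each primitive pulls in:

	static DEFINE_SPINLOCK(checked_lock);				/* hypothetical */
	static raw_spinlock_t bare_lock = __RAW_SPIN_LOCK_UNLOCKED;	/* hypothetical */

	static void compare_lock_paths(void)
	{
		unsigned long flags;

		/* goes through lockdep and CONFIG_DEBUG_SPINLOCK checks */
		spin_lock_irqsave(&checked_lock, flags);
		spin_unlock_irqrestore(&checked_lock, flags);

		/* bare hardware lock: no tracing, no debug code */
		__raw_spin_lock(&bare_lock);
		__raw_spin_unlock(&bare_lock);
	}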
@@ -79,7 +102,7 @@ static void call_rtas_display_status(char c)
 
 	if (!rtas.base)
 		return;
-	spin_lock_irqsave(&rtas.lock, s);
+	s = lock_rtas();
 
 	args->token = 10;
 	args->nargs = 1;
@@ -89,7 +112,7 @@ static void call_rtas_display_status(char c)
 
 	enter_rtas(__pa(args));
 
-	spin_unlock_irqrestore(&rtas.lock, s);
+	unlock_rtas(s);
 }
 
 static void call_rtas_display_status_delay(char c)
@@ -411,8 +434,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
 	if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE)
 		return -1;
 
-	/* Gotta do something different here, use global lock for now... */
-	spin_lock_irqsave(&rtas.lock, s);
+	s = lock_rtas();
 	rtas_args = &rtas.args;
 
 	rtas_args->token = token;
@@ -439,8 +461,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
 			outputs[i] = rtas_args->rets[i+1];
 	ret = (nret > 0)? rtas_args->rets[0]: 0;
 
-	/* Gotta do something different here, use global lock for now... */
-	spin_unlock_irqrestore(&rtas.lock, s);
+	unlock_rtas(s);
 
 	if (buff_copy) {
 		log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0);
@@ -837,7 +858,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
 
 	buff_copy = get_errorlog_buffer();
 
-	spin_lock_irqsave(&rtas.lock, flags);
+	flags = lock_rtas();
 
 	rtas.args = args;
 	enter_rtas(__pa(&rtas.args));
@@ -848,7 +869,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
 	if (args.rets[0] == -1)
 		errbuf = __fetch_rtas_last_error(buff_copy);
 
-	spin_unlock_irqrestore(&rtas.lock, flags);
+	unlock_rtas(flags);
 
 	if (buff_copy) {
 		if (errbuf)
@@ -951,3 +972,33 @@ int __init early_init_dt_scan_rtas(unsigned long node,
 	/* break now */
 	return 1;
 }
+
+static raw_spinlock_t timebase_lock;
+static u64 timebase = 0;
+
+void __cpuinit rtas_give_timebase(void)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	hard_irq_disable();
+	__raw_spin_lock(&timebase_lock);
+	rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
+	timebase = get_tb();
+	__raw_spin_unlock(&timebase_lock);
+
+	while (timebase)
+		barrier();
+	rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
+	local_irq_restore(flags);
+}
+
+void __cpuinit rtas_take_timebase(void)
+{
+	while (!timebase)
+		barrier();
+	__raw_spin_lock(&timebase_lock);
+	set_tb(timebase >> 32, timebase & 0xffffffff);
+	timebase = 0;
+	__raw_spin_unlock(&timebase_lock);
+}
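The two functions above form a freeze/copy/thaw handshake for timebase synchronization during CPU bringup: the giving CPU freezes the timebase via RTAS, publishes its value, spins until the taker has consumed it (timebase going back to 0), then thaws. The `timebase` variable doubles as the synchronization flag, which is why the taker clears it under the same raw lock. Platforms would wire these into their SMP callbacks; a wiring sketch (the structure name is made up, the field names are from struct smp_ops_t):

	static struct smp_ops_t example_smp_ops = {
		.give_timebase	= rtas_give_timebase,	/* run by the boot CPU */
		.take_timebase	= rtas_take_timebase,	/* run by the new CPU */
	};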
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 1d154248cf40..e1e3059cf34b 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -119,6 +119,8 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
  */
 notrace void __init machine_init(unsigned long dt_ptr)
 {
+	lockdep_init();
+
 	/* Enable early debugging if any specified (see udbg.h) */
 	udbg_early_init();
 
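lockdep_init() has to run before the first spin_lock() anywhere in the boot path, hence its position as the very first statement of machine_init(). It is safe this early because it touches nothing but lockdep's own hash tables; a paraphrased sketch from kernel/lockdep.c of this era:

	void lockdep_init(void)
	{
		int i;

		if (lockdep_initialized)
			return;

		for (i = 0; i < CLASSHASH_SIZE; i++)
			INIT_LIST_HEAD(classhash_table + i);

		for (i = 0; i < CHAINHASH_SIZE; i++)
			INIT_LIST_HEAD(chainhash_table + i);

		lockdep_initialized = 1;
	}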
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 65484b2200b3..0b47de07302d 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -68,7 +68,8 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 /* SMP operations for this machine */
 struct smp_ops_t *smp_ops;
 
-static volatile unsigned int cpu_callin_map[NR_CPUS];
+/* Can't be static due to PowerMac hackery */
+volatile unsigned int cpu_callin_map[NR_CPUS];
 
 int smt_enabled_at_boot = 1;
 
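With the static dropped, the PowerMac bringup code the comment alludes to can reference the map directly. The matching declaration on the consumer side would look like this (its exact placement is an assumption here):

	extern volatile unsigned int cpu_callin_map[NR_CPUS];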
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index 0362a891e54e..acb74a17bbbf 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -219,7 +219,7 @@ void udbg_init_pas_realmode(void)
 #ifdef CONFIG_PPC_EARLY_DEBUG_44x
 #include <platforms/44x/44x.h>
 
-static int udbg_44x_as1_flush(void)
+static void udbg_44x_as1_flush(void)
 {
 	if (udbg_comport) {
 		while ((as1_readb(&udbg_comport->lsr) & LSR_THRE) == 0)
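The return-type fix above makes the function match the hook it gets assigned to: the early-debug flush callback is a plain void function pointer, declared by the udbg core (arch/powerpc/kernel/udbg.c):

	/* the hook, as declared by the udbg core */
	void (*udbg_flush)(void);

	/* the assignment that requires the matching void return type */
	udbg_flush = udbg_44x_as1_flush;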