author		Ingo Molnar <mingo@elte.hu>	2009-09-07 02:19:51 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-09-07 02:19:51 -0400
commit		a1922ed661ab2c1637d0b10cde933bd9cd33d965 (patch)
tree		0f1777542b385ebefd30b3586d830fd8ed6fda5b /arch/powerpc/kernel
parent		75e33751ca8bbb72dd6f1a74d2810ddc8cbe4bdf (diff)
parent		d28daf923ac5e4a0d7cecebae56f3e339189366b (diff)

Merge branch 'tracing/core' into tracing/hw-breakpoints

Conflicts:
	arch/Kconfig
	kernel/trace/trace.h

Merge reason: resolve the conflicts, plus adapt to the new ring-buffer APIs.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--   arch/powerpc/kernel/Makefile               |  10
-rw-r--r--   arch/powerpc/kernel/cpu_setup_6xx.S        |   3
-rw-r--r--   arch/powerpc/kernel/cpu_setup_fsl_booke.S  |  49
-rw-r--r--   arch/powerpc/kernel/dma.c                  |   6
-rw-r--r--   arch/powerpc/kernel/entry_32.S             | 127
-rw-r--r--   arch/powerpc/kernel/ftrace.c               |   2
-rw-r--r--   arch/powerpc/kernel/head_32.S              |  17
-rw-r--r--   arch/powerpc/kernel/mpc7450-pmu.c          | 417
-rw-r--r--   arch/powerpc/kernel/of_device.c            |   2
-rw-r--r--   arch/powerpc/kernel/perf_counter.c         | 265
-rw-r--r--   arch/powerpc/kernel/power4-pmu.c           |  90
-rw-r--r--   arch/powerpc/kernel/power5+-pmu.c          |  96
-rw-r--r--   arch/powerpc/kernel/power5-pmu.c           |  99
-rw-r--r--   arch/powerpc/kernel/power6-pmu.c           |  73
-rw-r--r--   arch/powerpc/kernel/power7-pmu.c           |  69
-rw-r--r--   arch/powerpc/kernel/ppc970-pmu.c           |  63
-rw-r--r--   arch/powerpc/kernel/process.c              |   2
-rw-r--r--   arch/powerpc/kernel/prom_init.c            |  40
-rw-r--r--   arch/powerpc/kernel/ptrace.c               |  17
-rw-r--r--   arch/powerpc/kernel/ptrace32.c             |   1
-rw-r--r--   arch/powerpc/kernel/rtas.c                 |  69
-rw-r--r--   arch/powerpc/kernel/setup_32.c             |   2
-rw-r--r--   arch/powerpc/kernel/smp.c                  |   3
-rw-r--r--   arch/powerpc/kernel/time.c                 |  25
-rw-r--r--   arch/powerpc/kernel/udbg_16550.c           |   2
-rw-r--r--   arch/powerpc/kernel/vector.S               |   6
26 files changed, 1209 insertions(+), 346 deletions(-)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 612b0c4dc26d..b73396b93905 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -4,6 +4,8 @@
 
 CFLAGS_ptrace.o		+= -DUTS_MACHINE='"$(UTS_MACHINE)"'
 
+subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
+
 ifeq ($(CONFIG_PPC64),y)
 CFLAGS_prom_init.o	+= -mno-minimal-toc
 endif
@@ -95,9 +97,10 @@ obj64-$(CONFIG_AUDIT) += compat_audit.o
 
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER)	+= ftrace.o
-obj-$(CONFIG_PERF_COUNTERS)	+= perf_counter.o power4-pmu.o ppc970-pmu.o \
-				   power5-pmu.o power5+-pmu.o power6-pmu.o \
-				   power7-pmu.o
+obj-$(CONFIG_PPC_PERF_CTRS)	+= perf_counter.o
+obj64-$(CONFIG_PPC_PERF_CTRS)	+= power4-pmu.o ppc970-pmu.o power5-pmu.o \
+				   power5+-pmu.o power6-pmu.o power7-pmu.o
+obj32-$(CONFIG_PPC_PERF_CTRS)	+= mpc7450-pmu.o
 
 obj-$(CONFIG_8XX_MINIMAL_FPEMU)	+= softemu8xx.o
 
@@ -106,6 +109,7 @@ obj-y += iomap.o
 endif
 
 obj-$(CONFIG_PPC64)		+= $(obj64-y)
+obj-$(CONFIG_PPC32)		+= $(obj32-y)
 
 ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC),)
 obj-y				+= ppc_save_regs.o
diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
index 54f767e31a1a..1e9949e68856 100644
--- a/arch/powerpc/kernel/cpu_setup_6xx.S
+++ b/arch/powerpc/kernel/cpu_setup_6xx.S
@@ -239,6 +239,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
 	ori	r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE
 	ori	r11,r11,HID0_LRSTK | HID0_BTIC
 	oris	r11,r11,HID0_DPM@h
+BEGIN_MMU_FTR_SECTION
+	oris	r11,r11,HID0_HIGH_BAT@h
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
 BEGIN_FTR_SECTION
 	xori	r11,r11,HID0_BTIC
 END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC)
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index eb4b9adcedb4..0adb50ad8031 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -17,6 +17,40 @@
 #include <asm/cputable.h>
 #include <asm/ppc_asm.h>
 
+_GLOBAL(__e500_icache_setup)
+	mfspr	r0, SPRN_L1CSR1
+	andi.	r3, r0, L1CSR1_ICE
+	bnelr				/* Already enabled */
+	oris	r0, r0, L1CSR1_CPE@h
+	ori	r0, r0, (L1CSR1_ICFI | L1CSR1_ICLFR | L1CSR1_ICE)
+	mtspr	SPRN_L1CSR1, r0		/* Enable I-Cache */
+	isync
+	blr
+
+_GLOBAL(__e500_dcache_setup)
+	mfspr	r0, SPRN_L1CSR0
+	andi.	r3, r0, L1CSR0_DCE
+	bnelr				/* Already enabled */
+	msync
+	isync
+	li	r0, 0
+	mtspr	SPRN_L1CSR0, r0		/* Disable */
+	msync
+	isync
+	li	r0, (L1CSR0_DCFI | L1CSR0_CLFC)
+	mtspr	SPRN_L1CSR0, r0		/* Invalidate */
+	isync
+1:	mfspr	r0, SPRN_L1CSR0
+	andi.	r3, r0, L1CSR0_CLFC
+	bne+	1b			/* Wait for lock bits reset */
+	oris	r0, r0, L1CSR0_CPE@h
+	ori	r0, r0, L1CSR0_DCE
+	msync
+	isync
+	mtspr	SPRN_L1CSR0, r0		/* Enable */
+	isync
+	blr
+
 _GLOBAL(__setup_cpu_e200)
 	/* enable dedicated debug exception handling resources (Debug APU) */
 	mfspr	r3,SPRN_HID0
@@ -25,7 +59,16 @@ _GLOBAL(__setup_cpu_e200)
 	b	__setup_e200_ivors
 _GLOBAL(__setup_cpu_e500v1)
 _GLOBAL(__setup_cpu_e500v2)
-	b	__setup_e500_ivors
+	mflr	r4
+	bl	__e500_icache_setup
+	bl	__e500_dcache_setup
+	bl	__setup_e500_ivors
+	mtlr	r4
+	blr
 _GLOBAL(__setup_cpu_e500mc)
-	b	__setup_e500mc_ivors
-
+	mflr	r4
+	bl	__e500_icache_setup
+	bl	__e500_dcache_setup
+	bl	__setup_e500mc_ivors
+	mtlr	r4
+	blr
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 20a60d661ba8..ccf129d47d84 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -7,6 +7,7 @@
 
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/lmb.h>
 #include <asm/bug.h>
 #include <asm/abs_addr.h>
 
@@ -90,11 +91,10 @@ static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
 static int dma_direct_dma_supported(struct device *dev, u64 mask)
 {
 #ifdef CONFIG_PPC64
-	/* Could be improved to check for memory though it better be
-	 * done via some global so platforms can set the limit in case
+	/* Could be improved so platforms can set the limit in case
 	 * they have limited DMA windows
 	 */
-	return mask >= DMA_BIT_MASK(32);
+	return mask >= (lmb_end_of_DRAM() - 1);
 #else
 	return 1;
 #endif
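
The rewritten check ties direct-DMA capability to the actual top of RAM rather than a fixed 32-bit mask. A standalone sketch of the comparison, with a hypothetical end_of_dram argument standing in for lmb_end_of_DRAM() (which returns the first address past the top of installed memory):

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch only: end_of_dram stands in for lmb_end_of_DRAM(). */
    static int dma_direct_supported(uint64_t mask, uint64_t end_of_dram)
    {
            /* The highest address the device must reach is end_of_dram - 1. */
            return mask >= (end_of_dram - 1);
    }

    int main(void)
    {
            uint64_t mask32 = 0xffffffffull;

            /* With 2GB of RAM a 32-bit mask suffices; with 8GB it no longer
             * reaches the top of memory, so direct DMA must be refused. */
            printf("2GB: %d\n", dma_direct_supported(mask32, 2ull << 30));
            printf("8GB: %d\n", dma_direct_supported(mask32, 8ull << 30));
            return 0;
    }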
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 4dd38f129153..3cadba60a4b6 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -191,11 +191,49 @@ transfer_to_handler_cont:
 	mflr	r9
 	lwz	r11,0(r9)		/* virtual address of handler */
 	lwz	r9,4(r9)		/* where to go when done */
+#ifdef CONFIG_TRACE_IRQFLAGS
+	lis	r12,reenable_mmu@h
+	ori	r12,r12,reenable_mmu@l
+	mtspr	SPRN_SRR0,r12
+	mtspr	SPRN_SRR1,r10
+	SYNC
+	RFI
+reenable_mmu:				/* re-enable mmu so we can */
+	mfmsr	r10
+	lwz	r12,_MSR(r1)
+	xor	r10,r10,r12
+	andi.	r10,r10,MSR_EE		/* Did EE change? */
+	beq	1f
+
+	/* Save handler and return address into the 2 unused words
+	 * of the STACK_FRAME_OVERHEAD (sneak sneak sneak). Everything
+	 * else can be recovered from the pt_regs except r3 which for
+	 * normal interrupts has been set to pt_regs and for syscalls
+	 * is an argument, so we temporarily use ORIG_GPR3 to save it
+	 */
+	stw	r9,8(r1)
+	stw	r11,12(r1)
+	stw	r3,ORIG_GPR3(r1)
+	bl	trace_hardirqs_off
+	lwz	r0,GPR0(r1)
+	lwz	r3,ORIG_GPR3(r1)
+	lwz	r4,GPR4(r1)
+	lwz	r5,GPR5(r1)
+	lwz	r6,GPR6(r1)
+	lwz	r7,GPR7(r1)
+	lwz	r8,GPR8(r1)
+	lwz	r9,8(r1)
+	lwz	r11,12(r1)
+1:	mtctr	r11
+	mtlr	r9
+	bctr				/* jump to handler */
+#else /* CONFIG_TRACE_IRQFLAGS */
 	mtspr	SPRN_SRR0,r11
 	mtspr	SPRN_SRR1,r10
 	mtlr	r9
 	SYNC
 	RFI				/* jump to handler, enable MMU */
+#endif /* CONFIG_TRACE_IRQFLAGS */
 
 #if defined (CONFIG_6xx) || defined(CONFIG_E500)
 4:	rlwinm	r12,r12,0,~_TLF_NAPPING
@@ -251,6 +289,31 @@ _GLOBAL(DoSyscall)
 #ifdef SHOW_SYSCALLS
 	bl	do_show_syscall
 #endif /* SHOW_SYSCALLS */
+#ifdef CONFIG_TRACE_IRQFLAGS
+	/* Return from syscalls can (and generally will) hard enable
+	 * interrupts. You aren't supposed to call a syscall with
+	 * interrupts disabled in the first place. However, to ensure
+	 * that we get it right vs. lockdep if it happens, we force
+	 * that hard enable here with appropriate tracing if we see
+	 * that we have been called with interrupts off
+	 */
+	mfmsr	r11
+	andi.	r12,r11,MSR_EE
+	bne+	1f
+	/* We came in with interrupts disabled, we enable them now */
+	bl	trace_hardirqs_on
+	mfmsr	r11
+	lwz	r0,GPR0(r1)
+	lwz	r3,GPR3(r1)
+	lwz	r4,GPR4(r1)
+	ori	r11,r11,MSR_EE
+	lwz	r5,GPR5(r1)
+	lwz	r6,GPR6(r1)
+	lwz	r7,GPR7(r1)
+	lwz	r8,GPR8(r1)
+	mtmsr	r11
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
 	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
 	lwz	r11,TI_FLAGS(r10)
 	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
@@ -275,6 +338,7 @@ ret_from_syscall:
 	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
 	/* disable interrupts so current_thread_info()->flags can't change */
 	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
+	/* Note: We don't bother telling lockdep about it */
 	SYNC
 	MTMSRD(r10)
 	lwz	r9,TI_FLAGS(r12)
@@ -288,6 +352,19 @@ ret_from_syscall:
 	oris	r11,r11,0x1000	/* Set SO bit in CR */
 	stw	r11,_CCR(r1)
 syscall_exit_cont:
+	lwz	r8,_MSR(r1)
+#ifdef CONFIG_TRACE_IRQFLAGS
+	/* If we are going to return from the syscall with interrupts
+	 * off, we trace that here. It shouldn't happen though but we
+	 * want to catch the bugger if it does right ?
+	 */
+	andi.	r10,r8,MSR_EE
+	bne+	1f
+	stw	r3,GPR3(r1)
+	bl	trace_hardirqs_off
+	lwz	r3,GPR3(r1)
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
 	/* If the process has its own DBCR0 value, load it up.  The internal
 	   debug mode bit tells us that dbcr0 should be loaded. */
@@ -311,7 +388,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
 	mtlr	r4
 	mtcr	r5
 	lwz	r7,_NIP(r1)
-	lwz	r8,_MSR(r1)
 	FIX_SRR1(r8, r0)
 	lwz	r2,GPR2(r1)
 	lwz	r1,GPR1(r1)
@@ -394,7 +470,9 @@ syscall_exit_work:
 	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
 	beq	ret_from_except
 
-	/* Re-enable interrupts */
+	/* Re-enable interrupts. There is no need to trace that with
+	 * lockdep as we are supposed to have IRQs on at this point
+	 */
 	ori	r10,r10,MSR_EE
 	SYNC
 	MTMSRD(r10)
@@ -705,6 +783,7 @@ ret_from_except:
 	/* Hard-disable interrupts so that current_thread_info()->flags
 	 * can't change between when we test it and when we return
 	 * from the interrupt. */
+	/* Note: We don't bother telling lockdep about it */
 	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
 	SYNC			/* Some chip revs have problems here... */
 	MTMSRD(r10)		/* disable interrupts */
@@ -744,11 +823,24 @@ resume_kernel:
 	beq+	restore
 	andi.	r0,r3,MSR_EE	/* interrupts off? */
 	beq	restore		/* don't schedule if so */
+#ifdef CONFIG_TRACE_IRQFLAGS
+	/* Lockdep thinks irqs are enabled, we need to call
+	 * preempt_schedule_irq with IRQs off, so we inform lockdep
+	 * now that we -did- turn them off already
+	 */
+	bl	trace_hardirqs_off
+#endif
1:	bl	preempt_schedule_irq
 	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
 	lwz	r3,TI_FLAGS(r9)
 	andi.	r0,r3,_TIF_NEED_RESCHED
 	bne-	1b
+#ifdef CONFIG_TRACE_IRQFLAGS
+	/* And now, to properly rebalance the above, we tell lockdep they
+	 * are being turned back on, which will happen when we return
+	 */
+	bl	trace_hardirqs_on
+#endif
 #else
 resume_kernel:
 #endif /* CONFIG_PREEMPT */
@@ -765,6 +857,28 @@ restore:
 	stw	r6,icache_44x_need_flush@l(r4)
1:
 #endif /* CONFIG_44x */
+
+	lwz	r9,_MSR(r1)
+#ifdef CONFIG_TRACE_IRQFLAGS
+	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
+	 * off in this assembly code while peeking at TI_FLAGS() and such. However
+	 * we need to inform it if the exception turned interrupts off, and we
+	 * are about to turn them back on.
+	 *
+	 * The problem here sadly is that we don't know whether the exception was
+	 * one that turned interrupts off or not. So we always tell lockdep about
+	 * turning them on here when we go back to wherever we came from with EE
+	 * on, even if that may mean some redundant calls being tracked. Maybe later
+	 * we could encode what the exception did somewhere or test the exception
+	 * type in the pt_regs but that sounds overkill
+	 */
+	andi.	r10,r9,MSR_EE
+	beq	1f
+	bl	trace_hardirqs_on
+	lwz	r9,_MSR(r1)
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
+
 	lwz	r0,GPR0(r1)
 	lwz	r2,GPR2(r1)
 	REST_4GPRS(3, r1)
@@ -782,7 +896,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
 	stwcx.	r0,0,r1			/* to clear the reservation */
 
 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
-	lwz	r9,_MSR(r1)
 	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
 	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */
 
@@ -805,7 +918,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
 	MTMSRD(r10)		/* clear the RI bit */
 	.globl exc_exit_restart
 exc_exit_restart:
-	lwz	r9,_MSR(r1)
 	lwz	r12,_NIP(r1)
 	FIX_SRR1(r9,r10)
 	mtspr	SPRN_SRR0,r12
@@ -1035,11 +1147,18 @@ do_work: /* r10 contains MSR_KERNEL here */
 	beq	do_user_signal
 
 do_resched:			/* r10 contains MSR_KERNEL here */
+	/* Note: We don't need to inform lockdep that we are enabling
+	 * interrupts here. As far as it knows, they are already enabled
+	 */
 	ori	r10,r10,MSR_EE
 	SYNC
 	MTMSRD(r10)		/* hard-enable interrupts */
 	bl	schedule
 recheck:
+	/* Note: And we don't tell it we are disabling them again
+	 * either. Those disable/enable cycles used to peek at
+	 * TI_FLAGS aren't advertised.
+	 */
 	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
 	SYNC
 	MTMSRD(r10)		/* disable interrupts */
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index 1b12696cca06..ce1f3e44c24f 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -586,7 +586,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 		return;
 	}
 
-	if (ftrace_push_return_trace(old, self_addr, &trace.depth) == -EBUSY) {
+	if (ftrace_push_return_trace(old, self_addr, &trace.depth, 0) == -EBUSY) {
 		*parent = old;
 		return;
 	}
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 48469463f89e..fc2132942754 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -1124,9 +1124,8 @@ mmu_off:
 	RFI
 
 /*
- * Use the first pair of BAT registers to map the 1st 16MB
- * of RAM to PAGE_OFFSET.  From this point on we can't safely
- * call OF any more.
+ * On 601, we use 3 BATs to map up to 24M of RAM at _PAGE_OFFSET
+ * (we keep one for debugging) and on others, we use one 256M BAT.
  */
 initial_bats:
 	lis	r11,PAGE_OFFSET@h
@@ -1136,12 +1135,16 @@ initial_bats:
 	bne	4f
 	ori	r11,r11,4		/* set up BAT registers for 601 */
 	li	r8,0x7f			/* valid, block length = 8MB */
-	oris	r9,r11,0x800000@h	/* set up BAT reg for 2nd 8M */
-	oris	r10,r8,0x800000@h	/* set up BAT reg for 2nd 8M */
 	mtspr	SPRN_IBAT0U,r11		/* N.B. 601 has valid bit in */
 	mtspr	SPRN_IBAT0L,r8		/* lower BAT register */
-	mtspr	SPRN_IBAT1U,r9
-	mtspr	SPRN_IBAT1L,r10
+	addis	r11,r11,0x800000@h
+	addis	r8,r8,0x800000@h
+	mtspr	SPRN_IBAT1U,r11
+	mtspr	SPRN_IBAT1L,r8
+	addis	r11,r11,0x800000@h
+	addis	r8,r8,0x800000@h
+	mtspr	SPRN_IBAT2U,r11
+	mtspr	SPRN_IBAT2L,r8
 	isync
 	blr
 
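
On the 601 the new sequence programs three consecutive IBAT pairs, stepping both the effective and the physical address by 0x800000 per addis. A quick arithmetic check of the coverage claimed in the new comment (sketch):

    #include <stdio.h>

    int main(void)
    {
            /* BL = 0x7f in the lower BAT word selects an 8MB block, and
             * each addis ...,0x800000@h step advances one block. */
            unsigned long block = 0x800000;			/* 8MB */
            printf("601 BAT coverage: %lu MB\n", 3 * block >> 20);	/* 24 MB */
            return 0;
    }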
diff --git a/arch/powerpc/kernel/mpc7450-pmu.c b/arch/powerpc/kernel/mpc7450-pmu.c
new file mode 100644
index 000000000000..cc466d039af6
--- /dev/null
+++ b/arch/powerpc/kernel/mpc7450-pmu.c
@@ -0,0 +1,417 @@
+/*
+ * Performance counter support for MPC7450-family processors.
+ *
+ * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/string.h>
+#include <linux/perf_counter.h>
+#include <asm/reg.h>
+#include <asm/cputable.h>
+
+#define N_COUNTER	6	/* Number of hardware counters */
+#define MAX_ALT		3	/* Maximum number of event alternative codes */
+
+/*
+ * Bits in event code for MPC7450 family
+ */
+#define PM_THRMULT_MSKS	0x40000
+#define PM_THRESH_SH	12
+#define PM_THRESH_MSK	0x3f
+#define PM_PMC_SH	8
+#define PM_PMC_MSK	7
+#define PM_PMCSEL_MSK	0x7f
+
+/*
+ * Classify events according to how specific their PMC requirements are.
+ * Result is:
+ *	0: can go on any PMC
+ *	1: can go on PMCs 1-4
+ *	2: can go on PMCs 1,2,4
+ *	3: can go on PMCs 1 or 2
+ *	4: can only go on one PMC
+ *	-1: event code is invalid
+ */
+#define N_CLASSES	5
+
+static int mpc7450_classify_event(u32 event)
+{
+	int pmc;
+
+	pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+	if (pmc) {
+		if (pmc > N_COUNTER)
+			return -1;
+		return 4;
+	}
+	event &= PM_PMCSEL_MSK;
+	if (event <= 1)
+		return 0;
+	if (event <= 7)
+		return 1;
+	if (event <= 13)
+		return 2;
+	if (event <= 22)
+		return 3;
+	return -1;
+}
+
+/*
+ * Events using threshold and possible threshold scale:
+ *	code	scale?	name
+ *	11e	N	PM_INSTQ_EXCEED_CYC
+ *	11f	N	PM_ALTV_IQ_EXCEED_CYC
+ *	128	Y	PM_DTLB_SEARCH_EXCEED_CYC
+ *	12b	Y	PM_LD_MISS_EXCEED_L1_CYC
+ *	220	N	PM_CQ_EXCEED_CYC
+ *	30c	N	PM_GPR_RB_EXCEED_CYC
+ *	30d	?	PM_FPR_IQ_EXCEED_CYC ?
+ *	311	Y	PM_ITLB_SEARCH_EXCEED
+ *	410	N	PM_GPR_IQ_EXCEED_CYC
+ */
+
+/*
+ * Return use of threshold and threshold scale bits:
+ * 0 = uses neither, 1 = uses threshold, 2 = uses both
+ */
+static int mpc7450_threshold_use(u32 event)
+{
+	int pmc, sel;
+
+	pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+	sel = event & PM_PMCSEL_MSK;
+	switch (pmc) {
+	case 1:
+		if (sel == 0x1e || sel == 0x1f)
+			return 1;
+		if (sel == 0x28 || sel == 0x2b)
+			return 2;
+		break;
+	case 2:
+		if (sel == 0x20)
+			return 1;
+		break;
+	case 3:
+		if (sel == 0xc || sel == 0xd)
+			return 1;
+		if (sel == 0x11)
+			return 2;
+		break;
+	case 4:
+		if (sel == 0x10)
+			return 1;
+		break;
+	}
+	return 0;
+}
+
+/*
+ * Layout of constraint bits:
+ * 33222222222211111111110000000000
+ * 10987654321098765432109876543210
+ *  |<    ><  > < > < ><><><><><><>
+ *  TS TV   G4   G3  G2P6P5P4P3P2P1
+ *
+ * P1 - P6
+ *	0 - 11: Count of events needing PMC1 .. PMC6
+ *
+ * G2
+ *	12 - 14: Count of events needing PMC1 or PMC2
+ *
+ * G3
+ *	16 - 18: Count of events needing PMC1, PMC2 or PMC4
+ *
+ * G4
+ *	20 - 23: Count of events needing PMC1, PMC2, PMC3 or PMC4
+ *
+ * TV
+ *	24 - 29: Threshold value requested
+ *
+ * TS
+ *	30: Threshold scale value requested
+ */
+
+static u32 pmcbits[N_COUNTER][2] = {
+	{ 0x00844002, 0x00111001 },	/* PMC1 mask, value: P1,G2,G3,G4 */
+	{ 0x00844008, 0x00111004 },	/* PMC2: P2,G2,G3,G4 */
+	{ 0x00800020, 0x00100010 },	/* PMC3: P3,G4 */
+	{ 0x00840080, 0x00110040 },	/* PMC4: P4,G3,G4 */
+	{ 0x00000200, 0x00000100 },	/* PMC5: P5 */
+	{ 0x00000800, 0x00000400 }	/* PMC6: P6 */
+};
+
+static u32 classbits[N_CLASSES - 1][2] = {
+	{ 0x00000000, 0x00000000 },	/* class 0: no constraint */
+	{ 0x00800000, 0x00100000 },	/* class 1: G4 */
+	{ 0x00040000, 0x00010000 },	/* class 2: G3 */
+	{ 0x00004000, 0x00001000 },	/* class 3: G2 */
+};
+
+static int mpc7450_get_constraint(u64 event, unsigned long *maskp,
+				  unsigned long *valp)
+{
+	int pmc, class;
+	u32 mask, value;
+	int thresh, tuse;
+
+	class = mpc7450_classify_event(event);
+	if (class < 0)
+		return -1;
+	if (class == 4) {
+		pmc = ((unsigned int)event >> PM_PMC_SH) & PM_PMC_MSK;
+		mask = pmcbits[pmc - 1][0];
+		value = pmcbits[pmc - 1][1];
+	} else {
+		mask = classbits[class][0];
+		value = classbits[class][1];
+	}
+
+	tuse = mpc7450_threshold_use(event);
+	if (tuse) {
+		thresh = ((unsigned int)event >> PM_THRESH_SH) & PM_THRESH_MSK;
+		mask |= 0x3f << 24;
+		value |= thresh << 24;
+		if (tuse == 2) {
+			mask |= 0x40000000;
+			if ((unsigned int)event & PM_THRMULT_MSKS)
+				value |= 0x40000000;
+		}
+	}
+
+	*maskp = mask;
+	*valp = value;
+	return 0;
+}
+
+static const unsigned int event_alternatives[][MAX_ALT] = {
+	{ 0x217, 0x317 },		/* PM_L1_DCACHE_MISS */
+	{ 0x418, 0x50f, 0x60f },	/* PM_SNOOP_RETRY */
+	{ 0x502, 0x602 },		/* PM_L2_HIT */
+	{ 0x503, 0x603 },		/* PM_L3_HIT */
+	{ 0x504, 0x604 },		/* PM_L2_ICACHE_MISS */
+	{ 0x505, 0x605 },		/* PM_L3_ICACHE_MISS */
+	{ 0x506, 0x606 },		/* PM_L2_DCACHE_MISS */
+	{ 0x507, 0x607 },		/* PM_L3_DCACHE_MISS */
+	{ 0x50a, 0x623 },		/* PM_LD_HIT_L3 */
+	{ 0x50b, 0x624 },		/* PM_ST_HIT_L3 */
+	{ 0x50d, 0x60d },		/* PM_L2_TOUCH_HIT */
+	{ 0x50e, 0x60e },		/* PM_L3_TOUCH_HIT */
+	{ 0x512, 0x612 },		/* PM_INT_LOCAL */
+	{ 0x513, 0x61d },		/* PM_L2_MISS */
+	{ 0x514, 0x61e },		/* PM_L3_MISS */
+};
+
+/*
+ * Scan the alternatives table for a match and return the
+ * index into the alternatives table if found, else -1.
+ */
+static int find_alternative(u32 event)
+{
+	int i, j;
+
+	for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
+		if (event < event_alternatives[i][0])
+			break;
+		for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
+			if (event == event_alternatives[i][j])
+				return i;
+	}
+	return -1;
+}
+
+static int mpc7450_get_alternatives(u64 event, unsigned int flags, u64 alt[])
+{
+	int i, j, nalt = 1;
+	u32 ae;
+
+	alt[0] = event;
+	nalt = 1;
+	i = find_alternative((u32)event);
+	if (i >= 0) {
+		for (j = 0; j < MAX_ALT; ++j) {
+			ae = event_alternatives[i][j];
+			if (ae && ae != (u32)event)
+				alt[nalt++] = ae;
+		}
+	}
+	return nalt;
+}
+
+/*
+ * Bitmaps of which PMCs each class can use for classes 0 - 3.
+ * Bit i is set if PMC i+1 is usable.
+ */
+static const u8 classmap[N_CLASSES] = {
+	0x3f, 0x0f, 0x0b, 0x03, 0
+};
+
+/* Bit position and width of each PMCSEL field */
+static const int pmcsel_shift[N_COUNTER] = {
+	6,	0,	27,	22,	17,	11
+};
+static const u32 pmcsel_mask[N_COUNTER] = {
+	0x7f,	0x3f,	0x1f,	0x1f,	0x1f,	0x3f
+};
+
+/*
+ * Compute MMCR0/1/2 values for a set of events.
+ */
+static int mpc7450_compute_mmcr(u64 event[], int n_ev,
+				unsigned int hwc[], unsigned long mmcr[])
+{
+	u8 event_index[N_CLASSES][N_COUNTER];
+	int n_classevent[N_CLASSES];
+	int i, j, class, tuse;
+	u32 pmc_inuse = 0, pmc_avail;
+	u32 mmcr0 = 0, mmcr1 = 0, mmcr2 = 0;
+	u32 ev, pmc, thresh;
+
+	if (n_ev > N_COUNTER)
+		return -1;
+
+	/* First pass: count usage in each class */
+	for (i = 0; i < N_CLASSES; ++i)
+		n_classevent[i] = 0;
+	for (i = 0; i < n_ev; ++i) {
+		class = mpc7450_classify_event(event[i]);
+		if (class < 0)
+			return -1;
+		j = n_classevent[class]++;
+		event_index[class][j] = i;
+	}
+
+	/* Second pass: allocate PMCs from most specific event to least */
+	for (class = N_CLASSES - 1; class >= 0; --class) {
+		for (i = 0; i < n_classevent[class]; ++i) {
+			ev = event[event_index[class][i]];
+			if (class == 4) {
+				pmc = (ev >> PM_PMC_SH) & PM_PMC_MSK;
+				if (pmc_inuse & (1 << (pmc - 1)))
+					return -1;
+			} else {
+				/* Find a suitable PMC */
+				pmc_avail = classmap[class] & ~pmc_inuse;
+				if (!pmc_avail)
+					return -1;
+				pmc = ffs(pmc_avail);
+			}
+			pmc_inuse |= 1 << (pmc - 1);
+
+			tuse = mpc7450_threshold_use(ev);
+			if (tuse) {
+				thresh = (ev >> PM_THRESH_SH) & PM_THRESH_MSK;
+				mmcr0 |= thresh << 16;
+				if (tuse == 2 && (ev & PM_THRMULT_MSKS))
+					mmcr2 = 0x80000000;
+			}
+			ev &= pmcsel_mask[pmc - 1];
+			ev <<= pmcsel_shift[pmc - 1];
+			if (pmc <= 2)
+				mmcr0 |= ev;
+			else
+				mmcr1 |= ev;
+			hwc[event_index[class][i]] = pmc - 1;
+		}
+	}
+
+	if (pmc_inuse & 1)
+		mmcr0 |= MMCR0_PMC1CE;
+	if (pmc_inuse & 0x3e)
+		mmcr0 |= MMCR0_PMCnCE;
+
+	/* Return MMCRx values */
+	mmcr[0] = mmcr0;
+	mmcr[1] = mmcr1;
+	mmcr[2] = mmcr2;
+	return 0;
+}
+
+/*
+ * Disable counting by a PMC.
+ * Note that the pmc argument is 0-based here, not 1-based.
+ */
+static void mpc7450_disable_pmc(unsigned int pmc, unsigned long mmcr[])
+{
+	if (pmc <= 1)
+		mmcr[0] &= ~(pmcsel_mask[pmc] << pmcsel_shift[pmc]);
+	else
+		mmcr[1] &= ~(pmcsel_mask[pmc] << pmcsel_shift[pmc]);
+}
+
+static int mpc7450_generic_events[] = {
+	[PERF_COUNT_HW_CPU_CYCLES]		= 1,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= 2,
+	[PERF_COUNT_HW_CACHE_MISSES]		= 0x217, /* PM_L1_DCACHE_MISS */
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x122, /* PM_BR_CMPL */
+	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x41c, /* PM_BR_MPRED */
+};
+
+#define C(x)	PERF_COUNT_HW_CACHE_##x
+
+/*
+ * Table of generalized cache-related events.
+ * 0 means not supported, -1 means nonsensical, other values
+ * are event codes.
+ */
+static int mpc7450_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+	[C(L1D)] = {		/*	RESULT_ACCESS	RESULT_MISS */
+		[C(OP_READ)] = {	0,		0x225	},
+		[C(OP_WRITE)] = {	0,		0x227	},
+		[C(OP_PREFETCH)] = {	0,		0	},
+	},
+	[C(L1I)] = {		/*	RESULT_ACCESS	RESULT_MISS */
+		[C(OP_READ)] = {	0x129,		0x115	},
+		[C(OP_WRITE)] = {	-1,		-1	},
+		[C(OP_PREFETCH)] = {	0x634,		0	},
+	},
+	[C(LL)] = {		/*	RESULT_ACCESS	RESULT_MISS */
+		[C(OP_READ)] = {	0,		0	},
+		[C(OP_WRITE)] = {	0,		0	},
+		[C(OP_PREFETCH)] = {	0,		0	},
+	},
+	[C(DTLB)] = {		/*	RESULT_ACCESS	RESULT_MISS */
+		[C(OP_READ)] = {	0,		0x312	},
+		[C(OP_WRITE)] = {	-1,		-1	},
+		[C(OP_PREFETCH)] = {	-1,		-1	},
+	},
+	[C(ITLB)] = {		/*	RESULT_ACCESS	RESULT_MISS */
+		[C(OP_READ)] = {	0,		0x223	},
+		[C(OP_WRITE)] = {	-1,		-1	},
+		[C(OP_PREFETCH)] = {	-1,		-1	},
+	},
+	[C(BPU)] = {		/*	RESULT_ACCESS	RESULT_MISS */
+		[C(OP_READ)] = {	0x122,		0x41c	},
+		[C(OP_WRITE)] = {	-1,		-1	},
+		[C(OP_PREFETCH)] = {	-1,		-1	},
+	},
+};
+
+struct power_pmu mpc7450_pmu = {
+	.name			= "MPC7450 family",
+	.n_counter		= N_COUNTER,
+	.max_alternatives	= MAX_ALT,
+	.add_fields		= 0x00111555ul,
+	.test_adder		= 0x00301000ul,
+	.compute_mmcr		= mpc7450_compute_mmcr,
+	.get_constraint		= mpc7450_get_constraint,
+	.get_alternatives	= mpc7450_get_alternatives,
+	.disable_pmc		= mpc7450_disable_pmc,
+	.n_generic		= ARRAY_SIZE(mpc7450_generic_events),
+	.generic_events		= mpc7450_generic_events,
+	.cache_events		= &mpc7450_cache_events,
+};
+
+static int init_mpc7450_pmu(void)
+{
+	if (!cur_cpu_spec->oprofile_cpu_type ||
+	    strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/7450"))
+		return -ENODEV;
+
+	return register_power_pmu(&mpc7450_pmu);
+}
+
+arch_initcall(init_mpc7450_pmu);
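
As a quick illustration of the classification scheme this new file introduces, here is a hypothetical standalone harness mirroring mpc7450_classify_event(); it maps raw event codes to the PMC-placement classes listed in the comment above:

    #include <stdio.h>

    #define PM_PMC_SH	8
    #define PM_PMC_MSK	7
    #define PM_PMCSEL_MSK	0x7f
    #define N_COUNTER	6

    /* Mirror of mpc7450_classify_event(), for experimentation only. */
    static int classify(unsigned int event)
    {
            int pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;

            if (pmc)
                    return pmc > N_COUNTER ? -1 : 4;	/* pinned to one PMC */
            event &= PM_PMCSEL_MSK;
            if (event <= 1)
                    return 0;	/* any PMC */
            if (event <= 7)
                    return 1;	/* PMCs 1-4 */
            if (event <= 13)
                    return 2;	/* PMCs 1,2,4 */
            if (event <= 22)
                    return 3;	/* PMC 1 or 2 */
            return -1;
    }

    int main(void)
    {
            /* 0x217 = PM_L1_DCACHE_MISS (PMC field = 2): class 4.
             * 0x02 has no PMC field and selector 2: class 1. */
            printf("0x217 -> class %d\n", classify(0x217));
            printf("0x02  -> class %d\n", classify(0x02));
            return 0;
    }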
diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c
index fa983a59c4ce..a359cb08e900 100644
--- a/arch/powerpc/kernel/of_device.c
+++ b/arch/powerpc/kernel/of_device.c
@@ -76,7 +76,7 @@ struct of_device *of_device_alloc(struct device_node *np,
 	dev->dev.archdata.of_node = np;
 
 	if (bus_id)
-		dev_set_name(&dev->dev, bus_id);
+		dev_set_name(&dev->dev, "%s", bus_id);
 	else
 		of_device_make_bus_id(dev);
 
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index bb202388170e..70e1f57f7dd8 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -29,7 +29,7 @@ struct cpu_hw_counters {
 	struct perf_counter *counter[MAX_HWCOUNTERS];
 	u64 events[MAX_HWCOUNTERS];
 	unsigned int flags[MAX_HWCOUNTERS];
-	u64 mmcr[3];
+	unsigned long mmcr[3];
 	struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS];
 	u8  limited_hwidx[MAX_LIMITED_HWCOUNTERS];
 };
@@ -46,6 +46,115 @@ struct power_pmu *ppmu;
  */
 static unsigned int freeze_counters_kernel = MMCR0_FCS;
 
+/*
+ * 32-bit doesn't have MMCRA but does have an MMCR2,
+ * and a few other names are different.
+ */
+#ifdef CONFIG_PPC32
+
+#define MMCR0_FCHV		0
+#define MMCR0_PMCjCE		MMCR0_PMCnCE
+
+#define SPRN_MMCRA		SPRN_MMCR2
+#define MMCRA_SAMPLE_ENABLE	0
+
+static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
+{
+	return 0;
+}
+static inline void perf_set_pmu_inuse(int inuse) { }
+static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
+static inline u32 perf_get_misc_flags(struct pt_regs *regs)
+{
+	return 0;
+}
+static inline void perf_read_regs(struct pt_regs *regs) { }
+static inline int perf_intr_is_nmi(struct pt_regs *regs)
+{
+	return 0;
+}
+
+#endif /* CONFIG_PPC32 */
+
+/*
+ * Things that are specific to 64-bit implementations.
+ */
+#ifdef CONFIG_PPC64
+
+static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
+{
+	unsigned long mmcra = regs->dsisr;
+
+	if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
+		unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
+		if (slot > 1)
+			return 4 * (slot - 1);
+	}
+	return 0;
+}
+
+static inline void perf_set_pmu_inuse(int inuse)
+{
+	get_lppaca()->pmcregs_in_use = inuse;
+}
+
+/*
+ * The user wants a data address recorded.
+ * If we're not doing instruction sampling, give them the SDAR
+ * (sampled data address).  If we are doing instruction sampling, then
+ * only give them the SDAR if it corresponds to the instruction
+ * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC
+ * bit in MMCRA.
+ */
+static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
+{
+	unsigned long mmcra = regs->dsisr;
+	unsigned long sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
+		POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
+
+	if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
+		*addrp = mfspr(SPRN_SDAR);
+}
+
+static inline u32 perf_get_misc_flags(struct pt_regs *regs)
+{
+	unsigned long mmcra = regs->dsisr;
+
+	if (TRAP(regs) != 0xf00)
+		return 0;	/* not a PMU interrupt */
+
+	if (ppmu->flags & PPMU_ALT_SIPR) {
+		if (mmcra & POWER6_MMCRA_SIHV)
+			return PERF_EVENT_MISC_HYPERVISOR;
+		return (mmcra & POWER6_MMCRA_SIPR) ?
+			PERF_EVENT_MISC_USER : PERF_EVENT_MISC_KERNEL;
+	}
+	if (mmcra & MMCRA_SIHV)
+		return PERF_EVENT_MISC_HYPERVISOR;
+	return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
+		PERF_EVENT_MISC_KERNEL;
+}
+
+/*
+ * Overload regs->dsisr to store MMCRA so we only need to read it once
+ * on each interrupt.
+ */
+static inline void perf_read_regs(struct pt_regs *regs)
+{
+	regs->dsisr = mfspr(SPRN_MMCRA);
+}
+
+/*
+ * If interrupts were soft-disabled when a PMU interrupt occurs, treat
+ * it as an NMI.
+ */
+static inline int perf_intr_is_nmi(struct pt_regs *regs)
+{
+	return !regs->softe;
+}
+
+#endif /* CONFIG_PPC64 */
+
 static void perf_counter_interrupt(struct pt_regs *regs);
 
 void perf_counter_print_debug(void)
@@ -78,12 +187,14 @@ static unsigned long read_pmc(int idx)
 	case 6:
 		val = mfspr(SPRN_PMC6);
 		break;
+#ifdef CONFIG_PPC64
 	case 7:
 		val = mfspr(SPRN_PMC7);
 		break;
 	case 8:
 		val = mfspr(SPRN_PMC8);
 		break;
+#endif /* CONFIG_PPC64 */
 	default:
 		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
 		val = 0;
@@ -115,12 +226,14 @@ static void write_pmc(int idx, unsigned long val)
 	case 6:
 		mtspr(SPRN_PMC6, val);
 		break;
+#ifdef CONFIG_PPC64
 	case 7:
 		mtspr(SPRN_PMC7, val);
 		break;
 	case 8:
 		mtspr(SPRN_PMC8, val);
 		break;
+#endif /* CONFIG_PPC64 */
 	default:
 		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
 	}
@@ -135,15 +248,15 @@ static void write_pmc(int idx, unsigned long val)
 static int power_check_constraints(u64 event[], unsigned int cflags[],
 				   int n_ev)
 {
-	u64 mask, value, nv;
+	unsigned long mask, value, nv;
 	u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
-	u64 amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
-	u64 avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
-	u64 smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
+	unsigned long amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+	unsigned long avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+	unsigned long smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
 	int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
 	int i, j;
-	u64 addf = ppmu->add_fields;
-	u64 tadd = ppmu->test_adder;
+	unsigned long addf = ppmu->add_fields;
+	unsigned long tadd = ppmu->test_adder;
 
 	if (n_ev > ppmu->n_counter)
 		return -1;
@@ -283,7 +396,7 @@ static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
 
 static void power_pmu_read(struct perf_counter *counter)
 {
-	long val, delta, prev;
+	s64 val, delta, prev;
 
 	if (!counter->hw.idx)
 		return;
@@ -403,14 +516,14 @@ static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
 void hw_perf_disable(void)
 {
 	struct cpu_hw_counters *cpuhw;
-	unsigned long ret;
 	unsigned long flags;
 
+	if (!ppmu)
+		return;
 	local_irq_save(flags);
 	cpuhw = &__get_cpu_var(cpu_hw_counters);
 
-	ret = cpuhw->disabled;
-	if (!ret) {
+	if (!cpuhw->disabled) {
 		cpuhw->disabled = 1;
 		cpuhw->n_added = 0;
 
@@ -461,6 +574,8 @@ void hw_perf_enable(void)
 	int n_lim;
 	int idx;
 
+	if (!ppmu)
+		return;
 	local_irq_save(flags);
 	cpuhw = &__get_cpu_var(cpu_hw_counters);
 	if (!cpuhw->disabled) {
@@ -479,7 +594,7 @@ void hw_perf_enable(void)
 		mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
 		mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
 		if (cpuhw->n_counters == 0)
-			get_lppaca()->pmcregs_in_use = 0;
+			perf_set_pmu_inuse(0);
 		goto out_enable;
 	}
 
@@ -512,7 +627,7 @@ void hw_perf_enable(void)
 	 * bit set and set the hardware counters to their initial values.
 	 * Then unfreeze the counters.
 	 */
-	get_lppaca()->pmcregs_in_use = 1;
+	perf_set_pmu_inuse(1);
 	mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
 	mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
 	mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
@@ -626,6 +741,8 @@ int hw_perf_group_sched_in(struct perf_counter *group_leader,
 	long i, n, n0;
 	struct perf_counter *sub;
 
+	if (!ppmu)
+		return 0;
 	cpuhw = &__get_cpu_var(cpu_hw_counters);
 	n0 = cpuhw->n_counters;
 	n = collect_events(group_leader, ppmu->n_counter - n0,
@@ -913,6 +1030,8 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 	case PERF_TYPE_RAW:
 		ev = counter->attr.config;
 		break;
+	default:
+		return ERR_PTR(-EINVAL);
 	}
 	counter->hw.config_base = ev;
 	counter->hw.idx = 0;
@@ -1007,13 +1126,12 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 	 * things if requested.  Note that interrupts are hard-disabled
 	 * here so there is no possibility of being interrupted.
 	 */
-static void record_and_restart(struct perf_counter *counter, long val,
+static void record_and_restart(struct perf_counter *counter, unsigned long val,
 			       struct pt_regs *regs, int nmi)
 {
 	u64 period = counter->hw.sample_period;
 	s64 prev, delta, left;
 	int record = 0;
-	u64 addr, mmcra, sdsync;
 
 	/* we don't have to worry about interrupts here */
 	prev = atomic64_read(&counter->hw.prev_count);
@@ -1033,8 +1151,8 @@ static void record_and_restart(struct perf_counter *counter, long val,
 			left = period;
 			record = 1;
 		}
-		if (left < 0x80000000L)
-			val = 0x80000000L - left;
+		if (left < 0x80000000LL)
+			val = 0x80000000LL - left;
 	}
 
 	/*
@@ -1047,22 +1165,9 @@ static void record_and_restart(struct perf_counter *counter, long val,
 			.period	= counter->hw.last_period,
 		};
 
-		if (counter->attr.sample_type & PERF_SAMPLE_ADDR) {
-			/*
-			 * The user wants a data address recorded.
-			 * If we're not doing instruction sampling,
-			 * give them the SDAR (sampled data address).
-			 * If we are doing instruction sampling, then only
-			 * give them the SDAR if it corresponds to the
-			 * instruction pointed to by SIAR; this is indicated
-			 * by the [POWER6_]MMCRA_SDSYNC bit in MMCRA.
-			 */
-			mmcra = regs->dsisr;
-			sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
-				POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
-			if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
-				data.addr = mfspr(SPRN_SDAR);
-		}
+		if (counter->attr.sample_type & PERF_SAMPLE_ADDR)
+			perf_get_data_addr(regs, &data.addr);
+
 		if (perf_counter_overflow(counter, nmi, &data)) {
 			/*
 			 * Interrupts are coming too fast - throttle them
@@ -1088,25 +1193,12 @@ static void record_and_restart(struct perf_counter *counter, long val,
  */
 unsigned long perf_misc_flags(struct pt_regs *regs)
 {
-	unsigned long mmcra;
-
-	if (TRAP(regs) != 0xf00) {
-		/* not a PMU interrupt */
-		return user_mode(regs) ? PERF_EVENT_MISC_USER :
-			PERF_EVENT_MISC_KERNEL;
-	}
+	u32 flags = perf_get_misc_flags(regs);
 
-	mmcra = regs->dsisr;
-	if (ppmu->flags & PPMU_ALT_SIPR) {
-		if (mmcra & POWER6_MMCRA_SIHV)
-			return PERF_EVENT_MISC_HYPERVISOR;
-		return (mmcra & POWER6_MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
-			PERF_EVENT_MISC_KERNEL;
-	}
-	if (mmcra & MMCRA_SIHV)
-		return PERF_EVENT_MISC_HYPERVISOR;
-	return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
-		PERF_EVENT_MISC_KERNEL;
+	if (flags)
+		return flags;
+	return user_mode(regs) ? PERF_EVENT_MISC_USER :
+		PERF_EVENT_MISC_KERNEL;
 }
 
 /*
@@ -1115,20 +1207,12 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
  */
 unsigned long perf_instruction_pointer(struct pt_regs *regs)
 {
-	unsigned long mmcra;
 	unsigned long ip;
-	unsigned long slot;
 
 	if (TRAP(regs) != 0xf00)
 		return regs->nip;	/* not a PMU interrupt */
 
-	ip = mfspr(SPRN_SIAR);
-	mmcra = regs->dsisr;
-	if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
-		slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
-		if (slot > 1)
-			ip += 4 * (slot - 1);
-	}
+	ip = mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
 	return ip;
 }
 
@@ -1140,7 +1224,7 @@ static void perf_counter_interrupt(struct pt_regs *regs)
 	int i;
 	struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
 	struct perf_counter *counter;
-	long val;
+	unsigned long val;
 	int found = 0;
 	int nmi;
 
@@ -1148,16 +1232,9 @@ static void perf_counter_interrupt(struct pt_regs *regs)
 		freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
 					mfspr(SPRN_PMC6));
 
-	/*
-	 * Overload regs->dsisr to store MMCRA so we only need to read it once.
-	 */
-	regs->dsisr = mfspr(SPRN_MMCRA);
+	perf_read_regs(regs);
 
-	/*
-	 * If interrupts were soft-disabled when this PMU interrupt
-	 * occurred, treat it as an NMI.
-	 */
-	nmi = !regs->softe;
+	nmi = perf_intr_is_nmi(regs);
 	if (nmi)
 		nmi_enter();
 	else
@@ -1210,54 +1287,28 @@ void hw_perf_counter_setup(int cpu)
 {
 	struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu);
 
+	if (!ppmu)
+		return;
 	memset(cpuhw, 0, sizeof(*cpuhw));
 	cpuhw->mmcr[0] = MMCR0_FC;
 }
 
-extern struct power_pmu power4_pmu;
-extern struct power_pmu ppc970_pmu;
-extern struct power_pmu power5_pmu;
-extern struct power_pmu power5p_pmu;
-extern struct power_pmu power6_pmu;
-extern struct power_pmu power7_pmu;
-
-static int init_perf_counters(void)
+int register_power_pmu(struct power_pmu *pmu)
 {
-	unsigned long pvr;
-
-	/* XXX should get this from cputable */
-	pvr = mfspr(SPRN_PVR);
-	switch (PVR_VER(pvr)) {
-	case PV_POWER4:
-	case PV_POWER4p:
-		ppmu = &power4_pmu;
-		break;
-	case PV_970:
-	case PV_970FX:
-	case PV_970MP:
-		ppmu = &ppc970_pmu;
-		break;
-	case PV_POWER5:
-		ppmu = &power5_pmu;
-		break;
-	case PV_POWER5p:
-		ppmu = &power5p_pmu;
-		break;
-	case 0x3e:
-		ppmu = &power6_pmu;
-		break;
-	case 0x3f:
-		ppmu = &power7_pmu;
-		break;
-	}
+	if (ppmu)
+		return -EBUSY;		/* something's already registered */
+
+	ppmu = pmu;
+	pr_info("%s performance monitor hardware support registered\n",
+		pmu->name);
 
+#ifdef MSR_HV
 	/*
 	 * Use FCHV to ignore kernel events if MSR.HV is set.
 	 */
 	if (mfmsr() & MSR_HV)
 		freeze_counters_kernel = MMCR0_FCHV;
+#endif /* CONFIG_PPC64 */
 
 	return 0;
 }
-
-arch_initcall(init_perf_counters);
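
With the PVR switch gone, each per-CPU PMU driver now registers itself at boot. A sketch of the new contract from a driver's point of view; the mycpu names are hypothetical, but the shape matches init_mpc7450_pmu() above and the init_power4_pmu() added below:

    #include <linux/init.h>
    #include <linux/string.h>
    #include <asm/cputable.h>

    /* Hypothetical driver: fill in the table as the real drivers do. */
    static struct power_pmu mycpu_pmu;

    static int __init init_mycpu_pmu(void)
    {
            /* Only claim the PMU if the booted CPU is ours... */
            if (!cur_cpu_spec->oprofile_cpu_type ||
                strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/mycpu"))
                    return -ENODEV;

            /* ...then hand the table to the common code; returns -EBUSY
             * if another driver registered first. */
            return register_power_pmu(&mycpu_pmu);
    }
    arch_initcall(init_mycpu_pmu);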
diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c
index 07bd308a5fa7..3c90a3d9173e 100644
--- a/arch/powerpc/kernel/power4-pmu.c
+++ b/arch/powerpc/kernel/power4-pmu.c
@@ -10,7 +10,9 @@
  */
 #include <linux/kernel.h>
 #include <linux/perf_counter.h>
+#include <linux/string.h>
 #include <asm/reg.h>
+#include <asm/cputable.h>
 
 /*
  * Bits in event code for POWER4
@@ -179,22 +181,22 @@ static short mmcr1_adder_bits[8] = {
  */
 
 static struct unitinfo {
-	u64	value, mask;
+	unsigned long	value, mask;
 	int	unit;
 	int	lowerbit;
 } p4_unitinfo[16] = {
-	[PM_FPU]  = { 0x44000000000000ull, 0x88000000000000ull, PM_FPU, 0 },
-	[PM_ISU1] = { 0x20080000000000ull, 0x88000000000000ull, PM_ISU1, 0 },
+	[PM_FPU]  = { 0x44000000000000ul, 0x88000000000000ul, PM_FPU, 0 },
+	[PM_ISU1] = { 0x20080000000000ul, 0x88000000000000ul, PM_ISU1, 0 },
 	[PM_ISU1_ALT] =
-		{ 0x20080000000000ull, 0x88000000000000ull, PM_ISU1, 0 },
-	[PM_IFU]  = { 0x02200000000000ull, 0x08820000000000ull, PM_IFU, 41 },
+		{ 0x20080000000000ul, 0x88000000000000ul, PM_ISU1, 0 },
+	[PM_IFU]  = { 0x02200000000000ul, 0x08820000000000ul, PM_IFU, 41 },
 	[PM_IFU_ALT] =
-		{ 0x02200000000000ull, 0x08820000000000ull, PM_IFU, 41 },
-	[PM_IDU0] = { 0x10100000000000ull, 0x80840000000000ull, PM_IDU0, 1 },
-	[PM_ISU2] = { 0x10140000000000ull, 0x80840000000000ull, PM_ISU2, 0 },
-	[PM_LSU0] = { 0x01400000000000ull, 0x08800000000000ull, PM_LSU0, 0 },
-	[PM_LSU1] = { 0x00000000000000ull, 0x00010000000000ull, PM_LSU1, 40 },
-	[PM_GPS]  = { 0x00000000000000ull, 0x00000000000000ull, PM_GPS, 0 }
+		{ 0x02200000000000ul, 0x08820000000000ul, PM_IFU, 41 },
+	[PM_IDU0] = { 0x10100000000000ul, 0x80840000000000ul, PM_IDU0, 1 },
+	[PM_ISU2] = { 0x10140000000000ul, 0x80840000000000ul, PM_ISU2, 0 },
+	[PM_LSU0] = { 0x01400000000000ul, 0x08800000000000ul, PM_LSU0, 0 },
+	[PM_LSU1] = { 0x00000000000000ul, 0x00010000000000ul, PM_LSU1, 40 },
+	[PM_GPS]  = { 0x00000000000000ul, 0x00000000000000ul, PM_GPS, 0 }
 };
 
 static unsigned char direct_marked_event[8] = {
@@ -249,10 +251,11 @@ static int p4_marked_instr_event(u64 event)
 	return (mask >> (byte * 8 + bit)) & 1;
 }
 
-static int p4_get_constraint(u64 event, u64 *maskp, u64 *valp)
+static int p4_get_constraint(u64 event, unsigned long *maskp,
+			     unsigned long *valp)
 {
 	int pmc, byte, unit, lower, sh;
-	u64 mask = 0, value = 0;
+	unsigned long mask = 0, value = 0;
 	int grp = -1;
 
 	pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
@@ -282,14 +285,14 @@ static int p4_get_constraint(u64 event, u64 *maskp, u64 *valp)
 		value |= p4_unitinfo[unit].value;
 		sh = p4_unitinfo[unit].lowerbit;
 		if (sh > 1)
-			value |= (u64)lower << sh;
+			value |= (unsigned long)lower << sh;
 		else if (lower != sh)
 			return -1;
 		unit = p4_unitinfo[unit].unit;
 
 		/* Set byte lane select field */
 		mask |= 0xfULL << (28 - 4 * byte);
-		value |= (u64)unit << (28 - 4 * byte);
+		value |= (unsigned long)unit << (28 - 4 * byte);
 	}
 	if (grp == 0) {
 		/* increment PMC1/2/5/6 field */
@@ -353,9 +356,9 @@ static int p4_get_alternatives(u64 event, unsigned int flags, u64 alt[])
 }
 
 static int p4_compute_mmcr(u64 event[], int n_ev,
-			   unsigned int hwc[], u64 mmcr[])
+			   unsigned int hwc[], unsigned long mmcr[])
 {
-	u64 mmcr0 = 0, mmcr1 = 0, mmcra = 0;
+	unsigned long mmcr0 = 0, mmcr1 = 0, mmcra = 0;
 	unsigned int pmc, unit, byte, psel, lower;
 	unsigned int ttm, grp;
 	unsigned int pmc_inuse = 0;
@@ -429,9 +432,11 @@ static int p4_compute_mmcr(u64 event[], int n_ev,
 		return -1;
 
 	/* Set TTMxSEL fields.  Note, units 1-3 => TTM0SEL codes 0-2 */
-	mmcr1 |= (u64)(unituse[3] * 2 + unituse[2]) << MMCR1_TTM0SEL_SH;
-	mmcr1 |= (u64)(unituse[7] * 3 + unituse[6] * 2) << MMCR1_TTM1SEL_SH;
-	mmcr1 |= (u64)unituse[9] << MMCR1_TTM2SEL_SH;
+	mmcr1 |= (unsigned long)(unituse[3] * 2 + unituse[2])
+		<< MMCR1_TTM0SEL_SH;
+	mmcr1 |= (unsigned long)(unituse[7] * 3 + unituse[6] * 2)
+		<< MMCR1_TTM1SEL_SH;
+	mmcr1 |= (unsigned long)unituse[9] << MMCR1_TTM2SEL_SH;
 
 	/* Set TTCxSEL fields. */
 	if (unitlower & 0xe)
@@ -456,7 +461,8 @@ static int p4_compute_mmcr(u64 event[], int n_ev,
 			ttm = unit - 1;		/* 2->1, 3->2 */
 		else
 			ttm = unit >> 2;
-		mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2*byte);
+		mmcr1 |= (unsigned long)ttm
+			<< (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
 		}
 	}
 
@@ -519,7 +525,7 @@ static int p4_compute_mmcr(u64 event[], int n_ev,
 	return 0;
 }
 
-static void p4_disable_pmc(unsigned int pmc, u64 mmcr[])
+static void p4_disable_pmc(unsigned int pmc, unsigned long mmcr[])
 {
 	/*
 	 * Setting the PMCxSEL field to 0 disables PMC x.
@@ -583,16 +589,28 @@ static int power4_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
 	},
 };
 
-struct power_pmu power4_pmu = {
-	.n_counter = 8,
-	.max_alternatives = 5,
-	.add_fields = 0x0000001100005555ull,
-	.test_adder = 0x0011083300000000ull,
-	.compute_mmcr = p4_compute_mmcr,
-	.get_constraint = p4_get_constraint,
-	.get_alternatives = p4_get_alternatives,
-	.disable_pmc = p4_disable_pmc,
-	.n_generic = ARRAY_SIZE(p4_generic_events),
-	.generic_events = p4_generic_events,
-	.cache_events = &power4_cache_events,
+static struct power_pmu power4_pmu = {
+	.name			= "POWER4/4+",
+	.n_counter		= 8,
+	.max_alternatives	= 5,
+	.add_fields		= 0x0000001100005555ul,
+	.test_adder		= 0x0011083300000000ul,
+	.compute_mmcr		= p4_compute_mmcr,
+	.get_constraint		= p4_get_constraint,
+	.get_alternatives	= p4_get_alternatives,
+	.disable_pmc		= p4_disable_pmc,
+	.n_generic		= ARRAY_SIZE(p4_generic_events),
+	.generic_events		= p4_generic_events,
+	.cache_events		= &power4_cache_events,
 };
+
+static int init_power4_pmu(void)
+{
+	if (!cur_cpu_spec->oprofile_cpu_type ||
+	    strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power4"))
+		return -ENODEV;
+
+	return register_power_pmu(&power4_pmu);
+}
+
+arch_initcall(init_power4_pmu);
diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c
index 41e5d2d958d4..31918af3e355 100644
--- a/arch/powerpc/kernel/power5+-pmu.c
+++ b/arch/powerpc/kernel/power5+-pmu.c
@@ -10,7 +10,9 @@
  */
 #include <linux/kernel.h>
 #include <linux/perf_counter.h>
+#include <linux/string.h>
 #include <asm/reg.h>
+#include <asm/cputable.h>
 
 /*
  * Bits in event code for POWER5+ (POWER5 GS) and POWER5++ (POWER5 GS DD3)
@@ -126,20 +128,21 @@ static const int grsel_shift[8] = {
126}; 128};
127 129
128/* Masks and values for using events from the various units */ 130/* Masks and values for using events from the various units */
129static u64 unit_cons[PM_LASTUNIT+1][2] = { 131static unsigned long unit_cons[PM_LASTUNIT+1][2] = {
130 [PM_FPU] = { 0x3200000000ull, 0x0100000000ull }, 132 [PM_FPU] = { 0x3200000000ul, 0x0100000000ul },
131 [PM_ISU0] = { 0x0200000000ull, 0x0080000000ull }, 133 [PM_ISU0] = { 0x0200000000ul, 0x0080000000ul },
132 [PM_ISU1] = { 0x3200000000ull, 0x3100000000ull }, 134 [PM_ISU1] = { 0x3200000000ul, 0x3100000000ul },
133 [PM_IFU] = { 0x3200000000ull, 0x2100000000ull }, 135 [PM_IFU] = { 0x3200000000ul, 0x2100000000ul },
134 [PM_IDU] = { 0x0e00000000ull, 0x0040000000ull }, 136 [PM_IDU] = { 0x0e00000000ul, 0x0040000000ul },
135 [PM_GRS] = { 0x0e00000000ull, 0x0c40000000ull }, 137 [PM_GRS] = { 0x0e00000000ul, 0x0c40000000ul },
136}; 138};
137 139
138static int power5p_get_constraint(u64 event, u64 *maskp, u64 *valp) 140static int power5p_get_constraint(u64 event, unsigned long *maskp,
141 unsigned long *valp)
139{ 142{
140 int pmc, byte, unit, sh; 143 int pmc, byte, unit, sh;
141 int bit, fmask; 144 int bit, fmask;
142 u64 mask = 0, value = 0; 145 unsigned long mask = 0, value = 0;
143 146
144 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; 147 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
145 if (pmc) { 148 if (pmc) {
@@ -171,17 +174,18 @@ static int power5p_get_constraint(u64 event, u64 *maskp, u64 *valp)
171 bit = event & 7; 174 bit = event & 7;
172 fmask = (bit == 6)? 7: 3; 175 fmask = (bit == 6)? 7: 3;
173 sh = grsel_shift[bit]; 176 sh = grsel_shift[bit];
174 mask |= (u64)fmask << sh; 177 mask |= (unsigned long)fmask << sh;
175 value |= (u64)((event >> PM_GRS_SH) & fmask) << sh; 178 value |= (unsigned long)((event >> PM_GRS_SH) & fmask)
179 << sh;
176 } 180 }
177 /* Set byte lane select field */ 181 /* Set byte lane select field */
178 mask |= 0xfULL << (24 - 4 * byte); 182 mask |= 0xfUL << (24 - 4 * byte);
179 value |= (u64)unit << (24 - 4 * byte); 183 value |= (unsigned long)unit << (24 - 4 * byte);
180 } 184 }
181 if (pmc < 5) { 185 if (pmc < 5) {
182 /* need a counter from PMC1-4 set */ 186 /* need a counter from PMC1-4 set */
183 mask |= 0x8000000000000ull; 187 mask |= 0x8000000000000ul;
184 value |= 0x1000000000000ull; 188 value |= 0x1000000000000ul;
185 } 189 }
186 *maskp = mask; 190 *maskp = mask;
187 *valp = value; 191 *valp = value;
@@ -452,10 +456,10 @@ static int power5p_marked_instr_event(u64 event)
452} 456}
453 457
454static int power5p_compute_mmcr(u64 event[], int n_ev, 458static int power5p_compute_mmcr(u64 event[], int n_ev,
455 unsigned int hwc[], u64 mmcr[]) 459 unsigned int hwc[], unsigned long mmcr[])
456{ 460{
457 u64 mmcr1 = 0; 461 unsigned long mmcr1 = 0;
458 u64 mmcra = 0; 462 unsigned long mmcra = 0;
459 unsigned int pmc, unit, byte, psel; 463 unsigned int pmc, unit, byte, psel;
460 unsigned int ttm; 464 unsigned int ttm;
461 int i, isbus, bit, grsel; 465 int i, isbus, bit, grsel;
@@ -517,7 +521,7 @@ static int power5p_compute_mmcr(u64 event[], int n_ev,
517 continue; 521 continue;
518 if (ttmuse++) 522 if (ttmuse++)
519 return -1; 523 return -1;
520 mmcr1 |= (u64)i << MMCR1_TTM0SEL_SH; 524 mmcr1 |= (unsigned long)i << MMCR1_TTM0SEL_SH;
521 } 525 }
522 ttmuse = 0; 526 ttmuse = 0;
523 for (; i <= PM_GRS; ++i) { 527 for (; i <= PM_GRS; ++i) {
@@ -525,7 +529,7 @@ static int power5p_compute_mmcr(u64 event[], int n_ev,
525 continue; 529 continue;
526 if (ttmuse++) 530 if (ttmuse++)
527 return -1; 531 return -1;
528 mmcr1 |= (u64)(i & 3) << MMCR1_TTM1SEL_SH; 532 mmcr1 |= (unsigned long)(i & 3) << MMCR1_TTM1SEL_SH;
529 } 533 }
530 if (ttmuse > 1) 534 if (ttmuse > 1)
531 return -1; 535 return -1;
@@ -540,10 +544,11 @@ static int power5p_compute_mmcr(u64 event[], int n_ev,
540 unit = PM_ISU0_ALT; 544 unit = PM_ISU0_ALT;
541 } else if (unit == PM_LSU1 + 1) { 545 } else if (unit == PM_LSU1 + 1) {
542 /* select lower word of LSU1 for this byte */ 546 /* select lower word of LSU1 for this byte */
543 mmcr1 |= 1ull << (MMCR1_TTM3SEL_SH + 3 - byte); 547 mmcr1 |= 1ul << (MMCR1_TTM3SEL_SH + 3 - byte);
544 } 548 }
545 ttm = unit >> 2; 549 ttm = unit >> 2;
546 mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte); 550 mmcr1 |= (unsigned long)ttm
551 << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
547 } 552 }
548 553
549 /* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */ 554 /* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */
@@ -568,7 +573,7 @@ static int power5p_compute_mmcr(u64 event[], int n_ev,
568 if (isbus && (byte & 2) && 573 if (isbus && (byte & 2) &&
569 (psel == 8 || psel == 0x10 || psel == 0x28)) 574 (psel == 8 || psel == 0x10 || psel == 0x28))
570 /* add events on higher-numbered bus */ 575 /* add events on higher-numbered bus */
571 mmcr1 |= 1ull << (MMCR1_PMC1_ADDER_SEL_SH - pmc); 576 mmcr1 |= 1ul << (MMCR1_PMC1_ADDER_SEL_SH - pmc);
572 } else { 577 } else {
573 /* Instructions or run cycles on PMC5/6 */ 578 /* Instructions or run cycles on PMC5/6 */
574 --pmc; 579 --pmc;
@@ -576,7 +581,7 @@ static int power5p_compute_mmcr(u64 event[], int n_ev,
576 if (isbus && unit == PM_GRS) { 581 if (isbus && unit == PM_GRS) {
577 bit = psel & 7; 582 bit = psel & 7;
578 grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK; 583 grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK;
579 mmcr1 |= (u64)grsel << grsel_shift[bit]; 584 mmcr1 |= (unsigned long)grsel << grsel_shift[bit];
580 } 585 }
581 if (power5p_marked_instr_event(event[i])) 586 if (power5p_marked_instr_event(event[i]))
582 mmcra |= MMCRA_SAMPLE_ENABLE; 587 mmcra |= MMCRA_SAMPLE_ENABLE;
@@ -599,7 +604,7 @@ static int power5p_compute_mmcr(u64 event[], int n_ev,
599 return 0; 604 return 0;
600} 605}
601 606
602static void power5p_disable_pmc(unsigned int pmc, u64 mmcr[]) 607static void power5p_disable_pmc(unsigned int pmc, unsigned long mmcr[])
603{ 608{
604 if (pmc <= 3) 609 if (pmc <= 3)
605 mmcr[1] &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc)); 610 mmcr[1] &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc));
@@ -654,18 +659,31 @@ static int power5p_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
654 }, 659 },
655}; 660};
656 661
657struct power_pmu power5p_pmu = { 662static struct power_pmu power5p_pmu = {
658 .n_counter = 6, 663 .name = "POWER5+/++",
659 .max_alternatives = MAX_ALT, 664 .n_counter = 6,
660 .add_fields = 0x7000000000055ull, 665 .max_alternatives = MAX_ALT,
661 .test_adder = 0x3000040000000ull, 666 .add_fields = 0x7000000000055ul,
662 .compute_mmcr = power5p_compute_mmcr, 667 .test_adder = 0x3000040000000ul,
663 .get_constraint = power5p_get_constraint, 668 .compute_mmcr = power5p_compute_mmcr,
664 .get_alternatives = power5p_get_alternatives, 669 .get_constraint = power5p_get_constraint,
665 .disable_pmc = power5p_disable_pmc, 670 .get_alternatives = power5p_get_alternatives,
666 .limited_pmc_event = power5p_limited_pmc_event, 671 .disable_pmc = power5p_disable_pmc,
667 .flags = PPMU_LIMITED_PMC5_6, 672 .limited_pmc_event = power5p_limited_pmc_event,
668 .n_generic = ARRAY_SIZE(power5p_generic_events), 673 .flags = PPMU_LIMITED_PMC5_6,
669 .generic_events = power5p_generic_events, 674 .n_generic = ARRAY_SIZE(power5p_generic_events),
670 .cache_events = &power5p_cache_events, 675 .generic_events = power5p_generic_events,
676 .cache_events = &power5p_cache_events,
671}; 677};
678
679static int init_power5p_pmu(void)
680{
681 if (!cur_cpu_spec->oprofile_cpu_type ||
682 (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5+")
683 && strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5++")))
684 return -ENODEV;
685
686 return register_power_pmu(&power5p_pmu);
687}
688
689arch_initcall(init_power5p_pmu);
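
The unit_cons mask/value pairs above encode scheduling constraints: the mask marks the MMCR select fields an event needs, and the value gives the contents those fields must hold. Under that reading, a simplified compatibility test looks like the sketch below; the real core additionally folds in the add_fields/test_adder arithmetic, so treat this as a first-order approximation only:

/* Two constraints can coexist only if they agree on every bit that
 * both masks claim.  Simplified sketch, not the core's actual code. */
static int constraints_compatible(unsigned long mask_a, unsigned long val_a,
				  unsigned long mask_b, unsigned long val_b)
{
	return ((val_a ^ val_b) & mask_a & mask_b) == 0;
}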
diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c
index 05600b66221a..867f6f663963 100644
--- a/arch/powerpc/kernel/power5-pmu.c
+++ b/arch/powerpc/kernel/power5-pmu.c
@@ -10,7 +10,9 @@
10 */ 10 */
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/perf_counter.h> 12#include <linux/perf_counter.h>
13#include <linux/string.h>
13#include <asm/reg.h> 14#include <asm/reg.h>
15#include <asm/cputable.h>
14 16
15/* 17/*
16 * Bits in event code for POWER5 (not POWER5++) 18 * Bits in event code for POWER5 (not POWER5++)
@@ -130,20 +132,21 @@ static const int grsel_shift[8] = {
130}; 132};
131 133
132/* Masks and values for using events from the various units */ 134/* Masks and values for using events from the various units */
133static u64 unit_cons[PM_LASTUNIT+1][2] = { 135static unsigned long unit_cons[PM_LASTUNIT+1][2] = {
134 [PM_FPU] = { 0xc0002000000000ull, 0x00001000000000ull }, 136 [PM_FPU] = { 0xc0002000000000ul, 0x00001000000000ul },
135 [PM_ISU0] = { 0x00002000000000ull, 0x00000800000000ull }, 137 [PM_ISU0] = { 0x00002000000000ul, 0x00000800000000ul },
136 [PM_ISU1] = { 0xc0002000000000ull, 0xc0001000000000ull }, 138 [PM_ISU1] = { 0xc0002000000000ul, 0xc0001000000000ul },
137 [PM_IFU] = { 0xc0002000000000ull, 0x80001000000000ull }, 139 [PM_IFU] = { 0xc0002000000000ul, 0x80001000000000ul },
138 [PM_IDU] = { 0x30002000000000ull, 0x00000400000000ull }, 140 [PM_IDU] = { 0x30002000000000ul, 0x00000400000000ul },
139 [PM_GRS] = { 0x30002000000000ull, 0x30000400000000ull }, 141 [PM_GRS] = { 0x30002000000000ul, 0x30000400000000ul },
140}; 142};
141 143
142static int power5_get_constraint(u64 event, u64 *maskp, u64 *valp) 144static int power5_get_constraint(u64 event, unsigned long *maskp,
145 unsigned long *valp)
143{ 146{
144 int pmc, byte, unit, sh; 147 int pmc, byte, unit, sh;
145 int bit, fmask; 148 int bit, fmask;
146 u64 mask = 0, value = 0; 149 unsigned long mask = 0, value = 0;
147 int grp = -1; 150 int grp = -1;
148 151
149 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; 152 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
@@ -178,8 +181,9 @@ static int power5_get_constraint(u64 event, u64 *maskp, u64 *valp)
178 bit = event & 7; 181 bit = event & 7;
179 fmask = (bit == 6)? 7: 3; 182 fmask = (bit == 6)? 7: 3;
180 sh = grsel_shift[bit]; 183 sh = grsel_shift[bit];
181 mask |= (u64)fmask << sh; 184 mask |= (unsigned long)fmask << sh;
182 value |= (u64)((event >> PM_GRS_SH) & fmask) << sh; 185 value |= (unsigned long)((event >> PM_GRS_SH) & fmask)
186 << sh;
183 } 187 }
184 /* 188 /*
185 * Bus events on bytes 0 and 2 can be counted 189 * Bus events on bytes 0 and 2 can be counted
@@ -188,22 +192,22 @@ static int power5_get_constraint(u64 event, u64 *maskp, u64 *valp)
188 if (!pmc) 192 if (!pmc)
189 grp = byte & 1; 193 grp = byte & 1;
190 /* Set byte lane select field */ 194 /* Set byte lane select field */
191 mask |= 0xfULL << (24 - 4 * byte); 195 mask |= 0xfUL << (24 - 4 * byte);
192 value |= (u64)unit << (24 - 4 * byte); 196 value |= (unsigned long)unit << (24 - 4 * byte);
193 } 197 }
194 if (grp == 0) { 198 if (grp == 0) {
195 /* increment PMC1/2 field */ 199 /* increment PMC1/2 field */
196 mask |= 0x200000000ull; 200 mask |= 0x200000000ul;
197 value |= 0x080000000ull; 201 value |= 0x080000000ul;
198 } else if (grp == 1) { 202 } else if (grp == 1) {
199 /* increment PMC3/4 field */ 203 /* increment PMC3/4 field */
200 mask |= 0x40000000ull; 204 mask |= 0x40000000ul;
201 value |= 0x10000000ull; 205 value |= 0x10000000ul;
202 } 206 }
203 if (pmc < 5) { 207 if (pmc < 5) {
204 /* need a counter from PMC1-4 set */ 208 /* need a counter from PMC1-4 set */
205 mask |= 0x8000000000000ull; 209 mask |= 0x8000000000000ul;
206 value |= 0x1000000000000ull; 210 value |= 0x1000000000000ul;
207 } 211 }
208 *maskp = mask; 212 *maskp = mask;
209 *valp = value; 213 *valp = value;
@@ -383,10 +387,10 @@ static int power5_marked_instr_event(u64 event)
383} 387}
384 388
385static int power5_compute_mmcr(u64 event[], int n_ev, 389static int power5_compute_mmcr(u64 event[], int n_ev,
386 unsigned int hwc[], u64 mmcr[]) 390 unsigned int hwc[], unsigned long mmcr[])
387{ 391{
388 u64 mmcr1 = 0; 392 unsigned long mmcr1 = 0;
389 u64 mmcra = 0; 393 unsigned long mmcra = 0;
390 unsigned int pmc, unit, byte, psel; 394 unsigned int pmc, unit, byte, psel;
391 unsigned int ttm, grp; 395 unsigned int ttm, grp;
392 int i, isbus, bit, grsel; 396 int i, isbus, bit, grsel;
@@ -457,7 +461,7 @@ static int power5_compute_mmcr(u64 event[], int n_ev,
457 continue; 461 continue;
458 if (ttmuse++) 462 if (ttmuse++)
459 return -1; 463 return -1;
460 mmcr1 |= (u64)i << MMCR1_TTM0SEL_SH; 464 mmcr1 |= (unsigned long)i << MMCR1_TTM0SEL_SH;
461 } 465 }
462 ttmuse = 0; 466 ttmuse = 0;
463 for (; i <= PM_GRS; ++i) { 467 for (; i <= PM_GRS; ++i) {
@@ -465,7 +469,7 @@ static int power5_compute_mmcr(u64 event[], int n_ev,
465 continue; 469 continue;
466 if (ttmuse++) 470 if (ttmuse++)
467 return -1; 471 return -1;
468 mmcr1 |= (u64)(i & 3) << MMCR1_TTM1SEL_SH; 472 mmcr1 |= (unsigned long)(i & 3) << MMCR1_TTM1SEL_SH;
469 } 473 }
470 if (ttmuse > 1) 474 if (ttmuse > 1)
471 return -1; 475 return -1;
@@ -480,10 +484,11 @@ static int power5_compute_mmcr(u64 event[], int n_ev,
480 unit = PM_ISU0_ALT; 484 unit = PM_ISU0_ALT;
481 } else if (unit == PM_LSU1 + 1) { 485 } else if (unit == PM_LSU1 + 1) {
482 /* select lower word of LSU1 for this byte */ 486 /* select lower word of LSU1 for this byte */
483 mmcr1 |= 1ull << (MMCR1_TTM3SEL_SH + 3 - byte); 487 mmcr1 |= 1ul << (MMCR1_TTM3SEL_SH + 3 - byte);
484 } 488 }
485 ttm = unit >> 2; 489 ttm = unit >> 2;
486 mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte); 490 mmcr1 |= (unsigned long)ttm
491 << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
487 } 492 }
488 493
489 /* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */ 494 /* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */
@@ -513,7 +518,7 @@ static int power5_compute_mmcr(u64 event[], int n_ev,
513 --pmc; 518 --pmc;
514 if ((psel == 8 || psel == 0x10) && isbus && (byte & 2)) 519 if ((psel == 8 || psel == 0x10) && isbus && (byte & 2))
515 /* add events on higher-numbered bus */ 520 /* add events on higher-numbered bus */
516 mmcr1 |= 1ull << (MMCR1_PMC1_ADDER_SEL_SH - pmc); 521 mmcr1 |= 1ul << (MMCR1_PMC1_ADDER_SEL_SH - pmc);
517 } else { 522 } else {
518 /* Instructions or run cycles on PMC5/6 */ 523 /* Instructions or run cycles on PMC5/6 */
519 --pmc; 524 --pmc;
@@ -521,7 +526,7 @@ static int power5_compute_mmcr(u64 event[], int n_ev,
521 if (isbus && unit == PM_GRS) { 526 if (isbus && unit == PM_GRS) {
522 bit = psel & 7; 527 bit = psel & 7;
523 grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK; 528 grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK;
524 mmcr1 |= (u64)grsel << grsel_shift[bit]; 529 mmcr1 |= (unsigned long)grsel << grsel_shift[bit];
525 } 530 }
526 if (power5_marked_instr_event(event[i])) 531 if (power5_marked_instr_event(event[i]))
527 mmcra |= MMCRA_SAMPLE_ENABLE; 532 mmcra |= MMCRA_SAMPLE_ENABLE;
@@ -541,7 +546,7 @@ static int power5_compute_mmcr(u64 event[], int n_ev,
541 return 0; 546 return 0;
542} 547}
543 548
544static void power5_disable_pmc(unsigned int pmc, u64 mmcr[]) 549static void power5_disable_pmc(unsigned int pmc, unsigned long mmcr[])
545{ 550{
546 if (pmc <= 3) 551 if (pmc <= 3)
547 mmcr[1] &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc)); 552 mmcr[1] &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc));
@@ -596,16 +601,28 @@ static int power5_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
596 }, 601 },
597}; 602};
598 603
599struct power_pmu power5_pmu = { 604static struct power_pmu power5_pmu = {
600 .n_counter = 6, 605 .name = "POWER5",
601 .max_alternatives = MAX_ALT, 606 .n_counter = 6,
602 .add_fields = 0x7000090000555ull, 607 .max_alternatives = MAX_ALT,
603 .test_adder = 0x3000490000000ull, 608 .add_fields = 0x7000090000555ul,
604 .compute_mmcr = power5_compute_mmcr, 609 .test_adder = 0x3000490000000ul,
605 .get_constraint = power5_get_constraint, 610 .compute_mmcr = power5_compute_mmcr,
606 .get_alternatives = power5_get_alternatives, 611 .get_constraint = power5_get_constraint,
607 .disable_pmc = power5_disable_pmc, 612 .get_alternatives = power5_get_alternatives,
608 .n_generic = ARRAY_SIZE(power5_generic_events), 613 .disable_pmc = power5_disable_pmc,
609 .generic_events = power5_generic_events, 614 .n_generic = ARRAY_SIZE(power5_generic_events),
610 .cache_events = &power5_cache_events, 615 .generic_events = power5_generic_events,
616 .cache_events = &power5_cache_events,
611}; 617};
618
619static int init_power5_pmu(void)
620{
621 if (!cur_cpu_spec->oprofile_cpu_type ||
622 strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5"))
623 return -ENODEV;
624
625 return register_power_pmu(&power5_pmu);
626}
627
628arch_initcall(init_power5_pmu);
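
A note on the u64 -> unsigned long conversion running through these files: MMCR0/MMCR1/MMCRA are SPRs, and mtspr()/mfspr() move register-width values, so unsigned long (32 bits on ppc32, 64 bits on ppc64) matches the hardware on both word sizes. A hypothetical helper to illustrate the fit; write_mmcr1() is not part of the patch:

#include <asm/reg.h>

/* Illustration only: an unsigned long from *_compute_mmcr() reaches
 * the SPR without truncation on either word size. */
static inline void write_mmcr1(unsigned long mmcr1)
{
	mtspr(SPRN_MMCR1, mmcr1);
}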
diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c
index 46f74bebcfd9..fa21890531da 100644
--- a/arch/powerpc/kernel/power6-pmu.c
+++ b/arch/powerpc/kernel/power6-pmu.c
@@ -10,7 +10,9 @@
10 */ 10 */
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/perf_counter.h> 12#include <linux/perf_counter.h>
13#include <linux/string.h>
13#include <asm/reg.h> 14#include <asm/reg.h>
15#include <asm/cputable.h>
14 16
15/* 17/*
16 * Bits in event code for POWER6 18 * Bits in event code for POWER6
@@ -41,9 +43,9 @@
41#define MMCR1_NESTSEL_SH 45 43#define MMCR1_NESTSEL_SH 45
42#define MMCR1_NESTSEL_MSK 0x7 44#define MMCR1_NESTSEL_MSK 0x7
43#define MMCR1_NESTSEL(m) (((m) >> MMCR1_NESTSEL_SH) & MMCR1_NESTSEL_MSK) 45#define MMCR1_NESTSEL(m) (((m) >> MMCR1_NESTSEL_SH) & MMCR1_NESTSEL_MSK)
44#define MMCR1_PMC1_LLA ((u64)1 << 44) 46#define MMCR1_PMC1_LLA (1ul << 44)
45#define MMCR1_PMC1_LLA_VALUE ((u64)1 << 39) 47#define MMCR1_PMC1_LLA_VALUE (1ul << 39)
46#define MMCR1_PMC1_ADDR_SEL ((u64)1 << 35) 48#define MMCR1_PMC1_ADDR_SEL (1ul << 35)
47#define MMCR1_PMC1SEL_SH 24 49#define MMCR1_PMC1SEL_SH 24
48#define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8) 50#define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8)
49#define MMCR1_PMCSEL_MSK 0xff 51#define MMCR1_PMCSEL_MSK 0xff
@@ -173,10 +175,10 @@ static int power6_marked_instr_event(u64 event)
173 * Assign PMC numbers and compute MMCR1 value for a set of events 175 * Assign PMC numbers and compute MMCR1 value for a set of events
174 */ 176 */
175static int p6_compute_mmcr(u64 event[], int n_ev, 177static int p6_compute_mmcr(u64 event[], int n_ev,
176 unsigned int hwc[], u64 mmcr[]) 178 unsigned int hwc[], unsigned long mmcr[])
177{ 179{
178 u64 mmcr1 = 0; 180 unsigned long mmcr1 = 0;
179 u64 mmcra = 0; 181 unsigned long mmcra = 0;
180 int i; 182 int i;
181 unsigned int pmc, ev, b, u, s, psel; 183 unsigned int pmc, ev, b, u, s, psel;
182 unsigned int ttmset = 0; 184 unsigned int ttmset = 0;
@@ -215,7 +217,7 @@ static int p6_compute_mmcr(u64 event[], int n_ev,
215 /* check for conflict on this byte of event bus */ 217 /* check for conflict on this byte of event bus */
216 if ((ttmset & (1 << b)) && MMCR1_TTMSEL(mmcr1, b) != u) 218 if ((ttmset & (1 << b)) && MMCR1_TTMSEL(mmcr1, b) != u)
217 return -1; 219 return -1;
218 mmcr1 |= (u64)u << MMCR1_TTMSEL_SH(b); 220 mmcr1 |= (unsigned long)u << MMCR1_TTMSEL_SH(b);
219 ttmset |= 1 << b; 221 ttmset |= 1 << b;
220 if (u == 5) { 222 if (u == 5) {
221 /* Nest events have a further mux */ 223 /* Nest events have a further mux */
@@ -224,7 +226,7 @@ static int p6_compute_mmcr(u64 event[], int n_ev,
224 MMCR1_NESTSEL(mmcr1) != s) 226 MMCR1_NESTSEL(mmcr1) != s)
225 return -1; 227 return -1;
226 ttmset |= 0x10; 228 ttmset |= 0x10;
227 mmcr1 |= (u64)s << MMCR1_NESTSEL_SH; 229 mmcr1 |= (unsigned long)s << MMCR1_NESTSEL_SH;
228 } 230 }
229 if (0x30 <= psel && psel <= 0x3d) { 231 if (0x30 <= psel && psel <= 0x3d) {
230 /* these need the PMCx_ADDR_SEL bits */ 232 /* these need the PMCx_ADDR_SEL bits */
@@ -243,7 +245,7 @@ static int p6_compute_mmcr(u64 event[], int n_ev,
243 if (power6_marked_instr_event(event[i])) 245 if (power6_marked_instr_event(event[i]))
244 mmcra |= MMCRA_SAMPLE_ENABLE; 246 mmcra |= MMCRA_SAMPLE_ENABLE;
245 if (pmc < 4) 247 if (pmc < 4)
246 mmcr1 |= (u64)psel << MMCR1_PMCSEL_SH(pmc); 248 mmcr1 |= (unsigned long)psel << MMCR1_PMCSEL_SH(pmc);
247 } 249 }
248 mmcr[0] = 0; 250 mmcr[0] = 0;
249 if (pmc_inuse & 1) 251 if (pmc_inuse & 1)
@@ -265,10 +267,11 @@ static int p6_compute_mmcr(u64 event[], int n_ev,
265 * 20-23, 24-27, 28-31 ditto for bytes 1, 2, 3 267 * 20-23, 24-27, 28-31 ditto for bytes 1, 2, 3
266 * 32-34 select field: nest (subunit) event selector 268 * 32-34 select field: nest (subunit) event selector
267 */ 269 */
268static int p6_get_constraint(u64 event, u64 *maskp, u64 *valp) 270static int p6_get_constraint(u64 event, unsigned long *maskp,
271 unsigned long *valp)
269{ 272{
270 int pmc, byte, sh, subunit; 273 int pmc, byte, sh, subunit;
271 u64 mask = 0, value = 0; 274 unsigned long mask = 0, value = 0;
272 275
273 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; 276 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
274 if (pmc) { 277 if (pmc) {
@@ -282,11 +285,11 @@ static int p6_get_constraint(u64 event, u64 *maskp, u64 *valp)
282 byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; 285 byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
283 sh = byte * 4 + (16 - PM_UNIT_SH); 286 sh = byte * 4 + (16 - PM_UNIT_SH);
284 mask |= PM_UNIT_MSKS << sh; 287 mask |= PM_UNIT_MSKS << sh;
285 value |= (u64)(event & PM_UNIT_MSKS) << sh; 288 value |= (unsigned long)(event & PM_UNIT_MSKS) << sh;
286 if ((event & PM_UNIT_MSKS) == (5 << PM_UNIT_SH)) { 289 if ((event & PM_UNIT_MSKS) == (5 << PM_UNIT_SH)) {
287 subunit = (event >> PM_SUBUNIT_SH) & PM_SUBUNIT_MSK; 290 subunit = (event >> PM_SUBUNIT_SH) & PM_SUBUNIT_MSK;
288 mask |= (u64)PM_SUBUNIT_MSK << 32; 291 mask |= (unsigned long)PM_SUBUNIT_MSK << 32;
289 value |= (u64)subunit << 32; 292 value |= (unsigned long)subunit << 32;
290 } 293 }
291 } 294 }
292 if (pmc <= 4) { 295 if (pmc <= 4) {
@@ -458,7 +461,7 @@ static int p6_get_alternatives(u64 event, unsigned int flags, u64 alt[])
458 return nalt; 461 return nalt;
459} 462}
460 463
461static void p6_disable_pmc(unsigned int pmc, u64 mmcr[]) 464static void p6_disable_pmc(unsigned int pmc, unsigned long mmcr[])
462{ 465{
463 /* Set PMCxSEL to 0 to disable PMCx */ 466 /* Set PMCxSEL to 0 to disable PMCx */
464 if (pmc <= 3) 467 if (pmc <= 3)
@@ -515,18 +518,30 @@ static int power6_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
515 }, 518 },
516}; 519};
517 520
518struct power_pmu power6_pmu = { 521static struct power_pmu power6_pmu = {
519 .n_counter = 6, 522 .name = "POWER6",
520 .max_alternatives = MAX_ALT, 523 .n_counter = 6,
521 .add_fields = 0x1555, 524 .max_alternatives = MAX_ALT,
522 .test_adder = 0x3000, 525 .add_fields = 0x1555,
523 .compute_mmcr = p6_compute_mmcr, 526 .test_adder = 0x3000,
524 .get_constraint = p6_get_constraint, 527 .compute_mmcr = p6_compute_mmcr,
525 .get_alternatives = p6_get_alternatives, 528 .get_constraint = p6_get_constraint,
526 .disable_pmc = p6_disable_pmc, 529 .get_alternatives = p6_get_alternatives,
527 .limited_pmc_event = p6_limited_pmc_event, 530 .disable_pmc = p6_disable_pmc,
528 .flags = PPMU_LIMITED_PMC5_6 | PPMU_ALT_SIPR, 531 .limited_pmc_event = p6_limited_pmc_event,
529 .n_generic = ARRAY_SIZE(power6_generic_events), 532 .flags = PPMU_LIMITED_PMC5_6 | PPMU_ALT_SIPR,
530 .generic_events = power6_generic_events, 533 .n_generic = ARRAY_SIZE(power6_generic_events),
531 .cache_events = &power6_cache_events, 534 .generic_events = power6_generic_events,
535 .cache_events = &power6_cache_events,
532}; 536};
537
538static int init_power6_pmu(void)
539{
540 if (!cur_cpu_spec->oprofile_cpu_type ||
541 strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power6"))
542 return -ENODEV;
543
544 return register_power_pmu(&power6_pmu);
545}
546
547arch_initcall(init_power6_pmu);
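
One subtlety in the POWER6 macro change near the top of this file: (1ul << 44) is only well-defined because power6-pmu.c is built for 64-bit kernels, where long is 64 bits; with a 32-bit long the shift would overflow. A hypothetical guard that would document the assumption, not something the patch adds:

#include <linux/kernel.h>

static inline void power6_width_check(void)
{
	/* Hypothetical: break the build if long is narrower than 64 bits. */
	BUILD_BUG_ON(sizeof(unsigned long) != 8);
}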
diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c
index b72e7a19d054..018d094d92f9 100644
--- a/arch/powerpc/kernel/power7-pmu.c
+++ b/arch/powerpc/kernel/power7-pmu.c
@@ -10,7 +10,9 @@
10 */ 10 */
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/perf_counter.h> 12#include <linux/perf_counter.h>
13#include <linux/string.h>
13#include <asm/reg.h> 14#include <asm/reg.h>
15#include <asm/cputable.h>
14 16
15/* 17/*
16 * Bits in event code for POWER7 18 * Bits in event code for POWER7
@@ -71,10 +73,11 @@
71 * 0-9: Count of events needing PMC1..PMC5 73 * 0-9: Count of events needing PMC1..PMC5
72 */ 74 */
73 75
74static int power7_get_constraint(u64 event, u64 *maskp, u64 *valp) 76static int power7_get_constraint(u64 event, unsigned long *maskp,
77 unsigned long *valp)
75{ 78{
76 int pmc, sh; 79 int pmc, sh;
77 u64 mask = 0, value = 0; 80 unsigned long mask = 0, value = 0;
78 81
79 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; 82 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
80 if (pmc) { 83 if (pmc) {
@@ -224,10 +227,10 @@ static int power7_marked_instr_event(u64 event)
224} 227}
225 228
226static int power7_compute_mmcr(u64 event[], int n_ev, 229static int power7_compute_mmcr(u64 event[], int n_ev,
227 unsigned int hwc[], u64 mmcr[]) 230 unsigned int hwc[], unsigned long mmcr[])
228{ 231{
229 u64 mmcr1 = 0; 232 unsigned long mmcr1 = 0;
230 u64 mmcra = 0; 233 unsigned long mmcra = 0;
231 unsigned int pmc, unit, combine, l2sel, psel; 234 unsigned int pmc, unit, combine, l2sel, psel;
232 unsigned int pmc_inuse = 0; 235 unsigned int pmc_inuse = 0;
233 int i; 236 int i;
@@ -265,11 +268,14 @@ static int power7_compute_mmcr(u64 event[], int n_ev,
265 --pmc; 268 --pmc;
266 } 269 }
267 if (pmc <= 3) { 270 if (pmc <= 3) {
268 mmcr1 |= (u64) unit << (MMCR1_TTM0SEL_SH - 4 * pmc); 271 mmcr1 |= (unsigned long) unit
269 mmcr1 |= (u64) combine << (MMCR1_PMC1_COMBINE_SH - pmc); 272 << (MMCR1_TTM0SEL_SH - 4 * pmc);
273 mmcr1 |= (unsigned long) combine
274 << (MMCR1_PMC1_COMBINE_SH - pmc);
270 mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc); 275 mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc);
271 if (unit == 6) /* L2 events */ 276 if (unit == 6) /* L2 events */
272 mmcr1 |= (u64) l2sel << MMCR1_L2SEL_SH; 277 mmcr1 |= (unsigned long) l2sel
278 << MMCR1_L2SEL_SH;
273 } 279 }
274 if (power7_marked_instr_event(event[i])) 280 if (power7_marked_instr_event(event[i]))
275 mmcra |= MMCRA_SAMPLE_ENABLE; 281 mmcra |= MMCRA_SAMPLE_ENABLE;
@@ -287,10 +293,10 @@ static int power7_compute_mmcr(u64 event[], int n_ev,
287 return 0; 293 return 0;
288} 294}
289 295
290static void power7_disable_pmc(unsigned int pmc, u64 mmcr[]) 296static void power7_disable_pmc(unsigned int pmc, unsigned long mmcr[])
291{ 297{
292 if (pmc <= 3) 298 if (pmc <= 3)
293 mmcr[1] &= ~(0xffULL << MMCR1_PMCSEL_SH(pmc)); 299 mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc));
294} 300}
295 301
296static int power7_generic_events[] = { 302static int power7_generic_events[] = {
@@ -311,7 +317,7 @@ static int power7_generic_events[] = {
311 */ 317 */
312static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { 318static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
313 [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ 319 [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */
314 [C(OP_READ)] = { 0x400f0, 0xc880 }, 320 [C(OP_READ)] = { 0xc880, 0x400f0 },
315 [C(OP_WRITE)] = { 0, 0x300f0 }, 321 [C(OP_WRITE)] = { 0, 0x300f0 },
316 [C(OP_PREFETCH)] = { 0xd8b8, 0 }, 322 [C(OP_PREFETCH)] = { 0xd8b8, 0 },
317 }, 323 },
@@ -321,8 +327,8 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
321 [C(OP_PREFETCH)] = { 0x408a, 0 }, 327 [C(OP_PREFETCH)] = { 0x408a, 0 },
322 }, 328 },
323 [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ 329 [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */
324 [C(OP_READ)] = { 0x6080, 0x6084 }, 330 [C(OP_READ)] = { 0x16080, 0x26080 },
325 [C(OP_WRITE)] = { 0x6082, 0x6086 }, 331 [C(OP_WRITE)] = { 0x16082, 0x26082 },
326 [C(OP_PREFETCH)] = { 0, 0 }, 332 [C(OP_PREFETCH)] = { 0, 0 },
327 }, 333 },
328 [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ 334 [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */
@@ -342,16 +348,29 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
342 }, 348 },
343}; 349};
344 350
345struct power_pmu power7_pmu = { 351static struct power_pmu power7_pmu = {
346 .n_counter = 6, 352 .name = "POWER7",
347 .max_alternatives = MAX_ALT + 1, 353 .n_counter = 6,
348 .add_fields = 0x1555ull, 354 .max_alternatives = MAX_ALT + 1,
349 .test_adder = 0x3000ull, 355 .add_fields = 0x1555ul,
350 .compute_mmcr = power7_compute_mmcr, 356 .test_adder = 0x3000ul,
351 .get_constraint = power7_get_constraint, 357 .compute_mmcr = power7_compute_mmcr,
352 .get_alternatives = power7_get_alternatives, 358 .get_constraint = power7_get_constraint,
353 .disable_pmc = power7_disable_pmc, 359 .get_alternatives = power7_get_alternatives,
354 .n_generic = ARRAY_SIZE(power7_generic_events), 360 .disable_pmc = power7_disable_pmc,
355 .generic_events = power7_generic_events, 361 .flags = PPMU_ALT_SIPR,
356 .cache_events = &power7_cache_events, 362 .n_generic = ARRAY_SIZE(power7_generic_events),
363 .generic_events = power7_generic_events,
364 .cache_events = &power7_cache_events,
357}; 365};
366
367static int init_power7_pmu(void)
368{
369 if (!cur_cpu_spec->oprofile_cpu_type ||
370 strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power7"))
371 return -ENODEV;
372
373 return register_power_pmu(&power7_pmu);
374}
375
376arch_initcall(init_power7_pmu);
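
The cache-events fix above reads most easily against the table's column order, { RESULT_ACCESS, RESULT_MISS } per the header comment: the old L1D read row had the two codes in swapped slots, and the LL rows move to longer event codes. A sanity sketch of the indexing, assuming the C() macro and table from this file; the helper itself is hypothetical:

/* After the fix: returns 0xc880 for an access, 0x400f0 for a miss. */
static int power7_l1d_read_code(int miss)
{
	return power7_cache_events[C(L1D)][C(OP_READ)]
			[miss ? C(RESULT_MISS) : C(RESULT_ACCESS)];
}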
diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c
index ba0a357a89f4..75dccb71a043 100644
--- a/arch/powerpc/kernel/ppc970-pmu.c
+++ b/arch/powerpc/kernel/ppc970-pmu.c
@@ -11,6 +11,7 @@
11#include <linux/string.h> 11#include <linux/string.h>
12#include <linux/perf_counter.h> 12#include <linux/perf_counter.h>
13#include <asm/reg.h> 13#include <asm/reg.h>
14#include <asm/cputable.h>
14 15
15/* 16/*
16 * Bits in event code for PPC970 17 * Bits in event code for PPC970
@@ -183,7 +184,7 @@ static int p970_marked_instr_event(u64 event)
183} 184}
184 185
185/* Masks and values for using events from the various units */ 186/* Masks and values for using events from the various units */
186static u64 unit_cons[PM_LASTUNIT+1][2] = { 187static unsigned long unit_cons[PM_LASTUNIT+1][2] = {
187 [PM_FPU] = { 0xc80000000000ull, 0x040000000000ull }, 188 [PM_FPU] = { 0xc80000000000ull, 0x040000000000ull },
188 [PM_VPU] = { 0xc80000000000ull, 0xc40000000000ull }, 189 [PM_VPU] = { 0xc80000000000ull, 0xc40000000000ull },
189 [PM_ISU] = { 0x080000000000ull, 0x020000000000ull }, 190 [PM_ISU] = { 0x080000000000ull, 0x020000000000ull },
@@ -192,10 +193,11 @@ static u64 unit_cons[PM_LASTUNIT+1][2] = {
192 [PM_STS] = { 0x380000000000ull, 0x310000000000ull }, 193 [PM_STS] = { 0x380000000000ull, 0x310000000000ull },
193}; 194};
194 195
195static int p970_get_constraint(u64 event, u64 *maskp, u64 *valp) 196static int p970_get_constraint(u64 event, unsigned long *maskp,
197 unsigned long *valp)
196{ 198{
197 int pmc, byte, unit, sh, spcsel; 199 int pmc, byte, unit, sh, spcsel;
198 u64 mask = 0, value = 0; 200 unsigned long mask = 0, value = 0;
199 int grp = -1; 201 int grp = -1;
200 202
201 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; 203 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
@@ -222,7 +224,7 @@ static int p970_get_constraint(u64 event, u64 *maskp, u64 *valp)
222 grp = byte & 1; 224 grp = byte & 1;
223 /* Set byte lane select field */ 225 /* Set byte lane select field */
224 mask |= 0xfULL << (28 - 4 * byte); 226 mask |= 0xfULL << (28 - 4 * byte);
225 value |= (u64)unit << (28 - 4 * byte); 227 value |= (unsigned long)unit << (28 - 4 * byte);
226 } 228 }
227 if (grp == 0) { 229 if (grp == 0) {
228 /* increment PMC1/2/5/6 field */ 230 /* increment PMC1/2/5/6 field */
@@ -236,7 +238,7 @@ static int p970_get_constraint(u64 event, u64 *maskp, u64 *valp)
236 spcsel = (event >> PM_SPCSEL_SH) & PM_SPCSEL_MSK; 238 spcsel = (event >> PM_SPCSEL_SH) & PM_SPCSEL_MSK;
237 if (spcsel) { 239 if (spcsel) {
238 mask |= 3ull << 48; 240 mask |= 3ull << 48;
239 value |= (u64)spcsel << 48; 241 value |= (unsigned long)spcsel << 48;
240 } 242 }
241 *maskp = mask; 243 *maskp = mask;
242 *valp = value; 244 *valp = value;
@@ -257,9 +259,9 @@ static int p970_get_alternatives(u64 event, unsigned int flags, u64 alt[])
257} 259}
258 260
259static int p970_compute_mmcr(u64 event[], int n_ev, 261static int p970_compute_mmcr(u64 event[], int n_ev,
260 unsigned int hwc[], u64 mmcr[]) 262 unsigned int hwc[], unsigned long mmcr[])
261{ 263{
262 u64 mmcr0 = 0, mmcr1 = 0, mmcra = 0; 264 unsigned long mmcr0 = 0, mmcr1 = 0, mmcra = 0;
263 unsigned int pmc, unit, byte, psel; 265 unsigned int pmc, unit, byte, psel;
264 unsigned int ttm, grp; 266 unsigned int ttm, grp;
265 unsigned int pmc_inuse = 0; 267 unsigned int pmc_inuse = 0;
@@ -320,7 +322,7 @@ static int p970_compute_mmcr(u64 event[], int n_ev,
320 continue; 322 continue;
321 ttm = unitmap[i]; 323 ttm = unitmap[i];
322 ++ttmuse[(ttm >> 2) & 1]; 324 ++ttmuse[(ttm >> 2) & 1];
323 mmcr1 |= (u64)(ttm & ~4) << MMCR1_TTM1SEL_SH; 325 mmcr1 |= (unsigned long)(ttm & ~4) << MMCR1_TTM1SEL_SH;
324 } 326 }
325 /* Check only one unit per TTMx */ 327 /* Check only one unit per TTMx */
326 if (ttmuse[0] > 1 || ttmuse[1] > 1) 328 if (ttmuse[0] > 1 || ttmuse[1] > 1)
@@ -340,7 +342,8 @@ static int p970_compute_mmcr(u64 event[], int n_ev,
340 if (unit == PM_LSU1L && byte >= 2) 342 if (unit == PM_LSU1L && byte >= 2)
341 mmcr1 |= 1ull << (MMCR1_TTM3SEL_SH + 3 - byte); 343 mmcr1 |= 1ull << (MMCR1_TTM3SEL_SH + 3 - byte);
342 } 344 }
343 mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte); 345 mmcr1 |= (unsigned long)ttm
346 << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
344 } 347 }
345 348
346 /* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */ 349 /* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */
@@ -386,7 +389,8 @@ static int p970_compute_mmcr(u64 event[], int n_ev,
386 for (pmc = 0; pmc < 2; ++pmc) 389 for (pmc = 0; pmc < 2; ++pmc)
387 mmcr0 |= pmcsel[pmc] << (MMCR0_PMC1SEL_SH - 7 * pmc); 390 mmcr0 |= pmcsel[pmc] << (MMCR0_PMC1SEL_SH - 7 * pmc);
388 for (; pmc < 8; ++pmc) 391 for (; pmc < 8; ++pmc)
389 mmcr1 |= (u64)pmcsel[pmc] << (MMCR1_PMC3SEL_SH - 5 * (pmc - 2)); 392 mmcr1 |= (unsigned long)pmcsel[pmc]
393 << (MMCR1_PMC3SEL_SH - 5 * (pmc - 2));
390 if (pmc_inuse & 1) 394 if (pmc_inuse & 1)
391 mmcr0 |= MMCR0_PMC1CE; 395 mmcr0 |= MMCR0_PMC1CE;
392 if (pmc_inuse & 0xfe) 396 if (pmc_inuse & 0xfe)
@@ -401,7 +405,7 @@ static int p970_compute_mmcr(u64 event[], int n_ev,
401 return 0; 405 return 0;
402} 406}
403 407
404static void p970_disable_pmc(unsigned int pmc, u64 mmcr[]) 408static void p970_disable_pmc(unsigned int pmc, unsigned long mmcr[])
405{ 409{
406 int shift, i; 410 int shift, i;
407 411
@@ -467,16 +471,29 @@ static int ppc970_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
467 }, 471 },
468}; 472};
469 473
470struct power_pmu ppc970_pmu = { 474static struct power_pmu ppc970_pmu = {
471 .n_counter = 8, 475 .name = "PPC970/FX/MP",
472 .max_alternatives = 2, 476 .n_counter = 8,
473 .add_fields = 0x001100005555ull, 477 .max_alternatives = 2,
474 .test_adder = 0x013300000000ull, 478 .add_fields = 0x001100005555ull,
475 .compute_mmcr = p970_compute_mmcr, 479 .test_adder = 0x013300000000ull,
476 .get_constraint = p970_get_constraint, 480 .compute_mmcr = p970_compute_mmcr,
477 .get_alternatives = p970_get_alternatives, 481 .get_constraint = p970_get_constraint,
478 .disable_pmc = p970_disable_pmc, 482 .get_alternatives = p970_get_alternatives,
479 .n_generic = ARRAY_SIZE(ppc970_generic_events), 483 .disable_pmc = p970_disable_pmc,
480 .generic_events = ppc970_generic_events, 484 .n_generic = ARRAY_SIZE(ppc970_generic_events),
481 .cache_events = &ppc970_cache_events, 485 .generic_events = ppc970_generic_events,
486 .cache_events = &ppc970_cache_events,
482}; 487};
488
489static int init_ppc970_pmu(void)
490{
491 if (!cur_cpu_spec->oprofile_cpu_type ||
492 (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/970")
493 && strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/970MP")))
494 return -ENODEV;
495
496 return register_power_pmu(&ppc970_pmu);
497}
498
499arch_initcall(init_ppc970_pmu);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 3e7135bbe40f..892a9f2e6d76 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -528,7 +528,7 @@ void show_regs(struct pt_regs * regs)
528 528
529 for (i = 0; i < 32; i++) { 529 for (i = 0; i < 32; i++) {
530 if ((i % REGS_PER_LINE) == 0) 530 if ((i % REGS_PER_LINE) == 0)
531 printk("\n" KERN_INFO "GPR%02d: ", i); 531 printk("\nGPR%02d: ", i);
532 printk(REG " ", regs->gpr[i]); 532 printk(REG " ", regs->gpr[i]);
533 if (i == LAST_VOLATILE && !FULL_REGS(regs)) 533 if (i == LAST_VOLATILE && !FULL_REGS(regs))
534 break; 534 break;
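
The show_regs() change drops a log-level marker from the middle of a format string. Hedging on printk behaviour of this era: KERN_INFO is the literal prefix "<6>", which printk honours only at the start of a message, so the old call would have emitted it verbatim after the newline. Illustration only:

printk("\n" KERN_INFO "GPR%02d: ", 0);	/* old: "<6>" printed literally */
printk("\nGPR%02d: ", 0);		/* new: clean continuation line */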
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index ef6f64950e9b..a538824616fd 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1947,8 +1947,47 @@ static void __init fixup_device_tree_maple(void)
1947 prom_setprop(isa, name, "ranges", 1947 prom_setprop(isa, name, "ranges",
1948 isa_ranges, sizeof(isa_ranges)); 1948 isa_ranges, sizeof(isa_ranges));
1949} 1949}
1950
1951#define CPC925_MC_START 0xf8000000
1952#define CPC925_MC_LENGTH 0x1000000
1953/* The memory-controller values don't have the right number of cells */
1954static void __init fixup_device_tree_maple_memory_controller(void)
1955{
1956 phandle mc;
1957 u32 mc_reg[4];
1958 char *name = "/hostbridge@f8000000";
1959 struct prom_t *_prom = &RELOC(prom);
1960 u32 ac, sc;
1961
1962 mc = call_prom("finddevice", 1, 1, ADDR(name));
1963 if (!PHANDLE_VALID(mc))
1964 return;
1965
1966 if (prom_getproplen(mc, "reg") != 8)
1967 return;
1968
1969 prom_getprop(_prom->root, "#address-cells", &ac, sizeof(ac));
1970 prom_getprop(_prom->root, "#size-cells", &sc, sizeof(sc));
1971 if ((ac != 2) || (sc != 2))
1972 return;
1973
1974 if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR)
1975 return;
1976
1977 if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH)
1978 return;
1979
1980 prom_printf("Fixing up bogus hostbridge on Maple...\n");
1981
1982 mc_reg[0] = 0x0;
1983 mc_reg[1] = CPC925_MC_START;
1984 mc_reg[2] = 0x0;
1985 mc_reg[3] = CPC925_MC_LENGTH;
1986 prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg));
1987}
1950#else 1988#else
1951#define fixup_device_tree_maple() 1989#define fixup_device_tree_maple()
1990#define fixup_device_tree_maple_memory_controller()
1952#endif 1991#endif
1953 1992
1954#ifdef CONFIG_PPC_CHRP 1993#ifdef CONFIG_PPC_CHRP
@@ -2189,6 +2228,7 @@ static void __init fixup_device_tree_efika(void)
2189static void __init fixup_device_tree(void) 2228static void __init fixup_device_tree(void)
2190{ 2229{
2191 fixup_device_tree_maple(); 2230 fixup_device_tree_maple();
2231 fixup_device_tree_maple_memory_controller();
2192 fixup_device_tree_chrp(); 2232 fixup_device_tree_chrp();
2193 fixup_device_tree_pmac(); 2233 fixup_device_tree_pmac();
2194 fixup_device_tree_efika(); 2234 fixup_device_tree_efika();
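
The new Maple fixup rewrites the hostbridge "reg" property from one cell per value to the two-cell address/size form that #address-cells = #size-cells = 2 demands. Using the values from the patch, the transformation is:

u32 reg_before[2] = { 0xf8000000, 0x01000000 };           /* addr, size: 1 cell each  */
u32 reg_after[4]  = { 0x0, 0xf8000000, 0x0, 0x01000000 }; /* addr, size: 2 cells each */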
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 9fa2c7dcd05a..ef149880c145 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -736,15 +736,16 @@ void user_disable_single_step(struct task_struct *task)
736{ 736{
737 struct pt_regs *regs = task->thread.regs; 737 struct pt_regs *regs = task->thread.regs;
738 738
739
740#if defined(CONFIG_BOOKE)
741 /* If DAC then do not single step, skip */
742 if (task->thread.dabr)
743 return;
744#endif
745
746 if (regs != NULL) { 739 if (regs != NULL) {
747#if defined(CONFIG_40x) || defined(CONFIG_BOOKE) 740#if defined(CONFIG_BOOKE)
741 /* If DAC don't clear DBCRO_IDM or MSR_DE */
742 if (task->thread.dabr)
743 task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_BT);
744 else {
745 task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_BT | DBCR0_IDM);
746 regs->msr &= ~MSR_DE;
747 }
748#elif defined(CONFIG_40x)
748 task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_BT | DBCR0_IDM); 749 task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_BT | DBCR0_IDM);
749 regs->msr &= ~MSR_DE; 750 regs->msr &= ~MSR_DE;
750#else 751#else
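
For reference while reading the BookE branch above, the bit roles (names as defined in asm/reg_booke.h) are summarised below; a live DAC watchpoint still needs DBCR0_IDM and MSR_DE, which is why only the single-step bits are cleared when thread.dabr is set:

/*
 * DBCR0_IC  - instruction-complete debug event (the single-step bit)
 * DBCR0_BT  - branch-taken debug event
 * DBCR0_IDM - internal debug mode, the master enable the DAC relies on
 * MSR_DE    - debug interrupts enabled in the MSR
 */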
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index 297632cba047..8a6daf4129f6 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -21,7 +21,6 @@
21#include <linux/sched.h> 21#include <linux/sched.h>
22#include <linux/mm.h> 22#include <linux/mm.h>
23#include <linux/smp.h> 23#include <linux/smp.h>
24#include <linux/smp_lock.h>
25#include <linux/errno.h> 24#include <linux/errno.h>
26#include <linux/ptrace.h> 25#include <linux/ptrace.h>
27#include <linux/regset.h> 26#include <linux/regset.h>
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index ee4c7609b649..c434823b8c83 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -38,9 +38,10 @@
38#include <asm/syscalls.h> 38#include <asm/syscalls.h>
39#include <asm/smp.h> 39#include <asm/smp.h>
40#include <asm/atomic.h> 40#include <asm/atomic.h>
41#include <asm/time.h>
41 42
42struct rtas_t rtas = { 43struct rtas_t rtas = {
43 .lock = SPIN_LOCK_UNLOCKED 44 .lock = __RAW_SPIN_LOCK_UNLOCKED
44}; 45};
45EXPORT_SYMBOL(rtas); 46EXPORT_SYMBOL(rtas);
46 47
@@ -67,6 +68,28 @@ unsigned long rtas_rmo_buf;
67void (*rtas_flash_term_hook)(int); 68void (*rtas_flash_term_hook)(int);
68EXPORT_SYMBOL(rtas_flash_term_hook); 69EXPORT_SYMBOL(rtas_flash_term_hook);
69 70
71/* RTAS uses home-made raw locking instead of spin_lock_irqsave
72 * because it can be called from really nasty contexts, such as
73 * with the timebase stopped, which would lock up with normal
74 * locks and spinlock debugging enabled
75 */
76static unsigned long lock_rtas(void)
77{
78 unsigned long flags;
79
80 local_irq_save(flags);
81 preempt_disable();
82 __raw_spin_lock_flags(&rtas.lock, flags);
83 return flags;
84}
85
86static void unlock_rtas(unsigned long flags)
87{
88 __raw_spin_unlock(&rtas.lock);
89 local_irq_restore(flags);
90 preempt_enable();
91}
92
70/* 93/*
71 * call_rtas_display_status and call_rtas_display_status_delay 94 * call_rtas_display_status and call_rtas_display_status_delay
72 * are designed only for very early low-level debugging, which 95 * are designed only for very early low-level debugging, which
@@ -79,7 +102,7 @@ static void call_rtas_display_status(char c)
79 102
80 if (!rtas.base) 103 if (!rtas.base)
81 return; 104 return;
82 spin_lock_irqsave(&rtas.lock, s); 105 s = lock_rtas();
83 106
84 args->token = 10; 107 args->token = 10;
85 args->nargs = 1; 108 args->nargs = 1;
@@ -89,7 +112,7 @@ static void call_rtas_display_status(char c)
89 112
90 enter_rtas(__pa(args)); 113 enter_rtas(__pa(args));
91 114
92 spin_unlock_irqrestore(&rtas.lock, s); 115 unlock_rtas(s);
93} 116}
94 117
95static void call_rtas_display_status_delay(char c) 118static void call_rtas_display_status_delay(char c)
@@ -411,8 +434,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
411 if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE) 434 if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE)
412 return -1; 435 return -1;
413 436
414 /* Gotta do something different here, use global lock for now... */ 437 s = lock_rtas();
415 spin_lock_irqsave(&rtas.lock, s);
416 rtas_args = &rtas.args; 438 rtas_args = &rtas.args;
417 439
418 rtas_args->token = token; 440 rtas_args->token = token;
@@ -439,8 +461,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
439 outputs[i] = rtas_args->rets[i+1]; 461 outputs[i] = rtas_args->rets[i+1];
440 ret = (nret > 0)? rtas_args->rets[0]: 0; 462 ret = (nret > 0)? rtas_args->rets[0]: 0;
441 463
442 /* Gotta do something different here, use global lock for now... */ 464 unlock_rtas(s);
443 spin_unlock_irqrestore(&rtas.lock, s);
444 465
445 if (buff_copy) { 466 if (buff_copy) {
446 log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0); 467 log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0);
@@ -837,7 +858,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
837 858
838 buff_copy = get_errorlog_buffer(); 859 buff_copy = get_errorlog_buffer();
839 860
840 spin_lock_irqsave(&rtas.lock, flags); 861 flags = lock_rtas();
841 862
842 rtas.args = args; 863 rtas.args = args;
843 enter_rtas(__pa(&rtas.args)); 864 enter_rtas(__pa(&rtas.args));
@@ -848,7 +869,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
848 if (args.rets[0] == -1) 869 if (args.rets[0] == -1)
849 errbuf = __fetch_rtas_last_error(buff_copy); 870 errbuf = __fetch_rtas_last_error(buff_copy);
850 871
851 spin_unlock_irqrestore(&rtas.lock, flags); 872 unlock_rtas(flags);
852 873
853 if (buff_copy) { 874 if (buff_copy) {
854 if (errbuf) 875 if (errbuf)
@@ -951,3 +972,33 @@ int __init early_init_dt_scan_rtas(unsigned long node,
951 /* break now */ 972 /* break now */
952 return 1; 973 return 1;
953} 974}
975
976static raw_spinlock_t timebase_lock;
977static u64 timebase = 0;
978
979void __cpuinit rtas_give_timebase(void)
980{
981 unsigned long flags;
982
983 local_irq_save(flags);
984 hard_irq_disable();
985 __raw_spin_lock(&timebase_lock);
986 rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
987 timebase = get_tb();
988 __raw_spin_unlock(&timebase_lock);
989
990 while (timebase)
991 barrier();
992 rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
993 local_irq_restore(flags);
994}
995
996void __cpuinit rtas_take_timebase(void)
997{
998 while (!timebase)
999 barrier();
1000 __raw_spin_lock(&timebase_lock);
1001 set_tb(timebase >> 32, timebase & 0xffffffff);
1002 timebase = 0;
1003 __raw_spin_unlock(&timebase_lock);
1004}
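
The timebase hand-off added at the bottom of rtas.c is a small two-CPU protocol. Laid out as a timeline, with the giver being the CPU in rtas_give_timebase() and the taker the one in rtas_take_timebase():

/*
 *   giver                              taker
 *   -----                              -----
 *   freeze-time-base (RTAS call)       spin while (timebase == 0)
 *   timebase = get_tb()
 *   spin while (timebase != 0)         set_tb(timebase)
 *                                      timebase = 0
 *   thaw-time-base (RTAS call)
 */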
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 1d154248cf40..e1e3059cf34b 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -119,6 +119,8 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
119 */ 119 */
120notrace void __init machine_init(unsigned long dt_ptr) 120notrace void __init machine_init(unsigned long dt_ptr)
121{ 121{
122 lockdep_init();
123
122 /* Enable early debugging if any specified (see udbg.h) */ 124 /* Enable early debugging if any specified (see udbg.h) */
123 udbg_early_init(); 125 udbg_early_init();
124 126
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 65484b2200b3..0b47de07302d 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -68,7 +68,8 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map);
68/* SMP operations for this machine */ 68/* SMP operations for this machine */
69struct smp_ops_t *smp_ops; 69struct smp_ops_t *smp_ops;
70 70
71static volatile unsigned int cpu_callin_map[NR_CPUS]; 71/* Can't be static due to PowerMac hackery */
72volatile unsigned int cpu_callin_map[NR_CPUS];
72 73
73int smt_enabled_at_boot = 1; 74int smt_enabled_at_boot = 1;
74 75
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 15391c2ab013..eae4511ceeac 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -53,6 +53,7 @@
53#include <linux/posix-timers.h> 53#include <linux/posix-timers.h>
54#include <linux/irq.h> 54#include <linux/irq.h>
55#include <linux/delay.h> 55#include <linux/delay.h>
56#include <linux/perf_counter.h>
56 57
57#include <asm/io.h> 58#include <asm/io.h>
58#include <asm/processor.h> 59#include <asm/processor.h>
@@ -525,6 +526,26 @@ void __init iSeries_time_init_early(void)
525} 526}
526#endif /* CONFIG_PPC_ISERIES */ 527#endif /* CONFIG_PPC_ISERIES */
527 528
529#if defined(CONFIG_PERF_COUNTERS) && defined(CONFIG_PPC32)
530DEFINE_PER_CPU(u8, perf_counter_pending);
531
532void set_perf_counter_pending(void)
533{
534 get_cpu_var(perf_counter_pending) = 1;
535 set_dec(1);
536 put_cpu_var(perf_counter_pending);
537}
538
539#define test_perf_counter_pending() __get_cpu_var(perf_counter_pending)
540#define clear_perf_counter_pending() __get_cpu_var(perf_counter_pending) = 0
541
542#else /* CONFIG_PERF_COUNTERS && CONFIG_PPC32 */
543
544#define test_perf_counter_pending() 0
545#define clear_perf_counter_pending()
546
547#endif /* CONFIG_PERF_COUNTERS && CONFIG_PPC32 */
548
528/* 549/*
529 * For iSeries shared processors, we have to let the hypervisor 550 * For iSeries shared processors, we have to let the hypervisor
530 * set the hardware decrementer. We set a virtual decrementer 551 * set the hardware decrementer. We set a virtual decrementer
@@ -551,6 +572,10 @@ void timer_interrupt(struct pt_regs * regs)
551 set_dec(DECREMENTER_MAX); 572 set_dec(DECREMENTER_MAX);
552 573
553#ifdef CONFIG_PPC32 574#ifdef CONFIG_PPC32
575 if (test_perf_counter_pending()) {
576 clear_perf_counter_pending();
577 perf_counter_do_pending();
578 }
554 if (atomic_read(&ppc_n_lost_interrupts) != 0) 579 if (atomic_read(&ppc_n_lost_interrupts) != 0)
555 do_IRQ(regs); 580 do_IRQ(regs);
556#endif 581#endif
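
The perf_counter hooks added to time.c give 32-bit kernels a cheap exit from restricted PMU-interrupt context: set a per-cpu flag and force the decrementer to 1, so timer_interrupt() fires almost immediately and runs the pending work where it is safe. Flow sketch, using only names from the patch:

/* PMU overflow path (restricted context): */
set_perf_counter_pending();		/* flag = 1, then set_dec(1) */

/* Decrementer fires -> timer_interrupt() on PPC32: */
if (test_perf_counter_pending()) {
	clear_perf_counter_pending();
	perf_counter_do_pending();	/* deferred perf work, safe here */
}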
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index 0362a891e54e..acb74a17bbbf 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -219,7 +219,7 @@ void udbg_init_pas_realmode(void)
219#ifdef CONFIG_PPC_EARLY_DEBUG_44x 219#ifdef CONFIG_PPC_EARLY_DEBUG_44x
220#include <platforms/44x/44x.h> 220#include <platforms/44x/44x.h>
221 221
222static int udbg_44x_as1_flush(void) 222static void udbg_44x_as1_flush(void)
223{ 223{
224 if (udbg_comport) { 224 if (udbg_comport) {
225 while ((as1_readb(&udbg_comport->lsr) & LSR_THRE) == 0) 225 while ((as1_readb(&udbg_comport->lsr) & LSR_THRE) == 0)
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index ef36cbbc5882..ea4d64644d02 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -80,10 +80,10 @@ _GLOBAL(load_up_altivec)
80 mtvscr vr0 80 mtvscr vr0
81 REST_32VRS(0,r4,r5) 81 REST_32VRS(0,r4,r5)
82#ifndef CONFIG_SMP 82#ifndef CONFIG_SMP
83 /* Update last_task_used_math to 'current' */ 83 /* Update last_task_used_altivec to 'current' */
84 subi r4,r5,THREAD /* Back to 'current' */ 84 subi r4,r5,THREAD /* Back to 'current' */
85 fromreal(r4) 85 fromreal(r4)
86 PPC_STL r4,ADDROFF(last_task_used_math)(r3) 86 PPC_STL r4,ADDROFF(last_task_used_altivec)(r3)
87#endif /* CONFIG_SMP */ 87#endif /* CONFIG_SMP */
88 /* restore registers and return */ 88 /* restore registers and return */
89 blr 89 blr
@@ -172,7 +172,7 @@ _GLOBAL(load_up_vsx)
172 oris r12,r12,MSR_VSX@h 172 oris r12,r12,MSR_VSX@h
173 std r12,_MSR(r1) 173 std r12,_MSR(r1)
174#ifndef CONFIG_SMP 174#ifndef CONFIG_SMP
175 /* Update last_task_used_math to 'current' */ 175 /* Update last_task_used_vsx to 'current' */
176 ld r4,PACACURRENT(r13) 176 ld r4,PACACURRENT(r13)
177 std r4,0(r3) 177 std r4,0(r3)
178#endif /* CONFIG_SMP */ 178#endif /* CONFIG_SMP */