Diffstat (limited to 'arch/arm/kernel')
 -rw-r--r--  arch/arm/kernel/Makefile              3
 -rw-r--r--  arch/arm/kernel/crunch-bits.S       305
 -rw-r--r--  arch/arm/kernel/crunch.c             88
 -rw-r--r--  arch/arm/kernel/ecard.c               1
 -rw-r--r--  arch/arm/kernel/perf_event.c         45
 -rw-r--r--  arch/arm/kernel/perf_event_v6.c      22
 -rw-r--r--  arch/arm/kernel/perf_event_v7.c      11
 -rw-r--r--  arch/arm/kernel/perf_event_xscale.c  20
8 files changed, 64 insertions(+), 431 deletions(-)
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 43b740d0e374..f16d7652f34b 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -62,9 +62,6 @@ obj-$(CONFIG_SWP_EMULATE) += swp_emulate.o
 CFLAGS_swp_emulate.o		:= -Wa,-march=armv7-a
 obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
 
-obj-$(CONFIG_CRUNCH)		+= crunch.o crunch-bits.o
-AFLAGS_crunch-bits.o		:= -Wa,-mcpu=ep9312
-
 obj-$(CONFIG_CPU_XSCALE)	+= xscale-cp0.o
 obj-$(CONFIG_CPU_XSC3)		+= xscale-cp0.o
 obj-$(CONFIG_CPU_MOHAWK)	+= xscale-cp0.o
diff --git a/arch/arm/kernel/crunch-bits.S b/arch/arm/kernel/crunch-bits.S
deleted file mode 100644
index 0ec9bb48fab9..000000000000
--- a/arch/arm/kernel/crunch-bits.S
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- * arch/arm/kernel/crunch-bits.S
- * Cirrus MaverickCrunch context switching and handling
- *
- * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
- *
- * Shamelessly stolen from the iWMMXt code by Nicolas Pitre, which is
- * Copyright (c) 2003-2004, MontaVista Software, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/linkage.h>
-#include <asm/ptrace.h>
-#include <asm/thread_info.h>
-#include <asm/asm-offsets.h>
-#include <mach/ep93xx-regs.h>
-
-/*
- * We can't use hex constants here due to a bug in gas.
- */
-#define CRUNCH_MVDX0		0
-#define CRUNCH_MVDX1		8
-#define CRUNCH_MVDX2		16
-#define CRUNCH_MVDX3		24
-#define CRUNCH_MVDX4		32
-#define CRUNCH_MVDX5		40
-#define CRUNCH_MVDX6		48
-#define CRUNCH_MVDX7		56
-#define CRUNCH_MVDX8		64
-#define CRUNCH_MVDX9		72
-#define CRUNCH_MVDX10		80
-#define CRUNCH_MVDX11		88
-#define CRUNCH_MVDX12		96
-#define CRUNCH_MVDX13		104
-#define CRUNCH_MVDX14		112
-#define CRUNCH_MVDX15		120
-#define CRUNCH_MVAX0L		128
-#define CRUNCH_MVAX0M		132
-#define CRUNCH_MVAX0H		136
-#define CRUNCH_MVAX1L		140
-#define CRUNCH_MVAX1M		144
-#define CRUNCH_MVAX1H		148
-#define CRUNCH_MVAX2L		152
-#define CRUNCH_MVAX2M		156
-#define CRUNCH_MVAX2H		160
-#define CRUNCH_MVAX3L		164
-#define CRUNCH_MVAX3M		168
-#define CRUNCH_MVAX3H		172
-#define CRUNCH_DSPSC		176
-
-#define CRUNCH_SIZE		184
-
-	.text
-
-/*
- * Lazy switching of crunch coprocessor context
- *
- * r10 = struct thread_info pointer
- * r9  = ret_from_exception
- * lr  = undefined instr exit
- *
- * called from prefetch exception handler with interrupts disabled
- */
-ENTRY(crunch_task_enable)
-	ldr	r8, =(EP93XX_APB_VIRT_BASE + 0x00130000)	@ syscon addr
-
-	ldr	r1, [r8, #0x80]
-	tst	r1, #0x00800000			@ access to crunch enabled?
-	movne	pc, lr				@ if so no business here
-	mov	r3, #0xaa			@ unlock syscon swlock
-	str	r3, [r8, #0xc0]
-	orr	r1, r1, #0x00800000		@ enable access to crunch
-	str	r1, [r8, #0x80]
-
-	ldr	r3, =crunch_owner
-	add	r0, r10, #TI_CRUNCH_STATE	@ get task crunch save area
-	ldr	r2, [sp, #60]			@ current task pc value
-	ldr	r1, [r3]			@ get current crunch owner
-	str	r0, [r3]			@ this task now owns crunch
-	sub	r2, r2, #4			@ adjust pc back
-	str	r2, [sp, #60]
-
-	ldr	r2, [r8, #0x80]
-	mov	r2, r2				@ flush out enable (@@@)
-
-	teq	r1, #0				@ test for last ownership
-	mov	lr, r9				@ normal exit from exception
-	beq	crunch_load			@ no owner, skip save
-
-crunch_save:
-	cfstr64	mvdx0, [r1, #CRUNCH_MVDX0]	@ save 64b registers
-	cfstr64	mvdx1, [r1, #CRUNCH_MVDX1]
-	cfstr64	mvdx2, [r1, #CRUNCH_MVDX2]
-	cfstr64	mvdx3, [r1, #CRUNCH_MVDX3]
-	cfstr64	mvdx4, [r1, #CRUNCH_MVDX4]
-	cfstr64	mvdx5, [r1, #CRUNCH_MVDX5]
-	cfstr64	mvdx6, [r1, #CRUNCH_MVDX6]
-	cfstr64	mvdx7, [r1, #CRUNCH_MVDX7]
-	cfstr64	mvdx8, [r1, #CRUNCH_MVDX8]
-	cfstr64	mvdx9, [r1, #CRUNCH_MVDX9]
-	cfstr64	mvdx10, [r1, #CRUNCH_MVDX10]
-	cfstr64	mvdx11, [r1, #CRUNCH_MVDX11]
-	cfstr64	mvdx12, [r1, #CRUNCH_MVDX12]
-	cfstr64	mvdx13, [r1, #CRUNCH_MVDX13]
-	cfstr64	mvdx14, [r1, #CRUNCH_MVDX14]
-	cfstr64	mvdx15, [r1, #CRUNCH_MVDX15]
-
-#ifdef __ARMEB__
-#error fix me for ARMEB
-#endif
-
-	cfmv32al mvfx0, mvax0			@ save 72b accumulators
-	cfstr32	mvfx0, [r1, #CRUNCH_MVAX0L]
-	cfmv32am mvfx0, mvax0
-	cfstr32	mvfx0, [r1, #CRUNCH_MVAX0M]
-	cfmv32ah mvfx0, mvax0
-	cfstr32	mvfx0, [r1, #CRUNCH_MVAX0H]
-	cfmv32al mvfx0, mvax1
-	cfstr32	mvfx0, [r1, #CRUNCH_MVAX1L]
-	cfmv32am mvfx0, mvax1
-	cfstr32	mvfx0, [r1, #CRUNCH_MVAX1M]
-	cfmv32ah mvfx0, mvax1
-	cfstr32	mvfx0, [r1, #CRUNCH_MVAX1H]
-	cfmv32al mvfx0, mvax2
-	cfstr32	mvfx0, [r1, #CRUNCH_MVAX2L]
-	cfmv32am mvfx0, mvax2
-	cfstr32	mvfx0, [r1, #CRUNCH_MVAX2M]
-	cfmv32ah mvfx0, mvax2
-	cfstr32	mvfx0, [r1, #CRUNCH_MVAX2H]
-	cfmv32al mvfx0, mvax3
-	cfstr32	mvfx0, [r1, #CRUNCH_MVAX3L]
-	cfmv32am mvfx0, mvax3
-	cfstr32	mvfx0, [r1, #CRUNCH_MVAX3M]
-	cfmv32ah mvfx0, mvax3
-	cfstr32	mvfx0, [r1, #CRUNCH_MVAX3H]
-
-	cfmv32sc mvdx0, dspsc			@ save status word
-	cfstr64	mvdx0, [r1, #CRUNCH_DSPSC]
-
-	teq	r0, #0				@ anything to load?
-	cfldr64eq mvdx0, [r1, #CRUNCH_MVDX0]	@ mvdx0 was clobbered
-	moveq	pc, lr
-
-crunch_load:
-	cfldr64	mvdx0, [r0, #CRUNCH_DSPSC]	@ load status word
-	cfmvsc32 dspsc, mvdx0
-
-	cfldr32	mvfx0, [r0, #CRUNCH_MVAX0L]	@ load 72b accumulators
-	cfmval32 mvax0, mvfx0
-	cfldr32	mvfx0, [r0, #CRUNCH_MVAX0M]
-	cfmvam32 mvax0, mvfx0
-	cfldr32	mvfx0, [r0, #CRUNCH_MVAX0H]
-	cfmvah32 mvax0, mvfx0
-	cfldr32	mvfx0, [r0, #CRUNCH_MVAX1L]
-	cfmval32 mvax1, mvfx0
-	cfldr32	mvfx0, [r0, #CRUNCH_MVAX1M]
-	cfmvam32 mvax1, mvfx0
-	cfldr32	mvfx0, [r0, #CRUNCH_MVAX1H]
-	cfmvah32 mvax1, mvfx0
-	cfldr32	mvfx0, [r0, #CRUNCH_MVAX2L]
-	cfmval32 mvax2, mvfx0
-	cfldr32	mvfx0, [r0, #CRUNCH_MVAX2M]
-	cfmvam32 mvax2, mvfx0
-	cfldr32	mvfx0, [r0, #CRUNCH_MVAX2H]
-	cfmvah32 mvax2, mvfx0
-	cfldr32	mvfx0, [r0, #CRUNCH_MVAX3L]
-	cfmval32 mvax3, mvfx0
-	cfldr32	mvfx0, [r0, #CRUNCH_MVAX3M]
-	cfmvam32 mvax3, mvfx0
-	cfldr32	mvfx0, [r0, #CRUNCH_MVAX3H]
-	cfmvah32 mvax3, mvfx0
-
-	cfldr64	mvdx0, [r0, #CRUNCH_MVDX0]	@ load 64b registers
-	cfldr64	mvdx1, [r0, #CRUNCH_MVDX1]
-	cfldr64	mvdx2, [r0, #CRUNCH_MVDX2]
-	cfldr64	mvdx3, [r0, #CRUNCH_MVDX3]
-	cfldr64	mvdx4, [r0, #CRUNCH_MVDX4]
-	cfldr64	mvdx5, [r0, #CRUNCH_MVDX5]
-	cfldr64	mvdx6, [r0, #CRUNCH_MVDX6]
-	cfldr64	mvdx7, [r0, #CRUNCH_MVDX7]
-	cfldr64	mvdx8, [r0, #CRUNCH_MVDX8]
-	cfldr64	mvdx9, [r0, #CRUNCH_MVDX9]
-	cfldr64	mvdx10, [r0, #CRUNCH_MVDX10]
-	cfldr64	mvdx11, [r0, #CRUNCH_MVDX11]
-	cfldr64	mvdx12, [r0, #CRUNCH_MVDX12]
-	cfldr64	mvdx13, [r0, #CRUNCH_MVDX13]
-	cfldr64	mvdx14, [r0, #CRUNCH_MVDX14]
-	cfldr64	mvdx15, [r0, #CRUNCH_MVDX15]
-
-	mov	pc, lr
-
-/*
- * Back up crunch regs to save area and disable access to them
- * (mainly for gdb or sleep mode usage)
- *
- * r0 = struct thread_info pointer of target task or NULL for any
- */
-ENTRY(crunch_task_disable)
-	stmfd	sp!, {r4, r5, lr}
-
-	mrs	ip, cpsr
-	orr	r2, ip, #PSR_I_BIT		@ disable interrupts
-	msr	cpsr_c, r2
-
-	ldr	r4, =(EP93XX_APB_VIRT_BASE + 0x00130000)	@ syscon addr
-
-	ldr	r3, =crunch_owner
-	add	r2, r0, #TI_CRUNCH_STATE	@ get task crunch save area
-	ldr	r1, [r3]			@ get current crunch owner
-	teq	r1, #0				@ any current owner?
-	beq	1f				@ no: quit
-	teq	r0, #0				@ any owner?
-	teqne	r1, r2				@ or specified one?
-	bne	1f				@ no: quit
-
-	ldr	r5, [r4, #0x80]			@ enable access to crunch
-	mov	r2, #0xaa
-	str	r2, [r4, #0xc0]
-	orr	r5, r5, #0x00800000
-	str	r5, [r4, #0x80]
-
-	mov	r0, #0				@ nothing to load
-	str	r0, [r3]			@ no more current owner
-	ldr	r2, [r4, #0x80]			@ flush out enable (@@@)
-	mov	r2, r2
-	bl	crunch_save
-
-	mov	r2, #0xaa			@ disable access to crunch
-	str	r2, [r4, #0xc0]
-	bic	r5, r5, #0x00800000
-	str	r5, [r4, #0x80]
-	ldr	r5, [r4, #0x80]			@ flush out enable (@@@)
-	mov	r5, r5
-
-1:	msr	cpsr_c, ip			@ restore interrupt mode
-	ldmfd	sp!, {r4, r5, pc}
-
-/*
- * Copy crunch state to given memory address
- *
- * r0 = struct thread_info pointer of target task
- * r1 = memory address where to store crunch state
- *
- * this is called mainly in the creation of signal stack frames
- */
-ENTRY(crunch_task_copy)
-	mrs	ip, cpsr
-	orr	r2, ip, #PSR_I_BIT		@ disable interrupts
-	msr	cpsr_c, r2
-
-	ldr	r3, =crunch_owner
-	add	r2, r0, #TI_CRUNCH_STATE	@ get task crunch save area
-	ldr	r3, [r3]			@ get current crunch owner
-	teq	r2, r3				@ does this task own it...
-	beq	1f
-
-	@ current crunch values are in the task save area
-	msr	cpsr_c, ip			@ restore interrupt mode
-	mov	r0, r1
-	mov	r1, r2
-	mov	r2, #CRUNCH_SIZE
-	b	memcpy
-
-1:	@ this task owns crunch regs -- grab a copy from there
-	mov	r0, #0				@ nothing to load
-	mov	r3, lr				@ preserve return address
-	bl	crunch_save
-	msr	cpsr_c, ip			@ restore interrupt mode
-	mov	pc, r3
-
-/*
- * Restore crunch state from given memory address
- *
- * r0 = struct thread_info pointer of target task
- * r1 = memory address where to get crunch state from
- *
- * this is used to restore crunch state when unwinding a signal stack frame
- */
-ENTRY(crunch_task_restore)
-	mrs	ip, cpsr
-	orr	r2, ip, #PSR_I_BIT		@ disable interrupts
-	msr	cpsr_c, r2
-
-	ldr	r3, =crunch_owner
-	add	r2, r0, #TI_CRUNCH_STATE	@ get task crunch save area
-	ldr	r3, [r3]			@ get current crunch owner
-	teq	r2, r3				@ does this task own it...
-	beq	1f
-
-	@ this task doesn't own crunch regs -- use its save area
-	msr	cpsr_c, ip			@ restore interrupt mode
-	mov	r0, r2
-	mov	r2, #CRUNCH_SIZE
-	b	memcpy
-
-1:	@ this task owns crunch regs -- load them directly
-	mov	r0, r1
-	mov	r1, #0				@ nothing to save
-	mov	r3, lr				@ preserve return address
-	bl	crunch_load
-	msr	cpsr_c, ip			@ restore interrupt mode
-	mov	pc, r3
diff --git a/arch/arm/kernel/crunch.c b/arch/arm/kernel/crunch.c
deleted file mode 100644
index 25ef223ba7f3..000000000000
--- a/arch/arm/kernel/crunch.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * arch/arm/kernel/crunch.c
- * Cirrus MaverickCrunch context switching and handling
- *
- * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <mach/ep93xx-regs.h>
-#include <asm/thread_notify.h>
-
-struct crunch_state *crunch_owner;
-
-void crunch_task_release(struct thread_info *thread)
-{
-	local_irq_disable();
-	if (crunch_owner == &thread->crunchstate)
-		crunch_owner = NULL;
-	local_irq_enable();
-}
-
-static int crunch_enabled(u32 devcfg)
-{
-	return !!(devcfg & EP93XX_SYSCON_DEVCFG_CPENA);
-}
-
-static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t)
-{
-	struct thread_info *thread = (struct thread_info *)t;
-	struct crunch_state *crunch_state;
-	u32 devcfg;
-
-	crunch_state = &thread->crunchstate;
-
-	switch (cmd) {
-	case THREAD_NOTIFY_FLUSH:
-		memset(crunch_state, 0, sizeof(*crunch_state));
-
-		/*
-		 * FALLTHROUGH: Ensure we don't try to overwrite our newly
-		 * initialised state information on the first fault.
-		 */
-
-	case THREAD_NOTIFY_EXIT:
-		crunch_task_release(thread);
-		break;
-
-	case THREAD_NOTIFY_SWITCH:
-		devcfg = __raw_readl(EP93XX_SYSCON_DEVCFG);
-		if (crunch_enabled(devcfg) || crunch_owner == crunch_state) {
-			/*
-			 * We don't use ep93xx_syscon_swlocked_write() here
-			 * because we are on the context switch path and
-			 * preemption is already disabled.
-			 */
-			devcfg ^= EP93XX_SYSCON_DEVCFG_CPENA;
-			__raw_writel(0xaa, EP93XX_SYSCON_SWLOCK);
-			__raw_writel(devcfg, EP93XX_SYSCON_DEVCFG);
-		}
-		break;
-	}
-
-	return NOTIFY_DONE;
-}
-
-static struct notifier_block crunch_notifier_block = {
-	.notifier_call	= crunch_do,
-};
-
-static int __init crunch_init(void)
-{
-	thread_register_notifier(&crunch_notifier_block);
-	elf_hwcap |= HWCAP_CRUNCH;
-
-	return 0;
-}
-
-late_initcall(crunch_init);
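
The two deleted files above implement lazy coprocessor context switching: access to the MaverickCrunch unit stays disabled across context switches, and the first Crunch instruction a task executes faults into crunch_task_enable, which saves the previous owner's registers before loading the new owner's. A stripped-down userspace sketch of that ownership protocol follows; the types and helpers are illustrative stand-ins, not the kernel's:

#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins: a 184-byte save area per task (CRUNCH_SIZE
 * above) and a simulated live register file. */
struct crunch_state { unsigned char regs[184]; };
struct task { const char *name; struct crunch_state crunch; };

static unsigned char live_regs[184];      /* the coprocessor's registers */
static struct crunch_state *crunch_owner; /* whose data is live, or NULL */

/* Called from the fault handler the first time a task touches Crunch. */
static void crunch_fault(struct task *t)
{
	/* hw_enable() would unlock the syscon and set CPENA here. */
	if (crunch_owner)                 /* evict the previous owner */
		memcpy(crunch_owner->regs, live_regs, sizeof(live_regs));
	crunch_owner = &t->crunch;
	memcpy(live_regs, crunch_owner->regs, sizeof(live_regs));
	printf("%s now owns the crunch registers\n", t->name);
}

int main(void)
{
	struct task a = { "task A" }, b = { "task B" };

	crunch_fault(&a);  /* A touches crunch: nothing to save, just load */
	/* ...a context switch merely disables access; nothing is saved... */
	crunch_fault(&b);  /* B touches crunch: A's state is saved first   */
	return 0;
}
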
diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c
index 4dd0edab6a65..1651d4950744 100644
--- a/arch/arm/kernel/ecard.c
+++ b/arch/arm/kernel/ecard.c
@@ -242,6 +242,7 @@ static void ecard_init_pgtables(struct mm_struct *mm)
 
 	memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE));
 
+	vma.vm_flags = VM_EXEC;
 	vma.vm_mm = mm;
 
 	flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE);
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 5bb91bf3d47f..b2abfa18f137 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -180,7 +180,7 @@ armpmu_event_set_period(struct perf_event *event,
 u64
 armpmu_event_update(struct perf_event *event,
 		    struct hw_perf_event *hwc,
-		    int idx, int overflow)
+		    int idx)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	u64 delta, prev_raw_count, new_raw_count;
@@ -193,13 +193,7 @@ again:
 		     new_raw_count) != prev_raw_count)
 		goto again;
 
-	new_raw_count &= armpmu->max_period;
-	prev_raw_count &= armpmu->max_period;
-
-	if (overflow)
-		delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
-	else
-		delta = new_raw_count - prev_raw_count;
+	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
 
 	local64_add(delta, &event->count);
 	local64_sub(delta, &hwc->period_left);
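
The single masked subtraction above is why the old 'overflow' argument can go away: counter values live modulo (max_period + 1), a power of two, so (new - prev) & max_period recovers the elapsed count whether or not the counter wrapped between the two reads. A minimal userspace sketch of the arithmetic; the 32-bit mask is an assumed example value, not taken from the driver:

#include <stdint.h>
#include <stdio.h>

/* Assumed 32-bit counter: max_period = 2^32 - 1. */
#define MAX_PERIOD 0xffffffffULL

static uint64_t delta(uint64_t prev, uint64_t new)
{
	/* Modular subtraction: correct whether or not the counter wrapped. */
	return (new - prev) & MAX_PERIOD;
}

int main(void)
{
	/* No wrap: 0x1000 -> 0x1100 is 0x100 events. */
	printf("%#llx\n", (unsigned long long)delta(0x1000, 0x1100));
	/* Wrap: 0xfffffff0 -> 0x10 is 0x20 events, no overflow flag needed. */
	printf("%#llx\n", (unsigned long long)delta(0xfffffff0, 0x10));
	return 0;
}
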
@@ -216,7 +210,7 @@ armpmu_read(struct perf_event *event)
 	if (hwc->idx < 0)
 		return;
 
-	armpmu_event_update(event, hwc, hwc->idx, 0);
+	armpmu_event_update(event, hwc, hwc->idx);
 }
 
 static void
@@ -232,7 +226,7 @@ armpmu_stop(struct perf_event *event, int flags)
 	if (!(hwc->state & PERF_HES_STOPPED)) {
 		armpmu->disable(hwc, hwc->idx);
 		barrier(); /* why? */
-		armpmu_event_update(event, hwc, hwc->idx, 0);
+		armpmu_event_update(event, hwc, hwc->idx);
 		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
 	}
 }
@@ -518,7 +512,13 @@ __hw_perf_event_init(struct perf_event *event)
 	hwc->config_base	    |= (unsigned long)mapping;
 
 	if (!hwc->sample_period) {
-		hwc->sample_period  = armpmu->max_period;
+		/*
+		 * For non-sampling runs, limit the sample_period to half
+		 * of the counter width. That way, the new counter value
+		 * is far less likely to overtake the previous one unless
+		 * you have some serious IRQ latency issues.
+		 */
+		hwc->sample_period  = armpmu->max_period >> 1;
 		hwc->last_period    = hwc->sample_period;
 		local64_set(&hwc->period_left, hwc->sample_period);
 	}
@@ -680,6 +680,28 @@ static void __init cpu_pmu_init(struct arm_pmu *armpmu)
 }
 
 /*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
+				    unsigned long action, void *hcpu)
+{
+	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+		return NOTIFY_DONE;
+
+	if (cpu_pmu && cpu_pmu->reset)
+		cpu_pmu->reset(NULL);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata pmu_cpu_notifier = {
+	.notifier_call = pmu_cpu_notify,
+};
+
+/*
  * CPU PMU identification and registration.
  */
 static int __init
@@ -730,6 +752,7 @@ init_hw_perf_events(void)
 		pr_info("enabled with %s PMU driver, %d counters available\n",
 			cpu_pmu->name, cpu_pmu->num_events);
 		cpu_pmu_init(cpu_pmu);
+		register_cpu_notifier(&pmu_cpu_notifier);
 		armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
 	} else {
 		pr_info("no hardware support available\n");
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 533be9930ec2..b78af0cc6ef3 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -467,23 +467,6 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static int counter_is_active(unsigned long pmcr, int idx)
-{
-	unsigned long mask = 0;
-	if (idx == ARMV6_CYCLE_COUNTER)
-		mask = ARMV6_PMCR_CCOUNT_IEN;
-	else if (idx == ARMV6_COUNTER0)
-		mask = ARMV6_PMCR_COUNT0_IEN;
-	else if (idx == ARMV6_COUNTER1)
-		mask = ARMV6_PMCR_COUNT1_IEN;
-
-	if (mask)
-		return pmcr & mask;
-
-	WARN_ONCE(1, "invalid counter number (%d)\n", idx);
-	return 0;
-}
-
 static irqreturn_t
 armv6pmu_handle_irq(int irq_num,
 		    void *dev)
@@ -513,7 +496,8 @@ armv6pmu_handle_irq(int irq_num,
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
-		if (!counter_is_active(pmcr, idx))
+		/* Ignore if we don't have an event. */
+		if (!event)
 			continue;
 
 		/*
@@ -524,7 +508,7 @@ armv6pmu_handle_irq(int irq_num,
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx, 1);
+		armpmu_event_update(event, hwc, idx);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 6933244c68f9..4d7095af2ab3 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -809,6 +809,11 @@ static inline int armv7_pmnc_disable_intens(int idx)
 
 	counter = ARMV7_IDX_TO_COUNTER(idx);
 	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
+	isb();
+	/* Clear the overflow flag in case an interrupt is pending. */
+	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
+	isb();
+
 	return idx;
 }
 
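
The two mcr writes added above first mask the counter's interrupt and then clear any overflow it has already latched, so a stale flag cannot fire for, or be misattributed to, the counter's next user. A compilable sketch of the sequence; write_pmintenclr()/write_pmovsr() are hypothetical stand-ins for the coprocessor writes, and isb() is stubbed:

#include <stdio.h>

#define BIT(n) (1U << (n))

/* Hypothetical stubs for the two mcr targets in the hunk above. */
static void write_pmintenclr(unsigned m) { printf("PMINTENCLR <- %#x\n", m); }
static void write_pmovsr(unsigned m)     { printf("PMOVSR     <- %#x\n", m); }
static void isb(void)                    { /* instruction barrier on real hw */ }

/* Disable a counter's interrupt, then clear any overflow it already
 * latched, mirroring the ordering in the diff above. */
static void pmu_disable_counter_irq(int counter)
{
	write_pmintenclr(BIT(counter));
	isb();
	write_pmovsr(BIT(counter));
	isb();
}

int main(void)
{
	pmu_disable_counter_irq(2);
	return 0;
}
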
@@ -955,6 +960,10 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
+		/* Ignore if we don't have an event. */
+		if (!event)
+			continue;
+
 		/*
 		 * We have a single interrupt for all counters. Check that
 		 * each counter has overflowed before we process it.
@@ -963,7 +972,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx, 1);
+		armpmu_event_update(event, hwc, idx);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 3b99d8269829..71a21e6712f5 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -255,11 +255,14 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
+		if (!event)
+			continue;
+
 		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx, 1);
+		armpmu_event_update(event, hwc, idx);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
@@ -592,11 +595,14 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
-		if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
+		if (!event)
+			continue;
+
+		if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx, 1);
+		armpmu_event_update(event, hwc, idx);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
@@ -663,7 +669,7 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 static void
 xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
-	unsigned long flags, ien, evtsel;
+	unsigned long flags, ien, evtsel, of_flags;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	ien = xscale2pmu_read_int_enable();
@@ -672,26 +678,31 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	switch (idx) {
 	case XSCALE_CYCLE_COUNTER:
 		ien &= ~XSCALE2_CCOUNT_INT_EN;
+		of_flags = XSCALE2_CCOUNT_OVERFLOW;
 		break;
 	case XSCALE_COUNTER0:
 		ien &= ~XSCALE2_COUNT0_INT_EN;
 		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
 		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
+		of_flags = XSCALE2_COUNT0_OVERFLOW;
 		break;
 	case XSCALE_COUNTER1:
 		ien &= ~XSCALE2_COUNT1_INT_EN;
 		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
 		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
+		of_flags = XSCALE2_COUNT1_OVERFLOW;
 		break;
 	case XSCALE_COUNTER2:
 		ien &= ~XSCALE2_COUNT2_INT_EN;
 		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
 		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
+		of_flags = XSCALE2_COUNT2_OVERFLOW;
 		break;
 	case XSCALE_COUNTER3:
 		ien &= ~XSCALE2_COUNT3_INT_EN;
 		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
 		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
+		of_flags = XSCALE2_COUNT3_OVERFLOW;
 		break;
 	default:
 		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
@@ -701,6 +712,7 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
+	xscale2pmu_write_overflow_flags(of_flags);
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 