| | | |
|---|---|---|
| author | Paul Mackerras <paulus@samba.org> | 2009-05-13 23:29:14 -0400 |
| committer | Ingo Molnar <mingo@elte.hu> | 2009-05-15 10:38:55 -0400 |
| commit | ef923214a4816c289e4af2d67a9ebb1a31e4ac61 | |
| tree | bf850f4f53a4f8391b6b9c0335e58364668586d9 /arch/powerpc/kernel/power4-pmu.c | |
| parent | 2e569d36729c8105ae066a9b105068305442cc77 | |
perf_counter: powerpc: use u64 for event codes internally
Although the perf_counter API allows 63-bit raw event codes,
internally in the powerpc back-end we had been using 32-bit
event codes. This expands them to 64 bits so that we can add
bits for specifying threshold start/stop events and instruction
sampling modes later.
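As a minimal standalone illustration (not the kernel code itself) of why the width matters: assigning a raw code that uses bits above bit 31 to an unsigned int silently drops those bits, so any control bits added later would be lost. The extra high bit chosen below is hypothetical.

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* A raw event code with a hypothetical control bit above bit 31,
	 * e.g. a threshold start/stop selector added later. */
	uint64_t raw = (1ULL << 36) | 0x1001;

	unsigned int as_u32 = raw;	/* 32-bit storage: high bits silently dropped */
	uint64_t     as_u64 = raw;	/* 64-bit storage: full code preserved */

	printf("raw    = 0x%llx\n", (unsigned long long)raw);
	printf("as u32 = 0x%x\n", as_u32);	/* prints 0x1001, the flag is gone */
	printf("as u64 = 0x%llx\n", (unsigned long long)as_u64);
	return 0;
}
```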
This also corrects the return value of can_go_on_limited_pmc;
we were returning an event code rather than just a 0/1 value in
some circumstances. That didn't particularly matter while event
codes were 32-bit, but now that event codes are 64-bit it
might, so this fixes it.
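A minimal sketch of that return-value hazard, assuming only a function declared int that hands back a 64-bit event code; this is illustrative and not the actual can_go_on_limited_pmc() body:

```c
#include <stdio.h>
#include <stdint.h>

static int buggy_check(uint64_t event)
{
	/* Returning the event code itself: on common ABIs the value is
	 * truncated to the low 32 bits, so a nonzero 64-bit code whose
	 * low bits are all zero reads back as "false". */
	return event;
}

static int fixed_check(uint64_t event)
{
	/* Return an explicit 0/1 value instead. */
	return event != 0;
}

int main(void)
{
	uint64_t event = 1ULL << 40;	/* only high bits set */

	printf("buggy: %d\n", buggy_check(event));	/* 0 - wrong truth value */
	printf("fixed: %d\n", fixed_check(event));	/* 1 */
	return 0;
}
```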
[ Impact: extend PowerPC perfcounter interfaces from u32 to u64 ]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <18955.36874.472452.353104@drongo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/powerpc/kernel/power4-pmu.c')
-rw-r--r-- | arch/powerpc/kernel/power4-pmu.c | 9
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c
index 744a2756958e..836fa118eb1e 100644
--- a/arch/powerpc/kernel/power4-pmu.c
+++ b/arch/powerpc/kernel/power4-pmu.c
@@ -213,7 +213,7 @@ static unsigned char direct_marked_event[8] = {
  * Returns 1 if event counts things relating to marked instructions
  * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not.
  */
-static int p4_marked_instr_event(unsigned int event)
+static int p4_marked_instr_event(u64 event)
 {
 	int pmc, psel, unit, byte, bit;
 	unsigned int mask;
@@ -249,7 +249,7 @@ static int p4_marked_instr_event(unsigned int event)
 	return (mask >> (byte * 8 + bit)) & 1;
 }
 
-static int p4_get_constraint(unsigned int event, u64 *maskp, u64 *valp)
+static int p4_get_constraint(u64 event, u64 *maskp, u64 *valp)
 {
 	int pmc, byte, unit, lower, sh;
 	u64 mask = 0, value = 0;
@@ -320,8 +320,7 @@ static unsigned int ppc_inst_cmpl[] = {
 	0x1001, 0x4001, 0x6001, 0x7001, 0x8001
 };
 
-static int p4_get_alternatives(unsigned int event, unsigned int flags,
-			       unsigned int alt[])
+static int p4_get_alternatives(u64 event, unsigned int flags, u64 alt[])
 {
 	int i, j, na;
 
@@ -353,7 +352,7 @@ static int p4_get_alternatives(unsigned int event, unsigned int flags,
 	return na;
 }
 
-static int p4_compute_mmcr(unsigned int event[], int n_ev,
+static int p4_compute_mmcr(u64 event[], int n_ev,
 			   unsigned int hwc[], u64 mmcr[])
 {
 	u64 mmcr0 = 0, mmcr1 = 0, mmcra = 0;
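To show the kind of decoding the functions above perform on the now-64-bit codes, here is an illustrative sketch; the shift and mask values are hypothetical placeholders, not the real POWER4 event encoding:

```c
#include <stdio.h>
#include <stdint.h>

/* Illustrative only: the field layout below is made up.  The point is that
 * field decoding works the same way on a u64 code as it did on a 32-bit one,
 * while the upper bits stay free for later additions such as threshold
 * start/stop selectors or instruction sampling modes. */
#define EX_PMC_SH	12		/* hypothetical PMC-number field */
#define EX_PMC_MSK	0xfULL
#define EX_UNIT_SH	8		/* hypothetical unit field */
#define EX_UNIT_MSK	0xfULL
#define EX_PSEL_MSK	0xffULL		/* hypothetical PMCxSEL field */

static void decode_event(uint64_t event, int *pmc, int *unit, int *psel)
{
	*pmc  = (event >> EX_PMC_SH) & EX_PMC_MSK;
	*unit = (event >> EX_UNIT_SH) & EX_UNIT_MSK;
	*psel = event & EX_PSEL_MSK;
}

int main(void)
{
	int pmc, unit, psel;

	/* The low bits carry the classic fields; bit 40 stands in for a
	 * hypothetical future flag that a 32-bit code could not hold. */
	decode_event((1ULL << 40) | 0x2e01, &pmc, &unit, &psel);
	printf("pmc=%d unit=%d psel=0x%x\n", pmc, unit, psel);
	return 0;
}
```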