about summary refs log tree commit diff stats
path: root/arch/powerpc/kernel/perf_counter.c
diff options
context:
space:
mode:
author: Paul Mackerras <paulus@samba.org> 2009-06-17 07:51:13 -0400
committer: Ingo Molnar <mingo@elte.hu> 2009-06-18 05:11:45 -0400
commit: 448d64f8f4c147db466c549550767cc515a4d34c (patch)
tree: 9c33191273219d8e4d77e3ea78304691e4fb4b56 /arch/powerpc/kernel/perf_counter.c
parent: 105988c015943e77092a6568bc5fb7e386df6ccd (diff)
perf_counter: powerpc: Use unsigned long for register and constraint values
This changes the powerpc perf_counter back-end to use unsigned long types for hardware register values and for the value/mask pairs used in checking whether a given set of events fit within the hardware constraints. This is in preparation for adding support for the PMU on some 32-bit powerpc processors.

On 32-bit processors the hardware registers are only 32 bits wide, and the PMU structure is generally simpler, so 32 bits should be ample for expressing the hardware constraints. On 64-bit processors, unsigned long is 64 bits wide, so using unsigned long vs. u64 (unsigned long long) makes no actual difference.

This makes some other very minor changes: adjusting whitespace to line things up in initialized structures, and simplifying some code in hw_perf_disable().

Signed-off-by: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: linuxppc-dev@ozlabs.org
Cc: benh@kernel.crashing.org
LKML-Reference: <19000.55473.26174.331511@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/powerpc/kernel/perf_counter.c')
-rw-r--r-- arch/powerpc/kernel/perf_counter.c | 20
1 file changed, 9 insertions, 11 deletions
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index e6dc1850191..9300638b8c2 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -29,7 +29,7 @@ struct cpu_hw_counters {
29 struct perf_counter *counter[MAX_HWCOUNTERS]; 29 struct perf_counter *counter[MAX_HWCOUNTERS];
30 u64 events[MAX_HWCOUNTERS]; 30 u64 events[MAX_HWCOUNTERS];
31 unsigned int flags[MAX_HWCOUNTERS]; 31 unsigned int flags[MAX_HWCOUNTERS];
32 u64 mmcr[3]; 32 unsigned long mmcr[3];
33 struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS]; 33 struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS];
34 u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS]; 34 u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS];
35}; 35};
@@ -135,15 +135,15 @@ static void write_pmc(int idx, unsigned long val)
135static int power_check_constraints(u64 event[], unsigned int cflags[], 135static int power_check_constraints(u64 event[], unsigned int cflags[],
136 int n_ev) 136 int n_ev)
137{ 137{
138 u64 mask, value, nv; 138 unsigned long mask, value, nv;
139 u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES]; 139 u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
140 u64 amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES]; 140 unsigned long amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
141 u64 avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES]; 141 unsigned long avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
142 u64 smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS]; 142 unsigned long smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
143 int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS]; 143 int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
144 int i, j; 144 int i, j;
145 u64 addf = ppmu->add_fields; 145 unsigned long addf = ppmu->add_fields;
146 u64 tadd = ppmu->test_adder; 146 unsigned long tadd = ppmu->test_adder;
147 147
148 if (n_ev > ppmu->n_counter) 148 if (n_ev > ppmu->n_counter)
149 return -1; 149 return -1;
@@ -403,14 +403,12 @@ static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
403void hw_perf_disable(void) 403void hw_perf_disable(void)
404{ 404{
405 struct cpu_hw_counters *cpuhw; 405 struct cpu_hw_counters *cpuhw;
406 unsigned long ret;
407 unsigned long flags; 406 unsigned long flags;
408 407
409 local_irq_save(flags); 408 local_irq_save(flags);
410 cpuhw = &__get_cpu_var(cpu_hw_counters); 409 cpuhw = &__get_cpu_var(cpu_hw_counters);
411 410
412 ret = cpuhw->disabled; 411 if (!cpuhw->disabled) {
413 if (!ret) {
414 cpuhw->disabled = 1; 412 cpuhw->disabled = 1;
415 cpuhw->n_added = 0; 413 cpuhw->n_added = 0;
416 414
@@ -1013,9 +1011,9 @@ static void record_and_restart(struct perf_counter *counter, long val,
1013 struct pt_regs *regs, int nmi) 1011 struct pt_regs *regs, int nmi)
1014{ 1012{
1015 u64 period = counter->hw.sample_period; 1013 u64 period = counter->hw.sample_period;
1014 unsigned long mmcra, sdsync;
1016 s64 prev, delta, left; 1015 s64 prev, delta, left;
1017 int record = 0; 1016 int record = 0;
1018 u64 mmcra, sdsync;
1019 1017
1020 /* we don't have to worry about interrupts here */ 1018 /* we don't have to worry about interrupts here */
1021 prev = atomic64_read(&counter->hw.prev_count); 1019 prev = atomic64_read(&counter->hw.prev_count);