author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-06-02 09:13:03 -0400
committer Ingo Molnar <mingo@elte.hu>                2009-06-02 15:45:30 -0400
commit    b23f3325ed465f1bd914384884269af0d106778c (patch)
tree      7b263c707e50463f1e1defc60d371b09e352a21a /arch/powerpc
parent    8e5799b1ad2a0567fdfaaf0e91b40efee010f2c1 (diff)
perf_counter: Rename various fields
A few renames:

  s/irq_period/sample_period/
  s/irq_freq/sample_freq/
  s/PERF_RECORD_/PERF_SAMPLE_/
  s/record_type/sample_type/

And change both the new sample_type and read_format to u64.

Reported-by: Stephane Eranian <eranian@googlemail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
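The user-visible side of these renames lives in include/linux/perf_counter.h, which is outside this arch/powerpc diffstat. As a rough orientation only (field order and the surrounding members are assumed here, not shown by this diff), the renamed fields read along these lines after the patch:

	/* illustrative fragment, not taken from this diff */
	struct perf_counter_hw_event {
		/* ... */
		__u64	sample_period;	/* was irq_period */
		__u64	sample_freq;	/* was irq_freq */
		__u64	sample_type;	/* was record_type, widened to u64 */
		__u64	read_format;	/* widened to u64 */
		/* ... */
	};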
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/kernel/perf_counter.c  |  12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index f96d55f55bd6..c9633321e7a5 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -535,7 +535,7 @@ void hw_perf_enable(void)
 				continue;
 			}
 			val = 0;
-			if (counter->hw.irq_period) {
+			if (counter->hw.sample_period) {
 				left = atomic64_read(&counter->hw.period_left);
 				if (left < 0x80000000L)
 					val = 0x80000000L - left;
@@ -749,12 +749,12 @@ static void power_pmu_unthrottle(struct perf_counter *counter)
 	s64 val, left;
 	unsigned long flags;
 
-	if (!counter->hw.idx || !counter->hw.irq_period)
+	if (!counter->hw.idx || !counter->hw.sample_period)
 		return;
 	local_irq_save(flags);
 	perf_disable();
 	power_pmu_read(counter);
-	left = counter->hw.irq_period;
+	left = counter->hw.sample_period;
 	val = 0;
 	if (left < 0x80000000L)
 		val = 0x80000000L - left;
@@ -789,7 +789,7 @@ static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev,
 	if (counter->hw_event.exclude_user
 	    || counter->hw_event.exclude_kernel
 	    || counter->hw_event.exclude_hv
-	    || counter->hw_event.irq_period)
+	    || counter->hw_event.sample_period)
 		return 0;
 
 	if (ppmu->limited_pmc_event(ev))
@@ -925,7 +925,7 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 
 	counter->hw.config = events[n];
 	counter->hw.counter_base = cflags[n];
-	atomic64_set(&counter->hw.period_left, counter->hw.irq_period);
+	atomic64_set(&counter->hw.period_left, counter->hw.sample_period);
 
 	/*
 	 * See if we need to reserve the PMU.
@@ -958,7 +958,7 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 static void record_and_restart(struct perf_counter *counter, long val,
 			       struct pt_regs *regs, int nmi)
 {
-	u64 period = counter->hw.irq_period;
+	u64 period = counter->hw.sample_period;
 	s64 prev, delta, left;
 	int record = 0;
 	u64 addr, mmcra, sdsync;
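Not part of the patch, but useful when reading the arming code the hunks above touch: the val = 0x80000000L - left computation in hw_perf_enable(), power_pmu_unthrottle() and record_and_restart() presets the PMC so it overflows after `left` more events. A minimal sketch of that logic, assuming the usual PowerPC PMC behaviour of raising its interrupt once bit 31 of the 32-bit counter becomes set (pmc_preload is an illustrative name, not a function in this file):

	/* Illustrative helper, not part of the kernel source. */
	static inline u64 pmc_preload(s64 left)
	{
		u64 val = 0;

		/*
		 * The counter interrupts when it reaches 0x80000000, so
		 * start it 'left' events short of that point.  A remaining
		 * period that does not fit in 31 bits leaves the counter
		 * at 0; the rest is picked up on the next overflow.
		 */
		if (left < 0x80000000L)
			val = 0x80000000L - left;
		return val;
	}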