about summary refs log tree commit diff stats
path: root/arch/powerpc/kernel
diff options
context:
space:
mode:
authorRobert Richter <robert.richter@amd.com>2009-04-29 06:47:03 -0400
committerIngo Molnar <mingo@elte.hu>2009-04-29 08:51:03 -0400
commit4aeb0b4239bb3b67ed402cb9cef3e000c892cadf (patch)
tree0a025a30fa5de3b40ab1ea156a3f86ee2d000839 /arch/powerpc/kernel
parent527e26af3741a2168986d8b82653ffe173891324 (diff)
perfcounters: rename struct hw_perf_counter_ops into struct pmu
This patch renames struct hw_perf_counter_ops into struct pmu. It introduces a structure to describe a cpu specific pmu (performance monitoring unit). It may contain ops and data. The new name of the structure fits better, is shorter, and thus better to handle. Where it was appropriate, names of function and variable have been changed too. [ Impact: cleanup ] Signed-off-by: Robert Richter <robert.richter@amd.com> Cc: Paul Mackerras <paulus@samba.org> Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl> LKML-Reference: <1241002046-8832-7-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r-- arch/powerpc/kernel/perf_counter.c | 25
1 files changed, 12 insertions, 13 deletions
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index bd76d0fa2c35..d9bbe5efc649 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -256,7 +256,7 @@ static int check_excludes(struct perf_counter **ctrs, int n_prev, int n_new)
256 return 0; 256 return 0;
257} 257}
258 258
259static void power_perf_read(struct perf_counter *counter) 259static void power_pmu_read(struct perf_counter *counter)
260{ 260{
261 long val, delta, prev; 261 long val, delta, prev;
262 262
@@ -405,7 +405,7 @@ void hw_perf_restore(u64 disable)
405 for (i = 0; i < cpuhw->n_counters; ++i) { 405 for (i = 0; i < cpuhw->n_counters; ++i) {
406 counter = cpuhw->counter[i]; 406 counter = cpuhw->counter[i];
407 if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) { 407 if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) {
408 power_perf_read(counter); 408 power_pmu_read(counter);
409 write_pmc(counter->hw.idx, 0); 409 write_pmc(counter->hw.idx, 0);
410 counter->hw.idx = 0; 410 counter->hw.idx = 0;
411 } 411 }
@@ -477,7 +477,7 @@ static void counter_sched_in(struct perf_counter *counter, int cpu)
477 counter->oncpu = cpu; 477 counter->oncpu = cpu;
478 counter->tstamp_running += counter->ctx->time - counter->tstamp_stopped; 478 counter->tstamp_running += counter->ctx->time - counter->tstamp_stopped;
479 if (is_software_counter(counter)) 479 if (is_software_counter(counter))
480 counter->hw_ops->enable(counter); 480 counter->pmu->enable(counter);
481} 481}
482 482
483/* 483/*
@@ -533,7 +533,7 @@ int hw_perf_group_sched_in(struct perf_counter *group_leader,
533 * re-enable the PMU in order to get hw_perf_restore to do the 533 * re-enable the PMU in order to get hw_perf_restore to do the
534 * actual work of reconfiguring the PMU. 534 * actual work of reconfiguring the PMU.
535 */ 535 */
536static int power_perf_enable(struct perf_counter *counter) 536static int power_pmu_enable(struct perf_counter *counter)
537{ 537{
538 struct cpu_hw_counters *cpuhw; 538 struct cpu_hw_counters *cpuhw;
539 unsigned long flags; 539 unsigned long flags;
@@ -573,7 +573,7 @@ static int power_perf_enable(struct perf_counter *counter)
573/* 573/*
574 * Remove a counter from the PMU. 574 * Remove a counter from the PMU.
575 */ 575 */
576static void power_perf_disable(struct perf_counter *counter) 576static void power_pmu_disable(struct perf_counter *counter)
577{ 577{
578 struct cpu_hw_counters *cpuhw; 578 struct cpu_hw_counters *cpuhw;
579 long i; 579 long i;
@@ -583,7 +583,7 @@ static void power_perf_disable(struct perf_counter *counter)
583 local_irq_save(flags); 583 local_irq_save(flags);
584 pmudis = hw_perf_save_disable(); 584 pmudis = hw_perf_save_disable();
585 585
586 power_perf_read(counter); 586 power_pmu_read(counter);
587 587
588 cpuhw = &__get_cpu_var(cpu_hw_counters); 588 cpuhw = &__get_cpu_var(cpu_hw_counters);
589 for (i = 0; i < cpuhw->n_counters; ++i) { 589 for (i = 0; i < cpuhw->n_counters; ++i) {
@@ -607,10 +607,10 @@ static void power_perf_disable(struct perf_counter *counter)
607 local_irq_restore(flags); 607 local_irq_restore(flags);
608} 608}
609 609
610struct hw_perf_counter_ops power_perf_ops = { 610struct pmu power_pmu = {
611 .enable = power_perf_enable, 611 .enable = power_pmu_enable,
612 .disable = power_perf_disable, 612 .disable = power_pmu_disable,
613 .read = power_perf_read 613 .read = power_pmu_read,
614}; 614};
615 615
616/* Number of perf_counters counting hardware events */ 616/* Number of perf_counters counting hardware events */
@@ -631,8 +631,7 @@ static void hw_perf_counter_destroy(struct perf_counter *counter)
631 } 631 }
632} 632}
633 633
634const struct hw_perf_counter_ops * 634const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
635hw_perf_counter_init(struct perf_counter *counter)
636{ 635{
637 unsigned long ev; 636 unsigned long ev;
638 struct perf_counter *ctrs[MAX_HWCOUNTERS]; 637 struct perf_counter *ctrs[MAX_HWCOUNTERS];
@@ -705,7 +704,7 @@ hw_perf_counter_init(struct perf_counter *counter)
705 704
706 if (err) 705 if (err)
707 return ERR_PTR(err); 706 return ERR_PTR(err);
708 return &power_perf_ops; 707 return &power_pmu;
709} 708}
710 709
711/* 710/*