author	Robert Richter <robert.richter@amd.com>	2009-04-29 06:47:03 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-04-29 08:51:03 -0400
commit	4aeb0b4239bb3b67ed402cb9cef3e000c892cadf (patch)
tree	0a025a30fa5de3b40ab1ea156a3f86ee2d000839 /arch
parent	527e26af3741a2168986d8b82653ffe173891324 (diff)
perfcounters: rename struct hw_perf_counter_ops into struct pmu
This patch renames struct hw_perf_counter_ops into struct pmu. It introduces a structure to describe a cpu-specific pmu (performance monitoring unit). It may contain ops and data. The new name of the structure fits better, is shorter, and thus easier to handle. Where appropriate, the names of functions and variables have been changed too.

[ Impact: cleanup ]

Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-7-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
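For reference, the shape of the renamed structure as used by this patch. Its definition lives in include/linux/perf_counter.h, outside the arch-limited diffstat below, so the following is only a sketch with the member signatures inferred from the diff (power_pmu_enable() returns int; the disable and read ops return void):

/*
 * Sketch of struct pmu as implied by this diff; the real definition
 * is in include/linux/perf_counter.h, which this arch-only view omits.
 */
struct pmu {
	int	(*enable)(struct perf_counter *counter);	/* schedule counter onto a PMC */
	void	(*disable)(struct perf_counter *counter);	/* remove counter from the PMU */
	void	(*read)(struct perf_counter *counter);		/* update counter from hardware */
};

Each architecture returns a pointer to its static instance of this struct from hw_perf_counter_init() (power_pmu on powerpc, pmu on x86), as the hunks below show.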
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/kernel/perf_counter.c	25
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	37
2 files changed, 30 insertions(+), 32 deletions(-)
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index bd76d0fa2c35..d9bbe5efc649 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -256,7 +256,7 @@ static int check_excludes(struct perf_counter **ctrs, int n_prev, int n_new)
 	return 0;
 }
 
-static void power_perf_read(struct perf_counter *counter)
+static void power_pmu_read(struct perf_counter *counter)
 {
 	long val, delta, prev;
 
@@ -405,7 +405,7 @@ void hw_perf_restore(u64 disable)
 	for (i = 0; i < cpuhw->n_counters; ++i) {
 		counter = cpuhw->counter[i];
 		if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) {
-			power_perf_read(counter);
+			power_pmu_read(counter);
 			write_pmc(counter->hw.idx, 0);
 			counter->hw.idx = 0;
 		}
@@ -477,7 +477,7 @@ static void counter_sched_in(struct perf_counter *counter, int cpu)
 	counter->oncpu = cpu;
 	counter->tstamp_running += counter->ctx->time - counter->tstamp_stopped;
 	if (is_software_counter(counter))
-		counter->hw_ops->enable(counter);
+		counter->pmu->enable(counter);
 }
 
 /*
@@ -533,7 +533,7 @@ int hw_perf_group_sched_in(struct perf_counter *group_leader,
  * re-enable the PMU in order to get hw_perf_restore to do the
  * actual work of reconfiguring the PMU.
  */
-static int power_perf_enable(struct perf_counter *counter)
+static int power_pmu_enable(struct perf_counter *counter)
 {
 	struct cpu_hw_counters *cpuhw;
 	unsigned long flags;
@@ -573,7 +573,7 @@ static int power_perf_enable(struct perf_counter *counter)
 /*
  * Remove a counter from the PMU.
  */
-static void power_perf_disable(struct perf_counter *counter)
+static void power_pmu_disable(struct perf_counter *counter)
 {
 	struct cpu_hw_counters *cpuhw;
 	long i;
@@ -583,7 +583,7 @@ static void power_perf_disable(struct perf_counter *counter)
 	local_irq_save(flags);
 	pmudis = hw_perf_save_disable();
 
-	power_perf_read(counter);
+	power_pmu_read(counter);
 
 	cpuhw = &__get_cpu_var(cpu_hw_counters);
 	for (i = 0; i < cpuhw->n_counters; ++i) {
@@ -607,10 +607,10 @@ static void power_perf_disable(struct perf_counter *counter)
 	local_irq_restore(flags);
 }
 
-struct hw_perf_counter_ops power_perf_ops = {
-	.enable = power_perf_enable,
-	.disable = power_perf_disable,
-	.read = power_perf_read
+struct pmu power_pmu = {
+	.enable = power_pmu_enable,
+	.disable = power_pmu_disable,
+	.read = power_pmu_read,
 };
 
 /* Number of perf_counters counting hardware events */
@@ -631,8 +631,7 @@ static void hw_perf_counter_destroy(struct perf_counter *counter)
 	}
 }
 
-const struct hw_perf_counter_ops *
-hw_perf_counter_init(struct perf_counter *counter)
+const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 {
 	unsigned long ev;
 	struct perf_counter *ctrs[MAX_HWCOUNTERS];
@@ -705,7 +704,7 @@ hw_perf_counter_init(struct perf_counter *counter)
 
 	if (err)
 		return ERR_PTR(err);
-	return &power_perf_ops;
+	return &power_pmu;
 }
 
 /*
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index ad663d5ad2d9..95de980c74a0 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -515,8 +515,8 @@ __pmc_fixed_disable(struct perf_counter *counter,
 }
 
 static inline void
-__pmc_generic_disable(struct perf_counter *counter,
+__x86_pmu_disable(struct perf_counter *counter,
 		  struct hw_perf_counter *hwc, unsigned int idx)
 {
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
 		__pmc_fixed_disable(counter, hwc, idx);
@@ -591,8 +591,8 @@ __pmc_fixed_enable(struct perf_counter *counter,
 }
 
 static void
-__pmc_generic_enable(struct perf_counter *counter,
+__x86_pmu_enable(struct perf_counter *counter,
 		 struct hw_perf_counter *hwc, int idx)
 {
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
 		__pmc_fixed_enable(counter, hwc, idx);
@@ -626,7 +626,7 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
 /*
  * Find a PMC slot for the freshly enabled / scheduled in counter:
  */
-static int pmc_generic_enable(struct perf_counter *counter)
+static int x86_pmu_enable(struct perf_counter *counter)
 {
 	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
 	struct hw_perf_counter *hwc = &counter->hw;
@@ -667,7 +667,7 @@ try_generic:
 
 	perf_counters_lapic_init(hwc->nmi);
 
-	__pmc_generic_disable(counter, hwc, idx);
+	__x86_pmu_disable(counter, hwc, idx);
 
 	cpuc->counters[idx] = counter;
 	/*
@@ -676,7 +676,7 @@ try_generic:
 	barrier();
 
 	__hw_perf_counter_set_period(counter, hwc, idx);
-	__pmc_generic_enable(counter, hwc, idx);
+	__x86_pmu_enable(counter, hwc, idx);
 
 	return 0;
 }
@@ -731,13 +731,13 @@ void perf_counter_print_debug(void)
 	local_irq_enable();
 }
 
-static void pmc_generic_disable(struct perf_counter *counter)
+static void x86_pmu_disable(struct perf_counter *counter)
 {
 	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
 	struct hw_perf_counter *hwc = &counter->hw;
 	unsigned int idx = hwc->idx;
 
-	__pmc_generic_disable(counter, hwc, idx);
+	__x86_pmu_disable(counter, hwc, idx);
 
 	clear_bit(idx, cpuc->used);
 	cpuc->counters[idx] = NULL;
@@ -767,7 +767,7 @@ static void perf_save_and_restart(struct perf_counter *counter)
 	__hw_perf_counter_set_period(counter, hwc, idx);
 
 	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
-		__pmc_generic_enable(counter, hwc, idx);
+		__x86_pmu_enable(counter, hwc, idx);
 }
 
 /*
@@ -805,7 +805,7 @@ again:
 
 		perf_save_and_restart(counter);
 		if (perf_counter_overflow(counter, nmi, regs, 0))
-			__pmc_generic_disable(counter, &counter->hw, bit);
+			__x86_pmu_disable(counter, &counter->hw, bit);
 	}
 
 	hw_perf_ack_status(ack);
@@ -1034,19 +1034,18 @@ void __init init_hw_perf_counters(void)
 	register_die_notifier(&perf_counter_nmi_notifier);
 }
 
-static void pmc_generic_read(struct perf_counter *counter)
+static void x86_pmu_read(struct perf_counter *counter)
 {
 	x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
 }
 
-static const struct hw_perf_counter_ops x86_perf_counter_ops = {
-	.enable = pmc_generic_enable,
-	.disable = pmc_generic_disable,
-	.read = pmc_generic_read,
+static const struct pmu pmu = {
+	.enable = x86_pmu_enable,
+	.disable = x86_pmu_disable,
+	.read = x86_pmu_read,
 };
 
-const struct hw_perf_counter_ops *
-hw_perf_counter_init(struct perf_counter *counter)
+const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 {
 	int err;
 
@@ -1054,7 +1053,7 @@ hw_perf_counter_init(struct perf_counter *counter)
 	if (err)
 		return ERR_PTR(err);
 
-	return &x86_perf_counter_ops;
+	return &pmu;
 }
 
 /*