about summary refs log tree commit diff stats
path: root/arch/x86/kernel/cpu/perf_counter.c
diff options
context:
space:
mode:
author	Robert Richter <robert.richter@amd.com>	2009-04-29 06:47:03 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-04-29 08:51:03 -0400
commit	4aeb0b4239bb3b67ed402cb9cef3e000c892cadf (patch)
tree	0a025a30fa5de3b40ab1ea156a3f86ee2d000839 /arch/x86/kernel/cpu/perf_counter.c
parent	527e26af3741a2168986d8b82653ffe173891324 (diff)
perfcounters: rename struct hw_perf_counter_ops into struct pmu
This patch renames struct hw_perf_counter_ops into struct pmu. It introduces a structure to describe a cpu specific pmu (performance monitoring unit). It may contain ops and data. The new name of the structure fits better, is shorter, and thus better to handle. Where it was appropriate, names of functions and variables have been changed too.

[ Impact: cleanup ]

Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-7-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/cpu/perf_counter.c')
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	| 37
1 file changed, 18 insertions(+), 19 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index ad663d5ad2d9..95de980c74a0 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -515,8 +515,8 @@ __pmc_fixed_disable(struct perf_counter *counter,
 }
 
 static inline void
-__pmc_generic_disable(struct perf_counter *counter,
-		      struct hw_perf_counter *hwc, unsigned int idx)
+__x86_pmu_disable(struct perf_counter *counter,
+		  struct hw_perf_counter *hwc, unsigned int idx)
 {
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
 		__pmc_fixed_disable(counter, hwc, idx);
@@ -591,8 +591,8 @@ __pmc_fixed_enable(struct perf_counter *counter,
 }
 
 static void
-__pmc_generic_enable(struct perf_counter *counter,
-		     struct hw_perf_counter *hwc, int idx)
+__x86_pmu_enable(struct perf_counter *counter,
+		 struct hw_perf_counter *hwc, int idx)
 {
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
 		__pmc_fixed_enable(counter, hwc, idx);
@@ -626,7 +626,7 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
 /*
  * Find a PMC slot for the freshly enabled / scheduled in counter:
  */
-static int pmc_generic_enable(struct perf_counter *counter)
+static int x86_pmu_enable(struct perf_counter *counter)
 {
 	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
 	struct hw_perf_counter *hwc = &counter->hw;
@@ -667,7 +667,7 @@ try_generic:
 
 	perf_counters_lapic_init(hwc->nmi);
 
-	__pmc_generic_disable(counter, hwc, idx);
+	__x86_pmu_disable(counter, hwc, idx);
 
 	cpuc->counters[idx] = counter;
 	/*
@@ -676,7 +676,7 @@ try_generic:
 	barrier();
 
 	__hw_perf_counter_set_period(counter, hwc, idx);
-	__pmc_generic_enable(counter, hwc, idx);
+	__x86_pmu_enable(counter, hwc, idx);
 
 	return 0;
 }
@@ -731,13 +731,13 @@ void perf_counter_print_debug(void)
 	local_irq_enable();
 }
 
-static void pmc_generic_disable(struct perf_counter *counter)
+static void x86_pmu_disable(struct perf_counter *counter)
 {
 	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
 	struct hw_perf_counter *hwc = &counter->hw;
 	unsigned int idx = hwc->idx;
 
-	__pmc_generic_disable(counter, hwc, idx);
+	__x86_pmu_disable(counter, hwc, idx);
 
 	clear_bit(idx, cpuc->used);
 	cpuc->counters[idx] = NULL;
@@ -767,7 +767,7 @@ static void perf_save_and_restart(struct perf_counter *counter)
 	__hw_perf_counter_set_period(counter, hwc, idx);
 
 	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
-		__pmc_generic_enable(counter, hwc, idx);
+		__x86_pmu_enable(counter, hwc, idx);
 }
 
 /*
@@ -805,7 +805,7 @@ again:
 
 		perf_save_and_restart(counter);
 		if (perf_counter_overflow(counter, nmi, regs, 0))
-			__pmc_generic_disable(counter, &counter->hw, bit);
+			__x86_pmu_disable(counter, &counter->hw, bit);
 	}
 
 	hw_perf_ack_status(ack);
@@ -1034,19 +1034,18 @@ void __init init_hw_perf_counters(void)
 	register_die_notifier(&perf_counter_nmi_notifier);
 }
 
-static void pmc_generic_read(struct perf_counter *counter)
+static void x86_pmu_read(struct perf_counter *counter)
 {
 	x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
 }
 
-static const struct hw_perf_counter_ops x86_perf_counter_ops = {
-	.enable		= pmc_generic_enable,
-	.disable	= pmc_generic_disable,
-	.read		= pmc_generic_read,
+static const struct pmu pmu = {
+	.enable		= x86_pmu_enable,
+	.disable	= x86_pmu_disable,
+	.read		= x86_pmu_read,
 };
 
-const struct hw_perf_counter_ops *
-hw_perf_counter_init(struct perf_counter *counter)
+const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 {
 	int err;
 
@@ -1054,7 +1053,7 @@ hw_perf_counter_init(struct perf_counter *counter)
 	if (err)
 		return ERR_PTR(err);
 
-	return &x86_perf_counter_ops;
+	return &pmu;
 }
 
 /*