author	Robert Richter <robert.richter@amd.com>	2009-04-29 06:47:12 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-04-29 08:51:08 -0400
commit	0933e5c6a680ba8d8d786a6f7fa377b7ec0d1e49 (patch)
tree	a1ffa932852280eccae396e885d6c5cfdc2a20b5 /arch
parent	4a06bd8508f65ad1dd5cd2046b85694813fa36a2 (diff)
perf_counter, x86: move counter parameters to struct x86_pmu
[ Impact: refactor and generalize code ]

Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-16-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
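The pattern the patch applies is straightforward: per-vendor counter parameters move out of free-floating globals and into the single, vendor-selected descriptor struct. A minimal userspace sketch of that idea (not the kernel code itself; the struct is reduced to the fields this diff touches, the AMD values are the ones the diff hard-codes, and main() stands in for the init path):

/*
 * Sketch: parameters live in one vendor-selected descriptor
 * instead of globals scattered across the file.
 */
#include <stdio.h>
#include <stdint.h>

struct x86_pmu {
	const char	*name;
	int		num_counters;		/* generic counters */
	int		num_counters_fixed;	/* fixed-function counters */
	int		counter_bits;		/* counter width in bits */
	uint64_t	counter_mask;		/* (1ULL << counter_bits) - 1 */
};

static struct x86_pmu x86_pmu;	/* filled once at init, read everywhere */

static const struct x86_pmu amd_pmu = {
	.name		= "amd",
	.num_counters	= 4,
	.counter_bits	= 48,
	.counter_mask	= (1ULL << 48) - 1,
};

int main(void)
{
	int idx;

	x86_pmu = amd_pmu;	/* what amd_pmu_init() now boils down to */

	/* call sites index through the descriptor, not through globals: */
	for (idx = 0; idx < x86_pmu.num_counters; idx++)
		printf("counter %d, mask %016llx\n",
		       idx, (unsigned long long)x86_pmu.counter_mask);
	return 0;
}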
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	80
1 file changed, 37 insertions(+), 43 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 68597d763389..75dbb1f0900e 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -24,16 +24,7 @@
 #include <asm/nmi.h>
 
 static bool perf_counters_initialized __read_mostly;
-
-/*
- * Number of (generic) HW counters:
- */
-static int nr_counters_generic __read_mostly;
 static u64 perf_counter_mask __read_mostly;
-static u64 counter_value_mask __read_mostly;
-static int counter_value_bits __read_mostly;
-
-static int nr_counters_fixed __read_mostly;
 
 struct cpu_hw_counters {
 	struct perf_counter *counters[X86_PMC_IDX_MAX];
@@ -58,6 +49,10 @@ struct x86_pmu {
 	u64		(*event_map)(int);
 	u64		(*raw_event)(u64);
 	int		max_events;
+	int		num_counters;
+	int		num_counters_fixed;
+	int		counter_bits;
+	u64		counter_mask;
 };
 
 static struct x86_pmu x86_pmu __read_mostly;
@@ -183,12 +178,12 @@ static bool reserve_pmc_hardware(void)
 	if (nmi_watchdog == NMI_LOCAL_APIC)
 		disable_lapic_nmi_watchdog();
 
-	for (i = 0; i < nr_counters_generic; i++) {
+	for (i = 0; i < x86_pmu.num_counters; i++) {
 		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
 			goto perfctr_fail;
 	}
 
-	for (i = 0; i < nr_counters_generic; i++) {
+	for (i = 0; i < x86_pmu.num_counters; i++) {
 		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
 			goto eventsel_fail;
 	}
@@ -199,7 +194,7 @@ eventsel_fail:
 	for (i--; i >= 0; i--)
 		release_evntsel_nmi(x86_pmu.eventsel + i);
 
-	i = nr_counters_generic;
+	i = x86_pmu.num_counters;
 
 perfctr_fail:
 	for (i--; i >= 0; i--)
@@ -215,7 +210,7 @@ static void release_pmc_hardware(void)
 {
 	int i;
 
-	for (i = 0; i < nr_counters_generic; i++) {
+	for (i = 0; i < x86_pmu.num_counters; i++) {
 		release_perfctr_nmi(x86_pmu.perfctr + i);
 		release_evntsel_nmi(x86_pmu.eventsel + i);
 	}
@@ -336,7 +331,7 @@ static u64 amd_pmu_save_disable_all(void)
 	 */
 	barrier();
 
-	for (idx = 0; idx < nr_counters_generic; idx++) {
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		u64 val;
 
 		if (!test_bit(idx, cpuc->active_mask))
@@ -378,7 +373,7 @@ static void amd_pmu_restore_all(u64 ctrl)
 	if (!ctrl)
 		return;
 
-	for (idx = 0; idx < nr_counters_generic; idx++) {
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		u64 val;
 
 		if (!test_bit(idx, cpuc->active_mask))
@@ -527,7 +522,7 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 	atomic64_set(&hwc->prev_count, (u64)-left);
 
 	err = checking_wrmsrl(hwc->counter_base + idx,
-			(u64)(-left) & counter_value_mask);
+			(u64)(-left) & x86_pmu.counter_mask);
 }
 
 static inline void
@@ -621,8 +616,9 @@ static int x86_pmu_enable(struct perf_counter *counter)
 		/* Try to get the previous generic counter again */
 		if (test_and_set_bit(idx, cpuc->used)) {
 try_generic:
-			idx = find_first_zero_bit(cpuc->used, nr_counters_generic);
-			if (idx == nr_counters_generic)
+			idx = find_first_zero_bit(cpuc->used,
+						  x86_pmu.num_counters);
+			if (idx == x86_pmu.num_counters)
 				return -EAGAIN;
 
 			set_bit(idx, cpuc->used);
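The allocation logic in this hunk is a standard bitmap-allocator pattern: atomically re-claim a preferred slot with test_and_set_bit(), else scan for the first free one and fail with -EAGAIN when all are taken. A plain, non-atomic userspace sketch of the same idea; NUM_COUNTERS and alloc_counter() are illustrative names, not kernel symbols:

/* Bitmap-style counter allocation: try a preferred index first,
 * else take the first free slot, else report exhaustion. */
#include <stdio.h>

#define NUM_COUNTERS 4

static unsigned int used;	/* one bit per counter */

static int alloc_counter(int preferred)
{
	int idx;

	if (preferred >= 0 && !(used & (1u << preferred))) {
		used |= 1u << preferred;	/* got our old slot back */
		return preferred;
	}
	for (idx = 0; idx < NUM_COUNTERS; idx++) {
		if (!(used & (1u << idx))) {
			used |= 1u << idx;	/* first free slot */
			return idx;
		}
	}
	return -1;	/* the kernel code returns -EAGAIN here */
}

int main(void)
{
	/* prints "2 0 1": preferred slot, then first-free scans */
	printf("%d %d %d\n", alloc_counter(2), alloc_counter(2),
	       alloc_counter(-1));
	return 0;
}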
@@ -654,7 +650,7 @@ void perf_counter_print_debug(void)
 	struct cpu_hw_counters *cpuc;
 	int cpu, idx;
 
-	if (!nr_counters_generic)
+	if (!x86_pmu.num_counters)
 		return;
 
 	local_irq_disable();
@@ -676,7 +672,7 @@ void perf_counter_print_debug(void)
 	}
 	pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used);
 
-	for (idx = 0; idx < nr_counters_generic; idx++) {
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
 		rdmsrl(x86_pmu.perfctr + idx, pmc_count);
 
@@ -689,7 +685,7 @@ void perf_counter_print_debug(void)
 		pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
 			cpu, idx, prev_left);
 	}
-	for (idx = 0; idx < nr_counters_fixed; idx++) {
+	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
 		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
 
 		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
@@ -911,6 +907,9 @@ static struct x86_pmu amd_pmu = {
 	.event_map	= amd_pmu_event_map,
 	.raw_event	= amd_pmu_raw_event,
 	.max_events	= ARRAY_SIZE(amd_perfmon_event_map),
+	.num_counters	= 4,
+	.counter_bits	= 48,
+	.counter_mask	= (1ULL << 48) - 1,
 };
 
 static int intel_pmu_init(void)
@@ -941,10 +940,10 @@ static int intel_pmu_init(void)
 	pr_info("... mask length: %d\n", eax.split.mask_length);
 
 	x86_pmu = intel_pmu;
-
-	nr_counters_generic = eax.split.num_counters;
-	nr_counters_fixed = edx.split.num_counters_fixed;
-	counter_value_mask = (1ULL << eax.split.bit_width) - 1;
+	x86_pmu.num_counters = eax.split.num_counters;
+	x86_pmu.num_counters_fixed = edx.split.num_counters_fixed;
+	x86_pmu.counter_bits = eax.split.bit_width;
+	x86_pmu.counter_mask = (1ULL << eax.split.bit_width) - 1;
 
 	return 0;
 }
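For context, the values intel_pmu_init() copies into x86_pmu above come from CPUID leaf 0xA (architectural performance monitoring): EAX[15:8] holds the number of generic counters, EAX[23:16] their bit width, and EDX[4:0] the number of fixed counters. A userspace sketch of the same decoding, assuming an x86 host and GCC/Clang's <cpuid.h> helper:

/* Decode CPUID leaf 0xA the way intel_pmu_init() relies on. */
#include <stdio.h>
#include <stdint.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0xA, &eax, &ebx, &ecx, &edx))
		return 1;	/* leaf 0xA not available */

	int num_counters       = (eax >> 8)  & 0xff;	/* EAX[15:8]  */
	int counter_bits       = (eax >> 16) & 0xff;	/* EAX[23:16] */
	int num_counters_fixed = edx & 0x1f;		/* EDX[4:0]   */
	uint64_t counter_mask  = (1ULL << counter_bits) - 1;

	printf("generic: %d, fixed: %d, bits: %d, mask: %016llx\n",
	       num_counters, num_counters_fixed, counter_bits,
	       (unsigned long long)counter_mask);
	return 0;
}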
@@ -952,12 +951,6 @@ static int intel_pmu_init(void)
 static int amd_pmu_init(void)
 {
 	x86_pmu = amd_pmu;
-
-	nr_counters_generic = 4;
-	nr_counters_fixed = 0;
-	counter_value_mask = 0x0000FFFFFFFFFFFFULL;
-	counter_value_bits = 48;
-
 	pr_info("AMD Performance Monitoring support detected.\n");
 	return 0;
 }
@@ -979,25 +972,26 @@ void __init init_hw_perf_counters(void)
 	if (err != 0)
 		return;
 
-	pr_info("... num counters: %d\n", nr_counters_generic);
-	if (nr_counters_generic > X86_PMC_MAX_GENERIC) {
-		nr_counters_generic = X86_PMC_MAX_GENERIC;
+	pr_info("... num counters: %d\n", x86_pmu.num_counters);
+	if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
+		x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
 		WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
-		     nr_counters_generic, X86_PMC_MAX_GENERIC);
+		     x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
 	}
-	perf_counter_mask = (1 << nr_counters_generic) - 1;
-	perf_max_counters = nr_counters_generic;
+	perf_counter_mask = (1 << x86_pmu.num_counters) - 1;
+	perf_max_counters = x86_pmu.num_counters;
 
-	pr_info("... value mask: %016Lx\n", counter_value_mask);
+	pr_info("... value mask: %016Lx\n", x86_pmu.counter_mask);
 
-	if (nr_counters_fixed > X86_PMC_MAX_FIXED) {
-		nr_counters_fixed = X86_PMC_MAX_FIXED;
+	if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
+		x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
 		WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
-		     nr_counters_fixed, X86_PMC_MAX_FIXED);
+		     x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
 	}
-	pr_info("... fixed counters: %d\n", nr_counters_fixed);
+	pr_info("... fixed counters: %d\n", x86_pmu.num_counters_fixed);
 
-	perf_counter_mask |= ((1LL << nr_counters_fixed)-1) << X86_PMC_IDX_FIXED;
+	perf_counter_mask |=
+		((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
 
 	pr_info("... counter mask: %016Lx\n", perf_counter_mask);
 	perf_counters_initialized = true;
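The final perf_counter_mask computation packs both counter classes into one bitmask: generic counters occupy the low bits, fixed counters start at bit X86_PMC_IDX_FIXED (32 in kernels of this era; treated as an assumption in the sketch below). A tiny standalone illustration of the arithmetic:

/* Sketch of the mask layout, assuming X86_PMC_IDX_FIXED == 32.
 * With 4 generic and 3 fixed counters the result is
 * 0x000000070000000f: bits 0-3 generic, bits 32-34 fixed. */
#include <stdio.h>
#include <stdint.h>

#define X86_PMC_IDX_FIXED 32	/* assumed value, see lead-in */

int main(void)
{
	int num_counters = 4, num_counters_fixed = 3;

	uint64_t mask = (1ULL << num_counters) - 1;	/* generic bits */
	mask |= ((1ULL << num_counters_fixed) - 1)
			<< X86_PMC_IDX_FIXED;		/* fixed bits */

	printf("counter mask: %016llx\n", (unsigned long long)mask);
	return 0;
}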