author		Robert Richter <robert.richter@amd.com>	2009-04-29 06:47:23 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-04-29 08:51:13 -0400
commit		c619b8ffb1cec6a431687a35695dc6fd292a79e6 (patch)
tree		5a4a05abe12d7df461f030571b7258e62ffed830
parent		4b7bfd0d276da3a006d37e85d3cf900d7a14ae2a (diff)
perf_counter, x86: introduce max_period variable
The counter period that can be programmed differs between x86 PMU models. Introduce a max_period value so that the generic implementation can check the maximum period for all models.

[ Impact: generalize code ]

Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-27-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
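In essence, the patch replaces the Intel-only hard-coded period clamp with a per-model limit that the generic code checks. A condensed C sketch of that pattern follows; the struct name x86_pmu_sketch, the helper clamp_irq_period() and the *_sketch variables are illustrative names, not the kernel's identifiers, while the limit values are taken from the patch itself.

#include <stdint.h>

typedef uint64_t u64;	/* stand-ins for the kernel's fixed-width types */
typedef int64_t  s64;

/* Per-model PMU description; only the fields relevant to this patch. */
struct x86_pmu_sketch {
	u64	counter_mask;
	u64	max_period;	/* largest period the hardware accepts */
};

/* Each model supplies its own limit ... */
static const struct x86_pmu_sketch intel_sketch = {
	/* Intel PMCs cannot be accessed sanely above 32 bit width */
	.max_period	= (1ULL << 31) - 1,
};

static const struct x86_pmu_sketch amd_sketch = {
	.counter_mask	= (1ULL << 48) - 1,
	/* keep the highest counter bit free to detect overflow */
	.max_period	= (1ULL << 47) - 1,
};

/*
 * ... and the generic init path clamps the requested period against it,
 * replacing the old Intel-only hard-coded 0x7FFFFFFF check.
 */
static u64 clamp_irq_period(u64 requested, const struct x86_pmu_sketch *pmu)
{
	if ((s64)requested <= 0 || requested > pmu->max_period)
		return pmu->max_period;
	return requested;
}

int main(void)
{
	/* Non-positive or oversized requests fall back to the model's limit. */
	int ok = clamp_irq_period(0, &amd_sketch) == amd_sketch.max_period &&
		 clamp_irq_period(1ULL << 40, &intel_sketch) == intel_sketch.max_period &&
		 clamp_irq_period(100000, &amd_sketch) == 100000;
	return ok ? 0 : 1;
}

This mirrors the new check in __hw_perf_counter_init(), shown in full in the diff below.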
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	20
1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index a8a53abd706d..4b8715b34f87 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -54,6 +54,7 @@ struct x86_pmu {
 	int		num_counters_fixed;
 	int		counter_bits;
 	u64		counter_mask;
+	u64		max_period;
 };
 
 static struct x86_pmu x86_pmu __read_mostly;
@@ -279,14 +280,8 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 		hwc->nmi = 1;
 
 	hwc->irq_period		= hw_event->irq_period;
-	/*
-	 * Intel PMCs cannot be accessed sanely above 32 bit width,
-	 * so we install an artificial 1<<31 period regardless of
-	 * the generic counter period:
-	 */
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-		if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF)
-			hwc->irq_period = 0x7FFFFFFF;
+	if ((s64)hwc->irq_period <= 0 || hwc->irq_period > x86_pmu.max_period)
+		hwc->irq_period = x86_pmu.max_period;
 
 	atomic64_set(&hwc->period_left, hwc->irq_period);
 
@@ -910,6 +905,12 @@ static struct x86_pmu intel_pmu = {
 	.event_map		= intel_pmu_event_map,
 	.raw_event		= intel_pmu_raw_event,
 	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
+	/*
+	 * Intel PMCs cannot be accessed sanely above 32 bit width,
+	 * so we install an artificial 1<<31 period regardless of
+	 * the generic counter period:
+	 */
+	.max_period		= (1ULL << 31) - 1,
 };
 
 static struct x86_pmu amd_pmu = {
@@ -927,6 +928,8 @@ static struct x86_pmu amd_pmu = {
 	.num_counters		= 4,
 	.counter_bits		= 48,
 	.counter_mask		= (1ULL << 48) - 1,
+	/* use highest bit to detect overflow */
+	.max_period		= (1ULL << 47) - 1,
 };
 
 static int intel_pmu_init(void)
@@ -999,6 +1002,7 @@ void __init init_hw_perf_counters(void)
 	perf_max_counters	= x86_pmu.num_counters;
 
 	pr_info("... value mask: %016Lx\n", x86_pmu.counter_mask);
+	pr_info("... max period: %016Lx\n", x86_pmu.max_period);
 
 	if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
 		x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
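As a quick sanity check on the AMD value: the cap is exactly half of the 48-bit counter range, which is what the "use highest bit to detect overflow" comment refers to. A standalone illustration (not kernel code; the variable names are only for this example):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t counter_mask = (1ULL << 48) - 1;	/* 48-bit AMD counters */
	uint64_t max_period   = (1ULL << 47) - 1;	/* limit set by the patch */

	/*
	 * Capping the period at counter_mask >> 1 keeps the most
	 * significant counter bit in reserve as an overflow indicator.
	 */
	assert(max_period == counter_mask >> 1);
	return 0;
}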