author     Venkatesh Srinivas <venkateshs@google.com>    2014-03-13 15:36:26 -0400
committer  Ingo Molnar <mingo@kernel.org>                2014-04-18 06:14:26 -0400
commit     24223657806a0ebd0ae5c9caaf7b021091889cf2
tree       46f5e36797458f19f0fd1f53f6e0ac00de82f00a
parent     6381c24cd6d5d6373620426ab0a96c80ed953e20
perf/x86/intel: Use rdmsrl_safe() when initializing RAPL PMU
CPUs which should support the RAPL counters according to their
Family/Model/Stepping may still raise a #GP fault when the RAPL MSRs
are accessed. This can happen, for example, when Linux runs under KVM
and the host's F/M/S data is passed through to the guest. Use
rdmsrl_safe() to read MSR_RAPL_POWER_UNIT first; if that read faults,
do not attempt to use this PMU.
Signed-off-by: Venkatesh Srinivas <venkateshs@google.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1394739386-22260-1-git-send-email-venkateshs@google.com
Cc: zheng.z.yan@intel.com
Cc: eranian@google.com
Cc: ak@linux.intel.com
Cc: linux-kernel@vger.kernel.org
[ The patch also silently fixes another bug: rapl_pmu_init() didn't handle the memory alloc failure case previously. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
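
For reference, here is a minimal sketch (not the patch itself) of the probe
pattern described above, assuming kernel context; the helper name
rapl_check_hw_unit() and its placement are illustrative only. Note that
rdmsrl_safe() returns 0 on success and a non-zero error when the access
faults, so a probe treats a non-zero return as failure:

/*
 * Illustrative sketch of the MSR probe described in the changelog.
 * rdmsrl_safe() returns 0 on success, non-zero if the read #GPs
 * (e.g. under KVM with passed-through host F/M/S data).
 */
#include <linux/types.h>
#include <asm/msr.h>            /* rdmsrl_safe() */
#include <asm/msr-index.h>      /* MSR_RAPL_POWER_UNIT */

static int rapl_check_hw_unit(u64 *hw_unit)
{
        u64 bits;

        if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &bits))
                return -1;      /* MSR faulted: do not use the RAPL PMU */

        /* Bits 12:8 of MSR_RAPL_POWER_UNIT hold the energy status unit. */
        *hw_unit = (bits >> 8) & 0x1FULL;
        return 0;
}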
-rw-r--r--   arch/x86/kernel/cpu/perf_event_intel_rapl.c   | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index 4b9a9e9466bd..7c87424d4140 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -535,6 +535,7 @@ static int rapl_cpu_prepare(int cpu)
         struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu);
         int phys_id = topology_physical_package_id(cpu);
         u64 ms;
+        u64 msr_rapl_power_unit_bits;
 
         if (pmu)
                 return 0;
@@ -542,6 +543,9 @@ static int rapl_cpu_prepare(int cpu)
         if (phys_id < 0)
                 return -1;
 
+        if (!rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
+                return -1;
+
         pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
         if (!pmu)
                 return -1;
@@ -555,8 +559,7 @@ static int rapl_cpu_prepare(int cpu)
          *
          * we cache in local PMU instance
          */
-        rdmsrl(MSR_RAPL_POWER_UNIT, pmu->hw_unit);
-        pmu->hw_unit = (pmu->hw_unit >> 8) & 0x1FULL;
+        pmu->hw_unit = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;
         pmu->pmu = &rapl_pmu_class;
 
         /*
@@ -677,7 +680,9 @@ static int __init rapl_pmu_init(void)
         cpu_notifier_register_begin();
 
         for_each_online_cpu(cpu) {
-                rapl_cpu_prepare(cpu);
+                ret = rapl_cpu_prepare(cpu);
+                if (ret)
+                        goto out;
                 rapl_cpu_init(cpu);
         }
 
@@ -700,6 +705,7 @@ static int __init rapl_pmu_init(void)
                 hweight32(rapl_cntr_mask),
                 ktime_to_ms(pmu->timer_interval));
 
+out:
         cpu_notifier_register_done();
 
         return 0;
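
Corresponding to the bracketed note above, here is a simplified sketch of how
the two rapl_pmu_init() hunks fit together after this change: a failing
rapl_cpu_prepare() (MSR probe or allocation failure) now unwinds through the
out: label so cpu_notifier_register_done() still runs. The elided setup, PMU
registration and pr_info() are only summarized in comments; this is a
stitched-together view, not the full function.

/*
 * Simplified view of rapl_pmu_init() after the patch (details elided).
 */
static int __init rapl_pmu_init(void)
{
        int cpu, ret = 0;

        /* ... CPU model check and event attribute setup elided ... */

        cpu_notifier_register_begin();

        for_each_online_cpu(cpu) {
                ret = rapl_cpu_prepare(cpu);    /* MSR probe or kzalloc_node() may fail */
                if (ret)
                        goto out;
                rapl_cpu_init(cpu);
        }

        /* ... PMU registration and counter/unit pr_info() elided ... */

out:
        cpu_notifier_register_done();

        return 0;       /* as in the patch, the prepare result is not propagated */
}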