aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKan Liang <kan.liang@linux.intel.com>2018-04-25 14:57:17 -0400
committerThomas Gleixner <tglx@linutronix.de>2018-04-25 15:41:22 -0400
commit4e949e9b9d1e3edcdab3b54656c5851bd9e49c67 (patch)
tree13ba7875d137a83f1c330c8a7476d3267da50050
parent6d08b06e67cd117f6992c46611dfb4ce267cd71e (diff)
perf/x86/intel: Don't enable freeze-on-smi for PerfMon V1
The SMM freeze feature was introduced in PerfMon V2. But the current code
unconditionally enables the feature for all platforms. It can generate a #GP
exception, if the related FREEZE_WHILE_SMM bit is set on a machine with
PerfMon V1.

To disable the feature for PerfMon V1, perf needs to:
- Remove the freeze_on_smi sysfs entry by moving intel_pmu_attrs to
  intel_pmu, which is only applied to PerfMon V2 and later.
- Check the PerfMon version before flipping the SMM bit when starting a CPU.

Fixes: 6089327f5424 ("perf/x86: Add sysfs entry to freeze counters on SMI")
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: ak@linux.intel.com
Cc: eranian@google.com
Cc: acme@redhat.com
Link: https://lkml.kernel.org/r/1524682637-63219-1-git-send-email-kan.liang@linux.intel.com
-rw-r--r-- arch/x86/events/intel/core.c | 9 +++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 607bf565a90c..707b2a96e516 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3339,7 +3339,8 @@ static void intel_pmu_cpu_starting(int cpu)
 
 	cpuc->lbr_sel = NULL;
 
-	flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
+	if (x86_pmu.version > 1)
+		flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
 
 	if (!cpuc->shared_regs)
 		return;
@@ -3502,6 +3503,8 @@ static __initconst const struct x86_pmu core_pmu = {
 	.cpu_dying		= intel_pmu_cpu_dying,
 };
 
+static struct attribute *intel_pmu_attrs[];
+
 static __initconst const struct x86_pmu intel_pmu = {
 	.name			= "Intel",
 	.handle_irq		= intel_pmu_handle_irq,
@@ -3533,6 +3536,8 @@ static __initconst const struct x86_pmu intel_pmu = {
 	.format_attrs		= intel_arch3_formats_attr,
 	.events_sysfs_show	= intel_event_sysfs_show,
 
+	.attrs			= intel_pmu_attrs,
+
 	.cpu_prepare		= intel_pmu_cpu_prepare,
 	.cpu_starting		= intel_pmu_cpu_starting,
 	.cpu_dying		= intel_pmu_cpu_dying,
@@ -3911,8 +3916,6 @@ __init int intel_pmu_init(void)
 
 	x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
 
-
-	x86_pmu.attrs = intel_pmu_attrs;
 	/*
 	 * Quirk: v2 perfmon does not report fixed-purpose events, so
 	 * assume at least 3 events, when not running in a hypervisor: