Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c        9
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c  5
2 files changed, 8 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index c2c1e10f7b0..4e218d7ac85 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -844,10 +844,10 @@ void hw_perf_enable(void)
 	x86_pmu.enable_all(added);
 }
 
-static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc)
+static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
+					  u64 enable_mask)
 {
-	wrmsrl(hwc->config_base + hwc->idx,
-	       hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
+	wrmsrl(hwc->config_base + hwc->idx, hwc->config | enable_mask);
 }
 
 static inline void x86_pmu_disable_event(struct perf_event *event)
@@ -919,7 +919,8 @@ static void x86_pmu_enable_event(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	if (cpuc->enabled)
-		__x86_pmu_enable_event(&event->hw);
+		__x86_pmu_enable_event(&event->hw,
+				       ARCH_PERFMON_EVENTSEL_ENABLE);
 }
 
 /*
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index a099df96f91..a4b56ac425c 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -513,7 +513,8 @@ static void intel_pmu_nhm_enable_all(int added)
 			if (!event)
 				continue;
 
-			__x86_pmu_enable_event(&event->hw);
+			__x86_pmu_enable_event(&event->hw,
+					ARCH_PERFMON_EVENTSEL_ENABLE);
 		}
 	}
 	intel_pmu_enable_all(added);
@@ -617,7 +618,7 @@ static void intel_pmu_enable_event(struct perf_event *event)
 	if (unlikely(event->attr.precise))
 		intel_pmu_pebs_enable(event);
 
-	__x86_pmu_enable_event(hwc);
+	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
 }
 
 /*
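
As a quick illustration of the changed interface, the sketch below is a standalone user-space program, not the kernel code: wrmsrl() is replaced by a printing stub, struct hw_perf_event is reduced to the fields used here, and the MSR base and event encoding are illustrative assumptions. It shows how callers now supply the enable mask to __x86_pmu_enable_event() instead of the helper hard-coding ARCH_PERFMON_EVENTSEL_ENABLE.

/*
 * Standalone sketch (simplified struct layout and MSR numbers assumed),
 * compilable in user space, showing the new two-argument helper.
 */
#include <stdint.h>
#include <stdio.h>

#define ARCH_PERFMON_EVENTSEL_ENABLE	(1ULL << 22)	/* EN bit of IA32_PERFEVTSELx */

struct hw_perf_event {
	uint64_t	config;
	unsigned int	config_base;
	int		idx;
};

/* Stub standing in for the kernel's wrmsrl(); it only logs the MSR write. */
static void wrmsrl(unsigned int msr, uint64_t val)
{
	printf("wrmsrl(0x%x, 0x%016llx)\n", msr, (unsigned long long)val);
}

/* The enable mask is now supplied by the caller rather than hard-coded. */
static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
					  uint64_t enable_mask)
{
	wrmsrl(hwc->config_base + hwc->idx, hwc->config | enable_mask);
}

int main(void)
{
	/* 0x186 is IA32_PERFEVTSEL0; the event encoding 0x003c is illustrative. */
	struct hw_perf_event hwc = {
		.config		= 0x003c,
		.config_base	= 0x186,
		.idx		= 0,
	};

	/* Existing callers keep the old behaviour by passing the enable bit. */
	__x86_pmu_enable_event(&hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
	return 0;
}

Letting the caller pass the mask keeps the wrmsrl() sequence in one place while, presumably, allowing later call sites to OR in different control bits than the plain enable bit.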