 arch/x86/kernel/cpu/perf_event.h       | 23 +++++++++++++++++++++++
 arch/x86/kernel/cpu/perf_event_intel.c | 21 ++++++++++++-------
 2 files changed, 37 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 82db83b5c3bc..66fda0c26402 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -268,6 +268,29 @@ struct x86_pmu_quirk {
 	void (*func)(void);
 };
 
+union x86_pmu_config {
+	struct {
+		u64 event:8,
+		    umask:8,
+		    usr:1,
+		    os:1,
+		    edge:1,
+		    pc:1,
+		    interrupt:1,
+		    __reserved1:1,
+		    en:1,
+		    inv:1,
+		    cmask:8,
+		    event2:4,
+		    __reserved2:4,
+		    go:1,
+		    ho:1;
+	} bits;
+	u64 value;
+};
+
+#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
+
 /*
  * struct x86_pmu - generic x86 pmu
  */
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 61d4f79a550e..4bd9c9ef9d42 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1288,7 +1288,8 @@ static int intel_pmu_hw_config(struct perf_event *event)
 		 *
 		 * Thereby we gain a PEBS capable cycle counter.
 		 */
-		u64 alt_config = 0x108000c0; /* INST_RETIRED.TOTAL_CYCLES */
+		u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
+
 
 		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
 		event->hw.config = alt_config;
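
The old literal and the new designated-initializer form in the hunk above encode the same bits. A quick stand-alone check with plain shifts (no kernel headers, just the MSR bit positions) confirms the equivalence:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* event 0xc0 in bits 0-7, INV at bit 23, counter mask 16 in bits 24-31 */
	uint64_t alt_config = 0xc0ULL | (1ULL << 23) | (16ULL << 24);

	assert(alt_config == 0x108000c0ULL);	/* INST_RETIRED.TOTAL_CYCLES */
	return 0;
}
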
@@ -1690,9 +1691,11 @@ __init int intel_pmu_init(void)
 		x86_pmu.extra_regs = intel_nehalem_extra_regs;
 
 		/* UOPS_ISSUED.STALLED_CYCLES */
-		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
+		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
+			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
 		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
-		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1;
+		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
+			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
 
 		x86_add_quirk(intel_nehalem_quirk);
 
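
The Nehalem stall-cycle entries (and the identical Westmere ones in the next hunk) likewise only restate the old literals in named-field form; packing the fields by hand reproduces both values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* UOPS_ISSUED.STALLED_CYCLES: event 0x0e, umask 0x01, INV, cmask 1 */
	uint64_t fe = 0x0eULL | (0x01ULL << 8) | (1ULL << 23) | (1ULL << 24);
	/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES: event 0xb1, umask 0x3f, INV, cmask 1 */
	uint64_t be = 0xb1ULL | (0x3fULL << 8) | (1ULL << 23) | (1ULL << 24);

	assert(fe == 0x0180010eULL);
	assert(be == 0x01803fb1ULL);
	return 0;
}
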
@@ -1727,9 +1730,11 @@ __init int intel_pmu_init(void)
 		x86_pmu.er_flags |= ERF_HAS_RSP_1;
 
 		/* UOPS_ISSUED.STALLED_CYCLES */
-		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
+		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
+			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
 		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
-		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1;
+		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
+			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
 
 		pr_cont("Westmere events, ");
 		break;
@@ -1750,9 +1755,11 @@ __init int intel_pmu_init(void)
 		x86_pmu.er_flags |= ERF_NO_HT_SHARING;
 
 		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
-		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
+		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
+			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
 		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/
-		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x18001b1;
+		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
+			X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
 
 		pr_cont("SandyBridge events, ");
 		break;
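
The SandyBridge hunk differs from the two above only in the backend event's umask (0x01 for UOPS_DISPATCHED.THREAD rather than 0x3f); the frontend value is unchanged. The same hand-packing check, again as a userspace sketch rather than kernel code:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* UOPS_DISPATCHED.THREAD,c=1,i=1: event 0xb1, umask 0x01, INV, cmask 1 */
	uint64_t be = 0xb1ULL | (0x01ULL << 8) | (1ULL << 23) | (1ULL << 24);

	assert(be == 0x018001b1ULL);
	return 0;
}
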