author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2012-03-12 07:44:35 -0400
committer Ingo Molnar <mingo@elte.hu>              2012-03-12 15:44:54 -0400
commit    f9b4eeb809c6d031cc9561cc34dd691701cb2c2a (patch)
tree      3c0c5710faa38d4ba446c0ccdd473010e1f1262b /arch/x86/kernel/cpu/perf_event_intel.c
parent    35239e23c66f1614c76739b62a299c3c92d6eb68 (diff)
perf/x86: Prettify pmu config literals
I got somewhat tired of having to decode hex numbers..

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Stephane Eranian <eranian@google.com>
Cc: Robert Richter <robert.richter@amd.com>
Link: http://lkml.kernel.org/n/tip-0vsy1sgywc4uar3mu1szm0rg@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
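The X86_CONFIG() helper that the '+' lines below rely on is defined outside this file (the diffstat on this page is limited to perf_event_intel.c). What follows is a minimal, self-contained userspace sketch of the idea, assuming the architectural PERFEVTSEL bit layout (event select in bits 0-7, umask in bits 8-15, inv at bit 23, cmask in bits 24-31); the union and macro here are illustrative reconstructions, not quotes of the kernel header.

/* build: cc -std=gnu99 x86_config_demo.c -o x86_config_demo */
#include <stdint.h>
#include <stdio.h>

/*
 * Named bitfields overlaid on a raw PERFEVTSEL config word.  Bitfield
 * packing is compiler-specific; GCC/clang on x86 allocate these
 * LSB-first, which matches the architectural bit positions assumed above.
 */
union x86_pmu_config {
	struct {
		uint64_t event:8, umask:8, usr:1, os:1, edge:1, pc:1,
			 interrupt:1, __reserved1:1, en:1, inv:1, cmask:8;
	} bits;
	uint64_t value;
};

/*
 * A compound literal with designated initializers lets each field be
 * named at the call site -- exactly what turns an opaque hex literal
 * into the readable form on the '+' lines of the diff.
 */
#define X86_CONFIG(...) \
	((union x86_pmu_config){ .bits = { __VA_ARGS__ } }).value

int main(void)
{
	/* INST_RETIRED.TOTAL_CYCLES: event 0xc0, inverted cmask of 16 */
	printf("%#llx\n", (unsigned long long)
	       X86_CONFIG(.event = 0xc0, .inv = 1, .cmask = 16));
	/* prints 0x108000c0 -- the hex literal replaced in the first hunk */
	return 0;
}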
Diffstat (limited to 'arch/x86/kernel/cpu/perf_event_intel.c')
 arch/x86/kernel/cpu/perf_event_intel.c | 21 ++++++++++++++-------
 1 file changed, 14 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 61d4f79a550e..4bd9c9ef9d42 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1288,7 +1288,8 @@ static int intel_pmu_hw_config(struct perf_event *event)
 		 *
 		 * Thereby we gain a PEBS capable cycle counter.
 		 */
-		u64 alt_config = 0x108000c0; /* INST_RETIRED.TOTAL_CYCLES */
+		u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
+
 
 		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
 		event->hw.config = alt_config;
@@ -1690,9 +1691,11 @@ __init int intel_pmu_init(void)
 		x86_pmu.extra_regs = intel_nehalem_extra_regs;
 
 		/* UOPS_ISSUED.STALLED_CYCLES */
-		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
+		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
+			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
 		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
-		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1;
+		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
+			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
 
 		x86_add_quirk(intel_nehalem_quirk);
 
@@ -1727,9 +1730,11 @@ __init int intel_pmu_init(void)
 		x86_pmu.er_flags |= ERF_HAS_RSP_1;
 
 		/* UOPS_ISSUED.STALLED_CYCLES */
-		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
+		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
+			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
 		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
-		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1;
+		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
+			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
 
 		pr_cont("Westmere events, ");
 		break;
@@ -1750,9 +1755,11 @@ __init int intel_pmu_init(void)
 		x86_pmu.er_flags |= ERF_NO_HT_SHARING;
 
 		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
-		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
+		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
+			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
 		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/
-		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x18001b1;
+		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
+			X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
 
 		pr_cont("SandyBridge events, ");
 		break;
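As a sanity check (reusing the illustrative union and macro from the sketch above, so this reflects the assumed encoding rather than the kernel header), every named-field literal on a '+' line expands to exactly the hex constant it replaces, confirming the patch changes no generated config words:

#include <assert.h>

/* Each rewritten literal must encode the same config word as the old hex. */
static void verify_stall_cycle_literals(void)
{
	/* INST_RETIRED.TOTAL_CYCLES */
	assert(X86_CONFIG(.event = 0xc0, .inv = 1, .cmask = 16) == 0x108000c0);
	/* UOPS_ISSUED stall cycles, identical in all three hunks */
	assert(X86_CONFIG(.event = 0x0e, .umask = 0x01, .inv = 1, .cmask = 1) == 0x180010e);
	/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES (Nehalem, Westmere) */
	assert(X86_CONFIG(.event = 0xb1, .umask = 0x3f, .inv = 1, .cmask = 1) == 0x1803fb1);
	/* UOPS_DISPATCHED.THREAD (SandyBridge) */
	assert(X86_CONFIG(.event = 0xb1, .umask = 0x01, .inv = 1, .cmask = 1) == 0x18001b1);
}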