author		Kan Liang <kan.liang@intel.com>	2015-04-21 05:34:41 -0400
committer	Ingo Molnar <mingo@kernel.org>	2015-05-08 05:59:41 -0400
commit		6d374056354a742eed4d0050498101e56e794c4b (patch)
tree		f03f738d250802781de0f3f84fc297e3b7694bf4 /arch/x86
parent		8b10c5e2b59ef2a80a07ab594a3b4987a4676211 (diff)
perf/x86/intel: Fix SLM cache event list
iTLB-load-misses and LLC-load-misses count incorrectly on SLM.

There is no ITLB.MISSES support on SLM. Event PAGE_WALKS.I_SIDE_WALK
should be used to count iTLB-load-misses instead. This event counts when
an instruction (I) page walk is completed or started. Since a page walk
implies a TLB miss, the number of TLB misses can be counted by counting
the number of page walks.

DMND_DATA_RD counts both demand and DCU prefetch data reads, whereas
LLC-load-misses should count only demand reads. There is no way to
exclude prefetches with a single counter on SLM, so LLC-load-misses
support is removed on SLM.

Signed-off-by: Kan Liang <kan.liang@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1429608881-5055-1-git-send-email-kan.liang@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
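For context (not part of the patch): user space reaches the tables patched below
through the generic hardware-cache events of perf_event_open(2). The following is
a minimal sketch, assuming a Linux system where the perf_event_open() syscall is
available; the measured workload is a hypothetical placeholder, and the only
SLM-specific part is the raw event the kernel programs on its behalf
(PAGE_WALKS.I_SIDE_WALKS, config 0x40205, once this fix is applied).

/*
 * Sketch only: count generic "iTLB read misses" for the current task.
 * The config encoding follows perf_event_open(2):
 *   (cache id) | (op id << 8) | (result id << 16)
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HW_CACHE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CACHE_ITLB |
		      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
		      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* current task, any CPU */
	if (fd < 0) {
		perror("perf_event_open");
		return EXIT_FAILURE;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... hypothetical workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("iTLB-load-misses: %lld\n", count);

	close(fd);
	return 0;
}

Equivalently, "perf stat -e iTLB-load-misses" requests the same PERF_TYPE_HW_CACHE
event. As I read the driver, a zero entry in these tables marks the generic event
as unsupported, which is why LLC-load-misses is rejected after this patch rather
than silently counting prefetches as misses.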
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel.c | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 960e85de13fb..3998131d1a68 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1134,7 +1134,7 @@ static __initconst const u64 slm_hw_cache_extra_regs
  [ C(LL  ) ] = {
 	[ C(OP_READ) ] = {
 		[ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
-		[ C(RESULT_MISS)   ] = SLM_DMND_READ|SLM_LLC_MISS,
+		[ C(RESULT_MISS)   ] = 0,
 	},
 	[ C(OP_WRITE) ] = {
 		[ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
@@ -1184,8 +1184,7 @@ static __initconst const u64 slm_hw_cache_event_ids
 	[ C(OP_READ) ] = {
 		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
 		[ C(RESULT_ACCESS) ] = 0x01b7,
-		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
-		[ C(RESULT_MISS)   ] = 0x01b7,
+		[ C(RESULT_MISS)   ] = 0,
 	},
 	[ C(OP_WRITE) ] = {
 		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
@@ -1217,7 +1216,7 @@ static __initconst const u64 slm_hw_cache_event_ids
  [ C(ITLB) ] = {
 	[ C(OP_READ) ] = {
 		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
-		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
+		[ C(RESULT_MISS)   ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */
 	},
 	[ C(OP_WRITE) ] = {
 		[ C(RESULT_ACCESS) ] = -1,