Diffstat (limited to 'arch/powerpc/kernel/perf_event.c')
 arch/powerpc/kernel/perf_event.c | 24 +++++++++++++++++++++++-
 1 file changed, 23 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index ab6f6beadb57..97e0ae414940 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs)
 	return ip;
 }
 
+static bool pmc_overflow(unsigned long val)
+{
+	if ((int)val < 0)
+		return true;
+
+	/*
+	 * Events on POWER7 can roll back if a speculative event doesn't
+	 * eventually complete. Unfortunately in some rare cases they will
+	 * raise a performance monitor exception. We need to catch this to
+	 * ensure we reset the PMC. In all cases the PMC will be 256 or less
+	 * cycles from overflow.
+	 *
+	 * We only do this if the first pass fails to find any overflowing
+	 * PMCs because a user might set a period of less than 256 and we
+	 * don't want to mistakenly reset them.
+	 */
+	if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256))
+		return true;
+
+	return false;
+}
+
 /*
  * Performance monitor interrupt stuff
  */
@@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
 		if (is_limited_pmc(i + 1))
 			continue;
 		val = read_pmc(i + 1);
-		if ((int)val < 0)
+		if (pmc_overflow(val))
 			write_pmc(i + 1, 0);
 	}
 }
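
For context, the overflow test works because the PMCs are 32-bit counters that raise an exception when bit 31 becomes set, so (int)val < 0 is true exactly when a counter has crossed 0x80000000. The patch widens that test on POWER7 to also catch counters left up to 256 cycles short of overflow by a rolled-back speculative event. Below is a minimal standalone sketch of the same predicate, not kernel code: the is_power7 flag stands in for the kernel's __is_processor(PV_POWER7) check, and the sample counter values are hypothetical.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's __is_processor(PV_POWER7) test. */
static bool is_power7 = true;

/*
 * Mirrors the pmc_overflow() logic from the patch: a PMC has
 * overflowed when its top bit is set, i.e. (int)val < 0. On POWER7,
 * a rolled-back speculative event can raise the exception with the
 * counter still up to 256 cycles short of 0x80000000, so that
 * window is treated as overflowed as well.
 */
static bool pmc_overflow(unsigned long val)
{
	if ((int)val < 0)
		return true;

	if (is_power7 && ((0x80000000 - val) <= 256))
		return true;

	return false;
}

int main(void)
{
	/* Hypothetical counter values around the 2^31 overflow point. */
	unsigned long samples[] = {
		0x80000001UL,	/* bit 31 set: a normal overflow */
		0x7fffff00UL,	/* exactly 256 below 2^31: POWER7 rollback window */
		0x7ffffe00UL,	/* 512 below 2^31: not treated as overflow */
	};

	for (int i = 0; i < 3; i++)
		printf("0x%08lx -> %s\n", samples[i],
		       pmc_overflow(samples[i]) ? "overflow" : "no overflow");
	return 0;
}

Note how the interrupt-handler hunk only calls this relaxed test in the second pass, after the strict (int)val < 0 check has failed to attribute the interrupt to any event; as the patch comment explains, that keeps counters programmed with periods under 256 from being reset by mistake.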