 arch/powerpc/perf/core-book3s.c | 83 ++++++++++++++++++++++++++++++++++++++++++++++++++++++-----------------------------
 1 file changed, 54 insertions(+), 29 deletions(-)
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index aa2465e21f1a..53fc7b8e5d9a 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1412,11 +1412,8 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs)
 	return regs->nip;
 }

-static bool pmc_overflow(unsigned long val)
+static bool pmc_overflow_power7(unsigned long val)
 {
-	if ((int)val < 0)
-		return true;
-
 	/*
 	 * Events on POWER7 can roll back if a speculative event doesn't
 	 * eventually complete. Unfortunately in some rare cases they will
@@ -1428,7 +1425,15 @@ static bool pmc_overflow(unsigned long val)
 	 * PMCs because a user might set a period of less than 256 and we
 	 * don't want to mistakenly reset them.
 	 */
-	if (pvr_version_is(PVR_POWER7) && ((0x80000000 - val) <= 256))
+	if ((0x80000000 - val) <= 256)
+		return true;
+
+	return false;
+}
+
+static bool pmc_overflow(unsigned long val)
+{
+	if ((int)val < 0)
 		return true;

 	return false;
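
The refactoring above splits the old combined test in two: pmc_overflow() keeps only the ordinary case (the 32-bit counter value has gone negative, i.e. bit 31 is set), while pmc_overflow_power7() isolates the POWER7 quirk where a rolled-back speculative event can leave the counter just short of 0x80000000 after the interrupt has already fired. A minimal userspace sketch of the two predicates, assuming the same 256-count window as the patch (the main() harness and test values are illustrative only, not part of the kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Ordinary overflow: bit 31 of the 32-bit counter value is set. */
static bool pmc_overflow(unsigned long val)
{
	return (int)val < 0;
}

/*
 * POWER7 rollback case: treat a counter within 256 counts of
 * 0x80000000 as having overflowed, matching the patch's window.
 */
static bool pmc_overflow_power7(unsigned long val)
{
	return (0x80000000 - val) <= 256;
}

int main(void)
{
	printf("%d\n", pmc_overflow(0x80000001UL));        /* 1: wrapped past bit 31 */
	printf("%d\n", pmc_overflow(0x7fffff00UL));        /* 0: not wrapped */
	printf("%d\n", pmc_overflow_power7(0x7fffff00UL)); /* 1: exactly 256 counts short of wrap */
	printf("%d\n", pmc_overflow_power7(0x7ffffe00UL)); /* 0: outside the 256 window */
	return 0;
}

Splitting the predicates matters because, as the hunks below show, the cheap pmc_overflow() test is now applied to every counter on every interrupt, while the POWER7-specific test runs only as a fallback when no ordinary overflow is found.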
@@ -1439,11 +1444,11 @@ static bool pmc_overflow(unsigned long val)
  */
 static void perf_event_interrupt(struct pt_regs *regs)
 {
-	int i;
+	int i, j;
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 	struct perf_event *event;
-	unsigned long val;
-	int found = 0;
+	unsigned long val[8];
+	int found, active;
 	int nmi;

 	if (cpuhw->n_limited)
@@ -1458,33 +1463,53 @@ static void perf_event_interrupt(struct pt_regs *regs)
 	else
 		irq_enter();

-	for (i = 0; i < cpuhw->n_events; ++i) {
-		event = cpuhw->event[i];
-		if (!event->hw.idx || is_limited_pmc(event->hw.idx))
+	/* Read all the PMCs since we'll need them a bunch of times */
+	for (i = 0; i < ppmu->n_counter; ++i)
+		val[i] = read_pmc(i + 1);
+
+	/* Try to find what caused the IRQ */
+	found = 0;
+	for (i = 0; i < ppmu->n_counter; ++i) {
+		if (!pmc_overflow(val[i]))
 			continue;
-		val = read_pmc(event->hw.idx);
-		if ((int)val < 0) {
-			/* event has overflowed */
-			found = 1;
-			record_and_restart(event, val, regs);
+		if (is_limited_pmc(i + 1))
+			continue; /* these won't generate IRQs */
+		/*
+		 * We've found one that's overflowed. For active
+		 * counters we need to log this. For inactive
+		 * counters, we need to reset it anyway
+		 */
+		found = 1;
+		active = 0;
+		for (j = 0; j < cpuhw->n_events; ++j) {
+			event = cpuhw->event[j];
+			if (event->hw.idx == (i + 1)) {
+				active = 1;
+				record_and_restart(event, val[i], regs);
+				break;
+			}
 		}
+		if (!active)
+			/* reset non active counters that have overflowed */
+			write_pmc(i + 1, 0);
 	}
-
-	/*
-	 * In case we didn't find and reset the event that caused
-	 * the interrupt, scan all events and reset any that are
-	 * negative, to avoid getting continual interrupts.
-	 * Any that we processed in the previous loop will not be negative.
-	 */
-	if (!found) {
-		for (i = 0; i < ppmu->n_counter; ++i) {
-			if (is_limited_pmc(i + 1))
+	if (!found && pvr_version_is(PVR_POWER7)) {
+		/* check active counters for special buggy p7 overflow */
+		for (i = 0; i < cpuhw->n_events; ++i) {
+			event = cpuhw->event[i];
+			if (!event->hw.idx || is_limited_pmc(event->hw.idx))
 				continue;
-			val = read_pmc(i + 1);
-			if (pmc_overflow(val))
-				write_pmc(i + 1, 0);
+			if (pmc_overflow_power7(val[event->hw.idx - 1])) {
+				/* event has overflowed in a buggy way*/
+				found = 1;
+				record_and_restart(event,
+						   val[event->hw.idx - 1],
+						   regs);
+			}
 		}
 	}
+	if ((!found) && printk_ratelimit())
+		printk(KERN_WARNING "Can't find PMC that caused IRQ\n");

 	/*
 	 * Reset MMCR0 to its normal value. This will set PMXE and
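
Taken together, the reworked handler snapshots every PMC once into a fixed val[8] array (eight covers ppmu->n_counter on book3s PMUs) and then works entirely from that snapshot: an overflowed counter that backs an active event is logged via record_and_restart(), an overflowed counter with no owner is reset so it stops re-raising the interrupt, and only if neither case matched does the handler fall back to the POWER7 near-overflow check, finally warning (ratelimited) if the IRQ source was never identified. A condensed userspace model of that control flow is sketched below; the pmc[], owner[] and power7 variables are hypothetical stand-ins for the kernel's read_pmc()/write_pmc() and cpuhw state, and limited-PMC handling is omitted for brevity:

#include <stdbool.h>
#include <stdio.h>

#define N_COUNTER 8

/* Hypothetical stand-ins for the kernel's per-CPU state and helpers. */
static unsigned long pmc[N_COUNTER];	/* hardware counter values */
static int owner[N_COUNTER];		/* active event id per PMC, 0 = none */
static bool power7;			/* stands in for pvr_version_is(PVR_POWER7) */

static bool pmc_overflow(unsigned long v)        { return (int)v < 0; }
static bool pmc_overflow_power7(unsigned long v) { return (0x80000000 - v) <= 256; }

static void handle_irq(void)
{
	unsigned long val[N_COUNTER];
	int i, found = 0;

	/* 1. Read all the PMCs once, as the patched handler does. */
	for (i = 0; i < N_COUNTER; i++)
		val[i] = pmc[i];

	/* 2. Ordinary overflows: log active counters, reset orphans. */
	for (i = 0; i < N_COUNTER; i++) {
		if (!pmc_overflow(val[i]))
			continue;
		found = 1;
		if (owner[i])
			printf("record_and_restart(event %d)\n", owner[i]);
		else
			pmc[i] = 0;	/* reset non-active counter */
	}

	/* 3. Fallback: check active counters for the buggy POWER7 case. */
	if (!found && power7) {
		for (i = 0; i < N_COUNTER; i++) {
			if (owner[i] && pmc_overflow_power7(val[i])) {
				found = 1;
				printf("record_and_restart(event %d)\n", owner[i]);
			}
		}
	}

	if (!found)
		printf("Can't find PMC that caused IRQ\n");
}

int main(void)
{
	power7 = true;
	owner[2] = 42;		/* PMC3 backs an active event */
	pmc[2] = 0x7fffffff;	/* rolled back, one count short of overflow */
	handle_irq();		/* ordinary scan misses; POWER7 fallback records it */
	return 0;
}

Running the model with pmc[2] = 0x7fffffff exercises exactly the case the patch fixes: the ordinary bit-31 scan finds nothing, but instead of blindly resetting counters as the old code did, the fallback matches the rolled-back counter to its active event and records it.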