path: root/arch/powerpc/perf
author    Michael Neuling <mikey@neuling.org>    2013-05-13 14:44:56 -0400
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>    2013-05-14 02:00:21 -0400
commit    d52f2dc40b52201700001e868093c5ec827a8f33 (patch)
tree      9675b6c3b36449a5718fb9067391f5cd382ff935 /arch/powerpc/perf
parent    a1797b2fd2051515689fd54a9db8fd20ebd5e5f7 (diff)
powerpc/perf: Move BHRB code into CONFIG_PPC64 region
The new Branch History Rolling buffer (BHRB) code is only useful on 64bit
processors, so move it into the #ifdef CONFIG_PPC64 region. This avoids
code bloat on 32bit systems.

Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/perf')
-rw-r--r--    arch/powerpc/perf/core-book3s.c    248
1 files changed, 127 insertions, 121 deletions
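
In outline, the change leaves no-op stubs in the 32-bit (CONFIG_PPC32) section so the common call sites still compile, and gathers the real BHRB helpers in the 64-bit (CONFIG_PPC64) section. A minimal sketch of the resulting layout in core-book3s.c (the full function bodies are in the diff below):

#ifdef CONFIG_PPC32
/* 32-bit builds: no BHRB hardware, so the helpers compile to nothing */
static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
void power_pmu_flush_branch_stack(void) {}
static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64
/* 64-bit builds: real implementations of power_pmu_bhrb_reset(),
 * power_pmu_bhrb_enable(), power_pmu_bhrb_disable(),
 * power_pmu_flush_branch_stack() and power_pmu_bhrb_read() live here.
 */
#endif /* CONFIG_PPC64 */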
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index c627843c5b2e..843bb8be8380 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -100,6 +100,10 @@ static inline int siar_valid(struct pt_regs *regs)
 	return 1;
 }
 
+static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
+static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
+void power_pmu_flush_branch_stack(void) {}
+static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
 #endif /* CONFIG_PPC32 */
 
 static bool regs_use_siar(struct pt_regs *regs)
@@ -308,6 +312,129 @@ static inline int siar_valid(struct pt_regs *regs)
 	return 1;
 }
 
+
+/* Reset all possible BHRB entries */
+static void power_pmu_bhrb_reset(void)
+{
+	asm volatile(PPC_CLRBHRB);
+}
+
+static void power_pmu_bhrb_enable(struct perf_event *event)
+{
+	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+	if (!ppmu->bhrb_nr)
+		return;
+
+	/* Clear BHRB if we changed task context to avoid data leaks */
+	if (event->ctx->task && cpuhw->bhrb_context != event->ctx) {
+		power_pmu_bhrb_reset();
+		cpuhw->bhrb_context = event->ctx;
+	}
+	cpuhw->bhrb_users++;
+}
+
+static void power_pmu_bhrb_disable(struct perf_event *event)
+{
+	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+	if (!ppmu->bhrb_nr)
+		return;
+
+	cpuhw->bhrb_users--;
+	WARN_ON_ONCE(cpuhw->bhrb_users < 0);
+
+	if (!cpuhw->disabled && !cpuhw->bhrb_users) {
+		/* BHRB cannot be turned off when other
+		 * events are active on the PMU.
+		 */
+
+		/* avoid stale pointer */
+		cpuhw->bhrb_context = NULL;
+	}
+}
+
+/* Called from ctxsw to prevent one process's branch entries to
+ * mingle with the other process's entries during context switch.
+ */
+void power_pmu_flush_branch_stack(void)
+{
+	if (ppmu->bhrb_nr)
+		power_pmu_bhrb_reset();
+}
+
+
+/* Processing BHRB entries */
+static void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
+{
+	u64 val;
+	u64 addr;
+	int r_index, u_index, target, pred;
+
+	r_index = 0;
+	u_index = 0;
+	while (r_index < ppmu->bhrb_nr) {
+		/* Assembly read function */
+		val = read_bhrb(r_index);
+
+		/* Terminal marker: End of valid BHRB entries */
+		if (val == 0) {
+			break;
+		} else {
+			/* BHRB field break up */
+			addr = val & BHRB_EA;
+			pred = val & BHRB_PREDICTION;
+			target = val & BHRB_TARGET;
+
+			/* Probable Missed entry: Not applicable for POWER8 */
+			if ((addr == 0) && (target == 0) && (pred == 1)) {
+				r_index++;
+				continue;
+			}
+
+			/* Real Missed entry: Power8 based missed entry */
+			if ((addr == 0) && (target == 1) && (pred == 1)) {
+				r_index++;
+				continue;
+			}
+
+			/* Reserved condition: Not a valid entry */
+			if ((addr == 0) && (target == 1) && (pred == 0)) {
+				r_index++;
+				continue;
+			}
+
+			/* Is a target address */
+			if (val & BHRB_TARGET) {
+				/* First address cannot be a target address */
+				if (r_index == 0) {
+					r_index++;
+					continue;
+				}
+
+				/* Update target address for the previous entry */
+				cpuhw->bhrb_entries[u_index - 1].to = addr;
+				cpuhw->bhrb_entries[u_index - 1].mispred = pred;
+				cpuhw->bhrb_entries[u_index - 1].predicted = ~pred;
+
+				/* Dont increment u_index */
+				r_index++;
+			} else {
+				/* Update address, flags for current entry */
+				cpuhw->bhrb_entries[u_index].from = addr;
+				cpuhw->bhrb_entries[u_index].mispred = pred;
+				cpuhw->bhrb_entries[u_index].predicted = ~pred;
+
+				/* Successfully popullated one entry */
+				u_index++;
+				r_index++;
+			}
+		}
+	}
+	cpuhw->bhrb_stack.nr = u_index;
+	return;
+}
+
 #endif /* CONFIG_PPC64 */
 
 static void perf_event_interrupt(struct pt_regs *regs);
@@ -904,47 +1031,6 @@ static int collect_events(struct perf_event *group, int max_count,
 	return n;
 }
 
-/* Reset all possible BHRB entries */
-static void power_pmu_bhrb_reset(void)
-{
-	asm volatile(PPC_CLRBHRB);
-}
-
-void power_pmu_bhrb_enable(struct perf_event *event)
-{
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
-
-	if (!ppmu->bhrb_nr)
-		return;
-
-	/* Clear BHRB if we changed task context to avoid data leaks */
-	if (event->ctx->task && cpuhw->bhrb_context != event->ctx) {
-		power_pmu_bhrb_reset();
-		cpuhw->bhrb_context = event->ctx;
-	}
-	cpuhw->bhrb_users++;
-}
-
-void power_pmu_bhrb_disable(struct perf_event *event)
-{
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
-
-	if (!ppmu->bhrb_nr)
-		return;
-
-	cpuhw->bhrb_users--;
-	WARN_ON_ONCE(cpuhw->bhrb_users < 0);
-
-	if (!cpuhw->disabled && !cpuhw->bhrb_users) {
-		/* BHRB cannot be turned off when other
-		 * events are active on the PMU.
-		 */
-
-		/* avoid stale pointer */
-		cpuhw->bhrb_context = NULL;
-	}
-}
-
 /*
  * Add a event to the PMU.
  * If all events are not already frozen, then we disable and
@@ -1180,15 +1266,6 @@ int power_pmu_commit_txn(struct pmu *pmu)
 	return 0;
 }
 
-/* Called from ctxsw to prevent one process's branch entries to
- * mingle with the other process's entries during context switch.
- */
-void power_pmu_flush_branch_stack(void)
-{
-	if (ppmu->bhrb_nr)
-		power_pmu_bhrb_reset();
-}
-
 /*
  * Return 1 if we might be able to put event on a limited PMC,
  * or 0 if not.
@@ -1458,77 +1535,6 @@ struct pmu power_pmu = {
 	.flush_branch_stack = power_pmu_flush_branch_stack,
 };
 
-/* Processing BHRB entries */
-void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
-{
-	u64 val;
-	u64 addr;
-	int r_index, u_index, target, pred;
-
-	r_index = 0;
-	u_index = 0;
-	while (r_index < ppmu->bhrb_nr) {
-		/* Assembly read function */
-		val = read_bhrb(r_index);
-
-		/* Terminal marker: End of valid BHRB entries */
-		if (val == 0) {
-			break;
-		} else {
-			/* BHRB field break up */
-			addr = val & BHRB_EA;
-			pred = val & BHRB_PREDICTION;
-			target = val & BHRB_TARGET;
-
-			/* Probable Missed entry: Not applicable for POWER8 */
-			if ((addr == 0) && (target == 0) && (pred == 1)) {
-				r_index++;
-				continue;
-			}
-
-			/* Real Missed entry: Power8 based missed entry */
-			if ((addr == 0) && (target == 1) && (pred == 1)) {
-				r_index++;
-				continue;
-			}
-
-			/* Reserved condition: Not a valid entry */
-			if ((addr == 0) && (target == 1) && (pred == 0)) {
-				r_index++;
-				continue;
-			}
-
-			/* Is a target address */
-			if (val & BHRB_TARGET) {
-				/* First address cannot be a target address */
-				if (r_index == 0) {
-					r_index++;
-					continue;
-				}
-
-				/* Update target address for the previous entry */
-				cpuhw->bhrb_entries[u_index - 1].to = addr;
-				cpuhw->bhrb_entries[u_index - 1].mispred = pred;
-				cpuhw->bhrb_entries[u_index - 1].predicted = ~pred;
-
-				/* Dont increment u_index */
-				r_index++;
-			} else {
-				/* Update address, flags for current entry */
-				cpuhw->bhrb_entries[u_index].from = addr;
-				cpuhw->bhrb_entries[u_index].mispred = pred;
-				cpuhw->bhrb_entries[u_index].predicted = ~pred;
-
-				/* Successfully popullated one entry */
-				u_index++;
-				r_index++;
-			}
-		}
-	}
-	cpuhw->bhrb_stack.nr = u_index;
-	return;
-}
-
 /*
  * A counter has overflowed; update its count and record
  * things if requested. Note that interrupts are hard-disabled