author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-06-16 08:37:10 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-09-09 14:46:30 -0400
commit	a4eaf7f14675cb512d69f0c928055e73d0c6d252 (patch)
tree	e8a0f631fc28d4bd9becd2e9e2c71743c64ee3ec /arch/alpha/kernel
parent	fa407f35e0298d841e4088f95a7f9cf6e725c6d5 (diff)
perf: Rework the PMU methods
Replace pmu::{enable,disable,start,stop,unthrottle} with
pmu::{add,del,start,stop}, all of which take a flags argument.

The new interface extends the capability to stop a counter while
keeping it scheduled on the PMU. We replace the throttled state with
the generic stopped state.

This also allows us to efficiently stop/start counters over certain
code paths (like IRQ handlers).

It also allows scheduling a counter without it starting, allowing for
a generic frozen state (useful for rotating stopped counters).

The stopped state is implemented in two different ways, depending on
how the architecture implemented the throttled state:

 1) We disable the counter:
    a) the pmu has per-counter enable bits, we flip that
    b) we program a NOP event, preserving the counter state

 2) We store the counter state and ignore all read/overflow events

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
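The changelog's state machine is compact, so here is a stand-alone,
compilable C model of it for reference. This is an editorial sketch,
not kernel code: the PERF_EF_* and PERF_HES_* values match the ones
this commit adds to include/linux/perf_event.h, while struct toy_event
and the toy_* helpers are invented for illustration.

#include <assert.h>
#include <stdio.h>

#define PERF_EF_START	0x01	/* start the counter when adding    */
#define PERF_EF_RELOAD	0x02	/* reload the counter when starting */
#define PERF_EF_UPDATE	0x04	/* update the counter when stopping */

#define PERF_HES_STOPPED	0x01	/* the counter is stopped     */
#define PERF_HES_UPTODATE	0x02	/* event->count is up to date */

struct toy_event {
	int state;	/* PERF_HES_* bits */
	long count;	/* value visible to readers */
	long hw;	/* ticks accumulated in "hardware" */
};

/* start: resume counting; only legal on a stopped counter */
static void toy_start(struct toy_event *e, int flags)
{
	assert(e->state & PERF_HES_STOPPED);
	if (flags & PERF_EF_RELOAD)
		assert(e->state & PERF_HES_UPTODATE);
	e->state = 0;
}

/* stop: freeze counting; fold hardware ticks into ->count on request */
static void toy_stop(struct toy_event *e, int flags)
{
	e->state |= PERF_HES_STOPPED;
	if ((flags & PERF_EF_UPDATE) && !(e->state & PERF_HES_UPTODATE)) {
		e->count += e->hw;
		e->hw = 0;
		e->state |= PERF_HES_UPTODATE;
	}
}

/* add: schedule the event on the PMU, started only if asked to */
static void toy_add(struct toy_event *e, int flags)
{
	e->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		toy_start(e, PERF_EF_RELOAD);
}

int main(void)
{
	struct toy_event e = { 0, 0, 0 };

	toy_add(&e, PERF_EF_START);	/* scheduled and counting      */
	e.hw += 100;			/* "hardware" ticks arrive     */
	toy_stop(&e, PERF_EF_UPDATE);	/* frozen, but still scheduled */
	printf("count=%ld\n", e.count);	/* prints count=100            */
	toy_start(&e, PERF_EF_RELOAD);	/* counting again              */
	return 0;
}

In this model the old throttled state is gone: a throttled counter is
simply one that was stop()ed while staying scheduled, and the rotation
code can obtain the "frozen" state by add()ing an event without
PERF_EF_START.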
Diffstat (limited to 'arch/alpha/kernel')
-rw-r--r--	arch/alpha/kernel/perf_event.c	71
1 file changed, 53 insertions(+), 18 deletions(-)
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index 3e260731f8e6..380ef02d557a 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -307,7 +307,7 @@ again:
 			     new_raw_count) != prev_raw_count)
 		goto again;
 
-	delta = (new_raw_count  - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
+	delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
 
 	/* It is possible on very rare occasions that the PMC has overflowed
 	 * but the interrupt is yet to come. Detect and fix this situation.
@@ -402,14 +402,13 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
 		struct hw_perf_event *hwc = &pe->hw;
 		int idx = hwc->idx;
 
-		if (cpuc->current_idx[j] != PMC_NO_INDEX) {
-			cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
-			continue;
+		if (cpuc->current_idx[j] == PMC_NO_INDEX) {
+			alpha_perf_event_set_period(pe, hwc, idx);
+			cpuc->current_idx[j] = idx;
 		}
 
-		alpha_perf_event_set_period(pe, hwc, idx);
-		cpuc->current_idx[j] = idx;
-		cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
+		if (!(hwc->state & PERF_HES_STOPPED))
+			cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
 	}
 	cpuc->config = cpuc->event[0]->hw.config_base;
 }
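The rework above is what makes the "scheduled but stopped" state cheap
on Alpha: an event keeps its counter index, but while PERF_HES_STOPPED
is set it is left out of cpuc->idx_mask. A condensed editorial sketch
of the consumer, paraphrasing alpha_pmu_enable() further down in this
file (not part of this hunk; sketch_global_enable is an invented name):

/* The mask rebuilt above gates the one hardware enable, so
 * stopped-but-scheduled counters simply stay frozen.
 */
static void sketch_global_enable(struct cpu_hw_events *cpuc)
{
	maybe_change_configuration(cpuc);	/* rebuilds idx_mask/config */
	wrperfmon(PERFMON_CMD_DESIRED_EVENTS, cpuc->config);
	wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
}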
@@ -420,7 +419,7 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
  * - this function is called from outside this module via the pmu struct
  * returned from perf event initialisation.
  */
-static int alpha_pmu_enable(struct perf_event *event)
+static int alpha_pmu_add(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int n0;
@@ -455,6 +454,10 @@ static int alpha_pmu_enable(struct perf_event *event)
 		}
 	}
 
+	hwc->state = PERF_HES_UPTODATE;
+	if (!(flags & PERF_EF_START))
+		hwc->state |= PERF_HES_STOPPED;
+
 	local_irq_restore(flags);
 	perf_pmu_enable(event->pmu);
 
@@ -467,7 +470,7 @@ static int alpha_pmu_enable(struct perf_event *event)
  * - this function is called from outside this module via the pmu struct
  * returned from perf event initialisation.
  */
-static void alpha_pmu_disable(struct perf_event *event)
+static void alpha_pmu_del(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
@@ -514,13 +517,44 @@ static void alpha_pmu_read(struct perf_event *event)
 }
 
 
-static void alpha_pmu_unthrottle(struct perf_event *event)
+static void alpha_pmu_stop(struct perf_event *event, int flags)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
+	if (!(hwc->state & PERF_HES_STOPPED)) {
+		cpuc->idx_mask &= !(1UL<<hwc->idx);
+		hwc->state |= PERF_HES_STOPPED;
+	}
+
+	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+		alpha_perf_event_update(event, hwc, hwc->idx, 0);
+		hwc->state |= PERF_HES_UPTODATE;
+	}
+
+	if (cpuc->enabled)
+		wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx));
+}
+
+
+static void alpha_pmu_start(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+		return;
+
+	if (flags & PERF_EF_RELOAD) {
+		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+		alpha_perf_event_set_period(event, hwc, hwc->idx);
+	}
+
+	hwc->state = 0;
+
 	cpuc->idx_mask |= 1UL<<hwc->idx;
-	wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
+	if (cpuc->enabled)
+		wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
 }
 
 
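Two details in the new alpha_pmu_stop() above deserve a flag. First,
the mask update uses logical negation: for any idx, !(1UL<<hwc->idx)
evaluates to 0, so "cpuc->idx_mask &= !(1UL<<hwc->idx)" clears the
whole mask rather than one bit; the bitwise form ~(1UL<<hwc->idx) is
almost certainly what was meant (the hunk is reproduced here as
committed). A minimal stand-alone check of the difference:

#include <assert.h>

int main(void)
{
	unsigned long idx_mask = 0x5;	/* counters 0 and 2 scheduled */
	int idx = 2;

	/* Logical NOT: !(1UL << 2) == 0, so this wipes the whole mask. */
	assert((idx_mask & !(1UL << idx)) == 0);

	/* Bitwise complement clears only bit 2, as stop() intends. */
	assert((idx_mask & ~(1UL << idx)) == 0x1);
	return 0;
}

Second, note the symmetry with alpha_pmu_start(): both touch the
hardware via wrperfmon() only while cpuc->enabled is set, so stop/start
degenerate to pure bookkeeping while the whole PMU is disabled, which
is what makes them cheap in the IRQ paths the changelog mentions.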
@@ -671,7 +705,7 @@ static int alpha_pmu_event_init(struct perf_event *event)
 /*
  * Main entry point - enable HW performance counters.
  */
-static void alpha_pmu_pmu_enable(struct pmu *pmu)
+static void alpha_pmu_enable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -697,7 +731,7 @@ static void alpha_pmu_pmu_enable(struct pmu *pmu)
  * Main entry point - disable HW performance counters.
  */
 
-static void alpha_pmu_pmu_disable(struct pmu *pmu)
+static void alpha_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -711,13 +745,14 @@ static void alpha_pmu_pmu_disable(struct pmu *pmu)
 }
 
 static struct pmu pmu = {
-	.pmu_enable	= alpha_pmu_pmu_enable,
-	.pmu_disable	= alpha_pmu_pmu_disable,
+	.pmu_enable	= alpha_pmu_enable,
+	.pmu_disable	= alpha_pmu_disable,
 	.event_init	= alpha_pmu_event_init,
-	.enable		= alpha_pmu_enable,
-	.disable	= alpha_pmu_disable,
+	.add		= alpha_pmu_add,
+	.del		= alpha_pmu_del,
+	.start		= alpha_pmu_start,
+	.stop		= alpha_pmu_stop,
 	.read		= alpha_pmu_read,
-	.unthrottle	= alpha_pmu_unthrottle,
 };
 
 
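As a closing schematic, this is roughly what the generic layer gains
from the new method table: throttling no longer needs a dedicated
pmu::unthrottle hook, it is a plain stop/start pair. The helper names
below are invented and the flag choices illustrative, but pmu->stop
and pmu->start with these signatures are exactly what the commit
introduces.

/* Schematic only: how a caller can drive the reworked callbacks. */
static void throttle_sketch(struct perf_event *event)
{
	/* fold the hardware value into event->count, then freeze */
	event->pmu->stop(event, PERF_EF_UPDATE);
}

static void unthrottle_sketch(struct perf_event *event)
{
	/* reprogram the sampling period and resume counting */
	event->pmu->start(event, PERF_EF_RELOAD);
}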