author     Peter Zijlstra <a.p.zijlstra@chello.nl>      2010-06-14 02:49:00 -0400
committer  Ingo Molnar <mingo@elte.hu>                  2010-09-09 14:46:29 -0400
commit     33696fc0d141bbbcb12f75b69608ea83282e3117
tree       72e08dba377d57eb7dd8c08a937a6de10e8af9c4 /arch/alpha/kernel/perf_event.c
parent     24cd7f54a0d47e1d5b3de29e2456bfbd2d8447b7
perf: Per PMU disable
Changes perf_disable() into perf_pmu_disable().
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
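
The changelog above is terse, so here is a hedged sketch of the semantics implied by the new interface: perf_pmu_disable()/perf_pmu_enable() take the struct pmu the event belongs to, and a per-PMU, per-CPU nesting count decides when the architecture callbacks (alpha_pmu_pmu_disable()/alpha_pmu_pmu_enable() in the diff below) actually run. The helper bodies and the pmu_disable_count field name are assumptions for illustration; the generic side is not part of this diff, which is limited to arch/alpha.

/*
 * Sketch only -- the generic helpers live outside this diff.
 * The pmu_disable_count per-CPU field name is an assumption.
 */
void perf_pmu_disable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);

	if (!(*count)++)		/* first disable on this CPU */
		pmu->pmu_disable(pmu);	/* e.g. alpha_pmu_pmu_disable() */
}

void perf_pmu_enable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);

	if (!--(*count))		/* last enable on this CPU */
		pmu->pmu_enable(pmu);	/* e.g. alpha_pmu_pmu_enable() */
}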
Diffstat (limited to 'arch/alpha/kernel/perf_event.c')
 arch/alpha/kernel/perf_event.c | 30 ++++++++++++++++--------------
 1 file changed, 16 insertions(+), 14 deletions(-)
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index 19660b5c298f..3e260731f8e6 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -435,7 +435,7 @@ static int alpha_pmu_enable(struct perf_event *event)
 	 * nevertheless we disable the PMCs first to enable a potential
 	 * final PMI to occur before we disable interrupts.
 	 */
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 	local_irq_save(flags);
 
 	/* Default to error to be returned */
@@ -456,7 +456,7 @@ static int alpha_pmu_enable(struct perf_event *event)
 	}
 
 	local_irq_restore(flags);
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 
 	return ret;
 }
@@ -474,7 +474,7 @@ static void alpha_pmu_disable(struct perf_event *event)
 	unsigned long flags;
 	int j;
 
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 	local_irq_save(flags);
 
 	for (j = 0; j < cpuc->n_events; j++) {
@@ -502,7 +502,7 @@ static void alpha_pmu_disable(struct perf_event *event)
 	}
 
 	local_irq_restore(flags);
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 }
 
 
@@ -668,18 +668,10 @@ static int alpha_pmu_event_init(struct perf_event *event)
 	return err;
 }
 
-static struct pmu pmu = {
-	.event_init	= alpha_pmu_event_init,
-	.enable		= alpha_pmu_enable,
-	.disable	= alpha_pmu_disable,
-	.read		= alpha_pmu_read,
-	.unthrottle	= alpha_pmu_unthrottle,
-};
-
 /*
  * Main entry point - enable HW performance counters.
  */
-void hw_perf_enable(void)
+static void alpha_pmu_pmu_enable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -705,7 +697,7 @@ void hw_perf_enable(void)
  * Main entry point - disable HW performance counters.
  */
 
-void hw_perf_disable(void)
+static void alpha_pmu_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -718,6 +710,16 @@ void hw_perf_disable(void)
 	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
 }
 
+static struct pmu pmu = {
+	.pmu_enable	= alpha_pmu_pmu_enable,
+	.pmu_disable	= alpha_pmu_pmu_disable,
+	.event_init	= alpha_pmu_event_init,
+	.enable		= alpha_pmu_enable,
+	.disable	= alpha_pmu_disable,
+	.read		= alpha_pmu_read,
+	.unthrottle	= alpha_pmu_unthrottle,
+};
+
 
 /*
  * Main entry point - don't know when this is called but it