path: root/arch/powerpc
author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-05-13 10:21:38 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-05-15 03:47:02 -0400
commit	9e35ad388bea89f7d6f375af4c0ae98803688666 (patch)
tree	9abbce9f6c9a914b1ea8d8dae82e159366030e4a /arch/powerpc
parent	962bf7a66edca4d36a730a38ff8410a67f560e40 (diff)
perf_counter: Rework the perf counter disable/enable
The current disable/enable mechanism is:

	token = hw_perf_save_disable();
	...
	/* do bits */
	...
	hw_perf_restore(token);

This works well, provided that the use nests properly. Except we don't.
x86 NMI/INT throttling has non-nested use of this, breaking things.

Therefore provide a reference counter disable/enable interface, where the
first disable disables the hardware, and the last enable enables the
hardware again.

[ Impact: refactor, simplify the PMU disable/enable logic ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
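As a reading aid for the diff below, here is a minimal userspace sketch of the reference-counted scheme the message describes. It is not the kernel implementation (which keeps the count in per-CPU data and lets each architecture supply the real hw_perf_disable()/hw_perf_enable()); the stub functions and the demo main() are illustrative only.

#include <stdio.h>

static int disable_count;               /* per-CPU in the real code */

static void hw_perf_disable(void) { puts("PMU hardware disabled"); }
static void hw_perf_enable(void)  { puts("PMU hardware enabled"); }

static void perf_disable(void)
{
	if (disable_count++ == 0)       /* first caller stops the hardware */
		hw_perf_disable();
}

static void perf_enable(void)
{
	if (--disable_count == 0)       /* last caller restarts it */
		hw_perf_enable();
}

int main(void)
{
	perf_disable();                 /* e.g. interrupt throttling path */
	perf_disable();                 /* second, possibly non-nested user */
	perf_enable();                  /* hardware stays disabled here */
	perf_enable();                  /* now it is re-enabled */
	return 0;
}

The point is visible in main(): the hardware is touched only on the outermost disable and the matching last enable, so non-nested users such as the x86 throttling path no longer need to carry a saved token around.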
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/kernel/perf_counter.c	24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index 15cdc8e67229..bb1b463c1361 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -386,7 +386,7 @@ static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
  * Disable all counters to prevent PMU interrupts and to allow
  * counters to be added or removed.
  */
-u64 hw_perf_save_disable(void)
+void hw_perf_disable(void)
 {
 	struct cpu_hw_counters *cpuhw;
 	unsigned long ret;
@@ -428,7 +428,6 @@ u64 hw_perf_save_disable(void)
 		mb();
 	}
 	local_irq_restore(flags);
-	return ret;
 }
 
 /*
@@ -436,7 +435,7 @@ u64 hw_perf_save_disable(void)
  * If we were previously disabled and counters were added, then
  * put the new config on the PMU.
  */
-void hw_perf_restore(u64 disable)
+void hw_perf_enable(void)
 {
 	struct perf_counter *counter;
 	struct cpu_hw_counters *cpuhw;
@@ -448,9 +447,12 @@ void hw_perf_restore(u64 disable)
 	int n_lim;
 	int idx;
 
-	if (disable)
-		return;
 	local_irq_save(flags);
+	if (!cpuhw->disabled) {
+		local_irq_restore(flags);
+		return;
+	}
+
 	cpuhw = &__get_cpu_var(cpu_hw_counters);
 	cpuhw->disabled = 0;
 
@@ -649,19 +651,18 @@ int hw_perf_group_sched_in(struct perf_counter *group_leader,
 /*
  * Add a counter to the PMU.
  * If all counters are not already frozen, then we disable and
- * re-enable the PMU in order to get hw_perf_restore to do the
+ * re-enable the PMU in order to get hw_perf_enable to do the
  * actual work of reconfiguring the PMU.
  */
 static int power_pmu_enable(struct perf_counter *counter)
 {
 	struct cpu_hw_counters *cpuhw;
 	unsigned long flags;
-	u64 pmudis;
 	int n0;
 	int ret = -EAGAIN;
 
 	local_irq_save(flags);
-	pmudis = hw_perf_save_disable();
+	perf_disable();
 
 	/*
 	 * Add the counter to the list (if there is room)
@@ -685,7 +686,7 @@ static int power_pmu_enable(struct perf_counter *counter)
 
 	ret = 0;
  out:
-	hw_perf_restore(pmudis);
+	perf_enable();
 	local_irq_restore(flags);
 	return ret;
 }
@@ -697,11 +698,10 @@ static void power_pmu_disable(struct perf_counter *counter)
 {
 	struct cpu_hw_counters *cpuhw;
 	long i;
-	u64 pmudis;
 	unsigned long flags;
 
 	local_irq_save(flags);
-	pmudis = hw_perf_save_disable();
+	perf_disable();
 
 	power_pmu_read(counter);
 
@@ -735,7 +735,7 @@ static void power_pmu_disable(struct perf_counter *counter)
 		cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
 	}
 
-	hw_perf_restore(pmudis);
+	perf_enable();
 	local_irq_restore(flags);
 }
 