author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-03-30 13:07:02 -0400
committer Ingo Molnar <mingo@elte.hu>    2009-04-06 03:30:36 -0400
commit    925d519ab82b6dd7aca9420d809ee83819c08db2 (patch)
tree      aa05bd7eb607915aa691d5434ec74521b487b466 /arch/powerpc/kernel/perf_counter.c
parent    53cfbf593758916aac41db728f029986a62f1254 (diff)
perf_counter: unify and fix delayed counter wakeup
While going over the wakeup code I noticed delayed wakeups only work for
hardware counters, but basically all software counters rely on them.

This patch unifies and generalizes the delayed wakeup to fix this issue.

Since we're dealing with NMI context bits here, use a cmpxchg() based
single link list implementation to track counters that have pending
wakeups.

[ This should really be generic code for delayed wakeups, but since we
  cannot use cmpxchg()/xchg() in generic code, I've let it live in the
  perf_counter code. -- Eric Dumazet could use it to aggregate the
  network wakeups. ]

Furthermore, the x86 method of using TIF flags was flawed in that it's
quite possible to end up setting the bit on the idle task, losing the
wakeup. The powerpc method uses per-cpu storage and does appear to be
sufficient.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Paul Mackerras <paulus@samba.org>
Orig-LKML-Reference: <20090330171023.153932974@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
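For illustration, a minimal sketch of the cmpxchg()-based single link
list the changelog describes, on the enqueue side. This is not the
patch's exact code: the names (perf_wakeup_entry, perf_wakeup_head,
PENDING_TAIL, perf_pending_queue) and the set_perf_counter_pending()
helper are assumptions for the sketch, inferred from the test/clear
helpers visible in the diff below.

struct perf_wakeup_entry {
	struct perf_wakeup_entry *next;
};

/*
 * Sketch only: a non-NULL ->next marks an entry as already queued,
 * so the list is terminated by a sentinel rather than NULL.
 */
#define PENDING_TAIL ((struct perf_wakeup_entry *)-1UL)

static DEFINE_PER_CPU(struct perf_wakeup_entry *, perf_wakeup_head) = PENDING_TAIL;

static void perf_pending_queue(struct perf_wakeup_entry *entry)
{
	struct perf_wakeup_entry **head, *prev;

	/*
	 * Claim the entry: only one context may queue it.  A single
	 * atomic cmpxchg() makes this safe against NMIs re-entering.
	 */
	if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
		return;

	head = &get_cpu_var(perf_wakeup_head);

	/* Lock-free push; retry if an NMI pushed another entry meanwhile. */
	do {
		prev = entry->next = *head;
	} while (cmpxchg(head, prev, entry) != prev);

	set_perf_counter_pending();	/* arm the delayed wakeup */

	put_cpu_var(perf_wakeup_head);
}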
Diffstat (limited to 'arch/powerpc/kernel/perf_counter.c')
-rw-r--r--    arch/powerpc/kernel/perf_counter.c    22
1 file changed, 2 insertions, 20 deletions
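The diff below removes the powerpc-local perf_counter_do_pending()
loop over hardware counters; in the unified scheme a generic drain
would atomically take the whole pending list and wake each counter.
A hedged sketch, reusing the hypothetical names from the sketch above
and assuming the entry is embedded in struct perf_counter as a
'wakeup' field:

void perf_counter_do_pending(void)
{
	struct perf_wakeup_entry *list;
	struct perf_counter *counter;

	/* Atomically take the entire per-cpu pending list. */
	list = xchg(&__get_cpu_var(perf_wakeup_head), PENDING_TAIL);

	while (list != PENDING_TAIL) {
		counter = container_of(list, struct perf_counter, wakeup);
		list = list->next;

		/* Mark unqueued before waking, so it can be re-queued. */
		counter->wakeup.next = NULL;
		wake_up(&counter->waitq);
	}
}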
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index df007fe0cc0..cde720fc495 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -650,24 +650,6 @@ hw_perf_counter_init(struct perf_counter *counter)
 }
 
 /*
- * Handle wakeups.
- */
-void perf_counter_do_pending(void)
-{
-	int i;
-	struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
-	struct perf_counter *counter;
-
-	for (i = 0; i < cpuhw->n_counters; ++i) {
-		counter = cpuhw->counter[i];
-		if (counter && counter->wakeup_pending) {
-			counter->wakeup_pending = 0;
-			wake_up(&counter->waitq);
-		}
-	}
-}
-
-/*
  * A counter has overflowed; update its count and record
  * things if requested. Note that interrupts are hard-disabled
  * here so there is no possibility of being interrupted.
@@ -720,7 +702,7 @@ static void perf_counter_interrupt(struct pt_regs *regs)
 	struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
 	struct perf_counter *counter;
 	long val;
-	int need_wakeup = 0, found = 0;
+	int found = 0;
 
 	for (i = 0; i < cpuhw->n_counters; ++i) {
 		counter = cpuhw->counter[i];
@@ -761,7 +743,7 @@ static void perf_counter_interrupt(struct pt_regs *regs)
 	 * immediately; otherwise we'll have do the wakeup when interrupts
 	 * get soft-enabled.
 	 */
-	if (get_perf_counter_pending() && regs->softe) {
+	if (test_perf_counter_pending() && regs->softe) {
 		irq_enter();
 		clear_perf_counter_pending();
 		perf_counter_do_pending();