path: root/arch/powerpc
author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-03-30 13:07:02 -0400
committer Ingo Molnar <mingo@elte.hu>                2009-04-06 03:30:36 -0400
commit    925d519ab82b6dd7aca9420d809ee83819c08db2 (patch)
tree      aa05bd7eb607915aa691d5434ec74521b487b466 /arch/powerpc
parent    53cfbf593758916aac41db728f029986a62f1254 (diff)
perf_counter: unify and fix delayed counter wakeup
While going over the wakeup code I noticed that delayed wakeups only work for hardware counters, but basically all software counters rely on them. This patch unifies and generalizes the delayed wakeup to fix this issue.

Since we're dealing with NMI context bits here, use a cmpxchg()-based singly linked list implementation to track counters that have pending wakeups.

[ This should really be generic code for delayed wakeups, but since we cannot use cmpxchg()/xchg() in generic code, I've let it live in the perf_counter code. -- Eric Dumazet could use it to aggregate the network wakeups. ]

Furthermore, the x86 method of using TIF flags was flawed in that it's quite possible to end up setting the bit on the idle task, losing the wakeup. The powerpc method uses per-cpu storage and does appear to be sufficient.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Paul Mackerras <paulus@samba.org>
Orig-LKML-Reference: <20090330171023.153932974@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
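The cmpxchg()-based pending list described above lives in the generic perf_counter code, so it does not appear in this arch/powerpc-only diff. As a rough illustration of the idea only (hypothetical names, C11 atomics standing in for the kernel's cmpxchg()/xchg(), not the patch's actual code): producers push an entry onto a singly linked list with a single compare-and-swap, which is safe from contexts that cannot take locks such as NMI, and the consumer later detaches the whole list with one exchange and walks it.

/*
 * Minimal userspace sketch of a cmpxchg-based pending list (illustrative
 * names only -- not the kernel's actual structures).  An entry is pushed
 * with a single compare-and-swap, so the push is safe from NMI-like
 * context; the consumer detaches the whole list atomically and walks it.
 */
#include <stdatomic.h>
#include <stdio.h>

struct pending_entry {
	struct pending_entry *next;
	int id;			/* stand-in for the counter to wake up */
};

static _Atomic(struct pending_entry *) pending_head;

/* Lock-free push: retry the CAS until we link ourselves in at the head. */
static void pending_queue(struct pending_entry *entry)
{
	struct pending_entry *old = atomic_load(&pending_head);

	do {
		entry->next = old;
	} while (!atomic_compare_exchange_weak(&pending_head, &old, entry));
}

/* Consumer: detach the whole list at once, then process each entry. */
static void pending_run(void)
{
	struct pending_entry *entry =
		atomic_exchange(&pending_head, (struct pending_entry *)NULL);

	while (entry) {
		struct pending_entry *next = entry->next;

		printf("waking counter %d\n", entry->id);  /* wake_up() in the kernel */
		entry = next;
	}
}

int main(void)
{
	struct pending_entry a = { .id = 1 }, b = { .id = 2 };

	pending_queue(&a);
	pending_queue(&b);
	pending_run();		/* prints 2 then 1 (LIFO order) */
	return 0;
}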
Diffstat (limited to 'arch/powerpc')
-rw-r--r--   arch/powerpc/include/asm/hw_irq.h     4
-rw-r--r--   arch/powerpc/kernel/irq.c             2
-rw-r--r--   arch/powerpc/kernel/perf_counter.c   22
3 files changed, 5 insertions(+), 23 deletions(-)
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index cb32d571c9c..20a44d0c9fd 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -132,7 +132,7 @@ static inline int irqs_disabled_flags(unsigned long flags)
 struct irq_chip;
 
 #ifdef CONFIG_PERF_COUNTERS
-static inline unsigned long get_perf_counter_pending(void)
+static inline unsigned long test_perf_counter_pending(void)
 {
 	unsigned long x;
 
@@ -160,7 +160,7 @@ extern void perf_counter_do_pending(void);
 
 #else
 
-static inline unsigned long get_perf_counter_pending(void)
+static inline unsigned long test_perf_counter_pending(void)
 {
 	return 0;
 }
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 469e9635ff0..2cd471f92fe 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -135,7 +135,7 @@ notrace void raw_local_irq_restore(unsigned long en)
 		iseries_handle_interrupts();
 	}
 
-	if (get_perf_counter_pending()) {
+	if (test_perf_counter_pending()) {
 		clear_perf_counter_pending();
 		perf_counter_do_pending();
 	}
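The two hunks above are the consumer side of the per-cpu pending flag the commit message credits to powerpc: the PMU interrupt sets the flag when it cannot do the wakeup directly, and raw_local_irq_restore() replays the deferred work once interrupts are soft-enabled again. A minimal stand-alone sketch of that control flow (plain C, hypothetical names, a single flag in place of the kernel's per-CPU/paca field):

/*
 * Simplified model of the pending-flag pattern above.  The real kernel
 * keeps the flag in per-CPU data; one flag and hypothetical names are
 * used here just to show the control flow.
 */
#include <stdbool.h>
#include <stdio.h>

static volatile bool perf_pending;	/* stands in for the per-CPU byte */
static bool irqs_soft_enabled;

static bool test_pending(void)  { return perf_pending; }
static void set_pending(void)   { perf_pending = true; }
static void clear_pending(void) { perf_pending = false; }

/* PMU interrupt: do the work now if interrupts were soft-enabled,
 * otherwise just mark it pending. */
static void pmu_interrupt(void)
{
	if (irqs_soft_enabled)
		printf("wakeup done immediately\n");
	else
		set_pending();
}

/* Interrupts being soft-enabled again (raw_local_irq_restore() in the
 * real code): replay any work that was deferred. */
static void local_irq_restore_model(void)
{
	irqs_soft_enabled = true;
	if (test_pending()) {
		clear_pending();
		printf("deferred wakeup done on soft-enable\n");
	}
}

int main(void)
{
	irqs_soft_enabled = false;	/* interrupts soft-disabled */
	pmu_interrupt();		/* work is deferred */
	local_irq_restore_model();	/* deferred wakeup runs here */
	return 0;
}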
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index df007fe0cc0..cde720fc495 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -650,24 +650,6 @@ hw_perf_counter_init(struct perf_counter *counter)
 }
 
 /*
- * Handle wakeups.
- */
-void perf_counter_do_pending(void)
-{
-	int i;
-	struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
-	struct perf_counter *counter;
-
-	for (i = 0; i < cpuhw->n_counters; ++i) {
-		counter = cpuhw->counter[i];
-		if (counter && counter->wakeup_pending) {
-			counter->wakeup_pending = 0;
-			wake_up(&counter->waitq);
-		}
-	}
-}
-
-/*
  * A counter has overflowed; update its count and record
  * things if requested. Note that interrupts are hard-disabled
  * here so there is no possibility of being interrupted.
@@ -720,7 +702,7 @@ static void perf_counter_interrupt(struct pt_regs *regs)
 	struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
 	struct perf_counter *counter;
 	long val;
-	int need_wakeup = 0, found = 0;
+	int found = 0;
 
 	for (i = 0; i < cpuhw->n_counters; ++i) {
 		counter = cpuhw->counter[i];
@@ -761,7 +743,7 @@ static void perf_counter_interrupt(struct pt_regs *regs)
 	 * immediately; otherwise we'll have do the wakeup when interrupts
 	 * get soft-enabled.
 	 */
-	if (get_perf_counter_pending() && regs->softe) {
+	if (test_perf_counter_pending() && regs->softe) {
 		irq_enter();
 		clear_perf_counter_pending();
 		perf_counter_do_pending();