author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2009-03-30 13:07:02 -0400
committer Ingo Molnar <mingo@elte.hu>              2009-04-06 03:30:36 -0400
commit    925d519ab82b6dd7aca9420d809ee83819c08db2 (patch)
tree      aa05bd7eb607915aa691d5434ec74521b487b466 /arch/x86/kernel/cpu
parent    53cfbf593758916aac41db728f029986a62f1254 (diff)
perf_counter: unify and fix delayed counter wakeup
While going over the wakeup code I noticed delayed wakeups only work
for hardware counters, but basically all software counters rely on
them.

This patch unifies and generalizes the delayed wakeup to fix this
issue.

Since we're dealing with NMI context bits here, use a cmpxchg() based
single link list implementation to track counters that have pending
wakeups.

[ This should really be generic code for delayed wakeups, but since we
  cannot use cmpxchg()/xchg() in generic code, I've let it live in the
  perf_counter code. -- Eric Dumazet could use it to aggregate the
  network wakeups. ]

Furthermore, the x86 method of using TIF flags was flawed in that it's
quite possible to end up setting the bit on the idle task, losing the
wakeup.

The powerpc method uses per-cpu storage and does appear to be
sufficient.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Paul Mackerras <paulus@samba.org>
Orig-LKML-Reference: <20090330171023.153932974@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
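For illustration, here is a minimal user-space sketch of the
cmpxchg()-based single link list the message describes, using C11
atomics in place of the kernel's cmpxchg()/xchg(). All names here
(struct pending, pending_head, pending_push, pending_run) are invented
for this sketch and are not the kernel's:

	/*
	 * Producers (NMI context in the kernel) push entries onto a
	 * global head with a lock-free CAS loop; the consumer
	 * atomically takes the whole list with an exchange and walks
	 * it outside NMI context.
	 */
	#include <stdatomic.h>
	#include <stdio.h>

	struct pending {
		struct pending *next;
		int id;
	};

	static _Atomic(struct pending *) pending_head;

	/* NMI-safe push: never blocks, never takes a lock. */
	static void pending_push(struct pending *p)
	{
		struct pending *old = atomic_load(&pending_head);

		do {
			p->next = old;	/* link in front of current head */
		} while (!atomic_compare_exchange_weak(&pending_head, &old, p));
	}

	/* Consumer: atomically steal the whole list, then walk it. */
	static void pending_run(void)
	{
		struct pending *p = atomic_exchange(&pending_head, NULL);

		while (p) {
			struct pending *next = p->next;

			printf("waking counter %d\n", p->id); /* wake_up() stand-in */
			p = next;
		}
	}

	int main(void)
	{
		struct pending a = { .next = NULL, .id = 1 };
		struct pending b = { .next = NULL, .id = 2 };

		pending_push(&a);
		pending_push(&b);
		pending_run();	/* prints 2 then 1: pushes are LIFO */
		return 0;
	}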
Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c | 29 ---
1 file changed, 0 insertions(+), 29 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 3f95b0cdc550..7aab177fb566 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -227,7 +227,6 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 	 */
 	hwc->config |= pmc_ops->event_map(perf_event_id(hw_event));
 	}
-	counter->wakeup_pending = 0;
 
 	return 0;
 }
@@ -773,34 +772,6 @@ void smp_perf_counter_interrupt(struct pt_regs *regs)
 	irq_exit();
 }
 
-/*
- * This handler is triggered by NMI contexts:
- */
-void perf_counter_notify(struct pt_regs *regs)
-{
-	struct cpu_hw_counters *cpuc;
-	unsigned long flags;
-	int bit, cpu;
-
-	local_irq_save(flags);
-	cpu = smp_processor_id();
-	cpuc = &per_cpu(cpu_hw_counters, cpu);
-
-	for_each_bit(bit, cpuc->used, X86_PMC_IDX_MAX) {
-		struct perf_counter *counter = cpuc->counters[bit];
-
-		if (!counter)
-			continue;
-
-		if (counter->wakeup_pending) {
-			counter->wakeup_pending = 0;
-			wake_up(&counter->waitq);
-		}
-	}
-
-	local_irq_restore(flags);
-}
-
 void perf_counters_lapic_init(int nmi)
 {
 	u32 apic_val;
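
A note on the TIF-flag flaw the commit message mentions: a per-task
flag set from NMI context lands on whatever task happens to be current
(possibly the idle task, which may never act on it), whereas per-cpu
storage is checked on that CPU's interrupt-return path regardless of
the current task. A minimal user-space sketch of the per-cpu pattern;
all names (perf_pending, do_pending_wakeups, check_perf_pending) are
invented for illustration and do not reflect the kernel's API:

	#include <stdbool.h>
	#include <stdio.h>

	#define NR_CPUS 4

	/* One pending flag per CPU instead of one TIF bit per task. */
	static bool perf_pending[NR_CPUS];

	/* Stand-in for draining the pending-wakeup list shown earlier. */
	static void do_pending_wakeups(void)
	{
		puts("draining pending wakeups");
	}

	/* NMI side: mark work on the CPU the NMI hit, whichever task runs there. */
	static void set_perf_pending(int cpu)
	{
		perf_pending[cpu] = true;
	}

	/* Interrupt-return path of each CPU: always sees the flag, even
	 * if the idle task was current when the NMI fired. */
	static void check_perf_pending(int cpu)
	{
		if (perf_pending[cpu]) {
			perf_pending[cpu] = false;
			do_pending_wakeups();
		}
	}

	int main(void)
	{
		set_perf_pending(2);
		check_perf_pending(2);	/* drains once */
		check_perf_pending(2);	/* flag already cleared; silent */
		return 0;
	}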