author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-03-30 13:07:02 -0400
committer Ingo Molnar <mingo@elte.hu>                2009-04-06 03:30:36 -0400
commit    925d519ab82b6dd7aca9420d809ee83819c08db2
tree      aa05bd7eb607915aa691d5434ec74521b487b466 /include/linux/perf_counter.h
parent    53cfbf593758916aac41db728f029986a62f1254
perf_counter: unify and fix delayed counter wakeup
While going over the wakeup code I noticed that delayed wakeups only work for hardware counters, but basically all software counters rely on them. This patch unifies and generalizes the delayed wakeup to fix this issue.

Since we're dealing with NMI context bits here, use a cmpxchg()-based singly-linked list implementation to track counters that have pending wakeups.

[ This should really be generic code for delayed wakeups, but since we cannot use cmpxchg()/xchg() in generic code, I've let it live in the perf_counter code. -- Eric Dumazet could use it to aggregate the network wakeups. ]

Furthermore, the x86 method of using TIF flags was flawed in that it's quite possible to end up setting the bit on the idle task, losing the wakeup. The powerpc method uses per-cpu storage and does appear to be sufficient.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Paul Mackerras <paulus@samba.org>
Orig-LKML-Reference: <20090330171023.153932974@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
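For readers unfamiliar with the pattern the commit message describes, here is a minimal sketch of an NMI-safe, cmpxchg()-based singly-linked list push. This is illustrative only, not the patch's actual implementation: the global list head pending_head and the function name perf_pending_queue are assumptions for illustration (the real code, per the message above, lives in the perf_counter code and, following the powerpc approach, may well use per-cpu storage instead of a single global head).

	/*
	 * Sketch only (kernel context assumed): push an entry onto a
	 * singly-linked pending list in a way that is safe even if an
	 * NMI on the same CPU races with us.
	 */
	static struct perf_wakeup_entry *pending_head;	/* assumed list head */

	static void perf_pending_queue(struct perf_wakeup_entry *entry)
	{
		struct perf_wakeup_entry *old;

		do {
			old = pending_head;	/* snapshot the current head */
			entry->next = old;	/* link ourselves in front */
			/*
			 * cmpxchg() only commits the new head if nothing,
			 * not even an NMI, changed it since the snapshot;
			 * otherwise loop and retry with the fresh head.
			 */
		} while (cmpxchg(&pending_head, old, entry) != old);
	}

The retry loop is the whole trick: a plain "pending_head = entry" store could be torn apart by an NMI firing between the read and the write, whereas cmpxchg() detects the interleaving and retries.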
Diffstat (limited to 'include/linux/perf_counter.h')
-rw-r--r--  include/linux/perf_counter.h | 15 ++++++++++-----
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 6bf67ce17625..0d833228eee5 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -275,6 +275,10 @@ struct perf_mmap_data {
 	void			*data_pages[0];
 };
 
+struct perf_wakeup_entry {
+	struct perf_wakeup_entry *next;
+};
+
 /**
  * struct perf_counter - performance counter kernel representation:
  */
@@ -350,7 +354,7 @@ struct perf_counter {
 	/* poll related */
 	wait_queue_head_t		waitq;
 	/* optional: for NMIs */
-	int				wakeup_pending;
+	struct perf_wakeup_entry	wakeup;
 
 	void (*destroy)(struct perf_counter *);
 	struct rcu_head			rcu_head;
@@ -427,7 +431,7 @@ extern void perf_counter_task_sched_out(struct task_struct *task, int cpu);
 extern void perf_counter_task_tick(struct task_struct *task, int cpu);
 extern void perf_counter_init_task(struct task_struct *child);
 extern void perf_counter_exit_task(struct task_struct *child);
-extern void perf_counter_notify(struct pt_regs *regs);
+extern void perf_counter_do_pending(void);
 extern void perf_counter_print_debug(void);
 extern void perf_counter_unthrottle(void);
 extern u64 hw_perf_save_disable(void);
@@ -461,7 +465,7 @@ static inline void
 perf_counter_task_tick(struct task_struct *task, int cpu)		{ }
 static inline void perf_counter_init_task(struct task_struct *child)	{ }
 static inline void perf_counter_exit_task(struct task_struct *child)	{ }
-static inline void perf_counter_notify(struct pt_regs *regs)		{ }
+static inline void perf_counter_do_pending(void)			{ }
 static inline void perf_counter_print_debug(void)			{ }
 static inline void perf_counter_unthrottle(void)			{ }
 static inline void hw_perf_restore(u64 ctrl)				{ }
@@ -469,8 +473,9 @@ static inline u64 hw_perf_save_disable(void)	{ return 0; }
 static inline int perf_counter_task_disable(void)	{ return -EINVAL; }
 static inline int perf_counter_task_enable(void)	{ return -EINVAL; }
 
-static inline void perf_swcounter_event(u32 event, u64 nr,
-					int nmi, struct pt_regs *regs)	{ }
+static inline void
+perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs)	{ }
+
 #endif
 
 #endif /* __KERNEL__ */
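To connect the two halves of the API change, here is a sketch of how the drain side declared above, perf_counter_do_pending(), might walk the pending list. The struct members waitq and wakeup come straight from the diff; the xchg()-detach idiom and the pending_head name are assumptions carried over from the earlier sketch, not the patch's verbatim code.

	/*
	 * Sketch of the drain side: atomically detach the whole list
	 * with xchg(), then issue the delayed wakeups from ordinary
	 * (non-NMI) context.
	 */
	void perf_counter_do_pending(void)
	{
		struct perf_wakeup_entry *list = xchg(&pending_head, NULL);

		while (list) {
			struct perf_wakeup_entry *next = list->next;
			struct perf_counter *counter =
				container_of(list, struct perf_counter, wakeup);

			wake_up(&counter->waitq);	/* the delayed wakeup */
			list = next;
		}
	}

Detaching the entire list in one xchg() means the drain never races with concurrent pushes: new entries queued while we are walking simply land on a fresh list and are handled by the next invocation.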