author     Peter Zijlstra <a.p.zijlstra@chello.nl>   2009-03-30 13:07:02 -0400
committer  Ingo Molnar <mingo@elte.hu>               2009-04-06 03:30:36 -0400
commit     925d519ab82b6dd7aca9420d809ee83819c08db2
tree       aa05bd7eb607915aa691d5434ec74521b487b466  /include/linux/perf_counter.h
parent     53cfbf593758916aac41db728f029986a62f1254
perf_counter: unify and fix delayed counter wakeup
While going over the wakeup code I noticed that delayed wakeups only
work for hardware counters, even though basically all software
counters rely on them.
This patch unifies and generalizes the delayed wakeup to fix this
issue.
Since we're dealing with NMI context bits here, use a cmpxchg()-based
singly linked list implementation to track counters that have pending
wakeups.
[ This should really be generic code for delayed wakeups, but since we
cannot use cmpxchg()/xchg() in generic code, I've let it live in the
perf_counter code. -- Eric Dumazet could use it to aggregate the
network wakeups. ]
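[ Illustration only, not part of the patch: a minimal user-space
  sketch of the push side of such a list, using C11 atomics in place
  of the kernel's cmpxchg(). All names here are made up for the
  example. ]

#include <stdatomic.h>
#include <stddef.h>

struct wakeup_entry {
	struct wakeup_entry *next;
};

/* Head of the pending-wakeup list; NULL when empty. */
static _Atomic(struct wakeup_entry *) pending_head;

/*
 * Producer side, safe to call from NMI context: no locks, just a
 * retried compare-and-swap that links the entry in at the head.
 */
static void pending_queue(struct wakeup_entry *entry)
{
	struct wakeup_entry *old = atomic_load(&pending_head);

	do {
		entry->next = old;
	} while (!atomic_compare_exchange_weak(&pending_head, &old, entry));
}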
Furthermore, the x86 method of using TIF flags was flawed in that it's
quite possible to end up setting the bit on the idle task, losing the
wakeup.
The powerpc method uses per-cpu storage and does appear to be
sufficient.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Paul Mackerras <paulus@samba.org>
Orig-LKML-Reference: <20090330171023.153932974@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/perf_counter.h')
-rw-r--r--  include/linux/perf_counter.h | 15 ++++++++++-----
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 6bf67ce17625..0d833228eee5 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -275,6 +275,10 @@ struct perf_mmap_data {
 	void				*data_pages[0];
 };
 
+struct perf_wakeup_entry {
+	struct perf_wakeup_entry	*next;
+};
+
 /**
  * struct perf_counter - performance counter kernel representation:
  */
@@ -350,7 +354,7 @@ struct perf_counter {
 	/* poll related */
 	wait_queue_head_t		waitq;
 	/* optional: for NMIs */
-	int				wakeup_pending;
+	struct perf_wakeup_entry	wakeup;
 
 	void (*destroy)(struct perf_counter *);
 	struct rcu_head			rcu_head;
@@ -427,7 +431,7 @@ extern void perf_counter_task_sched_out(struct task_struct *task, int cpu);
 extern void perf_counter_task_tick(struct task_struct *task, int cpu);
 extern void perf_counter_init_task(struct task_struct *child);
 extern void perf_counter_exit_task(struct task_struct *child);
-extern void perf_counter_notify(struct pt_regs *regs);
+extern void perf_counter_do_pending(void);
 extern void perf_counter_print_debug(void);
 extern void perf_counter_unthrottle(void);
 extern u64 hw_perf_save_disable(void);
@@ -461,7 +465,7 @@ static inline void
 perf_counter_task_tick(struct task_struct *task, int cpu)		{ }
 static inline void perf_counter_init_task(struct task_struct *child)	{ }
 static inline void perf_counter_exit_task(struct task_struct *child)	{ }
-static inline void perf_counter_notify(struct pt_regs *regs)		{ }
+static inline void perf_counter_do_pending(void)			{ }
 static inline void perf_counter_print_debug(void)			{ }
 static inline void perf_counter_unthrottle(void)			{ }
 static inline void hw_perf_restore(u64 ctrl)				{ }
@@ -469,8 +473,9 @@ static inline u64 hw_perf_save_disable(void)	{ return 0; }
 static inline int perf_counter_task_disable(void)	{ return -EINVAL; }
 static inline int perf_counter_task_enable(void)	{ return -EINVAL; }
 
-static inline void perf_swcounter_event(u32 event, u64 nr,
-					int nmi, struct pt_regs *regs)	{ }
+static inline void
+perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs)	{ }
+
 #endif
 
 #endif /* __KERNEL__ */
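[ For context: the implementation behind the new
  perf_counter_do_pending() declaration lives in kernel/perf_counter.c
  and is not part of this header diff. Below is a hypothetical
  user-space sketch of the drain side of the same list, continuing the
  illustration from the commit message; all names are again made up. ]

#include <stdatomic.h>
#include <stddef.h>

struct wakeup_entry {
	struct wakeup_entry *next;
};

static _Atomic(struct wakeup_entry *) pending_head;

/*
 * Consumer side, run from a safe (non-NMI) context: one atomic
 * exchange detaches the entire pending list, which can then be walked
 * without racing against new NMI-time insertions.
 */
static void pending_do_all(void)
{
	struct wakeup_entry *list = atomic_exchange(&pending_head, NULL);

	while (list) {
		struct wakeup_entry *next = list->next;

		/*
		 * The kernel would issue the delayed wake-up here,
		 * e.g. waking the waitq of the counter that embeds
		 * this entry.
		 */
		list = next;
	}
}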