diff options
author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2009-05-13 10:21:38 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-05-15 03:47:02 -0400 |
commit | 9e35ad388bea89f7d6f375af4c0ae98803688666 (patch) | |
tree | 9abbce9f6c9a914b1ea8d8dae82e159366030e4a /include/linux/perf_counter.h | |
parent | 962bf7a66edca4d36a730a38ff8410a67f560e40 (diff) |
perf_counter: Rework the perf counter disable/enable
The current disable/enable mechanism is:
token = hw_perf_save_disable();
...
/* do bits */
...
hw_perf_restore(token);
This works well, provided that the usage nests properly. Except we don't.
x86 NMI/INT throttling has non-nested use of this, breaking things. Therefore
provide a reference counter disable/enable interface, where the first disable
disables the hardware, and the last enable enables the hardware again.
[ Impact: refactor, simplify the PMU disable/enable logic ]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/perf_counter.h')
-rw-r--r-- | include/linux/perf_counter.h | 10 |
1 file changed, 6 insertions, 4 deletions
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 614f921d616a..e543ecc129f1 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h | |||
@@ -544,8 +544,10 @@ extern void perf_counter_exit_task(struct task_struct *child); | |||
544 | extern void perf_counter_do_pending(void); | 544 | extern void perf_counter_do_pending(void); |
545 | extern void perf_counter_print_debug(void); | 545 | extern void perf_counter_print_debug(void); |
546 | extern void perf_counter_unthrottle(void); | 546 | extern void perf_counter_unthrottle(void); |
547 | extern u64 hw_perf_save_disable(void); | 547 | extern void __perf_disable(void); |
548 | extern void hw_perf_restore(u64 ctrl); | 548 | extern bool __perf_enable(void); |
549 | extern void perf_disable(void); | ||
550 | extern void perf_enable(void); | ||
549 | extern int perf_counter_task_disable(void); | 551 | extern int perf_counter_task_disable(void); |
550 | extern int perf_counter_task_enable(void); | 552 | extern int perf_counter_task_enable(void); |
551 | extern int hw_perf_group_sched_in(struct perf_counter *group_leader, | 553 | extern int hw_perf_group_sched_in(struct perf_counter *group_leader, |
@@ -600,8 +602,8 @@ static inline void perf_counter_exit_task(struct task_struct *child) { } | |||
600 | static inline void perf_counter_do_pending(void) { } | 602 | static inline void perf_counter_do_pending(void) { } |
601 | static inline void perf_counter_print_debug(void) { } | 603 | static inline void perf_counter_print_debug(void) { } |
602 | static inline void perf_counter_unthrottle(void) { } | 604 | static inline void perf_counter_unthrottle(void) { } |
603 | static inline void hw_perf_restore(u64 ctrl) { } | 605 | static inline void perf_disable(void) { } |
604 | static inline u64 hw_perf_save_disable(void) { return 0; } | 606 | static inline void perf_enable(void) { } |
605 | static inline int perf_counter_task_disable(void) { return -EINVAL; } | 607 | static inline int perf_counter_task_disable(void) { return -EINVAL; } |
606 | static inline int perf_counter_task_enable(void) { return -EINVAL; } | 608 | static inline int perf_counter_task_enable(void) { return -EINVAL; } |
607 | 609 | ||