author | Mike Galbraith <efault@gmx.de> | 2009-01-23 04:13:01 -0500
committer | Ingo Molnar <mingo@elte.hu> | 2009-01-23 05:33:18 -0500
commit | 1b023a96d9b44f50f4d8ff28c15f5b80e354760f (patch)
tree | 8b6a5956c4461c13c2e2b3769096afac5b767524 /include/linux/perf_counter.h
parent | 05e3423c8577126800841bc55de8a509f2433dca (diff)
perfcounters: throttle on too high IRQ rates
Starting kerneltop with only -c 100 is a bad idea: it can easily
lock up the system due to perfcounter IRQ overload.
So add throttling: if a new IRQ arrives less than
PERFMON_MIN_PERIOD_NS after the previous one, turn off perfcounters
and unthrottle them from the next timer tick (sketched below).
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
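A minimal sketch of the throttle rule the message describes, not the
commit's actual arch code: the helper names (pmu_disable, pmu_enable,
perf_irq, perf_unthrottle) are hypothetical placeholders, and the real
implementation keeps this state per CPU inside the PMU interrupt
handler, which this page does not show.

/*
 * Sketch only -- hypothetical helpers, global state instead of
 * the kernel's per-CPU state.
 */
#include <stdint.h>

#define PERFMON_MIN_PERIOD_NS 10000

static uint64_t last_irq_ns; /* timestamp of the previous PMU IRQ */
static int throttled;        /* nonzero while counters are turned off */

static void pmu_disable(void) { /* placeholder: stop the counters */ }
static void pmu_enable(void)  { /* placeholder: restart the counters */ }

/* PMU interrupt path: back-to-back IRQs closer together than the
 * minimum period trip the throttle and shut the counters off. */
static void perf_irq(uint64_t now_ns)
{
	if (now_ns - last_irq_ns < PERFMON_MIN_PERIOD_NS) {
		pmu_disable();
		throttled = 1;
	}
	last_irq_ns = now_ns;
}

/* Timer tick path: the next tick re-enables throttled counters. */
static void perf_unthrottle(void)
{
	if (throttled) {
		throttled = 0;
		pmu_enable();
	}
}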
Diffstat (limited to 'include/linux/perf_counter.h')
-rw-r--r-- | include/linux/perf_counter.h | 4
1 file changed, 4 insertions, 0 deletions
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 33ba9fe0a781..91f1ca4c01c0 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -254,6 +254,7 @@ extern void perf_counter_init_task(struct task_struct *child);
 extern void perf_counter_exit_task(struct task_struct *child);
 extern void perf_counter_notify(struct pt_regs *regs);
 extern void perf_counter_print_debug(void);
+extern void perf_counter_unthrottle(void);
 extern u64 hw_perf_save_disable(void);
 extern void hw_perf_restore(u64 ctrl);
 extern int perf_counter_task_disable(void);
@@ -270,6 +271,8 @@ static inline int is_software_counter(struct perf_counter *counter)
 	return !counter->hw_event.raw && counter->hw_event.type < 0;
 }
 
+#define PERFMON_MIN_PERIOD_NS	10000
+
 #else
 static inline void
 perf_counter_task_sched_in(struct task_struct *task, int cpu) { }
@@ -281,6 +284,7 @@ static inline void perf_counter_init_task(struct task_struct *child) { }
 static inline void perf_counter_exit_task(struct task_struct *child) { }
 static inline void perf_counter_notify(struct pt_regs *regs) { }
 static inline void perf_counter_print_debug(void) { }
+static inline void perf_counter_unthrottle(void) { }
 static inline void hw_perf_restore(u64 ctrl) { }
 static inline u64 hw_perf_save_disable(void) { return 0; }
 static inline int perf_counter_task_disable(void) { return -EINVAL; }
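The paired declarations above follow the standard kernel header
pattern: a real prototype under CONFIG_PERF_COUNTERS and an empty
static inline stub otherwise, so call sites need no #ifdef. A
condensed sketch of that pattern; the tick_handler() call site is an
assumption for illustration, since the commit's actual caller is
outside the file shown on this page.

/* Condensed form of the header pattern added by this hunk. */
#ifdef CONFIG_PERF_COUNTERS
extern void perf_counter_unthrottle(void);
#else
static inline void perf_counter_unthrottle(void) { }
#endif

/* Hypothetical caller in the timer tick path: the call compiles to
 * a no-op when perfcounters are configured out. */
static void tick_handler(void)
{
	perf_counter_unthrottle();
}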