aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux
diff options
context:
space:
mode:
authorMike Galbraith <efault@gmx.de>2009-01-23 04:13:01 -0500
committerIngo Molnar <mingo@elte.hu>2009-01-23 05:33:18 -0500
commit1b023a96d9b44f50f4d8ff28c15f5b80e354760f (patch)
tree8b6a5956c4461c13c2e2b3769096afac5b767524 /include/linux
parent05e3423c8577126800841bc55de8a509f2433dca (diff)
perfcounters: throttle on too high IRQ rates
Starting kerneltop with only -c 100 seems to be a bad idea; it can easily lock the system due to perfcounter IRQ overload. So add throttling: if a new IRQ arrives in a shorter than PERFMON_MIN_PERIOD_NS time, turn off perfcounters and unthrottle them from the next timer tick. Signed-off-by: Mike Galbraith <efault@gmx.de> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/perf_counter.h4
1 files changed, 4 insertions, 0 deletions
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 33ba9fe0a781..91f1ca4c01c0 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -254,6 +254,7 @@ extern void perf_counter_init_task(struct task_struct *child);
254extern void perf_counter_exit_task(struct task_struct *child); 254extern void perf_counter_exit_task(struct task_struct *child);
255extern void perf_counter_notify(struct pt_regs *regs); 255extern void perf_counter_notify(struct pt_regs *regs);
256extern void perf_counter_print_debug(void); 256extern void perf_counter_print_debug(void);
257extern void perf_counter_unthrottle(void);
257extern u64 hw_perf_save_disable(void); 258extern u64 hw_perf_save_disable(void);
258extern void hw_perf_restore(u64 ctrl); 259extern void hw_perf_restore(u64 ctrl);
259extern int perf_counter_task_disable(void); 260extern int perf_counter_task_disable(void);
@@ -270,6 +271,8 @@ static inline int is_software_counter(struct perf_counter *counter)
270 return !counter->hw_event.raw && counter->hw_event.type < 0; 271 return !counter->hw_event.raw && counter->hw_event.type < 0;
271} 272}
272 273
274#define PERFMON_MIN_PERIOD_NS 10000
275
273#else 276#else
274static inline void 277static inline void
275perf_counter_task_sched_in(struct task_struct *task, int cpu) { } 278perf_counter_task_sched_in(struct task_struct *task, int cpu) { }
@@ -281,6 +284,7 @@ static inline void perf_counter_init_task(struct task_struct *child) { }
281static inline void perf_counter_exit_task(struct task_struct *child) { } 284static inline void perf_counter_exit_task(struct task_struct *child) { }
282static inline void perf_counter_notify(struct pt_regs *regs) { } 285static inline void perf_counter_notify(struct pt_regs *regs) { }
283static inline void perf_counter_print_debug(void) { } 286static inline void perf_counter_print_debug(void) { }
287static inline void perf_counter_unthrottle(void) { }
284static inline void hw_perf_restore(u64 ctrl) { } 288static inline void hw_perf_restore(u64 ctrl) { }
285static inline u64 hw_perf_save_disable(void) { return 0; } 289static inline u64 hw_perf_save_disable(void) { return 0; }
286static inline int perf_counter_task_disable(void) { return -EINVAL; } 290static inline int perf_counter_task_disable(void) { return -EINVAL; }