diff options
 include/linux/irq_work.h |  2 ++
 kernel/events/core.c     | 28 +++++++++++++++++++-------
 2 files changed, 23 insertions(+), 7 deletions(-)
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 66017028dcb3..add13c8624b7 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -30,6 +30,8 @@ void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
 	work->func = func;
 }
 
+#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { .func = (_f), }
+
 void irq_work_queue(struct irq_work *work);
 void irq_work_run(void);
 void irq_work_sync(struct irq_work *work);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 56003c6edfd3..2067cbb378eb 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -231,11 +231,29 @@ int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
 #define NR_ACCUMULATED_SAMPLES 128
 static DEFINE_PER_CPU(u64, running_sample_length);
 
-void perf_sample_event_took(u64 sample_len_ns)
+static void perf_duration_warn(struct irq_work *w)
 {
+	u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
 	u64 avg_local_sample_len;
 	u64 local_samples_len;
+
+	local_samples_len = __get_cpu_var(running_sample_length);
+	avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;
+
+	printk_ratelimited(KERN_WARNING
+			"perf interrupt took too long (%lld > %lld), lowering "
+			"kernel.perf_event_max_sample_rate to %d\n",
+			avg_local_sample_len, allowed_ns,
+			sysctl_perf_event_sample_rate);
+}
+
+static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);
+
+void perf_sample_event_took(u64 sample_len_ns)
+{
 	u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
+	u64 avg_local_sample_len;
+	u64 local_samples_len;
 
 	if (allowed_ns == 0)
 		return;
@@ -263,13 +281,9 @@ void perf_sample_event_took(u64 sample_len_ns)
 	sysctl_perf_event_sample_rate = max_samples_per_tick * HZ;
 	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
 
-	printk_ratelimited(KERN_WARNING
-			"perf samples too long (%lld > %lld), lowering "
-			"kernel.perf_event_max_sample_rate to %d\n",
-			avg_local_sample_len, allowed_ns,
-			sysctl_perf_event_sample_rate);
-
 	update_perf_cpu_limits();
+
+	irq_work_queue(&perf_duration_work);
 }
 
 static atomic64_t perf_event_id;