author    Peter Zijlstra <peterz@infradead.org>    2014-02-11 10:01:16 -0500
committer Thomas Gleixner <tglx@linutronix.de>     2014-02-21 15:49:07 -0500
commit    cd578abb24aa67ce468c427d3356c08ea32cf768 (patch)
tree      974a97cebfc368e8bee9c1beccbbd9bda00d89ef
parent    90ed5b0fa5eb96e1cbb34aebf6a9ed96ee1587ec (diff)
perf/x86: Warn to early_printk() in case irq_work is too slow
On Mon, Feb 10, 2014 at 08:45:16AM -0800, Dave Hansen wrote:

> The reason I coded this up was that NMIs were firing off so fast that
> nothing else was getting a chance to run. With this patch, at least the
> printk() would come out and I'd have some idea what was going on.

It will start spewing to early_printk() (which is a lot nicer to use
from NMI context too) when it fails to queue the IRQ-work because it's
already enqueued.

It does have the false-positive for when two CPUs trigger the warn
concurrently, but that should be rare and some extra clutter on the
early printk shouldn't be a problem.

Cc: hpa@zytor.com
Cc: tglx@linutronix.de
Cc: dzickus@redhat.com
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: mingo@kernel.org
Fixes: 6a02ad66b2c4 ("perf/x86: Push the duration-logging printk() to IRQ context")
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20140211150116.GO27965@twins.programming.kicks-ass.net
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
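For context, a minimal sketch of the fallback pattern the changelog describes, written against the new bool-returning irq_work_queue(); the report_overload()/overload_work names and the NMI-side caller are illustrative only and not part of the patch:

#include <linux/irq_work.h>
#include <linux/printk.h>

/* Deferred reporter: runs later in IRQ context, where printk() is safe. */
static void report_overload(struct irq_work *w)
{
	printk_ratelimited(KERN_WARNING "overload detected\n");
}

static DEFINE_IRQ_WORK(overload_work, report_overload);

/* Hypothetical NMI-context path, e.g. a PMU overflow handler. */
static void nmi_saw_overload(void)
{
	/*
	 * irq_work_queue() now returns false when the work is still
	 * pending from an earlier call, i.e. the deferred printk() has
	 * not managed to run yet.  Fall back to early_printk(), which
	 * is usable from NMI context, so the warning is not lost.
	 */
	if (!irq_work_queue(&overload_work))
		early_printk("overload detected, irq_work still pending\n");
}

As the changelog notes, two CPUs racing on the same pending work item can both take the early_printk() path, so the fallback may fire more than once for a single event; that duplication is accepted as rare clutter.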
-rw-r--r--  include/linux/irq_work.h  2
-rw-r--r--  kernel/events/core.c      9
-rw-r--r--  kernel/irq_work.c         6
3 files changed, 12 insertions, 5 deletions
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index add13c8624b7..19ae05d4b8ec 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -32,7 +32,7 @@ void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
 
 #define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { .func = (_f), }
 
-void irq_work_queue(struct irq_work *work);
+bool irq_work_queue(struct irq_work *work);
 void irq_work_run(void);
 void irq_work_sync(struct irq_work *work);
 
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 2067cbb378eb..45e5543e2a1e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -243,7 +243,7 @@ static void perf_duration_warn(struct irq_work *w)
 	printk_ratelimited(KERN_WARNING
 			"perf interrupt took too long (%lld > %lld), lowering "
 			"kernel.perf_event_max_sample_rate to %d\n",
-			avg_local_sample_len, allowed_ns,
+			avg_local_sample_len, allowed_ns >> 1,
 			sysctl_perf_event_sample_rate);
 }
 
@@ -283,7 +283,12 @@ void perf_sample_event_took(u64 sample_len_ns)
 
 	update_perf_cpu_limits();
 
-	irq_work_queue(&perf_duration_work);
+	if (!irq_work_queue(&perf_duration_work)) {
+		early_printk("perf interrupt took too long (%lld > %lld), lowering "
+			     "kernel.perf_event_max_sample_rate to %d\n",
+			     avg_local_sample_len, allowed_ns >> 1,
+			     sysctl_perf_event_sample_rate);
+	}
 }
 
 static atomic64_t perf_event_id;
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 55fcce6065cf..a82170e2fa78 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -61,11 +61,11 @@ void __weak arch_irq_work_raise(void)
  *
  * Can be re-enqueued while the callback is still in progress.
  */
-void irq_work_queue(struct irq_work *work)
+bool irq_work_queue(struct irq_work *work)
 {
 	/* Only queue if not already pending */
 	if (!irq_work_claim(work))
-		return;
+		return false;
 
 	/* Queue the entry and raise the IPI if needed. */
 	preempt_disable();
@@ -83,6 +83,8 @@ void irq_work_queue(struct irq_work *work)
 	}
 
 	preempt_enable();
+
+	return true;
 }
 EXPORT_SYMBOL_GPL(irq_work_queue);
 
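Beyond the perf use above, the new return value lets any caller notice when a request coalesced with one that is still pending. A small sketch under assumed, hypothetical names (flush_fn, flush_work, kick_flush, coalesced), not code from this patch:

#include <linux/irq_work.h>
#include <linux/atomic.h>

static void flush_fn(struct irq_work *w)
{
	/* perform the deferred work in IRQ context */
}

static DEFINE_IRQ_WORK(flush_work, flush_fn);
static atomic_t coalesced = ATOMIC_INIT(0);

static void kick_flush(void)
{
	/*
	 * A false return means flush_work was already pending, so this
	 * request merges with the earlier one; count it for statistics.
	 */
	if (!irq_work_queue(&flush_work))
		atomic_inc(&coalesced);
}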