author		Steven Rostedt <srostedt@redhat.com>	2009-06-16 21:22:48 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2009-06-17 14:16:27 -0400
commit		8d707e8eb4de4b930573155ab4df4b3270ee25dd (patch)
tree		2594b38c73fa1c944587f62a2f9ce70fe4f90ce4 /kernel
parent		d47882078f05c2cb46b85f1e12a58ed9315b9d63 (diff)
ring-buffer: do not grab locks in nmi
If ftrace_dump_on_oops is set and an NMI detects a lockup, the NMI handler needs to read from the ring buffer. But the read side of the ring buffer still takes locks.

This patch adds a check on the read side: if the read happens in NMI context, it permanently disables the ring buffer and does not take any locks. Reads can still happen on a disabled ring buffer.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
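For context, this read path is what a dump-on-oops exercises. A minimal sketch of a per-CPU drain loop over this API (ring_buffer_consume() is the real reader entry point shown in the hunks below; dump_event() is a hypothetical printer, not a kernel function):

/* Sketch: drain one CPU's events the way a dump-on-oops path would.
 * In NMI/oops context, the rb_ok_to_lock() check added below makes
 * these reads lockless and permanently disables the buffer first. */
static void dump_cpu_events(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts)))
		dump_event(event, ts);	/* hypothetical consumer */
}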
Diffstat (limited to 'kernel')
kernel/trace/ring_buffer.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++++++--------
1 file changed, 51 insertions(+), 8 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 969f7cbe8e93..589b3eedfa67 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2466,6 +2466,21 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
 
+static inline int rb_ok_to_lock(void)
+{
+	/*
+	 * If an NMI die dumps out the content of the ring buffer
+	 * do not grab locks. We also permanently disable the ring
+	 * buffer too. A one time deal is all you get from reading
+	 * the ring buffer from an NMI.
+	 */
+	if (likely(!in_nmi() && !oops_in_progress))
+		return 1;
+
+	tracing_off_permanent();
+	return 0;
+}
+
 /**
  * ring_buffer_peek - peek at the next event to be read
  * @buffer: The ring buffer to read
@@ -2481,14 +2496,20 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 	struct ring_buffer_event *event;
 	unsigned long flags;
+	int dolock;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
+	dolock = rb_ok_to_lock();
  again:
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	local_irq_save(flags);
+	if (dolock)
+		spin_lock(&cpu_buffer->reader_lock);
 	event = rb_buffer_peek(buffer, cpu, ts);
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	if (dolock)
+		spin_unlock(&cpu_buffer->reader_lock);
+	local_irq_restore(flags);
 
 	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
 		cpu_relax();
@@ -2540,6 +2561,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event = NULL;
 	unsigned long flags;
+	int dolock;
+
+	dolock = rb_ok_to_lock();
 
  again:
 	/* might be called in atomic */
@@ -2549,7 +2573,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 		goto out;
 
 	cpu_buffer = buffer->buffers[cpu];
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	local_irq_save(flags);
+	if (dolock)
+		spin_lock(&cpu_buffer->reader_lock);
 
 	event = rb_buffer_peek(buffer, cpu, ts);
 	if (!event)
@@ -2558,7 +2584,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 	rb_advance_reader(cpu_buffer);
 
  out_unlock:
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	if (dolock)
+		spin_unlock(&cpu_buffer->reader_lock);
+	local_irq_restore(flags);
 
  out:
 	preempt_enable();
@@ -2757,15 +2785,23 @@ int ring_buffer_empty(struct ring_buffer *buffer)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	unsigned long flags;
+	int dolock;
 	int cpu;
 	int ret;
 
+	dolock = rb_ok_to_lock();
+
 	/* yes this is racy, but if you don't like the race, lock the buffer */
 	for_each_buffer_cpu(buffer, cpu) {
 		cpu_buffer = buffer->buffers[cpu];
-		spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+		local_irq_save(flags);
+		if (dolock)
+			spin_lock(&cpu_buffer->reader_lock);
 		ret = rb_per_cpu_empty(cpu_buffer);
-		spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+		if (dolock)
+			spin_unlock(&cpu_buffer->reader_lock);
+		local_irq_restore(flags);
+
 		if (!ret)
 			return 0;
 	}
@@ -2783,15 +2819,22 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	unsigned long flags;
+	int dolock;
 	int ret;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 1;
 
+	dolock = rb_ok_to_lock();
+
 	cpu_buffer = buffer->buffers[cpu];
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	local_irq_save(flags);
+	if (dolock)
+		spin_lock(&cpu_buffer->reader_lock);
 	ret = rb_per_cpu_empty(cpu_buffer);
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	if (dolock)
+		spin_unlock(&cpu_buffer->reader_lock);
+	local_irq_restore(flags);
 
 	return ret;
 }
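The transformation at every read-side call site is the same: spin_lock_irqsave() is split into an unconditional local_irq_save() plus a spin_lock() taken only when rb_ok_to_lock() says it is safe, so local interrupts stay disabled across the read even when the cross-CPU lock is skipped. Condensed from the hunks above:

/* Before: always lock; deadlocks if the NMI interrupted the lock holder. */
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
ret = rb_per_cpu_empty(cpu_buffer);
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

/* After: IRQs off unconditionally, lock only outside NMI/oops context. */
local_irq_save(flags);
if (dolock)
	spin_lock(&cpu_buffer->reader_lock);
ret = rb_per_cpu_empty(cpu_buffer);
if (dolock)
	spin_unlock(&cpu_buffer->reader_lock);
local_irq_restore(flags);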