Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--  kernel/trace/ring_buffer.c  82
1 file changed, 59 insertions, 23 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index ed3559944fc..589b3eedfa6 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -620,12 +620,6 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
 	kfree(cpu_buffer);
 }
 
-/*
- * Causes compile errors if the struct buffer_page gets bigger
- * than the struct page.
- */
-extern int ring_buffer_page_too_big(void);
-
 #ifdef CONFIG_HOTPLUG_CPU
 static int rb_cpu_notify(struct notifier_block *self,
 			 unsigned long action, void *hcpu);
@@ -648,11 +642,6 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 	int bsize;
 	int cpu;
 
-	/* Paranoid! Optimizes out when all is well */
-	if (sizeof(struct buffer_page) > sizeof(struct page))
-		ring_buffer_page_too_big();
-
-
 	/* keep it in its own cache line */
 	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
 			 GFP_KERNEL);
@@ -668,8 +657,8 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 	buffer->reader_lock_key = key;
 
 	/* need at least two pages */
-	if (buffer->pages == 1)
-		buffer->pages++;
+	if (buffer->pages < 2)
+		buffer->pages = 2;
 
 	/*
 	 * In case of non-hotplug cpu, if the ring-buffer is allocated
@@ -1013,7 +1002,7 @@ rb_event_index(struct ring_buffer_event *event)
 {
 	unsigned long addr = (unsigned long)event;
 
-	return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
+	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
 }
 
 static inline int
@@ -1334,9 +1323,6 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 
 	/* We reserved something on the buffer */
 
-	if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
-		return NULL;
-
 	event = __rb_page_index(tail_page, tail);
 	rb_update_event(event, type, length);
 
@@ -2480,6 +2466,21 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
 
+static inline int rb_ok_to_lock(void)
+{
+	/*
+	 * If an NMI die dumps out the content of the ring buffer
+	 * do not grab locks. We also permanently disable the ring
+	 * buffer too. A one time deal is all you get from reading
+	 * the ring buffer from an NMI.
+	 */
+	if (likely(!in_nmi() && !oops_in_progress))
+		return 1;
+
+	tracing_off_permanent();
+	return 0;
+}
+
 /**
  * ring_buffer_peek - peek at the next event to be read
  * @buffer: The ring buffer to read
@@ -2495,14 +2496,20 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 	struct ring_buffer_event *event;
 	unsigned long flags;
+	int dolock;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
+	dolock = rb_ok_to_lock();
  again:
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	local_irq_save(flags);
+	if (dolock)
+		spin_lock(&cpu_buffer->reader_lock);
 	event = rb_buffer_peek(buffer, cpu, ts);
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	if (dolock)
+		spin_unlock(&cpu_buffer->reader_lock);
+	local_irq_restore(flags);
 
 	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
 		cpu_relax();
@@ -2554,6 +2561,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event = NULL;
 	unsigned long flags;
+	int dolock;
+
+	dolock = rb_ok_to_lock();
 
  again:
 	/* might be called in atomic */
@@ -2563,7 +2573,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 		goto out;
 
 	cpu_buffer = buffer->buffers[cpu];
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	local_irq_save(flags);
+	if (dolock)
+		spin_lock(&cpu_buffer->reader_lock);
 
 	event = rb_buffer_peek(buffer, cpu, ts);
 	if (!event)
@@ -2572,7 +2584,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 	rb_advance_reader(cpu_buffer);
 
  out_unlock:
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	if (dolock)
+		spin_unlock(&cpu_buffer->reader_lock);
+	local_irq_restore(flags);
 
  out:
 	preempt_enable();
@@ -2770,12 +2784,25 @@ EXPORT_SYMBOL_GPL(ring_buffer_reset);
 int ring_buffer_empty(struct ring_buffer *buffer)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
+	unsigned long flags;
+	int dolock;
 	int cpu;
+	int ret;
+
+	dolock = rb_ok_to_lock();
 
 	/* yes this is racy, but if you don't like the race, lock the buffer */
 	for_each_buffer_cpu(buffer, cpu) {
 		cpu_buffer = buffer->buffers[cpu];
-		if (!rb_per_cpu_empty(cpu_buffer))
+		local_irq_save(flags);
+		if (dolock)
+			spin_lock(&cpu_buffer->reader_lock);
+		ret = rb_per_cpu_empty(cpu_buffer);
+		if (dolock)
+			spin_unlock(&cpu_buffer->reader_lock);
+		local_irq_restore(flags);
+
+		if (!ret)
 			return 0;
 	}
 
@@ -2791,14 +2818,23 @@ EXPORT_SYMBOL_GPL(ring_buffer_empty);
 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
+	unsigned long flags;
+	int dolock;
 	int ret;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 1;
 
+	dolock = rb_ok_to_lock();
+
 	cpu_buffer = buffer->buffers[cpu];
+	local_irq_save(flags);
+	if (dolock)
+		spin_lock(&cpu_buffer->reader_lock);
 	ret = rb_per_cpu_empty(cpu_buffer);
-
+	if (dolock)
+		spin_unlock(&cpu_buffer->reader_lock);
+	local_irq_restore(flags);
 
 	return ret;
 }
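
The common thread in the hunks above: each reader-side path (peek, consume, and the empty checks) still disables interrupts, but takes the reader spinlock only when rb_ok_to_lock() says it is safe; from NMI or oops context the lock is skipped and tracing is disabled permanently. The sketch below is only a user-space analogue of that pattern for illustration, not kernel code: dump_in_progress, trace_enabled, ok_to_lock() and read_event() are hypothetical stand-ins for in_nmi()/oops_in_progress, the tracing switch, rb_ok_to_lock() and the real peek/consume paths.

/* User-space sketch of the conditional-locking pattern in the patch.
 * Assumption: dump_in_progress models in_nmi() || oops_in_progress and
 * trace_enabled models the permanent tracing switch. Build: cc -pthread */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t reader_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int dump_in_progress;   /* set when a crash dump is reading */
static atomic_int trace_enabled = 1;  /* cleared for good on a dump read */

static int ok_to_lock(void)
{
	/* Normal context: taking the reader lock is safe. */
	if (!atomic_load(&dump_in_progress))
		return 1;
	/* Dump context: never block on the lock; stop writers permanently. */
	atomic_store(&trace_enabled, 0);
	return 0;
}

static int read_event(void)
{
	int dolock = ok_to_lock();
	int event;

	if (dolock)
		pthread_mutex_lock(&reader_lock);
	event = 42;		/* placeholder for the real peek/consume work */
	if (dolock)
		pthread_mutex_unlock(&reader_lock);
	return event;
}

int main(void)
{
	printf("normal read: %d\n", read_event());
	atomic_store(&dump_in_progress, 1);
	printf("lockless dump read: %d\n", read_event());
	printf("tracing still enabled? %d\n", atomic_load(&trace_enabled));
	return 0;
}

The trade-off matches the comment in rb_ok_to_lock(): a lockless read may race with writers, which is tolerable exactly once for a crash dump, so the buffer is shut off permanently afterwards.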