author		Linus Torvalds <torvalds@linux-foundation.org>	2018-12-31 14:46:59 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-31 14:46:59 -0500
commit		495d714ad140e1732e66c45d0409054b24c1a0d6 (patch)
tree		373ec6619adea47d848d36f140b32def27164bbd /kernel/trace/ring_buffer.c
parent		f12e840c819bab42621685558a01d3f46ab9a226 (diff)
parent		3d739c1f6156c70eb0548aa288dcfbac9e0bd162 (diff)
Merge tag 'trace-v4.21' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:

 - Rework of the kprobe/uprobe and synthetic events to consolidate all
   the dynamic event code. This will make changes in the future easier.

 - Partial rewrite of the function graph tracing infrastructure. This
   will allow multiple users to hook onto functions and get a callback
   when the function returns. This is the ground work for having
   kprobes and the function graph tracer use one code base.

 - Clean up of the histogram code that will facilitate adding more
   features to the histograms in the future.

 - Addition of str_has_prefix() and a few use cases. There is currently
   a similar function, strstart(), used in a few places, but it only
   returns a bool and not a length. Those instances will be converted
   to str_has_prefix() in the future.

 - A few other various clean ups as well.

* tag 'trace-v4.21' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (57 commits)
  tracing: Use the return of str_has_prefix() to remove open coded numbers
  tracing: Have the histogram use the result of str_has_prefix() for len of prefix
  tracing: Use str_has_prefix() instead of using fixed sizes
  tracing: Use str_has_prefix() helper for histogram code
  string.h: Add str_has_prefix() helper function
  tracing: Make function ‘ftrace_exports’ static
  tracing: Simplify printf'ing in seq_print_sym
  tracing: Avoid -Wformat-nonliteral warning
  tracing: Merge seq_print_sym_short() and seq_print_sym_offset()
  tracing: Add hist trigger comments for variable-related fields
  tracing: Remove hist trigger synth_var_refs
  tracing: Use hist trigger's var_ref array to destroy var_refs
  tracing: Remove open-coding of hist trigger var_ref management
  tracing: Use var_refs[] for hist trigger reference checking
  tracing: Change strlen to sizeof for hist trigger static strings
  tracing: Remove unnecessary hist trigger struct field
  tracing: Fix ftrace_graph_get_ret_stack() to use task and not current
  seq_buf: Use size_t for len in seq_buf_puts()
  seq_buf: Make seq_buf_puts() null-terminate the buffer
  arm64: Use ftrace_graph_get_ret_stack() instead of curr_ret_stack
  ...
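As described above, the point of str_has_prefix() is that it returns the length of the matched prefix rather than a bool, so callers can advance past the prefix without open-coding its size. A minimal userspace sketch of that behavior (the sample command string and the main() driver are illustrative, not taken from the series):

#include <stdio.h>
#include <string.h>

/* Sketch: returns the length of @prefix if @str starts with it, else 0,
 * so a match and the offset to continue parsing come from one call. */
static size_t str_has_prefix(const char *str, const char *prefix)
{
	size_t len = strlen(prefix);

	return strncmp(str, prefix, len) == 0 ? len : 0;
}

int main(void)
{
	const char *cmd = "onmatch(sched.sched_waking)";	/* assumed input */
	size_t len = str_has_prefix(cmd, "onmatch(");

	if (len)	/* len == 8: parsing continues right after the prefix */
		printf("args: %s\n", cmd + len);
	return 0;
}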
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--	kernel/trace/ring_buffer.c	| 94
1 file changed, 81 insertions(+), 13 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 4f3247a53259..06e864a334bb 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -487,6 +487,10 @@ struct ring_buffer_per_cpu {
 	local_t				dropped_events;
 	local_t				committing;
 	local_t				commits;
+	local_t				pages_touched;
+	local_t				pages_read;
+	long				last_pages_touch;
+	size_t				shortest_full;
 	unsigned long			read;
 	unsigned long			read_bytes;
 	u64				write_stamp;
@@ -529,6 +533,41 @@ struct ring_buffer_iter {
 	u64				read_stamp;
 };
 
+/**
+ * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
+ * @buffer: The ring_buffer to get the number of pages from
+ * @cpu: The cpu of the ring_buffer to get the number of pages from
+ *
+ * Returns the number of pages used by a per_cpu buffer of the ring buffer.
+ */
+size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu)
+{
+	return buffer->buffers[cpu]->nr_pages;
+}
+
+/**
+ * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
+ * @buffer: The ring_buffer to get the number of pages from
+ * @cpu: The cpu of the ring_buffer to get the number of pages from
+ *
+ * Returns the number of pages that have content in the ring buffer.
+ */
+size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu)
+{
+	size_t read;
+	size_t cnt;
+
+	read = local_read(&buffer->buffers[cpu]->pages_read);
+	cnt = local_read(&buffer->buffers[cpu]->pages_touched);
+	/* The reader can read an empty page, but not more than that */
+	if (cnt < read) {
+		WARN_ON_ONCE(read > cnt + 1);
+		return 0;
+	}
+
+	return cnt - read;
+}
+
 /*
  * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
  *
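To make the accounting in the hunk above concrete: pages_touched counts pages the writer has moved onto, pages_read counts pages the reader has swapped out, and the dirty count is their difference. A small userspace sketch with assumed counter values (the helper name and driver are illustrative):

#include <stdio.h>

/* Sketch of the dirty-page arithmetic above, outside the kernel. */
static size_t nr_dirty(size_t pages_touched, size_t pages_read)
{
	/* The reader can read an empty page, but not more than that,
	 * so pages_read == pages_touched + 1 is legal and means 0 dirty. */
	if (pages_touched < pages_read)
		return 0;
	return pages_touched - pages_read;
}

int main(void)
{
	printf("%zu\n", nr_dirty(5, 2));	/* 3 pages still hold data */
	printf("%zu\n", nr_dirty(0, 1));	/* reader took an empty page: 0 */
	return 0;
}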
@@ -556,7 +595,7 @@ static void rb_wake_up_waiters(struct irq_work *work)
  * as data is added to any of the @buffer's cpu buffers. Otherwise
  * it will wait for data to be added to a specific cpu buffer.
  */
-int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
+int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full)
 {
 	struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
 	DEFINE_WAIT(wait);
@@ -571,7 +610,7 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
 	if (cpu == RING_BUFFER_ALL_CPUS) {
 		work = &buffer->irq_work;
 		/* Full only makes sense on per cpu reads */
-		full = false;
+		full = 0;
 	} else {
 		if (!cpumask_test_cpu(cpu, buffer->cpumask))
 			return -ENODEV;
@@ -623,15 +662,22 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
 		    !ring_buffer_empty_cpu(buffer, cpu)) {
 			unsigned long flags;
 			bool pagebusy;
+			size_t nr_pages;
+			size_t dirty;
 
 			if (!full)
 				break;
 
 			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 			pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
+			nr_pages = cpu_buffer->nr_pages;
+			dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
+			if (!cpu_buffer->shortest_full ||
+			    cpu_buffer->shortest_full < full)
+				cpu_buffer->shortest_full = full;
 			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
-
-			if (!pagebusy)
+			if (!pagebusy &&
+			    (!nr_pages || (dirty * 100) > full * nr_pages))
 				break;
 		}
 
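The hunk above is where full stops being a bool: it now means "percentage of the per-cpu buffer that must contain data before the waiter is woken". The cross-multiplied integer test dirty * 100 > full * nr_pages is equivalent to dirty / nr_pages > full / 100 without floating point. A worked userspace example under assumed values (8 pages, a 50% threshold):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	size_t nr_pages = 8;	/* assumed per-cpu buffer size */
	size_t full = 50;	/* caller asked to wait for 50% full */
	size_t dirty;

	for (dirty = 0; dirty <= nr_pages; dirty++) {
		/* Same condition as the diff: !nr_pages guards division
		 * by a zero-sized buffer; otherwise compare percentages. */
		bool wake = !nr_pages || (dirty * 100) > full * nr_pages;

		printf("dirty=%zu -> %s\n", dirty, wake ? "wake" : "wait");
	}
	/* Wakes once dirty * 100 > 400, i.e. at 5 of 8 pages (62.5%):
	 * the threshold is strictly exceeded, not merely reached. */
	return 0;
}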
@@ -1054,6 +1100,7 @@ static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
 	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
 	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
 
+	local_inc(&cpu_buffer->pages_touched);
 	/*
 	 * Just make sure we have seen our old_write and synchronize
 	 * with any interrupts that come in.
@@ -2586,7 +2633,9 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
 static __always_inline void
 rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
 {
-	bool pagebusy;
+	size_t nr_pages;
+	size_t dirty;
+	size_t full;
 
 	if (buffer->irq_work.waiters_pending) {
 		buffer->irq_work.waiters_pending = false;
@@ -2600,14 +2649,27 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
 		irq_work_queue(&cpu_buffer->irq_work.work);
 	}
 
-	pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
+	if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched))
+		return;
 
-	if (!pagebusy && cpu_buffer->irq_work.full_waiters_pending) {
-		cpu_buffer->irq_work.wakeup_full = true;
-		cpu_buffer->irq_work.full_waiters_pending = false;
-		/* irq_work_queue() supplies it's own memory barriers */
-		irq_work_queue(&cpu_buffer->irq_work.work);
-	}
+	if (cpu_buffer->reader_page == cpu_buffer->commit_page)
+		return;
+
+	if (!cpu_buffer->irq_work.full_waiters_pending)
+		return;
+
+	cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);
+
+	full = cpu_buffer->shortest_full;
+	nr_pages = cpu_buffer->nr_pages;
+	dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu);
+	if (full && nr_pages && (dirty * 100) <= full * nr_pages)
+		return;
+
+	cpu_buffer->irq_work.wakeup_full = true;
+	cpu_buffer->irq_work.full_waiters_pending = false;
+	/* irq_work_queue() supplies it's own memory barriers */
+	irq_work_queue(&cpu_buffer->irq_work.work);
 }
 
 /*
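Note that the restructured rb_wakeups() above also throttles itself: it caches pages_touched in last_pages_touch and bails out early until the writer has crossed onto a new page, so the percentage test is not re-evaluated on every single event commit. A simplified userspace sketch of that change-detection pattern (names mirror the diff; the sampling driver is illustrative, and the real code updates the cache only when full waiters are pending):

#include <stdio.h>

static long last_pages_touch = -1;	/* -1 forces the first evaluation */

/* Returns 1 only when pages_touched advanced since the last call. */
static int should_check_waiters(long pages_touched)
{
	if (pages_touched == last_pages_touch)
		return 0;
	last_pages_touch = pages_touched;
	return 1;
}

int main(void)
{
	long samples[] = { 0, 0, 1, 1, 2 };	/* assumed commit sequence */

	for (int i = 0; i < 5; i++)
		printf("touched=%ld -> %s\n", samples[i],
		       should_check_waiters(samples[i]) ? "evaluate" : "skip");
	return 0;
}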
@@ -3732,13 +3794,15 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 		goto spin;
 
 	/*
-	 * Yeah! We succeeded in replacing the page.
+	 * Yay! We succeeded in replacing the page.
 	 *
 	 * Now make the new head point back to the reader page.
 	 */
 	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
 	rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
 
+	local_inc(&cpu_buffer->pages_read);
+
 	/* Finally update the reader page to the new head */
 	cpu_buffer->reader_page = reader;
 	cpu_buffer->reader_page->read = 0;
@@ -4334,6 +4398,10 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 	local_set(&cpu_buffer->entries, 0);
 	local_set(&cpu_buffer->committing, 0);
 	local_set(&cpu_buffer->commits, 0);
+	local_set(&cpu_buffer->pages_touched, 0);
+	local_set(&cpu_buffer->pages_read, 0);
+	cpu_buffer->last_pages_touch = 0;
+	cpu_buffer->shortest_full = 0;
 	cpu_buffer->read = 0;
 	cpu_buffer->read_bytes = 0;
 