author     Steven Rostedt (VMware) <rostedt@goodmis.org>   2018-11-29 20:32:26 -0500
committer  Steven Rostedt (VMware) <rostedt@goodmis.org>   2018-12-08 20:54:08 -0500
commit     2c2b0a78b373908926e4683ea5571332f63c0eb5 (patch)
tree       4ffbcfc1abdc5569e7f444425c86948f1cd2426c /kernel/trace/ring_buffer.c
parent     9c8e2f6d3d361439cc6744a094f1c15681b55269 (diff)
ring-buffer: Add percentage of ring buffer full to wake up reader
Instead of just waiting for a page to be full before waking up a pending
reader, allow the reader to pass in a "percentage" of pages that must have
content before the reader is woken up. This should keep the process of
reading events from causing wake ups that constantly trigger further reads
of the buffer.
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
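
To make the new wake-up condition concrete, here is a small worked sketch
(illustration only; the helper name reader_should_wake is hypothetical, but the
arithmetic is the integer test this patch adds to ring_buffer_wait() and
rb_wakeups()):

    /*
     * Sketch of the fullness test introduced by this patch.  "full" is the
     * percentage of dirty pages the reader asked for; 0 keeps the old
     * behaviour of waking as soon as any data is present.
     */
    static bool reader_should_wake(size_t dirty, size_t nr_pages, int full)
    {
            if (!full || !nr_pages)
                    return true;
            /* e.g. nr_pages = 128, full = 25: first wake once dirty reaches 33 */
            return dirty * 100 > (size_t)full * nr_pages;
    }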
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--   kernel/trace/ring_buffer.c | 71
1 file changed, 66 insertions(+), 5 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 65bd4616220d..9edb628603ab 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -487,6 +487,9 @@ struct ring_buffer_per_cpu {
         local_t                         dropped_events;
         local_t                         committing;
         local_t                         commits;
+        local_t                         pages_touched;
+        local_t                         pages_read;
+        size_t                          shortest_full;
         unsigned long                   read;
         unsigned long                   read_bytes;
         u64                             write_stamp;
@@ -529,6 +532,41 @@ struct ring_buffer_iter {
         u64                             read_stamp;
 };

+/**
+ * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
+ * @buffer: The ring_buffer to get the number of pages from
+ * @cpu: The cpu of the ring_buffer to get the number of pages from
+ *
+ * Returns the number of pages used by a per_cpu buffer of the ring buffer.
+ */
+size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu)
+{
+        return buffer->buffers[cpu]->nr_pages;
+}
+
+/**
+ * ring_buffer_nr_pages_dirty - get the number of used pages in the ring buffer
+ * @buffer: The ring_buffer to get the number of pages from
+ * @cpu: The cpu of the ring_buffer to get the number of pages from
+ *
+ * Returns the number of pages that have content in the ring buffer.
+ */
+size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu)
+{
+        size_t read;
+        size_t cnt;
+
+        read = local_read(&buffer->buffers[cpu]->pages_read);
+        cnt = local_read(&buffer->buffers[cpu]->pages_touched);
+        /* The reader can read an empty page, but not more than that */
+        if (cnt < read) {
+                WARN_ON_ONCE(read > cnt + 1);
+                return 0;
+        }
+
+        return cnt - read;
+}
+
 /*
  * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
  *
@@ -556,7 +594,7 @@ static void rb_wake_up_waiters(struct irq_work *work)
  * as data is added to any of the @buffer's cpu buffers. Otherwise
  * it will wait for data to be added to a specific cpu buffer.
  */
-int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
+int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full)
 {
         struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
         DEFINE_WAIT(wait);
@@ -571,7 +609,7 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
         if (cpu == RING_BUFFER_ALL_CPUS) {
                 work = &buffer->irq_work;
                 /* Full only makes sense on per cpu reads */
-                full = false;
+                full = 0;
         } else {
                 if (!cpumask_test_cpu(cpu, buffer->cpumask))
                         return -ENODEV;
@@ -623,15 +661,22 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
                     !ring_buffer_empty_cpu(buffer, cpu)) {
                         unsigned long flags;
                         bool pagebusy;
+                        size_t nr_pages;
+                        size_t dirty;

                         if (!full)
                                 break;

                         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
                         pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
+                        nr_pages = cpu_buffer->nr_pages;
+                        dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
+                        if (!cpu_buffer->shortest_full ||
+                            cpu_buffer->shortest_full < full)
+                                cpu_buffer->shortest_full = full;
                         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
-
-                        if (!pagebusy)
+                        if (!pagebusy &&
+                            (!nr_pages || (dirty * 100) > full * nr_pages))
                                 break;
                 }

@@ -1054,6 +1099,7 @@ static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
         old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
         old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);

+        local_inc(&cpu_buffer->pages_touched);
         /*
          * Just make sure we have seen our old_write and synchronize
          * with any interrupts that come in.
@@ -2603,6 +2649,16 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
         pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;

         if (!pagebusy && cpu_buffer->irq_work.full_waiters_pending) {
+                size_t nr_pages;
+                size_t dirty;
+                size_t full;
+
+                full = cpu_buffer->shortest_full;
+                nr_pages = cpu_buffer->nr_pages;
+                dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu);
+                if (full && nr_pages && (dirty * 100) <= full * nr_pages)
+                        return;
+
                 cpu_buffer->irq_work.wakeup_full = true;
                 cpu_buffer->irq_work.full_waiters_pending = false;
                 /* irq_work_queue() supplies it's own memory barriers */
@@ -3732,13 +3788,15 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
                 goto spin;

         /*
-         * Yeah! We succeeded in replacing the page.
+         * Yay! We succeeded in replacing the page.
          *
          * Now make the new head point back to the reader page.
          */
         rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
         rb_inc_page(cpu_buffer, &cpu_buffer->head_page);

+        local_inc(&cpu_buffer->pages_read);
+
         /* Finally update the reader page to the new head */
         cpu_buffer->reader_page = reader;
         cpu_buffer->reader_page->read = 0;
@@ -4334,6 +4392,9 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
         local_set(&cpu_buffer->entries, 0);
         local_set(&cpu_buffer->committing, 0);
         local_set(&cpu_buffer->commits, 0);
+        local_set(&cpu_buffer->pages_touched, 0);
+        local_set(&cpu_buffer->pages_read, 0);
+        cpu_buffer->shortest_full = 0;
         cpu_buffer->read = 0;
         cpu_buffer->read_bytes = 0;

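
For reference, a caller-side sketch of the new integer argument (the wrapper
below is hypothetical; only ring_buffer_wait() itself is changed by this patch,
and RING_BUFFER_ALL_CPUS still forces full back to 0):

    /* Hypothetical example: block until at least half of this CPU's
     * sub-buffers contain data before starting a read pass. */
    static int wait_for_half_full(struct ring_buffer *buffer, int cpu)
    {
            return ring_buffer_wait(buffer, cpu, 50);
    }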