path: root/kernel/trace/ring_buffer.c
author     Linus Torvalds <torvalds@linux-foundation.org>  2013-04-29 16:55:38 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-04-29 16:55:38 -0400
commit     9e8529afc4518f4e5d610001545ebc97e1333c79 (patch)
tree       26e1aa2cbb50f3f511cfa7d8e39e6b7bd9221b68 /kernel/trace/ring_buffer.c
parent     ec25e246b94a3233ab064994ef05a170bdba0e7c (diff)
parent     4c69e6ea415a35eb7f0fc8ee9390c8f7436492a2 (diff)
Merge tag 'trace-3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
 "Along with the usual minor fixes and clean ups there are a few major
  changes with this pull request.

  1) Multiple buffers for the ftrace facility

     This feature has been requested by many people over the last few
     years.  I even heard that Google was about to implement it
     themselves.  I finally had time and cleaned up the code such that
     you can now create multiple instances of the ftrace buffer and
     have different events go to different buffers.  This way, a low
     frequency event will not be lost in the noise of a high frequency
     event.

     Note, currently only events can go to different buffers, the
     tracers (ie function, function_graph and the latency tracers)
     still can only be written to the main buffer.

  2) The function tracer triggers have now been extended.

     The function tracer had two triggers.  One to enable tracing when
     a function is hit, and one to disable tracing.  Now you can record
     a stack trace on a single (or many) function(s), take a snapshot
     of the buffer (copy it to the snapshot buffer), and you can enable
     or disable an event to be traced when a function is hit.

  3) A perf clock has been added.

     A "perf" clock can be chosen to be used when tracing.  This will
     cause ftrace to use the same clock as perf uses, and hopefully
     this will make it easier to interleave the perf and ftrace data
     for analysis."

* tag 'trace-3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (82 commits)
  tracepoints: Prevent null probe from being added
  tracing: Compare to 1 instead of zero for is_signed_type()
  tracing: Remove obsolete macro guard _TRACE_PROFILE_INIT
  ftrace: Get rid of ftrace_profile_bits
  tracing: Check return value of tracing_init_dentry()
  tracing: Get rid of unneeded key calculation in ftrace_hash_move()
  tracing: Reset ftrace_graph_filter_enabled if count is zero
  tracing: Fix off-by-one on allocating stat->pages
  kernel: tracing: Use strlcpy instead of strncpy
  tracing: Update debugfs README file
  tracing: Fix ftrace_dump()
  tracing: Rename trace_event_mutex to trace_event_sem
  tracing: Fix comment about prefix in arch_syscall_match_sym_name()
  tracing: Convert trace_destroy_fields() to static
  tracing: Move find_event_field() into trace_events.c
  tracing: Use TRACE_MAX_PRINT instead of constant
  tracing: Use pr_warn_once instead of open coded implementation
  ring-buffer: Add ring buffer startup selftest
  tracing: Bring Documentation/trace/ftrace.txt up to date
  tracing: Add "perf" trace_clock
  ...

Conflicts:
  kernel/trace/ftrace.c
  kernel/trace/trace.c
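For readers who want to try the three features called out above, here is a minimal user-space sketch, not part of this pull, showing one way to drive them through the tracing debugfs files. The instance name "foo", the sched_switch event, the kfree function, and the assumption that debugfs is mounted at /sys/kernel/debug are all illustrative placeholders.

/*
 * Illustrative sketch only: exercise the features described in the pull
 * message via the tracing debugfs interface.  Names and paths below are
 * example placeholders, not taken from the patch.
 */
#include <stdio.h>
#include <sys/stat.h>

#define TRACE_DIR "/sys/kernel/debug/tracing"

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* 1) A new ftrace buffer instance is created by making a directory. */
	mkdir(TRACE_DIR "/instances/foo", 0750);

	/* Route a low-frequency event into that instance's own buffer. */
	write_str(TRACE_DIR "/instances/foo/events/sched/sched_switch/enable", "1");

	/* 2) Function trigger: record a stack trace when kfree() is hit. */
	write_str(TRACE_DIR "/set_ftrace_filter", "kfree:stacktrace");

	/* 3) Use the same clock as perf to ease interleaving the data. */
	write_str(TRACE_DIR "/trace_clock", "perf");

	return 0;
}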
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--  kernel/trace/ring_buffer.c | 500
1 file changed, 494 insertions(+), 6 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 6989df2ba194..b59aea2c48c2 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -8,13 +8,16 @@
 #include <linux/trace_clock.h>
 #include <linux/trace_seq.h>
 #include <linux/spinlock.h>
+#include <linux/irq_work.h>
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/hardirq.h>
+#include <linux/kthread.h>	/* for self test */
 #include <linux/kmemcheck.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
 #include <linux/mutex.h>
+#include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/hash.h>
@@ -444,6 +447,12 @@ int ring_buffer_print_page_header(struct trace_seq *s)
 	return ret;
 }
 
+struct rb_irq_work {
+	struct irq_work			work;
+	wait_queue_head_t		waiters;
+	bool				waiters_pending;
+};
+
 /*
  * head_page == tail_page && head == tail then buffer is empty.
  */
@@ -478,6 +487,8 @@ struct ring_buffer_per_cpu {
 	struct list_head		new_pages; /* new pages to add */
 	struct work_struct		update_pages_work;
 	struct completion		update_done;
+
+	struct rb_irq_work		irq_work;
 };
 
 struct ring_buffer {
@@ -497,6 +508,8 @@ struct ring_buffer {
 	struct notifier_block		cpu_notify;
 #endif
 	u64				(*clock)(void);
+
+	struct rb_irq_work		irq_work;
 };
 
 struct ring_buffer_iter {
@@ -508,6 +521,118 @@ struct ring_buffer_iter {
 	u64				read_stamp;
 };
 
+/*
+ * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
+ *
+ * Schedules a delayed work to wake up any task that is blocked on the
+ * ring buffer waiters queue.
+ */
+static void rb_wake_up_waiters(struct irq_work *work)
+{
+	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
+
+	wake_up_all(&rbwork->waiters);
+}
+
+/**
+ * ring_buffer_wait - wait for input to the ring buffer
+ * @buffer: buffer to wait on
+ * @cpu: the cpu buffer to wait on
+ *
+ * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
+ * as data is added to any of the @buffer's cpu buffers. Otherwise
+ * it will wait for data to be added to a specific cpu buffer.
+ */
+void ring_buffer_wait(struct ring_buffer *buffer, int cpu)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	DEFINE_WAIT(wait);
+	struct rb_irq_work *work;
+
+	/*
+	 * Depending on what the caller is waiting for, either any
+	 * data in any cpu buffer, or a specific buffer, put the
+	 * caller on the appropriate wait queue.
+	 */
+	if (cpu == RING_BUFFER_ALL_CPUS)
+		work = &buffer->irq_work;
+	else {
+		cpu_buffer = buffer->buffers[cpu];
+		work = &cpu_buffer->irq_work;
+	}
+
+
+	prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
+
+	/*
+	 * The events can happen in critical sections where
+	 * checking a work queue can cause deadlocks.
+	 * After adding a task to the queue, this flag is set
+	 * only to notify events to try to wake up the queue
+	 * using irq_work.
+	 *
+	 * We don't clear it even if the buffer is no longer
+	 * empty. The flag only causes the next event to run
+	 * irq_work to do the work queue wake up. The worse
+	 * that can happen if we race with !trace_empty() is that
+	 * an event will cause an irq_work to try to wake up
+	 * an empty queue.
+	 *
+	 * There's no reason to protect this flag either, as
+	 * the work queue and irq_work logic will do the necessary
+	 * synchronization for the wake ups. The only thing
+	 * that is necessary is that the wake up happens after
+	 * a task has been queued. It's OK for spurious wake ups.
+	 */
+	work->waiters_pending = true;
+
+	if ((cpu == RING_BUFFER_ALL_CPUS && ring_buffer_empty(buffer)) ||
+	    (cpu != RING_BUFFER_ALL_CPUS && ring_buffer_empty_cpu(buffer, cpu)))
+		schedule();
+
+	finish_wait(&work->waiters, &wait);
+}
+
+/**
+ * ring_buffer_poll_wait - poll on buffer input
+ * @buffer: buffer to wait on
+ * @cpu: the cpu buffer to wait on
+ * @filp: the file descriptor
+ * @poll_table: The poll descriptor
+ *
+ * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
+ * as data is added to any of the @buffer's cpu buffers. Otherwise
+ * it will wait for data to be added to a specific cpu buffer.
+ *
+ * Returns POLLIN | POLLRDNORM if data exists in the buffers,
+ * zero otherwise.
+ */
+int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
+			  struct file *filp, poll_table *poll_table)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	struct rb_irq_work *work;
+
+	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
+	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
+		return POLLIN | POLLRDNORM;
+
+	if (cpu == RING_BUFFER_ALL_CPUS)
+		work = &buffer->irq_work;
+	else {
+		cpu_buffer = buffer->buffers[cpu];
+		work = &cpu_buffer->irq_work;
+	}
+
+	work->waiters_pending = true;
+	poll_wait(filp, &work->waiters, poll_table);
+
+	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
+	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
+		return POLLIN | POLLRDNORM;
+	return 0;
+}
+
 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
 #define RB_WARN_ON(b, cond)						\
 	({								\
@@ -1063,6 +1188,8 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
 	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
 	init_completion(&cpu_buffer->update_done);
+	init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
+	init_waitqueue_head(&cpu_buffer->irq_work.waiters);
 
 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
 			    GFP_KERNEL, cpu_to_node(cpu));
@@ -1158,6 +1285,9 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 	buffer->clock = trace_clock_local;
 	buffer->reader_lock_key = key;
 
+	init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
+	init_waitqueue_head(&buffer->irq_work.waiters);
+
 	/* need at least two pages */
 	if (nr_pages < 2)
 		nr_pages = 2;
@@ -1553,11 +1683,22 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
 		if (!cpu_buffer->nr_pages_to_update)
 			continue;
 
-		if (cpu_online(cpu))
+		/* The update must run on the CPU that is being updated. */
+		preempt_disable();
+		if (cpu == smp_processor_id() || !cpu_online(cpu)) {
+			rb_update_pages(cpu_buffer);
+			cpu_buffer->nr_pages_to_update = 0;
+		} else {
+			/*
+			 * Can not disable preemption for schedule_work_on()
+			 * on PREEMPT_RT.
+			 */
+			preempt_enable();
 			schedule_work_on(cpu,
 					&cpu_buffer->update_pages_work);
-		else
-			rb_update_pages(cpu_buffer);
+			preempt_disable();
+		}
+		preempt_enable();
 	}
 
 	/* wait for all the updates to complete */
@@ -1595,12 +1736,22 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
 
 	get_online_cpus();
 
-	if (cpu_online(cpu_id)) {
+	preempt_disable();
+	/* The update must run on the CPU that is being updated. */
+	if (cpu_id == smp_processor_id() || !cpu_online(cpu_id))
+		rb_update_pages(cpu_buffer);
+	else {
+		/*
+		 * Can not disable preemption for schedule_work_on()
+		 * on PREEMPT_RT.
+		 */
+		preempt_enable();
 		schedule_work_on(cpu_id,
 				 &cpu_buffer->update_pages_work);
 		wait_for_completion(&cpu_buffer->update_done);
-	} else
-		rb_update_pages(cpu_buffer);
+		preempt_disable();
+	}
+	preempt_enable();
 
 	cpu_buffer->nr_pages_to_update = 0;
 	put_online_cpus();
@@ -2612,6 +2763,22 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
 	rb_end_commit(cpu_buffer);
 }
 
+static __always_inline void
+rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
+{
+	if (buffer->irq_work.waiters_pending) {
+		buffer->irq_work.waiters_pending = false;
+		/* irq_work_queue() supplies it's own memory barriers */
+		irq_work_queue(&buffer->irq_work.work);
+	}
+
+	if (cpu_buffer->irq_work.waiters_pending) {
+		cpu_buffer->irq_work.waiters_pending = false;
+		/* irq_work_queue() supplies it's own memory barriers */
+		irq_work_queue(&cpu_buffer->irq_work.work);
+	}
+}
+
 /**
  * ring_buffer_unlock_commit - commit a reserved
  * @buffer: The buffer to commit to
@@ -2631,6 +2798,8 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
 
 	rb_commit(cpu_buffer, event);
 
+	rb_wakeups(buffer, cpu_buffer);
+
 	trace_recursive_unlock();
 
 	preempt_enable_notrace();
@@ -2803,6 +2972,8 @@ int ring_buffer_write(struct ring_buffer *buffer,
 
 	rb_commit(cpu_buffer, event);
 
+	rb_wakeups(buffer, cpu_buffer);
+
 	ret = 0;
  out:
 	preempt_enable_notrace();
@@ -4467,3 +4638,320 @@ static int rb_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 #endif
+
+#ifdef CONFIG_RING_BUFFER_STARTUP_TEST
+/*
+ * This is a basic integrity check of the ring buffer.
+ * Late in the boot cycle this test will run when configured in.
+ * It will kick off a thread per CPU that will go into a loop
+ * writing to the per cpu ring buffer various sizes of data.
+ * Some of the data will be large items, some small.
+ *
+ * Another thread is created that goes into a spin, sending out
+ * IPIs to the other CPUs to also write into the ring buffer.
+ * this is to test the nesting ability of the buffer.
+ *
+ * Basic stats are recorded and reported. If something in the
+ * ring buffer should happen that's not expected, a big warning
+ * is displayed and all ring buffers are disabled.
+ */
+static struct task_struct *rb_threads[NR_CPUS] __initdata;
+
+struct rb_test_data {
+	struct ring_buffer	*buffer;
+	unsigned long		events;
+	unsigned long		bytes_written;
+	unsigned long		bytes_alloc;
+	unsigned long		bytes_dropped;
+	unsigned long		events_nested;
+	unsigned long		bytes_written_nested;
+	unsigned long		bytes_alloc_nested;
+	unsigned long		bytes_dropped_nested;
+	int			min_size_nested;
+	int			max_size_nested;
+	int			max_size;
+	int			min_size;
+	int			cpu;
+	int			cnt;
+};
+
+static struct rb_test_data rb_data[NR_CPUS] __initdata;
+
+/* 1 meg per cpu */
+#define RB_TEST_BUFFER_SIZE	1048576
+
+static char rb_string[] __initdata =
+	"abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
+	"?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
+	"!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
+
+static bool rb_test_started __initdata;
+
+struct rb_item {
+	int size;
+	char str[];
+};
+
+static __init int rb_write_something(struct rb_test_data *data, bool nested)
+{
+	struct ring_buffer_event *event;
+	struct rb_item *item;
+	bool started;
+	int event_len;
+	int size;
+	int len;
+	int cnt;
+
+	/* Have nested writes different that what is written */
+	cnt = data->cnt + (nested ? 27 : 0);
+
+	/* Multiply cnt by ~e, to make some unique increment */
+	size = (data->cnt * 68 / 25) % (sizeof(rb_string) - 1);
+
+	len = size + sizeof(struct rb_item);
+
+	started = rb_test_started;
+	/* read rb_test_started before checking buffer enabled */
+	smp_rmb();
+
+	event = ring_buffer_lock_reserve(data->buffer, len);
+	if (!event) {
+		/* Ignore dropped events before test starts. */
+		if (started) {
+			if (nested)
+				data->bytes_dropped += len;
+			else
+				data->bytes_dropped_nested += len;
+		}
+		return len;
+	}
+
+	event_len = ring_buffer_event_length(event);
+
+	if (RB_WARN_ON(data->buffer, event_len < len))
+		goto out;
+
+	item = ring_buffer_event_data(event);
+	item->size = size;
+	memcpy(item->str, rb_string, size);
+
+	if (nested) {
+		data->bytes_alloc_nested += event_len;
+		data->bytes_written_nested += len;
+		data->events_nested++;
+		if (!data->min_size_nested || len < data->min_size_nested)
+			data->min_size_nested = len;
+		if (len > data->max_size_nested)
+			data->max_size_nested = len;
+	} else {
+		data->bytes_alloc += event_len;
+		data->bytes_written += len;
+		data->events++;
+		if (!data->min_size || len < data->min_size)
+			data->max_size = len;
+		if (len > data->max_size)
+			data->max_size = len;
+	}
+
+ out:
+	ring_buffer_unlock_commit(data->buffer, event);
+
+	return 0;
+}
+
+static __init int rb_test(void *arg)
+{
+	struct rb_test_data *data = arg;
+
+	while (!kthread_should_stop()) {
+		rb_write_something(data, false);
+		data->cnt++;
+
+		set_current_state(TASK_INTERRUPTIBLE);
+		/* Now sleep between a min of 100-300us and a max of 1ms */
+		usleep_range(((data->cnt % 3) + 1) * 100, 1000);
+	}
+
+	return 0;
+}
+
+static __init void rb_ipi(void *ignore)
+{
+	struct rb_test_data *data;
+	int cpu = smp_processor_id();
+
+	data = &rb_data[cpu];
+	rb_write_something(data, true);
+}
+
+static __init int rb_hammer_test(void *arg)
+{
+	while (!kthread_should_stop()) {
+
+		/* Send an IPI to all cpus to write data! */
+		smp_call_function(rb_ipi, NULL, 1);
+		/* No sleep, but for non preempt, let others run */
+		schedule();
+	}
+
+	return 0;
+}
+
+static __init int test_ringbuffer(void)
+{
+	struct task_struct *rb_hammer;
+	struct ring_buffer *buffer;
+	int cpu;
+	int ret = 0;
+
+	pr_info("Running ring buffer tests...\n");
+
+	buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
+	if (WARN_ON(!buffer))
+		return 0;
+
+	/* Disable buffer so that threads can't write to it yet */
+	ring_buffer_record_off(buffer);
+
+	for_each_online_cpu(cpu) {
+		rb_data[cpu].buffer = buffer;
+		rb_data[cpu].cpu = cpu;
+		rb_data[cpu].cnt = cpu;
+		rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
+						 "rbtester/%d", cpu);
+		if (WARN_ON(!rb_threads[cpu])) {
+			pr_cont("FAILED\n");
+			ret = -1;
+			goto out_free;
+		}
+
+		kthread_bind(rb_threads[cpu], cpu);
+		wake_up_process(rb_threads[cpu]);
+	}
+
+	/* Now create the rb hammer! */
+	rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
+	if (WARN_ON(!rb_hammer)) {
+		pr_cont("FAILED\n");
+		ret = -1;
+		goto out_free;
+	}
+
+	ring_buffer_record_on(buffer);
+	/*
+	 * Show buffer is enabled before setting rb_test_started.
+	 * Yes there's a small race window where events could be
+	 * dropped and the thread wont catch it. But when a ring
+	 * buffer gets enabled, there will always be some kind of
+	 * delay before other CPUs see it. Thus, we don't care about
+	 * those dropped events. We care about events dropped after
+	 * the threads see that the buffer is active.
+	 */
+	smp_wmb();
+	rb_test_started = true;
+
+	set_current_state(TASK_INTERRUPTIBLE);
+	/* Just run for 10 seconds */;
+	schedule_timeout(10 * HZ);
+
+	kthread_stop(rb_hammer);
+
+ out_free:
+	for_each_online_cpu(cpu) {
+		if (!rb_threads[cpu])
+			break;
+		kthread_stop(rb_threads[cpu]);
+	}
+	if (ret) {
+		ring_buffer_free(buffer);
+		return ret;
+	}
+
+	/* Report! */
+	pr_info("finished\n");
+	for_each_online_cpu(cpu) {
+		struct ring_buffer_event *event;
+		struct rb_test_data *data = &rb_data[cpu];
+		struct rb_item *item;
+		unsigned long total_events;
+		unsigned long total_dropped;
+		unsigned long total_written;
+		unsigned long total_alloc;
+		unsigned long total_read = 0;
+		unsigned long total_size = 0;
+		unsigned long total_len = 0;
+		unsigned long total_lost = 0;
+		unsigned long lost;
+		int big_event_size;
+		int small_event_size;
+
+		ret = -1;
+
+		total_events = data->events + data->events_nested;
+		total_written = data->bytes_written + data->bytes_written_nested;
+		total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
+		total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
+
+		big_event_size = data->max_size + data->max_size_nested;
+		small_event_size = data->min_size + data->min_size_nested;
+
+		pr_info("CPU %d:\n", cpu);
+		pr_info(" events: %ld\n", total_events);
+		pr_info(" dropped bytes: %ld\n", total_dropped);
+		pr_info(" alloced bytes: %ld\n", total_alloc);
+		pr_info(" written bytes: %ld\n", total_written);
+		pr_info(" biggest event: %d\n", big_event_size);
+		pr_info(" smallest event: %d\n", small_event_size);
+
+		if (RB_WARN_ON(buffer, total_dropped))
+			break;
+
+		ret = 0;
+
+		while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
+			total_lost += lost;
+			item = ring_buffer_event_data(event);
+			total_len += ring_buffer_event_length(event);
+			total_size += item->size + sizeof(struct rb_item);
+			if (memcmp(&item->str[0], rb_string, item->size) != 0) {
+				pr_info("FAILED!\n");
+				pr_info("buffer had: %.*s\n", item->size, item->str);
+				pr_info("expected: %.*s\n", item->size, rb_string);
+				RB_WARN_ON(buffer, 1);
+				ret = -1;
+				break;
+			}
+			total_read++;
+		}
+		if (ret)
+			break;
+
+		ret = -1;
+
+		pr_info(" read events: %ld\n", total_read);
+		pr_info(" lost events: %ld\n", total_lost);
+		pr_info(" total events: %ld\n", total_lost + total_read);
+		pr_info(" recorded len bytes: %ld\n", total_len);
+		pr_info(" recorded size bytes: %ld\n", total_size);
+		if (total_lost)
+			pr_info(" With dropped events, record len and size may not match\n"
+				" alloced and written from above\n");
+		if (!total_lost) {
+			if (RB_WARN_ON(buffer, total_len != total_alloc ||
+				       total_size != total_written))
+				break;
+		}
+		if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
+			break;
+
+		ret = 0;
+	}
+	if (!ret)
+		pr_info("Ring buffer PASSED!\n");
+
+	ring_buffer_free(buffer);
+	return 0;
+}
+
+late_initcall(test_ringbuffer);
+#endif /* CONFIG_RING_BUFFER_STARTUP_TEST */
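As a quick orientation to the wakeup support this diff introduces, here is a minimal kernel-side sketch, not taken from the patch, of a consumer that blocks until data arrives and then drains one CPU buffer. Only ring_buffer_wait(), ring_buffer_consume(), ring_buffer_event_length() and RING_BUFFER_ALL_CPUS are interfaces visible above or in include/linux/ring_buffer.h; my_drain_cpu() and its output are made up for illustration.

/*
 * Illustrative consumer sketch (not from this commit).  It relies only on
 * the interfaces visible in the diff above; my_drain_cpu() itself is a
 * hypothetical helper.
 */
#include <linux/ring_buffer.h>
#include <linux/printk.h>

static void my_drain_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;

	/* Block until rb_wakeups() queues the irq_work for this buffer. */
	ring_buffer_wait(buffer, cpu);

	/* Drain what is there; timestamp and lost-event count not needed. */
	while ((event = ring_buffer_consume(buffer, cpu, NULL, NULL)))
		pr_info("consumed %u byte event\n",
			ring_buffer_event_length(event));
}

Passing RING_BUFFER_ALL_CPUS instead of a CPU number makes the wait fire as soon as data is added to any of the per-CPU buffers, as the kerneldoc in the diff describes.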