Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--  kernel/trace/ring_buffer.c | 503
1 file changed, 497 insertions, 6 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 6989df2ba194..e444ff88f0a4 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -8,13 +8,16 @@ | |||
8 | #include <linux/trace_clock.h> | 8 | #include <linux/trace_clock.h> |
9 | #include <linux/trace_seq.h> | 9 | #include <linux/trace_seq.h> |
10 | #include <linux/spinlock.h> | 10 | #include <linux/spinlock.h> |
11 | #include <linux/irq_work.h> | ||
11 | #include <linux/debugfs.h> | 12 | #include <linux/debugfs.h> |
12 | #include <linux/uaccess.h> | 13 | #include <linux/uaccess.h> |
13 | #include <linux/hardirq.h> | 14 | #include <linux/hardirq.h> |
15 | #include <linux/kthread.h> /* for self test */ | ||
14 | #include <linux/kmemcheck.h> | 16 | #include <linux/kmemcheck.h> |
15 | #include <linux/module.h> | 17 | #include <linux/module.h> |
16 | #include <linux/percpu.h> | 18 | #include <linux/percpu.h> |
17 | #include <linux/mutex.h> | 19 | #include <linux/mutex.h> |
20 | #include <linux/delay.h> | ||
18 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
19 | #include <linux/init.h> | 22 | #include <linux/init.h> |
20 | #include <linux/hash.h> | 23 | #include <linux/hash.h> |
@@ -444,6 +447,12 @@ int ring_buffer_print_page_header(struct trace_seq *s) | |||
444 | return ret; | 447 | return ret; |
445 | } | 448 | } |
446 | 449 | ||
450 | struct rb_irq_work { | ||
451 | struct irq_work work; | ||
452 | wait_queue_head_t waiters; | ||
453 | bool waiters_pending; | ||
454 | }; | ||
455 | |||
447 | /* | 456 | /* |
448 | * head_page == tail_page && head == tail then buffer is empty. | 457 | * head_page == tail_page && head == tail then buffer is empty. |
449 | */ | 458 | */ |
@@ -478,6 +487,8 @@ struct ring_buffer_per_cpu { | |||
478 | struct list_head new_pages; /* new pages to add */ | 487 | struct list_head new_pages; /* new pages to add */ |
479 | struct work_struct update_pages_work; | 488 | struct work_struct update_pages_work; |
480 | struct completion update_done; | 489 | struct completion update_done; |
490 | |||
491 | struct rb_irq_work irq_work; | ||
481 | }; | 492 | }; |
482 | 493 | ||
483 | struct ring_buffer { | 494 | struct ring_buffer { |
@@ -497,6 +508,8 @@ struct ring_buffer { | |||
497 | struct notifier_block cpu_notify; | 508 | struct notifier_block cpu_notify; |
498 | #endif | 509 | #endif |
499 | u64 (*clock)(void); | 510 | u64 (*clock)(void); |
511 | |||
512 | struct rb_irq_work irq_work; | ||
500 | }; | 513 | }; |
501 | 514 | ||
502 | struct ring_buffer_iter { | 515 | struct ring_buffer_iter { |
@@ -508,6 +521,121 @@ struct ring_buffer_iter { | |||
508 | u64 read_stamp; | 521 | u64 read_stamp; |
509 | }; | 522 | }; |
510 | 523 | ||
524 | /* | ||
525 | * rb_wake_up_waiters - wake up tasks waiting for ring buffer input | ||
526 | * | ||
527 | * Called from irq_work context to wake up any task that is blocked | ||
528 | * on the ring buffer waiters queue. | ||
529 | */ | ||
530 | static void rb_wake_up_waiters(struct irq_work *work) | ||
531 | { | ||
532 | struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work); | ||
533 | |||
534 | wake_up_all(&rbwork->waiters); | ||
535 | } | ||
536 | |||
537 | /** | ||
538 | * ring_buffer_wait - wait for input to the ring buffer | ||
539 | * @buffer: buffer to wait on | ||
540 | * @cpu: the cpu buffer to wait on | ||
541 | * | ||
542 | * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon | ||
543 | * as data is added to any of the @buffer's cpu buffers. Otherwise | ||
544 | * it will wait for data to be added to a specific cpu buffer. | ||
545 | */ | ||
546 | void ring_buffer_wait(struct ring_buffer *buffer, int cpu) | ||
547 | { | ||
548 | struct ring_buffer_per_cpu *cpu_buffer; | ||
549 | DEFINE_WAIT(wait); | ||
550 | struct rb_irq_work *work; | ||
551 | |||
552 | /* | ||
553 | * Depending on what the caller is waiting for, either any | ||
554 | * data in any cpu buffer, or a specific buffer, put the | ||
555 | * caller on the appropriate wait queue. | ||
556 | */ | ||
557 | if (cpu == RING_BUFFER_ALL_CPUS) | ||
558 | work = &buffer->irq_work; | ||
559 | else { | ||
560 | cpu_buffer = buffer->buffers[cpu]; | ||
561 | work = &cpu_buffer->irq_work; | ||
562 | } | ||
563 | |||
564 | |||
565 | prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE); | ||
566 | |||
567 | /* | ||
568 | * The events can happen in critical sections where | ||
569 | * checking a work queue can cause deadlocks. | ||
570 | * After adding a task to the queue, this flag is set | ||
571 | * only to notify events to try to wake up the queue | ||
572 | * using irq_work. | ||
573 | * | ||
574 | * We don't clear it even if the buffer is no longer | ||
575 | * empty. The flag only causes the next event to run | ||
576 | * irq_work to do the work queue wake up. The worst | ||
577 | * that can happen if we race with !trace_empty() is that | ||
578 | * an event will cause an irq_work to try to wake up | ||
579 | * an empty queue. | ||
580 | * | ||
581 | * There's no reason to protect this flag either, as | ||
582 | * the work queue and irq_work logic will do the necessary | ||
583 | * synchronization for the wake ups. The only thing | ||
584 | * that is necessary is that the wake up happens after | ||
585 | * a task has been queued. Spurious wake ups are OK. | ||
586 | */ | ||
587 | work->waiters_pending = true; | ||
588 | |||
589 | if ((cpu == RING_BUFFER_ALL_CPUS && ring_buffer_empty(buffer)) || | ||
590 | (cpu != RING_BUFFER_ALL_CPUS && ring_buffer_empty_cpu(buffer, cpu))) | ||
591 | schedule(); | ||
592 | |||
593 | finish_wait(&work->waiters, &wait); | ||
594 | } | ||
595 | |||
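
A hedged usage sketch (not part of this patch): a hypothetical in-kernel consumer could pair ring_buffer_wait() with ring_buffer_consume() to block until data arrives on one CPU's buffer and then drain whatever is available. The function name below is an assumption for illustration only.

	static void example_consume_cpu(struct ring_buffer *buffer, int cpu)
	{
		struct ring_buffer_event *event;

		/* Sleeps until a writer commits an event to this cpu buffer. */
		ring_buffer_wait(buffer, cpu);

		/* Drain the buffer; NULL timestamp/lost_events pointers are allowed. */
		while ((event = ring_buffer_consume(buffer, cpu, NULL, NULL)))
			pr_info("consumed a %u byte event\n",
				ring_buffer_event_length(event));
	}
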
596 | /** | ||
597 | * ring_buffer_poll_wait - poll on buffer input | ||
598 | * @buffer: buffer to wait on | ||
599 | * @cpu: the cpu buffer to wait on | ||
600 | * @filp: the file descriptor | ||
601 | * @poll_table: The poll descriptor | ||
602 | * | ||
603 | * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon | ||
604 | * as data is added to any of the @buffer's cpu buffers. Otherwise | ||
605 | * it will wait for data to be added to a specific cpu buffer. | ||
606 | * | ||
607 | * Returns POLLIN | POLLRDNORM if data exists in the buffers, | ||
608 | * zero otherwise. | ||
609 | */ | ||
610 | int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, | ||
611 | struct file *filp, poll_table *poll_table) | ||
612 | { | ||
613 | struct ring_buffer_per_cpu *cpu_buffer; | ||
614 | struct rb_irq_work *work; | ||
615 | |||
616 | if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || | ||
617 | (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) | ||
618 | return POLLIN | POLLRDNORM; | ||
619 | |||
620 | if (cpu == RING_BUFFER_ALL_CPUS) | ||
621 | work = &buffer->irq_work; | ||
622 | else { | ||
623 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) | ||
624 | return -EINVAL; | ||
625 | |||
626 | cpu_buffer = buffer->buffers[cpu]; | ||
627 | work = &cpu_buffer->irq_work; | ||
628 | } | ||
629 | |||
630 | work->waiters_pending = true; | ||
631 | poll_wait(filp, &work->waiters, poll_table); | ||
632 | |||
633 | if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || | ||
634 | (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) | ||
635 | return POLLIN | POLLRDNORM; | ||
636 | return 0; | ||
637 | } | ||
638 | |||
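
A hedged usage sketch (not part of this patch) of the intended caller: a tracing file's ->poll() handler can simply forward to ring_buffer_poll_wait(). The example_private structure and its fields are assumptions for illustration.

	/* Hypothetical per-file state; real tracing code keeps an iterator here. */
	struct example_private {
		struct ring_buffer	*buffer;
		int			cpu;	/* or RING_BUFFER_ALL_CPUS */
	};

	static unsigned int example_poll(struct file *filp, poll_table *poll_table)
	{
		struct example_private *priv = filp->private_data;
		int ret;

		ret = ring_buffer_poll_wait(priv->buffer, priv->cpu, filp, poll_table);

		/* ring_buffer_poll_wait() returns -EINVAL for an unknown cpu. */
		return ret < 0 ? POLLERR : ret;
	}
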
511 | /* buffer may be either ring_buffer or ring_buffer_per_cpu */ | 639 | /* buffer may be either ring_buffer or ring_buffer_per_cpu */ |
512 | #define RB_WARN_ON(b, cond) \ | 640 | #define RB_WARN_ON(b, cond) \ |
513 | ({ \ | 641 | ({ \ |
@@ -1063,6 +1191,8 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu) | |||
1063 | cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; | 1191 | cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; |
1064 | INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler); | 1192 | INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler); |
1065 | init_completion(&cpu_buffer->update_done); | 1193 | init_completion(&cpu_buffer->update_done); |
1194 | init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters); | ||
1195 | init_waitqueue_head(&cpu_buffer->irq_work.waiters); | ||
1066 | 1196 | ||
1067 | bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), | 1197 | bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), |
1068 | GFP_KERNEL, cpu_to_node(cpu)); | 1198 | GFP_KERNEL, cpu_to_node(cpu)); |
@@ -1158,6 +1288,9 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags, | |||
1158 | buffer->clock = trace_clock_local; | 1288 | buffer->clock = trace_clock_local; |
1159 | buffer->reader_lock_key = key; | 1289 | buffer->reader_lock_key = key; |
1160 | 1290 | ||
1291 | init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters); | ||
1292 | init_waitqueue_head(&buffer->irq_work.waiters); | ||
1293 | |||
1161 | /* need at least two pages */ | 1294 | /* need at least two pages */ |
1162 | if (nr_pages < 2) | 1295 | if (nr_pages < 2) |
1163 | nr_pages = 2; | 1296 | nr_pages = 2; |
@@ -1553,11 +1686,22 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, | |||
1553 | if (!cpu_buffer->nr_pages_to_update) | 1686 | if (!cpu_buffer->nr_pages_to_update) |
1554 | continue; | 1687 | continue; |
1555 | 1688 | ||
1556 | if (cpu_online(cpu)) | 1689 | /* The update must run on the CPU that is being updated. */ |
1690 | preempt_disable(); | ||
1691 | if (cpu == smp_processor_id() || !cpu_online(cpu)) { | ||
1692 | rb_update_pages(cpu_buffer); | ||
1693 | cpu_buffer->nr_pages_to_update = 0; | ||
1694 | } else { | ||
1695 | /* | ||
1696 | * Can not disable preemption for schedule_work_on() | ||
1697 | * on PREEMPT_RT. | ||
1698 | */ | ||
1699 | preempt_enable(); | ||
1557 | schedule_work_on(cpu, | 1700 | schedule_work_on(cpu, |
1558 | &cpu_buffer->update_pages_work); | 1701 | &cpu_buffer->update_pages_work); |
1559 | else | 1702 | preempt_disable(); |
1560 | rb_update_pages(cpu_buffer); | 1703 | } |
1704 | preempt_enable(); | ||
1561 | } | 1705 | } |
1562 | 1706 | ||
1563 | /* wait for all the updates to complete */ | 1707 | /* wait for all the updates to complete */ |
@@ -1595,12 +1739,22 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, | |||
1595 | 1739 | ||
1596 | get_online_cpus(); | 1740 | get_online_cpus(); |
1597 | 1741 | ||
1598 | if (cpu_online(cpu_id)) { | 1742 | preempt_disable(); |
1743 | /* The update must run on the CPU that is being updated. */ | ||
1744 | if (cpu_id == smp_processor_id() || !cpu_online(cpu_id)) | ||
1745 | rb_update_pages(cpu_buffer); | ||
1746 | else { | ||
1747 | /* | ||
1748 | * Can not disable preemption for schedule_work_on() | ||
1749 | * on PREEMPT_RT. | ||
1750 | */ | ||
1751 | preempt_enable(); | ||
1599 | schedule_work_on(cpu_id, | 1752 | schedule_work_on(cpu_id, |
1600 | &cpu_buffer->update_pages_work); | 1753 | &cpu_buffer->update_pages_work); |
1601 | wait_for_completion(&cpu_buffer->update_done); | 1754 | wait_for_completion(&cpu_buffer->update_done); |
1602 | } else | 1755 | preempt_disable(); |
1603 | rb_update_pages(cpu_buffer); | 1756 | } |
1757 | preempt_enable(); | ||
1604 | 1758 | ||
1605 | cpu_buffer->nr_pages_to_update = 0; | 1759 | cpu_buffer->nr_pages_to_update = 0; |
1606 | put_online_cpus(); | 1760 | put_online_cpus(); |
@@ -2612,6 +2766,22 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, | |||
2612 | rb_end_commit(cpu_buffer); | 2766 | rb_end_commit(cpu_buffer); |
2613 | } | 2767 | } |
2614 | 2768 | ||
2769 | static __always_inline void | ||
2770 | rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) | ||
2771 | { | ||
2772 | if (buffer->irq_work.waiters_pending) { | ||
2773 | buffer->irq_work.waiters_pending = false; | ||
2774 | /* irq_work_queue() supplies its own memory barriers */ | ||
2775 | irq_work_queue(&buffer->irq_work.work); | ||
2776 | } | ||
2777 | |||
2778 | if (cpu_buffer->irq_work.waiters_pending) { | ||
2779 | cpu_buffer->irq_work.waiters_pending = false; | ||
2780 | /* irq_work_queue() supplies its own memory barriers */ | ||
2781 | irq_work_queue(&cpu_buffer->irq_work.work); | ||
2782 | } | ||
2783 | } | ||
2784 | |||
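
A minimal sketch (hypothetical function, not from this patch) of why the wake-up is deferred: writers may commit events with interrupts disabled or from deep inside the scheduler, where calling wake_up_all() directly could deadlock on the waitqueue lock. rb_wakeups() only tests waiters_pending and queues an irq_work, which is safe there; the actual wake-up then runs later from rb_wake_up_waiters().

	static void example_write_from_atomic(struct ring_buffer *buffer,
					      void *data, unsigned long len)
	{
		unsigned long flags;

		local_irq_save(flags);
		/* The commit path calls rb_wakeups(); no direct wake_up() happens here. */
		ring_buffer_write(buffer, len, data);
		local_irq_restore(flags);
	}
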
2615 | /** | 2785 | /** |
2616 | * ring_buffer_unlock_commit - commit a reserved | 2786 | * ring_buffer_unlock_commit - commit a reserved |
2617 | * @buffer: The buffer to commit to | 2787 | * @buffer: The buffer to commit to |
@@ -2631,6 +2801,8 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer, | |||
2631 | 2801 | ||
2632 | rb_commit(cpu_buffer, event); | 2802 | rb_commit(cpu_buffer, event); |
2633 | 2803 | ||
2804 | rb_wakeups(buffer, cpu_buffer); | ||
2805 | |||
2634 | trace_recursive_unlock(); | 2806 | trace_recursive_unlock(); |
2635 | 2807 | ||
2636 | preempt_enable_notrace(); | 2808 | preempt_enable_notrace(); |
@@ -2803,6 +2975,8 @@ int ring_buffer_write(struct ring_buffer *buffer, | |||
2803 | 2975 | ||
2804 | rb_commit(cpu_buffer, event); | 2976 | rb_commit(cpu_buffer, event); |
2805 | 2977 | ||
2978 | rb_wakeups(buffer, cpu_buffer); | ||
2979 | |||
2806 | ret = 0; | 2980 | ret = 0; |
2807 | out: | 2981 | out: |
2808 | preempt_enable_notrace(); | 2982 | preempt_enable_notrace(); |
@@ -4467,3 +4641,320 @@ static int rb_cpu_notify(struct notifier_block *self, | |||
4467 | return NOTIFY_OK; | 4641 | return NOTIFY_OK; |
4468 | } | 4642 | } |
4469 | #endif | 4643 | #endif |
4644 | |||
4645 | #ifdef CONFIG_RING_BUFFER_STARTUP_TEST | ||
4646 | /* | ||
4647 | * This is a basic integrity check of the ring buffer. | ||
4648 | * Late in the boot cycle this test will run when configured in. | ||
4649 | * It will kick off a thread per CPU that will go into a loop | ||
4650 | * writing to the per cpu ring buffer various sizes of data. | ||
4651 | * Some of the data will be large items, some small. | ||
4652 | * | ||
4653 | * Another thread is created that goes into a spin, sending out | ||
4654 | * IPIs to the other CPUs to also write into the ring buffer. | ||
4655 | * This is to test the nesting ability of the buffer. | ||
4656 | * | ||
4657 | * Basic stats are recorded and reported. If something in the | ||
4658 | * ring buffer should happen that's not expected, a big warning | ||
4659 | * is displayed and all ring buffers are disabled. | ||
4660 | */ | ||
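
The whole block is compiled only when the Kconfig option named in the #ifdef above is set. A hedged .config fragment along these lines enables it (the Kconfig entry itself lives in kernel/trace/Kconfig, outside this diff):

	# Assumes the ring buffer is already built in via the tracing options.
	CONFIG_RING_BUFFER_STARTUP_TEST=y
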
4661 | static struct task_struct *rb_threads[NR_CPUS] __initdata; | ||
4662 | |||
4663 | struct rb_test_data { | ||
4664 | struct ring_buffer *buffer; | ||
4665 | unsigned long events; | ||
4666 | unsigned long bytes_written; | ||
4667 | unsigned long bytes_alloc; | ||
4668 | unsigned long bytes_dropped; | ||
4669 | unsigned long events_nested; | ||
4670 | unsigned long bytes_written_nested; | ||
4671 | unsigned long bytes_alloc_nested; | ||
4672 | unsigned long bytes_dropped_nested; | ||
4673 | int min_size_nested; | ||
4674 | int max_size_nested; | ||
4675 | int max_size; | ||
4676 | int min_size; | ||
4677 | int cpu; | ||
4678 | int cnt; | ||
4679 | }; | ||
4680 | |||
4681 | static struct rb_test_data rb_data[NR_CPUS] __initdata; | ||
4682 | |||
4683 | /* 1 meg per cpu */ | ||
4684 | #define RB_TEST_BUFFER_SIZE 1048576 | ||
4685 | |||
4686 | static char rb_string[] __initdata = | ||
4687 | "abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\" | ||
4688 | "?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890" | ||
4689 | "!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv"; | ||
4690 | |||
4691 | static bool rb_test_started __initdata; | ||
4692 | |||
4693 | struct rb_item { | ||
4694 | int size; | ||
4695 | char str[]; | ||
4696 | }; | ||
4697 | |||
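
For illustration, a hedged sketch (hypothetical helper, not part of the patch) of how an rb_item payload is laid out and read back: each test event carries an rb_item header followed by item->size bytes copied from rb_string, which is exactly what the verification loop in test_ringbuffer() later checks with memcmp().

	static __init void example_dump_item(struct ring_buffer_event *event)
	{
		struct rb_item *item = ring_buffer_event_data(event);

		pr_info("item payload (%d bytes): %.*s\n",
			item->size, item->size, item->str);
	}
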
4698 | static __init int rb_write_something(struct rb_test_data *data, bool nested) | ||
4699 | { | ||
4700 | struct ring_buffer_event *event; | ||
4701 | struct rb_item *item; | ||
4702 | bool started; | ||
4703 | int event_len; | ||
4704 | int size; | ||
4705 | int len; | ||
4706 | int cnt; | ||
4707 | |||
4708 | /* Have nested writes different than what is written */ | ||
4709 | cnt = data->cnt + (nested ? 27 : 0); | ||
4710 | |||
4711 | /* Multiply cnt by ~e, to make some unique increment */ | ||
4712 | size = (cnt * 68 / 25) % (sizeof(rb_string) - 1); | ||
4713 | |||
4714 | len = size + sizeof(struct rb_item); | ||
4715 | |||
4716 | started = rb_test_started; | ||
4717 | /* read rb_test_started before checking buffer enabled */ | ||
4718 | smp_rmb(); | ||
4719 | |||
4720 | event = ring_buffer_lock_reserve(data->buffer, len); | ||
4721 | if (!event) { | ||
4722 | /* Ignore dropped events before test starts. */ | ||
4723 | if (started) { | ||
4724 | if (nested) | ||
4725 | data->bytes_dropped_nested += len; | ||
4726 | else | ||
4727 | data->bytes_dropped += len; | ||
4728 | } | ||
4729 | return len; | ||
4730 | } | ||
4731 | |||
4732 | event_len = ring_buffer_event_length(event); | ||
4733 | |||
4734 | if (RB_WARN_ON(data->buffer, event_len < len)) | ||
4735 | goto out; | ||
4736 | |||
4737 | item = ring_buffer_event_data(event); | ||
4738 | item->size = size; | ||
4739 | memcpy(item->str, rb_string, size); | ||
4740 | |||
4741 | if (nested) { | ||
4742 | data->bytes_alloc_nested += event_len; | ||
4743 | data->bytes_written_nested += len; | ||
4744 | data->events_nested++; | ||
4745 | if (!data->min_size_nested || len < data->min_size_nested) | ||
4746 | data->min_size_nested = len; | ||
4747 | if (len > data->max_size_nested) | ||
4748 | data->max_size_nested = len; | ||
4749 | } else { | ||
4750 | data->bytes_alloc += event_len; | ||
4751 | data->bytes_written += len; | ||
4752 | data->events++; | ||
4753 | if (!data->min_size || len < data->min_size) | ||
4754 | data->min_size = len; | ||
4755 | if (len > data->max_size) | ||
4756 | data->max_size = len; | ||
4757 | } | ||
4758 | |||
4759 | out: | ||
4760 | ring_buffer_unlock_commit(data->buffer, event); | ||
4761 | |||
4762 | return 0; | ||
4763 | } | ||
4764 | |||
4765 | static __init int rb_test(void *arg) | ||
4766 | { | ||
4767 | struct rb_test_data *data = arg; | ||
4768 | |||
4769 | while (!kthread_should_stop()) { | ||
4770 | rb_write_something(data, false); | ||
4771 | data->cnt++; | ||
4772 | |||
4773 | set_current_state(TASK_INTERRUPTIBLE); | ||
4774 | /* Now sleep between a min of 100-300us and a max of 1ms */ | ||
4775 | usleep_range(((data->cnt % 3) + 1) * 100, 1000); | ||
4776 | } | ||
4777 | |||
4778 | return 0; | ||
4779 | } | ||
4780 | |||
4781 | static __init void rb_ipi(void *ignore) | ||
4782 | { | ||
4783 | struct rb_test_data *data; | ||
4784 | int cpu = smp_processor_id(); | ||
4785 | |||
4786 | data = &rb_data[cpu]; | ||
4787 | rb_write_something(data, true); | ||
4788 | } | ||
4789 | |||
4790 | static __init int rb_hammer_test(void *arg) | ||
4791 | { | ||
4792 | while (!kthread_should_stop()) { | ||
4793 | |||
4794 | /* Send an IPI to all cpus to write data! */ | ||
4795 | smp_call_function(rb_ipi, NULL, 1); | ||
4796 | /* No sleep, but for non preempt, let others run */ | ||
4797 | schedule(); | ||
4798 | } | ||
4799 | |||
4800 | return 0; | ||
4801 | } | ||
4802 | |||
4803 | static __init int test_ringbuffer(void) | ||
4804 | { | ||
4805 | struct task_struct *rb_hammer; | ||
4806 | struct ring_buffer *buffer; | ||
4807 | int cpu; | ||
4808 | int ret = 0; | ||
4809 | |||
4810 | pr_info("Running ring buffer tests...\n"); | ||
4811 | |||
4812 | buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE); | ||
4813 | if (WARN_ON(!buffer)) | ||
4814 | return 0; | ||
4815 | |||
4816 | /* Disable buffer so that threads can't write to it yet */ | ||
4817 | ring_buffer_record_off(buffer); | ||
4818 | |||
4819 | for_each_online_cpu(cpu) { | ||
4820 | rb_data[cpu].buffer = buffer; | ||
4821 | rb_data[cpu].cpu = cpu; | ||
4822 | rb_data[cpu].cnt = cpu; | ||
4823 | rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu], | ||
4824 | "rbtester/%d", cpu); | ||
4825 | if (WARN_ON(!rb_threads[cpu])) { | ||
4826 | pr_cont("FAILED\n"); | ||
4827 | ret = -1; | ||
4828 | goto out_free; | ||
4829 | } | ||
4830 | |||
4831 | kthread_bind(rb_threads[cpu], cpu); | ||
4832 | wake_up_process(rb_threads[cpu]); | ||
4833 | } | ||
4834 | |||
4835 | /* Now create the rb hammer! */ | ||
4836 | rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer"); | ||
4837 | if (WARN_ON(!rb_hammer)) { | ||
4838 | pr_cont("FAILED\n"); | ||
4839 | ret = -1; | ||
4840 | goto out_free; | ||
4841 | } | ||
4842 | |||
4843 | ring_buffer_record_on(buffer); | ||
4844 | /* | ||
4845 | * Show buffer is enabled before setting rb_test_started. | ||
4846 | * Yes there's a small race window where events could be | ||
4847 | * dropped and the thread won't catch it. But when a ring | ||
4848 | * buffer gets enabled, there will always be some kind of | ||
4849 | * delay before other CPUs see it. Thus, we don't care about | ||
4850 | * those dropped events. We care about events dropped after | ||
4851 | * the threads see that the buffer is active. | ||
4852 | */ | ||
4853 | smp_wmb(); | ||
4854 | rb_test_started = true; | ||
4855 | |||
4856 | set_current_state(TASK_INTERRUPTIBLE); | ||
4857 | /* Just run for 10 seconds */ | ||
4858 | schedule_timeout(10 * HZ); | ||
4859 | |||
4860 | kthread_stop(rb_hammer); | ||
4861 | |||
4862 | out_free: | ||
4863 | for_each_online_cpu(cpu) { | ||
4864 | if (!rb_threads[cpu]) | ||
4865 | break; | ||
4866 | kthread_stop(rb_threads[cpu]); | ||
4867 | } | ||
4868 | if (ret) { | ||
4869 | ring_buffer_free(buffer); | ||
4870 | return ret; | ||
4871 | } | ||
4872 | |||
4873 | /* Report! */ | ||
4874 | pr_info("finished\n"); | ||
4875 | for_each_online_cpu(cpu) { | ||
4876 | struct ring_buffer_event *event; | ||
4877 | struct rb_test_data *data = &rb_data[cpu]; | ||
4878 | struct rb_item *item; | ||
4879 | unsigned long total_events; | ||
4880 | unsigned long total_dropped; | ||
4881 | unsigned long total_written; | ||
4882 | unsigned long total_alloc; | ||
4883 | unsigned long total_read = 0; | ||
4884 | unsigned long total_size = 0; | ||
4885 | unsigned long total_len = 0; | ||
4886 | unsigned long total_lost = 0; | ||
4887 | unsigned long lost; | ||
4888 | int big_event_size; | ||
4889 | int small_event_size; | ||
4890 | |||
4891 | ret = -1; | ||
4892 | |||
4893 | total_events = data->events + data->events_nested; | ||
4894 | total_written = data->bytes_written + data->bytes_written_nested; | ||
4895 | total_alloc = data->bytes_alloc + data->bytes_alloc_nested; | ||
4896 | total_dropped = data->bytes_dropped + data->bytes_dropped_nested; | ||
4897 | |||
4898 | big_event_size = data->max_size + data->max_size_nested; | ||
4899 | small_event_size = data->min_size + data->min_size_nested; | ||
4900 | |||
4901 | pr_info("CPU %d:\n", cpu); | ||
4902 | pr_info(" events: %ld\n", total_events); | ||
4903 | pr_info(" dropped bytes: %ld\n", total_dropped); | ||
4904 | pr_info(" alloced bytes: %ld\n", total_alloc); | ||
4905 | pr_info(" written bytes: %ld\n", total_written); | ||
4906 | pr_info(" biggest event: %d\n", big_event_size); | ||
4907 | pr_info(" smallest event: %d\n", small_event_size); | ||
4908 | |||
4909 | if (RB_WARN_ON(buffer, total_dropped)) | ||
4910 | break; | ||
4911 | |||
4912 | ret = 0; | ||
4913 | |||
4914 | while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) { | ||
4915 | total_lost += lost; | ||
4916 | item = ring_buffer_event_data(event); | ||
4917 | total_len += ring_buffer_event_length(event); | ||
4918 | total_size += item->size + sizeof(struct rb_item); | ||
4919 | if (memcmp(&item->str[0], rb_string, item->size) != 0) { | ||
4920 | pr_info("FAILED!\n"); | ||
4921 | pr_info("buffer had: %.*s\n", item->size, item->str); | ||
4922 | pr_info("expected: %.*s\n", item->size, rb_string); | ||
4923 | RB_WARN_ON(buffer, 1); | ||
4924 | ret = -1; | ||
4925 | break; | ||
4926 | } | ||
4927 | total_read++; | ||
4928 | } | ||
4929 | if (ret) | ||
4930 | break; | ||
4931 | |||
4932 | ret = -1; | ||
4933 | |||
4934 | pr_info(" read events: %ld\n", total_read); | ||
4935 | pr_info(" lost events: %ld\n", total_lost); | ||
4936 | pr_info(" total events: %ld\n", total_lost + total_read); | ||
4937 | pr_info(" recorded len bytes: %ld\n", total_len); | ||
4938 | pr_info(" recorded size bytes: %ld\n", total_size); | ||
4939 | if (total_lost) | ||
4940 | pr_info(" With dropped events, record len and size may not match\n" | ||
4941 | " alloced and written from above\n"); | ||
4942 | if (!total_lost) { | ||
4943 | if (RB_WARN_ON(buffer, total_len != total_alloc || | ||
4944 | total_size != total_written)) | ||
4945 | break; | ||
4946 | } | ||
4947 | if (RB_WARN_ON(buffer, total_lost + total_read != total_events)) | ||
4948 | break; | ||
4949 | |||
4950 | ret = 0; | ||
4951 | } | ||
4952 | if (!ret) | ||
4953 | pr_info("Ring buffer PASSED!\n"); | ||
4954 | |||
4955 | ring_buffer_free(buffer); | ||
4956 | return 0; | ||
4957 | } | ||
4958 | |||
4959 | late_initcall(test_ringbuffer); | ||
4960 | #endif /* CONFIG_RING_BUFFER_STARTUP_TEST */ | ||