Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--	kernel/trace/ring_buffer.c | 42
1 file changed, 35 insertions(+), 7 deletions(-)

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 7a4104cb95cb..5040d44fe5a3 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -9,7 +9,6 @@
 #include <linux/trace_seq.h>
 #include <linux/spinlock.h>
 #include <linux/irq_work.h>
-#include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/hardirq.h>
 #include <linux/kthread.h>	/* for self test */
@@ -23,7 +22,6 @@
 #include <linux/hash.h>
 #include <linux/list.h>
 #include <linux/cpu.h>
-#include <linux/fs.h>
 
 #include <asm/local.h>
 
@@ -447,7 +445,10 @@ int ring_buffer_print_page_header(struct trace_seq *s)
 struct rb_irq_work {
 	struct irq_work			work;
 	wait_queue_head_t		waiters;
+	wait_queue_head_t		full_waiters;
 	bool				waiters_pending;
+	bool				full_waiters_pending;
+	bool				wakeup_full;
 };
 
 /*
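
The patch introduces a second class of waiters. A commented restatement of
the struct as it stands after this hunk (the comments are editorial, not
part of the patch):

	struct rb_irq_work {
		struct irq_work		work;			/* deferred wakeup, queueable from the write path */
		wait_queue_head_t	waiters;		/* readers waiting for any new data */
		wait_queue_head_t	full_waiters;		/* readers waiting for a full page of data */
		bool			waiters_pending;	/* someone is queued on waiters */
		bool			full_waiters_pending;	/* someone is queued on full_waiters */
		bool			wakeup_full;		/* tell the irq_work callback to wake full_waiters too */
	};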
@@ -529,6 +530,10 @@ static void rb_wake_up_waiters(struct irq_work *work)
 	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
 
 	wake_up_all(&rbwork->waiters);
+	if (rbwork->wakeup_full) {
+		rbwork->wakeup_full = false;
+		wake_up_all(&rbwork->full_waiters);
+	}
 }
 
 /**
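
rb_wake_up_waiters() is the irq_work callback, so it runs in a context where
waking the queues is safe even if the event was written from a critical
section. Note that wakeup_full is cleared before the wake-up, presumably so
a writer re-arming the flag concurrently is not lost. Placing this hunk next
to its writer-side counterpart (from the rb_wakeups() hunk further down)
shows the handshake, using only names from this patch:

	/* write path, rb_wakeups(): request a "full" wakeup */
	cpu_buffer->irq_work.wakeup_full = true;
	cpu_buffer->irq_work.full_waiters_pending = false;
	irq_work_queue(&cpu_buffer->irq_work.work);

	/* irq_work callback, rb_wake_up_waiters(): honor the request once */
	if (rbwork->wakeup_full) {
		rbwork->wakeup_full = false;
		wake_up_all(&rbwork->full_waiters);
	}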
@@ -553,9 +558,11 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
 	 * data in any cpu buffer, or a specific buffer, put the
 	 * caller on the appropriate wait queue.
 	 */
-	if (cpu == RING_BUFFER_ALL_CPUS)
+	if (cpu == RING_BUFFER_ALL_CPUS) {
 		work = &buffer->irq_work;
-	else {
+		/* Full only makes sense on per cpu reads */
+		full = false;
+	} else {
 		if (!cpumask_test_cpu(cpu, buffer->cpumask))
 			return -ENODEV;
 		cpu_buffer = buffer->buffers[cpu];
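
A "full" page is a per-cpu notion: each per-cpu buffer has its own reader
page, so there is no single page to test when waiting on all CPUs at once,
and full is quietly demoted here. An illustrative caller (not part of the
patch), using the signature from the hunk header:

	/* block until CPU 0's buffer holds at least one full page */
	ret = ring_buffer_wait(buffer, 0, true);

	/* block until any CPU has any data; full is ignored in this case */
	ret = ring_buffer_wait(buffer, RING_BUFFER_ALL_CPUS, true);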
@@ -564,7 +571,10 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
 
 
 	while (true) {
-		prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
+		if (full)
+			prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
+		else
+			prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
 
 		/*
 		 * The events can happen in critical sections where
@@ -586,7 +596,10 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
 		 * that is necessary is that the wake up happens after
 		 * a task has been queued. It's OK for spurious wake ups.
 		 */
-		work->waiters_pending = true;
+		if (full)
+			work->full_waiters_pending = true;
+		else
+			work->waiters_pending = true;
 
 		if (signal_pending(current)) {
 			ret = -EINTR;
@@ -615,7 +628,10 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full) | |||
615 | schedule(); | 628 | schedule(); |
616 | } | 629 | } |
617 | 630 | ||
618 | finish_wait(&work->waiters, &wait); | 631 | if (full) |
632 | finish_wait(&work->full_waiters, &wait); | ||
633 | else | ||
634 | finish_wait(&work->waiters, &wait); | ||
619 | 635 | ||
620 | return ret; | 636 | return ret; |
621 | } | 637 | } |
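
The three hunks above make the same mechanical change at each stage of the
wait: sleep on, flag, and finish on the queue matching what the caller asked
for. Abridged, the patched wait loop now reads roughly as follows (editorial
comments, intermediate checks elided):

	while (true) {
		if (full)	/* sleep until a whole page is ready */
			prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
		else		/* sleep until any data arrives */
			prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);

		/* tell the write path which kind of wakeup is wanted;
		 * rb_wakeups() checks these flags after each commit */
		if (full)
			work->full_waiters_pending = true;
		else
			work->waiters_pending = true;

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		/* ... recheck whether the buffer already satisfies us ... */

		schedule();
	}

	if (full)
		finish_wait(&work->full_waiters, &wait);
	else
		finish_wait(&work->waiters, &wait);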
@@ -1230,6 +1246,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
 	init_completion(&cpu_buffer->update_done);
 	init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
 	init_waitqueue_head(&cpu_buffer->irq_work.waiters);
+	init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);
 
 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
 			    GFP_KERNEL, cpu_to_node(cpu));
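
Only the per-cpu rb_irq_work gets its full_waiters queue initialized, which
matches the earlier hunk forcing full = false for RING_BUFFER_ALL_CPUS: in
the hunks shown, nothing ever sleeps on the buffer-wide full_waiters queue,
so there is none to set up. The two adjacent init calls, annotated
(editorial comments):

	init_waitqueue_head(&cpu_buffer->irq_work.waiters);	/* "any data" readers */
	init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);	/* "full page" readers */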
@@ -2801,6 +2818,8 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
 static __always_inline void
 rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
 {
+	bool pagebusy;
+
 	if (buffer->irq_work.waiters_pending) {
 		buffer->irq_work.waiters_pending = false;
 		/* irq_work_queue() supplies it's own memory barriers */
@@ -2812,6 +2831,15 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
 		/* irq_work_queue() supplies it's own memory barriers */
 		irq_work_queue(&cpu_buffer->irq_work.work);
 	}
+
+	pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
+
+	if (!pagebusy && cpu_buffer->irq_work.full_waiters_pending) {
+		cpu_buffer->irq_work.wakeup_full = true;
+		cpu_buffer->irq_work.full_waiters_pending = false;
+		/* irq_work_queue() supplies it's own memory barriers */
+		irq_work_queue(&cpu_buffer->irq_work.work);
+	}
 }
 
 /**
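
This is the writer-side half of the feature. reader_page == commit_page
means the page being committed to is the very page the reader currently
holds, i.e. no completed page is available yet; only once the commit has
moved past the reader page is there a full page to hand over. The tail of
rb_wakeups() after this hunk, with editorial comments:

	pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;

	if (!pagebusy && cpu_buffer->irq_work.full_waiters_pending) {
		/* a completed page exists and a reader asked for one:
		 * disarm the request and let the irq_work callback wake
		 * the full_waiters queue */
		cpu_buffer->irq_work.wakeup_full = true;
		cpu_buffer->irq_work.full_waiters_pending = false;
		irq_work_queue(&cpu_buffer->irq_work.work);
	}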