Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--	kernel/trace/ring_buffer.c	45
1 file changed, 32 insertions, 13 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 2326b04c95c4..05a9f83b8819 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -20,6 +20,7 @@
 #include <linux/cpu.h>
 #include <linux/fs.h>
 
+#include <asm/local.h>
 #include "trace.h"
 
 /*
@@ -464,6 +465,8 @@ struct ring_buffer_iter {
 	struct ring_buffer_per_cpu	*cpu_buffer;
 	unsigned long			head;
 	struct buffer_page		*head_page;
+	struct buffer_page		*cache_reader_page;
+	unsigned long			cache_read;
 	u64				read_stamp;
 };
 
@@ -2230,12 +2233,12 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 	if (ring_buffer_flags != RB_BUFFERS_ON)
 		return NULL;
 
-	if (atomic_read(&buffer->record_disabled))
-		return NULL;
-
 	/* If we are tracing schedule, we don't want to recurse */
 	resched = ftrace_preempt_disable();
 
+	if (atomic_read(&buffer->record_disabled))
+		goto out_nocheck;
+
 	if (trace_recursive_lock())
 		goto out_nocheck;
 
@@ -2467,11 +2470,11 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	if (ring_buffer_flags != RB_BUFFERS_ON)
 		return -EBUSY;
 
-	if (atomic_read(&buffer->record_disabled))
-		return -EBUSY;
-
 	resched = ftrace_preempt_disable();
 
+	if (atomic_read(&buffer->record_disabled))
+		goto out;
+
 	cpu = raw_smp_processor_id();
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
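
The two hunks above make the same change in ring_buffer_lock_reserve() and ring_buffer_write(): the record_disabled test now runs after ftrace_preempt_disable(), and bails out through the normal goto path instead of returning directly. The commit message is not shown here, but a likely motivation is the synchronize_sched()-based protocol used when the buffer is reset or resized: recording is disabled, then the resetter waits for every CPU to leave its preempt-disabled section. A writer that sampled the flag before disabling preemption could pass the check, be preempted, and resume writing after that grace period had already completed. The following is a minimal standalone sketch of the two orderings, with preempt_disable()/preempt_enable() reduced to comments; it is an illustration, not kernel code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int record_disabled;

/* Old ordering: the flag is sampled outside the critical section. */
static bool writer_check_then_lock(void)
{
	if (atomic_load(&record_disabled))
		return false;
	/*
	 * The writer can be preempted right here. A resetter may now
	 * set record_disabled and wait out all CPUs, yet this writer
	 * still goes on to write once it is rescheduled.
	 */
	/* preempt_disable(); ...write... preempt_enable(); */
	return true;
}

/* New ordering: enter the critical section first, then check, so any
 * disable that completed before we entered is always observed. */
static bool writer_lock_then_check(void)
{
	/* preempt_disable(); */
	if (atomic_load(&record_disabled)) {
		/* preempt_enable();  -- the "goto out" path above */
		return false;
	}
	/* ...write... preempt_enable(); */
	return true;
}

int main(void)
{
	atomic_store(&record_disabled, 1);
	printf("racy: %d, fixed: %d\n",
	       writer_check_then_lock(), writer_lock_then_check());
	return 0;
}
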
@@ -2539,7 +2542,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
  * @buffer: The ring buffer to enable writes
  *
  * Note, multiple disables will need the same number of enables
- * to truely enable the writing (much like preempt_disable).
+ * to truly enable the writing (much like preempt_disable).
  */
 void ring_buffer_record_enable(struct ring_buffer *buffer)
 {
@@ -2575,7 +2578,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
  * @cpu: The CPU to enable.
  *
  * Note, multiple disables will need the same number of enables
- * to truely enable the writing (much like preempt_disable).
+ * to truly enable the writing (much like preempt_disable).
  */
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
 {
@@ -2716,6 +2719,8 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
 		iter->read_stamp = cpu_buffer->read_stamp;
 	else
 		iter->read_stamp = iter->head_page->page->time_stamp;
+	iter->cache_reader_page = cpu_buffer->reader_page;
+	iter->cache_read = cpu_buffer->read;
 }
 
 /**
@@ -2869,7 +2874,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	 * Splice the empty reader page into the list around the head.
 	 */
 	reader = rb_set_head_page(cpu_buffer);
-	cpu_buffer->reader_page->list.next = reader->list.next;
+	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
 	cpu_buffer->reader_page->list.prev = reader->list.prev;
 
 	/*
@@ -2906,7 +2911,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	 *
 	 * Now make the new head point back to the reader page.
 	 */
-	reader->list.next->prev = &cpu_buffer->reader_page->list;
+	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
 	rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
 
 	/* Finally update the reader page to the new head */
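
Both rb_get_reader_page() hunks route reader->list.next through rb_list_head() before it is stored or dereferenced. The lockless ring buffer keeps its page-state flags in the low bits of the page list's ->next pointers, so a raw ->next value may be a tagged, non-dereferenceable address. Below is a standalone illustration of that pointer-tagging pattern; the constants and the helper mirror definitions that appear earlier in ring_buffer.c, but the rest is a self-contained demo, not kernel code.

#include <stdio.h>

#define RB_PAGE_HEAD	1UL	/* flag bits kept in ->next */
#define RB_PAGE_UPDATE	2UL
#define RB_FLAG_MASK	3UL

struct list_head {
	struct list_head *next, *prev;
};

/* Strip the flag bits so the result is a real, aligned pointer. */
static struct list_head *rb_list_head(struct list_head *list)
{
	unsigned long val = (unsigned long)list;

	return (struct list_head *)(val & ~RB_FLAG_MASK);
}

int main(void)
{
	struct list_head a, b;

	/* Tag b's address with the HEAD flag, as the writer side does. */
	a.next = (struct list_head *)((unsigned long)&b | RB_PAGE_HEAD);
	a.prev = &b;

	/*
	 * Dereferencing a.next directly would chase a misaligned
	 * address; masking first recovers &b. This is why the splice
	 * in the hunks above must not copy or follow a raw ->next.
	 */
	printf("masked pointer ok: %d\n", rb_list_head(a.next) == &b);
	return 0;
}
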
@@ -3060,13 +3065,22 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	struct ring_buffer_event *event;
 	int nr_loops = 0;
 
-	if (ring_buffer_iter_empty(iter))
-		return NULL;
-
 	cpu_buffer = iter->cpu_buffer;
 	buffer = cpu_buffer->buffer;
 
+	/*
+	 * Check if someone performed a consuming read to
+	 * the buffer. A consuming read invalidates the iterator
+	 * and we need to reset the iterator in this case.
+	 */
+	if (unlikely(iter->cache_read != cpu_buffer->read ||
+		     iter->cache_reader_page != cpu_buffer->reader_page))
+		rb_iter_reset(iter);
+
 again:
+	if (ring_buffer_iter_empty(iter))
+		return NULL;
+
 	/*
 	 * We repeat when a timestamp is encountered.
 	 * We can get multiple timestamps by nested interrupts or also
@@ -3081,6 +3095,11 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	if (rb_per_cpu_empty(cpu_buffer))
 		return NULL;
 
+	if (iter->head >= local_read(&iter->head_page->page->commit)) {
+		rb_inc_iter(iter);
+		goto again;
+	}
+
 	event = rb_iter_head_event(iter);
 
 	switch (event->type_len) {
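
Taken together, the iterator hunks give rb_iter_peek() two new defenses. First, rb_iter_reset() now snapshots the reader page and the read count into the iterator (cache_reader_page/cache_read), and the peek path compares that snapshot against the live values: a mismatch means a consuming read swapped the reader page out underneath the iterator, which must then be reset rather than walk stale pages. Second, the local_read(&...->commit) test steps the iterator to the next page once it has consumed everything committed on the current one, instead of reading past the commit mark; the explicit #include <asm/local.h> added at the top presumably makes the local_t accessors used here a direct rather than indirect include. A simplified standalone model of the snapshot-and-compare scheme follows, with plain stand-in types in place of the kernel structures.

#include <stdbool.h>
#include <stdio.h>

struct page;				/* opaque stand-in for buffer_page */

struct cpu_buffer_sketch {
	struct page	*reader_page;	/* swapped by consuming reads */
	unsigned long	read;		/* count of consumed events   */
};

struct iter_sketch {
	struct cpu_buffer_sketch *cpu_buffer;
	struct page	*cache_reader_page;	/* snapshots taken at reset */
	unsigned long	cache_read;
};

static void iter_reset(struct iter_sketch *iter)
{
	/* ...repositioning of head/head_page elided... */
	iter->cache_reader_page = iter->cpu_buffer->reader_page;
	iter->cache_read = iter->cpu_buffer->read;
}

/* The unlikely() test at the top of rb_iter_peek(), in miniature. */
static bool iter_stale(struct iter_sketch *iter)
{
	return iter->cache_read != iter->cpu_buffer->read ||
	       iter->cache_reader_page != iter->cpu_buffer->reader_page;
}

int main(void)
{
	struct page *fake = (struct page *)0x1000;	/* never dereferenced */
	struct cpu_buffer_sketch cb = { .reader_page = fake, .read = 0 };
	struct iter_sketch it = { .cpu_buffer = &cb };

	iter_reset(&it);
	printf("fresh iterator stale?  %d\n", iter_stale(&it));	/* 0 */

	cb.read++;	/* a consuming read happened elsewhere */
	printf("after consuming read? %d\n", iter_stale(&it));	/* 1 */
	if (iter_stale(&it))
		iter_reset(&it);	/* as rb_iter_peek() now does */
	return 0;
}
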