Diffstat (limited to 'kernel/trace/ring_buffer.c'):
-rw-r--r--  kernel/trace/ring_buffer.c  65
1 file changed, 52 insertions(+), 13 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index b979426d16c6..ce8514feedcd 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -460,9 +460,10 @@ struct ring_buffer_per_cpu {
 	unsigned long			lost_events;
 	unsigned long			last_overrun;
 	local_t				entries_bytes;
-	local_t				commit_overrun;
-	local_t				overrun;
 	local_t				entries;
+	local_t				overrun;
+	local_t				commit_overrun;
+	local_t				dropped_events;
 	local_t				committing;
 	local_t				commits;
 	unsigned long			read;
@@ -1396,6 +1397,8 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
 		struct list_head *head_page_with_bit;
 
 		head_page = &rb_set_head_page(cpu_buffer)->list;
+		if (!head_page)
+			break;
 		prev_page = head_page->prev;
 
 		first_page = pages->next;
@@ -1820,7 +1823,7 @@ rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
 }
 
 /**
- * ring_buffer_update_event - update event type and data
+ * rb_update_event - update event type and data
  * @event: the even to update
  * @type: the type of event
  * @length: the size of the event field in the ring buffer
@@ -2155,8 +2158,10 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 		 * If we are not in overwrite mode,
 		 * this is easy, just stop here.
 		 */
-		if (!(buffer->flags & RB_FL_OVERWRITE))
+		if (!(buffer->flags & RB_FL_OVERWRITE)) {
+			local_inc(&cpu_buffer->dropped_events);
 			goto out_reset;
+		}
 
 		ret = rb_handle_head_page(cpu_buffer,
 					  tail_page,
@@ -2720,8 +2725,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
  * and not the length of the event which would hold the header.
  */
 int ring_buffer_write(struct ring_buffer *buffer,
-			unsigned long length,
-			void *data)
+		      unsigned long length,
+		      void *data)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event;
@@ -2929,12 +2934,12 @@ rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
  * @buffer: The ring buffer
  * @cpu: The per CPU buffer to read from.
  */
-unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
+u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
 {
 	unsigned long flags;
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct buffer_page *bpage;
-	unsigned long ret;
+	u64 ret = 0;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 0;
@@ -2949,7 +2954,8 @@ unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
 		bpage = cpu_buffer->reader_page;
 	else
 		bpage = rb_set_head_page(cpu_buffer);
-	ret = bpage->page->time_stamp;
+	if (bpage)
+		ret = bpage->page->time_stamp;
 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	return ret;
@@ -2995,7 +3001,8 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
 
 /**
- * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
+ * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
+ * buffer wrapping around (only if RB_FL_OVERWRITE is on).
  * @buffer: The ring buffer
  * @cpu: The per CPU buffer to get the number of overruns from
  */
@@ -3015,7 +3022,9 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
 
 /**
- * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
+ * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
+ * commits failing due to the buffer wrapping around while there are uncommitted
+ * events, such as during an interrupt storm.
  * @buffer: The ring buffer
  * @cpu: The per CPU buffer to get the number of overruns from
  */
@@ -3036,6 +3045,28 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
 
 /**
+ * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
+ * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
+ * @buffer: The ring buffer
+ * @cpu: The per CPU buffer to get the number of overruns from
+ */
+unsigned long
+ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	unsigned long ret;
+
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
+		return 0;
+
+	cpu_buffer = buffer->buffers[cpu];
+	ret = local_read(&cpu_buffer->dropped_events);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
+
+/**
  * ring_buffer_entries - get the number of entries in a buffer
  * @buffer: The ring buffer
 *
@@ -3260,6 +3291,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	 * Splice the empty reader page into the list around the head.
 	 */
 	reader = rb_set_head_page(cpu_buffer);
+	if (!reader)
+		goto out;
 	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
 	cpu_buffer->reader_page->list.prev = reader->list.prev;
 
@@ -3778,12 +3811,17 @@ void
 ring_buffer_read_finish(struct ring_buffer_iter *iter)
 {
 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+	unsigned long flags;
 
 	/*
 	 * Ring buffer is disabled from recording, here's a good place
 	 * to check the integrity of the ring buffer.
+	 * Must prevent readers from trying to read, as the check
+	 * clears the HEAD page and readers require it.
 	 */
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	rb_check_pages(cpu_buffer);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	atomic_dec(&cpu_buffer->record_disabled);
 	atomic_dec(&cpu_buffer->buffer->resize_disabled);
@@ -3864,9 +3902,10 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 	local_set(&cpu_buffer->reader_page->page->commit, 0);
 	cpu_buffer->reader_page->read = 0;
 
-	local_set(&cpu_buffer->commit_overrun, 0);
 	local_set(&cpu_buffer->entries_bytes, 0);
 	local_set(&cpu_buffer->overrun, 0);
+	local_set(&cpu_buffer->commit_overrun, 0);
+	local_set(&cpu_buffer->dropped_events, 0);
 	local_set(&cpu_buffer->entries, 0);
 	local_set(&cpu_buffer->committing, 0);
 	local_set(&cpu_buffer->commits, 0);
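Usage note (not part of the diff above): a minimal sketch of how a caller might report the new per-CPU dropped-events counter alongside the existing overrun counters. Only the ring_buffer_*_cpu() accessors come from ring_buffer.c; the rb_stats struct and report_rb_stats() helper are hypothetical illustrations, not kernel code.

/*
 * Sketch only: read the three per-CPU loss counters exported by
 * ring_buffer.c and print them. rb_stats and report_rb_stats() are
 * made-up names for illustration; the accessors are the real
 * exported functions.
 */
#include <linux/ring_buffer.h>
#include <linux/printk.h>

struct rb_stats {
	unsigned long overrun;		/* entries overwritten (RB_FL_OVERWRITE on) */
	unsigned long commit_overrun;	/* entries lost while commits were pending */
	unsigned long dropped_events;	/* writes rejected when full (RB_FL_OVERWRITE off) */
};

static void report_rb_stats(struct ring_buffer *buffer, int cpu)
{
	struct rb_stats st = {
		.overrun	= ring_buffer_overrun_cpu(buffer, cpu),
		.commit_overrun	= ring_buffer_commit_overrun_cpu(buffer, cpu),
		.dropped_events	= ring_buffer_dropped_events_cpu(buffer, cpu),
	};

	pr_info("cpu%d: overrun=%lu commit_overrun=%lu dropped_events=%lu\n",
		cpu, st.overrun, st.commit_overrun, st.dropped_events);
}

Per the kernel-doc comments added above, overrun only grows when overwrite mode is on (old events discarded by wrapping), while dropped_events only grows when overwrite mode is off (new writes rejected because the buffer is full), so the two counters describe complementary failure modes.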