diff options
author | Steven Rostedt <srostedt@redhat.com> | 2009-05-01 19:40:05 -0400 |
---|---|---|
committer | Steven Rostedt <rostedt@goodmis.org> | 2009-05-05 16:58:24 -0400 |
commit | afbab76a62b69ea6197e19727d4b8a8aef8deb25 (patch) | |
tree | cfde06582fb3dbb7f8c00c7d1a8a57b0b0e9d0d1 /kernel/trace/ring_buffer.c | |
parent | 778c55d44eb4f5f658915ed631d68ed9d1ac3ad1 (diff) |
ring-buffer: have read page swap increment counter with page entries
In the swap-page ring buffer code that is used by the ftrace splice code,
we previously scanned the page, incrementing the counter once per entry read.
Since the page already records the number of entries it holds, we can simply add that count directly.
[ Impact: speed up reading page from ring buffer ]
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r-- | kernel/trace/ring_buffer.c | 28 |
1 file changed, 3 insertions, 25 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 342eacc4baa8..9e42a742a3f9 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -2785,28 +2785,6 @@ out: | |||
2785 | } | 2785 | } |
2786 | EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); | 2786 | EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); |
2787 | 2787 | ||
2788 | static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer, | ||
2789 | struct buffer_data_page *bpage, | ||
2790 | unsigned int offset) | ||
2791 | { | ||
2792 | struct ring_buffer_event *event; | ||
2793 | unsigned long head; | ||
2794 | |||
2795 | __raw_spin_lock(&cpu_buffer->lock); | ||
2796 | for (head = offset; head < local_read(&bpage->commit); | ||
2797 | head += rb_event_length(event)) { | ||
2798 | |||
2799 | event = __rb_data_page_index(bpage, head); | ||
2800 | if (RB_WARN_ON(cpu_buffer, rb_null_event(event))) | ||
2801 | return; | ||
2802 | /* Only count data entries */ | ||
2803 | if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX) | ||
2804 | continue; | ||
2805 | cpu_buffer->read++; | ||
2806 | } | ||
2807 | __raw_spin_unlock(&cpu_buffer->lock); | ||
2808 | } | ||
2809 | |||
2810 | /** | 2788 | /** |
2811 | * ring_buffer_alloc_read_page - allocate a page to read from buffer | 2789 | * ring_buffer_alloc_read_page - allocate a page to read from buffer |
2812 | * @buffer: the buffer to allocate for. | 2790 | * @buffer: the buffer to allocate for. |
@@ -2977,6 +2955,9 @@ int ring_buffer_read_page(struct ring_buffer *buffer, | |||
2977 | /* we copied everything to the beginning */ | 2955 | /* we copied everything to the beginning */ |
2978 | read = 0; | 2956 | read = 0; |
2979 | } else { | 2957 | } else { |
2958 | /* update the entry counter */ | ||
2959 | cpu_buffer->read += local_read(&reader->entries); | ||
2960 | |||
2980 | /* swap the pages */ | 2961 | /* swap the pages */ |
2981 | rb_init_page(bpage); | 2962 | rb_init_page(bpage); |
2982 | bpage = reader->page; | 2963 | bpage = reader->page; |
@@ -2985,9 +2966,6 @@ int ring_buffer_read_page(struct ring_buffer *buffer, | |||
2985 | local_set(&reader->entries, 0); | 2966 | local_set(&reader->entries, 0); |
2986 | reader->read = 0; | 2967 | reader->read = 0; |
2987 | *data_page = bpage; | 2968 | *data_page = bpage; |
2988 | |||
2989 | /* update the entry counter */ | ||
2990 | rb_remove_entries(cpu_buffer, bpage, read); | ||
2991 | } | 2969 | } |
2992 | ret = read; | 2970 | ret = read; |
2993 | 2971 | ||