diff options
author | Steven Rostedt <srostedt@redhat.com> | 2009-09-03 10:02:09 -0400 |
---|---|---|
committer | Steven Rostedt <rostedt@goodmis.org> | 2009-09-04 11:28:39 -0400 |
commit | 7e9391cfedce34eb9786bfa69d7d545dc93ef930 (patch) | |
tree | e3901037e9269cbf10bd2952835ebcc573e5741d /kernel | |
parent | 1b959e18c4d6b4b981f887260b0f8e7939efa411 (diff) |
ring-buffer: fix ring_buffer_read crossing pages
When the ring buffer uses an iterator (static read mode, not on the
fly reading), when it crosses a page boundary, it will skip the first
entry on the next page. The reason is that the last entry of a page
is usually padding if the page is not full. The padding will not be
returned to the user.
The problem arises on ring_buffer_read because it also increments the
iterator. Because both the read and peek use the same rb_iter_peek,
the rb_iter_peek will return the padding but also increment to the next
item. This is because the ring_buffer_peek will not increment it
itself.
The ring_buffer_read will increment it again and then call rb_iter_peek
again to get the next item. But that will be the second item, not the
first one on the page.
The reason this never showed up before is that the ftrace utility
always calls ring_buffer_peek first and only uses ring_buffer_read
to increment to the next item. The ring_buffer_peek will always keep
the pointer to a valid item and not padding. This just hid the bug.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/trace/ring_buffer.c | 8 |
1 files changed, 4 insertions, 4 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index a05541a8fbae..9d939e7ca924 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -3286,19 +3286,19 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts) | |||
3286 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | 3286 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; |
3287 | unsigned long flags; | 3287 | unsigned long flags; |
3288 | 3288 | ||
3289 | again: | ||
3290 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 3289 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
3290 | again: | ||
3291 | event = rb_iter_peek(iter, ts); | 3291 | event = rb_iter_peek(iter, ts); |
3292 | if (!event) | 3292 | if (!event) |
3293 | goto out; | 3293 | goto out; |
3294 | 3294 | ||
3295 | if (event->type_len == RINGBUF_TYPE_PADDING) | ||
3296 | goto again; | ||
3297 | |||
3295 | rb_advance_iter(iter); | 3298 | rb_advance_iter(iter); |
3296 | out: | 3299 | out: |
3297 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 3300 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
3298 | 3301 | ||
3299 | if (event && event->type_len == RINGBUF_TYPE_PADDING) | ||
3300 | goto again; | ||
3301 | |||
3302 | return event; | 3302 | return event; |
3303 | } | 3303 | } |
3304 | EXPORT_SYMBOL_GPL(ring_buffer_read); | 3304 | EXPORT_SYMBOL_GPL(ring_buffer_read); |