author     Steven Rostedt <srostedt@redhat.com>    2008-12-23 11:32:25 -0500
committer  Ingo Molnar <mingo@elte.hu>             2008-12-23 12:45:26 -0500
commit     98db8df777438e16ad0f44a0fba05ebbdb73db8d (patch)
tree       f2cde3dbe3000f4bafd0c42df43f736b7e5a648b /kernel/trace/ring_buffer.c
parent     a8ccf1d6f60e3e6ae63122e02378cd4d40dd4aac (diff)
ring-buffer: prevent false positive warning
Impact: eliminate false WARN_ON message

If an interrupt goes off after the local variable tail_page is set
and before the write index of that page is incremented, the interrupt
can push the commit forward to the next page.

Later, a check compares the next page with the last committed page to
see whether interrupts pushed the buffer all the way around the ring.
This can produce a false positive if an interrupt pushed the commit
page forward as described above.
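The window is easier to see in a standalone model. Below is a minimal
userspace C sketch, illustrative only: the integer page indices and the
reserve()/simulated_interrupt() helpers are invented stand-ins for the
kernel code, and the "interrupt" is fired exactly inside the race
window.

#include <stdio.h>

#define NR_PAGES 4
#define NEXT(p) (((p) + 1) % NR_PAGES)

static int tail_page;   /* page the next write lands on */
static int commit_page; /* last fully committed page    */

/* Simulated interrupt: a nested writer fills the rest of the current
 * page and moves the commit forward, as the real code would. */
static void simulated_interrupt(void)
{
    tail_page = NEXT(tail_page);
    commit_page = tail_page;
}

/* One reservation attempt.  If snapshot_early is set, the commit page
 * is sampled before the race window (the fixed ordering); otherwise it
 * is read at check time (the buggy ordering). */
static void reserve(int snapshot_early)
{
    int commit = commit_page; /* the fix: sample before the window */
    int tail = tail_page;

    simulated_interrupt();    /* interrupt fires inside the window */

    int next_page = NEXT(tail);

    /* the wrap-around check from __rb_reserve_next() */
    if (next_page == (snapshot_early ? commit : commit_page))
        printf("false WARN_ON: writer appears to have wrapped\n");
    else
        printf("ok: no false positive\n");
}

int main(void)
{
    tail_page = commit_page = 0;
    reserve(0); /* buggy ordering: compares against moved commit */

    tail_page = commit_page = 0;
    reserve(1); /* fixed ordering: snapshot hides the bump       */
    return 0;
}

With the buggy ordering the check sees the commit page the interrupt
just advanced and fires a spurious warning; with the snapshot it
compares against the commit page as it stood when the reservation
began, which is the comparison the check actually intends.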
Thanks to Jiaying Zhang for finding this race.
Reported-by: Jiaying Zhang <jiayingz@google.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Cc: <stable@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace/ring_buffer.c')
 kernel/trace/ring_buffer.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index d03f4f44a823..76f34c0ef29c 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -962,12 +962,15 @@ static struct ring_buffer_event *
 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		  unsigned type, unsigned long length, u64 *ts)
 {
-	struct buffer_page *tail_page, *head_page, *reader_page;
+	struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
 	unsigned long tail, write;
 	struct ring_buffer *buffer = cpu_buffer->buffer;
 	struct ring_buffer_event *event;
 	unsigned long flags;
 
+	commit_page = cpu_buffer->commit_page;
+	/* we just need to protect against interrupts */
+	barrier();
 	tail_page = cpu_buffer->tail_page;
 	write = local_add_return(length, &tail_page->write);
 	tail = write - length;
@@ -993,7 +996,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	 * it all the way around the buffer, bail, and warn
 	 * about it.
 	 */
-	if (unlikely(next_page == cpu_buffer->commit_page)) {
+	if (unlikely(next_page == commit_page)) {
 		WARN_ON_ONCE(1);
 		goto out_unlock;
 	}
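Worth noting about the fix itself: barrier() is only a compiler
barrier, which suffices here because the race is against interrupts on
the same CPU rather than against other CPUs, so no smp_mb()-style
memory barrier is needed. All that matters is that the compiler cannot
reorder the load of cpu_buffer->commit_page past the load of
cpu_buffer->tail_page, so the wrap-around check later in the function
compares against a commit page sampled before the race window could
open.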