author     Steven Rostedt <srostedt@redhat.com>        2009-11-17 08:43:01 -0500
committer  Steven Rostedt <rostedt@goodmis.org>        2009-11-17 08:43:01 -0500
commit     5a50e33cc916f6a81cb96f0f24f6a88c9ab78b79 (patch)
tree       cc00c32414107ade5492be200fcb1bc882968cae /kernel/trace/ring_buffer.c
parent     8b2a5dac7859dd1954095fce8b6445c3ceb36ef6 (diff)
ring-buffer: Move access to commit_page up into function used
With the change in the way commits are processed, where a commit only happens
at the outermost level, we no longer need to worry about a commit ending after
rb_start_commit() has been called. The code used to grab the commit page
before the tail page to prevent a possible race, but that race no longer
exists with the rb_start_commit()/rb_end_commit() interface.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
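
The race being removed hinges on the nesting rule enforced by rb_start_commit()/rb_end_commit(): the commit page only advances when the outermost writer finishes, so a nested (interrupting) writer can never complete a commit underneath it. The sketch below is a simplified, self-contained illustration of that idea, not the kernel code: the names per_cpu_buffer, begin_write() and finish_write() are invented for the example, and C11 atomics stand in for the kernel's local_t operations and barriers.

#include <stdatomic.h>
#include <stdio.h>

/* Toy per-CPU buffer state: only the fields needed for the illustration. */
struct per_cpu_buffer {
	atomic_int committing;   /* nesting depth of in-flight writers   */
	int        tail_page;    /* where new events are being reserved  */
	int        commit_page;  /* last page fully committed to readers */
};

/* Rough counterpart of rb_start_commit(): record that a writer is active.
 * An interrupt handler that also writes simply nests the count. */
static void begin_write(struct per_cpu_buffer *b)
{
	atomic_fetch_add(&b->committing, 1);
}

/* Rough counterpart of rb_end_commit(): only the outermost writer, the one
 * that drops the count back to zero, advances the commit page. A nested
 * writer can never finish a commit while the outer one is still between
 * begin_write() and finish_write(). */
static void finish_write(struct per_cpu_buffer *b)
{
	if (atomic_fetch_sub(&b->committing, 1) == 1)
		b->commit_page = b->tail_page;
}

int main(void)
{
	struct per_cpu_buffer buf = { 0 };

	begin_write(&buf);        /* outer writer reserves space          */
	buf.tail_page = 1;        /* ...and moves the tail page           */

	begin_write(&buf);        /* "interrupt" nests a second writer    */
	buf.tail_page = 2;
	finish_write(&buf);       /* nested end: commit_page stays put    */
	printf("after nested end: commit_page=%d\n", buf.commit_page); /* 0 */

	finish_write(&buf);       /* outermost end: commit page advances  */
	printf("after outer end:  commit_page=%d\n", buf.commit_page); /* 2 */
	return 0;
}

Because a nested writer can never move the commit page out from under the outer one, rb_move_tail() may read cpu_buffer->commit_page itself at any point inside the commit window; the caller no longer needs to sample it early behind a barrier() and pass it down, which is exactly what the diff below removes.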
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--  kernel/trace/ring_buffer.c  9
1 file changed, 3 insertions, 6 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 3ffa502fb243..4b8293fa545e 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1785,9 +1785,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 static struct ring_buffer_event *
 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	     unsigned long length, unsigned long tail,
-	     struct buffer_page *commit_page,
 	     struct buffer_page *tail_page, u64 *ts)
 {
+	struct buffer_page *commit_page = cpu_buffer->commit_page;
 	struct ring_buffer *buffer = cpu_buffer->buffer;
 	struct buffer_page *next_page;
 	int ret;
@@ -1890,13 +1890,10 @@ static struct ring_buffer_event *
 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		  unsigned type, unsigned long length, u64 *ts)
 {
-	struct buffer_page *tail_page, *commit_page;
+	struct buffer_page *tail_page;
 	struct ring_buffer_event *event;
 	unsigned long tail, write;
 
-	commit_page = cpu_buffer->commit_page;
-	/* we just need to protect against interrupts */
-	barrier();
 	tail_page = cpu_buffer->tail_page;
 	write = local_add_return(length, &tail_page->write);
 
@@ -1907,7 +1904,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	/* See if we shot pass the end of this buffer page */
 	if (write > BUF_PAGE_SIZE)
 		return rb_move_tail(cpu_buffer, length, tail,
-				    commit_page, tail_page, ts);
+				    tail_page, ts);
 
 	/* We reserved something on the buffer */