aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace/ring_buffer.c
diff options
context:
space:
mode:
authorSteven Rostedt <srostedt@redhat.com>2010-10-20 10:58:02 -0400
committerSteven Rostedt <rostedt@goodmis.org>2010-10-20 10:58:02 -0400
commite8bc43e84fada397af1b677b07dbf26e6ac78fcc (patch)
tree60f6fe1acbd15fcd9fdc051660479f300c164ab6 /kernel/trace/ring_buffer.c
parent747e94ae3d1b4c9bf5380e569f614eb9040b79e7 (diff)
ring-buffer: Pass timestamp by value and not by reference
The original code for the ring buffer had locations that modified the timestamp, and that change was used by the callers. Now the timestamp is not reused by the callers, so there is no reason to pass it by reference. Changing the call to pass by value lets gcc optimize the code a bit more: it can store the timestamp in a register and not worry about updating the reference. Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--kernel/trace/ring_buffer.c20
1 file changed, 10 insertions, 10 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 0b88df849a59..c8ce6bde7fa4 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1829,7 +1829,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
1829static noinline struct ring_buffer_event * 1829static noinline struct ring_buffer_event *
1830rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, 1830rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
1831 unsigned long length, unsigned long tail, 1831 unsigned long length, unsigned long tail,
1832 struct buffer_page *tail_page, u64 *ts) 1832 struct buffer_page *tail_page, u64 ts)
1833{ 1833{
1834 struct buffer_page *commit_page = cpu_buffer->commit_page; 1834 struct buffer_page *commit_page = cpu_buffer->commit_page;
1835 struct ring_buffer *buffer = cpu_buffer->buffer; 1835 struct ring_buffer *buffer = cpu_buffer->buffer;
@@ -1912,8 +1912,8 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
1912 * Nested commits always have zero deltas, so 1912 * Nested commits always have zero deltas, so
1913 * just reread the time stamp 1913 * just reread the time stamp
1914 */ 1914 */
1915 *ts = rb_time_stamp(buffer); 1915 ts = rb_time_stamp(buffer);
1916 next_page->page->time_stamp = *ts; 1916 next_page->page->time_stamp = ts;
1917 } 1917 }
1918 1918
1919 out_again: 1919 out_again:
@@ -1932,7 +1932,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
1932 1932
1933static struct ring_buffer_event * 1933static struct ring_buffer_event *
1934__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, 1934__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
1935 unsigned type, unsigned long length, u64 *ts) 1935 unsigned type, unsigned long length, u64 ts)
1936{ 1936{
1937 struct buffer_page *tail_page; 1937 struct buffer_page *tail_page;
1938 struct ring_buffer_event *event; 1938 struct ring_buffer_event *event;
@@ -1965,7 +1965,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
1965 * its timestamp. 1965 * its timestamp.
1966 */ 1966 */
1967 if (!tail) 1967 if (!tail)
1968 tail_page->page->time_stamp = *ts; 1968 tail_page->page->time_stamp = ts;
1969 1969
1970 return event; 1970 return event;
1971} 1971}
@@ -2008,7 +2008,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2008 2008
2009static int 2009static int
2010rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, 2010rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2011 u64 *ts, u64 *delta) 2011 u64 ts, u64 *delta)
2012{ 2012{
2013 struct ring_buffer_event *event; 2013 struct ring_buffer_event *event;
2014 int ret; 2014 int ret;
@@ -2016,7 +2016,7 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2016 WARN_ONCE(*delta > (1ULL << 59), 2016 WARN_ONCE(*delta > (1ULL << 59),
2017 KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n", 2017 KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n",
2018 (unsigned long long)*delta, 2018 (unsigned long long)*delta,
2019 (unsigned long long)*ts, 2019 (unsigned long long)ts,
2020 (unsigned long long)cpu_buffer->write_stamp); 2020 (unsigned long long)cpu_buffer->write_stamp);
2021 2021
2022 /* 2022 /*
@@ -2051,7 +2051,7 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2051 event->array[0] = 0; 2051 event->array[0] = 0;
2052 } 2052 }
2053 } 2053 }
2054 cpu_buffer->write_stamp = *ts; 2054 cpu_buffer->write_stamp = ts;
2055 /* let the caller know this was the commit */ 2055 /* let the caller know this was the commit */
2056 ret = 1; 2056 ret = 1;
2057 } else { 2057 } else {
@@ -2175,7 +2175,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
2175 delta = diff; 2175 delta = diff;
2176 if (unlikely(test_time_stamp(delta))) { 2176 if (unlikely(test_time_stamp(delta))) {
2177 2177
2178 commit = rb_add_time_stamp(cpu_buffer, &ts, &delta); 2178 commit = rb_add_time_stamp(cpu_buffer, ts, &delta);
2179 if (commit == -EBUSY) 2179 if (commit == -EBUSY)
2180 goto out_fail; 2180 goto out_fail;
2181 2181
@@ -2187,7 +2187,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
2187 } 2187 }
2188 2188
2189 get_event: 2189 get_event:
2190 event = __rb_reserve_next(cpu_buffer, 0, length, &ts); 2190 event = __rb_reserve_next(cpu_buffer, 0, length, ts);
2191 if (unlikely(PTR_ERR(event) == -EAGAIN)) 2191 if (unlikely(PTR_ERR(event) == -EAGAIN))
2192 goto again; 2192 goto again;
2193 2193