Diffstat (limited to 'kernel/trace/ring_buffer.c'):
 kernel/trace/ring_buffer.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index bca96377fd4e..0b88df849a59 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1823,7 +1823,10 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	local_sub(length, &tail_page->write);
 }
 
-static struct ring_buffer_event *
+/*
+ * This is the slow path, force gcc not to inline it.
+ */
+static noinline struct ring_buffer_event *
 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	     unsigned long length, unsigned long tail,
 	     struct buffer_page *tail_page, u64 *ts)
@@ -1943,7 +1946,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	tail = write - length;
 
 	/* See if we shot pass the end of this buffer page */
-	if (write > BUF_PAGE_SIZE)
+	if (unlikely(write > BUF_PAGE_SIZE))
 		return rb_move_tail(cpu_buffer, length, tail,
 				    tail_page, ts);
 
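The patch keeps the hot reservation path small: rb_move_tail() is annotated noinline so gcc does not fold the page-overflow slow path into __rb_reserve_next(), and the overflow check is wrapped in unlikely() so gcc can lay that branch out of line. Below is a minimal standalone sketch (not kernel code) of how these two annotations are typically defined and used; the function names handle_overflow()/reserve() and BUF_SIZE are hypothetical stand-ins for rb_move_tail()/__rb_reserve_next() and BUF_PAGE_SIZE, and the macro definitions are simplified approximations of what the kernel's compiler headers provide.

	/* Minimal userspace sketch of the noinline + unlikely() pattern. */
	#include <stdio.h>

	/* Keep the cold path out of the caller's instruction stream. */
	#define noinline	__attribute__((noinline))
	/* Hint to gcc that the condition is expected to be false. */
	#define unlikely(x)	__builtin_expect(!!(x), 0)

	#define BUF_SIZE	4096UL	/* stand-in for BUF_PAGE_SIZE */

	/* Rarely taken overflow handler: plays the role of rb_move_tail(). */
	static noinline int handle_overflow(unsigned long write, unsigned long length)
	{
		printf("overflow: write=%lu length=%lu\n", write, length);
		return -1;
	}

	/* Hot path: plays the role of __rb_reserve_next(). */
	static int reserve(unsigned long write, unsigned long length)
	{
		/* Shooting past the end of the page is the rare case. */
		if (unlikely(write > BUF_SIZE))
			return handle_overflow(write, length);
		return 0;
	}

	int main(void)
	{
		printf("fast=%d slow=%d\n", reserve(100, 32), reserve(5000, 32));
		return 0;
	}

With this shape, gcc keeps reserve() compact and places the handle_overflow() call (and its argument setup) away from the common, in-page reservation sequence, which is the same layout effect the patch is after in the ring buffer code.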