author    Steven Rostedt (Red Hat) <rostedt@goodmis.org>    2015-11-17 14:03:11 -0500
committer Steven Rostedt <rostedt@goodmis.org>    2015-11-24 09:29:15 -0500
commit    8573636ea794fa088f459429e65e47d7776532cf (patch)
tree      d6d64d4272ae43e061f97c8a3c0c4aa523ed4a0e /kernel/trace
parent    3cbd6a43be932e56907abd21091314dc044175f2 (diff)
ring-buffer: Use READ_ONCE() for most tail_page access
As cpu_buffer->tail_page may be modified by interrupts at almost any time,
the flow of logic is very important. Do not let gcc get smart with
re-reading cpu_buffer->tail_page by adding READ_ONCE() around most of its
accesses.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
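To illustrate the hazard being fixed, here is a minimal userspace sketch; the simplified READ_ONCE() definition and every name below are illustrative stand-ins, not the kernel's actual macro (which lives in <linux/compiler.h>) or the ring-buffer code. Without the volatile access, the compiler may legally load the shared pointer again at each use, so two tests of it can see different values when an interrupt moves the tail page in between. Snapshotting it once keeps the flow of logic stable.

/* Hypothetical sketch only; compile with gcc or clang. */
#include <stdio.h>

#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

struct buffer_page { int id; };

static struct buffer_page page_a = { .id = 1 };

/* Stand-in for cpu_buffer->tail_page, which an interrupt may
 * update at almost any time. */
static struct buffer_page *tail = &page_a;

static void check_tail(struct buffer_page *expected)
{
	/* Snapshot the pointer exactly once; both comparisons below use
	 * the same value even if "tail" changes concurrently.  Testing
	 * "tail" directly twice would let the compiler emit two loads. */
	struct buffer_page *snap = READ_ONCE(tail);

	if (snap != expected)
		printf("tail moved to page %d\n", snap->id);
	else
		printf("tail still page %d\n", snap->id);
}

int main(void)
{
	check_tail(&page_a);
	return 0;
}

This mirrors the rb_handle_head_page() change in the diff below, where cpu_buffer->tail_page is read once into a local buffer_tail_page and then compared against two pages.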
Diffstat (limited to 'kernel/trace')
-rw-r--r--    kernel/trace/ring_buffer.c | 18
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 9c6045a27ba3..ab102e6259bc 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1036,7 +1036,7 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
	 * it is, then it is up to us to update the tail
	 * pointer.
	 */
-	if (tail_page == cpu_buffer->tail_page) {
+	if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
		/* Zero the write counter */
		unsigned long val = old_write & ~RB_WRITE_MASK;
		unsigned long eval = old_entries & ~RB_WRITE_MASK;
@@ -2036,12 +2036,15 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
	 * the tail page would have moved.
	 */
	if (ret == RB_PAGE_NORMAL) {
+		struct buffer_page *buffer_tail_page;
+
+		buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
		/*
		 * If the tail had moved passed next, then we need
		 * to reset the pointer.
		 */
-		if (cpu_buffer->tail_page != tail_page &&
-		    cpu_buffer->tail_page != next_page)
+		if (buffer_tail_page != tail_page &&
+		    buffer_tail_page != next_page)
			rb_head_page_set_normal(cpu_buffer, new_head,
						next_page,
						RB_PAGE_HEAD);
@@ -2362,7 +2365,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
	addr = (unsigned long)event;
	addr &= PAGE_MASK;

-	bpage = cpu_buffer->tail_page;
+	bpage = READ_ONCE(cpu_buffer->tail_page);

	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
		unsigned long write_mask =
@@ -2410,7 +2413,7 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
 again:
	max_count = cpu_buffer->nr_pages * 100;

-	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
+	while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) {
		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
			return;
		if (RB_WARN_ON(cpu_buffer,
@@ -2443,7 +2446,7 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
	 * and pushed the tail page forward, we will be left with
	 * a dangling commit that will never go forward.
	 */
-	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
+	if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)))
		goto again;
 }

@@ -2699,7 +2702,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
	if (unlikely(info->add_timestamp))
		info->length += RB_LEN_TIME_EXTEND;

-	tail_page = info->tail_page = cpu_buffer->tail_page;
+	/* Don't let the compiler play games with cpu_buffer->tail_page */
+	tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
	write = local_add_return(info->length, &tail_page->write);

	/* set write to only the index of the write */