author     Steven Rostedt <srostedt@redhat.com>    2009-09-04 14:11:34 -0400
committer  Steven Rostedt <rostedt@goodmis.org>    2009-09-04 19:38:42 -0400
commit     62f0b3eb5cb58931a02ee4e599e19c80a171e351
tree       344b3b8f55e6162f7fa08316aa6fde333b304c04 /kernel/trace/ring_buffer.c
parent     e8165dbb03ed04d798163ee512074b9a9466a9c8
ring-buffer: check for swapped buffers in start of committing
Because the irqsoff tracer can swap an internal CPU buffer, it is possible that a swap happens after the start of a write but before the committing bit is set (the committing bit disables swapping). This patch adds a check for that case and fails the write if it detects it.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
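The race the message describes is easiest to see in a miniature user-space model. The sketch below is illustrative only, not the kernel code: fake_buffer, fake_cpu_buffer, start_commit, and reserve_event are hypothetical names, C11 atomics stand in for the kernel's local_inc()/local_dec(), barrier(), and ACCESS_ONCE(), and the swap is simulated by a plain store. What it demonstrates is the back-out handshake the patch introduces: announce the commit, re-check the owner pointer, and undo the announcement if a swap got there first.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_buffer { int id; };                  /* hypothetical stand-in */

struct fake_cpu_buffer {
	_Atomic(struct fake_buffer *) buffer;    /* owner; a swap changes this */
	atomic_int committing;                   /* nonzero while a write is in flight */
	atomic_int commits;
};

/* Stands in for rb_start_commit(): announce that a commit is in progress. */
static void start_commit(struct fake_cpu_buffer *cb)
{
	atomic_fetch_add(&cb->committing, 1);
	atomic_fetch_add(&cb->commits, 1);
}

/*
 * Stands in for the hunk this patch adds to rb_reserve_next_event():
 * after announcing the commit, re-read the owner. If it changed, a swap
 * slipped in first, so undo both counters and fail the write.
 */
static bool reserve_event(struct fake_buffer *buffer, struct fake_cpu_buffer *cb)
{
	start_commit(cb);
	if (atomic_load(&cb->buffer) != buffer) {
		atomic_fetch_sub(&cb->committing, 1);
		atomic_fetch_sub(&cb->commits, 1);
		return false;
	}
	return true;  /* committing is now nonzero, which blocks further swaps */
}

int main(void)
{
	struct fake_buffer buf = { 0 }, other = { 1 };
	struct fake_cpu_buffer cb = { .buffer = &buf };

	printf("reserve on owner:   %d\n", reserve_event(&buf, &cb)); /* prints 1 */
	atomic_store(&cb.buffer, &other);    /* simulate a swap winning the race */
	printf("reserve after swap: %d\n", reserve_event(&buf, &cb)); /* prints 0 */
	return 0;
}

Built with e.g. cc -std=c11, the second reserve fails, just as rb_reserve_next_event() now returns NULL when its cpu_buffer was swapped out from under it.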
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--	kernel/trace/ring_buffer.c	20
1 file changed, 17 insertions(+), 3 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index f83a42a79ee8..1766c0e8db5a 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2073,7 +2073,8 @@ static void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
 }
 
 static struct ring_buffer_event *
-rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
+rb_reserve_next_event(struct ring_buffer *buffer,
+		      struct ring_buffer_per_cpu *cpu_buffer,
 		      unsigned long length)
 {
 	struct ring_buffer_event *event;
@@ -2083,6 +2084,19 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 
 	rb_start_commit(cpu_buffer);
 
+	/*
+	 * Due to the ability to swap a cpu buffer from a buffer
+	 * it is possible it was swapped before we committed.
+	 * (committing stops a swap). We check for it here and
+	 * if it happened, we have to fail the write.
+	 */
+	barrier();
+	if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
+		local_dec(&cpu_buffer->committing);
+		local_dec(&cpu_buffer->commits);
+		return NULL;
+	}
+
 	length = rb_calculate_event_length(length);
  again:
 	/*
@@ -2243,7 +2257,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 	if (length > BUF_MAX_DATA_SIZE)
 		goto out;
 
-	event = rb_reserve_next_event(cpu_buffer, length);
+	event = rb_reserve_next_event(buffer, cpu_buffer, length);
 	if (!event)
 		goto out;
 
@@ -2476,7 +2490,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	if (length > BUF_MAX_DATA_SIZE)
 		goto out;
 
-	event = rb_reserve_next_event(cpu_buffer, length);
+	event = rb_reserve_next_event(buffer, cpu_buffer, length);
 	if (!event)
 		goto out;
 
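A note on the ordering in the new check: barrier() is a compiler-only barrier and ACCESS_ONCE() forces a real load, so the compiler can neither hoist the read of cpu_buffer->buffer above the committing increment done in rb_start_commit() nor satisfy it from a stale, previously cached value. In kernels of this vintage the two are defined roughly as follows (see include/linux/compiler.h and compiler-gcc.h):

#define barrier()      __asm__ __volatile__("" : : : "memory")
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

No SMP memory barrier is issued here; presumably that suffices because the swap performed by the irqsoff tracer happens on the same CPU as the writer it interrupts.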