aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSteven Rostedt <srostedt@redhat.com>2012-05-14 17:02:33 -0400
committerSteven Rostedt <rostedt@goodmis.org>2012-05-16 19:50:23 -0400
commit659f451ff21315ebfeeb46b9adccee8ce1b52c25 (patch)
treeda818329ad9c47e3a5c5414e37e836cb8bda5bec
parent5040b4b7bcc26a311c799d46f67174bcb20d05dd (diff)
ring-buffer: Add integrity check at end of iter read
There used to be ring buffer integrity checks after updating the size of the ring buffer. But now that the ring buffer can modify the size while the system is running, the integrity checks were removed, as they require the ring buffer to be disabled to perform the check. Move the integrity check to the reading of the ring buffer via the iterator reads (the "trace" file). As reading via an iterator requires disabling the ring buffer, it is a perfect place to have it. If the ring buffer happens to be disabled when updating the size, we still perform the integrity check. Cc: Vaibhav Nagarnaik <vnagarnaik@google.com> Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-rw-r--r--kernel/trace/ring_buffer.c29
1 files changed, 29 insertions, 0 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index d673ef03d16d..e0573c523b5c 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1599,6 +1599,29 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
1599 } 1599 }
1600 1600
1601 out: 1601 out:
1602 /*
1603 * The ring buffer resize can happen with the ring buffer
1604 * enabled, so that the update disturbs the tracing as little
1605 * as possible. But if the buffer is disabled, we do not need
1606 * to worry about that, and we can take the time to verify
1607 * that the buffer is not corrupt.
1608 */
1609 if (atomic_read(&buffer->record_disabled)) {
1610 atomic_inc(&buffer->record_disabled);
1611 /*
1612 * Even though the buffer was disabled, we must make sure
1613 * that it is truly disabled before calling rb_check_pages.
1614 * There could have been a race between checking
1615 * record_disable and incrementing it.
1616 */
1617 synchronize_sched();
1618 for_each_buffer_cpu(buffer, cpu) {
1619 cpu_buffer = buffer->buffers[cpu];
1620 rb_check_pages(cpu_buffer);
1621 }
1622 atomic_dec(&buffer->record_disabled);
1623 }
1624
1602 mutex_unlock(&buffer->mutex); 1625 mutex_unlock(&buffer->mutex);
1603 return size; 1626 return size;
1604 1627
@@ -3750,6 +3773,12 @@ ring_buffer_read_finish(struct ring_buffer_iter *iter)
3750{ 3773{
3751 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 3774 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3752 3775
3776 /*
3777 * Ring buffer is disabled from recording, here's a good place
3778 * to check the integrity of the ring buffer.
3779 */
3780 rb_check_pages(cpu_buffer);
3781
3753 atomic_dec(&cpu_buffer->record_disabled); 3782 atomic_dec(&cpu_buffer->record_disabled);
3754 atomic_dec(&cpu_buffer->buffer->resize_disabled); 3783 atomic_dec(&cpu_buffer->buffer->resize_disabled);
3755 kfree(iter); 3784 kfree(iter);