path: root/kernel/trace/trace.c
author	Vaibhav Nagarnaik <vnagarnaik@google.com>	2012-05-03 21:59:50 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2012-05-16 16:18:57 -0400
commit	83f40318dab00e3298a1f6d0b12ac025e84e478d (patch)
tree	efa4aa0d79337ec7e56d667d513ae69f1b2e38f7 /kernel/trace/trace.c
parent	6edb2a8a385f0cdef51dae37ff23e74d76d8a6ce (diff)
ring-buffer: Make removal of ring buffer pages atomic
This patch adds the capability to remove pages from a ring buffer without destroying any existing data in it.

This is done by removing the pages after the tail page. This makes sure that first all the empty pages in the ring buffer are removed. If the head page is one in the list of pages to be removed, then the page after the removed ones is made the head page. This removes the oldest data from the ring buffer and keeps the latest data around to be read.

To do this in a non-racey manner, tracing is stopped for a very short time while the pages to be removed are identified and unlinked from the ring buffer. The pages are freed after the tracing is restarted to minimize the time needed to stop tracing.

The context in which the pages from the per-cpu ring buffer are removed runs on the respective CPU. This minimizes the events not traced to only NMI trace contexts.

Link: http://lkml.kernel.org/r/1336096792-25373-1-git-send-email-vnagarnaik@google.com
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Laurent Chavey <chavey@google.com>
Cc: Justin Teravest <teravest@google.com>
Cc: David Sharp <dhsharp@google.com>
Signed-off-by: Vaibhav Nagarnaik <vnagarnaik@google.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
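[Editorial note] A rough sketch of the page-removal idea described above, on a simplified circular page list rather than the actual ring_buffer.c structures. struct buf_page, struct ring and remove_pages() here are hypothetical illustrations, not kernel symbols:

#include <stddef.h>

/* Hypothetical, simplified stand-in for the ring buffer's page list. */
struct buf_page {
	struct buf_page *next;
	struct buf_page *prev;
	void *data;
};

struct ring {
	struct buf_page *head;		/* oldest unread page */
	struct buf_page *tail;		/* page currently being written */
	unsigned long nr_pages;
};

/*
 * Unlink 'nr' pages starting after the tail page, so the empty pages
 * go first.  If the head page falls inside the removed span, the page
 * after the span becomes the new head: the oldest data is dropped and
 * the latest data is kept.  Assumes 1 <= nr < ring->nr_pages.  Returns
 * the unlinked span so the caller can free it later.
 */
static struct buf_page *remove_pages(struct ring *ring, unsigned long nr)
{
	struct buf_page *first = ring->tail->next;
	struct buf_page *last = first;
	struct buf_page *p;
	unsigned long i;
	int head_removed = 0;

	for (i = 1; i < nr; i++)
		last = last->next;

	/* Did the removed span cover the head page? */
	for (p = first; ; p = p->next) {
		if (p == ring->head)
			head_removed = 1;
		if (p == last)
			break;
	}

	/* Unlink [first, last] from the circular list. */
	ring->tail->next = last->next;
	last->next->prev = ring->tail;

	if (head_removed)
		ring->head = ring->tail->next;

	ring->nr_pages -= nr;

	/* Detach the removed span so it can be freed by the caller. */
	first->prev = NULL;
	last->next = NULL;
	return first;
}

Freeing the returned span is left to the caller, mirroring the commit's point that the pages are freed only after tracing is restarted.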
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c	20
1 file changed, 1 insertion, 19 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d1b3469b62e3..dfbd86cc4876 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3076,20 +3076,10 @@ static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
 
 static ssize_t tracing_resize_ring_buffer(unsigned long size, int cpu_id)
 {
-	int cpu, ret = size;
+	int ret = size;
 
 	mutex_lock(&trace_types_lock);
 
-	tracing_stop();
-
-	/* disable all cpu buffers */
-	for_each_tracing_cpu(cpu) {
-		if (global_trace.data[cpu])
-			atomic_inc(&global_trace.data[cpu]->disabled);
-		if (max_tr.data[cpu])
-			atomic_inc(&max_tr.data[cpu]->disabled);
-	}
-
 	if (cpu_id != RING_BUFFER_ALL_CPUS) {
 		/* make sure, this cpu is enabled in the mask */
 		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
@@ -3103,14 +3093,6 @@ static ssize_t tracing_resize_ring_buffer(unsigned long size, int cpu_id)
 	ret = -ENOMEM;
 
 out:
-	for_each_tracing_cpu(cpu) {
-		if (global_trace.data[cpu])
-			atomic_dec(&global_trace.data[cpu]->disabled);
-		if (max_tr.data[cpu])
-			atomic_dec(&max_tr.data[cpu]->disabled);
-	}
-
-	tracing_start();
 	mutex_unlock(&trace_types_lock);
 
 	return ret;
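[Editorial note] The caller-side simplification shown above is possible because, after this patch, the page add/remove work is driven from inside the ring buffer code on the CPU that owns each per-cpu buffer, so tracing_resize_ring_buffer() no longer has to stop tracing and disable every cpu buffer itself. A minimal sketch of that per-CPU pattern, using hypothetical names (rb_cpu_buffer, rb_update_pages_work, rb_resize_cpu) rather than the actual ring_buffer.c symbols:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/completion.h>

/* Hypothetical per-cpu buffer; the real one lives in kernel/trace/ring_buffer.c. */
struct rb_cpu_buffer {
	int			cpu;
	struct work_struct	update_pages_work;	/* INIT_WORK()ed at allocation */
	struct completion	update_done;		/* init_completion()ed at allocation */
	/* ... page list, head, tail, number of pages to add/remove ... */
};

/* Runs on cpu_buffer->cpu, so page unlinking races only with NMIs on that CPU. */
static void rb_update_pages_work(struct work_struct *work)
{
	struct rb_cpu_buffer *cpu_buffer =
		container_of(work, struct rb_cpu_buffer, update_pages_work);

	/* add or remove pages after the tail page, then signal the waiter */
	complete(&cpu_buffer->update_done);
}

/* Caller side: push the update onto the owning CPU and wait for it to finish. */
static void rb_resize_cpu(struct rb_cpu_buffer *cpu_buffer)
{
	schedule_work_on(cpu_buffer->cpu, &cpu_buffer->update_pages_work);
	wait_for_completion(&cpu_buffer->update_done);
}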