path: root/kernel/trace/trace.c
author	Steven Rostedt <srostedt@redhat.com>	2010-03-12 19:56:00 -0500
committer	Steven Rostedt <rostedt@goodmis.org>	2010-03-12 20:30:21 -0500
commit	a2f8071428ed9a0f06865f417c962421c9a6b488 (patch)
tree	731d1e4f684fa5e5e27b56f1ed5f74d5945b20bd /kernel/trace/trace.c
parent	283740c619d211e34572cc93c8cdba92ccbdb9cc (diff)
tracing: Disable buffer switching when starting or stopping trace
When the trace iterator is read, tracing_start() and tracing_stop()
are called to stop tracing while the iterator is processing the
trace output.

These functions disable both the standard buffer and the max latency
buffer. But if the wakeup tracer is running, it can switch these
buffers between the two disables:

  buffer = global_trace.buffer;
  if (buffer)
      ring_buffer_record_disable(buffer);

        <<<--------- swap happens here

  buffer = max_tr.buffer;
  if (buffer)
      ring_buffer_record_disable(buffer);

What happens is that we disable the same buffer twice. On
tracing_start() we can likewise enable the same buffer twice.
Every ring_buffer_record_disable() must be matched with a
ring_buffer_record_enable(), or a buffer can be disabled permanently
or enabled prematurely, causing a bug where a reset happens while a
trace is committing.

This patch protects these two paths by taking the ftrace_max_lock to
prevent a switch from occurring.

Found with Li Zefan's ftrace_stress_test.

Cc: stable@kernel.org
Reported-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
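For context, the switch the message refers to is the buffer swap done on
the latency-tracer side: update_max_tr() in the same file exchanges
global_trace.buffer with max_tr.buffer while holding ftrace_max_lock,
which is why taking that same lock in tracing_start()/tracing_stop() is
enough to hold the swap off. A simplified sketch of that path (checks
and bookkeeping elided; not the verbatim source of this kernel version):

  void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
  {
          struct ring_buffer *buf = tr->buffer;

          arch_spin_lock(&ftrace_max_lock);       /* same lock the fix takes */

          /* the swap that can land between the two disables above */
          tr->buffer = max_tr.buffer;
          max_tr.buffer = buf;

          __update_max_tr(tr, tsk, cpu);
          arch_spin_unlock(&ftrace_max_lock);
  }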
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c | 9 +++++++++
1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 60de37bd0f75..484337d33959 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -950,6 +950,8 @@ void tracing_start(void)
                 goto out;
         }
 
+        /* Prevent the buffers from switching */
+        arch_spin_lock(&ftrace_max_lock);
 
         buffer = global_trace.buffer;
         if (buffer)
@@ -959,6 +961,8 @@ void tracing_start(void)
         if (buffer)
                 ring_buffer_record_enable(buffer);
 
+        arch_spin_unlock(&ftrace_max_lock);
+
         ftrace_start();
  out:
         spin_unlock_irqrestore(&tracing_start_lock, flags);
@@ -980,6 +984,9 @@ void tracing_stop(void)
         if (trace_stop_count++)
                 goto out;
 
+        /* Prevent the buffers from switching */
+        arch_spin_lock(&ftrace_max_lock);
+
         buffer = global_trace.buffer;
         if (buffer)
                 ring_buffer_record_disable(buffer);
@@ -988,6 +995,8 @@ void tracing_stop(void)
         if (buffer)
                 ring_buffer_record_disable(buffer);
 
+        arch_spin_unlock(&ftrace_max_lock);
+
  out:
         spin_unlock_irqrestore(&tracing_start_lock, flags);
 }
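To see why unmatched disables/enables are fatal, note that
ring_buffer_record_disable() and ring_buffer_record_enable() simply do
an atomic increment and decrement of a per-buffer record_disabled
count. A user-space toy model of the race the commit message describes
(illustrative only; toy_buffer and swap_buffers() are made-up names,
not kernel code):

  #include <stdio.h>

  struct toy_buffer { int record_disabled; };

  static struct toy_buffer a, b;  /* stand-ins for global_trace and max_tr */
  static struct toy_buffer *global = &a, *max = &b;

  static void swap_buffers(void)  /* what the wakeup tracer's swap does */
  {
          struct toy_buffer *tmp = global;
          global = max;
          max = tmp;
  }

  int main(void)
  {
          /* tracing_stop() without ftrace_max_lock held: */
          global->record_disabled++;  /* disables a */
          swap_buffers();             /* swap lands between the disables */
          max->record_disabled++;     /* disables a again; b never disabled */

          /* tracing_start(), with another ill-timed swap: */
          global->record_disabled--;  /* enables b, which was never disabled */
          swap_buffers();
          max->record_disabled--;     /* enables b again */

          /* a stays disabled forever; b can record during a reset */
          printf("a: %d, b: %d\n", a.record_disabled, b.record_disabled);
          return 0;
  }

With the locking added by this patch, the swap cannot land between the
paired calls, so each buffer's count returns to where it started.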