aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace/trace_selftest.c
diff options
context:
space:
mode:
authorSteven Rostedt <rostedt@goodmis.org>2008-05-12 15:20:56 -0400
committerThomas Gleixner <tglx@linutronix.de>2008-05-23 15:15:59 -0400
commit30afdcb1de0a37a2086145a82ca3febebe47d019 (patch)
tree866c4d195207ac37d4477f9ce09d429085532180 /kernel/trace/trace_selftest.c
parentd15f57f23eaba975309a153b23699cd0c0236974 (diff)
ftrace: selftest protect against max flip
There is a slight race condition in the selftest where the max update of the wakeup and irqs/preemption off tests can be doing a max update as the buffers are being tested. If this happens the system can crash with a GPF. This patch adds the max update spinlock around the checking of the buffers to prevent such a race. Signed-off-by: Steven Rostedt <srostedt@redhat.com> Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/trace/trace_selftest.c')
-rw-r--r--kernel/trace/trace_selftest.c10
1 files changed, 7 insertions, 3 deletions
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 83e55a2000cc..395274e783b3 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -82,10 +82,12 @@ trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
  */
 static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 {
-	unsigned long cnt = 0;
-	int cpu;
-	int ret = 0;
+	unsigned long flags, cnt = 0;
+	int cpu, ret = 0;
 
+	/* Don't allow flipping of max traces now */
+	raw_local_irq_save(flags);
+	__raw_spin_lock(&ftrace_max_lock);
 	for_each_possible_cpu(cpu) {
 		if (!head_page(tr->data[cpu]))
 			continue;
@@ -96,6 +98,8 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 		if (ret)
 			break;
 	}
+	__raw_spin_unlock(&ftrace_max_lock);
+	raw_local_irq_restore(flags);
 
 	if (count)
 		*count = cnt;