aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace/trace.c
diff options
context:
space:
mode:
authorSteven Rostedt <rostedt@goodmis.org>2008-10-04 02:01:00 -0400
committerIngo Molnar <mingo@elte.hu>2008-10-14 04:39:20 -0400
commit3ea2e6d71aafe35b8aaf89ed711a283815acfae6 (patch)
treee4bae61f9bbe5ff7ccf6eac95416b98ebd4974a4 /kernel/trace/trace.c
parentbf41a158cacba6ca5fc6407a54e7ad8ce1567e2e (diff)
ftrace: make some tracers reentrant
Now that the ring buffer is reentrant, some of the ftrace tracers (sched_switch, debugging traces) can also be reentrant. Note: Never make the function tracer reentrant, that can cause recursion problems all over the kernel. The function tracer must disable reentrancy. Signed-off-by: Steven Rostedt <srostedt@redhat.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--kernel/trace/trace.c10
1 file changed, 2 insertions, 8 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1cd2e8143bb4..caa4051ce778 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -839,7 +839,6 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
839{ 839{
840 struct trace_array *tr = &global_trace; 840 struct trace_array *tr = &global_trace;
841 struct trace_array_cpu *data; 841 struct trace_array_cpu *data;
842 long disabled;
843 int cpu; 842 int cpu;
844 int pc; 843 int pc;
845 844
@@ -850,12 +849,10 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
850 preempt_disable_notrace(); 849 preempt_disable_notrace();
851 cpu = raw_smp_processor_id(); 850 cpu = raw_smp_processor_id();
852 data = tr->data[cpu]; 851 data = tr->data[cpu];
853 disabled = atomic_inc_return(&data->disabled);
854 852
855 if (likely(disabled == 1)) 853 if (likely(!atomic_read(&data->disabled)))
856 ftrace_trace_special(tr, data, arg1, arg2, arg3, pc); 854 ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);
857 855
858 atomic_dec(&data->disabled);
859 preempt_enable_notrace(); 856 preempt_enable_notrace();
860} 857}
861 858
@@ -2961,7 +2958,6 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2961 struct trace_array_cpu *data; 2958 struct trace_array_cpu *data;
2962 struct print_entry *entry; 2959 struct print_entry *entry;
2963 unsigned long flags, irq_flags; 2960 unsigned long flags, irq_flags;
2964 long disabled;
2965 int cpu, len = 0, size, pc; 2961 int cpu, len = 0, size, pc;
2966 2962
2967 if (!tr->ctrl || tracing_disabled) 2963 if (!tr->ctrl || tracing_disabled)
@@ -2971,9 +2967,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2971 preempt_disable_notrace(); 2967 preempt_disable_notrace();
2972 cpu = raw_smp_processor_id(); 2968 cpu = raw_smp_processor_id();
2973 data = tr->data[cpu]; 2969 data = tr->data[cpu];
2974 disabled = atomic_inc_return(&data->disabled);
2975 2970
2976 if (unlikely(disabled != 1)) 2971 if (unlikely(atomic_read(&data->disabled)))
2977 goto out; 2972 goto out;
2978 2973
2979 spin_lock_irqsave(&trace_buf_lock, flags); 2974 spin_lock_irqsave(&trace_buf_lock, flags);
@@ -2999,7 +2994,6 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2999 spin_unlock_irqrestore(&trace_buf_lock, flags); 2994 spin_unlock_irqrestore(&trace_buf_lock, flags);
3000 2995
3001 out: 2996 out:
3002 atomic_dec(&data->disabled);
3003 preempt_enable_notrace(); 2997 preempt_enable_notrace();
3004 2998
3005 return len; 2999 return len;