author    Steven Rostedt <rostedt@goodmis.org>    2008-10-04 02:01:00 -0400
committer Ingo Molnar <mingo@elte.hu>             2008-10-14 04:39:20 -0400
commit    3ea2e6d71aafe35b8aaf89ed711a283815acfae6 (patch)
tree      e4bae61f9bbe5ff7ccf6eac95416b98ebd4974a4 /kernel/trace
parent    bf41a158cacba6ca5fc6407a54e7ad8ce1567e2e (diff)
ftrace: make some tracers reentrant
Now that the ring buffer is reentrant, some of the ftrace tracers
(sched_switch, debugging traces) can also be reentrant.

Note: never make the function tracer reentrant; that can cause recursion
problems all over the kernel. The function tracer must disable reentrancy.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
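The pattern removed below is the per-probe recursion guard: each probe bumped a per-CPU data->disabled counter with atomic_inc_return() and only recorded an event when the count was exactly 1, then decremented it again. With a reentrant ring buffer that nesting count is unnecessary, so the probes now only honor an explicit "tracing disabled" flag via atomic_read(). What follows is a minimal userspace sketch of the before/after pattern, not kernel code: C11 atomics stand in for the kernel's atomic_t helpers, and the probe_old/probe_new/record_event names are illustrative.

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for the per-CPU trace_array_cpu->disabled counter. */
static atomic_int disabled;

static void record_event(const char *what)
{
	printf("traced: %s\n", what);
}

/* Old pattern: the probe counts its own nesting and records only when it
 * is the sole active path on this CPU (the count just went 0 -> 1). */
static void probe_old(const char *what)
{
	int count = atomic_fetch_add(&disabled, 1) + 1;	/* ~ atomic_inc_return() */

	if (count == 1)
		record_event(what);

	atomic_fetch_sub(&disabled, 1);			/* ~ atomic_dec() */
}

/* New pattern: the ring buffer handles reentrancy itself, so the probe only
 * checks an explicit "tracing disabled on this CPU" flag. */
static void probe_new(const char *what)
{
	if (!atomic_load(&disabled))			/* ~ atomic_read() */
		record_event(what);
}

int main(void)
{
	probe_old("sched_switch, old guard");
	probe_new("sched_switch, new check");
	return 0;
}

As the commit message notes, the function tracer keeps the counting guard, since it can be entered from nearly any kernel function and would otherwise recurse.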
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/trace.c               10
-rw-r--r--  kernel/trace/trace_sched_switch.c  10
2 files changed, 4 insertions, 16 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1cd2e8143bb4..caa4051ce778 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -839,7 +839,6 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 {
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
-	long disabled;
 	int cpu;
 	int pc;
 
@@ -850,12 +849,10 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 	preempt_disable_notrace();
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
 
-	if (likely(disabled == 1))
+	if (likely(!atomic_read(&data->disabled)))
 		ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);
 
-	atomic_dec(&data->disabled);
 	preempt_enable_notrace();
 }
 
@@ -2961,7 +2958,6 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	struct trace_array_cpu *data;
 	struct print_entry *entry;
 	unsigned long flags, irq_flags;
-	long disabled;
 	int cpu, len = 0, size, pc;
 
 	if (!tr->ctrl || tracing_disabled)
@@ -2971,9 +2967,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	preempt_disable_notrace();
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
 
-	if (unlikely(disabled != 1))
+	if (unlikely(atomic_read(&data->disabled)))
 		goto out;
 
 	spin_lock_irqsave(&trace_buf_lock, flags);
@@ -2999,7 +2994,6 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	spin_unlock_irqrestore(&trace_buf_lock, flags);
 
  out:
-	atomic_dec(&data->disabled);
 	preempt_enable_notrace();
 
 	return len;
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index c7fa08a5b7f4..b8f56beb1a62 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -24,7 +24,6 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 {
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	long disabled;
 	int cpu;
 	int pc;
 
@@ -41,12 +40,10 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = ctx_trace->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
 
-	if (likely(disabled == 1))
+	if (likely(!atomic_read(&data->disabled)))
 		tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);
 
-	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }
 
@@ -55,7 +52,6 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
 {
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	long disabled;
 	int cpu, pc;
 
 	if (!likely(tracer_enabled))
@@ -67,13 +63,11 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = ctx_trace->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
 
-	if (likely(disabled == 1))
+	if (likely(!atomic_read(&data->disabled)))
 		tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
 					   flags, pc);
 
-	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }
 