author     Steven Rostedt <rostedt@goodmis.org>  2008-10-04 02:01:00 -0400
committer  Ingo Molnar <mingo@elte.hu>           2008-10-14 04:39:20 -0400
commit     3ea2e6d71aafe35b8aaf89ed711a283815acfae6 (patch)
tree       e4bae61f9bbe5ff7ccf6eac95416b98ebd4974a4 /kernel/trace/trace_sched_switch.c
parent     bf41a158cacba6ca5fc6407a54e7ad8ce1567e2e (diff)
ftrace: make some tracers reentrant
Now that the ring buffer is reentrant, some of the ftrace tracers
(sched_switch, debugging traces) can also be reentrant.
Note: Never make the function tracer reentrant; that can cause
recursion problems all over the kernel. The function tracer
must disable reentrancy.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
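
The change is the same in both probes: drop the atomic_inc_return()/atomic_dec()
pair that made each probe its own reentrancy lock, and keep data->disabled only
as an on/off flag checked with atomic_read(). As a rough illustration, here is a
minimal userspace sketch of the two guard styles, using C11 atomics in place of
the kernel's atomic_t; probe_old(), probe_new(), trace_event(), and
struct per_cpu_data are hypothetical stand-ins, not kernel API:

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-in for the per-cpu trace_array_cpu data. */
struct per_cpu_data {
        atomic_long disabled;
};

static void trace_event(const char *msg)
{
        printf("traced: %s\n", msg);
}

/* Old guard: atomic_inc_return() style. The counter doubles as a
 * per-cpu lock, so only the first (non-nested) entry on this CPU
 * is allowed to write an event; a nested call sees disabled > 1
 * and its event is silently dropped. */
static void probe_old(struct per_cpu_data *data)
{
        long disabled = atomic_fetch_add(&data->disabled, 1) + 1;

        if (disabled == 1)
                trace_event("old-style probe");

        atomic_fetch_sub(&data->disabled, 1);
}

/* New guard: the ring buffer handles nesting itself, so disabled
 * is only an on/off switch and a plain atomic read is enough;
 * nested probes on the same CPU are now traced too. */
static void probe_new(struct per_cpu_data *data)
{
        if (!atomic_load(&data->disabled))
                trace_event("new-style probe");
}

int main(void)
{
        struct per_cpu_data data;

        atomic_init(&data.disabled, 0);
        probe_old(&data);   /* traces; nesting blocked by the counter */
        probe_new(&data);   /* traces; nesting allowed, flag honored  */
        return 0;
}

The function tracer keeps the old style on purpose: as the commit message
notes, a reentrant function tracer would recurse through every traced
function in the kernel.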
Diffstat (limited to 'kernel/trace/trace_sched_switch.c')
-rw-r--r--  kernel/trace/trace_sched_switch.c | 10 ++--------
1 file changed, 2 insertions(+), 8 deletions(-)
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index c7fa08a5b7f4..b8f56beb1a62 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -24,7 +24,6 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 {
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	long disabled;
 	int cpu;
 	int pc;
 
@@ -41,12 +40,10 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = ctx_trace->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
 
-	if (likely(disabled == 1))
+	if (likely(!atomic_read(&data->disabled)))
 		tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);
 
-	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }
 
@@ -55,7 +52,6 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
 {
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	long disabled;
 	int cpu, pc;
 
 	if (!likely(tracer_enabled))
@@ -67,13 +63,11 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = ctx_trace->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
 
-	if (likely(disabled == 1))
+	if (likely(!atomic_read(&data->disabled)))
 		tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
 					   flags, pc);
 
-	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }
 