author      Frederic Weisbecker <fweisbec@gmail.com>    2009-03-15 17:10:35 -0400
committer   Ingo Molnar <mingo@elte.hu>                 2009-03-16 04:13:15 -0400
commit      ac99c58c9e56967037382e31f865b72b10127965 (patch)
tree        703c5f043cb8067cca699b6c3b6a481430912dd5
parent      7243f2145a9b06e5cf9a49fc9b8b9a4fff6fb42e (diff)
tracing/syscalls: fix missing release of tracing
Impact: fix 'stuck' syscall tracer

The syscall tracer uses a refcount to allow several users simultaneously. But the refcount did not behave correctly: it was always restored to 0 after calling start_syscall_tracing(), so stop_syscall_tracing() could not properly release the tasks from tracing. The tracer also forgot to reset the buffer when it is released.

Drop the pointless refcount decrement in start_syscall_tracing() and reset the buffer when we release the tracer.

This fixes two reported issues:

- syscall tracing continued after switching from the syscall tracer to another tracer.
- incorrect use of the refcount.

Reported-by: Andrew Morton <akpm@linux-foundation.org>
Reported-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <1237151439-6755-1-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
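For illustration only, here is a minimal user-space sketch of the refcount semantics the patch restores: the first user enables tracing, later users only take a reference, and only the last user disables it. The set_trace_flag_on_all_tasks() and clear_trace_flag_on_all_tasks() helpers are hypothetical stand-ins for the per-task flag updates done under tasklist_lock in the real code; this is not the kernel implementation.

    #include <stdatomic.h>

    static atomic_int refcount;

    /* Hypothetical stand-ins for setting/clearing the per-task trace flag. */
    static void set_trace_flag_on_all_tasks(void)   { /* ... */ }
    static void clear_trace_flag_on_all_tasks(void) { /* ... */ }

    void start_tracing(void)
    {
            /* Only the first user flips the flag on the tasks. */
            if (atomic_fetch_add(&refcount, 1) != 0)
                    return;
            set_trace_flag_on_all_tasks();
            /* No decrement here: dropping the reference we just took
             * is what broke stop_tracing() in the buggy version. */
    }

    void stop_tracing(void)
    {
            /* There may still be other users; only the last one cleans up. */
            if (atomic_fetch_sub(&refcount, 1) != 1)
                    return;
            clear_trace_flag_on_all_tasks();
    }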
kernel/trace/trace_syscalls.c | 11
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index c72e599230ff..c5fc1d8880f6 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -96,8 +96,9 @@ void start_ftrace_syscalls(void)
         unsigned long flags;
         struct task_struct *g, *t;
 
+        /* Don't enable the flag on the tasks twice */
         if (atomic_inc_return(&refcount) != 1)
-                goto out;
+                return;
 
         arch_init_ftrace_syscalls();
         read_lock_irqsave(&tasklist_lock, flags);
@@ -107,8 +108,6 @@ void start_ftrace_syscalls(void)
         } while_each_thread(g, t);
 
         read_unlock_irqrestore(&tasklist_lock, flags);
-out:
-        atomic_dec(&refcount);
 }
 
 void stop_ftrace_syscalls(void)
@@ -116,8 +115,9 @@ void stop_ftrace_syscalls(void)
         unsigned long flags;
         struct task_struct *g, *t;
 
+        /* There are perhaps still some users */
         if (atomic_dec_return(&refcount))
-                goto out;
+                return;
 
         read_lock_irqsave(&tasklist_lock, flags);
 
@@ -126,8 +126,6 @@ void stop_ftrace_syscalls(void)
         } while_each_thread(g, t);
 
         read_unlock_irqrestore(&tasklist_lock, flags);
-out:
-        atomic_inc(&refcount);
 }
 
 void ftrace_syscall_enter(struct pt_regs *regs)
@@ -201,6 +199,7 @@ static int init_syscall_tracer(struct trace_array *tr)
 static void reset_syscall_tracer(struct trace_array *tr)
 {
         stop_ftrace_syscalls();
+        tracing_reset_online_cpus(tr);
 }
 
 static struct trace_event syscall_enter_event = {