path: root/kernel/trace/trace_event_profile.c
author     Frederic Weisbecker <fweisbec@gmail.com>  2009-10-03 08:55:18 -0400
committer  Frederic Weisbecker <fweisbec@gmail.com>  2009-10-05 04:57:41 -0400
commit     fe8e5b5a60f8427940d33b205e127aecfb0bca10
tree       d4d18d4a69175f715c9483e0e92ae73f8deda090 /kernel/trace/trace_event_profile.c
parent     8a0382f6fceaf0c6479e582e1054f36333ea3d24
tracing: Check total refcount before releasing bufs in profile_enable failure
When we call the profile_enable() callback of an event, we release the shared perf event tracing buffers unconditionally in the failure path. This is wrong because there may be other users of these buffers. Check the total refcount before releasing them.

Reported-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Li Zefan <lizf@cn.fujitsu.com>
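The fix hinges on the buffers being shared and reference-counted across every profiled event: the global count is only bumped once profile_enable() has succeeded, and the failure path frees the buffers only when no other event still holds them. Below is a minimal user-space sketch of that pattern; the names (shared_buf, total_ref_count, event_enable_hw) and the malloc()/free() calls are placeholders for illustration, not the kernel symbols or the percpu allocator used by the real code.

#include <stdio.h>
#include <stdlib.h>

static char *shared_buf;       /* buffer shared by every enabled event */
static int total_ref_count;    /* number of events currently using it  */

/* Hypothetical per-event enable hook; flip to 0 to exercise success. */
static int event_enable_hw(void)
{
	return -1;
}

static int profile_enable_sketch(void)
{
	int ret;

	if (!total_ref_count) {          /* first user: allocate the shared buffer */
		shared_buf = malloc(4096);
		if (!shared_buf)
			return -1;
	}

	ret = event_enable_hw();
	if (!ret) {
		total_ref_count++;       /* count the new user only on success */
		return 0;
	}

	/* Failure: release the buffer only if no other event references it. */
	if (!total_ref_count) {
		free(shared_buf);
		shared_buf = NULL;
	}
	return ret;
}

int main(void)
{
	int ret = profile_enable_sketch();

	printf("enable returned %d, users=%d, buf=%p\n",
	       ret, total_ref_count, (void *)shared_buf);
	return 0;
}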
Diffstat (limited to 'kernel/trace/trace_event_profile.c')
-rw-r--r--  kernel/trace/trace_event_profile.c | 15 ++++++++++-----
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index dd44b8768867..e52784b7b844 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -31,7 +31,7 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event)
 	if (atomic_inc_return(&event->profile_count))
 		return 0;
 
-	if (!total_profile_count++) {
+	if (!total_profile_count) {
 		buf = (char *)alloc_percpu(profile_buf_t);
 		if (!buf)
 			goto fail_buf;
@@ -46,14 +46,19 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event)
 	}
 
 	ret = event->profile_enable();
-	if (!ret)
+	if (!ret) {
+		total_profile_count++;
 		return 0;
+	}
 
-	kfree(trace_profile_buf_nmi);
 fail_buf_nmi:
-	kfree(trace_profile_buf);
+	if (!total_profile_count) {
+		kfree(trace_profile_buf_nmi);
+		kfree(trace_profile_buf);
+		trace_profile_buf_nmi = NULL;
+		trace_profile_buf = NULL;
+	}
 fail_buf:
-	total_profile_count--;
 	atomic_dec(&event->profile_count);
 
 	return ret;