diff options
author | Lai Jiangshan <laijs@cn.fujitsu.com> | 2009-05-18 07:35:34 -0400 |
---|---|---|
committer | Frederic Weisbecker <fweisbec@gmail.com> | 2009-05-25 17:53:41 -0400 |
commit | 4f5359685af6de7dca101393dc606620adbe963f (patch) | |
tree | 1f9dc3fb9299008daa6a5fb6f03945008ea4a4f9 /kernel/trace/trace.c | |
parent | 5537937696c55530447c20aa27daccb8d0d29b33 (diff) |
tracing: add trace_event_read_lock()
I found that there is nothing to protect event_hash in
ftrace_find_event(). RCU protects the event hashlist,
but not the event itself, which we continue to use after
extracting it through ftrace_find_event().
This lack of proper locking at this spot opens a race
window between any event dereference and module removal.
Eg:
--Task A--
print_trace_line(trace) {
event = find_ftrace_event(trace)
--Task B--
trace_module_remove_events(mod) {
list_trace_events_module(ev, mod) {
unregister_ftrace_event(ev->event) {
hlist_del(ev->event->node)
list_del(....)
}
}
}
|--> module removed, the event has been dropped
--Task A--
event->print(trace); // Dereferencing freed memory
If the retrieved event belongs to a module and that module
is concurrently removed, we may end up dereferencing data
from a freed module.
RCU could solve this, but it would add latency to the kernel and
forbid tracer output callbacks from calling any sleepable code.
So this fix converts 'trace_event_mutex' to a read/write semaphore,
and adds trace_event_read_lock() to protect ftrace_find_event().
[ Impact: fix possible freed memory dereference in ftrace ]
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
LKML-Reference: <4A114806.7090302@cn.fujitsu.com>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r-- | kernel/trace/trace.c | 8 |
1 files changed, 8 insertions, 0 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index dd40d2320346..02d32baa23ac 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -1569,12 +1569,14 @@ static void *s_start(struct seq_file *m, loff_t *pos) | |||
1569 | p = s_next(m, p, &l); | 1569 | p = s_next(m, p, &l); |
1570 | } | 1570 | } |
1571 | 1571 | ||
1572 | trace_event_read_lock(); | ||
1572 | return p; | 1573 | return p; |
1573 | } | 1574 | } |
1574 | 1575 | ||
1575 | static void s_stop(struct seq_file *m, void *p) | 1576 | static void s_stop(struct seq_file *m, void *p) |
1576 | { | 1577 | { |
1577 | atomic_dec(&trace_record_cmdline_disabled); | 1578 | atomic_dec(&trace_record_cmdline_disabled); |
1579 | trace_event_read_unlock(); | ||
1578 | } | 1580 | } |
1579 | 1581 | ||
1580 | static void print_lat_help_header(struct seq_file *m) | 1582 | static void print_lat_help_header(struct seq_file *m) |
@@ -1817,6 +1819,7 @@ static int trace_empty(struct trace_iterator *iter) | |||
1817 | return 1; | 1819 | return 1; |
1818 | } | 1820 | } |
1819 | 1821 | ||
1822 | /* Called with trace_event_read_lock() held. */ | ||
1820 | static enum print_line_t print_trace_line(struct trace_iterator *iter) | 1823 | static enum print_line_t print_trace_line(struct trace_iterator *iter) |
1821 | { | 1824 | { |
1822 | enum print_line_t ret; | 1825 | enum print_line_t ret; |
@@ -3008,6 +3011,7 @@ waitagain: | |||
3008 | offsetof(struct trace_iterator, seq)); | 3011 | offsetof(struct trace_iterator, seq)); |
3009 | iter->pos = -1; | 3012 | iter->pos = -1; |
3010 | 3013 | ||
3014 | trace_event_read_lock(); | ||
3011 | while (find_next_entry_inc(iter) != NULL) { | 3015 | while (find_next_entry_inc(iter) != NULL) { |
3012 | enum print_line_t ret; | 3016 | enum print_line_t ret; |
3013 | int len = iter->seq.len; | 3017 | int len = iter->seq.len; |
@@ -3024,6 +3028,7 @@ waitagain: | |||
3024 | if (iter->seq.len >= cnt) | 3028 | if (iter->seq.len >= cnt) |
3025 | break; | 3029 | break; |
3026 | } | 3030 | } |
3031 | trace_event_read_unlock(); | ||
3027 | 3032 | ||
3028 | /* Now copy what we have to the user */ | 3033 | /* Now copy what we have to the user */ |
3029 | sret = trace_seq_to_user(&iter->seq, ubuf, cnt); | 3034 | sret = trace_seq_to_user(&iter->seq, ubuf, cnt); |
@@ -3146,6 +3151,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp, | |||
3146 | goto out_err; | 3151 | goto out_err; |
3147 | } | 3152 | } |
3148 | 3153 | ||
3154 | trace_event_read_lock(); | ||
3155 | |||
3149 | /* Fill as many pages as possible. */ | 3156 | /* Fill as many pages as possible. */ |
3150 | for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) { | 3157 | for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) { |
3151 | pages[i] = alloc_page(GFP_KERNEL); | 3158 | pages[i] = alloc_page(GFP_KERNEL); |
@@ -3168,6 +3175,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp, | |||
3168 | trace_seq_init(&iter->seq); | 3175 | trace_seq_init(&iter->seq); |
3169 | } | 3176 | } |
3170 | 3177 | ||
3178 | trace_event_read_unlock(); | ||
3171 | mutex_unlock(&iter->mutex); | 3179 | mutex_unlock(&iter->mutex); |
3172 | 3180 | ||
3173 | spd.nr_pages = i; | 3181 | spd.nr_pages = i; |