author		Lai Jiangshan <laijs@cn.fujitsu.com>		2009-05-18 07:35:34 -0400
committer	Frederic Weisbecker <fweisbec@gmail.com>	2009-05-25 17:53:41 -0400
commit		4f5359685af6de7dca101393dc606620adbe963f
tree		1f9dc3fb9299008daa6a5fb6f03945008ea4a4f9
parent		5537937696c55530447c20aa27daccb8d0d29b33
tracing: add trace_event_read_lock()
I found that there is nothing to protect event_hash in
ftrace_find_event(). RCU protects the event hash list, but not the
event itself while we use it after retrieving it through
ftrace_find_event(). This lack of proper locking opens a race window
between any event dereference and module removal.
Eg:
	--Task A--
	print_trace_line(trace) {
	  event = find_ftrace_event(trace)

	--Task B--
	trace_module_remove_events(mod) {
	  list_trace_events_module(ev, mod) {
	    unregister_ftrace_event(ev->event) {
	      hlist_del(ev->event->node)
	      list_del(....)
	    }
	  }
	}
	|--> module removed, the event has been dropped

	--Task A--
	event->print(trace); // Dereferencing freed memory
If the retrieved event belongs to a module and that module is
concurrently removed, we may end up dereferencing data from a freed
module.
RCU could solve this, but it would add latency to the kernel and
forbid tracer output callbacks from calling any sleepable code.
So this fix converts 'trace_event_mutex' to a read/write semaphore,
and adds trace_event_read_lock() to protect ftrace_find_event().
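As an illustration only (not part of the patch), the read side is now
expected to be bracketed like this; event->print() stands in for
whatever output callback the tracer provides, as in the pseudocode
above:

	trace_event_read_lock();	/* down_read(&trace_event_mutex) */
	event = ftrace_find_event(type);
	if (event)
		event->print(trace);	/* module removal is excluded here */
	trace_event_read_unlock();	/* up_read(&trace_event_mutex) */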
[ Impact: fix possible freed memory dereference in ftrace ]
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
LKML-Reference: <4A114806.7090302@cn.fujitsu.com>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
-rw-r--r--	kernel/trace/trace.c		|  8
-rw-r--r--	kernel/trace/trace_output.c	| 25
-rw-r--r--	kernel/trace/trace_output.h	|  2
3 files changed, 28 insertions(+), 7 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index dd40d2320346..02d32baa23ac 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1569,12 +1569,14 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 		p = s_next(m, p, &l);
 	}
 
+	trace_event_read_lock();
 	return p;
 }
 
 static void s_stop(struct seq_file *m, void *p)
 {
 	atomic_dec(&trace_record_cmdline_disabled);
+	trace_event_read_unlock();
 }
 
 static void print_lat_help_header(struct seq_file *m)
@@ -1817,6 +1819,7 @@ static int trace_empty(struct trace_iterator *iter)
 	return 1;
 }
 
+/* Called with trace_event_read_lock() held. */
 static enum print_line_t print_trace_line(struct trace_iterator *iter)
 {
 	enum print_line_t ret;
@@ -3008,6 +3011,7 @@ waitagain:
 	       offsetof(struct trace_iterator, seq));
 	iter->pos = -1;
 
+	trace_event_read_lock();
 	while (find_next_entry_inc(iter) != NULL) {
 		enum print_line_t ret;
 		int len = iter->seq.len;
@@ -3024,6 +3028,7 @@ waitagain:
 		if (iter->seq.len >= cnt)
 			break;
 	}
+	trace_event_read_unlock();
 
 	/* Now copy what we have to the user */
 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
@@ -3146,6 +3151,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 		goto out_err;
 	}
 
+	trace_event_read_lock();
+
 	/* Fill as many pages as possible. */
 	for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) {
 		pages[i] = alloc_page(GFP_KERNEL);
@@ -3168,6 +3175,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 		trace_seq_init(&iter->seq);
 	}
 
+	trace_event_read_unlock();
 	mutex_unlock(&iter->mutex);
 
 	spd.nr_pages = i;
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 489c0e8ada09..7136420603aa 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -14,7 +14,7 @@
 /* must be a power of 2 */
 #define EVENT_HASHSIZE	128
 
-static DEFINE_MUTEX(trace_event_mutex);
+static DECLARE_RWSEM(trace_event_mutex);
 static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
 
 static int next_event_type = __TRACE_LAST_TYPE + 1;
@@ -466,6 +466,7 @@ static int task_state_char(unsigned long state)
  * @type: the type of event to look for
  *
  * Returns an event of type @type otherwise NULL
+ * Called with trace_event_read_lock() held.
  */
 struct trace_event *ftrace_find_event(int type)
 {
@@ -475,7 +476,7 @@ struct trace_event *ftrace_find_event(int type)
 
 	key = type & (EVENT_HASHSIZE - 1);
 
-	hlist_for_each_entry_rcu(event, n, &event_hash[key], node) {
+	hlist_for_each_entry(event, n, &event_hash[key], node) {
 		if (event->type == type)
 			return event;
 	}
@@ -513,6 +514,16 @@ static int trace_search_list(struct list_head **list)
 	return last + 1;
 }
 
+void trace_event_read_lock(void)
+{
+	down_read(&trace_event_mutex);
+}
+
+void trace_event_read_unlock(void)
+{
+	up_read(&trace_event_mutex);
+}
+
 /**
  * register_ftrace_event - register output for an event type
  * @event: the event type to register
@@ -533,7 +544,7 @@ int register_ftrace_event(struct trace_event *event)
 	unsigned key;
 	int ret = 0;
 
-	mutex_lock(&trace_event_mutex);
+	down_write(&trace_event_mutex);
 
 	if (WARN_ON(!event))
 		goto out;
@@ -581,11 +592,11 @@ int register_ftrace_event(struct trace_event *event)
 
 	key = event->type & (EVENT_HASHSIZE - 1);
 
-	hlist_add_head_rcu(&event->node, &event_hash[key]);
+	hlist_add_head(&event->node, &event_hash[key]);
 
 	ret = event->type;
  out:
-	mutex_unlock(&trace_event_mutex);
+	up_write(&trace_event_mutex);
 
 	return ret;
 }
@@ -597,10 +608,10 @@ EXPORT_SYMBOL_GPL(register_ftrace_event);
  */
 int unregister_ftrace_event(struct trace_event *event)
 {
-	mutex_lock(&trace_event_mutex);
+	down_write(&trace_event_mutex);
 	hlist_del(&event->node);
 	list_del(&event->list);
-	mutex_unlock(&trace_event_mutex);
+	up_write(&trace_event_mutex);
 
 	return 0;
 }
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h
index 6e220a8e5706..ac240e76eb01 100644
--- a/kernel/trace/trace_output.h
+++ b/kernel/trace/trace_output.h
@@ -20,6 +20,8 @@ extern int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
 extern int trace_print_context(struct trace_iterator *iter);
 extern int trace_print_lat_context(struct trace_iterator *iter);
 
+extern void trace_event_read_lock(void);
+extern void trace_event_read_unlock(void);
 extern struct trace_event *ftrace_find_event(int type);
 
 extern enum print_line_t trace_nop_print(struct trace_iterator *iter,
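For completeness, a hedged sketch of the writer side this change relies
on: unregister_ftrace_event() now takes the rwsem for writing, so a
module's exit path blocks until every reader has left its
trace_event_read_lock()/unlock() section. The module and event names
below are hypothetical and only illustrate the API touched by this
patch:

	/* hypothetical module using the register/unregister API above */
	static struct trace_event my_event = {
		.type = 0,	/* 0: let register_ftrace_event() assign one */
	};

	static int __init my_tracer_init(void)
	{
		/* takes down_write(&trace_event_mutex) internally */
		return register_ftrace_event(&my_event) ? 0 : -ENODEV;
	}

	static void __exit my_tracer_exit(void)
	{
		/* down_write() waits for all readers, so nothing can still
		 * hold a pointer to my_event's callbacks after this returns */
		unregister_ftrace_event(&my_event);
	}

	module_init(my_tracer_init);
	module_exit(my_tracer_exit);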