| author | Oleg Nesterov <oleg@redhat.com> | 2013-07-26 13:25:40 -0400 |
|---|---|---|
| committer | Steven Rostedt <rostedt@goodmis.org> | 2013-07-29 22:56:59 -0400 |
| commit | e2912b091c26b8ea95e5e00a43a7ac620f6c94a6 (patch) | |
| tree | 841eb09f7732ba1d1eb58405743bb61c8a88b2fe /kernel/trace/trace_events.c | |
| parent | bc6f6b08dee5645770efb4b76186ded313f23752 (diff) | |
tracing: Change event_filter_read/write to verify i_private != NULL
event_filter_read/write() are racy: the ftrace_event_call they use can
already have been freed by trace_remove_event_call() callers.
1. Shift mutex_lock(event_mutex) from print/apply_event_filter to
the callers.
2. Change the callers, event_filter_read() and event_filter_write(),
to read i_private under this mutex and abort if it is NULL.
This fixes nothing by itself, but now we can change debugfs_remove("filter")
callers to nullify ->i_private and fix the problem.
Link: http://lkml.kernel.org/r/20130726172540.GA3619@redhat.com
Reviewed-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace/trace_events.c')
-rw-r--r-- | kernel/trace/trace_events.c | 26
1 file changed, 19 insertions, 7 deletions
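
For context before reading the diff: the new code looks up the event through event_file_data(), a helper that returns the debugfs inode's ->i_private. The sketch below is illustrative only and shows the assumed shape of that helper in this series (it is not part of this patch); the key point is that ->i_private is only trusted while event_mutex is held, so a later patch can have the removal path clear it before freeing the call.

/*
 * Illustrative sketch only -- assumed shape of the event_file_data()
 * helper this series relies on; not taken verbatim from the patch.
 * It reads ->i_private from the file's inode. event_filter_read/write()
 * must hold event_mutex around its use so trace_remove_event_call()
 * cannot free the ftrace_event_call underneath them.
 */
static void *event_file_data(struct file *filp)
{
	return ACCESS_ONCE(file_inode(filp)->i_private);
}
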
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 3dfa8419d0dc..1d7b6d03cd51 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -980,21 +980,28 @@ static ssize_t
 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
 {
-	struct ftrace_event_call *call = filp->private_data;
+	struct ftrace_event_call *call;
 	struct trace_seq *s;
-	int r;
+	int r = -ENODEV;
 
 	if (*ppos)
 		return 0;
 
 	s = kmalloc(sizeof(*s), GFP_KERNEL);
+
 	if (!s)
 		return -ENOMEM;
 
 	trace_seq_init(s);
 
-	print_event_filter(call, s);
-	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
+	mutex_lock(&event_mutex);
+	call = event_file_data(filp);
+	if (call)
+		print_event_filter(call, s);
+	mutex_unlock(&event_mutex);
+
+	if (call)
+		r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
 
 	kfree(s);
 
@@ -1005,9 +1012,9 @@ static ssize_t
 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
 {
-	struct ftrace_event_call *call = filp->private_data;
+	struct ftrace_event_call *call;
 	char *buf;
-	int err;
+	int err = -ENODEV;
 
 	if (cnt >= PAGE_SIZE)
 		return -EINVAL;
@@ -1022,7 +1029,12 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	}
 	buf[cnt] = '\0';
 
-	err = apply_event_filter(call, buf);
+	mutex_lock(&event_mutex);
+	call = event_file_data(filp);
+	if (call)
+		err = apply_event_filter(call, buf);
+	mutex_unlock(&event_mutex);
+
 	free_page((unsigned long) buf);
 	if (err < 0)
 		return err;