author     Oleg Nesterov <oleg@redhat.com>        2013-07-26 13:25:40 -0400
committer  Steven Rostedt <rostedt@goodmis.org>   2013-07-29 22:56:59 -0400
commit     e2912b091c26b8ea95e5e00a43a7ac620f6c94a6 (patch)
tree       841eb09f7732ba1d1eb58405743bb61c8a88b2fe /kernel
parent     bc6f6b08dee5645770efb4b76186ded313f23752 (diff)
tracing: Change event_filter_read/write to verify i_private != NULL
event_filter_read/write() are racy; the ftrace_event_call they operate on
can already have been freed by trace_remove_event_call() callers.
1. Shift mutex_lock(event_mutex) from print/apply_event_filter to
the callers.
2. Change the callers, event_filter_read() and event_filter_write(),
to read i_private under this mutex and abort if it is NULL.
On its own this fixes nothing, but now we can change the
debugfs_remove("filter") callers to nullify ->i_private and fix the problem.
Link: http://lkml.kernel.org/r/20130726172540.GA3619@redhat.com
Reviewed-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/trace/trace_events.c         | 26
-rw-r--r--  kernel/trace/trace_events_filter.c  | 17
2 files changed, 25 insertions, 18 deletions
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 3dfa8419d0dc..1d7b6d03cd51 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -980,21 +980,28 @@ static ssize_t
 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
 		  loff_t *ppos)
 {
-	struct ftrace_event_call *call = filp->private_data;
+	struct ftrace_event_call *call;
 	struct trace_seq *s;
-	int r;
+	int r = -ENODEV;
 
 	if (*ppos)
 		return 0;
 
 	s = kmalloc(sizeof(*s), GFP_KERNEL);
+
 	if (!s)
 		return -ENOMEM;
 
 	trace_seq_init(s);
 
-	print_event_filter(call, s);
-	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
+	mutex_lock(&event_mutex);
+	call = event_file_data(filp);
+	if (call)
+		print_event_filter(call, s);
+	mutex_unlock(&event_mutex);
+
+	if (call)
+		r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
 
 	kfree(s);
 
@@ -1005,9 +1012,9 @@ static ssize_t
 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
 		   loff_t *ppos)
 {
-	struct ftrace_event_call *call = filp->private_data;
+	struct ftrace_event_call *call;
 	char *buf;
-	int err;
+	int err = -ENODEV;
 
 	if (cnt >= PAGE_SIZE)
 		return -EINVAL;
@@ -1022,7 +1029,12 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	}
 	buf[cnt] = '\0';
 
-	err = apply_event_filter(call, buf);
+	mutex_lock(&event_mutex);
+	call = event_file_data(filp);
+	if (call)
+		err = apply_event_filter(call, buf);
+	mutex_unlock(&event_mutex);
+
 	free_page((unsigned long) buf);
 	if (err < 0)
 		return err;
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 0c7b75a8acc8..97daa8cf958d 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -637,17 +637,15 @@ static void append_filter_err(struct filter_parse_state *ps,
 	free_page((unsigned long) buf);
 }
 
+/* caller must hold event_mutex */
 void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
 {
-	struct event_filter *filter;
+	struct event_filter *filter = call->filter;
 
-	mutex_lock(&event_mutex);
-	filter = call->filter;
 	if (filter && filter->filter_string)
 		trace_seq_printf(s, "%s\n", filter->filter_string);
 	else
 		trace_seq_puts(s, "none\n");
-	mutex_unlock(&event_mutex);
 }
 
 void print_subsystem_event_filter(struct event_subsystem *system,
@@ -1841,23 +1839,22 @@ static int create_system_filter(struct event_subsystem *system,
 	return err;
 }
 
+/* caller must hold event_mutex */
 int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
 {
 	struct event_filter *filter;
-	int err = 0;
-
-	mutex_lock(&event_mutex);
+	int err;
 
 	if (!strcmp(strstrip(filter_string), "0")) {
 		filter_disable(call);
 		filter = call->filter;
 		if (!filter)
-			goto out_unlock;
+			return 0;
 		RCU_INIT_POINTER(call->filter, NULL);
 		/* Make sure the filter is not being used */
 		synchronize_sched();
 		__free_filter(filter);
-		goto out_unlock;
+		return 0;
 	}
 
 	err = create_filter(call, filter_string, true, &filter);
@@ -1884,8 +1881,6 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
 			__free_filter(tmp);
 		}
 	}
-out_unlock:
-	mutex_unlock(&event_mutex);
 
 	return err;
 }
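
For illustration, a minimal userspace sketch of the pattern the commit message describes: the reader re-fetches the i_private-style pointer under a shared mutex and bails out with -ENODEV if the backing object is already gone, while the remover nullifies the pointer under the same mutex before freeing it. The names here (fake_file, fake_event, filter_read, remove_event) are invented for the example; only the locking pattern mirrors the kernel change.

/*
 * Userspace sketch of the "check i_private under a mutex" pattern.
 * Build with: cc -pthread sketch.c
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_event {
	char filter_string[64];
};

struct fake_file {
	/* stands in for file_inode(filp)->i_private */
	struct fake_event *i_private;
};

static pthread_mutex_t event_mutex = PTHREAD_MUTEX_INITIALIZER;

/* analogue of event_file_data(): fetch the private pointer */
static struct fake_event *event_file_data(struct fake_file *filp)
{
	return filp->i_private;
}

/* analogue of event_filter_read(): abort if the event is already gone */
static int filter_read(struct fake_file *filp, char *buf, size_t len)
{
	struct fake_event *ev;
	int r = -ENODEV;

	pthread_mutex_lock(&event_mutex);
	ev = event_file_data(filp);
	if (ev) {
		snprintf(buf, len, "%s\n", ev->filter_string);
		r = 0;
	}
	pthread_mutex_unlock(&event_mutex);

	return r;
}

/* analogue of the removal path: nullify under the mutex, then free */
static void remove_event(struct fake_file *filp)
{
	struct fake_event *ev;

	pthread_mutex_lock(&event_mutex);
	ev = filp->i_private;
	filp->i_private = NULL;
	pthread_mutex_unlock(&event_mutex);

	free(ev);	/* safe: no reader can still observe the pointer */
}

int main(void)
{
	struct fake_event *ev = calloc(1, sizeof(*ev));
	struct fake_file file = { .i_private = ev };
	char buf[64];

	strcpy(ev->filter_string, "common_pid == 42");

	if (filter_read(&file, buf, sizeof(buf)) == 0)
		printf("before removal: %s", buf);

	remove_event(&file);

	if (filter_read(&file, buf, sizeof(buf)) == -ENODEV)
		printf("after removal: -ENODEV, as expected\n");

	return 0;
}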