-rw-r--r--   include/linux/ftrace_event.h          |  25
-rw-r--r--   include/linux/syscalls.h               |   4
-rw-r--r--   include/trace/ftrace.h                 |   7
-rw-r--r--   kernel/trace/trace.c                   |  40
-rw-r--r--   kernel/trace/trace.h                   |  18
-rw-r--r--   kernel/trace/trace_branch.c            |   2
-rw-r--r--   kernel/trace/trace_events.c            |  23
-rw-r--r--   kernel/trace/trace_events_filter.c     | 218
-rw-r--r--   kernel/trace/trace_export.c            |   2
-rw-r--r--   kernel/trace/trace_functions_graph.c   |   4
-rw-r--r--   kernel/trace/trace_kprobe.c            |   4
-rw-r--r--   kernel/trace/trace_mmiotrace.c         |   4
-rw-r--r--   kernel/trace/trace_sched_switch.c      |   4
-rw-r--r--   kernel/trace/trace_syscalls.c          |   8
-rw-r--r--   kernel/trace/trace_uprobe.c            |   3

15 files changed, 264 insertions, 102 deletions
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 5eaa746735ff..9abbe630c456 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -202,6 +202,7 @@ enum {
 	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
 	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
 	TRACE_EVENT_FL_WAS_ENABLED_BIT,
+	TRACE_EVENT_FL_USE_CALL_FILTER_BIT,
 };
 
 /*
@@ -213,6 +214,7 @@ enum {
  * WAS_ENABLED - Set and stays set when an event was ever enabled
  *               (used for module unloading, if a module event is enabled,
  *               it is best to clear the buffers that used it).
+ * USE_CALL_FILTER - For ftrace internal events, don't use file filter
  */
 enum {
 	TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
@@ -220,6 +222,7 @@ enum {
 	TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
 	TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
 	TRACE_EVENT_FL_WAS_ENABLED = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
+	TRACE_EVENT_FL_USE_CALL_FILTER = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT),
 };
 
 struct ftrace_event_call {
@@ -238,6 +241,7 @@ struct ftrace_event_call {
 	 * bit 2:	failed to apply filter
 	 * bit 3:	ftrace internal event (do not enable)
 	 * bit 4:	Event was enabled by module
+	 * bit 5:	use call filter rather than file filter
 	 */
 	int	flags; /* static flags of different events */
 
@@ -253,6 +257,8 @@ struct ftrace_subsystem_dir;
 enum {
 	FTRACE_EVENT_FL_ENABLED_BIT,
 	FTRACE_EVENT_FL_RECORDED_CMD_BIT,
+	FTRACE_EVENT_FL_FILTERED_BIT,
+	FTRACE_EVENT_FL_NO_SET_FILTER_BIT,
 	FTRACE_EVENT_FL_SOFT_MODE_BIT,
 	FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
 };
@@ -261,6 +267,8 @@ enum {
  * Ftrace event file flags:
  * ENABLED - The event is enabled
  * RECORDED_CMD - The comms should be recorded at sched_switch
+ * FILTERED - The event has a filter attached
+ * NO_SET_FILTER - Set when filter has error and is to be ignored
  * SOFT_MODE - The event is enabled/disabled by SOFT_DISABLED
  * SOFT_DISABLED - When set, do not trace the event (even though its
  *                  tracepoint may be enabled)
@@ -268,6 +276,8 @@ enum {
 enum {
 	FTRACE_EVENT_FL_ENABLED = (1 << FTRACE_EVENT_FL_ENABLED_BIT),
 	FTRACE_EVENT_FL_RECORDED_CMD = (1 << FTRACE_EVENT_FL_RECORDED_CMD_BIT),
+	FTRACE_EVENT_FL_FILTERED = (1 << FTRACE_EVENT_FL_FILTERED_BIT),
+	FTRACE_EVENT_FL_NO_SET_FILTER = (1 << FTRACE_EVENT_FL_NO_SET_FILTER_BIT),
 	FTRACE_EVENT_FL_SOFT_MODE = (1 << FTRACE_EVENT_FL_SOFT_MODE_BIT),
 	FTRACE_EVENT_FL_SOFT_DISABLED = (1 << FTRACE_EVENT_FL_SOFT_DISABLED_BIT),
 };
@@ -275,6 +285,7 @@ enum {
 struct ftrace_event_file {
 	struct list_head		list;
 	struct ftrace_event_call	*event_call;
+	struct event_filter		*filter;
 	struct dentry			*dir;
 	struct trace_array		*tr;
 	struct ftrace_subsystem_dir	*system;
@@ -310,12 +321,16 @@ struct ftrace_event_file {
 
 #define MAX_FILTER_STR_VAL 256	/* Should handle KSYM_SYMBOL_LEN */
 
-extern void destroy_preds(struct ftrace_event_call *call);
+extern void destroy_preds(struct ftrace_event_file *file);
+extern void destroy_call_preds(struct ftrace_event_call *call);
 extern int filter_match_preds(struct event_filter *filter, void *rec);
-extern int filter_current_check_discard(struct ring_buffer *buffer,
-					struct ftrace_event_call *call,
-					void *rec,
-					struct ring_buffer_event *event);
+
+extern int filter_check_discard(struct ftrace_event_file *file, void *rec,
+				struct ring_buffer *buffer,
+				struct ring_buffer_event *event);
+extern int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
+				     struct ring_buffer *buffer,
+				     struct ring_buffer_event *event);
 
 enum {
 	FILTER_OTHER = 0,
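[Annotation, not part of the patch] The declarations above split the old filter_current_check_discard() into a per-file check, filter_check_discard(), and a per-call check, call_filter_check_discard(). A minimal sketch of the intended calling convention is shown below; the wrapper functions and their names are hypothetical, the real callers are the generated ftrace_raw_event_*() probes (file-based) and the internal tracers converted later in this patch (call-based).

/* Hypothetical helper: how a normal trace event commits a record. */
static void sketch_commit_file_event(struct ftrace_event_file *ftrace_file,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     void *entry, unsigned long irq_flags, int pc)
{
	/* Per-instance filter: consults ftrace_file->flags and ->filter,
	 * which are set up via apply_event_filter(). */
	if (!filter_check_discard(ftrace_file, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
}

/* Hypothetical helper: how an ftrace-internal event commits a record. */
static void sketch_commit_call_event(struct ftrace_event_call *call,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     void *entry, unsigned long irq_flags, int pc)
{
	/* Call-level filter: shared by every trace instance of this event. */
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
}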
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 7fac04e7ff6e..10bafa97049d 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -120,7 +120,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
 		.class = &event_class_syscall_enter, \
 		.event.funcs = &enter_syscall_print_funcs, \
 		.data = (void *)&__syscall_meta_##sname,\
-		.flags = TRACE_EVENT_FL_CAP_ANY, \
+		.flags = TRACE_EVENT_FL_CAP_ANY | TRACE_EVENT_FL_USE_CALL_FILTER,\
 	}; \
 	static struct ftrace_event_call __used \
 	  __attribute__((section("_ftrace_events"))) \
@@ -134,7 +134,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
 		.class = &event_class_syscall_exit, \
 		.event.funcs = &exit_syscall_print_funcs, \
 		.data = (void *)&__syscall_meta_##sname,\
-		.flags = TRACE_EVENT_FL_CAP_ANY, \
+		.flags = TRACE_EVENT_FL_CAP_ANY | TRACE_EVENT_FL_USE_CALL_FILTER,\
 	}; \
 	static struct ftrace_event_call __used \
 	  __attribute__((section("_ftrace_events"))) \
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 5c7ab17cbb02..52594b20179e 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -437,9 +437,8 @@ static inline notrace int ftrace_get_offsets_##call( \
  *	{ <assign>; } <-- Here we assign the entries by the __field and
  *			  __array macros.
  *
- *	if (!filter_current_check_discard(buffer, event_call, entry, event))
- *		trace_nowake_buffer_unlock_commit(buffer,
- *						   event, irq_flags, pc);
+ *	if (!filter_check_discard(ftrace_file, entry, buffer, event))
+ *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
  * }
  *
  * static struct trace_event ftrace_event_type_<call> = {
@@ -553,7 +552,7 @@ ftrace_raw_event_##call(void *__data, proto) \
 									\
 	{ assign; }							\
 									\
-	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
+	if (!filter_check_discard(ftrace_file, entry, buffer, event))	\
 		trace_buffer_unlock_commit(buffer, event, irq_flags, pc); \
 }
 /*
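[Annotation, not part of the patch] The comment fixed in the first hunk mirrors what the macro-generated per-event probe does after this change. A condensed sketch of the tail of such a generated function follows; the event name "sample" is made up, the reserve/assign steps are elided, and the locals are assumed to match the ones the real ftrace_raw_event_##call() body already declares, so this is illustrative rather than compilable.

static notrace void
ftrace_raw_event_sample(void *__data /* , proto args */)
{
	struct ftrace_event_file *ftrace_file = __data;
	/* ... soft-disable check, ring-buffer reserve, { assign; } ... */

	/* The probe now filters against the per-instance event file
	 * instead of the shared ftrace_event_call. */
	if (!filter_check_discard(ftrace_file, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
}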
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 063a92bad578..489da8b19f30 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -235,13 +235,33 @@ void trace_array_put(struct trace_array *this_tr)
 	mutex_unlock(&trace_types_lock);
 }
 
-int filter_current_check_discard(struct ring_buffer *buffer,
-				 struct ftrace_event_call *call, void *rec,
-				 struct ring_buffer_event *event)
+int filter_check_discard(struct ftrace_event_file *file, void *rec,
+			 struct ring_buffer *buffer,
+			 struct ring_buffer_event *event)
 {
-	return filter_check_discard(call, rec, buffer, event);
+	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
+	    !filter_match_preds(file->filter, rec)) {
+		ring_buffer_discard_commit(buffer, event);
+		return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(filter_check_discard);
+
+int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
+			      struct ring_buffer *buffer,
+			      struct ring_buffer_event *event)
+{
+	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
+	    !filter_match_preds(call->filter, rec)) {
+		ring_buffer_discard_commit(buffer, event);
+		return 1;
+	}
+
+	return 0;
 }
-EXPORT_SYMBOL_GPL(filter_current_check_discard);
+EXPORT_SYMBOL_GPL(call_filter_check_discard);
 
 cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
 {
@@ -1633,7 +1653,7 @@ trace_function(struct trace_array *tr,
 	entry->ip = ip;
 	entry->parent_ip = parent_ip;
 
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		__buffer_unlock_commit(buffer, event);
 }
 
@@ -1717,7 +1737,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 
 	entry->size = trace.nr_entries;
 
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		__buffer_unlock_commit(buffer, event);
 
  out:
@@ -1819,7 +1839,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	trace.entries = entry->caller;
 
 	save_stack_trace_user(&trace);
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		__buffer_unlock_commit(buffer, event);
 
  out_drop_count:
@@ -2011,7 +2031,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	entry->fmt = fmt;
 
 	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
-	if (!filter_check_discard(call, entry, buffer, event)) {
+	if (!call_filter_check_discard(call, entry, buffer, event)) {
 		__buffer_unlock_commit(buffer, event);
 		ftrace_trace_stack(buffer, flags, 6, pc);
 	}
@@ -2066,7 +2086,7 @@ __trace_array_vprintk(struct ring_buffer *buffer,
 
 	memcpy(&entry->buf, tbuffer, len);
 	entry->buf[len] = '\0';
-	if (!filter_check_discard(call, entry, buffer, event)) {
+	if (!call_filter_check_discard(call, entry, buffer, event)) {
 		__buffer_unlock_commit(buffer, event);
 		ftrace_trace_stack(buffer, flags, 6, pc);
 	}
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index d1cf5159bec0..12d1a612a73e 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -1007,9 +1007,9 @@ struct filter_pred {
 
 extern enum regex_type
 filter_parse_regex(char *buff, int len, char **search, int *not);
-extern void print_event_filter(struct ftrace_event_call *call,
+extern void print_event_filter(struct ftrace_event_file *file,
 			       struct trace_seq *s);
-extern int apply_event_filter(struct ftrace_event_call *call,
+extern int apply_event_filter(struct ftrace_event_file *file,
 			      char *filter_string);
 extern int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
 					char *filter_string);
@@ -1020,20 +1020,6 @@ extern int filter_assign_type(const char *type);
 struct ftrace_event_field *
 trace_find_event_field(struct ftrace_event_call *call, char *name);
 
-static inline int
-filter_check_discard(struct ftrace_event_call *call, void *rec,
-		     struct ring_buffer *buffer,
-		     struct ring_buffer_event *event)
-{
-	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
-	    !filter_match_preds(call->filter, rec)) {
-		ring_buffer_discard_commit(buffer, event);
-		return 1;
-	}
-
-	return 0;
-}
-
 extern void trace_event_enable_cmd_record(bool enable);
 extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
 extern int event_trace_del_tracer(struct trace_array *tr);
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index d594da0dc03c..697fb9bac8f0 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -78,7 +78,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	entry->line = f->line;
 	entry->correct = val == expect;
 
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		__buffer_unlock_commit(buffer, event);
 
  out:
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 368a4d50cc30..043f833246a0 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -989,7 +989,7 @@ static ssize_t
 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
 		  loff_t *ppos)
 {
-	struct ftrace_event_call *call;
+	struct ftrace_event_file *file;
 	struct trace_seq *s;
 	int r = -ENODEV;
 
@@ -1004,12 +1004,12 @@ event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
 	trace_seq_init(s);
 
 	mutex_lock(&event_mutex);
-	call = event_file_data(filp);
-	if (call)
-		print_event_filter(call, s);
+	file = event_file_data(filp);
+	if (file)
+		print_event_filter(file, s);
 	mutex_unlock(&event_mutex);
 
-	if (call)
+	if (file)
 		r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
 
 	kfree(s);
@@ -1021,7 +1021,7 @@ static ssize_t
 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
 		   loff_t *ppos)
 {
-	struct ftrace_event_call *call;
+	struct ftrace_event_file *file;
 	char *buf;
 	int err = -ENODEV;
 
@@ -1039,9 +1039,9 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	buf[cnt] = '\0';
 
 	mutex_lock(&event_mutex);
-	call = event_file_data(filp);
-	if (call)
-		err = apply_event_filter(call, buf);
+	file = event_file_data(filp);
+	if (file)
+		err = apply_event_filter(file, buf);
 	mutex_unlock(&event_mutex);
 
 	free_page((unsigned long) buf);
@@ -1539,7 +1539,7 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
 			return -1;
 		}
 	}
-	trace_create_file("filter", 0644, file->dir, call,
+	trace_create_file("filter", 0644, file->dir, file,
 			  &ftrace_event_filter_fops);
 
 	trace_create_file("format", 0444, file->dir, call,
@@ -1577,6 +1577,7 @@ static void event_remove(struct ftrace_event_call *call)
 		if (file->event_call != call)
 			continue;
 		ftrace_event_enable_disable(file, 0);
+		destroy_preds(file);
 		/*
 		 * The do_for_each_event_file() is
 		 * a double loop. After finding the call for this
@@ -1700,7 +1701,7 @@ static void __trace_remove_event_call(struct ftrace_event_call *call)
 {
 	event_remove(call);
 	trace_destroy_fields(call);
-	destroy_preds(call);
+	destroy_call_preds(call);
 }
 
 static int probe_remove_event_call(struct ftrace_event_call *call)
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 97daa8cf958d..2468f56dc5db 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -637,10 +637,18 @@ static void append_filter_err(struct filter_parse_state *ps,
 	free_page((unsigned long) buf);
 }
 
+static inline struct event_filter *event_filter(struct ftrace_event_file *file)
+{
+	if (file->event_call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+		return file->event_call->filter;
+	else
+		return file->filter;
+}
+
 /* caller must hold event_mutex */
-void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
+void print_event_filter(struct ftrace_event_file *file, struct trace_seq *s)
 {
-	struct event_filter *filter = call->filter;
+	struct event_filter *filter = event_filter(file);
 
 	if (filter && filter->filter_string)
 		trace_seq_printf(s, "%s\n", filter->filter_string);
@@ -766,11 +774,21 @@ static void __free_preds(struct event_filter *filter)
 	filter->n_preds = 0;
 }
 
-static void filter_disable(struct ftrace_event_call *call)
+static void call_filter_disable(struct ftrace_event_call *call)
 {
 	call->flags &= ~TRACE_EVENT_FL_FILTERED;
 }
 
+static void filter_disable(struct ftrace_event_file *file)
+{
+	struct ftrace_event_call *call = file->event_call;
+
+	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+		call_filter_disable(call);
+	else
+		file->flags &= ~FTRACE_EVENT_FL_FILTERED;
+}
+
 static void __free_filter(struct event_filter *filter)
 {
 	if (!filter)
@@ -781,16 +799,30 @@ static void __free_filter(struct event_filter *filter)
 	kfree(filter);
 }
 
+void destroy_call_preds(struct ftrace_event_call *call)
+{
+	__free_filter(call->filter);
+	call->filter = NULL;
+}
+
+static void destroy_file_preds(struct ftrace_event_file *file)
+{
+	__free_filter(file->filter);
+	file->filter = NULL;
+}
+
 /*
- * Called when destroying the ftrace_event_call.
- * The call is being freed, so we do not need to worry about
- * the call being currently used. This is for module code removing
+ * Called when destroying the ftrace_event_file.
+ * The file is being freed, so we do not need to worry about
+ * the file being currently used. This is for module code removing
  * the tracepoints from within it.
  */
-void destroy_preds(struct ftrace_event_call *call)
+void destroy_preds(struct ftrace_event_file *file)
 {
-	__free_filter(call->filter);
-	call->filter = NULL;
+	if (file->event_call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+		destroy_call_preds(file->event_call);
+	else
+		destroy_file_preds(file);
 }
 
 static struct event_filter *__alloc_filter(void)
@@ -825,28 +857,56 @@ static int __alloc_preds(struct event_filter *filter, int n_preds)
 	return 0;
 }
 
-static void filter_free_subsystem_preds(struct event_subsystem *system)
+static inline void __remove_filter(struct ftrace_event_file *file)
 {
+	struct ftrace_event_call *call = file->event_call;
+
+	filter_disable(file);
+	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+		remove_filter_string(call->filter);
+	else
+		remove_filter_string(file->filter);
+}
+
+static void filter_free_subsystem_preds(struct event_subsystem *system,
+					struct trace_array *tr)
+{
+	struct ftrace_event_file *file;
 	struct ftrace_event_call *call;
 
-	list_for_each_entry(call, &ftrace_events, list) {
+	list_for_each_entry(file, &tr->events, list) {
+		call = file->event_call;
 		if (strcmp(call->class->system, system->name) != 0)
 			continue;
 
-		filter_disable(call);
-		remove_filter_string(call->filter);
+		__remove_filter(file);
 	}
 }
 
-static void filter_free_subsystem_filters(struct event_subsystem *system)
+static inline void __free_subsystem_filter(struct ftrace_event_file *file)
 {
+	struct ftrace_event_call *call = file->event_call;
+
+	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) {
+		__free_filter(call->filter);
+		call->filter = NULL;
+	} else {
+		__free_filter(file->filter);
+		file->filter = NULL;
+	}
+}
+
+static void filter_free_subsystem_filters(struct event_subsystem *system,
+					  struct trace_array *tr)
+{
+	struct ftrace_event_file *file;
 	struct ftrace_event_call *call;
 
-	list_for_each_entry(call, &ftrace_events, list) {
+	list_for_each_entry(file, &tr->events, list) {
+		call = file->event_call;
 		if (strcmp(call->class->system, system->name) != 0)
 			continue;
-		__free_filter(call->filter);
-		call->filter = NULL;
+		__free_subsystem_filter(file);
 	}
 }
 
@@ -1617,15 +1677,85 @@ fail:
 	return err;
 }
 
+static inline void event_set_filtered_flag(struct ftrace_event_file *file)
+{
+	struct ftrace_event_call *call = file->event_call;
+
+	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+		call->flags |= TRACE_EVENT_FL_FILTERED;
+	else
+		file->flags |= FTRACE_EVENT_FL_FILTERED;
+}
+
+static inline void event_set_filter(struct ftrace_event_file *file,
+				    struct event_filter *filter)
+{
+	struct ftrace_event_call *call = file->event_call;
+
+	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+		rcu_assign_pointer(call->filter, filter);
+	else
+		rcu_assign_pointer(file->filter, filter);
+}
+
+static inline void event_clear_filter(struct ftrace_event_file *file)
+{
+	struct ftrace_event_call *call = file->event_call;
+
+	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+		RCU_INIT_POINTER(call->filter, NULL);
+	else
+		RCU_INIT_POINTER(file->filter, NULL);
+}
+
+static inline void
+event_set_no_set_filter_flag(struct ftrace_event_file *file)
+{
+	struct ftrace_event_call *call = file->event_call;
+
+	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+		call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
+	else
+		file->flags |= FTRACE_EVENT_FL_NO_SET_FILTER;
+}
+
+static inline void
+event_clear_no_set_filter_flag(struct ftrace_event_file *file)
+{
+	struct ftrace_event_call *call = file->event_call;
+
+	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+		call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
+	else
+		file->flags &= ~FTRACE_EVENT_FL_NO_SET_FILTER;
+}
+
+static inline bool
+event_no_set_filter_flag(struct ftrace_event_file *file)
+{
+	struct ftrace_event_call *call = file->event_call;
+
+	if (file->flags & FTRACE_EVENT_FL_NO_SET_FILTER)
+		return true;
+
+	if ((call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) &&
+	    (call->flags & TRACE_EVENT_FL_NO_SET_FILTER))
+		return true;
+
+	return false;
+}
+
 struct filter_list {
 	struct list_head	list;
 	struct event_filter	*filter;
 };
 
 static int replace_system_preds(struct event_subsystem *system,
+				struct trace_array *tr,
 				struct filter_parse_state *ps,
 				char *filter_string)
 {
+	struct ftrace_event_file *file;
 	struct ftrace_event_call *call;
 	struct filter_list *filter_item;
 	struct filter_list *tmp;
@@ -1633,8 +1763,8 @@ static int replace_system_preds(struct event_subsystem *system,
 	bool fail = true;
 	int err;
 
-	list_for_each_entry(call, &ftrace_events, list) {
-
+	list_for_each_entry(file, &tr->events, list) {
+		call = file->event_call;
 		if (strcmp(call->class->system, system->name) != 0)
 			continue;
 
@@ -1644,18 +1774,20 @@ static int replace_system_preds(struct event_subsystem *system,
 		 */
 		err = replace_preds(call, NULL, ps, filter_string, true);
 		if (err)
-			call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
+			event_set_no_set_filter_flag(file);
 		else
-			call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
+			event_clear_no_set_filter_flag(file);
 	}
 
-	list_for_each_entry(call, &ftrace_events, list) {
+	list_for_each_entry(file, &tr->events, list) {
 		struct event_filter *filter;
 
+		call = file->event_call;
+
 		if (strcmp(call->class->system, system->name) != 0)
 			continue;
 
-		if (call->flags & TRACE_EVENT_FL_NO_SET_FILTER)
+		if (event_no_set_filter_flag(file))
 			continue;
 
 		filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
@@ -1676,17 +1808,17 @@ static int replace_system_preds(struct event_subsystem *system,
 
 		err = replace_preds(call, filter, ps, filter_string, false);
 		if (err) {
-			filter_disable(call);
+			filter_disable(file);
 			parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
 			append_filter_err(ps, filter);
 		} else
-			call->flags |= TRACE_EVENT_FL_FILTERED;
+			event_set_filtered_flag(file);
 		/*
 		 * Regardless of if this returned an error, we still
 		 * replace the filter for the call.
 		 */
-		filter = call->filter;
-		rcu_assign_pointer(call->filter, filter_item->filter);
+		filter = event_filter(file);
+		event_set_filter(file, filter_item->filter);
 		filter_item->filter = filter;
 
 		fail = false;
@@ -1816,6 +1948,7 @@ static int create_filter(struct ftrace_event_call *call,
  * and always remembers @filter_str.
  */
 static int create_system_filter(struct event_subsystem *system,
+				struct trace_array *tr,
 				char *filter_str, struct event_filter **filterp)
 {
 	struct event_filter *filter = NULL;
@@ -1824,7 +1957,7 @@ static int create_system_filter(struct event_subsystem *system,
 
 	err = create_filter_start(filter_str, true, &ps, &filter);
 	if (!err) {
-		err = replace_system_preds(system, ps, filter_str);
+		err = replace_system_preds(system, tr, ps, filter_str);
 		if (!err) {
 			/* System filters just show a default message */
 			kfree(filter->filter_string);
@@ -1840,20 +1973,25 @@ static int create_system_filter(struct event_subsystem *system,
 }
 
 /* caller must hold event_mutex */
-int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
+int apply_event_filter(struct ftrace_event_file *file, char *filter_string)
 {
+	struct ftrace_event_call *call = file->event_call;
 	struct event_filter *filter;
 	int err;
 
 	if (!strcmp(strstrip(filter_string), "0")) {
-		filter_disable(call);
-		filter = call->filter;
+		filter_disable(file);
+		filter = event_filter(file);
+
 		if (!filter)
 			return 0;
-		RCU_INIT_POINTER(call->filter, NULL);
+
+		event_clear_filter(file);
+
 		/* Make sure the filter is not being used */
 		synchronize_sched();
 		__free_filter(filter);
+
 		return 0;
 	}
 
@@ -1866,14 +2004,15 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
 	 * string
 	 */
 	if (filter) {
-		struct event_filter *tmp = call->filter;
+		struct event_filter *tmp;
 
+		tmp = event_filter(file);
 		if (!err)
-			call->flags |= TRACE_EVENT_FL_FILTERED;
+			event_set_filtered_flag(file);
 		else
-			filter_disable(call);
+			filter_disable(file);
 
-		rcu_assign_pointer(call->filter, filter);
+		event_set_filter(file, filter);
 
 		if (tmp) {
 			/* Make sure the call is done with the filter */
@@ -1889,6 +2028,7 @@ int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
 				 char *filter_string)
 {
 	struct event_subsystem *system = dir->subsystem;
+	struct trace_array *tr = dir->tr;
 	struct event_filter *filter;
 	int err = 0;
 
@@ -1901,18 +2041,18 @@ int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
 	}
 
 	if (!strcmp(strstrip(filter_string), "0")) {
-		filter_free_subsystem_preds(system);
+		filter_free_subsystem_preds(system, tr);
 		remove_filter_string(system->filter);
 		filter = system->filter;
 		system->filter = NULL;
 		/* Ensure all filters are no longer used */
 		synchronize_sched();
-		filter_free_subsystem_filters(system);
+		filter_free_subsystem_filters(system, tr);
 		__free_filter(filter);
 		goto out_unlock;
 	}
 
-	err = create_system_filter(system, filter_string, &filter);
+	err = create_system_filter(system, tr, filter_string, &filter);
 	if (filter) {
 		/*
 		 * No event actually uses the system filter
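[Annotation, not part of the patch] The helpers added in this file all follow one selection rule: events whose call is marked TRACE_EVENT_FL_USE_CALL_FILTER (the ftrace internal, syscall and uprobe events flagged elsewhere in this patch) keep their filter state on the shared ftrace_event_call, while every other event keeps it on the per-instance ftrace_event_file, so a filter set in one trace instance no longer affects the same event in another instance. A condensed restatement of that rule, equivalent to the event_filter()/event_set_filter() pair above; the function name is only for illustration:

/* Illustration only: where an event's filter state lives after this patch. */
static struct event_filter *sketch_effective_filter(struct ftrace_event_file *file)
{
	struct ftrace_event_call *call = file->event_call;

	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
		return call->filter;	/* one filter shared by all instances */

	return file->filter;		/* private to this trace_array instance */
}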
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index d21a74670088..7c3e3e72e2b6 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -180,7 +180,7 @@ struct ftrace_event_call __used event_##call = { \
 	.event.type = etype, \
 	.class = &event_class_ftrace_##call, \
 	.print_fmt = print, \
-	.flags = TRACE_EVENT_FL_IGNORE_ENABLE, \
+	.flags = TRACE_EVENT_FL_IGNORE_ENABLE | TRACE_EVENT_FL_USE_CALL_FILTER, \
 }; \
 struct ftrace_event_call __used \
 __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index e08c030b8f38..80387d1d27e1 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -270,7 +270,7 @@ int __trace_graph_entry(struct trace_array *tr,
 		return 0;
 	entry = ring_buffer_event_data(event);
 	entry->graph_ent = *trace;
-	if (!filter_current_check_discard(buffer, call, entry, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		__buffer_unlock_commit(buffer, event);
 
 	return 1;
@@ -385,7 +385,7 @@ void __trace_graph_return(struct trace_array *tr,
 		return;
 	entry = ring_buffer_event_data(event);
 	entry->ret = *trace;
-	if (!filter_current_check_discard(buffer, call, entry, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		__buffer_unlock_commit(buffer, event);
 }
 
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 243f6834d026..dae9541ada9e 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -835,7 +835,7 @@ __kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs,
 	entry->ip = (unsigned long)tp->rp.kp.addr;
 	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
-	if (!filter_current_check_discard(buffer, call, entry, event))
+	if (!filter_check_discard(ftrace_file, entry, buffer, event))
 		trace_buffer_unlock_commit_regs(buffer, event,
 						irq_flags, pc, regs);
 }
@@ -884,7 +884,7 @@ __kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
 	entry->ret_ip = (unsigned long)ri->ret_addr;
 	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
-	if (!filter_current_check_discard(buffer, call, entry, event))
+	if (!filter_check_discard(ftrace_file, entry, buffer, event))
 		trace_buffer_unlock_commit_regs(buffer, event,
 						irq_flags, pc, regs);
 }
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index b3dcfb2f0fef..0abd9b863474 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -323,7 +323,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 	entry = ring_buffer_event_data(event);
 	entry->rw = *rw;
 
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		trace_buffer_unlock_commit(buffer, event, 0, pc);
 }
 
@@ -353,7 +353,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 	entry = ring_buffer_event_data(event);
 	entry->map = *map;
 
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		trace_buffer_unlock_commit(buffer, event, 0, pc);
 }
 
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 4e98e3b257a3..3f34dc9b40f3 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -45,7 +45,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
 	entry->next_state = next->state;
 	entry->next_cpu = task_cpu(next);
 
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		trace_buffer_unlock_commit(buffer, event, flags, pc);
 }
 
@@ -101,7 +101,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	entry->next_state = wakee->state;
 	entry->next_cpu = task_cpu(wakee);
 
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		trace_buffer_unlock_commit(buffer, event, flags, pc);
 }
 
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 559329d9bd2f..32644eece429 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -336,8 +336,8 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 	entry->nr = syscall_nr;
 	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);
 
-	if (!filter_current_check_discard(buffer, sys_data->enter_event,
-					  entry, event))
+	if (!call_filter_check_discard(sys_data->enter_event, entry,
+				       buffer, event))
 		trace_current_buffer_unlock_commit(buffer, event,
 						   irq_flags, pc);
 }
@@ -377,8 +377,8 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
 	entry->nr = syscall_nr;
 	entry->ret = syscall_get_return_value(current, regs);
 
-	if (!filter_current_check_discard(buffer, sys_data->exit_event,
-					  entry, event))
+	if (!call_filter_check_discard(sys_data->exit_event, entry,
+				       buffer, event))
 		trace_current_buffer_unlock_commit(buffer, event,
 						   irq_flags, pc);
 }
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 272261b5f94f..b6dcc42ef7f5 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -128,6 +128,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
 	if (is_ret)
 		tu->consumer.ret_handler = uretprobe_dispatcher;
 	init_trace_uprobe_filter(&tu->filter);
+	tu->call.flags |= TRACE_EVENT_FL_USE_CALL_FILTER;
 	return tu;
 
 error:
@@ -561,7 +562,7 @@ static void uprobe_trace_print(struct trace_uprobe *tu,
 	for (i = 0; i < tu->nr_args; i++)
 		call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset);
 
-	if (!filter_current_check_discard(buffer, call, entry, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		trace_buffer_unlock_commit(buffer, event, 0, 0);
 }
 