Diffstat (limited to 'kernel/trace')

 kernel/trace/ftrace.c              | 134
 kernel/trace/trace.c               |   6
 kernel/trace/trace.h               |  38
 kernel/trace/trace_entries.h       |  54
 kernel/trace/trace_event_perf.c    | 208
 kernel/trace/trace_events.c        |  12
 kernel/trace/trace_events_filter.c | 175
 kernel/trace/trace_export.c        |  64
 kernel/trace/trace_kprobe.c        |   8
 kernel/trace/trace_output.c        |  12
 kernel/trace/trace_syscalls.c      |  22
 11 files changed, 605 insertions(+), 128 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 683d559a0eef..867bd1dd2dd0 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -62,6 +62,8 @@
 #define FTRACE_HASH_DEFAULT_BITS 10
 #define FTRACE_HASH_MAX_BITS 12
 
+#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
+
 /* ftrace_enabled is a method to turn ftrace on or off */
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
@@ -89,12 +91,14 @@ static struct ftrace_ops ftrace_list_end __read_mostly = {
 };
 
 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
+static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
+static struct ftrace_ops control_ops;
 
 static void
 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
@@ -168,6 +172,32 @@ static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
 }
 #endif
 
+static void control_ops_disable_all(struct ftrace_ops *ops)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		*per_cpu_ptr(ops->disabled, cpu) = 1;
+}
+
+static int control_ops_alloc(struct ftrace_ops *ops)
+{
+	int __percpu *disabled;
+
+	disabled = alloc_percpu(int);
+	if (!disabled)
+		return -ENOMEM;
+
+	ops->disabled = disabled;
+	control_ops_disable_all(ops);
+	return 0;
+}
+
+static void control_ops_free(struct ftrace_ops *ops)
+{
+	free_percpu(ops->disabled);
+}
+
 static void update_global_ops(void)
 {
 	ftrace_func_t func;
@@ -259,6 +289,26 @@ static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
 	return 0;
 }
 
+static void add_ftrace_list_ops(struct ftrace_ops **list,
+				struct ftrace_ops *main_ops,
+				struct ftrace_ops *ops)
+{
+	int first = *list == &ftrace_list_end;
+	add_ftrace_ops(list, ops);
+	if (first)
+		add_ftrace_ops(&ftrace_ops_list, main_ops);
+}
+
+static int remove_ftrace_list_ops(struct ftrace_ops **list,
+				  struct ftrace_ops *main_ops,
+				  struct ftrace_ops *ops)
+{
+	int ret = remove_ftrace_ops(list, ops);
+	if (!ret && *list == &ftrace_list_end)
+		ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
+	return ret;
+}
+
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
 	if (ftrace_disabled)
@@ -270,15 +320,20 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
 		return -EBUSY;
 
+	/* We don't support both control and global flags set. */
+	if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
+		return -EINVAL;
+
 	if (!core_kernel_data((unsigned long)ops))
 		ops->flags |= FTRACE_OPS_FL_DYNAMIC;
 
 	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-		int first = ftrace_global_list == &ftrace_list_end;
-		add_ftrace_ops(&ftrace_global_list, ops);
+		add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
 		ops->flags |= FTRACE_OPS_FL_ENABLED;
-		if (first)
-			add_ftrace_ops(&ftrace_ops_list, &global_ops);
+	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
+		if (control_ops_alloc(ops))
+			return -ENOMEM;
+		add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
 	} else
 		add_ftrace_ops(&ftrace_ops_list, ops);
 
@@ -302,11 +357,23 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 		return -EINVAL;
 
 	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-		ret = remove_ftrace_ops(&ftrace_global_list, ops);
-		if (!ret && ftrace_global_list == &ftrace_list_end)
-			ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops);
+		ret = remove_ftrace_list_ops(&ftrace_global_list,
+					     &global_ops, ops);
 		if (!ret)
 			ops->flags &= ~FTRACE_OPS_FL_ENABLED;
+	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
+		ret = remove_ftrace_list_ops(&ftrace_control_list,
+					     &control_ops, ops);
+		if (!ret) {
+			/*
+			 * The ftrace_ops is now removed from the list,
+			 * so there'll be no new users. We must ensure
+			 * all current users are done before we free
+			 * the control data.
+			 */
+			synchronize_sched();
+			control_ops_free(ops);
+		}
 	} else
 		ret = remove_ftrace_ops(&ftrace_ops_list, ops);
 
@@ -1119,6 +1186,12 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
 	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
 }
 
+void ftrace_free_filter(struct ftrace_ops *ops)
+{
+	free_ftrace_hash(ops->filter_hash);
+	free_ftrace_hash(ops->notrace_hash);
+}
+
 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
 {
 	struct ftrace_hash *hash;
@@ -1129,7 +1202,7 @@ static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
 		return NULL;
 
 	size = 1 << size_bits;
-	hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL);
+	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
 
 	if (!hash->buckets) {
 		kfree(hash);
@@ -3146,8 +3219,10 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
 	mutex_lock(&ftrace_regex_lock);
 	if (reset)
 		ftrace_filter_reset(hash);
-	if (buf)
-		ftrace_match_records(hash, buf, len);
+	if (buf && !ftrace_match_records(hash, buf, len)) {
+		ret = -EINVAL;
+		goto out_regex_unlock;
+	}
 
 	mutex_lock(&ftrace_lock);
 	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
@@ -3157,6 +3232,7 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
 
 	mutex_unlock(&ftrace_lock);
 
+ out_regex_unlock:
 	mutex_unlock(&ftrace_regex_lock);
 
 	free_ftrace_hash(hash);
@@ -3173,10 +3249,10 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
  * Filters denote which functions should be enabled when tracing is enabled.
  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
  */
-void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
-		       int len, int reset)
+int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
+		      int len, int reset)
 {
-	ftrace_set_regex(ops, buf, len, reset, 1);
+	return ftrace_set_regex(ops, buf, len, reset, 1);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_filter);
 
@@ -3191,10 +3267,10 @@ EXPORT_SYMBOL_GPL(ftrace_set_filter);
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
-void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
-			int len, int reset)
+int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
+		       int len, int reset)
 {
-	ftrace_set_regex(ops, buf, len, reset, 0);
+	return ftrace_set_regex(ops, buf, len, reset, 0);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
 /**
@@ -3871,6 +3947,36 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 static void
+ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
+{
+	struct ftrace_ops *op;
+
+	if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
+		return;
+
+	/*
+	 * Some of the ops may be dynamically allocated,
+	 * they must be freed after a synchronize_sched().
+	 */
+	preempt_disable_notrace();
+	trace_recursion_set(TRACE_CONTROL_BIT);
+	op = rcu_dereference_raw(ftrace_control_list);
+	while (op != &ftrace_list_end) {
+		if (!ftrace_function_local_disabled(op) &&
+		    ftrace_ops_test(op, ip))
+			op->func(ip, parent_ip);
+
+		op = rcu_dereference_raw(op->next);
+	};
+	trace_recursion_clear(TRACE_CONTROL_BIT);
+	preempt_enable_notrace();
+}
+
+static struct ftrace_ops control_ops = {
+	.func = ftrace_ops_control_func,
+};
+
+static void
 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
 {
 	struct ftrace_ops *op;
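
Note on usage: a control ops is registered like any other ftrace_ops, but its
callback is gated by a per-CPU 'disabled' flag that starts set on every CPU,
so nothing fires until a CPU explicitly enables it. A minimal sketch, using
the local-enable helpers this series adds alongside the core changes (used in
trace_event_perf.c below); my_func and my_ops are illustrative names, not
part of this diff:

	static void my_func(unsigned long ip, unsigned long parent_ip)
	{
		/* runs only on CPUs where my_ops was locally enabled */
	}

	static struct ftrace_ops my_ops = {
		.func	= my_func,
		.flags	= FTRACE_OPS_FL_CONTROL,
	};

	/* registration allocates the per-CPU state, all CPUs disabled */
	register_ftrace_function(&my_ops);

	/* enable/disable act on the current CPU only; the caller must not
	   migrate, hence the preemption bracket */
	preempt_disable();
	ftrace_function_local_enable(&my_ops);
	/* ... traced region ... */
	ftrace_function_local_disable(&my_ops);
	preempt_enable();

	/* waits for in-flight callbacks (synchronize_sched()), then frees
	   the per-CPU state */
	unregister_ftrace_function(&my_ops);
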
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index a3f1bc5d2a00..10d5503f0d04 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2764,12 +2764,12 @@ static const char readme_msg[] =
 	"tracing mini-HOWTO:\n\n"
 	"# mount -t debugfs nodev /sys/kernel/debug\n\n"
 	"# cat /sys/kernel/debug/tracing/available_tracers\n"
-	"wakeup preemptirqsoff preemptoff irqsoff function sched_switch nop\n\n"
+	"wakeup wakeup_rt preemptirqsoff preemptoff irqsoff function nop\n\n"
 	"# cat /sys/kernel/debug/tracing/current_tracer\n"
 	"nop\n"
-	"# echo sched_switch > /sys/kernel/debug/tracing/current_tracer\n"
+	"# echo wakeup > /sys/kernel/debug/tracing/current_tracer\n"
 	"# cat /sys/kernel/debug/tracing/current_tracer\n"
-	"sched_switch\n"
+	"wakeup\n"
 	"# cat /sys/kernel/debug/tracing/trace_options\n"
 	"noprint-parent nosym-offset nosym-addr noverbose\n"
 	"# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index b93ecbadad6d..54faec790bc1 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -56,17 +56,23 @@ enum trace_type {
 #define F_STRUCT(args...)	args
 
 #undef FTRACE_ENTRY
-#define FTRACE_ENTRY(name, struct_name, id, tstruct, print)	\
+#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
 	struct struct_name {				\
 		struct trace_entry	ent;		\
 		tstruct					\
 	}
 
 #undef TP_ARGS
 #define TP_ARGS(args...)	args
 
 #undef FTRACE_ENTRY_DUP
-#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)
+#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)
+
+#undef FTRACE_ENTRY_REG
+#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	\
+			 filter, regfn) \
+	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
+		     filter)
 
 #include "trace_entries.h"
 
@@ -288,6 +294,8 @@ struct tracer {
 /* for function tracing recursion */
 #define TRACE_INTERNAL_BIT		(1<<11)
 #define TRACE_GLOBAL_BIT		(1<<12)
+#define TRACE_CONTROL_BIT		(1<<13)
+
 /*
  * Abuse of the trace_recursion.
  * As we need a way to maintain state if we are tracing the function
@@ -589,6 +597,8 @@ static inline int ftrace_trace_task(struct task_struct *task)
 static inline int ftrace_is_dead(void) { return 0; }
 #endif
 
+int ftrace_event_is_function(struct ftrace_event_call *call);
+
 /*
  * struct trace_parser - servers for reading the user input separated by spaces
  * @cont: set if the input is not complete - no final space char was found
@@ -766,9 +776,7 @@ struct filter_pred {
 	u64			val;
 	struct regex		regex;
 	unsigned short		*ops;
-#ifdef CONFIG_FTRACE_STARTUP_TEST
 	struct ftrace_event_field *field;
-#endif
 	int			offset;
 	int			not;
 	int			op;
@@ -818,12 +826,22 @@ extern const char *__start___trace_bprintk_fmt[];
 extern const char *__stop___trace_bprintk_fmt[];
 
 #undef FTRACE_ENTRY
-#define FTRACE_ENTRY(call, struct_name, id, tstruct, print)	\
+#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
 	extern struct ftrace_event_call			\
 	__attribute__((__aligned__(4))) event_##call;
 #undef FTRACE_ENTRY_DUP
-#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print)	\
-	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
+#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
+	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
+		     filter)
 #include "trace_entries.h"
 
+#ifdef CONFIG_PERF_EVENTS
+#ifdef CONFIG_FUNCTION_TRACER
+int perf_ftrace_event_register(struct ftrace_event_call *call,
+			       enum trace_reg type, void *data);
+#else
+#define perf_ftrace_event_register NULL
+#endif /* CONFIG_FUNCTION_TRACER */
+#endif /* CONFIG_PERF_EVENTS */
+
 #endif /* _LINUX_KERNEL_TRACE_H */
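
These trace.h changes ride on the multi-include trick used throughout this
series: trace_entries.h is included several times, and each inclusion site
redefines FTRACE_ENTRY (which now must also consume the extra 'filter'
argument) to generate a different artifact from the same entry descriptions.
A stripped-down sketch of the pattern, not the verbatim kernel macros:

	/* pass 1: emit the entry structures */
	#undef FTRACE_ENTRY
	#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter) \
		struct struct_name {					\
			struct trace_entry ent;				\
			tstruct						\
		};
	#include "trace_entries.h"

	/* pass 2: emit per-event field setup, now consuming 'filter' */
	#undef FTRACE_ENTRY
	#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter) \
		int ftrace_define_fields_##name(struct ftrace_event_call *call) \
		{							\
			int filter_type = filter;			\
			/* one trace_define_field() per field ... */	\
			return 0;					\
		}
	#include "trace_entries.h"

FTRACE_ENTRY_REG is the same idea with one more argument: an entry that
passes a regfn (only the 'function' entry below does) gets a reg() callback
wired into its event class, while plain FTRACE_ENTRY defaults it to NULL.
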
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index 93365907f219..d91eb0541b3a 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -55,7 +55,7 @@
 /*
  * Function trace entry - function address and parent function address:
  */
-FTRACE_ENTRY(function, ftrace_entry,
+FTRACE_ENTRY_REG(function, ftrace_entry,
 
 	TRACE_FN,
 
@@ -64,7 +64,11 @@ FTRACE_ENTRY(function, ftrace_entry,
 		__field(	unsigned long,	parent_ip	)
 	),
 
-	F_printk(" %lx <-- %lx", __entry->ip, __entry->parent_ip)
+	F_printk(" %lx <-- %lx", __entry->ip, __entry->parent_ip),
+
+	FILTER_TRACE_FN,
+
+	perf_ftrace_event_register
 );
 
 /* Function call entry */
@@ -78,7 +82,9 @@ FTRACE_ENTRY(funcgraph_entry, ftrace_graph_ent_entry,
 		__field_desc(	int,	graph_ent,	depth	)
 	),
 
-	F_printk("--> %lx (%d)", __entry->func, __entry->depth)
+	F_printk("--> %lx (%d)", __entry->func, __entry->depth),
+
+	FILTER_OTHER
 );
 
 /* Function return entry */
@@ -98,7 +104,9 @@ FTRACE_ENTRY(funcgraph_exit, ftrace_graph_ret_entry,
 	F_printk("<-- %lx (%d) (start: %llx end: %llx) over: %d",
 		 __entry->func, __entry->depth,
 		 __entry->calltime, __entry->rettime,
-		 __entry->depth)
+		 __entry->depth),
+
+	FILTER_OTHER
 );
 
 /*
@@ -127,8 +135,9 @@ FTRACE_ENTRY(context_switch, ctx_switch_entry,
 	F_printk("%u:%u:%u ==> %u:%u:%u [%03u]",
 		 __entry->prev_pid, __entry->prev_prio, __entry->prev_state,
 		 __entry->next_pid, __entry->next_prio, __entry->next_state,
-		 __entry->next_cpu
-		)
+		 __entry->next_cpu),
+
+	FILTER_OTHER
 );
 
 /*
@@ -146,8 +155,9 @@ FTRACE_ENTRY_DUP(wakeup, ctx_switch_entry,
 	F_printk("%u:%u:%u ==+ %u:%u:%u [%03u]",
 		 __entry->prev_pid, __entry->prev_prio, __entry->prev_state,
 		 __entry->next_pid, __entry->next_prio, __entry->next_state,
-		 __entry->next_cpu
-		)
+		 __entry->next_cpu),
+
+	FILTER_OTHER
 );
 
 /*
@@ -169,7 +179,9 @@ FTRACE_ENTRY(kernel_stack, stack_entry,
 		 "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n",
 		 __entry->caller[0], __entry->caller[1], __entry->caller[2],
 		 __entry->caller[3], __entry->caller[4], __entry->caller[5],
-		 __entry->caller[6], __entry->caller[7])
+		 __entry->caller[6], __entry->caller[7]),
+
+	FILTER_OTHER
 );
 
 FTRACE_ENTRY(user_stack, userstack_entry,
@@ -185,7 +197,9 @@ FTRACE_ENTRY(user_stack, userstack_entry,
 		 "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n",
 		 __entry->caller[0], __entry->caller[1], __entry->caller[2],
 		 __entry->caller[3], __entry->caller[4], __entry->caller[5],
-		 __entry->caller[6], __entry->caller[7])
+		 __entry->caller[6], __entry->caller[7]),
+
+	FILTER_OTHER
 );
 
 /*
@@ -202,7 +216,9 @@ FTRACE_ENTRY(bprint, bprint_entry,
 	),
 
 	F_printk("%08lx fmt:%p",
-		 __entry->ip, __entry->fmt)
+		 __entry->ip, __entry->fmt),
+
+	FILTER_OTHER
 );
 
 FTRACE_ENTRY(print, print_entry,
@@ -215,7 +231,9 @@ FTRACE_ENTRY(print, print_entry,
 	),
 
 	F_printk("%08lx %s",
-		 __entry->ip, __entry->buf)
+		 __entry->ip, __entry->buf),
+
+	FILTER_OTHER
 );
 
 FTRACE_ENTRY(mmiotrace_rw, trace_mmiotrace_rw,
@@ -234,7 +252,9 @@ FTRACE_ENTRY(mmiotrace_rw, trace_mmiotrace_rw,
 
 	F_printk("%lx %lx %lx %d %x %x",
 		 (unsigned long)__entry->phys, __entry->value, __entry->pc,
-		 __entry->map_id, __entry->opcode, __entry->width)
+		 __entry->map_id, __entry->opcode, __entry->width),
+
+	FILTER_OTHER
 );
 
 FTRACE_ENTRY(mmiotrace_map, trace_mmiotrace_map,
@@ -252,7 +272,9 @@ FTRACE_ENTRY(mmiotrace_map, trace_mmiotrace_map,
 
 	F_printk("%lx %lx %lx %d %x",
 		 (unsigned long)__entry->phys, __entry->virt, __entry->len,
-		 __entry->map_id, __entry->opcode)
+		 __entry->map_id, __entry->opcode),
+
+	FILTER_OTHER
 );
 
 
@@ -272,6 +294,8 @@ FTRACE_ENTRY(branch, trace_branch,
 
 	F_printk("%u:%s:%s (%u)",
 		 __entry->line,
-		 __entry->func, __entry->file, __entry->correct)
+		 __entry->func, __entry->file, __entry->correct),
+
+	FILTER_OTHER
 );
 
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 19a359d5e6d5..fee3752ae8f6 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -24,6 +24,11 @@ static int total_ref_count;
 static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
 				 struct perf_event *p_event)
 {
+	/* The ftrace function trace is allowed only for root. */
+	if (ftrace_event_is_function(tp_event) &&
+	    perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
 	/* No tracing, just counting, so no obvious leak */
 	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
 		return 0;
@@ -44,23 +49,17 @@ static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
 	return 0;
 }
 
-static int perf_trace_event_init(struct ftrace_event_call *tp_event,
+static int perf_trace_event_reg(struct ftrace_event_call *tp_event,
 				struct perf_event *p_event)
 {
 	struct hlist_head __percpu *list;
-	int ret;
+	int ret = -ENOMEM;
 	int cpu;
 
-	ret = perf_trace_event_perm(tp_event, p_event);
-	if (ret)
-		return ret;
-
 	p_event->tp_event = tp_event;
 	if (tp_event->perf_refcount++ > 0)
 		return 0;
 
-	ret = -ENOMEM;
-
 	list = alloc_percpu(struct hlist_head);
 	if (!list)
 		goto fail;
@@ -83,7 +82,7 @@ static int perf_trace_event_init(struct ftrace_event_call *tp_event,
 		}
 	}
 
-	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER);
+	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
 	if (ret)
 		goto fail;
 
@@ -108,6 +107,69 @@ fail:
 	return ret;
 }
 
+static void perf_trace_event_unreg(struct perf_event *p_event)
+{
+	struct ftrace_event_call *tp_event = p_event->tp_event;
+	int i;
+
+	if (--tp_event->perf_refcount > 0)
+		goto out;
+
+	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);
+
+	/*
+	 * Ensure our callback won't be called anymore. The buffers
+	 * will be freed after that.
+	 */
+	tracepoint_synchronize_unregister();
+
+	free_percpu(tp_event->perf_events);
+	tp_event->perf_events = NULL;
+
+	if (!--total_ref_count) {
+		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
+			free_percpu(perf_trace_buf[i]);
+			perf_trace_buf[i] = NULL;
+		}
+	}
+out:
+	module_put(tp_event->mod);
+}
+
+static int perf_trace_event_open(struct perf_event *p_event)
+{
+	struct ftrace_event_call *tp_event = p_event->tp_event;
+	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
+}
+
+static void perf_trace_event_close(struct perf_event *p_event)
+{
+	struct ftrace_event_call *tp_event = p_event->tp_event;
+	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
+}
+
+static int perf_trace_event_init(struct ftrace_event_call *tp_event,
+				 struct perf_event *p_event)
+{
+	int ret;
+
+	ret = perf_trace_event_perm(tp_event, p_event);
+	if (ret)
+		return ret;
+
+	ret = perf_trace_event_reg(tp_event, p_event);
+	if (ret)
+		return ret;
+
+	ret = perf_trace_event_open(p_event);
+	if (ret) {
+		perf_trace_event_unreg(p_event);
+		return ret;
+	}
+
+	return 0;
+}
+
 int perf_trace_init(struct perf_event *p_event)
 {
 	struct ftrace_event_call *tp_event;
@@ -130,6 +192,14 @@ int perf_trace_init(struct perf_event *p_event)
 	return ret;
 }
 
+void perf_trace_destroy(struct perf_event *p_event)
+{
+	mutex_lock(&event_mutex);
+	perf_trace_event_close(p_event);
+	perf_trace_event_unreg(p_event);
+	mutex_unlock(&event_mutex);
+}
+
 int perf_trace_add(struct perf_event *p_event, int flags)
 {
 	struct ftrace_event_call *tp_event = p_event->tp_event;
@@ -146,43 +216,14 @@ int perf_trace_add(struct perf_event *p_event, int flags)
 	list = this_cpu_ptr(pcpu_list);
 	hlist_add_head_rcu(&p_event->hlist_entry, list);
 
-	return 0;
+	return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event);
 }
 
 void perf_trace_del(struct perf_event *p_event, int flags)
 {
-	hlist_del_rcu(&p_event->hlist_entry);
-}
-
-void perf_trace_destroy(struct perf_event *p_event)
-{
 	struct ftrace_event_call *tp_event = p_event->tp_event;
-	int i;
-
-	mutex_lock(&event_mutex);
-	if (--tp_event->perf_refcount > 0)
-		goto out;
-
-	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER);
-
-	/*
-	 * Ensure our callback won't be called anymore. The buffers
-	 * will be freed after that.
-	 */
-	tracepoint_synchronize_unregister();
-
-	free_percpu(tp_event->perf_events);
-	tp_event->perf_events = NULL;
-
-	if (!--total_ref_count) {
-		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
-			free_percpu(perf_trace_buf[i]);
-			perf_trace_buf[i] = NULL;
-		}
-	}
-out:
-	module_put(tp_event->mod);
-	mutex_unlock(&event_mutex);
+	hlist_del_rcu(&p_event->hlist_entry);
+	tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
 }
 
 __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
@@ -214,3 +255,86 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
 	return raw_data;
 }
 EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
+
+#ifdef CONFIG_FUNCTION_TRACER
+static void
+perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip)
+{
+	struct ftrace_entry *entry;
+	struct hlist_head *head;
+	struct pt_regs regs;
+	int rctx;
+
+#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
+		    sizeof(u64)) - sizeof(u32))
+
+	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);
+
+	perf_fetch_caller_regs(&regs);
+
+	entry = perf_trace_buf_prepare(ENTRY_SIZE, TRACE_FN, NULL, &rctx);
+	if (!entry)
+		return;
+
+	entry->ip = ip;
+	entry->parent_ip = parent_ip;
+
+	head = this_cpu_ptr(event_function.perf_events);
+	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
+			      1, &regs, head);
+
+#undef ENTRY_SIZE
+}
+
+static int perf_ftrace_function_register(struct perf_event *event)
+{
+	struct ftrace_ops *ops = &event->ftrace_ops;
+
+	ops->flags |= FTRACE_OPS_FL_CONTROL;
+	ops->func = perf_ftrace_function_call;
+	return register_ftrace_function(ops);
+}
+
+static int perf_ftrace_function_unregister(struct perf_event *event)
+{
+	struct ftrace_ops *ops = &event->ftrace_ops;
+	int ret = unregister_ftrace_function(ops);
+	ftrace_free_filter(ops);
+	return ret;
+}
+
+static void perf_ftrace_function_enable(struct perf_event *event)
+{
+	ftrace_function_local_enable(&event->ftrace_ops);
+}
+
+static void perf_ftrace_function_disable(struct perf_event *event)
+{
+	ftrace_function_local_disable(&event->ftrace_ops);
+}
+
+int perf_ftrace_event_register(struct ftrace_event_call *call,
+			       enum trace_reg type, void *data)
+{
+	switch (type) {
+	case TRACE_REG_REGISTER:
+	case TRACE_REG_UNREGISTER:
+		break;
+	case TRACE_REG_PERF_REGISTER:
+	case TRACE_REG_PERF_UNREGISTER:
+		return 0;
+	case TRACE_REG_PERF_OPEN:
+		return perf_ftrace_function_register(data);
+	case TRACE_REG_PERF_CLOSE:
+		return perf_ftrace_function_unregister(data);
+	case TRACE_REG_PERF_ADD:
+		perf_ftrace_function_enable(data);
+		return 0;
+	case TRACE_REG_PERF_DEL:
+		perf_ftrace_function_disable(data);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+#endif /* CONFIG_FUNCTION_TRACER */
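
Taken together, perf_ftrace_event_register() maps the new TRACE_REG_PERF_*
operations onto the control-ops interface from ftrace.c above. In summary
(paraphrasing the switch, not quoting it):

	/* TRACE_REG_PERF_OPEN   -> register_ftrace_function(&event->ftrace_ops)
	 *                          (one ftrace_ops per perf event, CONTROL flag)
	 * TRACE_REG_PERF_CLOSE  -> unregister_ftrace_function() followed by
	 *                          ftrace_free_filter()
	 * TRACE_REG_PERF_ADD    -> ftrace_function_local_enable(): the callback
	 *                          starts firing on this CPU
	 * TRACE_REG_PERF_DEL    -> ftrace_function_local_disable()
	 * TRACE_REG_PERF_REGISTER/UNREGISTER -> nothing to do, return 0
	 */
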
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index c212a7f934ec..079a93ae8a9d 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -147,7 +147,8 @@ int trace_event_raw_init(struct ftrace_event_call *call)
 }
 EXPORT_SYMBOL_GPL(trace_event_raw_init);
 
-int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type)
+int ftrace_event_reg(struct ftrace_event_call *call,
+		     enum trace_reg type, void *data)
 {
 	switch (type) {
 	case TRACE_REG_REGISTER:
@@ -170,6 +171,11 @@ int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type)
 					    call->class->perf_probe,
 					    call);
 		return 0;
+	case TRACE_REG_PERF_OPEN:
+	case TRACE_REG_PERF_CLOSE:
+	case TRACE_REG_PERF_ADD:
+	case TRACE_REG_PERF_DEL:
+		return 0;
 #endif
 	}
 	return 0;
@@ -209,7 +215,7 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call,
 				tracing_stop_cmdline_record();
 				call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
 			}
-			call->class->reg(call, TRACE_REG_UNREGISTER);
+			call->class->reg(call, TRACE_REG_UNREGISTER, NULL);
 		}
 		break;
 	case 1:
@@ -218,7 +224,7 @@
 				tracing_start_cmdline_record();
 				call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
 			}
-			ret = call->class->reg(call, TRACE_REG_REGISTER);
+			ret = call->class->reg(call, TRACE_REG_REGISTER, NULL);
 			if (ret) {
 				tracing_stop_cmdline_record();
 				pr_info("event trace: Could not enable event "
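
The four new cases handled here (and in trace_kprobe.c and trace_syscalls.c
below) correspond to enum trace_reg values added in
include/linux/ftrace_event.h, which is outside this diffstat. Reconstructed
for reference, on the assumption that the header change matches the callers
seen here:

	enum trace_reg {
		TRACE_REG_REGISTER,
		TRACE_REG_UNREGISTER,
	#ifdef CONFIG_PERF_EVENTS
		TRACE_REG_PERF_REGISTER,
		TRACE_REG_PERF_UNREGISTER,
		TRACE_REG_PERF_OPEN,
		TRACE_REG_PERF_CLOSE,
		TRACE_REG_PERF_ADD,
		TRACE_REG_PERF_DEL,
	#endif
	};

Events that do not care about the per-event lifecycle (plain tracepoints,
kprobes, syscalls) simply return 0 for the four new values.
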
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 24aee7127451..431dba8b7542 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -81,6 +81,7 @@ enum {
 	FILT_ERR_TOO_MANY_PREDS,
 	FILT_ERR_MISSING_FIELD,
 	FILT_ERR_INVALID_FILTER,
+	FILT_ERR_IP_FIELD_ONLY,
 };
 
 static char *err_text[] = {
@@ -96,6 +97,7 @@ static char *err_text[] = {
 	"Too many terms in predicate expression",
 	"Missing field name and/or value",
 	"Meaningless filter expression",
+	"Only 'ip' field is supported for function trace",
 };
 
 struct opstack_op {
@@ -685,7 +687,7 @@ find_event_field(struct ftrace_event_call *call, char *name)
 
 static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
 {
-	stack->preds = kzalloc(sizeof(*stack->preds)*(n_preds + 1), GFP_KERNEL);
+	stack->preds = kcalloc(n_preds + 1, sizeof(*stack->preds), GFP_KERNEL);
 	if (!stack->preds)
 		return -ENOMEM;
 	stack->index = n_preds;
@@ -826,8 +828,7 @@ static int __alloc_preds(struct event_filter *filter, int n_preds)
 	if (filter->preds)
 		__free_preds(filter);
 
-	filter->preds =
-		kzalloc(sizeof(*filter->preds) * n_preds, GFP_KERNEL);
+	filter->preds = kcalloc(n_preds, sizeof(*filter->preds), GFP_KERNEL);
 
 	if (!filter->preds)
 		return -ENOMEM;
@@ -900,6 +901,11 @@ int filter_assign_type(const char *type)
 	return FILTER_OTHER;
 }
 
+static bool is_function_field(struct ftrace_event_field *field)
+{
+	return field->filter_type == FILTER_TRACE_FN;
+}
+
 static bool is_string_field(struct ftrace_event_field *field)
 {
 	return field->filter_type == FILTER_DYN_STRING ||
@@ -987,6 +993,11 @@ static int init_pred(struct filter_parse_state *ps,
 			fn = filter_pred_strloc;
 		else
 			fn = filter_pred_pchar;
+	} else if (is_function_field(field)) {
+		if (strcmp(field->name, "ip")) {
+			parse_error(ps, FILT_ERR_IP_FIELD_ONLY, 0);
+			return -EINVAL;
+		}
 	} else {
 		if (field->is_signed)
 			ret = strict_strtoll(pred->regex.pattern, 0, &val);
@@ -1334,10 +1345,7 @@ static struct filter_pred *create_pred(struct filter_parse_state *ps,
 
 	strcpy(pred.regex.pattern, operand2);
 	pred.regex.len = strlen(pred.regex.pattern);
-
-#ifdef CONFIG_FTRACE_STARTUP_TEST
 	pred.field = field;
-#endif
 	return init_pred(ps, field, &pred) ? NULL : &pred;
 }
 
@@ -1486,7 +1494,7 @@ static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
 	children = count_leafs(preds, &preds[root->left]);
 	children += count_leafs(preds, &preds[root->right]);
 
-	root->ops = kzalloc(sizeof(*root->ops) * children, GFP_KERNEL);
+	root->ops = kcalloc(children, sizeof(*root->ops), GFP_KERNEL);
 	if (!root->ops)
 		return -ENOMEM;
 
@@ -1950,6 +1958,148 @@ void ftrace_profile_free_filter(struct perf_event *event)
 	__free_filter(filter);
 }
 
+struct function_filter_data {
+	struct ftrace_ops *ops;
+	int first_filter;
+	int first_notrace;
+};
+
+#ifdef CONFIG_FUNCTION_TRACER
+static char **
+ftrace_function_filter_re(char *buf, int len, int *count)
+{
+	char *str, *sep, **re;
+
+	str = kstrndup(buf, len, GFP_KERNEL);
+	if (!str)
+		return NULL;
+
+	/*
+	 * The argv_split function takes white space
+	 * as a separator, so convert ',' into spaces.
+	 */
+	while ((sep = strchr(str, ',')))
+		*sep = ' ';
+
+	re = argv_split(GFP_KERNEL, str, count);
+	kfree(str);
+	return re;
+}
+
+static int ftrace_function_set_regexp(struct ftrace_ops *ops, int filter,
+				      int reset, char *re, int len)
+{
+	int ret;
+
+	if (filter)
+		ret = ftrace_set_filter(ops, re, len, reset);
+	else
+		ret = ftrace_set_notrace(ops, re, len, reset);
+
+	return ret;
+}
+
+static int __ftrace_function_set_filter(int filter, char *buf, int len,
+					struct function_filter_data *data)
+{
+	int i, re_cnt, ret;
+	int *reset;
+	char **re;
+
+	reset = filter ? &data->first_filter : &data->first_notrace;
+
+	/*
+	 * The 'ip' field could have multiple filters set, separated
+	 * either by space or comma. We first cut the filter and apply
+	 * all pieces separatelly.
+	 */
+	re = ftrace_function_filter_re(buf, len, &re_cnt);
+	if (!re)
+		return -EINVAL;
+
+	for (i = 0; i < re_cnt; i++) {
+		ret = ftrace_function_set_regexp(data->ops, filter, *reset,
+						 re[i], strlen(re[i]));
+		if (ret)
+			break;
+
+		if (*reset)
+			*reset = 0;
+	}
+
+	argv_free(re);
+	return ret;
+}
+
+static int ftrace_function_check_pred(struct filter_pred *pred, int leaf)
+{
+	struct ftrace_event_field *field = pred->field;
+
+	if (leaf) {
+		/*
+		 * Check the leaf predicate for function trace, verify:
+		 *  - only '==' and '!=' is used
+		 *  - the 'ip' field is used
+		 */
+		if ((pred->op != OP_EQ) && (pred->op != OP_NE))
+			return -EINVAL;
+
+		if (strcmp(field->name, "ip"))
+			return -EINVAL;
+	} else {
+		/*
+		 * Check the non leaf predicate for function trace, verify:
+		 *  - only '||' is used
+		 */
+		if (pred->op != OP_OR)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ftrace_function_set_filter_cb(enum move_type move,
+					 struct filter_pred *pred,
+					 int *err, void *data)
+{
+	/* Checking the node is valid for function trace. */
+	if ((move != MOVE_DOWN) ||
+	    (pred->left != FILTER_PRED_INVALID)) {
+		*err = ftrace_function_check_pred(pred, 0);
+	} else {
+		*err = ftrace_function_check_pred(pred, 1);
+		if (*err)
+			return WALK_PRED_ABORT;
+
+		*err = __ftrace_function_set_filter(pred->op == OP_EQ,
+						    pred->regex.pattern,
+						    pred->regex.len,
+						    data);
+	}
+
+	return (*err) ? WALK_PRED_ABORT : WALK_PRED_DEFAULT;
+}
+
+static int ftrace_function_set_filter(struct perf_event *event,
+				      struct event_filter *filter)
+{
+	struct function_filter_data data = {
+		.first_filter  = 1,
+		.first_notrace = 1,
+		.ops           = &event->ftrace_ops,
+	};
+
+	return walk_pred_tree(filter->preds, filter->root,
+			      ftrace_function_set_filter_cb, &data);
+}
+#else
+static int ftrace_function_set_filter(struct perf_event *event,
+				      struct event_filter *filter)
+{
+	return -ENODEV;
+}
+#endif /* CONFIG_FUNCTION_TRACER */
+
 int ftrace_profile_set_filter(struct perf_event *event, int event_id,
 			      char *filter_str)
 {
@@ -1970,9 +2120,16 @@ int ftrace_profile_set_filter(struct perf_event *event, int event_id,
 		goto out_unlock;
 
 	err = create_filter(call, filter_str, false, &filter);
-	if (!err)
-		event->filter = filter;
+	if (err)
+		goto free_filter;
+
+	if (ftrace_event_is_function(call))
+		err = ftrace_function_set_filter(event, filter);
 	else
+		event->filter = filter;
+
+free_filter:
+	if (err || ftrace_event_is_function(call))
 		__free_filter(filter);
 
 out_unlock:
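
With the walker above, the filter string for the function event accepts only
a flat list of 'ip' comparisons. Illustrative inputs (hypothetical symbol
names), assuming the perf filter path reaches ftrace_profile_set_filter():

	/* accepted: leafs limited to == / !=, joined only by || */
	"ip == schedule"
	"ip == schedule || ip == do_fork"
	"ip == set_*"				/* ftrace glob patterns pass through */
	"ip != *lock*"				/* != routes to ftrace_set_notrace() */

	/* rejected */
	"parent_ip == schedule"			/* FILT_ERR_IP_FIELD_ONLY */
	"ip == schedule && ip == do_fork"	/* non-leaf op must be OR */
	"ip > 0xffffffff81000000"		/* only == and != on leafs */

Each matching value may itself hold several function patterns separated by
space or comma; __ftrace_function_set_filter() splits them and applies the
pieces one by one.
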
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index bbeec31e0ae3..7b46c9bd22ae 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -18,6 +18,16 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM	ftrace
 
+/*
+ * The FTRACE_ENTRY_REG macro allows ftrace entry to define register
+ * function and thus become accesible via perf.
+ */
+#undef FTRACE_ENTRY_REG
+#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, \
+			 filter, regfn) \
+	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
+		     filter)
+
 /* not needed for this file */
 #undef __field_struct
 #define __field_struct(type, item)
@@ -44,21 +54,22 @@
 #define F_printk(fmt, args...) fmt, args
 
 #undef FTRACE_ENTRY
-#define FTRACE_ENTRY(name, struct_name, id, tstruct, print)		\
+#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
 struct ____ftrace_##name {						\
 	tstruct								\
 };									\
 static void __always_unused ____ftrace_check_##name(void)		\
 {									\
 	struct ____ftrace_##name *__entry = NULL;			\
 									\
 	/* force compile-time check on F_printk() */			\
 	printk(print);							\
 }
 
 #undef FTRACE_ENTRY_DUP
-#define FTRACE_ENTRY_DUP(name, struct_name, id, tstruct, print)	\
-	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))
+#define FTRACE_ENTRY_DUP(name, struct_name, id, tstruct, print, filter) \
+	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
+		     filter)
 
 #include "trace_entries.h"
 
@@ -67,7 +78,7 @@ static void __always_unused ____ftrace_check_##name(void) \
 	ret = trace_define_field(event_call, #type, #item,		\
 				 offsetof(typeof(field), item),		\
 				 sizeof(field.item),			\
-				 is_signed_type(type), FILTER_OTHER);	\
+				 is_signed_type(type), filter_type);	\
 	if (ret)							\
 		return ret;
 
@@ -77,7 +88,7 @@ static void __always_unused ____ftrace_check_##name(void) \
 				 offsetof(typeof(field),		\
 					  container.item),		\
 				 sizeof(field.container.item),		\
-				 is_signed_type(type), FILTER_OTHER);	\
+				 is_signed_type(type), filter_type);	\
 	if (ret)							\
 		return ret;
 
@@ -91,7 +102,7 @@ static void __always_unused ____ftrace_check_##name(void) \
 	ret = trace_define_field(event_call, event_storage, #item,	\
 				 offsetof(typeof(field), item),		\
 				 sizeof(field.item),			\
-				 is_signed_type(type), FILTER_OTHER);	\
+				 is_signed_type(type), filter_type);	\
 	mutex_unlock(&event_storage_mutex);				\
 	if (ret)							\
 		return ret;						\
@@ -104,7 +115,7 @@ static void __always_unused ____ftrace_check_##name(void) \
 				 offsetof(typeof(field),		\
 					  container.item),		\
 				 sizeof(field.container.item),		\
-				 is_signed_type(type), FILTER_OTHER);	\
+				 is_signed_type(type), filter_type);	\
 	if (ret)							\
 		return ret;
 
@@ -112,17 +123,18 @@ static void __always_unused ____ftrace_check_##name(void) \
 #define __dynamic_array(type, item)					\
 	ret = trace_define_field(event_call, #type, #item,		\
 				 offsetof(typeof(field), item),		\
-				 0, is_signed_type(type), FILTER_OTHER);\
+				 0, is_signed_type(type), filter_type);\
 	if (ret)							\
 		return ret;
 
 #undef FTRACE_ENTRY
-#define FTRACE_ENTRY(name, struct_name, id, tstruct, print)		\
+#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
 int									\
 ftrace_define_fields_##name(struct ftrace_event_call *event_call)	\
 {									\
 	struct struct_name field;					\
 	int ret;							\
+	int filter_type = filter;					\
 									\
 	tstruct;							\
 									\
@@ -152,13 +164,15 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call) \
 #undef F_printk
 #define F_printk(fmt, args...) #fmt ", " __stringify(args)
 
-#undef FTRACE_ENTRY
-#define FTRACE_ENTRY(call, struct_name, etype, tstruct, print)		\
+#undef FTRACE_ENTRY_REG
+#define FTRACE_ENTRY_REG(call, struct_name, etype, tstruct, print, filter,\
+			 regfn)						\
 									\
 struct ftrace_event_class event_class_ftrace_##call = {		\
 	.system		= __stringify(TRACE_SYSTEM),			\
 	.define_fields	= ftrace_define_fields_##call,			\
 	.fields		= LIST_HEAD_INIT(event_class_ftrace_##call.fields),\
+	.reg		= regfn,					\
 };									\
 									\
 struct ftrace_event_call __used event_##call = {			\
@@ -170,4 +184,14 @@ struct ftrace_event_call __used event_##call = { \
 struct ftrace_event_call __used \
 __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;
 
+#undef FTRACE_ENTRY
+#define FTRACE_ENTRY(call, struct_name, etype, tstruct, print, filter)	\
+	FTRACE_ENTRY_REG(call, struct_name, etype,			\
+			 PARAMS(tstruct), PARAMS(print), filter, NULL)
+
+int ftrace_event_is_function(struct ftrace_event_call *call)
+{
+	return call == &event_function;
+}
+
 #include "trace_entries.h"
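
With both halves in place, the net effect of FTRACE_ENTRY_REG(function, ...)
in trace_entries.h is an event class whose reg() callback is the perf
handler. Roughly the expanded result (simplified; field setup elided):

	struct ftrace_event_class event_class_ftrace_function = {
		.system		= __stringify(TRACE_SYSTEM),	/* "ftrace" */
		.define_fields	= ftrace_define_fields_function,
		.fields		= LIST_HEAD_INIT(event_class_ftrace_function.fields),
		.reg		= perf_ftrace_event_register,
	};

Every plain FTRACE_ENTRY() event instead gets .reg = NULL via the wrapper at
the bottom of the file, and ftrace_event_is_function() identifies the one
event that needs the special perf treatment.
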
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 00d527c945a4..580a05ec926b 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1892,7 +1892,8 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
 #endif	/* CONFIG_PERF_EVENTS */
 
 static __kprobes
-int kprobe_register(struct ftrace_event_call *event, enum trace_reg type)
+int kprobe_register(struct ftrace_event_call *event,
+		    enum trace_reg type, void *data)
 {
 	struct trace_probe *tp = (struct trace_probe *)event->data;
 
@@ -1909,6 +1910,11 @@ int kprobe_register(struct ftrace_event_call *event, enum trace_reg type)
 	case TRACE_REG_PERF_UNREGISTER:
 		disable_trace_probe(tp, TP_FLAG_PROFILE);
 		return 0;
+	case TRACE_REG_PERF_OPEN:
+	case TRACE_REG_PERF_CLOSE:
+	case TRACE_REG_PERF_ADD:
+	case TRACE_REG_PERF_DEL:
+		return 0;
 #endif
 	}
 	return 0;
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 690987198ad7..859fae6b1825 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -300,7 +300,7 @@ ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
 	unsigned long mask;
 	const char *str;
 	const char *ret = p->buffer + p->len;
-	int i;
+	int i, first = 1;
 
 	for (i = 0; flag_array[i].name && flags; i++) {
 
@@ -310,14 +310,16 @@ ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
 
 		str = flag_array[i].name;
 		flags &= ~mask;
-		if (p->len && delim)
+		if (!first && delim)
 			trace_seq_puts(p, delim);
+		else
+			first = 0;
 		trace_seq_puts(p, str);
 	}
 
 	/* check for left over flags */
 	if (flags) {
-		if (p->len && delim)
+		if (!first && delim)
 			trace_seq_puts(p, delim);
 		trace_seq_printf(p, "0x%lx", flags);
 	}
@@ -344,7 +346,7 @@ ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
 			break;
 	}
 
-	if (!p->len)
+	if (ret == (const char *)(p->buffer + p->len))
 		trace_seq_printf(p, "0x%lx", val);
 
 	trace_seq_putc(p, 0);
@@ -370,7 +372,7 @@ ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
 			break;
 	}
 
-	if (!p->len)
+	if (ret == (const char *)(p->buffer + p->len))
 		trace_seq_printf(p, "0x%llx", val);
 
 	trace_seq_putc(p, 0);
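
Why the trace_output.c change is needed: p->len counts everything already in
the trace_seq, not just what this call wrote. When the event printer emits a
prefix (for example "flags=") before calling ftrace_print_flags_seq(), p->len
is already non-zero, so the old test put a delimiter before the very first
flag. Tracking 'first' locally fixes that, and the same reasoning replaces
the '!p->len' test in the symbols helpers with a comparison against the saved
start pointer 'ret', so the "no symbol matched, print hex" fallback also
works on a non-empty trace_seq. Sketch of the visible difference
(hypothetical flags, "|" delimiter):

	/* before: "flags=|GFP_KERNEL|GFP_ZERO"  (spurious leading '|') */
	/* after:  "flags=GFP_KERNEL|GFP_ZERO" */
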
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index cb654542c1a1..96fc73369099 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -17,9 +17,9 @@ static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
 static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
 
 static int syscall_enter_register(struct ftrace_event_call *event,
-				  enum trace_reg type);
+				  enum trace_reg type, void *data);
 static int syscall_exit_register(struct ftrace_event_call *event,
-				 enum trace_reg type);
+				 enum trace_reg type, void *data);
 
 static int syscall_enter_define_fields(struct ftrace_event_call *call);
 static int syscall_exit_define_fields(struct ftrace_event_call *call);
@@ -468,8 +468,8 @@ int __init init_ftrace_syscalls(void)
 	unsigned long addr;
 	int i;
 
-	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
-					NR_syscalls, GFP_KERNEL);
+	syscalls_metadata = kcalloc(NR_syscalls, sizeof(*syscalls_metadata),
+				    GFP_KERNEL);
 	if (!syscalls_metadata) {
 		WARN_ON(1);
 		return -ENOMEM;
@@ -649,7 +649,7 @@ void perf_sysexit_disable(struct ftrace_event_call *call)
 #endif /* CONFIG_PERF_EVENTS */
 
 static int syscall_enter_register(struct ftrace_event_call *event,
-				 enum trace_reg type)
+				 enum trace_reg type, void *data)
 {
 	switch (type) {
 	case TRACE_REG_REGISTER:
@@ -664,13 +664,18 @@ static int syscall_enter_register(struct ftrace_event_call *event,
 	case TRACE_REG_PERF_UNREGISTER:
 		perf_sysenter_disable(event);
 		return 0;
+	case TRACE_REG_PERF_OPEN:
+	case TRACE_REG_PERF_CLOSE:
+	case TRACE_REG_PERF_ADD:
+	case TRACE_REG_PERF_DEL:
+		return 0;
 #endif
 	}
 	return 0;
 }
 
 static int syscall_exit_register(struct ftrace_event_call *event,
-				 enum trace_reg type)
+				 enum trace_reg type, void *data)
 {
 	switch (type) {
 	case TRACE_REG_REGISTER:
@@ -685,6 +690,11 @@ static int syscall_exit_register(struct ftrace_event_call *event,
 	case TRACE_REG_PERF_UNREGISTER:
 		perf_sysexit_disable(event);
 		return 0;
+	case TRACE_REG_PERF_OPEN:
+	case TRACE_REG_PERF_CLOSE:
+	case TRACE_REG_PERF_ADD:
+	case TRACE_REG_PERF_DEL:
+		return 0;
 #endif
 	}
 	return 0;