Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/Kconfig                    1
-rw-r--r--  kernel/trace/blktrace.c                 3
-rw-r--r--  kernel/trace/ftrace.c                 162
-rw-r--r--  kernel/trace/ring_buffer.c             19
-rw-r--r--  kernel/trace/trace.c                  197
-rw-r--r--  kernel/trace/trace.h                   41
-rw-r--r--  kernel/trace/trace_events.c            85
-rw-r--r--  kernel/trace/trace_events_trigger.c     2
-rw-r--r--  kernel/trace/trace_export.c             6
-rw-r--r--  kernel/trace/trace_functions.c        147
-rw-r--r--  kernel/trace/trace_functions_graph.c    3
-rw-r--r--  kernel/trace/trace_irqsoff.c           10
-rw-r--r--  kernel/trace/trace_kprobe.c            38
-rw-r--r--  kernel/trace/trace_nop.c                5
-rw-r--r--  kernel/trace/trace_output.c            33
-rw-r--r--  kernel/trace/trace_probe.h             17
-rw-r--r--  kernel/trace/trace_sched_wakeup.c      10
-rw-r--r--  kernel/trace/trace_stack.c              3
-rw-r--r--  kernel/trace/trace_uprobe.c           217
19 files changed, 696 insertions(+), 303 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 015f85aaca08..8639819f6cef 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -424,6 +424,7 @@ config UPROBE_EVENT
 	bool "Enable uprobes-based dynamic events"
 	depends on ARCH_SUPPORTS_UPROBES
 	depends on MMU
+	depends on PERF_EVENTS
 	select UPROBES
 	select PROBE_EVENTS
 	select TRACING
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 4f3a3c03eadb..c1bd4ada2a04 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1429,7 +1429,8 @@ static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
 	return print_one_line(iter, true);
 }
 
-static int blk_tracer_set_flag(u32 old_flags, u32 bit, int set)
+static int
+blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
 	/* don't output context-info for blk_classic output */
 	if (bit == TRACE_BLK_OPT_CLASSIC) {
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index cd7f76d1eb86..1fd4b9479210 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -237,14 +237,13 @@ static int control_ops_alloc(struct ftrace_ops *ops)
 	return 0;
 }
 
-static void control_ops_free(struct ftrace_ops *ops)
-{
-	free_percpu(ops->disabled);
-}
-
 static void update_global_ops(void)
 {
-	ftrace_func_t func;
+	ftrace_func_t func = ftrace_global_list_func;
+	void *private = NULL;
+
+	/* The list has its own recursion protection. */
+	global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
 
 	/*
 	 * If there's only one function registered, then call that
@@ -254,23 +253,17 @@ static void update_global_ops(void)
 	if (ftrace_global_list == &ftrace_list_end ||
 	    ftrace_global_list->next == &ftrace_list_end) {
 		func = ftrace_global_list->func;
+		private = ftrace_global_list->private;
 		/*
 		 * As we are calling the function directly.
 		 * If it does not have recursion protection,
 		 * the function_trace_op needs to be updated
 		 * accordingly.
 		 */
-		if (ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE)
-			global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
-		else
+		if (!(ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE))
 			global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
-	} else {
-		func = ftrace_global_list_func;
-		/* The list has its own recursion protection. */
-		global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
 	}
 
-
 	/* If we filter on pids, update to use the pid function */
 	if (!list_empty(&ftrace_pids)) {
 		set_ftrace_pid_function(func);
@@ -278,6 +271,7 @@ static void update_global_ops(void)
 	}
 
 	global_ops.func = func;
+	global_ops.private = private;
 }
 
 static void ftrace_sync(struct work_struct *work)
@@ -437,6 +431,9 @@ static int remove_ftrace_list_ops(struct ftrace_ops **list,
 
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
+	if (ops->flags & FTRACE_OPS_FL_DELETED)
+		return -EINVAL;
+
 	if (FTRACE_WARN_ON(ops == &global_ops))
 		return -EINVAL;
 
@@ -1172,8 +1169,6 @@ struct ftrace_page {
 	int		size;
 };
 
-static struct ftrace_page	*ftrace_new_pgs;
-
 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
 
@@ -1560,7 +1555,7 @@ unsigned long ftrace_location(unsigned long ip)
  * the function tracer. It checks the ftrace internal tables to
  * determine if the address belongs or not.
  */
-int ftrace_text_reserved(void *start, void *end)
+int ftrace_text_reserved(const void *start, const void *end)
 {
 	unsigned long ret;
 
@@ -1994,6 +1989,7 @@ int __weak ftrace_arch_code_modify_post_process(void)
 void ftrace_modify_all_code(int command)
 {
 	int update = command & FTRACE_UPDATE_TRACE_FUNC;
+	int err = 0;
 
 	/*
 	 * If the ftrace_caller calls a ftrace_ops func directly,
@@ -2005,8 +2001,11 @@ void ftrace_modify_all_code(int command)
 	 * to make sure the ops are having the right functions
 	 * traced.
 	 */
-	if (update)
-		ftrace_update_ftrace_func(ftrace_ops_list_func);
+	if (update) {
+		err = ftrace_update_ftrace_func(ftrace_ops_list_func);
+		if (FTRACE_WARN_ON(err))
+			return;
+	}
 
 	if (command & FTRACE_UPDATE_CALLS)
 		ftrace_replace_code(1);
@@ -2019,13 +2018,16 @@ void ftrace_modify_all_code(int command)
 		/* If irqs are disabled, we are in stop machine */
 		if (!irqs_disabled())
 			smp_call_function(ftrace_sync_ipi, NULL, 1);
-		ftrace_update_ftrace_func(ftrace_trace_function);
+		err = ftrace_update_ftrace_func(ftrace_trace_function);
+		if (FTRACE_WARN_ON(err))
+			return;
 	}
 
 	if (command & FTRACE_START_FUNC_RET)
-		ftrace_enable_ftrace_graph_caller();
+		err = ftrace_enable_ftrace_graph_caller();
 	else if (command & FTRACE_STOP_FUNC_RET)
-		ftrace_disable_ftrace_graph_caller();
+		err = ftrace_disable_ftrace_graph_caller();
+	FTRACE_WARN_ON(err);
 }
 
 static int __ftrace_modify_code(void *data)
@@ -2093,6 +2095,11 @@ static ftrace_func_t saved_ftrace_func;
 static int ftrace_start_up;
 static int global_start_up;
 
+static void control_ops_free(struct ftrace_ops *ops)
+{
+	free_percpu(ops->disabled);
+}
+
 static void ftrace_startup_enable(int command)
 {
 	if (saved_ftrace_func != ftrace_trace_function) {
@@ -2244,7 +2251,6 @@ static void ftrace_shutdown_sysctl(void)
 }
 
 static cycle_t		ftrace_update_time;
-static unsigned long	ftrace_update_cnt;
 unsigned long		ftrace_update_tot_cnt;
 
 static inline int ops_traces_mod(struct ftrace_ops *ops)
@@ -2300,11 +2306,12 @@ static int referenced_filters(struct dyn_ftrace *rec)
 	return cnt;
 }
 
-static int ftrace_update_code(struct module *mod)
+static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
 {
 	struct ftrace_page *pg;
 	struct dyn_ftrace *p;
 	cycle_t start, stop;
+	unsigned long update_cnt = 0;
 	unsigned long ref = 0;
 	bool test = false;
 	int i;
@@ -2330,9 +2337,8 @@ static int ftrace_update_code(struct module *mod)
 	}
 
 	start = ftrace_now(raw_smp_processor_id());
-	ftrace_update_cnt = 0;
 
-	for (pg = ftrace_new_pgs; pg; pg = pg->next) {
+	for (pg = new_pgs; pg; pg = pg->next) {
 
 		for (i = 0; i < pg->index; i++) {
 			int cnt = ref;
@@ -2353,7 +2359,7 @@ static int ftrace_update_code(struct module *mod)
 			if (!ftrace_code_disable(mod, p))
 				break;
 
-			ftrace_update_cnt++;
+			update_cnt++;
 
 			/*
 			 * If the tracing is enabled, go ahead and enable the record.
@@ -2372,11 +2378,9 @@ static int ftrace_update_code(struct module *mod)
 		}
 	}
 
-	ftrace_new_pgs = NULL;
-
 	stop = ftrace_now(raw_smp_processor_id());
 	ftrace_update_time = stop - start;
-	ftrace_update_tot_cnt += ftrace_update_cnt;
+	ftrace_update_tot_cnt += update_cnt;
 
 	return 0;
 }
@@ -2468,22 +2472,6 @@ ftrace_allocate_pages(unsigned long num_to_init)
 	return NULL;
 }
 
-static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
-{
-	int cnt;
-
-	if (!num_to_init) {
-		pr_info("ftrace: No functions to be traced?\n");
-		return -1;
-	}
-
-	cnt = num_to_init / ENTRIES_PER_PAGE;
-	pr_info("ftrace: allocating %ld entries in %d pages\n",
-		num_to_init, cnt + 1);
-
-	return 0;
-}
-
 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
 
 struct ftrace_iterator {
@@ -2871,7 +2859,9 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 static int
 ftrace_filter_open(struct inode *inode, struct file *file)
 {
-	return ftrace_regex_open(&global_ops,
+	struct ftrace_ops *ops = inode->i_private;
+
+	return ftrace_regex_open(ops,
 			FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
 			inode, file);
 }
@@ -2879,7 +2869,9 @@ ftrace_filter_open(struct inode *inode, struct file *file)
 static int
 ftrace_notrace_open(struct inode *inode, struct file *file)
 {
-	return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
+	struct ftrace_ops *ops = inode->i_private;
+
+	return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
 				 inode, file);
 }
 
@@ -4109,6 +4101,36 @@ static const struct file_operations ftrace_graph_notrace_fops = {
 };
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
+void ftrace_create_filter_files(struct ftrace_ops *ops,
+				struct dentry *parent)
+{
+
+	trace_create_file("set_ftrace_filter", 0644, parent,
+			  ops, &ftrace_filter_fops);
+
+	trace_create_file("set_ftrace_notrace", 0644, parent,
+			  ops, &ftrace_notrace_fops);
+}
+
+/*
+ * The name "destroy_filter_files" is really a misnomer. Although
+ * in the future it may actually delete the files, this is
+ * really intended to make sure the ops passed in are disabled
+ * and that when this function returns, the caller is free to
+ * free the ops.
+ *
+ * The "destroy" name is only to match the "create" name that this
+ * should be paired with.
+ */
+void ftrace_destroy_filter_files(struct ftrace_ops *ops)
+{
+	mutex_lock(&ftrace_lock);
+	if (ops->flags & FTRACE_OPS_FL_ENABLED)
+		ftrace_shutdown(ops, 0);
+	ops->flags |= FTRACE_OPS_FL_DELETED;
+	mutex_unlock(&ftrace_lock);
+}
+
 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
 {
 
@@ -4118,11 +4140,7 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
 	trace_create_file("enabled_functions", 0444,
 			d_tracer, NULL, &ftrace_enabled_fops);
 
-	trace_create_file("set_ftrace_filter", 0644, d_tracer,
-			NULL, &ftrace_filter_fops);
-
-	trace_create_file("set_ftrace_notrace", 0644, d_tracer,
-			NULL, &ftrace_notrace_fops);
+	ftrace_create_filter_files(&global_ops, d_tracer);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	trace_create_file("set_graph_function", 0444, d_tracer,
@@ -4238,9 +4256,6 @@ static int ftrace_process_locs(struct module *mod,
 	/* Assign the last page to ftrace_pages */
 	ftrace_pages = pg;
 
-	/* These new locations need to be initialized */
-	ftrace_new_pgs = start_pg;
-
 	/*
 	 * We only need to disable interrupts on start up
 	 * because we are modifying code that an interrupt
@@ -4251,7 +4266,7 @@ static int ftrace_process_locs(struct module *mod,
 	 */
 	if (!mod)
 		local_irq_save(flags);
-	ftrace_update_code(mod);
+	ftrace_update_code(mod, start_pg);
 	if (!mod)
 		local_irq_restore(flags);
 	ret = 0;
@@ -4360,30 +4375,27 @@ struct notifier_block ftrace_module_exit_nb = {
 	.priority = INT_MIN,	/* Run after anything that can remove kprobes */
 };
 
-extern unsigned long __start_mcount_loc[];
-extern unsigned long __stop_mcount_loc[];
-
 void __init ftrace_init(void)
 {
-	unsigned long count, addr, flags;
+	extern unsigned long __start_mcount_loc[];
+	extern unsigned long __stop_mcount_loc[];
+	unsigned long count, flags;
 	int ret;
 
-	/* Keep the ftrace pointer to the stub */
-	addr = (unsigned long)ftrace_stub;
-
 	local_irq_save(flags);
-	ftrace_dyn_arch_init(&addr);
+	ret = ftrace_dyn_arch_init();
 	local_irq_restore(flags);
-
-	/* ftrace_dyn_arch_init places the return code in addr */
-	if (addr)
+	if (ret)
 		goto failed;
 
 	count = __stop_mcount_loc - __start_mcount_loc;
-
-	ret = ftrace_dyn_table_alloc(count);
-	if (ret)
+	if (!count) {
+		pr_info("ftrace: No functions to be traced?\n");
 		goto failed;
+	}
+
+	pr_info("ftrace: allocating %ld entries in %ld pages\n",
+		count, count / ENTRIES_PER_PAGE + 1);
 
 	last_ftrace_enabled = ftrace_enabled = 1;
 
@@ -4431,7 +4443,13 @@ static inline void ftrace_startup_enable(int command) { }
 		(ops)->flags |= FTRACE_OPS_FL_ENABLED;			\
 		___ret;							\
 	})
-# define ftrace_shutdown(ops, command)	__unregister_ftrace_function(ops)
+# define ftrace_shutdown(ops, command)					\
+	({								\
+		int ___ret = __unregister_ftrace_function(ops);		\
+		if (!___ret)						\
+			(ops)->flags &= ~FTRACE_OPS_FL_ENABLED;		\
+		___ret;							\
+	})
 
 # define ftrace_startup_sysctl()	do { } while (0)
 # define ftrace_shutdown_sysctl()	do { } while (0)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index fc4da2d97f9b..c634868c2921 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1301,7 +1301,7 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 	 * In that off case, we need to allocate for all possible cpus.
 	 */
 #ifdef CONFIG_HOTPLUG_CPU
-	get_online_cpus();
+	cpu_notifier_register_begin();
 	cpumask_copy(buffer->cpumask, cpu_online_mask);
 #else
 	cpumask_copy(buffer->cpumask, cpu_possible_mask);
@@ -1324,10 +1324,10 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 #ifdef CONFIG_HOTPLUG_CPU
 	buffer->cpu_notify.notifier_call = rb_cpu_notify;
 	buffer->cpu_notify.priority = 0;
-	register_cpu_notifier(&buffer->cpu_notify);
+	__register_cpu_notifier(&buffer->cpu_notify);
+	cpu_notifier_register_done();
 #endif
 
-	put_online_cpus();
 	mutex_init(&buffer->mutex);
 
 	return buffer;
@@ -1341,7 +1341,9 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 
  fail_free_cpumask:
 	free_cpumask_var(buffer->cpumask);
-	put_online_cpus();
+#ifdef CONFIG_HOTPLUG_CPU
+	cpu_notifier_register_done();
+#endif
 
  fail_free_buffer:
 	kfree(buffer);
@@ -1358,16 +1360,17 @@ ring_buffer_free(struct ring_buffer *buffer)
 {
 	int cpu;
 
-	get_online_cpus();
-
 #ifdef CONFIG_HOTPLUG_CPU
-	unregister_cpu_notifier(&buffer->cpu_notify);
+	cpu_notifier_register_begin();
+	__unregister_cpu_notifier(&buffer->cpu_notify);
 #endif
 
 	for_each_buffer_cpu(buffer, cpu)
 		rb_free_cpu_buffer(buffer->buffers[cpu]);
 
-	put_online_cpus();
+#ifdef CONFIG_HOTPLUG_CPU
+	cpu_notifier_register_done();
+#endif
 
 	kfree(buffer->buffers);
 	free_cpumask_var(buffer->cpumask);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 24c1f2382557..737b0efa1a62 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -73,7 +73,8 @@ static struct tracer_flags dummy_tracer_flags = {
 	.opts = dummy_tracer_opt
 };
 
-static int dummy_set_flag(u32 old_flags, u32 bit, int set)
+static int
+dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
 	return 0;
 }
@@ -118,7 +119,7 @@ enum ftrace_dump_mode ftrace_dump_on_oops;
 /* When set, tracing will stop when a WARN*() is hit */
 int __disable_trace_on_warning;
 
-static int tracing_set_tracer(const char *buf);
+static int tracing_set_tracer(struct trace_array *tr, const char *buf);
 
 #define MAX_TRACER_SIZE		100
 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
@@ -180,6 +181,17 @@ static int __init set_trace_boot_options(char *str)
 }
 __setup("trace_options=", set_trace_boot_options);
 
+static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
+static char *trace_boot_clock __initdata;
+
+static int __init set_trace_boot_clock(char *str)
+{
+	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
+	trace_boot_clock = trace_boot_clock_buf;
+	return 0;
+}
+__setup("trace_clock=", set_trace_boot_clock);
+
 
 unsigned long long ns2usecs(cycle_t nsec)
 {
@@ -1230,7 +1242,7 @@ int register_tracer(struct tracer *type)
 
 	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
 	/* Do we want this tracer to start on bootup? */
-	tracing_set_tracer(type->name);
+	tracing_set_tracer(&global_trace, type->name);
 	default_bootup_tracer = NULL;
 	/* disable other selftests, since this will break it. */
 	tracing_selftest_disabled = true;
@@ -3137,27 +3149,52 @@ static int tracing_open(struct inode *inode, struct file *file)
 	return ret;
 }
 
+/*
+ * Some tracers are not suitable for instance buffers.
+ * A tracer is always available for the global array (toplevel)
+ * or if it explicitly states that it is.
+ */
+static bool
+trace_ok_for_array(struct tracer *t, struct trace_array *tr)
+{
+	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
+}
+
+/* Find the next tracer that this trace array may use */
+static struct tracer *
+get_tracer_for_array(struct trace_array *tr, struct tracer *t)
+{
+	while (t && !trace_ok_for_array(t, tr))
+		t = t->next;
+
+	return t;
+}
+
 static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
+	struct trace_array *tr = m->private;
 	struct tracer *t = v;
 
 	(*pos)++;
 
 	if (t)
-		t = t->next;
+		t = get_tracer_for_array(tr, t->next);
 
 	return t;
 }
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
+	struct trace_array *tr = m->private;
 	struct tracer *t;
 	loff_t l = 0;
 
 	mutex_lock(&trace_types_lock);
-	for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
-		;
+
+	t = get_tracer_for_array(tr, trace_types);
+	for (; t && l < *pos; t = t_next(m, t, &l))
+		;
 
 	return t;
 }
@@ -3192,10 +3229,21 @@ static const struct seq_operations show_traces_seq_ops = {
 
 static int show_traces_open(struct inode *inode, struct file *file)
 {
+	struct trace_array *tr = inode->i_private;
+	struct seq_file *m;
+	int ret;
+
 	if (tracing_disabled)
 		return -ENODEV;
 
-	return seq_open(file, &show_traces_seq_ops);
+	ret = seq_open(file, &show_traces_seq_ops);
+	if (ret)
+		return ret;
+
+	m = file->private_data;
+	m->private = tr;
+
+	return 0;
 }
 
 static ssize_t
@@ -3355,13 +3403,14 @@ static int tracing_trace_options_show(struct seq_file *m, void *v)
 	return 0;
 }
 
-static int __set_tracer_option(struct tracer *trace,
+static int __set_tracer_option(struct trace_array *tr,
 			       struct tracer_flags *tracer_flags,
 			       struct tracer_opt *opts, int neg)
 {
+	struct tracer *trace = tr->current_trace;
 	int ret;
 
-	ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
+	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
 	if (ret)
 		return ret;
 
@@ -3373,8 +3422,9 @@ static int __set_tracer_option(struct tracer *trace,
 }
 
 /* Try to assign a tracer specific option */
-static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
+static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
 {
+	struct tracer *trace = tr->current_trace;
 	struct tracer_flags *tracer_flags = trace->flags;
 	struct tracer_opt *opts = NULL;
 	int i;
@@ -3383,8 +3433,7 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
 		opts = &tracer_flags->opts[i];
 
 		if (strcmp(cmp, opts->name) == 0)
-			return __set_tracer_option(trace, trace->flags,
-						   opts, neg);
+			return __set_tracer_option(tr, trace->flags, opts, neg);
 	}
 
 	return -EINVAL;
@@ -3407,7 +3456,7 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
 
 	/* Give the tracer a chance to approve the change */
 	if (tr->current_trace->flag_changed)
-		if (tr->current_trace->flag_changed(tr->current_trace, mask, !!enabled))
+		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
 			return -EINVAL;
 
 	if (enabled)
@@ -3456,7 +3505,7 @@ static int trace_set_options(struct trace_array *tr, char *option)
 
 	/* If no option could be set, test the specific tracer options */
 	if (!trace_options[i])
-		ret = set_tracer_option(tr->current_trace, cmp, neg);
+		ret = set_tracer_option(tr, cmp, neg);
 
 	mutex_unlock(&trace_types_lock);
 
@@ -3562,6 +3611,8 @@ static const char readme_msg[] =
 #ifdef CONFIG_TRACER_SNAPSHOT
 	"\t\t    snapshot\n"
 #endif
+	"\t\t    dump\n"
+	"\t\t    cpudump\n"
 	"\t   example: echo do_fault:traceoff > set_ftrace_filter\n"
 	"\t            echo do_trap:traceoff:3 > set_ftrace_filter\n"
 	"\t  The first one will disable tracing every time do_fault is hit\n"
@@ -3885,10 +3936,26 @@ create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
 static void
 destroy_trace_option_files(struct trace_option_dentry *topts);
 
-static int tracing_set_tracer(const char *buf)
+/*
+ * Used to clear out the tracer before deletion of an instance.
+ * Must have trace_types_lock held.
+ */
+static void tracing_set_nop(struct trace_array *tr)
+{
+	if (tr->current_trace == &nop_trace)
+		return;
+
+	tr->current_trace->enabled--;
+
+	if (tr->current_trace->reset)
+		tr->current_trace->reset(tr);
+
+	tr->current_trace = &nop_trace;
+}
+
+static int tracing_set_tracer(struct trace_array *tr, const char *buf)
 {
 	static struct trace_option_dentry *topts;
-	struct trace_array *tr = &global_trace;
 	struct tracer *t;
 #ifdef CONFIG_TRACER_MAX_TRACE
 	bool had_max_tr;
@@ -3916,9 +3983,15 @@ static int tracing_set_tracer(const char *buf)
 	if (t == tr->current_trace)
 		goto out;
 
+	/* Some tracers are only allowed for the top level buffer */
+	if (!trace_ok_for_array(t, tr)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	trace_branch_disable();
 
-	tr->current_trace->enabled = false;
+	tr->current_trace->enabled--;
 
 	if (tr->current_trace->reset)
 		tr->current_trace->reset(tr);
@@ -3941,9 +4014,11 @@ static int tracing_set_tracer(const char *buf)
 		free_snapshot(tr);
 	}
 #endif
-	destroy_trace_option_files(topts);
-
-	topts = create_trace_option_files(tr, t);
+	/* Currently, only the top instance has options */
+	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
+		destroy_trace_option_files(topts);
+		topts = create_trace_option_files(tr, t);
+	}
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 	if (t->use_max_tr && !had_max_tr) {
@@ -3960,7 +4035,7 @@ static int tracing_set_tracer(const char *buf)
 	}
 
 	tr->current_trace = t;
-	tr->current_trace->enabled = true;
+	tr->current_trace->enabled++;
 	trace_branch_enable(tr);
  out:
 	mutex_unlock(&trace_types_lock);
@@ -3972,6 +4047,7 @@ static ssize_t
 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
 			size_t cnt, loff_t *ppos)
 {
+	struct trace_array *tr = filp->private_data;
 	char buf[MAX_TRACER_SIZE+1];
 	int i;
 	size_t ret;
@@ -3991,7 +4067,7 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
 	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
 		buf[i] = 0;
 
-	err = tracing_set_tracer(buf);
+	err = tracing_set_tracer(tr, buf);
 	if (err)
 		return err;
 
@@ -4316,8 +4392,6 @@ static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
 
 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
 	.can_merge		= 0,
-	.map			= generic_pipe_buf_map,
-	.unmap			= generic_pipe_buf_unmap,
 	.confirm		= generic_pipe_buf_confirm,
 	.release		= generic_pipe_buf_release,
 	.steal			= generic_pipe_buf_steal,
@@ -4412,7 +4486,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 	trace_access_lock(iter->cpu_file);
 
 	/* Fill as many pages as possible. */
-	for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
+	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
 		spd.pages[i] = alloc_page(GFP_KERNEL);
 		if (!spd.pages[i])
 			break;
@@ -4699,25 +4773,10 @@ static int tracing_clock_show(struct seq_file *m, void *v)
 	return 0;
 }
 
-static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
-				   size_t cnt, loff_t *fpos)
+static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
 {
-	struct seq_file *m = filp->private_data;
-	struct trace_array *tr = m->private;
-	char buf[64];
-	const char *clockstr;
 	int i;
 
-	if (cnt >= sizeof(buf))
-		return -EINVAL;
-
-	if (copy_from_user(&buf, ubuf, cnt))
-		return -EFAULT;
-
-	buf[cnt] = 0;
-
-	clockstr = strstrip(buf);
-
 	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
 		if (strcmp(trace_clocks[i].name, clockstr) == 0)
 			break;
@@ -4745,6 +4804,32 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
 
 	mutex_unlock(&trace_types_lock);
 
+	return 0;
+}
+
+static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
+				   size_t cnt, loff_t *fpos)
+{
+	struct seq_file *m = filp->private_data;
+	struct trace_array *tr = m->private;
+	char buf[64];
+	const char *clockstr;
+	int ret;
+
+	if (cnt >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	buf[cnt] = 0;
+
+	clockstr = strstrip(buf);
+
+	ret = tracing_set_clock(tr, clockstr);
+	if (ret)
+		return ret;
+
 	*fpos += cnt;
 
 	return cnt;
@@ -5194,8 +5279,6 @@ static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
 /* Pipe buffer operations for a buffer. */
 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
 	.can_merge		= 0,
-	.map			= generic_pipe_buf_map,
-	.unmap			= generic_pipe_buf_unmap,
 	.confirm		= generic_pipe_buf_confirm,
 	.release		= buffer_pipe_buf_release,
 	.steal			= generic_pipe_buf_steal,
@@ -5271,7 +5354,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 	trace_access_lock(iter->cpu_file);
 	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
 
-	for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
+	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
 		struct page *page;
 		int r;
 
@@ -5705,7 +5788,7 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
 
 	if (!!(topt->flags->val & topt->opt->bit) != val) {
 		mutex_lock(&trace_types_lock);
-		ret = __set_tracer_option(topt->tr->current_trace, topt->flags,
+		ret = __set_tracer_option(topt->tr, topt->flags,
 					  topt->opt, !val);
 		mutex_unlock(&trace_types_lock);
 		if (ret)
@@ -6112,7 +6195,9 @@ static int instance_delete(const char *name)
 
 	list_del(&tr->list);
 
+	tracing_set_nop(tr);
 	event_trace_del_tracer(tr);
+	ftrace_destroy_function_files(tr);
 	debugfs_remove_recursive(tr->dir);
 	free_percpu(tr->trace_buffer.data);
 	ring_buffer_free(tr->trace_buffer.buffer);
@@ -6207,6 +6292,12 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
 {
 	int cpu;
 
+	trace_create_file("available_tracers", 0444, d_tracer,
+			tr, &show_traces_fops);
+
+	trace_create_file("current_tracer", 0644, d_tracer,
+			tr, &set_tracer_fops);
+
 	trace_create_file("tracing_cpumask", 0644, d_tracer,
 			  tr, &tracing_cpumask_fops);
 
@@ -6237,6 +6328,9 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
 	trace_create_file("tracing_on", 0644, d_tracer,
 			  tr, &rb_simple_fops);
 
+	if (ftrace_create_function_files(tr, d_tracer))
+		WARN(1, "Could not allocate function filter files");
+
 #ifdef CONFIG_TRACER_SNAPSHOT
 	trace_create_file("snapshot", 0644, d_tracer,
 			  tr, &snapshot_fops);
@@ -6259,12 +6353,6 @@ static __init int tracer_init_debugfs(void)
 
 	init_tracer_debugfs(&global_trace, d_tracer);
 
-	trace_create_file("available_tracers", 0444, d_tracer,
-			&global_trace, &show_traces_fops);
-
-	trace_create_file("current_tracer", 0644, d_tracer,
-			&global_trace, &set_tracer_fops);
-
 #ifdef CONFIG_TRACER_MAX_TRACE
 	trace_create_file("tracing_max_latency", 0644, d_tracer,
 			&tracing_max_latency, &tracing_max_lat_fops);
@@ -6527,6 +6615,13 @@ __init static int tracer_alloc_buffers(void)
 
 	trace_init_cmdlines();
 
+	if (trace_boot_clock) {
+		ret = tracing_set_clock(&global_trace, trace_boot_clock);
+		if (ret < 0)
+			pr_warning("Trace clock %s not defined, going back to default\n",
+				   trace_boot_clock);
+	}
+
 	/*
 	 * register_tracer() might reference current_trace, so it
 	 * needs to be set before we register anything. This is
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 02b592f2d4b7..2e29d7ba5a52 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -13,6 +13,7 @@
 #include <linux/hw_breakpoint.h>
 #include <linux/trace_seq.h>
 #include <linux/ftrace_event.h>
+#include <linux/compiler.h>
 
 #ifdef CONFIG_FTRACE_SYSCALLS
 #include <asm/unistd.h>		/* For NR_SYSCALLS	     */
@@ -210,6 +211,11 @@ struct trace_array {
 	struct list_head	events;
 	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
 	int			ref;
+#ifdef CONFIG_FUNCTION_TRACER
+	struct ftrace_ops	*ops;
+	/* function tracing enabled */
+	int			function_enabled;
+#endif
 };
 
 enum {
@@ -355,14 +361,16 @@ struct tracer {
 	void			(*print_header)(struct seq_file *m);
 	enum print_line_t	(*print_line)(struct trace_iterator *iter);
 	/* If you handled the flag setting, return 0 */
-	int			(*set_flag)(u32 old_flags, u32 bit, int set);
+	int			(*set_flag)(struct trace_array *tr,
+					    u32 old_flags, u32 bit, int set);
 	/* Return 0 if OK with change, else return non-zero */
-	int			(*flag_changed)(struct tracer *tracer,
+	int			(*flag_changed)(struct trace_array *tr,
 						u32 mask, int set);
 	struct tracer		*next;
 	struct tracer_flags	*flags;
+	int			enabled;
 	bool			print_max;
-	bool			enabled;
+	bool			allow_instances;
 #ifdef CONFIG_TRACER_MAX_TRACE
 	bool			use_max_tr;
 #endif
@@ -812,13 +820,36 @@ static inline int ftrace_trace_task(struct task_struct *task)
 	return test_tsk_trace_trace(task);
 }
 extern int ftrace_is_dead(void);
+int ftrace_create_function_files(struct trace_array *tr,
+				 struct dentry *parent);
+void ftrace_destroy_function_files(struct trace_array *tr);
 #else
 static inline int ftrace_trace_task(struct task_struct *task)
 {
 	return 1;
 }
 static inline int ftrace_is_dead(void) { return 0; }
-#endif
+static inline int
+ftrace_create_function_files(struct trace_array *tr,
+			     struct dentry *parent)
+{
+	return 0;
+}
+static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
+void ftrace_create_filter_files(struct ftrace_ops *ops,
+				struct dentry *parent);
+void ftrace_destroy_filter_files(struct ftrace_ops *ops);
+#else
+/*
+ * The ops parameter passed in is usually undefined.
+ * This must be a macro.
+ */
+#define ftrace_create_filter_files(ops, parent) do { } while (0)
+#define ftrace_destroy_filter_files(ops) do { } while (0)
+#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */
 
 int ftrace_event_is_function(struct ftrace_event_call *call);
 
@@ -1249,7 +1280,7 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
 #undef FTRACE_ENTRY
 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
 	extern struct ftrace_event_call					\
-	__attribute__((__aligned__(4))) event_##call;
+	__aligned(4) event_##call;
 #undef FTRACE_ENTRY_DUP
 #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
 	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 7b16d40bd64d..3ddfd8f62c05 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -188,29 +188,60 @@ int trace_event_raw_init(struct ftrace_event_call *call)
 }
 EXPORT_SYMBOL_GPL(trace_event_raw_init);
 
+void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
+				  struct ftrace_event_file *ftrace_file,
+				  unsigned long len)
+{
+	struct ftrace_event_call *event_call = ftrace_file->event_call;
+
+	local_save_flags(fbuffer->flags);
+	fbuffer->pc = preempt_count();
+	fbuffer->ftrace_file = ftrace_file;
+
+	fbuffer->event =
+		trace_event_buffer_lock_reserve(&fbuffer->buffer, ftrace_file,
+						event_call->event.type, len,
+						fbuffer->flags, fbuffer->pc);
+	if (!fbuffer->event)
+		return NULL;
+
+	fbuffer->entry = ring_buffer_event_data(fbuffer->event);
+	return fbuffer->entry;
+}
+EXPORT_SYMBOL_GPL(ftrace_event_buffer_reserve);
+
+void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer)
+{
+	event_trigger_unlock_commit(fbuffer->ftrace_file, fbuffer->buffer,
+				    fbuffer->event, fbuffer->entry,
+				    fbuffer->flags, fbuffer->pc);
+}
+EXPORT_SYMBOL_GPL(ftrace_event_buffer_commit);
+
 int ftrace_event_reg(struct ftrace_event_call *call,
 		     enum trace_reg type, void *data)
 {
 	struct ftrace_event_file *file = data;
 
+	WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
 	switch (type) {
 	case TRACE_REG_REGISTER:
-		return tracepoint_probe_register(call->name,
+		return tracepoint_probe_register(call->tp,
 						 call->class->probe,
 						 file);
 	case TRACE_REG_UNREGISTER:
-		tracepoint_probe_unregister(call->name,
+		tracepoint_probe_unregister(call->tp,
 					    call->class->probe,
 					    file);
 		return 0;
 
 #ifdef CONFIG_PERF_EVENTS
 	case TRACE_REG_PERF_REGISTER:
-		return tracepoint_probe_register(call->name,
+		return tracepoint_probe_register(call->tp,
 						 call->class->perf_probe,
 						 call);
 	case TRACE_REG_PERF_UNREGISTER:
-		tracepoint_probe_unregister(call->name,
+		tracepoint_probe_unregister(call->tp,
 					    call->class->perf_probe,
 					    call);
 		return 0;
@@ -322,7 +353,7 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
 		if (ret) {
 			tracing_stop_cmdline_record();
 			pr_info("event trace: Could not enable event "
-				"%s\n", call->name);
+				"%s\n", ftrace_event_name(call));
 			break;
 		}
 		set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
@@ -451,27 +482,29 @@ __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
 {
 	struct ftrace_event_file *file;
 	struct ftrace_event_call *call;
+	const char *name;
 	int ret = -EINVAL;
 
 	list_for_each_entry(file, &tr->events, list) {
 
 		call = file->event_call;
+		name = ftrace_event_name(call);
 
-		if (!call->name || !call->class || !call->class->reg)
+		if (!name || !call->class || !call->class->reg)
 			continue;
 
 		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
 			continue;
 
 		if (match &&
-		    strcmp(match, call->name) != 0 &&
+		    strcmp(match, name) != 0 &&
 		    strcmp(match, call->class->system) != 0)
 			continue;
 
 		if (sub && strcmp(sub, call->class->system) != 0)
 			continue;
 
-		if (event && strcmp(event, call->name) != 0)
+		if (event && strcmp(event, name) != 0)
 			continue;
 
 		ftrace_event_enable_disable(file, set);
@@ -669,7 +702,7 @@ static int t_show(struct seq_file *m, void *v)
 
 	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
 		seq_printf(m, "%s:", call->class->system);
-	seq_printf(m, "%s\n", call->name);
+	seq_printf(m, "%s\n", ftrace_event_name(call));
 
 	return 0;
 }
@@ -762,7 +795,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
 	mutex_lock(&event_mutex);
 	list_for_each_entry(file, &tr->events, list) {
 		call = file->event_call;
-		if (!call->name || !call->class || !call->class->reg)
+		if (!ftrace_event_name(call) || !call->class || !call->class->reg)
 			continue;
 
 		if (system && strcmp(call->class->system, system->name) != 0)
@@ -877,7 +910,7 @@ static int f_show(struct seq_file *m, void *v)
 
 	switch ((unsigned long)v) {
 	case FORMAT_HEADER:
-		seq_printf(m, "name: %s\n", call->name);
+		seq_printf(m, "name: %s\n", ftrace_event_name(call));
 		seq_printf(m, "ID: %d\n", call->event.type);
 		seq_printf(m, "format:\n");
 		return 0;
@@ -1497,6 +1530,7 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
 	struct trace_array *tr = file->tr;
 	struct list_head *head;
 	struct dentry *d_events;
+	const char *name;
 	int ret;
 
 	/*
@@ -1510,10 +1544,11 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
 	} else
 		d_events = parent;
 
-	file->dir = debugfs_create_dir(call->name, d_events);
+	name = ftrace_event_name(call);
+	file->dir = debugfs_create_dir(name, d_events);
 	if (!file->dir) {
 		pr_warning("Could not create debugfs '%s' directory\n",
-			   call->name);
+			   name);
 		return -1;
 	}
 
@@ -1537,7 +1572,7 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
 		ret = call->class->define_fields(call);
 		if (ret < 0) {
 			pr_warning("Could not initialize trace point"
-				   " events/%s\n", call->name);
+				   " events/%s\n", name);
 			return -1;
 		}
 	}
@@ -1601,15 +1636,17 @@ static void event_remove(struct ftrace_event_call *call)
 static int event_init(struct ftrace_event_call *call)
 {
 	int ret = 0;
+	const char *name;
 
-	if (WARN_ON(!call->name))
+	name = ftrace_event_name(call);
+	if (WARN_ON(!name))
 		return -EINVAL;
 
 	if (call->class->raw_init) {
 		ret = call->class->raw_init(call);
 		if (ret < 0 && ret != -ENOSYS)
 			pr_warn("Could not initialize trace events/%s\n",
-				call->name);
+				name);
 	}
 
 	return ret;
@@ -1855,7 +1892,7 @@ __trace_add_event_dirs(struct trace_array *tr)
 		ret = __trace_add_new_event(call, tr);
 		if (ret < 0)
 			pr_warning("Could not create directory for event %s\n",
-				   call->name);
+				   ftrace_event_name(call));
 	}
 }
 
@@ -1864,18 +1901,20 @@ find_event_file(struct trace_array *tr, const char *system, const char *event)
 {
 	struct ftrace_event_file *file;
 	struct ftrace_event_call *call;
+	const char *name;
 
 	list_for_each_entry(file, &tr->events, list) {
 
 		call = file->event_call;
+		name = ftrace_event_name(call);
 
-		if (!call->name || !call->class || !call->class->reg)
+		if (!name || !call->class || !call->class->reg)
 			continue;
 
 		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
 			continue;
 
-		if (strcmp(event, call->name) == 0 &&
+		if (strcmp(event, name) == 0 &&
 		    strcmp(system, call->class->system) == 0)
 			return file;
 	}
@@ -1943,7 +1982,7 @@ event_enable_print(struct seq_file *m, unsigned long ip,
 	seq_printf(m, "%s:%s:%s",
 		   data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
 		   data->file->event_call->class->system,
-		   data->file->event_call->name);
+		   ftrace_event_name(data->file->event_call));
 
 	if (data->count == -1)
 		seq_printf(m, ":unlimited\n");
@@ -2163,7 +2202,7 @@ __trace_early_add_event_dirs(struct trace_array *tr)
 		ret = event_create_dir(tr->event_dir, file);
 		if (ret < 0)
 			pr_warning("Could not create directory for event %s\n",
-				   file->event_call->name);
+				   ftrace_event_name(file->event_call));
 	}
 }
 
@@ -2187,7 +2226,7 @@ __trace_early_add_events(struct trace_array *tr)
 		ret = __trace_early_add_new_event(call, tr);
 		if (ret < 0)
 			pr_warning("Could not create early event %s\n",
-				   call->name);
+				   ftrace_event_name(call));
 	}
 }
 
@@ -2519,7 +2558,7 @@ static __init void event_trace_self_tests(void)
 			continue;
 #endif
 
-		pr_info("Testing event %s: ", call->name);
+		pr_info("Testing event %s: ", ftrace_event_name(call));
 
 		/*
 		 * If an event is already enabled, someone is using
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 8efbb69b04f0..925f537f07d1 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -1095,7 +1095,7 @@ event_enable_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1095 seq_printf(m, "%s:%s:%s", 1095 seq_printf(m, "%s:%s:%s",
1096 enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, 1096 enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
1097 enable_data->file->event_call->class->system, 1097 enable_data->file->event_call->class->system,
1098 enable_data->file->event_call->name); 1098 ftrace_event_name(enable_data->file->event_call));
1099 1099
1100 if (data->count == -1) 1100 if (data->count == -1)
1101 seq_puts(m, ":unlimited"); 1101 seq_puts(m, ":unlimited");
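
Every ->name access converted above goes through the new ftrace_event_name() accessor instead of reading call->name directly. The point of the indirection is that the name no longer has to be a plain string member: it can also be fetched from an associated tracepoint, with both living in an anonymous union (the bracing change in trace_export.c just below initializes that union). A self-contained user-space sketch of the idea; the types here are illustrative, not the kernel's:

#include <stdio.h>

struct tracepoint { const char *name; };

struct event_call {
        int is_tracepoint;              /* stands in for a flags bit */
        union {                         /* anonymous union, as in the patch */
                const char *name;       /* static events */
                struct tracepoint *tp;  /* tracepoint-backed events */
        };
};

static const char *event_name(const struct event_call *call)
{
        return call->is_tracepoint ? call->tp->name : call->name;
}

int main(void)
{
        struct tracepoint tp = { .name = "sched_switch" };
        struct event_call fixed = { 0, { .name = "function" } };
        struct event_call dynamic = { 1, { .tp = &tp } };

        printf("%s %s\n", event_name(&fixed), event_name(&dynamic));
        return 0;
}

Because every caller already goes through the accessor, the union can change shape later without touching any of these call sites again.
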
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index ee0a5098ac43..d4ddde28a81a 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -173,9 +173,11 @@ struct ftrace_event_class __refdata event_class_ftrace_##call = { \
173}; \ 173}; \
174 \ 174 \
175struct ftrace_event_call __used event_##call = { \ 175struct ftrace_event_call __used event_##call = { \
176 .name = #call, \
177 .event.type = etype, \
178 .class = &event_class_ftrace_##call, \ 176 .class = &event_class_ftrace_##call, \
177 { \
178 .name = #call, \
179 }, \
180 .event.type = etype, \
179 .print_fmt = print, \ 181 .print_fmt = print, \
180 .flags = TRACE_EVENT_FL_IGNORE_ENABLE | TRACE_EVENT_FL_USE_CALL_FILTER, \ 182 .flags = TRACE_EVENT_FL_IGNORE_ENABLE | TRACE_EVENT_FL_USE_CALL_FILTER, \
181}; \ 183}; \
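
The extra brace level around .name above is deliberate. Designated initializers that name a member of an anonymous union were only accepted from GCC 4.6 on, so the patch drops the union into the initializer as its own positionally placed braced group. A compilable sketch of the two spellings, with made-up types:

struct tracepoint { const char *name; };

struct call {
        const void *class;
        union {
                const char *name;
                struct tracepoint *tp;
        };
        int type;
};

/* Portable form used by the patch: the union gets its own braces,
 * placed positionally right after the member preceding it. */
static struct call portable = {
        .class = 0,
        { .name = "function" },
        .type = 5,
};

/* Direct designation into the anonymous union; newer compilers only. */
static struct call modern = {
        .class = 0,
        .name = "function",
        .type = 5,
};

int main(void) { return portable.type != modern.type; }

The positional form must sit immediately after an initializer for the struct member that precedes the union in the declaration, which is why .event.type moves below it in the hunk.
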
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 38fe1483c508..ffd56351b521 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -13,32 +13,110 @@
13#include <linux/debugfs.h> 13#include <linux/debugfs.h>
14#include <linux/uaccess.h> 14#include <linux/uaccess.h>
15#include <linux/ftrace.h> 15#include <linux/ftrace.h>
16#include <linux/slab.h>
16#include <linux/fs.h> 17#include <linux/fs.h>
17 18
18#include "trace.h" 19#include "trace.h"
19 20
20/* function tracing enabled */ 21static void tracing_start_function_trace(struct trace_array *tr);
21static int ftrace_function_enabled; 22static void tracing_stop_function_trace(struct trace_array *tr);
23static void
24function_trace_call(unsigned long ip, unsigned long parent_ip,
25 struct ftrace_ops *op, struct pt_regs *pt_regs);
26static void
27function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
28 struct ftrace_ops *op, struct pt_regs *pt_regs);
29static struct ftrace_ops trace_ops;
30static struct ftrace_ops trace_stack_ops;
31static struct tracer_flags func_flags;
32
33/* Our option */
34enum {
35 TRACE_FUNC_OPT_STACK = 0x1,
36};
37
38static int allocate_ftrace_ops(struct trace_array *tr)
39{
40 struct ftrace_ops *ops;
41
42 ops = kzalloc(sizeof(*ops), GFP_KERNEL);
43 if (!ops)
44 return -ENOMEM;
45
46 /* Currently only the non-stack version is supported */
47 ops->func = function_trace_call;
48 ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;
22 49
23static struct trace_array *func_trace; 50 tr->ops = ops;
51 ops->private = tr;
52 return 0;
53}
54
55
56int ftrace_create_function_files(struct trace_array *tr,
57 struct dentry *parent)
58{
59 int ret;
60
61 /*
62 * The top level array uses the "global_ops", and the files are
63 * created on boot up.
64 */
65 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
66 return 0;
24 67
25static void tracing_start_function_trace(void); 68 ret = allocate_ftrace_ops(tr);
26static void tracing_stop_function_trace(void); 69 if (ret)
70 return ret;
71
72 ftrace_create_filter_files(tr->ops, parent);
73
74 return 0;
75}
76
77void ftrace_destroy_function_files(struct trace_array *tr)
78{
79 ftrace_destroy_filter_files(tr->ops);
80 kfree(tr->ops);
81 tr->ops = NULL;
82}
27 83
28static int function_trace_init(struct trace_array *tr) 84static int function_trace_init(struct trace_array *tr)
29{ 85{
30 func_trace = tr; 86 struct ftrace_ops *ops;
87
88 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
89 /* There's only one global tr */
90 if (!trace_ops.private) {
91 trace_ops.private = tr;
92 trace_stack_ops.private = tr;
93 }
94
95 if (func_flags.val & TRACE_FUNC_OPT_STACK)
96 ops = &trace_stack_ops;
97 else
98 ops = &trace_ops;
99 tr->ops = ops;
100 } else if (!tr->ops) {
101 /*
102 * Instance trace_arrays get their ops allocated
103 * at instance creation, unless the
104 * allocation failed.
105 */
106 return -ENOMEM;
107 }
108
31 tr->trace_buffer.cpu = get_cpu(); 109 tr->trace_buffer.cpu = get_cpu();
32 put_cpu(); 110 put_cpu();
33 111
34 tracing_start_cmdline_record(); 112 tracing_start_cmdline_record();
35 tracing_start_function_trace(); 113 tracing_start_function_trace(tr);
36 return 0; 114 return 0;
37} 115}
38 116
39static void function_trace_reset(struct trace_array *tr) 117static void function_trace_reset(struct trace_array *tr)
40{ 118{
41 tracing_stop_function_trace(); 119 tracing_stop_function_trace(tr);
42 tracing_stop_cmdline_record(); 120 tracing_stop_cmdline_record();
43} 121}
44 122
@@ -47,25 +125,18 @@ static void function_trace_start(struct trace_array *tr)
47 tracing_reset_online_cpus(&tr->trace_buffer); 125 tracing_reset_online_cpus(&tr->trace_buffer);
48} 126}
49 127
50/* Our option */
51enum {
52 TRACE_FUNC_OPT_STACK = 0x1,
53};
54
55static struct tracer_flags func_flags;
56
57static void 128static void
58function_trace_call(unsigned long ip, unsigned long parent_ip, 129function_trace_call(unsigned long ip, unsigned long parent_ip,
59 struct ftrace_ops *op, struct pt_regs *pt_regs) 130 struct ftrace_ops *op, struct pt_regs *pt_regs)
60{ 131{
61 struct trace_array *tr = func_trace; 132 struct trace_array *tr = op->private;
62 struct trace_array_cpu *data; 133 struct trace_array_cpu *data;
63 unsigned long flags; 134 unsigned long flags;
64 int bit; 135 int bit;
65 int cpu; 136 int cpu;
66 int pc; 137 int pc;
67 138
68 if (unlikely(!ftrace_function_enabled)) 139 if (unlikely(!tr->function_enabled))
69 return; 140 return;
70 141
71 pc = preempt_count(); 142 pc = preempt_count();
@@ -91,14 +162,14 @@ static void
91function_stack_trace_call(unsigned long ip, unsigned long parent_ip, 162function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
92 struct ftrace_ops *op, struct pt_regs *pt_regs) 163 struct ftrace_ops *op, struct pt_regs *pt_regs)
93{ 164{
94 struct trace_array *tr = func_trace; 165 struct trace_array *tr = op->private;
95 struct trace_array_cpu *data; 166 struct trace_array_cpu *data;
96 unsigned long flags; 167 unsigned long flags;
97 long disabled; 168 long disabled;
98 int cpu; 169 int cpu;
99 int pc; 170 int pc;
100 171
101 if (unlikely(!ftrace_function_enabled)) 172 if (unlikely(!tr->function_enabled))
102 return; 173 return;
103 174
104 /* 175 /*
@@ -128,7 +199,6 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
128 local_irq_restore(flags); 199 local_irq_restore(flags);
129} 200}
130 201
131
132static struct ftrace_ops trace_ops __read_mostly = 202static struct ftrace_ops trace_ops __read_mostly =
133{ 203{
134 .func = function_trace_call, 204 .func = function_trace_call,
@@ -153,29 +223,21 @@ static struct tracer_flags func_flags = {
153 .opts = func_opts 223 .opts = func_opts
154}; 224};
155 225
156static void tracing_start_function_trace(void) 226static void tracing_start_function_trace(struct trace_array *tr)
157{ 227{
158 ftrace_function_enabled = 0; 228 tr->function_enabled = 0;
159 229 register_ftrace_function(tr->ops);
160 if (func_flags.val & TRACE_FUNC_OPT_STACK) 230 tr->function_enabled = 1;
161 register_ftrace_function(&trace_stack_ops);
162 else
163 register_ftrace_function(&trace_ops);
164
165 ftrace_function_enabled = 1;
166} 231}
167 232
168static void tracing_stop_function_trace(void) 233static void tracing_stop_function_trace(struct trace_array *tr)
169{ 234{
170 ftrace_function_enabled = 0; 235 tr->function_enabled = 0;
171 236 unregister_ftrace_function(tr->ops);
172 if (func_flags.val & TRACE_FUNC_OPT_STACK)
173 unregister_ftrace_function(&trace_stack_ops);
174 else
175 unregister_ftrace_function(&trace_ops);
176} 237}
177 238
178static int func_set_flag(u32 old_flags, u32 bit, int set) 239static int
240func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
179{ 241{
180 switch (bit) { 242 switch (bit) {
181 case TRACE_FUNC_OPT_STACK: 243 case TRACE_FUNC_OPT_STACK:
@@ -183,12 +245,14 @@ static int func_set_flag(u32 old_flags, u32 bit, int set)
183 if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK)) 245 if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
184 break; 246 break;
185 247
248 unregister_ftrace_function(tr->ops);
249
186 if (set) { 250 if (set) {
187 unregister_ftrace_function(&trace_ops); 251 tr->ops = &trace_stack_ops;
188 register_ftrace_function(&trace_stack_ops); 252 register_ftrace_function(tr->ops);
189 } else { 253 } else {
190 unregister_ftrace_function(&trace_stack_ops); 254 tr->ops = &trace_ops;
191 register_ftrace_function(&trace_ops); 255 register_ftrace_function(tr->ops);
192 } 256 }
193 257
194 break; 258 break;
@@ -208,6 +272,7 @@ static struct tracer function_trace __tracer_data =
208 .wait_pipe = poll_wait_pipe, 272 .wait_pipe = poll_wait_pipe,
209 .flags = &func_flags, 273 .flags = &func_flags,
210 .set_flag = func_set_flag, 274 .set_flag = func_set_flag,
275 .allow_instances = true,
211#ifdef CONFIG_FTRACE_SELFTEST 276#ifdef CONFIG_FTRACE_SELFTEST
212 .selftest = trace_selftest_startup_function, 277 .selftest = trace_selftest_startup_function,
213#endif 278#endif
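
The substantive change in this file: function tracing used to reach its trace_array through the func_trace global, so only one array could ever use it. Each instance now gets its own dynamically allocated ftrace_ops whose ->private points back at the owning trace_array, and the callbacks recover their context from the op argument. A self-contained sketch of the pattern, with illustrative types in place of the kernel's:

#include <stdio.h>
#include <stdlib.h>

struct my_ops;
typedef void (*trace_fn)(unsigned long ip, struct my_ops *op);

struct trace_instance { const char *name; int enabled; };
struct my_ops { trace_fn func; void *private; };

static void function_trace(unsigned long ip, struct my_ops *op)
{
        struct trace_instance *tr = op->private;   /* was: a global */

        if (!tr->enabled)
                return;
        printf("[%s] hit at %#lx\n", tr->name, ip);
}

static struct my_ops *alloc_ops(struct trace_instance *tr)
{
        struct my_ops *ops = calloc(1, sizeof(*ops));

        if (!ops)
                return NULL;
        ops->func = function_trace;
        ops->private = tr;                         /* the back-pointer */
        return ops;
}

int main(void)
{
        struct trace_instance top = { "top", 1 }, inst = { "instance1", 1 };
        struct my_ops *a = alloc_ops(&top), *b = alloc_ops(&inst);

        if (!a || !b)
                return 1;
        a->func(0x1000, a);
        b->func(0x2000, b);
        free(a);
        free(b);
        return 0;
}

One callback body now serves any number of instances; the top-level array keeps the static trace_ops/trace_stack_ops, whose ->private is filled in lazily on first init.
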
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 0b99120d395c..deff11200261 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -1476,7 +1476,8 @@ void graph_trace_close(struct trace_iterator *iter)
1476 } 1476 }
1477} 1477}
1478 1478
1479static int func_graph_set_flag(u32 old_flags, u32 bit, int set) 1479static int
1480func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
1480{ 1481{
1481 if (bit == TRACE_GRAPH_PRINT_IRQS) 1482 if (bit == TRACE_GRAPH_PRINT_IRQS)
1482 ftrace_graph_skip_irqs = !set; 1483 ftrace_graph_skip_irqs = !set;
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 887ef88b0bc7..8ff02cbb892f 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -160,7 +160,8 @@ static struct ftrace_ops trace_ops __read_mostly =
160#endif /* CONFIG_FUNCTION_TRACER */ 160#endif /* CONFIG_FUNCTION_TRACER */
161 161
162#ifdef CONFIG_FUNCTION_GRAPH_TRACER 162#ifdef CONFIG_FUNCTION_GRAPH_TRACER
163static int irqsoff_set_flag(u32 old_flags, u32 bit, int set) 163static int
164irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
164{ 165{
165 int cpu; 166 int cpu;
166 167
@@ -266,7 +267,8 @@ __trace_function(struct trace_array *tr,
266#else 267#else
267#define __trace_function trace_function 268#define __trace_function trace_function
268 269
269static int irqsoff_set_flag(u32 old_flags, u32 bit, int set) 270static int
271irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
270{ 272{
271 return -EINVAL; 273 return -EINVAL;
272} 274}
@@ -570,8 +572,10 @@ static void irqsoff_function_set(int set)
570 unregister_irqsoff_function(is_graph()); 572 unregister_irqsoff_function(is_graph());
571} 573}
572 574
573static int irqsoff_flag_changed(struct tracer *tracer, u32 mask, int set) 575static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
574{ 576{
577 struct tracer *tracer = tr->current_trace;
578
575 if (mask & TRACE_ITER_FUNCTION) 579 if (mask & TRACE_ITER_FUNCTION)
576 irqsoff_function_set(set); 580 irqsoff_function_set(set);
577 581
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index bdbae450c13e..903ae28962be 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -35,11 +35,6 @@ struct trace_kprobe {
35 struct trace_probe tp; 35 struct trace_probe tp;
36}; 36};
37 37
38struct event_file_link {
39 struct ftrace_event_file *file;
40 struct list_head list;
41};
42
43#define SIZEOF_TRACE_KPROBE(n) \ 38#define SIZEOF_TRACE_KPROBE(n) \
44 (offsetof(struct trace_kprobe, tp.args) + \ 39 (offsetof(struct trace_kprobe, tp.args) + \
45 (sizeof(struct probe_arg) * (n))) 40 (sizeof(struct probe_arg) * (n)))
@@ -346,7 +341,7 @@ static struct trace_kprobe *find_trace_kprobe(const char *event,
346 struct trace_kprobe *tk; 341 struct trace_kprobe *tk;
347 342
348 list_for_each_entry(tk, &probe_list, list) 343 list_for_each_entry(tk, &probe_list, list)
349 if (strcmp(tk->tp.call.name, event) == 0 && 344 if (strcmp(ftrace_event_name(&tk->tp.call), event) == 0 &&
350 strcmp(tk->tp.call.class->system, group) == 0) 345 strcmp(tk->tp.call.class->system, group) == 0)
351 return tk; 346 return tk;
352 return NULL; 347 return NULL;
@@ -387,18 +382,6 @@ enable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file)
387 return ret; 382 return ret;
388} 383}
389 384
390static struct event_file_link *
391find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file)
392{
393 struct event_file_link *link;
394
395 list_for_each_entry(link, &tp->files, list)
396 if (link->file == file)
397 return link;
398
399 return NULL;
400}
401
402/* 385/*
403 * Disable trace_probe 386 * Disable trace_probe
404 * if the file is NULL, disable "perf" handler, or disable "trace" handler. 387 * if the file is NULL, disable "perf" handler, or disable "trace" handler.
@@ -533,7 +516,8 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
533 mutex_lock(&probe_lock); 516 mutex_lock(&probe_lock);
534 517
535 /* Delete old (same name) event if it exists */ 518 /* Delete old (same name) event if it exists */
536 old_tk = find_trace_kprobe(tk->tp.call.name, tk->tp.call.class->system); 519 old_tk = find_trace_kprobe(ftrace_event_name(&tk->tp.call),
520 tk->tp.call.class->system);
537 if (old_tk) { 521 if (old_tk) {
538 ret = unregister_trace_kprobe(old_tk); 522 ret = unregister_trace_kprobe(old_tk);
539 if (ret < 0) 523 if (ret < 0)
@@ -581,7 +565,8 @@ static int trace_kprobe_module_callback(struct notifier_block *nb,
581 if (ret) 565 if (ret)
582 pr_warning("Failed to re-register probe %s on " 566 pr_warning("Failed to re-register probe %s on "
583 "%s: %d\n", 567 "%s: %d\n",
584 tk->tp.call.name, mod->name, ret); 568 ftrace_event_name(&tk->tp.call),
569 mod->name, ret);
585 } 570 }
586 } 571 }
587 mutex_unlock(&probe_lock); 572 mutex_unlock(&probe_lock);
@@ -835,7 +820,8 @@ static int probes_seq_show(struct seq_file *m, void *v)
835 int i; 820 int i;
836 821
837 seq_printf(m, "%c", trace_kprobe_is_return(tk) ? 'r' : 'p'); 822 seq_printf(m, "%c", trace_kprobe_is_return(tk) ? 'r' : 'p');
838 seq_printf(m, ":%s/%s", tk->tp.call.class->system, tk->tp.call.name); 823 seq_printf(m, ":%s/%s", tk->tp.call.class->system,
824 ftrace_event_name(&tk->tp.call));
839 825
840 if (!tk->symbol) 826 if (!tk->symbol)
841 seq_printf(m, " 0x%p", tk->rp.kp.addr); 827 seq_printf(m, " 0x%p", tk->rp.kp.addr);
@@ -893,7 +879,8 @@ static int probes_profile_seq_show(struct seq_file *m, void *v)
893{ 879{
894 struct trace_kprobe *tk = v; 880 struct trace_kprobe *tk = v;
895 881
896 seq_printf(m, " %-44s %15lu %15lu\n", tk->tp.call.name, tk->nhit, 882 seq_printf(m, " %-44s %15lu %15lu\n",
883 ftrace_event_name(&tk->tp.call), tk->nhit,
897 tk->rp.kp.nmissed); 884 tk->rp.kp.nmissed);
898 885
899 return 0; 886 return 0;
@@ -1028,7 +1015,7 @@ print_kprobe_event(struct trace_iterator *iter, int flags,
1028 field = (struct kprobe_trace_entry_head *)iter->ent; 1015 field = (struct kprobe_trace_entry_head *)iter->ent;
1029 tp = container_of(event, struct trace_probe, call.event); 1016 tp = container_of(event, struct trace_probe, call.event);
1030 1017
1031 if (!trace_seq_printf(s, "%s: (", tp->call.name)) 1018 if (!trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call)))
1032 goto partial; 1019 goto partial;
1033 1020
1034 if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET)) 1021 if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
@@ -1064,7 +1051,7 @@ print_kretprobe_event(struct trace_iterator *iter, int flags,
1064 field = (struct kretprobe_trace_entry_head *)iter->ent; 1051 field = (struct kretprobe_trace_entry_head *)iter->ent;
1065 tp = container_of(event, struct trace_probe, call.event); 1052 tp = container_of(event, struct trace_probe, call.event);
1066 1053
1067 if (!trace_seq_printf(s, "%s: (", tp->call.name)) 1054 if (!trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call)))
1068 goto partial; 1055 goto partial;
1069 1056
1070 if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET)) 1057 if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
@@ -1303,7 +1290,8 @@ static int register_kprobe_event(struct trace_kprobe *tk)
1303 call->data = tk; 1290 call->data = tk;
1304 ret = trace_add_event_call(call); 1291 ret = trace_add_event_call(call);
1305 if (ret) { 1292 if (ret) {
1306 pr_info("Failed to register kprobe event: %s\n", call->name); 1293 pr_info("Failed to register kprobe event: %s\n",
1294 ftrace_event_name(call));
1307 kfree(call->print_fmt); 1295 kfree(call->print_fmt);
1308 unregister_ftrace_event(&call->event); 1296 unregister_ftrace_event(&call->event);
1309 } 1297 }
diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c
index 394f94417e2f..69a5cc94c01a 100644
--- a/kernel/trace/trace_nop.c
+++ b/kernel/trace/trace_nop.c
@@ -62,7 +62,7 @@ static void nop_trace_reset(struct trace_array *tr)
62 * If you don't implement it, then the flag setting will be 62 * If you don't implement it, then the flag setting will be
63 * automatically accepted. 63 * automatically accepted.
64 */ 64 */
65static int nop_set_flag(u32 old_flags, u32 bit, int set) 65static int nop_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
66{ 66{
67 /* 67 /*
68 * Note that you don't need to update nop_flags.val yourself. 68 * Note that you don't need to update nop_flags.val yourself.
@@ -96,6 +96,7 @@ struct tracer nop_trace __read_mostly =
96 .selftest = trace_selftest_startup_nop, 96 .selftest = trace_selftest_startup_nop,
97#endif 97#endif
98 .flags = &nop_flags, 98 .flags = &nop_flags,
99 .set_flag = nop_set_flag 99 .set_flag = nop_set_flag,
100 .allow_instances = true,
100}; 101};
101 102
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index ed32284fbe32..a436de18aa99 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -431,7 +431,7 @@ int ftrace_raw_output_prep(struct trace_iterator *iter,
431 } 431 }
432 432
433 trace_seq_init(p); 433 trace_seq_init(p);
434 ret = trace_seq_printf(s, "%s: ", event->name); 434 ret = trace_seq_printf(s, "%s: ", ftrace_event_name(event));
435 if (!ret) 435 if (!ret)
436 return TRACE_TYPE_PARTIAL_LINE; 436 return TRACE_TYPE_PARTIAL_LINE;
437 437
@@ -439,6 +439,37 @@ int ftrace_raw_output_prep(struct trace_iterator *iter,
439} 439}
440EXPORT_SYMBOL(ftrace_raw_output_prep); 440EXPORT_SYMBOL(ftrace_raw_output_prep);
441 441
442static int ftrace_output_raw(struct trace_iterator *iter, char *name,
443 char *fmt, va_list ap)
444{
445 struct trace_seq *s = &iter->seq;
446 int ret;
447
448 ret = trace_seq_printf(s, "%s: ", name);
449 if (!ret)
450 return TRACE_TYPE_PARTIAL_LINE;
451
452 ret = trace_seq_vprintf(s, fmt, ap);
453
454 if (!ret)
455 return TRACE_TYPE_PARTIAL_LINE;
456
457 return TRACE_TYPE_HANDLED;
458}
459
460int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
461{
462 va_list ap;
463 int ret;
464
465 va_start(ap, fmt);
466 ret = ftrace_output_raw(iter, name, fmt, ap);
467 va_end(ap);
468
469 return ret;
470}
471EXPORT_SYMBOL_GPL(ftrace_output_call);
472
442#ifdef CONFIG_KRETPROBES 473#ifdef CONFIG_KRETPROBES
443static inline const char *kretprobed(const char *name) 474static inline const char *kretprobed(const char *name)
444{ 475{
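
ftrace_output_call() is the standard printf()/vprintf() pairing: the exported varargs function only packages its arguments into a va_list and hands off to a raw core, so further varargs entry points can be layered over the same core later. A user-space sketch of the shape (names are illustrative):

#include <stdarg.h>
#include <stdio.h>

static int output_raw(const char *name, const char *fmt, va_list ap)
{
        int ret = printf("%s: ", name);     /* event-name prefix */

        if (ret < 0)
                return ret;
        return vprintf(fmt, ap);            /* caller's format and args */
}

static int output_call(const char *name, const char *fmt, ...)
{
        va_list ap;
        int ret;

        va_start(ap, fmt);
        ret = output_raw(name, fmt, ap);
        va_end(ap);
        return ret;
}

int main(void)
{
        return output_call("my_event", "(ip=%#lx)\n", 0xffff0123UL) < 0;
}
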
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index b73574a5f429..fb1ab5dfbd42 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -288,6 +288,11 @@ struct trace_probe {
288 struct probe_arg args[]; 288 struct probe_arg args[];
289}; 289};
290 290
291struct event_file_link {
292 struct ftrace_event_file *file;
293 struct list_head list;
294};
295
291static inline bool trace_probe_is_enabled(struct trace_probe *tp) 296static inline bool trace_probe_is_enabled(struct trace_probe *tp)
292{ 297{
293 return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE)); 298 return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE));
@@ -316,6 +321,18 @@ static inline int is_good_name(const char *name)
316 return 1; 321 return 1;
317} 322}
318 323
324static inline struct event_file_link *
325find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file)
326{
327 struct event_file_link *link;
328
329 list_for_each_entry(link, &tp->files, list)
330 if (link->file == file)
331 return link;
332
333 return NULL;
334}
335
319extern int traceprobe_parse_probe_arg(char *arg, ssize_t *size, 336extern int traceprobe_parse_probe_arg(char *arg, ssize_t *size,
320 struct probe_arg *parg, bool is_return, bool is_kprobe); 337 struct probe_arg *parg, bool is_return, bool is_kprobe);
321 338
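
find_event_file_link() moves into the header alongside the struct it walks, since trace_uprobe.c now needs it as well as trace_kprobe.c. It is a plain intrusive-list lookup that compares by pointer identity, which is sufficient because a given ftrace_event_file is attached to a probe at most once. A sketch of the same walk, with a hand-rolled circular list standing in for the kernel's list_for_each_entry():

#include <stddef.h>
#include <stdio.h>

struct node { struct node *next; };
struct file_link { const char *file; struct node list; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static struct file_link *find_link(struct node *head, const char *file)
{
        for (struct node *n = head->next; n != head; n = n->next) {
                struct file_link *link =
                        container_of(n, struct file_link, list);

                if (link->file == file) /* pointer identity, as in the kernel */
                        return link;
        }
        return NULL;
}

int main(void)
{
        const char *target = "events/foo";
        struct node head = { &head };
        struct file_link a = { target, { &head } };

        head.next = &a.list;    /* one-element circular list */
        printf("%s\n", find_link(&head, target) ? "found" : "missing");
        return 0;
}
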
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 6e32635e5e57..e14da5e97a69 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -179,8 +179,10 @@ static void wakeup_function_set(int set)
179 unregister_wakeup_function(is_graph()); 179 unregister_wakeup_function(is_graph());
180} 180}
181 181
182static int wakeup_flag_changed(struct tracer *tracer, u32 mask, int set) 182static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
183{ 183{
184 struct tracer *tracer = tr->current_trace;
185
184 if (mask & TRACE_ITER_FUNCTION) 186 if (mask & TRACE_ITER_FUNCTION)
185 wakeup_function_set(set); 187 wakeup_function_set(set);
186 188
@@ -209,7 +211,8 @@ static void stop_func_tracer(int graph)
209} 211}
210 212
211#ifdef CONFIG_FUNCTION_GRAPH_TRACER 213#ifdef CONFIG_FUNCTION_GRAPH_TRACER
212static int wakeup_set_flag(u32 old_flags, u32 bit, int set) 214static int
215wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
213{ 216{
214 217
215 if (!(bit & TRACE_DISPLAY_GRAPH)) 218 if (!(bit & TRACE_DISPLAY_GRAPH))
@@ -311,7 +314,8 @@ __trace_function(struct trace_array *tr,
311#else 314#else
312#define __trace_function trace_function 315#define __trace_function trace_function
313 316
314static int wakeup_set_flag(u32 old_flags, u32 bit, int set) 317static int
318wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
315{ 319{
316 return -EINVAL; 320 return -EINVAL;
317} 321}
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index e6be585cf06a..21b320e5d163 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -13,6 +13,7 @@
13#include <linux/sysctl.h> 13#include <linux/sysctl.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/fs.h> 15#include <linux/fs.h>
16#include <linux/magic.h>
16 17
17#include <asm/setup.h> 18#include <asm/setup.h>
18 19
@@ -144,6 +145,8 @@ check_stack(unsigned long ip, unsigned long *stack)
144 i++; 145 i++;
145 } 146 }
146 147
148 BUG_ON(current != &init_task &&
149 *(end_of_stack(current)) != STACK_END_MAGIC);
147 out: 150 out:
148 arch_spin_unlock(&max_stack_lock); 151 arch_spin_unlock(&max_stack_lock);
149 local_irq_restore(flags); 152 local_irq_restore(flags);
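
The added BUG_ON() is a canary check: the kernel plants STACK_END_MAGIC at the far end of every task stack, and after walking the stack this verifies the word is still intact, so an overrun is caught here rather than corrupting memory silently. A small sketch of the mechanism; the struct is made up, the magic value is the kernel's:

#include <stdio.h>
#include <stdlib.h>

#define STACK_END_MAGIC 0x57AC6E9DUL

#define BUG_ON(cond)                                            \
        do {                                                    \
                if (cond) {                                     \
                        fprintf(stderr, "BUG at %s:%d\n",       \
                                __FILE__, __LINE__);            \
                        abort();                                \
                }                                               \
        } while (0)

struct task_stack {
        unsigned long end_magic;        /* lowest address: the canary */
        unsigned char frames[256];      /* usage grows down toward it */
};

int main(void)
{
        struct task_stack ts = { .end_magic = STACK_END_MAGIC };

        /* An overflow past frames[] would have clobbered end_magic. */
        BUG_ON(ts.end_magic != STACK_END_MAGIC);
        return 0;
}
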
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 79e52d93860b..c082a7441345 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -260,6 +260,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
260 goto error; 260 goto error;
261 261
262 INIT_LIST_HEAD(&tu->list); 262 INIT_LIST_HEAD(&tu->list);
263 INIT_LIST_HEAD(&tu->tp.files);
263 tu->consumer.handler = uprobe_dispatcher; 264 tu->consumer.handler = uprobe_dispatcher;
264 if (is_ret) 265 if (is_ret)
265 tu->consumer.ret_handler = uretprobe_dispatcher; 266 tu->consumer.ret_handler = uretprobe_dispatcher;
@@ -293,7 +294,7 @@ static struct trace_uprobe *find_probe_event(const char *event, const char *grou
293 struct trace_uprobe *tu; 294 struct trace_uprobe *tu;
294 295
295 list_for_each_entry(tu, &uprobe_list, list) 296 list_for_each_entry(tu, &uprobe_list, list)
296 if (strcmp(tu->tp.call.name, event) == 0 && 297 if (strcmp(ftrace_event_name(&tu->tp.call), event) == 0 &&
297 strcmp(tu->tp.call.class->system, group) == 0) 298 strcmp(tu->tp.call.class->system, group) == 0)
298 return tu; 299 return tu;
299 300
@@ -323,7 +324,8 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
323 mutex_lock(&uprobe_lock); 324 mutex_lock(&uprobe_lock);
324 325
325 /* register as an event */ 326 /* register as an event */
326 old_tu = find_probe_event(tu->tp.call.name, tu->tp.call.class->system); 327 old_tu = find_probe_event(ftrace_event_name(&tu->tp.call),
328 tu->tp.call.class->system);
327 if (old_tu) { 329 if (old_tu) {
328 /* delete old event */ 330 /* delete old event */
329 ret = unregister_trace_uprobe(old_tu); 331 ret = unregister_trace_uprobe(old_tu);
@@ -598,7 +600,8 @@ static int probes_seq_show(struct seq_file *m, void *v)
598 char c = is_ret_probe(tu) ? 'r' : 'p'; 600 char c = is_ret_probe(tu) ? 'r' : 'p';
599 int i; 601 int i;
600 602
601 seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system, tu->tp.call.name); 603 seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system,
604 ftrace_event_name(&tu->tp.call));
602 seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset); 605 seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset);
603 606
604 for (i = 0; i < tu->tp.nr_args; i++) 607 for (i = 0; i < tu->tp.nr_args; i++)
@@ -648,7 +651,8 @@ static int probes_profile_seq_show(struct seq_file *m, void *v)
648{ 651{
649 struct trace_uprobe *tu = v; 652 struct trace_uprobe *tu = v;
650 653
651 seq_printf(m, " %s %-44s %15lu\n", tu->filename, tu->tp.call.name, tu->nhit); 654 seq_printf(m, " %s %-44s %15lu\n", tu->filename,
655 ftrace_event_name(&tu->tp.call), tu->nhit);
652 return 0; 656 return 0;
653} 657}
654 658
@@ -728,9 +732,15 @@ static int uprobe_buffer_enable(void)
728 732
729static void uprobe_buffer_disable(void) 733static void uprobe_buffer_disable(void)
730{ 734{
735 int cpu;
736
731 BUG_ON(!mutex_is_locked(&event_mutex)); 737 BUG_ON(!mutex_is_locked(&event_mutex));
732 738
733 if (--uprobe_buffer_refcnt == 0) { 739 if (--uprobe_buffer_refcnt == 0) {
740 for_each_possible_cpu(cpu)
741 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
742 cpu)->buf);
743
734 free_percpu(uprobe_cpu_buffer); 744 free_percpu(uprobe_cpu_buffer);
735 uprobe_cpu_buffer = NULL; 745 uprobe_cpu_buffer = NULL;
736 } 746 }
@@ -758,31 +768,32 @@ static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
758 mutex_unlock(&ucb->mutex); 768 mutex_unlock(&ucb->mutex);
759} 769}
760 770
761static void uprobe_trace_print(struct trace_uprobe *tu, 771static void __uprobe_trace_func(struct trace_uprobe *tu,
762 unsigned long func, struct pt_regs *regs) 772 unsigned long func, struct pt_regs *regs,
773 struct uprobe_cpu_buffer *ucb, int dsize,
774 struct ftrace_event_file *ftrace_file)
763{ 775{
764 struct uprobe_trace_entry_head *entry; 776 struct uprobe_trace_entry_head *entry;
765 struct ring_buffer_event *event; 777 struct ring_buffer_event *event;
766 struct ring_buffer *buffer; 778 struct ring_buffer *buffer;
767 struct uprobe_cpu_buffer *ucb;
768 void *data; 779 void *data;
769 int size, dsize, esize; 780 int size, esize;
770 struct ftrace_event_call *call = &tu->tp.call; 781 struct ftrace_event_call *call = &tu->tp.call;
771 782
772 dsize = __get_data_size(&tu->tp, regs); 783 WARN_ON(call != ftrace_file->event_call);
773 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
774 784
775 if (WARN_ON_ONCE(!uprobe_cpu_buffer || tu->tp.size + dsize > PAGE_SIZE)) 785 if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
776 return; 786 return;
777 787
778 ucb = uprobe_buffer_get(); 788 if (ftrace_trigger_soft_disabled(ftrace_file))
779 store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize); 789 return;
780 790
791 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
781 size = esize + tu->tp.size + dsize; 792 size = esize + tu->tp.size + dsize;
782 event = trace_current_buffer_lock_reserve(&buffer, call->event.type, 793 event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
783 size, 0, 0); 794 call->event.type, size, 0, 0);
784 if (!event) 795 if (!event)
785 goto out; 796 return;
786 797
787 entry = ring_buffer_event_data(event); 798 entry = ring_buffer_event_data(event);
788 if (is_ret_probe(tu)) { 799 if (is_ret_probe(tu)) {
@@ -796,25 +807,36 @@ static void uprobe_trace_print(struct trace_uprobe *tu,
796 807
797 memcpy(data, ucb->buf, tu->tp.size + dsize); 808 memcpy(data, ucb->buf, tu->tp.size + dsize);
798 809
799 if (!call_filter_check_discard(call, entry, buffer, event)) 810 event_trigger_unlock_commit(ftrace_file, buffer, event, entry, 0, 0);
800 trace_buffer_unlock_commit(buffer, event, 0, 0);
801
802out:
803 uprobe_buffer_put(ucb);
804} 811}
805 812
806/* uprobe handler */ 813/* uprobe handler */
807static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs) 814static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
815 struct uprobe_cpu_buffer *ucb, int dsize)
808{ 816{
809 if (!is_ret_probe(tu)) 817 struct event_file_link *link;
810 uprobe_trace_print(tu, 0, regs); 818
819 if (is_ret_probe(tu))
820 return 0;
821
822 rcu_read_lock();
823 list_for_each_entry_rcu(link, &tu->tp.files, list)
824 __uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
825 rcu_read_unlock();
826
811 return 0; 827 return 0;
812} 828}
813 829
814static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func, 830static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
815 struct pt_regs *regs) 831 struct pt_regs *regs,
832 struct uprobe_cpu_buffer *ucb, int dsize)
816{ 833{
817 uprobe_trace_print(tu, func, regs); 834 struct event_file_link *link;
835
836 rcu_read_lock();
837 list_for_each_entry_rcu(link, &tu->tp.files, list)
838 __uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
839 rcu_read_unlock();
818} 840}
819 841
820/* Event entry printers */ 842/* Event entry printers */
@@ -831,12 +853,14 @@ print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *e
831 tu = container_of(event, struct trace_uprobe, tp.call.event); 853 tu = container_of(event, struct trace_uprobe, tp.call.event);
832 854
833 if (is_ret_probe(tu)) { 855 if (is_ret_probe(tu)) {
834 if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", tu->tp.call.name, 856 if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
857 ftrace_event_name(&tu->tp.call),
835 entry->vaddr[1], entry->vaddr[0])) 858 entry->vaddr[1], entry->vaddr[0]))
836 goto partial; 859 goto partial;
837 data = DATAOF_TRACE_ENTRY(entry, true); 860 data = DATAOF_TRACE_ENTRY(entry, true);
838 } else { 861 } else {
839 if (!trace_seq_printf(s, "%s: (0x%lx)", tu->tp.call.name, 862 if (!trace_seq_printf(s, "%s: (0x%lx)",
863 ftrace_event_name(&tu->tp.call),
840 entry->vaddr[0])) 864 entry->vaddr[0]))
841 goto partial; 865 goto partial;
842 data = DATAOF_TRACE_ENTRY(entry, false); 866 data = DATAOF_TRACE_ENTRY(entry, false);
@@ -861,12 +885,24 @@ typedef bool (*filter_func_t)(struct uprobe_consumer *self,
861 struct mm_struct *mm); 885 struct mm_struct *mm);
862 886
863static int 887static int
864probe_event_enable(struct trace_uprobe *tu, int flag, filter_func_t filter) 888probe_event_enable(struct trace_uprobe *tu, struct ftrace_event_file *file,
889 filter_func_t filter)
865{ 890{
866 int ret = 0; 891 bool enabled = trace_probe_is_enabled(&tu->tp);
892 struct event_file_link *link = NULL;
893 int ret;
894
895 if (file) {
896 link = kmalloc(sizeof(*link), GFP_KERNEL);
897 if (!link)
898 return -ENOMEM;
867 899
868 if (trace_probe_is_enabled(&tu->tp)) 900 link->file = file;
869 return -EINTR; 901 list_add_tail_rcu(&link->list, &tu->tp.files);
902
903 tu->tp.flags |= TP_FLAG_TRACE;
904 } else
905 tu->tp.flags |= TP_FLAG_PROFILE;
870 906
871 ret = uprobe_buffer_enable(); 907 ret = uprobe_buffer_enable();
872 if (ret < 0) 908 if (ret < 0)
@@ -874,24 +910,49 @@ probe_event_enable(struct trace_uprobe *tu, int flag, filter_func_t filter)
874 910
875 WARN_ON(!uprobe_filter_is_empty(&tu->filter)); 911 WARN_ON(!uprobe_filter_is_empty(&tu->filter));
876 912
877 tu->tp.flags |= flag; 913 if (enabled)
914 return 0;
915
878 tu->consumer.filter = filter; 916 tu->consumer.filter = filter;
879 ret = uprobe_register(tu->inode, tu->offset, &tu->consumer); 917 ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
880 if (ret) 918 if (ret) {
881 tu->tp.flags &= ~flag; 919 if (file) {
920 list_del(&link->list);
921 kfree(link);
922 tu->tp.flags &= ~TP_FLAG_TRACE;
923 } else
924 tu->tp.flags &= ~TP_FLAG_PROFILE;
925 }
882 926
883 return ret; 927 return ret;
884} 928}
885 929
886static void probe_event_disable(struct trace_uprobe *tu, int flag) 930static void
931probe_event_disable(struct trace_uprobe *tu, struct ftrace_event_file *file)
887{ 932{
888 if (!trace_probe_is_enabled(&tu->tp)) 933 if (!trace_probe_is_enabled(&tu->tp))
889 return; 934 return;
890 935
936 if (file) {
937 struct event_file_link *link;
938
939 link = find_event_file_link(&tu->tp, file);
940 if (!link)
941 return;
942
943 list_del_rcu(&link->list);
944 /* synchronize with u{,ret}probe_trace_func */
945 synchronize_sched();
946 kfree(link);
947
948 if (!list_empty(&tu->tp.files))
949 return;
950 }
951
891 WARN_ON(!uprobe_filter_is_empty(&tu->filter)); 952 WARN_ON(!uprobe_filter_is_empty(&tu->filter));
892 953
893 uprobe_unregister(tu->inode, tu->offset, &tu->consumer); 954 uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
894 tu->tp.flags &= ~flag; 955 tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;
895 956
896 uprobe_buffer_disable(); 957 uprobe_buffer_disable();
897} 958}
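
probe_event_enable() and probe_event_disable() now maintain an RCU-protected list of attached event files. Publication is list_add_tail_rcu(); teardown is the classic unpublish/wait/free sequence, where synchronize_sched() guarantees that no handler still walking the list under rcu_read_lock() can be holding the link when it is freed. A single-threaded sketch of the writer side; the stubs only mark where the real primitives go and are not RCU:

#include <stdio.h>
#include <stdlib.h>

#define rcu_read_lock()         do { } while (0)
#define rcu_read_unlock()       do { } while (0)
#define synchronize_stub()      do { } while (0)  /* waits out readers */

struct link { const char *file; struct link *next; };

static struct link *files;

static int attach(const char *file)
{
        struct link *l = malloc(sizeof(*l));

        if (!l)
                return -1;
        l->file = file;
        l->next = files;
        files = l;                      /* kernel: list_add_tail_rcu() */
        return 0;
}

static void detach(const char *file)
{
        struct link **pp;

        for (pp = &files; *pp; pp = &(*pp)->next) {
                struct link *l = *pp;

                if (l->file != file)
                        continue;
                *pp = l->next;          /* kernel: list_del_rcu() */
                synchronize_stub();     /* kernel: synchronize_sched() */
                free(l);                /* no reader can hold it now */
                return;
        }
}

int main(void)
{
        struct link *l;

        if (attach("events/uprobe"))
                return 1;
        rcu_read_lock();                /* reader side: the handlers */
        for (l = files; l; l = l->next)
                printf("%s\n", l->file);
        rcu_read_unlock();
        detach("events/uprobe");
        return 0;
}
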
@@ -1014,31 +1075,24 @@ static bool uprobe_perf_filter(struct uprobe_consumer *uc,
1014 return ret; 1075 return ret;
1015} 1076}
1016 1077
1017static void uprobe_perf_print(struct trace_uprobe *tu, 1078static void __uprobe_perf_func(struct trace_uprobe *tu,
1018 unsigned long func, struct pt_regs *regs) 1079 unsigned long func, struct pt_regs *regs,
1080 struct uprobe_cpu_buffer *ucb, int dsize)
1019{ 1081{
1020 struct ftrace_event_call *call = &tu->tp.call; 1082 struct ftrace_event_call *call = &tu->tp.call;
1021 struct uprobe_trace_entry_head *entry; 1083 struct uprobe_trace_entry_head *entry;
1022 struct hlist_head *head; 1084 struct hlist_head *head;
1023 struct uprobe_cpu_buffer *ucb;
1024 void *data; 1085 void *data;
1025 int size, dsize, esize; 1086 int size, esize;
1026 int rctx; 1087 int rctx;
1027 1088
1028 dsize = __get_data_size(&tu->tp, regs);
1029 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); 1089 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1030 1090
1031 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1032 return;
1033
1034 size = esize + tu->tp.size + dsize; 1091 size = esize + tu->tp.size + dsize;
1035 size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32); 1092 size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
1036 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough")) 1093 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
1037 return; 1094 return;
1038 1095
1039 ucb = uprobe_buffer_get();
1040 store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
1041
1042 preempt_disable(); 1096 preempt_disable();
1043 head = this_cpu_ptr(call->perf_events); 1097 head = this_cpu_ptr(call->perf_events);
1044 if (hlist_empty(head)) 1098 if (hlist_empty(head))
@@ -1068,46 +1122,49 @@ static void uprobe_perf_print(struct trace_uprobe *tu,
1068 perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL); 1122 perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
1069 out: 1123 out:
1070 preempt_enable(); 1124 preempt_enable();
1071 uprobe_buffer_put(ucb);
1072} 1125}
1073 1126
1074/* uprobe profile handler */ 1127/* uprobe profile handler */
1075static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs) 1128static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
1129 struct uprobe_cpu_buffer *ucb, int dsize)
1076{ 1130{
1077 if (!uprobe_perf_filter(&tu->consumer, 0, current->mm)) 1131 if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
1078 return UPROBE_HANDLER_REMOVE; 1132 return UPROBE_HANDLER_REMOVE;
1079 1133
1080 if (!is_ret_probe(tu)) 1134 if (!is_ret_probe(tu))
1081 uprobe_perf_print(tu, 0, regs); 1135 __uprobe_perf_func(tu, 0, regs, ucb, dsize);
1082 return 0; 1136 return 0;
1083} 1137}
1084 1138
1085static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func, 1139static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
1086 struct pt_regs *regs) 1140 struct pt_regs *regs,
1141 struct uprobe_cpu_buffer *ucb, int dsize)
1087{ 1142{
1088 uprobe_perf_print(tu, func, regs); 1143 __uprobe_perf_func(tu, func, regs, ucb, dsize);
1089} 1144}
1090#endif /* CONFIG_PERF_EVENTS */ 1145#endif /* CONFIG_PERF_EVENTS */
1091 1146
1092static 1147static int
1093int trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type, void *data) 1148trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type,
1149 void *data)
1094{ 1150{
1095 struct trace_uprobe *tu = event->data; 1151 struct trace_uprobe *tu = event->data;
1152 struct ftrace_event_file *file = data;
1096 1153
1097 switch (type) { 1154 switch (type) {
1098 case TRACE_REG_REGISTER: 1155 case TRACE_REG_REGISTER:
1099 return probe_event_enable(tu, TP_FLAG_TRACE, NULL); 1156 return probe_event_enable(tu, file, NULL);
1100 1157
1101 case TRACE_REG_UNREGISTER: 1158 case TRACE_REG_UNREGISTER:
1102 probe_event_disable(tu, TP_FLAG_TRACE); 1159 probe_event_disable(tu, file);
1103 return 0; 1160 return 0;
1104 1161
1105#ifdef CONFIG_PERF_EVENTS 1162#ifdef CONFIG_PERF_EVENTS
1106 case TRACE_REG_PERF_REGISTER: 1163 case TRACE_REG_PERF_REGISTER:
1107 return probe_event_enable(tu, TP_FLAG_PROFILE, uprobe_perf_filter); 1164 return probe_event_enable(tu, NULL, uprobe_perf_filter);
1108 1165
1109 case TRACE_REG_PERF_UNREGISTER: 1166 case TRACE_REG_PERF_UNREGISTER:
1110 probe_event_disable(tu, TP_FLAG_PROFILE); 1167 probe_event_disable(tu, NULL);
1111 return 0; 1168 return 0;
1112 1169
1113 case TRACE_REG_PERF_OPEN: 1170 case TRACE_REG_PERF_OPEN:
@@ -1127,8 +1184,11 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
1127{ 1184{
1128 struct trace_uprobe *tu; 1185 struct trace_uprobe *tu;
1129 struct uprobe_dispatch_data udd; 1186 struct uprobe_dispatch_data udd;
1187 struct uprobe_cpu_buffer *ucb;
1188 int dsize, esize;
1130 int ret = 0; 1189 int ret = 0;
1131 1190
1191
1132 tu = container_of(con, struct trace_uprobe, consumer); 1192 tu = container_of(con, struct trace_uprobe, consumer);
1133 tu->nhit++; 1193 tu->nhit++;
1134 1194
@@ -1137,13 +1197,29 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
1137 1197
1138 current->utask->vaddr = (unsigned long) &udd; 1198 current->utask->vaddr = (unsigned long) &udd;
1139 1199
1200#ifdef CONFIG_PERF_EVENTS
1201 if ((tu->tp.flags & TP_FLAG_TRACE) == 0 &&
1202 !uprobe_perf_filter(&tu->consumer, 0, current->mm))
1203 return UPROBE_HANDLER_REMOVE;
1204#endif
1205
1206 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1207 return 0;
1208
1209 dsize = __get_data_size(&tu->tp, regs);
1210 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1211
1212 ucb = uprobe_buffer_get();
1213 store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
1214
1140 if (tu->tp.flags & TP_FLAG_TRACE) 1215 if (tu->tp.flags & TP_FLAG_TRACE)
1141 ret |= uprobe_trace_func(tu, regs); 1216 ret |= uprobe_trace_func(tu, regs, ucb, dsize);
1142 1217
1143#ifdef CONFIG_PERF_EVENTS 1218#ifdef CONFIG_PERF_EVENTS
1144 if (tu->tp.flags & TP_FLAG_PROFILE) 1219 if (tu->tp.flags & TP_FLAG_PROFILE)
1145 ret |= uprobe_perf_func(tu, regs); 1220 ret |= uprobe_perf_func(tu, regs, ucb, dsize);
1146#endif 1221#endif
1222 uprobe_buffer_put(ucb);
1147 return ret; 1223 return ret;
1148} 1224}
1149 1225
@@ -1152,6 +1228,8 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con,
1152{ 1228{
1153 struct trace_uprobe *tu; 1229 struct trace_uprobe *tu;
1154 struct uprobe_dispatch_data udd; 1230 struct uprobe_dispatch_data udd;
1231 struct uprobe_cpu_buffer *ucb;
1232 int dsize, esize;
1155 1233
1156 tu = container_of(con, struct trace_uprobe, consumer); 1234 tu = container_of(con, struct trace_uprobe, consumer);
1157 1235
@@ -1160,13 +1238,23 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con,
1160 1238
1161 current->utask->vaddr = (unsigned long) &udd; 1239 current->utask->vaddr = (unsigned long) &udd;
1162 1240
1241 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1242 return 0;
1243
1244 dsize = __get_data_size(&tu->tp, regs);
1245 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1246
1247 ucb = uprobe_buffer_get();
1248 store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
1249
1163 if (tu->tp.flags & TP_FLAG_TRACE) 1250 if (tu->tp.flags & TP_FLAG_TRACE)
1164 uretprobe_trace_func(tu, func, regs); 1251 uretprobe_trace_func(tu, func, regs, ucb, dsize);
1165 1252
1166#ifdef CONFIG_PERF_EVENTS 1253#ifdef CONFIG_PERF_EVENTS
1167 if (tu->tp.flags & TP_FLAG_PROFILE) 1254 if (tu->tp.flags & TP_FLAG_PROFILE)
1168 uretprobe_perf_func(tu, func, regs); 1255 uretprobe_perf_func(tu, func, regs, ucb, dsize);
1169#endif 1256#endif
1257 uprobe_buffer_put(ucb);
1170 return 0; 1258 return 0;
1171} 1259}
1172 1260
@@ -1198,7 +1286,8 @@ static int register_uprobe_event(struct trace_uprobe *tu)
1198 ret = trace_add_event_call(call); 1286 ret = trace_add_event_call(call);
1199 1287
1200 if (ret) { 1288 if (ret) {
1201 pr_info("Failed to register uprobe event: %s\n", call->name); 1289 pr_info("Failed to register uprobe event: %s\n",
1290 ftrace_event_name(call));
1202 kfree(call->print_fmt); 1291 kfree(call->print_fmt);
1203 unregister_ftrace_event(&call->event); 1292 unregister_ftrace_event(&call->event);
1204 } 1293 }
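
The dispatcher refactoring that threads ucb and dsize through every handler has a single aim: decode the probe's arguments once. uprobe_dispatcher() and uretprobe_dispatcher() now borrow the per-cpu scratch buffer, fill it via store_trace_args(), and hand the same buffer and size to both the ftrace and the perf consumer before returning it. A user-space sketch of the flow, with one static buffer standing in for the per-cpu, mutex-guarded kernel one:

#include <stdio.h>
#include <string.h>

struct cpu_buffer { char buf[4096]; };

static struct cpu_buffer scratch;

static struct cpu_buffer *buffer_get(void)   { return &scratch; }
static void buffer_put(struct cpu_buffer *b) { (void)b; }

static void trace_consumer(const struct cpu_buffer *b, int dsize)
{
        printf("trace: %.*s\n", dsize, b->buf);     /* TP_FLAG_TRACE */
}

static void perf_consumer(const struct cpu_buffer *b, int dsize)
{
        printf("perf:  %.*s\n", dsize, b->buf);     /* TP_FLAG_PROFILE */
}

static void dispatcher(const char *args)
{
        struct cpu_buffer *ucb = buffer_get();
        int dsize = (int)strlen(args);

        memcpy(ucb->buf, args, dsize);  /* store_trace_args() analogue */
        trace_consumer(ucb, dsize);
        perf_consumer(ucb, dsize);
        buffer_put(ucb);
}

int main(void)
{
        dispatcher("pid=42 addr=0x400123");
        return 0;
}

Previously each print path fetched and stored the arguments itself, so an event attached to both consumers paid the decoding cost twice.
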