Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/blktrace.c              |   3
-rw-r--r--  kernel/trace/ftrace.c                | 162
-rw-r--r--  kernel/trace/trace.c                 | 187
-rw-r--r--  kernel/trace/trace.h                 |  38
-rw-r--r--  kernel/trace/trace_events.c          |  30
-rw-r--r--  kernel/trace/trace_functions.c       | 143
-rw-r--r--  kernel/trace/trace_functions_graph.c |   3
-rw-r--r--  kernel/trace/trace_irqsoff.c         |  10
-rw-r--r--  kernel/trace/trace_kprobe.c          |  17
-rw-r--r--  kernel/trace/trace_nop.c             |   5
-rw-r--r--  kernel/trace/trace_output.c          |  31
-rw-r--r--  kernel/trace/trace_probe.h           |  17
-rw-r--r--  kernel/trace/trace_sched_wakeup.c    |  10
-rw-r--r--  kernel/trace/trace_stack.c           |   3
-rw-r--r--  kernel/trace/trace_uprobe.c          | 191
15 files changed, 604 insertions, 246 deletions
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 4f3a3c03eadb..c1bd4ada2a04 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1429,7 +1429,8 @@ static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
 	return print_one_line(iter, true);
 }
 
-static int blk_tracer_set_flag(u32 old_flags, u32 bit, int set)
+static int
+blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
 	/* don't output context-info for blk_classic output */
 	if (bit == TRACE_BLK_OPT_CLASSIC) {
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index cd7f76d1eb86..1fd4b9479210 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -237,14 +237,13 @@ static int control_ops_alloc(struct ftrace_ops *ops)
 	return 0;
 }
 
-static void control_ops_free(struct ftrace_ops *ops)
-{
-	free_percpu(ops->disabled);
-}
-
 static void update_global_ops(void)
 {
-	ftrace_func_t func;
+	ftrace_func_t func = ftrace_global_list_func;
+	void *private = NULL;
+
+	/* The list has its own recursion protection. */
+	global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
 
 	/*
 	 * If there's only one function registered, then call that
@@ -254,23 +253,17 @@ static void update_global_ops(void)
 	if (ftrace_global_list == &ftrace_list_end ||
 	    ftrace_global_list->next == &ftrace_list_end) {
 		func = ftrace_global_list->func;
+		private = ftrace_global_list->private;
 		/*
 		 * As we are calling the function directly.
 		 * If it does not have recursion protection,
 		 * the function_trace_op needs to be updated
 		 * accordingly.
 		 */
-		if (ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE)
-			global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
-		else
+		if (!(ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE))
 			global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
-	} else {
-		func = ftrace_global_list_func;
-		/* The list has its own recursion protection. */
-		global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
 	}
 
-
 	/* If we filter on pids, update to use the pid function */
 	if (!list_empty(&ftrace_pids)) {
 		set_ftrace_pid_function(func);
@@ -278,6 +271,7 @@ static void update_global_ops(void)
 	}
 
 	global_ops.func = func;
+	global_ops.private = private;
 }
 
 static void ftrace_sync(struct work_struct *work)
@@ -437,6 +431,9 @@ static int remove_ftrace_list_ops(struct ftrace_ops **list,
 
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
+	if (ops->flags & FTRACE_OPS_FL_DELETED)
+		return -EINVAL;
+
 	if (FTRACE_WARN_ON(ops == &global_ops))
 		return -EINVAL;
 
@@ -1172,8 +1169,6 @@ struct ftrace_page {
 	int			size;
 };
 
-static struct ftrace_page	*ftrace_new_pgs;
-
 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
 
@@ -1560,7 +1555,7 @@ unsigned long ftrace_location(unsigned long ip)
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
-int ftrace_text_reserved(void *start, void *end)
+int ftrace_text_reserved(const void *start, const void *end)
 {
 	unsigned long ret;
 
@@ -1994,6 +1989,7 @@ int __weak ftrace_arch_code_modify_post_process(void)
 void ftrace_modify_all_code(int command)
 {
 	int update = command & FTRACE_UPDATE_TRACE_FUNC;
+	int err = 0;
 
 	/*
 	 * If the ftrace_caller calls a ftrace_ops func directly,
@@ -2005,8 +2001,11 @@
 	 * to make sure the ops are having the right functions
 	 * traced.
 	 */
-	if (update)
-		ftrace_update_ftrace_func(ftrace_ops_list_func);
+	if (update) {
+		err = ftrace_update_ftrace_func(ftrace_ops_list_func);
+		if (FTRACE_WARN_ON(err))
+			return;
+	}
 
 	if (command & FTRACE_UPDATE_CALLS)
 		ftrace_replace_code(1);
@@ -2019,13 +2018,16 @@
 	/* If irqs are disabled, we are in stop machine */
 	if (!irqs_disabled())
 		smp_call_function(ftrace_sync_ipi, NULL, 1);
-	ftrace_update_ftrace_func(ftrace_trace_function);
+	err = ftrace_update_ftrace_func(ftrace_trace_function);
+	if (FTRACE_WARN_ON(err))
+		return;
 	}
 
 	if (command & FTRACE_START_FUNC_RET)
-		ftrace_enable_ftrace_graph_caller();
+		err = ftrace_enable_ftrace_graph_caller();
 	else if (command & FTRACE_STOP_FUNC_RET)
-		ftrace_disable_ftrace_graph_caller();
+		err = ftrace_disable_ftrace_graph_caller();
+	FTRACE_WARN_ON(err);
 }
 
 static int __ftrace_modify_code(void *data)
@@ -2093,6 +2095,11 @@ static ftrace_func_t saved_ftrace_func;
 static int ftrace_start_up;
 static int global_start_up;
 
+static void control_ops_free(struct ftrace_ops *ops)
+{
+	free_percpu(ops->disabled);
+}
+
 static void ftrace_startup_enable(int command)
 {
 	if (saved_ftrace_func != ftrace_trace_function) {
@@ -2244,7 +2251,6 @@ static void ftrace_shutdown_sysctl(void)
 }
 
 static cycle_t		ftrace_update_time;
-static unsigned long	ftrace_update_cnt;
 unsigned long		ftrace_update_tot_cnt;
 
 static inline int ops_traces_mod(struct ftrace_ops *ops)
@@ -2300,11 +2306,12 @@ static int referenced_filters(struct dyn_ftrace *rec)
 	return cnt;
 }
 
-static int ftrace_update_code(struct module *mod)
+static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
 {
 	struct ftrace_page *pg;
 	struct dyn_ftrace *p;
 	cycle_t start, stop;
+	unsigned long update_cnt = 0;
 	unsigned long ref = 0;
 	bool test = false;
 	int i;
@@ -2330,9 +2337,8 @@ static int ftrace_update_code(struct module *mod)
 	}
 
 	start = ftrace_now(raw_smp_processor_id());
-	ftrace_update_cnt = 0;
 
-	for (pg = ftrace_new_pgs; pg; pg = pg->next) {
+	for (pg = new_pgs; pg; pg = pg->next) {
 
 		for (i = 0; i < pg->index; i++) {
 			int cnt = ref;
@@ -2353,7 +2359,7 @@ static int ftrace_update_code(struct module *mod)
 			if (!ftrace_code_disable(mod, p))
 				break;
 
-			ftrace_update_cnt++;
+			update_cnt++;
 
 			/*
 			 * If the tracing is enabled, go ahead and enable the record.
@@ -2372,11 +2378,9 @@ static int ftrace_update_code(struct module *mod)
 		}
 	}
 
-	ftrace_new_pgs = NULL;
-
 	stop = ftrace_now(raw_smp_processor_id());
 	ftrace_update_time = stop - start;
-	ftrace_update_tot_cnt += ftrace_update_cnt;
+	ftrace_update_tot_cnt += update_cnt;
 
 	return 0;
 }
@@ -2468,22 +2472,6 @@ ftrace_allocate_pages(unsigned long num_to_init)
 	return NULL;
 }
 
-static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
-{
-	int cnt;
-
-	if (!num_to_init) {
-		pr_info("ftrace: No functions to be traced?\n");
-		return -1;
-	}
-
-	cnt = num_to_init / ENTRIES_PER_PAGE;
-	pr_info("ftrace: allocating %ld entries in %d pages\n",
-		num_to_init, cnt + 1);
-
-	return 0;
-}
-
 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
 
 struct ftrace_iterator {
@@ -2871,7 +2859,9 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 static int
 ftrace_filter_open(struct inode *inode, struct file *file)
 {
-	return ftrace_regex_open(&global_ops,
+	struct ftrace_ops *ops = inode->i_private;
+
+	return ftrace_regex_open(ops,
 			FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
 			inode, file);
 }
@@ -2879,7 +2869,9 @@ ftrace_filter_open(struct inode *inode, struct file *file)
 static int
 ftrace_notrace_open(struct inode *inode, struct file *file)
 {
-	return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
+	struct ftrace_ops *ops = inode->i_private;
+
+	return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
 			inode, file);
 }
 
@@ -4109,6 +4101,36 @@ static const struct file_operations ftrace_graph_notrace_fops = {
 };
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
+void ftrace_create_filter_files(struct ftrace_ops *ops,
+				struct dentry *parent)
+{
+
+	trace_create_file("set_ftrace_filter", 0644, parent,
+			  ops, &ftrace_filter_fops);
+
+	trace_create_file("set_ftrace_notrace", 0644, parent,
+			  ops, &ftrace_notrace_fops);
+}
+
+/*
+ * The name "destroy_filter_files" is really a misnomer. Although
+ * in the future it may actually delete the files, this is
+ * really intended to make sure the ops passed in are disabled
+ * and that when this function returns, the caller is free to
+ * free the ops.
+ *
+ * The "destroy" name is only to match the "create" name that this
+ * should be paired with.
+ */
+void ftrace_destroy_filter_files(struct ftrace_ops *ops)
+{
+	mutex_lock(&ftrace_lock);
+	if (ops->flags & FTRACE_OPS_FL_ENABLED)
+		ftrace_shutdown(ops, 0);
+	ops->flags |= FTRACE_OPS_FL_DELETED;
+	mutex_unlock(&ftrace_lock);
+}
+
 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
 {
 
@@ -4118,11 +4140,7 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
 	trace_create_file("enabled_functions", 0444,
 			d_tracer, NULL, &ftrace_enabled_fops);
 
-	trace_create_file("set_ftrace_filter", 0644, d_tracer,
-			NULL, &ftrace_filter_fops);
-
-	trace_create_file("set_ftrace_notrace", 0644, d_tracer,
-			NULL, &ftrace_notrace_fops);
+	ftrace_create_filter_files(&global_ops, d_tracer);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	trace_create_file("set_graph_function", 0444, d_tracer,
@@ -4238,9 +4256,6 @@ static int ftrace_process_locs(struct module *mod,
 	/* Assign the last page to ftrace_pages */
 	ftrace_pages = pg;
 
-	/* These new locations need to be initialized */
-	ftrace_new_pgs = start_pg;
-
 	/*
 	 * We only need to disable interrupts on start up
 	 * because we are modifying code that an interrupt
@@ -4251,7 +4266,7 @@
 	 */
 	if (!mod)
 		local_irq_save(flags);
-	ftrace_update_code(mod);
+	ftrace_update_code(mod, start_pg);
 	if (!mod)
 		local_irq_restore(flags);
 	ret = 0;
@@ -4360,30 +4375,27 @@ struct notifier_block ftrace_module_exit_nb = {
 	.priority = INT_MIN,	/* Run after anything that can remove kprobes */
 };
 
-extern unsigned long __start_mcount_loc[];
-extern unsigned long __stop_mcount_loc[];
-
 void __init ftrace_init(void)
 {
-	unsigned long count, addr, flags;
+	extern unsigned long __start_mcount_loc[];
+	extern unsigned long __stop_mcount_loc[];
+	unsigned long count, flags;
 	int ret;
 
-	/* Keep the ftrace pointer to the stub */
-	addr = (unsigned long)ftrace_stub;
-
 	local_irq_save(flags);
-	ftrace_dyn_arch_init(&addr);
+	ret = ftrace_dyn_arch_init();
 	local_irq_restore(flags);
-
-	/* ftrace_dyn_arch_init places the return code in addr */
-	if (addr)
+	if (ret)
 		goto failed;
 
 	count = __stop_mcount_loc - __start_mcount_loc;
-
-	ret = ftrace_dyn_table_alloc(count);
-	if (ret)
+	if (!count) {
+		pr_info("ftrace: No functions to be traced?\n");
 		goto failed;
+	}
+
+	pr_info("ftrace: allocating %ld entries in %ld pages\n",
+		count, count / ENTRIES_PER_PAGE + 1);
 
 	last_ftrace_enabled = ftrace_enabled = 1;
 
@@ -4431,7 +4443,13 @@ static inline void ftrace_startup_enable(int command) { }
 		(ops)->flags |= FTRACE_OPS_FL_ENABLED;	\
 		___ret;					\
 	})
-# define ftrace_shutdown(ops, command)	__unregister_ftrace_function(ops)
+# define ftrace_shutdown(ops, command)				\
+	({							\
+		int ___ret = __unregister_ftrace_function(ops);	\
+		if (!___ret)					\
+			(ops)->flags &= ~FTRACE_OPS_FL_ENABLED;	\
+		___ret;						\
+	})
 
 # define ftrace_startup_sysctl()	do { } while (0)
 # define ftrace_shutdown_sysctl()	do { } while (0)
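
Taken together, the ftrace.c hunks above let an ftrace_ops own its filter files (the open handlers now take the ops from inode->i_private) and be shut down safely before it is freed. A minimal sketch of how a client of the new helpers might look; my_func, my_ops, and parent are illustrative names, not part of this patch:

	static void my_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct pt_regs *regs)
	{
		/* called for every function hit while my_ops is registered */
	}

	static struct ftrace_ops my_ops = {
		.func	= my_func,
		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
	};

	/* expose set_ftrace_filter/set_ftrace_notrace for this ops only */
	ftrace_create_filter_files(&my_ops, parent);

	/* ... and before freeing the ops: disable it and mark it
	   FTRACE_OPS_FL_DELETED so it can never be registered again */
	ftrace_destroy_filter_files(&my_ops);

The ftrace_shutdown() change in the !CONFIG_DYNAMIC_FTRACE case keeps the ENABLED flag accurate, which is exactly what ftrace_destroy_filter_files() tests before shutting the ops down.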
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 24c1f2382557..9be67c5e5b0f 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -73,7 +73,8 @@ static struct tracer_flags dummy_tracer_flags = {
 	.opts = dummy_tracer_opt
 };
 
-static int dummy_set_flag(u32 old_flags, u32 bit, int set)
+static int
+dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
 	return 0;
 }
@@ -118,7 +119,7 @@ enum ftrace_dump_mode ftrace_dump_on_oops;
 /* When set, tracing will stop when a WARN*() is hit */
 int __disable_trace_on_warning;
 
-static int tracing_set_tracer(const char *buf);
+static int tracing_set_tracer(struct trace_array *tr, const char *buf);
 
 #define MAX_TRACER_SIZE		100
 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
@@ -180,6 +181,17 @@ static int __init set_trace_boot_options(char *str)
 }
 __setup("trace_options=", set_trace_boot_options);
 
+static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
+static char *trace_boot_clock __initdata;
+
+static int __init set_trace_boot_clock(char *str)
+{
+	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
+	trace_boot_clock = trace_boot_clock_buf;
+	return 0;
+}
+__setup("trace_clock=", set_trace_boot_clock);
+
 
 unsigned long long ns2usecs(cycle_t nsec)
 {
@@ -1230,7 +1242,7 @@ int register_tracer(struct tracer *type)
 
 	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
 	/* Do we want this tracer to start on bootup? */
-	tracing_set_tracer(type->name);
+	tracing_set_tracer(&global_trace, type->name);
 	default_bootup_tracer = NULL;
 	/* disable other selftests, since this will break it. */
 	tracing_selftest_disabled = true;
@@ -3137,27 +3149,52 @@ static int tracing_open(struct inode *inode, struct file *file)
 	return ret;
 }
 
+/*
+ * Some tracers are not suitable for instance buffers.
+ * A tracer is always available for the global array (toplevel)
+ * or if it explicitly states that it is.
+ */
+static bool
+trace_ok_for_array(struct tracer *t, struct trace_array *tr)
+{
+	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
+}
+
+/* Find the next tracer that this trace array may use */
+static struct tracer *
+get_tracer_for_array(struct trace_array *tr, struct tracer *t)
+{
+	while (t && !trace_ok_for_array(t, tr))
+		t = t->next;
+
+	return t;
+}
+
 static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
+	struct trace_array *tr = m->private;
 	struct tracer *t = v;
 
 	(*pos)++;
 
 	if (t)
-		t = t->next;
+		t = get_tracer_for_array(tr, t->next);
 
 	return t;
 }
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
+	struct trace_array *tr = m->private;
 	struct tracer *t;
 	loff_t l = 0;
 
 	mutex_lock(&trace_types_lock);
-	for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
-		;
+
+	t = get_tracer_for_array(tr, trace_types);
+	for (; t && l < *pos; t = t_next(m, t, &l))
+		;
 
 	return t;
 }
@@ -3192,10 +3229,21 @@ static const struct seq_operations show_traces_seq_ops = {
 
 static int show_traces_open(struct inode *inode, struct file *file)
 {
+	struct trace_array *tr = inode->i_private;
+	struct seq_file *m;
+	int ret;
+
 	if (tracing_disabled)
 		return -ENODEV;
 
-	return seq_open(file, &show_traces_seq_ops);
+	ret = seq_open(file, &show_traces_seq_ops);
+	if (ret)
+		return ret;
+
+	m = file->private_data;
+	m->private = tr;
+
+	return 0;
 }
 
 static ssize_t
@@ -3355,13 +3403,14 @@ static int tracing_trace_options_show(struct seq_file *m, void *v)
 	return 0;
 }
 
-static int __set_tracer_option(struct tracer *trace,
+static int __set_tracer_option(struct trace_array *tr,
 			       struct tracer_flags *tracer_flags,
 			       struct tracer_opt *opts, int neg)
 {
+	struct tracer *trace = tr->current_trace;
 	int ret;
 
-	ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
+	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
 	if (ret)
 		return ret;
 
@@ -3373,8 +3422,9 @@ static int __set_tracer_option(struct tracer *trace,
 }
 
 /* Try to assign a tracer specific option */
-static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
+static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
 {
+	struct tracer *trace = tr->current_trace;
 	struct tracer_flags *tracer_flags = trace->flags;
 	struct tracer_opt *opts = NULL;
 	int i;
@@ -3383,8 +3433,7 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
 		opts = &tracer_flags->opts[i];
 
 		if (strcmp(cmp, opts->name) == 0)
-			return __set_tracer_option(trace, trace->flags,
-						   opts, neg);
+			return __set_tracer_option(tr, trace->flags, opts, neg);
 	}
 
 	return -EINVAL;
@@ -3407,7 +3456,7 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
 
 	/* Give the tracer a chance to approve the change */
 	if (tr->current_trace->flag_changed)
-		if (tr->current_trace->flag_changed(tr->current_trace, mask, !!enabled))
+		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
 			return -EINVAL;
 
 	if (enabled)
@@ -3456,7 +3505,7 @@ static int trace_set_options(struct trace_array *tr, char *option)
 
 	/* If no option could be set, test the specific tracer options */
 	if (!trace_options[i])
-		ret = set_tracer_option(tr->current_trace, cmp, neg);
+		ret = set_tracer_option(tr, cmp, neg);
 
 	mutex_unlock(&trace_types_lock);
 
@@ -3885,10 +3934,26 @@ create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
 static void
 destroy_trace_option_files(struct trace_option_dentry *topts);
 
-static int tracing_set_tracer(const char *buf)
+/*
+ * Used to clear out the tracer before deletion of an instance.
+ * Must have trace_types_lock held.
+ */
+static void tracing_set_nop(struct trace_array *tr)
+{
+	if (tr->current_trace == &nop_trace)
+		return;
+
+	tr->current_trace->enabled--;
+
+	if (tr->current_trace->reset)
+		tr->current_trace->reset(tr);
+
+	tr->current_trace = &nop_trace;
+}
+
+static int tracing_set_tracer(struct trace_array *tr, const char *buf)
 {
 	static struct trace_option_dentry *topts;
-	struct trace_array *tr = &global_trace;
 	struct tracer *t;
 #ifdef CONFIG_TRACER_MAX_TRACE
 	bool had_max_tr;
@@ -3916,9 +3981,15 @@ static int tracing_set_tracer(const char *buf)
 	if (t == tr->current_trace)
 		goto out;
 
+	/* Some tracers are only allowed for the top level buffer */
+	if (!trace_ok_for_array(t, tr)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	trace_branch_disable();
 
-	tr->current_trace->enabled = false;
+	tr->current_trace->enabled--;
 
 	if (tr->current_trace->reset)
 		tr->current_trace->reset(tr);
@@ -3941,9 +4012,11 @@ static int tracing_set_tracer(const char *buf)
 		free_snapshot(tr);
 	}
 #endif
-	destroy_trace_option_files(topts);
-
-	topts = create_trace_option_files(tr, t);
+	/* Currently, only the top instance has options */
+	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
+		destroy_trace_option_files(topts);
+		topts = create_trace_option_files(tr, t);
+	}
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 	if (t->use_max_tr && !had_max_tr) {
@@ -3960,7 +4033,7 @@ static int tracing_set_tracer(const char *buf)
 	}
 
 	tr->current_trace = t;
-	tr->current_trace->enabled = true;
+	tr->current_trace->enabled++;
 	trace_branch_enable(tr);
 out:
 	mutex_unlock(&trace_types_lock);
@@ -3972,6 +4045,7 @@ static ssize_t
 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
 			size_t cnt, loff_t *ppos)
 {
+	struct trace_array *tr = filp->private_data;
 	char buf[MAX_TRACER_SIZE+1];
 	int i;
 	size_t ret;
@@ -3991,7 +4065,7 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
 	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
 		buf[i] = 0;
 
-	err = tracing_set_tracer(buf);
+	err = tracing_set_tracer(tr, buf);
 	if (err)
 		return err;
 
@@ -4699,25 +4773,10 @@ static int tracing_clock_show(struct seq_file *m, void *v)
 	return 0;
 }
 
-static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
-				   size_t cnt, loff_t *fpos)
+static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
 {
-	struct seq_file *m = filp->private_data;
-	struct trace_array *tr = m->private;
-	char buf[64];
-	const char *clockstr;
 	int i;
 
-	if (cnt >= sizeof(buf))
-		return -EINVAL;
-
-	if (copy_from_user(&buf, ubuf, cnt))
-		return -EFAULT;
-
-	buf[cnt] = 0;
-
-	clockstr = strstrip(buf);
-
 	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
 		if (strcmp(trace_clocks[i].name, clockstr) == 0)
 			break;
@@ -4745,6 +4804,32 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
 
 	mutex_unlock(&trace_types_lock);
 
+	return 0;
+}
+
+static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
+				   size_t cnt, loff_t *fpos)
+{
+	struct seq_file *m = filp->private_data;
+	struct trace_array *tr = m->private;
+	char buf[64];
+	const char *clockstr;
+	int ret;
+
+	if (cnt >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	buf[cnt] = 0;
+
+	clockstr = strstrip(buf);
+
+	ret = tracing_set_clock(tr, clockstr);
+	if (ret)
+		return ret;
+
 	*fpos += cnt;
 
 	return cnt;
@@ -5705,7 +5790,7 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
 
 	if (!!(topt->flags->val & topt->opt->bit) != val) {
 		mutex_lock(&trace_types_lock);
-		ret = __set_tracer_option(topt->tr->current_trace, topt->flags,
+		ret = __set_tracer_option(topt->tr, topt->flags,
 					  topt->opt, !val);
 		mutex_unlock(&trace_types_lock);
 		if (ret)
@@ -6112,7 +6197,9 @@ static int instance_delete(const char *name)
 
 	list_del(&tr->list);
 
+	tracing_set_nop(tr);
 	event_trace_del_tracer(tr);
+	ftrace_destroy_function_files(tr);
 	debugfs_remove_recursive(tr->dir);
 	free_percpu(tr->trace_buffer.data);
 	ring_buffer_free(tr->trace_buffer.buffer);
@@ -6207,6 +6294,12 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
 {
 	int cpu;
 
+	trace_create_file("available_tracers", 0444, d_tracer,
+			tr, &show_traces_fops);
+
+	trace_create_file("current_tracer", 0644, d_tracer,
+			tr, &set_tracer_fops);
+
 	trace_create_file("tracing_cpumask", 0644, d_tracer,
 			  tr, &tracing_cpumask_fops);
 
@@ -6237,6 +6330,9 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
 	trace_create_file("tracing_on", 0644, d_tracer,
 			  tr, &rb_simple_fops);
 
+	if (ftrace_create_function_files(tr, d_tracer))
+		WARN(1, "Could not allocate function filter files");
+
 #ifdef CONFIG_TRACER_SNAPSHOT
 	trace_create_file("snapshot", 0644, d_tracer,
 			  tr, &snapshot_fops);
@@ -6259,12 +6355,6 @@ static __init int tracer_init_debugfs(void)
 
 	init_tracer_debugfs(&global_trace, d_tracer);
 
-	trace_create_file("available_tracers", 0444, d_tracer,
-			&global_trace, &show_traces_fops);
-
-	trace_create_file("current_tracer", 0644, d_tracer,
-			&global_trace, &set_tracer_fops);
-
 #ifdef CONFIG_TRACER_MAX_TRACE
 	trace_create_file("tracing_max_latency", 0644, d_tracer,
 			&tracing_max_latency, &tracing_max_lat_fops);
@@ -6527,6 +6617,13 @@ __init static int tracer_alloc_buffers(void)
 
 	trace_init_cmdlines();
 
+	if (trace_boot_clock) {
+		ret = tracing_set_clock(&global_trace, trace_boot_clock);
+		if (ret < 0)
+			pr_warning("Trace clock %s not defined, going back to default\n",
+				   trace_boot_clock);
+	}
+
 	/*
 	 * register_tracer() might reference current_trace, so it
 	 * needs to be set before we register anything. This is
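
The trace.c refactor above splits the clock-parsing logic out of tracing_clock_write() into tracing_set_clock(), which is what allows a clock to be chosen from tracer_alloc_buffers() before any tracer registers. For example (assuming "global" is one of the names in trace_clocks[]), booting with:

	trace_clock=global

on the kernel command line selects that clock for the top-level buffer; an unknown name only triggers the pr_warning() above and falls back to the default clock.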
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 02b592f2d4b7..ffc314b7e92b 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -210,6 +210,11 @@ struct trace_array {
 	struct list_head	events;
 	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
 	int			ref;
+#ifdef CONFIG_FUNCTION_TRACER
+	struct ftrace_ops	*ops;
+	/* function tracing enabled */
+	int			function_enabled;
+#endif
 };
 
 enum {
@@ -355,14 +360,16 @@ struct tracer {
 	void			(*print_header)(struct seq_file *m);
 	enum print_line_t	(*print_line)(struct trace_iterator *iter);
 	/* If you handled the flag setting, return 0 */
-	int			(*set_flag)(u32 old_flags, u32 bit, int set);
+	int			(*set_flag)(struct trace_array *tr,
+					    u32 old_flags, u32 bit, int set);
 	/* Return 0 if OK with change, else return non-zero */
-	int			(*flag_changed)(struct tracer *tracer,
+	int			(*flag_changed)(struct trace_array *tr,
 						u32 mask, int set);
 	struct tracer		*next;
 	struct tracer_flags	*flags;
+	int			enabled;
 	bool			print_max;
-	bool			enabled;
+	bool			allow_instances;
 #ifdef CONFIG_TRACER_MAX_TRACE
 	bool			use_max_tr;
 #endif
@@ -812,13 +819,36 @@ static inline int ftrace_trace_task(struct task_struct *task)
 	return test_tsk_trace_trace(task);
 }
 extern int ftrace_is_dead(void);
+int ftrace_create_function_files(struct trace_array *tr,
+				 struct dentry *parent);
+void ftrace_destroy_function_files(struct trace_array *tr);
 #else
 static inline int ftrace_trace_task(struct task_struct *task)
 {
 	return 1;
 }
 static inline int ftrace_is_dead(void) { return 0; }
-#endif
+static inline int
+ftrace_create_function_files(struct trace_array *tr,
+			     struct dentry *parent)
+{
+	return 0;
+}
+static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
+void ftrace_create_filter_files(struct ftrace_ops *ops,
+				struct dentry *parent);
+void ftrace_destroy_filter_files(struct ftrace_ops *ops);
+#else
+/*
+ * The ops parameter passed in is usually undefined.
+ * This must be a macro.
+ */
+#define ftrace_create_filter_files(ops, parent) do { } while (0)
+#define ftrace_destroy_filter_files(ops) do { } while (0)
+#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */
 
 int ftrace_event_is_function(struct ftrace_event_call *call);
 
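
The trace.h hunks change tracer->enabled from a bool into a counter (a tracer may now be live in several trace arrays at once) and pass the trace_array through the flag callbacks. A sketch of a tracer conforming to the updated interface; my_tracer and my_set_flag are illustrative names only:

	static int my_set_flag(struct trace_array *tr, u32 old_flags,
			       u32 bit, int set)
	{
		return 0;	/* 0 means the flag change was handled */
	}

	static struct tracer my_tracer __read_mostly = {
		.name		 = "my_tracer",
		.set_flag	 = my_set_flag,
		.allow_instances = true,	/* may run in instance buffers */
	};

Tracers that do not set allow_instances remain usable only in the top-level (global) trace array, per trace_ok_for_array() in trace.c.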
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 7b16d40bd64d..83a4378dc5e0 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -188,6 +188,36 @@ int trace_event_raw_init(struct ftrace_event_call *call)
 }
 EXPORT_SYMBOL_GPL(trace_event_raw_init);
 
+void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
+				  struct ftrace_event_file *ftrace_file,
+				  unsigned long len)
+{
+	struct ftrace_event_call *event_call = ftrace_file->event_call;
+
+	local_save_flags(fbuffer->flags);
+	fbuffer->pc = preempt_count();
+	fbuffer->ftrace_file = ftrace_file;
+
+	fbuffer->event =
+		trace_event_buffer_lock_reserve(&fbuffer->buffer, ftrace_file,
+						event_call->event.type, len,
+						fbuffer->flags, fbuffer->pc);
+	if (!fbuffer->event)
+		return NULL;
+
+	fbuffer->entry = ring_buffer_event_data(fbuffer->event);
+	return fbuffer->entry;
+}
+EXPORT_SYMBOL_GPL(ftrace_event_buffer_reserve);
+
+void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer)
+{
+	event_trigger_unlock_commit(fbuffer->ftrace_file, fbuffer->buffer,
+				    fbuffer->event, fbuffer->entry,
+				    fbuffer->flags, fbuffer->pc);
+}
+EXPORT_SYMBOL_GPL(ftrace_event_buffer_commit);
+
 int ftrace_event_reg(struct ftrace_event_call *call,
 		     enum trace_reg type, void *data)
 {
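
The new reserve/commit pair factors out the boilerplate that each generated event probe otherwise repeats. The expected calling pattern is roughly the following sketch; struct my_entry, its field, and where ftrace_file comes from are illustrative assumptions, not shown in this patch:

	struct ftrace_event_buffer fbuffer;
	struct my_entry *entry;

	entry = ftrace_event_buffer_reserve(&fbuffer, ftrace_file,
					    sizeof(*entry));
	if (!entry)
		return;

	entry->my_field = my_value;	/* fill in the event payload */

	ftrace_event_buffer_commit(&fbuffer);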
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 38fe1483c508..5b781d2be383 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -13,32 +13,106 @@
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
+#include <linux/slab.h>
 #include <linux/fs.h>
 
 #include "trace.h"
 
-/* function tracing enabled */
-static int ftrace_function_enabled;
+static void tracing_start_function_trace(struct trace_array *tr);
+static void tracing_stop_function_trace(struct trace_array *tr);
+static void
+function_trace_call(unsigned long ip, unsigned long parent_ip,
+		    struct ftrace_ops *op, struct pt_regs *pt_regs);
+static void
+function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
+			  struct ftrace_ops *op, struct pt_regs *pt_regs);
+static struct ftrace_ops trace_ops;
+static struct ftrace_ops trace_stack_ops;
+static struct tracer_flags func_flags;
+
+/* Our option */
+enum {
+	TRACE_FUNC_OPT_STACK	= 0x1,
+};
+
+static int allocate_ftrace_ops(struct trace_array *tr)
+{
+	struct ftrace_ops *ops;
+
+	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+	if (!ops)
+		return -ENOMEM;
 
-static struct trace_array *func_trace;
+	/* Currently only the non stack version is supported */
+	ops->func = function_trace_call;
+	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;
+
+	tr->ops = ops;
+	ops->private = tr;
+	return 0;
+}
+
+
+int ftrace_create_function_files(struct trace_array *tr,
+				 struct dentry *parent)
+{
+	int ret;
+
+	/* The top level array uses the "global_ops". */
+	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL)) {
+		ret = allocate_ftrace_ops(tr);
+		if (ret)
+			return ret;
+	}
+
+	ftrace_create_filter_files(tr->ops, parent);
+
+	return 0;
+}
 
-static void tracing_start_function_trace(void);
-static void tracing_stop_function_trace(void);
+void ftrace_destroy_function_files(struct trace_array *tr)
+{
+	ftrace_destroy_filter_files(tr->ops);
+	kfree(tr->ops);
+	tr->ops = NULL;
+}
 
 static int function_trace_init(struct trace_array *tr)
 {
-	func_trace = tr;
+	struct ftrace_ops *ops;
+
+	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
+		/* There's only one global tr */
+		if (!trace_ops.private) {
+			trace_ops.private = tr;
+			trace_stack_ops.private = tr;
+		}
+
+		if (func_flags.val & TRACE_FUNC_OPT_STACK)
+			ops = &trace_stack_ops;
+		else
+			ops = &trace_ops;
+		tr->ops = ops;
+	} else if (!tr->ops) {
+		/*
+		 * Instance trace_arrays get their ops allocated
+		 * at instance creation. Unless it failed
+		 * the allocation.
+		 */
+		return -ENOMEM;
+	}
+
 	tr->trace_buffer.cpu = get_cpu();
 	put_cpu();
 
 	tracing_start_cmdline_record();
-	tracing_start_function_trace();
+	tracing_start_function_trace(tr);
 	return 0;
 }
 
 static void function_trace_reset(struct trace_array *tr)
 {
-	tracing_stop_function_trace();
+	tracing_stop_function_trace(tr);
 	tracing_stop_cmdline_record();
 }
 
@@ -47,25 +121,18 @@ static void function_trace_start(struct trace_array *tr)
 	tracing_reset_online_cpus(&tr->trace_buffer);
 }
 
-/* Our option */
-enum {
-	TRACE_FUNC_OPT_STACK	= 0x1,
-};
-
-static struct tracer_flags func_flags;
-
 static void
 function_trace_call(unsigned long ip, unsigned long parent_ip,
 		    struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
-	struct trace_array *tr = func_trace;
+	struct trace_array *tr = op->private;
 	struct trace_array_cpu *data;
 	unsigned long flags;
 	int bit;
 	int cpu;
 	int pc;
 
-	if (unlikely(!ftrace_function_enabled))
+	if (unlikely(!tr->function_enabled))
 		return;
 
 	pc = preempt_count();
@@ -91,14 +158,14 @@ static void
 function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 			  struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
-	struct trace_array *tr = func_trace;
+	struct trace_array *tr = op->private;
 	struct trace_array_cpu *data;
 	unsigned long flags;
 	long disabled;
 	int cpu;
 	int pc;
 
-	if (unlikely(!ftrace_function_enabled))
+	if (unlikely(!tr->function_enabled))
 		return;
 
 	/*
@@ -128,7 +195,6 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 	local_irq_restore(flags);
 }
 
-
 static struct ftrace_ops trace_ops __read_mostly =
 {
 	.func = function_trace_call,
@@ -153,29 +219,21 @@ static struct tracer_flags func_flags = {
 	.opts = func_opts
 };
 
-static void tracing_start_function_trace(void)
+static void tracing_start_function_trace(struct trace_array *tr)
 {
-	ftrace_function_enabled = 0;
-
-	if (func_flags.val & TRACE_FUNC_OPT_STACK)
-		register_ftrace_function(&trace_stack_ops);
-	else
-		register_ftrace_function(&trace_ops);
-
-	ftrace_function_enabled = 1;
+	tr->function_enabled = 0;
+	register_ftrace_function(tr->ops);
+	tr->function_enabled = 1;
 }
 
-static void tracing_stop_function_trace(void)
+static void tracing_stop_function_trace(struct trace_array *tr)
 {
-	ftrace_function_enabled = 0;
-
-	if (func_flags.val & TRACE_FUNC_OPT_STACK)
-		unregister_ftrace_function(&trace_stack_ops);
-	else
-		unregister_ftrace_function(&trace_ops);
+	tr->function_enabled = 0;
+	unregister_ftrace_function(tr->ops);
 }
 
-static int func_set_flag(u32 old_flags, u32 bit, int set)
+static int
+func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
 	switch (bit) {
 	case TRACE_FUNC_OPT_STACK:
@@ -183,12 +241,14 @@ static int func_set_flag(u32 old_flags, u32 bit, int set)
 		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
 			break;
 
+		unregister_ftrace_function(tr->ops);
+
 		if (set) {
-			unregister_ftrace_function(&trace_ops);
-			register_ftrace_function(&trace_stack_ops);
+			tr->ops = &trace_stack_ops;
+			register_ftrace_function(tr->ops);
 		} else {
-			unregister_ftrace_function(&trace_stack_ops);
-			register_ftrace_function(&trace_ops);
+			tr->ops = &trace_ops;
+			register_ftrace_function(tr->ops);
 		}
 
 		break;
@@ -208,6 +268,7 @@ static struct tracer function_trace __tracer_data =
 	.wait_pipe	= poll_wait_pipe,
 	.flags		= &func_flags,
 	.set_flag	= func_set_flag,
+	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
 	.selftest	= trace_selftest_startup_function,
#endif
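
With ftrace_create_function_files() hooked into instance creation (see the trace.c hunks) and function_trace marked allow_instances, each tracing instance now gets its own function tracer with private filters. Assuming debugfs is mounted in the usual place, usage looks roughly like:

	mkdir /sys/kernel/debug/tracing/instances/foo
	echo function > /sys/kernel/debug/tracing/instances/foo/current_tracer
	echo 'kmalloc*' > /sys/kernel/debug/tracing/instances/foo/set_ftrace_filter

The instance's set_ftrace_filter reaches its private, kzalloc()ed ftrace_ops through inode->i_private instead of the global ops.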
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 0b99120d395c..deff11200261 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -1476,7 +1476,8 @@ void graph_trace_close(struct trace_iterator *iter)
 	}
 }
 
-static int func_graph_set_flag(u32 old_flags, u32 bit, int set)
+static int
+func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
 	if (bit == TRACE_GRAPH_PRINT_IRQS)
 		ftrace_graph_skip_irqs = !set;
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 887ef88b0bc7..8ff02cbb892f 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -160,7 +160,8 @@ static struct ftrace_ops trace_ops __read_mostly =
#endif /* CONFIG_FUNCTION_TRACER */
 
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
+static int
+irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
 	int cpu;
 
@@ -266,7 +267,8 @@ __trace_function(struct trace_array *tr,
#else
#define __trace_function trace_function
 
-static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
+static int
+irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
 	return -EINVAL;
 }
@@ -570,8 +572,10 @@ static void irqsoff_function_set(int set)
 		unregister_irqsoff_function(is_graph());
 }
 
-static int irqsoff_flag_changed(struct tracer *tracer, u32 mask, int set)
+static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
 {
+	struct tracer *tracer = tr->current_trace;
+
 	if (mask & TRACE_ITER_FUNCTION)
 		irqsoff_function_set(set);
 
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index bdbae450c13e..d021d21dd150 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -35,11 +35,6 @@ struct trace_kprobe {
 	struct trace_probe tp;
 };
 
-struct event_file_link {
-	struct ftrace_event_file *file;
-	struct list_head list;
-};
-
#define SIZEOF_TRACE_KPROBE(n) \
 	(offsetof(struct trace_kprobe, tp.args) + \
 	(sizeof(struct probe_arg) * (n)))
@@ -387,18 +382,6 @@ enable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file)
 	return ret;
 }
 
-static struct event_file_link *
-find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file)
-{
-	struct event_file_link *link;
-
-	list_for_each_entry(link, &tp->files, list)
-		if (link->file == file)
-			return link;
-
-	return NULL;
-}
-
 /*
  * Disable trace_probe
  * if the file is NULL, disable "perf" handler, or disable "trace" handler.
diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c
index 394f94417e2f..69a5cc94c01a 100644
--- a/kernel/trace/trace_nop.c
+++ b/kernel/trace/trace_nop.c
@@ -62,7 +62,7 @@ static void nop_trace_reset(struct trace_array *tr)
  * If you don't implement it, then the flag setting will be
  * automatically accepted.
  */
-static int nop_set_flag(u32 old_flags, u32 bit, int set)
+static int nop_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
 	/*
 	 * Note that you don't need to update nop_flags.val yourself.
@@ -96,6 +96,7 @@ struct tracer nop_trace __read_mostly =
 	.selftest = trace_selftest_startup_nop,
#endif
 	.flags = &nop_flags,
-	.set_flag = nop_set_flag
+	.set_flag = nop_set_flag,
+	.allow_instances = true,
 };
 
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index ed32284fbe32..ca0e79e2abaa 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -439,6 +439,37 @@ int ftrace_raw_output_prep(struct trace_iterator *iter,
 }
 EXPORT_SYMBOL(ftrace_raw_output_prep);
 
+static int ftrace_output_raw(struct trace_iterator *iter, char *name,
+			     char *fmt, va_list ap)
+{
+	struct trace_seq *s = &iter->seq;
+	int ret;
+
+	ret = trace_seq_printf(s, "%s: ", name);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	ret = trace_seq_vprintf(s, fmt, ap);
+
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	return TRACE_TYPE_HANDLED;
+}
+
+int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
+{
+	va_list ap;
+	int ret;
+
+	va_start(ap, fmt);
+	ret = ftrace_output_raw(iter, name, fmt, ap);
+	va_end(ap);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ftrace_output_call);
+
#ifdef CONFIG_KRETPROBES
 static inline const char *kretprobed(const char *name)
 {
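
For illustration only: ftrace_output_call() above is the classic varargs-wrapper shape, where the public function captures the va_list once and a single worker does all the formatting. A userspace sketch of the same shape, assuming nothing beyond the C standard library (output_call/output_raw are invented names):

#include <stdarg.h>
#include <stdio.h>

static int output_raw(FILE *s, const char *name, const char *fmt, va_list ap)
{
	if (fprintf(s, "%s: ", name) < 0)
		return -1;		/* "partial line" analogue */
	if (vfprintf(s, fmt, ap) < 0)
		return -1;
	return 0;			/* "handled" analogue */
}

static int output_call(FILE *s, const char *name, const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = output_raw(s, name, fmt, ap);
	va_end(ap);

	return ret;
}

int main(void)
{
	return output_call(stdout, "sched_switch", "prev=%d next=%d\n", 1, 2);
}
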
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index b73574a5f429..fb1ab5dfbd42 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -288,6 +288,11 @@ struct trace_probe {
 	struct probe_arg args[];
 };
 
+struct event_file_link {
+	struct ftrace_event_file *file;
+	struct list_head list;
+};
+
 static inline bool trace_probe_is_enabled(struct trace_probe *tp)
 {
 	return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE));
@@ -316,6 +321,18 @@ static inline int is_good_name(const char *name)
 	return 1;
 }
 
+static inline struct event_file_link *
+find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file)
+{
+	struct event_file_link *link;
+
+	list_for_each_entry(link, &tp->files, list)
+		if (link->file == file)
+			return link;
+
+	return NULL;
+}
+
 extern int traceprobe_parse_probe_arg(char *arg, ssize_t *size,
 		struct probe_arg *parg, bool is_return, bool is_kprobe);
 
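
For illustration only: find_event_file_link(), now shared via this header, is a plain linear scan over the probe's file links. A self-contained userspace analogue of the same lookup, with the kernel's list_head replaced by a hand-rolled singly linked list:

#include <stddef.h>
#include <stdio.h>

struct event_file { int id; };

struct file_link {
	struct event_file *file;
	struct file_link *next;
};

/* return the link node whose ->file matches, or NULL */
static struct file_link *
find_file_link(struct file_link *head, struct event_file *file)
{
	struct file_link *link;

	for (link = head; link; link = link->next)
		if (link->file == file)
			return link;

	return NULL;
}

int main(void)
{
	struct event_file a = { 1 }, b = { 2 };
	struct file_link lb = { &b, NULL };
	struct file_link la = { &a, &lb };

	printf("found id=%d\n", find_file_link(&la, &b)->file->id);
	return 0;
}
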
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 6e32635e5e57..e14da5e97a69 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -179,8 +179,10 @@ static void wakeup_function_set(int set)
 	unregister_wakeup_function(is_graph());
 }
 
-static int wakeup_flag_changed(struct tracer *tracer, u32 mask, int set)
+static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
 {
+	struct tracer *tracer = tr->current_trace;
+
 	if (mask & TRACE_ITER_FUNCTION)
 		wakeup_function_set(set);
 
@@ -209,7 +211,8 @@ static void stop_func_tracer(int graph)
 }
 
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int wakeup_set_flag(u32 old_flags, u32 bit, int set)
+static int
+wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
 
 	if (!(bit & TRACE_DISPLAY_GRAPH))
@@ -311,7 +314,8 @@ __trace_function(struct trace_array *tr,
#else
#define __trace_function trace_function
 
-static int wakeup_set_flag(u32 old_flags, u32 bit, int set)
+static int
+wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
 	return -EINVAL;
 }
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index e6be585cf06a..21b320e5d163 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -13,6 +13,7 @@
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>
+#include <linux/magic.h>
 
#include <asm/setup.h>
 
@@ -144,6 +145,8 @@ check_stack(unsigned long ip, unsigned long *stack)
 		i++;
 	}
 
+	BUG_ON(current != &init_task &&
+	       *(end_of_stack(current)) != STACK_END_MAGIC);
 out:
 	arch_spin_unlock(&max_stack_lock);
 	local_irq_restore(flags);
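
For illustration only: the new BUG_ON() checks a canary word planted at the far end of the task stack; if the magic value has been overwritten, the stack has overflowed into it. A userspace analogue of the same canary check (the constant matches the kernel's STACK_END_MAGIC, everything else is invented):

#include <assert.h>
#include <string.h>

#define STACK_END_MAGIC 0x57AC6E9DUL

struct task_stack {
	unsigned long end_magic;	/* lowest address, like end_of_stack() */
	char buf[8192];			/* grows downward toward end_magic */
};

static void check_stack(const struct task_stack *st)
{
	/* corresponds to the new BUG_ON(): magic overwritten => overflow */
	assert(st->end_magic == STACK_END_MAGIC);
}

int main(void)
{
	struct task_stack st;

	st.end_magic = STACK_END_MAGIC;
	memset(st.buf, 0, sizeof(st.buf));
	check_stack(&st);		/* passes while the canary is intact */
	return 0;
}
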
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 79e52d93860b..e4473367e7a4 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -260,6 +260,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
 		goto error;
 
 	INIT_LIST_HEAD(&tu->list);
+	INIT_LIST_HEAD(&tu->tp.files);
 	tu->consumer.handler = uprobe_dispatcher;
 	if (is_ret)
 		tu->consumer.ret_handler = uretprobe_dispatcher;
@@ -758,31 +759,32 @@ static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
 	mutex_unlock(&ucb->mutex);
 }
 
-static void uprobe_trace_print(struct trace_uprobe *tu,
-				unsigned long func, struct pt_regs *regs)
+static void __uprobe_trace_func(struct trace_uprobe *tu,
+				unsigned long func, struct pt_regs *regs,
+				struct uprobe_cpu_buffer *ucb, int dsize,
+				struct ftrace_event_file *ftrace_file)
 {
 	struct uprobe_trace_entry_head *entry;
 	struct ring_buffer_event *event;
 	struct ring_buffer *buffer;
-	struct uprobe_cpu_buffer *ucb;
 	void *data;
-	int size, dsize, esize;
+	int size, esize;
 	struct ftrace_event_call *call = &tu->tp.call;
 
-	dsize = __get_data_size(&tu->tp, regs);
-	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
+	WARN_ON(call != ftrace_file->event_call);
 
-	if (WARN_ON_ONCE(!uprobe_cpu_buffer || tu->tp.size + dsize > PAGE_SIZE))
+	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
 		return;
 
-	ucb = uprobe_buffer_get();
-	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
+	if (ftrace_trigger_soft_disabled(ftrace_file))
+		return;
 
+	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
 	size = esize + tu->tp.size + dsize;
-	event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
-						  size, 0, 0);
+	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
+						call->event.type, size, 0, 0);
 	if (!event)
-		goto out;
+		return;
 
 	entry = ring_buffer_event_data(event);
 	if (is_ret_probe(tu)) {
@@ -796,25 +798,36 @@ static void uprobe_trace_print(struct trace_uprobe *tu,
 
 	memcpy(data, ucb->buf, tu->tp.size + dsize);
 
-	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(buffer, event, 0, 0);
-
-out:
-	uprobe_buffer_put(ucb);
+	event_trigger_unlock_commit(ftrace_file, buffer, event, entry, 0, 0);
 }
 
 /* uprobe handler */
-static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs)
+static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
+			     struct uprobe_cpu_buffer *ucb, int dsize)
 {
-	if (!is_ret_probe(tu))
-		uprobe_trace_print(tu, 0, regs);
+	struct event_file_link *link;
+
+	if (is_ret_probe(tu))
+		return 0;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(link, &tu->tp.files, list)
+		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
+	rcu_read_unlock();
+
 	return 0;
 }
 
 static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
-				struct pt_regs *regs)
+				 struct pt_regs *regs,
+				 struct uprobe_cpu_buffer *ucb, int dsize)
 {
-	uprobe_trace_print(tu, func, regs);
+	struct event_file_link *link;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(link, &tu->tp.files, list)
+		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
+	rcu_read_unlock();
 }
 
 /* Event entry printers */
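
For illustration only: the reader side above walks the per-probe list of event files under rcu_read_lock() and emits one record per file. A userspace sketch of that shape in which the RCU primitives are stubbed out as no-ops purely to show the control flow (they are not a real RCU implementation, and all other names are invented):

#include <stdio.h>

#define rcu_read_lock()		do { } while (0)	/* stand-in, not real RCU */
#define rcu_read_unlock()	do { } while (0)

struct event_file { const char *name; struct event_file *next; };

static void emit_to_file(struct event_file *f, unsigned long func)
{
	printf("record func=%#lx -> %s\n", func, f->name);
}

static void trace_func(struct event_file *files, unsigned long func)
{
	struct event_file *f;

	rcu_read_lock();
	for (f = files; f; f = f->next)		/* list_for_each_entry_rcu() */
		emit_to_file(f, func);
	rcu_read_unlock();
}

int main(void)
{
	struct event_file b = { "instance_b", NULL };
	struct event_file a = { "top_level", &b };

	trace_func(&a, 0x1234);
	return 0;
}
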
@@ -861,12 +874,24 @@ typedef bool (*filter_func_t)(struct uprobe_consumer *self,
 				struct mm_struct *mm);
 
 static int
-probe_event_enable(struct trace_uprobe *tu, int flag, filter_func_t filter)
+probe_event_enable(struct trace_uprobe *tu, struct ftrace_event_file *file,
+		   filter_func_t filter)
 {
-	int ret = 0;
+	bool enabled = trace_probe_is_enabled(&tu->tp);
+	struct event_file_link *link = NULL;
+	int ret;
+
+	if (file) {
+		link = kmalloc(sizeof(*link), GFP_KERNEL);
+		if (!link)
+			return -ENOMEM;
 
-	if (trace_probe_is_enabled(&tu->tp))
-		return -EINTR;
+		link->file = file;
+		list_add_tail_rcu(&link->list, &tu->tp.files);
+
+		tu->tp.flags |= TP_FLAG_TRACE;
+	} else
+		tu->tp.flags |= TP_FLAG_PROFILE;
 
 	ret = uprobe_buffer_enable();
 	if (ret < 0)
@@ -874,24 +899,49 @@ probe_event_enable(struct trace_uprobe *tu, int flag, filter_func_t filter)
 
 	WARN_ON(!uprobe_filter_is_empty(&tu->filter));
 
-	tu->tp.flags |= flag;
+	if (enabled)
+		return 0;
+
 	tu->consumer.filter = filter;
 	ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
-	if (ret)
-		tu->tp.flags &= ~flag;
+	if (ret) {
+		if (file) {
+			list_del(&link->list);
+			kfree(link);
+			tu->tp.flags &= ~TP_FLAG_TRACE;
+		} else
+			tu->tp.flags &= ~TP_FLAG_PROFILE;
+	}
 
 	return ret;
 }
 
-static void probe_event_disable(struct trace_uprobe *tu, int flag)
+static void
+probe_event_disable(struct trace_uprobe *tu, struct ftrace_event_file *file)
 {
 	if (!trace_probe_is_enabled(&tu->tp))
 		return;
 
+	if (file) {
+		struct event_file_link *link;
+
+		link = find_event_file_link(&tu->tp, file);
+		if (!link)
+			return;
+
+		list_del_rcu(&link->list);
+		/* synchronize with u{,ret}probe_trace_func */
+		synchronize_sched();
+		kfree(link);
+
+		if (!list_empty(&tu->tp.files))
+			return;
+	}
+
 	WARN_ON(!uprobe_filter_is_empty(&tu->filter));
 
 	uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
-	tu->tp.flags &= ~flag;
+	tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;
 
 	uprobe_buffer_disable();
 }
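
For illustration only: probe_event_enable() above links the per-file node and sets the flag before registering, and unwinds exactly those steps if registration fails. A userspace sketch of that set-up/roll-back pairing, with register_probe() hard-wired to fail so the error path runs (all names invented):

#include <stdlib.h>

struct link { struct link *next; void *file; };

static struct link *links;
static unsigned int flags;
#define FLAG_TRACE 1

static int register_probe(void) { return -1; }	/* force the error path */

static int probe_enable(void *file)
{
	struct link *l = malloc(sizeof(*l));
	int ret;

	if (!l)
		return -1;

	/* set up first: link the node and mark the probe enabled */
	l->file = file;
	l->next = links;
	links = l;
	flags |= FLAG_TRACE;

	ret = register_probe();
	if (ret) {			/* roll back exactly what we set up */
		links = l->next;
		free(l);
		flags &= ~FLAG_TRACE;
	}
	return ret;
}

int main(void)
{
	int dummy;

	return probe_enable(&dummy) ? 0 : 1;	/* expect failure + rollback */
}
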
@@ -1014,31 +1064,24 @@ static bool uprobe_perf_filter(struct uprobe_consumer *uc,
 	return ret;
 }
 
-static void uprobe_perf_print(struct trace_uprobe *tu,
-				unsigned long func, struct pt_regs *regs)
+static void __uprobe_perf_func(struct trace_uprobe *tu,
+			       unsigned long func, struct pt_regs *regs,
+			       struct uprobe_cpu_buffer *ucb, int dsize)
 {
 	struct ftrace_event_call *call = &tu->tp.call;
 	struct uprobe_trace_entry_head *entry;
 	struct hlist_head *head;
-	struct uprobe_cpu_buffer *ucb;
 	void *data;
-	int size, dsize, esize;
+	int size, esize;
 	int rctx;
 
-	dsize = __get_data_size(&tu->tp, regs);
 	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
 
-	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
-		return;
-
 	size = esize + tu->tp.size + dsize;
 	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
 	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
 		return;
 
-	ucb = uprobe_buffer_get();
-	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
-
 	preempt_disable();
 	head = this_cpu_ptr(call->perf_events);
 	if (hlist_empty(head))
@@ -1068,46 +1111,49 @@ static void uprobe_perf_print(struct trace_uprobe *tu,
 	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
 out:
 	preempt_enable();
-	uprobe_buffer_put(ucb);
 }
 
 /* uprobe profile handler */
-static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs)
+static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
+			    struct uprobe_cpu_buffer *ucb, int dsize)
 {
 	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
 		return UPROBE_HANDLER_REMOVE;
 
 	if (!is_ret_probe(tu))
-		uprobe_perf_print(tu, 0, regs);
+		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
 	return 0;
 }
 
 static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
-				struct pt_regs *regs)
+				struct pt_regs *regs,
+				struct uprobe_cpu_buffer *ucb, int dsize)
 {
-	uprobe_perf_print(tu, func, regs);
+	__uprobe_perf_func(tu, func, regs, ucb, dsize);
 }
#endif /* CONFIG_PERF_EVENTS */
 
-static
-int trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type, void *data)
+static int
+trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type,
+		      void *data)
 {
 	struct trace_uprobe *tu = event->data;
+	struct ftrace_event_file *file = data;
 
 	switch (type) {
 	case TRACE_REG_REGISTER:
-		return probe_event_enable(tu, TP_FLAG_TRACE, NULL);
+		return probe_event_enable(tu, file, NULL);
 
 	case TRACE_REG_UNREGISTER:
-		probe_event_disable(tu, TP_FLAG_TRACE);
+		probe_event_disable(tu, file);
 		return 0;
 
#ifdef CONFIG_PERF_EVENTS
 	case TRACE_REG_PERF_REGISTER:
-		return probe_event_enable(tu, TP_FLAG_PROFILE, uprobe_perf_filter);
+		return probe_event_enable(tu, NULL, uprobe_perf_filter);
 
 	case TRACE_REG_PERF_UNREGISTER:
-		probe_event_disable(tu, TP_FLAG_PROFILE);
+		probe_event_disable(tu, NULL);
 		return 0;
 
 	case TRACE_REG_PERF_OPEN:
@@ -1127,8 +1173,11 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
 {
 	struct trace_uprobe *tu;
 	struct uprobe_dispatch_data udd;
+	struct uprobe_cpu_buffer *ucb;
+	int dsize, esize;
 	int ret = 0;
 
+
 	tu = container_of(con, struct trace_uprobe, consumer);
 	tu->nhit++;
 
@@ -1137,13 +1186,29 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
 
 	current->utask->vaddr = (unsigned long) &udd;
 
+#ifdef CONFIG_PERF_EVENTS
+	if ((tu->tp.flags & TP_FLAG_TRACE) == 0 &&
+	    !uprobe_perf_filter(&tu->consumer, 0, current->mm))
+		return UPROBE_HANDLER_REMOVE;
+#endif
+
+	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
+		return 0;
+
+	dsize = __get_data_size(&tu->tp, regs);
+	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
+
+	ucb = uprobe_buffer_get();
+	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
+
 	if (tu->tp.flags & TP_FLAG_TRACE)
-		ret |= uprobe_trace_func(tu, regs);
+		ret |= uprobe_trace_func(tu, regs, ucb, dsize);
 
#ifdef CONFIG_PERF_EVENTS
 	if (tu->tp.flags & TP_FLAG_PROFILE)
-		ret |= uprobe_perf_func(tu, regs);
+		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
+	uprobe_buffer_put(ucb);
 	return ret;
 }
 
@@ -1152,6 +1217,8 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con,
 {
 	struct trace_uprobe *tu;
 	struct uprobe_dispatch_data udd;
+	struct uprobe_cpu_buffer *ucb;
+	int dsize, esize;
 
 	tu = container_of(con, struct trace_uprobe, consumer);
 
@@ -1160,13 +1227,23 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con,
 
 	current->utask->vaddr = (unsigned long) &udd;
 
+	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
+		return 0;
+
+	dsize = __get_data_size(&tu->tp, regs);
+	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
+
+	ucb = uprobe_buffer_get();
+	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
+
 	if (tu->tp.flags & TP_FLAG_TRACE)
-		uretprobe_trace_func(tu, func, regs);
+		uretprobe_trace_func(tu, func, regs, ucb, dsize);
 
#ifdef CONFIG_PERF_EVENTS
 	if (tu->tp.flags & TP_FLAG_PROFILE)
-		uretprobe_perf_func(tu, func, regs);
+		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
+	uprobe_buffer_put(ucb);
 	return 0;
 }
 
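
For illustration only: after this patch the dispatchers fetch and fill the argument buffer once per hit, pass it to both the ftrace and perf consumers, and release it on every path, instead of each consumer doing its own get/fill/put. A userspace sketch of that hoisting (all names invented):

#include <stdio.h>

#define FLAG_TRACE	1
#define FLAG_PROFILE	2

static char *buffer_get(void)    { static char buf[64]; return buf; }
static void  buffer_put(char *b) { (void)b; }
static void  store_args(char *b) { snprintf(b, 64, "args"); }

static void trace_consumer(const char *b) { printf("trace: %s\n", b); }
static void perf_consumer(const char *b)  { printf("perf: %s\n", b); }

static int dispatcher(unsigned int flags)
{
	char *ucb = buffer_get();	/* one get per hit */

	store_args(ucb);		/* args decoded exactly once */

	if (flags & FLAG_TRACE)
		trace_consumer(ucb);
	if (flags & FLAG_PROFILE)
		perf_consumer(ucb);

	buffer_put(ucb);		/* matching put on every path */
	return 0;
}

int main(void)
{
	return dispatcher(FLAG_TRACE | FLAG_PROFILE);
}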