Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--	kernel/trace/ftrace.c	215
1 file changed, 154 insertions(+), 61 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 72a0f81dc5a8..cd7f76d1eb86 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -85,6 +85,8 @@ int function_trace_stop __read_mostly;
 
 /* Current function tracing op */
 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
+/* What to set function_trace_op to */
+static struct ftrace_ops *set_function_trace_op;
 
 /* List for set_ftrace_pid's pids. */
 LIST_HEAD(ftrace_pids);
@@ -278,6 +280,29 @@ static void update_global_ops(void)
 	global_ops.func = func;
 }
 
+static void ftrace_sync(struct work_struct *work)
+{
+	/*
+	 * This function is just a stub to implement a hard force
+	 * of synchronize_sched(). This requires synchronizing
+	 * tasks even in userspace and idle.
+	 *
+	 * Yes, function tracing is rude.
+	 */
+}
+
+static void ftrace_sync_ipi(void *data)
+{
+	/* Probably not needed, but do it anyway */
+	smp_rmb();
+}
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static void update_function_graph_func(void);
+#else
+static inline void update_function_graph_func(void) { }
+#endif
+
 static void update_ftrace_function(void)
 {
 	ftrace_func_t func;
@@ -296,16 +321,61 @@ static void update_ftrace_function(void)
 	     !FTRACE_FORCE_LIST_FUNC)) {
 		/* Set the ftrace_ops that the arch callback uses */
 		if (ftrace_ops_list == &global_ops)
-			function_trace_op = ftrace_global_list;
+			set_function_trace_op = ftrace_global_list;
 		else
-			function_trace_op = ftrace_ops_list;
+			set_function_trace_op = ftrace_ops_list;
 		func = ftrace_ops_list->func;
 	} else {
 		/* Just use the default ftrace_ops */
-		function_trace_op = &ftrace_list_end;
+		set_function_trace_op = &ftrace_list_end;
 		func = ftrace_ops_list_func;
 	}
 
+	/* If there's no change, then do nothing more here */
+	if (ftrace_trace_function == func)
+		return;
+
+	update_function_graph_func();
+
+	/*
+	 * If we are using the list function, it doesn't care
+	 * about the function_trace_ops.
+	 */
+	if (func == ftrace_ops_list_func) {
+		ftrace_trace_function = func;
+		/*
+		 * Don't even bother setting function_trace_ops,
+		 * it would be racy to do so anyway.
+		 */
+		return;
+	}
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+	/*
+	 * For static tracing, we need to be a bit more careful.
+	 * The function change takes effect immediately. Thus,
+	 * we need to coordinate the setting of the function_trace_ops
+	 * with the setting of the ftrace_trace_function.
+	 *
+	 * Set the function to the list ops, which will call the
+	 * function we want, albeit indirectly, but it handles the
+	 * ftrace_ops and doesn't depend on function_trace_op.
+	 */
+	ftrace_trace_function = ftrace_ops_list_func;
+	/*
+	 * Make sure all CPUs see this. Yes this is slow, but static
+	 * tracing is slow and nasty to have enabled.
+	 */
+	schedule_on_each_cpu(ftrace_sync);
+	/* Now all cpus are using the list ops. */
+	function_trace_op = set_function_trace_op;
+	/* Make sure the function_trace_op is visible on all CPUs */
+	smp_wmb();
+	/* Nasty way to force a rmb on all cpus */
+	smp_call_function(ftrace_sync_ipi, NULL, 1);
+	/* OK, we are all set to update the ftrace_trace_function now! */
+#endif /* !CONFIG_DYNAMIC_FTRACE */
+
 	ftrace_trace_function = func;
 }
 
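The hunk above turns the static (!CONFIG_DYNAMIC_FTRACE) update into a publish-then-switch sequence: the new function_trace_op must be visible on every CPU before any CPU can start calling the new ftrace_trace_function that consumes it. Below is a minimal userspace analogue of that ordering, using C11 release/acquire atomics in place of smp_wmb() plus the forced-rmb IPI; all demo_* names are invented for the illustration and nothing here is kernel API (build with -pthread).

#include <pthread.h>
#include <stdatomic.h>
#include <assert.h>
#include <stdio.h>

struct demo_ops { const char *name; };

static struct demo_ops first_op  = { "first" };
static struct demo_ops second_op = { "second" };

typedef void (*demo_func_t)(struct demo_ops *op);

/* list-style callback: like ftrace_ops_list_func(), it ignores the op argument */
static void list_callback(struct demo_ops *op)
{
	(void)op;
}

/* direct callback: only correct when paired with second_op */
static void direct_callback(struct demo_ops *op)
{
	assert(op == &second_op);
}

static _Atomic(struct demo_ops *) demo_trace_op = &first_op;
static _Atomic(demo_func_t) demo_trace_func = list_callback;

static void *tracee(void *arg)
{
	for (int i = 0; i < 1000000; i++) {
		/* acquire pairs with the updater's release store below */
		demo_func_t func = atomic_load_explicit(&demo_trace_func,
							memory_order_acquire);
		struct demo_ops *op = atomic_load_explicit(&demo_trace_op,
							   memory_order_relaxed);
		func(op);
	}
	return NULL;
}

static void *updater(void *arg)
{
	/* publish the new op first ... */
	atomic_store_explicit(&demo_trace_op, &second_op,
			      memory_order_relaxed);
	/*
	 * ... then switch the callback with release semantics: any thread
	 * that observes direct_callback is guaranteed to also observe
	 * second_op, mirroring smp_wmb() plus forcing an rmb on all CPUs.
	 */
	atomic_store_explicit(&demo_trace_func, direct_callback,
			      memory_order_release);
	return NULL;
}

int main(void)
{
	pthread_t t, u;

	pthread_create(&t, NULL, tracee, NULL);
	pthread_create(&u, NULL, updater, NULL);
	pthread_join(t, NULL);
	pthread_join(u, NULL);
	puts("no callback ever saw a stale op");
	return 0;
}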
@@ -410,17 +480,6 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	return 0;
 }
 
-static void ftrace_sync(struct work_struct *work)
-{
-	/*
-	 * This function is just a stub to implement a hard force
-	 * of synchronize_sched(). This requires synchronizing
-	 * tasks even in userspace and idle.
-	 *
-	 * Yes, function tracing is rude.
-	 */
-}
-
 static int __unregister_ftrace_function(struct ftrace_ops *ops)
 {
 	int ret;
@@ -439,20 +498,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
 		ret = remove_ftrace_list_ops(&ftrace_control_list,
 					     &control_ops, ops);
-		if (!ret) {
-			/*
-			 * The ftrace_ops is now removed from the list,
-			 * so there'll be no new users. We must ensure
-			 * all current users are done before we free
-			 * the control data.
-			 * Note synchronize_sched() is not enough, as we
-			 * use preempt_disable() to do RCU, but the function
-			 * tracer can be called where RCU is not active
-			 * (before user_exit()).
-			 */
-			schedule_on_each_cpu(ftrace_sync);
-			control_ops_free(ops);
-		}
 	} else
 		ret = remove_ftrace_ops(&ftrace_ops_list, ops);
 
@@ -462,17 +507,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	if (ftrace_enabled)
 		update_ftrace_function();
 
-	/*
-	 * Dynamic ops may be freed, we must make sure that all
-	 * callers are done before leaving this function.
-	 *
-	 * Again, normal synchronize_sched() is not good enough.
-	 * We need to do a hard force of sched synchronization.
-	 */
-	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
-		schedule_on_each_cpu(ftrace_sync);
-
-
 	return 0;
 }
 
@@ -1082,19 +1116,6 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
 
 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
 
-loff_t
-ftrace_filter_lseek(struct file *file, loff_t offset, int whence)
-{
-	loff_t ret;
-
-	if (file->f_mode & FMODE_READ)
-		ret = seq_lseek(file, offset, whence);
-	else
-		file->f_pos = ret = 1;
-
-	return ret;
-}
-
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
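The lseek helper removed here is not lost: the hunks further down switch every .llseek user over to tracing_lseek(), which is not defined in this file's diff. Assuming the rename simply moves the removed body into a shared helper, it would look roughly like the sketch below; the point of the write-side branch is that a write-only filter file has no meaningful seek position, so it is pinned to a fixed value.

/* Sketch only: the real tracing_lseek() is defined outside ftrace.c and may
 * differ; the body below is taken verbatim from the helper removed above. */
loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, whence);
	else
		file->f_pos = ret = 1;

	return ret;
}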
@@ -1992,8 +2013,14 @@ void ftrace_modify_all_code(int command)
 	else if (command & FTRACE_DISABLE_CALLS)
 		ftrace_replace_code(0);
 
-	if (update && ftrace_trace_function != ftrace_ops_list_func)
+	if (update && ftrace_trace_function != ftrace_ops_list_func) {
+		function_trace_op = set_function_trace_op;
+		smp_wmb();
+		/* If irqs are disabled, we are in stop machine */
+		if (!irqs_disabled())
+			smp_call_function(ftrace_sync_ipi, NULL, 1);
 		ftrace_update_ftrace_func(ftrace_trace_function);
+	}
 
 	if (command & FTRACE_START_FUNC_RET)
 		ftrace_enable_ftrace_graph_caller();
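The guard above skips the op hand-off entirely when the installed callback is the list dispatcher, because a dispatcher in the style of ftrace_ops_list_func() walks the registered ops itself and never consumes the single function_trace_op pointer. A stripped-down, non-kernel illustration of that dispatch style follows; all demo_* names are made up for the example.

#include <stdio.h>

struct demo_ops {
	void (*func)(unsigned long ip, struct demo_ops *op);
	struct demo_ops *next;
};

/* head of the registered-ops list, analogous to ftrace_ops_list */
static struct demo_ops *demo_ops_list;

/*
 * List-style dispatcher: the per-call "op" argument is ignored and every
 * registered ops is invoked, so switching to this dispatcher needs no
 * coordination with the single current-op pointer.
 */
static void demo_ops_list_func(unsigned long ip, struct demo_ops *ignored)
{
	(void)ignored;
	for (struct demo_ops *op = demo_ops_list; op; op = op->next)
		op->func(ip, op);
}

static void demo_callback_a(unsigned long ip, struct demo_ops *op)
{
	(void)op;
	printf("A traced %#lx\n", ip);
}

static void demo_callback_b(unsigned long ip, struct demo_ops *op)
{
	(void)op;
	printf("B traced %#lx\n", ip);
}

int main(void)
{
	struct demo_ops a = { demo_callback_a, NULL };
	struct demo_ops b = { demo_callback_b, &a };

	demo_ops_list = &b;			/* "register" b, then a */
	demo_ops_list_func(0x1234, NULL);	/* both callbacks run */
	return 0;
}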
@@ -2156,10 +2183,41 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 		command |= FTRACE_UPDATE_TRACE_FUNC;
 	}
 
-	if (!command || !ftrace_enabled)
+	if (!command || !ftrace_enabled) {
+		/*
+		 * If these are control ops, they still need their
+		 * per_cpu field freed. Since function tracing is
+		 * not currently active, we can just free them
+		 * without synchronizing all CPUs.
+		 */
+		if (ops->flags & FTRACE_OPS_FL_CONTROL)
+			control_ops_free(ops);
 		return 0;
+	}
 
 	ftrace_run_update_code(command);
+
+	/*
+	 * Dynamic ops may be freed, we must make sure that all
+	 * callers are done before leaving this function.
+	 * The same goes for freeing the per_cpu data of the control
+	 * ops.
+	 *
+	 * Again, normal synchronize_sched() is not good enough.
+	 * We need to do a hard force of sched synchronization.
+	 * This is because we use preempt_disable() to do RCU, but
+	 * the function tracers can be called where RCU is not watching
+	 * (like before user_exit()). We cannot rely on the RCU
+	 * infrastructure to do the synchronization, thus we must do it
+	 * ourselves.
+	 */
+	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
+		schedule_on_each_cpu(ftrace_sync);
+
+		if (ops->flags & FTRACE_OPS_FL_CONTROL)
+			control_ops_free(ops);
+	}
+
 	return 0;
 }
 
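The teardown above leans on schedule_on_each_cpu(ftrace_sync) as a hard substitute for synchronize_sched(): the callbacks run with only preempt_disable() for protection and can run where RCU is not watching, so the only safe point to free the ops is after every CPU has passed through the scheduler. The toy program below sketches the same unlink, wait-for-a-quiescent-pass, then free pattern in userspace with C11 atomics; it illustrates the idea, not the kernel mechanism, and every name in it is invented.

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>
#include <unistd.h>

#define NR_READERS 4

struct demo_ops {
	void (*func)(void);
};

static void demo_func(void) { }

static _Atomic(struct demo_ops *) current_ops;
static atomic_ulong quiescent[NR_READERS];
static atomic_int stop;

/* reader: calls the ops with no locking, then passes a quiescent point,
 * the analogue of a CPU going through the scheduler */
static void *reader(void *arg)
{
	long id = (long)arg;

	while (!atomic_load(&stop)) {
		struct demo_ops *ops = atomic_load(&current_ops);

		if (ops)
			ops->func();
		atomic_fetch_add(&quiescent[id], 1);
	}
	return NULL;
}

/* analogue of schedule_on_each_cpu(ftrace_sync): return only after every
 * reader has passed at least one quiescent point since the snapshot */
static void wait_for_readers(void)
{
	unsigned long snap[NR_READERS];
	int i;

	for (i = 0; i < NR_READERS; i++)
		snap[i] = atomic_load(&quiescent[i]);
	for (i = 0; i < NR_READERS; i++)
		while (atomic_load(&quiescent[i]) == snap[i])
			usleep(1000);
}

int main(void)
{
	pthread_t tid[NR_READERS];
	struct demo_ops *ops = malloc(sizeof(*ops));
	long i;

	ops->func = demo_func;
	atomic_store(&current_ops, ops);

	for (i = 0; i < NR_READERS; i++)
		pthread_create(&tid[i], NULL, reader, (void *)i);

	usleep(10000);

	atomic_store(&current_ops, NULL);	/* unlink: no new callers */
	wait_for_readers();			/* wait out in-flight callers */
	free(ops);				/* only now is freeing safe */

	atomic_store(&stop, 1);
	for (i = 0; i < NR_READERS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}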
@@ -2739,7 +2797,7 @@ static void ftrace_filter_reset(struct ftrace_hash *hash)
  * routine, you can use ftrace_filter_write() for the write
  * routine if @flag has FTRACE_ITER_FILTER set, or
  * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
- * ftrace_filter_lseek() should be used as the lseek routine, and
+ * tracing_lseek() should be used as the lseek routine, and
  * release must call ftrace_regex_release().
  */
 int
@@ -3767,7 +3825,7 @@ static const struct file_operations ftrace_filter_fops = {
 	.open = ftrace_filter_open,
 	.read = seq_read,
 	.write = ftrace_filter_write,
-	.llseek = ftrace_filter_lseek,
+	.llseek = tracing_lseek,
 	.release = ftrace_regex_release,
 };
 
@@ -3775,7 +3833,7 @@ static const struct file_operations ftrace_notrace_fops = {
 	.open = ftrace_notrace_open,
 	.read = seq_read,
 	.write = ftrace_notrace_write,
-	.llseek = ftrace_filter_lseek,
+	.llseek = tracing_lseek,
 	.release = ftrace_regex_release,
 };
 
@@ -4038,7 +4096,7 @@ static const struct file_operations ftrace_graph_fops = {
 	.open = ftrace_graph_open,
 	.read = seq_read,
 	.write = ftrace_graph_write,
-	.llseek = ftrace_filter_lseek,
+	.llseek = tracing_lseek,
 	.release = ftrace_graph_release,
 };
 
@@ -4046,7 +4104,7 @@ static const struct file_operations ftrace_graph_notrace_fops = {
 	.open = ftrace_graph_notrace_open,
 	.read = seq_read,
 	.write = ftrace_graph_write,
-	.llseek = ftrace_filter_lseek,
+	.llseek = tracing_lseek,
 	.release = ftrace_graph_release,
 };
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
@@ -4719,7 +4777,7 @@ static const struct file_operations ftrace_pid_fops = {
 	.open = ftrace_pid_open,
 	.write = ftrace_pid_write,
 	.read = seq_read,
-	.llseek = ftrace_filter_lseek,
+	.llseek = tracing_lseek,
 	.release = ftrace_pid_release,
 };
 
@@ -4862,6 +4920,7 @@ int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
 trace_func_graph_ret_t ftrace_graph_return =
 			(trace_func_graph_ret_t)ftrace_stub;
 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
+static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
 
 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
@@ -5003,6 +5062,30 @@ static struct ftrace_ops fgraph_ops __read_mostly = {
 		 FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
+static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
+{
+	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
+		return 0;
+	return __ftrace_graph_entry(trace);
+}
+
+/*
+ * The function graph tracer should only trace the functions defined
+ * by set_ftrace_filter and set_ftrace_notrace. If another function
+ * tracer ops is registered, the graph tracer requires testing the
+ * function against the global ops, and not just trace any function
+ * that any ftrace_ops registered.
+ */
+static void update_function_graph_func(void)
+{
+	if (ftrace_ops_list == &ftrace_list_end ||
+	    (ftrace_ops_list == &global_ops &&
+	     global_ops.next == &ftrace_list_end))
+		ftrace_graph_entry = __ftrace_graph_entry;
+	else
+		ftrace_graph_entry = ftrace_graph_entry_test;
+}
+
 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 			trace_func_graph_ent_t entryfunc)
 {
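The pair added above is a filtered-dispatch indirection: callers always jump through ftrace_graph_entry, and when other ftrace_ops are registered that pointer is aimed at a test wrapper which checks the global_ops filter before chaining to the real handler kept in __ftrace_graph_entry. A stripped-down, self-contained illustration of the pattern follows; the demo_* names and the filter predicate are invented for the example and are not the kernel's.

#include <stdbool.h>
#include <stdio.h>

/* simplified stand-ins for the kernel types involved */
struct graph_ent { unsigned long func; };
typedef int (*graph_entry_t)(struct graph_ent *);

static int entry_stub(struct graph_ent *ent) { (void)ent; return 0; }

/* the handler the tracer actually registered */
static int my_entry(struct graph_ent *ent)
{
	printf("enter %#lx\n", ent->func);
	return 1;
}

/* hypothetical filter check standing in for ftrace_ops_test(&global_ops, ...) */
static bool demo_filter_match(unsigned long func)
{
	return (func & 1) == 0;		/* arbitrary demo predicate */
}

/* indirect slot (__ftrace_graph_entry) and public slot (ftrace_graph_entry) */
static graph_entry_t __demo_graph_entry = entry_stub;
static graph_entry_t demo_graph_entry = entry_stub;

/* analogue of ftrace_graph_entry_test(): filter, then chain to the handler */
static int demo_graph_entry_test(struct graph_ent *ent)
{
	if (!demo_filter_match(ent->func))
		return 0;
	return __demo_graph_entry(ent);
}

/* analogue of update_function_graph_func(): pick direct or filtered dispatch */
static void demo_update_graph_func(bool other_ops_registered)
{
	demo_graph_entry = other_ops_registered ? demo_graph_entry_test
						: __demo_graph_entry;
}

int main(void)
{
	struct graph_ent e1 = { .func = 0x1000 }, e2 = { .func = 0x1001 };

	/* register: set the indirect slot first, then decide the dispatch */
	__demo_graph_entry = my_entry;
	demo_graph_entry = demo_graph_entry_test;
	demo_update_graph_func(true);

	demo_graph_entry(&e1);	/* passes the filter, reaches my_entry */
	demo_graph_entry(&e2);	/* filtered out */

	demo_update_graph_func(false);
	demo_graph_entry(&e1);	/* direct call, no filter test */
	return 0;
}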
@@ -5027,7 +5110,16 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 	}
 
 	ftrace_graph_return = retfunc;
-	ftrace_graph_entry = entryfunc;
+
+	/*
+	 * Update the indirect function to the entryfunc, and the
+	 * function that gets called to the entry_test first. Then
+	 * call the update fgraph entry function to determine if
+	 * the entryfunc should be called directly or not.
+	 */
+	__ftrace_graph_entry = entryfunc;
+	ftrace_graph_entry = ftrace_graph_entry_test;
+	update_function_graph_func();
 
 	ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
 
@@ -5046,6 +5138,7 @@ void unregister_ftrace_graph(void)
 	ftrace_graph_active--;
 	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
 	ftrace_graph_entry = ftrace_graph_entry_stub;
+	__ftrace_graph_entry = ftrace_graph_entry_stub;
 	ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
 	unregister_pm_notifier(&ftrace_suspend_notifier);
 	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);