Diffstat (limited to 'kernel/trace/ftrace.c')
 kernel/trace/ftrace.c | 60 ++++++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 38 insertions(+), 22 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 92376aeac4a7..08aca65d709a 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -68,8 +68,12 @@
 #define INIT_OPS_HASH(opsname) \
         .func_hash = &opsname.local_hash, \
         .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
+#define ASSIGN_OPS_HASH(opsname, val) \
+        .func_hash = val, \
+        .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
 #else
 #define INIT_OPS_HASH(opsname)
+#define ASSIGN_OPS_HASH(opsname, val)
 #endif
 
 static struct ftrace_ops ftrace_list_end __read_mostly = {
@@ -4663,7 +4667,6 @@ void __init ftrace_init(void)
 static struct ftrace_ops global_ops = {
         .func = ftrace_stub,
         .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
-        INIT_OPS_HASH(global_ops)
 };
 
 static int __init ftrace_nodyn_init(void)
@@ -5197,6 +5200,17 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
+static struct ftrace_ops graph_ops = {
+        .func = ftrace_stub,
+        .flags = FTRACE_OPS_FL_RECURSION_SAFE |
+                 FTRACE_OPS_FL_INITIALIZED |
+                 FTRACE_OPS_FL_STUB,
+#ifdef FTRACE_GRAPH_TRAMP_ADDR
+        .trampoline = FTRACE_GRAPH_TRAMP_ADDR,
+#endif
+        ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
+};
+
 static int ftrace_graph_active;
 
 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
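
Note on the hunks above: INIT_OPS_HASH and ASSIGN_OPS_HASH work because a macro can expand into designated initializers in the middle of a struct initializer, which is how graph_ops gets wired to global_ops's filter hash at compile time. Below is a minimal userspace sketch of that pattern only; the struct layout, the integer regex_lock stand-in, and main() are illustrative assumptions, not the kernel's definitions.

#include <stdio.h>

struct hash { int regex_lock; };        /* stand-in for the kernel's hash + mutex */

struct ops {
        const char *name;
        struct hash local_hash;
        struct hash *func_hash;
};

/* Mirror INIT_OPS_HASH: point func_hash at the ops' own local_hash. */
#define INIT_OPS_HASH(opsname) \
        .func_hash = &opsname.local_hash, \
        .local_hash.regex_lock = 0,

/* Mirror ASSIGN_OPS_HASH: point func_hash at a hash owned elsewhere. */
#define ASSIGN_OPS_HASH(opsname, val) \
        .func_hash = (val), \
        .local_hash.regex_lock = 0,

static struct ops global_ops = {
        .name = "global",
        INIT_OPS_HASH(global_ops)
};

static struct ops graph_ops = {
        .name = "graph",
        ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
};

int main(void)
{
        /* graph_ops filters with global_ops's hash, as in the patch. */
        printf("%s shares %s's hash: %d\n", graph_ops.name, global_ops.name,
               graph_ops.func_hash == &global_ops.local_hash);
        return 0;
}

The point of sharing the hash is that filters set through global_ops (for example via set_ftrace_filter) keep applying to the function graph tracer even though it now registers its own ftrace_ops.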
@@ -5359,12 +5373,28 @@ static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
  */
 static void update_function_graph_func(void)
 {
-        if (ftrace_ops_list == &ftrace_list_end ||
-            (ftrace_ops_list == &global_ops &&
-             global_ops.next == &ftrace_list_end))
-                ftrace_graph_entry = __ftrace_graph_entry;
-        else
+        struct ftrace_ops *op;
+        bool do_test = false;
+
+        /*
+         * The graph and global ops share the same set of functions
+         * to test. If any other ops is on the list, then
+         * the graph tracing needs to test if its the function
+         * it should call.
+         */
+        do_for_each_ftrace_op(op, ftrace_ops_list) {
+                if (op != &global_ops && op != &graph_ops &&
+                    op != &ftrace_list_end) {
+                        do_test = true;
+                        /* in double loop, break out with goto */
+                        goto out;
+                }
+        } while_for_each_ftrace_op(op);
+ out:
+        if (do_test)
                 ftrace_graph_entry = ftrace_graph_entry_test;
+        else
+                ftrace_graph_entry = __ftrace_graph_entry;
 }
 
 static struct notifier_block ftrace_suspend_notifier = {
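
The rewritten update_function_graph_func() above amounts to: walk the registered ops and enable the per-function entry test only when something other than global_ops or graph_ops is on the list. The following is a minimal userspace sketch of that decision; the plain for loop and the simplified struct stand in for the kernel's do_for_each_ftrace_op()/while_for_each_ftrace_op() walk and are assumptions, not the kernel macros.

#include <stdbool.h>
#include <stdio.h>

struct ftrace_ops { struct ftrace_ops *next; };

static struct ftrace_ops ftrace_list_end;                   /* list terminator */
static struct ftrace_ops global_ops = { .next = &ftrace_list_end };
static struct ftrace_ops graph_ops  = { .next = &global_ops };
static struct ftrace_ops *ftrace_ops_list = &graph_ops;     /* head of the list */

/* Return true when some ops other than graph/global is registered, i.e.
 * when the graph tracer must filter per function via ftrace_graph_entry_test. */
static bool need_entry_test(void)
{
        struct ftrace_ops *op;

        for (op = ftrace_ops_list; op != &ftrace_list_end; op = op->next) {
                if (op != &global_ops && op != &graph_ops)
                        return true;
        }
        return false;
}

int main(void)
{
        struct ftrace_ops other = { .next = NULL };

        printf("graph+global only -> test needed? %d\n", need_entry_test());

        /* Push a third ops onto the head, as registering another tracer would. */
        other.next = ftrace_ops_list;
        ftrace_ops_list = &other;
        printf("extra ops on list  -> test needed? %d\n", need_entry_test());
        return 0;
}

Expected output is 0 for the first line and 1 for the second, once the extra ops is on the list.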
@@ -5405,16 +5435,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
         ftrace_graph_entry = ftrace_graph_entry_test;
         update_function_graph_func();
 
-        /* Function graph doesn't use the .func field of global_ops */
-        global_ops.flags |= FTRACE_OPS_FL_STUB;
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-        /* Optimize function graph calling (if implemented by arch) */
-        if (FTRACE_GRAPH_TRAMP_ADDR != 0)
-                global_ops.trampoline = FTRACE_GRAPH_TRAMP_ADDR;
-#endif
-
-        ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
+        ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
 
 out:
         mutex_unlock(&ftrace_lock);
@@ -5432,12 +5453,7 @@ void unregister_ftrace_graph(void)
         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
         ftrace_graph_entry = ftrace_graph_entry_stub;
         __ftrace_graph_entry = ftrace_graph_entry_stub;
-        ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
-        global_ops.flags &= ~FTRACE_OPS_FL_STUB;
-#ifdef CONFIG_DYNAMIC_FTRACE
-        if (FTRACE_GRAPH_TRAMP_ADDR != 0)
-                global_ops.trampoline = 0;
-#endif
+        ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
         unregister_pm_notifier(&ftrace_suspend_notifier);
         unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
 