diff options
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r-- | kernel/trace/ftrace.c | 52 |
1 files changed, 34 insertions, 18 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 02bece4a99ea..eb11011b5292 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -98,6 +98,13 @@ struct ftrace_pid { | |||
98 | struct pid *pid; | 98 | struct pid *pid; |
99 | }; | 99 | }; |
100 | 100 | ||
101 | static bool ftrace_pids_enabled(void) | ||
102 | { | ||
103 | return !list_empty(&ftrace_pids); | ||
104 | } | ||
105 | |||
106 | static void ftrace_update_trampoline(struct ftrace_ops *ops); | ||
107 | |||
101 | /* | 108 | /* |
102 | * ftrace_disabled is set when an anomaly is discovered. | 109 | * ftrace_disabled is set when an anomaly is discovered. |
103 | * ftrace_disabled is much stronger than ftrace_enabled. | 110 | * ftrace_disabled is much stronger than ftrace_enabled. |
@@ -109,7 +116,6 @@ static DEFINE_MUTEX(ftrace_lock); | |||
109 | static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end; | 116 | static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end; |
110 | static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end; | 117 | static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end; |
111 | ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; | 118 | ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; |
112 | ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; | ||
113 | static struct ftrace_ops global_ops; | 119 | static struct ftrace_ops global_ops; |
114 | static struct ftrace_ops control_ops; | 120 | static struct ftrace_ops control_ops; |
115 | 121 | ||
@@ -183,14 +189,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip, | |||
183 | if (!test_tsk_trace_trace(current)) | 189 | if (!test_tsk_trace_trace(current)) |
184 | return; | 190 | return; |
185 | 191 | ||
186 | ftrace_pid_function(ip, parent_ip, op, regs); | 192 | op->saved_func(ip, parent_ip, op, regs); |
187 | } | ||
188 | |||
189 | static void set_ftrace_pid_function(ftrace_func_t func) | ||
190 | { | ||
191 | /* do not set ftrace_pid_function to itself! */ | ||
192 | if (func != ftrace_pid_func) | ||
193 | ftrace_pid_function = func; | ||
194 | } | 193 | } |
195 | 194 | ||
196 | /** | 195 | /** |
@@ -202,7 +201,6 @@ static void set_ftrace_pid_function(ftrace_func_t func) | |||
202 | void clear_ftrace_function(void) | 201 | void clear_ftrace_function(void) |
203 | { | 202 | { |
204 | ftrace_trace_function = ftrace_stub; | 203 | ftrace_trace_function = ftrace_stub; |
205 | ftrace_pid_function = ftrace_stub; | ||
206 | } | 204 | } |
207 | 205 | ||
208 | static void control_ops_disable_all(struct ftrace_ops *ops) | 206 | static void control_ops_disable_all(struct ftrace_ops *ops) |
@@ -436,6 +434,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops) | |||
436 | } else | 434 | } else |
437 | add_ftrace_ops(&ftrace_ops_list, ops); | 435 | add_ftrace_ops(&ftrace_ops_list, ops); |
438 | 436 | ||
437 | /* Always save the function, and reset at unregistering */ | ||
438 | ops->saved_func = ops->func; | ||
439 | |||
440 | if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled()) | ||
441 | ops->func = ftrace_pid_func; | ||
442 | |||
439 | ftrace_update_trampoline(ops); | 443 | ftrace_update_trampoline(ops); |
440 | 444 | ||
441 | if (ftrace_enabled) | 445 | if (ftrace_enabled) |
@@ -463,15 +467,28 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) | |||
463 | if (ftrace_enabled) | 467 | if (ftrace_enabled) |
464 | update_ftrace_function(); | 468 | update_ftrace_function(); |
465 | 469 | ||
470 | ops->func = ops->saved_func; | ||
471 | |||
466 | return 0; | 472 | return 0; |
467 | } | 473 | } |
468 | 474 | ||
469 | static void ftrace_update_pid_func(void) | 475 | static void ftrace_update_pid_func(void) |
470 | { | 476 | { |
477 | bool enabled = ftrace_pids_enabled(); | ||
478 | struct ftrace_ops *op; | ||
479 | |||
471 | /* Only do something if we are tracing something */ | 480 | /* Only do something if we are tracing something */ |
472 | if (ftrace_trace_function == ftrace_stub) | 481 | if (ftrace_trace_function == ftrace_stub) |
473 | return; | 482 | return; |
474 | 483 | ||
484 | do_for_each_ftrace_op(op, ftrace_ops_list) { | ||
485 | if (op->flags & FTRACE_OPS_FL_PID) { | ||
486 | op->func = enabled ? ftrace_pid_func : | ||
487 | op->saved_func; | ||
488 | ftrace_update_trampoline(op); | ||
489 | } | ||
490 | } while_for_each_ftrace_op(op); | ||
491 | |||
475 | update_ftrace_function(); | 492 | update_ftrace_function(); |
476 | } | 493 | } |
477 | 494 | ||
@@ -1133,7 +1150,8 @@ static struct ftrace_ops global_ops = { | |||
1133 | .local_hash.filter_hash = EMPTY_HASH, | 1150 | .local_hash.filter_hash = EMPTY_HASH, |
1134 | INIT_OPS_HASH(global_ops) | 1151 | INIT_OPS_HASH(global_ops) |
1135 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | | 1152 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | |
1136 | FTRACE_OPS_FL_INITIALIZED, | 1153 | FTRACE_OPS_FL_INITIALIZED | |
1154 | FTRACE_OPS_FL_PID, | ||
1137 | }; | 1155 | }; |
1138 | 1156 | ||
1139 | /* | 1157 | /* |
@@ -5023,7 +5041,9 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops) | |||
5023 | 5041 | ||
5024 | static struct ftrace_ops global_ops = { | 5042 | static struct ftrace_ops global_ops = { |
5025 | .func = ftrace_stub, | 5043 | .func = ftrace_stub, |
5026 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, | 5044 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | |
5045 | FTRACE_OPS_FL_INITIALIZED | | ||
5046 | FTRACE_OPS_FL_PID, | ||
5027 | }; | 5047 | }; |
5028 | 5048 | ||
5029 | static int __init ftrace_nodyn_init(void) | 5049 | static int __init ftrace_nodyn_init(void) |
@@ -5080,11 +5100,6 @@ void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func) | |||
5080 | if (WARN_ON(tr->ops->func != ftrace_stub)) | 5100 | if (WARN_ON(tr->ops->func != ftrace_stub)) |
5081 | printk("ftrace ops had %pS for function\n", | 5101 | printk("ftrace ops had %pS for function\n", |
5082 | tr->ops->func); | 5102 | tr->ops->func); |
5083 | /* Only the top level instance does pid tracing */ | ||
5084 | if (!list_empty(&ftrace_pids)) { | ||
5085 | set_ftrace_pid_function(func); | ||
5086 | func = ftrace_pid_func; | ||
5087 | } | ||
5088 | } | 5103 | } |
5089 | tr->ops->func = func; | 5104 | tr->ops->func = func; |
5090 | tr->ops->private = tr; | 5105 | tr->ops->private = tr; |
@@ -5371,7 +5386,7 @@ static void *fpid_start(struct seq_file *m, loff_t *pos) | |||
5371 | { | 5386 | { |
5372 | mutex_lock(&ftrace_lock); | 5387 | mutex_lock(&ftrace_lock); |
5373 | 5388 | ||
5374 | if (list_empty(&ftrace_pids) && (!*pos)) | 5389 | if (!ftrace_pids_enabled() && (!*pos)) |
5375 | return (void *) 1; | 5390 | return (void *) 1; |
5376 | 5391 | ||
5377 | return seq_list_start(&ftrace_pids, *pos); | 5392 | return seq_list_start(&ftrace_pids, *pos); |
@@ -5610,6 +5625,7 @@ static struct ftrace_ops graph_ops = { | |||
5610 | .func = ftrace_stub, | 5625 | .func = ftrace_stub, |
5611 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | | 5626 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | |
5612 | FTRACE_OPS_FL_INITIALIZED | | 5627 | FTRACE_OPS_FL_INITIALIZED | |
5628 | FTRACE_OPS_FL_PID | | ||
5613 | FTRACE_OPS_FL_STUB, | 5629 | FTRACE_OPS_FL_STUB, |
5614 | #ifdef FTRACE_GRAPH_TRAMP_ADDR | 5630 | #ifdef FTRACE_GRAPH_TRAMP_ADDR |
5615 | .trampoline = FTRACE_GRAPH_TRAMP_ADDR, | 5631 | .trampoline = FTRACE_GRAPH_TRAMP_ADDR, |