diff options
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/trace/ftrace.c | 50 |
1 file changed, 32 insertions(+), 18 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index fc21312dad6d..17b95a492948 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -490,16 +490,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
490 | } else if (ops->flags & FTRACE_OPS_FL_CONTROL) { | 490 | } else if (ops->flags & FTRACE_OPS_FL_CONTROL) { |
491 | ret = remove_ftrace_list_ops(&ftrace_control_list, | 491 | ret = remove_ftrace_list_ops(&ftrace_control_list, |
492 | &control_ops, ops); | 492 | &control_ops, ops); |
493 | if (!ret) { | ||
494 | /* | ||
495 | * The ftrace_ops is now removed from the list, | ||
496 | * so there'll be no new users. We must ensure | ||
497 | * all current users are done before we free | ||
498 | * the control data. | ||
499 | */ | ||
500 | synchronize_sched(); | ||
501 | control_ops_free(ops); | ||
502 | } | ||
503 | } else | 493 | } else |
504 | ret = remove_ftrace_ops(&ftrace_ops_list, ops); | 494 | ret = remove_ftrace_ops(&ftrace_ops_list, ops); |
505 | 495 | ||
@@ -509,13 +499,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
509 | if (ftrace_enabled) | 499 | if (ftrace_enabled) |
510 | update_ftrace_function(); | 500 | update_ftrace_function(); |
511 | 501 | ||
512 | /* | ||
513 | * Dynamic ops may be freed, we must make sure that all | ||
514 | * callers are done before leaving this function. | ||
515 | */ | ||
516 | if (ops->flags & FTRACE_OPS_FL_DYNAMIC) | ||
517 | synchronize_sched(); | ||
518 | |||
519 | return 0; | 502 | return 0; |
520 | } | 503 | } |
521 | 504 | ||
@@ -2184,10 +2167,41 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2184 | command |= FTRACE_UPDATE_TRACE_FUNC; | 2167 | command |= FTRACE_UPDATE_TRACE_FUNC; |
2185 | } | 2168 | } |
2186 | 2169 | ||
2187 | if (!command || !ftrace_enabled) | 2170 | if (!command || !ftrace_enabled) { |
2171 | /* | ||
2172 | * If these are control ops, they still need their | ||
2173 | * per_cpu field freed. Since, function tracing is | ||
2174 | * not currently active, we can just free them | ||
2175 | * without synchronizing all CPUs. | ||
2176 | */ | ||
2177 | if (ops->flags & FTRACE_OPS_FL_CONTROL) | ||
2178 | control_ops_free(ops); | ||
2188 | return 0; | 2179 | return 0; |
2180 | } | ||
2189 | 2181 | ||
2190 | ftrace_run_update_code(command); | 2182 | ftrace_run_update_code(command); |
2183 | |||
2184 | /* | ||
2185 | * Dynamic ops may be freed, we must make sure that all | ||
2186 | * callers are done before leaving this function. | ||
2187 | * The same goes for freeing the per_cpu data of the control | ||
2188 | * ops. | ||
2189 | * | ||
2190 | * Again, normal synchronize_sched() is not good enough. | ||
2191 | * We need to do a hard force of sched synchronization. | ||
2192 | * This is because we use preempt_disable() to do RCU, but | ||
2193 | * the function tracers can be called where RCU is not watching | ||
2194 | * (like before user_exit()). We can not rely on the RCU | ||
2195 | * infrastructure to do the synchronization, thus we must do it | ||
2196 | * ourselves. | ||
2197 | */ | ||
2198 | if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) { | ||
2199 | schedule_on_each_cpu(ftrace_sync); | ||
2200 | |||
2201 | if (ops->flags & FTRACE_OPS_FL_CONTROL) | ||
2202 | control_ops_free(ops); | ||
2203 | } | ||
2204 | |||
2191 | return 0; | 2205 | return 0; |
2192 | } | 2206 | } |
2193 | 2207 | ||