author    Steven Rostedt (Red Hat) <rostedt@goodmis.org>    2014-01-13 12:56:21 -0500
committer Steven Rostedt <rostedt@goodmis.org>              2014-01-13 12:56:21 -0500
commit    a4c35ed241129dd142be4cadb1e5a474a56d5464 (patch)
tree      c003e1fba1f088b31c99d5e388b7992297265aed
parent    23a8e8441a0a74dd612edf81dc89d1600bc0a3d1 (diff)
ftrace: Fix synchronization location disabling and freeing ftrace_ops
The synchronization needed after ftrace_ops are unregistered must happen
after the callback is disabled from being called by functions. The
current location happens after the function is being removed from the
internal lists, but not after the function callbacks were disabled,
leaving the functions susceptible to being called after their callbacks
are freed.

This affects perf and any external users of function tracing (LTTng
and SystemTap).

Cc: stable@vger.kernel.org # 3.0+
Fixes: cdbe61bfe704 "ftrace: Allow dynamically allocated function tracers"
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
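For illustration, here is a minimal sketch of the use pattern this fix
protects, assuming a hypothetical module-style external user of the
3.x-era ftrace API; the callback name and the alloc/free helpers are
illustrative, not taken from the patch:

/* Hedged sketch, not from the patch: a dynamically allocated
 * ftrace_ops, the way an external user (perf, LTTng, SystemTap)
 * would set one up on a 3.x kernel. */
#include <linux/ftrace.h>
#include <linux/slab.h>

static void my_callback(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct pt_regs *regs)
{
	/* Runs with preemption disabled, possibly at points where
	 * RCU is not watching (e.g. before user_exit()). */
}

static struct ftrace_ops *attach(void)
{
	/* Not core kernel data, so ftrace flags the ops
	 * FTRACE_OPS_FL_DYNAMIC when it is registered. */
	struct ftrace_ops *ops = kzalloc(sizeof(*ops), GFP_KERNEL);

	if (!ops)
		return NULL;
	ops->func = my_callback;
	if (register_ftrace_function(ops)) {
		kfree(ops);
		return NULL;
	}
	return ops;
}

static void detach(struct ftrace_ops *ops)
{
	unregister_ftrace_function(ops);
	/* Only safe if unregister_ftrace_function() has already waited
	 * for every in-flight callback to finish. This patch moves that
	 * wait to after the callback is actually disabled, closing the
	 * use-after-free window. */
	kfree(ops);
}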
-rw-r--r--	kernel/trace/ftrace.c	58
1 file changed, 32 insertions(+), 26 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 7f21b06648e9..7181ad15923b 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -498,20 +498,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
 		ret = remove_ftrace_list_ops(&ftrace_control_list,
 					     &control_ops, ops);
-		if (!ret) {
-			/*
-			 * The ftrace_ops is now removed from the list,
-			 * so there'll be no new users. We must ensure
-			 * all current users are done before we free
-			 * the control data.
-			 * Note synchronize_sched() is not enough, as we
-			 * use preempt_disable() to do RCU, but the function
-			 * tracer can be called where RCU is not active
-			 * (before user_exit()).
-			 */
-			schedule_on_each_cpu(ftrace_sync);
-			control_ops_free(ops);
-		}
 	} else
 		ret = remove_ftrace_ops(&ftrace_ops_list, ops);
 
@@ -521,17 +507,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	if (ftrace_enabled)
 		update_ftrace_function();
 
-	/*
-	 * Dynamic ops may be freed, we must make sure that all
-	 * callers are done before leaving this function.
-	 *
-	 * Again, normal synchronize_sched() is not good enough.
-	 * We need to do a hard force of sched synchronization.
-	 */
-	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
-		schedule_on_each_cpu(ftrace_sync);
-
-
 	return 0;
 }
 
@@ -2208,10 +2183,41 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 		command |= FTRACE_UPDATE_TRACE_FUNC;
 	}
 
-	if (!command || !ftrace_enabled)
+	if (!command || !ftrace_enabled) {
+		/*
+		 * If these are control ops, they still need their
+		 * per_cpu field freed. Since, function tracing is
+		 * not currently active, we can just free them
+		 * without synchronizing all CPUs.
+		 */
+		if (ops->flags & FTRACE_OPS_FL_CONTROL)
+			control_ops_free(ops);
 		return 0;
+	}
 
 	ftrace_run_update_code(command);
+
+	/*
+	 * Dynamic ops may be freed, we must make sure that all
+	 * callers are done before leaving this function.
+	 * The same goes for freeing the per_cpu data of the control
+	 * ops.
+	 *
+	 * Again, normal synchronize_sched() is not good enough.
+	 * We need to do a hard force of sched synchronization.
+	 * This is because we use preempt_disable() to do RCU, but
+	 * the function tracers can be called where RCU is not watching
+	 * (like before user_exit()). We can not rely on the RCU
+	 * infrastructure to do the synchronization, thus we must do it
+	 * ourselves.
+	 */
+	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
+		schedule_on_each_cpu(ftrace_sync);
+
+		if (ops->flags & FTRACE_OPS_FL_CONTROL)
+			control_ops_free(ops);
+	}
+
 	return 0;
 }
 
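The "hard force of sched synchronization" the comments describe works
roughly as sketched below; the body of ftrace_sync() here is paraphrased,
not copied from the tree. schedule_on_each_cpu() queues a work item on
every CPU and waits for each one to run. Because the trace callbacks
execute under preempt_disable(), any callback that was in flight when the
ops was unlinked must have completed by the time its CPU schedules the
work item, even at points where RCU is not watching and synchronize_sched()
alone gives no guarantee.

/* Sketch of the synchronization stub used above. The real
 * ftrace_sync() in kernel/trace/ftrace.c is likewise an empty
 * work function, though its comments differ. */
#include <linux/workqueue.h>

static void ftrace_sync(struct work_struct *work)
{
	/* Empty on purpose: merely running proves this CPU has
	 * scheduled, so no preempt-disabled callback from before
	 * the work was queued can still be executing on it. */
}

/* Usage, as in ftrace_shutdown() above:
 *
 *	schedule_on_each_cpu(ftrace_sync);
 *	control_ops_free(ops);
 */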