path: root/kernel/trace
author    Steven Rostedt <rostedt@goodmis.org>  2014-02-07 14:41:17 -0500
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2014-02-13 16:48:03 -0500
commit    a0d0a2a5a36361f7695bb243bf7ea499f0c82fd2 (patch)
tree      cd23315ad0fa6660df5cbcb19230d38847309549 /kernel/trace
parent    1505c0baa0a8e3dc311a90b25eb24cc46b0894ea (diff)
ftrace: Synchronize setting function_trace_op with ftrace_trace_function
commit 405e1d834807e51b2ebd3dea81cb51e53fb61504 upstream.

ftrace_trace_function is a variable that holds the function that will be called directly by the assembly code (mcount). If just a single function is registered and it handles recursion itself, then the assembly calls that function directly without any helper function. It also passes in the ftrace_op that was registered with the callback. The ftrace_op to pass is stored in the function_trace_op variable.

ftrace_trace_function and function_trace_op need to be coordinated so that the callback won't be called with the wrong ftrace_op; otherwise bad things can happen if it expected a different op. Luckily, no current callback that bypasses the helper functions requires this, but one soon will, so this needs to be fixed.

Use set_function_trace_op to store the ftrace_op, and copy it into function_trace_op when it is safe to do so (during the update function within the breakpoint or stop_machine calls). If dynamic ftrace is not being used (static tracing), then a bit more synchronization is needed when ftrace_trace_function is set, as that takes effect immediately (as opposed to dynamic ftrace, which does it when the trampoline is modified).

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
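[Editor's note] The ordering the patch relies on can be seen in miniature outside the kernel. The sketch below is not ftrace code: it is a small, self-contained userspace C analogy (all names such as demo_op, current_hook and hook_new are invented for the illustration) that publishes an "op" before installing the callback that depends on it, using C11 release/acquire atomics where the patch uses smp_wmb() plus an IPI-forced rmb (or stop_machine). It only shows why the two stores must be ordered; the kernel cannot afford an acquire barrier on the mcount fast path, which is why the patch pushes the synchronization to the update side instead.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct demo_op { const char *name; };

static struct demo_op op_a = { "op_a" };
static struct demo_op op_b = { "op_b" };

/* stand-in for function_trace_op: the argument the callback expects */
static _Atomic(struct demo_op *) current_op = &op_a;

typedef void (*hook_fn)(struct demo_op *op);

static void hook_old(struct demo_op *op)
{
	(void)op;	/* the old hook does not care which op it gets */
}

static void hook_new(struct demo_op *op)
{
	/* with the publication order below this can never see op_a */
	if (op != &op_b)
		fprintf(stderr, "new hook saw a stale op\n");
}

/* stand-in for ftrace_trace_function: what the hot path calls */
static _Atomic(hook_fn) current_hook = hook_old;

/* writer: publish the new op first, then the hook that depends on it */
static void switch_hook(void)
{
	atomic_store_explicit(&current_op, &op_b, memory_order_relaxed);
	/*
	 * The release store pairs with the acquire load in the hot path,
	 * much as smp_wmb() pairs with the rmb forced by the IPI in the patch.
	 */
	atomic_store_explicit(&current_hook, hook_new, memory_order_release);
}

/* reader: rough analogue of the mcount call site */
static void *hot_path(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000000; i++) {
		hook_fn fn = atomic_load_explicit(&current_hook,
						  memory_order_acquire);
		struct demo_op *op = atomic_load_explicit(&current_op,
							  memory_order_relaxed);
		fn(op);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, hot_path, NULL);
	switch_hook();
	pthread_join(t, NULL);
	puts("done");
	return 0;
}

Built with e.g. gcc -pthread, the check in hook_new should never fire with the release/acquire pair in place; without that ordering, nothing prevents the new hook from being observed together with the old op, which is exactly the race the commit describes.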
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/ftrace.c | 76
1 file changed, 72 insertions(+), 4 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index d0c5c3f0d939..fc21312dad6d 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -85,6 +85,8 @@ int function_trace_stop __read_mostly;
 
 /* Current function tracing op */
 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
+/* What to set function_trace_op to */
+static struct ftrace_ops *set_function_trace_op;
 
 /* List for set_ftrace_pid's pids. */
 LIST_HEAD(ftrace_pids);
@@ -278,6 +280,23 @@ static void update_global_ops(void)
 	global_ops.func = func;
 }
 
+static void ftrace_sync(struct work_struct *work)
+{
+	/*
+	 * This function is just a stub to implement a hard force
+	 * of synchronize_sched(). This requires synchronizing
+	 * tasks even in userspace and idle.
+	 *
+	 * Yes, function tracing is rude.
+	 */
+}
+
+static void ftrace_sync_ipi(void *data)
+{
+	/* Probably not needed, but do it anyway */
+	smp_rmb();
+}
+
 static void update_ftrace_function(void)
 {
 	ftrace_func_t func;
@@ -296,16 +315,59 @@ static void update_ftrace_function(void)
 		     !FTRACE_FORCE_LIST_FUNC)) {
 		/* Set the ftrace_ops that the arch callback uses */
 		if (ftrace_ops_list == &global_ops)
-			function_trace_op = ftrace_global_list;
+			set_function_trace_op = ftrace_global_list;
 		else
-			function_trace_op = ftrace_ops_list;
+			set_function_trace_op = ftrace_ops_list;
 		func = ftrace_ops_list->func;
 	} else {
 		/* Just use the default ftrace_ops */
-		function_trace_op = &ftrace_list_end;
+		set_function_trace_op = &ftrace_list_end;
 		func = ftrace_ops_list_func;
 	}
 
+	/* If there's no change, then do nothing more here */
+	if (ftrace_trace_function == func)
+		return;
+
+	/*
+	 * If we are using the list function, it doesn't care
+	 * about the function_trace_ops.
+	 */
+	if (func == ftrace_ops_list_func) {
+		ftrace_trace_function = func;
+		/*
+		 * Don't even bother setting function_trace_ops,
+		 * it would be racy to do so anyway.
+		 */
+		return;
+	}
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+	/*
+	 * For static tracing, we need to be a bit more careful.
+	 * The function change takes effect immediately. Thus,
+	 * we need to coordinate the setting of the function_trace_ops
+	 * with the setting of the ftrace_trace_function.
+	 *
+	 * Set the function to the list ops, which will call the
+	 * function we want, albeit indirectly, but it handles the
+	 * ftrace_ops and doesn't depend on function_trace_op.
+	 */
+	ftrace_trace_function = ftrace_ops_list_func;
+	/*
+	 * Make sure all CPUs see this. Yes this is slow, but static
+	 * tracing is slow and nasty to have enabled.
+	 */
+	schedule_on_each_cpu(ftrace_sync);
+	/* Now all cpus are using the list ops. */
+	function_trace_op = set_function_trace_op;
+	/* Make sure the function_trace_op is visible on all CPUs */
+	smp_wmb();
+	/* Nasty way to force a rmb on all cpus */
+	smp_call_function(ftrace_sync_ipi, NULL, 1);
+	/* OK, we are all set to update the ftrace_trace_function now! */
+#endif /* !CONFIG_DYNAMIC_FTRACE */
+
 	ftrace_trace_function = func;
 }
 
@@ -1952,8 +2014,14 @@ void ftrace_modify_all_code(int command)
 	else if (command & FTRACE_DISABLE_CALLS)
 		ftrace_replace_code(0);
 
-	if (command & FTRACE_UPDATE_TRACE_FUNC)
+	if (command & FTRACE_UPDATE_TRACE_FUNC) {
+		function_trace_op = set_function_trace_op;
+		smp_wmb();
+		/* If irqs are disabled, we are in stop machine */
+		if (!irqs_disabled())
+			smp_call_function(ftrace_sync_ipi, NULL, 1);
 		ftrace_update_ftrace_func(ftrace_trace_function);
+	}
 
 	if (command & FTRACE_START_FUNC_RET)
 		ftrace_enable_ftrace_graph_caller();