path: root/kernel
author	Steven Rostedt (Red Hat) <rostedt@goodmis.org>	2013-11-08 14:17:30 -0500
committer	Steven Rostedt <rostedt@goodmis.org>	2014-01-09 22:00:25 -0500
commit	405e1d834807e51b2ebd3dea81cb51e53fb61504 (patch)
tree	cd490ffb339a7e744ace83f95ef6895a15b0ad45 /kernel
parent	dd97b95438c812d8fd93d9426661a6c8e1520005 (diff)
ftrace: Synchronize setting function_trace_op with ftrace_trace_function
ftrace_trace_function is a variable that holds the function that will be called directly by the assembly code (mcount). If just a single function is registered and it handles recursion itself, then the assembly will call that function directly without any helper function. It also passes in the ftrace_ops that was registered with the callback; the ftrace_ops to pass is stored in the function_trace_op variable.

ftrace_trace_function and function_trace_op need to be coordinated so that the callback won't be called with the wrong ftrace_ops, otherwise bad things can happen if it expected a different op. Luckily, every callback that would require this coordination currently goes through the helper functions, so nothing depends on it yet. But a callback soon will, and this needs to be fixed.

Use a new variable, set_function_trace_op, to hold the ftrace_ops that function_trace_op should be set to once it is safe to do so (during the update function, within the breakpoint or stop-machine calls). If dynamic ftrace is not being used (static tracing), then a bit more synchronization is needed when ftrace_trace_function is set, as that takes effect immediately (as opposed to dynamic ftrace, which does it with the modification of the trampoline).

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
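To make the race concrete, here is a hypothetical C rendition of the direct-call fast path that the per-arch mcount assembly implements. mcount_direct_call is an invented name for illustration only; function_trace_op, ftrace_trace_function, and the four-argument callback signature are the kernel's own, and real trampolines may also pass saved registers instead of NULL:

#include <linux/ftrace.h>

/* Defined in kernel/trace/ftrace.c; arch code references it directly. */
extern struct ftrace_ops *function_trace_op;

/*
 * Hypothetical sketch of the direct-call fast path.  The real code is
 * per-arch assembly; this only illustrates the two loads involved.
 */
static notrace void mcount_direct_call(unsigned long ip,
				       unsigned long parent_ip)
{
	/* Load the ops that will be handed to the callback... */
	struct ftrace_ops *op = function_trace_op;

	/*
	 * ...and call whatever ftrace_trace_function points at.  If
	 * another CPU updates both variables between these two loads
	 * without the synchronization this patch adds, the new
	 * callback can run with the old ops (or the old callback
	 * with the new ops).
	 */
	ftrace_trace_function(ip, parent_ip, op, NULL);
}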
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/trace/ftrace.c | 87
1 file changed, 72 insertions(+), 15 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 531ffa6f5a7b..0ffb811cbb1f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -85,6 +85,8 @@ int function_trace_stop __read_mostly;
 
 /* Current function tracing op */
 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
+/* What to set function_trace_op to */
+static struct ftrace_ops *set_function_trace_op;
 
 /* List for set_ftrace_pid's pids. */
 LIST_HEAD(ftrace_pids);
@@ -278,6 +280,23 @@ static void update_global_ops(void)
 	global_ops.func = func;
 }
 
+static void ftrace_sync(struct work_struct *work)
+{
+	/*
+	 * This function is just a stub to implement a hard force
+	 * of synchronize_sched(). This requires synchronizing
+	 * tasks even in userspace and idle.
+	 *
+	 * Yes, function tracing is rude.
+	 */
+}
+
+static void ftrace_sync_ipi(void *data)
+{
+	/* Probably not needed, but do it anyway */
+	smp_rmb();
+}
+
 static void update_ftrace_function(void)
 {
 	ftrace_func_t func;
@@ -296,16 +315,59 @@ static void update_ftrace_function(void)
 		    !FTRACE_FORCE_LIST_FUNC)) {
 		/* Set the ftrace_ops that the arch callback uses */
 		if (ftrace_ops_list == &global_ops)
-			function_trace_op = ftrace_global_list;
+			set_function_trace_op = ftrace_global_list;
 		else
-			function_trace_op = ftrace_ops_list;
+			set_function_trace_op = ftrace_ops_list;
 		func = ftrace_ops_list->func;
 	} else {
 		/* Just use the default ftrace_ops */
-		function_trace_op = &ftrace_list_end;
+		set_function_trace_op = &ftrace_list_end;
 		func = ftrace_ops_list_func;
 	}
 
+	/* If there's no change, then do nothing more here */
+	if (ftrace_trace_function == func)
+		return;
+
+	/*
+	 * If we are using the list function, it doesn't care
+	 * about the function_trace_ops.
+	 */
+	if (func == ftrace_ops_list_func) {
+		ftrace_trace_function = func;
+		/*
+		 * Don't even bother setting function_trace_ops,
+		 * it would be racy to do so anyway.
+		 */
+		return;
+	}
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+	/*
+	 * For static tracing, we need to be a bit more careful.
+	 * The function change takes effect immediately. Thus,
+	 * we need to coordinate the setting of the function_trace_op
+	 * with the setting of the ftrace_trace_function.
+	 *
+	 * Set the function to the list ops, which will call the
+	 * function we want, albeit indirectly, but it handles the
+	 * ftrace_ops and doesn't depend on function_trace_op.
+	 */
+	ftrace_trace_function = ftrace_ops_list_func;
+	/*
+	 * Make sure all CPUs see this. Yes this is slow, but static
+	 * tracing is slow and nasty to have enabled.
+	 */
+	schedule_on_each_cpu(ftrace_sync);
+	/* Now all cpus are using the list ops. */
+	function_trace_op = set_function_trace_op;
+	/* Make sure the function_trace_op is visible on all CPUs */
+	smp_wmb();
+	/* Nasty way to force a rmb on all cpus */
+	smp_call_function(ftrace_sync_ipi, NULL, 1);
+	/* OK, we are all set to update the ftrace_trace_function now! */
+#endif /* !CONFIG_DYNAMIC_FTRACE */
+
 	ftrace_trace_function = func;
 }
 
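The #ifndef CONFIG_DYNAMIC_FTRACE block above is a small publication protocol: park every CPU on a callback that never reads the shared ops pointer, wait until all of them have gotten there, swap the pointer, force it visible everywhere, and only then install the real callback. A stripped-down sketch of the same five steps, with invented names (active_fn, active_ctx, publish() and the stub functions are hypothetical; schedule_on_each_cpu(), smp_call_function(), smp_wmb() and smp_rmb() are the real kernel APIs):

#include <linux/smp.h>
#include <linux/workqueue.h>

static void (*active_fn)(void *ctx);	/* what the hot path calls      */
static void *active_ctx;		/* the argument it passes along */

static void safe_list_fn(void *ctx)
{
	/* Slow path that does not read active_ctx at all. */
}

static void sync_stub(struct work_struct *work)
{
	/* Empty: merely running it forces a scheduling point per CPU. */
}

static void rmb_ipi(void *data)
{
	smp_rmb();			/* pairs with publish()'s smp_wmb() */
}

static void publish(void (*new_fn)(void *ctx), void *new_ctx)
{
	active_fn = safe_list_fn;		/* 1) park readers off active_ctx */
	schedule_on_each_cpu(sync_stub);	/* 2) every CPU has since scheduled */
	active_ctx = new_ctx;			/* 3) now safe to swap the ctx    */
	smp_wmb();				/* 4) order the store...          */
	smp_call_function(rmb_ipi, NULL, 1);	/*    ...against readers' loads   */
	active_fn = new_fn;			/* 5) expose the new callback     */
}

The expensive schedule_on_each_cpu() pass in step 2 is what guarantees that no CPU can still be running a reader that loaded the old callback before step 1, so step 3 cannot race with it.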
@@ -410,17 +472,6 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	return 0;
 }
 
-static void ftrace_sync(struct work_struct *work)
-{
-	/*
-	 * This function is just a stub to implement a hard force
-	 * of synchronize_sched(). This requires synchronizing
-	 * tasks even in userspace and idle.
-	 *
-	 * Yes, function tracing is rude.
-	 */
-}
-
 static int __unregister_ftrace_function(struct ftrace_ops *ops)
 {
 	int ret;
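Note that this hunk only removes the original copy of ftrace_sync(): the patch moves the function, body unchanged, above update_ftrace_function() (see the second hunk) so that the update code can use it.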
@@ -1979,8 +2030,14 @@ void ftrace_modify_all_code(int command)
 	else if (command & FTRACE_DISABLE_CALLS)
 		ftrace_replace_code(0);
 
-	if (update && ftrace_trace_function != ftrace_ops_list_func)
+	if (update && ftrace_trace_function != ftrace_ops_list_func) {
+		function_trace_op = set_function_trace_op;
+		smp_wmb();
+		/* If irqs are disabled, we are in stop machine */
+		if (!irqs_disabled())
+			smp_call_function(ftrace_sync_ipi, NULL, 1);
 		ftrace_update_ftrace_func(ftrace_trace_function);
+	}
 
 	if (command & FTRACE_START_FUNC_RET)
 		ftrace_enable_ftrace_graph_caller();
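On the dynamic-ftrace side, ftrace_modify_all_code() now publishes set_function_trace_op into function_trace_op, followed by an smp_wmb(), before ftrace_update_ftrace_func() repoints the call sites. If interrupts are disabled here, the update is running under stop_machine(), where the other CPUs are already held quiescent (and where smp_call_function() must not be used anyway); otherwise, on the breakpoint-based update path, the ftrace_sync_ipi() broadcast forces an smp_rmb() on every CPU so none of them can observe the new callback before the new ops.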