Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--   kernel/trace/ftrace.c | 76
1 file changed, 72 insertions(+), 4 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index d0c5c3f0d939..fc21312dad6d 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -85,6 +85,8 @@ int function_trace_stop __read_mostly;
 
 /* Current function tracing op */
 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
+/* What to set function_trace_op to */
+static struct ftrace_ops *set_function_trace_op;
 
 /* List for set_ftrace_pid's pids. */
 LIST_HEAD(ftrace_pids);
@@ -278,6 +280,23 @@ static void update_global_ops(void)
 	global_ops.func = func;
 }
 
+static void ftrace_sync(struct work_struct *work)
+{
+	/*
+	 * This function is just a stub to implement a hard force
+	 * of synchronize_sched(). This requires synchronizing
+	 * tasks even in userspace and idle.
+	 *
+	 * Yes, function tracing is rude.
+	 */
+}
+
+static void ftrace_sync_ipi(void *data)
+{
+	/* Probably not needed, but do it anyway */
+	smp_rmb();
+}
+
 static void update_ftrace_function(void)
 {
 	ftrace_func_t func;
@@ -296,16 +315,59 @@ static void update_ftrace_function(void)
 	     !FTRACE_FORCE_LIST_FUNC)) {
 		/* Set the ftrace_ops that the arch callback uses */
 		if (ftrace_ops_list == &global_ops)
-			function_trace_op = ftrace_global_list;
+			set_function_trace_op = ftrace_global_list;
 		else
-			function_trace_op = ftrace_ops_list;
+			set_function_trace_op = ftrace_ops_list;
 		func = ftrace_ops_list->func;
 	} else {
 		/* Just use the default ftrace_ops */
-		function_trace_op = &ftrace_list_end;
+		set_function_trace_op = &ftrace_list_end;
 		func = ftrace_ops_list_func;
 	}
 
+	/* If there's no change, then do nothing more here */
+	if (ftrace_trace_function == func)
+		return;
+
+	/*
+	 * If we are using the list function, it doesn't care
+	 * about the function_trace_op.
+	 */
+	if (func == ftrace_ops_list_func) {
+		ftrace_trace_function = func;
+		/*
+		 * Don't even bother setting function_trace_op,
+		 * it would be racy to do so anyway.
+		 */
+		return;
+	}
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+	/*
+	 * For static tracing, we need to be a bit more careful.
+	 * The function change takes effect immediately. Thus,
+	 * we need to coordinate the setting of the function_trace_op
+	 * with the setting of the ftrace_trace_function.
+	 *
+	 * Set the function to the list ops, which will call the
+	 * function we want, albeit indirectly, but it handles the
+	 * ftrace_ops and doesn't depend on function_trace_op.
+	 */
+	ftrace_trace_function = ftrace_ops_list_func;
+	/*
+	 * Make sure all CPUs see this. Yes this is slow, but static
+	 * tracing is slow and nasty to have enabled.
+	 */
+	schedule_on_each_cpu(ftrace_sync);
+	/* Now all cpus are using the list ops. */
+	function_trace_op = set_function_trace_op;
+	/* Make sure the function_trace_op is visible on all CPUs */
+	smp_wmb();
+	/* Nasty way to force a rmb on all cpus */
+	smp_call_function(ftrace_sync_ipi, NULL, 1);
+	/* OK, we are all set to update the ftrace_trace_function now! */
+#endif /* !CONFIG_DYNAMIC_FTRACE */
+
 	ftrace_trace_function = func;
 }
 
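
Note on the hunk above: the static-tracing branch first routes every caller through ftrace_ops_list_func, forces each CPU through a scheduling point, publishes function_trace_op behind a write barrier, forces a read barrier on all CPUs, and only then installs the final callback. The standalone C11 sketch below illustrates that publish-then-switch ordering; it is an illustration only, with hypothetical names (cur_func, cur_op, switch_handler, tracer_hook), and release/acquire atomics standing in for schedule_on_each_cpu(ftrace_sync), smp_wmb() and the ftrace_sync_ipi() broadcast.

/*
 * Standalone C11 sketch of the static-tracing path above (illustration
 * only: cur_func, cur_op, switch_handler and tracer_hook are hypothetical
 * names, not kernel symbols).
 */
#include <stdatomic.h>
#include <stdio.h>

struct op { const char *name; };

typedef void (*trace_fn)(struct op *);

/* Handler that never dereferences the shared op (like ftrace_ops_list_func). */
static void list_func(struct op *unused)
{
	(void)unused;
}

/* Handler that does dereference it (the "real" callback). */
static void fast_func(struct op *op)
{
	printf("tracing with op %s\n", op->name);
}

static _Atomic(trace_fn) cur_func = list_func;   /* ftrace_trace_function */
static _Atomic(struct op *) cur_op;              /* function_trace_op     */

/* Writer side: the order mirrors update_ftrace_function(). */
static void switch_handler(struct op *new_op)
{
	/* 1) Route every caller through the handler that ignores cur_op.
	 *    (Kernel: ftrace_trace_function = ftrace_ops_list_func, then
	 *    schedule_on_each_cpu(ftrace_sync) to wait for all CPUs.) */
	atomic_store_explicit(&cur_func, list_func, memory_order_release);

	/* 2) Publish the op the new handler will need.
	 *    (Kernel: function_trace_op = set_function_trace_op; smp_wmb();
	 *    smp_call_function(ftrace_sync_ipi, NULL, 1).) */
	atomic_store_explicit(&cur_op, new_op, memory_order_release);

	/* 3) Only now install the handler that reads cur_op. */
	atomic_store_explicit(&cur_func, fast_func, memory_order_release);
}

/* Reader side: what each traced call does. */
static void tracer_hook(void)
{
	trace_fn fn = atomic_load_explicit(&cur_func, memory_order_acquire);

	/* Seeing fast_func here guarantees the cur_op store is visible. */
	fn(atomic_load_explicit(&cur_op, memory_order_relaxed));
}

int main(void)
{
	static struct op my_op = { "my_op" };

	switch_handler(&my_op);
	tracer_hook();
	return 0;
}

A caller that still observes list_func never touches cur_op at all, which is exactly why the kernel parks every CPU on ftrace_ops_list_func before function_trace_op is allowed to change.
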
@@ -1952,8 +2014,14 @@ void ftrace_modify_all_code(int command)
 	else if (command & FTRACE_DISABLE_CALLS)
 		ftrace_replace_code(0);
 
-	if (command & FTRACE_UPDATE_TRACE_FUNC)
+	if (command & FTRACE_UPDATE_TRACE_FUNC) {
+		function_trace_op = set_function_trace_op;
+		smp_wmb();
+		/* If irqs are disabled, we are in stop machine */
+		if (!irqs_disabled())
+			smp_call_function(ftrace_sync_ipi, NULL, 1);
 		ftrace_update_ftrace_func(ftrace_trace_function);
+	}
 
 	if (command & FTRACE_START_FUNC_RET)
 		ftrace_enable_ftrace_graph_caller();
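
Note on the last hunk: smp_call_function() must not be called with interrupts disabled, and when ftrace_modify_all_code() runs under stop_machine() the other CPUs are already parked, so the IPI round is both unnecessary and unsafe there. Below is a minimal standalone sketch of that branch; every name in it (ops_stub, live_op, live_func, update_trace_func, fake_ipi_broadcast, in_stop_machine) is a hypothetical stand-in, not a kernel interface.

/*
 * Standalone sketch of the FTRACE_UPDATE_TRACE_FUNC branch above.
 * A C11 release fence stands in for smp_wmb(), and a print stands in
 * for the smp_call_function(ftrace_sync_ipi, NULL, 1) broadcast.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ops_stub { const char *name; };

static struct ops_stub list_end = { "list_end" };

static struct ops_stub *staged_op = &list_end;           /* set_function_trace_op */
static _Atomic(struct ops_stub *) live_op = &list_end;   /* function_trace_op     */

typedef void (*trace_fn)(void);

static void new_callback(void)
{
	struct ops_stub *op = atomic_load_explicit(&live_op, memory_order_relaxed);

	printf("callback sees op %s\n", op->name);
}

static _Atomic(trace_fn) live_func = NULL;               /* the patched call site */

/* Stand-in for the IPI broadcast: every other CPU runs smp_rmb(). */
static void fake_ipi_broadcast(void)
{
	printf("remote readers issue a read barrier\n");
}

static void update_trace_func(bool in_stop_machine)
{
	/* Publish the op first ... */
	atomic_store_explicit(&live_op, staged_op, memory_order_relaxed);
	/* ... and order it before the callback switch (kernel: smp_wmb()). */
	atomic_thread_fence(memory_order_release);

	/*
	 * With interrupts disabled we are inside stop_machine(): the other
	 * CPUs are parked, so nothing can observe a half-updated state and
	 * an IPI could not be serviced from this context anyway.
	 */
	if (!in_stop_machine)
		fake_ipi_broadcast();

	/* Kernel: ftrace_update_ftrace_func(ftrace_trace_function). */
	atomic_store_explicit(&live_func, new_callback, memory_order_relaxed);
}

int main(void)
{
	static struct ops_stub my_op = { "my_op" };

	staged_op = &my_op;
	update_trace_func(false);

	trace_fn fn = atomic_load_explicit(&live_func, memory_order_acquire);
	if (fn)
		fn();
	return 0;
}

The ordering mirrors the hunk: the op is published and ordered before the callback switch, and the barrier broadcast is skipped only when exclusion is already guaranteed.
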