author     Steven Rostedt (Red Hat) <rostedt@goodmis.org>  2014-07-03 15:48:16 -0400
committer  Steven Rostedt <rostedt@goodmis.org>            2014-11-11 12:41:52 -0500
commit     12cce594fa8f12e002e7eb5d10141853c1e6a112 (patch)
tree       f508de25f865ead9162c4d5213874394ac06fdde
parent     15d5b02cc575e5b20ddfa1645fc1242f0b0ba1c8 (diff)
ftrace/x86: Allow !CONFIG_PREEMPT dynamic ops to use allocated trampolines
When a static ftrace_ops (like the function tracer) enables tracing and it is the only callback referencing a function, a trampoline is dynamically allocated for that function which calls the callback directly, instead of calling a loop function that iterates over all registered ftrace ops (needed when more than one ops is registered).

But when it comes to dynamically allocated ftrace_ops, which may be freed, on a CONFIG_PREEMPT kernel there is no way to know when it is safe to free the trampoline: if a task was preempted while executing on the trampoline, there is currently no way to know when it will be off that trampoline.

This is not a problem with !CONFIG_PREEMPT. The current method of calling schedule_on_each_cpu() forces tasks off the trampoline, because they cannot schedule while on it (kernel preemption is not configured). That means it is safe to free a dynamically allocated ftrace_ops trampoline when CONFIG_PREEMPT is not configured.

Cc: H. Peter Anvin <hpa@linux.intel.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Borislav Petkov <bp@suse.de>
Tested-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Tested-by: Jiri Kosina <jkosina@suse.cz>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
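As a reading aid for the ordering described above, here is a small userspace sketch. Nothing in it is kernel source: the struct layout, flag bit values, and helper names are made-up stand-ins; only the sequence it demonstrates (force every CPU through the scheduler first, free the allocated trampoline second) mirrors what the patch below does in ftrace_shutdown().

#include <stdio.h>

struct ftrace_ops {
	unsigned long flags;
	unsigned long trampoline;
};

/* Illustrative bit values; the real flags live in <linux/ftrace.h>. */
#define FTRACE_OPS_FL_DYNAMIC		(1UL << 0)
#define FTRACE_OPS_FL_ALLOC_TRAMP	(1UL << 1)

/*
 * Stand-in for schedule_on_each_cpu(ftrace_sync): on !CONFIG_PREEMPT it
 * only returns after every CPU has passed through the scheduler, and a
 * task cannot schedule while running on the trampoline.
 */
static void wait_for_all_cpus_to_schedule(void)
{
	puts("all CPUs scheduled; no task can still be on the trampoline");
}

/* Stand-in for the arch hook the patch adds. */
static void free_allocated_trampoline(struct ftrace_ops *ops)
{
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;
	printf("freeing trampoline at %#lx\n", ops->trampoline);
	ops->trampoline = 0;
}

/* The ordering the shutdown path relies on: synchronize first, free second. */
static void shutdown_dynamic_ops(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
		wait_for_all_cpus_to_schedule();
		free_allocated_trampoline(ops);
	}
}

int main(void)
{
	struct ftrace_ops ops = {
		.flags = FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_ALLOC_TRAMP,
		.trampoline = 0xa0001000UL,
	};

	shutdown_dynamic_ops(&ops);
	return 0;
}

Running it just prints the two steps in order; the point is that the free cannot begin before the synchronization step has returned.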
 arch/x86/kernel/ftrace.c |  8 ++++++++
 kernel/trace/ftrace.c    | 18 ++++++++++++++++++
 2 files changed, 26 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index ca17c20a1010..4cfeca6ffe11 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -913,6 +913,14 @@ void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec
 	return addr_from_call((void *)ops->trampoline + offset);
 }
 
+void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
+{
+	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
+		return;
+
+	tramp_free((void *)ops->trampoline);
+	ops->trampoline = 0;
+}
 
 #endif /* CONFIG_X86_64 */
 #endif /* CONFIG_DYNAMIC_FTRACE */
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 422e1f8300b1..eab3123a1fbe 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2324,6 +2324,10 @@ static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
 static ftrace_func_t saved_ftrace_func;
 static int ftrace_start_up;
 
+void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
+{
+}
+
 static void control_ops_free(struct ftrace_ops *ops)
 {
 	free_percpu(ops->disabled);
@@ -2475,6 +2479,8 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
 		schedule_on_each_cpu(ftrace_sync);
 
+		arch_ftrace_trampoline_free(ops);
+
 		if (ops->flags & FTRACE_OPS_FL_CONTROL)
 			control_ops_free(ops);
 	}
@@ -4725,9 +4731,21 @@ void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
 
 static void ftrace_update_trampoline(struct ftrace_ops *ops)
 {
+
+/*
+ * Currently there's no safe way to free a trampoline when the kernel
+ * is configured with PREEMPT. That is because a task could be preempted
+ * when it jumped to the trampoline, it may be preempted for a long time
+ * depending on the system load, and currently there's no way to know
+ * when it will be off the trampoline. If the trampoline is freed
+ * too early, when the task runs again, it will be executing on freed
+ * memory and crash.
+ */
+#ifdef CONFIG_PREEMPT
 	/* Currently, only non dynamic ops can have a trampoline */
 	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
 		return;
+#endif
 
 	arch_ftrace_update_trampoline(ops);
 }
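A closing note on the empty stub added to kernel/trace/ftrace.c: it relies on weak linkage, so the strong arch_ftrace_trampoline_free() now provided by arch/x86/kernel/ftrace.c overrides it at link time, while every other architecture keeps the no-op. Below is a minimal standalone sketch of that pattern; the file split is only indicated in comments, and everything except the function name is illustrative rather than taken from the kernel sources.

#include <stdio.h>

struct ftrace_ops;	/* opaque here; only the linkage pattern matters */

/*
 * "core" file: the generic, empty default. __attribute__((weak)) is what
 * the kernel's __weak annotation expands to, so this definition only
 * survives linking if no strong definition of the same symbol exists.
 */
void __attribute__((weak)) arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
	/* nothing to free on architectures without allocated trampolines */
	(void)ops;
}

/*
 * "arch" file: a plain (strong) definition of arch_ftrace_trampoline_free()
 * compiled into a separate object file would replace the weak stub above at
 * link time; that is how the x86 implementation in this patch takes over
 * from the generic one.
 */

int main(void)
{
	arch_ftrace_trampoline_free(NULL);	/* falls back to the weak stub here */
	puts("weak default called");
	return 0;
}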