 arch/x86/kernel/ftrace.c |  8 ++++++++
 kernel/trace/ftrace.c    | 18 ++++++++++++++++++
 2 files changed, 26 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index ca17c20a1010..4cfeca6ffe11 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -913,6 +913,14 @@ void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec
 	return addr_from_call((void *)ops->trampoline + offset);
 }
 
+void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
+{
+	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
+		return;
+
+	tramp_free((void *)ops->trampoline);
+	ops->trampoline = 0;
+}
 
 #endif /* CONFIG_X86_64 */
 #endif /* CONFIG_DYNAMIC_FTRACE */
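
The x86 side above frees only trampolines the kernel allocated itself: it bails out unless FTRACE_OPS_FL_ALLOC_TRAMP is set, releases the pages with tramp_free(), and zeroes ops->trampoline. For context, here is a hedged sketch of the kind of user whose trampoline this reclaims: a module registering its own dynamic ftrace_ops. The callback and module names are invented for the example; the callback signature is the four-argument form this kernel generation uses.

#include <linux/ftrace.h>
#include <linux/module.h>

/* Invented example callback; a real user would normally install a filter
 * (e.g. with ftrace_set_filter()) before registering, so this does not
 * fire on every traced function in the kernel.
 */
static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *ops, struct pt_regs *regs)
{
	/* runs via the ops' trampoline at each traced function entry */
}

static struct ftrace_ops my_ops = {
	.func = my_callback,
};

static int __init my_init(void)
{
	/*
	 * Module ops are dynamic (the core sets FTRACE_OPS_FL_DYNAMIC);
	 * the arch code may build a dedicated trampoline for them and mark
	 * it with FTRACE_OPS_FL_ALLOC_TRAMP.
	 */
	return register_ftrace_function(&my_ops);
}

static void __exit my_exit(void)
{
	/* teardown now reaches arch_ftrace_trampoline_free(&my_ops) */
	unregister_ftrace_function(&my_ops);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");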
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 422e1f8300b1..eab3123a1fbe 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2324,6 +2324,10 @@ static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
 static ftrace_func_t saved_ftrace_func;
 static int ftrace_start_up;
 
+void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
+{
+}
+
 static void control_ops_free(struct ftrace_ops *ops)
 {
 	free_percpu(ops->disabled);
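
The core provides the empty __weak default above so that architectures that never allocate trampolines need no stub of their own; the strong definition added to arch/x86/kernel/ftrace.c earlier in this patch simply takes precedence at link time. A minimal userspace illustration of that linker behavior, with all names invented for the sketch:

/* weak_default.c -- stands in for kernel/trace/ftrace.c.
 * In the kernel, __weak expands to __attribute__((weak)).
 */
#include <stdio.h>

__attribute__((weak)) void trampoline_free(void)
{
	/* default: nothing to free, like the empty stub above */
}

int main(void)
{
	trampoline_free();
	puts("done");
	return 0;
}

/* arch_override.c -- stands in for arch/x86/kernel/ftrace.c.  If an object
 * containing the plain (strong) definition below is also linked in, it
 * silently replaces the weak one:
 *
 *	void trampoline_free(void)
 *	{
 *		puts("arch-specific free ran");
 *	}
 */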
@@ -2475,6 +2479,8 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
 		schedule_on_each_cpu(ftrace_sync);
 
+		arch_ftrace_trampoline_free(ops);
+
 		if (ops->flags & FTRACE_OPS_FL_CONTROL)
 			control_ops_free(ops);
 	}
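
Note where the call lands in ftrace_shutdown(): only after schedule_on_each_cpu(ftrace_sync) has forced every CPU through the scheduler, so on a non-preemptible kernel no task can still be executing inside the trampoline by the time it is freed. A loose userspace analogy of that "wait for everyone to pass a voluntary quiescent point, then free" ordering (all names invented; this is not how the kernel implements it):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NWORKERS 4

static pthread_barrier_t quiescent;
static char *shared;	/* stands in for the trampoline pages */

static void *worker(void *arg)
{
	printf("worker saw '%c'\n", shared[0]);	/* last use of shared */
	pthread_barrier_wait(&quiescent);	/* like scheduling once */
	return NULL;
}

int main(void)
{
	pthread_t t[NWORKERS];
	int i;

	shared = malloc(1);
	shared[0] = 'x';
	pthread_barrier_init(&quiescent, NULL, NWORKERS + 1);

	for (i = 0; i < NWORKERS; i++)
		pthread_create(&t[i], NULL, worker, NULL);

	/* like schedule_on_each_cpu(): wait until everyone has checked in */
	pthread_barrier_wait(&quiescent);
	free(shared);	/* now safe: no worker can still be using it */

	for (i = 0; i < NWORKERS; i++)
		pthread_join(t[i], NULL);
	return 0;
}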
@@ -4725,9 +4731,21 @@ void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
 
 static void ftrace_update_trampoline(struct ftrace_ops *ops)
 {
+
+	/*
+	 * Currently there's no safe way to free a trampoline when the kernel
+	 * is configured with PREEMPT. That is because a task could be preempted
+	 * when it jumped to the trampoline, it may be preempted for a long time
+	 * depending on the system load, and currently there's no way to know
+	 * when it will be off the trampoline. If the trampoline is freed
+	 * too early, when the task runs again, it will be executing on freed
+	 * memory and crash.
+	 */
+#ifdef CONFIG_PREEMPT
 	/* Currently, only non dynamic ops can have a trampoline */
 	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
 		return;
+#endif
 
 	arch_ftrace_update_trampoline(ops);
 }
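
The comment block states the constraint that shapes the whole patch: with CONFIG_PREEMPT a task can be preempted mid-trampoline and stay off-CPU indefinitely, so there is no cheap point at which the pages are provably unused, and dynamic ops therefore keep the old no-trampoline behavior there. Spelling out the two configurations, the function above is equivalent to the following sketch (not the literal source):

#ifdef CONFIG_PREEMPT
static void ftrace_update_trampoline(struct ftrace_ops *ops)
{
	/* dynamic ops get no trampoline: it could never be freed safely */
	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
		return;
	arch_ftrace_update_trampoline(ops);
}
#else
static void ftrace_update_trampoline(struct ftrace_ops *ops)
{
	/* any ops may get a trampoline; ftrace_shutdown() can free it */
	arch_ftrace_update_trampoline(ops);
}
#endif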