| author | Steven Rostedt (Red Hat) <rostedt@goodmis.org> | 2014-07-22 20:16:57 -0400 |
|---|---|---|
| committer | Steven Rostedt <rostedt@goodmis.org> | 2014-09-09 10:26:48 -0400 |
| commit | f1ff6348b30b3658d138f05643149706f99078ae (patch) | |
| tree | e35e085be010b5174f85b94939876d13a19eac4f /kernel/trace | |
| parent | 2ce7598c9a453e0acd0e07be7be3f5eb39608ebd (diff) | |
ftrace: Add separate function for non recursive callbacks
Instead of using the generic list function for callbacks that
are not recursive, call a new helper function from the mcount
trampoline, ftrace_ops_recurs_func(), that does the recursion
checking for the callback.

This eliminates an indirection and will also help future code
that uses dynamically allocated trampolines.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
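For context, here is a minimal, hypothetical sketch (not part of the commit) of the kind of callback this new path serves: an ftrace_ops whose handler does not set FTRACE_OPS_FL_RECURSION_SAFE and therefore relies on the core for recursion protection. The names my_trace_func, my_ops, my_tracer_init and my_tracer_exit are made up for illustration; register_ftrace_function()/unregister_ftrace_function() are the normal registration entry points. With this patch, when such an ops is the only one registered, the trampoline is pointed at ftrace_ops_recurs_func(), which performs the recursion check and then calls the handler directly instead of going through the generic list function.

```c
/* Hypothetical example module, assuming a kernel of this era (~3.17). */
#include <linux/module.h>
#include <linux/ftrace.h>

static void my_trace_func(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *regs)
{
        /* Trace work goes here; no recursion guard of its own. */
}

static struct ftrace_ops my_ops = {
        .func   = my_trace_func,
        /* FTRACE_OPS_FL_RECURSION_SAFE deliberately not set */
};

static int __init my_tracer_init(void)
{
        /*
         * As the only registered ops that is not recursion safe, the
         * trampoline will call ftrace_ops_recurs_func(), which wraps
         * my_trace_func() with the recursion check.
         */
        return register_ftrace_function(&my_ops);
}

static void __exit my_tracer_exit(void)
{
        unregister_ftrace_function(&my_ops);
}

module_init(my_tracer_init);
module_exit(my_tracer_exit);
MODULE_LICENSE("GPL");
```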
Diffstat (limited to 'kernel/trace')
| -rw-r--r-- | kernel/trace/ftrace.c | 33 |
1 file changed, 31 insertions(+), 2 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 5916a8e59e87..17b606362ab4 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -113,6 +113,9 @@ ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
 static struct ftrace_ops control_ops;
 
+static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
+                                   struct ftrace_ops *op, struct pt_regs *regs);
+
 #if ARCH_SUPPORTS_FTRACE_OPS
 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
                                  struct ftrace_ops *op, struct pt_regs *regs);
@@ -258,11 +261,18 @@ static void update_ftrace_function(void)
 	if (ftrace_ops_list == &ftrace_list_end ||
 	    (ftrace_ops_list->next == &ftrace_list_end &&
 	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
-	     (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
 	     !FTRACE_FORCE_LIST_FUNC)) {
 		/* Set the ftrace_ops that the arch callback uses */
 		set_function_trace_op = ftrace_ops_list;
-		func = ftrace_ops_list->func;
+		/*
+		 * If the func handles its own recursion, call it directly.
+		 * Otherwise call the recursion protected function that
+		 * will call the ftrace ops function.
+		 */
+		if (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE)
+			func = ftrace_ops_list->func;
+		else
+			func = ftrace_ops_recurs_func;
 	} else {
 		/* Just use the default ftrace_ops */
 		set_function_trace_op = &ftrace_list_end;
@@ -4827,6 +4837,25 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
 }
 #endif
 
+/*
+ * If there's only one function registered but it does not support
+ * recursion, this function will be called by the mcount trampoline.
+ * This function will handle recursion protection.
+ */
+static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
+                                   struct ftrace_ops *op, struct pt_regs *regs)
+{
+	int bit;
+
+	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
+	if (bit < 0)
+		return;
+
+	op->func(ip, parent_ip, op, regs);
+
+	trace_clear_recursion(bit);
+}
+
 static void clear_ftrace_swapper(void)
 {
 	struct task_struct *p;
