author    Steven Rostedt (Red Hat) <rostedt@goodmis.org>  2015-02-19 09:56:14 -0500
committer Steven Rostedt <rostedt@goodmis.org>            2015-04-02 15:43:33 -0400
commit    00ccbf2f5b7580cd7dcdaeda84828d14f0cba3c9 (patch)
tree      f72ca4ef6ea59bdaff123d6da6665fbef737b1c9 /kernel/trace
parent    d631c8cceb1d1d06f372878935949d421585186b (diff)
ftrace/x86: Let dynamic trampolines call ops->func even for dynamic fops
Dynamically allocated trampolines call ftrace_ops_get_func() to get the
function which they should call. For dynamic fops (those with the
FTRACE_OPS_FL_DYNAMIC flag set), ftrace_ops_list_func() is always returned.
That is reasonable for static trampolines, but it defeats the main advantage
of dynamic ones: avoiding a walk through the list of all registered callbacks
for functions that are traced by only a single callback.

We can fix this by having ftrace_ops_get_func() return ops->func (or its
recursion-safe wrapper) whenever possible, even for dynamic trampolines.
Note that dynamic trampolines are not allowed for dynamic fops if
CONFIG_PREEMPT=y.

Link: http://lkml.kernel.org/r/alpine.LNX.2.00.1501291023000.25445@pobox.suse.cz
Link: http://lkml.kernel.org/r/1424357773-13536-1-git-send-email-mbenes@suse.cz

Reported-by: Miroslav Benes <mbenes@suse.cz>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
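For context, ftrace sets FTRACE_OPS_FL_DYNAMIC itself at registration time
when the ops does not live in core kernel data (e.g. it belongs to a module).
A minimal sketch of such a caller, using hypothetical names (my_callback,
my_ops) and the pt_regs-based callback signature of this era:

#include <linux/ftrace.h>

/* Hypothetical example ops; not part of this commit. */
static void my_callback(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct pt_regs *regs)
{
	/* Runs on entry to every function this ops traces. */
}

static struct ftrace_ops my_ops = {
	.func = my_callback,
	/*
	 * FTRACE_OPS_FL_DYNAMIC is set by ftrace itself for ops like
	 * this one (e.g. module-allocated). With this patch, a dynamic
	 * trampoline built for my_ops can jump to my_callback (or its
	 * recursion-safe wrapper) directly instead of going through
	 * ftrace_ops_list_func().
	 */
};

/* Registered with: register_ftrace_function(&my_ops); */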
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/ftrace.c  22
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 4f228024055b..d01d238d8ef4 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -249,6 +249,19 @@ static void update_function_graph_func(void);
 static inline void update_function_graph_func(void) { }
 #endif
 
+
+static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
+{
+	/*
+	 * If this is a dynamic ops or we force list func,
+	 * then it needs to call the list anyway.
+	 */
+	if (ops->flags & FTRACE_OPS_FL_DYNAMIC || FTRACE_FORCE_LIST_FUNC)
+		return ftrace_ops_list_func;
+
+	return ftrace_ops_get_func(ops);
+}
+
 static void update_ftrace_function(void)
 {
 	ftrace_func_t func;
@@ -270,7 +283,7 @@ static void update_ftrace_function(void)
 	 * then have the mcount trampoline call the function directly.
 	 */
 	} else if (ftrace_ops_list->next == &ftrace_list_end) {
-		func = ftrace_ops_get_func(ftrace_ops_list);
+		func = ftrace_ops_get_list_func(ftrace_ops_list);
 
 	} else {
 		/* Just use the default ftrace_ops */
@@ -5209,13 +5222,6 @@ static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
 {
 	/*
-	 * If this is a dynamic ops or we force list func,
-	 * then it needs to call the list anyway.
-	 */
-	if (ops->flags & FTRACE_OPS_FL_DYNAMIC || FTRACE_FORCE_LIST_FUNC)
-		return ftrace_ops_list_func;
-
-	/*
 	 * If the func handles its own recursion, call it directly.
 	 * Otherwise call the recursion protected function that
 	 * will call the ftrace ops function.
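After this change, the selection inside ftrace_ops_get_func() reduces to the
recursion check alone; the dynamic-ops test survives only in
ftrace_ops_get_list_func(), which update_ftrace_function() uses when picking
the global callback. A simplified sketch of the resulting logic (not verbatim
kernel code):

	/* Post-patch ftrace_ops_get_func(), simplified. */
	if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE))
		return ftrace_ops_recurs_func;	/* wrapper adds recursion protection */

	return ops->func;	/* callback handles its own recursion */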