aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace/trace_functions.c
diff options
context:
space:
mode:
authorAnton Vorontsov <anton.vorontsov@linaro.org>2012-07-17 17:26:15 -0400
committerAnton Vorontsov <anton.vorontsov@linaro.org>2012-09-07 01:16:58 -0400
commit65f8c95e46a1827ae8bbc52a817ea308dd7d65ae (patch)
treeadc856e8b50441b055350d8f1d83e3f641c77456 /kernel/trace/trace_functions.c
parentb4a871bce619dc5ca03cc6c78e1c467ceacb8e7e (diff)
pstore/ftrace: Convert to its own enable/disable debugfs knob
With this patch we no longer reuse function tracer infrastructure, now we register our own tracer back-end via a debugfs knob. It's a bit more code, but that is the only downside. On the bright side we have: - Ability to make persistent_ram module removable (when needed, we can move ftrace_ops struct into a module). Note that persistent_ram is still not removable for other reasons, but with this patch it's just one thing less to worry about; - Pstore part is more isolated from the generic function tracer. We tried it already by registering our own tracer in available_tracers, but that way we're loosing ability to see the traces while we record them to pstore. This solution is somewhere in the middle: we only register "internal ftracer" back-end, but not the "front-end"; - When there is only pstore tracing enabled, the kernel will only write to the pstore buffer, omitting function tracer buffer (which, of course, still can be enabled via 'echo function > current_tracer'). Suggested-by: Steven Rostedt <rostedt@goodmis.org> Signed-off-by: Anton Vorontsov <anton.vorontsov@linaro.org>
Diffstat (limited to 'kernel/trace/trace_functions.c')
-rw-r--r--kernel/trace/trace_functions.c15
1 file changed, 1 insertion, 14 deletions
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index a426f410c060..0ad83e3929d1 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -13,7 +13,6 @@
13#include <linux/debugfs.h> 13#include <linux/debugfs.h>
14#include <linux/uaccess.h> 14#include <linux/uaccess.h>
15#include <linux/ftrace.h> 15#include <linux/ftrace.h>
16#include <linux/pstore.h>
17#include <linux/fs.h> 16#include <linux/fs.h>
18 17
19#include "trace.h" 18#include "trace.h"
@@ -75,10 +74,9 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
75 preempt_enable_notrace(); 74 preempt_enable_notrace();
76} 75}
77 76
78/* Our two options */ 77/* Our option */
79enum { 78enum {
80 TRACE_FUNC_OPT_STACK = 0x1, 79 TRACE_FUNC_OPT_STACK = 0x1,
81 TRACE_FUNC_OPT_PSTORE = 0x2,
82}; 80};
83 81
84static struct tracer_flags func_flags; 82static struct tracer_flags func_flags;
@@ -106,12 +104,6 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
106 disabled = atomic_inc_return(&data->disabled); 104 disabled = atomic_inc_return(&data->disabled);
107 105
108 if (likely(disabled == 1)) { 106 if (likely(disabled == 1)) {
109 /*
110 * So far tracing doesn't support multiple buffers, so
111 * we make an explicit call for now.
112 */
113 if (unlikely(func_flags.val & TRACE_FUNC_OPT_PSTORE))
114 pstore_ftrace_call(ip, parent_ip);
115 pc = preempt_count(); 107 pc = preempt_count();
116 trace_function(tr, ip, parent_ip, flags, pc); 108 trace_function(tr, ip, parent_ip, flags, pc);
117 } 109 }
@@ -177,9 +169,6 @@ static struct tracer_opt func_opts[] = {
177#ifdef CONFIG_STACKTRACE 169#ifdef CONFIG_STACKTRACE
178 { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) }, 170 { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
179#endif 171#endif
180#ifdef CONFIG_PSTORE_FTRACE
181 { TRACER_OPT(func_pstore, TRACE_FUNC_OPT_PSTORE) },
182#endif
183 { } /* Always set a last empty entry */ 172 { } /* Always set a last empty entry */
184}; 173};
185 174
@@ -232,8 +221,6 @@ static int func_set_flag(u32 old_flags, u32 bit, int set)
232 } 221 }
233 222
234 break; 223 break;
235 case TRACE_FUNC_OPT_PSTORE:
236 break;
237 default: 224 default:
238 return -EINVAL; 225 return -EINVAL;
239 } 226 }