path: root/kernel/trace/trace_functions.c
Diffstat (limited to 'kernel/trace/trace_functions.c')
-rw-r--r--  kernel/trace/trace_functions.c  36
1 file changed, 27 insertions(+), 9 deletions(-)
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index fdff65dff1bb..483162a9f908 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -13,6 +13,7 @@
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
+#include <linux/pstore.h>
 #include <linux/fs.h>
 
 #include "trace.h"
@@ -75,6 +76,14 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
 	preempt_enable_notrace();
 }
 
+/* Our two options */
+enum {
+	TRACE_FUNC_OPT_STACK	= 0x1,
+	TRACE_FUNC_OPT_PSTORE	= 0x2,
+};
+
+static struct tracer_flags func_flags;
+
 static void
 function_trace_call(unsigned long ip, unsigned long parent_ip,
 		    struct ftrace_ops *op, struct pt_regs *pt_regs)
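The hunk above hoists the option bits and adds a bare declaration of func_flags so that function_trace_call(), which sits earlier in the file than the initialized definition of func_flags (presumably pre-existing further down and unchanged, hence not in this diff), can read func_flags.val. C permits this: a file-scope tentative definition may precede the real initialized definition. A minimal stand-alone illustration with invented names:

struct flags { unsigned int val; };

static struct flags my_flags;			/* tentative definition */

static unsigned int read_val(void)
{
	return my_flags.val;			/* legal to use it already */
}

static struct flags my_flags = { .val = 0x2 };	/* the actual definition */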
@@ -100,6 +109,12 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
 	disabled = atomic_inc_return(&data->disabled);
 
 	if (likely(disabled == 1)) {
+		/*
+		 * So far tracing doesn't support multiple buffers, so
+		 * we make an explicit call for now.
+		 */
+		if (unlikely(func_flags.val & TRACE_FUNC_OPT_PSTORE))
+			pstore_ftrace_call(ip, parent_ip);
 		pc = preempt_count();
 		trace_function(tr, ip, parent_ip, flags, pc);
 	}
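As the in-code comment says, tracing cannot yet fan out to more than one buffer, so when the pstore bit is set the hot path hands each (ip, parent_ip) pair straight to pstore in addition to the normal trace_function() call. What pstore_ftrace_call() does with the pair is outside this patch (it lives in fs/pstore/ftrace.c); a rough hypothetical sketch of that side, where pstore_write_record() is an invented helper for illustration:

/* Hypothetical sketch only, not the actual fs/pstore/ftrace.c code:
 * the record goes to persistent storage so it survives a crash.
 */
struct pstore_ftrace_record {
	unsigned long ip;		/* traced function */
	unsigned long parent_ip;	/* its caller */
};

/* notrace keeps the handler itself out of the function tracer,
 * otherwise every traced call would recurse back in here.
 */
void notrace pstore_ftrace_call(unsigned long ip, unsigned long parent_ip)
{
	struct pstore_ftrace_record rec = {
		.ip		= ip,
		.parent_ip	= parent_ip,
	};

	pstore_write_record(&rec, sizeof(rec));	/* invented helper */
}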
@@ -162,15 +177,13 @@ static struct ftrace_ops trace_stack_ops __read_mostly =
 	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
-/* Our two options */
-enum {
-	TRACE_FUNC_OPT_STACK = 0x1,
-};
-
 static struct tracer_opt func_opts[] = {
 #ifdef CONFIG_STACKTRACE
 	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
 #endif
+#ifdef CONFIG_PSTORE_FTRACE
+	{ TRACER_OPT(func_pstore, TRACE_FUNC_OPT_PSTORE) },
+#endif
 	{ } /* Always set a last empty entry */
 };
 
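Each tracer_opt entry pairs an option name with a flag bit; TRACER_OPT is a small helper macro from kernel/trace/trace.h, roughly:

/* Paraphrased from kernel/trace/trace.h of this era: */
struct tracer_opt {
	const char	*name;	/* file name under tracing/options/ */
	u32		bit;	/* flag bit that file toggles */
};

#define TRACER_OPT(s, b)	.name = #s, .bit = b

While the function tracer is current, each entry appears as a boolean file in the tracing options directory, so the new bit should be togglable with something like echo 1 > /sys/kernel/debug/tracing/options/func_pstore (assuming debugfs is mounted at the usual place and CONFIG_PSTORE_FTRACE is enabled).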
@@ -208,10 +221,11 @@ static void tracing_stop_function_trace(void)
 
 static int func_set_flag(u32 old_flags, u32 bit, int set)
 {
-	if (bit == TRACE_FUNC_OPT_STACK) {
+	switch (bit) {
+	case TRACE_FUNC_OPT_STACK:
 		/* do nothing if already set */
 		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
-			return 0;
+			break;
 
 		if (set) {
 			unregister_ftrace_function(&trace_ops);
@@ -221,10 +235,14 @@ static int func_set_flag(u32 old_flags, u32 bit, int set)
 			register_ftrace_function(&trace_ops);
 		}
 
-		return 0;
+		break;
+	case TRACE_FUNC_OPT_PSTORE:
+		break;
+	default:
+		return -EINVAL;
 	}
 
-	return -EINVAL;
+	return 0;
 }
 
 static struct tracer function_trace __read_mostly =
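A bare break for TRACE_FUNC_OPT_PSTORE is enough because the generic options code records the bit in func_flags.val only after set_flag() returns 0, and the hot path in function_trace_call() tests func_flags.val directly on every call; returning -EINVAL from the new default case preserves the old rejection of unknown bits. A simplified sketch of the caller-side contract (the real logic lives in kernel/trace/trace.c; names abridged):

/* Simplified sketch, not the exact kernel/trace/trace.c code. */
static int set_tracer_option(struct tracer_flags *tf,
			     struct tracer_opt *opt, int set)
{
	int ret = func_set_flag(tf->val, opt->bit, set);

	if (ret)			/* e.g. -EINVAL for an unknown bit */
		return ret;

	if (set)
		tf->val |= opt->bit;	/* hot path sees it on the next call */
	else
		tf->val &= ~opt->bit;

	return 0;
}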