Diffstat (limited to 'kernel')
-rw-r--r--  kernel/trace/trace.c            |  7
-rw-r--r--  kernel/trace/trace_functions.c  | 36
2 files changed, 31 insertions, 12 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index a120f98c4112..5c38c81496ce 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3187,10 +3187,10 @@ static int tracing_set_tracer(const char *buf)
 	}
 	destroy_trace_option_files(topts);
 
-	current_trace = t;
+	current_trace = &nop_trace;
 
-	topts = create_trace_option_files(current_trace);
-	if (current_trace->use_max_tr) {
+	topts = create_trace_option_files(t);
+	if (t->use_max_tr) {
 		int cpu;
 		/* we need to make per cpu buffer sizes equivalent */
 		for_each_tracing_cpu(cpu) {
@@ -3210,6 +3210,7 @@ static int tracing_set_tracer(const char *buf)
 			goto out;
 	}
 
+	current_trace = t;
 	trace_branch_enable(tr);
  out:
 	mutex_unlock(&trace_types_lock);
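
The two trace.c hunks above reorder tracing_set_tracer(): rather than pointing current_trace at the incoming tracer before its option files and per-cpu buffers are ready, the function parks on nop_trace for the setup window and only commits to t once setup has succeeded, presumably so no tracer callback can observe a half-initialized tracer. A minimal sketch of the resulting flow, assuming the code elided between the two hunks (tracer init and its error handling) is unchanged:

	destroy_trace_option_files(topts);

	current_trace = &nop_trace;		/* park on the no-op tracer */

	topts = create_trace_option_files(t);
	if (t->use_max_tr) {
		/* make per cpu buffer sizes equivalent; failure jumps to out */
	}

	/* ... tracer init, elided between the two hunks ... */

	current_trace = t;			/* commit only after setup succeeded */
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);
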
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index c7b0c6a7db09..a426f410c060 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -13,6 +13,7 @@
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
+#include <linux/pstore.h>
 #include <linux/fs.h>
 
 #include "trace.h"
@@ -74,6 +75,14 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
 	preempt_enable_notrace();
 }
 
+/* Our two options */
+enum {
+	TRACE_FUNC_OPT_STACK	= 0x1,
+	TRACE_FUNC_OPT_PSTORE	= 0x2,
+};
+
+static struct tracer_flags func_flags;
+
 static void
 function_trace_call(unsigned long ip, unsigned long parent_ip)
 {
@@ -97,6 +106,12 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 	disabled = atomic_inc_return(&data->disabled);
 
 	if (likely(disabled == 1)) {
+		/*
+		 * So far tracing doesn't support multiple buffers, so
+		 * we make an explicit call for now.
+		 */
+		if (unlikely(func_flags.val & TRACE_FUNC_OPT_PSTORE))
+			pstore_ftrace_call(ip, parent_ip);
 		pc = preempt_count();
 		trace_function(tr, ip, parent_ip, flags, pc);
 	}
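
The pstore hook runs inside the disabled == 1 guard, so it executes with preemption disabled and is skipped on recursion into the tracer; the unlikely() hint keeps the check cheap when the option is off. Note the call is not wrapped in CONFIG_PSTORE_FTRACE here, so <linux/pstore.h> presumably provides a no-op stub for pstore_ftrace_call() when pstore's ftrace support is compiled out. A hypothetical sketch of such a stub (an assumption; it is not part of this diff):

	/* assumed fallback in <linux/pstore.h> when CONFIG_PSTORE_FTRACE=n */
	#ifndef CONFIG_PSTORE_FTRACE
	static inline void pstore_ftrace_call(unsigned long ip,
					      unsigned long parent_ip)
	{
	}
	#endif
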
@@ -158,15 +173,13 @@ static struct ftrace_ops trace_stack_ops __read_mostly =
 	.flags = FTRACE_OPS_FL_GLOBAL,
 };
 
-/* Our two options */
-enum {
-	TRACE_FUNC_OPT_STACK = 0x1,
-};
-
 static struct tracer_opt func_opts[] = {
 #ifdef CONFIG_STACKTRACE
 	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
 #endif
+#ifdef CONFIG_PSTORE_FTRACE
+	{ TRACER_OPT(func_pstore, TRACE_FUNC_OPT_PSTORE) },
+#endif
 	{ } /* Always set a last empty entry */
 };
 
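Each tracer_opt entry maps a debugfs option name to one bit of the tracer's flag word, so adding the func_pstore entry (guarded by CONFIG_PSTORE_FTRACE) is all that is needed for the option to show up under tracing/options/. The forward declaration of func_flags added earlier lets function_trace_call() test the new bit; the definition that ties the flag word to this table lives later in the file, untouched by the diff, roughly:

	static struct tracer_flags func_flags = {
		.val  = 0,		/* all options off by default */
		.opts = func_opts,
	};
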
@@ -204,10 +217,11 @@ static void tracing_stop_function_trace(void)
 
 static int func_set_flag(u32 old_flags, u32 bit, int set)
 {
-	if (bit == TRACE_FUNC_OPT_STACK) {
+	switch (bit) {
+	case TRACE_FUNC_OPT_STACK:
 		/* do nothing if already set */
 		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
-			return 0;
+			break;
 
 		if (set) {
 			unregister_ftrace_function(&trace_ops);
@@ -217,10 +231,14 @@ static int func_set_flag(u32 old_flags, u32 bit, int set)
 			register_ftrace_function(&trace_ops);
 		}
 
-		return 0;
+		break;
+	case TRACE_FUNC_OPT_PSTORE:
+		break;
+	default:
+		return -EINVAL;
 	}
 
-	return -EINVAL;
+	return 0;
 }
 
 static struct tracer function_trace __read_mostly =
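
Converting func_set_flag() from an if to a switch lets TRACE_FUNC_OPT_PSTORE be accepted with no work to do: unlike the stack-trace option, it requires no ftrace_ops re-registration, since function_trace_call() consults func_flags.val on every call. The new case simply breaks, the common paths now fall through to return 0, and -EINVAL is kept for unknown bits. With the function tracer active, the option would then be toggled like any other tracer flag, e.g. by writing 1 to tracing/options/func_pstore under debugfs.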