Diffstat (limited to 'kernel/trace/trace_functions.c')
 kernel/trace/trace_functions.c | 29 ++++++++++-------------------
 1 file changed, 10 insertions(+), 19 deletions(-)
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index a426f410c060..507a7a9630bf 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -13,7 +13,6 @@
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
-#include <linux/pstore.h>
 #include <linux/fs.h>
 
 #include "trace.h"
@@ -49,7 +48,8 @@ static void function_trace_start(struct trace_array *tr)
 }
 
 static void
-function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
+function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
+				 struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
 	struct trace_array *tr = func_trace;
 	struct trace_array_cpu *data;
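For context, the two extra parameters follow the updated ftrace callback prototype this series introduces: every callback now receives the ftrace_ops it was registered with and, where supported, a pt_regs snapshot. A minimal sketch of a caller of the new prototype, with hypothetical names (my_callback, my_ops, my_tracer_init) that are not from this patch:

#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/module.h>

/* Hypothetical callback using the new four-argument prototype. */
static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	/*
	 * ip is the traced function, parent_ip its caller; op identifies
	 * which ftrace_ops fired, so one callback can serve several
	 * registrations. pt_regs may be NULL unless the ops asked for regs.
	 */
}

static struct ftrace_ops my_ops __read_mostly = {
	.func = my_callback,
};

static int __init my_tracer_init(void)
{
	/* With no filter set, this traces every kernel function. */
	return register_ftrace_function(&my_ops);
}
module_init(my_tracer_init);
MODULE_LICENSE("GPL");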
@@ -75,16 +75,17 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
 	preempt_enable_notrace();
 }
 
-/* Our two options */
+/* Our option */
 enum {
 	TRACE_FUNC_OPT_STACK = 0x1,
-	TRACE_FUNC_OPT_PSTORE = 0x2,
 };
 
 static struct tracer_flags func_flags;
 
 static void
-function_trace_call(unsigned long ip, unsigned long parent_ip)
+function_trace_call(unsigned long ip, unsigned long parent_ip,
+		    struct ftrace_ops *op, struct pt_regs *pt_regs)
+
 {
 	struct trace_array *tr = func_trace;
 	struct trace_array_cpu *data;
@@ -106,12 +107,6 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 	disabled = atomic_inc_return(&data->disabled);
 
 	if (likely(disabled == 1)) {
-		/*
-		 * So far tracing doesn't support multiple buffers, so
-		 * we make an explicit call for now.
-		 */
-		if (unlikely(func_flags.val & TRACE_FUNC_OPT_PSTORE))
-			pstore_ftrace_call(ip, parent_ip);
 		pc = preempt_count();
 		trace_function(tr, ip, parent_ip, flags, pc);
 	}
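What remains in this hunk is the tracer's reentrancy guard: a per-CPU disabled counter is incremented around the record, and the body runs only on the first, non-nested entry. A minimal sketch of that pattern, with a hypothetical my_data standing in for the tracer's per-CPU data:

#include <linux/atomic.h>
#include <linux/ftrace.h>
#include <linux/preempt.h>

/* Hypothetical stand-in for the tracer's per-CPU bookkeeping. */
static struct {
	atomic_t disabled;
} my_data;

static void notrace guarded_call(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	long disabled;

	preempt_disable_notrace();
	disabled = atomic_inc_return(&my_data.disabled);
	if (likely(disabled == 1)) {
		/* First, non-nested entry: safe to record the event here. */
	}
	atomic_dec(&my_data.disabled);
	preempt_enable_notrace();
}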
@@ -121,7 +116,8 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 }
 
 static void
-function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
+function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
+			  struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
 	struct trace_array *tr = func_trace;
 	struct trace_array_cpu *data;
@@ -164,22 +160,19 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
 static struct ftrace_ops trace_ops __read_mostly =
 {
 	.func = function_trace_call,
-	.flags = FTRACE_OPS_FL_GLOBAL,
+	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static struct ftrace_ops trace_stack_ops __read_mostly =
 {
 	.func = function_stack_trace_call,
-	.flags = FTRACE_OPS_FL_GLOBAL,
+	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static struct tracer_opt func_opts[] = {
 #ifdef CONFIG_STACKTRACE
 	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
 #endif
-#ifdef CONFIG_PSTORE_FTRACE
-	{ TRACER_OPT(func_pstore, TRACE_FUNC_OPT_PSTORE) },
-#endif
 	{ } /* Always set a last empty entry */
 };
 
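FTRACE_OPS_FL_RECURSION_SAFE, added to both ops here, declares that the callback already protects itself against recursion (as the disabled-counter sketch above does), so the ftrace core does not have to wrap it in its own recursion check. Assuming the 3.6-era flag semantics, an ops would advertise this like so (reusing the hypothetical guarded_call from the sketch above):

/* Hypothetical ops whose callback handles its own recursion. */
static struct ftrace_ops my_safe_ops __read_mostly = {
	.func	= guarded_call,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};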
@@ -232,8 +225,6 @@ static int func_set_flag(u32 old_flags, u32 bit, int set)
 		}
 
 		break;
-	case TRACE_FUNC_OPT_PSTORE:
-		break;
 	default:
 		return -EINVAL;
 	}
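With the pstore case gone, func_set_flag() only has TRACE_FUNC_OPT_STACK left to handle; stack tracing is still toggled at run time by writing to options/func_stack_trace under the tracing debugfs directory while the function tracer is active. Persistent-store function tracing is no longer a function-tracer flag; per this series it is presumably controlled through pstore's own interface instead.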
