author    H. Peter Anvin <hpa@linux.intel.com>	2012-10-19 10:54:24 -0400
committer H. Peter Anvin <hpa@linux.intel.com>	2012-10-19 10:55:09 -0400
commit    4533d86270d7986e00594495dde9a109d6be27ae (patch)
tree      c2473cac653f7b98e5bd5e6475e63734be4b7644 /kernel/trace/trace_functions.c
parent    21c5e50e15b1abd797e62f18fd7f90b9cc004cbd (diff)
parent    5bc66170dc486556a1e36fd384463536573f4b82 (diff)

Merge commit '5bc66170dc486556a1e36fd384463536573f4b82' into x86/urgent

From Borislav Petkov <bp@amd64.org>:

Below is a RAS fix which reverts the addition of a sysfs attribute that,
post factum, we agreed is not needed. It should go in now: otherwise the
attribute ends up in 3.7 and is exposed to userspace, and removing it then
would be a lot harder.

This is done as a merge rather than a simple patch/cherry-pick because the
baseline for the patch was not in the previous x86/urgent.

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>

Diffstat (limited to 'kernel/trace/trace_functions.c')
-rw-r--r--	kernel/trace/trace_functions.c	29 ++++++++++-------------------
1 file changed, 10 insertions(+), 19 deletions(-)
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index a426f410c060..507a7a9630bf 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -13,7 +13,6 @@
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
-#include <linux/pstore.h>
 #include <linux/fs.h>
 
 #include "trace.h"
@@ -49,7 +48,8 @@ static void function_trace_start(struct trace_array *tr)
 }
 
 static void
-function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
+function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
+				 struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
 	struct trace_array *tr = func_trace;
 	struct trace_array_cpu *data;
@@ -75,16 +75,17 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
 	preempt_enable_notrace();
 }
 
-/* Our two options */
+/* Our option */
 enum {
 	TRACE_FUNC_OPT_STACK	= 0x1,
-	TRACE_FUNC_OPT_PSTORE	= 0x2,
 };
 
 static struct tracer_flags func_flags;
 
 static void
-function_trace_call(unsigned long ip, unsigned long parent_ip)
+function_trace_call(unsigned long ip, unsigned long parent_ip,
+		    struct ftrace_ops *op, struct pt_regs *pt_regs)
+
 {
 	struct trace_array *tr = func_trace;
 	struct trace_array_cpu *data;
@@ -106,12 +107,6 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 	disabled = atomic_inc_return(&data->disabled);
 
 	if (likely(disabled == 1)) {
-		/*
-		 * So far tracing doesn't support multiple buffers, so
-		 * we make an explicit call for now.
-		 */
-		if (unlikely(func_flags.val & TRACE_FUNC_OPT_PSTORE))
-			pstore_ftrace_call(ip, parent_ip);
 		pc = preempt_count();
 		trace_function(tr, ip, parent_ip, flags, pc);
 	}
@@ -121,7 +116,8 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 }
 
 static void
-function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
+function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
+			  struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
 	struct trace_array *tr = func_trace;
 	struct trace_array_cpu *data;
@@ -164,22 +160,19 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
 static struct ftrace_ops trace_ops __read_mostly =
 {
 	.func = function_trace_call,
-	.flags = FTRACE_OPS_FL_GLOBAL,
+	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static struct ftrace_ops trace_stack_ops __read_mostly =
 {
 	.func = function_stack_trace_call,
-	.flags = FTRACE_OPS_FL_GLOBAL,
+	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static struct tracer_opt func_opts[] = {
 #ifdef CONFIG_STACKTRACE
 	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
 #endif
-#ifdef CONFIG_PSTORE_FTRACE
-	{ TRACER_OPT(func_pstore, TRACE_FUNC_OPT_PSTORE) },
-#endif
 	{ } /* Always set a last empty entry */
 };
 
@@ -232,8 +225,6 @@ static int func_set_flag(u32 old_flags, u32 bit, int set)
 		}
 
 		break;
-	case TRACE_FUNC_OPT_PSTORE:
-		break;
 	default:
 		return -EINVAL;
 	}
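
The recurring change in the hunks above is the four-argument ftrace callback
prototype from the 3.7 cycle: each callback now also receives the ftrace_ops
it was registered with and a pt_regs pointer, and ops whose callbacks handle
(or cannot hit) recursion advertise that with FTRACE_OPS_FL_RECURSION_SAFE.
As a rough sketch of a callback written against that API — my_callback and
my_ops are illustrative names, not part of this patch:

	#include <linux/ftrace.h>

	/* Callback matching the four-argument ftrace_func_t prototype. */
	static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
					struct ftrace_ops *op, struct pt_regs *pt_regs)
	{
		/* Runs on every traced function; keep it fast and re-entrant. */
	}

	/*
	 * FTRACE_OPS_FL_RECURSION_SAFE declares that the callback protects
	 * itself against recursion, so the ftrace core need not wrap it in
	 * its own recursion guard.
	 */
	static struct ftrace_ops my_ops __read_mostly = {
		.func	= my_callback,
		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
	};

Such an ops struct is attached and detached with
register_ftrace_function(&my_ops) and unregister_ftrace_function(&my_ops),
the same calls func_set_flag() uses to swap trace_ops and trace_stack_ops.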