aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace/trace_functions.c
diff options
context:
space:
mode:
authorIngo Molnar <mingo@kernel.org>2012-08-21 05:27:00 -0400
committerIngo Molnar <mingo@kernel.org>2012-08-21 05:27:00 -0400
commitbcada3d4b8c96b8792c2306f363992ca5ab9da42 (patch)
treee420679a5db6ea4e1694eef57f9abb6acac8d4d3 /kernel/trace/trace_functions.c
parent26198c21d1b286a084fe5d514a30bc7e6c712a34 (diff)
parent000078bc3ee69efb1124b8478c7527389a826074 (diff)
Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core
Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo: * Fix include order for bison/flex-generated C files, from Ben Hutchings * Build fixes and documentation corrections from David Ahern * Group parsing support, from Jiri Olsa * UI/gtk refactorings and improvements from Namhyung Kim * NULL deref fix for perf script, from Namhyung Kim * Assorted cleanups from Robert Richter * Let O= handle relative paths, from Steven Rostedt * perf script python fixes, from Feng Tang. * Improve 'perf lock' error message when the needed tracepoints are not present, from David Ahern. * Initial bash completion support, from Frederic Weisbecker * Allow building without libelf, from Namhyung Kim. * Support DWARF CFI based unwind to have callchains when %bp based unwinding is not possible, from Jiri Olsa. * Symbol resolution fixes, while fixing support PPC64 files with an .opt ELF section was the end goal, several fixes for code that handles all architectures and cleanups are included, from Cody Schafer. * Add a description for the JIT interface, from Andi Kleen. * Assorted fixes for Documentation and build in 32 bit, from Robert Richter * Add support for non-tracepoint events in perf script python, from Feng Tang * Cache the libtraceevent event_format associated to each evsel early, so that we avoid relookups, i.e. calling pevent_find_event repeatedly when processing tracepoint events. [ This is to reduce the surface contact with libtraceevents and make clear what is that the perf tools needs from that lib: so far parsing the common and per event fields. ] Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com> Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/trace/trace_functions.c')
-rw-r--r--kernel/trace/trace_functions.c36
1 files changed, 27 insertions, 9 deletions
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index fdff65dff1bb..483162a9f908 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -13,6 +13,7 @@
13#include <linux/debugfs.h> 13#include <linux/debugfs.h>
14#include <linux/uaccess.h> 14#include <linux/uaccess.h>
15#include <linux/ftrace.h> 15#include <linux/ftrace.h>
16#include <linux/pstore.h>
16#include <linux/fs.h> 17#include <linux/fs.h>
17 18
18#include "trace.h" 19#include "trace.h"
@@ -75,6 +76,14 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
75 preempt_enable_notrace(); 76 preempt_enable_notrace();
76} 77}
77 78
79/* Our two options */
80enum {
81 TRACE_FUNC_OPT_STACK = 0x1,
82 TRACE_FUNC_OPT_PSTORE = 0x2,
83};
84
85static struct tracer_flags func_flags;
86
78static void 87static void
79function_trace_call(unsigned long ip, unsigned long parent_ip, 88function_trace_call(unsigned long ip, unsigned long parent_ip,
80 struct ftrace_ops *op, struct pt_regs *pt_regs) 89 struct ftrace_ops *op, struct pt_regs *pt_regs)
@@ -100,6 +109,12 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
100 disabled = atomic_inc_return(&data->disabled); 109 disabled = atomic_inc_return(&data->disabled);
101 110
102 if (likely(disabled == 1)) { 111 if (likely(disabled == 1)) {
112 /*
113 * So far tracing doesn't support multiple buffers, so
114 * we make an explicit call for now.
115 */
116 if (unlikely(func_flags.val & TRACE_FUNC_OPT_PSTORE))
117 pstore_ftrace_call(ip, parent_ip);
103 pc = preempt_count(); 118 pc = preempt_count();
104 trace_function(tr, ip, parent_ip, flags, pc); 119 trace_function(tr, ip, parent_ip, flags, pc);
105 } 120 }
@@ -162,15 +177,13 @@ static struct ftrace_ops trace_stack_ops __read_mostly =
162 .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE, 177 .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
163}; 178};
164 179
165/* Our two options */
166enum {
167 TRACE_FUNC_OPT_STACK = 0x1,
168};
169
170static struct tracer_opt func_opts[] = { 180static struct tracer_opt func_opts[] = {
171#ifdef CONFIG_STACKTRACE 181#ifdef CONFIG_STACKTRACE
172 { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) }, 182 { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
173#endif 183#endif
184#ifdef CONFIG_PSTORE_FTRACE
185 { TRACER_OPT(func_pstore, TRACE_FUNC_OPT_PSTORE) },
186#endif
174 { } /* Always set a last empty entry */ 187 { } /* Always set a last empty entry */
175}; 188};
176 189
@@ -208,10 +221,11 @@ static void tracing_stop_function_trace(void)
208 221
209static int func_set_flag(u32 old_flags, u32 bit, int set) 222static int func_set_flag(u32 old_flags, u32 bit, int set)
210{ 223{
211 if (bit == TRACE_FUNC_OPT_STACK) { 224 switch (bit) {
225 case TRACE_FUNC_OPT_STACK:
212 /* do nothing if already set */ 226 /* do nothing if already set */
213 if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK)) 227 if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
214 return 0; 228 break;
215 229
216 if (set) { 230 if (set) {
217 unregister_ftrace_function(&trace_ops); 231 unregister_ftrace_function(&trace_ops);
@@ -221,10 +235,14 @@ static int func_set_flag(u32 old_flags, u32 bit, int set)
221 register_ftrace_function(&trace_ops); 235 register_ftrace_function(&trace_ops);
222 } 236 }
223 237
224 return 0; 238 break;
239 case TRACE_FUNC_OPT_PSTORE:
240 break;
241 default:
242 return -EINVAL;
225 } 243 }
226 244
227 return -EINVAL; 245 return 0;
228} 246}
229 247
230static struct tracer function_trace __read_mostly = 248static struct tracer function_trace __read_mostly =