author		Ingo Molnar <mingo@kernel.org>	2012-08-21 05:27:00 -0400
committer	Ingo Molnar <mingo@kernel.org>	2012-08-21 05:27:00 -0400
commit		bcada3d4b8c96b8792c2306f363992ca5ab9da42 (patch)
tree		e420679a5db6ea4e1694eef57f9abb6acac8d4d3 /kernel/trace
parent		26198c21d1b286a084fe5d514a30bc7e6c712a34 (diff)
parent		000078bc3ee69efb1124b8478c7527389a826074 (diff)
Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core
Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

* Fix include order for bison/flex-generated C files, from Ben Hutchings
* Build fixes and documentation corrections, from David Ahern
* Group parsing support, from Jiri Olsa
* UI/gtk refactorings and improvements, from Namhyung Kim
* NULL deref fix for perf script, from Namhyung Kim
* Assorted cleanups, from Robert Richter
* Let 'make O=' builds handle relative paths, from Steven Rostedt
* perf script python fixes, from Feng Tang
* Improve 'perf lock' error message when the needed tracepoints are not present, from David Ahern
* Initial bash completion support, from Frederic Weisbecker
* Allow building without libelf, from Namhyung Kim
* Support DWARF CFI based unwinding, to get callchains when %bp based unwinding is not possible, from Jiri Olsa
* Symbol resolution fixes: while fixing support for PPC64 files with an .opd ELF section was the end goal, several fixes for code that handles all architectures and cleanups are included, from Cody Schafer
* Add a description for the JIT interface, from Andi Kleen
* Assorted fixes for Documentation and the 32-bit build, from Robert Richter
* Add support for non-tracepoint events in perf script python, from Feng Tang
* Cache the libtraceevent event_format associated with each evsel early, so that we avoid re-lookups, i.e. calling pevent_find_event repeatedly when processing tracepoint events. [ This is to reduce the surface contact with libtraceevent and make clear what the perf tools need from that lib: so far, parsing the common and per-event fields. ]

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
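The event_format caching mentioned in the last bullet is essentially memoization keyed on the tracepoint id. A minimal sketch in C, assuming libtraceevent's pevent_find_event() and a hypothetical trimmed-down evsel (the field names here are illustrative, not the exact perf tools ones):

	#include <stddef.h>
	#include "event-parse.h"	/* libtraceevent: struct pevent, pevent_find_event() */

	/* Hypothetical cut-down evsel carrying a cached format pointer. */
	struct evsel {
		int			tp_id;		/* tracepoint event id */
		struct event_format	*tp_format;	/* cached on first lookup */
	};

	/* Look the format up once; every later sample reuses the pointer. */
	static struct event_format *evsel_format(struct evsel *evsel,
						 struct pevent *pevent)
	{
		if (evsel->tp_format == NULL)
			evsel->tp_format = pevent_find_event(pevent, evsel->tp_id);
		return evsel->tp_format;
	}

The actual series resolves the format once at evsel creation time rather than lazily, but the effect is the same: pevent_find_event() stops being called per processed event.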
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/trace.c			 7
-rw-r--r--	kernel/trace/trace_event_perf.c		 2
-rw-r--r--	kernel/trace/trace_functions.c		36
-rw-r--r--	kernel/trace/trace_kprobe.c		 6
-rw-r--r--	kernel/trace/trace_syscalls.c		 4
-rw-r--r--	kernel/trace/trace_uprobe.c		 2
6 files changed, 39 insertions, 18 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index a120f98c4112..5c38c81496ce 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3187,10 +3187,10 @@ static int tracing_set_tracer(const char *buf)
 	}
 	destroy_trace_option_files(topts);
 
-	current_trace = t;
+	current_trace = &nop_trace;
 
-	topts = create_trace_option_files(current_trace);
-	if (current_trace->use_max_tr) {
+	topts = create_trace_option_files(t);
+	if (t->use_max_tr) {
 		int cpu;
 		/* we need to make per cpu buffer sizes equivalent */
 		for_each_tracing_cpu(cpu) {
@@ -3210,6 +3210,7 @@ static int tracing_set_tracer(const char *buf)
 		goto out;
 	}
 
+	current_trace = t;
 	trace_branch_enable(tr);
  out:
 	mutex_unlock(&trace_types_lock);
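The two trace.c hunks above belong together: tracing_set_tracer() now parks current_trace on nop_trace while the per-cpu max_tr buffers are resized, and only publishes the new tracer once every step has succeeded, so a resize failure that jumps to out leaves the system tracing with nop rather than with a tracer whose snapshot buffers are inconsistent. The same publish-after-init ordering in isolation, as a self-contained sketch (all names hypothetical, not kernel code):

	#include <errno.h>

	struct tracer {
		const char *name;
		int use_max_tr;
	};

	static struct tracer nop_trace = { .name = "nop", .use_max_tr = 0 };
	static struct tracer *current_trace = &nop_trace;

	/* Stand-in for resizing the per-cpu snapshot buffers; may fail. */
	static int resize_buffers_for(struct tracer *t)
	{
		(void)t;
		return 0;
	}

	static int set_tracer(struct tracer *t)
	{
		current_trace = &nop_trace;	/* run as nop while reconfiguring */

		if (t->use_max_tr && resize_buffers_for(t) < 0)
			return -ENOMEM;		/* failure leaves nop published */

		current_trace = t;		/* publish only after full success */
		return 0;
	}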
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 9824419c8404..84b1e045faba 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -282,7 +282,7 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
 
 	head = this_cpu_ptr(event_function.perf_events);
 	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
-			      1, &regs, head);
+			      1, &regs, head, NULL);
 
 #undef ENTRY_SIZE
 }
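This is the first of several mechanical call-site updates in this merge (the trace_kprobe.c, trace_syscalls.c and trace_uprobe.c hunks below are the same change): perf_trace_buf_submit() grew a trailing struct task_struct *task argument so a trace event can be accounted to a specific target task, and passing NULL keeps the previous current-context behavior. From memory, the inline helper in include/linux/ftrace_event.h reads roughly as follows after this series; treat the exact parameter types as an assumption:

	/* Approximate shape of the widened helper (assumed, from memory). */
	static inline void
	perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
			      u64 count, struct pt_regs *regs, void *head,
			      struct task_struct *task)
	{
		perf_tp_event(addr, count, raw_data, size, regs, head, rctx, task);
	}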
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index fdff65dff1bb..483162a9f908 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -13,6 +13,7 @@
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
+#include <linux/pstore.h>
 #include <linux/fs.h>
 
 #include "trace.h"
@@ -75,6 +76,14 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
 	preempt_enable_notrace();
 }
 
+/* Our two options */
+enum {
+	TRACE_FUNC_OPT_STACK = 0x1,
+	TRACE_FUNC_OPT_PSTORE = 0x2,
+};
+
+static struct tracer_flags func_flags;
+
 static void
 function_trace_call(unsigned long ip, unsigned long parent_ip,
 		    struct ftrace_ops *op, struct pt_regs *pt_regs)
@@ -100,6 +109,12 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
 	disabled = atomic_inc_return(&data->disabled);
 
 	if (likely(disabled == 1)) {
+		/*
+		 * So far tracing doesn't support multiple buffers, so
+		 * we make an explicit call for now.
+		 */
+		if (unlikely(func_flags.val & TRACE_FUNC_OPT_PSTORE))
+			pstore_ftrace_call(ip, parent_ip);
 		pc = preempt_count();
 		trace_function(tr, ip, parent_ip, flags, pc);
 	}
@@ -162,15 +177,13 @@ static struct ftrace_ops trace_stack_ops __read_mostly =
 	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
-/* Our two options */
-enum {
-	TRACE_FUNC_OPT_STACK = 0x1,
-};
-
 static struct tracer_opt func_opts[] = {
 #ifdef CONFIG_STACKTRACE
 	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
 #endif
+#ifdef CONFIG_PSTORE_FTRACE
+	{ TRACER_OPT(func_pstore, TRACE_FUNC_OPT_PSTORE) },
+#endif
 	{ } /* Always set a last empty entry */
 };
 
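For context on how the option table and the forward-declared func_flags connect: struct tracer_flags pairs the live bitmask (val) that function_trace_call() tests with the tracer_opt array the tracing core exposes under the options/ debugfs directory. The initializer is untouched by this patch and not shown in the diff; reproduced from memory of trace_functions.c, so treat it as an assumption:

	static struct tracer_flags func_flags = {
		.val = 0, /* By default: all flags disabled */
		.opts = func_opts
	};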
@@ -208,10 +221,11 @@ static void tracing_stop_function_trace(void)
 
 static int func_set_flag(u32 old_flags, u32 bit, int set)
 {
-	if (bit == TRACE_FUNC_OPT_STACK) {
+	switch (bit) {
+	case TRACE_FUNC_OPT_STACK:
 		/* do nothing if already set */
 		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
-			return 0;
+			break;
 
 		if (set) {
 			unregister_ftrace_function(&trace_ops);
@@ -221,10 +235,14 @@ static int func_set_flag(u32 old_flags, u32 bit, int set)
 		register_ftrace_function(&trace_ops);
 	}
 
-	return 0;
+		break;
+	case TRACE_FUNC_OPT_PSTORE:
+		break;
+	default:
+		return -EINVAL;
 	}
 
-	return -EINVAL;
+	return 0;
 }
 
 static struct tracer function_trace __read_mostly =
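The switch form makes TRACE_FUNC_OPT_PSTORE a recognized bit that needs no work at toggle time (the hot path tests func_flags.val on every function hit), while unknown bits now fall through to -EINVAL; func_stack_trace remains the only option that swaps ftrace_ops registrations. These callbacks reach the core through the tracer definition that begins at the trailing context line above; a sketch of the wiring, with fields elided and reproduced from memory rather than from this diff:

	static struct tracer function_trace __read_mostly =
	{
		.name		= "function",
		.init		= function_trace_init,
		.reset		= function_trace_reset,
		.flags		= &func_flags,	/* bits read in the hot path */
		.set_flag	= func_set_flag,	/* invoked on options writes */
		/* ... */
	};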
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index b31d3d5699fe..1a2117043bb1 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1002,7 +1002,8 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
 	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
 	head = this_cpu_ptr(call->perf_events);
-	perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head);
+	perf_trace_buf_submit(entry, size, rctx,
+					entry->ip, 1, regs, head, NULL);
 }
 
 /* Kretprobe profile handler */
@@ -1033,7 +1034,8 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
 	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
 	head = this_cpu_ptr(call->perf_events);
-	perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, regs, head);
+	perf_trace_buf_submit(entry, size, rctx,
+					entry->ret_ip, 1, regs, head, NULL);
 }
 #endif /* CONFIG_PERF_EVENTS */
 
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 96fc73369099..60e4d7875672 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -532,7 +532,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
 			       (unsigned long *)&rec->args);
 
 	head = this_cpu_ptr(sys_data->enter_event->perf_events);
-	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
+	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
 }
 
 int perf_sysenter_enable(struct ftrace_event_call *call)
@@ -608,7 +608,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
 	rec->ret = syscall_get_return_value(current, regs);
 
 	head = this_cpu_ptr(sys_data->exit_event->perf_events);
-	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
+	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
 }
 
 int perf_sysexit_enable(struct ftrace_event_call *call)
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 2b36ac68549e..03003cd7dd96 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -670,7 +670,7 @@ static void uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs)
 		call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset);
 
 	head = this_cpu_ptr(call->perf_events);
-	perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head);
+	perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head, NULL);
 
  out:
 	preempt_enable();