path: root/kernel/trace/trace_sysprof.c
author     Soeren Sandmann Pedersen <sandmann@redhat.com>    2008-05-12 15:20:54 -0400
committer  Thomas Gleixner <tglx@linutronix.de>              2008-05-23 17:58:50 -0400
commit     cd2134b1dda92fd450e6a1e12b1c7960dd6a2178 (patch)
tree       2b5105a3055ffc8613719b07558d96a32a71fa30 /kernel/trace/trace_sysprof.c
parent     5fc4511c756860149b81aead6eca5bdf5c438ea7 (diff)
sysprof: kernel trace
Add kernel backtracing to the sysprof tracer.

Change the format of the data, so that type=0 means beginning of stack
trace, 1 means kernel address, 2 means user address, and 3 means end of
trace.

EIP addresses are no longer distinguished from return addresses, mostly
because sysprof userspace doesn't make use of it. It may be worthwhile
adding this back in though, just in case it becomes interesting.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
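For reference, the four record types described above could be given symbolic names on the consumer side; the enum below is purely illustrative and does not appear in the patch, which passes the raw values 0-3 to __trace_special():

/*
 * Illustrative only: symbolic names for the sample record types
 * described in the commit message above.
 */
enum sysprof_sample_type {
        SYSPROF_SAMPLE_BEGIN  = 0,      /* beginning of a stack trace */
        SYSPROF_SAMPLE_KERNEL = 1,      /* kernel address             */
        SYSPROF_SAMPLE_USER   = 2,      /* user-space address         */
        SYSPROF_SAMPLE_END    = 3,      /* end of the trace           */
};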
Diffstat (limited to 'kernel/trace/trace_sysprof.c')
-rw-r--r--   kernel/trace/trace_sysprof.c   89
1 file changed, 80 insertions(+), 9 deletions(-)
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index 76dd953eeccd..ebcb66d054cc 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -14,6 +14,8 @@
 #include <linux/irq.h>
 #include <linux/fs.h>
 
+#include <asm/stacktrace.h>
+
 #include "trace.h"
 
 static struct trace_array *sysprof_trace;
@@ -52,6 +54,77 @@ static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
         return ret;
 }
 
+struct backtrace_info {
+        struct trace_array_cpu  *data;
+        struct trace_array      *tr;
+        int                     pos;
+};
+
+static void
+backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
+{
+        /* Ignore warnings */
+}
+
+static void backtrace_warning(void *data, char *msg)
+{
+        /* Ignore warnings */
+}
+
+static int backtrace_stack(void *data, char *name)
+{
+        /* Don't bother with IRQ stacks for now */
+        return -1;
+}
+
+static void backtrace_address(void *data, unsigned long addr, int reliable)
+{
+        struct backtrace_info *info = data;
+
+        if (info->pos < sample_max_depth && reliable) {
+                __trace_special(info->tr, info->data, 1, addr, 0);
+
+                info->pos++;
+        }
+}
+
+const static struct stacktrace_ops backtrace_ops = {
+        .warning         = backtrace_warning,
+        .warning_symbol  = backtrace_warning_symbol,
+        .stack           = backtrace_stack,
+        .address         = backtrace_address,
+};
+
+static struct pt_regs *
+trace_kernel(struct pt_regs *regs, struct trace_array *tr,
+             struct trace_array_cpu *data)
+{
+        struct backtrace_info info;
+        unsigned long bp;
+        char *user_stack;
+        char *stack;
+
+        info.tr = tr;
+        info.data = data;
+        info.pos = 1;
+
+        __trace_special(info.tr, info.data, 1, regs->ip, 0);
+
+        stack = ((char *)regs + sizeof(struct pt_regs));
+#ifdef CONFIG_FRAME_POINTER
+        bp = regs->bp;
+#else
+        bp = 0;
+#endif
+
+        dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, &info);
+
+        /* Now trace the user stack */
+        user_stack = ((char *)current->thread.sp0 - sizeof(struct pt_regs));
+
+        return (struct pt_regs *)user_stack;
+}
+
 static void timer_notify(struct pt_regs *regs, int cpu)
 {
         struct trace_array_cpu *data;
@@ -74,17 +147,15 @@ static void timer_notify(struct pt_regs *regs, int cpu)
         if (is_user && current->state != TASK_RUNNING)
                 return;
 
-        if (!is_user) {
-                /* kernel */
-                ftrace(tr, data, current->pid, 1, 0);
-                return;
+        __trace_special(tr, data, 0, 0, current->pid);
 
-        }
-
-        __trace_special(tr, data, 0, current->pid, regs->ip);
+        if (!is_user)
+                regs = trace_kernel(regs, tr, data);
 
         fp = (void __user *)regs->bp;
 
+        __trace_special(tr, data, 2, regs->ip, 0);
+
         for (i = 0; i < sample_max_depth; i++) {
                 frame.next_fp = 0;
                 frame.return_address = 0;
@@ -93,12 +164,12 @@ static void timer_notify(struct pt_regs *regs, int cpu)
                 if ((unsigned long)fp < regs->sp)
                         break;
 
-                __trace_special(tr, data, 1, frame.return_address,
+                __trace_special(tr, data, 2, frame.return_address,
                                 (unsigned long)fp);
                 fp = frame.next_fp;
         }
 
-        __trace_special(tr, data, 2, current->pid, i);
+        __trace_special(tr, data, 3, current->pid, i);
 
         /*
          * Special trace entry if we overflow the max depth:
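After this change, one timer sample is emitted as a type-0 record carrying the pid, type-1 records for the kernel addresses gathered by dump_trace(), type-2 records for the user EIP and the frame-pointer-walked return addresses, and a closing type-3 record. A minimal sketch of a consumer that prints such a stream is shown below; struct sample_record and print_sample() are hypothetical stand-ins, since the patch does not define how the special entries reach userspace, and the real entries carry three values rather than one:

#include <stdio.h>

/* Hypothetical flattened view of one "special" trace entry. */
struct sample_record {
        int             type;   /* 0 begin, 1 kernel addr, 2 user addr, 3 end */
        unsigned long   arg;    /* address for types 1/2, pid for types 0/3   */
};

/* Print one sample, stopping at the end-of-trace record. */
static void print_sample(const struct sample_record *rec, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                switch (rec[i].type) {
                case 0:
                        printf("sample for pid %lu\n", rec[i].arg);
                        break;
                case 1:
                        printf("  [kernel] %#lx\n", rec[i].arg);
                        break;
                case 2:
                        printf("  [user]   %#lx\n", rec[i].arg);
                        break;
                case 3:
                        return;         /* end of this stack trace */
                }
        }
}

int main(void)
{
        /* Made-up sample: begin, two kernel frames, two user frames, end. */
        struct sample_record sample[] = {
                { 0, 1234 },
                { 1, 0xffffffff8100abcdUL },
                { 1, 0xffffffff8100ef01UL },
                { 2, 0x00007f00deadbeefUL },
                { 2, 0x00007f00cafef00dUL },
                { 3, 1234 },
        };

        print_sample(sample, (int)(sizeof(sample) / sizeof(sample[0])));
        return 0;
}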