author		Steven Rostedt <srostedt@redhat.com>	2009-02-24 10:21:36 -0500
committer	Steven Rostedt <srostedt@redhat.com>	2009-02-24 21:54:05 -0500
commit		b77e38aa240c3bd9c55c98b9f7c81541e042eae5 (patch)
tree		bbb40993e76edc52d2cae1040b941ba4e4d2f965
parent		7c37730cd31ddb2d3a1da142af9b18c29b8c433b (diff)
tracing: add event trace infrastructure
This patch creates the event tracing infrastructure of ftrace.
It will create the files:

  /debug/tracing/available_events
  /debug/tracing/set_event

The available_events file will list the trace points that have been
registered with the event tracer.

set_event will allow the user to enable or disable an event hook.

example:

 # echo sched_wakeup > /debug/tracing/set_event

Will enable the sched_wakeup event (if it is registered).

 # echo "!sched_wakeup" >> /debug/tracing/set_event

Will disable the sched_wakeup event (and only that event).

 # echo > /debug/tracing/set_event

Will disable all events (notice the '>').

 # cat /debug/tracing/available_events > /debug/tracing/set_event

Will enable all registered event hooks.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
-rw-r--r--   include/asm-generic/vmlinux.lds.h    11
-rw-r--r--   kernel/trace/Kconfig                  9
-rw-r--r--   kernel/trace/Makefile                 1
-rw-r--r--   kernel/trace/trace_events.c         280
-rw-r--r--   kernel/trace/trace_events.h          52
5 files changed, 352 insertions(+), 1 deletion(-)
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index c61fab1dd2f8..0add6b28c366 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -61,6 +61,14 @@
 #define BRANCH_PROFILE()
 #endif
 
+#ifdef CONFIG_EVENT_TRACER
+#define FTRACE_EVENTS()	VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
+			*(_ftrace_events)				\
+			VMLINUX_SYMBOL(__stop_ftrace_events) = .;
+#else
+#define FTRACE_EVENTS()
+#endif
+
 /* .data section */
 #define DATA_DATA							\
 	*(.data)							\
@@ -81,7 +89,8 @@
 	*(__tracepoints)						\
 	VMLINUX_SYMBOL(__stop___tracepoints) = .;			\
 	LIKELY_PROFILE()						\
-	BRANCH_PROFILE()
+	BRANCH_PROFILE()						\
+	FTRACE_EVENTS()
 
 #define RO_DATA(align)							\
 	. = ALIGN((align));						\
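The FTRACE_EVENTS() macro added above brackets everything placed in the _ftrace_events section between the __start_ftrace_events and __stop_ftrace_events symbols, which is what lets trace_events.c walk every ftrace_event_call at run time. Below is a minimal, stand-alone user-space sketch of that linker-section pattern; the section name "my_items", struct item and DEFINE_ITEM() are hypothetical and exist only to illustrate the mechanism (GNU ld emits the __start_/__stop_ symbols automatically for section names that are valid C identifiers, whereas the kernel declares them explicitly in vmlinux.lds.h).

/* linker_section_demo.c - sketch of collecting structs into a named
 * section and iterating them between linker-provided start/stop symbols. */
#include <stdio.h>

struct item {
	const char *name;
	int enabled;
};

/* GNU ld provides __start_<sec>/__stop_<sec> for sections whose names are
 * valid C identifiers; the kernel creates the equivalent symbols with
 * VMLINUX_SYMBOL(__start_ftrace_events) in the linker script. */
extern struct item __start_my_items[];
extern struct item __stop_my_items[];

#define DEFINE_ITEM(n)							\
	static struct item __attribute__((used, section("my_items")))	\
	item_##n = { .name = #n }

DEFINE_ITEM(foo);
DEFINE_ITEM(bar);

int main(void)
{
	/* Walk the array the linker assembled from every DEFINE_ITEM() use,
	 * just as ftrace_clear_events() walks _ftrace_events. */
	for (struct item *it = __start_my_items; it < __stop_my_items; it++)
		printf("%s\n", it->name);
	return 0;
}

Compiled with gcc, the loop prints the items in link order, the same way available_events lists every event_##call object that DEFINE_TRACE_FMT() drops into _ftrace_events.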
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 07877f4b5233..999c6a2485df 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -159,6 +159,15 @@ config CONTEXT_SWITCH_TRACER
 	  This tracer gets called from the context switch and records
 	  all switching of tasks.
 
+config EVENT_TRACER
+	bool "Trace various events in the kernel"
+	depends on DEBUG_KERNEL
+	select TRACING
+	help
+	  This tracer hooks to various trace points in the kernel
+	  allowing the user to pick and choose which trace point they
+	  want to trace.
+
 config BOOT_TRACER
 	bool "Trace boot initcalls"
 	depends on DEBUG_KERNEL
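Because EVENT_TRACER selects TRACING, enabling the tracer only requires turning on the one new symbol. A hedged example of the resulting .config fragment (DEBUG_KERNEL must already be set for the option to be visible; exact dependencies may differ by tree):

CONFIG_DEBUG_KERNEL=y
CONFIG_EVENT_TRACER=y
# pulled in automatically via "select TRACING"
CONFIG_TRACING=y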
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 627090bc262d..c7363568b1cf 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -38,5 +38,6 @@ obj-$(CONFIG_POWER_TRACER) += trace_power.o
 obj-$(CONFIG_KMEMTRACE) += kmemtrace.o
 obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o
 obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
+obj-$(CONFIG_EVENT_TRACER) += trace_events.o
 
 libftrace-y := ftrace.o
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
new file mode 100644
index 000000000000..05bc80ec8d2c
--- /dev/null
+++ b/kernel/trace/trace_events.c
@@ -0,0 +1,280 @@
+/*
+ * event tracer
+ *
+ * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+
+#include "trace_events.h"
+
+void event_trace_printk(unsigned long ip, const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	tracing_record_cmdline(current);
+	trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
+	va_end(ap);
+}
+
+static void ftrace_clear_events(void)
+{
+	struct ftrace_event_call *call = (void *)__start_ftrace_events;
+
+
+	while ((unsigned long)call < (unsigned long)__stop_ftrace_events) {
+
+		if (call->enabled) {
+			call->enabled = 0;
+			call->unregfunc();
+		}
+		call++;
+	}
+}
+
+static int ftrace_set_clr_event(char *buf, int set)
+{
+	struct ftrace_event_call *call = (void *)__start_ftrace_events;
+
+
+	while ((unsigned long)call < (unsigned long)__stop_ftrace_events) {
+
+		if (strcmp(buf, call->name) != 0) {
+			call++;
+			continue;
+		}
+
+		if (set) {
+			/* Already set? */
+			if (call->enabled)
+				return 0;
+			call->enabled = 1;
+			call->regfunc();
+		} else {
+			/* Already cleared? */
+			if (!call->enabled)
+				return 0;
+			call->enabled = 0;
+			call->unregfunc();
+		}
+		return 0;
+	}
+	return -EINVAL;
+}
+
+/* 128 should be much more than enough */
+#define EVENT_BUF_SIZE		127
+
+static ssize_t
+ftrace_event_write(struct file *file, const char __user *ubuf,
+		   size_t cnt, loff_t *ppos)
+{
+	size_t read = 0;
+	int i, set = 1;
+	ssize_t ret;
+	char *buf;
+	char ch;
+
+	if (!cnt || cnt < 0)
+		return 0;
+
+	ret = get_user(ch, ubuf++);
+	if (ret)
+		return ret;
+	read++;
+	cnt--;
+
+	/* skip white space */
+	while (cnt && isspace(ch)) {
+		ret = get_user(ch, ubuf++);
+		if (ret)
+			return ret;
+		read++;
+		cnt--;
+	}
+
+	/* Only white space found? */
+	if (isspace(ch)) {
+		file->f_pos += read;
+		ret = read;
+		return ret;
+	}
+
+	buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (cnt > EVENT_BUF_SIZE)
+		cnt = EVENT_BUF_SIZE;
+
+	i = 0;
+	while (cnt && !isspace(ch)) {
+		if (!i && ch == '!')
+			set = 0;
+		else
+			buf[i++] = ch;
+
+		ret = get_user(ch, ubuf++);
+		if (ret)
+			goto out_free;
+		read++;
+		cnt--;
+	}
+	buf[i] = 0;
+
+	file->f_pos += read;
+
+	ret = ftrace_set_clr_event(buf, set);
+	if (ret)
+		goto out_free;
+
+	ret = read;
+
+ out_free:
+	kfree(buf);
+
+	return ret;
+}
+
+static void *
+t_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	struct ftrace_event_call *call = m->private;
+	struct ftrace_event_call *next = call;
+
+	(*pos)++;
+
+	if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
+		return NULL;
+
+	m->private = ++next;
+
+	return call;
+}
+
+static void *t_start(struct seq_file *m, loff_t *pos)
+{
+	return t_next(m, NULL, pos);
+}
+
+static void *
+s_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	struct ftrace_event_call *call = m->private;
+	struct ftrace_event_call *next;
+
+	(*pos)++;
+
+ retry:
+	if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
+		return NULL;
+
+	if (!call->enabled) {
+		call++;
+		goto retry;
+	}
+
+	next = call;
+	m->private = ++next;
+
+	return call;
+}
+
+static void *s_start(struct seq_file *m, loff_t *pos)
+{
+	return s_next(m, NULL, pos);
+}
+
+static int t_show(struct seq_file *m, void *v)
+{
+	struct ftrace_event_call *call = v;
+
+	seq_printf(m, "%s\n", call->name);
+
+	return 0;
+}
+
+static void t_stop(struct seq_file *m, void *p)
+{
+}
+
+static int
+ftrace_event_seq_open(struct inode *inode, struct file *file)
+{
+	int ret;
+	const struct seq_operations *seq_ops;
+
+	if ((file->f_mode & FMODE_WRITE) &&
+	    !(file->f_flags & O_APPEND))
+		ftrace_clear_events();
+
+	seq_ops = inode->i_private;
+	ret = seq_open(file, seq_ops);
+	if (!ret) {
+		struct seq_file *m = file->private_data;
+
+		m->private = __start_ftrace_events;
+	}
+	return ret;
+}
+
+static const struct seq_operations show_event_seq_ops = {
+	.start = t_start,
+	.next = t_next,
+	.show = t_show,
+	.stop = t_stop,
+};
+
+static const struct seq_operations show_set_event_seq_ops = {
+	.start = s_start,
+	.next = s_next,
+	.show = t_show,
+	.stop = t_stop,
+};
+
+static const struct file_operations ftrace_avail_fops = {
+	.open = ftrace_event_seq_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static const struct file_operations ftrace_set_event_fops = {
+	.open = ftrace_event_seq_open,
+	.read = seq_read,
+	.write = ftrace_event_write,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static __init int event_trace_init(void)
+{
+	struct dentry *d_tracer;
+	struct dentry *entry;
+
+	d_tracer = tracing_init_dentry();
+	if (!d_tracer)
+		return 0;
+
+	entry = debugfs_create_file("available_events", 0444, d_tracer,
+				    (void *)&show_event_seq_ops,
+				    &ftrace_avail_fops);
+	if (!entry)
+		pr_warning("Could not create debugfs "
+			   "'available_events' entry\n");
+
+	entry = debugfs_create_file("set_event", 0644, d_tracer,
+				    (void *)&show_set_event_seq_ops,
+				    &ftrace_set_event_fops);
+	if (!entry)
+		pr_warning("Could not create debugfs "
+			   "'set_event' entry\n");
+
+	return 0;
+}
+fs_initcall(event_trace_init);
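event_trace_init() registers the two debugfs files, and ftrace_event_write() parses one event name per write, treating a leading '!' as "disable". The sketch below drives the same interface from C instead of the shell; the /debug/tracing paths and the sched_wakeup name follow the commit message, so adjust the path if debugfs is mounted elsewhere on your system.

/* set_event_demo.c - user-space sketch: enable one event, then read back
 * the list of currently enabled events. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *name = "sched_wakeup";	/* any name from available_events */
	char buf[4096];
	ssize_t n;
	int fd;

	/* O_APPEND keeps ftrace_event_seq_open() from clearing the events
	 * that are already enabled, mirroring "echo ... >> set_event". */
	fd = open("/debug/tracing/set_event", O_WRONLY | O_APPEND);
	if (fd < 0) {
		perror("open set_event");
		return 1;
	}
	if (write(fd, name, strlen(name)) < 0)
		perror("write set_event");
	close(fd);

	/* Reading set_event shows only the enabled events, because s_next()
	 * skips entries whose call->enabled is 0. */
	fd = open("/debug/tracing/set_event", O_RDONLY);
	if (fd < 0) {
		perror("open set_event");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}

Opening set_event for writing without O_APPEND first clears every enabled event (see ftrace_event_seq_open()), which is exactly the behaviour behind the "echo > set_event" example in the commit message.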
diff --git a/kernel/trace/trace_events.h b/kernel/trace/trace_events.h
new file mode 100644
index 000000000000..39342f86db27
--- /dev/null
+++ b/kernel/trace/trace_events.h
@@ -0,0 +1,52 @@
+#ifndef _LINUX_KERNEL_TRACE_EVENTS_H
+#define _LINUX_KERNEL_TRACE_EVENTS_H
+
+#include <linux/ftrace.h>
+#include "trace.h"
+
+struct ftrace_event_call {
+	char *name;
+	int enabled;
+	int (*regfunc)(void);
+	void (*unregfunc)(void);
+};
+
+
+#undef TPFMT
+#define TPFMT(fmt, args...)	fmt "\n", ##args
+
+#undef DEFINE_TRACE_FMT
+#define DEFINE_TRACE_FMT(call, proto, args, fmt)			\
+static void ftrace_event_##call(proto)					\
+{									\
+	event_trace_printk(_RET_IP_, "(" #call ") " fmt);		\
+}									\
+									\
+static int ftrace_reg_event_##call(void)				\
+{									\
+	int ret;							\
+									\
+	ret = register_trace_##call(ftrace_event_##call);		\
+	if (ret)							\
+		pr_info("event trace: Could not activate trace point "	\
+			"probe to " #call);				\
+	return ret;							\
+}									\
+									\
+static void ftrace_unreg_event_##call(void)				\
+{									\
+	unregister_trace_##call(ftrace_event_##call);			\
+}									\
+									\
+static struct ftrace_event_call __used					\
+__attribute__((section("_ftrace_events"))) event_##call = {		\
+	.name = #call,							\
+	.regfunc = ftrace_reg_event_##call,				\
+	.unregfunc = ftrace_unreg_event_##call,				\
+}
+
+void event_trace_printk(unsigned long ip, const char *fmt, ...);
+extern unsigned long __start_ftrace_events[];
+extern unsigned long __stop_ftrace_events[];
+
+#endif /* _LINUX_KERNEL_TRACE_EVENTS_H */
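For reference, here is roughly what DEFINE_TRACE_FMT() generates for a hypothetical tracepoint foo_bar(int x) with a TPFMT("x=%d", x) format. The name foo_bar does not exist in the tree and is used only to show the shape of the expansion; the string literals produced by "(" #call ") " and by TPFMT concatenate into one format string.

/* Hand-expanded sketch of the DEFINE_TRACE_FMT() output for a made-up
 * tracepoint foo_bar(int x). */

static void ftrace_event_foo_bar(int x)
{
	/* Route the hit through the normal trace_printk path. */
	event_trace_printk(_RET_IP_, "(foo_bar) " "x=%d" "\n", x);
}

static int ftrace_reg_event_foo_bar(void)
{
	int ret;

	ret = register_trace_foo_bar(ftrace_event_foo_bar);
	if (ret)
		pr_info("event trace: Could not activate trace point "
			"probe to foo_bar");
	return ret;
}

static void ftrace_unreg_event_foo_bar(void)
{
	unregister_trace_foo_bar(ftrace_event_foo_bar);
}

/* Placed in the _ftrace_events section, so the __start/__stop symbols added
 * to vmlinux.lds.h bracket it and trace_events.c can find it by name. */
static struct ftrace_event_call __used
__attribute__((section("_ftrace_events"))) event_foo_bar = {
	.name = "foo_bar",
	.regfunc = ftrace_reg_event_foo_bar,
	.unregfunc = ftrace_unreg_event_foo_bar,
};

Once such an object exists, "foo_bar" shows up in available_events and can be switched on and off through set_event like any other registered event.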