-rw-r--r--  kernel/trace/Makefile        1
-rw-r--r--  kernel/trace/trace.c         3
-rw-r--r--  kernel/trace/trace.h        17
-rw-r--r--  kernel/trace/trace_stat.c  251
4 files changed, 271 insertions(+), 1 deletion(-)
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 549f93c9b393..31cd5fbc0eed 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_RING_BUFFER) += ring_buffer.o
 
 obj-$(CONFIG_TRACING) += trace.o
 obj-$(CONFIG_TRACING) += trace_output.o
+obj-$(CONFIG_TRACING) += trace_stat.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
 obj-$(CONFIG_SYSPROF_TRACER) += trace_sysprof.o
 obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3f0317586cfd..b789c010512c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2354,6 +2354,7 @@ static int tracing_set_tracer(char *buf)
                 if (ret)
                         goto out;
         }
+        init_tracer_stat(t);
 
         trace_branch_enable(tr);
  out:
@@ -3206,7 +3207,7 @@ __init static int tracer_alloc_buffers(void)
 #else
         current_trace = &nop_trace;
 #endif
-
+        init_tracer_stat(current_trace);
         /* All seems OK, enable tracing */
         tracing_disabled = 0;
 
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 6bd71fa1e1c7..05fa804d1c16 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -336,6 +336,21 @@ struct tracer {
         struct tracer *next;
         int print_max;
         struct tracer_flags *flags;
+
+        /*
+         * If you change one of the following on tracing runtime, recall
+         * init_tracer_stat()
+         */
+
+        /* Iteration over statistic entries */
+        void *(*stat_start)(void);
+        void *(*stat_next)(void *prev, int idx);
+        /* Compare two entries for sorting (optional) for stats */
+        int (*stat_cmp)(void *p1, void *p2);
+        /* Print a stat entry */
+        int (*stat_show)(struct seq_file *s, void *p);
+        /* Print the headers of your stat entries */
+        int (*stat_headers)(struct seq_file *s);
 };
 
 struct trace_seq {
@@ -421,6 +436,8 @@ void tracing_start_sched_switch_record(void);
 int register_tracer(struct tracer *type);
 void unregister_tracer(struct tracer *type);
 
+void init_tracer_stat(struct tracer *trace);
+
 extern unsigned long nsecs_to_usecs(unsigned long nsecs);
 
 extern unsigned long tracing_max_latency;
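
The five stat_* callbacks added to struct tracer above are the whole contract between a tracer and the new statistics output: stat_start() hands back the first entry, stat_next() returns the following ones (NULL ends the walk), stat_cmp() orders them, and stat_show()/stat_headers() format the seq_file output. A rough tracer-side sketch of how they could be filled in (everything named my_* below is hypothetical and not part of this patch):

#include <linux/kernel.h>
#include <linux/seq_file.h>
#include "trace.h"

/* Hypothetical per-entry statistics maintained by some tracer */
struct my_stat {
        unsigned long hits;
        const char *name;
};

static struct my_stat my_stats[32];

/* First entry of the walk (index 0) */
static void *my_stat_start(void)
{
        return &my_stats[0];
}

/*
 * idx-th entry; prev is the previously returned one (unused in this
 * array-based sketch). Returning NULL stops the iteration.
 */
static void *my_stat_next(void *prev, int idx)
{
        if (idx >= ARRAY_SIZE(my_stats))
                return NULL;
        return &my_stats[idx];
}

/* Positive when p1 should be listed before p2: larger hit counts first */
static int my_stat_cmp(void *p1, void *p2)
{
        struct my_stat *s1 = p1, *s2 = p2;

        if (s1->hits > s2->hits)
                return 1;
        if (s1->hits < s2->hits)
                return -1;
        return 0;
}

static int my_stat_headers(struct seq_file *s)
{
        seq_printf(s, "    hits  name\n");
        return 0;
}

static int my_stat_show(struct seq_file *s, void *p)
{
        struct my_stat *stat = p;

        seq_printf(s, "%8lu  %s\n", stat->hits, stat->name);
        return 0;
}

static struct tracer my_tracer = {
        .name = "my_tracer",
        .stat_start = my_stat_start,
        .stat_next = my_stat_next,
        .stat_cmp = my_stat_cmp,
        .stat_show = my_stat_show,
        .stat_headers = my_stat_headers,
};

Once such a tracer becomes the current tracer, tracing_set_tracer() calls init_tracer_stat() on it (see the trace.c hunk above) and its entries appear, sorted, in the trace_stat file created below.
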
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
new file mode 100644
index 000000000000..6f194a33a64a
--- /dev/null
+++ b/kernel/trace/trace_stat.c
@@ -0,0 +1,251 @@
+/*
+ * Infrastructure for statistic tracing (histogram output).
+ *
+ * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
+ *
+ * Based on the code from trace_branch.c which is
+ * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
+ *
+ */
+
+
+#include <linux/list.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include "trace.h"
+
+
+/* List of stat entries from a tracer */
+struct trace_stat_list {
+        struct list_head list;
+        void *stat;
+};
+
+static struct trace_stat_list stat_list;
+
+/*
+ * This is a copy of the current tracer to avoid racy
+ * and dangerous output while the current tracer is
+ * switched.
+ */
+static struct tracer current_tracer;
+
+/*
+ * Protect both the current tracer and the global
+ * stat list.
+ */
+static DEFINE_MUTEX(stat_list_mutex);
+
+
+static void reset_stat_list(void)
+{
+        struct trace_stat_list *node;
+        struct list_head *next;
+
+        if (list_empty(&stat_list.list))
+                return;
+
+        node = list_entry(stat_list.list.next, struct trace_stat_list, list);
+        next = node->list.next;
+
+        while (&node->list != next) {
+                kfree(node);
+                node = list_entry(next, struct trace_stat_list, list);
+        }
+        kfree(node);
+
+        INIT_LIST_HEAD(&stat_list.list);
+}
+
+void init_tracer_stat(struct tracer *trace)
+{
+        mutex_lock(&stat_list_mutex);
+        current_tracer = *trace;
+        mutex_unlock(&stat_list_mutex);
+}
+
+/*
+ * For tracers that don't provide a stat_cmp callback.
+ * This one will force an immediate insertion on tail of
+ * the list.
+ */
+static int dummy_cmp(void *p1, void *p2)
+{
+        return 1;
+}
+
+/*
+ * Initialize the stat list at each trace_stat file opening.
+ * All of these copies and sorting are required on all opening
+ * since the stats could have changed between two file sessions.
+ */
+static int stat_seq_init(void)
+{
+        struct trace_stat_list *iter_entry, *new_entry;
+        void *prev_stat;
+        int ret = 0;
+        int i;
+
+        mutex_lock(&stat_list_mutex);
+        reset_stat_list();
+
+        if (!current_tracer.stat_start || !current_tracer.stat_next ||
+                                        !current_tracer.stat_show)
+                goto exit;
+
+        if (!current_tracer.stat_cmp)
+                current_tracer.stat_cmp = dummy_cmp;
+
+        /*
+         * The first entry. Actually this is the second, but the first
+         * one (the stat_list head) is pointless.
+         */
+        new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL);
+        if (!new_entry) {
+                ret = -ENOMEM;
+                goto exit;
+        }
+
+        INIT_LIST_HEAD(&new_entry->list);
+        list_add(&new_entry->list, &stat_list.list);
+        new_entry->stat = current_tracer.stat_start();
+
+        prev_stat = new_entry->stat;
+
+        /*
+         * Iterate over the tracer stat entries and store them in a sorted
+         * list.
+         */
+        for (i = 1; ; i++) {
+                new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL);
+                if (!new_entry) {
+                        ret = -ENOMEM;
+                        goto exit_free_list;
+                }
+
+                INIT_LIST_HEAD(&new_entry->list);
+                new_entry->stat = current_tracer.stat_next(prev_stat, i);
+
+                /* End of insertion */
+                if (!new_entry->stat)
+                        break;
+
+                list_for_each_entry(iter_entry, &stat_list.list, list) {
+                        /* Insertion with a descendent sorting */
+                        if (current_tracer.stat_cmp(new_entry->stat,
+                                                iter_entry->stat) > 0) {
+
+                                list_add_tail(&new_entry->list,
+                                                &iter_entry->list);
+                                break;
+
+                        /* The current smaller value */
+                        } else if (list_is_last(&iter_entry->list,
+                                                &stat_list.list)) {
+                                list_add(&new_entry->list, &iter_entry->list);
+                                break;
+                        }
+                }
+
+                prev_stat = new_entry->stat;
+        }
+exit:
+        mutex_unlock(&stat_list_mutex);
+        return ret;
+
+exit_free_list:
+        reset_stat_list();
+        mutex_unlock(&stat_list_mutex);
+        return ret;
+}
+
+
+static void *stat_seq_start(struct seq_file *s, loff_t *pos)
+{
+        struct trace_stat_list *l = (struct trace_stat_list *)s->private;
+
+        /* Prevent from tracer switch or stat_list modification */
+        mutex_lock(&stat_list_mutex);
+
+        /* If we are in the beginning of the file, print the headers */
+        if (!*pos && current_tracer.stat_headers)
+                current_tracer.stat_headers(s);
+
+        return seq_list_start(&l->list, *pos);
+}
+
+static void *stat_seq_next(struct seq_file *s, void *p, loff_t *pos)
+{
+        struct trace_stat_list *l = (struct trace_stat_list *)s->private;
+
+        return seq_list_next(p, &l->list, pos);
+}
+
+static void stat_seq_stop(struct seq_file *m, void *p)
+{
+        mutex_unlock(&stat_list_mutex);
+}
+
+static int stat_seq_show(struct seq_file *s, void *v)
+{
+        struct trace_stat_list *l = list_entry(v, struct trace_stat_list, list);
+        return current_tracer.stat_show(s, l->stat);
+}
+
+static const struct seq_operations trace_stat_seq_ops = {
+        .start = stat_seq_start,
+        .next = stat_seq_next,
+        .stop = stat_seq_stop,
+        .show = stat_seq_show
+};
+
+static int tracing_stat_open(struct inode *inode, struct file *file)
+{
+        int ret;
+
+        ret = seq_open(file, &trace_stat_seq_ops);
+        if (!ret) {
+                struct seq_file *m = file->private_data;
+                m->private = &stat_list;
+                ret = stat_seq_init();
+        }
+
+        return ret;
+}
+
+
+/*
+ * Avoid consuming memory with our now useless list.
+ */
+static int tracing_stat_release(struct inode *i, struct file *f)
+{
+        mutex_lock(&stat_list_mutex);
+        reset_stat_list();
+        mutex_unlock(&stat_list_mutex);
+        return 0;
+}
+
+static const struct file_operations tracing_stat_fops = {
+        .open = tracing_stat_open,
+        .read = seq_read,
+        .llseek = seq_lseek,
+        .release = tracing_stat_release
+};
+
+static int __init tracing_stat_init(void)
+{
+        struct dentry *d_tracing;
+        struct dentry *entry;
+
+        INIT_LIST_HEAD(&stat_list.list);
+        d_tracing = tracing_init_dentry();
+
+        entry = debugfs_create_file("trace_stat", 0444, d_tracing,
+                                        NULL,
+                                        &tracing_stat_fops);
+        if (!entry)
+                pr_warning("Could not create debugfs "
+                           "'trace_stat' entry\n");
+        return 0;
+}
+fs_initcall(tracing_stat_init);
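
With the patch applied, whichever tracer is current has its statistics exposed through the single trace_stat file registered above. Each open() rebuilds and sorts the entry list via stat_seq_init(), so the set and ordering of entries is fixed per open; the values printed by stat_show() still come from whatever the stored stat pointers reference at read time, and tracing_stat_release() frees the copied list again on close. A minimal userspace sketch for dumping the file (the path is an assumption, valid when debugfs is mounted at /sys/kernel/debug):

#include <stdio.h>

int main(void)
{
        char line[256];
        /* Assumed location; adjust if debugfs is mounted elsewhere */
        FILE *f = fopen("/sys/kernel/debug/tracing/trace_stat", "r");

        if (!f) {
                perror("trace_stat");
                return 1;
        }
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
        return 0;
}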