aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace/trace_stat.c
diff options
context:
space:
mode:
authorFrederic Weisbecker <fweisbec@gmail.com>2008-12-28 23:44:51 -0500
committerIngo Molnar <mingo@elte.hu>2008-12-29 06:55:45 -0500
commitdbd0b4b33074aa6b7832a9d9a5bd985eca5c1aa2 (patch)
treeb2f498a25c176cdba29cb1f9d1e854d38204192e /kernel/trace/trace_stat.c
parentf633cef0200bbaec539e2dbb0bc4bed7f022f98b (diff)
tracing/ftrace: provide the base infrastructure for histogram tracing
Impact: extend the tracing API The goal of this patch is to normalize and ease the implementation of statistical (histogram) tracing. It implements a trace_stat file in the /debugfs/tracing directory where one can print a one-shot output of statistics/histogram entries. A tracer has to provide two basic iterator callbacks: stat_start() => the first entry stat_next(prev, idx) => the next one. Note that it is suited for arrays, hash tables, lists, etc., since it provides a pointer to the previous entry and the current index of the iterator. These two callbacks are called to get a snapshot of the statistics at each opening of the trace_stat file, so the values are refreshed between two "cat trace_stat" runs. The tracer is free to lock its data during the iteration to keep the values consistent. Since it is almost always interesting to sort statistical values in order to address the problems by priority, this infrastructure also provides a "sorting" of the stat entries if desired. A tracer just has to provide a stat_cmp callback to compare two entries, and the stat tracing infrastructure will build a sorted list of the given entries. A final callback, called stat_headers, can be implemented by a tracer to output headers on its trace. If one of these callbacks is changed at runtime, the tracer just has to signal it to the stat tracing API by calling the init_tracer_stat() helper. Changes in V2: - Fix a memory leak if the user opens the trace_stat file multiple times without closing it. Now we always free our list before rebuilding it. Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace/trace_stat.c')
-rw-r--r--kernel/trace/trace_stat.c251
1 files changed, 251 insertions, 0 deletions
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
new file mode 100644
index 000000000000..6f194a33a64a
--- /dev/null
+++ b/kernel/trace/trace_stat.c
@@ -0,0 +1,251 @@
1/*
2 * Infrastructure for statistic tracing (histogram output).
3 *
4 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
5 *
6 * Based on the code from trace_branch.c which is
7 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
8 *
9 */
10
11
12#include <linux/list.h>
13#include <linux/seq_file.h>
14#include <linux/debugfs.h>
15#include "trace.h"
16
17
18/* List of stat entries from a tracer */
19struct trace_stat_list {
20 struct list_head list;
21 void *stat;
22};
23
24static struct trace_stat_list stat_list;
25
26/*
27 * This is a copy of the current tracer to avoid racy
28 * and dangerous output while the current tracer is
29 * switched.
30 */
31static struct tracer current_tracer;
32
33/*
34 * Protect both the current tracer and the global
35 * stat list.
36 */
37static DEFINE_MUTEX(stat_list_mutex);
38
39
40static void reset_stat_list(void)
41{
42 struct trace_stat_list *node;
43 struct list_head *next;
44
45 if (list_empty(&stat_list.list))
46 return;
47
48 node = list_entry(stat_list.list.next, struct trace_stat_list, list);
49 next = node->list.next;
50
51 while (&node->list != next) {
52 kfree(node);
53 node = list_entry(next, struct trace_stat_list, list);
54 }
55 kfree(node);
56
57 INIT_LIST_HEAD(&stat_list.list);
58}
59
60void init_tracer_stat(struct tracer *trace)
61{
62 mutex_lock(&stat_list_mutex);
63 current_tracer = *trace;
64 mutex_unlock(&stat_list_mutex);
65}
66
/*
 * Fallback for tracers that don't provide a stat_cmp callback.
 * Always reporting "greater" makes every new entry land at the
 * tail of the list, i.e. entries keep their insertion order.
 */
static int dummy_cmp(void *p1, void *p2)
{
	return 1;
}
76
77/*
78 * Initialize the stat list at each trace_stat file opening.
79 * All of these copies and sorting are required on all opening
80 * since the stats could have changed between two file sessions.
81 */
82static int stat_seq_init(void)
83{
84 struct trace_stat_list *iter_entry, *new_entry;
85 void *prev_stat;
86 int ret = 0;
87 int i;
88
89 mutex_lock(&stat_list_mutex);
90 reset_stat_list();
91
92 if (!current_tracer.stat_start || !current_tracer.stat_next ||
93 !current_tracer.stat_show)
94 goto exit;
95
96 if (!current_tracer.stat_cmp)
97 current_tracer.stat_cmp = dummy_cmp;
98
99 /*
100 * The first entry. Actually this is the second, but the first
101 * one (the stat_list head) is pointless.
102 */
103 new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL);
104 if (!new_entry) {
105 ret = -ENOMEM;
106 goto exit;
107 }
108
109 INIT_LIST_HEAD(&new_entry->list);
110 list_add(&new_entry->list, &stat_list.list);
111 new_entry->stat = current_tracer.stat_start();
112
113 prev_stat = new_entry->stat;
114
115 /*
116 * Iterate over the tracer stat entries and store them in a sorted
117 * list.
118 */
119 for (i = 1; ; i++) {
120 new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL);
121 if (!new_entry) {
122 ret = -ENOMEM;
123 goto exit_free_list;
124 }
125
126 INIT_LIST_HEAD(&new_entry->list);
127 new_entry->stat = current_tracer.stat_next(prev_stat, i);
128
129 /* End of insertion */
130 if (!new_entry->stat)
131 break;
132
133 list_for_each_entry(iter_entry, &stat_list.list, list) {
134 /* Insertion with a descendent sorting */
135 if (current_tracer.stat_cmp(new_entry->stat,
136 iter_entry->stat) > 0) {
137
138 list_add_tail(&new_entry->list,
139 &iter_entry->list);
140 break;
141
142 /* The current smaller value */
143 } else if (list_is_last(&iter_entry->list,
144 &stat_list.list)) {
145 list_add(&new_entry->list, &iter_entry->list);
146 break;
147 }
148 }
149
150 prev_stat = new_entry->stat;
151 }
152exit:
153 mutex_unlock(&stat_list_mutex);
154 return ret;
155
156exit_free_list:
157 reset_stat_list();
158 mutex_unlock(&stat_list_mutex);
159 return ret;
160}
161
162
163static void *stat_seq_start(struct seq_file *s, loff_t *pos)
164{
165 struct trace_stat_list *l = (struct trace_stat_list *)s->private;
166
167 /* Prevent from tracer switch or stat_list modification */
168 mutex_lock(&stat_list_mutex);
169
170 /* If we are in the beginning of the file, print the headers */
171 if (!*pos && current_tracer.stat_headers)
172 current_tracer.stat_headers(s);
173
174 return seq_list_start(&l->list, *pos);
175}
176
177static void *stat_seq_next(struct seq_file *s, void *p, loff_t *pos)
178{
179 struct trace_stat_list *l = (struct trace_stat_list *)s->private;
180
181 return seq_list_next(p, &l->list, pos);
182}
183
184static void stat_seq_stop(struct seq_file *m, void *p)
185{
186 mutex_unlock(&stat_list_mutex);
187}
188
189static int stat_seq_show(struct seq_file *s, void *v)
190{
191 struct trace_stat_list *l = list_entry(v, struct trace_stat_list, list);
192 return current_tracer.stat_show(s, l->stat);
193}
194
195static const struct seq_operations trace_stat_seq_ops = {
196 .start = stat_seq_start,
197 .next = stat_seq_next,
198 .stop = stat_seq_stop,
199 .show = stat_seq_show
200};
201
202static int tracing_stat_open(struct inode *inode, struct file *file)
203{
204 int ret;
205
206 ret = seq_open(file, &trace_stat_seq_ops);
207 if (!ret) {
208 struct seq_file *m = file->private_data;
209 m->private = &stat_list;
210 ret = stat_seq_init();
211 }
212
213 return ret;
214}
215
216
217/*
218 * Avoid consuming memory with our now useless list.
219 */
220static int tracing_stat_release(struct inode *i, struct file *f)
221{
222 mutex_lock(&stat_list_mutex);
223 reset_stat_list();
224 mutex_unlock(&stat_list_mutex);
225 return 0;
226}
227
228static const struct file_operations tracing_stat_fops = {
229 .open = tracing_stat_open,
230 .read = seq_read,
231 .llseek = seq_lseek,
232 .release = tracing_stat_release
233};
234
235static int __init tracing_stat_init(void)
236{
237 struct dentry *d_tracing;
238 struct dentry *entry;
239
240 INIT_LIST_HEAD(&stat_list.list);
241 d_tracing = tracing_init_dentry();
242
243 entry = debugfs_create_file("trace_stat", 0444, d_tracing,
244 NULL,
245 &tracing_stat_fops);
246 if (!entry)
247 pr_warning("Could not create debugfs "
248 "'trace_stat' entry\n");
249 return 0;
250}
251fs_initcall(tracing_stat_init);