diff options
author | Frederic Weisbecker <fweisbec@gmail.com> | 2009-01-08 13:03:56 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-01-10 22:00:44 -0500 |
commit | 034939b65ad5ff64b9709210b3469a95153c51a3 (patch) | |
tree | 14fc36bdcabc9c76ac179e4b39b7af11fcbdfb35 /kernel | |
parent | 67d347245f76a149c45bffb1a10145d31d61d1da (diff) |
tracing/ftrace: handle more than one stat file per tracer
Impact: new API for tracers
Make the stat tracing API reentrant, and also provide the new directory
/debugfs/tracing/trace_stat which will contain all the stat files for the
currently active tracer.
Now a tracer that wants to export statistics provides a zero-terminated array
of tracer_stat structures.
Each one contains the callbacks necessary for one stat file.
Each one has to provide at least a name for its stat file, an iterator with
stat_start/stat_next callbacks and an output callback for one stat entry.
Also adapt the branch tracer to this new API.
We create two files "all" and "annotated" inside the /debugfs/tracing/trace_stat
directory, making both stats simultaneously available instead of needing
to change an option to switch from one stat file to another.
The output of these stats hasn't changed.
Changes in v2:
_ Apply the previous memory leak fix (rebase against tip/master)
Changes in v3:
_ Merge the patch that adapted the branch tracer to this API into this patch,
so as not to break the kernel build.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/trace/trace.h | 35 | ||||
-rw-r--r-- | kernel/trace/trace_branch.c | 69 | ||||
-rw-r--r-- | kernel/trace/trace_stat.c | 230 |
3 files changed, 217 insertions, 117 deletions
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 94ed45e93a80..b3f9ad1b4d84 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -335,6 +335,25 @@ struct tracer_flags { | |||
335 | #define TRACER_OPT(s, b) .name = #s, .bit = b | 335 | #define TRACER_OPT(s, b) .name = #s, .bit = b |
336 | 336 | ||
337 | /* | 337 | /* |
338 | * If you want to provide a stat file (one-shot statistics), fill | ||
339 | * an iterator with stat_start/stat_next and a stat_show callbacks. | ||
340 | * The others callbacks are optional. | ||
341 | */ | ||
342 | struct tracer_stat { | ||
343 | /* The name of your stat file */ | ||
344 | const char *name; | ||
345 | /* Iteration over statistic entries */ | ||
346 | void *(*stat_start)(void); | ||
347 | void *(*stat_next)(void *prev, int idx); | ||
348 | /* Compare two entries for sorting (optional) for stats */ | ||
349 | int (*stat_cmp)(void *p1, void *p2); | ||
350 | /* Print a stat entry */ | ||
351 | int (*stat_show)(struct seq_file *s, void *p); | ||
352 | /* Print the headers of your stat entries */ | ||
353 | int (*stat_headers)(struct seq_file *s); | ||
354 | }; | ||
355 | |||
356 | /* | ||
338 | * A specific tracer, represented by methods that operate on a trace array: | 357 | * A specific tracer, represented by methods that operate on a trace array: |
339 | */ | 358 | */ |
340 | struct tracer { | 359 | struct tracer { |
@@ -361,21 +380,7 @@ struct tracer { | |||
361 | struct tracer *next; | 380 | struct tracer *next; |
362 | int print_max; | 381 | int print_max; |
363 | struct tracer_flags *flags; | 382 | struct tracer_flags *flags; |
364 | 383 | struct tracer_stat *stats; | |
365 | /* | ||
366 | * If you change one of the following on tracing runtime, recall | ||
367 | * init_tracer_stat() | ||
368 | */ | ||
369 | |||
370 | /* Iteration over statistic entries */ | ||
371 | void *(*stat_start)(void); | ||
372 | void *(*stat_next)(void *prev, int idx); | ||
373 | /* Compare two entries for sorting (optional) for stats */ | ||
374 | int (*stat_cmp)(void *p1, void *p2); | ||
375 | /* Print a stat entry */ | ||
376 | int (*stat_show)(struct seq_file *s, void *p); | ||
377 | /* Print the headers of your stat entries */ | ||
378 | int (*stat_headers)(struct seq_file *s); | ||
379 | }; | 384 | }; |
380 | 385 | ||
381 | struct trace_seq { | 386 | struct trace_seq { |
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c index 4785a3b9bc4a..da5cf3e5581b 100644 --- a/kernel/trace/trace_branch.c +++ b/kernel/trace/trace_branch.c | |||
@@ -306,19 +306,6 @@ static int annotated_branch_stat_cmp(void *p1, void *p2) | |||
306 | } | 306 | } |
307 | 307 | ||
308 | #ifdef CONFIG_PROFILE_ALL_BRANCHES | 308 | #ifdef CONFIG_PROFILE_ALL_BRANCHES |
309 | enum { | ||
310 | TRACE_BRANCH_OPT_ALL = 0x1 | ||
311 | }; | ||
312 | |||
313 | static struct tracer_opt branch_opts[] = { | ||
314 | { TRACER_OPT(stat_all_branch, TRACE_BRANCH_OPT_ALL) }, | ||
315 | { } | ||
316 | }; | ||
317 | |||
318 | static struct tracer_flags branch_flags = { | ||
319 | .val = 0, | ||
320 | .opts = branch_opts | ||
321 | }; | ||
322 | 309 | ||
323 | extern unsigned long __start_branch_profile[]; | 310 | extern unsigned long __start_branch_profile[]; |
324 | extern unsigned long __stop_branch_profile[]; | 311 | extern unsigned long __stop_branch_profile[]; |
@@ -352,28 +339,36 @@ all_branch_stat_next(void *v, int idx) | |||
352 | return p; | 339 | return p; |
353 | } | 340 | } |
354 | 341 | ||
355 | static int branch_set_flag(u32 old_flags, u32 bit, int set) | 342 | static struct tracer_stat branch_stats[] = { |
356 | { | 343 | {.name = "annotated", |
357 | if (bit == TRACE_BRANCH_OPT_ALL) { | 344 | .stat_start = annotated_branch_stat_start, |
358 | if (set) { | 345 | .stat_next = annotated_branch_stat_next, |
359 | branch_trace.stat_headers = all_branch_stat_headers; | 346 | .stat_cmp = annotated_branch_stat_cmp, |
360 | branch_trace.stat_start = all_branch_stat_start; | 347 | .stat_headers = annotated_branch_stat_headers, |
361 | branch_trace.stat_next = all_branch_stat_next; | 348 | .stat_show = branch_stat_show}, |
362 | branch_trace.stat_cmp = NULL; | ||
363 | } else { | ||
364 | branch_trace.stat_headers = | ||
365 | annotated_branch_stat_headers; | ||
366 | branch_trace.stat_start = annotated_branch_stat_start; | ||
367 | branch_trace.stat_next = annotated_branch_stat_next; | ||
368 | branch_trace.stat_cmp = annotated_branch_stat_cmp; | ||
369 | } | ||
370 | init_tracer_stat(&branch_trace); | ||
371 | } | ||
372 | return 0; | ||
373 | } | ||
374 | 349 | ||
350 | {.name = "all", | ||
351 | .stat_start = all_branch_stat_start, | ||
352 | .stat_next = all_branch_stat_next, | ||
353 | .stat_headers = all_branch_stat_headers, | ||
354 | .stat_show = branch_stat_show}, | ||
355 | |||
356 | { } | ||
357 | }; | ||
358 | #else | ||
359 | static struct tracer_stat branch_stats[] = { | ||
360 | {.name = "annotated", | ||
361 | .stat_start = annotated_branch_stat_start, | ||
362 | .stat_next = annotated_branch_stat_next, | ||
363 | .stat_cmp = annotated_branch_stat_cmp, | ||
364 | .stat_headers = annotated_branch_stat_headers, | ||
365 | .stat_show = branch_stat_show}, | ||
366 | |||
367 | { } | ||
368 | }; | ||
375 | #endif /* CONFIG_PROFILE_ALL_BRANCHES */ | 369 | #endif /* CONFIG_PROFILE_ALL_BRANCHES */ |
376 | 370 | ||
371 | |||
377 | static struct tracer branch_trace __read_mostly = | 372 | static struct tracer branch_trace __read_mostly = |
378 | { | 373 | { |
379 | .name = "branch", | 374 | .name = "branch", |
@@ -383,16 +378,8 @@ static struct tracer branch_trace __read_mostly = | |||
383 | #ifdef CONFIG_FTRACE_SELFTEST | 378 | #ifdef CONFIG_FTRACE_SELFTEST |
384 | .selftest = trace_selftest_startup_branch, | 379 | .selftest = trace_selftest_startup_branch, |
385 | #endif /* CONFIG_FTRACE_SELFTEST */ | 380 | #endif /* CONFIG_FTRACE_SELFTEST */ |
386 | #endif /* CONFIG_BRANCH_TRACER */ | ||
387 | .stat_start = annotated_branch_stat_start, | ||
388 | .stat_next = annotated_branch_stat_next, | ||
389 | .stat_show = branch_stat_show, | ||
390 | .stat_headers = annotated_branch_stat_headers, | ||
391 | .stat_cmp = annotated_branch_stat_cmp, | ||
392 | #ifdef CONFIG_PROFILE_ALL_BRANCHES | ||
393 | .flags = &branch_flags, | ||
394 | .set_flag = branch_set_flag, | ||
395 | #endif | 381 | #endif |
382 | .stats = branch_stats | ||
396 | }; | 383 | }; |
397 | 384 | ||
398 | __init static int init_branch_trace(void) | 385 | __init static int init_branch_trace(void) |
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c index f110ce9ce7fb..1515f9e7adfc 100644 --- a/kernel/trace/trace_stat.c +++ b/kernel/trace/trace_stat.c | |||
@@ -21,37 +21,87 @@ struct trace_stat_list { | |||
21 | void *stat; | 21 | void *stat; |
22 | }; | 22 | }; |
23 | 23 | ||
24 | static LIST_HEAD(stat_list); | 24 | /* A stat session is the stats output in one file */ |
25 | 25 | struct tracer_stat_session { | |
26 | /* | 26 | struct tracer_stat *ts; |
27 | * This is a copy of the current tracer to avoid racy | 27 | struct list_head stat_list; |
28 | * and dangerous output while the current tracer is | 28 | struct mutex stat_mutex; |
29 | * switched. | 29 | }; |
30 | */ | ||
31 | static struct tracer current_tracer; | ||
32 | 30 | ||
33 | /* | 31 | /* All of the sessions currently in use. Each stat file embeed one session */ |
34 | * Protect both the current tracer and the global | 32 | static struct tracer_stat_session **all_stat_sessions; |
35 | * stat list. | 33 | static int nb_sessions; |
36 | */ | 34 | static struct dentry *stat_dir, **stat_files; |
37 | static DEFINE_MUTEX(stat_list_mutex); | ||
38 | 35 | ||
39 | 36 | ||
40 | static void reset_stat_list(void) | 37 | static void reset_stat_session(struct tracer_stat_session *session) |
41 | { | 38 | { |
42 | struct trace_stat_list *node, *next; | 39 | struct trace_stat_list *node, *next; |
43 | 40 | ||
44 | list_for_each_entry_safe(node, next, &stat_list, list) | 41 | list_for_each_entry_safe(node, next, &session->stat_list, list) |
45 | kfree(node); | 42 | kfree(node); |
46 | 43 | ||
47 | INIT_LIST_HEAD(&stat_list); | 44 | INIT_LIST_HEAD(&session->stat_list); |
48 | } | 45 | } |
49 | 46 | ||
50 | void init_tracer_stat(struct tracer *trace) | 47 | /* Called when a tracer is initialized */ |
48 | static int init_all_sessions(int nb, struct tracer_stat *ts) | ||
51 | { | 49 | { |
52 | mutex_lock(&stat_list_mutex); | 50 | int i, j; |
53 | current_tracer = *trace; | 51 | struct tracer_stat_session *session; |
54 | mutex_unlock(&stat_list_mutex); | 52 | |
53 | nb_sessions = 0; | ||
54 | |||
55 | if (all_stat_sessions) { | ||
56 | for (i = 0; i < nb_sessions; i++) { | ||
57 | session = all_stat_sessions[i]; | ||
58 | reset_stat_session(session); | ||
59 | mutex_destroy(&session->stat_mutex); | ||
60 | kfree(session); | ||
61 | } | ||
62 | } | ||
63 | all_stat_sessions = kmalloc(sizeof(struct tracer_stat_session *) * nb, | ||
64 | GFP_KERNEL); | ||
65 | if (!all_stat_sessions) | ||
66 | return -ENOMEM; | ||
67 | |||
68 | for (i = 0; i < nb; i++) { | ||
69 | session = kmalloc(sizeof(struct tracer_stat_session) * nb, | ||
70 | GFP_KERNEL); | ||
71 | if (!session) | ||
72 | goto free_sessions; | ||
73 | |||
74 | INIT_LIST_HEAD(&session->stat_list); | ||
75 | mutex_init(&session->stat_mutex); | ||
76 | session->ts = &ts[i]; | ||
77 | all_stat_sessions[i] = session; | ||
78 | } | ||
79 | nb_sessions = nb; | ||
80 | return 0; | ||
81 | |||
82 | free_sessions: | ||
83 | |||
84 | for (j = 0; j < i; j++) | ||
85 | kfree(all_stat_sessions[i]); | ||
86 | |||
87 | kfree(all_stat_sessions); | ||
88 | all_stat_sessions = NULL; | ||
89 | |||
90 | return -ENOMEM; | ||
91 | } | ||
92 | |||
93 | static int basic_tracer_stat_checks(struct tracer_stat *ts) | ||
94 | { | ||
95 | int i; | ||
96 | |||
97 | if (!ts) | ||
98 | return 0; | ||
99 | |||
100 | for (i = 0; ts[i].name; i++) { | ||
101 | if (!ts[i].stat_start || !ts[i].stat_next || !ts[i].stat_show) | ||
102 | return -EBUSY; | ||
103 | } | ||
104 | return i; | ||
55 | } | 105 | } |
56 | 106 | ||
57 | /* | 107 | /* |
@@ -69,22 +119,19 @@ static int dummy_cmp(void *p1, void *p2) | |||
69 | * All of these copies and sorting are required on all opening | 119 | * All of these copies and sorting are required on all opening |
70 | * since the stats could have changed between two file sessions. | 120 | * since the stats could have changed between two file sessions. |
71 | */ | 121 | */ |
72 | static int stat_seq_init(void) | 122 | static int stat_seq_init(struct tracer_stat_session *session) |
73 | { | 123 | { |
74 | struct trace_stat_list *iter_entry, *new_entry; | 124 | struct trace_stat_list *iter_entry, *new_entry; |
125 | struct tracer_stat *ts = session->ts; | ||
75 | void *prev_stat; | 126 | void *prev_stat; |
76 | int ret = 0; | 127 | int ret = 0; |
77 | int i; | 128 | int i; |
78 | 129 | ||
79 | mutex_lock(&stat_list_mutex); | 130 | mutex_lock(&session->stat_mutex); |
80 | reset_stat_list(); | 131 | reset_stat_session(session); |
81 | |||
82 | if (!current_tracer.stat_start || !current_tracer.stat_next || | ||
83 | !current_tracer.stat_show) | ||
84 | goto exit; | ||
85 | 132 | ||
86 | if (!current_tracer.stat_cmp) | 133 | if (!ts->stat_cmp) |
87 | current_tracer.stat_cmp = dummy_cmp; | 134 | ts->stat_cmp = dummy_cmp; |
88 | 135 | ||
89 | /* | 136 | /* |
90 | * The first entry. Actually this is the second, but the first | 137 | * The first entry. Actually this is the second, but the first |
@@ -97,9 +144,10 @@ static int stat_seq_init(void) | |||
97 | } | 144 | } |
98 | 145 | ||
99 | INIT_LIST_HEAD(&new_entry->list); | 146 | INIT_LIST_HEAD(&new_entry->list); |
100 | list_add(&new_entry->list, &stat_list); | ||
101 | new_entry->stat = current_tracer.stat_start(); | ||
102 | 147 | ||
148 | list_add(&new_entry->list, &session->stat_list); | ||
149 | |||
150 | new_entry->stat = ts->stat_start(); | ||
103 | prev_stat = new_entry->stat; | 151 | prev_stat = new_entry->stat; |
104 | 152 | ||
105 | /* | 153 | /* |
@@ -114,15 +162,16 @@ static int stat_seq_init(void) | |||
114 | } | 162 | } |
115 | 163 | ||
116 | INIT_LIST_HEAD(&new_entry->list); | 164 | INIT_LIST_HEAD(&new_entry->list); |
117 | new_entry->stat = current_tracer.stat_next(prev_stat, i); | 165 | new_entry->stat = ts->stat_next(prev_stat, i); |
118 | 166 | ||
119 | /* End of insertion */ | 167 | /* End of insertion */ |
120 | if (!new_entry->stat) | 168 | if (!new_entry->stat) |
121 | break; | 169 | break; |
122 | 170 | ||
123 | list_for_each_entry(iter_entry, &stat_list, list) { | 171 | list_for_each_entry(iter_entry, &session->stat_list, list) { |
172 | |||
124 | /* Insertion with a descendent sorting */ | 173 | /* Insertion with a descendent sorting */ |
125 | if (current_tracer.stat_cmp(new_entry->stat, | 174 | if (ts->stat_cmp(new_entry->stat, |
126 | iter_entry->stat) > 0) { | 175 | iter_entry->stat) > 0) { |
127 | 176 | ||
128 | list_add_tail(&new_entry->list, | 177 | list_add_tail(&new_entry->list, |
@@ -131,7 +180,7 @@ static int stat_seq_init(void) | |||
131 | 180 | ||
132 | /* The current smaller value */ | 181 | /* The current smaller value */ |
133 | } else if (list_is_last(&iter_entry->list, | 182 | } else if (list_is_last(&iter_entry->list, |
134 | &stat_list)) { | 183 | &session->stat_list)) { |
135 | list_add(&new_entry->list, &iter_entry->list); | 184 | list_add(&new_entry->list, &iter_entry->list); |
136 | break; | 185 | break; |
137 | } | 186 | } |
@@ -140,49 +189,49 @@ static int stat_seq_init(void) | |||
140 | prev_stat = new_entry->stat; | 189 | prev_stat = new_entry->stat; |
141 | } | 190 | } |
142 | exit: | 191 | exit: |
143 | mutex_unlock(&stat_list_mutex); | 192 | mutex_unlock(&session->stat_mutex); |
144 | return ret; | 193 | return ret; |
145 | 194 | ||
146 | exit_free_list: | 195 | exit_free_list: |
147 | reset_stat_list(); | 196 | reset_stat_session(session); |
148 | mutex_unlock(&stat_list_mutex); | 197 | mutex_unlock(&session->stat_mutex); |
149 | return ret; | 198 | return ret; |
150 | } | 199 | } |
151 | 200 | ||
152 | 201 | ||
153 | static void *stat_seq_start(struct seq_file *s, loff_t *pos) | 202 | static void *stat_seq_start(struct seq_file *s, loff_t *pos) |
154 | { | 203 | { |
155 | struct list_head *l = (struct list_head *)s->private; | 204 | struct tracer_stat_session *session = s->private; |
156 | 205 | ||
157 | /* Prevent from tracer switch or stat_list modification */ | 206 | /* Prevent from tracer switch or stat_list modification */ |
158 | mutex_lock(&stat_list_mutex); | 207 | mutex_lock(&session->stat_mutex); |
159 | 208 | ||
160 | /* If we are in the beginning of the file, print the headers */ | 209 | /* If we are in the beginning of the file, print the headers */ |
161 | if (!*pos && current_tracer.stat_headers) | 210 | if (!*pos && session->ts->stat_headers) |
162 | current_tracer.stat_headers(s); | 211 | session->ts->stat_headers(s); |
163 | 212 | ||
164 | return seq_list_start(l, *pos); | 213 | return seq_list_start(&session->stat_list, *pos); |
165 | } | 214 | } |
166 | 215 | ||
167 | static void *stat_seq_next(struct seq_file *s, void *p, loff_t *pos) | 216 | static void *stat_seq_next(struct seq_file *s, void *p, loff_t *pos) |
168 | { | 217 | { |
169 | struct list_head *l = (struct list_head *)s->private; | 218 | struct tracer_stat_session *session = s->private; |
170 | 219 | ||
171 | return seq_list_next(p, l, pos); | 220 | return seq_list_next(p, &session->stat_list, pos); |
172 | } | 221 | } |
173 | 222 | ||
174 | static void stat_seq_stop(struct seq_file *m, void *p) | 223 | static void stat_seq_stop(struct seq_file *s, void *p) |
175 | { | 224 | { |
176 | mutex_unlock(&stat_list_mutex); | 225 | struct tracer_stat_session *session = s->private; |
226 | mutex_unlock(&session->stat_mutex); | ||
177 | } | 227 | } |
178 | 228 | ||
179 | static int stat_seq_show(struct seq_file *s, void *v) | 229 | static int stat_seq_show(struct seq_file *s, void *v) |
180 | { | 230 | { |
181 | struct trace_stat_list *entry; | 231 | struct tracer_stat_session *session = s->private; |
182 | 232 | struct trace_stat_list *l = list_entry(v, struct trace_stat_list, list); | |
183 | entry = list_entry(v, struct trace_stat_list, list); | ||
184 | 233 | ||
185 | return current_tracer.stat_show(s, entry->stat); | 234 | return session->ts->stat_show(s, l->stat); |
186 | } | 235 | } |
187 | 236 | ||
188 | static const struct seq_operations trace_stat_seq_ops = { | 237 | static const struct seq_operations trace_stat_seq_ops = { |
@@ -192,15 +241,18 @@ static const struct seq_operations trace_stat_seq_ops = { | |||
192 | .show = stat_seq_show | 241 | .show = stat_seq_show |
193 | }; | 242 | }; |
194 | 243 | ||
244 | /* The session stat is refilled and resorted at each stat file opening */ | ||
195 | static int tracing_stat_open(struct inode *inode, struct file *file) | 245 | static int tracing_stat_open(struct inode *inode, struct file *file) |
196 | { | 246 | { |
197 | int ret; | 247 | int ret; |
198 | 248 | ||
249 | struct tracer_stat_session *session = inode->i_private; | ||
250 | |||
199 | ret = seq_open(file, &trace_stat_seq_ops); | 251 | ret = seq_open(file, &trace_stat_seq_ops); |
200 | if (!ret) { | 252 | if (!ret) { |
201 | struct seq_file *m = file->private_data; | 253 | struct seq_file *m = file->private_data; |
202 | m->private = &stat_list; | 254 | m->private = session; |
203 | ret = stat_seq_init(); | 255 | ret = stat_seq_init(session); |
204 | } | 256 | } |
205 | 257 | ||
206 | return ret; | 258 | return ret; |
@@ -212,9 +264,12 @@ static int tracing_stat_open(struct inode *inode, struct file *file) | |||
212 | */ | 264 | */ |
213 | static int tracing_stat_release(struct inode *i, struct file *f) | 265 | static int tracing_stat_release(struct inode *i, struct file *f) |
214 | { | 266 | { |
215 | mutex_lock(&stat_list_mutex); | 267 | struct tracer_stat_session *session = i->i_private; |
216 | reset_stat_list(); | 268 | |
217 | mutex_unlock(&stat_list_mutex); | 269 | mutex_lock(&session->stat_mutex); |
270 | reset_stat_session(session); | ||
271 | mutex_unlock(&session->stat_mutex); | ||
272 | |||
218 | return 0; | 273 | return 0; |
219 | } | 274 | } |
220 | 275 | ||
@@ -225,17 +280,70 @@ static const struct file_operations tracing_stat_fops = { | |||
225 | .release = tracing_stat_release | 280 | .release = tracing_stat_release |
226 | }; | 281 | }; |
227 | 282 | ||
283 | |||
284 | static void destroy_trace_stat_files(void) | ||
285 | { | ||
286 | int i; | ||
287 | |||
288 | if (stat_files) { | ||
289 | for (i = 0; i < nb_sessions; i++) | ||
290 | debugfs_remove(stat_files[i]); | ||
291 | kfree(stat_files); | ||
292 | stat_files = NULL; | ||
293 | } | ||
294 | } | ||
295 | |||
296 | static void init_trace_stat_files(void) | ||
297 | { | ||
298 | int i; | ||
299 | |||
300 | if (!stat_dir || !nb_sessions) | ||
301 | return; | ||
302 | |||
303 | stat_files = kmalloc(sizeof(struct dentry *) * nb_sessions, GFP_KERNEL); | ||
304 | |||
305 | if (!stat_files) { | ||
306 | pr_warning("trace stat: not enough memory\n"); | ||
307 | return; | ||
308 | } | ||
309 | |||
310 | for (i = 0; i < nb_sessions; i++) { | ||
311 | struct tracer_stat_session *session = all_stat_sessions[i]; | ||
312 | stat_files[i] = debugfs_create_file(session->ts->name, 0644, | ||
313 | stat_dir, | ||
314 | session, &tracing_stat_fops); | ||
315 | if (!stat_files[i]) | ||
316 | pr_warning("cannot create %s entry\n", | ||
317 | session->ts->name); | ||
318 | } | ||
319 | } | ||
320 | |||
321 | void init_tracer_stat(struct tracer *trace) | ||
322 | { | ||
323 | int nb = basic_tracer_stat_checks(trace->stats); | ||
324 | |||
325 | destroy_trace_stat_files(); | ||
326 | |||
327 | if (nb < 0) { | ||
328 | pr_warning("stat tracing: missing stat callback on %s\n", | ||
329 | trace->name); | ||
330 | return; | ||
331 | } | ||
332 | if (!nb) | ||
333 | return; | ||
334 | |||
335 | init_all_sessions(nb, trace->stats); | ||
336 | init_trace_stat_files(); | ||
337 | } | ||
338 | |||
228 | static int __init tracing_stat_init(void) | 339 | static int __init tracing_stat_init(void) |
229 | { | 340 | { |
230 | struct dentry *d_tracing; | 341 | struct dentry *d_tracing; |
231 | struct dentry *entry; | ||
232 | 342 | ||
233 | d_tracing = tracing_init_dentry(); | 343 | d_tracing = tracing_init_dentry(); |
234 | 344 | ||
235 | entry = debugfs_create_file("trace_stat", 0444, d_tracing, | 345 | stat_dir = debugfs_create_dir("trace_stat", d_tracing); |
236 | NULL, | 346 | if (!stat_dir) |
237 | &tracing_stat_fops); | ||
238 | if (!entry) | ||
239 | pr_warning("Could not create debugfs " | 347 | pr_warning("Could not create debugfs " |
240 | "'trace_stat' entry\n"); | 348 | "'trace_stat' entry\n"); |
241 | return 0; | 349 | return 0; |