author     Steven Rostedt <srostedt@redhat.com>       2012-05-03 23:09:03 -0400
committer  Steven Rostedt <rostedt@goodmis.org>       2013-03-15 00:34:40 -0400
commit     ae63b31e4d0e2ec09c569306ea46f664508ef717 (patch)
tree       0d40e8fddca53d1776254cd92fc73bc4413ee1f5 /kernel/trace/trace_events.c
parent     613f04a0f51e6e68ac6fe571ab79da3c0a5eb4da (diff)
tracing: Separate out trace events from global variables
The trace events for ftrace are all defined via global variables, and the
arrays of events and event systems are linked into a single global list.
This prevents multiple users of the event system from independently
choosing what to enable and what not to.
Adding descriptors to represent the event/file relation, as well as the
trace_array descriptor each is associated with, allows more than one set
of events to be defined. Once the trace event files carry a link between
a trace event and the trace_array it belongs to, we can create multiple
trace_arrays that record separate events in separate buffers.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace/trace_events.c')
-rw-r--r--  kernel/trace/trace_events.c  776
1 file changed, 533 insertions, 243 deletions
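
For orientation before reading the diff: a minimal sketch of the per-instance
descriptor this patch introduces and of the new double-loop helpers, reduced to
the fields and calls that actually appear in the diff below. The real definitions
live in the tracing headers, and the helper function is a hypothetical
illustration of the pattern, not code from the patch itself.

	#include <linux/list.h>
	#include <linux/mutex.h>

	struct trace_array;		/* one per tracing instance/buffer */
	struct ftrace_event_call;	/* the still-global event definition */
	struct ftrace_subsystem_dir;	/* per-instance subsystem directory */
	struct dentry;

	/* Ties one event to one trace_array (sketch; see the tracing headers). */
	struct ftrace_event_file {
		struct list_head		list;		/* node on tr->events */
		struct ftrace_event_call	*event_call;	/* which event this is */
		struct dentry			*dir;		/* per-instance debugfs dir */
		struct trace_array		*tr;		/* owning trace_array */
		struct ftrace_subsystem_dir	*system;	/* per-instance system dir */
		unsigned long			flags;		/* FTRACE_EVENT_FL_* state */
	};

	/*
	 * Hypothetical helper showing how the new do_for_each_event_file()/
	 * while_for_each_event_file() macros walk every event file of every
	 * trace_array, acting on the per-instance file rather than the global
	 * ftrace_event_call.  event_mutex, ftrace_trace_arrays and
	 * ftrace_event_enable_disable() are the existing symbols used in
	 * trace_events.c in this patch.
	 */
	static void example_disable_all_instances(void)
	{
		struct trace_array *tr;
		struct ftrace_event_file *file;

		mutex_lock(&event_mutex);
		do_for_each_event_file(tr, file) {
			ftrace_event_enable_disable(file, 0);
		} while_for_each_event_file();
		mutex_unlock(&event_mutex);
	}
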
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 57e9b284250c..439955239bae 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -36,6 +36,19 @@ EXPORT_SYMBOL_GPL(event_storage); | |||
36 | LIST_HEAD(ftrace_events); | 36 | LIST_HEAD(ftrace_events); |
37 | LIST_HEAD(ftrace_common_fields); | 37 | LIST_HEAD(ftrace_common_fields); |
38 | 38 | ||
39 | /* Double loops, do not use break, only goto's work */ | ||
40 | #define do_for_each_event_file(tr, file) \ | ||
41 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { \ | ||
42 | list_for_each_entry(file, &tr->events, list) | ||
43 | |||
44 | #define do_for_each_event_file_safe(tr, file) \ | ||
45 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { \ | ||
46 | struct ftrace_event_file *___n; \ | ||
47 | list_for_each_entry_safe(file, ___n, &tr->events, list) | ||
48 | |||
49 | #define while_for_each_event_file() \ | ||
50 | } | ||
51 | |||
39 | struct list_head * | 52 | struct list_head * |
40 | trace_get_fields(struct ftrace_event_call *event_call) | 53 | trace_get_fields(struct ftrace_event_call *event_call) |
41 | { | 54 | { |
@@ -149,15 +162,17 @@ EXPORT_SYMBOL_GPL(trace_event_raw_init); | |||
149 | int ftrace_event_reg(struct ftrace_event_call *call, | 162 | int ftrace_event_reg(struct ftrace_event_call *call, |
150 | enum trace_reg type, void *data) | 163 | enum trace_reg type, void *data) |
151 | { | 164 | { |
165 | struct ftrace_event_file *file = data; | ||
166 | |||
152 | switch (type) { | 167 | switch (type) { |
153 | case TRACE_REG_REGISTER: | 168 | case TRACE_REG_REGISTER: |
154 | return tracepoint_probe_register(call->name, | 169 | return tracepoint_probe_register(call->name, |
155 | call->class->probe, | 170 | call->class->probe, |
156 | call); | 171 | file); |
157 | case TRACE_REG_UNREGISTER: | 172 | case TRACE_REG_UNREGISTER: |
158 | tracepoint_probe_unregister(call->name, | 173 | tracepoint_probe_unregister(call->name, |
159 | call->class->probe, | 174 | call->class->probe, |
160 | call); | 175 | file); |
161 | return 0; | 176 | return 0; |
162 | 177 | ||
163 | #ifdef CONFIG_PERF_EVENTS | 178 | #ifdef CONFIG_PERF_EVENTS |
@@ -183,54 +198,57 @@ EXPORT_SYMBOL_GPL(ftrace_event_reg); | |||
183 | 198 | ||
184 | void trace_event_enable_cmd_record(bool enable) | 199 | void trace_event_enable_cmd_record(bool enable) |
185 | { | 200 | { |
186 | struct ftrace_event_call *call; | 201 | struct ftrace_event_file *file; |
202 | struct trace_array *tr; | ||
187 | 203 | ||
188 | mutex_lock(&event_mutex); | 204 | mutex_lock(&event_mutex); |
189 | list_for_each_entry(call, &ftrace_events, list) { | 205 | do_for_each_event_file(tr, file) { |
190 | if (!(call->flags & TRACE_EVENT_FL_ENABLED)) | 206 | |
207 | if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) | ||
191 | continue; | 208 | continue; |
192 | 209 | ||
193 | if (enable) { | 210 | if (enable) { |
194 | tracing_start_cmdline_record(); | 211 | tracing_start_cmdline_record(); |
195 | call->flags |= TRACE_EVENT_FL_RECORDED_CMD; | 212 | file->flags |= FTRACE_EVENT_FL_RECORDED_CMD; |
196 | } else { | 213 | } else { |
197 | tracing_stop_cmdline_record(); | 214 | tracing_stop_cmdline_record(); |
198 | call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD; | 215 | file->flags &= ~FTRACE_EVENT_FL_RECORDED_CMD; |
199 | } | 216 | } |
200 | } | 217 | } while_for_each_event_file(); |
201 | mutex_unlock(&event_mutex); | 218 | mutex_unlock(&event_mutex); |
202 | } | 219 | } |
203 | 220 | ||
204 | static int ftrace_event_enable_disable(struct ftrace_event_call *call, | 221 | static int ftrace_event_enable_disable(struct ftrace_event_file *file, |
205 | int enable) | 222 | int enable) |
206 | { | 223 | { |
224 | struct ftrace_event_call *call = file->event_call; | ||
207 | int ret = 0; | 225 | int ret = 0; |
208 | 226 | ||
209 | switch (enable) { | 227 | switch (enable) { |
210 | case 0: | 228 | case 0: |
211 | if (call->flags & TRACE_EVENT_FL_ENABLED) { | 229 | if (file->flags & FTRACE_EVENT_FL_ENABLED) { |
212 | call->flags &= ~TRACE_EVENT_FL_ENABLED; | 230 | file->flags &= ~FTRACE_EVENT_FL_ENABLED; |
213 | if (call->flags & TRACE_EVENT_FL_RECORDED_CMD) { | 231 | if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) { |
214 | tracing_stop_cmdline_record(); | 232 | tracing_stop_cmdline_record(); |
215 | call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD; | 233 | file->flags &= ~FTRACE_EVENT_FL_RECORDED_CMD; |
216 | } | 234 | } |
217 | call->class->reg(call, TRACE_REG_UNREGISTER, NULL); | 235 | call->class->reg(call, TRACE_REG_UNREGISTER, file); |
218 | } | 236 | } |
219 | break; | 237 | break; |
220 | case 1: | 238 | case 1: |
221 | if (!(call->flags & TRACE_EVENT_FL_ENABLED)) { | 239 | if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) { |
222 | if (trace_flags & TRACE_ITER_RECORD_CMD) { | 240 | if (trace_flags & TRACE_ITER_RECORD_CMD) { |
223 | tracing_start_cmdline_record(); | 241 | tracing_start_cmdline_record(); |
224 | call->flags |= TRACE_EVENT_FL_RECORDED_CMD; | 242 | file->flags |= FTRACE_EVENT_FL_RECORDED_CMD; |
225 | } | 243 | } |
226 | ret = call->class->reg(call, TRACE_REG_REGISTER, NULL); | 244 | ret = call->class->reg(call, TRACE_REG_REGISTER, file); |
227 | if (ret) { | 245 | if (ret) { |
228 | tracing_stop_cmdline_record(); | 246 | tracing_stop_cmdline_record(); |
229 | pr_info("event trace: Could not enable event " | 247 | pr_info("event trace: Could not enable event " |
230 | "%s\n", call->name); | 248 | "%s\n", call->name); |
231 | break; | 249 | break; |
232 | } | 250 | } |
233 | call->flags |= TRACE_EVENT_FL_ENABLED; | 251 | file->flags |= FTRACE_EVENT_FL_ENABLED; |
234 | } | 252 | } |
235 | break; | 253 | break; |
236 | } | 254 | } |
@@ -238,13 +256,13 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call, | |||
238 | return ret; | 256 | return ret; |
239 | } | 257 | } |
240 | 258 | ||
241 | static void ftrace_clear_events(void) | 259 | static void ftrace_clear_events(struct trace_array *tr) |
242 | { | 260 | { |
243 | struct ftrace_event_call *call; | 261 | struct ftrace_event_file *file; |
244 | 262 | ||
245 | mutex_lock(&event_mutex); | 263 | mutex_lock(&event_mutex); |
246 | list_for_each_entry(call, &ftrace_events, list) { | 264 | list_for_each_entry(file, &tr->events, list) { |
247 | ftrace_event_enable_disable(call, 0); | 265 | ftrace_event_enable_disable(file, 0); |
248 | } | 266 | } |
249 | mutex_unlock(&event_mutex); | 267 | mutex_unlock(&event_mutex); |
250 | } | 268 | } |
@@ -257,6 +275,8 @@ static void __put_system(struct event_subsystem *system) | |||
257 | if (--system->ref_count) | 275 | if (--system->ref_count) |
258 | return; | 276 | return; |
259 | 277 | ||
278 | list_del(&system->list); | ||
279 | |||
260 | if (filter) { | 280 | if (filter) { |
261 | kfree(filter->filter_string); | 281 | kfree(filter->filter_string); |
262 | kfree(filter); | 282 | kfree(filter); |
@@ -271,24 +291,45 @@ static void __get_system(struct event_subsystem *system) | |||
271 | system->ref_count++; | 291 | system->ref_count++; |
272 | } | 292 | } |
273 | 293 | ||
274 | static void put_system(struct event_subsystem *system) | 294 | static void __get_system_dir(struct ftrace_subsystem_dir *dir) |
295 | { | ||
296 | WARN_ON_ONCE(dir->ref_count == 0); | ||
297 | dir->ref_count++; | ||
298 | __get_system(dir->subsystem); | ||
299 | } | ||
300 | |||
301 | static void __put_system_dir(struct ftrace_subsystem_dir *dir) | ||
302 | { | ||
303 | WARN_ON_ONCE(dir->ref_count == 0); | ||
304 | /* If the subsystem is about to be freed, the dir must be too */ | ||
305 | WARN_ON_ONCE(dir->subsystem->ref_count == 1 && dir->ref_count != 1); | ||
306 | |||
307 | __put_system(dir->subsystem); | ||
308 | if (!--dir->ref_count) | ||
309 | kfree(dir); | ||
310 | } | ||
311 | |||
312 | static void put_system(struct ftrace_subsystem_dir *dir) | ||
275 | { | 313 | { |
276 | mutex_lock(&event_mutex); | 314 | mutex_lock(&event_mutex); |
277 | __put_system(system); | 315 | __put_system_dir(dir); |
278 | mutex_unlock(&event_mutex); | 316 | mutex_unlock(&event_mutex); |
279 | } | 317 | } |
280 | 318 | ||
281 | /* | 319 | /* |
282 | * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events. | 320 | * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events. |
283 | */ | 321 | */ |
284 | static int __ftrace_set_clr_event(const char *match, const char *sub, | 322 | static int __ftrace_set_clr_event(struct trace_array *tr, const char *match, |
285 | const char *event, int set) | 323 | const char *sub, const char *event, int set) |
286 | { | 324 | { |
325 | struct ftrace_event_file *file; | ||
287 | struct ftrace_event_call *call; | 326 | struct ftrace_event_call *call; |
288 | int ret = -EINVAL; | 327 | int ret = -EINVAL; |
289 | 328 | ||
290 | mutex_lock(&event_mutex); | 329 | mutex_lock(&event_mutex); |
291 | list_for_each_entry(call, &ftrace_events, list) { | 330 | list_for_each_entry(file, &tr->events, list) { |
331 | |||
332 | call = file->event_call; | ||
292 | 333 | ||
293 | if (!call->name || !call->class || !call->class->reg) | 334 | if (!call->name || !call->class || !call->class->reg) |
294 | continue; | 335 | continue; |
@@ -307,7 +348,7 @@ static int __ftrace_set_clr_event(const char *match, const char *sub, | |||
307 | if (event && strcmp(event, call->name) != 0) | 348 | if (event && strcmp(event, call->name) != 0) |
308 | continue; | 349 | continue; |
309 | 350 | ||
310 | ftrace_event_enable_disable(call, set); | 351 | ftrace_event_enable_disable(file, set); |
311 | 352 | ||
312 | ret = 0; | 353 | ret = 0; |
313 | } | 354 | } |
@@ -316,7 +357,7 @@ static int __ftrace_set_clr_event(const char *match, const char *sub, | |||
316 | return ret; | 357 | return ret; |
317 | } | 358 | } |
318 | 359 | ||
319 | static int ftrace_set_clr_event(char *buf, int set) | 360 | static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set) |
320 | { | 361 | { |
321 | char *event = NULL, *sub = NULL, *match; | 362 | char *event = NULL, *sub = NULL, *match; |
322 | 363 | ||
@@ -344,7 +385,7 @@ static int ftrace_set_clr_event(char *buf, int set) | |||
344 | event = NULL; | 385 | event = NULL; |
345 | } | 386 | } |
346 | 387 | ||
347 | return __ftrace_set_clr_event(match, sub, event, set); | 388 | return __ftrace_set_clr_event(tr, match, sub, event, set); |
348 | } | 389 | } |
349 | 390 | ||
350 | /** | 391 | /** |
@@ -361,7 +402,9 @@ static int ftrace_set_clr_event(char *buf, int set) | |||
361 | */ | 402 | */ |
362 | int trace_set_clr_event(const char *system, const char *event, int set) | 403 | int trace_set_clr_event(const char *system, const char *event, int set) |
363 | { | 404 | { |
364 | return __ftrace_set_clr_event(NULL, system, event, set); | 405 | struct trace_array *tr = top_trace_array(); |
406 | |||
407 | return __ftrace_set_clr_event(tr, NULL, system, event, set); | ||
365 | } | 408 | } |
366 | EXPORT_SYMBOL_GPL(trace_set_clr_event); | 409 | EXPORT_SYMBOL_GPL(trace_set_clr_event); |
367 | 410 | ||
@@ -373,6 +416,8 @@ ftrace_event_write(struct file *file, const char __user *ubuf, | |||
373 | size_t cnt, loff_t *ppos) | 416 | size_t cnt, loff_t *ppos) |
374 | { | 417 | { |
375 | struct trace_parser parser; | 418 | struct trace_parser parser; |
419 | struct seq_file *m = file->private_data; | ||
420 | struct trace_array *tr = m->private; | ||
376 | ssize_t read, ret; | 421 | ssize_t read, ret; |
377 | 422 | ||
378 | if (!cnt) | 423 | if (!cnt) |
@@ -395,7 +440,7 @@ ftrace_event_write(struct file *file, const char __user *ubuf, | |||
395 | 440 | ||
396 | parser.buffer[parser.idx] = 0; | 441 | parser.buffer[parser.idx] = 0; |
397 | 442 | ||
398 | ret = ftrace_set_clr_event(parser.buffer + !set, set); | 443 | ret = ftrace_set_clr_event(tr, parser.buffer + !set, set); |
399 | if (ret) | 444 | if (ret) |
400 | goto out_put; | 445 | goto out_put; |
401 | } | 446 | } |
@@ -411,17 +456,20 @@ ftrace_event_write(struct file *file, const char __user *ubuf, | |||
411 | static void * | 456 | static void * |
412 | t_next(struct seq_file *m, void *v, loff_t *pos) | 457 | t_next(struct seq_file *m, void *v, loff_t *pos) |
413 | { | 458 | { |
414 | struct ftrace_event_call *call = v; | 459 | struct ftrace_event_file *file = v; |
460 | struct ftrace_event_call *call; | ||
461 | struct trace_array *tr = m->private; | ||
415 | 462 | ||
416 | (*pos)++; | 463 | (*pos)++; |
417 | 464 | ||
418 | list_for_each_entry_continue(call, &ftrace_events, list) { | 465 | list_for_each_entry_continue(file, &tr->events, list) { |
466 | call = file->event_call; | ||
419 | /* | 467 | /* |
420 | * The ftrace subsystem is for showing formats only. | 468 | * The ftrace subsystem is for showing formats only. |
421 | * They can not be enabled or disabled via the event files. | 469 | * They can not be enabled or disabled via the event files. |
422 | */ | 470 | */ |
423 | if (call->class && call->class->reg) | 471 | if (call->class && call->class->reg) |
424 | return call; | 472 | return file; |
425 | } | 473 | } |
426 | 474 | ||
427 | return NULL; | 475 | return NULL; |
@@ -429,30 +477,32 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
429 | 477 | ||
430 | static void *t_start(struct seq_file *m, loff_t *pos) | 478 | static void *t_start(struct seq_file *m, loff_t *pos) |
431 | { | 479 | { |
432 | struct ftrace_event_call *call; | 480 | struct ftrace_event_file *file; |
481 | struct trace_array *tr = m->private; | ||
433 | loff_t l; | 482 | loff_t l; |
434 | 483 | ||
435 | mutex_lock(&event_mutex); | 484 | mutex_lock(&event_mutex); |
436 | 485 | ||
437 | call = list_entry(&ftrace_events, struct ftrace_event_call, list); | 486 | file = list_entry(&tr->events, struct ftrace_event_file, list); |
438 | for (l = 0; l <= *pos; ) { | 487 | for (l = 0; l <= *pos; ) { |
439 | call = t_next(m, call, &l); | 488 | file = t_next(m, file, &l); |
440 | if (!call) | 489 | if (!file) |
441 | break; | 490 | break; |
442 | } | 491 | } |
443 | return call; | 492 | return file; |
444 | } | 493 | } |
445 | 494 | ||
446 | static void * | 495 | static void * |
447 | s_next(struct seq_file *m, void *v, loff_t *pos) | 496 | s_next(struct seq_file *m, void *v, loff_t *pos) |
448 | { | 497 | { |
449 | struct ftrace_event_call *call = v; | 498 | struct ftrace_event_file *file = v; |
499 | struct trace_array *tr = m->private; | ||
450 | 500 | ||
451 | (*pos)++; | 501 | (*pos)++; |
452 | 502 | ||
453 | list_for_each_entry_continue(call, &ftrace_events, list) { | 503 | list_for_each_entry_continue(file, &tr->events, list) { |
454 | if (call->flags & TRACE_EVENT_FL_ENABLED) | 504 | if (file->flags & FTRACE_EVENT_FL_ENABLED) |
455 | return call; | 505 | return file; |
456 | } | 506 | } |
457 | 507 | ||
458 | return NULL; | 508 | return NULL; |
@@ -460,23 +510,25 @@ s_next(struct seq_file *m, void *v, loff_t *pos) | |||
460 | 510 | ||
461 | static void *s_start(struct seq_file *m, loff_t *pos) | 511 | static void *s_start(struct seq_file *m, loff_t *pos) |
462 | { | 512 | { |
463 | struct ftrace_event_call *call; | 513 | struct ftrace_event_file *file; |
514 | struct trace_array *tr = m->private; | ||
464 | loff_t l; | 515 | loff_t l; |
465 | 516 | ||
466 | mutex_lock(&event_mutex); | 517 | mutex_lock(&event_mutex); |
467 | 518 | ||
468 | call = list_entry(&ftrace_events, struct ftrace_event_call, list); | 519 | file = list_entry(&tr->events, struct ftrace_event_file, list); |
469 | for (l = 0; l <= *pos; ) { | 520 | for (l = 0; l <= *pos; ) { |
470 | call = s_next(m, call, &l); | 521 | file = s_next(m, file, &l); |
471 | if (!call) | 522 | if (!file) |
472 | break; | 523 | break; |
473 | } | 524 | } |
474 | return call; | 525 | return file; |
475 | } | 526 | } |
476 | 527 | ||
477 | static int t_show(struct seq_file *m, void *v) | 528 | static int t_show(struct seq_file *m, void *v) |
478 | { | 529 | { |
479 | struct ftrace_event_call *call = v; | 530 | struct ftrace_event_file *file = v; |
531 | struct ftrace_event_call *call = file->event_call; | ||
480 | 532 | ||
481 | if (strcmp(call->class->system, TRACE_SYSTEM) != 0) | 533 | if (strcmp(call->class->system, TRACE_SYSTEM) != 0) |
482 | seq_printf(m, "%s:", call->class->system); | 534 | seq_printf(m, "%s:", call->class->system); |
@@ -494,10 +546,10 @@ static ssize_t | |||
494 | event_enable_read(struct file *filp, char __user *ubuf, size_t cnt, | 546 | event_enable_read(struct file *filp, char __user *ubuf, size_t cnt, |
495 | loff_t *ppos) | 547 | loff_t *ppos) |
496 | { | 548 | { |
497 | struct ftrace_event_call *call = filp->private_data; | 549 | struct ftrace_event_file *file = filp->private_data; |
498 | char *buf; | 550 | char *buf; |
499 | 551 | ||
500 | if (call->flags & TRACE_EVENT_FL_ENABLED) | 552 | if (file->flags & FTRACE_EVENT_FL_ENABLED) |
501 | buf = "1\n"; | 553 | buf = "1\n"; |
502 | else | 554 | else |
503 | buf = "0\n"; | 555 | buf = "0\n"; |
@@ -509,10 +561,13 @@ static ssize_t | |||
509 | event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, | 561 | event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, |
510 | loff_t *ppos) | 562 | loff_t *ppos) |
511 | { | 563 | { |
512 | struct ftrace_event_call *call = filp->private_data; | 564 | struct ftrace_event_file *file = filp->private_data; |
513 | unsigned long val; | 565 | unsigned long val; |
514 | int ret; | 566 | int ret; |
515 | 567 | ||
568 | if (!file) | ||
569 | return -EINVAL; | ||
570 | |||
516 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); | 571 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); |
517 | if (ret) | 572 | if (ret) |
518 | return ret; | 573 | return ret; |
@@ -525,7 +580,7 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
525 | case 0: | 580 | case 0: |
526 | case 1: | 581 | case 1: |
527 | mutex_lock(&event_mutex); | 582 | mutex_lock(&event_mutex); |
528 | ret = ftrace_event_enable_disable(call, val); | 583 | ret = ftrace_event_enable_disable(file, val); |
529 | mutex_unlock(&event_mutex); | 584 | mutex_unlock(&event_mutex); |
530 | break; | 585 | break; |
531 | 586 | ||
@@ -543,14 +598,18 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt, | |||
543 | loff_t *ppos) | 598 | loff_t *ppos) |
544 | { | 599 | { |
545 | const char set_to_char[4] = { '?', '0', '1', 'X' }; | 600 | const char set_to_char[4] = { '?', '0', '1', 'X' }; |
546 | struct event_subsystem *system = filp->private_data; | 601 | struct ftrace_subsystem_dir *dir = filp->private_data; |
602 | struct event_subsystem *system = dir->subsystem; | ||
547 | struct ftrace_event_call *call; | 603 | struct ftrace_event_call *call; |
604 | struct ftrace_event_file *file; | ||
605 | struct trace_array *tr = dir->tr; | ||
548 | char buf[2]; | 606 | char buf[2]; |
549 | int set = 0; | 607 | int set = 0; |
550 | int ret; | 608 | int ret; |
551 | 609 | ||
552 | mutex_lock(&event_mutex); | 610 | mutex_lock(&event_mutex); |
553 | list_for_each_entry(call, &ftrace_events, list) { | 611 | list_for_each_entry(file, &tr->events, list) { |
612 | call = file->event_call; | ||
554 | if (!call->name || !call->class || !call->class->reg) | 613 | if (!call->name || !call->class || !call->class->reg) |
555 | continue; | 614 | continue; |
556 | 615 | ||
@@ -562,7 +621,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt, | |||
562 | * or if all events or cleared, or if we have | 621 | * or if all events or cleared, or if we have |
563 | * a mixture. | 622 | * a mixture. |
564 | */ | 623 | */ |
565 | set |= (1 << !!(call->flags & TRACE_EVENT_FL_ENABLED)); | 624 | set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED)); |
566 | 625 | ||
567 | /* | 626 | /* |
568 | * If we have a mixture, no need to look further. | 627 | * If we have a mixture, no need to look further. |
@@ -584,7 +643,8 @@ static ssize_t | |||
584 | system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, | 643 | system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, |
585 | loff_t *ppos) | 644 | loff_t *ppos) |
586 | { | 645 | { |
587 | struct event_subsystem *system = filp->private_data; | 646 | struct ftrace_subsystem_dir *dir = filp->private_data; |
647 | struct event_subsystem *system = dir->subsystem; | ||
588 | const char *name = NULL; | 648 | const char *name = NULL; |
589 | unsigned long val; | 649 | unsigned long val; |
590 | ssize_t ret; | 650 | ssize_t ret; |
@@ -607,7 +667,7 @@ system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
607 | if (system) | 667 | if (system) |
608 | name = system->name; | 668 | name = system->name; |
609 | 669 | ||
610 | ret = __ftrace_set_clr_event(NULL, name, NULL, val); | 670 | ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val); |
611 | if (ret) | 671 | if (ret) |
612 | goto out; | 672 | goto out; |
613 | 673 | ||
@@ -845,43 +905,75 @@ static LIST_HEAD(event_subsystems); | |||
845 | static int subsystem_open(struct inode *inode, struct file *filp) | 905 | static int subsystem_open(struct inode *inode, struct file *filp) |
846 | { | 906 | { |
847 | struct event_subsystem *system = NULL; | 907 | struct event_subsystem *system = NULL; |
908 | struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */ | ||
909 | struct trace_array *tr; | ||
848 | int ret; | 910 | int ret; |
849 | 911 | ||
850 | if (!inode->i_private) | ||
851 | goto skip_search; | ||
852 | |||
853 | /* Make sure the system still exists */ | 912 | /* Make sure the system still exists */ |
854 | mutex_lock(&event_mutex); | 913 | mutex_lock(&event_mutex); |
855 | list_for_each_entry(system, &event_subsystems, list) { | 914 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { |
856 | if (system == inode->i_private) { | 915 | list_for_each_entry(dir, &tr->systems, list) { |
857 | /* Don't open systems with no events */ | 916 | if (dir == inode->i_private) { |
858 | if (!system->nr_events) { | 917 | /* Don't open systems with no events */ |
859 | system = NULL; | 918 | if (dir->nr_events) { |
860 | break; | 919 | __get_system_dir(dir); |
920 | system = dir->subsystem; | ||
921 | } | ||
922 | goto exit_loop; | ||
861 | } | 923 | } |
862 | __get_system(system); | ||
863 | break; | ||
864 | } | 924 | } |
865 | } | 925 | } |
926 | exit_loop: | ||
866 | mutex_unlock(&event_mutex); | 927 | mutex_unlock(&event_mutex); |
867 | 928 | ||
868 | if (system != inode->i_private) | 929 | if (!system) |
869 | return -ENODEV; | 930 | return -ENODEV; |
870 | 931 | ||
871 | skip_search: | 932 | /* Some versions of gcc think dir can be uninitialized here */ |
933 | WARN_ON(!dir); | ||
934 | |||
872 | ret = tracing_open_generic(inode, filp); | 935 | ret = tracing_open_generic(inode, filp); |
873 | if (ret < 0 && system) | 936 | if (ret < 0) |
874 | put_system(system); | 937 | put_system(dir); |
938 | |||
939 | return ret; | ||
940 | } | ||
941 | |||
942 | static int system_tr_open(struct inode *inode, struct file *filp) | ||
943 | { | ||
944 | struct ftrace_subsystem_dir *dir; | ||
945 | struct trace_array *tr = inode->i_private; | ||
946 | int ret; | ||
947 | |||
948 | /* Make a temporary dir that has no system but points to tr */ | ||
949 | dir = kzalloc(sizeof(*dir), GFP_KERNEL); | ||
950 | if (!dir) | ||
951 | return -ENOMEM; | ||
952 | |||
953 | dir->tr = tr; | ||
954 | |||
955 | ret = tracing_open_generic(inode, filp); | ||
956 | if (ret < 0) | ||
957 | kfree(dir); | ||
958 | |||
959 | filp->private_data = dir; | ||
875 | 960 | ||
876 | return ret; | 961 | return ret; |
877 | } | 962 | } |
878 | 963 | ||
879 | static int subsystem_release(struct inode *inode, struct file *file) | 964 | static int subsystem_release(struct inode *inode, struct file *file) |
880 | { | 965 | { |
881 | struct event_subsystem *system = inode->i_private; | 966 | struct ftrace_subsystem_dir *dir = file->private_data; |
882 | 967 | ||
883 | if (system) | 968 | /* |
884 | put_system(system); | 969 | * If dir->subsystem is NULL, then this is a temporary |
970 | * descriptor that was made for a trace_array to enable | ||
971 | * all subsystems. | ||
972 | */ | ||
973 | if (dir->subsystem) | ||
974 | put_system(dir); | ||
975 | else | ||
976 | kfree(dir); | ||
885 | 977 | ||
886 | return 0; | 978 | return 0; |
887 | } | 979 | } |
@@ -890,7 +982,8 @@ static ssize_t | |||
890 | subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt, | 982 | subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt, |
891 | loff_t *ppos) | 983 | loff_t *ppos) |
892 | { | 984 | { |
893 | struct event_subsystem *system = filp->private_data; | 985 | struct ftrace_subsystem_dir *dir = filp->private_data; |
986 | struct event_subsystem *system = dir->subsystem; | ||
894 | struct trace_seq *s; | 987 | struct trace_seq *s; |
895 | int r; | 988 | int r; |
896 | 989 | ||
@@ -915,7 +1008,7 @@ static ssize_t | |||
915 | subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, | 1008 | subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, |
916 | loff_t *ppos) | 1009 | loff_t *ppos) |
917 | { | 1010 | { |
918 | struct event_subsystem *system = filp->private_data; | 1011 | struct ftrace_subsystem_dir *dir = filp->private_data; |
919 | char *buf; | 1012 | char *buf; |
920 | int err; | 1013 | int err; |
921 | 1014 | ||
@@ -932,7 +1025,7 @@ subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
932 | } | 1025 | } |
933 | buf[cnt] = '\0'; | 1026 | buf[cnt] = '\0'; |
934 | 1027 | ||
935 | err = apply_subsystem_event_filter(system, buf); | 1028 | err = apply_subsystem_event_filter(dir, buf); |
936 | free_page((unsigned long) buf); | 1029 | free_page((unsigned long) buf); |
937 | if (err < 0) | 1030 | if (err < 0) |
938 | return err; | 1031 | return err; |
@@ -1041,30 +1134,35 @@ static const struct file_operations ftrace_system_enable_fops = { | |||
1041 | .release = subsystem_release, | 1134 | .release = subsystem_release, |
1042 | }; | 1135 | }; |
1043 | 1136 | ||
1137 | static const struct file_operations ftrace_tr_enable_fops = { | ||
1138 | .open = system_tr_open, | ||
1139 | .read = system_enable_read, | ||
1140 | .write = system_enable_write, | ||
1141 | .llseek = default_llseek, | ||
1142 | .release = subsystem_release, | ||
1143 | }; | ||
1144 | |||
1044 | static const struct file_operations ftrace_show_header_fops = { | 1145 | static const struct file_operations ftrace_show_header_fops = { |
1045 | .open = tracing_open_generic, | 1146 | .open = tracing_open_generic, |
1046 | .read = show_header, | 1147 | .read = show_header, |
1047 | .llseek = default_llseek, | 1148 | .llseek = default_llseek, |
1048 | }; | 1149 | }; |
1049 | 1150 | ||
1050 | static struct dentry *event_trace_events_dir(void) | 1151 | static int |
1152 | ftrace_event_open(struct inode *inode, struct file *file, | ||
1153 | const struct seq_operations *seq_ops) | ||
1051 | { | 1154 | { |
1052 | static struct dentry *d_tracer; | 1155 | struct seq_file *m; |
1053 | static struct dentry *d_events; | 1156 | int ret; |
1054 | |||
1055 | if (d_events) | ||
1056 | return d_events; | ||
1057 | |||
1058 | d_tracer = tracing_init_dentry(); | ||
1059 | if (!d_tracer) | ||
1060 | return NULL; | ||
1061 | 1157 | ||
1062 | d_events = debugfs_create_dir("events", d_tracer); | 1158 | ret = seq_open(file, seq_ops); |
1063 | if (!d_events) | 1159 | if (ret < 0) |
1064 | pr_warning("Could not create debugfs " | 1160 | return ret; |
1065 | "'events' directory\n"); | 1161 | m = file->private_data; |
1162 | /* copy tr over to seq ops */ | ||
1163 | m->private = inode->i_private; | ||
1066 | 1164 | ||
1067 | return d_events; | 1165 | return ret; |
1068 | } | 1166 | } |
1069 | 1167 | ||
1070 | static int | 1168 | static int |
@@ -1072,117 +1170,169 @@ ftrace_event_avail_open(struct inode *inode, struct file *file) | |||
1072 | { | 1170 | { |
1073 | const struct seq_operations *seq_ops = &show_event_seq_ops; | 1171 | const struct seq_operations *seq_ops = &show_event_seq_ops; |
1074 | 1172 | ||
1075 | return seq_open(file, seq_ops); | 1173 | return ftrace_event_open(inode, file, seq_ops); |
1076 | } | 1174 | } |
1077 | 1175 | ||
1078 | static int | 1176 | static int |
1079 | ftrace_event_set_open(struct inode *inode, struct file *file) | 1177 | ftrace_event_set_open(struct inode *inode, struct file *file) |
1080 | { | 1178 | { |
1081 | const struct seq_operations *seq_ops = &show_set_event_seq_ops; | 1179 | const struct seq_operations *seq_ops = &show_set_event_seq_ops; |
1180 | struct trace_array *tr = inode->i_private; | ||
1082 | 1181 | ||
1083 | if ((file->f_mode & FMODE_WRITE) && | 1182 | if ((file->f_mode & FMODE_WRITE) && |
1084 | (file->f_flags & O_TRUNC)) | 1183 | (file->f_flags & O_TRUNC)) |
1085 | ftrace_clear_events(); | 1184 | ftrace_clear_events(tr); |
1086 | 1185 | ||
1087 | return seq_open(file, seq_ops); | 1186 | return ftrace_event_open(inode, file, seq_ops); |
1187 | } | ||
1188 | |||
1189 | static struct event_subsystem * | ||
1190 | create_new_subsystem(const char *name) | ||
1191 | { | ||
1192 | struct event_subsystem *system; | ||
1193 | |||
1194 | /* need to create new entry */ | ||
1195 | system = kmalloc(sizeof(*system), GFP_KERNEL); | ||
1196 | if (!system) | ||
1197 | return NULL; | ||
1198 | |||
1199 | system->ref_count = 1; | ||
1200 | system->name = kstrdup(name, GFP_KERNEL); | ||
1201 | |||
1202 | if (!system->name) | ||
1203 | goto out_free; | ||
1204 | |||
1205 | system->filter = NULL; | ||
1206 | |||
1207 | system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL); | ||
1208 | if (!system->filter) | ||
1209 | goto out_free; | ||
1210 | |||
1211 | list_add(&system->list, &event_subsystems); | ||
1212 | |||
1213 | return system; | ||
1214 | |||
1215 | out_free: | ||
1216 | kfree(system->name); | ||
1217 | kfree(system); | ||
1218 | return NULL; | ||
1088 | } | 1219 | } |
1089 | 1220 | ||
1090 | static struct dentry * | 1221 | static struct dentry * |
1091 | event_subsystem_dir(const char *name, struct dentry *d_events) | 1222 | event_subsystem_dir(struct trace_array *tr, const char *name, |
1223 | struct ftrace_event_file *file, struct dentry *parent) | ||
1092 | { | 1224 | { |
1225 | struct ftrace_subsystem_dir *dir; | ||
1093 | struct event_subsystem *system; | 1226 | struct event_subsystem *system; |
1094 | struct dentry *entry; | 1227 | struct dentry *entry; |
1095 | 1228 | ||
1096 | /* First see if we did not already create this dir */ | 1229 | /* First see if we did not already create this dir */ |
1097 | list_for_each_entry(system, &event_subsystems, list) { | 1230 | list_for_each_entry(dir, &tr->systems, list) { |
1231 | system = dir->subsystem; | ||
1098 | if (strcmp(system->name, name) == 0) { | 1232 | if (strcmp(system->name, name) == 0) { |
1099 | system->nr_events++; | 1233 | dir->nr_events++; |
1100 | return system->entry; | 1234 | file->system = dir; |
1235 | return dir->entry; | ||
1101 | } | 1236 | } |
1102 | } | 1237 | } |
1103 | 1238 | ||
1104 | /* need to create new entry */ | 1239 | /* Now see if the system itself exists. */ |
1105 | system = kmalloc(sizeof(*system), GFP_KERNEL); | 1240 | list_for_each_entry(system, &event_subsystems, list) { |
1106 | if (!system) { | 1241 | if (strcmp(system->name, name) == 0) |
1107 | pr_warning("No memory to create event subsystem %s\n", | 1242 | break; |
1108 | name); | ||
1109 | return d_events; | ||
1110 | } | 1243 | } |
1244 | /* Reset system variable when not found */ | ||
1245 | if (&system->list == &event_subsystems) | ||
1246 | system = NULL; | ||
1111 | 1247 | ||
1112 | system->entry = debugfs_create_dir(name, d_events); | 1248 | dir = kmalloc(sizeof(*dir), GFP_KERNEL); |
1113 | if (!system->entry) { | 1249 | if (!dir) |
1114 | pr_warning("Could not create event subsystem %s\n", | 1250 | goto out_fail; |
1115 | name); | ||
1116 | kfree(system); | ||
1117 | return d_events; | ||
1118 | } | ||
1119 | 1251 | ||
1120 | system->nr_events = 1; | 1252 | if (!system) { |
1121 | system->ref_count = 1; | 1253 | system = create_new_subsystem(name); |
1122 | system->name = kstrdup(name, GFP_KERNEL); | 1254 | if (!system) |
1123 | if (!system->name) { | 1255 | goto out_free; |
1124 | debugfs_remove(system->entry); | 1256 | } else |
1125 | kfree(system); | 1257 | __get_system(system); |
1126 | return d_events; | 1258 | |
1259 | dir->entry = debugfs_create_dir(name, parent); | ||
1260 | if (!dir->entry) { | ||
1261 | pr_warning("Failed to create system directory %s\n", name); | ||
1262 | __put_system(system); | ||
1263 | goto out_free; | ||
1127 | } | 1264 | } |
1128 | 1265 | ||
1129 | list_add(&system->list, &event_subsystems); | 1266 | dir->tr = tr; |
1130 | 1267 | dir->ref_count = 1; | |
1131 | system->filter = NULL; | 1268 | dir->nr_events = 1; |
1132 | 1269 | dir->subsystem = system; | |
1133 | system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL); | 1270 | file->system = dir; |
1134 | if (!system->filter) { | ||
1135 | pr_warning("Could not allocate filter for subsystem " | ||
1136 | "'%s'\n", name); | ||
1137 | return system->entry; | ||
1138 | } | ||
1139 | 1271 | ||
1140 | entry = debugfs_create_file("filter", 0644, system->entry, system, | 1272 | entry = debugfs_create_file("filter", 0644, dir->entry, dir, |
1141 | &ftrace_subsystem_filter_fops); | 1273 | &ftrace_subsystem_filter_fops); |
1142 | if (!entry) { | 1274 | if (!entry) { |
1143 | kfree(system->filter); | 1275 | kfree(system->filter); |
1144 | system->filter = NULL; | 1276 | system->filter = NULL; |
1145 | pr_warning("Could not create debugfs " | 1277 | pr_warning("Could not create debugfs '%s/filter' entry\n", name); |
1146 | "'%s/filter' entry\n", name); | ||
1147 | } | 1278 | } |
1148 | 1279 | ||
1149 | trace_create_file("enable", 0644, system->entry, system, | 1280 | trace_create_file("enable", 0644, dir->entry, dir, |
1150 | &ftrace_system_enable_fops); | 1281 | &ftrace_system_enable_fops); |
1151 | 1282 | ||
1152 | return system->entry; | 1283 | list_add(&dir->list, &tr->systems); |
1284 | |||
1285 | return dir->entry; | ||
1286 | |||
1287 | out_free: | ||
1288 | kfree(dir); | ||
1289 | out_fail: | ||
1290 | /* Only print this message if failed on memory allocation */ | ||
1291 | if (!dir || !system) | ||
1292 | pr_warning("No memory to create event subsystem %s\n", | ||
1293 | name); | ||
1294 | return NULL; | ||
1153 | } | 1295 | } |
1154 | 1296 | ||
1155 | static int | 1297 | static int |
1156 | event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, | 1298 | event_create_dir(struct dentry *parent, |
1299 | struct ftrace_event_file *file, | ||
1157 | const struct file_operations *id, | 1300 | const struct file_operations *id, |
1158 | const struct file_operations *enable, | 1301 | const struct file_operations *enable, |
1159 | const struct file_operations *filter, | 1302 | const struct file_operations *filter, |
1160 | const struct file_operations *format) | 1303 | const struct file_operations *format) |
1161 | { | 1304 | { |
1305 | struct ftrace_event_call *call = file->event_call; | ||
1306 | struct trace_array *tr = file->tr; | ||
1162 | struct list_head *head; | 1307 | struct list_head *head; |
1308 | struct dentry *d_events; | ||
1163 | int ret; | 1309 | int ret; |
1164 | 1310 | ||
1165 | /* | 1311 | /* |
1166 | * If the trace point header did not define TRACE_SYSTEM | 1312 | * If the trace point header did not define TRACE_SYSTEM |
1167 | * then the system would be called "TRACE_SYSTEM". | 1313 | * then the system would be called "TRACE_SYSTEM". |
1168 | */ | 1314 | */ |
1169 | if (strcmp(call->class->system, TRACE_SYSTEM) != 0) | 1315 | if (strcmp(call->class->system, TRACE_SYSTEM) != 0) { |
1170 | d_events = event_subsystem_dir(call->class->system, d_events); | 1316 | d_events = event_subsystem_dir(tr, call->class->system, file, parent); |
1171 | 1317 | if (!d_events) | |
1172 | call->dir = debugfs_create_dir(call->name, d_events); | 1318 | return -ENOMEM; |
1173 | if (!call->dir) { | 1319 | } else |
1174 | pr_warning("Could not create debugfs " | 1320 | d_events = parent; |
1175 | "'%s' directory\n", call->name); | 1321 | |
1322 | file->dir = debugfs_create_dir(call->name, d_events); | ||
1323 | if (!file->dir) { | ||
1324 | pr_warning("Could not create debugfs '%s' directory\n", | ||
1325 | call->name); | ||
1176 | return -1; | 1326 | return -1; |
1177 | } | 1327 | } |
1178 | 1328 | ||
1179 | if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) | 1329 | if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) |
1180 | trace_create_file("enable", 0644, call->dir, call, | 1330 | trace_create_file("enable", 0644, file->dir, file, |
1181 | enable); | 1331 | enable); |
1182 | 1332 | ||
1183 | #ifdef CONFIG_PERF_EVENTS | 1333 | #ifdef CONFIG_PERF_EVENTS |
1184 | if (call->event.type && call->class->reg) | 1334 | if (call->event.type && call->class->reg) |
1185 | trace_create_file("id", 0444, call->dir, call, | 1335 | trace_create_file("id", 0444, file->dir, call, |
1186 | id); | 1336 | id); |
1187 | #endif | 1337 | #endif |
1188 | 1338 | ||
@@ -1196,23 +1346,76 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, | |||
1196 | if (ret < 0) { | 1346 | if (ret < 0) { |
1197 | pr_warning("Could not initialize trace point" | 1347 | pr_warning("Could not initialize trace point" |
1198 | " events/%s\n", call->name); | 1348 | " events/%s\n", call->name); |
1199 | return ret; | 1349 | return -1; |
1200 | } | 1350 | } |
1201 | } | 1351 | } |
1202 | trace_create_file("filter", 0644, call->dir, call, | 1352 | trace_create_file("filter", 0644, file->dir, call, |
1203 | filter); | 1353 | filter); |
1204 | 1354 | ||
1205 | trace_create_file("format", 0444, call->dir, call, | 1355 | trace_create_file("format", 0444, file->dir, call, |
1206 | format); | 1356 | format); |
1207 | 1357 | ||
1208 | return 0; | 1358 | return 0; |
1209 | } | 1359 | } |
1210 | 1360 | ||
1361 | static void remove_subsystem(struct ftrace_subsystem_dir *dir) | ||
1362 | { | ||
1363 | if (!dir) | ||
1364 | return; | ||
1365 | |||
1366 | if (!--dir->nr_events) { | ||
1367 | debugfs_remove_recursive(dir->entry); | ||
1368 | list_del(&dir->list); | ||
1369 | __put_system_dir(dir); | ||
1370 | } | ||
1371 | } | ||
1372 | |||
1373 | static void remove_event_from_tracers(struct ftrace_event_call *call) | ||
1374 | { | ||
1375 | struct ftrace_event_file *file; | ||
1376 | struct trace_array *tr; | ||
1377 | |||
1378 | do_for_each_event_file_safe(tr, file) { | ||
1379 | |||
1380 | if (file->event_call != call) | ||
1381 | continue; | ||
1382 | |||
1383 | list_del(&file->list); | ||
1384 | debugfs_remove_recursive(file->dir); | ||
1385 | remove_subsystem(file->system); | ||
1386 | kfree(file); | ||
1387 | |||
1388 | /* | ||
1389 | * The do_for_each_event_file_safe() is | ||
1390 | * a double loop. After finding the call for this | ||
1391 | * trace_array, we use break to jump to the next | ||
1392 | * trace_array. | ||
1393 | */ | ||
1394 | break; | ||
1395 | } while_for_each_event_file(); | ||
1396 | } | ||
1397 | |||
1211 | static void event_remove(struct ftrace_event_call *call) | 1398 | static void event_remove(struct ftrace_event_call *call) |
1212 | { | 1399 | { |
1213 | ftrace_event_enable_disable(call, 0); | 1400 | struct trace_array *tr; |
1401 | struct ftrace_event_file *file; | ||
1402 | |||
1403 | do_for_each_event_file(tr, file) { | ||
1404 | if (file->event_call != call) | ||
1405 | continue; | ||
1406 | ftrace_event_enable_disable(file, 0); | ||
1407 | /* | ||
1408 | * The do_for_each_event_file() is | ||
1409 | * a double loop. After finding the call for this | ||
1410 | * trace_array, we use break to jump to the next | ||
1411 | * trace_array. | ||
1412 | */ | ||
1413 | break; | ||
1414 | } while_for_each_event_file(); | ||
1415 | |||
1214 | if (call->event.funcs) | 1416 | if (call->event.funcs) |
1215 | __unregister_ftrace_event(&call->event); | 1417 | __unregister_ftrace_event(&call->event); |
1418 | remove_event_from_tracers(call); | ||
1216 | list_del(&call->list); | 1419 | list_del(&call->list); |
1217 | } | 1420 | } |
1218 | 1421 | ||
@@ -1234,61 +1437,58 @@ static int event_init(struct ftrace_event_call *call) | |||
1234 | } | 1437 | } |
1235 | 1438 | ||
1236 | static int | 1439 | static int |
1237 | __trace_add_event_call(struct ftrace_event_call *call, struct module *mod, | 1440 | __register_event(struct ftrace_event_call *call, struct module *mod) |
1238 | const struct file_operations *id, | ||
1239 | const struct file_operations *enable, | ||
1240 | const struct file_operations *filter, | ||
1241 | const struct file_operations *format) | ||
1242 | { | 1441 | { |
1243 | struct dentry *d_events; | ||
1244 | int ret; | 1442 | int ret; |
1245 | 1443 | ||
1246 | ret = event_init(call); | 1444 | ret = event_init(call); |
1247 | if (ret < 0) | 1445 | if (ret < 0) |
1248 | return ret; | 1446 | return ret; |
1249 | 1447 | ||
1250 | d_events = event_trace_events_dir(); | 1448 | list_add(&call->list, &ftrace_events); |
1251 | if (!d_events) | ||
1252 | return -ENOENT; | ||
1253 | |||
1254 | ret = event_create_dir(call, d_events, id, enable, filter, format); | ||
1255 | if (!ret) | ||
1256 | list_add(&call->list, &ftrace_events); | ||
1257 | call->mod = mod; | 1449 | call->mod = mod; |
1258 | 1450 | ||
1259 | return ret; | 1451 | return 0; |
1260 | } | 1452 | } |
1261 | 1453 | ||
1454 | /* Add an event to a trace directory */ | ||
1455 | static int | ||
1456 | __trace_add_new_event(struct ftrace_event_call *call, | ||
1457 | struct trace_array *tr, | ||
1458 | const struct file_operations *id, | ||
1459 | const struct file_operations *enable, | ||
1460 | const struct file_operations *filter, | ||
1461 | const struct file_operations *format) | ||
1462 | { | ||
1463 | struct ftrace_event_file *file; | ||
1464 | |||
1465 | file = kzalloc(sizeof(*file), GFP_KERNEL); | ||
1466 | if (!file) | ||
1467 | return -ENOMEM; | ||
1468 | |||
1469 | file->event_call = call; | ||
1470 | file->tr = tr; | ||
1471 | list_add(&file->list, &tr->events); | ||
1472 | |||
1473 | return event_create_dir(tr->event_dir, file, id, enable, filter, format); | ||
1474 | } | ||
1475 | |||
1476 | struct ftrace_module_file_ops; | ||
1477 | static void __add_event_to_tracers(struct ftrace_event_call *call, | ||
1478 | struct ftrace_module_file_ops *file_ops); | ||
1479 | |||
1262 | /* Add an additional event_call dynamically */ | 1480 | /* Add an additional event_call dynamically */ |
1263 | int trace_add_event_call(struct ftrace_event_call *call) | 1481 | int trace_add_event_call(struct ftrace_event_call *call) |
1264 | { | 1482 | { |
1265 | int ret; | 1483 | int ret; |
1266 | mutex_lock(&event_mutex); | 1484 | mutex_lock(&event_mutex); |
1267 | ret = __trace_add_event_call(call, NULL, &ftrace_event_id_fops, | ||
1268 | &ftrace_enable_fops, | ||
1269 | &ftrace_event_filter_fops, | ||
1270 | &ftrace_event_format_fops); | ||
1271 | mutex_unlock(&event_mutex); | ||
1272 | return ret; | ||
1273 | } | ||
1274 | 1485 | ||
1275 | static void remove_subsystem_dir(const char *name) | 1486 | ret = __register_event(call, NULL); |
1276 | { | 1487 | if (ret >= 0) |
1277 | struct event_subsystem *system; | 1488 | __add_event_to_tracers(call, NULL); |
1278 | 1489 | ||
1279 | if (strcmp(name, TRACE_SYSTEM) == 0) | 1490 | mutex_unlock(&event_mutex); |
1280 | return; | 1491 | return ret; |
1281 | |||
1282 | list_for_each_entry(system, &event_subsystems, list) { | ||
1283 | if (strcmp(system->name, name) == 0) { | ||
1284 | if (!--system->nr_events) { | ||
1285 | debugfs_remove_recursive(system->entry); | ||
1286 | list_del(&system->list); | ||
1287 | __put_system(system); | ||
1288 | } | ||
1289 | break; | ||
1290 | } | ||
1291 | } | ||
1292 | } | 1492 | } |
1293 | 1493 | ||
1294 | /* | 1494 | /* |
@@ -1299,8 +1499,6 @@ static void __trace_remove_event_call(struct ftrace_event_call *call) | |||
1299 | event_remove(call); | 1499 | event_remove(call); |
1300 | trace_destroy_fields(call); | 1500 | trace_destroy_fields(call); |
1301 | destroy_preds(call); | 1501 | destroy_preds(call); |
1302 | debugfs_remove_recursive(call->dir); | ||
1303 | remove_subsystem_dir(call->class->system); | ||
1304 | } | 1502 | } |
1305 | 1503 | ||
1306 | /* Remove an event_call */ | 1504 | /* Remove an event_call */ |
@@ -1335,6 +1533,17 @@ struct ftrace_module_file_ops { | |||
1335 | struct file_operations filter; | 1533 | struct file_operations filter; |
1336 | }; | 1534 | }; |
1337 | 1535 | ||
1536 | static struct ftrace_module_file_ops *find_ftrace_file_ops(struct module *mod) | ||
1537 | { | ||
1538 | struct ftrace_module_file_ops *file_ops; | ||
1539 | |||
1540 | list_for_each_entry(file_ops, &ftrace_module_file_list, list) { | ||
1541 | if (file_ops->mod == mod) | ||
1542 | return file_ops; | ||
1543 | } | ||
1544 | return NULL; | ||
1545 | } | ||
1546 | |||
1338 | static struct ftrace_module_file_ops * | 1547 | static struct ftrace_module_file_ops * |
1339 | trace_create_file_ops(struct module *mod) | 1548 | trace_create_file_ops(struct module *mod) |
1340 | { | 1549 | { |
@@ -1386,9 +1595,8 @@ static void trace_module_add_events(struct module *mod) | |||
1386 | return; | 1595 | return; |
1387 | 1596 | ||
1388 | for_each_event(call, start, end) { | 1597 | for_each_event(call, start, end) { |
1389 | __trace_add_event_call(*call, mod, | 1598 | __register_event(*call, mod); |
1390 | &file_ops->id, &file_ops->enable, | 1599 | __add_event_to_tracers(*call, file_ops); |
1391 | &file_ops->filter, &file_ops->format); | ||
1392 | } | 1600 | } |
1393 | } | 1601 | } |
1394 | 1602 | ||
@@ -1444,6 +1652,10 @@ static int trace_module_notify(struct notifier_block *self, | |||
1444 | return 0; | 1652 | return 0; |
1445 | } | 1653 | } |
1446 | #else | 1654 | #else |
1655 | static struct ftrace_module_file_ops *find_ftrace_file_ops(struct module *mod) | ||
1656 | { | ||
1657 | return NULL; | ||
1658 | } | ||
1447 | static int trace_module_notify(struct notifier_block *self, | 1659 | static int trace_module_notify(struct notifier_block *self, |
1448 | unsigned long val, void *data) | 1660 | unsigned long val, void *data) |
1449 | { | 1661 | { |
@@ -1451,6 +1663,72 @@ static int trace_module_notify(struct notifier_block *self, | |||
1451 | } | 1663 | } |
1452 | #endif /* CONFIG_MODULES */ | 1664 | #endif /* CONFIG_MODULES */ |
1453 | 1665 | ||
1666 | /* Create a new event directory structure for a trace directory. */ | ||
1667 | static void | ||
1668 | __trace_add_event_dirs(struct trace_array *tr) | ||
1669 | { | ||
1670 | struct ftrace_module_file_ops *file_ops = NULL; | ||
1671 | struct ftrace_event_call *call; | ||
1672 | int ret; | ||
1673 | |||
1674 | list_for_each_entry(call, &ftrace_events, list) { | ||
1675 | if (call->mod) { | ||
1676 | /* | ||
1677 | * Directories for events by modules need to | ||
1678 | * keep module ref counts when opened (as we don't | ||
1679 | * want the module to disappear when reading one | ||
1680 | * of these files). The file_ops keep account of | ||
1681 | * the module ref count. | ||
1682 | * | ||
1683 | * As event_calls are added in groups by module, | ||
1684 | * when we find one file_ops, we don't need to search for | ||
1685 | * each call in that module, as the rest should be the | ||
1686 | * same. Only search for a new one if the last one did | ||
1687 | * not match. | ||
1688 | */ | ||
1689 | if (!file_ops || call->mod != file_ops->mod) | ||
1690 | file_ops = find_ftrace_file_ops(call->mod); | ||
1691 | if (!file_ops) | ||
1692 | continue; /* Warn? */ | ||
1693 | ret = __trace_add_new_event(call, tr, | ||
1694 | &file_ops->id, &file_ops->enable, | ||
1695 | &file_ops->filter, &file_ops->format); | ||
1696 | if (ret < 0) | ||
1697 | pr_warning("Could not create directory for event %s\n", | ||
1698 | call->name); | ||
1699 | continue; | ||
1700 | } | ||
1701 | ret = __trace_add_new_event(call, tr, | ||
1702 | &ftrace_event_id_fops, | ||
1703 | &ftrace_enable_fops, | ||
1704 | &ftrace_event_filter_fops, | ||
1705 | &ftrace_event_format_fops); | ||
1706 | if (ret < 0) | ||
1707 | pr_warning("Could not create directory for event %s\n", | ||
1708 | call->name); | ||
1709 | } | ||
1710 | } | ||
1711 | |||
1712 | static void | ||
1713 | __add_event_to_tracers(struct ftrace_event_call *call, | ||
1714 | struct ftrace_module_file_ops *file_ops) | ||
1715 | { | ||
1716 | struct trace_array *tr; | ||
1717 | |||
1718 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { | ||
1719 | if (file_ops) | ||
1720 | __trace_add_new_event(call, tr, | ||
1721 | &file_ops->id, &file_ops->enable, | ||
1722 | &file_ops->filter, &file_ops->format); | ||
1723 | else | ||
1724 | __trace_add_new_event(call, tr, | ||
1725 | &ftrace_event_id_fops, | ||
1726 | &ftrace_enable_fops, | ||
1727 | &ftrace_event_filter_fops, | ||
1728 | &ftrace_event_format_fops); | ||
1729 | } | ||
1730 | } | ||
1731 | |||
1454 | static struct notifier_block trace_module_nb = { | 1732 | static struct notifier_block trace_module_nb = { |
1455 | .notifier_call = trace_module_notify, | 1733 | .notifier_call = trace_module_notify, |
1456 | .priority = 0, | 1734 | .priority = 0, |
@@ -1471,8 +1749,43 @@ static __init int setup_trace_event(char *str) | |||
1471 | } | 1749 | } |
1472 | __setup("trace_event=", setup_trace_event); | 1750 | __setup("trace_event=", setup_trace_event); |
1473 | 1751 | ||
1752 | int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr) | ||
1753 | { | ||
1754 | struct dentry *d_events; | ||
1755 | struct dentry *entry; | ||
1756 | |||
1757 | entry = debugfs_create_file("set_event", 0644, parent, | ||
1758 | tr, &ftrace_set_event_fops); | ||
1759 | if (!entry) { | ||
1760 | pr_warning("Could not create debugfs 'set_event' entry\n"); | ||
1761 | return -ENOMEM; | ||
1762 | } | ||
1763 | |||
1764 | d_events = debugfs_create_dir("events", parent); | ||
1765 | if (!d_events) | ||
1766 | pr_warning("Could not create debugfs 'events' directory\n"); | ||
1767 | |||
1768 | /* ring buffer internal formats */ | ||
1769 | trace_create_file("header_page", 0444, d_events, | ||
1770 | ring_buffer_print_page_header, | ||
1771 | &ftrace_show_header_fops); | ||
1772 | |||
1773 | trace_create_file("header_event", 0444, d_events, | ||
1774 | ring_buffer_print_entry_header, | ||
1775 | &ftrace_show_header_fops); | ||
1776 | |||
1777 | trace_create_file("enable", 0644, d_events, | ||
1778 | tr, &ftrace_tr_enable_fops); | ||
1779 | |||
1780 | tr->event_dir = d_events; | ||
1781 | __trace_add_event_dirs(tr); | ||
1782 | |||
1783 | return 0; | ||
1784 | } | ||
1785 | |||
1474 | static __init int event_trace_enable(void) | 1786 | static __init int event_trace_enable(void) |
1475 | { | 1787 | { |
1788 | struct trace_array *tr = top_trace_array(); | ||
1476 | struct ftrace_event_call **iter, *call; | 1789 | struct ftrace_event_call **iter, *call; |
1477 | char *buf = bootup_event_buf; | 1790 | char *buf = bootup_event_buf; |
1478 | char *token; | 1791 | char *token; |
@@ -1494,7 +1807,7 @@ static __init int event_trace_enable(void) | |||
1494 | if (!*token) | 1807 | if (!*token) |
1495 | continue; | 1808 | continue; |
1496 | 1809 | ||
1497 | ret = ftrace_set_clr_event(token, 1); | 1810 | ret = ftrace_set_clr_event(tr, token, 1); |
1498 | if (ret) | 1811 | if (ret) |
1499 | pr_warn("Failed to enable trace event: %s\n", token); | 1812 | pr_warn("Failed to enable trace event: %s\n", token); |
1500 | } | 1813 | } |
@@ -1506,61 +1819,29 @@ static __init int event_trace_enable(void) | |||
1506 | 1819 | ||
1507 | static __init int event_trace_init(void) | 1820 | static __init int event_trace_init(void) |
1508 | { | 1821 | { |
1509 | struct ftrace_event_call *call; | 1822 | struct trace_array *tr; |
1510 | struct dentry *d_tracer; | 1823 | struct dentry *d_tracer; |
1511 | struct dentry *entry; | 1824 | struct dentry *entry; |
1512 | struct dentry *d_events; | ||
1513 | int ret; | 1825 | int ret; |
1514 | 1826 | ||
1827 | tr = top_trace_array(); | ||
1828 | |||
1515 | d_tracer = tracing_init_dentry(); | 1829 | d_tracer = tracing_init_dentry(); |
1516 | if (!d_tracer) | 1830 | if (!d_tracer) |
1517 | return 0; | 1831 | return 0; |
1518 | 1832 | ||
1519 | entry = debugfs_create_file("available_events", 0444, d_tracer, | 1833 | entry = debugfs_create_file("available_events", 0444, d_tracer, |
1520 | NULL, &ftrace_avail_fops); | 1834 | tr, &ftrace_avail_fops); |
1521 | if (!entry) | 1835 | if (!entry) |
1522 | pr_warning("Could not create debugfs " | 1836 | pr_warning("Could not create debugfs " |
1523 | "'available_events' entry\n"); | 1837 | "'available_events' entry\n"); |
1524 | 1838 | ||
1525 | entry = debugfs_create_file("set_event", 0644, d_tracer, | ||
1526 | NULL, &ftrace_set_event_fops); | ||
1527 | if (!entry) | ||
1528 | pr_warning("Could not create debugfs " | ||
1529 | "'set_event' entry\n"); | ||
1530 | |||
1531 | d_events = event_trace_events_dir(); | ||
1532 | if (!d_events) | ||
1533 | return 0; | ||
1534 | |||
1535 | /* ring buffer internal formats */ | ||
1536 | trace_create_file("header_page", 0444, d_events, | ||
1537 | ring_buffer_print_page_header, | ||
1538 | &ftrace_show_header_fops); | ||
1539 | |||
1540 | trace_create_file("header_event", 0444, d_events, | ||
1541 | ring_buffer_print_entry_header, | ||
1542 | &ftrace_show_header_fops); | ||
1543 | |||
1544 | trace_create_file("enable", 0644, d_events, | ||
1545 | NULL, &ftrace_system_enable_fops); | ||
1546 | |||
1547 | if (trace_define_common_fields()) | 1839 | if (trace_define_common_fields()) |
1548 | pr_warning("tracing: Failed to allocate common fields"); | 1840 | pr_warning("tracing: Failed to allocate common fields"); |
1549 | 1841 | ||
1550 | /* | 1842 | ret = event_trace_add_tracer(d_tracer, tr); |
1551 | * Early initialization already enabled ftrace event. | 1843 | if (ret) |
1552 | * Now it's only necessary to create the event directory. | 1844 | return ret; |
1553 | */ | ||
1554 | list_for_each_entry(call, &ftrace_events, list) { | ||
1555 | |||
1556 | ret = event_create_dir(call, d_events, | ||
1557 | &ftrace_event_id_fops, | ||
1558 | &ftrace_enable_fops, | ||
1559 | &ftrace_event_filter_fops, | ||
1560 | &ftrace_event_format_fops); | ||
1561 | if (ret < 0) | ||
1562 | event_remove(call); | ||
1563 | } | ||
1564 | 1845 | ||
1565 | ret = register_module_notifier(&trace_module_nb); | 1846 | ret = register_module_notifier(&trace_module_nb); |
1566 | if (ret) | 1847 | if (ret) |
@@ -1627,13 +1908,20 @@ static __init void event_test_stuff(void) | |||
1627 | */ | 1908 | */ |
1628 | static __init void event_trace_self_tests(void) | 1909 | static __init void event_trace_self_tests(void) |
1629 | { | 1910 | { |
1911 | struct ftrace_subsystem_dir *dir; | ||
1912 | struct ftrace_event_file *file; | ||
1630 | struct ftrace_event_call *call; | 1913 | struct ftrace_event_call *call; |
1631 | struct event_subsystem *system; | 1914 | struct event_subsystem *system; |
1915 | struct trace_array *tr; | ||
1632 | int ret; | 1916 | int ret; |
1633 | 1917 | ||
1918 | tr = top_trace_array(); | ||
1919 | |||
1634 | pr_info("Running tests on trace events:\n"); | 1920 | pr_info("Running tests on trace events:\n"); |
1635 | 1921 | ||
1636 | list_for_each_entry(call, &ftrace_events, list) { | 1922 | list_for_each_entry(file, &tr->events, list) { |
1923 | |||
1924 | call = file->event_call; | ||
1637 | 1925 | ||
1638 | /* Only test those that have a probe */ | 1926 | /* Only test those that have a probe */ |
1639 | if (!call->class || !call->class->probe) | 1927 | if (!call->class || !call->class->probe) |
@@ -1657,15 +1945,15 @@ static __init void event_trace_self_tests(void) | |||
1657 | * If an event is already enabled, someone is using | 1945 | * If an event is already enabled, someone is using |
1658 | * it and the self test should not be on. | 1946 | * it and the self test should not be on. |
1659 | */ | 1947 | */ |
1660 | if (call->flags & TRACE_EVENT_FL_ENABLED) { | 1948 | if (file->flags & FTRACE_EVENT_FL_ENABLED) { |
1661 | pr_warning("Enabled event during self test!\n"); | 1949 | pr_warning("Enabled event during self test!\n"); |
1662 | WARN_ON_ONCE(1); | 1950 | WARN_ON_ONCE(1); |
1663 | continue; | 1951 | continue; |
1664 | } | 1952 | } |
1665 | 1953 | ||
1666 | ftrace_event_enable_disable(call, 1); | 1954 | ftrace_event_enable_disable(file, 1); |
1667 | event_test_stuff(); | 1955 | event_test_stuff(); |
1668 | ftrace_event_enable_disable(call, 0); | 1956 | ftrace_event_enable_disable(file, 0); |
1669 | 1957 | ||
1670 | pr_cont("OK\n"); | 1958 | pr_cont("OK\n"); |
1671 | } | 1959 | } |
@@ -1674,7 +1962,9 @@ static __init void event_trace_self_tests(void) | |||
1674 | 1962 | ||
1675 | pr_info("Running tests on trace event systems:\n"); | 1963 | pr_info("Running tests on trace event systems:\n"); |
1676 | 1964 | ||
1677 | list_for_each_entry(system, &event_subsystems, list) { | 1965 | list_for_each_entry(dir, &tr->systems, list) { |
1966 | |||
1967 | system = dir->subsystem; | ||
1678 | 1968 | ||
1679 | /* the ftrace system is special, skip it */ | 1969 | /* the ftrace system is special, skip it */ |
1680 | if (strcmp(system->name, "ftrace") == 0) | 1970 | if (strcmp(system->name, "ftrace") == 0) |
@@ -1682,7 +1972,7 @@ static __init void event_trace_self_tests(void) | |||
1682 | 1972 | ||
1683 | pr_info("Testing event system %s: ", system->name); | 1973 | pr_info("Testing event system %s: ", system->name); |
1684 | 1974 | ||
1685 | ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1); | 1975 | ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1); |
1686 | if (WARN_ON_ONCE(ret)) { | 1976 | if (WARN_ON_ONCE(ret)) { |
1687 | pr_warning("error enabling system %s\n", | 1977 | pr_warning("error enabling system %s\n", |
1688 | system->name); | 1978 | system->name); |
@@ -1691,7 +1981,7 @@ static __init void event_trace_self_tests(void) | |||
1691 | 1981 | ||
1692 | event_test_stuff(); | 1982 | event_test_stuff(); |
1693 | 1983 | ||
1694 | ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0); | 1984 | ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0); |
1695 | if (WARN_ON_ONCE(ret)) { | 1985 | if (WARN_ON_ONCE(ret)) { |
1696 | pr_warning("error disabling system %s\n", | 1986 | pr_warning("error disabling system %s\n", |
1697 | system->name); | 1987 | system->name); |
@@ -1706,7 +1996,7 @@ static __init void event_trace_self_tests(void) | |||
1706 | pr_info("Running tests on all trace events:\n"); | 1996 | pr_info("Running tests on all trace events:\n"); |
1707 | pr_info("Testing all events: "); | 1997 | pr_info("Testing all events: "); |
1708 | 1998 | ||
1709 | ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1); | 1999 | ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1); |
1710 | if (WARN_ON_ONCE(ret)) { | 2000 | if (WARN_ON_ONCE(ret)) { |
1711 | pr_warning("error enabling all events\n"); | 2001 | pr_warning("error enabling all events\n"); |
1712 | return; | 2002 | return; |
@@ -1715,7 +2005,7 @@ static __init void event_trace_self_tests(void) | |||
1715 | event_test_stuff(); | 2005 | event_test_stuff(); |
1716 | 2006 | ||
1717 | /* reset sysname */ | 2007 | /* reset sysname */ |
1718 | ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0); | 2008 | ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0); |
1719 | if (WARN_ON_ONCE(ret)) { | 2009 | if (WARN_ON_ONCE(ret)) { |
1720 | pr_warning("error disabling all events\n"); | 2010 | pr_warning("error disabling all events\n"); |
1721 | return; | 2011 | return; |