diff options
Diffstat (limited to 'kernel/trace')
-rw-r--r-- | kernel/trace/Kconfig | 15 | ||||
-rw-r--r-- | kernel/trace/ftrace.c | 634 | ||||
-rw-r--r-- | kernel/trace/ring_buffer.c | 7 | ||||
-rw-r--r-- | kernel/trace/trace.c | 174 | ||||
-rw-r--r-- | kernel/trace/trace.h | 10 | ||||
-rw-r--r-- | kernel/trace/trace_boot.c | 5 | ||||
-rw-r--r-- | kernel/trace/trace_branch.c | 4 | ||||
-rw-r--r-- | kernel/trace/trace_event_profile.c | 1 | ||||
-rw-r--r-- | kernel/trace/trace_functions_graph.c | 25 | ||||
-rw-r--r-- | kernel/trace/trace_mmiotrace.c | 6 | ||||
-rw-r--r-- | kernel/trace/trace_output.c | 10 | ||||
-rw-r--r-- | kernel/trace/trace_output.h | 2 | ||||
-rw-r--r-- | kernel/trace/trace_printk.c | 6 | ||||
-rw-r--r-- | kernel/trace/trace_sched_switch.c | 10 | ||||
-rw-r--r-- | kernel/trace/trace_stack.c | 13 | ||||
-rw-r--r-- | kernel/trace/trace_stat.c | 2 | ||||
-rw-r--r-- | kernel/trace/trace_stat.h | 2 | ||||
-rw-r--r-- | kernel/trace/trace_sysprof.c | 6 | ||||
-rw-r--r-- | kernel/trace/trace_workqueue.c | 2 |
19 files changed, 738 insertions, 196 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 2246141bda4d..23b96ebbf893 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig | |||
@@ -104,6 +104,7 @@ config FUNCTION_GRAPH_TRACER | |||
104 | the return value. This is done by setting the current return | 104 | the return value. This is done by setting the current return |
105 | address on the current task structure into a stack of calls. | 105 | address on the current task structure into a stack of calls. |
106 | 106 | ||
107 | |||
107 | config IRQSOFF_TRACER | 108 | config IRQSOFF_TRACER |
108 | bool "Interrupts-off Latency Tracer" | 109 | bool "Interrupts-off Latency Tracer" |
109 | default n | 110 | default n |
@@ -375,6 +376,20 @@ config DYNAMIC_FTRACE | |||
375 | were made. If so, it runs stop_machine (stops all CPUS) | 376 | were made. If so, it runs stop_machine (stops all CPUS) |
376 | and modifies the code to jump over the call to ftrace. | 377 | and modifies the code to jump over the call to ftrace. |
377 | 378 | ||
379 | config FUNCTION_PROFILER | ||
380 | bool "Kernel function profiler" | ||
381 | depends on FUNCTION_TRACER | ||
382 | default n | ||
383 | help | ||
384 | This option enables the kernel function profiler. A file is created | ||
385 | in debugfs called function_profile_enabled which defaults to zero. | ||
386 | When a 1 is echoed into this file, profiling begins, and when a | ||
387 | zero is entered, profiling stops. A file in the trace_stats | ||
388 | directory called functions, that shows the list of functions that | ||
389 | have been hit and their counters. | ||
390 | |||
391 | If in doubt, say N | ||
392 | |||
378 | config FTRACE_MCOUNT_RECORD | 393 | config FTRACE_MCOUNT_RECORD |
379 | def_bool y | 394 | def_bool y |
380 | depends on DYNAMIC_FTRACE | 395 | depends on DYNAMIC_FTRACE |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index f1ed080406c3..8e6a0b5c9940 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -33,7 +33,8 @@ | |||
33 | 33 | ||
34 | #include <asm/ftrace.h> | 34 | #include <asm/ftrace.h> |
35 | 35 | ||
36 | #include "trace.h" | 36 | #include "trace_output.h" |
37 | #include "trace_stat.h" | ||
37 | 38 | ||
38 | #define FTRACE_WARN_ON(cond) \ | 39 | #define FTRACE_WARN_ON(cond) \ |
39 | do { \ | 40 | do { \ |
@@ -68,7 +69,7 @@ static DEFINE_MUTEX(ftrace_lock); | |||
68 | 69 | ||
69 | static struct ftrace_ops ftrace_list_end __read_mostly = | 70 | static struct ftrace_ops ftrace_list_end __read_mostly = |
70 | { | 71 | { |
71 | .func = ftrace_stub, | 72 | .func = ftrace_stub, |
72 | }; | 73 | }; |
73 | 74 | ||
74 | static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end; | 75 | static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end; |
@@ -240,6 +241,576 @@ static void ftrace_update_pid_func(void) | |||
240 | #endif | 241 | #endif |
241 | } | 242 | } |
242 | 243 | ||
244 | #ifdef CONFIG_FUNCTION_PROFILER | ||
245 | struct ftrace_profile { | ||
246 | struct hlist_node node; | ||
247 | unsigned long ip; | ||
248 | unsigned long counter; | ||
249 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
250 | unsigned long long time; | ||
251 | #endif | ||
252 | }; | ||
253 | |||
254 | struct ftrace_profile_page { | ||
255 | struct ftrace_profile_page *next; | ||
256 | unsigned long index; | ||
257 | struct ftrace_profile records[]; | ||
258 | }; | ||
259 | |||
260 | struct ftrace_profile_stat { | ||
261 | atomic_t disabled; | ||
262 | struct hlist_head *hash; | ||
263 | struct ftrace_profile_page *pages; | ||
264 | struct ftrace_profile_page *start; | ||
265 | struct tracer_stat stat; | ||
266 | }; | ||
267 | |||
268 | #define PROFILE_RECORDS_SIZE \ | ||
269 | (PAGE_SIZE - offsetof(struct ftrace_profile_page, records)) | ||
270 | |||
271 | #define PROFILES_PER_PAGE \ | ||
272 | (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile)) | ||
273 | |||
274 | static int ftrace_profile_bits __read_mostly; | ||
275 | static int ftrace_profile_enabled __read_mostly; | ||
276 | |||
277 | /* ftrace_profile_lock - synchronize the enable and disable of the profiler */ | ||
278 | static DEFINE_MUTEX(ftrace_profile_lock); | ||
279 | |||
280 | static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats); | ||
281 | |||
282 | #define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */ | ||
283 | |||
284 | static void * | ||
285 | function_stat_next(void *v, int idx) | ||
286 | { | ||
287 | struct ftrace_profile *rec = v; | ||
288 | struct ftrace_profile_page *pg; | ||
289 | |||
290 | pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK); | ||
291 | |||
292 | again: | ||
293 | rec++; | ||
294 | if ((void *)rec >= (void *)&pg->records[pg->index]) { | ||
295 | pg = pg->next; | ||
296 | if (!pg) | ||
297 | return NULL; | ||
298 | rec = &pg->records[0]; | ||
299 | if (!rec->counter) | ||
300 | goto again; | ||
301 | } | ||
302 | |||
303 | return rec; | ||
304 | } | ||
305 | |||
306 | static void *function_stat_start(struct tracer_stat *trace) | ||
307 | { | ||
308 | struct ftrace_profile_stat *stat = | ||
309 | container_of(trace, struct ftrace_profile_stat, stat); | ||
310 | |||
311 | if (!stat || !stat->start) | ||
312 | return NULL; | ||
313 | |||
314 | return function_stat_next(&stat->start->records[0], 0); | ||
315 | } | ||
316 | |||
317 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
318 | /* function graph compares on total time */ | ||
319 | static int function_stat_cmp(void *p1, void *p2) | ||
320 | { | ||
321 | struct ftrace_profile *a = p1; | ||
322 | struct ftrace_profile *b = p2; | ||
323 | |||
324 | if (a->time < b->time) | ||
325 | return -1; | ||
326 | if (a->time > b->time) | ||
327 | return 1; | ||
328 | else | ||
329 | return 0; | ||
330 | } | ||
331 | #else | ||
332 | /* not function graph compares against hits */ | ||
333 | static int function_stat_cmp(void *p1, void *p2) | ||
334 | { | ||
335 | struct ftrace_profile *a = p1; | ||
336 | struct ftrace_profile *b = p2; | ||
337 | |||
338 | if (a->counter < b->counter) | ||
339 | return -1; | ||
340 | if (a->counter > b->counter) | ||
341 | return 1; | ||
342 | else | ||
343 | return 0; | ||
344 | } | ||
345 | #endif | ||
346 | |||
347 | static int function_stat_headers(struct seq_file *m) | ||
348 | { | ||
349 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
350 | seq_printf(m, " Function " | ||
351 | "Hit Time Avg\n" | ||
352 | " -------- " | ||
353 | "--- ---- ---\n"); | ||
354 | #else | ||
355 | seq_printf(m, " Function Hit\n" | ||
356 | " -------- ---\n"); | ||
357 | #endif | ||
358 | return 0; | ||
359 | } | ||
360 | |||
361 | static int function_stat_show(struct seq_file *m, void *v) | ||
362 | { | ||
363 | struct ftrace_profile *rec = v; | ||
364 | char str[KSYM_SYMBOL_LEN]; | ||
365 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
366 | static DEFINE_MUTEX(mutex); | ||
367 | static struct trace_seq s; | ||
368 | unsigned long long avg; | ||
369 | #endif | ||
370 | |||
371 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); | ||
372 | seq_printf(m, " %-30.30s %10lu", str, rec->counter); | ||
373 | |||
374 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
375 | seq_printf(m, " "); | ||
376 | avg = rec->time; | ||
377 | do_div(avg, rec->counter); | ||
378 | |||
379 | mutex_lock(&mutex); | ||
380 | trace_seq_init(&s); | ||
381 | trace_print_graph_duration(rec->time, &s); | ||
382 | trace_seq_puts(&s, " "); | ||
383 | trace_print_graph_duration(avg, &s); | ||
384 | trace_print_seq(m, &s); | ||
385 | mutex_unlock(&mutex); | ||
386 | #endif | ||
387 | seq_putc(m, '\n'); | ||
388 | |||
389 | return 0; | ||
390 | } | ||
391 | |||
392 | static void ftrace_profile_reset(struct ftrace_profile_stat *stat) | ||
393 | { | ||
394 | struct ftrace_profile_page *pg; | ||
395 | |||
396 | pg = stat->pages = stat->start; | ||
397 | |||
398 | while (pg) { | ||
399 | memset(pg->records, 0, PROFILE_RECORDS_SIZE); | ||
400 | pg->index = 0; | ||
401 | pg = pg->next; | ||
402 | } | ||
403 | |||
404 | memset(stat->hash, 0, | ||
405 | FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head)); | ||
406 | } | ||
407 | |||
408 | int ftrace_profile_pages_init(struct ftrace_profile_stat *stat) | ||
409 | { | ||
410 | struct ftrace_profile_page *pg; | ||
411 | int functions; | ||
412 | int pages; | ||
413 | int i; | ||
414 | |||
415 | /* If we already allocated, do nothing */ | ||
416 | if (stat->pages) | ||
417 | return 0; | ||
418 | |||
419 | stat->pages = (void *)get_zeroed_page(GFP_KERNEL); | ||
420 | if (!stat->pages) | ||
421 | return -ENOMEM; | ||
422 | |||
423 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
424 | functions = ftrace_update_tot_cnt; | ||
425 | #else | ||
426 | /* | ||
427 | * We do not know the number of functions that exist because | ||
428 | * dynamic tracing is what counts them. With past experience | ||
429 | * we have around 20K functions. That should be more than enough. | ||
430 | * It is highly unlikely we will execute every function in | ||
431 | * the kernel. | ||
432 | */ | ||
433 | functions = 20000; | ||
434 | #endif | ||
435 | |||
436 | pg = stat->start = stat->pages; | ||
437 | |||
438 | pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE); | ||
439 | |||
440 | for (i = 0; i < pages; i++) { | ||
441 | pg->next = (void *)get_zeroed_page(GFP_KERNEL); | ||
442 | if (!pg->next) | ||
443 | goto out_free; | ||
444 | pg = pg->next; | ||
445 | } | ||
446 | |||
447 | return 0; | ||
448 | |||
449 | out_free: | ||
450 | pg = stat->start; | ||
451 | while (pg) { | ||
452 | unsigned long tmp = (unsigned long)pg; | ||
453 | |||
454 | pg = pg->next; | ||
455 | free_page(tmp); | ||
456 | } | ||
457 | |||
458 | free_page((unsigned long)stat->pages); | ||
459 | stat->pages = NULL; | ||
460 | stat->start = NULL; | ||
461 | |||
462 | return -ENOMEM; | ||
463 | } | ||
464 | |||
465 | static int ftrace_profile_init_cpu(int cpu) | ||
466 | { | ||
467 | struct ftrace_profile_stat *stat; | ||
468 | int size; | ||
469 | |||
470 | stat = &per_cpu(ftrace_profile_stats, cpu); | ||
471 | |||
472 | if (stat->hash) { | ||
473 | /* If the profile is already created, simply reset it */ | ||
474 | ftrace_profile_reset(stat); | ||
475 | return 0; | ||
476 | } | ||
477 | |||
478 | /* | ||
479 | * We are profiling all functions, but usually only a few thousand | ||
480 | * functions are hit. We'll make a hash of 1024 items. | ||
481 | */ | ||
482 | size = FTRACE_PROFILE_HASH_SIZE; | ||
483 | |||
484 | stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL); | ||
485 | |||
486 | if (!stat->hash) | ||
487 | return -ENOMEM; | ||
488 | |||
489 | if (!ftrace_profile_bits) { | ||
490 | size--; | ||
491 | |||
492 | for (; size; size >>= 1) | ||
493 | ftrace_profile_bits++; | ||
494 | } | ||
495 | |||
496 | /* Preallocate the function profiling pages */ | ||
497 | if (ftrace_profile_pages_init(stat) < 0) { | ||
498 | kfree(stat->hash); | ||
499 | stat->hash = NULL; | ||
500 | return -ENOMEM; | ||
501 | } | ||
502 | |||
503 | return 0; | ||
504 | } | ||
505 | |||
506 | static int ftrace_profile_init(void) | ||
507 | { | ||
508 | int cpu; | ||
509 | int ret = 0; | ||
510 | |||
511 | for_each_online_cpu(cpu) { | ||
512 | ret = ftrace_profile_init_cpu(cpu); | ||
513 | if (ret) | ||
514 | break; | ||
515 | } | ||
516 | |||
517 | return ret; | ||
518 | } | ||
519 | |||
520 | /* interrupts must be disabled */ | ||
521 | static struct ftrace_profile * | ||
522 | ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip) | ||
523 | { | ||
524 | struct ftrace_profile *rec; | ||
525 | struct hlist_head *hhd; | ||
526 | struct hlist_node *n; | ||
527 | unsigned long key; | ||
528 | |||
529 | key = hash_long(ip, ftrace_profile_bits); | ||
530 | hhd = &stat->hash[key]; | ||
531 | |||
532 | if (hlist_empty(hhd)) | ||
533 | return NULL; | ||
534 | |||
535 | hlist_for_each_entry_rcu(rec, n, hhd, node) { | ||
536 | if (rec->ip == ip) | ||
537 | return rec; | ||
538 | } | ||
539 | |||
540 | return NULL; | ||
541 | } | ||
542 | |||
543 | static void ftrace_add_profile(struct ftrace_profile_stat *stat, | ||
544 | struct ftrace_profile *rec) | ||
545 | { | ||
546 | unsigned long key; | ||
547 | |||
548 | key = hash_long(rec->ip, ftrace_profile_bits); | ||
549 | hlist_add_head_rcu(&rec->node, &stat->hash[key]); | ||
550 | } | ||
551 | |||
552 | /* | ||
553 | * The memory is already allocated, this simply finds a new record to use. | ||
554 | */ | ||
555 | static struct ftrace_profile * | ||
556 | ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip) | ||
557 | { | ||
558 | struct ftrace_profile *rec = NULL; | ||
559 | |||
560 | /* prevent recursion (from NMIs) */ | ||
561 | if (atomic_inc_return(&stat->disabled) != 1) | ||
562 | goto out; | ||
563 | |||
564 | /* | ||
565 | * Try to find the function again since an NMI | ||
566 | * could have added it | ||
567 | */ | ||
568 | rec = ftrace_find_profiled_func(stat, ip); | ||
569 | if (rec) | ||
570 | goto out; | ||
571 | |||
572 | if (stat->pages->index == PROFILES_PER_PAGE) { | ||
573 | if (!stat->pages->next) | ||
574 | goto out; | ||
575 | stat->pages = stat->pages->next; | ||
576 | } | ||
577 | |||
578 | rec = &stat->pages->records[stat->pages->index++]; | ||
579 | rec->ip = ip; | ||
580 | ftrace_add_profile(stat, rec); | ||
581 | |||
582 | out: | ||
583 | atomic_dec(&stat->disabled); | ||
584 | |||
585 | return rec; | ||
586 | } | ||
587 | |||
588 | static void | ||
589 | function_profile_call(unsigned long ip, unsigned long parent_ip) | ||
590 | { | ||
591 | struct ftrace_profile_stat *stat; | ||
592 | struct ftrace_profile *rec; | ||
593 | unsigned long flags; | ||
594 | |||
595 | if (!ftrace_profile_enabled) | ||
596 | return; | ||
597 | |||
598 | local_irq_save(flags); | ||
599 | |||
600 | stat = &__get_cpu_var(ftrace_profile_stats); | ||
601 | if (!stat->hash) | ||
602 | goto out; | ||
603 | |||
604 | rec = ftrace_find_profiled_func(stat, ip); | ||
605 | if (!rec) { | ||
606 | rec = ftrace_profile_alloc(stat, ip); | ||
607 | if (!rec) | ||
608 | goto out; | ||
609 | } | ||
610 | |||
611 | rec->counter++; | ||
612 | out: | ||
613 | local_irq_restore(flags); | ||
614 | } | ||
615 | |||
616 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
617 | static int profile_graph_entry(struct ftrace_graph_ent *trace) | ||
618 | { | ||
619 | function_profile_call(trace->func, 0); | ||
620 | return 1; | ||
621 | } | ||
622 | |||
623 | static void profile_graph_return(struct ftrace_graph_ret *trace) | ||
624 | { | ||
625 | struct ftrace_profile_stat *stat; | ||
626 | unsigned long long calltime; | ||
627 | struct ftrace_profile *rec; | ||
628 | unsigned long flags; | ||
629 | |||
630 | local_irq_save(flags); | ||
631 | stat = &__get_cpu_var(ftrace_profile_stats); | ||
632 | if (!stat->hash) | ||
633 | goto out; | ||
634 | |||
635 | calltime = trace->rettime - trace->calltime; | ||
636 | |||
637 | if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) { | ||
638 | int index; | ||
639 | |||
640 | index = trace->depth; | ||
641 | |||
642 | /* Append this call time to the parent time to subtract */ | ||
643 | if (index) | ||
644 | current->ret_stack[index - 1].subtime += calltime; | ||
645 | |||
646 | if (current->ret_stack[index].subtime < calltime) | ||
647 | calltime -= current->ret_stack[index].subtime; | ||
648 | else | ||
649 | calltime = 0; | ||
650 | } | ||
651 | |||
652 | rec = ftrace_find_profiled_func(stat, trace->func); | ||
653 | if (rec) | ||
654 | rec->time += calltime; | ||
655 | |||
656 | out: | ||
657 | local_irq_restore(flags); | ||
658 | } | ||
659 | |||
660 | static int register_ftrace_profiler(void) | ||
661 | { | ||
662 | return register_ftrace_graph(&profile_graph_return, | ||
663 | &profile_graph_entry); | ||
664 | } | ||
665 | |||
666 | static void unregister_ftrace_profiler(void) | ||
667 | { | ||
668 | unregister_ftrace_graph(); | ||
669 | } | ||
670 | #else | ||
671 | static struct ftrace_ops ftrace_profile_ops __read_mostly = | ||
672 | { | ||
673 | .func = function_profile_call, | ||
674 | }; | ||
675 | |||
676 | static int register_ftrace_profiler(void) | ||
677 | { | ||
678 | return register_ftrace_function(&ftrace_profile_ops); | ||
679 | } | ||
680 | |||
681 | static void unregister_ftrace_profiler(void) | ||
682 | { | ||
683 | unregister_ftrace_function(&ftrace_profile_ops); | ||
684 | } | ||
685 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
686 | |||
687 | static ssize_t | ||
688 | ftrace_profile_write(struct file *filp, const char __user *ubuf, | ||
689 | size_t cnt, loff_t *ppos) | ||
690 | { | ||
691 | unsigned long val; | ||
692 | char buf[64]; /* big enough to hold a number */ | ||
693 | int ret; | ||
694 | |||
695 | if (cnt >= sizeof(buf)) | ||
696 | return -EINVAL; | ||
697 | |||
698 | if (copy_from_user(&buf, ubuf, cnt)) | ||
699 | return -EFAULT; | ||
700 | |||
701 | buf[cnt] = 0; | ||
702 | |||
703 | ret = strict_strtoul(buf, 10, &val); | ||
704 | if (ret < 0) | ||
705 | return ret; | ||
706 | |||
707 | val = !!val; | ||
708 | |||
709 | mutex_lock(&ftrace_profile_lock); | ||
710 | if (ftrace_profile_enabled ^ val) { | ||
711 | if (val) { | ||
712 | ret = ftrace_profile_init(); | ||
713 | if (ret < 0) { | ||
714 | cnt = ret; | ||
715 | goto out; | ||
716 | } | ||
717 | |||
718 | ret = register_ftrace_profiler(); | ||
719 | if (ret < 0) { | ||
720 | cnt = ret; | ||
721 | goto out; | ||
722 | } | ||
723 | ftrace_profile_enabled = 1; | ||
724 | } else { | ||
725 | ftrace_profile_enabled = 0; | ||
726 | unregister_ftrace_profiler(); | ||
727 | } | ||
728 | } | ||
729 | out: | ||
730 | mutex_unlock(&ftrace_profile_lock); | ||
731 | |||
732 | filp->f_pos += cnt; | ||
733 | |||
734 | return cnt; | ||
735 | } | ||
736 | |||
737 | static ssize_t | ||
738 | ftrace_profile_read(struct file *filp, char __user *ubuf, | ||
739 | size_t cnt, loff_t *ppos) | ||
740 | { | ||
741 | char buf[64]; /* big enough to hold a number */ | ||
742 | int r; | ||
743 | |||
744 | r = sprintf(buf, "%u\n", ftrace_profile_enabled); | ||
745 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | ||
746 | } | ||
747 | |||
748 | static const struct file_operations ftrace_profile_fops = { | ||
749 | .open = tracing_open_generic, | ||
750 | .read = ftrace_profile_read, | ||
751 | .write = ftrace_profile_write, | ||
752 | }; | ||
753 | |||
754 | /* used to initialize the real stat files */ | ||
755 | static struct tracer_stat function_stats __initdata = { | ||
756 | .name = "functions", | ||
757 | .stat_start = function_stat_start, | ||
758 | .stat_next = function_stat_next, | ||
759 | .stat_cmp = function_stat_cmp, | ||
760 | .stat_headers = function_stat_headers, | ||
761 | .stat_show = function_stat_show | ||
762 | }; | ||
763 | |||
764 | static void ftrace_profile_debugfs(struct dentry *d_tracer) | ||
765 | { | ||
766 | struct ftrace_profile_stat *stat; | ||
767 | struct dentry *entry; | ||
768 | char *name; | ||
769 | int ret; | ||
770 | int cpu; | ||
771 | |||
772 | for_each_possible_cpu(cpu) { | ||
773 | stat = &per_cpu(ftrace_profile_stats, cpu); | ||
774 | |||
775 | /* allocate enough for function name + cpu number */ | ||
776 | name = kmalloc(32, GFP_KERNEL); | ||
777 | if (!name) { | ||
778 | /* | ||
779 | * The files created are permanent, if something happens | ||
780 | * we still do not free memory. | ||
781 | */ | ||
782 | kfree(stat); | ||
783 | WARN(1, | ||
784 | "Could not allocate stat file for cpu %d\n", | ||
785 | cpu); | ||
786 | return; | ||
787 | } | ||
788 | stat->stat = function_stats; | ||
789 | snprintf(name, 32, "function%d", cpu); | ||
790 | stat->stat.name = name; | ||
791 | ret = register_stat_tracer(&stat->stat); | ||
792 | if (ret) { | ||
793 | WARN(1, | ||
794 | "Could not register function stat for cpu %d\n", | ||
795 | cpu); | ||
796 | kfree(name); | ||
797 | return; | ||
798 | } | ||
799 | } | ||
800 | |||
801 | entry = debugfs_create_file("function_profile_enabled", 0644, | ||
802 | d_tracer, NULL, &ftrace_profile_fops); | ||
803 | if (!entry) | ||
804 | pr_warning("Could not create debugfs " | ||
805 | "'function_profile_enabled' entry\n"); | ||
806 | } | ||
807 | |||
808 | #else /* CONFIG_FUNCTION_PROFILER */ | ||
809 | static void ftrace_profile_debugfs(struct dentry *d_tracer) | ||
810 | { | ||
811 | } | ||
812 | #endif /* CONFIG_FUNCTION_PROFILER */ | ||
813 | |||
243 | /* set when tracing only a pid */ | 814 | /* set when tracing only a pid */ |
244 | struct pid *ftrace_pid_trace; | 815 | struct pid *ftrace_pid_trace; |
245 | static struct pid * const ftrace_swapper_pid = &init_struct_pid; | 816 | static struct pid * const ftrace_swapper_pid = &init_struct_pid; |
@@ -261,7 +832,6 @@ struct ftrace_func_probe { | |||
261 | struct rcu_head rcu; | 832 | struct rcu_head rcu; |
262 | }; | 833 | }; |
263 | 834 | ||
264 | |||
265 | enum { | 835 | enum { |
266 | FTRACE_ENABLE_CALLS = (1 << 0), | 836 | FTRACE_ENABLE_CALLS = (1 << 0), |
267 | FTRACE_DISABLE_CALLS = (1 << 1), | 837 | FTRACE_DISABLE_CALLS = (1 << 1), |
@@ -1408,7 +1978,7 @@ function_trace_probe_call(unsigned long ip, unsigned long parent_ip) | |||
1408 | 1978 | ||
1409 | static struct ftrace_ops trace_probe_ops __read_mostly = | 1979 | static struct ftrace_ops trace_probe_ops __read_mostly = |
1410 | { | 1980 | { |
1411 | .func = function_trace_probe_call, | 1981 | .func = function_trace_probe_call, |
1412 | }; | 1982 | }; |
1413 | 1983 | ||
1414 | static int ftrace_probe_registered; | 1984 | static int ftrace_probe_registered; |
@@ -2128,38 +2698,23 @@ static const struct file_operations ftrace_graph_fops = { | |||
2128 | 2698 | ||
2129 | static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) | 2699 | static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) |
2130 | { | 2700 | { |
2131 | struct dentry *entry; | ||
2132 | 2701 | ||
2133 | entry = debugfs_create_file("available_filter_functions", 0444, | 2702 | trace_create_file("available_filter_functions", 0444, |
2134 | d_tracer, NULL, &ftrace_avail_fops); | 2703 | d_tracer, NULL, &ftrace_avail_fops); |
2135 | if (!entry) | ||
2136 | pr_warning("Could not create debugfs " | ||
2137 | "'available_filter_functions' entry\n"); | ||
2138 | 2704 | ||
2139 | entry = debugfs_create_file("failures", 0444, | 2705 | trace_create_file("failures", 0444, |
2140 | d_tracer, NULL, &ftrace_failures_fops); | 2706 | d_tracer, NULL, &ftrace_failures_fops); |
2141 | if (!entry) | ||
2142 | pr_warning("Could not create debugfs 'failures' entry\n"); | ||
2143 | 2707 | ||
2144 | entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer, | 2708 | trace_create_file("set_ftrace_filter", 0644, d_tracer, |
2145 | NULL, &ftrace_filter_fops); | 2709 | NULL, &ftrace_filter_fops); |
2146 | if (!entry) | ||
2147 | pr_warning("Could not create debugfs " | ||
2148 | "'set_ftrace_filter' entry\n"); | ||
2149 | 2710 | ||
2150 | entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer, | 2711 | trace_create_file("set_ftrace_notrace", 0644, d_tracer, |
2151 | NULL, &ftrace_notrace_fops); | 2712 | NULL, &ftrace_notrace_fops); |
2152 | if (!entry) | ||
2153 | pr_warning("Could not create debugfs " | ||
2154 | "'set_ftrace_notrace' entry\n"); | ||
2155 | 2713 | ||
2156 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 2714 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
2157 | entry = debugfs_create_file("set_graph_function", 0444, d_tracer, | 2715 | trace_create_file("set_graph_function", 0444, d_tracer, |
2158 | NULL, | 2716 | NULL, |
2159 | &ftrace_graph_fops); | 2717 | &ftrace_graph_fops); |
2160 | if (!entry) | ||
2161 | pr_warning("Could not create debugfs " | ||
2162 | "'set_graph_function' entry\n"); | ||
2163 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 2718 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
2164 | 2719 | ||
2165 | return 0; | 2720 | return 0; |
@@ -2417,7 +2972,6 @@ static const struct file_operations ftrace_pid_fops = { | |||
2417 | static __init int ftrace_init_debugfs(void) | 2972 | static __init int ftrace_init_debugfs(void) |
2418 | { | 2973 | { |
2419 | struct dentry *d_tracer; | 2974 | struct dentry *d_tracer; |
2420 | struct dentry *entry; | ||
2421 | 2975 | ||
2422 | d_tracer = tracing_init_dentry(); | 2976 | d_tracer = tracing_init_dentry(); |
2423 | if (!d_tracer) | 2977 | if (!d_tracer) |
@@ -2425,11 +2979,11 @@ static __init int ftrace_init_debugfs(void) | |||
2425 | 2979 | ||
2426 | ftrace_init_dyn_debugfs(d_tracer); | 2980 | ftrace_init_dyn_debugfs(d_tracer); |
2427 | 2981 | ||
2428 | entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer, | 2982 | trace_create_file("set_ftrace_pid", 0644, d_tracer, |
2429 | NULL, &ftrace_pid_fops); | 2983 | NULL, &ftrace_pid_fops); |
2430 | if (!entry) | 2984 | |
2431 | pr_warning("Could not create debugfs " | 2985 | ftrace_profile_debugfs(d_tracer); |
2432 | "'set_ftrace_pid' entry\n"); | 2986 | |
2433 | return 0; | 2987 | return 0; |
2434 | } | 2988 | } |
2435 | fs_initcall(ftrace_init_debugfs); | 2989 | fs_initcall(ftrace_init_debugfs); |
@@ -2538,7 +3092,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
2538 | 3092 | ||
2539 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 3093 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
2540 | 3094 | ||
2541 | static atomic_t ftrace_graph_active; | 3095 | static int ftrace_graph_active; |
2542 | static struct notifier_block ftrace_suspend_notifier; | 3096 | static struct notifier_block ftrace_suspend_notifier; |
2543 | 3097 | ||
2544 | int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) | 3098 | int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) |
@@ -2690,7 +3244,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc, | |||
2690 | mutex_lock(&ftrace_lock); | 3244 | mutex_lock(&ftrace_lock); |
2691 | 3245 | ||
2692 | /* we currently allow only one tracer registered at a time */ | 3246 | /* we currently allow only one tracer registered at a time */ |
2693 | if (atomic_read(&ftrace_graph_active)) { | 3247 | if (ftrace_graph_active) { |
2694 | ret = -EBUSY; | 3248 | ret = -EBUSY; |
2695 | goto out; | 3249 | goto out; |
2696 | } | 3250 | } |
@@ -2698,10 +3252,10 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc, | |||
2698 | ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call; | 3252 | ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call; |
2699 | register_pm_notifier(&ftrace_suspend_notifier); | 3253 | register_pm_notifier(&ftrace_suspend_notifier); |
2700 | 3254 | ||
2701 | atomic_inc(&ftrace_graph_active); | 3255 | ftrace_graph_active++; |
2702 | ret = start_graph_tracing(); | 3256 | ret = start_graph_tracing(); |
2703 | if (ret) { | 3257 | if (ret) { |
2704 | atomic_dec(&ftrace_graph_active); | 3258 | ftrace_graph_active--; |
2705 | goto out; | 3259 | goto out; |
2706 | } | 3260 | } |
2707 | 3261 | ||
@@ -2719,10 +3273,10 @@ void unregister_ftrace_graph(void) | |||
2719 | { | 3273 | { |
2720 | mutex_lock(&ftrace_lock); | 3274 | mutex_lock(&ftrace_lock); |
2721 | 3275 | ||
2722 | if (!unlikely(atomic_read(&ftrace_graph_active))) | 3276 | if (unlikely(!ftrace_graph_active)) |
2723 | goto out; | 3277 | goto out; |
2724 | 3278 | ||
2725 | atomic_dec(&ftrace_graph_active); | 3279 | ftrace_graph_active--; |
2726 | unregister_trace_sched_switch(ftrace_graph_probe_sched_switch); | 3280 | unregister_trace_sched_switch(ftrace_graph_probe_sched_switch); |
2727 | ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; | 3281 | ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; |
2728 | ftrace_graph_entry = ftrace_graph_entry_stub; | 3282 | ftrace_graph_entry = ftrace_graph_entry_stub; |
@@ -2736,7 +3290,7 @@ void unregister_ftrace_graph(void) | |||
2736 | /* Allocate a return stack for newly created task */ | 3290 | /* Allocate a return stack for newly created task */ |
2737 | void ftrace_graph_init_task(struct task_struct *t) | 3291 | void ftrace_graph_init_task(struct task_struct *t) |
2738 | { | 3292 | { |
2739 | if (atomic_read(&ftrace_graph_active)) { | 3293 | if (ftrace_graph_active) { |
2740 | t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH | 3294 | t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH |
2741 | * sizeof(struct ftrace_ret_stack), | 3295 | * sizeof(struct ftrace_ret_stack), |
2742 | GFP_KERNEL); | 3296 | GFP_KERNEL); |
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 960cbf44c844..74a11808c282 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -2845,14 +2845,11 @@ static const struct file_operations rb_simple_fops = { | |||
2845 | static __init int rb_init_debugfs(void) | 2845 | static __init int rb_init_debugfs(void) |
2846 | { | 2846 | { |
2847 | struct dentry *d_tracer; | 2847 | struct dentry *d_tracer; |
2848 | struct dentry *entry; | ||
2849 | 2848 | ||
2850 | d_tracer = tracing_init_dentry(); | 2849 | d_tracer = tracing_init_dentry(); |
2851 | 2850 | ||
2852 | entry = debugfs_create_file("tracing_on", 0644, d_tracer, | 2851 | trace_create_file("tracing_on", 0644, d_tracer, |
2853 | &ring_buffer_flags, &rb_simple_fops); | 2852 | &ring_buffer_flags, &rb_simple_fops); |
2854 | if (!entry) | ||
2855 | pr_warning("Could not create debugfs 'tracing_on' entry\n"); | ||
2856 | 2853 | ||
2857 | return 0; | 2854 | return 0; |
2858 | } | 2855 | } |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 1ce5dc6372b8..4865459f609f 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -255,7 +255,8 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait); | |||
255 | 255 | ||
256 | /* trace_flags holds trace_options default values */ | 256 | /* trace_flags holds trace_options default values */ |
257 | unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | | 257 | unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | |
258 | TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME; | 258 | TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME | |
259 | TRACE_ITER_GRAPH_TIME; | ||
259 | 260 | ||
260 | /** | 261 | /** |
261 | * trace_wake_up - wake up tasks waiting for trace input | 262 | * trace_wake_up - wake up tasks waiting for trace input |
@@ -317,6 +318,7 @@ static const char *trace_options[] = { | |||
317 | "latency-format", | 318 | "latency-format", |
318 | "global-clock", | 319 | "global-clock", |
319 | "sleep-time", | 320 | "sleep-time", |
321 | "graph-time", | ||
320 | NULL | 322 | NULL |
321 | }; | 323 | }; |
322 | 324 | ||
@@ -402,17 +404,6 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) | |||
402 | return cnt; | 404 | return cnt; |
403 | } | 405 | } |
404 | 406 | ||
405 | static void | ||
406 | trace_print_seq(struct seq_file *m, struct trace_seq *s) | ||
407 | { | ||
408 | int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len; | ||
409 | |||
410 | s->buffer[len] = 0; | ||
411 | seq_puts(m, s->buffer); | ||
412 | |||
413 | trace_seq_init(s); | ||
414 | } | ||
415 | |||
416 | /** | 407 | /** |
417 | * update_max_tr - snapshot all trace buffers from global_trace to max_tr | 408 | * update_max_tr - snapshot all trace buffers from global_trace to max_tr |
418 | * @tr: tracer | 409 | * @tr: tracer |
@@ -3596,7 +3587,7 @@ struct dentry *tracing_dentry_percpu(void) | |||
3596 | static void tracing_init_debugfs_percpu(long cpu) | 3587 | static void tracing_init_debugfs_percpu(long cpu) |
3597 | { | 3588 | { |
3598 | struct dentry *d_percpu = tracing_dentry_percpu(); | 3589 | struct dentry *d_percpu = tracing_dentry_percpu(); |
3599 | struct dentry *entry, *d_cpu; | 3590 | struct dentry *d_cpu; |
3600 | /* strlen(cpu) + MAX(log10(cpu)) + '\0' */ | 3591 | /* strlen(cpu) + MAX(log10(cpu)) + '\0' */ |
3601 | char cpu_dir[7]; | 3592 | char cpu_dir[7]; |
3602 | 3593 | ||
@@ -3611,21 +3602,15 @@ static void tracing_init_debugfs_percpu(long cpu) | |||
3611 | } | 3602 | } |
3612 | 3603 | ||
3613 | /* per cpu trace_pipe */ | 3604 | /* per cpu trace_pipe */ |
3614 | entry = debugfs_create_file("trace_pipe", 0444, d_cpu, | 3605 | trace_create_file("trace_pipe", 0444, d_cpu, |
3615 | (void *) cpu, &tracing_pipe_fops); | 3606 | (void *) cpu, &tracing_pipe_fops); |
3616 | if (!entry) | ||
3617 | pr_warning("Could not create debugfs 'trace_pipe' entry\n"); | ||
3618 | 3607 | ||
3619 | /* per cpu trace */ | 3608 | /* per cpu trace */ |
3620 | entry = debugfs_create_file("trace", 0644, d_cpu, | 3609 | trace_create_file("trace", 0644, d_cpu, |
3621 | (void *) cpu, &tracing_fops); | 3610 | (void *) cpu, &tracing_fops); |
3622 | if (!entry) | ||
3623 | pr_warning("Could not create debugfs 'trace' entry\n"); | ||
3624 | 3611 | ||
3625 | entry = debugfs_create_file("trace_pipe_raw", 0444, d_cpu, | 3612 | trace_create_file("trace_pipe_raw", 0444, d_cpu, |
3626 | (void *) cpu, &tracing_buffers_fops); | 3613 | (void *) cpu, &tracing_buffers_fops); |
3627 | if (!entry) | ||
3628 | pr_warning("Could not create debugfs 'trace_pipe_raw' entry\n"); | ||
3629 | } | 3614 | } |
3630 | 3615 | ||
3631 | #ifdef CONFIG_FTRACE_SELFTEST | 3616 | #ifdef CONFIG_FTRACE_SELFTEST |
@@ -3781,6 +3766,22 @@ static const struct file_operations trace_options_core_fops = { | |||
3781 | .write = trace_options_core_write, | 3766 | .write = trace_options_core_write, |
3782 | }; | 3767 | }; |
3783 | 3768 | ||
3769 | struct dentry *trace_create_file(const char *name, | ||
3770 | mode_t mode, | ||
3771 | struct dentry *parent, | ||
3772 | void *data, | ||
3773 | const struct file_operations *fops) | ||
3774 | { | ||
3775 | struct dentry *ret; | ||
3776 | |||
3777 | ret = debugfs_create_file(name, mode, parent, data, fops); | ||
3778 | if (!ret) | ||
3779 | pr_warning("Could not create debugfs '%s' entry\n", name); | ||
3780 | |||
3781 | return ret; | ||
3782 | } | ||
3783 | |||
3784 | |||
3784 | static struct dentry *trace_options_init_dentry(void) | 3785 | static struct dentry *trace_options_init_dentry(void) |
3785 | { | 3786 | { |
3786 | struct dentry *d_tracer; | 3787 | struct dentry *d_tracer; |
@@ -3808,7 +3809,6 @@ create_trace_option_file(struct trace_option_dentry *topt, | |||
3808 | struct tracer_opt *opt) | 3809 | struct tracer_opt *opt) |
3809 | { | 3810 | { |
3810 | struct dentry *t_options; | 3811 | struct dentry *t_options; |
3811 | struct dentry *entry; | ||
3812 | 3812 | ||
3813 | t_options = trace_options_init_dentry(); | 3813 | t_options = trace_options_init_dentry(); |
3814 | if (!t_options) | 3814 | if (!t_options) |
@@ -3817,11 +3817,9 @@ create_trace_option_file(struct trace_option_dentry *topt, | |||
3817 | topt->flags = flags; | 3817 | topt->flags = flags; |
3818 | topt->opt = opt; | 3818 | topt->opt = opt; |
3819 | 3819 | ||
3820 | entry = debugfs_create_file(opt->name, 0644, t_options, topt, | 3820 | topt->entry = trace_create_file(opt->name, 0644, t_options, topt, |
3821 | &trace_options_fops); | 3821 | &trace_options_fops); |
3822 | 3822 | ||
3823 | topt->entry = entry; | ||
3824 | |||
3825 | } | 3823 | } |
3826 | 3824 | ||
3827 | static struct trace_option_dentry * | 3825 | static struct trace_option_dentry * |
@@ -3876,123 +3874,81 @@ static struct dentry * | |||
3876 | create_trace_option_core_file(const char *option, long index) | 3874 | create_trace_option_core_file(const char *option, long index) |
3877 | { | 3875 | { |
3878 | struct dentry *t_options; | 3876 | struct dentry *t_options; |
3879 | struct dentry *entry; | ||
3880 | 3877 | ||
3881 | t_options = trace_options_init_dentry(); | 3878 | t_options = trace_options_init_dentry(); |
3882 | if (!t_options) | 3879 | if (!t_options) |
3883 | return NULL; | 3880 | return NULL; |
3884 | 3881 | ||
3885 | entry = debugfs_create_file(option, 0644, t_options, (void *)index, | 3882 | return trace_create_file(option, 0644, t_options, (void *)index, |
3886 | &trace_options_core_fops); | 3883 | &trace_options_core_fops); |
3887 | |||
3888 | return entry; | ||
3889 | } | 3884 | } |
3890 | 3885 | ||
3891 | static __init void create_trace_options_dir(void) | 3886 | static __init void create_trace_options_dir(void) |
3892 | { | 3887 | { |
3893 | struct dentry *t_options; | 3888 | struct dentry *t_options; |
3894 | struct dentry *entry; | ||
3895 | int i; | 3889 | int i; |
3896 | 3890 | ||
3897 | t_options = trace_options_init_dentry(); | 3891 | t_options = trace_options_init_dentry(); |
3898 | if (!t_options) | 3892 | if (!t_options) |
3899 | return; | 3893 | return; |
3900 | 3894 | ||
3901 | for (i = 0; trace_options[i]; i++) { | 3895 | for (i = 0; trace_options[i]; i++) |
3902 | entry = create_trace_option_core_file(trace_options[i], i); | 3896 | create_trace_option_core_file(trace_options[i], i); |
3903 | if (!entry) | ||
3904 | pr_warning("Could not create debugfs %s entry\n", | ||
3905 | trace_options[i]); | ||
3906 | } | ||
3907 | } | 3897 | } |
3908 | 3898 | ||
3909 | static __init int tracer_init_debugfs(void) | 3899 | static __init int tracer_init_debugfs(void) |
3910 | { | 3900 | { |
3911 | struct dentry *d_tracer; | 3901 | struct dentry *d_tracer; |
3912 | struct dentry *entry; | ||
3913 | int cpu; | 3902 | int cpu; |
3914 | 3903 | ||
3915 | d_tracer = tracing_init_dentry(); | 3904 | d_tracer = tracing_init_dentry(); |
3916 | 3905 | ||
3917 | entry = debugfs_create_file("tracing_enabled", 0644, d_tracer, | 3906 | trace_create_file("tracing_enabled", 0644, d_tracer, |
3918 | &global_trace, &tracing_ctrl_fops); | 3907 | &global_trace, &tracing_ctrl_fops); |
3919 | if (!entry) | ||
3920 | pr_warning("Could not create debugfs 'tracing_enabled' entry\n"); | ||
3921 | 3908 | ||
3922 | entry = debugfs_create_file("trace_options", 0644, d_tracer, | 3909 | trace_create_file("trace_options", 0644, d_tracer, |
3923 | NULL, &tracing_iter_fops); | 3910 | NULL, &tracing_iter_fops); |
3924 | if (!entry) | ||
3925 | pr_warning("Could not create debugfs 'trace_options' entry\n"); | ||
3926 | 3911 | ||
3927 | create_trace_options_dir(); | 3912 | trace_create_file("tracing_cpumask", 0644, d_tracer, |
3913 | NULL, &tracing_cpumask_fops); | ||
3914 | |||
3915 | trace_create_file("trace", 0644, d_tracer, | ||
3916 | (void *) TRACE_PIPE_ALL_CPU, &tracing_fops); | ||
3917 | |||
3918 | trace_create_file("available_tracers", 0444, d_tracer, | ||
3919 | &global_trace, &show_traces_fops); | ||
3920 | |||
3921 | trace_create_file("current_tracer", 0444, d_tracer, | ||
3922 | &global_trace, &set_tracer_fops); | ||
3928 | 3923 | ||
3929 | entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer, | 3924 | trace_create_file("tracing_max_latency", 0644, d_tracer, |
3930 | NULL, &tracing_cpumask_fops); | 3925 | &tracing_max_latency, &tracing_max_lat_fops); |
3931 | if (!entry) | 3926 | |
3932 | pr_warning("Could not create debugfs 'tracing_cpumask' entry\n"); | 3927 | trace_create_file("tracing_thresh", 0644, d_tracer, |
3933 | 3928 | &tracing_thresh, &tracing_max_lat_fops); | |
3934 | entry = debugfs_create_file("trace", 0644, d_tracer, | 3929 | |
3935 | (void *) TRACE_PIPE_ALL_CPU, &tracing_fops); | 3930 | trace_create_file("README", 0644, d_tracer, |
3936 | if (!entry) | 3931 | NULL, &tracing_readme_fops); |
3937 | pr_warning("Could not create debugfs 'trace' entry\n"); | 3932 | |
3938 | 3933 | trace_create_file("trace_pipe", 0444, d_tracer, | |
3939 | entry = debugfs_create_file("available_tracers", 0444, d_tracer, | ||
3940 | &global_trace, &show_traces_fops); | ||
3941 | if (!entry) | ||
3942 | pr_warning("Could not create debugfs 'available_tracers' entry\n"); | ||
3943 | |||
3944 | entry = debugfs_create_file("current_tracer", 0444, d_tracer, | ||
3945 | &global_trace, &set_tracer_fops); | ||
3946 | if (!entry) | ||
3947 | pr_warning("Could not create debugfs 'current_tracer' entry\n"); | ||
3948 | |||
3949 | entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer, | ||
3950 | &tracing_max_latency, | ||
3951 | &tracing_max_lat_fops); | ||
3952 | if (!entry) | ||
3953 | pr_warning("Could not create debugfs " | ||
3954 | "'tracing_max_latency' entry\n"); | ||
3955 | |||
3956 | entry = debugfs_create_file("tracing_thresh", 0644, d_tracer, | ||
3957 | &tracing_thresh, &tracing_max_lat_fops); | ||
3958 | if (!entry) | ||
3959 | pr_warning("Could not create debugfs " | ||
3960 | "'tracing_thresh' entry\n"); | ||
3961 | entry = debugfs_create_file("README", 0644, d_tracer, | ||
3962 | NULL, &tracing_readme_fops); | ||
3963 | if (!entry) | ||
3964 | pr_warning("Could not create debugfs 'README' entry\n"); | ||
3965 | |||
3966 | entry = debugfs_create_file("trace_pipe", 0444, d_tracer, | ||
3967 | (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops); | 3934 | (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops); |
3968 | if (!entry) | 3935 | |
3969 | pr_warning("Could not create debugfs " | 3936 | trace_create_file("buffer_size_kb", 0644, d_tracer, |
3970 | "'trace_pipe' entry\n"); | 3937 | &global_trace, &tracing_entries_fops); |
3971 | 3938 | ||
3972 | entry = debugfs_create_file("buffer_size_kb", 0644, d_tracer, | 3939 | trace_create_file("trace_marker", 0220, d_tracer, |
3973 | &global_trace, &tracing_entries_fops); | 3940 | NULL, &tracing_mark_fops); |
3974 | if (!entry) | ||
3975 | pr_warning("Could not create debugfs " | ||
3976 | "'buffer_size_kb' entry\n"); | ||
3977 | |||
3978 | entry = debugfs_create_file("trace_marker", 0220, d_tracer, | ||
3979 | NULL, &tracing_mark_fops); | ||
3980 | if (!entry) | ||
3981 | pr_warning("Could not create debugfs " | ||
3982 | "'trace_marker' entry\n"); | ||
3983 | 3941 | ||
3984 | #ifdef CONFIG_DYNAMIC_FTRACE | 3942 | #ifdef CONFIG_DYNAMIC_FTRACE |
3985 | entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer, | 3943 | trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, |
3986 | &ftrace_update_tot_cnt, | 3944 | &ftrace_update_tot_cnt, &tracing_dyn_info_fops); |
3987 | &tracing_dyn_info_fops); | ||
3988 | if (!entry) | ||
3989 | pr_warning("Could not create debugfs " | ||
3990 | "'dyn_ftrace_total_info' entry\n"); | ||
3991 | #endif | 3945 | #endif |
3992 | #ifdef CONFIG_SYSPROF_TRACER | 3946 | #ifdef CONFIG_SYSPROF_TRACER |
3993 | init_tracer_sysprof_debugfs(d_tracer); | 3947 | init_tracer_sysprof_debugfs(d_tracer); |
3994 | #endif | 3948 | #endif |
3995 | 3949 | ||
3950 | create_trace_options_dir(); | ||
3951 | |||
3996 | for_each_tracing_cpu(cpu) | 3952 | for_each_tracing_cpu(cpu) |
3997 | tracing_init_debugfs_percpu(cpu); | 3953 | tracing_init_debugfs_percpu(cpu); |
3998 | 3954 | ||
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index e685ac2b2ba1..f76a8f8689d4 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -470,6 +470,12 @@ void trace_wake_up(void); | |||
470 | void tracing_reset(struct trace_array *tr, int cpu); | 470 | void tracing_reset(struct trace_array *tr, int cpu); |
471 | void tracing_reset_online_cpus(struct trace_array *tr); | 471 | void tracing_reset_online_cpus(struct trace_array *tr); |
472 | int tracing_open_generic(struct inode *inode, struct file *filp); | 472 | int tracing_open_generic(struct inode *inode, struct file *filp); |
473 | struct dentry *trace_create_file(const char *name, | ||
474 | mode_t mode, | ||
475 | struct dentry *parent, | ||
476 | void *data, | ||
477 | const struct file_operations *fops); | ||
478 | |||
473 | struct dentry *tracing_init_dentry(void); | 479 | struct dentry *tracing_init_dentry(void); |
474 | void init_tracer_sysprof_debugfs(struct dentry *d_tracer); | 480 | void init_tracer_sysprof_debugfs(struct dentry *d_tracer); |
475 | 481 | ||
@@ -613,6 +619,8 @@ extern unsigned long trace_flags; | |||
613 | /* Standard output formatting function used for function return traces */ | 619 | /* Standard output formatting function used for function return traces */ |
614 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 620 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
615 | extern enum print_line_t print_graph_function(struct trace_iterator *iter); | 621 | extern enum print_line_t print_graph_function(struct trace_iterator *iter); |
622 | extern enum print_line_t | ||
623 | trace_print_graph_duration(unsigned long long duration, struct trace_seq *s); | ||
616 | 624 | ||
617 | #ifdef CONFIG_DYNAMIC_FTRACE | 625 | #ifdef CONFIG_DYNAMIC_FTRACE |
618 | /* TODO: make this variable */ | 626 | /* TODO: make this variable */ |
@@ -644,7 +652,6 @@ static inline int ftrace_graph_addr(unsigned long addr) | |||
644 | return 1; | 652 | return 1; |
645 | } | 653 | } |
646 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 654 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
647 | |||
648 | #else /* CONFIG_FUNCTION_GRAPH_TRACER */ | 655 | #else /* CONFIG_FUNCTION_GRAPH_TRACER */ |
649 | static inline enum print_line_t | 656 | static inline enum print_line_t |
650 | print_graph_function(struct trace_iterator *iter) | 657 | print_graph_function(struct trace_iterator *iter) |
@@ -692,6 +699,7 @@ enum trace_iterator_flags { | |||
692 | TRACE_ITER_LATENCY_FMT = 0x40000, | 699 | TRACE_ITER_LATENCY_FMT = 0x40000, |
693 | TRACE_ITER_GLOBAL_CLK = 0x80000, | 700 | TRACE_ITER_GLOBAL_CLK = 0x80000, |
694 | TRACE_ITER_SLEEP_TIME = 0x100000, | 701 | TRACE_ITER_SLEEP_TIME = 0x100000, |
702 | TRACE_ITER_GRAPH_TIME = 0x200000, | ||
695 | }; | 703 | }; |
696 | 704 | ||
697 | /* | 705 | /* |
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index 7a30fc4c3642..a29ef23ffb47 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/debugfs.h> | 9 | #include <linux/debugfs.h> |
10 | #include <linux/ftrace.h> | 10 | #include <linux/ftrace.h> |
11 | #include <linux/kallsyms.h> | 11 | #include <linux/kallsyms.h> |
12 | #include <linux/time.h> | ||
12 | 13 | ||
13 | #include "trace.h" | 14 | #include "trace.h" |
14 | #include "trace_output.h" | 15 | #include "trace_output.h" |
@@ -67,7 +68,7 @@ initcall_call_print_line(struct trace_iterator *iter) | |||
67 | trace_assign_type(field, entry); | 68 | trace_assign_type(field, entry); |
68 | call = &field->boot_call; | 69 | call = &field->boot_call; |
69 | ts = iter->ts; | 70 | ts = iter->ts; |
70 | nsec_rem = do_div(ts, 1000000000); | 71 | nsec_rem = do_div(ts, NSEC_PER_SEC); |
71 | 72 | ||
72 | ret = trace_seq_printf(s, "[%5ld.%09ld] calling %s @ %i\n", | 73 | ret = trace_seq_printf(s, "[%5ld.%09ld] calling %s @ %i\n", |
73 | (unsigned long)ts, nsec_rem, call->func, call->caller); | 74 | (unsigned long)ts, nsec_rem, call->func, call->caller); |
@@ -92,7 +93,7 @@ initcall_ret_print_line(struct trace_iterator *iter) | |||
92 | trace_assign_type(field, entry); | 93 | trace_assign_type(field, entry); |
93 | init_ret = &field->boot_ret; | 94 | init_ret = &field->boot_ret; |
94 | ts = iter->ts; | 95 | ts = iter->ts; |
95 | nsec_rem = do_div(ts, 1000000000); | 96 | nsec_rem = do_div(ts, NSEC_PER_SEC); |
96 | 97 | ||
97 | ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s " | 98 | ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s " |
98 | "returned %d after %llu msecs\n", | 99 | "returned %d after %llu msecs\n", |
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c index ad8c22efff41..e6e32912ffb8 100644 --- a/kernel/trace/trace_branch.c +++ b/kernel/trace/trace_branch.c | |||
@@ -263,7 +263,7 @@ static int branch_stat_show(struct seq_file *m, void *v) | |||
263 | return 0; | 263 | return 0; |
264 | } | 264 | } |
265 | 265 | ||
266 | static void *annotated_branch_stat_start(void) | 266 | static void *annotated_branch_stat_start(struct tracer_stat *trace) |
267 | { | 267 | { |
268 | return __start_annotated_branch_profile; | 268 | return __start_annotated_branch_profile; |
269 | } | 269 | } |
@@ -338,7 +338,7 @@ static int all_branch_stat_headers(struct seq_file *m) | |||
338 | return 0; | 338 | return 0; |
339 | } | 339 | } |
340 | 340 | ||
341 | static void *all_branch_stat_start(void) | 341 | static void *all_branch_stat_start(struct tracer_stat *trace) |
342 | { | 342 | { |
343 | return __start_branch_profile; | 343 | return __start_branch_profile; |
344 | } | 344 | } |
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c index 22cba9970776..199de9c74229 100644 --- a/kernel/trace/trace_event_profile.c +++ b/kernel/trace/trace_event_profile.c | |||
@@ -28,4 +28,3 @@ void ftrace_profile_disable(int event_id) | |||
28 | return event->profile_disable(event); | 28 | return event->profile_disable(event); |
29 | } | 29 | } |
30 | } | 30 | } |
31 | |||
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index d28687e7b3a7..10f6ad7d85f6 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
@@ -78,13 +78,14 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth) | |||
78 | current->ret_stack[index].ret = ret; | 78 | current->ret_stack[index].ret = ret; |
79 | current->ret_stack[index].func = func; | 79 | current->ret_stack[index].func = func; |
80 | current->ret_stack[index].calltime = calltime; | 80 | current->ret_stack[index].calltime = calltime; |
81 | current->ret_stack[index].subtime = 0; | ||
81 | *depth = index; | 82 | *depth = index; |
82 | 83 | ||
83 | return 0; | 84 | return 0; |
84 | } | 85 | } |
85 | 86 | ||
86 | /* Retrieve a function return address to the trace stack on thread info.*/ | 87 | /* Retrieve a function return address to the trace stack on thread info.*/ |
87 | void | 88 | static void |
88 | ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret) | 89 | ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret) |
89 | { | 90 | { |
90 | int index; | 91 | int index; |
@@ -104,9 +105,6 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret) | |||
104 | trace->calltime = current->ret_stack[index].calltime; | 105 | trace->calltime = current->ret_stack[index].calltime; |
105 | trace->overrun = atomic_read(¤t->trace_overrun); | 106 | trace->overrun = atomic_read(¤t->trace_overrun); |
106 | trace->depth = index; | 107 | trace->depth = index; |
107 | barrier(); | ||
108 | current->curr_ret_stack--; | ||
109 | |||
110 | } | 108 | } |
111 | 109 | ||
112 | /* | 110 | /* |
@@ -121,6 +119,8 @@ unsigned long ftrace_return_to_handler(void) | |||
121 | ftrace_pop_return_trace(&trace, &ret); | 119 | ftrace_pop_return_trace(&trace, &ret); |
122 | trace.rettime = trace_clock_local(); | 120 | trace.rettime = trace_clock_local(); |
123 | ftrace_graph_return(&trace); | 121 | ftrace_graph_return(&trace); |
122 | barrier(); | ||
123 | current->curr_ret_stack--; | ||
124 | 124 | ||
125 | if (unlikely(!ret)) { | 125 | if (unlikely(!ret)) { |
126 | ftrace_graph_stop(); | 126 | ftrace_graph_stop(); |
@@ -426,8 +426,8 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr, | |||
426 | return TRACE_TYPE_HANDLED; | 426 | return TRACE_TYPE_HANDLED; |
427 | } | 427 | } |
428 | 428 | ||
429 | static enum print_line_t | 429 | enum print_line_t |
430 | print_graph_duration(unsigned long long duration, struct trace_seq *s) | 430 | trace_print_graph_duration(unsigned long long duration, struct trace_seq *s) |
431 | { | 431 | { |
432 | unsigned long nsecs_rem = do_div(duration, 1000); | 432 | unsigned long nsecs_rem = do_div(duration, 1000); |
433 | /* log10(ULONG_MAX) + '\0' */ | 433 | /* log10(ULONG_MAX) + '\0' */ |
@@ -464,12 +464,23 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s) | |||
464 | if (!ret) | 464 | if (!ret) |
465 | return TRACE_TYPE_PARTIAL_LINE; | 465 | return TRACE_TYPE_PARTIAL_LINE; |
466 | } | 466 | } |
467 | return TRACE_TYPE_HANDLED; | ||
468 | } | ||
469 | |||
470 | static enum print_line_t | ||
471 | print_graph_duration(unsigned long long duration, struct trace_seq *s) | ||
472 | { | ||
473 | int ret; | ||
474 | |||
475 | ret = trace_print_graph_duration(duration, s); | ||
476 | if (ret != TRACE_TYPE_HANDLED) | ||
477 | return ret; | ||
467 | 478 | ||
468 | ret = trace_seq_printf(s, "| "); | 479 | ret = trace_seq_printf(s, "| "); |
469 | if (!ret) | 480 | if (!ret) |
470 | return TRACE_TYPE_PARTIAL_LINE; | 481 | return TRACE_TYPE_PARTIAL_LINE; |
471 | return TRACE_TYPE_HANDLED; | ||
472 | 482 | ||
483 | return TRACE_TYPE_HANDLED; | ||
473 | } | 484 | } |
474 | 485 | ||
475 | /* Case of a leaf function on its call entry */ | 486 | /* Case of a leaf function on its call entry */ |
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c index 8e37fcddd8b4..d53b45ed0806 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c | |||
@@ -9,6 +9,8 @@ | |||
9 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
10 | #include <linux/mmiotrace.h> | 10 | #include <linux/mmiotrace.h> |
11 | #include <linux/pci.h> | 11 | #include <linux/pci.h> |
12 | #include <linux/time.h> | ||
13 | |||
12 | #include <asm/atomic.h> | 14 | #include <asm/atomic.h> |
13 | 15 | ||
14 | #include "trace.h" | 16 | #include "trace.h" |
@@ -174,7 +176,7 @@ static enum print_line_t mmio_print_rw(struct trace_iterator *iter) | |||
174 | struct mmiotrace_rw *rw; | 176 | struct mmiotrace_rw *rw; |
175 | struct trace_seq *s = &iter->seq; | 177 | struct trace_seq *s = &iter->seq; |
176 | unsigned long long t = ns2usecs(iter->ts); | 178 | unsigned long long t = ns2usecs(iter->ts); |
177 | unsigned long usec_rem = do_div(t, 1000000ULL); | 179 | unsigned long usec_rem = do_div(t, USEC_PER_SEC); |
178 | unsigned secs = (unsigned long)t; | 180 | unsigned secs = (unsigned long)t; |
179 | int ret = 1; | 181 | int ret = 1; |
180 | 182 | ||
@@ -221,7 +223,7 @@ static enum print_line_t mmio_print_map(struct trace_iterator *iter) | |||
221 | struct mmiotrace_map *m; | 223 | struct mmiotrace_map *m; |
222 | struct trace_seq *s = &iter->seq; | 224 | struct trace_seq *s = &iter->seq; |
223 | unsigned long long t = ns2usecs(iter->ts); | 225 | unsigned long long t = ns2usecs(iter->ts); |
224 | unsigned long usec_rem = do_div(t, 1000000ULL); | 226 | unsigned long usec_rem = do_div(t, USEC_PER_SEC); |
225 | unsigned secs = (unsigned long)t; | 227 | unsigned secs = (unsigned long)t; |
226 | int ret; | 228 | int ret; |
227 | 229 | ||
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 64b54a59c55b..0e70fb07ca78 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c | |||
@@ -19,6 +19,16 @@ static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly; | |||
19 | 19 | ||
20 | static int next_event_type = __TRACE_LAST_TYPE + 1; | 20 | static int next_event_type = __TRACE_LAST_TYPE + 1; |
21 | 21 | ||
22 | void trace_print_seq(struct seq_file *m, struct trace_seq *s) | ||
23 | { | ||
24 | int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len; | ||
25 | |||
26 | s->buffer[len] = 0; | ||
27 | seq_puts(m, s->buffer); | ||
28 | |||
29 | trace_seq_init(s); | ||
30 | } | ||
31 | |||
22 | enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter) | 32 | enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter) |
23 | { | 33 | { |
24 | struct trace_seq *s = &iter->seq; | 34 | struct trace_seq *s = &iter->seq; |
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h index e0bde39c2dd9..91630217fb46 100644 --- a/kernel/trace/trace_output.h +++ b/kernel/trace/trace_output.h | |||
@@ -20,6 +20,8 @@ trace_print_bprintk_msg_only(struct trace_iterator *iter); | |||
20 | extern enum print_line_t | 20 | extern enum print_line_t |
21 | trace_print_printk_msg_only(struct trace_iterator *iter); | 21 | trace_print_printk_msg_only(struct trace_iterator *iter); |
22 | 22 | ||
23 | extern void trace_print_seq(struct seq_file *m, struct trace_seq *s); | ||
24 | |||
23 | extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...) | 25 | extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...) |
24 | __attribute__ ((format (printf, 2, 3))); | 26 | __attribute__ ((format (printf, 2, 3))); |
25 | extern int | 27 | extern int |
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c index eb81556107fe..9bece9687b62 100644 --- a/kernel/trace/trace_printk.c +++ b/kernel/trace/trace_printk.c | |||
@@ -245,17 +245,13 @@ static const struct file_operations ftrace_formats_fops = { | |||
245 | static __init int init_trace_printk_function_export(void) | 245 | static __init int init_trace_printk_function_export(void) |
246 | { | 246 | { |
247 | struct dentry *d_tracer; | 247 | struct dentry *d_tracer; |
248 | struct dentry *entry; | ||
249 | 248 | ||
250 | d_tracer = tracing_init_dentry(); | 249 | d_tracer = tracing_init_dentry(); |
251 | if (!d_tracer) | 250 | if (!d_tracer) |
252 | return 0; | 251 | return 0; |
253 | 252 | ||
254 | entry = debugfs_create_file("printk_formats", 0444, d_tracer, | 253 | trace_create_file("printk_formats", 0444, d_tracer, |
255 | NULL, &ftrace_formats_fops); | 254 | NULL, &ftrace_formats_fops); |
256 | if (!entry) | ||
257 | pr_warning("Could not create debugfs " | ||
258 | "'printk_formats' entry\n"); | ||
259 | 255 | ||
260 | return 0; | 256 | return 0; |
261 | } | 257 | } |
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c index 9117cea6f1ae..9d8cccdfaa06 100644 --- a/kernel/trace/trace_sched_switch.c +++ b/kernel/trace/trace_sched_switch.c | |||
@@ -29,13 +29,13 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev, | |||
29 | int cpu; | 29 | int cpu; |
30 | int pc; | 30 | int pc; |
31 | 31 | ||
32 | if (!sched_ref || sched_stopped) | 32 | if (unlikely(!sched_ref)) |
33 | return; | 33 | return; |
34 | 34 | ||
35 | tracing_record_cmdline(prev); | 35 | tracing_record_cmdline(prev); |
36 | tracing_record_cmdline(next); | 36 | tracing_record_cmdline(next); |
37 | 37 | ||
38 | if (!tracer_enabled) | 38 | if (!tracer_enabled || sched_stopped) |
39 | return; | 39 | return; |
40 | 40 | ||
41 | pc = preempt_count(); | 41 | pc = preempt_count(); |
@@ -56,15 +56,15 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success) | |||
56 | unsigned long flags; | 56 | unsigned long flags; |
57 | int cpu, pc; | 57 | int cpu, pc; |
58 | 58 | ||
59 | if (!likely(tracer_enabled)) | 59 | if (unlikely(!sched_ref)) |
60 | return; | 60 | return; |
61 | 61 | ||
62 | pc = preempt_count(); | ||
63 | tracing_record_cmdline(current); | 62 | tracing_record_cmdline(current); |
64 | 63 | ||
65 | if (sched_stopped) | 64 | if (!tracer_enabled || sched_stopped) |
66 | return; | 65 | return; |
67 | 66 | ||
67 | pc = preempt_count(); | ||
68 | local_irq_save(flags); | 68 | local_irq_save(flags); |
69 | cpu = raw_smp_processor_id(); | 69 | cpu = raw_smp_processor_id(); |
70 | data = ctx_trace->data[cpu]; | 70 | data = ctx_trace->data[cpu]; |
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index c750f65f9661..1796f00524e1 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
@@ -352,19 +352,14 @@ __setup("stacktrace", enable_stacktrace); | |||
352 | static __init int stack_trace_init(void) | 352 | static __init int stack_trace_init(void) |
353 | { | 353 | { |
354 | struct dentry *d_tracer; | 354 | struct dentry *d_tracer; |
355 | struct dentry *entry; | ||
356 | 355 | ||
357 | d_tracer = tracing_init_dentry(); | 356 | d_tracer = tracing_init_dentry(); |
358 | 357 | ||
359 | entry = debugfs_create_file("stack_max_size", 0644, d_tracer, | 358 | trace_create_file("stack_max_size", 0644, d_tracer, |
360 | &max_stack_size, &stack_max_size_fops); | 359 | &max_stack_size, &stack_max_size_fops); |
361 | if (!entry) | ||
362 | pr_warning("Could not create debugfs 'stack_max_size' entry\n"); | ||
363 | 360 | ||
364 | entry = debugfs_create_file("stack_trace", 0444, d_tracer, | 361 | trace_create_file("stack_trace", 0444, d_tracer, |
365 | NULL, &stack_trace_fops); | 362 | NULL, &stack_trace_fops); |
366 | if (!entry) | ||
367 | pr_warning("Could not create debugfs 'stack_trace' entry\n"); | ||
368 | 363 | ||
369 | if (stack_tracer_enabled) | 364 | if (stack_tracer_enabled) |
370 | register_ftrace_function(&trace_ops); | 365 | register_ftrace_function(&trace_ops); |
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c index acdebd771a93..fdde3a4a94cd 100644 --- a/kernel/trace/trace_stat.c +++ b/kernel/trace/trace_stat.c | |||
@@ -85,7 +85,7 @@ static int stat_seq_init(struct tracer_stat_session *session) | |||
85 | if (!ts->stat_cmp) | 85 | if (!ts->stat_cmp) |
86 | ts->stat_cmp = dummy_cmp; | 86 | ts->stat_cmp = dummy_cmp; |
87 | 87 | ||
88 | stat = ts->stat_start(); | 88 | stat = ts->stat_start(ts); |
89 | if (!stat) | 89 | if (!stat) |
90 | goto exit; | 90 | goto exit; |
91 | 91 | ||
diff --git a/kernel/trace/trace_stat.h b/kernel/trace/trace_stat.h index 202274cf7f3d..f3546a2cd826 100644 --- a/kernel/trace/trace_stat.h +++ b/kernel/trace/trace_stat.h | |||
@@ -12,7 +12,7 @@ struct tracer_stat { | |||
12 | /* The name of your stat file */ | 12 | /* The name of your stat file */ |
13 | const char *name; | 13 | const char *name; |
14 | /* Iteration over statistic entries */ | 14 | /* Iteration over statistic entries */ |
15 | void *(*stat_start)(void); | 15 | void *(*stat_start)(struct tracer_stat *trace); |
16 | void *(*stat_next)(void *prev, int idx); | 16 | void *(*stat_next)(void *prev, int idx); |
17 | /* Compare two entries for stats sorting */ | 17 | /* Compare two entries for stats sorting */ |
18 | int (*stat_cmp)(void *p1, void *p2); | 18 | int (*stat_cmp)(void *p1, void *p2); |
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c index 91fd19c2149f..e04b76cc238a 100644 --- a/kernel/trace/trace_sysprof.c +++ b/kernel/trace/trace_sysprof.c | |||
@@ -321,11 +321,7 @@ static const struct file_operations sysprof_sample_fops = { | |||
321 | 321 | ||
322 | void init_tracer_sysprof_debugfs(struct dentry *d_tracer) | 322 | void init_tracer_sysprof_debugfs(struct dentry *d_tracer) |
323 | { | 323 | { |
324 | struct dentry *entry; | ||
325 | 324 | ||
326 | entry = debugfs_create_file("sysprof_sample_period", 0644, | 325 | trace_create_file("sysprof_sample_period", 0644, |
327 | d_tracer, NULL, &sysprof_sample_fops); | 326 | d_tracer, NULL, &sysprof_sample_fops); |
328 | if (entry) | ||
329 | return; | ||
330 | pr_warning("Could not create debugfs 'sysprof_sample_period' entry\n"); | ||
331 | } | 327 | } |
diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c index 797201e4a137..984b9175c13d 100644 --- a/kernel/trace/trace_workqueue.c +++ b/kernel/trace/trace_workqueue.c | |||
@@ -152,7 +152,7 @@ static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu) | |||
152 | return ret; | 152 | return ret; |
153 | } | 153 | } |
154 | 154 | ||
155 | static void *workqueue_stat_start(void) | 155 | static void *workqueue_stat_start(struct tracer_stat *trace) |
156 | { | 156 | { |
157 | int cpu; | 157 | int cpu; |
158 | void *ret = NULL; | 158 | void *ret = NULL; |