path: root/kernel/trace/ftrace.c
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--  kernel/trace/ftrace.c | 808
1 file changed, 731 insertions(+), 77 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f1ed080406c3..3718d55fb4c3 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -29,11 +29,13 @@
 #include <linux/list.h>
 #include <linux/hash.h>
 
-#include <trace/sched.h>
+#include <trace/events/sched.h>
 
 #include <asm/ftrace.h>
+#include <asm/setup.h>
 
-#include "trace.h"
+#include "trace_output.h"
+#include "trace_stat.h"
 
 #define FTRACE_WARN_ON(cond)			\
 	do {					\
@@ -68,7 +70,7 @@ static DEFINE_MUTEX(ftrace_lock);
 
 static struct ftrace_ops ftrace_list_end __read_mostly =
 {
-	.func = ftrace_stub,
+	.func = ftrace_stub,
 };
 
 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
@@ -240,6 +242,580 @@ static void ftrace_update_pid_func(void)
 #endif
 }
 
245#ifdef CONFIG_FUNCTION_PROFILER
246struct ftrace_profile {
247 struct hlist_node node;
248 unsigned long ip;
249 unsigned long counter;
250#ifdef CONFIG_FUNCTION_GRAPH_TRACER
251 unsigned long long time;
252#endif
253};
254
255struct ftrace_profile_page {
256 struct ftrace_profile_page *next;
257 unsigned long index;
258 struct ftrace_profile records[];
259};
260
261struct ftrace_profile_stat {
262 atomic_t disabled;
263 struct hlist_head *hash;
264 struct ftrace_profile_page *pages;
265 struct ftrace_profile_page *start;
266 struct tracer_stat stat;
267};
268
269#define PROFILE_RECORDS_SIZE \
270 (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
271
272#define PROFILES_PER_PAGE \
273 (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
274
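
A quick illustrative sizing of these two macros (not part of the patch; the numbers assume 4 KB pages and a 64-bit build):

/*
 * sizeof(struct ftrace_profile)                 ~= 40 bytes with CONFIG_FUNCTION_GRAPH_TRACER
 *                                                  (hlist_node + ip + counter + time), ~32 without it
 * offsetof(struct ftrace_profile_page, records)  = 16 bytes (next + index)
 * PROFILE_RECORDS_SIZE = 4096 - 16 = 4080
 * PROFILES_PER_PAGE    = 4080 / 40 = 102 records per page (127 without the time field)
 */
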
275static int ftrace_profile_bits __read_mostly;
276static int ftrace_profile_enabled __read_mostly;
277
278/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
279static DEFINE_MUTEX(ftrace_profile_lock);
280
281static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
282
283#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
284
285static void *
286function_stat_next(void *v, int idx)
287{
288 struct ftrace_profile *rec = v;
289 struct ftrace_profile_page *pg;
290
291 pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
292
293 again:
294 rec++;
295 if ((void *)rec >= (void *)&pg->records[pg->index]) {
296 pg = pg->next;
297 if (!pg)
298 return NULL;
299 rec = &pg->records[0];
300 if (!rec->counter)
301 goto again;
302 }
303
304 return rec;
305}
306
307static void *function_stat_start(struct tracer_stat *trace)
308{
309 struct ftrace_profile_stat *stat =
310 container_of(trace, struct ftrace_profile_stat, stat);
311
312 if (!stat || !stat->start)
313 return NULL;
314
315 return function_stat_next(&stat->start->records[0], 0);
316}
317
318#ifdef CONFIG_FUNCTION_GRAPH_TRACER
319/* function graph compares on total time */
320static int function_stat_cmp(void *p1, void *p2)
321{
322 struct ftrace_profile *a = p1;
323 struct ftrace_profile *b = p2;
324
325 if (a->time < b->time)
326 return -1;
327 if (a->time > b->time)
328 return 1;
329 else
330 return 0;
331}
332#else
333/* without the function graph tracer, compare against hits */
334static int function_stat_cmp(void *p1, void *p2)
335{
336 struct ftrace_profile *a = p1;
337 struct ftrace_profile *b = p2;
338
339 if (a->counter < b->counter)
340 return -1;
341 if (a->counter > b->counter)
342 return 1;
343 else
344 return 0;
345}
346#endif
347
348static int function_stat_headers(struct seq_file *m)
349{
350#ifdef CONFIG_FUNCTION_GRAPH_TRACER
351 seq_printf(m, " Function "
352 "Hit Time Avg\n"
353 " -------- "
354 "--- ---- ---\n");
355#else
356 seq_printf(m, " Function Hit\n"
357 " -------- ---\n");
358#endif
359 return 0;
360}
361
362static int function_stat_show(struct seq_file *m, void *v)
363{
364 struct ftrace_profile *rec = v;
365 char str[KSYM_SYMBOL_LEN];
366#ifdef CONFIG_FUNCTION_GRAPH_TRACER
367 static DEFINE_MUTEX(mutex);
368 static struct trace_seq s;
369 unsigned long long avg;
370#endif
371
372 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
373 seq_printf(m, " %-30.30s %10lu", str, rec->counter);
374
375#ifdef CONFIG_FUNCTION_GRAPH_TRACER
376 seq_printf(m, " ");
377 avg = rec->time;
378 do_div(avg, rec->counter);
379
380 mutex_lock(&mutex);
381 trace_seq_init(&s);
382 trace_print_graph_duration(rec->time, &s);
383 trace_seq_puts(&s, " ");
384 trace_print_graph_duration(avg, &s);
385 trace_print_seq(m, &s);
386 mutex_unlock(&mutex);
387#endif
388 seq_putc(m, '\n');
389
390 return 0;
391}
392
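
With made-up values, a row produced by the format strings above would look roughly like this on a graph-tracer build (the two duration columns come from trace_print_graph_duration, and avg = time / counter):

  schedule                            12345    4567.123 us    0.370 us
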
393static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
394{
395 struct ftrace_profile_page *pg;
396
397 pg = stat->pages = stat->start;
398
399 while (pg) {
400 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
401 pg->index = 0;
402 pg = pg->next;
403 }
404
405 memset(stat->hash, 0,
406 FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
407}
408
409int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
410{
411 struct ftrace_profile_page *pg;
412 int functions;
413 int pages;
414 int i;
415
416 /* If we already allocated, do nothing */
417 if (stat->pages)
418 return 0;
419
420 stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
421 if (!stat->pages)
422 return -ENOMEM;
423
424#ifdef CONFIG_DYNAMIC_FTRACE
425 functions = ftrace_update_tot_cnt;
426#else
427 /*
428 * We do not know the number of functions that exist because
429 * dynamic tracing is what counts them. With past experience
430 * we have around 20K functions. That should be more than enough.
431 * It is highly unlikely we will execute every function in
432 * the kernel.
433 */
434 functions = 20000;
435#endif
436
437 pg = stat->start = stat->pages;
438
439 pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
440
441 for (i = 0; i < pages; i++) {
442 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
443 if (!pg->next)
444 goto out_free;
445 pg = pg->next;
446 }
447
448 return 0;
449
450 out_free:
451 pg = stat->start;
452 while (pg) {
453 unsigned long tmp = (unsigned long)pg;
454
455 pg = pg->next;
456 free_page(tmp);
457 }
458
459 free_page((unsigned long)stat->pages);
460 stat->pages = NULL;
461 stat->start = NULL;
462
463 return -ENOMEM;
464}
465
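
Continuing the illustrative 64-bit numbers from above, the worst-case footprint of this preallocation works out to roughly:

/*
 * functions = 20000                     (the CONFIG_DYNAMIC_FTRACE=n estimate)
 * pages     = DIV_ROUND_UP(20000, 102)  = 197 extra pages, plus the one
 *             allocated up front, i.e. about 800 KB of records per CPU
 */
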
466static int ftrace_profile_init_cpu(int cpu)
467{
468 struct ftrace_profile_stat *stat;
469 int size;
470
471 stat = &per_cpu(ftrace_profile_stats, cpu);
472
473 if (stat->hash) {
474 /* If the profile is already created, simply reset it */
475 ftrace_profile_reset(stat);
476 return 0;
477 }
478
479 /*
480 * We are profiling all functions, but usually only a few thousand
481 * functions are hit. We'll make a hash of 1024 items.
482 */
483 size = FTRACE_PROFILE_HASH_SIZE;
484
485 stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
486
487 if (!stat->hash)
488 return -ENOMEM;
489
490 if (!ftrace_profile_bits) {
491 size--;
492
493 for (; size; size >>= 1)
494 ftrace_profile_bits++;
495 }
496
497 /* Preallocate the function profiling pages */
498 if (ftrace_profile_pages_init(stat) < 0) {
499 kfree(stat->hash);
500 stat->hash = NULL;
501 return -ENOMEM;
502 }
503
504 return 0;
505}
506
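
The shift loop above simply computes the number of hash bits; tracing it with the actual constant:

/*
 * FTRACE_PROFILE_HASH_SIZE = 1024, so size-- leaves 1023 (0x3ff);
 * ten right shifts reach zero, hence ftrace_profile_bits = 10, and
 * hash_long(ip, 10) used below yields bucket indexes 0..1023.
 */
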
507static int ftrace_profile_init(void)
508{
509 int cpu;
510 int ret = 0;
511
512 for_each_online_cpu(cpu) {
513 ret = ftrace_profile_init_cpu(cpu);
514 if (ret)
515 break;
516 }
517
518 return ret;
519}
520
521/* interrupts must be disabled */
522static struct ftrace_profile *
523ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
524{
525 struct ftrace_profile *rec;
526 struct hlist_head *hhd;
527 struct hlist_node *n;
528 unsigned long key;
529
530 key = hash_long(ip, ftrace_profile_bits);
531 hhd = &stat->hash[key];
532
533 if (hlist_empty(hhd))
534 return NULL;
535
536 hlist_for_each_entry_rcu(rec, n, hhd, node) {
537 if (rec->ip == ip)
538 return rec;
539 }
540
541 return NULL;
542}
543
544static void ftrace_add_profile(struct ftrace_profile_stat *stat,
545 struct ftrace_profile *rec)
546{
547 unsigned long key;
548
549 key = hash_long(rec->ip, ftrace_profile_bits);
550 hlist_add_head_rcu(&rec->node, &stat->hash[key]);
551}
552
553/*
554 * The memory is already allocated; this simply finds a new record to use.
555 */
556static struct ftrace_profile *
557ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
558{
559 struct ftrace_profile *rec = NULL;
560
561 /* prevent recursion (from NMIs) */
562 if (atomic_inc_return(&stat->disabled) != 1)
563 goto out;
564
565 /*
566 * Try to find the function again since an NMI
567 * could have added it
568 */
569 rec = ftrace_find_profiled_func(stat, ip);
570 if (rec)
571 goto out;
572
573 if (stat->pages->index == PROFILES_PER_PAGE) {
574 if (!stat->pages->next)
575 goto out;
576 stat->pages = stat->pages->next;
577 }
578
579 rec = &stat->pages->records[stat->pages->index++];
580 rec->ip = ip;
581 ftrace_add_profile(stat, rec);
582
583 out:
584 atomic_dec(&stat->disabled);
585
586 return rec;
587}
588
589static void
590function_profile_call(unsigned long ip, unsigned long parent_ip)
591{
592 struct ftrace_profile_stat *stat;
593 struct ftrace_profile *rec;
594 unsigned long flags;
595
596 if (!ftrace_profile_enabled)
597 return;
598
599 local_irq_save(flags);
600
601 stat = &__get_cpu_var(ftrace_profile_stats);
602 if (!stat->hash || !ftrace_profile_enabled)
603 goto out;
604
605 rec = ftrace_find_profiled_func(stat, ip);
606 if (!rec) {
607 rec = ftrace_profile_alloc(stat, ip);
608 if (!rec)
609 goto out;
610 }
611
612 rec->counter++;
613 out:
614 local_irq_restore(flags);
615}
616
617#ifdef CONFIG_FUNCTION_GRAPH_TRACER
618static int profile_graph_entry(struct ftrace_graph_ent *trace)
619{
620 function_profile_call(trace->func, 0);
621 return 1;
622}
623
624static void profile_graph_return(struct ftrace_graph_ret *trace)
625{
626 struct ftrace_profile_stat *stat;
627 unsigned long long calltime;
628 struct ftrace_profile *rec;
629 unsigned long flags;
630
631 local_irq_save(flags);
632 stat = &__get_cpu_var(ftrace_profile_stats);
633 if (!stat->hash || !ftrace_profile_enabled)
634 goto out;
635
636 calltime = trace->rettime - trace->calltime;
637
638 if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
639 int index;
640
641 index = trace->depth;
642
643 /* Append this call time to the parent time to subtract */
644 if (index)
645 current->ret_stack[index - 1].subtime += calltime;
646
647 if (current->ret_stack[index].subtime < calltime)
648 calltime -= current->ret_stack[index].subtime;
649 else
650 calltime = 0;
651 }
652
653 rec = ftrace_find_profiled_func(stat, trace->func);
654 if (rec)
655 rec->time += calltime;
656
657 out:
658 local_irq_restore(flags);
659}
660
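
A hypothetical timeline (illustrative numbers only) for the subtime bookkeeping above, with TRACE_ITER_GRAPH_TIME cleared:

/*
 * Parent P runs 10 us and calls child C, which runs 4 us.  When C returns,
 * its 4 us calltime is added to ret_stack[C_depth - 1].subtime, i.e. to P's
 * frame.  When P later returns, calltime = 10 us - 4 us = 6 us, so P's
 * record is charged only for the time spent in its own body.
 */
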
661static int register_ftrace_profiler(void)
662{
663 return register_ftrace_graph(&profile_graph_return,
664 &profile_graph_entry);
665}
666
667static void unregister_ftrace_profiler(void)
668{
669 unregister_ftrace_graph();
670}
671#else
672static struct ftrace_ops ftrace_profile_ops __read_mostly =
673{
674 .func = function_profile_call,
675};
676
677static int register_ftrace_profiler(void)
678{
679 return register_ftrace_function(&ftrace_profile_ops);
680}
681
682static void unregister_ftrace_profiler(void)
683{
684 unregister_ftrace_function(&ftrace_profile_ops);
685}
686#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
687
688static ssize_t
689ftrace_profile_write(struct file *filp, const char __user *ubuf,
690 size_t cnt, loff_t *ppos)
691{
692 unsigned long val;
693 char buf[64]; /* big enough to hold a number */
694 int ret;
695
696 if (cnt >= sizeof(buf))
697 return -EINVAL;
698
699 if (copy_from_user(&buf, ubuf, cnt))
700 return -EFAULT;
701
702 buf[cnt] = 0;
703
704 ret = strict_strtoul(buf, 10, &val);
705 if (ret < 0)
706 return ret;
707
708 val = !!val;
709
710 mutex_lock(&ftrace_profile_lock);
711 if (ftrace_profile_enabled ^ val) {
712 if (val) {
713 ret = ftrace_profile_init();
714 if (ret < 0) {
715 cnt = ret;
716 goto out;
717 }
718
719 ret = register_ftrace_profiler();
720 if (ret < 0) {
721 cnt = ret;
722 goto out;
723 }
724 ftrace_profile_enabled = 1;
725 } else {
726 ftrace_profile_enabled = 0;
727 /*
728 * unregister_ftrace_profiler calls stop_machine
729 * so this acts like a synchronize_sched.
730 */
731 unregister_ftrace_profiler();
732 }
733 }
734 out:
735 mutex_unlock(&ftrace_profile_lock);
736
737 filp->f_pos += cnt;
738
739 return cnt;
740}
741
742static ssize_t
743ftrace_profile_read(struct file *filp, char __user *ubuf,
744 size_t cnt, loff_t *ppos)
745{
746 char buf[64]; /* big enough to hold a number */
747 int r;
748
749 r = sprintf(buf, "%u\n", ftrace_profile_enabled);
750 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
751}
752
753static const struct file_operations ftrace_profile_fops = {
754 .open = tracing_open_generic,
755 .read = ftrace_profile_read,
756 .write = ftrace_profile_write,
757};
758
759/* used to initialize the real stat files */
760static struct tracer_stat function_stats __initdata = {
761 .name = "functions",
762 .stat_start = function_stat_start,
763 .stat_next = function_stat_next,
764 .stat_cmp = function_stat_cmp,
765 .stat_headers = function_stat_headers,
766 .stat_show = function_stat_show
767};
768
769static void ftrace_profile_debugfs(struct dentry *d_tracer)
770{
771 struct ftrace_profile_stat *stat;
772 struct dentry *entry;
773 char *name;
774 int ret;
775 int cpu;
776
777 for_each_possible_cpu(cpu) {
778 stat = &per_cpu(ftrace_profile_stats, cpu);
779
780 /* allocate enough for function name + cpu number */
781 name = kmalloc(32, GFP_KERNEL);
782 if (!name) {
783 /*
784 * The files created are permanent; if something goes wrong
785 * we still do not free the memory.
786 */
787 kfree(stat);
788 WARN(1,
789 "Could not allocate stat file for cpu %d\n",
790 cpu);
791 return;
792 }
793 stat->stat = function_stats;
794 snprintf(name, 32, "function%d", cpu);
795 stat->stat.name = name;
796 ret = register_stat_tracer(&stat->stat);
797 if (ret) {
798 WARN(1,
799 "Could not register function stat for cpu %d\n",
800 cpu);
801 kfree(name);
802 return;
803 }
804 }
805
806 entry = debugfs_create_file("function_profile_enabled", 0644,
807 d_tracer, NULL, &ftrace_profile_fops);
808 if (!entry)
809 pr_warning("Could not create debugfs "
810 "'function_profile_enabled' entry\n");
811}
812
813#else /* CONFIG_FUNCTION_PROFILER */
814static void ftrace_profile_debugfs(struct dentry *d_tracer)
815{
816}
817#endif /* CONFIG_FUNCTION_PROFILER */
818
 /* set when tracing only a pid */
 struct pid *ftrace_pid_trace;
 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
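
For reference, the profiler added above is driven entirely through debugfs; assuming debugfs is mounted at /sys/kernel/debug and CONFIG_FUNCTION_PROFILER is set, usage looks roughly like:

  echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
  cat /sys/kernel/debug/tracing/trace_stat/function0    (one "function%d" file per CPU, as registered above)
  echo 0 > /sys/kernel/debug/tracing/function_profile_enabled
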
@@ -261,7 +837,6 @@ struct ftrace_func_probe {
 	struct rcu_head rcu;
 };
 
-
 enum {
 	FTRACE_ENABLE_CALLS = (1 << 0),
 	FTRACE_DISABLE_CALLS = (1 << 1),
@@ -346,30 +921,6 @@ static void ftrace_free_rec(struct dyn_ftrace *rec)
 	rec->flags |= FTRACE_FL_FREE;
 }
 
-void ftrace_release(void *start, unsigned long size)
-{
-	struct dyn_ftrace *rec;
-	struct ftrace_page *pg;
-	unsigned long s = (unsigned long)start;
-	unsigned long e = s + size;
-
-	if (ftrace_disabled || !start)
-		return;
-
-	mutex_lock(&ftrace_lock);
-	do_for_each_ftrace_rec(pg, rec) {
-		if ((rec->ip >= s) && (rec->ip < e)) {
-			/*
-			 * rec->ip is changed in ftrace_free_rec()
-			 * It should not between s and e if record was freed.
-			 */
-			FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
-			ftrace_free_rec(rec);
-		}
-	} while_for_each_ftrace_rec();
-	mutex_unlock(&ftrace_lock);
-}
-
 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
 {
 	struct dyn_ftrace *rec;
@@ -673,6 +1224,13 @@ static void ftrace_shutdown(int command)
 		return;
 
 	ftrace_start_up--;
+	/*
+	 * Just warn in case of unbalance; no need to kill ftrace, it's not
+	 * critical, but the ftrace_call callers may never be nopped again
+	 * after further ftrace uses.
+	 */
+	WARN_ON_ONCE(ftrace_start_up < 0);
+
 	if (!ftrace_start_up)
 		command |= FTRACE_DISABLE_CALLS;
 
@@ -1408,7 +1966,7 @@ function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
 
 static struct ftrace_ops trace_probe_ops __read_mostly =
 {
-	.func = function_trace_probe_call,
+	.func = function_trace_probe_call,
 };
 
 static int ftrace_probe_registered;
@@ -1823,6 +2381,45 @@ void ftrace_set_notrace(unsigned char *buf, int len, int reset)
 	ftrace_set_regex(buf, len, reset, 0);
 }
 
+/*
+ * command line interface to allow users to set filters on boot up.
+ */
+#define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
+static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
+static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
+
+static int __init set_ftrace_notrace(char *str)
+{
+	strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
+	return 1;
+}
+__setup("ftrace_notrace=", set_ftrace_notrace);
+
+static int __init set_ftrace_filter(char *str)
+{
+	strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
+	return 1;
+}
+__setup("ftrace_filter=", set_ftrace_filter);
+
+static void __init set_ftrace_early_filter(char *buf, int enable)
+{
+	char *func;
+
+	while (buf) {
+		func = strsep(&buf, ",");
+		ftrace_set_regex(func, strlen(func), 0, enable);
+	}
+}
+
+static void __init set_ftrace_early_filters(void)
+{
+	if (ftrace_filter_buf[0])
+		set_ftrace_early_filter(ftrace_filter_buf, 1);
+	if (ftrace_notrace_buf[0])
+		set_ftrace_early_filter(ftrace_notrace_buf, 0);
+}
+
 static int
 ftrace_regex_release(struct inode *inode, struct file *file, int enable)
 {
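
The two __setup() handlers added above expose the filters on the kernel command line; an illustrative boot line (the function names are only placeholders) would be:

  ftrace_filter=schedule,vfs_read ftrace_notrace=do_IRQ

set_ftrace_early_filters() then applies both lists from ftrace_init(), as wired up further down in this patch.
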
@@ -2128,38 +2725,23 @@ static const struct file_operations ftrace_graph_fops = {
 
 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
 {
-	struct dentry *entry;
 
-	entry = debugfs_create_file("available_filter_functions", 0444,
-				    d_tracer, NULL, &ftrace_avail_fops);
-	if (!entry)
-		pr_warning("Could not create debugfs "
-			   "'available_filter_functions' entry\n");
+	trace_create_file("available_filter_functions", 0444,
+			d_tracer, NULL, &ftrace_avail_fops);
 
-	entry = debugfs_create_file("failures", 0444,
-				    d_tracer, NULL, &ftrace_failures_fops);
-	if (!entry)
-		pr_warning("Could not create debugfs 'failures' entry\n");
+	trace_create_file("failures", 0444,
+			d_tracer, NULL, &ftrace_failures_fops);
 
-	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
-				    NULL, &ftrace_filter_fops);
-	if (!entry)
-		pr_warning("Could not create debugfs "
-			   "'set_ftrace_filter' entry\n");
+	trace_create_file("set_ftrace_filter", 0644, d_tracer,
+			NULL, &ftrace_filter_fops);
 
-	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
-				    NULL, &ftrace_notrace_fops);
-	if (!entry)
-		pr_warning("Could not create debugfs "
-			   "'set_ftrace_notrace' entry\n");
+	trace_create_file("set_ftrace_notrace", 0644, d_tracer,
+			NULL, &ftrace_notrace_fops);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
-				    NULL,
-				    &ftrace_graph_fops);
-	if (!entry)
-		pr_warning("Could not create debugfs "
-			   "'set_graph_function' entry\n");
+	trace_create_file("set_graph_function", 0444, d_tracer,
+			NULL,
+			&ftrace_graph_fops);
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 	return 0;
@@ -2197,14 +2779,72 @@ static int ftrace_convert_nops(struct module *mod,
 	return 0;
 }
 
-void ftrace_init_module(struct module *mod,
-			unsigned long *start, unsigned long *end)
+#ifdef CONFIG_MODULES
+void ftrace_release(void *start, void *end)
+{
+	struct dyn_ftrace *rec;
+	struct ftrace_page *pg;
+	unsigned long s = (unsigned long)start;
+	unsigned long e = (unsigned long)end;
+
+	if (ftrace_disabled || !start || start == end)
+		return;
+
+	mutex_lock(&ftrace_lock);
+	do_for_each_ftrace_rec(pg, rec) {
+		if ((rec->ip >= s) && (rec->ip < e)) {
+			/*
+			 * rec->ip is changed in ftrace_free_rec()
+			 * It should not be between s and e if the record was freed.
+			 */
+			FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
+			ftrace_free_rec(rec);
+		}
+	} while_for_each_ftrace_rec();
+	mutex_unlock(&ftrace_lock);
+}
+
+static void ftrace_init_module(struct module *mod,
+			       unsigned long *start, unsigned long *end)
 {
 	if (ftrace_disabled || start == end)
 		return;
 	ftrace_convert_nops(mod, start, end);
 }
 
+static int ftrace_module_notify(struct notifier_block *self,
+				unsigned long val, void *data)
+{
+	struct module *mod = data;
+
+	switch (val) {
+	case MODULE_STATE_COMING:
+		ftrace_init_module(mod, mod->ftrace_callsites,
+				   mod->ftrace_callsites +
+				   mod->num_ftrace_callsites);
+		break;
+	case MODULE_STATE_GOING:
+		ftrace_release(mod->ftrace_callsites,
+			       mod->ftrace_callsites +
+			       mod->num_ftrace_callsites);
+		break;
+	}
+
+	return 0;
+}
+#else
+static int ftrace_module_notify(struct notifier_block *self,
+				unsigned long val, void *data)
+{
+	return 0;
+}
+#endif /* CONFIG_MODULES */
+
+struct notifier_block ftrace_module_nb = {
+	.notifier_call = ftrace_module_notify,
+	.priority = 0,
+};
+
 extern unsigned long __start_mcount_loc[];
 extern unsigned long __stop_mcount_loc[];
 
@@ -2236,6 +2876,12 @@ void __init ftrace_init(void)
 				  __start_mcount_loc,
 				  __stop_mcount_loc);
 
+	ret = register_module_notifier(&ftrace_module_nb);
+	if (ret)
+		pr_warning("Failed to register trace ftrace module notifier\n");
+
+	set_ftrace_early_filters();
+
 	return;
  failed:
 	ftrace_disabled = 1;
@@ -2417,7 +3063,6 @@ static const struct file_operations ftrace_pid_fops = {
 static __init int ftrace_init_debugfs(void)
 {
 	struct dentry *d_tracer;
-	struct dentry *entry;
 
 	d_tracer = tracing_init_dentry();
 	if (!d_tracer)
@@ -2425,11 +3070,11 @@ static __init int ftrace_init_debugfs(void)
 
 	ftrace_init_dyn_debugfs(d_tracer);
 
-	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
-				    NULL, &ftrace_pid_fops);
-	if (!entry)
-		pr_warning("Could not create debugfs "
-			   "'set_ftrace_pid' entry\n");
+	trace_create_file("set_ftrace_pid", 0644, d_tracer,
+			    NULL, &ftrace_pid_fops);
+
+	ftrace_profile_debugfs(d_tracer);
+
 	return 0;
 }
 fs_initcall(ftrace_init_debugfs);
@@ -2538,7 +3183,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
-static atomic_t ftrace_graph_active;
+static int ftrace_graph_active;
 static struct notifier_block ftrace_suspend_notifier;
 
 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
@@ -2580,12 +3225,12 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
 		}
 
 		if (t->ret_stack == NULL) {
-			t->curr_ret_stack = -1;
-			/* Make sure IRQs see the -1 first: */
-			barrier();
-			t->ret_stack = ret_stack_list[start++];
 			atomic_set(&t->tracing_graph_pause, 0);
 			atomic_set(&t->trace_overrun, 0);
+			t->curr_ret_stack = -1;
+			/* Make sure the tasks see the -1 first: */
+			smp_wmb();
+			t->ret_stack = ret_stack_list[start++];
 		}
 	} while_each_thread(g, t);
 
@@ -2643,8 +3288,10 @@ static int start_graph_tracing(void)
 		return -ENOMEM;
 
 	/* The cpu_boot init_task->ret_stack will never be freed */
-	for_each_online_cpu(cpu)
-		ftrace_graph_init_task(idle_task(cpu));
+	for_each_online_cpu(cpu) {
+		if (!idle_task(cpu)->ret_stack)
+			ftrace_graph_init_task(idle_task(cpu));
+	}
 
 	do {
 		ret = alloc_retstack_tasklist(ret_stack_list);
@@ -2690,7 +3337,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 	mutex_lock(&ftrace_lock);
 
 	/* we currently allow only one tracer registered at a time */
-	if (atomic_read(&ftrace_graph_active)) {
+	if (ftrace_graph_active) {
 		ret = -EBUSY;
 		goto out;
 	}
@@ -2698,10 +3345,10 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
 	register_pm_notifier(&ftrace_suspend_notifier);
 
-	atomic_inc(&ftrace_graph_active);
+	ftrace_graph_active++;
 	ret = start_graph_tracing();
 	if (ret) {
-		atomic_dec(&ftrace_graph_active);
+		ftrace_graph_active--;
 		goto out;
 	}
 
@@ -2719,10 +3366,10 @@ void unregister_ftrace_graph(void)
 {
 	mutex_lock(&ftrace_lock);
 
-	if (!unlikely(atomic_read(&ftrace_graph_active)))
+	if (unlikely(!ftrace_graph_active))
 		goto out;
 
-	atomic_dec(&ftrace_graph_active);
+	ftrace_graph_active--;
 	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
 	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
 	ftrace_graph_entry = ftrace_graph_entry_stub;
@@ -2736,18 +3383,25 @@ void unregister_ftrace_graph(void)
 /* Allocate a return stack for newly created task */
 void ftrace_graph_init_task(struct task_struct *t)
 {
-	if (atomic_read(&ftrace_graph_active)) {
-		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
+	/* Make sure we do not use the parent ret_stack */
+	t->ret_stack = NULL;
+
+	if (ftrace_graph_active) {
+		struct ftrace_ret_stack *ret_stack;
+
+		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
 				* sizeof(struct ftrace_ret_stack),
 				GFP_KERNEL);
-		if (!t->ret_stack)
+		if (!ret_stack)
 			return;
 		t->curr_ret_stack = -1;
 		atomic_set(&t->tracing_graph_pause, 0);
 		atomic_set(&t->trace_overrun, 0);
 		t->ftrace_timestamp = 0;
-	} else
-		t->ret_stack = NULL;
+		/* make curr_ret_stack visible before we add the ret_stack */
+		smp_wmb();
+		t->ret_stack = ret_stack;
+	}
 }
 
 void ftrace_graph_exit_task(struct task_struct *t)