path: root/kernel/trace/ftrace.c
Diffstat (limited to 'kernel/trace/ftrace.c')
 -rw-r--r--  kernel/trace/ftrace.c | 584
 1 file changed, 453 insertions(+), 131 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 78db083390f0..08b536a2614e 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -47,6 +47,12 @@
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
 
+/* ftrace_pid_trace >= 0 will only trace threads with this pid */
+static int ftrace_pid_trace = -1;
+
+/* Quick disabling of function tracer. */
+int function_trace_stop;
+
 /*
  * ftrace_disabled is set when an anomaly is discovered.
  * ftrace_disabled is much stronger than ftrace_enabled.
@@ -55,6 +61,7 @@ static int ftrace_disabled __read_mostly;
 
 static DEFINE_SPINLOCK(ftrace_lock);
 static DEFINE_MUTEX(ftrace_sysctl_lock);
+static DEFINE_MUTEX(ftrace_start_lock);
 
 static struct ftrace_ops ftrace_list_end __read_mostly =
 {
@@ -63,6 +70,8 @@ static struct ftrace_ops ftrace_list_end __read_mostly =
 
 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
+ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
+ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 
 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 {
@@ -79,6 +88,21 @@ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 	};
 }
 
+static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
+{
+	if (current->pid != ftrace_pid_trace)
+		return;
+
+	ftrace_pid_function(ip, parent_ip);
+}
+
+static void set_ftrace_pid_function(ftrace_func_t func)
+{
+	/* do not set ftrace_pid_function to itself! */
+	if (func != ftrace_pid_func)
+		ftrace_pid_function = func;
+}
+
 /**
  * clear_ftrace_function - reset the ftrace function
  *
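The two helpers just added are the whole pid-filter mechanism: when a pid is chosen, the live callback is swapped for ftrace_pid_func(), which bails out unless current->pid matches, while the real handler is parked in ftrace_pid_function. A minimal userspace sketch of that indirection (the names mirror the kernel's, but everything here is a mock):

    #include <stdio.h>

    typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);

    static int ftrace_pid_trace = -1;   /* mock: -1 means trace everyone */
    static int current_pid = 42;        /* mock of current->pid */

    static void real_tracer(unsigned long ip, unsigned long parent_ip)
    {
        printf("traced ip=%#lx parent=%#lx\n", ip, parent_ip);
    }

    /* mock of ftrace_pid_function: the handler being wrapped */
    static ftrace_func_t ftrace_pid_function = real_tracer;

    /* the wrapper installed as the live callback when a pid is chosen */
    static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
    {
        if (current_pid != ftrace_pid_trace)
            return;
        ftrace_pid_function(ip, parent_ip);
    }

    int main(void)
    {
        ftrace_func_t trace = ftrace_pid_func;

        ftrace_pid_trace = 1;           /* filter set to pid 1: call is dropped */
        trace(0x1000, 0x2000);

        ftrace_pid_trace = current_pid; /* filter matches: call goes through */
        trace(0x1000, 0x2000);
        return 0;
    }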
@@ -88,7 +112,23 @@ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 void clear_ftrace_function(void)
 {
 	ftrace_trace_function = ftrace_stub;
+	__ftrace_trace_function = ftrace_stub;
+	ftrace_pid_function = ftrace_stub;
+}
+
+#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+/*
+ * For those archs that do not test ftrace_trace_stop in their
+ * mcount call site, we need to do it from C.
+ */
+static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
+{
+	if (function_trace_stop)
+		return;
+
+	__ftrace_trace_function(ip, parent_ip);
 }
+#endif
 
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
@@ -106,14 +146,28 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	ftrace_list = ops;
 
 	if (ftrace_enabled) {
+		ftrace_func_t func;
+
+		if (ops->next == &ftrace_list_end)
+			func = ops->func;
+		else
+			func = ftrace_list_func;
+
+		if (ftrace_pid_trace >= 0) {
+			set_ftrace_pid_function(func);
+			func = ftrace_pid_func;
+		}
+
 		/*
 		 * For one func, simply call it directly.
 		 * For more than one func, call the chain.
 		 */
-		if (ops->next == &ftrace_list_end)
-			ftrace_trace_function = ops->func;
-		else
-			ftrace_trace_function = ftrace_list_func;
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+		ftrace_trace_function = func;
+#else
+		__ftrace_trace_function = func;
+		ftrace_trace_function = ftrace_test_stop_func;
+#endif
 	}
 
 	spin_unlock(&ftrace_lock);
@@ -152,9 +206,19 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 
 	if (ftrace_enabled) {
 		/* If we only have one func left, then call that directly */
-		if (ftrace_list == &ftrace_list_end ||
-		    ftrace_list->next == &ftrace_list_end)
-			ftrace_trace_function = ftrace_list->func;
+		if (ftrace_list->next == &ftrace_list_end) {
+			ftrace_func_t func = ftrace_list->func;
+
+			if (ftrace_pid_trace >= 0) {
+				set_ftrace_pid_function(func);
+				func = ftrace_pid_func;
+			}
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+			ftrace_trace_function = func;
+#else
+			__ftrace_trace_function = func;
+#endif
+		}
 	}
 
  out:
@@ -163,6 +227,38 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	return ret;
 }
 
+static void ftrace_update_pid_func(void)
+{
+	ftrace_func_t func;
+
+	/* should not be called from interrupt context */
+	spin_lock(&ftrace_lock);
+
+	if (ftrace_trace_function == ftrace_stub)
+		goto out;
+
+	func = ftrace_trace_function;
+
+	if (ftrace_pid_trace >= 0) {
+		set_ftrace_pid_function(func);
+		func = ftrace_pid_func;
+	} else {
+		if (func != ftrace_pid_func)
+			goto out;
+
+		set_ftrace_pid_function(func);
+	}
+
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+	ftrace_trace_function = func;
+#else
+	__ftrace_trace_function = func;
+#endif
+
+ out:
+	spin_unlock(&ftrace_lock);
+}
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
 # error Dynamic ftrace depends on MCOUNT_RECORD
@@ -182,6 +278,8 @@ enum {
 	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
 	FTRACE_ENABLE_MCOUNT		= (1 << 3),
 	FTRACE_DISABLE_MCOUNT		= (1 << 4),
+	FTRACE_START_FUNC_RET		= (1 << 5),
+	FTRACE_STOP_FUNC_RET		= (1 << 6),
 };
 
 static int ftrace_filtered;
@@ -308,7 +406,7 @@ ftrace_record_ip(unsigned long ip)
 {
 	struct dyn_ftrace *rec;
 
-	if (!ftrace_enabled || ftrace_disabled)
+	if (ftrace_disabled)
 		return NULL;
 
 	rec = ftrace_alloc_dyn_node(ip);
@@ -322,14 +420,51 @@ ftrace_record_ip(unsigned long ip)
 	return rec;
 }
 
-#define FTRACE_ADDR ((long)(ftrace_caller))
+static void print_ip_ins(const char *fmt, unsigned char *p)
+{
+	int i;
+
+	printk(KERN_CONT "%s", fmt);
+
+	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
+		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
+}
+
+static void ftrace_bug(int failed, unsigned long ip)
+{
+	switch (failed) {
+	case -EFAULT:
+		FTRACE_WARN_ON_ONCE(1);
+		pr_info("ftrace faulted on modifying ");
+		print_ip_sym(ip);
+		break;
+	case -EINVAL:
+		FTRACE_WARN_ON_ONCE(1);
+		pr_info("ftrace failed to modify ");
+		print_ip_sym(ip);
+		print_ip_ins(" actual: ", (unsigned char *)ip);
+		printk(KERN_CONT "\n");
+		break;
+	case -EPERM:
+		FTRACE_WARN_ON_ONCE(1);
+		pr_info("ftrace faulted on writing ");
+		print_ip_sym(ip);
+		break;
+	default:
+		FTRACE_WARN_ON_ONCE(1);
+		pr_info("ftrace faulted on unknown error ");
+		print_ip_sym(ip);
+	}
+}
+
 
 static int
-__ftrace_replace_code(struct dyn_ftrace *rec,
-		      unsigned char *nop, int enable)
+__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 {
 	unsigned long ip, fl;
-	unsigned char *call, *old, *new;
+	unsigned long ftrace_addr;
+
+	ftrace_addr = (unsigned long)ftrace_caller;
 
 	ip = rec->ip;
 
@@ -388,34 +523,28 @@ __ftrace_replace_code(struct dyn_ftrace *rec,
 		}
 	}
 
-	call = ftrace_call_replace(ip, FTRACE_ADDR);
-
-	if (rec->flags & FTRACE_FL_ENABLED) {
-		old = nop;
-		new = call;
-	} else {
-		old = call;
-		new = nop;
-	}
-
-	return ftrace_modify_code(ip, old, new);
+	if (rec->flags & FTRACE_FL_ENABLED)
+		return ftrace_make_call(rec, ftrace_addr);
+	else
+		return ftrace_make_nop(NULL, rec, ftrace_addr);
 }
 
 static void ftrace_replace_code(int enable)
 {
 	int i, failed;
-	unsigned char *nop = NULL;
 	struct dyn_ftrace *rec;
 	struct ftrace_page *pg;
 
-	nop = ftrace_nop_replace();
-
 	for (pg = ftrace_pages_start; pg; pg = pg->next) {
 		for (i = 0; i < pg->index; i++) {
 			rec = &pg->records[i];
 
-			/* don't modify code that has already faulted */
-			if (rec->flags & FTRACE_FL_FAILED)
+			/*
+			 * Skip over free records and records that have
+			 * failed.
+			 */
+			if (rec->flags & FTRACE_FL_FREE ||
+			    rec->flags & FTRACE_FL_FAILED)
 				continue;
 
 			/* ignore updates to this record's mcount site */
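With the raw byte-patching (ftrace_modify_code() fed expected/replacement buffers) pushed down into the per-architecture hooks ftrace_make_call() and ftrace_make_nop(), the core only sees an errno, and ftrace_bug() turns it into a diagnostic: -EFAULT means the site could not be read, -EINVAL means the bytes there were not what the arch expected, -EPERM means the write itself failed. A compilable sketch of that contract (a mock, not any real architecture's implementation):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    #define MCOUNT_INSN_SIZE 5

    /* a real arch would use a fault-tolerant copy here */
    static int probe_read(void *dst, const void *src, size_t n)
    {
        memcpy(dst, src, n);
        return 0;
    }

    /* 0 on success; -EFAULT: unreadable; -EINVAL: unexpected bytes;
     * -EPERM: the write failed */
    static int mock_modify_code(unsigned char *ip,
                                const unsigned char *expect,
                                const unsigned char *replace)
    {
        unsigned char cur[MCOUNT_INSN_SIZE];

        if (probe_read(cur, ip, MCOUNT_INSN_SIZE))
            return -EFAULT;
        if (memcmp(cur, expect, MCOUNT_INSN_SIZE) != 0)
            return -EINVAL;
        memcpy(ip, replace, MCOUNT_INSN_SIZE);
        return 0;
    }

    int main(void)
    {
        unsigned char site[MCOUNT_INSN_SIZE]   = { 0xe8, 1, 2, 3, 4 };
        unsigned char expect[MCOUNT_INSN_SIZE] = { 0xe8, 1, 2, 3, 4 };
        unsigned char nop[MCOUNT_INSN_SIZE]    = { 0x90, 0x90, 0x90, 0x90, 0x90 };
        int ret = mock_modify_code(site, expect, nop);

        printf("patch %s\n", ret ? "failed" : "ok");
        return ret ? 1 : 0;
    }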
@@ -426,68 +555,30 @@ static void ftrace_replace_code(int enable)
 				unfreeze_record(rec);
 			}
 
-			failed = __ftrace_replace_code(rec, nop, enable);
+			failed = __ftrace_replace_code(rec, enable);
 			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
 				rec->flags |= FTRACE_FL_FAILED;
 				if ((system_state == SYSTEM_BOOTING) ||
 				    !core_kernel_text(rec->ip)) {
 					ftrace_free_rec(rec);
-				}
+				} else
+					ftrace_bug(failed, rec->ip);
 			}
 		}
 	}
 }
 
-static void print_ip_ins(const char *fmt, unsigned char *p)
-{
-	int i;
-
-	printk(KERN_CONT "%s", fmt);
-
-	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
-		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
-}
-
 static int
-ftrace_code_disable(struct dyn_ftrace *rec)
+ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
 {
 	unsigned long ip;
-	unsigned char *nop, *call;
 	int ret;
 
 	ip = rec->ip;
 
-	nop = ftrace_nop_replace();
-	call = ftrace_call_replace(ip, mcount_addr);
-
-	ret = ftrace_modify_code(ip, call, nop);
+	ret = ftrace_make_nop(mod, rec, mcount_addr);
 	if (ret) {
-		switch (ret) {
-		case -EFAULT:
-			FTRACE_WARN_ON_ONCE(1);
-			pr_info("ftrace faulted on modifying ");
-			print_ip_sym(ip);
-			break;
-		case -EINVAL:
-			FTRACE_WARN_ON_ONCE(1);
-			pr_info("ftrace failed to modify ");
-			print_ip_sym(ip);
-			print_ip_ins(" expected: ", call);
-			print_ip_ins(" actual: ", (unsigned char *)ip);
-			print_ip_ins(" replace: ", nop);
-			printk(KERN_CONT "\n");
-			break;
-		case -EPERM:
-			FTRACE_WARN_ON_ONCE(1);
-			pr_info("ftrace faulted on writing ");
-			print_ip_sym(ip);
-			break;
-		default:
-			FTRACE_WARN_ON_ONCE(1);
-			pr_info("ftrace faulted on unknown error ");
-			print_ip_sym(ip);
-		}
-
+		ftrace_bug(ret, ip);
 		rec->flags |= FTRACE_FL_FAILED;
 		return 0;
 	}
@@ -506,6 +597,11 @@ static int __ftrace_modify_code(void *data)
 	if (*command & FTRACE_UPDATE_TRACE_FUNC)
 		ftrace_update_ftrace_func(ftrace_trace_function);
 
+	if (*command & FTRACE_START_FUNC_RET)
+		ftrace_enable_ftrace_graph_caller();
+	else if (*command & FTRACE_STOP_FUNC_RET)
+		ftrace_disable_ftrace_graph_caller();
+
 	return 0;
 }
 
@@ -515,43 +611,43 @@ static void ftrace_run_update_code(int command)
 }
 
 static ftrace_func_t saved_ftrace_func;
-static int ftrace_start;
-static DEFINE_MUTEX(ftrace_start_lock);
+static int ftrace_start_up;
 
-static void ftrace_startup(void)
+static void ftrace_startup_enable(int command)
 {
-	int command = 0;
-
-	if (unlikely(ftrace_disabled))
-		return;
-
-	mutex_lock(&ftrace_start_lock);
-	ftrace_start++;
-	command |= FTRACE_ENABLE_CALLS;
-
 	if (saved_ftrace_func != ftrace_trace_function) {
 		saved_ftrace_func = ftrace_trace_function;
 		command |= FTRACE_UPDATE_TRACE_FUNC;
 	}
 
 	if (!command || !ftrace_enabled)
-		goto out;
+		return;
 
 	ftrace_run_update_code(command);
- out:
-	mutex_unlock(&ftrace_start_lock);
 }
 
-static void ftrace_shutdown(void)
+static void ftrace_startup(int command)
 {
-	int command = 0;
+	if (unlikely(ftrace_disabled))
+		return;
+
+	mutex_lock(&ftrace_start_lock);
+	ftrace_start_up++;
+	command |= FTRACE_ENABLE_CALLS;
+
+	ftrace_startup_enable(command);
 
+	mutex_unlock(&ftrace_start_lock);
+}
+
+static void ftrace_shutdown(int command)
+{
 	if (unlikely(ftrace_disabled))
 		return;
 
 	mutex_lock(&ftrace_start_lock);
-	ftrace_start--;
-	if (!ftrace_start)
+	ftrace_start_up--;
+	if (!ftrace_start_up)
 		command |= FTRACE_DISABLE_CALLS;
 
 	if (saved_ftrace_func != ftrace_trace_function) {
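ftrace_startup()/ftrace_shutdown() are reference counted: only the 0-to-1 and 1-to-0 transitions of ftrace_start_up actually patch call sites, and the new command argument lets callers piggyback extra work (such as FTRACE_START_FUNC_RET) onto the same update pass. The refcount idea in isolation, as a small runnable mock:

    #include <stdio.h>

    static int start_up;    /* counterpart of ftrace_start_up */

    static void run_update_code(const char *cmd)
    {
        printf("patching: %s\n", cmd);  /* stands in for stop_machine() */
    }

    static void startup(void)
    {
        if (++start_up == 1)
            run_update_code("enable calls");
    }

    static void shutdown(void)
    {
        if (--start_up == 0)
            run_update_code("disable calls");
    }

    int main(void)
    {
        startup();   /* first user: the switch flips */
        startup();   /* second user: nothing to do */
        shutdown();  /* one user left: nothing to do */
        shutdown();  /* last user: the switch flips back */
        return 0;
    }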
@@ -577,8 +673,8 @@ static void ftrace_startup_sysctl(void)
 	mutex_lock(&ftrace_start_lock);
 	/* Force update next time */
 	saved_ftrace_func = NULL;
-	/* ftrace_start is true if we want ftrace running */
-	if (ftrace_start)
+	/* ftrace_start_up is true if we want ftrace running */
+	if (ftrace_start_up)
 		command |= FTRACE_ENABLE_CALLS;
 
 	ftrace_run_update_code(command);
@@ -593,8 +689,8 @@ static void ftrace_shutdown_sysctl(void)
 		return;
 
 	mutex_lock(&ftrace_start_lock);
-	/* ftrace_start is true if ftrace is running */
-	if (ftrace_start)
+	/* ftrace_start_up is true if ftrace is running */
+	if (ftrace_start_up)
 		command |= FTRACE_DISABLE_CALLS;
 
 	ftrace_run_update_code(command);
@@ -605,7 +701,7 @@ static cycle_t ftrace_update_time;
 static unsigned long ftrace_update_cnt;
 unsigned long ftrace_update_tot_cnt;
 
-static int ftrace_update_code(void)
+static int ftrace_update_code(struct module *mod)
 {
 	struct dyn_ftrace *p, *t;
 	cycle_t start, stop;
@@ -622,7 +718,7 @@ static int ftrace_update_code(void)
 		list_del_init(&p->list);
 
 		/* convert record (i.e, patch mcount-call with NOP) */
-		if (ftrace_code_disable(p)) {
+		if (ftrace_code_disable(mod, p)) {
 			p->flags |= FTRACE_FL_CONVERTED;
 			ftrace_update_cnt++;
 		} else
@@ -690,7 +786,6 @@ enum {
 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
 
 struct ftrace_iterator {
-	loff_t			pos;
 	struct ftrace_page	*pg;
 	unsigned		idx;
 	unsigned		flags;
@@ -715,6 +810,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 			iter->pg = iter->pg->next;
 			iter->idx = 0;
 			goto retry;
+		} else {
+			iter->idx = -1;
 		}
 	} else {
 		rec = &iter->pg->records[iter->idx++];
@@ -737,8 +834,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 	}
 	spin_unlock(&ftrace_lock);
 
-	iter->pos = *pos;
-
 	return rec;
 }
 
@@ -746,13 +841,15 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 {
 	struct ftrace_iterator *iter = m->private;
 	void *p = NULL;
-	loff_t l = -1;
 
-	if (*pos > iter->pos)
-		*pos = iter->pos;
+	if (*pos > 0) {
+		if (iter->idx < 0)
+			return p;
+		(*pos)--;
+		iter->idx--;
+	}
 
-	l = *pos;
-	p = t_next(m, p, &l);
+	p = t_next(m, p, pos);
 
 	return p;
 }
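The iterator rework drops the private iter->pos bookkeeping and leans on the seq_file protocol instead: seq_file itself detects that a record overflowed the output buffer and re-invokes ->start with the position of the record that did not fit, so t_start() only has to step *pos and iter->idx back by one to replay it. A toy model of that replay logic, with seq_file mocked away:

    #include <stdio.h>

    static const char *records[] = { "alpha", "beta", "gamma" };
    static int idx;    /* mock of iter->idx */

    static const char *next_rec(long *pos)
    {
        if (idx < 0 || idx >= 3)
            return NULL;
        (*pos)++;
        return records[idx++];
    }

    /* mock of the new t_start(): on re-entry (pos > 0), back up one record
     * so the entry that overflowed last time is emitted again */
    static const char *start(long *pos)
    {
        if (*pos > 0) {
            if (idx < 0)
                return NULL;
            (*pos)--;
            idx--;
        }
        return next_rec(pos);
    }

    int main(void)
    {
        long pos = 0;
        const char *r = start(&pos);   /* first pass: "alpha" */
        printf("%s\n", r);
        r = next_rec(&pos);            /* "beta" -- pretend it overflowed */
        r = start(&pos);               /* re-entry replays "beta" */
        printf("%s\n", r);
        return 0;
    }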
@@ -763,21 +860,15 @@ static void t_stop(struct seq_file *m, void *p)
 
 static int t_show(struct seq_file *m, void *v)
 {
-	struct ftrace_iterator *iter = m->private;
 	struct dyn_ftrace *rec = v;
 	char str[KSYM_SYMBOL_LEN];
-	int ret = 0;
 
 	if (!rec)
 		return 0;
 
 	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
 
-	ret = seq_printf(m, "%s\n", str);
-	if (ret < 0) {
-		iter->pos--;
-		iter->idx--;
-	}
+	seq_printf(m, "%s\n", str);
 
 	return 0;
 }
@@ -803,7 +894,6 @@ ftrace_avail_open(struct inode *inode, struct file *file)
 		return -ENOMEM;
 
 	iter->pg = ftrace_pages_start;
-	iter->pos = 0;
 
 	ret = seq_open(file, &show_ftrace_seq_ops);
 	if (!ret) {
@@ -890,7 +980,6 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
 
 	if (file->f_mode & FMODE_READ) {
 		iter->pg = ftrace_pages_start;
-		iter->pos = 0;
 		iter->flags = enable ? FTRACE_ITER_FILTER :
 			FTRACE_ITER_NOTRACE;
 
@@ -1181,7 +1270,7 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
 
 	mutex_lock(&ftrace_sysctl_lock);
 	mutex_lock(&ftrace_start_lock);
-	if (ftrace_start && ftrace_enabled)
+	if (ftrace_start_up && ftrace_enabled)
 		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
 	mutex_unlock(&ftrace_start_lock);
 	mutex_unlock(&ftrace_sysctl_lock);
@@ -1233,13 +1322,10 @@ static struct file_operations ftrace_notrace_fops = {
 	.release = ftrace_notrace_release,
 };
 
-static __init int ftrace_init_debugfs(void)
+static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
 {
-	struct dentry *d_tracer;
 	struct dentry *entry;
 
-	d_tracer = tracing_init_dentry();
-
 	entry = debugfs_create_file("available_filter_functions", 0444,
 				    d_tracer, NULL, &ftrace_avail_fops);
 	if (!entry)
@@ -1266,9 +1352,8 @@ static __init int ftrace_init_debugfs(void)
 	return 0;
 }
 
-fs_initcall(ftrace_init_debugfs);
-
-static int ftrace_convert_nops(unsigned long *start,
+static int ftrace_convert_nops(struct module *mod,
+			       unsigned long *start,
 			       unsigned long *end)
 {
 	unsigned long *p;
@@ -1279,23 +1364,32 @@ static int ftrace_convert_nops(unsigned long *start,
 	p = start;
 	while (p < end) {
 		addr = ftrace_call_adjust(*p++);
+		/*
+		 * Some architecture linkers will pad between
+		 * the different mcount_loc sections of different
+		 * object files to satisfy alignments.
+		 * Skip any NULL pointers.
+		 */
+		if (!addr)
+			continue;
 		ftrace_record_ip(addr);
 	}
 
 	/* disable interrupts to prevent kstop machine */
 	local_irq_save(flags);
-	ftrace_update_code();
+	ftrace_update_code(mod);
 	local_irq_restore(flags);
 	mutex_unlock(&ftrace_start_lock);
 
 	return 0;
 }
 
-void ftrace_init_module(unsigned long *start, unsigned long *end)
+void ftrace_init_module(struct module *mod,
+			unsigned long *start, unsigned long *end)
 {
 	if (ftrace_disabled || start == end)
 		return;
-	ftrace_convert_nops(start, end);
+	ftrace_convert_nops(mod, start, end);
 }
 
 extern unsigned long __start_mcount_loc[];
@@ -1325,7 +1419,8 @@ void __init ftrace_init(void)
 
 	last_ftrace_enabled = ftrace_enabled = 1;
 
-	ret = ftrace_convert_nops(__start_mcount_loc,
+	ret = ftrace_convert_nops(NULL,
+				  __start_mcount_loc,
 				  __stop_mcount_loc);
 
 	return;
@@ -1342,12 +1437,101 @@ static int __init ftrace_nodyn_init(void)
 }
 device_initcall(ftrace_nodyn_init);
 
-# define ftrace_startup()		do { } while (0)
-# define ftrace_shutdown()		do { } while (0)
+static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
+static inline void ftrace_startup_enable(int command) { }
+/* Keep as macros so we do not need to define the commands */
+# define ftrace_startup(command)	do { } while (0)
+# define ftrace_shutdown(command)	do { } while (0)
 # define ftrace_startup_sysctl()	do { } while (0)
 # define ftrace_shutdown_sysctl()	do { } while (0)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
+static ssize_t
+ftrace_pid_read(struct file *file, char __user *ubuf,
+		       size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+	int r;
+
+	if (ftrace_pid_trace >= 0)
+		r = sprintf(buf, "%u\n", ftrace_pid_trace);
+	else
+		r = sprintf(buf, "no pid\n");
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+ftrace_pid_write(struct file *filp, const char __user *ubuf,
+		   size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+	long val;
+	int ret;
+
+	if (cnt >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	buf[cnt] = 0;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&ftrace_start_lock);
+	if (val < 0) {
+		/* disable pid tracing */
+		if (ftrace_pid_trace < 0)
+			goto out;
+		ftrace_pid_trace = -1;
+
+	} else {
+
+		if (ftrace_pid_trace == val)
+			goto out;
+
+		ftrace_pid_trace = val;
+	}
+
+	/* update the function call */
+	ftrace_update_pid_func();
+	ftrace_startup_enable(0);
+
+ out:
+	mutex_unlock(&ftrace_start_lock);
+
+	return cnt;
+}
+
+static struct file_operations ftrace_pid_fops = {
+	.read = ftrace_pid_read,
+	.write = ftrace_pid_write,
+};
+
+static __init int ftrace_init_debugfs(void)
+{
+	struct dentry *d_tracer;
+	struct dentry *entry;
+
+	d_tracer = tracing_init_dentry();
+	if (!d_tracer)
+		return 0;
+
+	ftrace_init_dyn_debugfs(d_tracer);
+
+	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
+				    NULL, &ftrace_pid_fops);
+	if (!entry)
+		pr_warning("Could not create debugfs "
+			   "'set_ftrace_pid' entry\n");
+	return 0;
+}
+
+fs_initcall(ftrace_init_debugfs);
+
 /**
  * ftrace_kill - kill ftrace
  *
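From userspace the new knob is driven by writing a pid into the set_ftrace_pid file (a negative value disables pid filtering, per the write handler above). A small illustrative client in C; the mount point is an assumption, since debugfs is commonly mounted at /sys/kernel/debug but documentation of this era also used /debug:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        /* assumed debugfs mount point; adjust to your system */
        const char *path = "/sys/kernel/debug/tracing/set_ftrace_pid";
        char buf[32];
        int fd, n;

        if (argc != 2) {
            fprintf(stderr, "usage: %s <pid|-1>\n", argv[0]);
            return 1;
        }

        fd = open(path, O_WRONLY);
        if (fd < 0) {
            perror(path);
            return 1;
        }

        n = snprintf(buf, sizeof(buf), "%s\n", argv[1]);
        if (write(fd, buf, n) != n)
            perror("write");

        close(fd);
        return 0;
    }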
@@ -1381,10 +1565,11 @@ int register_ftrace_function(struct ftrace_ops *ops)
 		return -1;
 
 	mutex_lock(&ftrace_sysctl_lock);
+
 	ret = __register_ftrace_function(ops);
-	ftrace_startup();
-	mutex_unlock(&ftrace_sysctl_lock);
+	ftrace_startup(0);
 
+	mutex_unlock(&ftrace_sysctl_lock);
 	return ret;
 }
 
@@ -1400,7 +1585,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
 
 	mutex_lock(&ftrace_sysctl_lock);
 	ret = __unregister_ftrace_function(ops);
-	ftrace_shutdown();
+	ftrace_shutdown(0);
 	mutex_unlock(&ftrace_sysctl_lock);
 
 	return ret;
@@ -1449,3 +1634,140 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 	return ret;
 }
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+static atomic_t ftrace_graph_active;
+
+/* The callbacks that hook a function */
+trace_func_graph_ret_t ftrace_graph_return =
+			(trace_func_graph_ret_t)ftrace_stub;
+trace_func_graph_ent_t ftrace_graph_entry =
+			(trace_func_graph_ent_t)ftrace_stub;
+
+/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
+static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
+{
+	int i;
+	int ret = 0;
+	unsigned long flags;
+	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
+	struct task_struct *g, *t;
+
+	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
+		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
+					* sizeof(struct ftrace_ret_stack),
+					GFP_KERNEL);
+		if (!ret_stack_list[i]) {
+			start = 0;
+			end = i;
+			ret = -ENOMEM;
+			goto free;
+		}
+	}
+
+	read_lock_irqsave(&tasklist_lock, flags);
+	do_each_thread(g, t) {
+		if (start == end) {
+			ret = -EAGAIN;
+			goto unlock;
+		}
+
+		if (t->ret_stack == NULL) {
+			t->ret_stack = ret_stack_list[start++];
+			t->curr_ret_stack = -1;
+			atomic_set(&t->trace_overrun, 0);
+		}
+	} while_each_thread(g, t);
+
+unlock:
+	read_unlock_irqrestore(&tasklist_lock, flags);
+free:
+	for (i = start; i < end; i++)
+		kfree(ret_stack_list[i]);
+	return ret;
+}
+
+/* Allocate a return stack for each task */
+static int start_graph_tracing(void)
+{
+	struct ftrace_ret_stack **ret_stack_list;
+	int ret;
+
+	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
+				sizeof(struct ftrace_ret_stack *),
+				GFP_KERNEL);
+
+	if (!ret_stack_list)
+		return -ENOMEM;
+
+	do {
+		ret = alloc_retstack_tasklist(ret_stack_list);
+	} while (ret == -EAGAIN);
+
+	kfree(ret_stack_list);
+	return ret;
+}
+
+int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+			trace_func_graph_ent_t entryfunc)
+{
+	int ret = 0;
+
+	mutex_lock(&ftrace_sysctl_lock);
+
+	atomic_inc(&ftrace_graph_active);
+	ret = start_graph_tracing();
+	if (ret) {
+		atomic_dec(&ftrace_graph_active);
+		goto out;
+	}
+
+	ftrace_graph_return = retfunc;
+	ftrace_graph_entry = entryfunc;
+
+	ftrace_startup(FTRACE_START_FUNC_RET);
+
+out:
+	mutex_unlock(&ftrace_sysctl_lock);
+	return ret;
+}
+
+void unregister_ftrace_graph(void)
+{
+	mutex_lock(&ftrace_sysctl_lock);
+
+	atomic_dec(&ftrace_graph_active);
+	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
+	ftrace_graph_entry = (trace_func_graph_ent_t)ftrace_stub;
+	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
+
+	mutex_unlock(&ftrace_sysctl_lock);
+}
+
+/* Allocate a return stack for newly created task */
+void ftrace_graph_init_task(struct task_struct *t)
+{
+	if (atomic_read(&ftrace_graph_active)) {
+		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
+				* sizeof(struct ftrace_ret_stack),
+				GFP_KERNEL);
+		if (!t->ret_stack)
+			return;
+		t->curr_ret_stack = -1;
+		atomic_set(&t->trace_overrun, 0);
+	} else
+		t->ret_stack = NULL;
+}
+
+void ftrace_graph_exit_task(struct task_struct *t)
+{
+	struct ftrace_ret_stack	*ret_stack = t->ret_stack;
+
+	t->ret_stack = NULL;
+	/* NULL must become visible to IRQs before we free it: */
+	barrier();
+
+	kfree(ret_stack);
+}
+#endif
+
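A client of the new graph hooks registers a pair of callbacks and lets register_ftrace_graph() do the rest (per-task return-stack allocation plus ftrace_startup(FTRACE_START_FUNC_RET)). A hedged sketch of such a client as a module; the callback signatures are assumptions read off this series, and later kernels changed them (the entry hook eventually grew an int return):

    #include <linux/ftrace.h>
    #include <linux/module.h>

    static void my_graph_entry(struct ftrace_graph_ent *trace)
    {
        /* called on function entry; trace->func is the traced address */
    }

    static void my_graph_return(struct ftrace_graph_ret *trace)
    {
        /* called on function exit; rettime - calltime is the duration */
    }

    static int __init my_init(void)
    {
        /* note the argument order: return handler first, then entry */
        return register_ftrace_graph(my_graph_return, my_graph_entry);
    }

    static void __exit my_exit(void)
    {
        unregister_ftrace_graph();  /* pairs with FTRACE_STOP_FUNC_RET */
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");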