Diffstat (limited to 'kernel/trace')
 kernel/trace/Kconfig                  |  1 -
 kernel/trace/ftrace.c                 | 84 ++++++++++++++++++++++++++--------
 kernel/trace/ring_buffer.c            |  1 +
 kernel/trace/trace.c                  |  3 +-
 kernel/trace/trace.h                  |  2 --
 kernel/trace/trace_branch.c           |  4 +-
 kernel/trace/trace_functions_return.c | 15 ++-----
 7 files changed, 74 insertions(+), 36 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 9c89526b6b7..b8378fad29a 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -59,7 +59,6 @@ config FUNCTION_TRACER
 
 config FUNCTION_RET_TRACER
 	bool "Kernel Function return Tracer"
-	depends on !DYNAMIC_FTRACE
 	depends on HAVE_FUNCTION_RET_TRACER
 	depends on FUNCTION_TRACER
 	help
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b42ec1de546..f212da48668 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -50,6 +50,9 @@ static int last_ftrace_enabled;
 /* Quick disabling of function tracer. */
 int function_trace_stop;
 
+/* By default, current tracing type is normal tracing. */
+enum ftrace_tracing_type_t ftrace_tracing_type = FTRACE_TYPE_ENTER;
+
 /*
  * ftrace_disabled is set when an anomaly is discovered.
  * ftrace_disabled is much stronger than ftrace_enabled.
@@ -385,12 +388,21 @@ static void ftrace_bug(int failed, unsigned long ip)
 	}
 }
 
-#define FTRACE_ADDR ((long)(ftrace_caller))
 
 static int
 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 {
 	unsigned long ip, fl;
+	unsigned long ftrace_addr;
+
+#ifdef CONFIG_FUNCTION_RET_TRACER
+	if (ftrace_tracing_type == FTRACE_TYPE_ENTER)
+		ftrace_addr = (unsigned long)ftrace_caller;
+	else
+		ftrace_addr = (unsigned long)ftrace_return_caller;
+#else
+	ftrace_addr = (unsigned long)ftrace_caller;
+#endif
 
 	ip = rec->ip;
 
@@ -450,9 +462,9 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 	}
 
 	if (rec->flags & FTRACE_FL_ENABLED)
-		return ftrace_make_call(rec, FTRACE_ADDR);
+		return ftrace_make_call(rec, ftrace_addr);
 	else
-		return ftrace_make_nop(NULL, rec, FTRACE_ADDR);
+		return ftrace_make_nop(NULL, rec, ftrace_addr);
 }
 
 static void ftrace_replace_code(int enable)
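
A note on the two hunks above: with dynamic ftrace, every traced call site is patched at runtime to jump to a single trampoline, so the destination can no longer be the compile-time FTRACE_ADDR constant; it has to be chosen from the active tracing type. A minimal standalone sketch of that selection, as a userspace model with illustrative names (these are not the kernel symbols):

        #include <stdio.h>

        /* Userspace model of __ftrace_replace_code()'s trampoline choice.
         * All names here are hypothetical stand-ins. */
        enum tracing_type { TYPE_ENTER, TYPE_RETURN };

        static void entry_trampoline(void)  { puts("function entry hook"); }
        static void return_trampoline(void) { puts("function return hook"); }

        /* Pick the address every patched call site should jump to. */
        static void (*pick_trampoline(enum tracing_type type))(void)
        {
                return type == TYPE_ENTER ? entry_trampoline
                                          : return_trampoline;
        }

        int main(void)
        {
                void (*site)(void) = pick_trampoline(TYPE_RETURN);
                site();	/* prints "function return hook" */
                return 0;
        }
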
@@ -682,7 +694,7 @@ static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
 
 	cnt = num_to_init / ENTRIES_PER_PAGE;
 	pr_info("ftrace: allocating %ld entries in %d pages\n",
-		num_to_init, cnt);
+		num_to_init, cnt + 1);
 
 	for (i = 0; i < cnt; i++) {
 		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
@@ -765,13 +777,11 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 	void *p = NULL;
 	loff_t l = -1;
 
-	if (*pos != iter->pos) {
-		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
-			;
-	} else {
-		l = *pos;
-		p = t_next(m, p, &l);
-	}
+	if (*pos > iter->pos)
+		*pos = iter->pos;
+
+	l = *pos;
+	p = t_next(m, p, &l);
 
 	return p;
 }
@@ -782,15 +792,21 @@ static void t_stop(struct seq_file *m, void *p)
 
 static int t_show(struct seq_file *m, void *v)
 {
+	struct ftrace_iterator *iter = m->private;
 	struct dyn_ftrace *rec = v;
 	char str[KSYM_SYMBOL_LEN];
+	int ret = 0;
 
 	if (!rec)
 		return 0;
 
 	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
 
-	seq_printf(m, "%s\n", str);
+	ret = seq_printf(m, "%s\n", str);
+	if (ret < 0) {
+		iter->pos--;
+		iter->idx--;
+	}
 
 	return 0;
 }
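
The t_start() and t_show() hunks work together: t_start() now derives its position directly from *pos, and t_show() steps the iterator back when seq_printf() fails, so a record that did not fit in the seq_file buffer is retried on the next, larger read. A rough standalone model of that rewind-on-overflow idea (hypothetical names, not the seq_file API):

        #include <stdio.h>
        #include <string.h>

        struct emitter {
                char buf[32];
                size_t used;
                int pos;	/* index of the next record to emit */
        };

        static const char *records[] = { "func_a", "func_b", NULL };

        /* next() advances the cursor; show() rewinds it if the record
         * did not fit, so the same record is retried later. */
        static const char *next(struct emitter *e)
        {
                const char *rec = records[e->pos];
                if (rec)
                        e->pos++;
                return rec;
        }

        static int show(struct emitter *e, const char *rec)
        {
                size_t len = strlen(rec) + 1;	/* record plus newline */

                if (e->used + len > sizeof(e->buf)) {
                        e->pos--;	/* mirror of iter->pos-- in t_show() */
                        return -1;
                }
                e->used += (size_t)sprintf(e->buf + e->used, "%s\n", rec);
                return 0;
        }

        int main(void)
        {
                struct emitter e = { .used = 0, .pos = 0 };
                const char *r;

                while ((r = next(&e)) && show(&e, r) == 0)
                        ;
                printf("%s", e.buf);	/* records that fit this round */
                return 0;
        }
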
@@ -816,7 +832,7 @@ ftrace_avail_open(struct inode *inode, struct file *file)
 		return -ENOMEM;
 
 	iter->pg = ftrace_pages_start;
-	iter->pos = -1;
+	iter->pos = 0;
 
 	ret = seq_open(file, &show_ftrace_seq_ops);
 	if (!ret) {
@@ -903,7 +919,7 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
 
 	if (file->f_mode & FMODE_READ) {
 		iter->pg = ftrace_pages_start;
-		iter->pos = -1;
+		iter->pos = 0;
 		iter->flags = enable ? FTRACE_ITER_FILTER :
 			FTRACE_ITER_NOTRACE;
 
@@ -1405,10 +1421,17 @@ int register_ftrace_function(struct ftrace_ops *ops)
 		return -1;
 
 	mutex_lock(&ftrace_sysctl_lock);
+
+	if (ftrace_tracing_type == FTRACE_TYPE_RETURN) {
+		ret = -EBUSY;
+		goto out;
+	}
+
 	ret = __register_ftrace_function(ops);
 	ftrace_startup();
-	mutex_unlock(&ftrace_sysctl_lock);
 
+out:
+	mutex_unlock(&ftrace_sysctl_lock);
 	return ret;
 }
 
@@ -1474,16 +1497,45 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 }
 
 #ifdef CONFIG_FUNCTION_RET_TRACER
+
+/* The callback that hooks the return of a function */
 trace_function_return_t ftrace_function_return =
 			(trace_function_return_t)ftrace_stub;
-void register_ftrace_return(trace_function_return_t func)
+
+int register_ftrace_return(trace_function_return_t func)
 {
+	int ret = 0;
+
+	mutex_lock(&ftrace_sysctl_lock);
+
+	/*
+	 * Don't launch return tracing if normal function
+	 * tracing is already running.
+	 */
+	if (ftrace_trace_function != ftrace_stub) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	ftrace_tracing_type = FTRACE_TYPE_RETURN;
 	ftrace_function_return = func;
+	ftrace_startup();
+
+out:
+	mutex_unlock(&ftrace_sysctl_lock);
+	return ret;
 }
 
 void unregister_ftrace_return(void)
 {
+	mutex_lock(&ftrace_sysctl_lock);
+
 	ftrace_function_return = (trace_function_return_t)ftrace_stub;
+	ftrace_shutdown();
+	/* Restore normal tracing type */
+	ftrace_tracing_type = FTRACE_TYPE_ENTER;
+
+	mutex_unlock(&ftrace_sysctl_lock);
 }
 #endif
 
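
The registration paths above make entry tracing and return tracing mutually exclusive: register_ftrace_function() bails out with -EBUSY while return tracing owns the call sites, and register_ftrace_return() refuses to start while a normal tracer is installed. A compact userspace sketch of that handshake, with a pthread mutex standing in for ftrace_sysctl_lock and all names illustrative:

        #include <errno.h>
        #include <stdio.h>
        #include <pthread.h>

        enum tracing_type { TYPE_ENTER, TYPE_RETURN };

        static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
        static enum tracing_type current_type = TYPE_ENTER;
        static int entry_tracer_installed;

        /* Mirrors register_ftrace_function(): refuse while return
         * tracing is active. */
        int register_entry_tracer(void)
        {
                int ret = 0;

                pthread_mutex_lock(&lock);
                if (current_type == TYPE_RETURN)
                        ret = -EBUSY;
                else
                        entry_tracer_installed = 1;
                pthread_mutex_unlock(&lock);
                return ret;
        }

        /* Mirrors register_ftrace_return(): refuse while an entry
         * tracer is installed. */
        int register_return_tracer(void)
        {
                int ret = 0;

                pthread_mutex_lock(&lock);
                if (entry_tracer_installed)
                        ret = -EBUSY;
                else
                        current_type = TYPE_RETURN;
                pthread_mutex_unlock(&lock);
                return ret;
        }

        int main(void)
        {
                printf("return tracer: %d\n", register_return_tracer()); /* 0 */
                printf("entry tracer:  %d\n", register_entry_tracer());  /* -EBUSY */
                return 0;
        }
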
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index caa4fda50f8..85ced143c2c 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -622,6 +622,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 		list_del_init(&page->list);
 		free_buffer_page(page);
 	}
+	mutex_unlock(&buffer->mutex);
 	return -ENOMEM;
 }
 
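
The one-line ring_buffer fix above closes a lock leak: the -ENOMEM path returned with buffer->mutex still held, so the next resize would block forever. The general shape of the bug and the fix, as a standalone sketch with a pthread mutex as a stand-in:

        #include <errno.h>
        #include <stdlib.h>
        #include <pthread.h>

        static pthread_mutex_t buffer_mutex = PTHREAD_MUTEX_INITIALIZER;

        /* Every exit from the locked region must release the lock; the
         * error path below is the one the patch fixes in
         * ring_buffer_resize(). */
        int resize(size_t nr_pages)
        {
                pthread_mutex_lock(&buffer_mutex);

                void *pages = calloc(nr_pages, 4096);
                if (!pages) {
                        pthread_mutex_unlock(&buffer_mutex); /* was missing */
                        return -ENOMEM;
                }

                /* ... install the new pages ... */
                free(pages);
                pthread_mutex_unlock(&buffer_mutex);
                return 0;
        }
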
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 24b6238884f..5653c6b07ba 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -259,9 +259,7 @@ static const char *trace_options[] = {
259 "sched-tree", 259 "sched-tree",
260 "ftrace_printk", 260 "ftrace_printk",
261 "ftrace_preempt", 261 "ftrace_preempt",
262#ifdef CONFIG_BRANCH_TRACER
263 "branch", 262 "branch",
264#endif
265 "annotate", 263 "annotate",
266 NULL 264 NULL
267}; 265};
@@ -2189,6 +2187,7 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
 		ring_buffer_read_finish(iter->buffer_iter[cpu]);
 	}
 	mutex_unlock(&trace_types_lock);
+	kfree(iter);
 
 	return ERR_PTR(-ENOMEM);
 }
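
Same pattern as the ring_buffer fix, but for memory: __tracing_open()'s failure path unlocked and returned without freeing the iterator it had allocated. The usual kernel idiom is to unwind in reverse order of acquisition; a minimal sketch (illustrative names only):

        #include <stdlib.h>

        struct iter { void *buffer; };

        /* The added kfree(iter) in the hunk corresponds to the free()
         * on the fail path below. */
        struct iter *open_iter(size_t bufsize)
        {
                struct iter *it = malloc(sizeof(*it));
                if (!it)
                        return NULL;

                it->buffer = malloc(bufsize);
                if (!it->buffer)
                        goto fail;

                return it;

        fail:
                free(it);	/* the previously leaked allocation */
                return NULL;
        }
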
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index cdbd5cc22be..37947f6b92b 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -471,9 +471,7 @@ enum trace_iterator_flags {
 	TRACE_ITER_SCHED_TREE	= 0x200,
 	TRACE_ITER_PRINTK	= 0x400,
 	TRACE_ITER_PREEMPTONLY	= 0x800,
-#ifdef CONFIG_BRANCH_TRACER
 	TRACE_ITER_BRANCH	= 0x1000,
-#endif
 	TRACE_ITER_ANNOTATE	= 0x2000,
 };
 
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 44bd39539d6..23f9b02ce96 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -41,7 +41,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	if (unlikely(!tr))
 		return;
 
-	local_irq_save(flags);
+	raw_local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
 		goto out;
@@ -73,7 +73,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 
  out:
 	atomic_dec(&tr->data[cpu]->disabled);
-	local_irq_restore(flags);
+	raw_local_irq_restore(flags);
 }
 
 static inline
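
Why the raw_ variants: local_irq_save() is itself instrumented in this era of the tree (its irq-tracing hooks can contain likely()/unlikely() annotations), so calling it from the branch-tracer probe can recurse straight back into probe_likely_condition(). The raw_ primitives skip that instrumentation, and the per-cpu disabled counter in the hunk is the backstop. A toy model of that guard (names are illustrative):

        #include <stdio.h>

        /* Models atomic_inc_return(&tr->data[cpu]->disabled) != 1. */
        static int disabled;

        static void probe(const char *origin)
        {
                if (++disabled != 1) {
                        /* re-entered from our own tracing path: drop it */
                        --disabled;
                        return;
                }
                printf("branch event from %s\n", origin);
                --disabled;
        }

        int main(void)
        {
                probe("traced code");	/* normal, single-entry case */
                return 0;
        }
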
diff --git a/kernel/trace/trace_functions_return.c b/kernel/trace/trace_functions_return.c
index 61185f756a1..a68564af022 100644
--- a/kernel/trace/trace_functions_return.c
+++ b/kernel/trace/trace_functions_return.c
@@ -14,29 +14,18 @@
 #include "trace.h"
 
 
-static void start_return_trace(struct trace_array *tr)
-{
-	register_ftrace_return(&trace_function_return);
-}
-
-static void stop_return_trace(struct trace_array *tr)
-{
-	unregister_ftrace_return();
-}
-
 static int return_trace_init(struct trace_array *tr)
 {
 	int cpu;
 	for_each_online_cpu(cpu)
 		tracing_reset(tr, cpu);
 
-	start_return_trace(tr);
-	return 0;
+	return register_ftrace_return(&trace_function_return);
 }
 
 static void return_trace_reset(struct trace_array *tr)
 {
-	stop_return_trace(tr);
+	unregister_ftrace_return();
 }
 
 