-rw-r--r--  arch/x86/Kconfig            |  1
-rw-r--r--  arch/x86/kernel/entry_32.S  |  6
-rw-r--r--  arch/x86/kernel/entry_64.S  |  5
-rw-r--r--  include/linux/ftrace.h      | 30
-rw-r--r--  kernel/trace/Kconfig        |  7
-rw-r--r--  kernel/trace/ftrace.c       | 47
6 files changed, 86 insertions(+), 10 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6f20718d3156..d09e812c6223 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -29,6 +29,7 @@ config X86
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FUNCTION_TRACER
+	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
 	select HAVE_ARCH_KGDB if !X86_VOYAGER
 	select HAVE_ARCH_TRACEHOOK
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 28b597ef9ca1..9134de814c97 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1157,6 +1157,9 @@ ENTRY(mcount)
 END(mcount)
 
 ENTRY(ftrace_caller)
+	cmpl $0, function_trace_stop
+	jne  ftrace_stub
+
 	pushl %eax
 	pushl %ecx
 	pushl %edx
@@ -1180,6 +1183,9 @@ END(ftrace_caller)
 #else /* ! CONFIG_DYNAMIC_FTRACE */
 
 ENTRY(mcount)
+	cmpl $0, function_trace_stop
+	jne  ftrace_stub
+
 	cmpl $ftrace_stub, ftrace_trace_function
 	jnz trace
 .globl ftrace_stub
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index b86f332c96a6..08aa6b10933c 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -68,6 +68,8 @@ ENTRY(mcount)
 END(mcount)
 
 ENTRY(ftrace_caller)
+	cmpl $0, function_trace_stop
+	jne  ftrace_stub
 
 	/* taken from glibc */
 	subq $0x38, %rsp
@@ -103,6 +105,9 @@ END(ftrace_caller)
 
 #else /* ! CONFIG_DYNAMIC_FTRACE */
 ENTRY(mcount)
+	cmpl $0, function_trace_stop
+	jne  ftrace_stub
+
 	cmpq $ftrace_stub, ftrace_trace_function
 	jnz trace
 .globl ftrace_stub
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 4642959e5bda..794ab907dbfe 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -23,6 +23,34 @@ struct ftrace_ops {
 	struct ftrace_ops		*next;
 };
 
+extern int function_trace_stop;
+
+/**
+ * ftrace_stop - stop function tracer.
+ *
+ * A quick way to stop the function tracer. Note this an on off switch,
+ * it is not something that is recursive like preempt_disable.
+ * This does not disable the calling of mcount, it only stops the
+ * calling of functions from mcount.
+ */
+static inline void ftrace_stop(void)
+{
+	function_trace_stop = 1;
+}
+
+/**
+ * ftrace_start - start the function tracer.
+ *
+ * This function is the inverse of ftrace_stop. This does not enable
+ * the function tracing if the function tracer is disabled. This only
+ * sets the function tracer flag to continue calling the functions
+ * from mcount.
+ */
+static inline void ftrace_start(void)
+{
+	function_trace_stop = 0;
+}
+
 /*
  * The ftrace_ops must be a static and should also
  * be read_mostly. These functions do modify read_mostly variables
@@ -41,6 +69,8 @@ extern void ftrace_stub(unsigned long a0, unsigned long a1);
 # define unregister_ftrace_function(ops) do { } while (0)
 # define clear_ftrace_function(ops) do { } while (0)
 static inline void ftrace_kill(void) { }
+static inline void ftrace_stop(void) { }
+static inline void ftrace_start(void) { }
 #endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_DYNAMIC_FTRACE
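
A minimal sketch of how a kernel-side caller might use the new pair (hypothetical usage, not part of this patch; my_quiesced_work() is an invented example function): bracket a section during which the trace callbacks must not run. mcount is still invoked, only the callback work is skipped while the flag is set.

#include <linux/ftrace.h>

/* Hypothetical example: pause tracing across a sensitive section. */
static void my_quiesced_work(void)
{
	ftrace_stop();		/* function_trace_stop = 1: callbacks return early */

	/* ... work that must not reach the trace callbacks ... */

	ftrace_start();		/* function_trace_stop = 0: tracing resumes */
}
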
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 33dbefd471e8..fc4febc3334a 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -9,6 +9,13 @@ config NOP_TRACER
 config HAVE_FUNCTION_TRACER
 	bool
 
+config HAVE_FUNCTION_TRACE_MCOUNT_TEST
+	bool
+	help
+	  This gets selected when the arch tests the function_trace_stop
+	  variable at the mcount call site. Otherwise, this variable
+	  is tested by the called function.
+
 config HAVE_DYNAMIC_FTRACE
 	bool
 
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 4a39d24568c8..896c71f0f4c4 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -47,6 +47,9 @@
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
 
+/* Quick disabling of function tracer. */
+int function_trace_stop;
+
 /*
  * ftrace_disabled is set when an anomaly is discovered.
  * ftrace_disabled is much stronger than ftrace_enabled.
@@ -63,6 +66,7 @@ static struct ftrace_ops ftrace_list_end __read_mostly =
 
 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
+ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
 
 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 {
@@ -88,8 +92,23 @@ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 void clear_ftrace_function(void)
 {
 	ftrace_trace_function = ftrace_stub;
+	__ftrace_trace_function = ftrace_stub;
 }
 
+#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+/*
+ * For those archs that do not test ftrace_trace_stop in their
+ * mcount call site, we need to do it from C.
+ */
+static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
+{
+	if (function_trace_stop)
+		return;
+
+	__ftrace_trace_function(ip, parent_ip);
+}
+#endif
+
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
 	/* should not be called from interrupt context */
@@ -110,10 +129,18 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	 * For one func, simply call it directly.
 	 * For more than one func, call the chain.
 	 */
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	if (ops->next == &ftrace_list_end)
 		ftrace_trace_function = ops->func;
 	else
 		ftrace_trace_function = ftrace_list_func;
+#else
+	if (ops->next == &ftrace_list_end)
+		__ftrace_trace_function = ops->func;
+	else
+		__ftrace_trace_function = ftrace_list_func;
+	ftrace_trace_function = ftrace_test_stop_func;
+#endif
 	}
 
 	spin_unlock(&ftrace_lock);
@@ -526,7 +553,7 @@ static void ftrace_run_update_code(int command)
 }
 
 static ftrace_func_t saved_ftrace_func;
-static int ftrace_start;
+static int ftrace_start_up;
 static DEFINE_MUTEX(ftrace_start_lock);
 
532static void ftrace_startup(void) 559static void ftrace_startup(void)
@@ -537,8 +564,8 @@ static void ftrace_startup(void)
 		return;
 
 	mutex_lock(&ftrace_start_lock);
-	ftrace_start++;
-	if (ftrace_start == 1)
+	ftrace_start_up++;
+	if (ftrace_start_up == 1)
 		command |= FTRACE_ENABLE_CALLS;
 
 	if (saved_ftrace_func != ftrace_trace_function) {
@@ -562,8 +589,8 @@ static void ftrace_shutdown(void)
 		return;
 
 	mutex_lock(&ftrace_start_lock);
-	ftrace_start--;
-	if (!ftrace_start)
+	ftrace_start_up--;
+	if (!ftrace_start_up)
 		command |= FTRACE_DISABLE_CALLS;
 
 	if (saved_ftrace_func != ftrace_trace_function) {
@@ -589,8 +616,8 @@ static void ftrace_startup_sysctl(void)
 	mutex_lock(&ftrace_start_lock);
 	/* Force update next time */
 	saved_ftrace_func = NULL;
-	/* ftrace_start is true if we want ftrace running */
-	if (ftrace_start)
+	/* ftrace_start_up is true if we want ftrace running */
+	if (ftrace_start_up)
 		command |= FTRACE_ENABLE_CALLS;
 
 	ftrace_run_update_code(command);
@@ -605,8 +632,8 @@ static void ftrace_shutdown_sysctl(void)
 		return;
 
 	mutex_lock(&ftrace_start_lock);
-	/* ftrace_start is true if ftrace is running */
-	if (ftrace_start)
+	/* ftrace_start_up is true if ftrace is running */
+	if (ftrace_start_up)
 		command |= FTRACE_DISABLE_CALLS;
 
 	ftrace_run_update_code(command);
@@ -1186,7 +1213,7 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
 
 	mutex_lock(&ftrace_sysctl_lock);
 	mutex_lock(&ftrace_start_lock);
-	if (iter->filtered && ftrace_start && ftrace_enabled)
+	if (iter->filtered && ftrace_start_up && ftrace_enabled)
 		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
 	mutex_unlock(&ftrace_start_lock);
 	mutex_unlock(&ftrace_sysctl_lock);
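
A rough user-space model of the fallback path used when CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST is not set (purely illustrative: the names mirror the kernel ones, and the printf tracer stands in for a registered ftrace_ops callback). It shows the C-level test that the arch mcount code above performs in assembly when the config option is selected.

#include <stdio.h>

static int function_trace_stop;			/* the quick-stop flag */

static void real_tracer(unsigned long ip, unsigned long parent_ip)
{
	printf("traced ip=%#lx from %#lx\n", ip, parent_ip);
}

/* Stand-in for __ftrace_trace_function on archs without the mcount test. */
static void (*__ftrace_trace_function)(unsigned long, unsigned long) = real_tracer;

static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;				/* tracing paused */
	__ftrace_trace_function(ip, parent_ip);
}

int main(void)
{
	ftrace_test_stop_func(0x1000, 0x2000);	/* traced */
	function_trace_stop = 1;		/* what ftrace_stop() does */
	ftrace_test_stop_func(0x1000, 0x2000);	/* silently skipped */
	return 0;
}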