aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorSteven Rostedt <srostedt@redhat.com>2008-11-05 16:05:44 -0500
committerIngo Molnar <mingo@elte.hu>2008-11-06 01:50:51 -0500
commit60a7ecf42661f2b22168751298592da6ee210c9e (patch)
tree050fd052c546c92f8aac10ee71d5bb6d98a21fc8 /kernel
parent79c81d220c8e25163f56edcdfaf23f83a4c88e6b (diff)
ftrace: add quick function trace stop
Impact: quick start and stop of function tracer This patch adds a way to disable the function tracer quickly without the need to run kstop_machine. It adds a new variable called function_trace_stop which will stop the calls to functions from mcount when set. This is just an on/off switch and does not handle recursion like preempt_disable(). Its main purpose is to help other tracers/debuggers start and stop tracing functions without the need to call kstop_machine. The config option HAVE_FUNCTION_TRACE_MCOUNT_TEST is added for archs that implement the testing of the function_trace_stop in the mcount arch dependent code. Otherwise, the test is done in the C code. x86 is the only arch at the moment that supports this. Signed-off-by: Steven Rostedt <srostedt@redhat.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/trace/Kconfig7
-rw-r--r--kernel/trace/ftrace.c47
2 files changed, 44 insertions, 10 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 33dbefd471e8..fc4febc3334a 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -9,6 +9,13 @@ config NOP_TRACER
9config HAVE_FUNCTION_TRACER 9config HAVE_FUNCTION_TRACER
10 bool 10 bool
11 11
12config HAVE_FUNCTION_TRACE_MCOUNT_TEST
13 bool
14 help
15 This gets selected when the arch tests the function_trace_stop
16 variable at the mcount call site. Otherwise, this variable
17 is tested by the called function.
18
12config HAVE_DYNAMIC_FTRACE 19config HAVE_DYNAMIC_FTRACE
13 bool 20 bool
14 21
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 4a39d24568c8..896c71f0f4c4 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -47,6 +47,9 @@
47int ftrace_enabled __read_mostly; 47int ftrace_enabled __read_mostly;
48static int last_ftrace_enabled; 48static int last_ftrace_enabled;
49 49
50/* Quick disabling of function tracer. */
51int function_trace_stop;
52
50/* 53/*
51 * ftrace_disabled is set when an anomaly is discovered. 54 * ftrace_disabled is set when an anomaly is discovered.
52 * ftrace_disabled is much stronger than ftrace_enabled. 55 * ftrace_disabled is much stronger than ftrace_enabled.
@@ -63,6 +66,7 @@ static struct ftrace_ops ftrace_list_end __read_mostly =
63 66
64static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end; 67static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
65ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; 68ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
69ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
66 70
67static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) 71static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
68{ 72{
@@ -88,8 +92,23 @@ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
88void clear_ftrace_function(void) 92void clear_ftrace_function(void)
89{ 93{
90 ftrace_trace_function = ftrace_stub; 94 ftrace_trace_function = ftrace_stub;
95 __ftrace_trace_function = ftrace_stub;
91} 96}
92 97
98#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
99/*
100 * For those archs that do not test ftrace_trace_stop in their
101 * mcount call site, we need to do it from C.
102 */
103static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
104{
105 if (function_trace_stop)
106 return;
107
108 __ftrace_trace_function(ip, parent_ip);
109}
110#endif
111
93static int __register_ftrace_function(struct ftrace_ops *ops) 112static int __register_ftrace_function(struct ftrace_ops *ops)
94{ 113{
95 /* should not be called from interrupt context */ 114 /* should not be called from interrupt context */
@@ -110,10 +129,18 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
110 * For one func, simply call it directly. 129 * For one func, simply call it directly.
111 * For more than one func, call the chain. 130 * For more than one func, call the chain.
112 */ 131 */
132#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
113 if (ops->next == &ftrace_list_end) 133 if (ops->next == &ftrace_list_end)
114 ftrace_trace_function = ops->func; 134 ftrace_trace_function = ops->func;
115 else 135 else
116 ftrace_trace_function = ftrace_list_func; 136 ftrace_trace_function = ftrace_list_func;
137#else
138 if (ops->next == &ftrace_list_end)
139 __ftrace_trace_function = ops->func;
140 else
141 __ftrace_trace_function = ftrace_list_func;
142 ftrace_trace_function = ftrace_test_stop_func;
143#endif
117 } 144 }
118 145
119 spin_unlock(&ftrace_lock); 146 spin_unlock(&ftrace_lock);
@@ -526,7 +553,7 @@ static void ftrace_run_update_code(int command)
526} 553}
527 554
528static ftrace_func_t saved_ftrace_func; 555static ftrace_func_t saved_ftrace_func;
529static int ftrace_start; 556static int ftrace_start_up;
530static DEFINE_MUTEX(ftrace_start_lock); 557static DEFINE_MUTEX(ftrace_start_lock);
531 558
532static void ftrace_startup(void) 559static void ftrace_startup(void)
@@ -537,8 +564,8 @@ static void ftrace_startup(void)
537 return; 564 return;
538 565
539 mutex_lock(&ftrace_start_lock); 566 mutex_lock(&ftrace_start_lock);
540 ftrace_start++; 567 ftrace_start_up++;
541 if (ftrace_start == 1) 568 if (ftrace_start_up == 1)
542 command |= FTRACE_ENABLE_CALLS; 569 command |= FTRACE_ENABLE_CALLS;
543 570
544 if (saved_ftrace_func != ftrace_trace_function) { 571 if (saved_ftrace_func != ftrace_trace_function) {
@@ -562,8 +589,8 @@ static void ftrace_shutdown(void)
562 return; 589 return;
563 590
564 mutex_lock(&ftrace_start_lock); 591 mutex_lock(&ftrace_start_lock);
565 ftrace_start--; 592 ftrace_start_up--;
566 if (!ftrace_start) 593 if (!ftrace_start_up)
567 command |= FTRACE_DISABLE_CALLS; 594 command |= FTRACE_DISABLE_CALLS;
568 595
569 if (saved_ftrace_func != ftrace_trace_function) { 596 if (saved_ftrace_func != ftrace_trace_function) {
@@ -589,8 +616,8 @@ static void ftrace_startup_sysctl(void)
589 mutex_lock(&ftrace_start_lock); 616 mutex_lock(&ftrace_start_lock);
590 /* Force update next time */ 617 /* Force update next time */
591 saved_ftrace_func = NULL; 618 saved_ftrace_func = NULL;
592 /* ftrace_start is true if we want ftrace running */ 619 /* ftrace_start_up is true if we want ftrace running */
593 if (ftrace_start) 620 if (ftrace_start_up)
594 command |= FTRACE_ENABLE_CALLS; 621 command |= FTRACE_ENABLE_CALLS;
595 622
596 ftrace_run_update_code(command); 623 ftrace_run_update_code(command);
@@ -605,8 +632,8 @@ static void ftrace_shutdown_sysctl(void)
605 return; 632 return;
606 633
607 mutex_lock(&ftrace_start_lock); 634 mutex_lock(&ftrace_start_lock);
608 /* ftrace_start is true if ftrace is running */ 635 /* ftrace_start_up is true if ftrace is running */
609 if (ftrace_start) 636 if (ftrace_start_up)
610 command |= FTRACE_DISABLE_CALLS; 637 command |= FTRACE_DISABLE_CALLS;
611 638
612 ftrace_run_update_code(command); 639 ftrace_run_update_code(command);
@@ -1186,7 +1213,7 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
1186 1213
1187 mutex_lock(&ftrace_sysctl_lock); 1214 mutex_lock(&ftrace_sysctl_lock);
1188 mutex_lock(&ftrace_start_lock); 1215 mutex_lock(&ftrace_start_lock);
1189 if (iter->filtered && ftrace_start && ftrace_enabled) 1216 if (iter->filtered && ftrace_start_up && ftrace_enabled)
1190 ftrace_run_update_code(FTRACE_ENABLE_CALLS); 1217 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1191 mutex_unlock(&ftrace_start_lock); 1218 mutex_unlock(&ftrace_start_lock);
1192 mutex_unlock(&ftrace_sysctl_lock); 1219 mutex_unlock(&ftrace_sysctl_lock);