author      Steven Rostedt <srostedt@redhat.com>    2008-11-05 16:05:44 -0500
committer   Ingo Molnar <mingo@elte.hu>             2008-11-06 01:50:51 -0500
commit      60a7ecf42661f2b22168751298592da6ee210c9e (patch)
tree        050fd052c546c92f8aac10ee71d5bb6d98a21fc8 /kernel/trace/ftrace.c
parent      79c81d220c8e25163f56edcdfaf23f83a4c88e6b (diff)
ftrace: add quick function trace stop
Impact: quick start and stop of function tracer
This patch adds a way to disable the function tracer quickly, without
the need to run kstop_machine. It adds a new variable,
function_trace_stop, which, when set, stops the calls from mcount to
the trace functions. This is just an on/off switch and does not handle
recursion the way preempt_disable() does.
Its main purpose is to help other tracers/debuggers start and stop tracing
functions without the need to call kstop_machine.
The config option HAVE_FUNCTION_TRACE_MCOUNT_TEST is added for archs
that implement the test of function_trace_stop in their arch-dependent
mcount code. Otherwise, the test is done in C code.
x86 is the only arch that supports this at the moment.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
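
As a usage illustration (not part of the patch): a tracer or debugger that wants to pause
function tracing around a critical region can simply flip the new variable. The sketch below
assumes kernel code that can see function_trace_stop from kernel/trace/ftrace.c; the helper
names are hypothetical.

/*
 * Minimal sketch (hypothetical helpers): pause and resume the function
 * tracer without a kstop_machine pass.  The switch is not recursion
 * safe, so pause/resume must be strictly paired by the caller.
 */
extern int function_trace_stop;        /* added by this patch */

static void my_tracer_pause(void)
{
        /* mcount (or the C wrapper) checks this and skips the trace call */
        function_trace_stop = 1;
}

static void my_tracer_resume(void)
{
        function_trace_stop = 0;
}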
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--   kernel/trace/ftrace.c | 47
1 file changed, 37 insertions(+), 10 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 4a39d24568c8..896c71f0f4c4 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -47,6 +47,9 @@
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
 
+/* Quick disabling of function tracer. */
+int function_trace_stop;
+
 /*
  * ftrace_disabled is set when an anomaly is discovered.
  * ftrace_disabled is much stronger than ftrace_enabled.
@@ -63,6 +66,7 @@ static struct ftrace_ops ftrace_list_end __read_mostly =
 
 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
+ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
 
 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 {
@@ -88,8 +92,23 @@ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 void clear_ftrace_function(void)
 {
         ftrace_trace_function = ftrace_stub;
+        __ftrace_trace_function = ftrace_stub;
 }
 
+#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+/*
+ * For those archs that do not test ftrace_trace_stop in their
+ * mcount call site, we need to do it from C.
+ */
+static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
+{
+        if (function_trace_stop)
+                return;
+
+        __ftrace_trace_function(ip, parent_ip);
+}
+#endif
+
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
         /* should not be called from interrupt context */
@@ -110,10 +129,18 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
                  * For one func, simply call it directly.
                  * For more than one func, call the chain.
                  */
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
                 if (ops->next == &ftrace_list_end)
                         ftrace_trace_function = ops->func;
                 else
                         ftrace_trace_function = ftrace_list_func;
+#else
+                if (ops->next == &ftrace_list_end)
+                        __ftrace_trace_function = ops->func;
+                else
+                        __ftrace_trace_function = ftrace_list_func;
+                ftrace_trace_function = ftrace_test_stop_func;
+#endif
         }
 
         spin_unlock(&ftrace_lock);
@@ -526,7 +553,7 @@ static void ftrace_run_update_code(int command)
 }
 
 static ftrace_func_t saved_ftrace_func;
-static int ftrace_start;
+static int ftrace_start_up;
 static DEFINE_MUTEX(ftrace_start_lock);
 
 static void ftrace_startup(void)
@@ -537,8 +564,8 @@ static void ftrace_startup(void)
                 return;
 
         mutex_lock(&ftrace_start_lock);
-        ftrace_start++;
-        if (ftrace_start == 1)
+        ftrace_start_up++;
+        if (ftrace_start_up == 1)
                 command |= FTRACE_ENABLE_CALLS;
 
         if (saved_ftrace_func != ftrace_trace_function) {
@@ -562,8 +589,8 @@ static void ftrace_shutdown(void)
                 return;
 
         mutex_lock(&ftrace_start_lock);
-        ftrace_start--;
-        if (!ftrace_start)
+        ftrace_start_up--;
+        if (!ftrace_start_up)
                 command |= FTRACE_DISABLE_CALLS;
 
         if (saved_ftrace_func != ftrace_trace_function) {
@@ -589,8 +616,8 @@ static void ftrace_startup_sysctl(void)
         mutex_lock(&ftrace_start_lock);
         /* Force update next time */
         saved_ftrace_func = NULL;
-        /* ftrace_start is true if we want ftrace running */
-        if (ftrace_start)
+        /* ftrace_start_up is true if we want ftrace running */
+        if (ftrace_start_up)
                 command |= FTRACE_ENABLE_CALLS;
 
         ftrace_run_update_code(command);
@@ -605,8 +632,8 @@ static void ftrace_shutdown_sysctl(void)
                 return;
 
         mutex_lock(&ftrace_start_lock);
-        /* ftrace_start is true if ftrace is running */
-        if (ftrace_start)
+        /* ftrace_start_up is true if ftrace is running */
+        if (ftrace_start_up)
                 command |= FTRACE_DISABLE_CALLS;
 
         ftrace_run_update_code(command);
@@ -1186,7 +1213,7 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
 
         mutex_lock(&ftrace_sysctl_lock);
         mutex_lock(&ftrace_start_lock);
-        if (iter->filtered && ftrace_start && ftrace_enabled)
+        if (iter->filtered && ftrace_start_up && ftrace_enabled)
                 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
         mutex_unlock(&ftrace_start_lock);
         mutex_unlock(&ftrace_sysctl_lock);
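
For reference, the intent of the HAVE_FUNCTION_TRACE_MCOUNT_TEST path can be pictured in C,
although the real x86 test is done in the arch's assembly mcount stub, which is not part of
this diff; the function name below is purely illustrative.

/*
 * C-level sketch of what an arch's mcount stub does when
 * CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST is set (the actual x86
 * implementation is assembly and lives outside this file):
 */
static void mcount_entry_sketch(unsigned long ip, unsigned long parent_ip)
{
        if (function_trace_stop)        /* quick bail-out, no tracing */
                return;

        ftrace_trace_function(ip, parent_ip);   /* call the registered tracer */
}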