diff options

 include/linux/ftrace.h (-rw-r--r--) |   13 ++
 kernel/trace/ftrace.c  (-rw-r--r--) |   45 +----
 2 files changed, 17 insertions(+), 41 deletions(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 2d5964119885..3651fdc3bec9 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -27,6 +27,19 @@
 #define ARCH_SUPPORTS_FTRACE_OPS 0
 #endif
 
+/*
+ * If the arch's mcount caller does not support all of ftrace's
+ * features, then it must call an indirect function that
+ * does. Or at least does enough to prevent any unwelcomed side effects.
+ */
+#if !defined(CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST) || \
+	!ARCH_SUPPORTS_FTRACE_OPS
+# define FTRACE_FORCE_LIST_FUNC 1
+#else
+# define FTRACE_FORCE_LIST_FUNC 0
+#endif
+
+
 struct module;
 struct ftrace_hash;
 
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 4f2ab9352a68..4cbca2e6eb70 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -97,8 +97,6 @@ static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
-static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
-ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
 static struct ftrace_ops control_ops;
@@ -162,26 +160,9 @@ static void set_ftrace_pid_function(ftrace_func_t func)
 void clear_ftrace_function(void)
 {
 	ftrace_trace_function = ftrace_stub;
-	__ftrace_trace_function = ftrace_stub;
-	__ftrace_trace_function_delay = ftrace_stub;
 	ftrace_pid_function = ftrace_stub;
 }
 
-#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
-/*
- * For those archs that do not test ftrace_trace_stop in their
- * mcount call site, we need to do it from C.
- */
-static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip,
-				  struct ftrace_ops *op)
-{
-	if (function_trace_stop)
-		return;
-
-	__ftrace_trace_function(ip, parent_ip, op);
-}
-#endif
-
 static void control_ops_disable_all(struct ftrace_ops *ops)
 {
 	int cpu;
@@ -246,7 +227,7 @@ static void update_ftrace_function(void)
 	if (ftrace_ops_list == &ftrace_list_end ||
 	    (ftrace_ops_list->next == &ftrace_list_end &&
 	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
-	     ARCH_SUPPORTS_FTRACE_OPS)) {
+	     !FTRACE_FORCE_LIST_FUNC)) {
 		/* Set the ftrace_ops that the arch callback uses */
 		if (ftrace_ops_list == &global_ops)
 			function_trace_op = ftrace_global_list;
@@ -259,18 +240,7 @@ static void update_ftrace_function(void)
 		func = ftrace_ops_list_func;
 	}
 
-#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	ftrace_trace_function = func;
-#else
-#ifdef CONFIG_DYNAMIC_FTRACE
-	/* do not update till all functions have been modified */
-	__ftrace_trace_function_delay = func;
-#else
-	__ftrace_trace_function = func;
-#endif
-	ftrace_trace_function =
-		(func == ftrace_stub) ? func : ftrace_test_stop_func;
-#endif
 }
 
 static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
@@ -1902,16 +1872,6 @@ static void ftrace_run_update_code(int command)
 	 */
 	arch_ftrace_update_code(command);
 
-#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
-	/*
-	 * For archs that call ftrace_test_stop_func(), we must
-	 * wait till after we update all the function callers
-	 * before we update the callback. This keeps different
-	 * ops that record different functions from corrupting
-	 * each other.
-	 */
-	__ftrace_trace_function = __ftrace_trace_function_delay;
-#endif
 	function_trace_stop--;
 
 	ret = ftrace_arch_code_modify_post_process();
@@ -3996,6 +3956,9 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 {
 	struct ftrace_ops *op;
 
+	if (function_trace_stop)
+		return;
+
 	if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
 		return;
 
