Diffstat (limited to 'kernel/trace/trace_functions.c')
-rw-r--r--	kernel/trace/trace_functions.c	370
1 file changed, 354 insertions(+), 16 deletions(-)
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 9236d7e25a16..75ef000613c3 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -9,6 +9,7 @@
  * Copyright (C) 2004-2006 Ingo Molnar
  * Copyright (C) 2004 William Lee Irwin III
  */
+#include <linux/ring_buffer.h>
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
@@ -16,52 +17,389 @@
 
 #include "trace.h"
 
-static void start_function_trace(struct trace_array *tr)
+/* function tracing enabled */
+static int ftrace_function_enabled;
+
+static struct trace_array *func_trace;
+
+static void tracing_start_function_trace(void);
+static void tracing_stop_function_trace(void);
+
+static int function_trace_init(struct trace_array *tr)
 {
+	func_trace = tr;
 	tr->cpu = get_cpu();
-	tracing_reset_online_cpus(tr);
 	put_cpu();
 
 	tracing_start_cmdline_record();
 	tracing_start_function_trace();
+	return 0;
 }
 
-static void stop_function_trace(struct trace_array *tr)
+static void function_trace_reset(struct trace_array *tr)
 {
 	tracing_stop_function_trace();
 	tracing_stop_cmdline_record();
 }
 
-static int function_trace_init(struct trace_array *tr)
+static void function_trace_start(struct trace_array *tr)
 {
-	start_function_trace(tr);
-	return 0;
+	tracing_reset_online_cpus(tr);
 }
 
-static void function_trace_reset(struct trace_array *tr)
+static void
+function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
+{
+	struct trace_array *tr = func_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu, resched;
+	int pc;
+
+	if (unlikely(!ftrace_function_enabled))
+		return;
+
+	pc = preempt_count();
+	resched = ftrace_preempt_disable();
+	local_save_flags(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+
+	if (likely(disabled == 1))
+		trace_function(tr, ip, parent_ip, flags, pc);
+
+	atomic_dec(&data->disabled);
+	ftrace_preempt_enable(resched);
+}
+
+static void
+function_trace_call(unsigned long ip, unsigned long parent_ip)
 {
-	stop_function_trace(tr);
+	struct trace_array *tr = func_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+	int pc;
+
+	if (unlikely(!ftrace_function_enabled))
+		return;
+
+	/*
+	 * Need to use raw, since this must be called before the
+	 * recursive protection is performed.
+	 */
+	local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		trace_function(tr, ip, parent_ip, flags, pc);
+	}
+
+	atomic_dec(&data->disabled);
+	local_irq_restore(flags);
 }
 
-static void function_trace_start(struct trace_array *tr)
+static void
+function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
 {
-	tracing_reset_online_cpus(tr);
+	struct trace_array *tr = func_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+	int pc;
+
+	if (unlikely(!ftrace_function_enabled))
+		return;
+
+	/*
+	 * Need to use raw, since this must be called before the
+	 * recursive protection is performed.
+	 */
+	local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		trace_function(tr, ip, parent_ip, flags, pc);
+		/*
+		 * skip over 5 funcs:
+		 *    __ftrace_trace_stack,
+		 *    __trace_stack,
+		 *    function_stack_trace_call
+		 *    ftrace_list_func
+		 *    ftrace_call
+		 */
+		__trace_stack(tr, flags, 5, pc);
+	}
+
+	atomic_dec(&data->disabled);
+	local_irq_restore(flags);
+}
+
+
+static struct ftrace_ops trace_ops __read_mostly =
+{
+	.func = function_trace_call,
+};
+
+static struct ftrace_ops trace_stack_ops __read_mostly =
+{
+	.func = function_stack_trace_call,
+};
+
+/* Our two options */
+enum {
+	TRACE_FUNC_OPT_STACK = 0x1,
+};
+
+static struct tracer_opt func_opts[] = {
+#ifdef CONFIG_STACKTRACE
+	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
+#endif
+	{ } /* Always set a last empty entry */
+};
+
+static struct tracer_flags func_flags = {
+	.val = 0, /* By default: all flags disabled */
+	.opts = func_opts
+};
+
+static void tracing_start_function_trace(void)
+{
+	ftrace_function_enabled = 0;
+
+	if (trace_flags & TRACE_ITER_PREEMPTONLY)
+		trace_ops.func = function_trace_call_preempt_only;
+	else
+		trace_ops.func = function_trace_call;
+
+	if (func_flags.val & TRACE_FUNC_OPT_STACK)
+		register_ftrace_function(&trace_stack_ops);
+	else
+		register_ftrace_function(&trace_ops);
+
+	ftrace_function_enabled = 1;
+}
+
+static void tracing_stop_function_trace(void)
+{
+	ftrace_function_enabled = 0;
+
+	if (func_flags.val & TRACE_FUNC_OPT_STACK)
+		unregister_ftrace_function(&trace_stack_ops);
+	else
+		unregister_ftrace_function(&trace_ops);
+}
+
+static int func_set_flag(u32 old_flags, u32 bit, int set)
+{
+	if (bit == TRACE_FUNC_OPT_STACK) {
+		/* do nothing if already set */
+		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
+			return 0;
+
+		if (set) {
+			unregister_ftrace_function(&trace_ops);
+			register_ftrace_function(&trace_stack_ops);
+		} else {
+			unregister_ftrace_function(&trace_stack_ops);
+			register_ftrace_function(&trace_ops);
+		}
+
+		return 0;
+	}
+
+	return -EINVAL;
 }
 
 static struct tracer function_trace __read_mostly =
 {
 	.name		= "function",
 	.init		= function_trace_init,
 	.reset		= function_trace_reset,
 	.start		= function_trace_start,
+	.wait_pipe	= poll_wait_pipe,
+	.flags		= &func_flags,
+	.set_flag	= func_set_flag,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest	= trace_selftest_startup_function,
 #endif
 };
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+static void
+ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
+{
+	long *count = (long *)data;
+
+	if (tracing_is_on())
+		return;
+
+	if (!*count)
+		return;
+
+	if (*count != -1)
+		(*count)--;
+
+	tracing_on();
+}
+
+static void
+ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
+{
+	long *count = (long *)data;
+
+	if (!tracing_is_on())
+		return;
+
+	if (!*count)
+		return;
+
+	if (*count != -1)
+		(*count)--;
+
+	tracing_off();
+}
+
+static int
+ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
+			 struct ftrace_probe_ops *ops, void *data);
+
+static struct ftrace_probe_ops traceon_probe_ops = {
+	.func			= ftrace_traceon,
+	.print			= ftrace_trace_onoff_print,
+};
+
+static struct ftrace_probe_ops traceoff_probe_ops = {
+	.func			= ftrace_traceoff,
+	.print			= ftrace_trace_onoff_print,
+};
+
+static int
+ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
+			 struct ftrace_probe_ops *ops, void *data)
+{
+	char str[KSYM_SYMBOL_LEN];
+	long count = (long)data;
+
+	kallsyms_lookup(ip, NULL, NULL, NULL, str);
+	seq_printf(m, "%s:", str);
+
+	if (ops == &traceon_probe_ops)
+		seq_printf(m, "traceon");
+	else
+		seq_printf(m, "traceoff");
+
+	if (count == -1)
+		seq_printf(m, ":unlimited\n");
+	else
+		seq_printf(m, ":count=%ld\n", count);
+
+	return 0;
+}
+
+static int
+ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
+{
+	struct ftrace_probe_ops *ops;
+
+	/* we register both traceon and traceoff to this callback */
+	if (strcmp(cmd, "traceon") == 0)
+		ops = &traceon_probe_ops;
+	else
+		ops = &traceoff_probe_ops;
+
+	unregister_ftrace_function_probe_func(glob, ops);
+
+	return 0;
+}
+
+static int
+ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
+{
+	struct ftrace_probe_ops *ops;
+	void *count = (void *)-1;
+	char *number;
+	int ret;
+
+	/* hash funcs only work with set_ftrace_filter */
+	if (!enable)
+		return -EINVAL;
+
+	if (glob[0] == '!')
+		return ftrace_trace_onoff_unreg(glob+1, cmd, param);
+
+	/* we register both traceon and traceoff to this callback */
+	if (strcmp(cmd, "traceon") == 0)
+		ops = &traceon_probe_ops;
+	else
+		ops = &traceoff_probe_ops;
+
+	if (!param)
+		goto out_reg;
+
+	number = strsep(&param, ":");
+
+	if (!strlen(number))
+		goto out_reg;
+
+	/*
+	 * We use the callback data field (which is a pointer)
+	 * as our counter.
+	 */
+	ret = strict_strtoul(number, 0, (unsigned long *)&count);
+	if (ret)
+		return ret;
+
+ out_reg:
+	ret = register_ftrace_function_probe(glob, ops, count);
+
+	return ret < 0 ? ret : 0;
+}
+
+static struct ftrace_func_command ftrace_traceon_cmd = {
+	.name			= "traceon",
+	.func			= ftrace_trace_onoff_callback,
+};
+
+static struct ftrace_func_command ftrace_traceoff_cmd = {
+	.name			= "traceoff",
+	.func			= ftrace_trace_onoff_callback,
+};
+
+static int __init init_func_cmd_traceon(void)
+{
+	int ret;
+
+	ret = register_ftrace_command(&ftrace_traceoff_cmd);
+	if (ret)
+		return ret;
+
+	ret = register_ftrace_command(&ftrace_traceon_cmd);
+	if (ret)
+		unregister_ftrace_command(&ftrace_traceoff_cmd);
+	return ret;
+}
+#else
+static inline int init_func_cmd_traceon(void)
+{
+	return 0;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
 static __init int init_function_trace(void)
 {
+	init_func_cmd_traceon();
 	return register_tracer(&function_trace);
 }
-
 device_initcall(init_function_trace);
+
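Below the diff, for orientation only: a minimal userspace sketch (not part of this patch) of how the features added here are driven. It assumes debugfs mounted at /sys/kernel/debug and a kernel built with CONFIG_DYNAMIC_FTRACE and CONFIG_STACKTRACE; the control files (current_tracer, trace_options, set_ftrace_filter) and the "func:command:count" syntax follow the ftrace documentation of this era, while the write_str helper and the choice of schedule() as a target are illustrative.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* illustrative helper: write a string to a tracing control file */
static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return -1;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror(path);
	close(fd);
	return 0;
}

int main(void)
{
	const char *dir = "/sys/kernel/debug/tracing";
	char path[256];

	/* select the tracer this file implements */
	snprintf(path, sizeof(path), "%s/current_tracer", dir);
	write_str(path, "function");

	/* flip the new func_stack_trace option (handled by func_set_flag) */
	snprintf(path, sizeof(path), "%s/trace_options", dir);
	write_str(path, "func_stack_trace");

	/*
	 * Arm the new "traceoff" probe: turn tracing off on the first
	 * 5 hits of schedule(). Parsed by ftrace_trace_onoff_callback;
	 * a '!' prefix on the glob unregisters the probe.
	 */
	snprintf(path, sizeof(path), "%s/set_ftrace_filter", dir);
	write_str(path, "schedule:traceoff:5");

	return 0;
}

Omitting the count (or passing -1) leaves the probe unlimited, which is what ftrace_trace_onoff_print reports as ":unlimited".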
