diff options
Diffstat (limited to 'kernel/trace/trace_functions.c')
| -rw-r--r-- | kernel/trace/trace_functions.c | 84 |
1 file changed, 84 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 9236d7e25a16..3a5fa08cedb0 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c | |||
| @@ -16,6 +16,8 @@ | |||
| 16 | 16 | ||
| 17 | #include "trace.h" | 17 | #include "trace.h" |
| 18 | 18 | ||
/*
 * Trace array currently being traced by the function tracer; cached at
 * init time so the stack-trace callback can reach it without a lookup.
 */
static struct trace_array *func_trace;

| 19 | static void start_function_trace(struct trace_array *tr) | 21 | static void start_function_trace(struct trace_array *tr) |
| 20 | { | 22 | { |
| 21 | tr->cpu = get_cpu(); | 23 | tr->cpu = get_cpu(); |
| @@ -34,6 +36,7 @@ static void stop_function_trace(struct trace_array *tr) | |||
| 34 | 36 | ||
| 35 | static int function_trace_init(struct trace_array *tr) | 37 | static int function_trace_init(struct trace_array *tr) |
| 36 | { | 38 | { |
| 39 | func_trace = tr; | ||
| 37 | start_function_trace(tr); | 40 | start_function_trace(tr); |
| 38 | return 0; | 41 | return 0; |
| 39 | } | 42 | } |
| @@ -48,12 +51,93 @@ static void function_trace_start(struct trace_array *tr) | |||
| 48 | tracing_reset_online_cpus(tr); | 51 | tracing_reset_online_cpus(tr); |
| 49 | } | 52 | } |
| 50 | 53 | ||
| 54 | static void | ||
| 55 | function_stack_trace_call(unsigned long ip, unsigned long parent_ip) | ||
| 56 | { | ||
| 57 | struct trace_array *tr = func_trace; | ||
| 58 | struct trace_array_cpu *data; | ||
| 59 | unsigned long flags; | ||
| 60 | long disabled; | ||
| 61 | int cpu; | ||
| 62 | int pc; | ||
| 63 | |||
| 64 | if (unlikely(!ftrace_function_enabled)) | ||
| 65 | return; | ||
| 66 | |||
| 67 | /* | ||
| 68 | * Need to use raw, since this must be called before the | ||
| 69 | * recursive protection is performed. | ||
| 70 | */ | ||
| 71 | local_irq_save(flags); | ||
| 72 | cpu = raw_smp_processor_id(); | ||
| 73 | data = tr->data[cpu]; | ||
| 74 | disabled = atomic_inc_return(&data->disabled); | ||
| 75 | |||
| 76 | if (likely(disabled == 1)) { | ||
| 77 | pc = preempt_count(); | ||
| 78 | /* | ||
| 79 | * skip over 5 funcs: | ||
| 80 | * __ftrace_trace_stack, | ||
| 81 | * __trace_stack, | ||
| 82 | * function_stack_trace_call | ||
| 83 | * ftrace_list_func | ||
| 84 | * ftrace_call | ||
| 85 | */ | ||
| 86 | __trace_stack(tr, data, flags, 5, pc); | ||
| 87 | } | ||
| 88 | |||
| 89 | atomic_dec(&data->disabled); | ||
| 90 | local_irq_restore(flags); | ||
| 91 | } | ||
| 92 | |||
| 93 | static struct ftrace_ops trace_stack_ops __read_mostly = | ||
| 94 | { | ||
| 95 | .func = function_stack_trace_call, | ||
| 96 | }; | ||
| 97 | |||
/* Our two options */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,	/* record a stack trace per entry */
};
| 102 | |||
| 103 | static struct tracer_opt func_opts[] = { | ||
| 104 | #ifdef CONFIG_STACKTRACE | ||
| 105 | { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) }, | ||
| 106 | #endif | ||
| 107 | { } /* Always set a last empty entry */ | ||
| 108 | }; | ||
| 109 | |||
| 110 | static struct tracer_flags func_flags = { | ||
| 111 | .val = 0, /* By default: all flags disabled */ | ||
| 112 | .opts = func_opts | ||
| 113 | }; | ||
| 114 | |||
| 115 | static int func_set_flag(u32 old_flags, u32 bit, int set) | ||
| 116 | { | ||
| 117 | if (bit == TRACE_FUNC_OPT_STACK) { | ||
| 118 | /* do nothing if already set */ | ||
| 119 | if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK)) | ||
| 120 | return 0; | ||
| 121 | |||
| 122 | if (set) | ||
| 123 | register_ftrace_function(&trace_stack_ops); | ||
| 124 | else | ||
| 125 | unregister_ftrace_function(&trace_stack_ops); | ||
| 126 | |||
| 127 | return 0; | ||
| 128 | } | ||
| 129 | |||
| 130 | return -EINVAL; | ||
| 131 | } | ||
| 132 | |||
| 51 | static struct tracer function_trace __read_mostly = | 133 | static struct tracer function_trace __read_mostly = |
| 52 | { | 134 | { |
| 53 | .name = "function", | 135 | .name = "function", |
| 54 | .init = function_trace_init, | 136 | .init = function_trace_init, |
| 55 | .reset = function_trace_reset, | 137 | .reset = function_trace_reset, |
| 56 | .start = function_trace_start, | 138 | .start = function_trace_start, |
| 139 | .flags = &func_flags, | ||
| 140 | .set_flag = func_set_flag, | ||
| 57 | #ifdef CONFIG_FTRACE_SELFTEST | 141 | #ifdef CONFIG_FTRACE_SELFTEST |
| 58 | .selftest = trace_selftest_startup_function, | 142 | .selftest = trace_selftest_startup_function, |
| 59 | #endif | 143 | #endif |
