Diffstat (limited to 'kernel/trace/trace_functions.c')
-rw-r--r--	kernel/trace/trace_functions.c	116
1 file changed, 76 insertions(+), 40 deletions(-)
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 85e517e84f50..3f8dc1ce8b9c 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -13,33 +13,83 @@
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
+#include <linux/slab.h>
 #include <linux/fs.h>
 
 #include "trace.h"
 
-/* function tracing enabled */
-static int ftrace_function_enabled;
+static void tracing_start_function_trace(struct trace_array *tr);
+static void tracing_stop_function_trace(struct trace_array *tr);
+static void
+function_trace_call(unsigned long ip, unsigned long parent_ip,
+		    struct ftrace_ops *op, struct pt_regs *pt_regs);
+static void
+function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
+			  struct ftrace_ops *op, struct pt_regs *pt_regs);
+static struct ftrace_ops trace_ops;
+static struct ftrace_ops trace_stack_ops;
+static struct tracer_flags func_flags;
+
+/* Our option */
+enum {
+	TRACE_FUNC_OPT_STACK = 0x1,
+};
+
+static int allocate_ftrace_ops(struct trace_array *tr)
+{
+	struct ftrace_ops *ops;
 
-static struct trace_array *func_trace;
+	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+	if (!ops)
+		return -ENOMEM;
 
-static void tracing_start_function_trace(void);
-static void tracing_stop_function_trace(void);
+	/* Currently only the non-stack version is supported */
+	ops->func = function_trace_call;
+	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;
+
+	tr->ops = ops;
+	ops->private = tr;
+	return 0;
+}
 
 static int function_trace_init(struct trace_array *tr)
 {
-	func_trace = tr;
+	struct ftrace_ops *ops;
+	int ret;
+
+	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
+		/* There's only one global tr */
+		if (!trace_ops.private) {
+			trace_ops.private = tr;
+			trace_stack_ops.private = tr;
+		}
+
+		if (func_flags.val & TRACE_FUNC_OPT_STACK)
+			ops = &trace_stack_ops;
+		else
+			ops = &trace_ops;
+		tr->ops = ops;
+	} else {
+		ret = allocate_ftrace_ops(tr);
+		if (ret)
+			return ret;
+	}
+
 	tr->trace_buffer.cpu = get_cpu();
 	put_cpu();
 
 	tracing_start_cmdline_record();
-	tracing_start_function_trace();
+	tracing_start_function_trace(tr);
 	return 0;
 }
 
 static void function_trace_reset(struct trace_array *tr)
 {
-	tracing_stop_function_trace();
+	tracing_stop_function_trace(tr);
 	tracing_stop_cmdline_record();
+	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL))
+		kfree(tr->ops);
+	tr->ops = NULL;
 }
 
 static void function_trace_start(struct trace_array *tr)
@@ -47,25 +97,18 @@ static void function_trace_start(struct trace_array *tr)
 	tracing_reset_online_cpus(&tr->trace_buffer);
 }
 
-/* Our option */
-enum {
-	TRACE_FUNC_OPT_STACK = 0x1,
-};
-
-static struct tracer_flags func_flags;
-
 static void
 function_trace_call(unsigned long ip, unsigned long parent_ip,
 		    struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
-	struct trace_array *tr = func_trace;
+	struct trace_array *tr = op->private;
 	struct trace_array_cpu *data;
 	unsigned long flags;
 	int bit;
 	int cpu;
 	int pc;
 
-	if (unlikely(!ftrace_function_enabled))
+	if (unlikely(!tr->function_enabled))
 		return;
 
 	pc = preempt_count();
@@ -91,14 +134,14 @@ static void
 function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 			  struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
-	struct trace_array *tr = func_trace;
+	struct trace_array *tr = op->private;
 	struct trace_array_cpu *data;
 	unsigned long flags;
 	long disabled;
 	int cpu;
 	int pc;
 
-	if (unlikely(!ftrace_function_enabled))
+	if (unlikely(!tr->function_enabled))
 		return;
 
 	/*
@@ -128,7 +171,6 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 	local_irq_restore(flags);
 }
 
-
 static struct ftrace_ops trace_ops __read_mostly =
 {
 	.func = function_trace_call,
@@ -153,26 +195,17 @@ static struct tracer_flags func_flags = {
 	.opts = func_opts
 };
 
-static void tracing_start_function_trace(void)
+static void tracing_start_function_trace(struct trace_array *tr)
 {
-	ftrace_function_enabled = 0;
-
-	if (func_flags.val & TRACE_FUNC_OPT_STACK)
-		register_ftrace_function(&trace_stack_ops);
-	else
-		register_ftrace_function(&trace_ops);
-
-	ftrace_function_enabled = 1;
+	tr->function_enabled = 0;
+	register_ftrace_function(tr->ops);
+	tr->function_enabled = 1;
 }
 
-static void tracing_stop_function_trace(void)
+static void tracing_stop_function_trace(struct trace_array *tr)
 {
-	ftrace_function_enabled = 0;
-
-	if (func_flags.val & TRACE_FUNC_OPT_STACK)
-		unregister_ftrace_function(&trace_stack_ops);
-	else
-		unregister_ftrace_function(&trace_ops);
+	tr->function_enabled = 0;
+	unregister_ftrace_function(tr->ops);
 }
 
 static int
@@ -184,12 +217,14 @@ func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
 			break;
 
+		unregister_ftrace_function(tr->ops);
+
 		if (set) {
-			unregister_ftrace_function(&trace_ops);
-			register_ftrace_function(&trace_stack_ops);
+			tr->ops = &trace_stack_ops;
+			register_ftrace_function(tr->ops);
 		} else {
-			unregister_ftrace_function(&trace_stack_ops);
-			register_ftrace_function(&trace_ops);
+			tr->ops = &trace_ops;
+			register_ftrace_function(tr->ops);
 		}
 
 		break;
@@ -209,6 +244,7 @@ static struct tracer function_trace __tracer_data =
 	.wait_pipe = poll_wait_pipe,
 	.flags = &func_flags,
 	.set_flag = func_set_flag,
+	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
 	.selftest = trace_selftest_startup_function,
#endif
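
The net effect of the patch is that function tracing no longer depends on the single global func_trace pointer and ftrace_function_enabled flag: each struct trace_array instance owns an ftrace_ops whose ->private points back at the instance, and the callbacks recover the instance through op->private. A minimal sketch of that flow, using only identifiers introduced in the hunks above; example_setup and example_callback are hypothetical names used purely for illustration, not part of the patch:

/* Hypothetical illustration of the per-instance wiring added above. */
static int example_setup(struct trace_array *tr)
{
	int ret;

	/* Non-global instances allocate their own ops; allocate_ftrace_ops()
	 * sets tr->ops, ops->func and ops->private = tr. */
	ret = allocate_ftrace_ops(tr);
	if (ret)
		return ret;

	/* Registers tr->ops and flips tr->function_enabled. */
	tracing_start_function_trace(tr);
	return 0;
}

/* The callback no longer reads a global; it recovers its instance from
 * the ftrace_ops that fired. */
static void example_callback(unsigned long ip, unsigned long parent_ip,
			     struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;

	if (unlikely(!tr->function_enabled))
		return;
	/* ... record the event into tr->trace_buffer ... */
}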
