Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
 kernel/trace/trace_functions_graph.c | 166 ++++++++++++++++++++++-------
 1 file changed, 127 insertions(+), 39 deletions(-)
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 420ec3487579..b3749a2c3132 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -52,7 +52,7 @@ static struct tracer_flags tracer_flags = {
 	.opts = trace_opts
 };
 
-/* pid on the last trace processed */
+static struct trace_array *graph_array;
 
 
 /* Add a function return address to the trace stack on thread info.*/
@@ -166,10 +166,123 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
 	return ret;
 }
 
+static int __trace_graph_entry(struct trace_array *tr,
+				struct ftrace_graph_ent *trace,
+				unsigned long flags,
+				int pc)
+{
+	struct ftrace_event_call *call = &event_funcgraph_entry;
+	struct ring_buffer_event *event;
+	struct ring_buffer *buffer = tr->buffer;
+	struct ftrace_graph_ent_entry *entry;
+
+	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+		return 0;
+
+	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
+					  sizeof(*entry), flags, pc);
+	if (!event)
+		return 0;
+	entry = ring_buffer_event_data(event);
+	entry->graph_ent = *trace;
+	if (!filter_current_check_discard(buffer, call, entry, event))
+		ring_buffer_unlock_commit(buffer, event);
+
+	return 1;
+}
+
+int trace_graph_entry(struct ftrace_graph_ent *trace)
+{
+	struct trace_array *tr = graph_array;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int ret;
+	int cpu;
+	int pc;
+
+	if (unlikely(!tr))
+		return 0;
+
+	if (!ftrace_trace_task(current))
+		return 0;
+
+	if (!ftrace_graph_addr(trace->func))
+		return 0;
+
+	local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		ret = __trace_graph_entry(tr, trace, flags, pc);
+	} else {
+		ret = 0;
+	}
+	/* Only do the atomic if it is not already set */
+	if (!test_tsk_trace_graph(current))
+		set_tsk_trace_graph(current);
+
+	atomic_dec(&data->disabled);
+	local_irq_restore(flags);
+
+	return ret;
+}
+
+static void __trace_graph_return(struct trace_array *tr,
+				struct ftrace_graph_ret *trace,
+				unsigned long flags,
+				int pc)
+{
+	struct ftrace_event_call *call = &event_funcgraph_exit;
+	struct ring_buffer_event *event;
+	struct ring_buffer *buffer = tr->buffer;
+	struct ftrace_graph_ret_entry *entry;
+
+	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+		return;
+
+	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
+					  sizeof(*entry), flags, pc);
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
+	entry->ret = *trace;
+	if (!filter_current_check_discard(buffer, call, entry, event))
+		ring_buffer_unlock_commit(buffer, event);
+}
+
+void trace_graph_return(struct ftrace_graph_ret *trace)
+{
+	struct trace_array *tr = graph_array;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+	int pc;
+
+	local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		__trace_graph_return(tr, trace, flags, pc);
+	}
+	if (!trace->depth)
+		clear_tsk_trace_graph(current);
+	atomic_dec(&data->disabled);
+	local_irq_restore(flags);
+}
+
 static int graph_trace_init(struct trace_array *tr)
 {
-	int ret = register_ftrace_graph(&trace_graph_return,
-					&trace_graph_entry);
+	int ret;
+
+	graph_array = tr;
+	ret = register_ftrace_graph(&trace_graph_return,
+				    &trace_graph_entry);
 	if (ret)
 		return ret;
 	tracing_start_cmdline_record();
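The trace_graph_entry()/trace_graph_return() pair added above guards against reentrancy with a per-CPU counter: IRQs are disabled, data->disabled is bumped with atomic_inc_return(), and an event is recorded only when the counter went 0 -> 1, so a nested call from the tracer itself backs off without touching the ring buffer. A minimal userspace sketch of that pattern, with C11 atomics standing in for the kernel's atomic_t (names here are illustrative, not kernel API):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_long disabled;	/* the kernel keeps one of these per CPU */

	static void trace_event(const char *what)
	{
		/* atomic_fetch_add() returns the old value, so "old == 0"
		 * means we are the outermost caller; a nested entry sees
		 * old > 0 and skips the buffer write entirely. */
		if (atomic_fetch_add(&disabled, 1) == 0)
			printf("traced: %s\n", what);
		atomic_fetch_sub(&disabled, 1);
	}

	int main(void)
	{
		trace_event("outer call");	/* recorded */
		return 0;
	}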
@@ -177,49 +290,30 @@ static int graph_trace_init(struct trace_array *tr)
 	return 0;
 }
 
+void set_graph_array(struct trace_array *tr)
+{
+	graph_array = tr;
+}
+
 static void graph_trace_reset(struct trace_array *tr)
 {
 	tracing_stop_cmdline_record();
 	unregister_ftrace_graph();
 }
 
-static inline int log10_cpu(int nb)
-{
-	if (nb / 100)
-		return 3;
-	if (nb / 10)
-		return 2;
-	return 1;
-}
+static int max_bytes_for_cpu;
 
 static enum print_line_t
 print_graph_cpu(struct trace_seq *s, int cpu)
 {
-	int i;
 	int ret;
-	int log10_this = log10_cpu(cpu);
-	int log10_all = log10_cpu(cpumask_weight(cpu_online_mask));
-
 
 	/*
 	 * Start with a space character - to make it stand out
 	 * to the right a bit when trace output is pasted into
 	 * email:
 	 */
-	ret = trace_seq_printf(s, " ");
-
-	/*
-	 * Tricky - we space the CPU field according to the max
-	 * number of online CPUs. On a 2-cpu system it would take
-	 * a maximum of 1 digit - on a 128 cpu system it would
-	 * take up to 3 digits:
-	 */
-	for (i = 0; i < log10_all - log10_this; i++) {
-		ret = trace_seq_printf(s, " ");
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-	}
-	ret = trace_seq_printf(s, "%d) ", cpu);
+	ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
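Replacing log10_cpu() and the hand-rolled padding loop with a single " %*d) " format works because '*' in printf-style formats consumes an extra int argument as the field width. A standalone demo of the same idiom, using plain printf in place of trace_seq_printf:

	#include <stdio.h>

	int main(void)
	{
		int max_bytes_for_cpu = 3;	/* widest cpu id, e.g. "255" */

		/* the '*' pulls the width from max_bytes_for_cpu */
		printf(" %*d) \n", max_bytes_for_cpu, 7);	/* "   7) " */
		printf(" %*d) \n", max_bytes_for_cpu, 128);	/* " 128) " */
		return 0;
	}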
@@ -565,11 +659,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 		return TRACE_TYPE_PARTIAL_LINE;
 	}
 
-	ret = seq_print_ip_sym(s, call->func, 0);
-	if (!ret)
-		return TRACE_TYPE_PARTIAL_LINE;
-
-	ret = trace_seq_printf(s, "();\n");
+	ret = trace_seq_printf(s, "%pf();\n", (void *)call->func);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
@@ -612,11 +702,7 @@ print_graph_entry_nested(struct trace_iterator *iter,
 		return TRACE_TYPE_PARTIAL_LINE;
 	}
 
-	ret = seq_print_ip_sym(s, call->func, 0);
-	if (!ret)
-		return TRACE_TYPE_PARTIAL_LINE;
-
-	ret = trace_seq_printf(s, "() {\n");
+	ret = trace_seq_printf(s, "%pf() {\n", (void *)call->func);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
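Both print_graph_entry_leaf() and print_graph_entry_nested() now fold seq_print_ip_sym() plus a second trace_seq_printf() into one call via %pf, the kernel vsnprintf() extension that resolves a function address to its symbol name through kallsyms. Userspace printf has no %pf; a rough analogue uses dladdr(3) (a GNU extension; build with cc -rdynamic demo.c -ldl, illustration only):

	#define _GNU_SOURCE
	#include <dlfcn.h>
	#include <stdio.h>

	void sample_func(void) { }

	int main(void)
	{
		Dl_info info;
		void *addr = (void *)sample_func;

		/* dladdr() maps an address back to the nearest exported
		 * symbol, much as the kernel's %pf does via kallsyms. */
		if (dladdr(addr, &info) && info.dli_sname)
			printf("%s();\n", info.dli_sname);	/* "sample_func();" */
		else
			printf("%p();\n", addr);		/* fallback: raw address */
		return 0;
	}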
@@ -934,6 +1020,8 @@ static struct tracer graph_trace __read_mostly = {
 
 static __init int init_graph_trace(void)
 {
+	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);
+
 	return register_tracer(&graph_trace);
 }
 
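This last hunk sizes the CPU column once at init time: snprintf(NULL, 0, ...) writes nothing but still returns the number of characters the formatted output would occupy, so max_bytes_for_cpu becomes the digit count of the largest possible CPU id. The same trick in a standalone program (nr_cpu_ids here is a local stand-in for the kernel variable):

	#include <stdio.h>

	int main(void)
	{
		int nr_cpu_ids = 256;	/* stand-in for the kernel's nr_cpu_ids */
		int max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

		/* "255" needs 3 bytes, so the CPU field is padded to 3 */
		printf("cpu ids up to %d need %d bytes\n",
		       nr_cpu_ids - 1, max_bytes_for_cpu);
		return 0;
	}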
