-rw-r--r--  arch/x86/kernel/ftrace.c              |  32
-rw-r--r--  include/linux/ftrace.h                |  25
-rw-r--r--  kernel/trace/ftrace.c                 |  30
-rw-r--r--  kernel/trace/trace.c                  |  67
-rw-r--r--  kernel/trace/trace.h                  |  28
-rw-r--r--  kernel/trace/trace_functions_graph.c  | 104
-rw-r--r--  kernel/trace/trace_functions_return.c |  98
7 files changed, 208 insertions(+), 176 deletions(-)
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 3595a4c14aba..26b2d92d48b3 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -347,7 +347,7 @@ void ftrace_nmi_exit(void)
 
 /* Add a function return address to the trace stack on thread info.*/
 static int push_return_trace(unsigned long ret, unsigned long long time,
-				unsigned long func)
+				unsigned long func, int *depth)
 {
 	int index;
 
@@ -365,21 +365,22 @@ static int push_return_trace(unsigned long ret, unsigned long long time,
 	current->ret_stack[index].ret = ret;
 	current->ret_stack[index].func = func;
 	current->ret_stack[index].calltime = time;
+	*depth = index;
 
 	return 0;
 }
 
 /* Retrieve a function return address to the trace stack on thread info.*/
-static void pop_return_trace(unsigned long *ret, unsigned long long *time,
-				unsigned long *func, unsigned long *overrun)
+static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
 {
 	int index;
 
 	index = current->curr_ret_stack;
 	*ret = current->ret_stack[index].ret;
-	*func = current->ret_stack[index].func;
-	*time = current->ret_stack[index].calltime;
-	*overrun = atomic_read(&current->trace_overrun);
+	trace->func = current->ret_stack[index].func;
+	trace->calltime = current->ret_stack[index].calltime;
+	trace->overrun = atomic_read(&current->trace_overrun);
+	trace->depth = index;
 	current->curr_ret_stack--;
 }
 
@@ -390,12 +391,13 @@ static void pop_return_trace(unsigned long *ret, unsigned long long *time,
 unsigned long ftrace_return_to_handler(void)
 {
 	struct ftrace_graph_ret trace;
-	pop_return_trace(&trace.ret, &trace.calltime, &trace.func,
-				&trace.overrun);
+	unsigned long ret;
+
+	pop_return_trace(&trace, &ret);
 	trace.rettime = cpu_clock(raw_smp_processor_id());
-	ftrace_graph_function(&trace);
+	ftrace_graph_return(&trace);
 
-	return trace.ret;
+	return ret;
 }
 
 /*
@@ -407,6 +409,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 	unsigned long old;
 	unsigned long long calltime;
 	int faulted;
+	struct ftrace_graph_ent trace;
 	unsigned long return_hooker = (unsigned long)
 				&return_to_handler;
 
@@ -452,8 +455,15 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 
 	calltime = cpu_clock(raw_smp_processor_id());
 
-	if (push_return_trace(old, calltime, self_addr) == -EBUSY)
+	if (push_return_trace(old, calltime,
+				self_addr, &trace.depth) == -EBUSY) {
 		*parent = old;
+		return;
+	}
+
+	trace.func = self_addr;
+	ftrace_graph_entry(&trace);
+
 }
 
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
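
Note: push_return_trace()/pop_return_trace() above maintain a per-task shadow stack of saved return addresses. A minimal sketch of one slot, with field names taken from the current->ret_stack[index] accesses in the hunks above (an illustration, not necessarily the exact kernel definition):

    /*
     * One slot of the per-task return-address stack driven by
     * push_return_trace()/pop_return_trace(). Field names follow the
     * accesses shown above; the layout itself is illustrative.
     */
    struct ftrace_ret_stack {
            unsigned long ret;              /* original return address */
            unsigned long func;             /* function being entered */
            unsigned long long calltime;    /* timestamp taken at entry */
    };

The new depth value handed to the entry/return handlers is simply the index into this stack at push time, so the nesting level falls out of the existing bookkeeping for free.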
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index b4ac734ad8d6..fc2d54987198 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -313,26 +313,39 @@ ftrace_init_module(struct module *mod,
 
 
 /*
+ * Structure that defines an entry function trace.
+ */
+struct ftrace_graph_ent {
+	unsigned long func; /* Current function */
+	int depth;
+};
+
+/*
  * Structure that defines a return function trace.
  */
 struct ftrace_graph_ret {
-	unsigned long ret; /* Return address */
 	unsigned long func; /* Current function */
 	unsigned long long calltime;
 	unsigned long long rettime;
 	/* Number of functions that overran the depth limit for current task */
 	unsigned long overrun;
+	int depth;
 };
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 #define FTRACE_RETFUNC_DEPTH 50
 #define FTRACE_RETSTACK_ALLOC_SIZE 32
-/* Type of a callback handler of tracing return function */
-typedef void (*trace_function_graph_t)(struct ftrace_graph_ret *);
+/* Type of the callback handlers for tracing function graph*/
+typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
+typedef void (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
+
+extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+				 trace_func_graph_ent_t entryfunc);
+
+/* The current handlers in use */
+extern trace_func_graph_ret_t ftrace_graph_return;
+extern trace_func_graph_ent_t ftrace_graph_entry;
 
-extern int register_ftrace_graph(trace_function_graph_t func);
-/* The current handler in use */
-extern trace_function_graph_t ftrace_graph_function;
 extern void unregister_ftrace_graph(void);
 
 extern void ftrace_graph_init_task(struct task_struct *t);
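
For illustration, a hypothetical tracer registering against the new two-callback API declared above (handler names and bodies are made up; error handling elided):

    #include <linux/ftrace.h>

    /* Hypothetical client of the new entry/return handler pair. */
    static void my_graph_entry(struct ftrace_graph_ent *trace)
    {
            /* runs at function entry: trace->func and trace->depth are set */
    }

    static void my_graph_return(struct ftrace_graph_ret *trace)
    {
            /*
             * runs at function exit: trace->rettime - trace->calltime is
             * the time spent in trace->func, in ns
             */
    }

    static int my_tracer_start(void)
    {
            /* return handler first, entry handler second */
            return register_ftrace_graph(&my_graph_return, &my_graph_entry);
    }

When nothing is registered, ftrace_graph_return and ftrace_graph_entry point at ftrace_stub (see kernel/trace/ftrace.c below), so the hooks are always safe to call.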
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 9e19976af727..7e2d3b91692d 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1498,12 +1498,13 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
-static atomic_t ftrace_retfunc_active;
+static atomic_t ftrace_graph_active;
 
-/* The callback that hooks the return of a function */
-trace_function_graph_t ftrace_graph_function =
-			(trace_function_graph_t)ftrace_stub;
+/* The callbacks that hook a function */
+trace_func_graph_ret_t ftrace_graph_return =
+			(trace_func_graph_ret_t)ftrace_stub;
+trace_func_graph_ent_t ftrace_graph_entry =
+			(trace_func_graph_ent_t)ftrace_stub;
 
 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
@@ -1569,7 +1570,8 @@ static int start_graph_tracing(void)
 	return ret;
 }
 
-int register_ftrace_graph(trace_function_graph_t func)
+int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+			trace_func_graph_ent_t entryfunc)
 {
 	int ret = 0;
 
@@ -1583,14 +1585,15 @@ int register_ftrace_graph(trace_function_graph_t func)
 		ret = -EBUSY;
 		goto out;
 	}
-	atomic_inc(&ftrace_retfunc_active);
+	atomic_inc(&ftrace_graph_active);
 	ret = start_graph_tracing();
 	if (ret) {
-		atomic_dec(&ftrace_retfunc_active);
+		atomic_dec(&ftrace_graph_active);
 		goto out;
 	}
 	ftrace_tracing_type = FTRACE_TYPE_RETURN;
-	ftrace_graph_function = func;
+	ftrace_graph_return = retfunc;
+	ftrace_graph_entry = entryfunc;
 	ftrace_startup();
 
 out:
@@ -1602,8 +1605,9 @@ void unregister_ftrace_graph(void)
 {
 	mutex_lock(&ftrace_sysctl_lock);
 
-	atomic_dec(&ftrace_retfunc_active);
-	ftrace_graph_function = (trace_function_graph_t)ftrace_stub;
+	atomic_dec(&ftrace_graph_active);
+	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
+	ftrace_graph_entry = (trace_func_graph_ent_t)ftrace_stub;
 	ftrace_shutdown();
 	/* Restore normal tracing type */
 	ftrace_tracing_type = FTRACE_TYPE_ENTER;
@@ -1614,7 +1618,7 @@ void unregister_ftrace_graph(void)
 /* Allocate a return stack for newly created task */
 void ftrace_graph_init_task(struct task_struct *t)
 {
-	if (atomic_read(&ftrace_retfunc_active)) {
+	if (atomic_read(&ftrace_graph_active)) {
 		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
 				* sizeof(struct ftrace_ret_stack),
 				GFP_KERNEL);
@@ -1638,5 +1642,3 @@ void ftrace_graph_exit_task(struct task_struct *t)
 }
 #endif
 
-
-
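
For scale: with FTRACE_RETFUNC_DEPTH = 50 and the three-field ftrace_ret_stack sketched earlier (8 + 8 + 8 bytes on x86-64, assuming no padding), each task's shadow stack costs 50 * 24 = 1200 bytes of GFP_KERNEL memory, allocated per task at fork time by ftrace_graph_init_task() and, for already-running tasks, in FTRACE_RETSTACK_ALLOC_SIZE-task batches when tracing starts.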
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f21ab2c68fd4..9d5f7c94f251 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -879,14 +879,38 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static void __trace_function_graph(struct trace_array *tr,
+static void __trace_graph_entry(struct trace_array *tr,
+				struct trace_array_cpu *data,
+				struct ftrace_graph_ent *trace,
+				unsigned long flags,
+				int pc)
+{
+	struct ring_buffer_event *event;
+	struct ftrace_graph_ent_entry *entry;
+	unsigned long irq_flags;
+
+	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+		return;
+
+	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
+					 &irq_flags);
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, flags, pc);
+	entry->ent.type = TRACE_GRAPH_ENT;
+	entry->graph_ent = *trace;
+	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+}
+
+static void __trace_graph_return(struct trace_array *tr,
 				struct trace_array_cpu *data,
 				struct ftrace_graph_ret *trace,
 				unsigned long flags,
 				int pc)
 {
 	struct ring_buffer_event *event;
-	struct ftrace_graph_entry *entry;
+	struct ftrace_graph_ret_entry *entry;
 	unsigned long irq_flags;
 
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
@@ -898,12 +922,8 @@ static void __trace_function_graph(struct trace_array *tr,
 		return;
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type = TRACE_FN_RET;
-	entry->ip = trace->func;
-	entry->parent_ip = trace->ret;
-	entry->rettime = trace->rettime;
-	entry->calltime = trace->calltime;
-	entry->overrun = trace->overrun;
+	entry->ent.type = TRACE_GRAPH_RET;
+	entry->ret = *trace;
 	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
 }
 #endif
@@ -1178,7 +1198,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-void trace_function_graph(struct ftrace_graph_ret *trace)
+void trace_graph_entry(struct ftrace_graph_ent *trace)
 {
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
@@ -1193,7 +1213,28 @@ void trace_function_graph(struct ftrace_graph_ret *trace)
 	disabled = atomic_inc_return(&data->disabled);
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
-		__trace_function_graph(tr, data, trace, flags, pc);
+		__trace_graph_entry(tr, data, trace, flags, pc);
+	}
+	atomic_dec(&data->disabled);
+	raw_local_irq_restore(flags);
+}
+
+void trace_graph_return(struct ftrace_graph_ret *trace)
+{
+	struct trace_array *tr = &global_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+	int pc;
+
+	raw_local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		__trace_graph_return(tr, data, trace, flags, pc);
 	}
 	atomic_dec(&data->disabled);
 	raw_local_irq_restore(flags);
@@ -2000,9 +2041,11 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 			trace_seq_print_cont(s, iter);
 		break;
 	}
-	case TRACE_FN_RET: {
+	case TRACE_GRAPH_RET: {
+		return print_graph_function(iter);
+	}
+	case TRACE_GRAPH_ENT: {
 		return print_graph_function(iter);
-		break;
 	}
 	case TRACE_BRANCH: {
 		struct trace_branch *field;
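
Both new helpers follow the ring buffer's reserve/fill/commit discipline. Condensed to its skeleton, as it appears in the hunks above for the entry event (the return event differs only in the type constant and the struct copied):

    event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
                                     &irq_flags);
    if (!event)
            return;                         /* no space: drop the event */
    entry = ring_buffer_event_data(event);  /* payload area to fill in */
    tracing_generic_entry_update(&entry->ent, flags, pc);
    entry->ent.type = TRACE_GRAPH_ENT;      /* TRACE_GRAPH_RET on exit */
    entry->graph_ent = *trace;              /* copy the record wholesale */
    ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);

Copying the ftrace_graph_ent/ftrace_graph_ret structs wholesale, rather than field by field as the old __trace_function_graph() did, keeps the ring buffer layout in lockstep with the definitions in include/linux/ftrace.h.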
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 72b5ef868765..ffe1bb1eb620 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -25,7 +25,8 @@ enum trace_type {
 	TRACE_BRANCH,
 	TRACE_BOOT_CALL,
 	TRACE_BOOT_RET,
-	TRACE_FN_RET,
+	TRACE_GRAPH_RET,
+	TRACE_GRAPH_ENT,
 	TRACE_USER_STACK,
 	TRACE_BTS,
 
@@ -56,14 +57,16 @@ struct ftrace_entry {
 	unsigned long parent_ip;
 };
 
+/* Function call entry */
+struct ftrace_graph_ent_entry {
+	struct trace_entry ent;
+	struct ftrace_graph_ent graph_ent;
+};
+
 /* Function return entry */
-struct ftrace_graph_entry {
+struct ftrace_graph_ret_entry {
 	struct trace_entry ent;
-	unsigned long ip;
-	unsigned long parent_ip;
-	unsigned long long calltime;
-	unsigned long long rettime;
-	unsigned long overrun;
+	struct ftrace_graph_ret ret;
 };
 extern struct tracer boot_tracer;
 
@@ -264,7 +267,10 @@ extern void __ftrace_bad_type(void);
 		IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
 		IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
 		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
-		IF_ASSIGN(var, ent, struct ftrace_graph_entry, TRACE_FN_RET);\
+		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
+			  TRACE_GRAPH_ENT);		\
+		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
+			  TRACE_GRAPH_RET);		\
 		IF_ASSIGN(var, ent, struct bts_entry, TRACE_BTS);\
 		__ftrace_bad_type();				\
 	} while (0)
@@ -397,9 +403,9 @@ void trace_function(struct trace_array *tr,
 		    unsigned long ip,
 		    unsigned long parent_ip,
 		    unsigned long flags, int pc);
-void
-trace_function_graph(struct ftrace_graph_ret *trace);
 
+void trace_graph_return(struct ftrace_graph_ret *trace);
+void trace_graph_entry(struct ftrace_graph_ent *trace);
 void trace_bts(struct trace_array *tr,
 	       unsigned long from,
 	       unsigned long to);
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index f5bad4624d2b..b6f0cc2a00cb 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -13,6 +13,7 @@
 
 #include "trace.h"
 
+#define TRACE_GRAPH_INDENT	2
 
 #define TRACE_GRAPH_PRINT_OVERRUN	0x1
 static struct tracer_opt trace_opts[] = {
@@ -26,6 +27,8 @@ static struct tracer_flags tracer_flags = {
 	.opts = trace_opts
 };
 
+/* pid on the last trace processed */
+static pid_t last_pid = -1;
 
 static int graph_trace_init(struct trace_array *tr)
 {
@@ -33,7 +36,8 @@ static int graph_trace_init(struct trace_array *tr)
 	for_each_online_cpu(cpu)
 		tracing_reset(tr, cpu);
 
-	return register_ftrace_graph(&trace_function_graph);
+	return register_ftrace_graph(&trace_graph_return,
+					&trace_graph_entry);
 }
 
 static void graph_trace_reset(struct trace_array *tr)
@@ -41,45 +45,97 @@ static void graph_trace_reset(struct trace_array *tr)
 	unregister_ftrace_graph();
 }
 
+/* If the pid changed since the last trace, output this event */
+static int verif_pid(struct trace_seq *s, pid_t pid)
+{
+	if (last_pid != -1 && last_pid == pid)
+		return 1;
 
-enum print_line_t
-print_graph_function(struct trace_iterator *iter)
+	last_pid = pid;
+	return trace_seq_printf(s, "\n------------8<---------- thread %d"
+				    " ------------8<----------\n\n",
+				    pid);
+}
+
+static enum print_line_t
+print_graph_entry(struct ftrace_graph_ent *call, struct trace_seq *s,
+		  struct trace_entry *ent)
 {
-	struct trace_seq *s = &iter->seq;
-	struct trace_entry *entry = iter->ent;
-	struct ftrace_graph_entry *field;
+	int i;
 	int ret;
 
-	if (entry->type == TRACE_FN_RET) {
-		trace_assign_type(field, entry);
-		ret = trace_seq_printf(s, "%pF -> ", (void *)field->parent_ip);
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
+	if (!verif_pid(s, ent->pid))
+		return TRACE_TYPE_PARTIAL_LINE;
 
-		ret = seq_print_ip_sym(s, field->ip,
-				trace_flags & TRACE_ITER_SYM_MASK);
+	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
+		ret = trace_seq_printf(s, " ");
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
+	}
+
+	ret = seq_print_ip_sym(s, call->func, 0);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	ret = trace_seq_printf(s, "() {\n");
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+	return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t
+print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
+		   struct trace_entry *ent)
+{
+	int i;
+	int ret;
+
+	if (!verif_pid(s, ent->pid))
+		return TRACE_TYPE_PARTIAL_LINE;
 
-		ret = trace_seq_printf(s, " (%llu ns)",
-					field->rettime - field->calltime);
+	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
+		ret = trace_seq_printf(s, " ");
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
+	}
+
+	ret = trace_seq_printf(s, "} ");
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
 
-		if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
-			ret = trace_seq_printf(s, " (Overruns: %lu)",
-						field->overrun);
-			if (!ret)
-				return TRACE_TYPE_PARTIAL_LINE;
-		}
+	ret = trace_seq_printf(s, "%llu\n", trace->rettime - trace->calltime);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
 
-		ret = trace_seq_printf(s, "\n");
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
+		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
+					trace->overrun);
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
+	}
+	return TRACE_TYPE_HANDLED;
+}
+
+enum print_line_t
+print_graph_function(struct trace_iterator *iter)
+{
+	struct trace_seq *s = &iter->seq;
+	struct trace_entry *entry = iter->ent;
 
-		return TRACE_TYPE_HANDLED;
+	switch (entry->type) {
+	case TRACE_GRAPH_ENT: {
+		struct ftrace_graph_ent_entry *field;
+		trace_assign_type(field, entry);
+		return print_graph_entry(&field->graph_ent, s, entry);
+	}
+	case TRACE_GRAPH_RET: {
+		struct ftrace_graph_ret_entry *field;
+		trace_assign_type(field, entry);
+		return print_graph_return(&field->ret, s, entry);
+	}
+	default:
+		return TRACE_TYPE_UNHANDLED;
 	}
-	return TRACE_TYPE_UNHANDLED;
 }
 
 static struct tracer graph_trace __read_mostly = {
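
With entry and return events plus depth-based indentation, the output becomes a readable call graph rather than one flat line per return. An illustrative fragment (hand-written, not a captured trace; durations in ns as printed by print_graph_return()):

    ------------8<---------- thread 2794 ------------8<----------

    sys_read() {
      fget_light() {
      } 616
      vfs_read() {
        rw_verify_area() {
        } 694
      } 1543
    } 2039

The 8<-separator comes from verif_pid() whenever output switches to a different task, so interleaved traces from several threads stay attributable.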
diff --git a/kernel/trace/trace_functions_return.c b/kernel/trace/trace_functions_return.c
deleted file mode 100644
index e00d64509c9c..000000000000
--- a/kernel/trace/trace_functions_return.c
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- *
- * Function return tracer.
- * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
- * Mostly borrowed from function tracer which
- * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
- *
- */
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
-#include <linux/ftrace.h>
-#include <linux/fs.h>
-
-#include "trace.h"
-
-
-#define TRACE_RETURN_PRINT_OVERRUN	0x1
-static struct tracer_opt trace_opts[] = {
-	/* Display overruns or not */
-	{ TRACER_OPT(overrun, TRACE_RETURN_PRINT_OVERRUN) },
-	{ } /* Empty entry */
-};
-
-static struct tracer_flags tracer_flags = {
-	.val = 0, /* Don't display overruns by default */
-	.opts = trace_opts
-};
-
-
-static int return_trace_init(struct trace_array *tr)
-{
-	int cpu;
-	for_each_online_cpu(cpu)
-		tracing_reset(tr, cpu);
-
-	return register_ftrace_return(&trace_function_return);
-}
-
-static void return_trace_reset(struct trace_array *tr)
-{
-	unregister_ftrace_return();
-}
-
-
-enum print_line_t
-print_return_function(struct trace_iterator *iter)
-{
-	struct trace_seq *s = &iter->seq;
-	struct trace_entry *entry = iter->ent;
-	struct ftrace_ret_entry *field;
-	int ret;
-
-	if (entry->type == TRACE_FN_RET) {
-		trace_assign_type(field, entry);
-		ret = trace_seq_printf(s, "%pF -> ", (void *)field->parent_ip);
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-
-		ret = seq_print_ip_sym(s, field->ip,
-					trace_flags & TRACE_ITER_SYM_MASK);
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-
-		ret = trace_seq_printf(s, " (%llu ns)",
-					field->rettime - field->calltime);
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-
-		if (tracer_flags.val & TRACE_RETURN_PRINT_OVERRUN) {
-			ret = trace_seq_printf(s, " (Overruns: %lu)",
-						field->overrun);
-			if (!ret)
-				return TRACE_TYPE_PARTIAL_LINE;
-		}
-
-		ret = trace_seq_printf(s, "\n");
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-
-		return TRACE_TYPE_HANDLED;
-	}
-	return TRACE_TYPE_UNHANDLED;
-}
-
-static struct tracer return_trace __read_mostly = {
-	.name		= "return",
-	.init		= return_trace_init,
-	.reset		= return_trace_reset,
-	.print_line	= print_return_function,
-	.flags		= &tracer_flags,
-};
-
-static __init int init_return_trace(void)
-{
-	return register_tracer(&return_trace);
-}
-
-device_initcall(init_return_trace);