author		Steven Rostedt <srostedt@redhat.com>	2009-03-24 01:10:15 -0400
committer	Steven Rostedt <srostedt@redhat.com>	2009-03-24 09:33:30 -0400
commit		8aef2d2856158a36c295a8d1288281e4839bff13 (patch)
tree		eef85397dedcad001982724e0be2da6dc089b39f
parent		05ce5818adee8f8efd0a5ca0d900a6789012516b (diff)
function-graph: ignore times across schedule
Impact: more accurate timings

The current method of function graph tracing does not take into
account the time spent when a task is not running. This shows
functions that call schedule have increased costs:

 3) + 18.664 us   |      }
 ------------------------------------------
 3)    <idle>-0    =>  kblockd-123
 ------------------------------------------

 3)               |  finish_task_switch() {
 3)   1.441 us    |    _spin_unlock_irq();
 3)   3.966 us    |  }
 3) ! 2959.433 us |  }
 3) ! 2961.465 us |  }

This patch uses the tracepoint in the scheduling context switch to
account for time that has elapsed while a task is scheduled out.
Now we see:

 ------------------------------------------
 3)    <idle>-0    =>  edac-po-1067
 ------------------------------------------

 3)               |  finish_task_switch() {
 3)   0.685 us    |    _spin_unlock_irq();
 3)   2.331 us    |  }
 3) + 41.439 us   |  }
 3) + 42.663 us   |  }

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
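For context: the graph tracer reports a frame's cost as the difference
between its return timestamp and the calltime recorded at entry, so
advancing calltime by the time the task spent scheduled out removes the
sleep from the printed duration. A minimal sketch of that arithmetic
(illustrative names, not kernel code):

	/*
	 * Sketch: the duration the graph tracer prints is
	 * rettime - calltime.  Crediting the sleep time to calltime,
	 * as the probe in this patch does, leaves only on-CPU time.
	 */
	static unsigned long long
	graph_duration(unsigned long long calltime,
		       unsigned long long rettime,
		       unsigned long long slept)
	{
		calltime += slept;		/* what the probe does */
		return rettime - calltime;	/* reported duration   */
	}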
-rw-r--r--	include/linux/sched.h	 2
-rw-r--r--	kernel/trace/ftrace.c	36
2 files changed, 38 insertions, 0 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 89cd308cc7a5..471e36d30123 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1409,6 +1409,8 @@ struct task_struct {
 	int curr_ret_stack;
 	/* Stack of return addresses for return function tracing */
 	struct ftrace_ret_stack	*ret_stack;
+	/* time stamp for last schedule */
+	unsigned long long ftrace_timestamp;
 	/*
 	 * Number of functions that haven't been traced
 	 * because of depth overrun.
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index c81a759fbf76..0b90364d1a2c 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -29,6 +29,8 @@
 #include <linux/list.h>
 #include <linux/hash.h>
 
+#include <trace/sched.h>
+
 #include <asm/ftrace.h>
 
 #include "trace.h"
@@ -2590,6 +2592,31 @@ free:
 	return ret;
 }
 
+static void
+ftrace_graph_probe_sched_switch(struct rq *__rq, struct task_struct *prev,
+				struct task_struct *next)
+{
+	unsigned long long timestamp;
+	int index;
+
+	timestamp = trace_clock_local();
+
+	prev->ftrace_timestamp = timestamp;
+
+	/* only process tasks that we timestamped */
+	if (!next->ftrace_timestamp)
+		return;
+
+	/*
+	 * Update all the counters in next to make up for the
+	 * time next was sleeping.
+	 */
+	timestamp -= next->ftrace_timestamp;
+
+	for (index = next->curr_ret_stack; index >= 0; index--)
+		next->ret_stack[index].calltime += timestamp;
+}
+
 /* Allocate a return stack for each task */
 static int start_graph_tracing(void)
 {
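Note that the probe walks every live frame on next's return stack, not
just the innermost one: every function that was already on the stack
when the task was switched out spans the sleep, so each frame's
calltime has to be advanced or the outer frames would still report the
sleep in their durations. A reduced model of that walk (struct and
names simplified; the real ftrace_ret_stack carries more fields than
calltime):

	/* Reduced model: only the calltime field matters here. */
	struct ret_frame {
		unsigned long long calltime;
	};

	static void
	credit_sleep(struct ret_frame *stack, int curr_ret_stack,
		     unsigned long long slept)
	{
		int index;

		/* every frame active across the sleep gets the credit */
		for (index = curr_ret_stack; index >= 0; index--)
			stack[index].calltime += slept;
	}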
@@ -2611,6 +2638,13 @@ static int start_graph_tracing(void)
 		ret = alloc_retstack_tasklist(ret_stack_list);
 	} while (ret == -EAGAIN);
 
+	if (!ret) {
+		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch);
+		if (ret)
+			pr_info("ftrace_graph: Couldn't activate tracepoint"
+				" probe to kernel_sched_switch\n");
+	}
+
 	kfree(ret_stack_list);
 	return ret;
 }
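register_trace_sched_switch() is generated by the tracepoint
declaration in <trace/sched.h>, and the probe's signature must match
the tracepoint's prototype exactly, which is why the probe takes the
struct rq argument it never uses. Roughly the declaration this patch
relies on (quoted from memory of the 2.6.29-era header, so treat the
exact spelling as an assumption):

	/* Assumed shape of include/trace/sched.h at the time: */
	DECLARE_TRACE(sched_switch,
		TPPROTO(struct rq *rq, struct task_struct *prev,
			struct task_struct *next),
		TPARGS(rq, prev, next));

	/*
	 * DECLARE_TRACE() generates, among other things,
	 * register_trace_sched_switch() and
	 * unregister_trace_sched_switch(), both taking a probe of type
	 *   void (*)(struct rq *, struct task_struct *,
	 *            struct task_struct *);
	 */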
@@ -2674,6 +2708,7 @@ void unregister_ftrace_graph(void)
 	mutex_lock(&ftrace_lock);
 
 	atomic_dec(&ftrace_graph_active);
+	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
 	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
 	ftrace_graph_entry = ftrace_graph_entry_stub;
 	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
@@ -2694,6 +2729,7 @@ void ftrace_graph_init_task(struct task_struct *t)
 		t->curr_ret_stack = -1;
 		atomic_set(&t->tracing_graph_pause, 0);
 		atomic_set(&t->trace_overrun, 0);
+		t->ftrace_timestamp = 0;
 	} else
 		t->ret_stack = NULL;
 }
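A closing note on this last hunk: a zero ftrace_timestamp is what the
probe's "!next->ftrace_timestamp" guard keys off, so a freshly created
task, which has never been switched out and has no baseline to
subtract, is left alone on its first switch-in. A trivial restatement
of that guard (illustrative helper, not part of the patch):

	/* Illustrative only: 0 means "never timestamped", set in
	 * ftrace_graph_init_task() and skipped by the probe. */
	static int has_sleep_baseline(struct task_struct *next)
	{
		return next->ftrace_timestamp != 0;
	}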