author		Len Brown <len.brown@intel.com>	2009-04-05 02:14:15 -0400
committer	Len Brown <len.brown@intel.com>	2009-04-05 02:14:15 -0400
commit		478c6a43fcbc6c11609f8cee7c7b57223907754f (patch)
tree		a7f7952099da60d33032aed6de9c0c56c9f8779e /kernel/trace
parent		8a3f257c704e02aee9869decd069a806b45be3f1 (diff)
parent		6bb597507f9839b13498781e481f5458aea33620 (diff)
Merge branch 'linus' into release
Conflicts:
arch/x86/kernel/cpu/cpufreq/longhaul.c
Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/Kconfig			9
-rw-r--r--	kernel/trace/ftrace.c			2
-rw-r--r--	kernel/trace/trace_functions_graph.c	75
3 files changed, 80 insertions, 6 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 34e707e5ab87..504086ab4443 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -72,11 +72,10 @@ config FUNCTION_GRAPH_TRACER
 	help
 	  Enable the kernel to trace a function at both its return
 	  and its entry.
-	  It's first purpose is to trace the duration of functions and
-	  draw a call graph for each thread with some informations like
-	  the return value.
-	  This is done by setting the current return address on the current
-	  task structure into a stack of calls.
+	  Its first purpose is to trace the duration of functions and
+	  draw a call graph for each thread with some information like
+	  the return value. This is done by setting the current return
+	  address on the current task structure into a stack of calls.
 
 config IRQSOFF_TRACER
 	bool "Interrupts-off Latency Tracer"
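The help text above compresses the whole mechanism into one sentence: each traced call pushes its real return address, together with a timestamp, onto a small stack hanging off the task structure, and each return pops that entry to recover both the address and the call's duration. A minimal userspace sketch of that bookkeeping follows; every name in it (shadow_entry, shadow_push, shadow_pop, SHADOW_DEPTH) is illustrative rather than kernel API, and the fixed-array-plus-overrun-counter shape mirrors the ret_stack handling added in trace_functions_graph.c below.

/*
 * Userspace model of the graph tracer's per-task call stack.
 * All names here are illustrative, not kernel API.
 */
#include <stdio.h>
#include <time.h>

#define SHADOW_DEPTH 50			/* plays the role of FTRACE_RETFUNC_DEPTH */

struct shadow_entry {
	unsigned long ret;		/* saved return address */
	unsigned long func;		/* traced function's address */
	unsigned long long calltime;	/* timestamp taken at entry */
};

static struct shadow_entry stack[SHADOW_DEPTH];
static int curr = -1;			/* like current->curr_ret_stack */
static unsigned long overrun;		/* like current->trace_overrun */

static unsigned long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* Entry side: remember where the traced function must really return. */
static int shadow_push(unsigned long ret, unsigned long func)
{
	if (curr == SHADOW_DEPTH - 1) {
		overrun++;		/* stack full: count it, trace nothing */
		return -1;
	}
	curr++;
	stack[curr].ret = ret;
	stack[curr].func = func;
	stack[curr].calltime = now_ns();
	return 0;
}

/* Return side: recover the real return address and compute the duration. */
static unsigned long shadow_pop(unsigned long long *duration)
{
	struct shadow_entry *e = &stack[curr--];

	*duration = now_ns() - e->calltime;
	return e->ret;
}

int main(void)
{
	unsigned long long d;
	unsigned long ret;

	/* Fake addresses; a real tracer records actual code addresses. */
	shadow_push(0x1000UL, 0x2000UL);
	ret = shadow_pop(&d);
	printf("return to %#lx after %llu ns (overruns: %lu)\n", ret, d, overrun);
	return 0;
}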
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index fdf913dfc7e8..53e8c8bc0c98 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1908,7 +1908,7 @@ int register_ftrace_function(struct ftrace_ops *ops)
 }
 
 /**
- * unregister_ftrace_function - unresgister a function for profiling.
+ * unregister_ftrace_function - unregister a function for profiling.
  * @ops - ops structure that holds the function to unregister
  *
  * Unregister a function that was added to be called by ftrace profiling.
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 930c08e5b38e..dce71a5b51bc 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -42,6 +42,81 @@ static struct tracer_flags tracer_flags = {
 /* pid on the last trace processed */
 static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
 
+/* Add a function return address to the trace stack on thread info. */
+int
+ftrace_push_return_trace(unsigned long ret, unsigned long long time,
+			 unsigned long func, int *depth)
+{
+	int index;
+
+	if (!current->ret_stack)
+		return -EBUSY;
+
+	/* The return trace stack is full */
+	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
+		atomic_inc(&current->trace_overrun);
+		return -EBUSY;
+	}
+
+	index = ++current->curr_ret_stack;
+	barrier();
+	current->ret_stack[index].ret = ret;
+	current->ret_stack[index].func = func;
+	current->ret_stack[index].calltime = time;
+	*depth = index;
+
+	return 0;
+}
+
+/* Retrieve a function return address from the trace stack on thread info. */
+void
+ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
+{
+	int index;
+
+	index = current->curr_ret_stack;
+
+	if (unlikely(index < 0)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		/* Might as well panic, otherwise we have nowhere to go */
+		*ret = (unsigned long)panic;
+		return;
+	}
+
+	*ret = current->ret_stack[index].ret;
+	trace->func = current->ret_stack[index].func;
+	trace->calltime = current->ret_stack[index].calltime;
+	trace->overrun = atomic_read(&current->trace_overrun);
+	trace->depth = index;
+	barrier();
+	current->curr_ret_stack--;
+}
+
+/*
+ * Send the trace to the ring-buffer.
+ * @return the original return address.
+ */
+unsigned long ftrace_return_to_handler(void)
+{
+	struct ftrace_graph_ret trace;
+	unsigned long ret;
+
+	ftrace_pop_return_trace(&trace, &ret);
+	trace.rettime = cpu_clock(raw_smp_processor_id());
+	ftrace_graph_return(&trace);
+
+	if (unlikely(!ret)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		/* Might as well panic. What else to do? */
+		ret = (unsigned long)panic;
+	}
+
+	return ret;
+}
+
 static int graph_trace_init(struct trace_array *tr)
 {
 	int cpu, ret;
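As a rough sketch of how the three functions above are meant to be driven (the exact wiring lives in arch code and is not part of this diff): an entry hook rewrites the traced function's return address to point at a return trampoline, pushing the real address via ftrace_push_return_trace(); when the function returns into the trampoline, ftrace_return_to_handler() pops the entry, stamps rettime, reports the trace, and yields the original address to resume at. The userspace model below imitates that diversion; fake_prepare_return, fake_return_to_handler, and FAKE_TRAMPOLINE are invented stand-ins, not kernel symbols.

/*
 * Userspace sketch of the call flow around the functions added above.
 * All names here are illustrative stand-ins for the arch entry hook
 * and return trampoline; they are not kernel API.
 */
#include <stdio.h>

static unsigned long saved_ret;		/* one-slot stand-in for ret_stack */
static int depth = -1;

#define FAKE_TRAMPOLINE 0xdeadbeefUL	/* would be the arch's return trampoline */

/* Entry hook: divert the return address, remembering the real one. */
static void fake_prepare_return(unsigned long *parent, unsigned long self)
{
	saved_ret = *parent;		/* push, as ftrace_push_return_trace() */
	depth++;
	*parent = FAKE_TRAMPOLINE;	/* function will now "return" into us */
	printf("enter %#lx at depth %d, return diverted\n", self, depth);
}

/* Trampoline body: recover the real return address and hand it back. */
static unsigned long fake_return_to_handler(void)
{
	unsigned long ret = saved_ret;	/* pop, as ftrace_pop_return_trace() */

	printf("exit at depth %d, resuming at %#lx\n", depth, ret);
	depth--;
	return ret;			/* arch asm would jump here */
}

int main(void)
{
	unsigned long parent = 0x4000UL;	/* fake caller return address */

	fake_prepare_return(&parent, 0x2000UL);
	/* ... traced function body runs, then returns into the trampoline ... */
	fake_return_to_handler();
	return 0;
}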