| author | Len Brown <len.brown@intel.com> | 2009-04-05 02:14:15 -0400 |
|---|---|---|
| committer | Len Brown <len.brown@intel.com> | 2009-04-05 02:14:15 -0400 |
| commit | 478c6a43fcbc6c11609f8cee7c7b57223907754f (patch) | |
| tree | a7f7952099da60d33032aed6de9c0c56c9f8779e /kernel/trace/trace_functions_graph.c | |
| parent | 8a3f257c704e02aee9869decd069a806b45be3f1 (diff) | |
| parent | 6bb597507f9839b13498781e481f5458aea33620 (diff) | |
Merge branch 'linus' into release
Conflicts:
arch/x86/kernel/cpu/cpufreq/longhaul.c
Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
| -rw-r--r-- | kernel/trace/trace_functions_graph.c | 75 |
1 files changed, 75 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 930c08e5b38e..dce71a5b51bc 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -42,6 +42,81 @@ static struct tracer_flags tracer_flags = {
 /* pid on the last trace processed */
 static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
 
+/* Add a function return address to the trace stack on thread info. */
+int
+ftrace_push_return_trace(unsigned long ret, unsigned long long time,
+			 unsigned long func, int *depth)
+{
+	int index;
+
+	if (!current->ret_stack)
+		return -EBUSY;
+
+	/* The return trace stack is full */
+	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
+		atomic_inc(&current->trace_overrun);
+		return -EBUSY;
+	}
+
+	index = ++current->curr_ret_stack;
+	barrier();
+	current->ret_stack[index].ret = ret;
+	current->ret_stack[index].func = func;
+	current->ret_stack[index].calltime = time;
+	*depth = index;
+
+	return 0;
+}
+
+/* Retrieve a function return address from the trace stack on thread info. */
+void
+ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
+{
+	int index;
+
+	index = current->curr_ret_stack;
+
+	if (unlikely(index < 0)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		/* Might as well panic, otherwise we have nowhere to go */
+		*ret = (unsigned long)panic;
+		return;
+	}
+
+	*ret = current->ret_stack[index].ret;
+	trace->func = current->ret_stack[index].func;
+	trace->calltime = current->ret_stack[index].calltime;
+	trace->overrun = atomic_read(&current->trace_overrun);
+	trace->depth = index;
+	barrier();
+	current->curr_ret_stack--;
+
+}
+
+/*
+ * Send the trace to the ring-buffer.
+ * @return the original return address.
+ */
+unsigned long ftrace_return_to_handler(void)
+{
+	struct ftrace_graph_ret trace;
+	unsigned long ret;
+
+	ftrace_pop_return_trace(&trace, &ret);
+	trace.rettime = cpu_clock(raw_smp_processor_id());
+	ftrace_graph_return(&trace);
+
+	if (unlikely(!ret)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		/* Might as well panic. What else to do? */
+		ret = (unsigned long)panic;
+	}
+
+	return ret;
+}
+
 static int graph_trace_init(struct trace_array *tr)
 {
 	int cpu, ret;
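For context (not part of this commit): these generic helpers are consumed by per-architecture code. On function entry, the arch mcount hook pushes the real return address with ftrace_push_return_trace() and rewrites the on-stack return address to point at an asm trampoline; when the traced function returns, the trampoline calls ftrace_return_to_handler() to emit the exit event and recover the original address. The sketch below is modeled loosely on x86's prepare_ftrace_return() of this kernel era and is an illustration only: the real code accesses *parent through fault-safe helpers and a cmpxchg, which is omitted here.

```c
/*
 * Illustrative sketch only -- not part of this commit. Shows how an
 * architecture's entry hook would consume the generic helpers above.
 * The fault-safe access to *parent done by the real x86 code is omitted.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	unsigned long long calltime;
	struct ftrace_graph_ent trace;
	int depth;

	/* Arch asm trampoline that ends up calling ftrace_return_to_handler() */
	extern void return_to_handler(void);

	old = *parent;			/* the real return address */
	calltime = cpu_clock(raw_smp_processor_id());

	/* Record it on current's shadow return stack; back off if full. */
	if (ftrace_push_return_trace(old, calltime, self_addr, &depth) == -EBUSY)
		return;

	trace.func = self_addr;
	trace.depth = depth;

	/* Only hijack the return address if the entry callback accepts it. */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		return;
	}

	/* Returning now unwinds through return_to_handler() instead. */
	*parent = (unsigned long)&return_to_handler;
}
```

Note the barrier() calls in the push and pop paths above: they appear to keep the curr_ret_stack index update ordered against the writes and reads of the entry itself, so code that interrupts a half-finished push or pop on the same CPU does not observe a reserved-but-unfilled (or freed-but-still-read) slot.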
