| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-03-30 13:23:53 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-03-30 13:23:53 -0400 |
| commit | ebc8eca169be0283d5a7ab54c4411dd59cfb0f27 (patch) | |
| tree | 831f6d577da3469e3154bf29409281c640bb67df /kernel | |
| parent | 25c1a411e8a0a709abe3449866125dc290711ea8 (diff) | |
| parent | 9ff9a26b786c35ee8d2a66222924a807ec851a9f (diff) | |
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (180 commits)
powerpc: clean up ssi.txt, add definition for fsl,ssi-asynchronous
powerpc/85xx: Add support for the "socrates" board (MPC8544).
powerpc: Fix bugs introduced by sysfs changes
powerpc: Sanitize stack pointer in signal handling code
powerpc: Add write barrier before enabling DTL flags
powerpc/83xx: Update ranges in gianfar node to match other dts
powerpc/86xx: Move gianfar mdio nodes under the ethernet nodes
powerpc/85xx: Move gianfar mdio nodes under the ethernet nodes
powerpc/83xx: Move gianfar mdio nodes under the ethernet nodes
powerpc/83xx: Add power management support for MPC837x boards
powerpc/mm: Introduce early_init_mmu() on 64-bit
powerpc/mm: Add option for non-atomic PTE updates to ppc64
powerpc/mm: Fix printk type warning in mmu_context_nohash
powerpc/mm: Rename arch/powerpc/kernel/mmap.c to mmap_64.c
powerpc/mm: Merge various PTE bits and accessors definitions
powerpc/mm: Tweak PTE bit combination definitions
powerpc/cell: Fix iommu exception reporting
powerpc/mm: e300c2/c3/c4 TLB errata workaround
powerpc/mm: Used free register to save a few cycles in SW TLB miss handling
powerpc/mm: Remove unused register usage in SW TLB miss handling
...
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/trace/trace_functions_graph.c | 75 |
1 file changed, 75 insertions, 0 deletions
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 930c08e5b38e..dce71a5b51bc 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -42,6 +42,81 @@ static struct tracer_flags tracer_flags = {
 /* pid on the last trace processed */
 static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
 
+/* Add a function return address to the trace stack on thread info.*/
+int
+ftrace_push_return_trace(unsigned long ret, unsigned long long time,
+			unsigned long func, int *depth)
+{
+	int index;
+
+	if (!current->ret_stack)
+		return -EBUSY;
+
+	/* The return trace stack is full */
+	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
+		atomic_inc(&current->trace_overrun);
+		return -EBUSY;
+	}
+
+	index = ++current->curr_ret_stack;
+	barrier();
+	current->ret_stack[index].ret = ret;
+	current->ret_stack[index].func = func;
+	current->ret_stack[index].calltime = time;
+	*depth = index;
+
+	return 0;
+}
+
+/* Retrieve a function return address to the trace stack on thread info.*/
+void
+ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
+{
+	int index;
+
+	index = current->curr_ret_stack;
+
+	if (unlikely(index < 0)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		/* Might as well panic, otherwise we have no where to go */
+		*ret = (unsigned long)panic;
+		return;
+	}
+
+	*ret = current->ret_stack[index].ret;
+	trace->func = current->ret_stack[index].func;
+	trace->calltime = current->ret_stack[index].calltime;
+	trace->overrun = atomic_read(&current->trace_overrun);
+	trace->depth = index;
+	barrier();
+	current->curr_ret_stack--;
+
+}
+
+/*
+ * Send the trace to the ring-buffer.
+ * @return the original return address.
+ */
+unsigned long ftrace_return_to_handler(void)
+{
+	struct ftrace_graph_ret trace;
+	unsigned long ret;
+
+	ftrace_pop_return_trace(&trace, &ret);
+	trace.rettime = cpu_clock(raw_smp_processor_id());
+	ftrace_graph_return(&trace);
+
+	if (unlikely(!ret)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		/* Might as well panic. What else to do? */
+		ret = (unsigned long)panic;
+	}
+
+	return ret;
+}
+
 static int graph_trace_init(struct trace_array *tr)
 {
 	int cpu, ret;
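For context, the three helpers added above are meant to be driven from architecture-specific entry and exit hooks: at function entry the arch code records the real return address with ftrace_push_return_trace() and redirects the return through a trampoline, and that trampoline later calls ftrace_return_to_handler() to hand the exit event to the tracer and recover the original address. The fragment below is a minimal, hypothetical C sketch of that pairing, not code from this tree; the names graph_entry_sketch and graph_return_trampoline_sketch are illustrative, and real arch hooks do this in assembly.

```c
/* Hypothetical sketch of how arch hooks could pair with the helpers above. */

/* Stands in for the assembly return trampoline: ask the tracer for the
 * original return address that was saved at function entry. */
static unsigned long graph_return_trampoline_sketch(void)
{
	return ftrace_return_to_handler();
}

/* Called at function entry; *parent points at the caller's saved return address. */
static void graph_entry_sketch(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old_ret = *parent;
	unsigned long long now = cpu_clock(raw_smp_processor_id());
	int depth;	/* real hooks pass this on to the tracer's entry callback */

	/*
	 * Push the real return address; -EBUSY means the per-task ret_stack
	 * is missing or full, so leave the return path untouched.
	 */
	if (ftrace_push_return_trace(old_ret, now, self_addr, &depth))
		return;

	/* Divert the function's return through the trampoline above. */
	*parent = (unsigned long)graph_return_trampoline_sketch;
}
```

In the real code the trampoline is assembly that jumps to whatever address ftrace_return_to_handler() returns, and the -EBUSY path together with the trace_overrun counter is how the tracer reports returns it had to drop once the per-task stack (FTRACE_RETFUNC_DEPTH entries) fills up.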
