author      Linus Torvalds <torvalds@linux-foundation.org>  2013-04-29 16:55:38 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>  2013-04-29 16:55:38 -0400
commit      9e8529afc4518f4e5d610001545ebc97e1333c79
tree        26e1aa2cbb50f3f511cfa7d8e39e6b7bd9221b68  /kernel/trace/trace_stack.c
parent      ec25e246b94a3233ab064994ef05a170bdba0e7c
parent      4c69e6ea415a35eb7f0fc8ee9390c8f7436492a2
Merge tag 'trace-3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
 "Along with the usual minor fixes and clean ups there are a few major
  changes with this pull request.

  1) Multiple buffers for the ftrace facility

  This feature has been requested by many people over the last few
  years.  I even heard that Google was about to implement it themselves.
  I finally had time and cleaned up the code such that you can now
  create multiple instances of the ftrace buffer and have different
  events go to different buffers.  This way, a low frequency event will
  not be lost in the noise of a high frequency event.

  Note, currently only events can go to different buffers, the tracers
  (ie function, function_graph and the latency tracers) still can only
  be written to the main buffer.

  2) The function tracer triggers have now been extended.

  The function tracer had two triggers.  One to enable tracing when a
  function is hit, and one to disable tracing.  Now you can record a
  stack trace on a single (or many) function(s), take a snapshot of the
  buffer (copy it to the snapshot buffer), and you can enable or disable
  an event to be traced when a function is hit.

  3) A perf clock has been added.

  A "perf" clock can be chosen to be used when tracing.  This will cause
  ftrace to use the same clock as perf uses, and hopefully this will
  make it easier to interleave the perf and ftrace data for analysis."

* tag 'trace-3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (82 commits)
  tracepoints: Prevent null probe from being added
  tracing: Compare to 1 instead of zero for is_signed_type()
  tracing: Remove obsolete macro guard _TRACE_PROFILE_INIT
  ftrace: Get rid of ftrace_profile_bits
  tracing: Check return value of tracing_init_dentry()
  tracing: Get rid of unneeded key calculation in ftrace_hash_move()
  tracing: Reset ftrace_graph_filter_enabled if count is zero
  tracing: Fix off-by-one on allocating stat->pages
  kernel: tracing: Use strlcpy instead of strncpy
  tracing: Update debugfs README file
  tracing: Fix ftrace_dump()
  tracing: Rename trace_event_mutex to trace_event_sem
  tracing: Fix comment about prefix in arch_syscall_match_sym_name()
  tracing: Convert trace_destroy_fields() to static
  tracing: Move find_event_field() into trace_events.c
  tracing: Use TRACE_MAX_PRINT instead of constant
  tracing: Use pr_warn_once instead of open coded implementation
  ring-buffer: Add ring buffer startup selftest
  tracing: Bring Documentation/trace/ftrace.txt up to date
  tracing: Add "perf" trace_clock
  ...

Conflicts:
	kernel/trace/ftrace.c
	kernel/trace/trace.c
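As a quick orientation before the diff: all three headline features are driven
through files under the tracing debugfs directory. A minimal user-space sketch
follows, assuming debugfs is mounted at /sys/kernel/debug; the instance name
"foo", the helper write_str(), and the particular function/event chosen are
illustrative only, not part of this pull request:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/stat.h>
    #include <unistd.h>

    #define TRACEDIR "/sys/kernel/debug/tracing"

    /* Write a value into one of the tracing control files. */
    static int write_str(const char *path, const char *val)
    {
            int fd = open(path, O_WRONLY);

            if (fd < 0)
                    return -1;
            if (write(fd, val, strlen(val)) < 0) {
                    close(fd);
                    return -1;
            }
            return close(fd);
    }

    int main(void)
    {
            /* 1) A separate buffer instance, so sched_switch events do
             * not drown in the main buffer's traffic. */
            mkdir(TRACEDIR "/instances/foo", 0750);
            write_str(TRACEDIR "/instances/foo/events/sched/sched_switch/enable", "1");

            /* 2) An extended function trigger: take a snapshot of the
             * buffer when do_fork is hit. */
            write_str(TRACEDIR "/set_ftrace_filter", "do_fork:snapshot");

            /* 3) Timestamp with the same clock perf uses. */
            write_str(TRACEDIR "/trace_clock", "perf");
            return 0;
    }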
Diffstat (limited to 'kernel/trace/trace_stack.c')
-rw-r--r--  kernel/trace/trace_stack.c  76
1 file changed, 69 insertions(+), 7 deletions(-)
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 83a8b5b7bd35..b20428c5efe2 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -20,13 +20,24 @@
 
 #define STACK_TRACE_ENTRIES 500
 
+#ifdef CC_USING_FENTRY
+# define fentry		1
+#else
+# define fentry		0
+#endif
+
 static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
 static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
 
+/*
+ * Reserve one entry for the passed in ip. This will allow
+ * us to remove most or all of the stack size overhead
+ * added by the stack tracer itself.
+ */
 static struct stack_trace max_stack_trace = {
-	.max_entries		= STACK_TRACE_ENTRIES,
-	.entries		= stack_dump_trace,
+	.max_entries		= STACK_TRACE_ENTRIES - 1,
+	.entries		= &stack_dump_trace[1],
 };
 
 static unsigned long max_stack_size;
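The effect of the last change in this hunk is subtle: .entries now points one
element into the backing array and max_entries shrinks by one, so the stack
saver fills slots 1..N while slot 0 stays reserved for the instruction pointer
handed to check_stack(). A compilable sketch of the same layout (the names
dump and view are hypothetical; the [0 ... N] range initializer is a GNU C
extension, as in the kernel source):

    #include <limits.h>
    #include <stdio.h>

    #define ENTRIES 8

    /* Backing storage; slot 0 is reserved for the traced ip. */
    static unsigned long dump[ENTRIES + 1] =
            { [0 ... ENTRIES] = ULONG_MAX };

    static struct {
            unsigned int max_entries;
            unsigned long *entries;	/* a saver writes here, i.e. dump[1..] */
    } view = {
            .max_entries	= ENTRIES - 1,
            .entries	= &dump[1],
    };

    int main(void)
    {
            view.entries[0] = 0xabcd;	/* lands in dump[1] */
            dump[0] = 0x1234;		/* the reserved ip slot */
            printf("dump[0]=%#lx dump[1]=%#lx max=%u\n",
                   dump[0], dump[1], view.max_entries);
            return 0;
    }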
@@ -39,25 +50,34 @@ static DEFINE_MUTEX(stack_sysctl_mutex);
 int stack_tracer_enabled;
 static int last_stack_tracer_enabled;
 
-static inline void check_stack(void)
+static inline void
+check_stack(unsigned long ip, unsigned long *stack)
 {
	 unsigned long this_size, flags;
	 unsigned long *p, *top, *start;
+	static int tracer_frame;
+	int frame_size = ACCESS_ONCE(tracer_frame);
	 int i;
 
-	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
+	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	 this_size = THREAD_SIZE - this_size;
+	/* Remove the frame of the tracer */
+	this_size -= frame_size;
 
	 if (this_size <= max_stack_size)
		 return;
 
	 /* we do not handle interrupt stacks yet */
-	if (!object_is_on_stack(&this_size))
+	if (!object_is_on_stack(stack))
		 return;
 
	 local_irq_save(flags);
	 arch_spin_lock(&max_stack_lock);
 
+	/* In case another CPU set the tracer_frame on us */
+	if (unlikely(!frame_size))
+		this_size -= tracer_frame;
+
	 /* a race could have already updated it */
	 if (this_size <= max_stack_size)
		 goto out;
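Note the two-step accounting in this hunk: tracer_frame is read once, outside
the lock, and subtracted up front; if that early read saw zero, it is
subtracted again under the lock, where a measurement made concurrently on
another CPU has become stable. A user-space sketch of the same pattern
(ACCESS_ONCE() has no portable equivalent here, so a relaxed C11 atomic load
stands in for it; all names and values are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_long tracer_frame;	/* 0 until some thread measures it */

    static long adjusted_size(long raw_size)
    {
            /* One racy-but-atomic read up front (the ACCESS_ONCE() role). */
            long frame_size = atomic_load_explicit(&tracer_frame,
                                                   memory_order_relaxed);
            long size = raw_size - frame_size;

            /* ...the lock is taken at this point in the real code... */

            /* If the early read saw zero, another thread may have set
             * tracer_frame since; subtract the now-stable value. */
            if (!frame_size)
                    size -= atomic_load_explicit(&tracer_frame,
                                                 memory_order_relaxed);
            return size;
    }

    int main(void)
    {
            printf("%ld\n", adjusted_size(1024));	/* 1024: nothing measured yet */
            atomic_store(&tracer_frame, 48);
            printf("%ld\n", adjusted_size(1024));	/* 976: frame subtracted once */
            return 0;
    }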
@@ -70,10 +90,18 @@ static inline void check_stack(void)
	 save_stack_trace(&max_stack_trace);
 
	 /*
+	 * Add the passed in ip from the function tracer.
+	 * Searching for this on the stack will skip over
+	 * most of the overhead from the stack tracer itself.
+	 */
+	stack_dump_trace[0] = ip;
+	max_stack_trace.nr_entries++;
+
+	/*
	  * Now find where in the stack these are.
	  */
	 i = 0;
-	start = &this_size;
+	start = stack;
	 top = (unsigned long *)
		 (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
 
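The start/top computation above relies on THREAD_SIZE being a power of two:
masking the low bits off any in-stack address gives the base of the stack, and
adding THREAD_SIZE gives its (exclusive) top, which is also how this_size was
derived earlier in check_stack(). A standalone sketch (the 8K THREAD_SIZE and
the sample address are assumptions for the example):

    #include <stdio.h>

    #define THREAD_SIZE 8192UL	/* assumed; arch-dependent in the kernel */

    int main(void)
    {
            unsigned long sp = 0xffffc90000123a58UL;	/* sample in-stack address */
            unsigned long base = sp & ~(THREAD_SIZE - 1);
            unsigned long top = base + THREAD_SIZE;

            /* Matches this_size in check_stack(): since the stack grows
             * down, everything from sp up to the top is in use. */
            printf("base=%#lx top=%#lx used=%lu bytes\n",
                   base, top, THREAD_SIZE - (sp & (THREAD_SIZE - 1)));
            return 0;
    }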
@@ -97,6 +125,18 @@ static inline void check_stack(void)
			 found = 1;
			 /* Start the search from here */
			 start = p + 1;
+			/*
+			 * We do not want to show the overhead
+			 * of the stack tracer stack in the
+			 * max stack. If we haven't figured
+			 * out what that is, then figure it out
+			 * now.
+			 */
+			if (unlikely(!tracer_frame) && i == 1) {
+				tracer_frame = (p - stack) *
+					sizeof(unsigned long);
+				max_stack_size -= tracer_frame;
+			}
		 }
	 }
 
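One detail worth calling out in the hunk above: p - stack is pointer
subtraction, which counts unsigned long elements, so multiplying by
sizeof(unsigned long) converts the depth at which the ip was found into the
tracer's frame size in bytes. In miniature (the values are made up):

    #include <stdio.h>

    int main(void)
    {
            unsigned long stack[16];
            unsigned long *p = &stack[5];	/* pretend the ip was found here */

            /* 5 elements deep * 8 bytes each = 40 bytes on LP64 */
            printf("%zu bytes of tracer overhead\n",
                   (size_t)(p - stack) * sizeof(unsigned long));
            return 0;
    }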
@@ -113,6 +153,7 @@ static void
 stack_trace_call(unsigned long ip, unsigned long parent_ip,
		  struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
+	unsigned long stack;
	 int cpu;
 
	 preempt_disable_notrace();
@@ -122,7 +163,26 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
	 if (per_cpu(trace_active, cpu)++ != 0)
		 goto out;
 
-	check_stack();
+	/*
+	 * When fentry is used, the traced function does not get
+	 * its stack frame set up, and we lose the parent.
+	 * The ip is pretty useless because the function tracer
+	 * was called before that function set up its stack frame.
+	 * In this case, we use the parent ip.
+	 *
+	 * By adding the return address of either the parent ip
+	 * or the current ip we can disregard most of the stack usage
+	 * caused by the stack tracer itself.
+	 *
+	 * The function tracer always reports the address of where the
+	 * mcount call was, but the stack will hold the return address.
+	 */
+	if (fentry)
+		ip = parent_ip;
+	else
+		ip += MCOUNT_INSN_SIZE;
+
+	check_stack(ip, &stack);
 
  out:
	 per_cpu(trace_active, cpu)--;
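The comment block condenses to this: what actually sits on the stack is a
return address, not the call-site address the function tracer reports, so
check_stack() must search for either parent_ip (fentry fires before the traced
function builds a frame) or ip plus the size of the mcount call instruction.
A sketch of just that selection (MCOUNT_INSN_SIZE of 5 matches a 5-byte x86
call, but is an assumption here):

    #include <stdio.h>

    #define MCOUNT_INSN_SIZE 5	/* assumed: x86 call instruction size */

    static unsigned long search_ip(unsigned long ip, unsigned long parent_ip,
                                   int using_fentry)
    {
            /* fentry fires before the traced function builds its frame,
             * so only the caller's return address is on the stack. */
            if (using_fentry)
                    return parent_ip;
            /* mcount reports the call site; the stack holds the return
             * address, i.e. the next instruction after the call. */
            return ip + MCOUNT_INSN_SIZE;
    }

    int main(void)
    {
            printf("mcount: %#lx\n", search_ip(0x1000, 0x2000, 0));	/* 0x1005 */
            printf("fentry: %#lx\n", search_ip(0x1000, 0x2000, 1));	/* 0x2000 */
            return 0;
    }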
@@ -371,6 +431,8 @@ static __init int stack_trace_init(void)
	 struct dentry *d_tracer;
 
	 d_tracer = tracing_init_dentry();
+	if (!d_tracer)
+		return 0;
 
	 trace_create_file("stack_max_size", 0644, d_tracer,
			   &max_stack_size, &stack_max_size_fops);