Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
-rw-r--r--	kernel/trace/trace_functions_graph.c	32
1 file changed, 27 insertions, 5 deletions
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index e998a824e9d..9aed1a5cf55 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -9,6 +9,7 @@
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
+#include <linux/slab.h>
 #include <linux/fs.h>
 
 #include "trace.h"
@@ -188,7 +189,7 @@ static int __trace_graph_entry(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ent_entry *entry;
 
-	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
 		return 0;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
@@ -237,6 +238,14 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	return ret;
 }
 
+int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
+{
+	if (tracing_thresh)
+		return 1;
+	else
+		return trace_graph_entry(trace);
+}
+
 static void __trace_graph_return(struct trace_array *tr,
 				struct ftrace_graph_ret *trace,
 				unsigned long flags,
@@ -247,7 +256,7 @@ static void __trace_graph_return(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ret_entry *entry;
 
-	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
 		return;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
@@ -290,13 +299,26 @@ void set_graph_array(struct trace_array *tr)
 	smp_mb();
 }
 
+void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
+{
+	if (tracing_thresh &&
+	    (trace->rettime - trace->calltime < tracing_thresh))
+		return;
+	else
+		trace_graph_return(trace);
+}
+
 static int graph_trace_init(struct trace_array *tr)
 {
 	int ret;
 
 	set_graph_array(tr);
-	ret = register_ftrace_graph(&trace_graph_return,
-				    &trace_graph_entry);
+	if (tracing_thresh)
+		ret = register_ftrace_graph(&trace_graph_thresh_return,
+					    &trace_graph_thresh_entry);
+	else
+		ret = register_ftrace_graph(&trace_graph_return,
+					    &trace_graph_entry);
 	if (ret)
 		return ret;
 	tracing_start_cmdline_record();
@@ -920,7 +942,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 	} else {
-		ret = trace_seq_printf(s, "} (%ps)\n", (void *)trace->func);
+		ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
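
Usage sketch: the snippet below is a minimal user-space illustration (not part of the patch) of how the new threshold path could be exercised, assuming debugfs is mounted at /sys/kernel/debug and the program is run as root; it only touches the existing tracing_thresh and current_tracer files. The threshold has to be non-zero before the tracer is selected, because graph_trace_init() chooses between the normal and thresh handlers at registration time. The value 100 is just an example.

	#include <stdio.h>

	int main(void)
	{
		FILE *f;

		/* Set the duration threshold first (value in microseconds).
		 * graph_trace_init() only registers trace_graph_thresh_entry/
		 * trace_graph_thresh_return if tracing_thresh is already set. */
		f = fopen("/sys/kernel/debug/tracing/tracing_thresh", "w");
		if (!f)
			return 1;
		fprintf(f, "100\n");
		fclose(f);

		/* Now select the function graph tracer; with the patch applied,
		 * only functions whose duration exceeds the threshold are recorded. */
		f = fopen("/sys/kernel/debug/tracing/current_tracer", "w");
		if (!f)
			return 1;
		fprintf(f, "function_graph\n");
		fclose(f);

		return 0;
	}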