about summary refs log tree commit diff stats
path: root/arch/x86/kernel/ftrace.c
diff options
context:
space:
mode:
author: Ingo Molnar <mingo@elte.hu>  2009-02-19 06:13:33 -0500
committer: Ingo Molnar <mingo@elte.hu>  2009-02-19 06:13:33 -0500
commit: 4cd0332db7e8f57cc082bab11d82c064a9721737 (patch)
tree: b6de7771d67c5bf6eeb890fa0f5a901365104b98 /arch/x86/kernel/ftrace.c
parent: 40999096e8b9872199bf56ecd0c4d98397ccaf2f (diff)
parent: 712406a6bf59ebf4a00358bb59a4a2a1b2953d90 (diff)
Merge branch 'mainline/function-graph' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/function-graph-tracer
Diffstat (limited to 'arch/x86/kernel/ftrace.c')
-rw-r--r--  arch/x86/kernel/ftrace.c  |  75
1 file changed, 1 insertion(+), 74 deletions(-)
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 2f9c0c8cb4c7..c2e057d9f88c 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -369,79 +369,6 @@ int ftrace_disable_ftrace_graph_caller(void)
369 369
370#endif /* !CONFIG_DYNAMIC_FTRACE */ 370#endif /* !CONFIG_DYNAMIC_FTRACE */
371 371
372/* Add a function return address to the trace stack on thread info.*/
373static int push_return_trace(unsigned long ret, unsigned long long time,
374 unsigned long func, int *depth)
375{
376 int index;
377
378 if (!current->ret_stack)
379 return -EBUSY;
380
381 /* The return trace stack is full */
382 if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
383 atomic_inc(&current->trace_overrun);
384 return -EBUSY;
385 }
386
387 index = ++current->curr_ret_stack;
388 barrier();
389 current->ret_stack[index].ret = ret;
390 current->ret_stack[index].func = func;
391 current->ret_stack[index].calltime = time;
392 *depth = index;
393
394 return 0;
395}
396
397/* Retrieve a function return address to the trace stack on thread info.*/
398static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
399{
400 int index;
401
402 index = current->curr_ret_stack;
403
404 if (unlikely(index < 0)) {
405 ftrace_graph_stop();
406 WARN_ON(1);
407 /* Might as well panic, otherwise we have no where to go */
408 *ret = (unsigned long)panic;
409 return;
410 }
411
412 *ret = current->ret_stack[index].ret;
413 trace->func = current->ret_stack[index].func;
414 trace->calltime = current->ret_stack[index].calltime;
415 trace->overrun = atomic_read(&current->trace_overrun);
416 trace->depth = index;
417 barrier();
418 current->curr_ret_stack--;
419
420}
421
422/*
423 * Send the trace to the ring-buffer.
424 * @return the original return address.
425 */
426unsigned long ftrace_return_to_handler(void)
427{
428 struct ftrace_graph_ret trace;
429 unsigned long ret;
430
431 pop_return_trace(&trace, &ret);
432 trace.rettime = cpu_clock(raw_smp_processor_id());
433 ftrace_graph_return(&trace);
434
435 if (unlikely(!ret)) {
436 ftrace_graph_stop();
437 WARN_ON(1);
438 /* Might as well panic. What else to do? */
439 ret = (unsigned long)panic;
440 }
441
442 return ret;
443}
444
445/* 372/*
446 * Hook the return address and push it in the stack of return addrs 373 * Hook the return address and push it in the stack of return addrs
447 * in current thread info. 374 * in current thread info.
@@ -494,7 +421,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
494 421
495 calltime = cpu_clock(raw_smp_processor_id()); 422 calltime = cpu_clock(raw_smp_processor_id());
496 423
497 if (push_return_trace(old, calltime, 424 if (ftrace_push_return_trace(old, calltime,
498 self_addr, &trace.depth) == -EBUSY) { 425 self_addr, &trace.depth) == -EBUSY) {
499 *parent = old; 426 *parent = old;
500 return; 427 return;