Diffstat (limited to 'arch/x86/kernel/ftrace.c')
 arch/x86/kernel/ftrace.c | 32 +++++++++++++++++++++-----------
 1 file changed, 21 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 3595a4c14aba..26b2d92d48b3 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -347,7 +347,7 @@ void ftrace_nmi_exit(void)
 
 /* Add a function return address to the trace stack on thread info.*/
 static int push_return_trace(unsigned long ret, unsigned long long time,
-				unsigned long func)
+				unsigned long func, int *depth)
 {
 	int index;
 
@@ -365,21 +365,22 @@ static int push_return_trace(unsigned long ret, unsigned long long time,
 	current->ret_stack[index].ret = ret;
 	current->ret_stack[index].func = func;
 	current->ret_stack[index].calltime = time;
+	*depth = index;
 
 	return 0;
 }
 
 /* Retrieve a function return address to the trace stack on thread info.*/
-static void pop_return_trace(unsigned long *ret, unsigned long long *time,
-				unsigned long *func, unsigned long *overrun)
+static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
 {
 	int index;
 
 	index = current->curr_ret_stack;
 	*ret = current->ret_stack[index].ret;
-	*func = current->ret_stack[index].func;
-	*time = current->ret_stack[index].calltime;
-	*overrun = atomic_read(&current->trace_overrun);
+	trace->func = current->ret_stack[index].func;
+	trace->calltime = current->ret_stack[index].calltime;
+	trace->overrun = atomic_read(&current->trace_overrun);
+	trace->depth = index;
 	current->curr_ret_stack--;
 }
 
@@ -390,12 +391,13 @@ static void pop_return_trace(unsigned long *ret, unsigned long long *time,
 unsigned long ftrace_return_to_handler(void)
 {
 	struct ftrace_graph_ret trace;
-	pop_return_trace(&trace.ret, &trace.calltime, &trace.func,
-				&trace.overrun);
+	unsigned long ret;
+
+	pop_return_trace(&trace, &ret);
 	trace.rettime = cpu_clock(raw_smp_processor_id());
-	ftrace_graph_function(&trace);
+	ftrace_graph_return(&trace);
 
-	return trace.ret;
+	return ret;
 }
 
 /*
@@ -407,6 +409,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 	unsigned long old;
 	unsigned long long calltime;
 	int faulted;
+	struct ftrace_graph_ent trace;
 	unsigned long return_hooker = (unsigned long)
 				&return_to_handler;
 
@@ -452,8 +455,15 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 
 	calltime = cpu_clock(raw_smp_processor_id());
 
-	if (push_return_trace(old, calltime, self_addr) == -EBUSY)
+	if (push_return_trace(old, calltime,
+				self_addr, &trace.depth) == -EBUSY) {
 		*parent = old;
+		return;
+	}
+
+	trace.func = self_addr;
+	ftrace_graph_entry(&trace);
+
 }
 
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */