about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/thread_info.h | 7
-rw-r--r--  arch/x86/kernel/ftrace.c           | 10
2 files changed, 14 insertions, 3 deletions
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index a71158369fd4..e90e81ef6ab9 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -21,6 +21,7 @@ struct task_struct;
 struct exec_domain;
 #include <asm/processor.h>
 #include <asm/ftrace.h>
+#include <asm/atomic.h>
 
 struct thread_info {
 	struct task_struct	*task;		/* main task structure */
@@ -45,6 +46,11 @@ struct thread_info {
 	int		curr_ret_stack;
 	/* Stack of return addresses for return function tracing */
 	struct ftrace_ret_stack	ret_stack[FTRACE_RET_STACK_SIZE];
+	/*
+	 * Number of functions that haven't been traced
+	 * because of depth overrun.
+	 */
+	atomic_t	trace_overrun;
 #endif
 };
 
@@ -61,6 +67,7 @@ struct thread_info {
 		.fn = do_no_restart_syscall,	\
 	},				\
 	.curr_ret_stack = -1,\
+	.trace_overrun	= ATOMIC_INIT(0)	\
 }
 #else
 #define INIT_THREAD_INFO(tsk)			\
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 924153edd973..356bb1eb6e9a 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -353,8 +353,10 @@ static int push_return_trace(unsigned long ret, unsigned long long time,
 	struct thread_info *ti = current_thread_info();
 
 	/* The return trace stack is full */
-	if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1)
+	if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1) {
+		atomic_inc(&ti->trace_overrun);
 		return -EBUSY;
+	}
 
 	index = ++ti->curr_ret_stack;
 	barrier();
@@ -367,7 +369,7 @@ static int push_return_trace(unsigned long ret, unsigned long long time,
 
 /* Retrieve a function return address to the trace stack on thread info.*/
 static void pop_return_trace(unsigned long *ret, unsigned long long *time,
-				unsigned long *func)
+				unsigned long *func, unsigned long *overrun)
 {
 	int index;
 
@@ -376,6 +378,7 @@ static void pop_return_trace(unsigned long *ret, unsigned long long *time,
 	*ret = ti->ret_stack[index].ret;
 	*func = ti->ret_stack[index].func;
 	*time = ti->ret_stack[index].calltime;
+	*overrun = atomic_read(&ti->trace_overrun);
 	ti->curr_ret_stack--;
 }
 
@@ -386,7 +389,8 @@ static void pop_return_trace(unsigned long *ret, unsigned long long *time,
 unsigned long ftrace_return_to_handler(void)
 {
 	struct ftrace_retfunc trace;
-	pop_return_trace(&trace.ret, &trace.calltime, &trace.func);
+	pop_return_trace(&trace.ret, &trace.calltime, &trace.func,
+				&trace.overrun);
 	trace.rettime = cpu_clock(raw_smp_processor_id());
 	ftrace_function_return(&trace);
 