author     Frederic Weisbecker <fweisbec@gmail.com>    2008-11-23 00:22:56 -0500
committer  Ingo Molnar <mingo@elte.hu>                 2008-11-23 03:17:26 -0500
commit     f201ae2356c74bcae130b2177b3dca903ea98071
tree       c4b1b43fbe0a4594cb86749b2e7098fe15eb86ba /arch/x86/kernel
parent     a0a70c735ef714fe1b6777b571630c3d50c7b008
tracing/function-return-tracer: store return stack into task_struct and allocate it dynamically
Impact: use deeper function tracing depth safely

Some tests showed that function return tracing needed a deeper depth of function calls. But it could be unsafe to store these return addresses on the stack.

So these arrays will now be allocated dynamically in the task_struct of current, and only when the tracer is activated. Typical scheme when the tracer is activated:

- allocate a return stack for each task in the global task list
- fork: allocate the return stack for the newly created task
- exit: free the return stack of current
- idle init: same as fork

I chose a default depth of 50. I don't have overruns anymore.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
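A minimal sketch of the fork/exit halves of that scheme, assuming a hypothetical activation flag and hook names (the real hooks live outside this diffstat; FTRACE_RETFUNC_DEPTH and the task_struct fields are the ones this commit introduces):

#include <linux/ftrace.h>
#include <linux/sched.h>
#include <linux/slab.h>

extern atomic_t retfunc_active;	/* hypothetical: tracer activation flag */

/* fork: give the new task its own return stack, but only while the
 * tracer is running; otherwise leave the pointer NULL so
 * push_return_trace() bails out with -EBUSY. */
void retfunc_init_task(struct task_struct *t)
{
	if (!atomic_read(&retfunc_active)) {
		t->ret_stack = NULL;
		return;
	}

	/* real code must handle kmalloc() failure */
	t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
	t->curr_ret_stack = -1;	/* empty stack */
	atomic_set(&t->trace_overrun, 0);
}

/* exit: clear the pointer before freeing, so a push racing with
 * task teardown sees NULL and fails safely instead of touching
 * freed memory. */
void retfunc_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *stack = t->ret_stack;

	t->ret_stack = NULL;
	barrier();	/* make the NULL visible before kfree() */
	kfree(stack);
}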
Diffstat (limited to 'arch/x86/kernel')
 arch/x86/kernel/ftrace.c | 29 +++++++++++++++--------------
 1 file changed, 15 insertions(+), 14 deletions(-)
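The hunks below depend on definitions this commit adds outside arch/x86/kernel, so they do not appear in this diffstat. Reconstructed from how the diff uses them, they look roughly like this; the field comments are mine:

/* include/linux/ftrace.h */
#define FTRACE_RETFUNC_DEPTH 50		/* default depth from the commit log */

struct ftrace_ret_stack {
	unsigned long ret;		/* original return address */
	unsigned long func;		/* address of the traced function */
	unsigned long long calltime;	/* timestamp taken at function entry */
};

/* include/linux/sched.h: new per-task state in struct task_struct */
	int curr_ret_stack;			/* index of last pushed entry, -1 when empty */
	struct ftrace_ret_stack *ret_stack;	/* dynamically allocated, FTRACE_RETFUNC_DEPTH entries */
	atomic_t trace_overrun;			/* pushes dropped because the stack was full */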
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 356bb1eb6e9a..bb137f7297ed 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -350,19 +350,21 @@ static int push_return_trace(unsigned long ret, unsigned long long time,
 				unsigned long func)
 {
 	int index;
-	struct thread_info *ti = current_thread_info();
+
+	if (!current->ret_stack)
+		return -EBUSY;
 
 	/* The return trace stack is full */
-	if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1) {
-		atomic_inc(&ti->trace_overrun);
+	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
+		atomic_inc(&current->trace_overrun);
 		return -EBUSY;
 	}
 
-	index = ++ti->curr_ret_stack;
+	index = ++current->curr_ret_stack;
 	barrier();
-	ti->ret_stack[index].ret = ret;
-	ti->ret_stack[index].func = func;
-	ti->ret_stack[index].calltime = time;
+	current->ret_stack[index].ret = ret;
+	current->ret_stack[index].func = func;
+	current->ret_stack[index].calltime = time;
 
 	return 0;
 }
@@ -373,13 +375,12 @@ static void pop_return_trace(unsigned long *ret, unsigned long long *time,
 {
 	int index;
 
-	struct thread_info *ti = current_thread_info();
-	index = ti->curr_ret_stack;
-	*ret = ti->ret_stack[index].ret;
-	*func = ti->ret_stack[index].func;
-	*time = ti->ret_stack[index].calltime;
-	*overrun = atomic_read(&ti->trace_overrun);
-	ti->curr_ret_stack--;
+	index = current->curr_ret_stack;
+	*ret = current->ret_stack[index].ret;
+	*func = current->ret_stack[index].func;
+	*time = current->ret_stack[index].calltime;
+	*overrun = atomic_read(&current->trace_overrun);
+	current->curr_ret_stack--;
 }
 
 /*
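For context on how the two helpers are used: the entry hook redirects a function's return address to a trampoline and records the original one with push_return_trace(); when the function returns, the trampoline pops the entry to compute the call duration. Note also the ordering inside push_return_trace() itself: the slot is reserved (curr_ret_stack incremented) before it is filled, and barrier() keeps the compiler from reordering the two, so a nested push from an interrupt takes the next free slot instead of clobbering a half-written one. A condensed sketch of the pairing, with simplified caller names and cpu_clock() assumed as the timestamp source (the real callers are this file's mcount entry path and return trampoline handler):

extern void return_trampoline(void);	/* hypothetical trampoline stub */

/* Entry side (sketch): hijack the return address, then record the
 * original one; if the push fails, undo the hijack so the function
 * returns normally, untraced. */
static void hook_function_entry(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old = *parent;
	unsigned long long calltime = cpu_clock(raw_smp_processor_id());

	*parent = (unsigned long)&return_trampoline;
	if (push_return_trace(old, calltime, self_addr) == -EBUSY)
		*parent = old;
}

/* Exit side (sketch): the trampoline lands here; pop the entry,
 * report the timing, and jump back to the original caller. */
static unsigned long return_handler(void)
{
	unsigned long ret, func, overrun;
	unsigned long long calltime;

	pop_return_trace(&ret, &calltime, &func, &overrun);
	/* duration = cpu_clock(...) - calltime; hand off to the tracer */

	return ret;	/* original return address */
}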