author    Frederic Weisbecker <fweisbec@gmail.com>  2008-11-12 16:47:54 -0500
committer Ingo Molnar <mingo@elte.hu>               2008-11-12 17:15:43 -0500
commit    62d59d17a5f98edb48b171742dfa531488802f07
tree      3b66d4439aa0e497a5a387ed7b4e8f08f57ee7f1
parent    cb9382e5a94e54d0356d730954396c746ae66d6e
tracing/function-return-tracer: make the function return tracer lockless
Impact: remove spinlocks and irq disabling in the function return tracer

I've tried to figure out all of the race conditions that could happen
when the tracer pushes or pops a return address trace to/from the
current thread_info.

Theory:

- A thread can only execute on one CPU at a time, so this code doesn't
  need to be SMP-safe. Just drop the spinlock.

- The only race that could happen is between the current thread and an
  interrupt. If an interrupt is raised, it will increase the index of
  the return stack storage, then execute until the end of the tracing
  and finally free the index it used. So we don't need to disable irqs
  either.

That's the theory. In practice, I've tested it on a two-core SMP box
and had no problems at all. Perhaps -tip testing could confirm it.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
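The push/pop pattern the patch relies on can be modeled outside the kernel. Below is a minimal user-space C sketch of the same lockless, per-thread return stack; struct ret_stack_entry, RET_STACK_SIZE, and the main() driver are illustrative stand-ins rather than the kernel's actual objects. The nested push/pop pair in main() plays the role of an interrupt: it leaves curr_ret_stack exactly where it found it, which is why the interrupted thread needs neither a lock nor irq disabling.

/*
 * Illustrative user-space model of the lockless per-thread return
 * stack (names and sizes are stand-ins, not the kernel's).
 */
#include <stdio.h>

#define RET_STACK_SIZE 32	/* stands in for FTRACE_RET_STACK_SIZE */

struct ret_stack_entry {
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
};

/* One stack per thread, like thread_info::ret_stack. */
static struct ret_stack_entry ret_stack[RET_STACK_SIZE];
static int curr_ret_stack = -1;

static int push_return_trace(unsigned long ret, unsigned long long time,
			     unsigned long func)
{
	int index;

	if (curr_ret_stack == RET_STACK_SIZE - 1)
		return -1;	/* stack full, the -EBUSY case */

	index = ++curr_ret_stack;
	ret_stack[index].ret = ret;
	ret_stack[index].func = func;
	ret_stack[index].calltime = time;
	return 0;
}

static void pop_return_trace(unsigned long *ret, unsigned long long *time,
			     unsigned long *func)
{
	int index = curr_ret_stack;

	*ret = ret_stack[index].ret;
	*func = ret_stack[index].func;
	*time = ret_stack[index].calltime;
	curr_ret_stack--;
}

int main(void)
{
	unsigned long ret, func;
	unsigned long long time;

	push_return_trace(0x1000, 1, 0xa000);	/* traced call in the thread */

	/*
	 * An "interrupt" arriving here pushes its own entry above the
	 * thread's and pops it again before returning, so the thread
	 * sees curr_ret_stack unchanged - the whole locking argument.
	 */
	push_return_trace(0x2000, 2, 0xb000);
	pop_return_trace(&ret, &time, &func);

	pop_return_trace(&ret, &time, &func);	/* thread unwinds normally */
	printf("depth after nesting: %d\n", curr_ret_stack);	/* prints -1 */
	return 0;
}

This only demonstrates the LIFO nesting of interrupts within a thread; NMIs are handled separately by the in_nmi accounting visible in the hunk context below.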
 arch/x86/kernel/ftrace.c | 43 +++++--------------------------------------
 1 file changed, 5 insertions(+), 38 deletions(-)
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 16a571dea2ef..1db0e121a3e7 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -44,62 +44,37 @@ void ftrace_nmi_exit(void)
 	atomic_dec(&in_nmi);
 }
 
-/*
- * Synchronize accesses to return adresses stack with
- * interrupts.
- */
-static raw_spinlock_t ret_stack_lock;
-
 /* Add a function return address to the trace stack on thread info.*/
 static int push_return_trace(unsigned long ret, unsigned long long time,
 				unsigned long func)
 {
 	int index;
-	struct thread_info *ti;
-	unsigned long flags;
-	int err = 0;
-
-	raw_local_irq_save(flags);
-	__raw_spin_lock(&ret_stack_lock);
+	struct thread_info *ti = current_thread_info();
 
-	ti = current_thread_info();
 	/* The return trace stack is full */
-	if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1) {
-		err = -EBUSY;
-		goto out;
-	}
+	if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1)
+		return -EBUSY;
 
 	index = ++ti->curr_ret_stack;
 	ti->ret_stack[index].ret = ret;
 	ti->ret_stack[index].func = func;
 	ti->ret_stack[index].calltime = time;
 
-out:
-	__raw_spin_unlock(&ret_stack_lock);
-	raw_local_irq_restore(flags);
-	return err;
+	return 0;
 }
 
 /* Retrieve a function return address to the trace stack on thread info.*/
 static void pop_return_trace(unsigned long *ret, unsigned long long *time,
 				unsigned long *func)
 {
-	struct thread_info *ti;
 	int index;
-	unsigned long flags;
-
-	raw_local_irq_save(flags);
-	__raw_spin_lock(&ret_stack_lock);
 
-	ti = current_thread_info();
+	struct thread_info *ti = current_thread_info();
 	index = ti->curr_ret_stack;
 	*ret = ti->ret_stack[index].ret;
 	*func = ti->ret_stack[index].func;
 	*time = ti->ret_stack[index].calltime;
 	ti->curr_ret_stack--;
-
-	__raw_spin_unlock(&ret_stack_lock);
-	raw_local_irq_restore(flags);
 }
 
 /*
@@ -175,14 +150,6 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 	*parent = old;
 }
 
-static int __init init_ftrace_function_return(void)
-{
-	ret_stack_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
-	return 0;
-}
-device_initcall(init_ftrace_function_return);
-
-
 #endif
 
 #ifdef CONFIG_DYNAMIC_FTRACE