author    Steven Rostedt <srostedt@redhat.com>   2012-11-02 17:17:59 -0400
committer Steven Rostedt <rostedt@goodmis.org>   2013-01-22 23:38:00 -0500
commit    c29f122cd7fc178b72b1335b1fce0dff2e5c0f5d
tree      450f3888c9f663298231a29d6a1e63269f815fd0
parent    0a016409e42f273415f8225ddf2c58eb2df88034
ftrace: Add context level recursion bit checking
Currently for recursion checking in the function tracer, ftrace tests a
task_struct bit to determine whether the function tracer has recursed.
If it has, it returns without going further. But this leads to races:
if an interrupt comes in after the bit is set, the functions being
traced see that bit set, conclude that the function tracer has recursed
on itself, and return without tracing anything.

Instead, add a bit for each context (normal, softirq, irq and nmi). A
check of which context the task is in is made before testing the
associated bit. Now if an interrupt preempts the function tracer after
the previous context's bit has been set, the interrupt's functions can
still be traced.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
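The scheme can be sketched in isolation. The following is a minimal
userspace mock-up, not kernel code: the context is passed in explicitly
where the kernel derives it from preempt_count() via
in_interrupt()/in_nmi()/in_irq(), trace_recursion is a plain variable
rather than a task_struct field, and tracer_enter()/tracer_exit() are
hypothetical stand-ins for the trace_recursion_set/test/clear macros
patched below.

#include <stdio.h>

enum ctx { CTX_NORMAL, CTX_SOFTIRQ, CTX_IRQ, CTX_NMI };

/* one recursion bit per context, mirroring the patch */
#define BIT_NORMAL	(1 << 0)
#define BIT_SOFTIRQ	(1 << 1)
#define BIT_IRQ		(1 << 2)
#define BIT_NMI		(1 << 3)

static unsigned long trace_recursion;	/* per-task in the kernel */

static unsigned long ctx_bit(enum ctx c)
{
	switch (c) {
	case CTX_NMI:		return BIT_NMI;
	case CTX_IRQ:		return BIT_IRQ;
	case CTX_SOFTIRQ:	return BIT_SOFTIRQ;
	default:		return BIT_NORMAL;
	}
}

static int tracer_enter(enum ctx c)
{
	unsigned long bit = ctx_bit(c);

	if (trace_recursion & bit)
		return 0;	/* recursion within the same context */
	trace_recursion |= bit;
	return 1;
}

static void tracer_exit(enum ctx c)
{
	trace_recursion &= ~ctx_bit(c);
}

int main(void)
{
	tracer_enter(CTX_NORMAL);	/* the tracer is running */

	/* an irq preempting the tracer uses its own bit, so it traces */
	printf("irq can trace: %d\n", tracer_enter(CTX_IRQ));
	tracer_exit(CTX_IRQ);

	/* genuine recursion in the same context is still rejected */
	printf("normal recursion blocked: %d\n", !tracer_enter(CTX_NORMAL));

	tracer_exit(CTX_NORMAL);
	return 0;
}

With the single shared bit that this patch removes, the irq entry and
the genuinely recursive entry are indistinguishable: both see the bit
set and bail out, which is exactly the race described above.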
 kernel/trace/ftrace.c | 40 +++++++++++++++++++++++++++++++++-------
 kernel/trace/trace.h  | 12 +++++++++---
 2 files changed, 42 insertions(+), 10 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1330969d8447..639b6ab1f04c 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -156,14 +156,27 @@ static void
 ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
 			struct ftrace_ops *op, struct pt_regs *regs)
 {
-	if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
+	int bit;
+
+	if (in_interrupt()) {
+		if (in_nmi())
+			bit = TRACE_GLOBAL_NMI_BIT;
+
+		else if (in_irq())
+			bit = TRACE_GLOBAL_IRQ_BIT;
+		else
+			bit = TRACE_GLOBAL_SIRQ_BIT;
+	} else
+		bit = TRACE_GLOBAL_BIT;
+
+	if (unlikely(trace_recursion_test(bit)))
 		return;
 
-	trace_recursion_set(TRACE_GLOBAL_BIT);
+	trace_recursion_set(bit);
 	do_for_each_ftrace_op(op, ftrace_global_list) {
 		op->func(ip, parent_ip, op, regs);
 	} while_for_each_ftrace_op(op);
-	trace_recursion_clear(TRACE_GLOBAL_BIT);
+	trace_recursion_clear(bit);
 }
 
 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
@@ -4132,14 +4145,27 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 		       struct ftrace_ops *ignored, struct pt_regs *regs)
 {
 	struct ftrace_ops *op;
+	unsigned int bit;
 
 	if (function_trace_stop)
 		return;
 
-	if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
-		return;
+	if (in_interrupt()) {
+		if (in_nmi())
+			bit = TRACE_INTERNAL_NMI_BIT;
+
+		else if (in_irq())
+			bit = TRACE_INTERNAL_IRQ_BIT;
+		else
+			bit = TRACE_INTERNAL_SIRQ_BIT;
+	} else
+		bit = TRACE_INTERNAL_BIT;
+
+	if (unlikely(trace_recursion_test(bit)))
+		return;
+
+	trace_recursion_set(bit);
 
-	trace_recursion_set(TRACE_INTERNAL_BIT);
 	/*
 	 * Some of the ops may be dynamically allocated,
 	 * they must be freed after a synchronize_sched().
@@ -4150,7 +4176,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 		op->func(ip, parent_ip, op, regs);
 	} while_for_each_ftrace_op(op);
 	preempt_enable_notrace();
-	trace_recursion_clear(TRACE_INTERNAL_BIT);
+	trace_recursion_clear(bit);
 }
 
 /*
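One subtlety in the hunks above is the order of the context tests:
in_interrupt() is true in softirq, hardirq and NMI context alike, and
an NMI can arrive on top of a hard irq, which can itself arrive on top
of softirq processing, so the most deeply nested context has to be
checked first. A rough standalone illustration of that cascade, with
made-up mask values standing in for the kernel's preempt_count() bits:

#include <stdio.h>

/* demo values only; the kernel packs these into preempt_count() */
#define DEMO_SOFTIRQ_MASK	0x0000ff00u
#define DEMO_HARDIRQ_MASK	0x000f0000u
#define DEMO_NMI_MASK		0x00100000u

static const char *classify(unsigned int pc)
{
	if (pc & DEMO_NMI_MASK)		/* may be stacked on irq bits */
		return "nmi";
	if (pc & DEMO_HARDIRQ_MASK)	/* may be stacked on softirq bits */
		return "irq";
	if (pc & DEMO_SOFTIRQ_MASK)
		return "softirq";
	return "normal";
}

int main(void)
{
	/* nmi on top of irq on top of softirq still classifies as nmi */
	printf("%s\n", classify(DEMO_NMI_MASK | DEMO_HARDIRQ_MASK |
				DEMO_SOFTIRQ_MASK));
	printf("%s\n", classify(DEMO_HARDIRQ_MASK | DEMO_SOFTIRQ_MASK));
	printf("%s\n", classify(0));
	return 0;
}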
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index c75d7988902c..fe6ccff9cc70 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -299,8 +299,14 @@ struct tracer {
 
 /* for function tracing recursion */
 #define TRACE_INTERNAL_BIT		(1<<11)
-#define TRACE_GLOBAL_BIT		(1<<12)
-#define TRACE_CONTROL_BIT		(1<<13)
+#define TRACE_INTERNAL_NMI_BIT		(1<<12)
+#define TRACE_INTERNAL_IRQ_BIT		(1<<13)
+#define TRACE_INTERNAL_SIRQ_BIT		(1<<14)
+#define TRACE_GLOBAL_BIT		(1<<15)
+#define TRACE_GLOBAL_NMI_BIT		(1<<16)
+#define TRACE_GLOBAL_IRQ_BIT		(1<<17)
+#define TRACE_GLOBAL_SIRQ_BIT		(1<<18)
+#define TRACE_CONTROL_BIT		(1<<19)
 
 /*
  * Abuse of the trace_recursion.
@@ -309,7 +315,7 @@ struct tracer {
  * was called in irq context but we have irq tracing off. Since this
  * can only be modified by current, we can reuse trace_recursion.
  */
-#define TRACE_IRQ_BIT			(1<<13)
+#define TRACE_IRQ_BIT			(1<<20)
 
 #define trace_recursion_set(bit)	do { (current)->trace_recursion |= (bit); } while (0)
 #define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(bit); } while (0)
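After this change, bits 11 through 20 of current->trace_recursion are
spoken for, each a distinct power of two. Note also that in the old
definitions TRACE_IRQ_BIT shared the value (1<<13) with
TRACE_CONTROL_BIT, which is another reason it moves out to (1<<20). A
throwaway userspace check (not part of the patch) that the new masks
really are disjoint:

#include <stdio.h>

#define TRACE_INTERNAL_BIT	(1<<11)
#define TRACE_INTERNAL_NMI_BIT	(1<<12)
#define TRACE_INTERNAL_IRQ_BIT	(1<<13)
#define TRACE_INTERNAL_SIRQ_BIT	(1<<14)
#define TRACE_GLOBAL_BIT	(1<<15)
#define TRACE_GLOBAL_NMI_BIT	(1<<16)
#define TRACE_GLOBAL_IRQ_BIT	(1<<17)
#define TRACE_GLOBAL_SIRQ_BIT	(1<<18)
#define TRACE_CONTROL_BIT	(1<<19)
#define TRACE_IRQ_BIT		(1<<20)

int main(void)
{
	unsigned long all = TRACE_INTERNAL_BIT | TRACE_INTERNAL_NMI_BIT |
			    TRACE_INTERNAL_IRQ_BIT | TRACE_INTERNAL_SIRQ_BIT |
			    TRACE_GLOBAL_BIT | TRACE_GLOBAL_NMI_BIT |
			    TRACE_GLOBAL_IRQ_BIT | TRACE_GLOBAL_SIRQ_BIT |
			    TRACE_CONTROL_BIT | TRACE_IRQ_BIT;

	/* ten defines, so exactly ten distinct bits must be set */
	printf("%d bits used\n", __builtin_popcountl(all));
	return 0;
}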