about summary refs log tree commit diff stats
path: root/kernel/trace/trace_irqsoff.c
diff options
context:
space:
mode:
authorSteven Rostedt <rostedt@goodmis.org>2008-05-12 15:20:55 -0400
committerThomas Gleixner <tglx@linutronix.de>2008-05-23 15:13:41 -0400
commitc5f888cae49dfe3e86d9d1e0dab2b63ceb057be3 (patch)
treee276b5299b52ccbe8b89492d398fe63652cddd49 /kernel/trace/trace_irqsoff.c
parent4d9493c90f8e6e1b164aede3814010a290161abb (diff)
ftrace: irqsoff use raw_smp_processor_id
This patch changes the use of __get_cpu_var to explicitly calling raw_smp_processor_id and using the per_cpu() macro. On some debug configurations, the use of __get_cpu_var may cause ftrace to trigger and this can cause problems with the irqsoff tracing. Signed-off-by: Steven Rostedt <srostedt@redhat.com> Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/trace/trace_irqsoff.c')
-rw-r--r--kernel/trace/trace_irqsoff.c19
1 file changed, 10 insertions, 9 deletions
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index d0c1748b1e2c..761f3ec66c50 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -204,14 +204,14 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
204 if (likely(!tracer_enabled)) 204 if (likely(!tracer_enabled))
205 return; 205 return;
206 206
207 if (__get_cpu_var(tracing_cpu)) 207 cpu = raw_smp_processor_id();
208
209 if (per_cpu(tracing_cpu, cpu))
208 return; 210 return;
209 211
210 cpu = raw_smp_processor_id();
211 data = tr->data[cpu]; 212 data = tr->data[cpu];
212 213
213 if (unlikely(!data) || unlikely(!head_page(data)) || 214 if (unlikely(!data) || atomic_read(&data->disabled))
214 atomic_read(&data->disabled))
215 return; 215 return;
216 216
217 atomic_inc(&data->disabled); 217 atomic_inc(&data->disabled);
@@ -225,7 +225,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
225 225
226 trace_function(tr, data, ip, parent_ip, flags); 226 trace_function(tr, data, ip, parent_ip, flags);
227 227
228 __get_cpu_var(tracing_cpu) = 1; 228 per_cpu(tracing_cpu, cpu) = 1;
229 229
230 atomic_dec(&data->disabled); 230 atomic_dec(&data->disabled);
231} 231}
@@ -238,16 +238,16 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
238 struct trace_array_cpu *data; 238 struct trace_array_cpu *data;
239 unsigned long flags; 239 unsigned long flags;
240 240
241 cpu = raw_smp_processor_id();
241 /* Always clear the tracing cpu on stopping the trace */ 242 /* Always clear the tracing cpu on stopping the trace */
242 if (unlikely(__get_cpu_var(tracing_cpu))) 243 if (unlikely(per_cpu(tracing_cpu, cpu)))
243 __get_cpu_var(tracing_cpu) = 0; 244 per_cpu(tracing_cpu, cpu) = 0;
244 else 245 else
245 return; 246 return;
246 247
247 if (!tracer_enabled) 248 if (!tracer_enabled)
248 return; 249 return;
249 250
250 cpu = raw_smp_processor_id();
251 data = tr->data[cpu]; 251 data = tr->data[cpu];
252 252
253 if (unlikely(!data) || unlikely(!head_page(data)) || 253 if (unlikely(!data) || unlikely(!head_page(data)) ||
@@ -255,6 +255,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
255 return; 255 return;
256 256
257 atomic_inc(&data->disabled); 257 atomic_inc(&data->disabled);
258
258 local_save_flags(flags); 259 local_save_flags(flags);
259 trace_function(tr, data, ip, parent_ip, flags); 260 trace_function(tr, data, ip, parent_ip, flags);
260 check_critical_timing(tr, data, parent_ip ? : ip, cpu); 261 check_critical_timing(tr, data, parent_ip ? : ip, cpu);
@@ -376,7 +377,7 @@ static void stop_irqsoff_tracer(struct trace_array *tr)
376static void __irqsoff_tracer_init(struct trace_array *tr) 377static void __irqsoff_tracer_init(struct trace_array *tr)
377{ 378{
378 irqsoff_trace = tr; 379 irqsoff_trace = tr;
379 /* make sure that the tracer is visibel */ 380 /* make sure that the tracer is visible */
380 smp_wmb(); 381 smp_wmb();
381 382
382 if (tr->ctrl) 383 if (tr->ctrl)