Diffstat (limited to 'kernel/trace/trace_irqsoff.c')
-rw-r--r--	kernel/trace/trace_irqsoff.c | 184
 1 file changed, 134 insertions(+), 50 deletions(-)
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index a9131b0cf1a5..8b1231633dc5 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -21,6 +21,36 @@
 static struct trace_array *irqsoff_trace __read_mostly;
 static int tracer_enabled __read_mostly;
 
+static DEFINE_PER_CPU(int, tracing_cpu);
+
+enum {
+	TRACER_IRQS_OFF		= (1 << 1),
+	TRACER_PREEMPT_OFF	= (1 << 2),
+};
+
+static int trace_type __read_mostly;
+
+#ifdef CONFIG_PREEMPT_TRACER
+static inline int notrace
+preempt_trace(void)
+{
+	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
+}
+#else
+# define preempt_trace() (0)
+#endif
+
+#ifdef CONFIG_IRQSOFF_TRACER
+static inline int notrace
+irq_trace(void)
+{
+	return ((trace_type & TRACER_IRQS_OFF) &&
+		irqs_disabled());
+}
+#else
+# define irq_trace() (0)
+#endif
+
 /*
  * Sequence count - we record it when starting a measurement and
  * skip the latency if the sequence has changed - some other section
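The hunk above carries the core idea of the patch: one trace_type bitmask decides which kind of critical section may drive the tracer, so the same start/stop machinery can serve an irqs-off tracer, a preempt-off tracer, or both at once. A minimal user-space sketch of that gating, with hypothetical stand-in variables for the kernel's irqs_disabled() and preempt_count() (illustrative only, not kernel code):

#include <stdio.h>

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type;		/* which tracer is active */
static int irqs_off;		/* hypothetical stand-in for irqs_disabled() */
static int preempt_depth;	/* hypothetical stand-in for preempt_count() */

static int irq_trace(void)
{
	return (trace_type & TRACER_IRQS_OFF) && irqs_off;
}

static int preempt_trace(void)
{
	return (trace_type & TRACER_PREEMPT_OFF) && preempt_depth;
}

int main(void)
{
	trace_type = TRACER_IRQS_OFF;	/* plain "irqsoff" selected */
	irqs_off = 1;
	preempt_depth = 1;
	/* only the irq side fires; preempt-off sections are ignored */
	printf("irq_trace=%d preempt_trace=%d\n", irq_trace(), preempt_trace());
	return 0;
}

Note how the #else stubs compile the predicates down to constant 0 when a tracer is configured out, letting the callers stay unconditional.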
@@ -44,14 +74,11 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
 	long disabled;
 	int cpu;
 
-	if (likely(!tracer_enabled))
+	if (likely(!__get_cpu_var(tracing_cpu)))
 		return;
 
 	local_save_flags(flags);
 
-	if (!irqs_disabled_flags(flags))
-		return;
-
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
@@ -171,23 +198,29 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
 	if (likely(!tracer_enabled))
 		return;
 
+	if (__get_cpu_var(tracing_cpu))
+		return;
+
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
 
 	if (unlikely(!data) || unlikely(!data->trace) ||
-	    data->critical_start || atomic_read(&data->disabled))
+	    atomic_read(&data->disabled))
 		return;
 
 	atomic_inc(&data->disabled);
 
 	data->critical_sequence = max_sequence;
 	data->preempt_timestamp = now(cpu);
-	data->critical_start = parent_ip;
+	data->critical_start = parent_ip ? : ip;
 	tracing_reset(data);
 
 	local_save_flags(flags);
+
 	ftrace(tr, data, ip, parent_ip, flags);
 
+	__get_cpu_var(tracing_cpu) = 1;
+
 	atomic_dec(&data->disabled);
 }
 
@@ -199,7 +232,13 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
 	struct trace_array_cpu *data;
 	unsigned long flags;
 
-	if (likely(!tracer_enabled))
+	/* Always clear the tracing cpu on stopping the trace */
+	if (unlikely(__get_cpu_var(tracing_cpu)))
+		__get_cpu_var(tracing_cpu) = 0;
+	else
+		return;
+
+	if (!tracer_enabled)
 		return;
 
 	cpu = raw_smp_processor_id();
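Paired with the start_critical_timing() hunk above, this makes the per-CPU tracing_cpu flag a latch: the outermost event that turns interrupts or preemption off arms the measurement, nested starts return early, and the first matching stop disarms it and checks the latency. A standalone model of the handshake (a sketch assuming a single CPU and an always-enabled tracer; buffers and timestamps are elided):

#include <stdio.h>

static int tracing_cpu;	/* models DEFINE_PER_CPU(int, tracing_cpu) */

static void start_critical_timing(const char *cause)
{
	if (tracing_cpu)
		return;			/* already timing on this CPU */
	tracing_cpu = 1;
	printf("armed by %s\n", cause);
}

static void stop_critical_timing(const char *cause)
{
	if (tracing_cpu)
		tracing_cpu = 0;	/* always clear on stop */
	else
		return;			/* nothing armed, nothing to report */
	printf("latency checked at %s\n", cause);
}

int main(void)
{
	start_critical_timing("irqs disabled");		/* outermost: arms */
	start_critical_timing("preempt disabled");	/* nested: ignored */
	stop_critical_timing("preempt enabled");	/* first stop: reports */
	stop_critical_timing("irqs enabled");		/* already clear: ignored */
	return 0;
}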
@@ -212,49 +251,35 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
 	atomic_inc(&data->disabled);
 	local_save_flags(flags);
 	ftrace(tr, data, ip, parent_ip, flags);
-	check_critical_timing(tr, data, parent_ip, cpu);
+	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
 	data->critical_start = 0;
 	atomic_dec(&data->disabled);
 }
 
+/* start and stop critical timings, used for stoppage (in idle) */
 void notrace start_critical_timings(void)
 {
-	unsigned long flags;
-
-	local_save_flags(flags);
-
-	if (irqs_disabled_flags(flags))
+	if (preempt_trace() || irq_trace())
 		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
 
 void notrace stop_critical_timings(void)
 {
-	unsigned long flags;
-
-	local_save_flags(flags);
-
-	if (irqs_disabled_flags(flags))
+	if (preempt_trace() || irq_trace())
 		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
 
+#ifdef CONFIG_IRQSOFF_TRACER
 #ifdef CONFIG_PROVE_LOCKING
 void notrace time_hardirqs_on(unsigned long a0, unsigned long a1)
 {
-	unsigned long flags;
-
-	local_save_flags(flags);
-
-	if (irqs_disabled_flags(flags))
+	if (!preempt_trace() && irq_trace())
 		stop_critical_timing(a0, a1);
 }
 
 void notrace time_hardirqs_off(unsigned long a0, unsigned long a1)
 {
-	unsigned long flags;
-
-	local_save_flags(flags);
-
-	if (irqs_disabled_flags(flags))
+	if (!preempt_trace() && irq_trace())
 		start_critical_timing(a0, a1);
 }
 
@@ -289,49 +314,46 @@ inline void print_irqtrace_events(struct task_struct *curr)
  */
 void notrace trace_hardirqs_on(void)
 {
-	unsigned long flags;
-
-	local_save_flags(flags);
-
-	if (irqs_disabled_flags(flags))
+	if (!preempt_trace() && irq_trace())
 		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
 EXPORT_SYMBOL(trace_hardirqs_on);
 
 void notrace trace_hardirqs_off(void)
 {
-	unsigned long flags;
-
-	local_save_flags(flags);
-
-	if (irqs_disabled_flags(flags))
+	if (!preempt_trace() && irq_trace())
 		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
 EXPORT_SYMBOL(trace_hardirqs_off);
 
 void notrace trace_hardirqs_on_caller(unsigned long caller_addr)
 {
-	unsigned long flags;
-
-	local_save_flags(flags);
-
-	if (irqs_disabled_flags(flags))
+	if (!preempt_trace() && irq_trace())
 		stop_critical_timing(CALLER_ADDR0, caller_addr);
 }
 EXPORT_SYMBOL(trace_hardirqs_on_caller);
 
 void notrace trace_hardirqs_off_caller(unsigned long caller_addr)
 {
-	unsigned long flags;
-
-	local_save_flags(flags);
-
-	if (irqs_disabled_flags(flags))
+	if (!preempt_trace() && irq_trace())
 		start_critical_timing(CALLER_ADDR0, caller_addr);
 }
 EXPORT_SYMBOL(trace_hardirqs_off_caller);
 
 #endif /* CONFIG_PROVE_LOCKING */
+#endif /* CONFIG_IRQSOFF_TRACER */
+
+#ifdef CONFIG_PREEMPT_TRACER
+void notrace trace_preempt_on(unsigned long a0, unsigned long a1)
+{
+	stop_critical_timing(a0, a1);
+}
+
+void notrace trace_preempt_off(unsigned long a0, unsigned long a1)
+{
+	start_critical_timing(a0, a1);
+}
+#endif /* CONFIG_PREEMPT_TRACER */
 
 static void start_irqsoff_tracer(struct trace_array *tr)
 {
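The new trace_preempt_on()/trace_preempt_off() hooks are deliberately thin; all policy (the preempt_trace() check, the tracing_cpu latch) lives in the shared helpers. Their call sites are added elsewhere in this series, presumably on the preempt_count() transitions, so only the outermost disable/enable pair brackets a measurement. A hypothetical model of that pairing (my_preempt_disable()/my_preempt_enable() are stand-ins, not the kernel API):

#include <stdio.h>

static int preempt_count;

static void trace_preempt_off(void) { printf("start timing\n"); }
static void trace_preempt_on(void)  { printf("stop timing\n"); }

static void my_preempt_disable(void)
{
	if (preempt_count++ == 0)	/* 0 -> 1: preemption just went off */
		trace_preempt_off();
}

static void my_preempt_enable(void)
{
	if (--preempt_count == 0)	/* 1 -> 0: preemption back on */
		trace_preempt_on();
}

int main(void)
{
	my_preempt_disable();	/* outermost: starts the measurement */
	my_preempt_disable();	/* nested: no hook */
	my_preempt_enable();	/* still nested: no hook */
	my_preempt_enable();	/* outermost: stops the measurement */
	return 0;
}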
@@ -345,7 +367,7 @@ static void stop_irqsoff_tracer(struct trace_array *tr)
 	tracer_enabled = 0;
 }
 
-static void irqsoff_tracer_init(struct trace_array *tr)
+static void __irqsoff_tracer_init(struct trace_array *tr)
 {
 	irqsoff_trace = tr;
 	/* make sure that the tracer is visible */
@@ -382,6 +404,13 @@ static void notrace irqsoff_tracer_close(struct trace_iterator *iter)
 	start_irqsoff_tracer(iter->tr);
 }
 
+#ifdef CONFIG_IRQSOFF_TRACER
+static void irqsoff_tracer_init(struct trace_array *tr)
+{
+	trace_type = TRACER_IRQS_OFF;
+
+	__irqsoff_tracer_init(tr);
+}
 static struct tracer irqsoff_tracer __read_mostly =
 {
 	.name		= "irqsoff",
@@ -392,10 +421,65 @@ static struct tracer irqsoff_tracer __read_mostly =
 	.ctrl_update	= irqsoff_tracer_ctrl_update,
 	.print_max	= 1,
 };
+# define register_irqsoff(trace) register_tracer(&trace)
+#else
+# define register_irqsoff(trace) do { } while (0)
+#endif
+
+#ifdef CONFIG_PREEMPT_TRACER
+static void preemptoff_tracer_init(struct trace_array *tr)
+{
+	trace_type = TRACER_PREEMPT_OFF;
+
+	__irqsoff_tracer_init(tr);
+}
+
+static struct tracer preemptoff_tracer __read_mostly =
+{
+	.name		= "preemptoff",
+	.init		= preemptoff_tracer_init,
+	.reset		= irqsoff_tracer_reset,
+	.open		= irqsoff_tracer_open,
+	.close		= irqsoff_tracer_close,
+	.ctrl_update	= irqsoff_tracer_ctrl_update,
+	.print_max	= 1,
+};
+# define register_preemptoff(trace) register_tracer(&trace)
+#else
+# define register_preemptoff(trace) do { } while (0)
+#endif
+
+#if defined(CONFIG_IRQSOFF_TRACER) && \
+	defined(CONFIG_PREEMPT_TRACER)
+
+static void preemptirqsoff_tracer_init(struct trace_array *tr)
+{
+	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;
+
+	__irqsoff_tracer_init(tr);
+}
+
+static struct tracer preemptirqsoff_tracer __read_mostly =
+{
+	.name		= "preemptirqsoff",
+	.init		= preemptirqsoff_tracer_init,
+	.reset		= irqsoff_tracer_reset,
+	.open		= irqsoff_tracer_open,
+	.close		= irqsoff_tracer_close,
+	.ctrl_update	= irqsoff_tracer_ctrl_update,
+	.print_max	= 1,
+};
+
+# define register_preemptirqsoff(trace) register_tracer(&trace)
+#else
+# define register_preemptirqsoff(trace) do { } while (0)
+#endif
 
 __init static int init_irqsoff_tracer(void)
 {
-	register_tracer(&irqsoff_tracer);
+	register_irqsoff(irqsoff_tracer);
+	register_preemptoff(preemptoff_tracer);
+	register_preemptirqsoff(preemptirqsoff_tracer);
 
 	return 0;
 }
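The register_* macros keep init_irqsoff_tracer() a straight-line list of calls while each tracer compiles in or out with its Kconfig option; because the no-op variants never evaluate their argument, naming a struct that was compiled out remains legal. A compile-time sketch of the pattern (standalone C with register_tracer() stubbed out; only the irqsoff option is "enabled" here):

#include <stdio.h>

struct tracer { const char *name; };

static void register_tracer(struct tracer *t)
{
	printf("registered %s\n", t->name);
}

#define CONFIG_IRQSOFF_TRACER 1		/* pretend .config */
/* CONFIG_PREEMPT_TRACER left undefined */

#ifdef CONFIG_IRQSOFF_TRACER
static struct tracer irqsoff_tracer = { .name = "irqsoff" };
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static struct tracer preemptoff_tracer = { .name = "preemptoff" };
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

int main(void)
{
	register_irqsoff(irqsoff_tracer);	/* expands to a real call */
	register_preemptoff(preemptoff_tracer);	/* expands to a no-op */
	return 0;
}

The second call compiles even though preemptoff_tracer is never defined in this configuration, which is exactly why the patch can list all three registrations unconditionally.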