-rw-r--r--  kernel/trace/trace.c          | 101
-rw-r--r--  kernel/trace/trace_irqsoff.c  |   4
2 files changed, 72 insertions, 33 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1226a98c9bef..b2aebd62fd33 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -246,9 +246,24 @@ cycle_t ftrace_now(int cpu)
 	return ts;
 }
 
+/**
+ * tracing_is_enabled - Show if global_trace has been disabled
+ *
+ * Shows if the global trace has been enabled or not. It uses the
+ * mirror flag "buffer_disabled" to be used in fast paths such as for
+ * the irqsoff tracer. But it may be inaccurate due to races. If you
+ * need to know the accurate state, use tracing_is_on() which is a little
+ * slower, but accurate.
+ */
 int tracing_is_enabled(void)
 {
-	return tracing_is_on();
+	/*
+	 * For quick access (irqsoff uses this in fast path), just
+	 * return the mirror variable of the state of the ring buffer.
+	 * It's a little racy, but we don't really care.
+	 */
+	smp_rmb();
+	return !global_trace.buffer_disabled;
 }
 
 /*
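tracing_is_enabled() now trades accuracy for speed: it only reads the buffer_disabled mirror, which tracer_tracing_on()/tracer_tracing_off() further down in this patch publish with smp_wmb(), paired with the smp_rmb() here. Below is a minimal userspace sketch of that publish-and-poll pattern, assuming C11 atomics as a stand-in for the kernel flag plus smp_wmb()/smp_rmb(); it illustrates the idea, it is not the kernel code itself.

#include <stdatomic.h>
#include <stdio.h>

/* Mirror of the "real" buffer state, playing the role of tr->buffer_disabled. */
static atomic_int buffer_disabled = 0;

/* Writer side: analogous to tracer_tracing_on()/tracer_tracing_off(). */
static void mirror_set(int disabled)
{
        /* Release ordering stands in for smp_wmb(): earlier writes (e.g.
         * actually switching the buffer) become visible before the flag. */
        atomic_store_explicit(&buffer_disabled, disabled, memory_order_release);
}

/* Reader side: analogous to tracing_is_enabled(). */
static int mirror_is_enabled(void)
{
        /* Acquire ordering stands in for smp_rmb(). The answer can be
         * momentarily stale, which is fine for a fast-path hint: a stale
         * read only costs one extra (or one missed) record. */
        return !atomic_load_explicit(&buffer_disabled, memory_order_acquire);
}

int main(void)
{
        mirror_set(0);
        printf("enabled: %d\n", mirror_is_enabled());   /* prints 1 */
        mirror_set(1);
        printf("enabled: %d\n", mirror_is_enabled());   /* prints 0 */
        return 0;
}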
@@ -361,6 +376,23 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
 	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
 
+void tracer_tracing_on(struct trace_array *tr)
+{
+	if (tr->trace_buffer.buffer)
+		ring_buffer_record_on(tr->trace_buffer.buffer);
+	/*
+	 * This flag is looked at when buffers haven't been allocated
+	 * yet, or by some tracers (like irqsoff), that just want to
+	 * know if the ring buffer has been disabled, but it can handle
+	 * races of where it gets disabled but we still do a record.
+	 * As the check is in the fast path of the tracers, it is more
+	 * important to be fast than accurate.
+	 */
+	tr->buffer_disabled = 0;
+	/* Make the flag seen by readers */
+	smp_wmb();
+}
+
 /**
  * tracing_on - enable tracing buffers
  *
@@ -369,15 +401,7 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
  */
 void tracing_on(void)
 {
-	if (global_trace.trace_buffer.buffer)
-		ring_buffer_record_on(global_trace.trace_buffer.buffer);
-	/*
-	 * This flag is only looked at when buffers haven't been
-	 * allocated yet. We don't really care about the race
-	 * between setting this flag and actually turning
-	 * on the buffer.
-	 */
-	global_trace.buffer_disabled = 0;
+	tracer_tracing_on(&global_trace);
 }
 EXPORT_SYMBOL_GPL(tracing_on);
 
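tracing_on() keeps its exported, argument-free form and simply forwards to the new per-trace_array helper, so callers outside trace.c are untouched. As a reminder of why the wrapper stays exported, here is a hedged sketch of a typical consumer: a hypothetical debugging module (the module name and trigger condition are invented for illustration) that freezes the global trace when it spots a problem.

#include <linux/module.h>
#include <linux/kernel.h>       /* tracing_on()/tracing_off() are declared here */

/* Hypothetical check: freeze the trace the moment something looks wrong. */
static void freeze_demo_check(bool broken)
{
        if (broken) {
                tracing_off();  /* stop the global ring buffer right here */
                pr_warn("freeze_demo: condition hit, trace frozen\n");
        }
}

static int __init freeze_demo_init(void)
{
        freeze_demo_check(false);
        return 0;
}

static void __exit freeze_demo_exit(void)
{
        tracing_on();           /* re-enable on unload, just to be tidy */
}

module_init(freeze_demo_init);
module_exit(freeze_demo_exit);
MODULE_LICENSE("GPL");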
@@ -571,6 +595,23 @@ void tracing_snapshot_alloc(void)
 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
 #endif /* CONFIG_TRACER_SNAPSHOT */
 
+void tracer_tracing_off(struct trace_array *tr)
+{
+	if (tr->trace_buffer.buffer)
+		ring_buffer_record_off(tr->trace_buffer.buffer);
+	/*
+	 * This flag is looked at when buffers haven't been allocated
+	 * yet, or by some tracers (like irqsoff), that just want to
+	 * know if the ring buffer has been disabled, but it can handle
+	 * races of where it gets disabled but we still do a record.
+	 * As the check is in the fast path of the tracers, it is more
+	 * important to be fast than accurate.
+	 */
+	tr->buffer_disabled = 1;
+	/* Make the flag seen by readers */
+	smp_wmb();
+}
+
 /**
  * tracing_off - turn off tracing buffers
  *
@@ -581,26 +622,29 @@ EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
  */
 void tracing_off(void)
 {
-	if (global_trace.trace_buffer.buffer)
-		ring_buffer_record_off(global_trace.trace_buffer.buffer);
-	/*
-	 * This flag is only looked at when buffers haven't been
-	 * allocated yet. We don't really care about the race
-	 * between setting this flag and actually turning
-	 * on the buffer.
-	 */
-	global_trace.buffer_disabled = 1;
+	tracer_tracing_off(&global_trace);
 }
 EXPORT_SYMBOL_GPL(tracing_off);
 
 /**
+ * tracer_tracing_is_on - show real state of ring buffer enabled
+ * @tr : the trace array to know if ring buffer is enabled
+ *
+ * Shows real state of the ring buffer if it is enabled or not.
+ */
+int tracer_tracing_is_on(struct trace_array *tr)
+{
+	if (tr->trace_buffer.buffer)
+		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
+	return !tr->buffer_disabled;
+}
+
+/**
  * tracing_is_on - show state of ring buffers enabled
  */
 int tracing_is_on(void)
 {
-	if (global_trace.trace_buffer.buffer)
-		return ring_buffer_record_is_on(global_trace.trace_buffer.buffer);
-	return !global_trace.buffer_disabled;
+	return tracer_tracing_is_on(&global_trace);
 }
 EXPORT_SYMBOL_GPL(tracing_is_on);
 
@@ -4060,7 +4104,7 @@ static int tracing_wait_pipe(struct file *filp)
 		 *
 		 * iter->pos will be 0 if we haven't read anything.
 		 */
-		if (!tracing_is_enabled() && iter->pos)
+		if (!tracing_is_on() && iter->pos)
 			break;
 	}
 
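tracing_wait_pipe() now keys off tracing_is_on(), the accurate query, rather than the racy mirror: a blocked trace_pipe reader should only stop waiting based on the real buffer state. The userspace side of that blocking read is simply a consumer of the trace_pipe file; a minimal sketch is below, assuming the usual /sys/kernel/debug/tracing mount point.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        /* trace_pipe blocks until trace data arrives; the kernel side of
         * that blocking is tracing_wait_pipe(), patched above. */
        int fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);
        char buf[4096];
        ssize_t n;

        if (fd < 0) {
                perror("open trace_pipe");
                return 1;
        }

        while ((n = read(fd, buf, sizeof(buf))) > 0)
                fwrite(buf, 1, n, stdout);

        close(fd);
        return 0;
}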
@@ -5772,15 +5816,10 @@ rb_simple_read(struct file *filp, char __user *ubuf,
 	       size_t cnt, loff_t *ppos)
 {
 	struct trace_array *tr = filp->private_data;
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 	char buf[64];
 	int r;
 
-	if (buffer)
-		r = ring_buffer_record_is_on(buffer);
-	else
-		r = 0;
-
+	r = tracer_tracing_is_on(tr);
 	r = sprintf(buf, "%d\n", r);
 
 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
@@ -5802,11 +5841,11 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
 	if (buffer) {
 		mutex_lock(&trace_types_lock);
 		if (val) {
-			ring_buffer_record_on(buffer);
+			tracer_tracing_on(tr);
 			if (tr->current_trace->start)
 				tr->current_trace->start(tr);
 		} else {
-			ring_buffer_record_off(buffer);
+			tracer_tracing_off(tr);
 			if (tr->current_trace->stop)
 				tr->current_trace->stop(tr);
 		}
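With the helpers in place, the tracing_on control file is wired entirely through them: rb_simple_read() reports the state via tracer_tracing_is_on() and rb_simple_write() flips it via tracer_tracing_on()/tracer_tracing_off(), so per-instance trace arrays behave like the global one. A small userspace sketch of driving that file follows, again assuming the /sys/kernel/debug/tracing mount point.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

#define TRACING_ON "/sys/kernel/debug/tracing/tracing_on"

/* Write "0" or "1"; the kernel side ends up in rb_simple_write(). */
static int set_tracing(int on)
{
        int fd = open(TRACING_ON, O_WRONLY);

        if (fd < 0)
                return -1;
        if (write(fd, on ? "1" : "0", 1) != 1) {
                close(fd);
                return -1;
        }
        return close(fd);
}

/* Read the state back; the kernel side ends up in rb_simple_read(). */
static int get_tracing(void)
{
        char buf[8] = "";
        int fd = open(TRACING_ON, O_RDONLY);

        if (fd < 0)
                return -1;
        if (read(fd, buf, sizeof(buf) - 1) < 0) {
                close(fd);
                return -1;
        }
        close(fd);
        return buf[0] == '1';
}

int main(void)
{
        set_tracing(0);
        printf("tracing_on: %d\n", get_tracing());
        set_tracing(1);
        printf("tracing_on: %d\n", get_tracing());
        return 0;
}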
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index b19d065a28cb..2aefbee93a6d 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -373,7 +373,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
 	struct trace_array_cpu *data;
 	unsigned long flags;
 
-	if (likely(!tracer_enabled))
+	if (!tracer_enabled || !tracing_is_enabled())
 		return;
 
 	cpu = raw_smp_processor_id();
@@ -416,7 +416,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
 	else
 		return;
 
-	if (!tracer_enabled)
+	if (!tracer_enabled || !tracing_is_enabled())
 		return;
 
 	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
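Both irqsoff fast paths previously looked only at tracer_enabled; they now also consult tracing_is_enabled(), so writing 0 to tracing_on really stops new critical-section timings even while the irqsoff tracer itself is still selected. Because this check runs on every irq/preempt disable-enable pair, the cheap, possibly stale mirror read is exactly what is wanted. A self-contained sketch of the two-gate shape is below; the names are illustrative stand-ins, not the kernel symbols.

#include <stdio.h>

/* Stand-ins for the two gates in the patched fast path: tracer_enabled is
 * the irqsoff tracer's own switch, tracing_enabled mirrors the global
 * tracing_is_enabled() answer. */
static int tracer_enabled = 1;
static int tracing_enabled = 1;
static int samples;

static void start_critical_timing_sketch(void)
{
        /* Same shape as the new check: bail out unless both gates are open. */
        if (!tracer_enabled || !tracing_enabled)
                return;
        samples++;              /* stands in for the real timestamp work */
}

int main(void)
{
        start_critical_timing_sketch();         /* recorded */
        tracing_enabled = 0;                    /* like: echo 0 > tracing_on */
        start_critical_timing_sketch();         /* skipped */
        printf("samples recorded: %d\n", samples);      /* prints 1 */
        return 0;
}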