Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--  kernel/trace/trace.c | 107
1 file changed, 107 insertions, 0 deletions

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 10d5503f0d04..f3c13d63d064 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -352,6 +352,59 @@ static void wakeup_work_handler(struct work_struct *work)
 static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
 
 /**
+ * tracing_on - enable tracing buffers
+ *
+ * This function enables tracing buffers that may have been
+ * disabled with tracing_off.
+ */
+void tracing_on(void)
+{
+	if (global_trace.buffer)
+		ring_buffer_record_on(global_trace.buffer);
+	/*
+	 * This flag is only looked at when buffers haven't been
+	 * allocated yet. We don't really care about the race
+	 * between setting this flag and actually turning
+	 * on the buffer.
+	 */
+	global_trace.buffer_disabled = 0;
+}
+EXPORT_SYMBOL_GPL(tracing_on);
+
+/**
+ * tracing_off - turn off tracing buffers
+ *
+ * This function stops the tracing buffers from recording data.
+ * It does not disable any overhead the tracers themselves may
+ * be causing. This function simply causes all recording to
+ * the ring buffers to fail.
+ */
+void tracing_off(void)
+{
+	if (global_trace.buffer)
+		ring_buffer_record_off(global_trace.buffer);
+	/*
+	 * This flag is only looked at when buffers haven't been
+	 * allocated yet. We don't really care about the race
+	 * between setting this flag and actually turning
+	 * off the buffer.
+	 */
+	global_trace.buffer_disabled = 1;
+}
+EXPORT_SYMBOL_GPL(tracing_off);
+
+/**
+ * tracing_is_on - show state of ring buffers enabled
+ */
+int tracing_is_on(void)
+{
+	if (global_trace.buffer)
+		return ring_buffer_record_is_on(global_trace.buffer);
+	return !global_trace.buffer_disabled;
+}
+EXPORT_SYMBOL_GPL(tracing_is_on);
+
+/**
  * trace_wake_up - wake up tasks waiting for trace input
  *
  * Schedules a delayed work to wake up any task that is blocked on the
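
The three helpers added above are exported for modules, so kernel code can freeze or resume the global trace buffers around a point of interest. A minimal sketch of that usage follows; it is not part of this commit, the module and function names are hypothetical, and the prototypes are assumed to come from linux/kernel.h as on kernels of this era:

/*
 * Illustrative sketch (not part of the patch): stop the trace buffers
 * when this hypothetical module loads, preserving everything recorded
 * up to that point, and let recording resume on unload.
 */
#include <linux/module.h>
#include <linux/kernel.h>	/* tracing_on(), tracing_off(), tracing_is_on() */

static int __init trace_freeze_init(void)
{
	pr_info("tracing was %s at load\n", tracing_is_on() ? "on" : "off");
	tracing_off();		/* same effect as echo 0 > tracing_on */
	return 0;
}

static void __exit trace_freeze_exit(void)
{
	tracing_on();		/* resume recording */
}

module_init(trace_freeze_init);
module_exit(trace_freeze_exit);
MODULE_LICENSE("GPL");		/* required: the helpers are EXPORT_SYMBOL_GPL */
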
@@ -4567,6 +4620,55 @@ static __init void create_trace_options_dir(void)
 		create_trace_option_core_file(trace_options[i], i);
 }
 
+static ssize_t
+rb_simple_read(struct file *filp, char __user *ubuf,
+	       size_t cnt, loff_t *ppos)
+{
+	struct ring_buffer *buffer = filp->private_data;
+	char buf[64];
+	int r;
+
+	if (buffer)
+		r = ring_buffer_record_is_on(buffer);
+	else
+		r = 0;
+
+	r = sprintf(buf, "%d\n", r);
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+rb_simple_write(struct file *filp, const char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	struct ring_buffer *buffer = filp->private_data;
+	unsigned long val;
+	int ret;
+
+	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+	if (ret)
+		return ret;
+
+	if (buffer) {
+		if (val)
+			ring_buffer_record_on(buffer);
+		else
+			ring_buffer_record_off(buffer);
+	}
+
+	(*ppos)++;
+
+	return cnt;
+}
+
+static const struct file_operations rb_simple_fops = {
+	.open		= tracing_open_generic,
+	.read		= rb_simple_read,
+	.write		= rb_simple_write,
+	.llseek		= default_llseek,
+};
+
 static __init int tracer_init_debugfs(void)
 {
 	struct dentry *d_tracer;
@@ -4626,6 +4728,9 @@ static __init int tracer_init_debugfs(void)
 	trace_create_file("trace_clock", 0644, d_tracer, NULL,
 			  &trace_clock_fops);
 
+	trace_create_file("tracing_on", 0644, d_tracer,
+			    global_trace.buffer, &rb_simple_fops);
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
 			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
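
With the file registered, userspace can flip recording on and off through debugfs. A rough userspace sketch follows (not part of this commit; it assumes debugfs is mounted at the conventional /sys/kernel/debug, so adjust the path if your system differs):

/* Illustrative userspace sketch: toggle and read back the new control file. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char state[8] = "";
	int fd = open("/sys/kernel/debug/tracing/tracing_on", O_RDWR);

	if (fd < 0) {
		perror("open tracing_on");
		return 1;
	}

	/* Writing "0" makes rb_simple_write() call ring_buffer_record_off(). */
	if (write(fd, "0", 1) != 1)
		perror("write");

	/* rb_simple_read() reports the current state as "0\n" or "1\n". */
	lseek(fd, 0, SEEK_SET);
	if (read(fd, state, sizeof(state) - 1) > 0)
		printf("tracing_on is now: %s", state);

	close(fd);
	return 0;
}
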
@@ -4863,6 +4968,8 @@ __init static int tracer_alloc_buffers(void)
 		goto out_free_cpumask;
 	}
 	global_trace.entries = ring_buffer_size(global_trace.buffer);
+	if (global_trace.buffer_disabled)
+		tracing_off();
 
 
 #ifdef CONFIG_TRACER_MAX_TRACE