| author | Steven Rostedt <srostedt@redhat.com> | 2012-02-22 15:50:28 -0500 |
|---|---|---|
| committer | Steven Rostedt <rostedt@goodmis.org> | 2012-02-22 15:50:28 -0500 |
| commit | 499e547057f5bba5cd6f87ebe59b05d0c59da905 (patch) | |
| tree | 9bc64c1a76446153b72a66ad25fe892012d9d120 /kernel/trace | |
| parent | 5500fa51199aee770ce53718853732600543619e (diff) | |
tracing/ring-buffer: Only have tracing_on disable tracing buffers
As the ring-buffer code is being used by other facilities in the
kernel, having the tracing_on file disable *all* buffers is not a desired
effect. It should only disable the ftrace buffers that are being used.
Move the code into the trace.c file and use the buffer disabling
for tracing_on() and tracing_off(). This way only the ftrace buffers
will be affected by them, and other kernel utilities will not be
confused as to why their output suddenly stopped.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
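The practical effect of the split: tracing_off() now silences only ftrace's global_trace buffer, while any other ring-buffer user keeps recording until it calls the new per-buffer ring_buffer_record_off() on its own handle. A minimal sketch of that behaviour follows (the module and its buffer name are hypothetical; the calls are the APIs this patch moves or introduces):

```c
#include <linux/kernel.h>	/* tracing_off() */
#include <linux/ring_buffer.h>	/* ring_buffer_alloc(), ring_buffer_record_off() */

/* Hypothetical private buffer owned by some other kernel facility. */
static struct ring_buffer *my_module_buffer;

static void example(void)
{
	my_module_buffer = ring_buffer_alloc(1 << 16, RB_FL_OVERWRITE);

	/* Before this patch, this cleared a global flag and stopped
	 * writes to *every* ring buffer, my_module_buffer included. */
	tracing_off();

	/* After this patch, only the ftrace buffers stop; a facility
	 * that wants its own buffer off must say so explicitly. */
	ring_buffer_record_off(my_module_buffer);
}
```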
Diffstat (limited to 'kernel/trace')
| -rw-r--r-- | kernel/trace/ring_buffer.c | 157 |
| -rw-r--r-- | kernel/trace/trace.c | 107 |
| -rw-r--r-- | kernel/trace/trace.h | 1 |
3 files changed, 168 insertions, 97 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index f5b7b5c1195b..cf8d11e91efd 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
| @@ -154,33 +154,10 @@ enum { | |||
| 154 | 154 | ||
| 155 | static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON; | 155 | static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON; |
| 156 | 156 | ||
| 157 | #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data) | 157 | /* Used for individual buffers (after the counter) */ |
| 158 | 158 | #define RB_BUFFER_OFF (1 << 20) | |
| 159 | /** | ||
| 160 | * tracing_on - enable all tracing buffers | ||
| 161 | * | ||
| 162 | * This function enables all tracing buffers that may have been | ||
| 163 | * disabled with tracing_off. | ||
| 164 | */ | ||
| 165 | void tracing_on(void) | ||
| 166 | { | ||
| 167 | set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags); | ||
| 168 | } | ||
| 169 | EXPORT_SYMBOL_GPL(tracing_on); | ||
| 170 | 159 | ||
| 171 | /** | 160 | #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data) |
| 172 | * tracing_off - turn off all tracing buffers | ||
| 173 | * | ||
| 174 | * This function stops all tracing buffers from recording data. | ||
| 175 | * It does not disable any overhead the tracers themselves may | ||
| 176 | * be causing. This function simply causes all recording to | ||
| 177 | * the ring buffers to fail. | ||
| 178 | */ | ||
| 179 | void tracing_off(void) | ||
| 180 | { | ||
| 181 | clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags); | ||
| 182 | } | ||
| 183 | EXPORT_SYMBOL_GPL(tracing_off); | ||
| 184 | 161 | ||
| 185 | /** | 162 | /** |
| 186 | * tracing_off_permanent - permanently disable ring buffers | 163 | * tracing_off_permanent - permanently disable ring buffers |
| @@ -193,15 +170,6 @@ void tracing_off_permanent(void) | |||
| 193 | set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags); | 170 | set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags); |
| 194 | } | 171 | } |
| 195 | 172 | ||
| 196 | /** | ||
| 197 | * tracing_is_on - show state of ring buffers enabled | ||
| 198 | */ | ||
| 199 | int tracing_is_on(void) | ||
| 200 | { | ||
| 201 | return ring_buffer_flags == RB_BUFFERS_ON; | ||
| 202 | } | ||
| 203 | EXPORT_SYMBOL_GPL(tracing_is_on); | ||
| 204 | |||
| 205 | #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array)) | 173 | #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array)) |
| 206 | #define RB_ALIGNMENT 4U | 174 | #define RB_ALIGNMENT 4U |
| 207 | #define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX) | 175 | #define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX) |
| @@ -2619,6 +2587,63 @@ void ring_buffer_record_enable(struct ring_buffer *buffer) | |||
| 2619 | EXPORT_SYMBOL_GPL(ring_buffer_record_enable); | 2587 | EXPORT_SYMBOL_GPL(ring_buffer_record_enable); |
| 2620 | 2588 | ||
| 2621 | /** | 2589 | /** |
| 2590 | * ring_buffer_record_off - stop all writes into the buffer | ||
| 2591 | * @buffer: The ring buffer to stop writes to. | ||
| 2592 | * | ||
| 2593 | * This prevents all writes to the buffer. Any attempt to write | ||
| 2594 | * to the buffer after this will fail and return NULL. | ||
| 2595 | * | ||
| 2596 | * This is different than ring_buffer_record_disable() as | ||
| 2597 | * it works like an on/off switch, whereas the disable() version | ||
| 2598 | * must be paired with an enable(). | ||
| 2599 | */ | ||
| 2600 | void ring_buffer_record_off(struct ring_buffer *buffer) | ||
| 2601 | { | ||
| 2602 | unsigned int rd; | ||
| 2603 | unsigned int new_rd; | ||
| 2604 | |||
| 2605 | do { | ||
| 2606 | rd = atomic_read(&buffer->record_disabled); | ||
| 2607 | new_rd = rd | RB_BUFFER_OFF; | ||
| 2608 | } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); | ||
| 2609 | } | ||
| 2610 | EXPORT_SYMBOL_GPL(ring_buffer_record_off); | ||
| 2611 | |||
| 2612 | /** | ||
| 2613 | * ring_buffer_record_on - restart writes into the buffer | ||
| 2614 | * @buffer: The ring buffer to start writes to. | ||
| 2615 | * | ||
| 2616 | * This enables all writes to the buffer that was disabled by | ||
| 2617 | * ring_buffer_record_off(). | ||
| 2618 | * | ||
| 2619 | * This is different than ring_buffer_record_enable() as | ||
| 2620 | * it works like an on/off switch, whereas the enable() version | ||
| 2621 | * must be paired with a disable(). | ||
| 2622 | */ | ||
| 2623 | void ring_buffer_record_on(struct ring_buffer *buffer) | ||
| 2624 | { | ||
| 2625 | unsigned int rd; | ||
| 2626 | unsigned int new_rd; | ||
| 2627 | |||
| 2628 | do { | ||
| 2629 | rd = atomic_read(&buffer->record_disabled); | ||
| 2630 | new_rd = rd & ~RB_BUFFER_OFF; | ||
| 2631 | } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); | ||
| 2632 | } | ||
| 2633 | EXPORT_SYMBOL_GPL(ring_buffer_record_on); | ||
| 2634 | |||
| 2635 | /** | ||
| 2636 | * ring_buffer_record_is_on - return true if the ring buffer can write | ||
| 2637 | * @buffer: The ring buffer to see if write is enabled | ||
| 2638 | * | ||
| 2639 | * Returns true if the ring buffer is in a state that it accepts writes. | ||
| 2640 | */ | ||
| 2641 | int ring_buffer_record_is_on(struct ring_buffer *buffer) | ||
| 2642 | { | ||
| 2643 | return !atomic_read(&buffer->record_disabled); | ||
| 2644 | } | ||
| 2645 | |||
| 2646 | /** | ||
| 2622 | * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer | 2647 | * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer |
| 2623 | * @buffer: The ring buffer to stop writes to. | 2648 | * @buffer: The ring buffer to stop writes to. |
| 2624 | * @cpu: The CPU buffer to stop | 2649 | * @cpu: The CPU buffer to stop |
| @@ -4039,68 +4064,6 @@ int ring_buffer_read_page(struct ring_buffer *buffer, | |||
| 4039 | } | 4064 | } |
| 4040 | EXPORT_SYMBOL_GPL(ring_buffer_read_page); | 4065 | EXPORT_SYMBOL_GPL(ring_buffer_read_page); |
| 4041 | 4066 | ||
| 4042 | #ifdef CONFIG_TRACING | ||
| 4043 | static ssize_t | ||
| 4044 | rb_simple_read(struct file *filp, char __user *ubuf, | ||
| 4045 | size_t cnt, loff_t *ppos) | ||
| 4046 | { | ||
| 4047 | unsigned long *p = filp->private_data; | ||
| 4048 | char buf[64]; | ||
| 4049 | int r; | ||
| 4050 | |||
| 4051 | if (test_bit(RB_BUFFERS_DISABLED_BIT, p)) | ||
| 4052 | r = sprintf(buf, "permanently disabled\n"); | ||
| 4053 | else | ||
| 4054 | r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p)); | ||
| 4055 | |||
| 4056 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | ||
| 4057 | } | ||
| 4058 | |||
| 4059 | static ssize_t | ||
| 4060 | rb_simple_write(struct file *filp, const char __user *ubuf, | ||
| 4061 | size_t cnt, loff_t *ppos) | ||
| 4062 | { | ||
| 4063 | unsigned long *p = filp->private_data; | ||
| 4064 | unsigned long val; | ||
| 4065 | int ret; | ||
| 4066 | |||
| 4067 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); | ||
| 4068 | if (ret) | ||
| 4069 | return ret; | ||
| 4070 | |||
| 4071 | if (val) | ||
| 4072 | set_bit(RB_BUFFERS_ON_BIT, p); | ||
| 4073 | else | ||
| 4074 | clear_bit(RB_BUFFERS_ON_BIT, p); | ||
| 4075 | |||
| 4076 | (*ppos)++; | ||
| 4077 | |||
| 4078 | return cnt; | ||
| 4079 | } | ||
| 4080 | |||
| 4081 | static const struct file_operations rb_simple_fops = { | ||
| 4082 | .open = tracing_open_generic, | ||
| 4083 | .read = rb_simple_read, | ||
| 4084 | .write = rb_simple_write, | ||
| 4085 | .llseek = default_llseek, | ||
| 4086 | }; | ||
| 4087 | |||
| 4088 | |||
| 4089 | static __init int rb_init_debugfs(void) | ||
| 4090 | { | ||
| 4091 | struct dentry *d_tracer; | ||
| 4092 | |||
| 4093 | d_tracer = tracing_init_dentry(); | ||
| 4094 | |||
| 4095 | trace_create_file("tracing_on", 0644, d_tracer, | ||
| 4096 | &ring_buffer_flags, &rb_simple_fops); | ||
| 4097 | |||
| 4098 | return 0; | ||
| 4099 | } | ||
| 4100 | |||
| 4101 | fs_initcall(rb_init_debugfs); | ||
| 4102 | #endif | ||
| 4103 | |||
| 4104 | #ifdef CONFIG_HOTPLUG_CPU | 4067 | #ifdef CONFIG_HOTPLUG_CPU |
| 4105 | static int rb_cpu_notify(struct notifier_block *self, | 4068 | static int rb_cpu_notify(struct notifier_block *self, |
| 4106 | unsigned long action, void *hcpu) | 4069 | unsigned long action, void *hcpu) |
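The new ring_buffer_record_off()/on() latch lives in the same atomic record_disabled word as the existing nested ring_buffer_record_disable()/enable() counter: bit 20 (RB_BUFFER_OFF) sits above any realistic nesting depth, and writers only test the word for non-zero, so either mechanism can reject writes without corrupting the other. Below is a userspace sketch of that pattern using C11 atomics in place of the kernel's atomic_t; the names mirror the patch, but this is an illustration rather than kernel code.

```c
#include <stdatomic.h>
#include <stdbool.h>

#define RB_BUFFER_OFF (1u << 20)	/* above any plausible nesting count */

static _Atomic unsigned int record_disabled;

/* Mirrors ring_buffer_record_off(): set the OFF bit without
 * disturbing the low-order nesting counter. */
static void record_off(void)
{
	unsigned int rd = atomic_load(&record_disabled);

	/* On failure, rd is reloaded with the current value and the
	 * desired value is recomputed from it. */
	while (!atomic_compare_exchange_weak(&record_disabled, &rd,
					     rd | RB_BUFFER_OFF))
		;
}

/* Mirrors ring_buffer_record_on(): clear only the OFF bit. */
static void record_on(void)
{
	unsigned int rd = atomic_load(&record_disabled);

	while (!atomic_compare_exchange_weak(&record_disabled, &rd,
					     rd & ~RB_BUFFER_OFF))
		;
}

/* Mirrors ring_buffer_record_is_on(): writers accept events only
 * when the whole word is zero, i.e. no nesting and OFF bit clear. */
static bool record_is_on(void)
{
	return atomic_load(&record_disabled) == 0;
}
```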
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 10d5503f0d04..f3c13d63d064 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
| @@ -352,6 +352,59 @@ static void wakeup_work_handler(struct work_struct *work) | |||
| 352 | static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler); | 352 | static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler); |
| 353 | 353 | ||
| 354 | /** | 354 | /** |
| 355 | * tracing_on - enable tracing buffers | ||
| 356 | * | ||
| 357 | * This function enables tracing buffers that may have been | ||
| 358 | * disabled with tracing_off. | ||
| 359 | */ | ||
| 360 | void tracing_on(void) | ||
| 361 | { | ||
| 362 | if (global_trace.buffer) | ||
| 363 | ring_buffer_record_on(global_trace.buffer); | ||
| 364 | /* | ||
| 365 | * This flag is only looked at when buffers haven't been | ||
| 366 | * allocated yet. We don't really care about the race | ||
| 367 | * between setting this flag and actually turning | ||
| 368 | * on the buffer. | ||
| 369 | */ | ||
| 370 | global_trace.buffer_disabled = 0; | ||
| 371 | } | ||
| 372 | EXPORT_SYMBOL_GPL(tracing_on); | ||
| 373 | |||
| 374 | /** | ||
| 375 | * tracing_off - turn off tracing buffers | ||
| 376 | * | ||
| 377 | * This function stops the tracing buffers from recording data. | ||
| 378 | * It does not disable any overhead the tracers themselves may | ||
| 379 | * be causing. This function simply causes all recording to | ||
| 380 | * the ring buffers to fail. | ||
| 381 | */ | ||
| 382 | void tracing_off(void) | ||
| 383 | { | ||
| 384 | if (global_trace.buffer) | ||
| 385 | ring_buffer_record_off(global_trace.buffer); | ||
| 386 | /* | ||
| 387 | * This flag is only looked at when buffers haven't been | ||
| 388 | * allocated yet. We don't really care about the race | ||
| 389 | * between setting this flag and actually turning | ||
| 390 | * on the buffer. | ||
| 391 | */ | ||
| 392 | global_trace.buffer_disabled = 1; | ||
| 393 | } | ||
| 394 | EXPORT_SYMBOL_GPL(tracing_off); | ||
| 395 | |||
| 396 | /** | ||
| 397 | * tracing_is_on - show state of ring buffers enabled | ||
| 398 | */ | ||
| 399 | int tracing_is_on(void) | ||
| 400 | { | ||
| 401 | if (global_trace.buffer) | ||
| 402 | return ring_buffer_record_is_on(global_trace.buffer); | ||
| 403 | return !global_trace.buffer_disabled; | ||
| 404 | } | ||
| 405 | EXPORT_SYMBOL_GPL(tracing_is_on); | ||
| 406 | |||
| 407 | /** | ||
| 355 | * trace_wake_up - wake up tasks waiting for trace input | 408 | * trace_wake_up - wake up tasks waiting for trace input |
| 356 | * | 409 | * |
| 357 | * Schedules a delayed work to wake up any task that is blocked on the | 410 | * Schedules a delayed work to wake up any task that is blocked on the |
| @@ -4567,6 +4620,55 @@ static __init void create_trace_options_dir(void) | |||
| 4567 | create_trace_option_core_file(trace_options[i], i); | 4620 | create_trace_option_core_file(trace_options[i], i); |
| 4568 | } | 4621 | } |
| 4569 | 4622 | ||
| 4623 | static ssize_t | ||
| 4624 | rb_simple_read(struct file *filp, char __user *ubuf, | ||
| 4625 | size_t cnt, loff_t *ppos) | ||
| 4626 | { | ||
| 4627 | struct ring_buffer *buffer = filp->private_data; | ||
| 4628 | char buf[64]; | ||
| 4629 | int r; | ||
| 4630 | |||
| 4631 | if (buffer) | ||
| 4632 | r = ring_buffer_record_is_on(buffer); | ||
| 4633 | else | ||
| 4634 | r = 0; | ||
| 4635 | |||
| 4636 | r = sprintf(buf, "%d\n", r); | ||
| 4637 | |||
| 4638 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | ||
| 4639 | } | ||
| 4640 | |||
| 4641 | static ssize_t | ||
| 4642 | rb_simple_write(struct file *filp, const char __user *ubuf, | ||
| 4643 | size_t cnt, loff_t *ppos) | ||
| 4644 | { | ||
| 4645 | struct ring_buffer *buffer = filp->private_data; | ||
| 4646 | unsigned long val; | ||
| 4647 | int ret; | ||
| 4648 | |||
| 4649 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); | ||
| 4650 | if (ret) | ||
| 4651 | return ret; | ||
| 4652 | |||
| 4653 | if (buffer) { | ||
| 4654 | if (val) | ||
| 4655 | ring_buffer_record_on(buffer); | ||
| 4656 | else | ||
| 4657 | ring_buffer_record_off(buffer); | ||
| 4658 | } | ||
| 4659 | |||
| 4660 | (*ppos)++; | ||
| 4661 | |||
| 4662 | return cnt; | ||
| 4663 | } | ||
| 4664 | |||
| 4665 | static const struct file_operations rb_simple_fops = { | ||
| 4666 | .open = tracing_open_generic, | ||
| 4667 | .read = rb_simple_read, | ||
| 4668 | .write = rb_simple_write, | ||
| 4669 | .llseek = default_llseek, | ||
| 4670 | }; | ||
| 4671 | |||
| 4570 | static __init int tracer_init_debugfs(void) | 4672 | static __init int tracer_init_debugfs(void) |
| 4571 | { | 4673 | { |
| 4572 | struct dentry *d_tracer; | 4674 | struct dentry *d_tracer; |
| @@ -4626,6 +4728,9 @@ static __init int tracer_init_debugfs(void) | |||
| 4626 | trace_create_file("trace_clock", 0644, d_tracer, NULL, | 4728 | trace_create_file("trace_clock", 0644, d_tracer, NULL, |
| 4627 | &trace_clock_fops); | 4729 | &trace_clock_fops); |
| 4628 | 4730 | ||
| 4731 | trace_create_file("tracing_on", 0644, d_tracer, | ||
| 4732 | global_trace.buffer, &rb_simple_fops); | ||
| 4733 | |||
| 4629 | #ifdef CONFIG_DYNAMIC_FTRACE | 4734 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 4630 | trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, | 4735 | trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, |
| 4631 | &ftrace_update_tot_cnt, &tracing_dyn_info_fops); | 4736 | &ftrace_update_tot_cnt, &tracing_dyn_info_fops); |
| @@ -4863,6 +4968,8 @@ __init static int tracer_alloc_buffers(void) | |||
| 4863 | goto out_free_cpumask; | 4968 | goto out_free_cpumask; |
| 4864 | } | 4969 | } |
| 4865 | global_trace.entries = ring_buffer_size(global_trace.buffer); | 4970 | global_trace.entries = ring_buffer_size(global_trace.buffer); |
| 4971 | if (global_trace.buffer_disabled) | ||
| 4972 | tracing_off(); | ||
| 4866 | 4973 | ||
| 4867 | 4974 | ||
| 4868 | #ifdef CONFIG_TRACER_MAX_TRACE | 4975 | #ifdef CONFIG_TRACER_MAX_TRACE |
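The trace.c side also has to cope with tracing_off() being called before tracer_alloc_buffers() has created global_trace.buffer; the new buffer_disabled field remembers the request, and the allocation path replays it (the hunk at @@ -4863). A simplified sketch of that ordering follows, with the struct and function names shortened for illustration; it assumes the same ring-buffer APIs as above and is not a verbatim excerpt of the patch.

```c
#include <linux/ring_buffer.h>

/* Cut-down stand-ins for struct trace_array / global_trace. */
struct trace_array_sketch {
	struct ring_buffer *buffer;	/* NULL until buffers are allocated */
	int buffer_disabled;		/* request remembered while buffer == NULL */
};

static struct trace_array_sketch gt;

static void tracing_off_sketch(void)
{
	if (gt.buffer)
		ring_buffer_record_off(gt.buffer);
	/* Always record the request; harmless once the buffer exists. */
	gt.buffer_disabled = 1;
}

static void tracer_alloc_buffers_sketch(void)
{
	gt.buffer = ring_buffer_alloc(1 << 16, RB_FL_OVERWRITE);

	/* Replay a tracing_off() that arrived before allocation. */
	if (gt.buffer_disabled)
		tracing_off_sketch();
}
```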
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 54faec790bc1..ce887c0eca56 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
| @@ -154,6 +154,7 @@ struct trace_array { | |||
| 154 | struct ring_buffer *buffer; | 154 | struct ring_buffer *buffer; |
| 155 | unsigned long entries; | 155 | unsigned long entries; |
| 156 | int cpu; | 156 | int cpu; |
| 157 | int buffer_disabled; | ||
| 157 | cycle_t time_start; | 158 | cycle_t time_start; |
| 158 | struct task_struct *waiter; | 159 | struct task_struct *waiter; |
| 159 | struct trace_array_cpu *data[NR_CPUS]; | 160 | struct trace_array_cpu *data[NR_CPUS]; |
