author		Ingo Molnar <mingo@kernel.org>	2012-03-22 04:17:57 -0400
committer	Ingo Molnar <mingo@kernel.org>	2012-03-22 04:17:57 -0400
commit		6605f9ac69593d480324ba5fa05f64cfebf4db2f
tree		4c64d89d04ad24911d0eca7f3feb0012e4014dae /kernel
parent		ad2a8e6078a16d3b61b530f1447110841c36ae56
parent		93d68e52295fb8b65ded6db49e32e63b6a203e0b
Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/urgent
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/trace/Kconfig		2
-rw-r--r--	kernel/trace/ftrace.c		3
-rw-r--r--	kernel/trace/ring_buffer.c	157
-rw-r--r--	kernel/trace/trace.c		109
-rw-r--r--	kernel/trace/trace.h		3
5 files changed, 173 insertions, 101 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index cd3134510f3d..a1d2849f2473 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -141,7 +141,7 @@ if FTRACE
 config FUNCTION_TRACER
 	bool "Kernel Function Tracer"
 	depends on HAVE_FUNCTION_TRACER
-	select FRAME_POINTER if !ARM_UNWIND && !S390 && !MICROBLAZE
+	select FRAME_POINTER if !ARM_UNWIND && !PPC && !S390 && !MICROBLAZE
 	select KALLSYMS
 	select GENERIC_TRACER
 	select CONTEXT_SWITCH_TRACER
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 867bd1dd2dd0..0fa92f677c92 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -249,7 +249,8 @@ static void update_ftrace_function(void)
 #else
 	__ftrace_trace_function = func;
 #endif
-	ftrace_trace_function = ftrace_test_stop_func;
+	ftrace_trace_function =
+		(func == ftrace_stub) ? func : ftrace_test_stop_func;
 #endif
 }
 
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index f5b7b5c1195b..cf8d11e91efd 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -154,33 +154,10 @@ enum {
 
 static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
 
-#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
-
-/**
- * tracing_on - enable all tracing buffers
- *
- * This function enables all tracing buffers that may have been
- * disabled with tracing_off.
- */
-void tracing_on(void)
-{
-	set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
-}
-EXPORT_SYMBOL_GPL(tracing_on);
+/* Used for individual buffers (after the counter) */
+#define RB_BUFFER_OFF		(1 << 20)
 
-/**
- * tracing_off - turn off all tracing buffers
- *
- * This function stops all tracing buffers from recording data.
- * It does not disable any overhead the tracers themselves may
- * be causing. This function simply causes all recording to
- * the ring buffers to fail.
- */
-void tracing_off(void)
-{
-	clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
-}
-EXPORT_SYMBOL_GPL(tracing_off);
+#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
 
 /**
  * tracing_off_permanent - permanently disable ring buffers
@@ -193,15 +170,6 @@ void tracing_off_permanent(void)
 	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
 }
 
-/**
- * tracing_is_on - show state of ring buffers enabled
- */
-int tracing_is_on(void)
-{
-	return ring_buffer_flags == RB_BUFFERS_ON;
-}
-EXPORT_SYMBOL_GPL(tracing_is_on);
-
 #define RB_EVNT_HDR_SIZE	(offsetof(struct ring_buffer_event, array))
 #define RB_ALIGNMENT		4U
 #define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
@@ -2619,6 +2587,63 @@ void ring_buffer_record_enable(struct ring_buffer *buffer)
 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
 
 /**
+ * ring_buffer_record_off - stop all writes into the buffer
+ * @buffer: The ring buffer to stop writes to.
+ *
+ * This prevents all writes to the buffer. Any attempt to write
+ * to the buffer after this will fail and return NULL.
+ *
+ * This is different than ring_buffer_record_disable() as
+ * it works like an on/off switch, whereas the disable() version
+ * must be paired with an enable().
+ */
+void ring_buffer_record_off(struct ring_buffer *buffer)
+{
+	unsigned int rd;
+	unsigned int new_rd;
+
+	do {
+		rd = atomic_read(&buffer->record_disabled);
+		new_rd = rd | RB_BUFFER_OFF;
+	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
+}
+EXPORT_SYMBOL_GPL(ring_buffer_record_off);
+
+/**
+ * ring_buffer_record_on - restart writes into the buffer
+ * @buffer: The ring buffer to start writes to.
+ *
+ * This enables all writes to the buffer that was disabled by
+ * ring_buffer_record_off().
+ *
+ * This is different than ring_buffer_record_enable() as
+ * it works like an on/off switch, whereas the enable() version
+ * must be paired with a disable().
+ */
+void ring_buffer_record_on(struct ring_buffer *buffer)
+{
+	unsigned int rd;
+	unsigned int new_rd;
+
+	do {
+		rd = atomic_read(&buffer->record_disabled);
+		new_rd = rd & ~RB_BUFFER_OFF;
+	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
+}
+EXPORT_SYMBOL_GPL(ring_buffer_record_on);
+
+/**
+ * ring_buffer_record_is_on - return true if the ring buffer can write
+ * @buffer: The ring buffer to see if write is enabled
+ *
+ * Returns true if the ring buffer is in a state that it accepts writes.
+ */
+int ring_buffer_record_is_on(struct ring_buffer *buffer)
+{
+	return !atomic_read(&buffer->record_disabled);
+}
+
+/**
  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
  * @buffer: The ring buffer to stop writes to.
 * @cpu: The CPU buffer to stop
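
The ring_buffer_record_off()/ring_buffer_record_on() pair added above reuses the record_disabled counter that ring_buffer_record_disable()/enable() increment and decrement: the cmpxchg loop sets or clears the high RB_BUFFER_OFF bit while leaving the nesting count in the low bits untouched. Below is a minimal user-space model of that update pattern, a sketch built on C11 atomics rather than the kernel's atomic_t; only the record_disabled and RB_BUFFER_OFF names are taken from the patch, the rest is illustrative.

/* User-space model of the RB_BUFFER_OFF update pattern in the hunk
 * above, using C11 atomics; names other than record_disabled and
 * RB_BUFFER_OFF are invented for the example. */
#include <stdatomic.h>
#include <stdio.h>

#define RB_BUFFER_OFF	(1U << 20)	/* on/off switch bit, above the nest count */

static _Atomic unsigned int record_disabled;

static void record_off(void)		/* like ring_buffer_record_off() */
{
	unsigned int rd = atomic_load(&record_disabled);

	/* retry until the bit is set without clobbering concurrent
	 * disable()/enable() increments in the low bits */
	while (!atomic_compare_exchange_weak(&record_disabled, &rd,
					     rd | RB_BUFFER_OFF))
		;
}

static void record_on(void)		/* like ring_buffer_record_on() */
{
	unsigned int rd = atomic_load(&record_disabled);

	while (!atomic_compare_exchange_weak(&record_disabled, &rd,
					     rd & ~RB_BUFFER_OFF))
		;
}

int main(void)
{
	atomic_fetch_add(&record_disabled, 1);	/* one nested disable() */
	record_off();				/* flip the switch off on top of it */
	record_on();				/* clears only the switch bit */
	printf("nest count still %u\n",
	       atomic_load(&record_disabled) & ~RB_BUFFER_OFF);	/* prints 1 */
	return 0;
}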
@@ -4039,68 +4064,6 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 }
 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
 
-#ifdef CONFIG_TRACING
-static ssize_t
-rb_simple_read(struct file *filp, char __user *ubuf,
-	       size_t cnt, loff_t *ppos)
-{
-	unsigned long *p = filp->private_data;
-	char buf[64];
-	int r;
-
-	if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
-		r = sprintf(buf, "permanently disabled\n");
-	else
-		r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
-
-	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-}
-
-static ssize_t
-rb_simple_write(struct file *filp, const char __user *ubuf,
-		size_t cnt, loff_t *ppos)
-{
-	unsigned long *p = filp->private_data;
-	unsigned long val;
-	int ret;
-
-	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
-	if (ret)
-		return ret;
-
-	if (val)
-		set_bit(RB_BUFFERS_ON_BIT, p);
-	else
-		clear_bit(RB_BUFFERS_ON_BIT, p);
-
-	(*ppos)++;
-
-	return cnt;
-}
-
-static const struct file_operations rb_simple_fops = {
-	.open		= tracing_open_generic,
-	.read		= rb_simple_read,
-	.write		= rb_simple_write,
-	.llseek		= default_llseek,
-};
-
-
-static __init int rb_init_debugfs(void)
-{
-	struct dentry *d_tracer;
-
-	d_tracer = tracing_init_dentry();
-
-	trace_create_file("tracing_on", 0644, d_tracer,
-			  &ring_buffer_flags, &rb_simple_fops);
-
-	return 0;
-}
-
-fs_initcall(rb_init_debugfs);
-#endif
-
 #ifdef CONFIG_HOTPLUG_CPU
 static int rb_cpu_notify(struct notifier_block *self,
 			 unsigned long action, void *hcpu)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 10d5503f0d04..3a19c354edd6 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -36,6 +36,7 @@
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/poll.h>
+#include <linux/nmi.h>
 #include <linux/fs.h>
 
 #include "trace.h"
@@ -352,6 +353,59 @@ static void wakeup_work_handler(struct work_struct *work)
 static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
 
 /**
+ * tracing_on - enable tracing buffers
+ *
+ * This function enables tracing buffers that may have been
+ * disabled with tracing_off.
+ */
+void tracing_on(void)
+{
+	if (global_trace.buffer)
+		ring_buffer_record_on(global_trace.buffer);
+	/*
+	 * This flag is only looked at when buffers haven't been
+	 * allocated yet. We don't really care about the race
+	 * between setting this flag and actually turning
+	 * on the buffer.
+	 */
+	global_trace.buffer_disabled = 0;
+}
+EXPORT_SYMBOL_GPL(tracing_on);
+
+/**
+ * tracing_off - turn off tracing buffers
+ *
+ * This function stops the tracing buffers from recording data.
+ * It does not disable any overhead the tracers themselves may
+ * be causing. This function simply causes all recording to
+ * the ring buffers to fail.
+ */
+void tracing_off(void)
+{
+	if (global_trace.buffer)
+		ring_buffer_record_off(global_trace.buffer);
+	/*
+	 * This flag is only looked at when buffers haven't been
+	 * allocated yet. We don't really care about the race
+	 * between setting this flag and actually turning
+	 * on the buffer.
+	 */
+	global_trace.buffer_disabled = 1;
+}
+EXPORT_SYMBOL_GPL(tracing_off);
+
+/**
+ * tracing_is_on - show state of ring buffers enabled
+ */
+int tracing_is_on(void)
+{
+	if (global_trace.buffer)
+		return ring_buffer_record_is_on(global_trace.buffer);
+	return !global_trace.buffer_disabled;
+}
+EXPORT_SYMBOL_GPL(tracing_is_on);
+
+/**
  * trace_wake_up - wake up tasks waiting for trace input
  *
 * Schedules a delayed work to wake up any task that is blocked on the
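
Since tracing_on()/tracing_off() are exported above, kernel code can freeze the global trace buffer the moment something interesting happens and keep the events that led up to it. The module below is a hypothetical sketch of that pattern, not part of the patch: the watch_threshold parameter and check_value() helper are invented for illustration, and the prototypes are assumed to come from <linux/kernel.h>, where kernels of this era declare tracing_on()/tracing_off() and trace_printk().

/* Hypothetical module sketch: freeze the global trace buffer when a
 * condition of interest fires, so the events leading up to it are kept.
 * Assumes a kernel with this patch applied, where tracing_on() and
 * tracing_off() are EXPORT_SYMBOL_GPL. */
#include <linux/module.h>
#include <linux/kernel.h>	/* tracing_on(), tracing_off(), trace_printk() */
#include <linux/init.h>

static int watch_threshold = 100;	/* illustrative knob, not in the patch */
module_param(watch_threshold, int, 0644);

static void check_value(int val)
{
	if (val > watch_threshold) {
		trace_printk("value %d over threshold, freezing trace\n", val);
		tracing_off();		/* writes now fail; buffer contents are kept */
	}
}

static int __init watch_init(void)
{
	check_value(150);		/* demo call */
	return 0;
}

static void __exit watch_exit(void)
{
	tracing_on();			/* re-enable recording on unload */
}

module_init(watch_init);
module_exit(watch_exit);
MODULE_LICENSE("GPL");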
@@ -4567,6 +4621,55 @@ static __init void create_trace_options_dir(void)
 		create_trace_option_core_file(trace_options[i], i);
 }
 
+static ssize_t
+rb_simple_read(struct file *filp, char __user *ubuf,
+	       size_t cnt, loff_t *ppos)
+{
+	struct ring_buffer *buffer = filp->private_data;
+	char buf[64];
+	int r;
+
+	if (buffer)
+		r = ring_buffer_record_is_on(buffer);
+	else
+		r = 0;
+
+	r = sprintf(buf, "%d\n", r);
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+rb_simple_write(struct file *filp, const char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	struct ring_buffer *buffer = filp->private_data;
+	unsigned long val;
+	int ret;
+
+	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+	if (ret)
+		return ret;
+
+	if (buffer) {
+		if (val)
+			ring_buffer_record_on(buffer);
+		else
+			ring_buffer_record_off(buffer);
+	}
+
+	(*ppos)++;
+
+	return cnt;
+}
+
+static const struct file_operations rb_simple_fops = {
+	.open		= tracing_open_generic,
+	.read		= rb_simple_read,
+	.write		= rb_simple_write,
+	.llseek		= default_llseek,
+};
+
 static __init int tracer_init_debugfs(void)
 {
 	struct dentry *d_tracer;
@@ -4626,6 +4729,9 @@ static __init int tracer_init_debugfs(void)
 	trace_create_file("trace_clock", 0644, d_tracer, NULL,
 			  &trace_clock_fops);
 
+	trace_create_file("tracing_on", 0644, d_tracer,
+			    global_trace.buffer, &rb_simple_fops);
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
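
With rb_simple_read()/rb_simple_write() now operating on global_trace.buffer and the debugfs "tracing_on" file created just above, user space can toggle recording with a plain 0 or 1 write, matching what tracing_off()/tracing_on() do inside the kernel. A small user-space sketch follows; it assumes debugfs is mounted at the conventional /sys/kernel/debug and that the caller is allowed to write there.

/* User-space sketch: toggle ring-buffer recording through the debugfs
 * "tracing_on" file served by rb_simple_write() above.  The mount point
 * is an assumption; adjust if debugfs lives elsewhere. */
#include <stdio.h>

static int set_tracing_on(int on)
{
	const char *path = "/sys/kernel/debug/tracing/tracing_on";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	/* rb_simple_write() parses this with kstrtoul_from_user() */
	fprintf(f, "%d\n", on ? 1 : 0);
	fclose(f);
	return 0;
}

int main(void)
{
	set_tracing_on(0);	/* freeze the buffer, e.g. around a reproducer */
	/* ... run whatever should not be traced over ... */
	set_tracing_on(1);	/* resume recording */
	return 0;
}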
@@ -4798,6 +4904,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 		if (ret != TRACE_TYPE_NO_CONSUME)
 			trace_consume(&iter);
 	}
+	touch_nmi_watchdog();
 
 	trace_printk_seq(&iter.seq);
 }
@@ -4863,6 +4970,8 @@ __init static int tracer_alloc_buffers(void)
 		goto out_free_cpumask;
 	}
 	global_trace.entries = ring_buffer_size(global_trace.buffer);
+	if (global_trace.buffer_disabled)
+		tracing_off();
 
 
 #ifdef CONFIG_TRACER_MAX_TRACE
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 54faec790bc1..95059f091a24 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -154,6 +154,7 @@ struct trace_array {
 	struct ring_buffer	*buffer;
 	unsigned long		entries;
 	int			cpu;
+	int			buffer_disabled;
 	cycle_t			time_start;
 	struct task_struct	*waiter;
 	struct trace_array_cpu	*data[NR_CPUS];
@@ -835,13 +836,11 @@ extern const char *__stop___trace_bprintk_fmt[];
 	     filter)
 #include "trace_entries.h"
 
-#ifdef CONFIG_PERF_EVENTS
 #ifdef CONFIG_FUNCTION_TRACER
 int perf_ftrace_event_register(struct ftrace_event_call *call,
 			       enum trace_reg type, void *data);
 #else
 #define perf_ftrace_event_register NULL
 #endif /* CONFIG_FUNCTION_TRACER */
-#endif /* CONFIG_PERF_EVENTS */
 
 #endif /* _LINUX_KERNEL_TRACE_H */