Diffstat (limited to 'kernel/trace/trace.c')
 -rw-r--r--  kernel/trace/trace.c | 72
 1 file changed, 45 insertions(+), 27 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f4bb3800318b..c580233add95 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -30,7 +30,6 @@
 #include <linux/gfp.h>
 #include <linux/fs.h>
 #include <linux/kprobes.h>
-#include <linux/seq_file.h>
 #include <linux/writeback.h>
 
 #include <linux/stacktrace.h>
@@ -90,10 +89,10 @@ static inline void ftrace_enable_cpu(void)
         preempt_enable();
 }
 
-static cpumask_t __read_mostly tracing_buffer_mask;
+static cpumask_var_t __read_mostly tracing_buffer_mask;
 
 #define for_each_tracing_cpu(cpu)       \
-        for_each_cpu_mask(cpu, tracing_buffer_mask)
+        for_each_cpu(cpu, tracing_buffer_mask)
 
 /*
  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
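The hunk above is the core of the conversion: cpumask_t, a fixed-size NR_CPUS-bit struct that is expensive to copy or keep on the stack, becomes cpumask_var_t, which is a pointer to a dynamically allocated mask when CONFIG_CPUMASK_OFFSTACK=y and must therefore be allocated and freed explicitly. A minimal sketch of the resulting life cycle, using a hypothetical mask name (example_mask is not in the patch):

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/printk.h>

/* Hypothetical illustration of the cpumask_var_t idiom this patch
 * applies to tracing_buffer_mask. */
static cpumask_var_t example_mask;

static int __init example_init(void)
{
        int cpu;

        /* kmalloc()s the mask when CONFIG_CPUMASK_OFFSTACK=y; with it
         * off, cpumask_var_t is a one-element array and this call is a
         * no-op that always succeeds. */
        if (!alloc_cpumask_var(&example_mask, GFP_KERNEL))
                return -ENOMEM;

        cpumask_copy(example_mask, cpu_possible_mask);

        /* for_each_cpu() replaces for_each_cpu_mask() and takes a
         * struct cpumask pointer instead of a struct by value. */
        for_each_cpu(cpu, example_mask)
                pr_info("cpu %d is covered\n", cpu);

        return 0;
}

With CONFIG_CPUMASK_OFFSTACK=n the conversion costs nothing: alloc_cpumask_var() compiles away and the mask lives in static storage exactly as before.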
@@ -1310,7 +1309,7 @@ enum trace_file_type {
         TRACE_FILE_ANNOTATE = 2,
 };
 
-static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
+static void trace_iterator_increment(struct trace_iterator *iter)
 {
         /* Don't allow ftrace to trace into the ring buffers */
         ftrace_disable_cpu();
@@ -1389,7 +1388,7 @@ static void *find_next_entry_inc(struct trace_iterator *iter)
         iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
 
         if (iter->ent)
-                trace_iterator_increment(iter, iter->cpu);
+                trace_iterator_increment(iter);
 
         return iter->ent ? iter : NULL;
 }
@@ -1812,10 +1811,10 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
         if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
                 return;
 
-        if (cpu_isset(iter->cpu, iter->started))
+        if (cpumask_test_cpu(iter->cpu, iter->started))
                 return;
 
-        cpu_set(iter->cpu, iter->started);
+        cpumask_set_cpu(iter->cpu, iter->started);
         trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
 }
 
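The same mechanical substitution recurs through the rest of the patch. As a reading aid for the remaining hunks, the old struct-by-value operations map onto the new pointer-based ones roughly as follows (a summary, not an exhaustive list):

/*
 * old API                          new API
 * cpu_isset(cpu, mask)             cpumask_test_cpu(cpu, mask)
 * cpu_set(cpu, mask)               cpumask_set_cpu(cpu, mask)
 * cpus_setall(mask)                cpumask_setall(mask)
 * cpus_clear(mask)                 cpumask_clear(mask)
 * mask_a = mask_b                  cpumask_copy(mask_a, mask_b)
 * for_each_cpu_mask(cpu, mask)     for_each_cpu(cpu, mask)
 */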
@@ -2647,13 +2646,7 @@ static struct file_operations show_traces_fops = {
 /*
  * Only trace on a CPU if the bitmask is set:
  */
-static cpumask_t tracing_cpumask = CPU_MASK_ALL;
-
-/*
- * When tracing/tracing_cpu_mask is modified then this holds
- * the new bitmask we are about to install:
- */
-static cpumask_t tracing_cpumask_new;
+static cpumask_var_t tracing_cpumask;
 
 /*
  * The tracer itself will not take this lock, but still we want
@@ -2694,6 +2687,10 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
                      size_t count, loff_t *ppos)
 {
         int err, cpu;
+        cpumask_var_t tracing_cpumask_new;
+
+        if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
+                return -ENOMEM;
 
         mutex_lock(&tracing_cpumask_update_lock);
         err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
@@ -2707,26 +2704,28 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
                  * Increase/decrease the disabled counter if we are
                  * about to flip a bit in the cpumask:
                  */
-                if (cpu_isset(cpu, tracing_cpumask) &&
-                                !cpu_isset(cpu, tracing_cpumask_new)) {
+                if (cpumask_test_cpu(cpu, tracing_cpumask) &&
+                                !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
                         atomic_inc(&global_trace.data[cpu]->disabled);
                 }
-                if (!cpu_isset(cpu, tracing_cpumask) &&
-                                cpu_isset(cpu, tracing_cpumask_new)) {
+                if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
+                                cpumask_test_cpu(cpu, tracing_cpumask_new)) {
                         atomic_dec(&global_trace.data[cpu]->disabled);
                 }
         }
         __raw_spin_unlock(&ftrace_max_lock);
         local_irq_enable();
 
-        tracing_cpumask = tracing_cpumask_new;
+        cpumask_copy(tracing_cpumask, tracing_cpumask_new);
 
         mutex_unlock(&tracing_cpumask_update_lock);
+        free_cpumask_var(tracing_cpumask_new);
 
         return count;
 
 err_unlock:
         mutex_unlock(&tracing_cpumask_update_lock);
+        free_cpumask_var(tracing_cpumask_new);
 
         return err;
 }
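With the file-scope scratch mask gone, tracing_cpumask_write() follows an allocate / parse / commit / free life cycle. A condensed sketch of that shape under hypothetical names (example_mask_write, new_mask, installed_mask are illustrative, not from the patch); the point to note is that both exit paths free the scratch mask, never the installed one:

#include <linux/cpumask.h>
#include <linux/fs.h>
#include <linux/gfp.h>

static cpumask_var_t installed_mask;    /* stand-in for tracing_cpumask */

static ssize_t example_mask_write(struct file *filp, const char __user *ubuf,
                                  size_t count, loff_t *ppos)
{
        cpumask_var_t new_mask;
        int err;

        if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
                return -ENOMEM;

        /* Parse the user-supplied hex mask into the scratch mask. */
        err = cpumask_parse_user(ubuf, count, new_mask);
        if (err)
                goto err_free;

        /* ... adjust per-cpu state for flipped bits, then commit under
         * the appropriate lock: cpumask_copy(installed_mask, new_mask); */

        free_cpumask_var(new_mask);
        return count;

err_free:
        free_cpumask_var(new_mask);     /* installed_mask stays live */
        return err;
}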
@@ -3115,10 +3114,15 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
         if (!iter)
                 return -ENOMEM;
 
+        if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
+                kfree(iter);
+                return -ENOMEM;
+        }
+
         mutex_lock(&trace_types_lock);
 
         /* trace pipe does not show start of buffer */
-        cpus_setall(iter->started);
+        cpumask_setall(iter->started);
 
         iter->tr = &global_trace;
         iter->trace = current_trace;
@@ -3135,6 +3139,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
 {
         struct trace_iterator *iter = file->private_data;
 
+        free_cpumask_var(iter->started);
         kfree(iter);
         atomic_dec(&tracing_reader);
 
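The pipe reader's started mask gets the same treatment: tracing_open_pipe() allocates it (unwinding the earlier allocation of iter if it fails) and tracing_release_pipe() frees it. A minimal sketch of that open/release pairing around a hypothetical private struct:

#include <linux/cpumask.h>
#include <linux/fs.h>
#include <linux/slab.h>

struct example_pipe {                   /* stand-in for struct trace_iterator */
        cpumask_var_t started;
};

static int example_open(struct inode *inode, struct file *filp)
{
        struct example_pipe *p = kzalloc(sizeof(*p), GFP_KERNEL);

        if (!p)
                return -ENOMEM;

        if (!alloc_cpumask_var(&p->started, GFP_KERNEL)) {
                kfree(p);               /* undo the first allocation */
                return -ENOMEM;
        }

        /* A pipe reader never prints "buffer started" banners, so mark
         * every CPU as already seen. */
        cpumask_setall(p->started);

        filp->private_data = p;
        return 0;
}

static int example_release(struct inode *inode, struct file *filp)
{
        struct example_pipe *p = filp->private_data;

        free_cpumask_var(p->started);   /* reverse order of allocation */
        kfree(p);
        return 0;
}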
@@ -3753,7 +3758,6 @@ void ftrace_dump(void)
         static DEFINE_SPINLOCK(ftrace_dump_lock);
         /* use static because iter can be a bit big for the stack */
         static struct trace_iterator iter;
-        static cpumask_t mask;
         static int dump_ran;
         unsigned long flags;
         int cnt = 0, cpu;
@@ -3787,8 +3791,6 @@ void ftrace_dump(void)
          * and then release the locks again.
          */
 
-        cpus_clear(mask);
-
         while (!trace_empty(&iter)) {
 
                 if (!cnt)
@@ -3824,19 +3826,28 @@ __init static int tracer_alloc_buffers(void)
 {
         struct trace_array_cpu *data;
         int i;
+        int ret = -ENOMEM;
 
-        /* TODO: make the number of buffers hot pluggable with CPUS */
-        tracing_buffer_mask = cpu_possible_map;
+        if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
+                goto out;
+
+        if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
+                goto out_free_buffer_mask;
+
+        cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
+        cpumask_copy(tracing_cpumask, cpu_all_mask);
 
+        /* TODO: make the number of buffers hot pluggable with CPUS */
         global_trace.buffer = ring_buffer_alloc(trace_buf_size,
                                                 TRACE_BUFFER_FLAGS);
         if (!global_trace.buffer) {
                 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
                 WARN_ON(1);
-                return 0;
+                goto out_free_cpumask;
         }
         global_trace.entries = ring_buffer_size(global_trace.buffer);
 
+
 #ifdef CONFIG_TRACER_MAX_TRACE
         max_tr.buffer = ring_buffer_alloc(trace_buf_size,
                                           TRACE_BUFFER_FLAGS);
@@ -3844,7 +3855,7 @@ __init static int tracer_alloc_buffers(void)
                 printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
                 WARN_ON(1);
                 ring_buffer_free(global_trace.buffer);
-                return 0;
+                goto out_free_cpumask;
         }
         max_tr.entries = ring_buffer_size(max_tr.buffer);
         WARN_ON(max_tr.entries != global_trace.entries);
@@ -3874,8 +3885,15 @@ __init static int tracer_alloc_buffers(void)
                                                &trace_panic_notifier);
 
         register_die_notifier(&trace_die_notifier);
 
         return 0;
+
+out_free_cpumask:
+        free_cpumask_var(tracing_cpumask);
+out_free_buffer_mask:
+        free_cpumask_var(tracing_buffer_mask);
+out:
+        return ret;
 }
 early_initcall(tracer_alloc_buffers);
 fs_initcall(tracer_init_debugfs);
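Finally, tracer_alloc_buffers() picks up the usual kernel goto-unwind: one label per allocation, labels freeing in reverse order of acquisition, and the success path returning before it reaches them (falling through the labels on success would free masks that are still in use). A skeleton of the pattern with hypothetical resources:

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/slab.h>

static cpumask_var_t mask_a, mask_b;    /* hypothetical resources */
static void *example_buf;

static int __init example_alloc(void)
{
        int ret = -ENOMEM;

        if (!alloc_cpumask_var(&mask_a, GFP_KERNEL))
                goto out;
        if (!alloc_cpumask_var(&mask_b, GFP_KERNEL))
                goto out_free_a;

        example_buf = kmalloc(4096, GFP_KERNEL);
        if (!example_buf)
                goto out_free_b;

        return 0;                       /* success: keep everything allocated */

out_free_b:
        free_cpumask_var(mask_b);
out_free_a:
        free_cpumask_var(mask_a);
out:
        return ret;
}
early_initcall(example_alloc);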