path: root/kernel/trace/trace.c
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--  kernel/trace/trace.c  60
1 file changed, 36 insertions(+), 24 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 0e91f43b6baf..5d04e27f3b40 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -89,10 +89,10 @@ static inline void ftrace_enable_cpu(void)
 	preempt_enable();
 }
 
-static cpumask_t __read_mostly tracing_buffer_mask;
+static cpumask_var_t __read_mostly tracing_buffer_mask;
 
 #define for_each_tracing_cpu(cpu)	\
-	for_each_cpu_mask(cpu, tracing_buffer_mask)
+	for_each_cpu(cpu, tracing_buffer_mask)
 
 /*
  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
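
For reference, the cpumask_var_t idiom this change adopts looks roughly like the sketch below (the demo_* names are illustrative, not from this patch). With CONFIG_CPUMASK_OFFSTACK=y the mask is heap-allocated, so it must be allocated before use and freed afterwards; without that option, alloc_cpumask_var()/free_cpumask_var() compile down to no-ops. Because a cpumask_var_t already acts as a pointer, it is passed to the cpumask API directly, which is why the '&' disappears from calls such as cpumask_scnprintf() in the later hunks.

#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/gfp.h>

static cpumask_var_t demo_mask;		/* illustrative name */

static int demo_init(void)
{
	/* A real allocation only when CONFIG_CPUMASK_OFFSTACK=y. */
	if (!alloc_cpumask_var(&demo_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(demo_mask, cpu_possible_mask);
	return 0;
}

static void demo_walk(void)
{
	int cpu;

	/* The mask is passed directly, not as &demo_mask. */
	for_each_cpu(cpu, demo_mask)
		pr_info("cpu %d set\n", cpu);
}

static void demo_exit(void)
{
	free_cpumask_var(demo_mask);
}
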
@@ -2646,13 +2646,7 @@ static struct file_operations show_traces_fops = {
 /*
  * Only trace on a CPU if the bitmask is set:
  */
-static cpumask_t tracing_cpumask = CPU_MASK_ALL;
-
-/*
- * When tracing/tracing_cpu_mask is modified then this holds
- * the new bitmask we are about to install:
- */
-static cpumask_t tracing_cpumask_new;
+static cpumask_var_t tracing_cpumask;
 
 /*
  * The tracer itself will not take this lock, but still we want
@@ -2674,7 +2668,7 @@ tracing_cpumask_read(struct file *filp, char __user *ubuf,
 
 	mutex_lock(&tracing_cpumask_update_lock);
 
-	len = cpumask_scnprintf(mask_str, count, &tracing_cpumask);
+	len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
 	if (count - len < 2) {
 		count = -EINVAL;
 		goto out_err;
@@ -2693,9 +2687,13 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 		      size_t count, loff_t *ppos)
 {
 	int err, cpu;
+	cpumask_var_t tracing_cpumask_new;
+
+	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
+		return -ENOMEM;
 
 	mutex_lock(&tracing_cpumask_update_lock);
-	err = cpumask_parse_user(ubuf, count, &tracing_cpumask_new);
+	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
 	if (err)
 		goto err_unlock;
 
@@ -2706,26 +2704,28 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 		 * Increase/decrease the disabled counter if we are
 		 * about to flip a bit in the cpumask:
 		 */
-		if (cpu_isset(cpu, tracing_cpumask) &&
-				!cpu_isset(cpu, tracing_cpumask_new)) {
+		if (cpumask_test_cpu(cpu, tracing_cpumask) &&
+				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
 			atomic_inc(&global_trace.data[cpu]->disabled);
 		}
-		if (!cpu_isset(cpu, tracing_cpumask) &&
-				cpu_isset(cpu, tracing_cpumask_new)) {
+		if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
+				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
 			atomic_dec(&global_trace.data[cpu]->disabled);
 		}
 	}
 	__raw_spin_unlock(&ftrace_max_lock);
 	local_irq_enable();
 
-	tracing_cpumask = tracing_cpumask_new;
+	cpumask_copy(tracing_cpumask, tracing_cpumask_new);
 
 	mutex_unlock(&tracing_cpumask_update_lock);
+	free_cpumask_var(tracing_cpumask_new);
 
 	return count;
 
 err_unlock:
 	mutex_unlock(&tracing_cpumask_update_lock);
+	free_cpumask_var(tracing_cpumask_new);
 
 	return err;
 }
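
The write handler now stages the user-supplied mask in a function-local temporary rather than a second file-scope mask, so the staging storage exists only for the duration of the call and is freed on every exit path. A minimal sketch of that allocate/parse/commit/free shape (names here are illustrative, not the patch's):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/types.h>

/* 'dst' stands in for a long-lived mask such as tracing_cpumask. */
static ssize_t demo_mask_write(const char __user *ubuf, size_t count,
			       struct cpumask *dst)
{
	cpumask_var_t new_mask;
	ssize_t ret;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	ret = cpumask_parse_user(ubuf, count, new_mask);
	if (ret < 0)
		goto out_free;

	/* Commit by copying; the temporary never escapes this function. */
	cpumask_copy(dst, new_mask);
	ret = count;

out_free:
	/* Freed on success and on error alike, so it cannot leak. */
	free_cpumask_var(new_mask);
	return ret;
}
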
@@ -3752,7 +3752,6 @@ void ftrace_dump(void)
 	static DEFINE_SPINLOCK(ftrace_dump_lock);
 	/* use static because iter can be a bit big for the stack */
 	static struct trace_iterator iter;
-	static cpumask_t mask;
 	static int dump_ran;
 	unsigned long flags;
 	int cnt = 0, cpu;
@@ -3786,8 +3785,6 @@ void ftrace_dump(void)
 	 * and then release the locks again.
 	 */
 
-	cpus_clear(mask);
-
 	while (!trace_empty(&iter)) {
 
 		if (!cnt)
@@ -3823,19 +3820,28 @@ __init static int tracer_alloc_buffers(void)
 {
 	struct trace_array_cpu *data;
 	int i;
+	int ret = -ENOMEM;
 
-	/* TODO: make the number of buffers hot pluggable with CPUS */
-	tracing_buffer_mask = cpu_possible_map;
+	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
+		goto out;
+
+	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
+		goto out_free_buffer_mask;
 
+	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
+	cpumask_copy(tracing_cpumask, cpu_all_mask);
+
+	/* TODO: make the number of buffers hot pluggable with CPUS */
 	global_trace.buffer = ring_buffer_alloc(trace_buf_size,
 						   TRACE_BUFFER_FLAGS);
 	if (!global_trace.buffer) {
 		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
 		WARN_ON(1);
-		return 0;
+		goto out_free_cpumask;
 	}
 	global_trace.entries = ring_buffer_size(global_trace.buffer);
 
+
 #ifdef CONFIG_TRACER_MAX_TRACE
 	max_tr.buffer = ring_buffer_alloc(trace_buf_size,
 					     TRACE_BUFFER_FLAGS);
@@ -3843,7 +3849,7 @@ __init static int tracer_alloc_buffers(void)
 		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
 		WARN_ON(1);
 		ring_buffer_free(global_trace.buffer);
-		return 0;
+		goto out_free_cpumask;
 	}
 	max_tr.entries = ring_buffer_size(max_tr.buffer);
 	WARN_ON(max_tr.entries != global_trace.entries);
@@ -3873,8 +3879,16 @@ __init static int tracer_alloc_buffers(void)
 					       &trace_panic_notifier);
 
 	register_die_notifier(&trace_die_notifier);
+	ret = 0;
 
-	return 0;
+	return ret;
+
+out_free_cpumask:
+	free_cpumask_var(tracing_cpumask);
+out_free_buffer_mask:
+	free_cpumask_var(tracing_buffer_mask);
+out:
+	return ret;
 }
 early_initcall(tracer_alloc_buffers);
 fs_initcall(tracer_init_debugfs);
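
tracer_alloc_buffers() now unwinds with the conventional kernel goto ladder: each failure jumps to a label that releases only the resources acquired before it, and the success path returns without falling into the labels. A stripped-down sketch of the same shape (the demo_* names and the final setup step are illustrative, not from this patch):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/init.h>

static cpumask_var_t mask_a, mask_b;	/* illustrative */

static int demo_setup_rest(void)	/* stands in for later init steps */
{
	return 0;
}

static int __init demo_init(void)
{
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&mask_a, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&mask_b, GFP_KERNEL))
		goto out_free_a;

	ret = demo_setup_rest();
	if (ret)
		goto out_free_b;

	return 0;	/* success: both masks stay live */

out_free_b:
	free_cpumask_var(mask_b);
out_free_a:
	free_cpumask_var(mask_a);
out:
	return ret;
}
early_initcall(demo_init);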