author		Steven Rostedt <srostedt@redhat.com>	2012-08-06 16:24:11 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2013-03-15 00:34:43 -0400
commit		a7603ff4b5f7e26e67af82a4c3d05eeeb8d7b160 (patch)
tree		2d348aeb190cf6c7ba43f97419b291251d6e04c5 /kernel/trace/trace.c
parent		ccb469a198cffac94a7eea0b69f715f06e2ddf15 (diff)
tracing: Replace the static global per_cpu arrays with allocated per_cpu
The global trace and the max-tr currently use static per_cpu arrays for
their CPU data descriptors. But to support newly allocated trace_arrays,
those descriptors need to be allocated per_cpu data as well. Instead of
using the static arrays, switch the global trace and the max-tr over to
allocated per_cpu data.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
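
[The core of the change is swapping a build-time per-cpu definition for a
runtime allocation. A minimal sketch of the two patterns, using the
standard kernel percpu API; the surrounding trace_array code is elided and
the error handling shown here is illustrative, not the exact kernel code:

	/* Before: one trace_array_cpu per CPU, fixed at build time,
	 * so only the static global_trace and max_tr could have one.
	 */
	static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

	struct trace_array_cpu *data = &per_cpu(global_trace_cpu, cpu);

	/* After: allocated at runtime, so every newly created
	 * trace_array can carry its own per-cpu descriptors.
	 */
	tr->data = alloc_percpu(struct trace_array_cpu);
	if (!tr->data)
		return -ENOMEM;

	struct trace_array_cpu *data = per_cpu_ptr(tr->data, cpu);

	/* ...and on teardown: */
	free_percpu(tr->data);
]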
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c	92
1 file changed, 54 insertions(+), 38 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 29bff72f97ef..406adbc277a0 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -191,8 +191,6 @@ static struct trace_array global_trace;
 
 LIST_HEAD(ftrace_trace_arrays);
 
-static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
-
 int filter_current_check_discard(struct ring_buffer *buffer,
 				 struct ftrace_event_call *call, void *rec,
 				 struct ring_buffer_event *event)
@@ -227,8 +225,6 @@ cycle_t ftrace_now(int cpu)
  */
 static struct trace_array	max_tr;
 
-static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
-
 int tracing_is_enabled(void)
 {
 	return tracing_is_on();
@@ -666,13 +662,13 @@ unsigned long __read_mostly	tracing_max_latency;
 static void
 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
-	struct trace_array_cpu *data = tr->data[cpu];
+	struct trace_array_cpu *data = per_cpu_ptr(tr->data, cpu);
 	struct trace_array_cpu *max_data;
 
 	max_tr.cpu = cpu;
 	max_tr.time_start = data->preempt_timestamp;
 
-	max_data = max_tr.data[cpu];
+	max_data = per_cpu_ptr(max_tr.data, cpu);
 	max_data->saved_latency = tracing_max_latency;
 	max_data->critical_start = data->critical_start;
 	max_data->critical_end = data->critical_end;
@@ -1984,7 +1980,7 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 	unsigned long entries = 0;
 	u64 ts;
 
-	tr->data[cpu]->skipped_entries = 0;
+	per_cpu_ptr(tr->data, cpu)->skipped_entries = 0;
 
 	buf_iter = trace_buffer_iter(iter, cpu);
 	if (!buf_iter)
@@ -2004,7 +2000,7 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 		ring_buffer_read(buf_iter, NULL);
 	}
 
-	tr->data[cpu]->skipped_entries = entries;
+	per_cpu_ptr(tr->data, cpu)->skipped_entries = entries;
 }
 
 /*
@@ -2099,8 +2095,8 @@ get_total_entries(struct trace_array *tr, unsigned long *total, unsigned long *e
 		 * entries for the trace and we need to ignore the
 		 * ones before the time stamp.
 		 */
-		if (tr->data[cpu]->skipped_entries) {
-			count -= tr->data[cpu]->skipped_entries;
+		if (per_cpu_ptr(tr->data, cpu)->skipped_entries) {
+			count -= per_cpu_ptr(tr->data, cpu)->skipped_entries;
 			/* total is the same as the entries */
 			*total += count;
 		} else
@@ -2157,7 +2153,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 {
 	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
 	struct trace_array *tr = iter->tr;
-	struct trace_array_cpu *data = tr->data[tr->cpu];
+	struct trace_array_cpu *data = per_cpu_ptr(tr->data, tr->cpu);
 	struct tracer *type = iter->trace;
 	unsigned long entries;
 	unsigned long total;
@@ -2227,7 +2223,7 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
 	if (cpumask_test_cpu(iter->cpu, iter->started))
 		return;
 
-	if (iter->tr->data[iter->cpu]->skipped_entries)
+	if (per_cpu_ptr(iter->tr->data, iter->cpu)->skipped_entries)
 		return;
 
 	cpumask_set_cpu(iter->cpu, iter->started);
@@ -2858,12 +2854,12 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 		 */
 		if (cpumask_test_cpu(cpu, tracing_cpumask) &&
 				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
-			atomic_inc(&tr->data[cpu]->disabled);
+			atomic_inc(&per_cpu_ptr(tr->data, cpu)->disabled);
 			ring_buffer_record_disable_cpu(tr->buffer, cpu);
 		}
 		if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
 				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
-			atomic_dec(&tr->data[cpu]->disabled);
+			atomic_dec(&per_cpu_ptr(tr->data, cpu)->disabled);
 			ring_buffer_record_enable_cpu(tr->buffer, cpu);
 		}
 	}
@@ -3177,7 +3173,7 @@ static void set_buffer_entries(struct trace_array *tr, unsigned long val)
 {
 	int cpu;
 	for_each_tracing_cpu(cpu)
-		tr->data[cpu]->entries = val;
+		per_cpu_ptr(tr->data, cpu)->entries = val;
 }
 
 /* resize @tr's buffer to the size of @size_tr's entries */
@@ -3189,17 +3185,18 @@ static int resize_buffer_duplicate_size(struct trace_array *tr,
 	if (cpu_id == RING_BUFFER_ALL_CPUS) {
 		for_each_tracing_cpu(cpu) {
 			ret = ring_buffer_resize(tr->buffer,
-					size_tr->data[cpu]->entries, cpu);
+				 per_cpu_ptr(size_tr->data, cpu)->entries, cpu);
 			if (ret < 0)
 				break;
-			tr->data[cpu]->entries = size_tr->data[cpu]->entries;
+			per_cpu_ptr(tr->data, cpu)->entries =
+				per_cpu_ptr(size_tr->data, cpu)->entries;
 		}
 	} else {
 		ret = ring_buffer_resize(tr->buffer,
-				size_tr->data[cpu_id]->entries, cpu_id);
+			per_cpu_ptr(size_tr->data, cpu_id)->entries, cpu_id);
 		if (ret == 0)
-			tr->data[cpu_id]->entries =
-				size_tr->data[cpu_id]->entries;
+			per_cpu_ptr(tr->data, cpu_id)->entries =
+				per_cpu_ptr(size_tr->data, cpu_id)->entries;
 	}
 
 	return ret;
@@ -3256,13 +3253,13 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
 		if (cpu == RING_BUFFER_ALL_CPUS)
 			set_buffer_entries(&max_tr, size);
 		else
-			max_tr.data[cpu]->entries = size;
+			per_cpu_ptr(max_tr.data, cpu)->entries = size;
 
  out:
 	if (cpu == RING_BUFFER_ALL_CPUS)
 		set_buffer_entries(tr, size);
 	else
-		tr->data[cpu]->entries = size;
+		per_cpu_ptr(tr->data, cpu)->entries = size;
 
 	return ret;
 }
@@ -3905,8 +3902,8 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
 		for_each_tracing_cpu(cpu) {
 			/* fill in the size from first enabled cpu */
 			if (size == 0)
-				size = tr->data[cpu]->entries;
-			if (size != tr->data[cpu]->entries) {
+				size = per_cpu_ptr(tr->data, cpu)->entries;
+			if (size != per_cpu_ptr(tr->data, cpu)->entries) {
 				buf_size_same = 0;
 				break;
 			}
@@ -3922,7 +3919,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
 		} else
 			r = sprintf(buf, "X\n");
 	} else
-		r = sprintf(buf, "%lu\n", tr->data[tc->cpu]->entries >> 10);
+		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->data, tc->cpu)->entries >> 10);
 
 	mutex_unlock(&trace_types_lock);
 
@@ -3969,7 +3966,7 @@ tracing_total_entries_read(struct file *filp, char __user *ubuf,
 
 	mutex_lock(&trace_types_lock);
 	for_each_tracing_cpu(cpu) {
-		size += tr->data[cpu]->entries >> 10;
+		size += per_cpu_ptr(tr->data, cpu)->entries >> 10;
 		if (!ring_buffer_expanded)
 			expanded_size += trace_buf_size >> 10;
 	}
@@ -4773,7 +4770,7 @@ static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
 static void
 tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
 {
-	struct trace_array_cpu *data = tr->data[cpu];
+	struct trace_array_cpu *data = per_cpu_ptr(tr->data, cpu);
 	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
 	struct dentry *d_cpu;
 	char cpu_dir[30]; /* 30 characters should be more than enough */
@@ -5298,7 +5295,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 	trace_init_global_iter(&iter);
 
 	for_each_tracing_cpu(cpu) {
-		atomic_inc(&iter.tr->data[cpu]->disabled);
+		atomic_inc(&per_cpu_ptr(iter.tr->data, cpu)->disabled);
 	}
 
 	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
@@ -5366,7 +5363,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 	trace_flags |= old_userobj;
 
 	for_each_tracing_cpu(cpu) {
-		atomic_dec(&iter.tr->data[cpu]->disabled);
+		atomic_dec(&per_cpu_ptr(iter.tr->data, cpu)->disabled);
 	}
 	tracing_on();
 }
@@ -5422,11 +5419,31 @@ __init static int tracer_alloc_buffers(void)
 		WARN_ON(1);
 		goto out_free_cpumask;
 	}
+
+	global_trace.data = alloc_percpu(struct trace_array_cpu);
+
+	if (!global_trace.data) {
+		printk(KERN_ERR "tracer: failed to allocate percpu memory!\n");
+		WARN_ON(1);
+		goto out_free_cpumask;
+	}
+
+	for_each_tracing_cpu(i) {
+		memset(per_cpu_ptr(global_trace.data, i), 0, sizeof(struct trace_array_cpu));
+		per_cpu_ptr(global_trace.data, i)->trace_cpu.cpu = i;
+		per_cpu_ptr(global_trace.data, i)->trace_cpu.tr = &global_trace;
+	}
+
 	if (global_trace.buffer_disabled)
 		tracing_off();
 
-
 #ifdef CONFIG_TRACER_MAX_TRACE
+	max_tr.data = alloc_percpu(struct trace_array_cpu);
+	if (!max_tr.data) {
+		printk(KERN_ERR "tracer: failed to allocate percpu memory!\n");
+		WARN_ON(1);
+		goto out_free_cpumask;
+	}
 	max_tr.buffer = ring_buffer_alloc(1, rb_flags);
 	raw_spin_lock_init(&max_tr.start_lock);
 	if (!max_tr.buffer) {
@@ -5435,18 +5452,15 @@ __init static int tracer_alloc_buffers(void)
 		ring_buffer_free(global_trace.buffer);
 		goto out_free_cpumask;
 	}
-#endif
 
-	/* Allocate the first page for all buffers */
 	for_each_tracing_cpu(i) {
-		global_trace.data[i] = &per_cpu(global_trace_cpu, i);
-		global_trace.data[i]->trace_cpu.cpu = i;
-		global_trace.data[i]->trace_cpu.tr = &global_trace;
-		max_tr.data[i] = &per_cpu(max_tr_data, i);
-		max_tr.data[i]->trace_cpu.cpu = i;
-		max_tr.data[i]->trace_cpu.tr = &max_tr;
+		memset(per_cpu_ptr(max_tr.data, i), 0, sizeof(struct trace_array_cpu));
+		per_cpu_ptr(max_tr.data, i)->trace_cpu.cpu = i;
+		per_cpu_ptr(max_tr.data, i)->trace_cpu.tr = &max_tr;
 	}
+#endif
 
+	/* Allocate the first page for all buffers */
 	set_buffer_entries(&global_trace,
 			   ring_buffer_size(global_trace.buffer, 0));
 #ifdef CONFIG_TRACER_MAX_TRACE
@@ -5488,6 +5502,8 @@ __init static int tracer_alloc_buffers(void)
 	return 0;
 
 out_free_cpumask:
+	free_percpu(global_trace.data);
+	free_percpu(max_tr.data);
 	free_cpumask_var(tracing_cpumask);
 out_free_buffer_mask:
 	free_cpumask_var(tracing_buffer_mask);