 kernel/trace/trace.c                 | 92
 kernel/trace/trace.h                 |  2
 kernel/trace/trace_branch.c          |  6
 kernel/trace/trace_functions.c       |  4
 kernel/trace/trace_functions_graph.c |  4
 kernel/trace/trace_irqsoff.c         |  6
 kernel/trace/trace_kdb.c             |  4
 kernel/trace/trace_mmiotrace.c       |  4
 kernel/trace/trace_sched_switch.c    |  4
 kernel/trace/trace_sched_wakeup.c    | 14
 10 files changed, 79 insertions(+), 61 deletions(-)
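
At a glance, the hunks below drop the compile-time pointer table struct trace_array_cpu *data[NR_CPUS] from struct trace_array, replace it with a single alloc_percpu() allocation, and convert every tr->data[cpu] access to per_cpu_ptr(tr->data, cpu). For readers unfamiliar with the percpu API, here is a minimal stand-alone sketch of the pattern the patch adopts; struct my_cpu_data, my_setup() and my_teardown() are hypothetical names for illustration, not symbols from the patch:

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
#include <linux/errno.h>

/* hypothetical per-CPU payload, standing in for struct trace_array_cpu */
struct my_cpu_data {
	unsigned long	entries;
	atomic_t	disabled;
};

static struct my_cpu_data __percpu *data;

static int my_setup(void)
{
	int cpu;

	/* one zeroed instance per possible CPU, not NR_CPUS pointer slots */
	data = alloc_percpu(struct my_cpu_data);
	if (!data)
		return -ENOMEM;

	/* per_cpu_ptr() replaces the old data[cpu] array indexing */
	for_each_possible_cpu(cpu)
		per_cpu_ptr(data, cpu)->entries = 0;

	return 0;
}

static void my_teardown(void)
{
	free_percpu(data);	/* accepts NULL, like kfree() */
	data = NULL;
}
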
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 29bff72f97ef..406adbc277a0 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -191,8 +191,6 @@ static struct trace_array global_trace;
 
 LIST_HEAD(ftrace_trace_arrays);
 
-static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
-
 int filter_current_check_discard(struct ring_buffer *buffer,
				 struct ftrace_event_call *call, void *rec,
				 struct ring_buffer_event *event)
@@ -227,8 +225,6 @@ cycle_t ftrace_now(int cpu)
  */
 static struct trace_array	max_tr;
 
-static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
-
 int tracing_is_enabled(void)
 {
	return tracing_is_on();
@@ -666,13 +662,13 @@ unsigned long __read_mostly tracing_max_latency;
 static void
 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
-	struct trace_array_cpu *data = tr->data[cpu];
+	struct trace_array_cpu *data = per_cpu_ptr(tr->data, cpu);
	struct trace_array_cpu *max_data;
 
	max_tr.cpu = cpu;
	max_tr.time_start = data->preempt_timestamp;
 
-	max_data = max_tr.data[cpu];
+	max_data = per_cpu_ptr(max_tr.data, cpu);
	max_data->saved_latency = tracing_max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;
@@ -1984,7 +1980,7 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
	unsigned long entries = 0;
	u64 ts;
 
-	tr->data[cpu]->skipped_entries = 0;
+	per_cpu_ptr(tr->data, cpu)->skipped_entries = 0;
 
	buf_iter = trace_buffer_iter(iter, cpu);
	if (!buf_iter)
@@ -2004,7 +2000,7 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
		ring_buffer_read(buf_iter, NULL);
	}
 
-	tr->data[cpu]->skipped_entries = entries;
+	per_cpu_ptr(tr->data, cpu)->skipped_entries = entries;
 }
 
 /*
@@ -2099,8 +2095,8 @@ get_total_entries(struct trace_array *tr, unsigned long *total, unsigned long *e
		 * entries for the trace and we need to ignore the
		 * ones before the time stamp.
		 */
-		if (tr->data[cpu]->skipped_entries) {
-			count -= tr->data[cpu]->skipped_entries;
+		if (per_cpu_ptr(tr->data, cpu)->skipped_entries) {
+			count -= per_cpu_ptr(tr->data, cpu)->skipped_entries;
			/* total is the same as the entries */
			*total += count;
		} else
@@ -2157,7 +2153,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 {
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_array *tr = iter->tr;
-	struct trace_array_cpu *data = tr->data[tr->cpu];
+	struct trace_array_cpu *data = per_cpu_ptr(tr->data, tr->cpu);
	struct tracer *type = iter->trace;
	unsigned long entries;
	unsigned long total;
@@ -2227,7 +2223,7 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
	if (cpumask_test_cpu(iter->cpu, iter->started))
		return;
 
-	if (iter->tr->data[iter->cpu]->skipped_entries)
+	if (per_cpu_ptr(iter->tr->data, iter->cpu)->skipped_entries)
		return;
 
	cpumask_set_cpu(iter->cpu, iter->started);
@@ -2858,12 +2854,12 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		 */
		if (cpumask_test_cpu(cpu, tracing_cpumask) &&
				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
-			atomic_inc(&tr->data[cpu]->disabled);
+			atomic_inc(&per_cpu_ptr(tr->data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
-			atomic_dec(&tr->data[cpu]->disabled);
+			atomic_dec(&per_cpu_ptr(tr->data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->buffer, cpu);
		}
	}
@@ -3177,7 +3173,7 @@ static void set_buffer_entries(struct trace_array *tr, unsigned long val)
 {
	int cpu;
	for_each_tracing_cpu(cpu)
-		tr->data[cpu]->entries = val;
+		per_cpu_ptr(tr->data, cpu)->entries = val;
 }
 
 /* resize @tr's buffer to the size of @size_tr's entries */
@@ -3189,17 +3185,18 @@ static int resize_buffer_duplicate_size(struct trace_array *tr,
	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(tr->buffer,
-					size_tr->data[cpu]->entries, cpu);
+				per_cpu_ptr(size_tr->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
-			tr->data[cpu]->entries = size_tr->data[cpu]->entries;
+			per_cpu_ptr(tr->data, cpu)->entries =
+				per_cpu_ptr(size_tr->data, cpu)->entries;
		}
	} else {
		ret = ring_buffer_resize(tr->buffer,
-				size_tr->data[cpu_id]->entries, cpu_id);
+			per_cpu_ptr(size_tr->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
-			tr->data[cpu_id]->entries =
-				size_tr->data[cpu_id]->entries;
+			per_cpu_ptr(tr->data, cpu_id)->entries =
+				per_cpu_ptr(size_tr->data, cpu_id)->entries;
	}
 
	return ret;
@@ -3256,13 +3253,13 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&max_tr, size);
	else
-		max_tr.data[cpu]->entries = size;
+		per_cpu_ptr(max_tr.data, cpu)->entries = size;
 
  out:
	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(tr, size);
	else
-		tr->data[cpu]->entries = size;
+		per_cpu_ptr(tr->data, cpu)->entries = size;
 
	return ret;
 }
@@ -3905,8 +3902,8 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
		for_each_tracing_cpu(cpu) {
			/* fill in the size from first enabled cpu */
			if (size == 0)
-				size = tr->data[cpu]->entries;
-			if (size != tr->data[cpu]->entries) {
+				size = per_cpu_ptr(tr->data, cpu)->entries;
+			if (size != per_cpu_ptr(tr->data, cpu)->entries) {
				buf_size_same = 0;
				break;
			}
@@ -3922,7 +3919,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
		} else
			r = sprintf(buf, "X\n");
	} else
-		r = sprintf(buf, "%lu\n", tr->data[tc->cpu]->entries >> 10);
+		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->data, tc->cpu)->entries >> 10);
 
	mutex_unlock(&trace_types_lock);
 
@@ -3969,7 +3966,7 @@ tracing_total_entries_read(struct file *filp, char __user *ubuf,
 
	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
-		size += tr->data[cpu]->entries >> 10;
+		size += per_cpu_ptr(tr->data, cpu)->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
	}
@@ -4773,7 +4770,7 @@ static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
 static void
 tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
 {
-	struct trace_array_cpu *data = tr->data[cpu];
+	struct trace_array_cpu *data = per_cpu_ptr(tr->data, cpu);
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */
@@ -5298,7 +5295,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
	trace_init_global_iter(&iter);
 
	for_each_tracing_cpu(cpu) {
-		atomic_inc(&iter.tr->data[cpu]->disabled);
+		atomic_inc(&per_cpu_ptr(iter.tr->data, cpu)->disabled);
	}
 
	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
@@ -5366,7 +5363,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
		trace_flags |= old_userobj;
 
		for_each_tracing_cpu(cpu) {
-			atomic_dec(&iter.tr->data[cpu]->disabled);
+			atomic_dec(&per_cpu_ptr(iter.tr->data, cpu)->disabled);
		}
		tracing_on();
	}
@@ -5422,11 +5419,31 @@ __init static int tracer_alloc_buffers(void)
		WARN_ON(1);
		goto out_free_cpumask;
	}
+
+	global_trace.data = alloc_percpu(struct trace_array_cpu);
+
+	if (!global_trace.data) {
+		printk(KERN_ERR "tracer: failed to allocate percpu memory!\n");
+		WARN_ON(1);
+		goto out_free_cpumask;
+	}
+
+	for_each_tracing_cpu(i) {
+		memset(per_cpu_ptr(global_trace.data, i), 0, sizeof(struct trace_array_cpu));
+		per_cpu_ptr(global_trace.data, i)->trace_cpu.cpu = i;
+		per_cpu_ptr(global_trace.data, i)->trace_cpu.tr = &global_trace;
+	}
+
	if (global_trace.buffer_disabled)
		tracing_off();
 
-
 #ifdef CONFIG_TRACER_MAX_TRACE
+	max_tr.data = alloc_percpu(struct trace_array_cpu);
+	if (!max_tr.data) {
+		printk(KERN_ERR "tracer: failed to allocate percpu memory!\n");
+		WARN_ON(1);
+		goto out_free_cpumask;
+	}
	max_tr.buffer = ring_buffer_alloc(1, rb_flags);
	raw_spin_lock_init(&max_tr.start_lock);
	if (!max_tr.buffer) {
@@ -5435,18 +5452,15 @@ __init static int tracer_alloc_buffers(void)
		ring_buffer_free(global_trace.buffer);
		goto out_free_cpumask;
	}
-#endif
 
-	/* Allocate the first page for all buffers */
	for_each_tracing_cpu(i) {
-		global_trace.data[i] = &per_cpu(global_trace_cpu, i);
-		global_trace.data[i]->trace_cpu.cpu = i;
-		global_trace.data[i]->trace_cpu.tr = &global_trace;
-		max_tr.data[i] = &per_cpu(max_tr_data, i);
-		max_tr.data[i]->trace_cpu.cpu = i;
-		max_tr.data[i]->trace_cpu.tr = &max_tr;
+		memset(per_cpu_ptr(max_tr.data, i), 0, sizeof(struct trace_array_cpu));
+		per_cpu_ptr(max_tr.data, i)->trace_cpu.cpu = i;
+		per_cpu_ptr(max_tr.data, i)->trace_cpu.tr = &max_tr;
	}
+#endif
 
+	/* Allocate the first page for all buffers */
	set_buffer_entries(&global_trace,
			   ring_buffer_size(global_trace.buffer, 0));
 #ifdef CONFIG_TRACER_MAX_TRACE
@@ -5488,6 +5502,8 @@ __init static int tracer_alloc_buffers(void)
	return 0;
 
 out_free_cpumask:
+	free_percpu(global_trace.data);
+	free_percpu(max_tr.data);
	free_cpumask_var(tracing_cpumask);
 out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
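
Worth noting in the tracer_alloc_buffers() hunks above: both new allocations bail to the shared out_free_cpumask label, which now calls free_percpu() on both pointers even though at most one may have been allocated. That is safe because free_percpu(), like kfree(), ignores a NULL pointer, and both pointers live in static (zero-initialized) storage. A minimal sketch of that unwinding idiom, with hypothetical names:

#include <linux/percpu.h>
#include <linux/errno.h>

struct foo { int x; };

/* static, so both start out NULL before any allocation */
static struct foo __percpu *a;
static struct foo __percpu *b;

static int foo_setup(void)
{
	a = alloc_percpu(struct foo);
	if (!a)
		goto out_free;
	b = alloc_percpu(struct foo);
	if (!b)
		goto out_free;
	return 0;

out_free:
	/* free_percpu(NULL) is a no-op, so one shared error label suffices */
	free_percpu(a);
	free_percpu(b);
	return -ENOMEM;
}
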
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index b80fbcf70af4..15ccd7cd1560 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -186,7 +186,7 @@ struct trace_array {
	struct list_head	systems;
	struct list_head	events;
	struct task_struct	*waiter;
-	struct trace_array_cpu	*data[NR_CPUS];
+	struct trace_array_cpu	*data;
 };
 
 enum {
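
The one-line trace.h hunk above is the pivot for the whole patch: the member changes type, and every call site follows. A compressed before/after comparison; trace_array_before and trace_array_after are illustrative names, and the real struct of course keeps its other members:

#include <linux/percpu.h>
#include <linux/threads.h>		/* NR_CPUS */

struct trace_array_cpu;			/* opaque here; defined in kernel/trace/trace.h */

/* before: a pointer table sized at compile time for the largest machine */
struct trace_array_before {
	struct trace_array_cpu	*data[NR_CPUS];
};

/* after: a single percpu handle returned by alloc_percpu() */
struct trace_array_after {
	struct trace_array_cpu	*data;
};

/*
 * Corresponding access change at every call site:
 *	before:	tr->data[cpu]->field
 *	after:	per_cpu_ptr(tr->data, cpu)->field
 */
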
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 95e96842ed29..6dadbefbb1d6 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -32,6 +32,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 {
	struct ftrace_event_call *call = &event_branch;
	struct trace_array *tr = branch_tracer;
+	struct trace_array_cpu *data;
	struct ring_buffer_event *event;
	struct trace_branch *entry;
	struct ring_buffer *buffer;
@@ -51,7 +52,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
-	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
+	data = per_cpu_ptr(tr->data, cpu);
+	if (atomic_inc_return(&data->disabled) != 1)
		goto out;
 
	pc = preempt_count();
@@ -80,7 +82,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
	__buffer_unlock_commit(buffer, event);
 
  out:
-	atomic_dec(&tr->data[cpu]->disabled);
+	atomic_dec(&data->disabled);
	local_irq_restore(flags);
 }
 
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 601152523326..9d73861efc6a 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -76,7 +76,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
		goto out;
 
	cpu = smp_processor_id();
-	data = tr->data[cpu];
+	data = per_cpu_ptr(tr->data, cpu);
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
@@ -107,7 +107,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
+	data = per_cpu_ptr(tr->data, cpu);
	disabled = atomic_inc_return(&data->disabled);
 
	if (likely(disabled == 1)) {
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 39ada66389cc..ca986d61a282 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -265,7 +265,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
+	data = per_cpu_ptr(tr->data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
@@ -350,7 +350,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
+	data = per_cpu_ptr(tr->data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index b3cf6bf308ef..9b52f9cf7a0d 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -121,7 +121,7 @@ static int func_prolog_dec(struct trace_array *tr,
	if (!irqs_disabled_flags(*flags))
		return 0;
 
-	*data = tr->data[cpu];
+	*data = per_cpu_ptr(tr->data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);
 
	if (likely(disabled == 1))
@@ -380,7 +380,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
	if (per_cpu(tracing_cpu, cpu))
		return;
 
-	data = tr->data[cpu];
+	data = per_cpu_ptr(tr->data, cpu);
 
	if (unlikely(!data) || atomic_read(&data->disabled))
		return;
@@ -418,7 +418,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
	if (!tracer_enabled)
		return;
 
-	data = tr->data[cpu];
+	data = per_cpu_ptr(tr->data, cpu);
 
	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
index cc1dbdc5ee5d..349f6941e8f2 100644
--- a/kernel/trace/trace_kdb.c
+++ b/kernel/trace/trace_kdb.c
@@ -26,7 +26,7 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
	trace_init_global_iter(&iter);
 
	for_each_tracing_cpu(cpu) {
-		atomic_inc(&iter.tr->data[cpu]->disabled);
+		atomic_inc(&per_cpu_ptr(iter.tr->data, cpu)->disabled);
	}
 
	old_userobj = trace_flags;
@@ -83,7 +83,7 @@ out:
	trace_flags = old_userobj;
 
	for_each_tracing_cpu(cpu) {
-		atomic_dec(&iter.tr->data[cpu]->disabled);
+		atomic_dec(&per_cpu_ptr(iter.tr->data, cpu)->disabled);
	}
 
	for_each_tracing_cpu(cpu)
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index fd3c8aae55e5..2472f6f76b50 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -330,7 +330,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 void mmio_trace_rw(struct mmiotrace_rw *rw)
 {
	struct trace_array *tr = mmio_trace_array;
-	struct trace_array_cpu *data = tr->data[smp_processor_id()];
+	struct trace_array_cpu *data = per_cpu_ptr(tr->data, smp_processor_id());
	__trace_mmiotrace_rw(tr, data, rw);
 }
 
@@ -363,7 +363,7 @@ void mmio_trace_mapping(struct mmiotrace_map *map)
	struct trace_array_cpu *data;
 
	preempt_disable();
-	data = tr->data[smp_processor_id()];
+	data = per_cpu_ptr(tr->data, smp_processor_id());
	__trace_mmiotrace_map(tr, data, map);
	preempt_enable();
 }
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 3374c792ccd8..1ffe39abd6fc 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -69,7 +69,7 @@ probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *n
	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
-	data = ctx_trace->data[cpu];
+	data = per_cpu_ptr(ctx_trace->data, cpu);
 
	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);
@@ -123,7 +123,7 @@ probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
-	data = ctx_trace->data[cpu];
+	data = per_cpu_ptr(ctx_trace->data, cpu);
 
	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_wakeup_trace(ctx_trace, wakee, current,
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 5255a8477247..f9ceb75a95b7 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -89,7 +89,7 @@ func_prolog_preempt_disable(struct trace_array *tr,
	if (cpu != wakeup_current_cpu)
		goto out_enable;
 
-	*data = tr->data[cpu];
+	*data = per_cpu_ptr(tr->data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);
	if (unlikely(disabled != 1))
		goto out;
@@ -353,7 +353,7 @@ probe_wakeup_sched_switch(void *ignore,
 
	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
-	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
+	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->data, cpu)->disabled);
	if (likely(disabled != 1))
		goto out;
 
@@ -365,7 +365,7 @@ probe_wakeup_sched_switch(void *ignore,
		goto out_unlock;
 
	/* The task we are waiting for is waking up */
-	data = wakeup_trace->data[wakeup_cpu];
+	data = per_cpu_ptr(wakeup_trace->data, wakeup_cpu);
 
	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
@@ -387,7 +387,7 @@ out_unlock:
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
 out:
-	atomic_dec(&wakeup_trace->data[cpu]->disabled);
+	atomic_dec(&per_cpu_ptr(wakeup_trace->data, cpu)->disabled);
 }
 
 static void __wakeup_reset(struct trace_array *tr)
@@ -435,7 +435,7 @@ probe_wakeup(void *ignore, struct task_struct *p, int success)
		return;
 
	pc = preempt_count();
-	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
+	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->data, cpu)->disabled);
	if (unlikely(disabled != 1))
		goto out;
 
@@ -458,7 +458,7 @@ probe_wakeup(void *ignore, struct task_struct *p, int success)
 
	local_save_flags(flags);
 
-	data = wakeup_trace->data[wakeup_cpu];
+	data = per_cpu_ptr(wakeup_trace->data, wakeup_cpu);
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
 
@@ -472,7 +472,7 @@ probe_wakeup(void *ignore, struct task_struct *p, int success)
 out_locked:
	arch_spin_unlock(&wakeup_lock);
 out:
-	atomic_dec(&wakeup_trace->data[cpu]->disabled);
+	atomic_dec(&per_cpu_ptr(wakeup_trace->data, cpu)->disabled);
 }
 
 static void start_wakeup_tracer(struct trace_array *tr)