Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--  kernel/trace/trace.c  130
1 file changed, 79 insertions(+), 51 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index b20d3ec75de9..bb6b5e7fa2a2 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -86,17 +86,17 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
  */
 static int tracing_disabled = 1;
 
-DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
+DEFINE_PER_CPU(int, ftrace_cpu_disabled);
 
 static inline void ftrace_disable_cpu(void)
 {
 	preempt_disable();
-	local_inc(&__get_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
 }
 
 static inline void ftrace_enable_cpu(void)
 {
-	local_dec(&__get_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
 	preempt_enable();
 }
 
@@ -129,7 +129,7 @@ static int tracing_set_tracer(const char *buf);
 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
 static char *default_bootup_tracer;
 
-static int __init set_ftrace(char *str)
+static int __init set_cmdline_ftrace(char *str)
 {
 	strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
 	default_bootup_tracer = bootup_tracer_buf;
@@ -137,7 +137,7 @@ static int __init set_ftrace(char *str)
 	ring_buffer_expanded = 1;
 	return 1;
 }
-__setup("ftrace=", set_ftrace);
+__setup("ftrace=", set_cmdline_ftrace);
 
 static int __init set_ftrace_dump_on_oops(char *str)
 {
@@ -203,7 +203,7 @@ cycle_t ftrace_now(int cpu)
  */
 static struct trace_array max_tr;
 
-static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
+static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
 
 /* tracer_enabled is used to toggle activation of a tracer */
 static int tracer_enabled = 1;
@@ -493,15 +493,15 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
  * protected by per_cpu spinlocks. But the action of the swap
  * needs its own lock.
  *
- * This is defined as a raw_spinlock_t in order to help
+ * This is defined as a arch_spinlock_t in order to help
  * with performance when lockdep debugging is enabled.
  *
  * It is also used in other places outside the update_max_tr
  * so it needs to be defined outside of the
  * CONFIG_TRACER_MAX_TRACE.
  */
-static raw_spinlock_t ftrace_max_lock =
-	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t ftrace_max_lock =
+	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 unsigned long __read_mostly tracing_max_latency;
@@ -555,13 +555,13 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	__raw_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&ftrace_max_lock);
 
 	tr->buffer = max_tr.buffer;
 	max_tr.buffer = buf;
 
 	__update_max_tr(tr, tsk, cpu);
-	__raw_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&ftrace_max_lock);
 }
 
 /**
@@ -581,7 +581,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	__raw_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&ftrace_max_lock);
 
 	ftrace_disable_cpu();
 
@@ -603,7 +603,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
 
 	__update_max_tr(tr, tsk, cpu);
-	__raw_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&ftrace_max_lock);
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
@@ -802,7 +802,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
 static int cmdline_idx;
-static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 
 /* temporary disable recording */
 static atomic_t trace_record_cmdline_disabled __read_mostly;
@@ -915,7 +915,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
 	 * nor do we want to disable interrupts,
 	 * so if we miss here, then better luck next time.
 	 */
-	if (!__raw_spin_trylock(&trace_cmdline_lock))
+	if (!arch_spin_trylock(&trace_cmdline_lock))
 		return;
 
 	idx = map_pid_to_cmdline[tsk->pid];
@@ -940,7 +940,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
 
 	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
 
-	__raw_spin_unlock(&trace_cmdline_lock);
+	arch_spin_unlock(&trace_cmdline_lock);
 }
 
 void trace_find_cmdline(int pid, char comm[])
@@ -958,14 +958,14 @@ void trace_find_cmdline(int pid, char comm[])
 	}
 
 	preempt_disable();
-	__raw_spin_lock(&trace_cmdline_lock);
+	arch_spin_lock(&trace_cmdline_lock);
 	map = map_pid_to_cmdline[pid];
 	if (map != NO_CMDLINE_MAP)
 		strcpy(comm, saved_cmdlines[map]);
 	else
 		strcpy(comm, "<...>");
 
-	__raw_spin_unlock(&trace_cmdline_lock);
+	arch_spin_unlock(&trace_cmdline_lock);
 	preempt_enable();
 }
 
@@ -1085,7 +1085,7 @@ trace_function(struct trace_array *tr,
 	struct ftrace_entry *entry;
 
 	/* If we are reading the ring buffer, don't trace */
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
 		return;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
@@ -1251,8 +1251,8 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
  */
 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
-	static raw_spinlock_t trace_buf_lock =
-		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t trace_buf_lock =
+		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	static u32 trace_buf[TRACE_BUF_SIZE];
 
 	struct ftrace_event_call *call = &event_bprint;
@@ -1283,7 +1283,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 
 	/* Lockdep uses trace_printk for lock tracing */
 	local_irq_save(flags);
-	__raw_spin_lock(&trace_buf_lock);
+	arch_spin_lock(&trace_buf_lock);
 	len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
 	if (len > TRACE_BUF_SIZE || len < 0)
@@ -1304,7 +1304,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 		ring_buffer_unlock_commit(buffer, event);
 
 out_unlock:
-	__raw_spin_unlock(&trace_buf_lock);
+	arch_spin_unlock(&trace_buf_lock);
 	local_irq_restore(flags);
 
 out:
@@ -1334,7 +1334,7 @@ int trace_array_printk(struct trace_array *tr,
 int trace_array_vprintk(struct trace_array *tr,
 			unsigned long ip, const char *fmt, va_list args)
 {
-	static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 	static char trace_buf[TRACE_BUF_SIZE];
 
 	struct ftrace_event_call *call = &event_print;
@@ -1360,12 +1360,9 @@ int trace_array_vprintk(struct trace_array *tr,
 
 	pause_graph_tracing();
 	raw_local_irq_save(irq_flags);
-	__raw_spin_lock(&trace_buf_lock);
+	arch_spin_lock(&trace_buf_lock);
 	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
-	len = min(len, TRACE_BUF_SIZE-1);
-	trace_buf[len] = 0;
-
 	size = sizeof(*entry) + len + 1;
 	buffer = tr->buffer;
 	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
@@ -1373,15 +1370,15 @@ int trace_array_vprintk(struct trace_array *tr,
 	if (!event)
 		goto out_unlock;
 	entry = ring_buffer_event_data(event);
 	entry->ip = ip;
 
 	memcpy(&entry->buf, trace_buf, len);
-	entry->buf[len] = 0;
+	entry->buf[len] = '\0';
 	if (!filter_check_discard(call, entry, buffer, event))
 		ring_buffer_unlock_commit(buffer, event);
 
  out_unlock:
-	__raw_spin_unlock(&trace_buf_lock);
+	arch_spin_unlock(&trace_buf_lock);
 	raw_local_irq_restore(irq_flags);
 	unpause_graph_tracing();
  out:
@@ -1515,6 +1512,8 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
 	int i = (int)*pos;
 	void *ent;
 
+	WARN_ON_ONCE(iter->leftover);
+
 	(*pos)++;
 
 	/* can't go backwards */
@@ -1613,8 +1612,16 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 			;
 
 	} else {
-		l = *pos - 1;
-		p = s_next(m, p, &l);
+		/*
+		 * If we overflowed the seq_file before, then we want
+		 * to just reuse the trace_seq buffer again.
+		 */
+		if (iter->leftover)
+			p = iter;
+		else {
+			l = *pos - 1;
+			p = s_next(m, p, &l);
+		}
 	}
 
 	trace_event_read_lock();
@@ -1922,6 +1929,7 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
 static int s_show(struct seq_file *m, void *v)
 {
 	struct trace_iterator *iter = v;
+	int ret;
 
 	if (iter->ent == NULL) {
 		if (iter->tr) {
@@ -1941,9 +1949,27 @@ static int s_show(struct seq_file *m, void *v)
 			if (!(trace_flags & TRACE_ITER_VERBOSE))
 				print_func_help_header(m);
 		}
+	} else if (iter->leftover) {
+		/*
+		 * If we filled the seq_file buffer earlier, we
+		 * want to just show it now.
+		 */
+		ret = trace_print_seq(m, &iter->seq);
+
+		/* ret should this time be zero, but you never know */
+		iter->leftover = ret;
+
 	} else {
 		print_trace_line(iter);
-		trace_print_seq(m, &iter->seq);
+		ret = trace_print_seq(m, &iter->seq);
+		/*
+		 * If we overflow the seq_file buffer, then it will
+		 * ask us for this data again at start up.
+		 * Use that instead.
+		 * ret is 0 if seq_file write succeeded.
+		 * -1 otherwise.
+		 */
+		iter->leftover = ret;
 	}
 
 	return 0;
@@ -2253,7 +2279,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 	mutex_lock(&tracing_cpumask_update_lock);
 
 	local_irq_disable();
-	__raw_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&ftrace_max_lock);
 	for_each_tracing_cpu(cpu) {
 		/*
 		 * Increase/decrease the disabled counter if we are
@@ -2268,7 +2294,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 			atomic_dec(&global_trace.data[cpu]->disabled);
 		}
 	}
-	__raw_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&ftrace_max_lock);
 	local_irq_enable();
 
 	cpumask_copy(tracing_cpumask, tracing_cpumask_new);
@@ -2897,6 +2923,10 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
 	else
 		cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask);
 
+
+	if (iter->trace->pipe_close)
+		iter->trace->pipe_close(iter);
+
 	mutex_unlock(&trace_types_lock);
 
 	free_cpumask_var(iter->started);
@@ -3334,7 +3364,6 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 					size_t cnt, loff_t *fpos)
 {
 	char *buf;
-	char *end;
 
 	if (tracing_disabled)
 		return -EINVAL;
@@ -3342,7 +3371,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	if (cnt > TRACE_BUF_SIZE)
 		cnt = TRACE_BUF_SIZE;
 
-	buf = kmalloc(cnt + 1, GFP_KERNEL);
+	buf = kmalloc(cnt + 2, GFP_KERNEL);
 	if (buf == NULL)
 		return -ENOMEM;
 
@@ -3350,14 +3379,13 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 		kfree(buf);
 		return -EFAULT;
 	}
+	if (buf[cnt-1] != '\n') {
+		buf[cnt] = '\n';
+		buf[cnt+1] = '\0';
+	} else
+		buf[cnt] = '\0';
 
-	/* Cut from the first nil or newline. */
-	buf[cnt] = '\0';
-	end = strchr(buf, '\n');
-	if (end)
-		*end = '\0';
-
-	cnt = mark_printk("%s\n", buf);
+	cnt = mark_printk("%s", buf);
 	kfree(buf);
 	*fpos += cnt;
 
@@ -3730,7 +3758,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 
 	s = kmalloc(sizeof(*s), GFP_KERNEL);
 	if (!s)
-		return ENOMEM;
+		return -ENOMEM;
 
 	trace_seq_init(s);
 
@@ -4279,8 +4307,8 @@ trace_printk_seq(struct trace_seq *s)
 
 static void __ftrace_dump(bool disable_tracing)
 {
-	static raw_spinlock_t ftrace_dump_lock =
-		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t ftrace_dump_lock =
+		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	/* use static because iter can be a bit big for the stack */
 	static struct trace_iterator iter;
 	unsigned int old_userobj;
@@ -4290,7 +4318,7 @@ static void __ftrace_dump(bool disable_tracing)
 
 	/* only one dump */
 	local_irq_save(flags);
-	__raw_spin_lock(&ftrace_dump_lock);
+	arch_spin_lock(&ftrace_dump_lock);
 	if (dump_ran)
 		goto out;
 
@@ -4365,7 +4393,7 @@ static void __ftrace_dump(bool disable_tracing)
 	}
 
  out:
-	__raw_spin_unlock(&ftrace_dump_lock);
+	arch_spin_unlock(&ftrace_dump_lock);
 	local_irq_restore(flags);
 }
 
@@ -4426,7 +4454,7 @@ __init static int tracer_alloc_buffers(void)
 	/* Allocate the first page for all buffers */
 	for_each_tracing_cpu(i) {
 		global_trace.data[i] = &per_cpu(global_trace_cpu, i);
-		max_tr.data[i] = &per_cpu(max_data, i);
+		max_tr.data[i] = &per_cpu(max_tr_data, i);
 	}
 
 	trace_init_cmdlines();