Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c	250
1 file changed, 170 insertions(+), 80 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 5c75deeefe30..874f2893cff0 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -125,19 +125,19 @@ int ftrace_dump_on_oops;
 
 static int tracing_set_tracer(const char *buf);
 
-#define BOOTUP_TRACER_SIZE		100
-static char bootup_tracer_buf[BOOTUP_TRACER_SIZE] __initdata;
+#define MAX_TRACER_SIZE		100
+static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
 static char *default_bootup_tracer;
 
-static int __init set_ftrace(char *str)
+static int __init set_cmdline_ftrace(char *str)
 {
-	strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE);
+	strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
 	default_bootup_tracer = bootup_tracer_buf;
 	/* We are using ftrace early, expand it */
 	ring_buffer_expanded = 1;
 	return 1;
 }
-__setup("ftrace=", set_ftrace);
+__setup("ftrace=", set_cmdline_ftrace);
 
 static int __init set_ftrace_dump_on_oops(char *str)
 {
@@ -242,13 +242,6 @@ static struct tracer *trace_types __read_mostly;
 static struct tracer		*current_trace __read_mostly;
 
 /*
- * max_tracer_type_len is used to simplify the allocating of
- * buffers to read userspace tracer names. We keep track of
- * the longest tracer name registered.
- */
-static int			max_tracer_type_len;
-
-/*
  * trace_types_lock is used to protect the trace_types list.
  * This lock is also used to keep user access serialized.
  * Accesses from userspace will grab this lock while userspace
@@ -275,12 +268,18 @@ static DEFINE_SPINLOCK(tracing_start_lock);
  */
 void trace_wake_up(void)
 {
+	int cpu;
+
+	if (trace_flags & TRACE_ITER_BLOCK)
+		return;
 	/*
 	 * The runqueue_is_locked() can fail, but this is the best we
 	 * have for now:
 	 */
-	if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
+	cpu = get_cpu();
+	if (!runqueue_is_locked(cpu))
 		wake_up(&trace_wait);
+	put_cpu();
 }
 
 static int __init set_buf_size(char *str)
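
Note on the trace_wake_up() hunk above: runqueue_is_locked() now takes a CPU argument, so the caller must first pin itself to a CPU. A minimal sketch of the get_cpu()/put_cpu() idiom the new code relies on (illustration only, not part of the patch):

	int cpu = get_cpu();	/* disables preemption, returns current CPU id */
	/* ... inspect per-CPU state, e.g. runqueue_is_locked(cpu) ... */
	put_cpu();		/* re-enables preemption */

While preemption is off the task cannot migrate, so the CPU id stays valid for the duration of the check.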
@@ -339,6 +338,112 @@ static struct {
 
 int trace_clock_id;
 
+/*
+ * trace_parser_get_init - gets the buffer for trace parser
+ */
+int trace_parser_get_init(struct trace_parser *parser, int size)
+{
+	memset(parser, 0, sizeof(*parser));
+
+	parser->buffer = kmalloc(size, GFP_KERNEL);
+	if (!parser->buffer)
+		return 1;
+
+	parser->size = size;
+	return 0;
+}
+
+/*
+ * trace_parser_put - frees the buffer for trace parser
+ */
+void trace_parser_put(struct trace_parser *parser)
+{
+	kfree(parser->buffer);
+}
+
+/*
+ * trace_get_user - reads the user input string separated by space
+ * (matched by isspace(ch))
+ *
+ * For each string found the 'struct trace_parser' is updated,
+ * and the function returns.
+ *
+ * Returns number of bytes read.
+ *
+ * See kernel/trace/trace.h for 'struct trace_parser' details.
+ */
+int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
+	size_t cnt, loff_t *ppos)
+{
+	char ch;
+	size_t read = 0;
+	ssize_t ret;
+
+	if (!*ppos)
+		trace_parser_clear(parser);
+
+	ret = get_user(ch, ubuf++);
+	if (ret)
+		goto out;
+
+	read++;
+	cnt--;
+
+	/*
+	 * The parser is not finished with the last write,
+	 * continue reading the user input without skipping spaces.
+	 */
+	if (!parser->cont) {
+		/* skip white space */
+		while (cnt && isspace(ch)) {
+			ret = get_user(ch, ubuf++);
+			if (ret)
+				goto out;
+			read++;
+			cnt--;
+		}
+
+		/* only spaces were written */
+		if (isspace(ch)) {
+			*ppos += read;
+			ret = read;
+			goto out;
+		}
+
+		parser->idx = 0;
+	}
+
+	/* read the non-space input */
+	while (cnt && !isspace(ch)) {
+		if (parser->idx < parser->size - 1)
+			parser->buffer[parser->idx++] = ch;
+		else {
+			ret = -EINVAL;
+			goto out;
+		}
+		ret = get_user(ch, ubuf++);
+		if (ret)
+			goto out;
+		read++;
+		cnt--;
+	}
+
+	/* We either got finished input or we have to wait for another call. */
+	if (isspace(ch)) {
+		parser->buffer[parser->idx] = 0;
+		parser->cont = false;
+	} else {
+		parser->cont = true;
+		parser->buffer[parser->idx++] = ch;
+	}
+
+	*ppos += read;
+	ret = read;
+
+out:
+	return ret;
+}
+
 ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
 {
 	int len;
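
The trace_parser added above is consumed by debugfs write handlers elsewhere in this series (set_ftrace_filter and friends). A hedged sketch of such a caller; the handler and example_handle_token() are hypothetical, only the three parser functions and the struct fields come from this patch:

	static ssize_t
	example_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
	{
		struct trace_parser parser;
		ssize_t read;

		if (trace_parser_get_init(&parser, 64))	/* 64-byte token buffer */
			return -ENOMEM;

		read = trace_get_user(&parser, ubuf, cnt, ppos);

		/* cont == false means the token was completed and NUL-terminated */
		if (read >= 0 && parser.idx && !parser.cont)
			example_handle_token(parser.buffer);

		trace_parser_put(&parser);
		return read;
	}

Because trace_get_user() sets parser->cont when input ends mid-token, a word split across two write(2) calls is reassembled transparently on the next call.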
@@ -513,7 +618,6 @@ __releases(kernel_lock)
 __acquires(kernel_lock)
 {
 	struct tracer *t;
-	int len;
 	int ret = 0;
 
 	if (!type->name) {
@@ -521,6 +625,11 @@ __acquires(kernel_lock)
 		return -1;
 	}
 
+	if (strlen(type->name) > MAX_TRACER_SIZE) {
+		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
+		return -1;
+	}
+
 	/*
 	 * When this gets called we hold the BKL which means that
 	 * preemption is disabled. Various trace selftests however
@@ -535,7 +644,7 @@ __acquires(kernel_lock)
 	for (t = trace_types; t; t = t->next) {
 		if (strcmp(type->name, t->name) == 0) {
 			/* already found */
-			pr_info("Trace %s already registered\n",
+			pr_info("Tracer %s already registered\n",
 				type->name);
 			ret = -1;
 			goto out;
@@ -586,9 +695,6 @@ __acquires(kernel_lock)
 
 	type->next = trace_types;
 	trace_types = type;
-	len = strlen(type->name);
-	if (len > max_tracer_type_len)
-		max_tracer_type_len = len;
 
  out:
 	tracing_selftest_running = false;
@@ -597,7 +703,7 @@ __acquires(kernel_lock)
 	if (ret || !default_bootup_tracer)
 		goto out_unlock;
 
-	if (strncmp(default_bootup_tracer, type->name, BOOTUP_TRACER_SIZE))
+	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
 		goto out_unlock;
 
 	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
@@ -619,14 +725,13 @@ __acquires(kernel_lock)
 void unregister_tracer(struct tracer *type)
 {
 	struct tracer **t;
-	int len;
 
 	mutex_lock(&trace_types_lock);
 	for (t = &trace_types; *t; t = &(*t)->next) {
 		if (*t == type)
 			goto found;
 	}
-	pr_info("Trace %s not registered\n", type->name);
+	pr_info("Tracer %s not registered\n", type->name);
 	goto out;
 
  found:
@@ -639,17 +744,7 @@ void unregister_tracer(struct tracer *type)
 			current_trace->stop(&global_trace);
 		current_trace = &nop_trace;
 	}
-
-	if (strlen(type->name) != max_tracer_type_len)
-		goto out;
-
-	max_tracer_type_len = 0;
-	for (t = &trace_types; *t; t = &(*t)->next) {
-		len = strlen((*t)->name);
-		if (len > max_tracer_type_len)
-			max_tracer_type_len = len;
-	}
- out:
+out:
 	mutex_unlock(&trace_types_lock);
 }
 
@@ -719,6 +814,11 @@ static void trace_init_cmdlines(void)
 	cmdline_idx = 0;
 }
 
+int is_tracing_stopped(void)
+{
+	return trace_stop_count;
+}
+
 /**
  * ftrace_off_permanent - disable all ftrace code permanently
  *
@@ -886,7 +986,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 
 	entry->preempt_count = pc & 0xff;
 	entry->pid = (tsk) ? tsk->pid : 0;
-	entry->tgid = (tsk) ? tsk->tgid : 0;
+	entry->lock_depth = (tsk) ? tsk->lock_depth : 0;
 	entry->flags =
 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
 		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
@@ -1068,6 +1168,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 		return;
 	entry = ring_buffer_event_data(event);
 
+	entry->tgid = current->tgid;
 	memset(&entry->caller, 0, sizeof(entry->caller));
 
 	trace.nr_entries = 0;
@@ -1094,6 +1195,7 @@ ftrace_trace_special(void *__tr,
 		   unsigned long arg1, unsigned long arg2, unsigned long arg3,
 		   int pc)
 {
+	struct ftrace_event_call *call = &event_special;
 	struct ring_buffer_event *event;
 	struct trace_array *tr = __tr;
 	struct ring_buffer *buffer = tr->buffer;
@@ -1107,7 +1209,9 @@ ftrace_trace_special(void *__tr,
 	entry->arg1 = arg1;
 	entry->arg2 = arg2;
 	entry->arg3 = arg3;
-	trace_buffer_unlock_commit(buffer, event, 0, pc);
+
+	if (!filter_check_discard(call, entry, buffer, event))
+		trace_buffer_unlock_commit(buffer, event, 0, pc);
 }
 
 void
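
ftrace_trace_special() now follows the same reserve/fill/filter/commit sequence as the other event writers in this file: an event matching a per-event filter is discarded instead of committed. Roughly, as a sketch assembled from the surrounding context (not new API):

	event = trace_buffer_lock_reserve(buffer, TRACE_SPECIAL,
					  sizeof(*entry), 0, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	/* ... fill in entry ... */
	if (!filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, 0, pc);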
@@ -1257,10 +1361,11 @@ int trace_array_vprintk(struct trace_array *tr,
 	pause_graph_tracing();
 	raw_local_irq_save(irq_flags);
 	__raw_spin_lock(&trace_buf_lock);
-	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
-
-	len = min(len, TRACE_BUF_SIZE-1);
-	trace_buf[len] = 0;
+	if (args == NULL) {
+		strncpy(trace_buf, fmt, TRACE_BUF_SIZE);
+		len = strlen(trace_buf);
+	} else
+		len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
 	size = sizeof(*entry) + len + 1;
 	buffer = tr->buffer;
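
With the args == NULL branch added above, a caller that already has a fully formatted string can pass it straight through; the string is then copied verbatim rather than being run through vsnprintf(). An illustrative call, relying only on the code above:

	/* fmt is treated as a plain string, not a format, when args is NULL */
	trace_vprintk(0, "pre-formatted message", NULL);

tracing_mark_write() below is reworked to use exactly this path.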
@@ -1269,10 +1374,10 @@ int trace_array_vprintk(struct trace_array *tr,
 	if (!event)
 		goto out_unlock;
 	entry = ring_buffer_event_data(event);
 	entry->ip = ip;
 
 	memcpy(&entry->buf, trace_buf, len);
-	entry->buf[len] = 0;
+	entry->buf[len] = '\0';
 	if (!filter_check_discard(call, entry, buffer, event))
 		ring_buffer_unlock_commit(buffer, event);
 
@@ -1289,7 +1394,7 @@ int trace_array_vprintk(struct trace_array *tr,
 
 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 {
-	return trace_array_printk(&global_trace, ip, fmt, args);
+	return trace_array_vprintk(&global_trace, ip, fmt, args);
 }
 EXPORT_SYMBOL_GPL(trace_vprintk);
 
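
The one-line change above fixes a genuine bug: trace_array_printk() is the variadic ('...') variant, so handing it an existing va_list is undefined behaviour; only the v-variant accepts a va_list. The usual printf-style pairing, as a hypothetical kernel-style wrapper (this is the shape mark_printk() had before its removal below):

	int example_printk(unsigned long ip, const char *fmt, ...)
	{
		va_list args;
		int ret;

		va_start(args, fmt);
		/* the v-variant is the one that takes a va_list */
		ret = trace_array_vprintk(&global_trace, ip, fmt, args);
		va_end(args);
		return ret;
	}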
@@ -1530,10 +1635,10 @@ static void print_lat_help_header(struct seq_file *m)
 	seq_puts(m, "#                | / _----=> need-resched    \n");
 	seq_puts(m, "#                || / _---=> hardirq/softirq \n");
 	seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
-	seq_puts(m, "#                |||| /                      \n");
-	seq_puts(m, "#                |||||     delay             \n");
-	seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
-	seq_puts(m, "#     \\   /      |||||   \\   |   /           \n");
+	seq_puts(m, "#                |||| /_--=> lock-depth      \n");
+	seq_puts(m, "#                |||||/     delay            \n");
+	seq_puts(m, "#  cmd     pid   |||||| time  |   caller     \n");
+	seq_puts(m, "#     \\   /      ||||||   \\   |   /          \n");
 }
 
 static void print_func_help_header(struct seq_file *m)
@@ -1845,7 +1950,7 @@ static int s_show(struct seq_file *m, void *v)
 	return 0;
 }
 
-static struct seq_operations tracer_seq_ops = {
+static const struct seq_operations tracer_seq_ops = {
 	.start = s_start,
 	.next = s_next,
 	.stop = s_stop,
@@ -1880,11 +1985,9 @@ __tracing_open(struct inode *inode, struct file *file)
 	if (current_trace)
 		*iter->trace = *current_trace;
 
-	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
 		goto fail;
 
-	cpumask_clear(iter->started);
-
 	if (current_trace && current_trace->print_max)
 		iter->tr = &max_tr;
 	else
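
zalloc_cpumask_var() allocates the mask and returns it already zeroed, which is why the explicit cpumask_clear() above can go; the same substitution is made in tracer_alloc_buffers() further down. The equivalence, roughly:

	/* before: allocate, then clear by hand */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		goto fail;
	cpumask_clear(mask);

	/* after: one call, mask already zeroed */
	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		goto fail;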
@@ -2059,7 +2162,7 @@ static int t_show(struct seq_file *m, void *v)
 	return 0;
 }
 
-static struct seq_operations show_traces_seq_ops = {
+static const struct seq_operations show_traces_seq_ops = {
 	.start = t_start,
 	.next = t_next,
 	.stop = t_stop,
@@ -2338,7 +2441,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 		return ret;
 	}
 
-	filp->f_pos += cnt;
+	*ppos += cnt;
 
 	return cnt;
 }
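
This and the similar hunks below switch the write handlers from filp->f_pos to the loff_t pointer they are handed. The VFS passes ->write() the position that applies to this particular call; for pwrite(2) that is a temporary, not filp->f_pos, so updating f_pos directly corrupts positioned writes. The corrected handler shape (names hypothetical):

	static ssize_t example_write(struct file *filp, const char __user *ubuf,
				     size_t cnt, loff_t *ppos)
	{
		/* ... consume cnt bytes from ubuf ... */
		*ppos += cnt;	/* advance the offset for this call only */
		return cnt;
	}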
@@ -2480,7 +2583,7 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf,
 	}
 	mutex_unlock(&trace_types_lock);
 
-	filp->f_pos += cnt;
+	*ppos += cnt;
 
 	return cnt;
 }
@@ -2489,7 +2592,7 @@ static ssize_t
 tracing_set_trace_read(struct file *filp, char __user *ubuf,
 		       size_t cnt, loff_t *ppos)
 {
-	char buf[max_tracer_type_len+2];
+	char buf[MAX_TRACER_SIZE+2];
 	int r;
 
 	mutex_lock(&trace_types_lock);
@@ -2639,15 +2742,15 @@ static ssize_t
 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
 			size_t cnt, loff_t *ppos)
 {
-	char buf[max_tracer_type_len+1];
+	char buf[MAX_TRACER_SIZE+1];
 	int i;
 	size_t ret;
 	int err;
 
 	ret = cnt;
 
-	if (cnt > max_tracer_type_len)
-		cnt = max_tracer_type_len;
+	if (cnt > MAX_TRACER_SIZE)
+		cnt = MAX_TRACER_SIZE;
 
 	if (copy_from_user(&buf, ubuf, cnt))
 		return -EFAULT;
@@ -2662,7 +2765,7 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
 	if (err)
 		return err;
 
-	filp->f_pos += ret;
+	*ppos += ret;
 
 	return ret;
 }
@@ -3197,7 +3300,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 		}
 	}
 
-	filp->f_pos += cnt;
+	*ppos += cnt;
 
 	/* If check pages failed, return ENOMEM */
 	if (tracing_disabled)
@@ -3217,22 +3320,11 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
-static int mark_printk(const char *fmt, ...)
-{
-	int ret;
-	va_list args;
-	va_start(args, fmt);
-	ret = trace_vprintk(0, fmt, args);
-	va_end(args);
-	return ret;
-}
-
 static ssize_t
 tracing_mark_write(struct file *filp, const char __user *ubuf,
 					size_t cnt, loff_t *fpos)
 {
 	char *buf;
-	char *end;
 
 	if (tracing_disabled)
 		return -EINVAL;
@@ -3240,7 +3332,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	if (cnt > TRACE_BUF_SIZE)
 		cnt = TRACE_BUF_SIZE;
 
-	buf = kmalloc(cnt + 1, GFP_KERNEL);
+	buf = kmalloc(cnt + 2, GFP_KERNEL);
 	if (buf == NULL)
 		return -ENOMEM;
 
@@ -3248,14 +3340,13 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 		kfree(buf);
 		return -EFAULT;
 	}
+	if (buf[cnt-1] != '\n') {
+		buf[cnt] = '\n';
+		buf[cnt+1] = '\0';
+	} else
+		buf[cnt] = '\0';
 
-	/* Cut from the first nil or newline. */
-	buf[cnt] = '\0';
-	end = strchr(buf, '\n');
-	if (end)
-		*end = '\0';
-
-	cnt = mark_printk("%s\n", buf);
+	cnt = trace_vprintk(0, buf, NULL);
 	kfree(buf);
 	*fpos += cnt;
 
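
Since trace_vprintk(0, buf, NULL) treats buf as a finished string and the handler now appends any missing newline itself (the reason for the kmalloc(cnt + 2) above), a userspace writer can stay minimal. A hypothetical sketch; the debugfs mount point may differ on a given system:

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		/* no trailing '\n' needed; tracing_mark_write() adds one */
		const char msg[] = "hello from userspace";
		int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);

		if (fd < 0)
			return 1;
		write(fd, msg, sizeof(msg) - 1);	/* length excludes the NUL */
		close(fd);
		return 0;
	}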
@@ -3628,7 +3719,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 
 	s = kmalloc(sizeof(*s), GFP_KERNEL);
 	if (!s)
-		return ENOMEM;
+		return -ENOMEM;
 
 	trace_seq_init(s);
 
@@ -4285,7 +4376,7 @@ __init static int tracer_alloc_buffers(void)
 	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
 		goto out_free_buffer_mask;
 
-	if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
 		goto out_free_tracing_cpumask;
 
 	/* To save memory, keep the ring buffer size to its minimum */
@@ -4296,7 +4387,6 @@ __init static int tracer_alloc_buffers(void)
 
 	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
 	cpumask_copy(tracing_cpumask, cpu_all_mask);
-	cpumask_clear(tracing_reader_cpumask);
 
 	/* TODO: make the number of buffers hot pluggable with CPUS */
 	global_trace.buffer = ring_buffer_alloc(ring_buf_size,