Diffstat (limited to 'kernel/trace/trace.c')
 kernel/trace/trace.c | 188 ++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 146 insertions(+), 42 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 5c75deeefe30..6c0f6a8a22eb 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -125,13 +125,13 @@ int ftrace_dump_on_oops;
 
 static int tracing_set_tracer(const char *buf);
 
-#define BOOTUP_TRACER_SIZE		100
-static char bootup_tracer_buf[BOOTUP_TRACER_SIZE] __initdata;
+#define MAX_TRACER_SIZE		100
+static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
 static char *default_bootup_tracer;
 
 static int __init set_ftrace(char *str)
 {
-	strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE);
+	strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
 	default_bootup_tracer = bootup_tracer_buf;
 	/* We are using ftrace early, expand it */
 	ring_buffer_expanded = 1;
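The rename from BOOTUP_TRACER_SIZE to MAX_TRACER_SIZE reflects that the constant now bounds tracer names globally (registration and the current_tracer read/write paths below), not just the boot-time buffer. Worth noting: strncpy() does not NUL-terminate when the source fills the whole buffer, so a tracer= boot argument of exactly MAX_TRACER_SIZE characters would leave bootup_tracer_buf unterminated; the new registration check further down only rejects names strictly longer than MAX_TRACER_SIZE.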
@@ -242,13 +242,6 @@ static struct tracer *trace_types __read_mostly;
 static struct tracer	*current_trace __read_mostly;
 
 /*
- * max_tracer_type_len is used to simplify the allocating of
- * buffers to read userspace tracer names. We keep track of
- * the longest tracer name registered.
- */
-static int			max_tracer_type_len;
-
-/*
  * trace_types_lock is used to protect the trace_types list.
  * This lock is also used to keep user access serialized.
  * Accesses from userspace will grab this lock while userspace
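With tracer names now capped at a fixed MAX_TRACER_SIZE when they register, there is nothing left for max_tracer_type_len to do: it existed only so the current_tracer read/write handlers could size their buffers to the longest registered name, and those handlers (see the last two hunks) now size them with MAX_TRACER_SIZE directly. Dropping it also removes the recalculation loop from unregister_tracer() below.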
@@ -275,12 +268,18 @@ static DEFINE_SPINLOCK(tracing_start_lock);
  */
 void trace_wake_up(void)
 {
+	int cpu;
+
+	if (trace_flags & TRACE_ITER_BLOCK)
+		return;
 	/*
 	 * The runqueue_is_locked() can fail, but this is the best we
 	 * have for now:
 	 */
-	if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
+	cpu = get_cpu();
+	if (!runqueue_is_locked(cpu))
 		wake_up(&trace_wait);
+	put_cpu();
 }
 
 static int __init set_buf_size(char *str)
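runqueue_is_locked() now takes an explicit CPU argument, so the caller has to keep itself on one CPU while asking about that CPU's runqueue. get_cpu() returns the current processor id and disables preemption until the matching put_cpu(), which is what keeps the id valid across the check. A minimal sketch of the pattern (kernel C; get_cpu()/put_cpu() come from <linux/smp.h>):

    int cpu = get_cpu();    /* disables preemption, returns this CPU's id */

    /* ... inspect per-CPU state that must not migrate under us ... */

    put_cpu();              /* re-enables preemption */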
@@ -339,6 +338,112 @@ static struct {
 
 int		trace_clock_id;
 
+/*
+ * trace_parser_get_init - gets the buffer for trace parser
+ */
+int trace_parser_get_init(struct trace_parser *parser, int size)
+{
+	memset(parser, 0, sizeof(*parser));
+
+	parser->buffer = kmalloc(size, GFP_KERNEL);
+	if (!parser->buffer)
+		return 1;
+
+	parser->size = size;
+	return 0;
+}
+
+/*
+ * trace_parser_put - frees the buffer for trace parser
+ */
+void trace_parser_put(struct trace_parser *parser)
+{
+	kfree(parser->buffer);
+}
+
+/*
+ * trace_get_user - reads the user input string separated by space
+ * (matched by isspace(ch))
+ *
+ * For each string found the 'struct trace_parser' is updated,
+ * and the function returns.
+ *
+ * Returns number of bytes read.
+ *
+ * See kernel/trace/trace.h for 'struct trace_parser' details.
+ */
+int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
+	size_t cnt, loff_t *ppos)
+{
+	char ch;
+	size_t read = 0;
+	ssize_t ret;
+
+	if (!*ppos)
+		trace_parser_clear(parser);
+
+	ret = get_user(ch, ubuf++);
+	if (ret)
+		goto out;
+
+	read++;
+	cnt--;
+
+	/*
+	 * The parser is not finished with the last write,
+	 * continue reading the user input without skipping spaces.
+	 */
+	if (!parser->cont) {
+		/* skip white space */
+		while (cnt && isspace(ch)) {
+			ret = get_user(ch, ubuf++);
+			if (ret)
+				goto out;
+			read++;
+			cnt--;
+		}
+
+		/* only spaces were written */
+		if (isspace(ch)) {
+			*ppos += read;
+			ret = read;
+			goto out;
+		}
+
+		parser->idx = 0;
+	}
+
+	/* read the non-space input */
+	while (cnt && !isspace(ch)) {
+		if (parser->idx < parser->size)
+			parser->buffer[parser->idx++] = ch;
+		else {
+			ret = -EINVAL;
+			goto out;
+		}
+		ret = get_user(ch, ubuf++);
+		if (ret)
+			goto out;
+		read++;
+		cnt--;
+	}
+
+	/* We either got finished input or we have to wait for another call. */
+	if (isspace(ch)) {
+		parser->buffer[parser->idx] = 0;
+		parser->cont = false;
+	} else {
+		parser->cont = true;
+		parser->buffer[parser->idx++] = ch;
+	}
+
+	*ppos += read;
+	ret = read;
+
+out:
+	return ret;
+}
+
 ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
 {
 	int len;
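trace_get_user() is meant to be called repeatedly from a debugfs write handler: each call consumes at most one whitespace-delimited token, and parser->cont records a token that was split across two write() calls so the next call resumes it without skipping spaces. A sketch of the intended calling pattern, assuming the trace_parser_loaded()/trace_parser_cont() helpers that kernel/trace/trace.h provides for struct trace_parser; example_write() and process_token() are hypothetical names:

    static ssize_t
    example_write(struct file *file, const char __user *ubuf,
                  size_t cnt, loff_t *ppos)
    {
        struct trace_parser parser;
        ssize_t read;
        int ret = 0;

        if (trace_parser_get_init(&parser, 128))    /* 128: arbitrary token cap */
            return -ENOMEM;

        read = trace_get_user(&parser, ubuf, cnt, ppos);

        /* Act only once a complete token has arrived. */
        if (read >= 0 && trace_parser_loaded(&parser) &&
            !trace_parser_cont(&parser))
            ret = process_token(parser.buffer, parser.idx);

        trace_parser_put(&parser);
        return ret ? ret : read;
    }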
@@ -513,7 +618,6 @@ __releases(kernel_lock)
 __acquires(kernel_lock)
 {
 	struct tracer *t;
-	int len;
 	int ret = 0;
 
 	if (!type->name) {
@@ -521,6 +625,11 @@ __acquires(kernel_lock)
521 return -1; 625 return -1;
522 } 626 }
523 627
628 if (strlen(type->name) > MAX_TRACER_SIZE) {
629 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
630 return -1;
631 }
632
524 /* 633 /*
525 * When this gets called we hold the BKL which means that 634 * When this gets called we hold the BKL which means that
526 * preemption is disabled. Various trace selftests however 635 * preemption is disabled. Various trace selftests however
@@ -535,7 +644,7 @@ __acquires(kernel_lock)
 	for (t = trace_types; t; t = t->next) {
 		if (strcmp(type->name, t->name) == 0) {
 			/* already found */
-			pr_info("Trace %s already registered\n",
+			pr_info("Tracer %s already registered\n",
 				type->name);
 			ret = -1;
 			goto out;
@@ -586,9 +695,6 @@ __acquires(kernel_lock)
 
 	type->next = trace_types;
 	trace_types = type;
-	len = strlen(type->name);
-	if (len > max_tracer_type_len)
-		max_tracer_type_len = len;
 
  out:
 	tracing_selftest_running = false;
@@ -597,7 +703,7 @@ __acquires(kernel_lock)
 	if (ret || !default_bootup_tracer)
 		goto out_unlock;
 
-	if (strncmp(default_bootup_tracer, type->name, BOOTUP_TRACER_SIZE))
+	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
 		goto out_unlock;
 
 	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
@@ -619,14 +725,13 @@ __acquires(kernel_lock)
 void unregister_tracer(struct tracer *type)
 {
 	struct tracer **t;
-	int len;
 
 	mutex_lock(&trace_types_lock);
 	for (t = &trace_types; *t; t = &(*t)->next) {
 		if (*t == type)
 			goto found;
 	}
-	pr_info("Trace %s not registered\n", type->name);
+	pr_info("Tracer %s not registered\n", type->name);
 	goto out;
 
  found:
@@ -639,17 +744,7 @@ void unregister_tracer(struct tracer *type)
 		current_trace->stop(&global_trace);
 		current_trace = &nop_trace;
 	}
-
-	if (strlen(type->name) != max_tracer_type_len)
-		goto out;
-
-	max_tracer_type_len = 0;
-	for (t = &trace_types; *t; t = &(*t)->next) {
-		len = strlen((*t)->name);
-		if (len > max_tracer_type_len)
-			max_tracer_type_len = len;
-	}
- out:
+out:
 	mutex_unlock(&trace_types_lock);
 }
 
@@ -719,6 +814,11 @@ static void trace_init_cmdlines(void)
 	cmdline_idx = 0;
 }
 
+int is_tracing_stopped(void)
+{
+	return trace_stop_count;
+}
+
 /**
  * ftrace_off_permanent - disable all ftrace code permanently
  *
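is_tracing_stopped() simply publishes the file-local trace_stop_count so other tracing code can ask whether tracing_stop() is currently in effect without poking at trace.c internals; the expected use is an early bail-out of the form 'if (is_tracing_stopped()) return;' before recording an event.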
@@ -886,7 +986,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 
 	entry->preempt_count	= pc & 0xff;
 	entry->pid		= (tsk) ? tsk->pid : 0;
-	entry->tgid		= (tsk) ? tsk->tgid : 0;
+	entry->lock_depth	= (tsk) ? tsk->lock_depth : 0;
 	entry->flags =
 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
 		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
@@ -1068,6 +1168,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 		return;
 	entry	= ring_buffer_event_data(event);
 
+	entry->tgid		= current->tgid;
 	memset(&entry->caller, 0, sizeof(entry->caller));
 
 	trace.nr_entries	= 0;
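Taken together with the tracing_generic_entry_update() hunk above: tgid leaves the common trace_entry header, where every event paid for it, and is recorded only here in the userstack entry, the one consumer that actually needs it; the freed slot in the common header now carries lock_depth.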
@@ -1094,6 +1195,7 @@ ftrace_trace_special(void *__tr,
 			   unsigned long arg1, unsigned long arg2, unsigned long arg3,
 			   int pc)
 {
+	struct ftrace_event_call *call = &event_special;
 	struct ring_buffer_event *event;
 	struct trace_array *tr = __tr;
 	struct ring_buffer *buffer = tr->buffer;
@@ -1107,7 +1209,9 @@ ftrace_trace_special(void *__tr,
 	entry->arg1			= arg1;
 	entry->arg2			= arg2;
 	entry->arg3			= arg3;
-	trace_buffer_unlock_commit(buffer, event, 0, pc);
+
+	if (!filter_check_discard(call, entry, buffer, event))
+		trace_buffer_unlock_commit(buffer, event, 0, pc);
 }
 
 void
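ftrace_trace_special() now participates in per-event filtering: filter_check_discard() evaluates the just-filled entry against any filter attached to event_special and, when the filter rejects it, discards the reserved ring-buffer event so it is never committed. Schematically, this is the reserve/fill/filter/commit pattern the producers in this file follow:

    event = trace_buffer_lock_reserve(buffer, TRACE_SPECIAL,
                                      sizeof(*entry), 0, pc);
    if (!event)
        return;
    entry = ring_buffer_event_data(event);
    /* ... fill in the entry's fields ... */
    if (!filter_check_discard(call, entry, buffer, event))
        trace_buffer_unlock_commit(buffer, event, 0, pc);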
@@ -1530,10 +1634,10 @@ static void print_lat_help_header(struct seq_file *m)
 	seq_puts(m, "#                | / _----=> need-resched    \n");
 	seq_puts(m, "#                || / _---=> hardirq/softirq \n");
 	seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
-	seq_puts(m, "#                |||| /                      \n");
-	seq_puts(m, "#                |||||     delay             \n");
-	seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
-	seq_puts(m, "#     \\   /      |||||   \\   |   /           \n");
+	seq_puts(m, "#                |||| /_--=> lock-depth       \n");
+	seq_puts(m, "#                |||||/     delay             \n");
+	seq_puts(m, "#  cmd     pid   |||||| time  |   caller      \n");
+	seq_puts(m, "#     \\   /      ||||||   \\   |   /           \n");
 }
 
 static void print_func_help_header(struct seq_file *m)
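For reference, once the C escapes are resolved the tail of the latency-format header now renders with the extra lock-depth column, along these lines:

    #                ||| / _--=> preempt-depth
    #                |||| /_--=> lock-depth
    #                |||||/     delay
    #  cmd     pid   |||||| time  |   caller
    #     \   /      ||||||   \   |   /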
@@ -1845,7 +1949,7 @@ static int s_show(struct seq_file *m, void *v)
 	return 0;
 }
 
-static struct seq_operations tracer_seq_ops = {
+static const struct seq_operations tracer_seq_ops = {
 	.start		= s_start,
 	.next		= s_next,
 	.stop		= s_stop,
@@ -2059,7 +2163,7 @@ static int t_show(struct seq_file *m, void *v)
 	return 0;
 }
 
-static struct seq_operations show_traces_seq_ops = {
+static const struct seq_operations show_traces_seq_ops = {
 	.start		= t_start,
 	.next		= t_next,
 	.stop		= t_stop,
@@ -2489,7 +2593,7 @@ static ssize_t
 tracing_set_trace_read(struct file *filp, char __user *ubuf,
 		       size_t cnt, loff_t *ppos)
 {
-	char buf[max_tracer_type_len+2];
+	char buf[MAX_TRACER_SIZE+2];
 	int r;
 
 	mutex_lock(&trace_types_lock);
@@ -2639,15 +2743,15 @@ static ssize_t
 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
 			size_t cnt, loff_t *ppos)
 {
-	char buf[max_tracer_type_len+1];
+	char buf[MAX_TRACER_SIZE+1];
 	int i;
 	size_t ret;
 	int err;
 
 	ret = cnt;
 
-	if (cnt > max_tracer_type_len)
-		cnt = max_tracer_type_len;
+	if (cnt > MAX_TRACER_SIZE)
+		cnt = MAX_TRACER_SIZE;
 
 	if (copy_from_user(&buf, ubuf, cnt))
 		return -EFAULT;
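Nothing changes for userspace: selecting a tracer is still a plain write of its name into current_tracer, except that the kernel buffer is now sized by the fixed MAX_TRACER_SIZE instead of the longest registered name, and oversized writes are clamped. A minimal userspace sketch (the path assumes debugfs is mounted at /sys/kernel/debug):

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/sys/kernel/debug/tracing/current_tracer", O_WRONLY);

        if (fd < 0)
            return 1;
        /* Writes longer than MAX_TRACER_SIZE bytes are clamped by the kernel. */
        if (write(fd, "nop", 3) != 3) {
            close(fd);
            return 1;
        }
        close(fd);
        return 0;
    }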