author	Linus Torvalds <torvalds@linux-foundation.org>	2011-10-26 11:03:38 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-10-26 11:03:38 -0400
commit	7115e3fcf45514db7525a05365b10454ff7f345e (patch)
tree	17450e6337d559cc35dae6a7a73abab01ac63f00 /kernel/trace/trace.c
parent	1f6e05171bb5cc32a4d6437ab2269fc21d169ca7 (diff)
parent	c752d04066a36ae30b29795f3fa3f536292c1f8c (diff)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (121 commits)
  perf symbols: Increase symbol KSYM_NAME_LEN size
  perf hists browser: Refuse 'a' hotkey on non symbolic views
  perf ui browser: Use libslang to read keys
  perf tools: Fix tracing info recording
  perf hists browser: Elide DSO column when it is set to just one DSO, ditto for threads
  perf hists: Don't consider filtered entries when calculating column widths
  perf hists: Don't decay total_period for filtered entries
  perf hists browser: Honour symbol_conf.show_{nr_samples,total_period}
  perf hists browser: Do not exit on tab key with single event
  perf annotate browser: Don't change selection line when returning from callq
  perf tools: handle endianness of feature bitmap
  perf tools: Add prelink suggestion to dso update message
  perf script: Fix unknown feature comment
  perf hists browser: Apply the dso and thread filters when merging new batches
  perf hists: Move the dso and thread filters from hist_browser
  perf ui browser: Honour the xterm colors
  perf top tui: Give color hints just on the percentage, like on --stdio
  perf ui browser: Make the colors configurable and change the defaults
  perf tui: Remove unneeded call to newtCls on startup
  perf hists: Don't format the percentage on hist_entry__snprintf
  ...

Fix up conflicts in arch/x86/kernel/kprobes.c manually. Ingo's tree did
the insane "add volatile to const array", which just doesn't make sense
("volatile const"?). But we could remove the const *and* make the array
volatile to make doubly sure that gcc doesn't optimize it away..

Also fix up kernel/trace/ring_buffer.c non-data-conflicts manually: the
reader_lock has been turned into a raw lock by the core locking merge,
and there was a new user of it introduced in this perf core merge. Make
sure that new use also uses the raw accessor functions.
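For readers puzzled by the "volatile const" aside: the two qualifiers are legal together in C but pull in opposite directions, which is what makes the conflicted hunk look so odd. A minimal sketch, illustrative only — the array name is hypothetical, not the one from arch/x86/kernel/kprobes.c:

    /* Legal C, but muddled: the object may change behind the compiler's
     * back (volatile), yet the code promises never to write it (const). */
    static volatile const unsigned int boost_table_v1[8];

    /* The resolution described above: drop the const and keep volatile,
     * so gcc neither caches reads nor optimizes the array away. */
    static volatile unsigned int boost_table_v2[8];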
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c	181
1 file changed, 149 insertions(+), 32 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 0c8bdeeb358b..f2bd275bb60f 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -435,6 +435,7 @@ static struct {
 } trace_clocks[] = {
 	{ trace_clock_local,	"local" },
 	{ trace_clock_global,	"global" },
+	{ trace_clock_counter,	"counter" },
 };
 
 int trace_clock_id;
@@ -2159,6 +2160,14 @@ void trace_default_header(struct seq_file *m)
 	}
 }
 
+static void test_ftrace_alive(struct seq_file *m)
+{
+	if (!ftrace_is_dead())
+		return;
+	seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
+	seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
+}
+
 static int s_show(struct seq_file *m, void *v)
 {
 	struct trace_iterator *iter = v;
@@ -2168,6 +2177,7 @@ static int s_show(struct seq_file *m, void *v)
 	if (iter->tr) {
 		seq_printf(m, "# tracer: %s\n", iter->trace->name);
 		seq_puts(m, "#\n");
+		test_ftrace_alive(m);
 	}
 	if (iter->trace && iter->trace->print_header)
 		iter->trace->print_header(m);
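Taken together, these two hunks make the failure mode visible: once ftrace_is_dead() is true, every read of the trace file leads with the warning. Illustrative header, assuming the nop tracer is current:

    # tracer: nop
    #
    # WARNING: FUNCTION TRACING IS CORRUPTED
    #          MAY BE MISSING FUNCTION EVENTS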
@@ -2710,9 +2720,9 @@ static const char readme_msg[] =
 	"# cat /sys/kernel/debug/tracing/trace_options\n"
 	"noprint-parent nosym-offset nosym-addr noverbose\n"
 	"# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
-	"# echo 1 > /sys/kernel/debug/tracing/tracing_enabled\n"
+	"# echo 1 > /sys/kernel/debug/tracing/tracing_on\n"
 	"# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
-	"# echo 0 > /sys/kernel/debug/tracing/tracing_enabled\n"
+	"# echo 0 > /sys/kernel/debug/tracing/tracing_on\n"
 ;
 
 static ssize_t
@@ -3569,6 +3579,30 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 }
 
 static ssize_t
+tracing_total_entries_read(struct file *filp, char __user *ubuf,
+				size_t cnt, loff_t *ppos)
+{
+	struct trace_array *tr = filp->private_data;
+	char buf[64];
+	int r, cpu;
+	unsigned long size = 0, expanded_size = 0;
+
+	mutex_lock(&trace_types_lock);
+	for_each_tracing_cpu(cpu) {
+		size += tr->entries >> 10;
+		if (!ring_buffer_expanded)
+			expanded_size += trace_buf_size >> 10;
+	}
+	if (ring_buffer_expanded)
+		r = sprintf(buf, "%lu\n", size);
+	else
+		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
+	mutex_unlock(&trace_types_lock);
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
 			  size_t cnt, loff_t *ppos)
 {
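This read handler backs the read-only buffer_total_size_kb file created in tracer_init_debugfs() further down. A minimal userspace sketch of consuming it — the debugfs mount point and the sample numbers are assumptions:

    #include <stdio.h>

    int main(void)
    {
    	/* Assumes debugfs is mounted at /sys/kernel/debug. */
    	FILE *f = fopen("/sys/kernel/debug/tracing/buffer_total_size_kb", "r");
    	char line[64];

    	if (!f) {
    		perror("fopen");
    		return 1;
    	}
    	/* One line: e.g. "1408" once the ring buffer has been expanded,
    	 * or "16 (expanded: 1408)" while it is still at its small
    	 * boot-time size. */
    	if (fgets(line, sizeof(line), f))
    		fputs(line, stdout);
    	fclose(f);
    	return 0;
    }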
@@ -3594,22 +3628,24 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
 	return 0;
 }
 
-static int mark_printk(const char *fmt, ...)
-{
-	int ret;
-	va_list args;
-	va_start(args, fmt);
-	ret = trace_vprintk(0, fmt, args);
-	va_end(args);
-	return ret;
-}
-
 static ssize_t
 tracing_mark_write(struct file *filp, const char __user *ubuf,
 					size_t cnt, loff_t *fpos)
 {
-	char *buf;
-	size_t written;
+	unsigned long addr = (unsigned long)ubuf;
+	struct ring_buffer_event *event;
+	struct ring_buffer *buffer;
+	struct print_entry *entry;
+	unsigned long irq_flags;
+	struct page *pages[2];
+	int nr_pages = 1;
+	ssize_t written;
+	void *page1;
+	void *page2;
+	int offset;
+	int size;
+	int len;
+	int ret;
 
 	if (tracing_disabled)
 		return -EINVAL;
@@ -3617,28 +3653,81 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	if (cnt > TRACE_BUF_SIZE)
 		cnt = TRACE_BUF_SIZE;
 
-	buf = kmalloc(cnt + 2, GFP_KERNEL);
-	if (buf == NULL)
-		return -ENOMEM;
+	/*
+	 * Userspace is injecting traces into the kernel trace buffer.
+	 * We want to be as non intrusive as possible.
+	 * To do so, we do not want to allocate any special buffers
+	 * or take any locks, but instead write the userspace data
+	 * straight into the ring buffer.
+	 *
+	 * First we need to pin the userspace buffer into memory,
+	 * which, most likely it is, because it just referenced it.
+	 * But there's no guarantee that it is. By using get_user_pages_fast()
+	 * and kmap_atomic/kunmap_atomic() we can get access to the
+	 * pages directly. We then write the data directly into the
+	 * ring buffer.
+	 */
+	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
 
-	if (copy_from_user(buf, ubuf, cnt)) {
-		kfree(buf);
-		return -EFAULT;
+	/* check if we cross pages */
+	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
+		nr_pages = 2;
+
+	offset = addr & (PAGE_SIZE - 1);
+	addr &= PAGE_MASK;
+
+	ret = get_user_pages_fast(addr, nr_pages, 0, pages);
+	if (ret < nr_pages) {
+		while (--ret >= 0)
+			put_page(pages[ret]);
+		written = -EFAULT;
+		goto out;
+	}
+
+	page1 = kmap_atomic(pages[0]);
+	if (nr_pages == 2)
+		page2 = kmap_atomic(pages[1]);
+
+	local_save_flags(irq_flags);
+	size = sizeof(*entry) + cnt + 2; /* possible \n added */
+	buffer = global_trace.buffer;
+	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
+					  irq_flags, preempt_count());
+	if (!event) {
+		/* Ring buffer disabled, return as if not open for write */
+		written = -EBADF;
+		goto out_unlock;
 	}
-	if (buf[cnt-1] != '\n') {
-		buf[cnt] = '\n';
-		buf[cnt+1] = '\0';
+
+	entry = ring_buffer_event_data(event);
+	entry->ip = _THIS_IP_;
+
+	if (nr_pages == 2) {
+		len = PAGE_SIZE - offset;
+		memcpy(&entry->buf, page1 + offset, len);
+		memcpy(&entry->buf[len], page2, cnt - len);
 	} else
-		buf[cnt] = '\0';
+		memcpy(&entry->buf, page1 + offset, cnt);
 
-	written = mark_printk("%s", buf);
-	kfree(buf);
-	*fpos += written;
+	if (entry->buf[cnt - 1] != '\n') {
+		entry->buf[cnt] = '\n';
+		entry->buf[cnt + 1] = '\0';
+	} else
+		entry->buf[cnt] = '\0';
+
+	ring_buffer_unlock_commit(buffer, event);
 
-	/* don't tell userspace we wrote more - it might confuse them */
-	if (written > cnt)
-		written = cnt;
+	written = cnt;
 
+	*fpos += written;
+
+ out_unlock:
+	if (nr_pages == 2)
+		kunmap_atomic(page2);
+	kunmap_atomic(page1);
+	while (nr_pages > 0)
+		put_page(pages[--nr_pages]);
+ out:
 	return written;
 }
 
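The rewrite above services the trace_marker file: the user's buffer is pinned with get_user_pages_fast(), mapped with kmap_atomic(), and copied page-by-page straight into a TRACE_PRINT event, with no kmalloc and no copy_from_user on the hot path. A minimal sketch of the userspace side — the debugfs mount point and the message are assumptions:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
    	/* Assumes debugfs is mounted at /sys/kernel/debug. */
    	int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);
    	const char msg[] = "hello from userspace\n";

    	if (fd < 0) {
    		perror("open");
    		return 1;
    	}
    	/* Each write() becomes one event; the kernel caps the payload
    	 * at TRACE_BUF_SIZE and appends a '\n' if one is missing. */
    	if (write(fd, msg, strlen(msg)) < 0)
    		perror("write");
    	close(fd);
    	return 0;
    }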
@@ -3739,6 +3828,12 @@ static const struct file_operations tracing_entries_fops = {
 	.llseek		= generic_file_llseek,
 };
 
+static const struct file_operations tracing_total_entries_fops = {
+	.open		= tracing_open_generic,
+	.read		= tracing_total_entries_read,
+	.llseek		= generic_file_llseek,
+};
+
 static const struct file_operations tracing_free_buffer_fops = {
 	.write		= tracing_free_buffer_write,
 	.release	= tracing_free_buffer_release,
@@ -3808,8 +3903,6 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 	if (info->read < PAGE_SIZE)
 		goto read;
 
-	info->read = 0;
-
 	trace_access_lock(info->cpu);
 	ret = ring_buffer_read_page(info->tr->buffer,
 				    &info->spare,
@@ -3819,6 +3912,8 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 	if (ret < 0)
 		return 0;
 
+	info->read = 0;
+
 read:
 	size = PAGE_SIZE - info->read;
 	if (size > count)
@@ -4026,6 +4121,8 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 	struct trace_array *tr = &global_trace;
 	struct trace_seq *s;
 	unsigned long cnt;
+	unsigned long long t;
+	unsigned long usec_rem;
 
 	s = kmalloc(sizeof(*s), GFP_KERNEL);
 	if (!s)
@@ -4042,6 +4139,17 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 	cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
 	trace_seq_printf(s, "commit overrun: %ld\n", cnt);
 
+	cnt = ring_buffer_bytes_cpu(tr->buffer, cpu);
+	trace_seq_printf(s, "bytes: %ld\n", cnt);
+
+	t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu));
+	usec_rem = do_div(t, USEC_PER_SEC);
+	trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", t, usec_rem);
+
+	t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));
+	usec_rem = do_div(t, USEC_PER_SEC);
+	trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
+
 	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
 
 	kfree(s);
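The two timestamp lines lean on do_div(), which divides the 64-bit value in place and returns the remainder — handy on 32-bit targets with no native 64/32 divide. A userspace approximation of the same split, since do_div() is a kernel-only macro (the timestamp value is hypothetical):

    #include <stdio.h>

    #define USEC_PER_SEC 1000000UL

    int main(void)
    {
    	/* Hypothetical timestamp, already converted from ns to us the
    	 * way ns2usecs() does in the hunk above. */
    	unsigned long long t = 1319640218123456ULL;
    	unsigned long usec_rem;

    	/* do_div(t, USEC_PER_SEC) would do both steps at once:
    	 * remainder returned, quotient left in t. */
    	usec_rem = (unsigned long)(t % USEC_PER_SEC);
    	t /= USEC_PER_SEC;

    	printf("now ts: %5llu.%06lu\n", t, usec_rem); /* 1319640218.123456 */
    	return 0;
    }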
@@ -4450,6 +4558,9 @@ static __init int tracer_init_debugfs(void)
 	trace_create_file("buffer_size_kb", 0644, d_tracer,
 			&global_trace, &tracing_entries_fops);
 
+	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
+			&global_trace, &tracing_total_entries_fops);
+
 	trace_create_file("free_buffer", 0644, d_tracer,
 			&global_trace, &tracing_free_buffer_fops);
 
@@ -4566,6 +4677,12 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 
 	tracing_off();
 
+	/* Did function tracer already get disabled? */
+	if (ftrace_is_dead()) {
+		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
+		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
+	}
+
 	if (disable_tracing)
 		ftrace_kill();
 