Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/ftrace.c       |  34
-rw-r--r--  kernel/trace/ring_buffer.c  | 117
-rw-r--r--  kernel/trace/trace.c        |  19
3 files changed, 149 insertions(+), 21 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 4a39d24568c8..e60205722d0c 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -185,7 +185,6 @@ enum {
 };
 
 static int ftrace_filtered;
-static int tracing_on;
 
 static LIST_HEAD(ftrace_new_addrs);
 
@@ -506,13 +505,10 @@ static int __ftrace_modify_code(void *data)
 {
 	int *command = data;
 
-	if (*command & FTRACE_ENABLE_CALLS) {
+	if (*command & FTRACE_ENABLE_CALLS)
 		ftrace_replace_code(1);
-		tracing_on = 1;
-	} else if (*command & FTRACE_DISABLE_CALLS) {
+	else if (*command & FTRACE_DISABLE_CALLS)
 		ftrace_replace_code(0);
-		tracing_on = 0;
-	}
 
 	if (*command & FTRACE_UPDATE_TRACE_FUNC)
 		ftrace_update_ftrace_func(ftrace_trace_function);
@@ -677,7 +673,7 @@ static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
 
 	cnt = num_to_init / ENTRIES_PER_PAGE;
 	pr_info("ftrace: allocating %ld entries in %d pages\n",
-		num_to_init, cnt);
+		num_to_init, cnt + 1);
 
 	for (i = 0; i < cnt; i++) {
 		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
@@ -757,13 +753,11 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 	void *p = NULL;
 	loff_t l = -1;
 
-	if (*pos != iter->pos) {
-		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
-			;
-	} else {
-		l = *pos;
-		p = t_next(m, p, &l);
-	}
+	if (*pos > iter->pos)
+		*pos = iter->pos;
+
+	l = *pos;
+	p = t_next(m, p, &l);
 
 	return p;
 }
@@ -774,15 +768,21 @@ static void t_stop(struct seq_file *m, void *p)
 
 static int t_show(struct seq_file *m, void *v)
 {
+	struct ftrace_iterator *iter = m->private;
 	struct dyn_ftrace *rec = v;
 	char str[KSYM_SYMBOL_LEN];
+	int ret = 0;
 
 	if (!rec)
 		return 0;
 
 	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
 
-	seq_printf(m, "%s\n", str);
+	ret = seq_printf(m, "%s\n", str);
+	if (ret < 0) {
+		iter->pos--;
+		iter->idx--;
+	}
 
 	return 0;
 }
@@ -808,7 +808,7 @@ ftrace_avail_open(struct inode *inode, struct file *file)
 		return -ENOMEM;
 
 	iter->pg = ftrace_pages_start;
-	iter->pos = -1;
+	iter->pos = 0;
 
 	ret = seq_open(file, &show_ftrace_seq_ops);
 	if (!ret) {
@@ -895,7 +895,7 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
 
 	if (file->f_mode & FMODE_READ) {
 		iter->pg = ftrace_pages_start;
-		iter->pos = -1;
+		iter->pos = 0;
 		iter->flags = enable ? FTRACE_ITER_FILTER :
 					FTRACE_ITER_NOTRACE;
 
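
The t_start() and t_show() changes above bring the iterator in line with the seq_file contract: after a partial read, seq_file calls ->start() again with the last position, so t_start() must resume from *pos rather than replay earlier records, and t_show() now rewinds the iterator when seq_printf() overflows the output buffer. A minimal sketch of that start/next contract, with the hypothetical my_records[] and my_record_count standing in for ftrace's record pages:

#include <linux/seq_file.h>

static struct { unsigned long ip; } my_records[16];
static const loff_t my_record_count = 16;

/* ->start() may be re-invoked with the last *pos after a partial
 * read; indexing directly by *pos resumes without replaying. */
static void *my_start(struct seq_file *m, loff_t *pos)
{
	return *pos < my_record_count ? &my_records[*pos] : NULL;
}

static void *my_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return *pos < my_record_count ? &my_records[*pos] : NULL;
}
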
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 3f3380638646..036456cbb4f7 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -16,14 +16,49 @@
 #include <linux/list.h>
 #include <linux/fs.h>
 
+#include "trace.h"
+
+/* Global flag to disable all recording to ring buffers */
+static int ring_buffers_off __read_mostly;
+
+/**
+ * tracing_on - enable all tracing buffers
+ *
+ * This function enables all tracing buffers that may have been
+ * disabled with tracing_off.
+ */
+void tracing_on(void)
+{
+	ring_buffers_off = 0;
+}
+
+/**
+ * tracing_off - turn off all tracing buffers
+ *
+ * This function stops all tracing buffers from recording data.
+ * It does not disable any overhead the tracers themselves may
+ * be causing. This function simply causes all recording to
+ * the ring buffers to fail.
+ */
+void tracing_off(void)
+{
+	ring_buffers_off = 1;
+}
+
 /* Up this if you want to test the TIME_EXTENTS and normalization */
 #define DEBUG_SHIFT 0
 
 /* FIXME!!! */
 u64 ring_buffer_time_stamp(int cpu)
 {
+	u64 time;
+
+	preempt_disable_notrace();
 	/* shift to debug/test normalization and TIME_EXTENTS */
-	return sched_clock() << DEBUG_SHIFT;
+	time = sched_clock() << DEBUG_SHIFT;
+	preempt_enable_notrace();
+
+	return time;
 }
 
 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
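
Per the kernel-doc above, tracing_off() is meant to be called from anywhere in the kernel to freeze buffer recording the moment a problem is hit, so the events of interest stay at the end of the trace instead of being overwritten; tracing_on() re-arms recording. A hedged sketch of a call site — struct my_object and object_is_corrupted() are made-up names, not from this patch:

static void check_object(struct my_object *obj)
{
	if (unlikely(object_is_corrupted(obj)))
		tracing_off();	/* stop all recording; trace ends here */
}
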
@@ -503,6 +538,12 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	LIST_HEAD(pages);
 	int i, cpu;
 
+	/*
+	 * Always succeed at resizing a non-existent buffer:
+	 */
+	if (!buffer)
+		return size;
+
 	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
 	size *= BUF_PAGE_SIZE;
 	buffer_size = buffer->pages * BUF_PAGE_SIZE;
@@ -1060,7 +1101,7 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 
 	/* Did the write stamp get updated already? */
 	if (unlikely(ts < cpu_buffer->write_stamp))
-		goto again;
+		delta = 0;
 
 	if (test_time_stamp(delta)) {
 
@@ -1133,6 +1174,9 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 	struct ring_buffer_event *event;
 	int cpu, resched;
 
+	if (ring_buffers_off)
+		return NULL;
+
 	if (atomic_read(&buffer->record_disabled))
 		return NULL;
 
@@ -1249,6 +1293,9 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	int ret = -EBUSY;
 	int cpu, resched;
 
+	if (ring_buffers_off)
+		return -EBUSY;
+
 	if (atomic_read(&buffer->record_disabled))
 		return -EBUSY;
 
@@ -2070,3 +2117,69 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 	return 0;
 }
 
+static ssize_t
+rb_simple_read(struct file *filp, char __user *ubuf,
+	       size_t cnt, loff_t *ppos)
+{
+	int *p = filp->private_data;
+	char buf[64];
+	int r;
+
+	/* !ring_buffers_off == tracing_on */
+	r = sprintf(buf, "%d\n", !*p);
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+rb_simple_write(struct file *filp, const char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	int *p = filp->private_data;
+	char buf[64];
+	long val;
+	int ret;
+
+	if (cnt >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	buf[cnt] = 0;
+
+	ret = strict_strtoul(buf, 10, &val);
+	if (ret < 0)
+		return ret;
+
+	/* !ring_buffers_off == tracing_on */
+	*p = !val;
+
+	(*ppos)++;
+
+	return cnt;
+}
+
+static struct file_operations rb_simple_fops = {
+	.open		= tracing_open_generic,
+	.read		= rb_simple_read,
+	.write		= rb_simple_write,
+};
+
+
+static __init int rb_init_debugfs(void)
+{
+	struct dentry *d_tracer;
+	struct dentry *entry;
+
+	d_tracer = tracing_init_dentry();
+
+	entry = debugfs_create_file("tracing_on", 0644, d_tracer,
+				    &ring_buffers_off, &rb_simple_fops);
+	if (!entry)
+		pr_warning("Could not create debugfs 'tracing_on' entry\n");
+
+	return 0;
+}
+
+fs_initcall(rb_init_debugfs);
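
rb_simple_read() and rb_simple_write() expose the flag with inverted sense: the file is named "tracing_on", so both handlers negate the stored ring_buffers_off value (the "!*p" and "!val" above). Assuming debugfs is mounted at /debug, as in the ftrace documentation of this period, usage looks like:

	# cat /debug/tracing/tracing_on
	1
	# echo 0 > /debug/tracing/tracing_on	(recording off)
	# echo 1 > /debug/tracing/tracing_on	(recording back on)
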
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9f3b478f9171..697eda36b86a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1755,7 +1755,7 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
 		return TRACE_TYPE_HANDLED;
 
 	SEQ_PUT_FIELD_RET(s, entry->pid);
-	SEQ_PUT_FIELD_RET(s, iter->cpu);
+	SEQ_PUT_FIELD_RET(s, entry->cpu);
 	SEQ_PUT_FIELD_RET(s, iter->ts);
 
 	switch (entry->type) {
@@ -2676,7 +2676,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 {
 	unsigned long val;
 	char buf[64];
-	int ret;
+	int ret, cpu;
 	struct trace_array *tr = filp->private_data;
 
 	if (cnt >= sizeof(buf))
@@ -2704,6 +2704,14 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 		goto out;
 	}
 
+	/* disable all cpu buffers */
+	for_each_tracing_cpu(cpu) {
+		if (global_trace.data[cpu])
+			atomic_inc(&global_trace.data[cpu]->disabled);
+		if (max_tr.data[cpu])
+			atomic_inc(&max_tr.data[cpu]->disabled);
+	}
+
 	if (val != global_trace.entries) {
 		ret = ring_buffer_resize(global_trace.buffer, val);
 		if (ret < 0) {
@@ -2735,6 +2743,13 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	if (tracing_disabled)
 		cnt = -ENOMEM;
  out:
+	for_each_tracing_cpu(cpu) {
+		if (global_trace.data[cpu])
+			atomic_dec(&global_trace.data[cpu]->disabled);
+		if (max_tr.data[cpu])
+			atomic_dec(&max_tr.data[cpu]->disabled);
+	}
+
 	max_tr.entries = global_trace.entries;
 	mutex_unlock(&trace_types_lock);
 
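
The atomic_inc()/atomic_dec() pair around the resize relies on the existing writer-side convention: each per-cpu event writer bumps data->disabled and records only when it is the sole holder, so holding an extra reference across ring_buffer_resize() keeps every tracer out of buffers whose pages are being reallocated. Roughly, modeled on the function tracer's entry path (a simplified sketch, not verbatim from this patch):

	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1))
		trace_function(tr, data, ip, parent_ip, flags);
	atomic_dec(&data->disabled);
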