Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--  kernel/trace/trace.c | 1053
 1 file changed, 692 insertions(+), 361 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 291397e66669..91eecaaa43e0 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -20,6 +20,7 @@
 #include <linux/notifier.h>
 #include <linux/irqflags.h>
 #include <linux/debugfs.h>
+#include <linux/tracefs.h>
 #include <linux/pagemap.h>
 #include <linux/hardirq.h>
 #include <linux/linkage.h>
@@ -31,6 +32,7 @@
 #include <linux/splice.h>
 #include <linux/kdebug.h>
 #include <linux/string.h>
+#include <linux/mount.h>
 #include <linux/rwsem.h>
 #include <linux/slab.h>
 #include <linux/ctype.h>
@@ -63,6 +65,10 @@ static bool __read_mostly tracing_selftest_running;
  */
 bool __read_mostly tracing_selftest_disabled;
 
+/* Pipe tracepoints to printk */
+struct trace_iterator *tracepoint_print_iter;
+int tracepoint_printk;
+
 /* For tracers that don't implement custom flags */
 static struct tracer_opt dummy_tracer_opt[] = {
 	{ }
@@ -119,6 +125,42 @@ enum ftrace_dump_mode ftrace_dump_on_oops;
 /* When set, tracing will stop when a WARN*() is hit */
 int __disable_trace_on_warning;
 
+#ifdef CONFIG_TRACE_ENUM_MAP_FILE
+/* Map of enums to their values, for "enum_map" file */
+struct trace_enum_map_head {
+	struct module			*mod;
+	unsigned long			length;
+};
+
+union trace_enum_map_item;
+
+struct trace_enum_map_tail {
+	/*
+	 * "end" is first and points to NULL as it must be different
+	 * than "mod" or "enum_string"
+	 */
+	union trace_enum_map_item	*next;
+	const char			*end;	/* points to NULL */
+};
+
+static DEFINE_MUTEX(trace_enum_mutex);
+
+/*
+ * The trace_enum_maps are saved in an array with two extra elements,
+ * one at the beginning, and one at the end. The beginning item contains
+ * the count of the saved maps (head.length), and the module they
+ * belong to if not built in (head.mod). The ending item contains a
+ * pointer to the next array of saved enum_map items.
+ */
+union trace_enum_map_item {
+	struct trace_enum_map		map;
+	struct trace_enum_map_head	head;
+	struct trace_enum_map_tail	tail;
+};
+
+static union trace_enum_map_item *trace_enum_maps;
+#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
+
 static int tracing_set_tracer(struct trace_array *tr, const char *buf);
 
 #define MAX_TRACER_SIZE		100
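
The head/map/tail layout described in the comment above is easiest to see in a walker. A minimal sketch (not kernel code; it assumes the struct trace_enum_map layout of system, enum_string, enum_value used elsewhere in this patch, and the fact that the tail's NULL "end" pointer aliases map.enum_string):

	/* Sketch: one allocation holds [head][map 0..len-1][tail]. */
	static void walk_enum_maps(union trace_enum_map_item *ptr)
	{
		while (ptr) {
			union trace_enum_map_item *item = ptr + 1; /* skip head */

			while (item->map.enum_string) {	/* NULL marks the tail */
				pr_info("%s %ld (%s)\n", item->map.enum_string,
					item->map.enum_value, item->map.system);
				item++;
			}
			ptr = item->tail.next;	/* head of the next chained array */
		}
	}
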
@@ -155,10 +197,11 @@ __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
 
 static int __init stop_trace_on_warning(char *str)
 {
-	__disable_trace_on_warning = 1;
+	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
+		__disable_trace_on_warning = 1;
 	return 1;
 }
-__setup("traceoff_on_warning=", stop_trace_on_warning);
+__setup("traceoff_on_warning", stop_trace_on_warning);
 
 static int __init boot_alloc_snapshot(char *str)
 {
@@ -192,6 +235,13 @@ static int __init set_trace_boot_clock(char *str)
 }
 __setup("trace_clock=", set_trace_boot_clock);
 
+static int __init set_tracepoint_printk(char *str)
+{
+	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
+		tracepoint_printk = 1;
+	return 1;
+}
+__setup("tp_printk", set_tracepoint_printk);
 
 unsigned long long ns2usecs(cycle_t nsec)
 {
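
Both handlers rely on the same __setup() detail: registering the bare parameter name (no trailing '=') makes the handler fire for "tp_printk", "tp_printk=1" and "tp_printk=off" alike, with str holding whatever followed the name, including the '='. A hypothetical helper (not in this patch) capturing the shared semantics:

	/* "" (bare flag) and anything other than "=0"/"=off" enable the option. */
	static bool __init boot_flag_enabled(const char *str)
	{
		return strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0;
	}
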
@@ -820,11 +870,12 @@ static struct {
 	const char	*name;
 	int		in_ns;		/* is this clock in nanoseconds? */
 } trace_clocks[] = {
 	{ trace_clock_local,		"local",	1 },
 	{ trace_clock_global,		"global",	1 },
 	{ trace_clock_counter,		"counter",	0 },
 	{ trace_clock_jiffies,		"uptime",	0 },
 	{ trace_clock,			"perf",		1 },
+	{ ktime_get_mono_fast_ns,	"mono",		1 },
 	ARCH_TRACE_CLOCKS
 };
 
@@ -937,43 +988,20 @@ out:
 	return ret;
 }
 
-ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
-{
-	int len;
-	int ret;
-
-	if (!cnt)
-		return 0;
-
-	if (s->len <= s->readpos)
-		return -EBUSY;
-
-	len = s->len - s->readpos;
-	if (cnt > len)
-		cnt = len;
-	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
-	if (ret == cnt)
-		return -EFAULT;
-
-	cnt -= ret;
-
-	s->readpos += cnt;
-	return cnt;
-}
-
+/* TODO add a seq_buf_to_buffer() */
 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 {
 	int len;
 
-	if (s->len <= s->readpos)
+	if (trace_seq_used(s) <= s->seq.readpos)
 		return -EBUSY;
 
-	len = s->len - s->readpos;
+	len = trace_seq_used(s) - s->seq.readpos;
 	if (cnt > len)
 		cnt = len;
-	memcpy(buf, s->buffer + s->readpos, cnt);
+	memcpy(buf, s->buffer + s->seq.readpos, cnt);
 
-	s->readpos += cnt;
+	s->seq.readpos += cnt;
 	return cnt;
 }
 
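
The s->len and s->readpos accesses become s->seq.len and s->seq.readpos because this series rebuilds trace_seq on top of the lower-level seq_buf, which now owns the buffer bookkeeping. trace_seq_used() clamps a possibly overflowed length to the buffer size, so callers never index past the end. The assumed shape of the accessors (inferred from their use here, not quoted from the patch):

	static inline unsigned int seq_buf_used(struct seq_buf *s)
	{
		return min(s->len, s->size);	/* never more than actually fits */
	}

	static inline int trace_seq_used(struct trace_seq *s)
	{
		return seq_buf_used(&s->seq);
	}
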
@@ -1099,13 +1127,14 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
-static int wait_on_pipe(struct trace_iterator *iter)
+static int wait_on_pipe(struct trace_iterator *iter, bool full)
 {
 	/* Iterators are static, they should be filled or empty */
 	if (trace_buffer_iter(iter, iter->cpu_file))
 		return 0;
 
-	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
+	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
+				full);
 }
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
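
The new bool chooses how much data ring_buffer_wait() waits for: false wakes the reader as soon as anything is buffered, while true waits until a whole page can be handed out. The splice path later in this patch passes true, since it ships full pages to the pipe. A sketch of the two call patterns (illustrative wrapper, not kernel code):

	static int wait_for_trace_data(struct trace_iterator *iter, bool splice)
	{
		/* splice hands out whole pages, so it waits for a full one;
		 * plain reads wake up on any buffered data. */
		return wait_on_pipe(iter, splice /* full */);
	}
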
@@ -2045,13 +2074,14 @@ void trace_printk_init_buffers(void)
 
 	/* trace_printk() is for debug use only. Don't use it in production. */
 
-	pr_warning("\n**********************************************************\n");
+	pr_warning("\n");
+	pr_warning("**********************************************************\n");
 	pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
 	pr_warning("**                                                      **\n");
 	pr_warning("** trace_printk() being used. Allocating extra memory.  **\n");
 	pr_warning("**                                                      **\n");
 	pr_warning("** This means that this is a DEBUG kernel and it is     **\n");
-	pr_warning("** unsafe for produciton use.                           **\n");
+	pr_warning("** unsafe for production use.                           **\n");
 	pr_warning("**                                                      **\n");
 	pr_warning("** If you see this message and you are not debugging    **\n");
 	pr_warning("** the kernel, report this immediately to your vendor!  **\n");
@@ -2180,9 +2210,7 @@ __trace_array_vprintk(struct ring_buffer *buffer,
 		goto out;
 	}
 
-	len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
-	if (len > TRACE_BUF_SIZE)
-		goto out;
+	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
 
 	local_save_flags(flags);
 	size = sizeof(*entry) + len + 1;
@@ -2193,8 +2221,7 @@ __trace_array_vprintk(struct ring_buffer *buffer,
 	entry = ring_buffer_event_data(event);
 	entry->ip = ip;
 
-	memcpy(&entry->buf, tbuffer, len);
-	entry->buf[len] = '\0';
+	memcpy(&entry->buf, tbuffer, len + 1);
 	if (!call_filter_check_discard(call, entry, buffer, event)) {
 		__buffer_unlock_commit(buffer, event);
 		ftrace_trace_stack(buffer, flags, 6, pc);
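
The two deleted lines in the previous hunk compensated for vsnprintf(), which returns the length the output would have had, so len could exceed TRACE_BUF_SIZE and the terminator had to be written by hand. vscnprintf() returns the number of characters actually stored (excluding the NUL it always appends), which makes len safe to use directly and lets memcpy(..., len + 1) pick up the terminator. A user-space analogue of the distinction, as a sketch:

	#include <stdarg.h>
	#include <stdio.h>

	/* Clamp vsnprintf()'s "would have written" result to what fit. */
	static int my_vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
	{
		int ret = vsnprintf(buf, size, fmt, args);

		if (ret < 0 || size == 0)
			return 0;
		return (size_t)ret < size ? ret : (int)(size - 1);
	}
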
@@ -2531,14 +2558,14 @@ get_total_entries(struct trace_buffer *buf,
 
 static void print_lat_help_header(struct seq_file *m)
 {
-	seq_puts(m, "#                  _------=> CPU#            \n");
-	seq_puts(m, "#                 / _-----=> irqs-off        \n");
-	seq_puts(m, "#                | / _----=> need-resched    \n");
-	seq_puts(m, "#                || / _---=> hardirq/softirq \n");
-	seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
-	seq_puts(m, "#                |||| /     delay            \n");
-	seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
-	seq_puts(m, "#     \\   /      |||||  \\    |   /          \n");
+	seq_puts(m, "#                  _------=> CPU#            \n"
+		    "#                 / _-----=> irqs-off        \n"
+		    "#                | / _----=> need-resched    \n"
+		    "#                || / _---=> hardirq/softirq \n"
+		    "#                ||| / _--=> preempt-depth   \n"
+		    "#                |||| /     delay            \n"
+		    "#  cmd     pid   ||||| time  |   caller      \n"
+		    "#     \\   /      |||||  \\    |   /          \n");
 }
 
 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
@@ -2555,20 +2582,20 @@ static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
 {
 	print_event_info(buf, m);
-	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
-	seq_puts(m, "#              | |       |          |         |\n");
+	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n"
+		    "#              | |       |          |         |\n");
 }
 
 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
 {
 	print_event_info(buf, m);
-	seq_puts(m, "#                              _-----=> irqs-off\n");
-	seq_puts(m, "#                             / _----=> need-resched\n");
-	seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
-	seq_puts(m, "#                            || / _--=> preempt-depth\n");
-	seq_puts(m, "#                            ||| /     delay\n");
-	seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
-	seq_puts(m, "#              | |       |   ||||       |         |\n");
+	seq_puts(m, "#                              _-----=> irqs-off\n"
+		    "#                             / _----=> need-resched\n"
+		    "#                            | / _---=> hardirq/softirq\n"
+		    "#                            || / _--=> preempt-depth\n"
+		    "#                            ||| /     delay\n"
+		    "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n"
+		    "#              | |       |   ||||       |         |\n");
 }
 
 void
@@ -2671,24 +2698,21 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 	event = ftrace_find_event(entry->type);
 
 	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
-		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
-			if (!trace_print_lat_context(iter))
-				goto partial;
-		} else {
-			if (!trace_print_context(iter))
-				goto partial;
-		}
+		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
+			trace_print_lat_context(iter);
+		else
+			trace_print_context(iter);
 	}
 
+	if (trace_seq_has_overflowed(s))
+		return TRACE_TYPE_PARTIAL_LINE;
+
 	if (event)
 		return event->funcs->trace(iter, sym_flags, event);
 
-	if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
-		goto partial;
+	trace_seq_printf(s, "Unknown type %d\n", entry->type);
 
-	return TRACE_TYPE_HANDLED;
-partial:
-	return TRACE_TYPE_PARTIAL_LINE;
+	return trace_handle_return(s);
 }
 
 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
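
This hunk is the template for the output-path conversion in this series: trace_seq_printf() and friends no longer report whether the write fit; the seq carries an overflow flag that is checked once. trace_handle_return() folds that flag into the return enum; its assumed shape, inferred from how it is used here:

	static inline enum print_line_t trace_handle_return(struct trace_seq *s)
	{
		return trace_seq_has_overflowed(s) ?
			TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
	}
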
@@ -2699,22 +2723,20 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
 
 	entry = iter->ent;
 
-	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
-		if (!trace_seq_printf(s, "%d %d %llu ",
-				      entry->pid, iter->cpu, iter->ts))
-			goto partial;
-	}
+	if (trace_flags & TRACE_ITER_CONTEXT_INFO)
+		trace_seq_printf(s, "%d %d %llu ",
+				 entry->pid, iter->cpu, iter->ts);
+
+	if (trace_seq_has_overflowed(s))
+		return TRACE_TYPE_PARTIAL_LINE;
 
 	event = ftrace_find_event(entry->type);
 	if (event)
 		return event->funcs->raw(iter, 0, event);
 
-	if (!trace_seq_printf(s, "%d ?\n", entry->type))
-		goto partial;
+	trace_seq_printf(s, "%d ?\n", entry->type);
 
-	return TRACE_TYPE_HANDLED;
-partial:
-	return TRACE_TYPE_PARTIAL_LINE;
+	return trace_handle_return(s);
 }
 
 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
@@ -2727,9 +2749,11 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
 	entry = iter->ent;
 
 	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
-		SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
-		SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
-		SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
+		SEQ_PUT_HEX_FIELD(s, entry->pid);
+		SEQ_PUT_HEX_FIELD(s, iter->cpu);
+		SEQ_PUT_HEX_FIELD(s, iter->ts);
+		if (trace_seq_has_overflowed(s))
+			return TRACE_TYPE_PARTIAL_LINE;
 	}
 
 	event = ftrace_find_event(entry->type);
@@ -2739,9 +2763,9 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
 		return ret;
 	}
 
-	SEQ_PUT_FIELD_RET(s, newline);
+	SEQ_PUT_FIELD(s, newline);
 
-	return TRACE_TYPE_HANDLED;
+	return trace_handle_return(s);
 }
 
 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
@@ -2753,9 +2777,11 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
 	entry = iter->ent;
 
 	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
-		SEQ_PUT_FIELD_RET(s, entry->pid);
-		SEQ_PUT_FIELD_RET(s, iter->cpu);
-		SEQ_PUT_FIELD_RET(s, iter->ts);
+		SEQ_PUT_FIELD(s, entry->pid);
+		SEQ_PUT_FIELD(s, iter->cpu);
+		SEQ_PUT_FIELD(s, iter->ts);
+		if (trace_seq_has_overflowed(s))
+			return TRACE_TYPE_PARTIAL_LINE;
 	}
 
 	event = ftrace_find_event(entry->type);
@@ -2801,10 +2827,12 @@ enum print_line_t print_trace_line(struct trace_iterator *iter)
 {
 	enum print_line_t ret;
 
-	if (iter->lost_events &&
-	    !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
-			      iter->cpu, iter->lost_events))
-		return TRACE_TYPE_PARTIAL_LINE;
+	if (iter->lost_events) {
+		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
+				 iter->cpu, iter->lost_events);
+		if (trace_seq_has_overflowed(&iter->seq))
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
 
 	if (iter->trace && iter->trace->print_line) {
 		ret = iter->trace->print_line(iter);
@@ -2882,44 +2910,44 @@ static void test_ftrace_alive(struct seq_file *m)
 {
 	if (!ftrace_is_dead())
 		return;
-	seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
-	seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
+	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
+		    "#          MAY BE MISSING FUNCTION EVENTS\n");
 }
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 static void show_snapshot_main_help(struct seq_file *m)
 {
-	seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
-	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
-	seq_printf(m, "#                      Takes a snapshot of the main buffer.\n");
-	seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
-	seq_printf(m, "#                      (Doesn't have to be '2' works with any number that\n");
-	seq_printf(m, "#                       is not a '0' or '1')\n");
+	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
+		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
+		    "#                      Takes a snapshot of the main buffer.\n"
+		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
+		    "#                      (Doesn't have to be '2' works with any number that\n"
+		    "#                       is not a '0' or '1')\n");
 }
 
 static void show_snapshot_percpu_help(struct seq_file *m)
 {
-	seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
+	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
-	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
-	seq_printf(m, "#                      Takes a snapshot of the main buffer for this cpu.\n");
+	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
+		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
 #else
-	seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
-	seq_printf(m, "#                     Must use main snapshot file to allocate.\n");
+	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
+		    "#                     Must use main snapshot file to allocate.\n");
 #endif
-	seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
-	seq_printf(m, "#                      (Doesn't have to be '2' works with any number that\n");
-	seq_printf(m, "#                       is not a '0' or '1')\n");
+	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
+		    "#                      (Doesn't have to be '2' works with any number that\n"
+		    "#                       is not a '0' or '1')\n");
 }
 
 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
 {
 	if (iter->tr->allocated_snapshot)
-		seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
+		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
 	else
-		seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
+		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
 
-	seq_printf(m, "# Snapshot commands:\n");
+	seq_puts(m, "# Snapshot commands:\n");
 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
 		show_snapshot_main_help(m);
 	else
@@ -3273,7 +3301,7 @@ static int t_show(struct seq_file *m, void *v)
 	if (!t)
 		return 0;
 
-	seq_printf(m, "%s", t->name);
+	seq_puts(m, t->name);
 	if (t->next)
 		seq_putc(m, ' ');
 	else
@@ -3363,12 +3391,12 @@ tracing_cpumask_read(struct file *filp, char __user *ubuf,
 
 	mutex_lock(&tracing_cpumask_update_lock);
 
-	len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
-	if (count - len < 2) {
+	len = snprintf(mask_str, count, "%*pb\n",
+		       cpumask_pr_args(tr->tracing_cpumask));
+	if (len >= count) {
 		count = -EINVAL;
 		goto out_err;
 	}
-	len += sprintf(mask_str + len, "\n");
 	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
 
 out_err:
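
cpumask_scnprintf() gives way to the %*pb printk extension: the '*' consumes the bit count and the pointer argument supplies the bitmap, which is exactly the pair cpumask_pr_args() expands to (nr_cpu_ids plus cpumask_bits()). A short sketch of the two output forms (illustrative function, not from this patch):

	static void show_mask_example(struct seq_file *m, const struct cpumask *mask)
	{
		seq_printf(m, "%*pb\n", cpumask_pr_args(mask));		/* hex bitmap, e.g. "f" */
		seq_printf(m, "%*pbl\n", cpumask_pr_args(mask));	/* range list, e.g. "0-3" */
	}
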
@@ -3699,6 +3727,7 @@ static const char readme_msg[] =
 #endif
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
+	"  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
 	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
 #endif
 #ifdef CONFIG_TRACER_SNAPSHOT
@@ -3917,6 +3946,182 @@ static const struct file_operations tracing_saved_cmdlines_size_fops = {
 	.write		= tracing_saved_cmdlines_size_write,
 };
 
+#ifdef CONFIG_TRACE_ENUM_MAP_FILE
+static union trace_enum_map_item *
+update_enum_map(union trace_enum_map_item *ptr)
+{
+	if (!ptr->map.enum_string) {
+		if (ptr->tail.next) {
+			ptr = ptr->tail.next;
+			/* Set ptr to the next real item (skip head) */
+			ptr++;
+		} else
+			return NULL;
+	}
+	return ptr;
+}
+
+static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	union trace_enum_map_item *ptr = v;
+
+	/*
+	 * Paranoid! If ptr points to end, we don't want to increment past it.
+	 * This really should never happen.
+	 */
+	ptr = update_enum_map(ptr);
+	if (WARN_ON_ONCE(!ptr))
+		return NULL;
+
+	ptr++;
+
+	(*pos)++;
+
+	ptr = update_enum_map(ptr);
+
+	return ptr;
+}
+
+static void *enum_map_start(struct seq_file *m, loff_t *pos)
+{
+	union trace_enum_map_item *v;
+	loff_t l = 0;
+
+	mutex_lock(&trace_enum_mutex);
+
+	v = trace_enum_maps;
+	if (v)
+		v++;
+
+	while (v && l < *pos) {
+		v = enum_map_next(m, v, &l);
+	}
+
+	return v;
+}
+
+static void enum_map_stop(struct seq_file *m, void *v)
+{
+	mutex_unlock(&trace_enum_mutex);
+}
+
+static int enum_map_show(struct seq_file *m, void *v)
+{
+	union trace_enum_map_item *ptr = v;
+
+	seq_printf(m, "%s %ld (%s)\n",
+		   ptr->map.enum_string, ptr->map.enum_value,
+		   ptr->map.system);
+
+	return 0;
+}
+
+static const struct seq_operations tracing_enum_map_seq_ops = {
+	.start		= enum_map_start,
+	.next		= enum_map_next,
+	.stop		= enum_map_stop,
+	.show		= enum_map_show,
+};
+
+static int tracing_enum_map_open(struct inode *inode, struct file *filp)
+{
+	if (tracing_disabled)
+		return -ENODEV;
+
+	return seq_open(filp, &tracing_enum_map_seq_ops);
+}
+
+static const struct file_operations tracing_enum_map_fops = {
+	.open		= tracing_enum_map_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static inline union trace_enum_map_item *
+trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
+{
+	/* Return tail of array given the head */
+	return ptr + ptr->head.length + 1;
+}
+
+static void
+trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
+			   int len)
+{
+	struct trace_enum_map **stop;
+	struct trace_enum_map **map;
+	union trace_enum_map_item *map_array;
+	union trace_enum_map_item *ptr;
+
+	stop = start + len;
+
+	/*
+	 * The trace_enum_maps contains the map plus a head and tail item,
+	 * where the head holds the module and length of array, and the
+	 * tail holds a pointer to the next list.
+	 */
+	map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
+	if (!map_array) {
+		pr_warning("Unable to allocate trace enum mapping\n");
+		return;
+	}
+
+	mutex_lock(&trace_enum_mutex);
+
+	if (!trace_enum_maps)
+		trace_enum_maps = map_array;
+	else {
+		ptr = trace_enum_maps;
+		for (;;) {
+			ptr = trace_enum_jmp_to_tail(ptr);
+			if (!ptr->tail.next)
+				break;
+			ptr = ptr->tail.next;
+
+		}
+		ptr->tail.next = map_array;
+	}
+	map_array->head.mod = mod;
+	map_array->head.length = len;
+	map_array++;
+
+	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
+		map_array->map = **map;
+		map_array++;
+	}
+	memset(map_array, 0, sizeof(*map_array));
+
+	mutex_unlock(&trace_enum_mutex);
+}
+
+static void trace_create_enum_file(struct dentry *d_tracer)
+{
+	trace_create_file("enum_map", 0444, d_tracer,
+			  NULL, &tracing_enum_map_fops);
+}
+
+#else /* CONFIG_TRACE_ENUM_MAP_FILE */
+static inline void trace_create_enum_file(struct dentry *d_tracer) { }
+static inline void trace_insert_enum_map_file(struct module *mod,
+			struct trace_enum_map **start, int len) { }
+#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
+
+static void trace_insert_enum_map(struct module *mod,
+				  struct trace_enum_map **start, int len)
+{
+	struct trace_enum_map **map;
+
+	if (len <= 0)
+		return;
+
+	map = start;
+
+	trace_event_enum_update(map, len);
+
+	trace_insert_enum_map_file(mod, start, len);
+}
+
 static ssize_t
 tracing_set_trace_read(struct file *filp, char __user *ubuf,
 		       size_t cnt, loff_t *ppos)
@@ -4114,9 +4319,24 @@ static void tracing_set_nop(struct trace_array *tr)
 	tr->current_trace = &nop_trace;
 }
 
-static int tracing_set_tracer(struct trace_array *tr, const char *buf)
+static void update_tracer_options(struct trace_array *tr, struct tracer *t)
 {
 	static struct trace_option_dentry *topts;
+
+	/* Only enable if the directory has been created already. */
+	if (!tr->dir)
+		return;
+
+	/* Currently, only the top instance has options */
+	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL))
+		return;
+
+	destroy_trace_option_files(topts);
+	topts = create_trace_option_files(tr, t);
+}
+
+static int tracing_set_tracer(struct trace_array *tr, const char *buf)
+{
 	struct tracer *t;
 #ifdef CONFIG_TRACER_MAX_TRACE
 	bool had_max_tr;
@@ -4150,6 +4370,12 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
 		goto out;
 	}
 
+	/* If trace pipe files are being read, we can't change the tracer */
+	if (tr->current_trace->ref) {
+		ret = -EBUSY;
+		goto out;
+	}
+
 	trace_branch_disable();
 
 	tr->current_trace->enabled--;
@@ -4175,11 +4401,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
 		free_snapshot(tr);
 	}
 #endif
-	/* Currently, only the top instance has options */
-	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
-		destroy_trace_option_files(topts);
-		topts = create_trace_option_files(tr, t);
-	}
+	update_tracer_options(tr, t);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 	if (t->use_max_tr && !had_max_tr) {
@@ -4238,10 +4460,9 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
 }
 
 static ssize_t
-tracing_max_lat_read(struct file *filp, char __user *ubuf,
-		     size_t cnt, loff_t *ppos)
+tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
+		   size_t cnt, loff_t *ppos)
 {
-	unsigned long *ptr = filp->private_data;
 	char buf[64];
 	int r;
 
@@ -4253,10 +4474,9 @@ tracing_max_lat_read(struct file *filp, char __user *ubuf,
 }
 
 static ssize_t
-tracing_max_lat_write(struct file *filp, const char __user *ubuf,
-		      size_t cnt, loff_t *ppos)
+tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
+		    size_t cnt, loff_t *ppos)
 {
-	unsigned long *ptr = filp->private_data;
 	unsigned long val;
 	int ret;
 
@@ -4269,6 +4489,52 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
+static ssize_t
+tracing_thresh_read(struct file *filp, char __user *ubuf,
+		    size_t cnt, loff_t *ppos)
+{
+	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
+}
+
+static ssize_t
+tracing_thresh_write(struct file *filp, const char __user *ubuf,
+		     size_t cnt, loff_t *ppos)
+{
+	struct trace_array *tr = filp->private_data;
+	int ret;
+
+	mutex_lock(&trace_types_lock);
+	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
+	if (ret < 0)
+		goto out;
+
+	if (tr->current_trace->update_thresh) {
+		ret = tr->current_trace->update_thresh(tr);
+		if (ret < 0)
+			goto out;
+	}
+
+	ret = cnt;
+out:
+	mutex_unlock(&trace_types_lock);
+
+	return ret;
+}
+
+static ssize_t
+tracing_max_lat_read(struct file *filp, char __user *ubuf,
+		     size_t cnt, loff_t *ppos)
+{
+	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
+}
+
+static ssize_t
+tracing_max_lat_write(struct file *filp, const char __user *ubuf,
+		      size_t cnt, loff_t *ppos)
+{
+	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
+}
+
 static int tracing_open_pipe(struct inode *inode, struct file *filp)
 {
 	struct trace_array *tr = inode->i_private;
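
The split above lets one pair of helpers back any nanoseconds knob: tracing_thresh is a global, so its fops pass its address, while the max-latency file stores the target pointer in filp->private_data at open time; only the thresh write path additionally calls the tracer's update_thresh() hook under trace_types_lock. Sketch of a third, hypothetical knob reusing the helpers (names are illustrative):

	static ssize_t example_nsecs_read(struct file *filp, char __user *ubuf,
					  size_t cnt, loff_t *ppos)
	{
		unsigned long *val = filp->private_data;	/* set at open time */

		return tracing_nsecs_read(val, ubuf, cnt, ppos);
	}
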
@@ -4291,16 +4557,8 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 		goto out;
 	}
 
-	/*
-	 * We make a copy of the current tracer to avoid concurrent
-	 * changes on it while we are reading.
-	 */
-	iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
-	if (!iter->trace) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-	*iter->trace = *tr->current_trace;
+	trace_seq_init(&iter->seq);
+	iter->trace = tr->current_trace;
 
 	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
 		ret = -ENOMEM;
@@ -4327,6 +4585,8 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 		iter->trace->pipe_open(iter);
 
 	nonseekable_open(inode, filp);
+
+	tr->current_trace->ref++;
 out:
 	mutex_unlock(&trace_types_lock);
 	return ret;
@@ -4346,6 +4606,8 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
 
 	mutex_lock(&trace_types_lock);
 
+	tr->current_trace->ref--;
+
 	if (iter->trace->pipe_close)
 		iter->trace->pipe_close(iter);
 
@@ -4353,7 +4615,6 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
 
 	free_cpumask_var(iter->started);
 	mutex_destroy(&iter->mutex);
-	kfree(iter->trace);
 	kfree(iter);
 
 	trace_array_put(tr);
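
These three hunks replace the old scheme, in which every reader kept a private kmalloc'd copy of the current tracer, with a plain reference count: open bumps tr->current_trace->ref, release drops it, and tracing_set_tracer() (in a hunk above) refuses to switch while the count is nonzero. That is also why the copy-the-tracer blocks disappear from tracing_read_pipe() and tracing_splice_read_pipe() below. The invariant, as a sketch (not verbatim kernel code):

	static int example_set_tracer(struct trace_array *tr)
	{
		if (tr->current_trace->ref)	/* a pipe or buffer fd is open */
			return -EBUSY;		/* so the tracer must not change */
		/* ... otherwise it is safe to swap tr->current_trace ... */
		return 0;
	}
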
@@ -4386,7 +4647,7 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
 	return trace_poll(iter, filp, poll_table);
 }
 
-/* Must be called with trace_types_lock mutex held. */
+/* Must be called with iter->mutex held. */
 static int tracing_wait_pipe(struct file *filp)
 {
 	struct trace_iterator *iter = filp->private_data;
@@ -4412,15 +4673,12 @@ static int tracing_wait_pipe(struct file *filp)
 
 		mutex_unlock(&iter->mutex);
 
-		ret = wait_on_pipe(iter);
+		ret = wait_on_pipe(iter, false);
 
 		mutex_lock(&iter->mutex);
 
 		if (ret)
 			return ret;
-
-		if (signal_pending(current))
-			return -EINTR;
 	}
 
 	return 1;
@@ -4434,7 +4692,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 		  size_t cnt, loff_t *ppos)
 {
 	struct trace_iterator *iter = filp->private_data;
-	struct trace_array *tr = iter->tr;
 	ssize_t sret;
 
 	/* return any leftover data */
@@ -4444,12 +4701,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 
 	trace_seq_init(&iter->seq);
 
-	/* copy the tracer to avoid using a global lock all around */
-	mutex_lock(&trace_types_lock);
-	if (unlikely(iter->trace->name != tr->current_trace->name))
-		*iter->trace = *tr->current_trace;
-	mutex_unlock(&trace_types_lock);
-
 	/*
 	 * Avoid more than one consumer on a single file descriptor
 	 * This is just a matter of traces coherency, the ring buffer itself
@@ -4487,18 +4738,18 @@ waitagain:
 	trace_access_lock(iter->cpu_file);
 	while (trace_find_next_entry_inc(iter) != NULL) {
 		enum print_line_t ret;
-		int len = iter->seq.len;
+		int save_len = iter->seq.seq.len;
 
 		ret = print_trace_line(iter);
 		if (ret == TRACE_TYPE_PARTIAL_LINE) {
 			/* don't print partial lines */
-			iter->seq.len = len;
+			iter->seq.seq.len = save_len;
 			break;
 		}
 		if (ret != TRACE_TYPE_NO_CONSUME)
 			trace_consume(iter);
 
-		if (iter->seq.len >= cnt)
+		if (trace_seq_used(&iter->seq) >= cnt)
 			break;
 
 		/*
@@ -4514,7 +4765,7 @@ waitagain:
 
 	/* Now copy what we have to the user */
 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
-	if (iter->seq.readpos >= iter->seq.len)
+	if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
 		trace_seq_init(&iter->seq);
 
 	/*
@@ -4548,20 +4799,33 @@ static size_t
 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
 {
 	size_t count;
+	int save_len;
 	int ret;
 
 	/* Seq buffer is page-sized, exactly what we need. */
 	for (;;) {
-		count = iter->seq.len;
+		save_len = iter->seq.seq.len;
 		ret = print_trace_line(iter);
-		count = iter->seq.len - count;
-		if (rem < count) {
-			rem = 0;
-			iter->seq.len -= count;
+
+		if (trace_seq_has_overflowed(&iter->seq)) {
+			iter->seq.seq.len = save_len;
 			break;
 		}
+
+		/*
+		 * This should not be hit, because it should only
+		 * be set if the iter->seq overflowed. But check it
+		 * anyway to be safe.
+		 */
 		if (ret == TRACE_TYPE_PARTIAL_LINE) {
-			iter->seq.len -= count;
+			iter->seq.seq.len = save_len;
+			break;
+		}
+
+		count = trace_seq_used(&iter->seq) - save_len;
+		if (rem < count) {
+			rem = 0;
+			iter->seq.seq.len = save_len;
 			break;
 		}
 
@@ -4596,7 +4860,6 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 		.ops		= &tracing_pipe_buf_ops,
 		.spd_release	= tracing_spd_release_pipe,
 	};
-	struct trace_array *tr = iter->tr;
 	ssize_t ret;
 	size_t rem;
 	unsigned int i;
@@ -4604,12 +4867,6 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 	if (splice_grow_spd(pipe, &spd))
 		return -ENOMEM;
 
-	/* copy the tracer to avoid using a global lock all around */
-	mutex_lock(&trace_types_lock);
-	if (unlikely(iter->trace->name != tr->current_trace->name))
-		*iter->trace = *tr->current_trace;
-	mutex_unlock(&trace_types_lock);
-
 	mutex_lock(&iter->mutex);
 
 	if (iter->trace->splice_read) {
@@ -4642,13 +4899,13 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 		/* Copy the data into the page, so we can start over. */
 		ret = trace_seq_to_buffer(&iter->seq,
 					  page_address(spd.pages[i]),
-					  iter->seq.len);
+					  trace_seq_used(&iter->seq));
 		if (ret < 0) {
 			__free_page(spd.pages[i]);
 			break;
 		}
 		spd.partial[i].offset = 0;
-		spd.partial[i].len = iter->seq.len;
+		spd.partial[i].len = trace_seq_used(&iter->seq);
 
 		trace_seq_init(&iter->seq);
 	}
@@ -4896,7 +5153,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	*fpos += written;
 
  out_unlock:
-	for (i = 0; i < nr_pages; i++){
+	for (i = nr_pages - 1; i >= 0; i--) {
 		kunmap_atomic(map_page[i]);
 		put_page(pages[i]);
 	}
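
The loop direction is the point of this fix: kmap_atomic() mappings nest like a stack and must be released in reverse order, so unmapping index 0 first while index 1 was still mapped was a bug whenever nr_pages was 2. The required pattern, as a minimal sketch:

	static void copy_two_pages_example(struct page *p0, struct page *p1)
	{
		void *m0 = kmap_atomic(p0);
		void *m1 = kmap_atomic(p1);

		/* ... copy through m0/m1 ... */

		kunmap_atomic(m1);	/* LIFO: last mapped, first released */
		kunmap_atomic(m0);
	}
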
@@ -5170,6 +5427,13 @@ static int snapshot_raw_open(struct inode *inode, struct file *filp)
 #endif /* CONFIG_TRACER_SNAPSHOT */
 
 
+static const struct file_operations tracing_thresh_fops = {
+	.open		= tracing_open_generic,
+	.read		= tracing_thresh_read,
+	.write		= tracing_thresh_write,
+	.llseek		= generic_file_llseek,
+};
+
 static const struct file_operations tracing_max_lat_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_max_lat_read,
@@ -5278,6 +5542,8 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
 
 	filp->private_data = info;
 
+	tr->current_trace->ref++;
+
 	mutex_unlock(&trace_types_lock);
 
 	ret = nonseekable_open(inode, filp);
@@ -5308,21 +5574,16 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 	if (!count)
 		return 0;
 
-	mutex_lock(&trace_types_lock);
-
 #ifdef CONFIG_TRACER_MAX_TRACE
-	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
-		size = -EBUSY;
-		goto out_unlock;
-	}
+	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
+		return -EBUSY;
 #endif
 
 	if (!info->spare)
 		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
 							  iter->cpu_file);
-	size = -ENOMEM;
 	if (!info->spare)
-		goto out_unlock;
+		return -ENOMEM;
 
 	/* Do we have previous read data to read? */
 	if (info->read < PAGE_SIZE)
@@ -5338,25 +5599,16 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 
 	if (ret < 0) {
 		if (trace_empty(iter)) {
-			if ((filp->f_flags & O_NONBLOCK)) {
-				size = -EAGAIN;
-				goto out_unlock;
-			}
-			mutex_unlock(&trace_types_lock);
-			ret = wait_on_pipe(iter);
-			mutex_lock(&trace_types_lock);
-			if (ret) {
-				size = ret;
-				goto out_unlock;
-			}
-			if (signal_pending(current)) {
-				size = -EINTR;
-				goto out_unlock;
-			}
+			if ((filp->f_flags & O_NONBLOCK))
+				return -EAGAIN;
+
+			ret = wait_on_pipe(iter, false);
+			if (ret)
+				return ret;
+
 			goto again;
 		}
-		size = 0;
-		goto out_unlock;
+		return 0;
 	}
 
 	info->read = 0;
@@ -5366,18 +5618,14 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 		size = count;
 
 	ret = copy_to_user(ubuf, info->spare + info->read, size);
-	if (ret == size) {
-		size = -EFAULT;
-		goto out_unlock;
-	}
+	if (ret == size)
+		return -EFAULT;
+
 	size -= ret;
 
 	*ppos += size;
 	info->read += size;
 
- out_unlock:
-	mutex_unlock(&trace_types_lock);
-
 	return size;
 }
 
@@ -5388,6 +5636,8 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
 
 	mutex_lock(&trace_types_lock);
 
+	iter->tr->current_trace->ref--;
+
 	__trace_array_put(iter->tr);
 
 	if (info->spare)
@@ -5471,32 +5721,22 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 	};
 	struct buffer_ref *ref;
 	int entries, size, i;
-	ssize_t ret;
-
-	mutex_lock(&trace_types_lock);
+	ssize_t ret = 0;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
-		ret = -EBUSY;
-		goto out;
-	}
+	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
+		return -EBUSY;
 #endif
 
-	if (splice_grow_spd(pipe, &spd)) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (splice_grow_spd(pipe, &spd))
+		return -ENOMEM;
 
-	if (*ppos & (PAGE_SIZE - 1)) {
-		ret = -EINVAL;
-		goto out;
-	}
+	if (*ppos & (PAGE_SIZE - 1))
+		return -EINVAL;
 
 	if (len & (PAGE_SIZE - 1)) {
-		if (len < PAGE_SIZE) {
-			ret = -EINVAL;
-			goto out;
-		}
+		if (len < PAGE_SIZE)
+			return -EINVAL;
 		len &= PAGE_MASK;
 	}
 
@@ -5509,13 +5749,16 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		int r;
 
 		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
-		if (!ref)
+		if (!ref) {
+			ret = -ENOMEM;
 			break;
+		}
 
 		ref->ref = 1;
 		ref->buffer = iter->trace_buffer->buffer;
 		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
 		if (!ref->page) {
+			ret = -ENOMEM;
 			kfree(ref);
 			break;
 		}
@@ -5553,26 +5796,21 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 
 	/* did we read anything? */
 	if (!spd.nr_pages) {
-		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
-			ret = -EAGAIN;
-			goto out;
-		}
-		mutex_unlock(&trace_types_lock);
-		ret = wait_on_pipe(iter);
-		mutex_lock(&trace_types_lock);
 		if (ret)
-			goto out;
-		if (signal_pending(current)) {
-			ret = -EINTR;
-			goto out;
-		}
+			return ret;
+
+		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
+			return -EAGAIN;
+
+		ret = wait_on_pipe(iter, true);
+		if (ret)
+			return ret;
+
 		goto again;
 	}
 
 	ret = splice_to_pipe(pipe, &spd);
 	splice_shrink_spd(&spd);
-out:
-	mutex_unlock(&trace_types_lock);
 
 	return ret;
 }
@@ -5642,7 +5880,8 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
 	trace_seq_printf(s, "read events: %ld\n", cnt);
 
-	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
+	count = simple_read_from_buffer(ubuf, count, ppos,
+					s->buffer, trace_seq_used(s));
 
 	kfree(s);
 
@@ -5723,10 +5962,10 @@ ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
 
 	seq_printf(m, "%ps:", (void *)ip);
 
-	seq_printf(m, "snapshot");
+	seq_puts(m, "snapshot");
 
 	if (count == -1)
-		seq_printf(m, ":unlimited\n");
+		seq_puts(m, ":unlimited\n");
 	else
 		seq_printf(m, ":count=%ld\n", count);
 
@@ -5801,28 +6040,19 @@ static __init int register_snapshot_cmd(void)
 static inline __init int register_snapshot_cmd(void) { return 0; }
 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
 
-struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
+static struct dentry *tracing_get_dentry(struct trace_array *tr)
 {
-	if (tr->dir)
-		return tr->dir;
+	if (WARN_ON(!tr->dir))
+		return ERR_PTR(-ENODEV);
 
-	if (!debugfs_initialized())
-		return NULL;
-
+	/* Top directory uses NULL as the parent */
 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
-		tr->dir = debugfs_create_dir("tracing", NULL);
+		return NULL;
 
-	if (!tr->dir)
-		pr_warn_once("Could not create debugfs directory 'tracing'\n");
-
+	/* All sub buffers have a descriptor */
 	return tr->dir;
 }
 
-struct dentry *tracing_init_dentry(void)
-{
-	return tracing_init_dentry_tr(&global_trace);
-}
-
 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
 {
 	struct dentry *d_tracer;
@@ -5830,14 +6060,14 @@ static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
 	if (tr->percpu_dir)
 		return tr->percpu_dir;
 
-	d_tracer = tracing_init_dentry_tr(tr);
-	if (!d_tracer)
+	d_tracer = tracing_get_dentry(tr);
+	if (IS_ERR(d_tracer))
 		return NULL;
 
-	tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
+	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
 
 	WARN_ONCE(!tr->percpu_dir,
-		  "Could not create debugfs directory 'per_cpu/%d'\n", cpu);
+		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
 
 	return tr->percpu_dir;
 }
@@ -5854,7 +6084,7 @@ trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
 }
 
 static void
-tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
+tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
 {
 	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
 	struct dentry *d_cpu;
@@ -5864,9 +6094,9 @@ tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
 		return;
 
 	snprintf(cpu_dir, 30, "cpu%ld", cpu);
-	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
+	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
 	if (!d_cpu) {
-		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
+		pr_warning("Could not create tracefs '%s' entry\n", cpu_dir);
 		return;
 	}
 
@@ -6018,9 +6248,9 @@ struct dentry *trace_create_file(const char *name,
 {
 	struct dentry *ret;
 
-	ret = debugfs_create_file(name, mode, parent, data, fops);
+	ret = tracefs_create_file(name, mode, parent, data, fops);
 	if (!ret)
-		pr_warning("Could not create debugfs '%s' entry\n", name);
+		pr_warning("Could not create tracefs '%s' entry\n", name);
 
 	return ret;
 }
@@ -6033,13 +6263,13 @@ static struct dentry *trace_options_init_dentry(struct trace_array *tr)
 	if (tr->options)
 		return tr->options;
 
-	d_tracer = tracing_init_dentry_tr(tr);
-	if (!d_tracer)
+	d_tracer = tracing_get_dentry(tr);
+	if (IS_ERR(d_tracer))
 		return NULL;
 
-	tr->options = debugfs_create_dir("options", d_tracer);
+	tr->options = tracefs_create_dir("options", d_tracer);
 	if (!tr->options) {
-		pr_warning("Could not create debugfs directory 'options'\n");
+		pr_warning("Could not create tracefs directory 'options'\n");
 		return NULL;
 	}
 
@@ -6107,10 +6337,8 @@ destroy_trace_option_files(struct trace_option_dentry *topts)
 	if (!topts)
 		return;
 
-	for (cnt = 0; topts[cnt].opt; cnt++) {
-		if (topts[cnt].entry)
-			debugfs_remove(topts[cnt].entry);
-	}
+	for (cnt = 0; topts[cnt].opt; cnt++)
+		tracefs_remove(topts[cnt].entry);
 
 	kfree(topts);
 }
@@ -6199,7 +6427,7 @@ static const struct file_operations rb_simple_fops = {
 struct dentry *trace_instance_dir;
 
 static void
-init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
+init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
 
 static int
 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
@@ -6276,7 +6504,7 @@ static void free_trace_buffers(struct trace_array *tr)
 #endif
 }
 
-static int new_instance_create(const char *name)
+static int instance_mkdir(const char *name)
 {
 	struct trace_array *tr;
 	int ret;
@@ -6315,17 +6543,17 @@ static int new_instance_create(const char *name)
 	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
 		goto out_free_tr;
 
-	tr->dir = debugfs_create_dir(name, trace_instance_dir);
+	tr->dir = tracefs_create_dir(name, trace_instance_dir);
 	if (!tr->dir)
 		goto out_free_tr;
 
 	ret = event_trace_add_tracer(tr->dir, tr);
 	if (ret) {
-		debugfs_remove_recursive(tr->dir);
+		tracefs_remove_recursive(tr->dir);
 		goto out_free_tr;
 	}
 
-	init_tracer_debugfs(tr, tr->dir);
+	init_tracer_tracefs(tr, tr->dir);
 
 	list_add(&tr->list, &ftrace_trace_arrays);
 
@@ -6346,7 +6574,7 @@ static int new_instance_create(const char *name)
 
 }
 
-static int instance_delete(const char *name)
+static int instance_rmdir(const char *name)
 {
 	struct trace_array *tr;
 	int found = 0;
@@ -6365,7 +6593,7 @@ static int instance_delete(const char *name)
 		goto out_unlock;
 
 	ret = -EBUSY;
-	if (tr->ref)
+	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
 		goto out_unlock;
 
 	list_del(&tr->list);
@@ -6387,82 +6615,17 @@ static int instance_delete(const char *name)
6387 return ret; 6615 return ret;
6388} 6616}
6389 6617
6390static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t mode)
6391{
6392 struct dentry *parent;
6393 int ret;
6394
6395 /* Paranoid: Make sure the parent is the "instances" directory */
6396 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
6397 if (WARN_ON_ONCE(parent != trace_instance_dir))
6398 return -ENOENT;
6399
6400 /*
6401 * The inode mutex is locked, but debugfs_create_dir() will also
6402 * take the mutex. As the instances directory can not be destroyed
6403 * or changed in any other way, it is safe to unlock it, and
6404 * let the dentry try. If two users try to make the same dir at
6405 * the same time, then the new_instance_create() will determine the
6406 * winner.
6407 */
6408 mutex_unlock(&inode->i_mutex);
6409
6410 ret = new_instance_create(dentry->d_iname);
6411
6412 mutex_lock(&inode->i_mutex);
6413
6414 return ret;
6415}
6416
6417static int instance_rmdir(struct inode *inode, struct dentry *dentry)
6418{
6419 struct dentry *parent;
6420 int ret;
6421
6422 /* Paranoid: Make sure the parent is the "instances" directory */
6423 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
6424 if (WARN_ON_ONCE(parent != trace_instance_dir))
6425 return -ENOENT;
6426
6427 /* The caller did a dget() on dentry */
6428 mutex_unlock(&dentry->d_inode->i_mutex);
6429
6430 /*
6431 * The inode mutex is locked, but debugfs_create_dir() will also
6432 * take the mutex. As the instances directory can not be destroyed
6433 * or changed in any other way, it is safe to unlock it, and
6434 * let the dentry try. If two users try to make the same dir at
6435 * the same time, then the instance_delete() will determine the
6436 * winner.
6437 */
6438 mutex_unlock(&inode->i_mutex);
6439
6440 ret = instance_delete(dentry->d_iname);
6441
6442 mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
6443 mutex_lock(&dentry->d_inode->i_mutex);
6444
6445 return ret;
6446}
6447
6448static const struct inode_operations instance_dir_inode_operations = {
6449 .lookup = simple_lookup,
6450 .mkdir = instance_mkdir,
6451 .rmdir = instance_rmdir,
6452};
6453
6454static __init void create_trace_instances(struct dentry *d_tracer) 6618static __init void create_trace_instances(struct dentry *d_tracer)
6455{ 6619{
6456 trace_instance_dir = debugfs_create_dir("instances", d_tracer); 6620 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
6621 instance_mkdir,
6622 instance_rmdir);
6457 if (WARN_ON(!trace_instance_dir)) 6623 if (WARN_ON(!trace_instance_dir))
6458 return; 6624 return;
6459
6460 /* Hijack the dir inode operations, to allow mkdir */
6461 trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
6462} 6625}
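With tracefs providing mkdir/rmdir natively, the callbacks receive just the new directory's name and run without the parent inode mutex held, so the unlock/relock dance deleted above is no longer needed. A minimal usage sketch, assuming the tracefs_create_instance_dir() signature shown in this hunk (the example_* names are hypothetical):

/* Sketch: name-based directory callbacks under tracefs. */
static int example_mkdir(const char *name)
{
	/* allocate per-instance state keyed by name */
	return 0;
}

static int example_rmdir(const char *name)
{
	/* tear down per-instance state keyed by name */
	return 0;
}

static __init void example_create_instances(struct dentry *parent)
{
	struct dentry *dir;

	dir = tracefs_create_instance_dir("instances", parent,
					  example_mkdir, example_rmdir);
	WARN_ON(!dir);
}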
6463 6626
6464static void 6627static void
6465init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer) 6628init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
6466{ 6629{
6467 int cpu; 6630 int cpu;
6468 6631
@@ -6516,24 +6679,162 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
6516#endif 6679#endif
6517 6680
6518 for_each_tracing_cpu(cpu) 6681 for_each_tracing_cpu(cpu)
6519 tracing_init_debugfs_percpu(tr, cpu); 6682 tracing_init_tracefs_percpu(tr, cpu);
6683
6684}
6685
6686static struct vfsmount *trace_automount(void *ignore)
6687{
6688 struct vfsmount *mnt;
6689 struct file_system_type *type;
6690
6691 /*
6692 * To maintain backward compatibility for tools that mount
6693 * debugfs to get to the tracing facility, tracefs is automatically
6694 * mounted to the debugfs/tracing directory.
6695 */
6696 type = get_fs_type("tracefs");
6697 if (!type)
6698 return NULL;
6699 mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
6700 put_filesystem(type);
6701 if (IS_ERR(mnt))
6702 return NULL;
6703 mntget(mnt);
6704
6705 return mnt;
6706}
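The callback hands debugfs a pinned vfsmount to splice in whenever a lookup crosses into debugfs/tracing. A sketch of the same pattern for a generic filesystem ("examplefs" is a hypothetical filesystem name):

/* Sketch: generic automount callback. get_fs_type() takes a
 * reference on the filesystem type, dropped by put_filesystem();
 * mntget() keeps the returned mount pinned for the caller.
 */
static struct vfsmount *example_automount(void *ignore)
{
	struct file_system_type *type = get_fs_type("examplefs");
	struct vfsmount *mnt;

	if (!type)
		return NULL;

	mnt = vfs_kern_mount(type, 0, "examplefs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		return NULL;

	mntget(mnt);
	return mnt;
}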
6707
6708/**
6709 * tracing_init_dentry - initialize top level trace array
6710 *
6711 * This is called when creating files or directories in the tracing
6712 * directory. It is called via fs_initcall() by the boot-up code and
6713 * returns the dentry of the top-level tracing directory.
6714 */
6715struct dentry *tracing_init_dentry(void)
6716{
6717 struct trace_array *tr = &global_trace;
6718
6719 /* The top level trace array uses NULL as parent */
6720 if (tr->dir)
6721 return NULL;
6722
6723 if (WARN_ON(!debugfs_initialized()))
6724 return ERR_PTR(-ENODEV);
6725
6726 /*
6727 * As there may still be users that expect the tracing
6728 * files to exist in debugfs/tracing, we must automount
6729 * the tracefs file system there, so older tools still
6730 * work with the newer kernel.
6731 */
6732 tr->dir = debugfs_create_automount("tracing", NULL,
6733 trace_automount, NULL);
6734 if (!tr->dir) {
6735 pr_warn_once("Could not create debugfs directory 'tracing'\n");
6736 return ERR_PTR(-ENOMEM);
6737 }
6738
6739 return NULL;
6740}
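Note the new calling convention: NULL is the success value (the top-level tracefs directory then acts as the parent), so callers must test the result with IS_ERR() rather than !d_tracer. A minimal caller sketch (example_init() and example_fops are hypothetical):

/* Sketch: consuming tracing_init_dentry() after this change. */
static __init int example_init(void)
{
	struct dentry *d_tracer = tracing_init_dentry();

	if (IS_ERR(d_tracer))
		return 0;	/* tracing filesystem unavailable */

	/* NULL parent is valid: it means the tracefs top level */
	trace_create_file("example", 0444, d_tracer, NULL, &example_fops);
	return 0;
}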
6520 6741
6742extern struct trace_enum_map *__start_ftrace_enum_maps[];
6743extern struct trace_enum_map *__stop_ftrace_enum_maps[];
6744
6745static void __init trace_enum_init(void)
6746{
6747 int len;
6748
6749 len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
6750 trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
6751}
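The count falls out of pointer arithmetic on linker-provided symbols that bracket the section holding every built-in map. A sketch of the idiom, assuming the section is populated at build time:

/* Sketch: linker-section arrays. The linker script defines
 * __start_<sec>/__stop_<sec> symbols around the section
 * contents, so subtracting the two array names yields the
 * element count directly.
 */
extern struct trace_enum_map *__start_ftrace_enum_maps[];
extern struct trace_enum_map *__stop_ftrace_enum_maps[];

static int __init count_enum_maps(void)
{
	int len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;

	pr_info("%d built-in enum maps\n", len);
	return 0;
}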
6752
6753#ifdef CONFIG_MODULES
6754static void trace_module_add_enums(struct module *mod)
6755{
6756 if (!mod->num_trace_enums)
6757 return;
6758
6759 /*
6760 * Modules with bad taint do not have events created; do
6761 * not bother with enums either.
6762 */
6763 if (trace_module_has_bad_taint(mod))
6764 return;
6765
6766 trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
6767}
6768
6769#ifdef CONFIG_TRACE_ENUM_MAP_FILE
6770static void trace_module_remove_enums(struct module *mod)
6771{
6772 union trace_enum_map_item *map;
6773 union trace_enum_map_item **last = &trace_enum_maps;
6774
6775 if (!mod->num_trace_enums)
6776 return;
6777
6778 mutex_lock(&trace_enum_mutex);
6779
6780 map = trace_enum_maps;
6781
6782 while (map) {
6783 if (map->head.mod == mod)
6784 break;
6785 map = trace_enum_jmp_to_tail(map);
6786 last = &map->tail.next;
6787 map = map->tail.next;
6788 }
6789 if (!map)
6790 goto out;
6791
6792 *last = trace_enum_jmp_to_tail(map)->tail.next;
6793 kfree(map);
6794 out:
6795 mutex_unlock(&trace_enum_mutex);
6521} 6796}
6797#else
6798static inline void trace_module_remove_enums(struct module *mod) { }
6799#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
6800
6801static int trace_module_notify(struct notifier_block *self,
6802 unsigned long val, void *data)
6803{
6804 struct module *mod = data;
6805
6806 switch (val) {
6807 case MODULE_STATE_COMING:
6808 trace_module_add_enums(mod);
6809 break;
6810 case MODULE_STATE_GOING:
6811 trace_module_remove_enums(mod);
6812 break;
6813 }
6814
6815 return 0;
6816}
6817
6818static struct notifier_block trace_module_nb = {
6819 .notifier_call = trace_module_notify,
6820 .priority = 0,
6821};
6822#endif /* CONFIG_MODULES */
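The notifier runs on every module state change; only COMING and GOING matter here, adding or dropping that module's enum maps. A minimal sketch of the same shape (the example_* names are hypothetical):

/* Sketch: module notifier skeleton mirroring trace_module_nb. */
static int example_module_notify(struct notifier_block *self,
				 unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		pr_info("%s: loading\n", mod->name);
		break;
	case MODULE_STATE_GOING:
		pr_info("%s: unloading\n", mod->name);
		break;
	}
	return 0;
}

static struct notifier_block example_module_nb = {
	.notifier_call = example_module_notify,
	.priority = 0,
};
/* paired with register_module_notifier(&example_module_nb) at init */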
6522 6823
6523static __init int tracer_init_debugfs(void) 6824static __init int tracer_init_tracefs(void)
6524{ 6825{
6525 struct dentry *d_tracer; 6826 struct dentry *d_tracer;
6526 6827
6527 trace_access_lock_init(); 6828 trace_access_lock_init();
6528 6829
6529 d_tracer = tracing_init_dentry(); 6830 d_tracer = tracing_init_dentry();
6530 if (!d_tracer) 6831 if (IS_ERR(d_tracer))
6531 return 0; 6832 return 0;
6532 6833
6533 init_tracer_debugfs(&global_trace, d_tracer); 6834 init_tracer_tracefs(&global_trace, d_tracer);
6534 6835
6535 trace_create_file("tracing_thresh", 0644, d_tracer, 6836 trace_create_file("tracing_thresh", 0644, d_tracer,
6536 &tracing_thresh, &tracing_max_lat_fops); 6837 &global_trace, &tracing_thresh_fops);
6537 6838
6538 trace_create_file("README", 0444, d_tracer, 6839 trace_create_file("README", 0444, d_tracer,
6539 NULL, &tracing_readme_fops); 6840 NULL, &tracing_readme_fops);
@@ -6544,6 +6845,14 @@ static __init int tracer_init_debugfs(void)
6544 trace_create_file("saved_cmdlines_size", 0644, d_tracer, 6845 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
6545 NULL, &tracing_saved_cmdlines_size_fops); 6846 NULL, &tracing_saved_cmdlines_size_fops);
6546 6847
6848 trace_enum_init();
6849
6850 trace_create_enum_file(d_tracer);
6851
6852#ifdef CONFIG_MODULES
6853 register_module_notifier(&trace_module_nb);
6854#endif
6855
6547#ifdef CONFIG_DYNAMIC_FTRACE 6856#ifdef CONFIG_DYNAMIC_FTRACE
6548 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, 6857 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6549 &ftrace_update_tot_cnt, &tracing_dyn_info_fops); 6858 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
@@ -6553,6 +6862,10 @@ static __init int tracer_init_debugfs(void)
6553 6862
6554 create_trace_options_dir(&global_trace); 6863 create_trace_options_dir(&global_trace);
6555 6864
6865 /* If the tracer was started via cmdline, create options for it here */
6866 if (global_trace.current_trace != &nop_trace)
6867 update_tracer_options(&global_trace, global_trace.current_trace);
6868
6556 return 0; 6869 return 0;
6557} 6870}
6558 6871
@@ -6607,11 +6920,19 @@ void
6607trace_printk_seq(struct trace_seq *s) 6920trace_printk_seq(struct trace_seq *s)
6608{ 6921{
6609 /* Probably should print a warning here. */ 6922 /* Probably should print a warning here. */
6610 if (s->len >= TRACE_MAX_PRINT) 6923 if (s->seq.len >= TRACE_MAX_PRINT)
6611 s->len = TRACE_MAX_PRINT; 6924 s->seq.len = TRACE_MAX_PRINT;
6925
6926 /*
6927 * More paranoid code. Although the buffer size is set to
6928 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
6929 * an extra layer of protection.
6930 */
6931 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
6932 s->seq.len = s->seq.size - 1;
6612 6933
6613 /* should be zero ended, but we are paranoid. */ 6934 /* should be zero ended, but we are paranoid. */
6614 s->buffer[s->len] = 0; 6935 s->buffer[s->seq.len] = 0;
6615 6936
6616 printk(KERN_TRACE "%s", s->buffer); 6937 printk(KERN_TRACE "%s", s->buffer);
6617 6938
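The s->len accesses become s->seq.len because trace_seq now embeds a seq_buf, which also exposes the capacity (s->seq.size) used by the new overflow check. A rough sketch of the assumed layering (field names abridged; the *_sketch types are hypothetical stand-ins, not the real definitions):

/* Sketch: trace_seq wrapping a seq_buf after the rework. */
struct seq_buf_sketch {
	char		*buffer;	/* backing storage */
	size_t		size;		/* capacity */
	size_t		len;		/* bytes used */
};

struct trace_seq_sketch {
	char			buffer[PAGE_SIZE];
	struct seq_buf_sketch	seq;	/* seq.buffer points at buffer */
};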
@@ -6752,7 +7073,6 @@ __init static int tracer_alloc_buffers(void)
6752 int ring_buf_size; 7073 int ring_buf_size;
6753 int ret = -ENOMEM; 7074 int ret = -ENOMEM;
6754 7075
6755
6756 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL)) 7076 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
6757 goto out; 7077 goto out;
6758 7078
@@ -6850,6 +7170,18 @@ out:
6850 return ret; 7170 return ret;
6851} 7171}
6852 7172
7173void __init trace_init(void)
7174{
7175 if (tracepoint_printk) {
7176 tracepoint_print_iter =
7177 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
7178 if (WARN_ON(!tracepoint_print_iter))
7179 tracepoint_printk = 0;
7180 }
7181 tracer_alloc_buffers();
7182 trace_event_init();
7183}
7184
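trace_init() is now invoked directly during early boot (presumably from start_kernel()) rather than via early_initcall(), and it preallocates the print iterator only when the tracepoint_printk flag was set on the command line. A sketch of how such a flag is plausibly wired up (the "tp_printk" parameter name is an assumption, not shown in this hunk):

/* Sketch: boot parameter assumed to set tracepoint_printk
 * before trace_init() runs; the spelling is hypothetical.
 */
static int __init set_tracepoint_printk(char *str)
{
	if (strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0)
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);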
6853__init static int clear_boot_tracer(void) 7185__init static int clear_boot_tracer(void)
6854{ 7186{
6855 /* 7187 /*
@@ -6869,6 +7201,5 @@ __init static int clear_boot_tracer(void)
6869 return 0; 7201 return 0;
6870} 7202}
6871 7203
6872early_initcall(tracer_alloc_buffers); 7204fs_initcall(tracer_init_tracefs);
6873fs_initcall(tracer_init_debugfs);
6874late_initcall(clear_boot_tracer); 7205late_initcall(clear_boot_tracer);