diff options
Diffstat (limited to 'kernel/trace')
| -rw-r--r-- | kernel/trace/Kconfig | 36 | ||||
| -rw-r--r-- | kernel/trace/Makefile | 1 | ||||
| -rw-r--r-- | kernel/trace/bpf_trace.c | 222 | ||||
| -rw-r--r-- | kernel/trace/ftrace.c | 44 | ||||
| -rw-r--r-- | kernel/trace/ring_buffer.c | 10 | ||||
| -rw-r--r-- | kernel/trace/trace.c | 491 | ||||
| -rw-r--r-- | kernel/trace/trace.h | 4 | ||||
| -rw-r--r-- | kernel/trace/trace_entries.h | 6 | ||||
| -rw-r--r-- | kernel/trace/trace_events.c | 153 | ||||
| -rw-r--r-- | kernel/trace/trace_export.c | 2 | ||||
| -rw-r--r-- | kernel/trace/trace_functions_graph.c | 7 | ||||
| -rw-r--r-- | kernel/trace/trace_kprobe.c | 25 | ||||
| -rw-r--r-- | kernel/trace/trace_probe.c | 19 | ||||
| -rw-r--r-- | kernel/trace/trace_probe.h | 12 | ||||
| -rw-r--r-- | kernel/trace/trace_stack.c | 4 | ||||
| -rw-r--r-- | kernel/trace/trace_stat.c | 10 | ||||
| -rw-r--r-- | kernel/trace/trace_uprobe.c | 15 |
17 files changed, 866 insertions, 195 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index a5da09c899dd..3b9a48ae153a 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig | |||
| @@ -432,6 +432,14 @@ config UPROBE_EVENT | |||
| 432 | This option is required if you plan to use perf-probe subcommand | 432 | This option is required if you plan to use perf-probe subcommand |
| 433 | of perf tools on user space applications. | 433 | of perf tools on user space applications. |
| 434 | 434 | ||
| 435 | config BPF_EVENTS | ||
| 436 | depends on BPF_SYSCALL | ||
| 437 | depends on KPROBE_EVENT | ||
| 438 | bool | ||
| 439 | default y | ||
| 440 | help | ||
| 441 | This allows the user to attach BPF programs to kprobe events. | ||
| 442 | |||
| 435 | config PROBE_EVENTS | 443 | config PROBE_EVENTS |
| 436 | def_bool n | 444 | def_bool n |
| 437 | 445 | ||
| @@ -599,6 +607,34 @@ config RING_BUFFER_STARTUP_TEST | |||
| 599 | 607 | ||
| 600 | If unsure, say N | 608 | If unsure, say N |
| 601 | 609 | ||
| 610 | config TRACE_ENUM_MAP_FILE | ||
| 611 | bool "Show enum mappings for trace events" | ||
| 612 | depends on TRACING | ||
| 613 | help | ||
| 614 | The "print fmt" of the trace events will show the enum names instead | ||
| 615 | of their values. This can cause problems for user space tools that | ||
| 616 | use this string to parse the raw data as user space does not know | ||
| 617 | how to convert the string to its value. | ||
| 618 | |||
| 619 | To fix this, there's a special macro in the kernel that can be used | ||
| 620 | to convert the enum into its value. If this macro is used, then the | ||
| 621 | print fmt strings will have the enums converted to their values. | ||
| 622 | |||
| 623 | If something does not get converted properly, this option can be | ||
| 624 | used to show what enums the kernel tried to convert. | ||
| 625 | |||
| 626 | This option is for debugging the enum conversions. A file is created | ||
| 627 | in the tracing directory called "enum_map" that will show the enum | ||
| 628 | names matched with their values and what trace event system they | ||
| 629 | belong to. | ||
| 630 | |||
| 631 | Normally, the mapping of the strings to values will be freed after | ||
| 632 | boot up or module load. With this option, they will not be freed, as | ||
| 633 | they are needed for the "enum_map" file. Enabling this option will | ||
| 634 | increase the memory footprint of the running kernel. | ||
| 635 | |||
| 636 | If unsure, say N | ||
| 637 | |||
| 602 | endif # FTRACE | 638 | endif # FTRACE |
| 603 | 639 | ||
| 604 | endif # TRACING_SUPPORT | 640 | endif # TRACING_SUPPORT |
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index 98f26588255e..9b1044e936a6 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile | |||
| @@ -53,6 +53,7 @@ obj-$(CONFIG_EVENT_TRACING) += trace_event_perf.o | |||
| 53 | endif | 53 | endif |
| 54 | obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o | 54 | obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o |
| 55 | obj-$(CONFIG_EVENT_TRACING) += trace_events_trigger.o | 55 | obj-$(CONFIG_EVENT_TRACING) += trace_events_trigger.o |
| 56 | obj-$(CONFIG_BPF_EVENTS) += bpf_trace.o | ||
| 56 | obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o | 57 | obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o |
| 57 | obj-$(CONFIG_TRACEPOINTS) += power-traces.o | 58 | obj-$(CONFIG_TRACEPOINTS) += power-traces.o |
| 58 | ifeq ($(CONFIG_PM),y) | 59 | ifeq ($(CONFIG_PM),y) |
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c new file mode 100644 index 000000000000..2d56ce501632 --- /dev/null +++ b/kernel/trace/bpf_trace.c | |||
| @@ -0,0 +1,222 @@ | |||
| 1 | /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com | ||
| 2 | * | ||
| 3 | * This program is free software; you can redistribute it and/or | ||
| 4 | * modify it under the terms of version 2 of the GNU General Public | ||
| 5 | * License as published by the Free Software Foundation. | ||
| 6 | */ | ||
| 7 | #include <linux/kernel.h> | ||
| 8 | #include <linux/types.h> | ||
| 9 | #include <linux/slab.h> | ||
| 10 | #include <linux/bpf.h> | ||
| 11 | #include <linux/filter.h> | ||
| 12 | #include <linux/uaccess.h> | ||
| 13 | #include <linux/ctype.h> | ||
| 14 | #include "trace.h" | ||
| 15 | |||
| 16 | static DEFINE_PER_CPU(int, bpf_prog_active); | ||
| 17 | |||
| 18 | /** | ||
| 19 | * trace_call_bpf - invoke BPF program | ||
| 20 | * @prog: BPF program | ||
| 21 | * @ctx: opaque context pointer | ||
| 22 | * | ||
| 23 | * kprobe handlers execute BPF programs via this helper. | ||
| 24 | * Can be used from static tracepoints in the future. | ||
| 25 | * | ||
| 26 | * Return: BPF programs always return an integer which is interpreted by | ||
| 27 | * kprobe handler as: | ||
| 28 | * 0 - return from kprobe (event is filtered out) | ||
| 29 | * 1 - store kprobe event into ring buffer | ||
| 30 | * Other values are reserved and currently alias to 1 | ||
| 31 | */ | ||
| 32 | unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx) | ||
| 33 | { | ||
| 34 | unsigned int ret; | ||
| 35 | |||
| 36 | if (in_nmi()) /* not supported yet */ | ||
| 37 | return 1; | ||
| 38 | |||
| 39 | preempt_disable(); | ||
| 40 | |||
| 41 | if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) { | ||
| 42 | /* | ||
| 43 | * since some bpf program is already running on this cpu, | ||
| 44 | * don't call into another bpf program (same or different) | ||
| 45 | * and don't send kprobe event into ring-buffer, | ||
| 46 | * so return zero here | ||
| 47 | */ | ||
| 48 | ret = 0; | ||
| 49 | goto out; | ||
| 50 | } | ||
| 51 | |||
| 52 | rcu_read_lock(); | ||
| 53 | ret = BPF_PROG_RUN(prog, ctx); | ||
| 54 | rcu_read_unlock(); | ||
| 55 | |||
| 56 | out: | ||
| 57 | __this_cpu_dec(bpf_prog_active); | ||
| 58 | preempt_enable(); | ||
| 59 | |||
| 60 | return ret; | ||
| 61 | } | ||
| 62 | EXPORT_SYMBOL_GPL(trace_call_bpf); | ||
| 63 | |||
| 64 | static u64 bpf_probe_read(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) | ||
| 65 | { | ||
| 66 | void *dst = (void *) (long) r1; | ||
| 67 | int size = (int) r2; | ||
| 68 | void *unsafe_ptr = (void *) (long) r3; | ||
| 69 | |||
| 70 | return probe_kernel_read(dst, unsafe_ptr, size); | ||
| 71 | } | ||
| 72 | |||
| 73 | static const struct bpf_func_proto bpf_probe_read_proto = { | ||
| 74 | .func = bpf_probe_read, | ||
| 75 | .gpl_only = true, | ||
| 76 | .ret_type = RET_INTEGER, | ||
| 77 | .arg1_type = ARG_PTR_TO_STACK, | ||
| 78 | .arg2_type = ARG_CONST_STACK_SIZE, | ||
| 79 | .arg3_type = ARG_ANYTHING, | ||
| 80 | }; | ||
| 81 | |||
| 82 | static u64 bpf_ktime_get_ns(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) | ||
| 83 | { | ||
| 84 | /* NMI safe access to clock monotonic */ | ||
| 85 | return ktime_get_mono_fast_ns(); | ||
| 86 | } | ||
| 87 | |||
| 88 | static const struct bpf_func_proto bpf_ktime_get_ns_proto = { | ||
| 89 | .func = bpf_ktime_get_ns, | ||
| 90 | .gpl_only = true, | ||
| 91 | .ret_type = RET_INTEGER, | ||
| 92 | }; | ||
| 93 | |||
| 94 | /* | ||
| 95 | * limited trace_printk() | ||
| 96 | * only %d %u %x %ld %lu %lx %lld %llu %llx %p conversion specifiers allowed | ||
| 97 | */ | ||
| 98 | static u64 bpf_trace_printk(u64 r1, u64 fmt_size, u64 r3, u64 r4, u64 r5) | ||
| 99 | { | ||
| 100 | char *fmt = (char *) (long) r1; | ||
| 101 | int mod[3] = {}; | ||
| 102 | int fmt_cnt = 0; | ||
| 103 | int i; | ||
| 104 | |||
| 105 | /* | ||
| 106 | * bpf_check()->check_func_arg()->check_stack_boundary() | ||
| 107 | * guarantees that fmt points to bpf program stack, | ||
| 108 | * fmt_size bytes of it were initialized and fmt_size > 0 | ||
| 109 | */ | ||
| 110 | if (fmt[--fmt_size] != 0) | ||
| 111 | return -EINVAL; | ||
| 112 | |||
| 113 | /* check format string for allowed specifiers */ | ||
| 114 | for (i = 0; i < fmt_size; i++) { | ||
| 115 | if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) | ||
| 116 | return -EINVAL; | ||
| 117 | |||
| 118 | if (fmt[i] != '%') | ||
| 119 | continue; | ||
| 120 | |||
| 121 | if (fmt_cnt >= 3) | ||
| 122 | return -EINVAL; | ||
| 123 | |||
| 124 | /* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */ | ||
| 125 | i++; | ||
| 126 | if (fmt[i] == 'l') { | ||
| 127 | mod[fmt_cnt]++; | ||
| 128 | i++; | ||
| 129 | } else if (fmt[i] == 'p') { | ||
| 130 | mod[fmt_cnt]++; | ||
| 131 | i++; | ||
| 132 | if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0) | ||
| 133 | return -EINVAL; | ||
| 134 | fmt_cnt++; | ||
| 135 | continue; | ||
| 136 | } | ||
| 137 | |||
| 138 | if (fmt[i] == 'l') { | ||
| 139 | mod[fmt_cnt]++; | ||
| 140 | i++; | ||
| 141 | } | ||
| 142 | |||
| 143 | if (fmt[i] != 'd' && fmt[i] != 'u' && fmt[i] != 'x') | ||
| 144 | return -EINVAL; | ||
| 145 | fmt_cnt++; | ||
| 146 | } | ||
| 147 | |||
| 148 | return __trace_printk(1/* fake ip will not be printed */, fmt, | ||
| 149 | mod[0] == 2 ? r3 : mod[0] == 1 ? (long) r3 : (u32) r3, | ||
| 150 | mod[1] == 2 ? r4 : mod[1] == 1 ? (long) r4 : (u32) r4, | ||
| 151 | mod[2] == 2 ? r5 : mod[2] == 1 ? (long) r5 : (u32) r5); | ||
| 152 | } | ||
| 153 | |||
| 154 | static const struct bpf_func_proto bpf_trace_printk_proto = { | ||
| 155 | .func = bpf_trace_printk, | ||
| 156 | .gpl_only = true, | ||
| 157 | .ret_type = RET_INTEGER, | ||
| 158 | .arg1_type = ARG_PTR_TO_STACK, | ||
| 159 | .arg2_type = ARG_CONST_STACK_SIZE, | ||
| 160 | }; | ||
| 161 | |||
| 162 | static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id) | ||
| 163 | { | ||
| 164 | switch (func_id) { | ||
| 165 | case BPF_FUNC_map_lookup_elem: | ||
| 166 | return &bpf_map_lookup_elem_proto; | ||
| 167 | case BPF_FUNC_map_update_elem: | ||
| 168 | return &bpf_map_update_elem_proto; | ||
| 169 | case BPF_FUNC_map_delete_elem: | ||
| 170 | return &bpf_map_delete_elem_proto; | ||
| 171 | case BPF_FUNC_probe_read: | ||
| 172 | return &bpf_probe_read_proto; | ||
| 173 | case BPF_FUNC_ktime_get_ns: | ||
| 174 | return &bpf_ktime_get_ns_proto; | ||
| 175 | |||
| 176 | case BPF_FUNC_trace_printk: | ||
| 177 | /* | ||
| 178 | * this program might be calling bpf_trace_printk, | ||
| 179 | * so allocate per-cpu printk buffers | ||
| 180 | */ | ||
| 181 | trace_printk_init_buffers(); | ||
| 182 | |||
| 183 | return &bpf_trace_printk_proto; | ||
| 184 | default: | ||
| 185 | return NULL; | ||
| 186 | } | ||
| 187 | } | ||
| 188 | |||
| 189 | /* bpf+kprobe programs can access fields of 'struct pt_regs' */ | ||
| 190 | static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type) | ||
| 191 | { | ||
| 192 | /* check bounds */ | ||
| 193 | if (off < 0 || off >= sizeof(struct pt_regs)) | ||
| 194 | return false; | ||
| 195 | |||
| 196 | /* only read is allowed */ | ||
| 197 | if (type != BPF_READ) | ||
| 198 | return false; | ||
| 199 | |||
| 200 | /* disallow misaligned access */ | ||
| 201 | if (off % size != 0) | ||
| 202 | return false; | ||
| 203 | |||
| 204 | return true; | ||
| 205 | } | ||
| 206 | |||
| 207 | static struct bpf_verifier_ops kprobe_prog_ops = { | ||
| 208 | .get_func_proto = kprobe_prog_func_proto, | ||
| 209 | .is_valid_access = kprobe_prog_is_valid_access, | ||
| 210 | }; | ||
| 211 | |||
| 212 | static struct bpf_prog_type_list kprobe_tl = { | ||
| 213 | .ops = &kprobe_prog_ops, | ||
| 214 | .type = BPF_PROG_TYPE_KPROBE, | ||
| 215 | }; | ||
| 216 | |||
| 217 | static int __init register_kprobe_prog_ops(void) | ||
| 218 | { | ||
| 219 | bpf_register_prog_type(&kprobe_tl); | ||
| 220 | return 0; | ||
| 221 | } | ||
| 222 | late_initcall(register_kprobe_prog_ops); | ||
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 4f228024055b..02bece4a99ea 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
| @@ -18,7 +18,7 @@ | |||
| 18 | #include <linux/kallsyms.h> | 18 | #include <linux/kallsyms.h> |
| 19 | #include <linux/seq_file.h> | 19 | #include <linux/seq_file.h> |
| 20 | #include <linux/suspend.h> | 20 | #include <linux/suspend.h> |
| 21 | #include <linux/debugfs.h> | 21 | #include <linux/tracefs.h> |
| 22 | #include <linux/hardirq.h> | 22 | #include <linux/hardirq.h> |
| 23 | #include <linux/kthread.h> | 23 | #include <linux/kthread.h> |
| 24 | #include <linux/uaccess.h> | 24 | #include <linux/uaccess.h> |
| @@ -249,6 +249,19 @@ static void update_function_graph_func(void); | |||
| 249 | static inline void update_function_graph_func(void) { } | 249 | static inline void update_function_graph_func(void) { } |
| 250 | #endif | 250 | #endif |
| 251 | 251 | ||
| 252 | |||
| 253 | static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops) | ||
| 254 | { | ||
| 255 | /* | ||
| 256 | * If this is a dynamic ops or we force list func, | ||
| 257 | * then it needs to call the list anyway. | ||
| 258 | */ | ||
| 259 | if (ops->flags & FTRACE_OPS_FL_DYNAMIC || FTRACE_FORCE_LIST_FUNC) | ||
| 260 | return ftrace_ops_list_func; | ||
| 261 | |||
| 262 | return ftrace_ops_get_func(ops); | ||
| 263 | } | ||
| 264 | |||
| 252 | static void update_ftrace_function(void) | 265 | static void update_ftrace_function(void) |
| 253 | { | 266 | { |
| 254 | ftrace_func_t func; | 267 | ftrace_func_t func; |
| @@ -270,7 +283,7 @@ static void update_ftrace_function(void) | |||
| 270 | * then have the mcount trampoline call the function directly. | 283 | * then have the mcount trampoline call the function directly. |
| 271 | */ | 284 | */ |
| 272 | } else if (ftrace_ops_list->next == &ftrace_list_end) { | 285 | } else if (ftrace_ops_list->next == &ftrace_list_end) { |
| 273 | func = ftrace_ops_get_func(ftrace_ops_list); | 286 | func = ftrace_ops_get_list_func(ftrace_ops_list); |
| 274 | 287 | ||
| 275 | } else { | 288 | } else { |
| 276 | /* Just use the default ftrace_ops */ | 289 | /* Just use the default ftrace_ops */ |
| @@ -1008,7 +1021,7 @@ static struct tracer_stat function_stats __initdata = { | |||
| 1008 | .stat_show = function_stat_show | 1021 | .stat_show = function_stat_show |
| 1009 | }; | 1022 | }; |
| 1010 | 1023 | ||
| 1011 | static __init void ftrace_profile_debugfs(struct dentry *d_tracer) | 1024 | static __init void ftrace_profile_tracefs(struct dentry *d_tracer) |
| 1012 | { | 1025 | { |
| 1013 | struct ftrace_profile_stat *stat; | 1026 | struct ftrace_profile_stat *stat; |
| 1014 | struct dentry *entry; | 1027 | struct dentry *entry; |
| @@ -1044,15 +1057,15 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer) | |||
| 1044 | } | 1057 | } |
| 1045 | } | 1058 | } |
| 1046 | 1059 | ||
| 1047 | entry = debugfs_create_file("function_profile_enabled", 0644, | 1060 | entry = tracefs_create_file("function_profile_enabled", 0644, |
| 1048 | d_tracer, NULL, &ftrace_profile_fops); | 1061 | d_tracer, NULL, &ftrace_profile_fops); |
| 1049 | if (!entry) | 1062 | if (!entry) |
| 1050 | pr_warning("Could not create debugfs " | 1063 | pr_warning("Could not create tracefs " |
| 1051 | "'function_profile_enabled' entry\n"); | 1064 | "'function_profile_enabled' entry\n"); |
| 1052 | } | 1065 | } |
| 1053 | 1066 | ||
| 1054 | #else /* CONFIG_FUNCTION_PROFILER */ | 1067 | #else /* CONFIG_FUNCTION_PROFILER */ |
| 1055 | static __init void ftrace_profile_debugfs(struct dentry *d_tracer) | 1068 | static __init void ftrace_profile_tracefs(struct dentry *d_tracer) |
| 1056 | { | 1069 | { |
| 1057 | } | 1070 | } |
| 1058 | #endif /* CONFIG_FUNCTION_PROFILER */ | 1071 | #endif /* CONFIG_FUNCTION_PROFILER */ |
| @@ -4712,7 +4725,7 @@ void ftrace_destroy_filter_files(struct ftrace_ops *ops) | |||
| 4712 | mutex_unlock(&ftrace_lock); | 4725 | mutex_unlock(&ftrace_lock); |
| 4713 | } | 4726 | } |
| 4714 | 4727 | ||
| 4715 | static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) | 4728 | static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer) |
| 4716 | { | 4729 | { |
| 4717 | 4730 | ||
| 4718 | trace_create_file("available_filter_functions", 0444, | 4731 | trace_create_file("available_filter_functions", 0444, |
| @@ -5020,7 +5033,7 @@ static int __init ftrace_nodyn_init(void) | |||
| 5020 | } | 5033 | } |
| 5021 | core_initcall(ftrace_nodyn_init); | 5034 | core_initcall(ftrace_nodyn_init); |
| 5022 | 5035 | ||
| 5023 | static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; } | 5036 | static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; } |
| 5024 | static inline void ftrace_startup_enable(int command) { } | 5037 | static inline void ftrace_startup_enable(int command) { } |
| 5025 | static inline void ftrace_startup_all(int command) { } | 5038 | static inline void ftrace_startup_all(int command) { } |
| 5026 | /* Keep as macros so we do not need to define the commands */ | 5039 | /* Keep as macros so we do not need to define the commands */ |
| @@ -5209,13 +5222,6 @@ static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip, | |||
| 5209 | ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops) | 5222 | ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops) |
| 5210 | { | 5223 | { |
| 5211 | /* | 5224 | /* |
| 5212 | * If this is a dynamic ops or we force list func, | ||
| 5213 | * then it needs to call the list anyway. | ||
| 5214 | */ | ||
| 5215 | if (ops->flags & FTRACE_OPS_FL_DYNAMIC || FTRACE_FORCE_LIST_FUNC) | ||
| 5216 | return ftrace_ops_list_func; | ||
| 5217 | |||
| 5218 | /* | ||
| 5219 | * If the func handles its own recursion, call it directly. | 5225 | * If the func handles its own recursion, call it directly. |
| 5220 | * Otherwise call the recursion protected function that | 5226 | * Otherwise call the recursion protected function that |
| 5221 | * will call the ftrace ops function. | 5227 | * will call the ftrace ops function. |
| @@ -5473,7 +5479,7 @@ static const struct file_operations ftrace_pid_fops = { | |||
| 5473 | .release = ftrace_pid_release, | 5479 | .release = ftrace_pid_release, |
| 5474 | }; | 5480 | }; |
| 5475 | 5481 | ||
| 5476 | static __init int ftrace_init_debugfs(void) | 5482 | static __init int ftrace_init_tracefs(void) |
| 5477 | { | 5483 | { |
| 5478 | struct dentry *d_tracer; | 5484 | struct dentry *d_tracer; |
| 5479 | 5485 | ||
| @@ -5481,16 +5487,16 @@ static __init int ftrace_init_debugfs(void) | |||
| 5481 | if (IS_ERR(d_tracer)) | 5487 | if (IS_ERR(d_tracer)) |
| 5482 | return 0; | 5488 | return 0; |
| 5483 | 5489 | ||
| 5484 | ftrace_init_dyn_debugfs(d_tracer); | 5490 | ftrace_init_dyn_tracefs(d_tracer); |
| 5485 | 5491 | ||
| 5486 | trace_create_file("set_ftrace_pid", 0644, d_tracer, | 5492 | trace_create_file("set_ftrace_pid", 0644, d_tracer, |
| 5487 | NULL, &ftrace_pid_fops); | 5493 | NULL, &ftrace_pid_fops); |
| 5488 | 5494 | ||
| 5489 | ftrace_profile_debugfs(d_tracer); | 5495 | ftrace_profile_tracefs(d_tracer); |
| 5490 | 5496 | ||
| 5491 | return 0; | 5497 | return 0; |
| 5492 | } | 5498 | } |
| 5493 | fs_initcall(ftrace_init_debugfs); | 5499 | fs_initcall(ftrace_init_tracefs); |
| 5494 | 5500 | ||
| 5495 | /** | 5501 | /** |
| 5496 | * ftrace_kill - kill ftrace | 5502 | * ftrace_kill - kill ftrace |
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 5040d44fe5a3..0315d43176d8 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
| @@ -2679,7 +2679,7 @@ static DEFINE_PER_CPU(unsigned int, current_context); | |||
| 2679 | 2679 | ||
| 2680 | static __always_inline int trace_recursive_lock(void) | 2680 | static __always_inline int trace_recursive_lock(void) |
| 2681 | { | 2681 | { |
| 2682 | unsigned int val = this_cpu_read(current_context); | 2682 | unsigned int val = __this_cpu_read(current_context); |
| 2683 | int bit; | 2683 | int bit; |
| 2684 | 2684 | ||
| 2685 | if (in_interrupt()) { | 2685 | if (in_interrupt()) { |
| @@ -2696,18 +2696,14 @@ static __always_inline int trace_recursive_lock(void) | |||
| 2696 | return 1; | 2696 | return 1; |
| 2697 | 2697 | ||
| 2698 | val |= (1 << bit); | 2698 | val |= (1 << bit); |
| 2699 | this_cpu_write(current_context, val); | 2699 | __this_cpu_write(current_context, val); |
| 2700 | 2700 | ||
| 2701 | return 0; | 2701 | return 0; |
| 2702 | } | 2702 | } |
| 2703 | 2703 | ||
| 2704 | static __always_inline void trace_recursive_unlock(void) | 2704 | static __always_inline void trace_recursive_unlock(void) |
| 2705 | { | 2705 | { |
| 2706 | unsigned int val = this_cpu_read(current_context); | 2706 | __this_cpu_and(current_context, __this_cpu_read(current_context) - 1); |
| 2707 | |||
| 2708 | val--; | ||
| 2709 | val &= this_cpu_read(current_context); | ||
| 2710 | this_cpu_write(current_context, val); | ||
| 2711 | } | 2707 | } |
| 2712 | 2708 | ||
| 2713 | #else | 2709 | #else |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 62c6506d663f..91eecaaa43e0 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
| @@ -20,6 +20,7 @@ | |||
| 20 | #include <linux/notifier.h> | 20 | #include <linux/notifier.h> |
| 21 | #include <linux/irqflags.h> | 21 | #include <linux/irqflags.h> |
| 22 | #include <linux/debugfs.h> | 22 | #include <linux/debugfs.h> |
| 23 | #include <linux/tracefs.h> | ||
| 23 | #include <linux/pagemap.h> | 24 | #include <linux/pagemap.h> |
| 24 | #include <linux/hardirq.h> | 25 | #include <linux/hardirq.h> |
| 25 | #include <linux/linkage.h> | 26 | #include <linux/linkage.h> |
| @@ -31,6 +32,7 @@ | |||
| 31 | #include <linux/splice.h> | 32 | #include <linux/splice.h> |
| 32 | #include <linux/kdebug.h> | 33 | #include <linux/kdebug.h> |
| 33 | #include <linux/string.h> | 34 | #include <linux/string.h> |
| 35 | #include <linux/mount.h> | ||
| 34 | #include <linux/rwsem.h> | 36 | #include <linux/rwsem.h> |
| 35 | #include <linux/slab.h> | 37 | #include <linux/slab.h> |
| 36 | #include <linux/ctype.h> | 38 | #include <linux/ctype.h> |
| @@ -123,6 +125,42 @@ enum ftrace_dump_mode ftrace_dump_on_oops; | |||
| 123 | /* When set, tracing will stop when a WARN*() is hit */ | 125 | /* When set, tracing will stop when a WARN*() is hit */ |
| 124 | int __disable_trace_on_warning; | 126 | int __disable_trace_on_warning; |
| 125 | 127 | ||
| 128 | #ifdef CONFIG_TRACE_ENUM_MAP_FILE | ||
| 129 | /* Map of enums to their values, for "enum_map" file */ | ||
| 130 | struct trace_enum_map_head { | ||
| 131 | struct module *mod; | ||
| 132 | unsigned long length; | ||
| 133 | }; | ||
| 134 | |||
| 135 | union trace_enum_map_item; | ||
| 136 | |||
| 137 | struct trace_enum_map_tail { | ||
| 138 | /* | ||
| 139 | * "end" is first and points to NULL as it must be different | ||
| 140 | * than "mod" or "enum_string" | ||
| 141 | */ | ||
| 142 | union trace_enum_map_item *next; | ||
| 143 | const char *end; /* points to NULL */ | ||
| 144 | }; | ||
| 145 | |||
| 146 | static DEFINE_MUTEX(trace_enum_mutex); | ||
| 147 | |||
| 148 | /* | ||
| 149 | * The trace_enum_maps are saved in an array with two extra elements, | ||
| 150 | * one at the beginning, and one at the end. The beginning item contains | ||
| 151 | * the count of the saved maps (head.length), and the module they | ||
| 152 | * belong to if not built in (head.mod). The ending item contains a | ||
| 153 | * pointer to the next array of saved enum_map items. | ||
| 154 | */ | ||
| 155 | union trace_enum_map_item { | ||
| 156 | struct trace_enum_map map; | ||
| 157 | struct trace_enum_map_head head; | ||
| 158 | struct trace_enum_map_tail tail; | ||
| 159 | }; | ||
| 160 | |||
| 161 | static union trace_enum_map_item *trace_enum_maps; | ||
| 162 | #endif /* CONFIG_TRACE_ENUM_MAP_FILE */ | ||
| 163 | |||
| 126 | static int tracing_set_tracer(struct trace_array *tr, const char *buf); | 164 | static int tracing_set_tracer(struct trace_array *tr, const char *buf); |
| 127 | 165 | ||
| 128 | #define MAX_TRACER_SIZE 100 | 166 | #define MAX_TRACER_SIZE 100 |
| @@ -3908,6 +3946,182 @@ static const struct file_operations tracing_saved_cmdlines_size_fops = { | |||
| 3908 | .write = tracing_saved_cmdlines_size_write, | 3946 | .write = tracing_saved_cmdlines_size_write, |
| 3909 | }; | 3947 | }; |
| 3910 | 3948 | ||
| 3949 | #ifdef CONFIG_TRACE_ENUM_MAP_FILE | ||
| 3950 | static union trace_enum_map_item * | ||
| 3951 | update_enum_map(union trace_enum_map_item *ptr) | ||
| 3952 | { | ||
| 3953 | if (!ptr->map.enum_string) { | ||
| 3954 | if (ptr->tail.next) { | ||
| 3955 | ptr = ptr->tail.next; | ||
| 3956 | /* Set ptr to the next real item (skip head) */ | ||
| 3957 | ptr++; | ||
| 3958 | } else | ||
| 3959 | return NULL; | ||
| 3960 | } | ||
| 3961 | return ptr; | ||
| 3962 | } | ||
| 3963 | |||
| 3964 | static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos) | ||
| 3965 | { | ||
| 3966 | union trace_enum_map_item *ptr = v; | ||
| 3967 | |||
| 3968 | /* | ||
| 3969 | * Paranoid! If ptr points to end, we don't want to increment past it. | ||
| 3970 | * This really should never happen. | ||
| 3971 | */ | ||
| 3972 | ptr = update_enum_map(ptr); | ||
| 3973 | if (WARN_ON_ONCE(!ptr)) | ||
| 3974 | return NULL; | ||
| 3975 | |||
| 3976 | ptr++; | ||
| 3977 | |||
| 3978 | (*pos)++; | ||
| 3979 | |||
| 3980 | ptr = update_enum_map(ptr); | ||
| 3981 | |||
| 3982 | return ptr; | ||
| 3983 | } | ||
| 3984 | |||
| 3985 | static void *enum_map_start(struct seq_file *m, loff_t *pos) | ||
| 3986 | { | ||
| 3987 | union trace_enum_map_item *v; | ||
| 3988 | loff_t l = 0; | ||
| 3989 | |||
| 3990 | mutex_lock(&trace_enum_mutex); | ||
| 3991 | |||
| 3992 | v = trace_enum_maps; | ||
| 3993 | if (v) | ||
| 3994 | v++; | ||
| 3995 | |||
| 3996 | while (v && l < *pos) { | ||
| 3997 | v = enum_map_next(m, v, &l); | ||
| 3998 | } | ||
| 3999 | |||
| 4000 | return v; | ||
| 4001 | } | ||
| 4002 | |||
| 4003 | static void enum_map_stop(struct seq_file *m, void *v) | ||
| 4004 | { | ||
| 4005 | mutex_unlock(&trace_enum_mutex); | ||
| 4006 | } | ||
| 4007 | |||
| 4008 | static int enum_map_show(struct seq_file *m, void *v) | ||
| 4009 | { | ||
| 4010 | union trace_enum_map_item *ptr = v; | ||
| 4011 | |||
| 4012 | seq_printf(m, "%s %ld (%s)\n", | ||
| 4013 | ptr->map.enum_string, ptr->map.enum_value, | ||
| 4014 | ptr->map.system); | ||
| 4015 | |||
| 4016 | return 0; | ||
| 4017 | } | ||
| 4018 | |||
| 4019 | static const struct seq_operations tracing_enum_map_seq_ops = { | ||
| 4020 | .start = enum_map_start, | ||
| 4021 | .next = enum_map_next, | ||
| 4022 | .stop = enum_map_stop, | ||
| 4023 | .show = enum_map_show, | ||
| 4024 | }; | ||
| 4025 | |||
| 4026 | static int tracing_enum_map_open(struct inode *inode, struct file *filp) | ||
| 4027 | { | ||
| 4028 | if (tracing_disabled) | ||
| 4029 | return -ENODEV; | ||
| 4030 | |||
| 4031 | return seq_open(filp, &tracing_enum_map_seq_ops); | ||
| 4032 | } | ||
| 4033 | |||
| 4034 | static const struct file_operations tracing_enum_map_fops = { | ||
| 4035 | .open = tracing_enum_map_open, | ||
| 4036 | .read = seq_read, | ||
| 4037 | .llseek = seq_lseek, | ||
| 4038 | .release = seq_release, | ||
| 4039 | }; | ||
| 4040 | |||
| 4041 | static inline union trace_enum_map_item * | ||
| 4042 | trace_enum_jmp_to_tail(union trace_enum_map_item *ptr) | ||
| 4043 | { | ||
| 4044 | /* Return tail of array given the head */ | ||
| 4045 | return ptr + ptr->head.length + 1; | ||
| 4046 | } | ||
| 4047 | |||
| 4048 | static void | ||
| 4049 | trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start, | ||
| 4050 | int len) | ||
| 4051 | { | ||
| 4052 | struct trace_enum_map **stop; | ||
| 4053 | struct trace_enum_map **map; | ||
| 4054 | union trace_enum_map_item *map_array; | ||
| 4055 | union trace_enum_map_item *ptr; | ||
| 4056 | |||
| 4057 | stop = start + len; | ||
| 4058 | |||
| 4059 | /* | ||
| 4060 | * The trace_enum_maps contains the map plus a head and tail item, | ||
| 4061 | * where the head holds the module and length of array, and the | ||
| 4062 | * tail holds a pointer to the next list. | ||
| 4063 | */ | ||
| 4064 | map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL); | ||
| 4065 | if (!map_array) { | ||
| 4066 | pr_warning("Unable to allocate trace enum mapping\n"); | ||
| 4067 | return; | ||
| 4068 | } | ||
| 4069 | |||
| 4070 | mutex_lock(&trace_enum_mutex); | ||
| 4071 | |||
| 4072 | if (!trace_enum_maps) | ||
| 4073 | trace_enum_maps = map_array; | ||
| 4074 | else { | ||
| 4075 | ptr = trace_enum_maps; | ||
| 4076 | for (;;) { | ||
| 4077 | ptr = trace_enum_jmp_to_tail(ptr); | ||
| 4078 | if (!ptr->tail.next) | ||
| 4079 | break; | ||
| 4080 | ptr = ptr->tail.next; | ||
| 4081 | |||
| 4082 | } | ||
| 4083 | ptr->tail.next = map_array; | ||
| 4084 | } | ||
| 4085 | map_array->head.mod = mod; | ||
| 4086 | map_array->head.length = len; | ||
| 4087 | map_array++; | ||
| 4088 | |||
| 4089 | for (map = start; (unsigned long)map < (unsigned long)stop; map++) { | ||
| 4090 | map_array->map = **map; | ||
| 4091 | map_array++; | ||
| 4092 | } | ||
| 4093 | memset(map_array, 0, sizeof(*map_array)); | ||
| 4094 | |||
| 4095 | mutex_unlock(&trace_enum_mutex); | ||
| 4096 | } | ||
| 4097 | |||
| 4098 | static void trace_create_enum_file(struct dentry *d_tracer) | ||
| 4099 | { | ||
| 4100 | trace_create_file("enum_map", 0444, d_tracer, | ||
| 4101 | NULL, &tracing_enum_map_fops); | ||
| 4102 | } | ||
| 4103 | |||
| 4104 | #else /* CONFIG_TRACE_ENUM_MAP_FILE */ | ||
| 4105 | static inline void trace_create_enum_file(struct dentry *d_tracer) { } | ||
| 4106 | static inline void trace_insert_enum_map_file(struct module *mod, | ||
| 4107 | struct trace_enum_map **start, int len) { } | ||
| 4108 | #endif /* !CONFIG_TRACE_ENUM_MAP_FILE */ | ||
| 4109 | |||
| 4110 | static void trace_insert_enum_map(struct module *mod, | ||
| 4111 | struct trace_enum_map **start, int len) | ||
| 4112 | { | ||
| 4113 | struct trace_enum_map **map; | ||
| 4114 | |||
| 4115 | if (len <= 0) | ||
| 4116 | return; | ||
| 4117 | |||
| 4118 | map = start; | ||
| 4119 | |||
| 4120 | trace_event_enum_update(map, len); | ||
| 4121 | |||
| 4122 | trace_insert_enum_map_file(mod, start, len); | ||
| 4123 | } | ||
| 4124 | |||
| 3911 | static ssize_t | 4125 | static ssize_t |
| 3912 | tracing_set_trace_read(struct file *filp, char __user *ubuf, | 4126 | tracing_set_trace_read(struct file *filp, char __user *ubuf, |
| 3913 | size_t cnt, loff_t *ppos) | 4127 | size_t cnt, loff_t *ppos) |
| @@ -4105,9 +4319,24 @@ static void tracing_set_nop(struct trace_array *tr) | |||
| 4105 | tr->current_trace = &nop_trace; | 4319 | tr->current_trace = &nop_trace; |
| 4106 | } | 4320 | } |
| 4107 | 4321 | ||
| 4108 | static int tracing_set_tracer(struct trace_array *tr, const char *buf) | 4322 | static void update_tracer_options(struct trace_array *tr, struct tracer *t) |
| 4109 | { | 4323 | { |
| 4110 | static struct trace_option_dentry *topts; | 4324 | static struct trace_option_dentry *topts; |
| 4325 | |||
| 4326 | /* Only enable if the directory has been created already. */ | ||
| 4327 | if (!tr->dir) | ||
| 4328 | return; | ||
| 4329 | |||
| 4330 | /* Currently, only the top instance has options */ | ||
| 4331 | if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL)) | ||
| 4332 | return; | ||
| 4333 | |||
| 4334 | destroy_trace_option_files(topts); | ||
| 4335 | topts = create_trace_option_files(tr, t); | ||
| 4336 | } | ||
| 4337 | |||
| 4338 | static int tracing_set_tracer(struct trace_array *tr, const char *buf) | ||
| 4339 | { | ||
| 4111 | struct tracer *t; | 4340 | struct tracer *t; |
| 4112 | #ifdef CONFIG_TRACER_MAX_TRACE | 4341 | #ifdef CONFIG_TRACER_MAX_TRACE |
| 4113 | bool had_max_tr; | 4342 | bool had_max_tr; |
| @@ -4172,11 +4401,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf) | |||
| 4172 | free_snapshot(tr); | 4401 | free_snapshot(tr); |
| 4173 | } | 4402 | } |
| 4174 | #endif | 4403 | #endif |
| 4175 | /* Currently, only the top instance has options */ | 4404 | update_tracer_options(tr, t); |
| 4176 | if (tr->flags & TRACE_ARRAY_FL_GLOBAL) { | ||
| 4177 | destroy_trace_option_files(topts); | ||
| 4178 | topts = create_trace_option_files(tr, t); | ||
| 4179 | } | ||
| 4180 | 4405 | ||
| 4181 | #ifdef CONFIG_TRACER_MAX_TRACE | 4406 | #ifdef CONFIG_TRACER_MAX_TRACE |
| 4182 | if (t->use_max_tr && !had_max_tr) { | 4407 | if (t->use_max_tr && !had_max_tr) { |
| @@ -5817,6 +6042,14 @@ static inline __init int register_snapshot_cmd(void) { return 0; } | |||
| 5817 | 6042 | ||
| 5818 | static struct dentry *tracing_get_dentry(struct trace_array *tr) | 6043 | static struct dentry *tracing_get_dentry(struct trace_array *tr) |
| 5819 | { | 6044 | { |
| 6045 | if (WARN_ON(!tr->dir)) | ||
| 6046 | return ERR_PTR(-ENODEV); | ||
| 6047 | |||
| 6048 | /* Top directory uses NULL as the parent */ | ||
| 6049 | if (tr->flags & TRACE_ARRAY_FL_GLOBAL) | ||
| 6050 | return NULL; | ||
| 6051 | |||
| 6052 | /* All sub buffers have a descriptor */ | ||
| 5820 | return tr->dir; | 6053 | return tr->dir; |
| 5821 | } | 6054 | } |
| 5822 | 6055 | ||
| @@ -5831,10 +6064,10 @@ static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu) | |||
| 5831 | if (IS_ERR(d_tracer)) | 6064 | if (IS_ERR(d_tracer)) |
| 5832 | return NULL; | 6065 | return NULL; |
| 5833 | 6066 | ||
| 5834 | tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer); | 6067 | tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer); |
| 5835 | 6068 | ||
| 5836 | WARN_ONCE(!tr->percpu_dir, | 6069 | WARN_ONCE(!tr->percpu_dir, |
| 5837 | "Could not create debugfs directory 'per_cpu/%d'\n", cpu); | 6070 | "Could not create tracefs directory 'per_cpu/%d'\n", cpu); |
| 5838 | 6071 | ||
| 5839 | return tr->percpu_dir; | 6072 | return tr->percpu_dir; |
| 5840 | } | 6073 | } |
| @@ -5851,7 +6084,7 @@ trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent, | |||
| 5851 | } | 6084 | } |
| 5852 | 6085 | ||
| 5853 | static void | 6086 | static void |
| 5854 | tracing_init_debugfs_percpu(struct trace_array *tr, long cpu) | 6087 | tracing_init_tracefs_percpu(struct trace_array *tr, long cpu) |
| 5855 | { | 6088 | { |
| 5856 | struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu); | 6089 | struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu); |
| 5857 | struct dentry *d_cpu; | 6090 | struct dentry *d_cpu; |
| @@ -5861,9 +6094,9 @@ tracing_init_debugfs_percpu(struct trace_array *tr, long cpu) | |||
| 5861 | return; | 6094 | return; |
| 5862 | 6095 | ||
| 5863 | snprintf(cpu_dir, 30, "cpu%ld", cpu); | 6096 | snprintf(cpu_dir, 30, "cpu%ld", cpu); |
| 5864 | d_cpu = debugfs_create_dir(cpu_dir, d_percpu); | 6097 | d_cpu = tracefs_create_dir(cpu_dir, d_percpu); |
| 5865 | if (!d_cpu) { | 6098 | if (!d_cpu) { |
| 5866 | pr_warning("Could not create debugfs '%s' entry\n", cpu_dir); | 6099 | pr_warning("Could not create tracefs '%s' entry\n", cpu_dir); |
| 5867 | return; | 6100 | return; |
| 5868 | } | 6101 | } |
| 5869 | 6102 | ||
| @@ -6015,9 +6248,9 @@ struct dentry *trace_create_file(const char *name, | |||
| 6015 | { | 6248 | { |
| 6016 | struct dentry *ret; | 6249 | struct dentry *ret; |
| 6017 | 6250 | ||
| 6018 | ret = debugfs_create_file(name, mode, parent, data, fops); | 6251 | ret = tracefs_create_file(name, mode, parent, data, fops); |
| 6019 | if (!ret) | 6252 | if (!ret) |
| 6020 | pr_warning("Could not create debugfs '%s' entry\n", name); | 6253 | pr_warning("Could not create tracefs '%s' entry\n", name); |
| 6021 | 6254 | ||
| 6022 | return ret; | 6255 | return ret; |
| 6023 | } | 6256 | } |
| @@ -6034,9 +6267,9 @@ static struct dentry *trace_options_init_dentry(struct trace_array *tr) | |||
| 6034 | if (IS_ERR(d_tracer)) | 6267 | if (IS_ERR(d_tracer)) |
| 6035 | return NULL; | 6268 | return NULL; |
| 6036 | 6269 | ||
| 6037 | tr->options = debugfs_create_dir("options", d_tracer); | 6270 | tr->options = tracefs_create_dir("options", d_tracer); |
| 6038 | if (!tr->options) { | 6271 | if (!tr->options) { |
| 6039 | pr_warning("Could not create debugfs directory 'options'\n"); | 6272 | pr_warning("Could not create tracefs directory 'options'\n"); |
| 6040 | return NULL; | 6273 | return NULL; |
| 6041 | } | 6274 | } |
| 6042 | 6275 | ||
| @@ -6105,7 +6338,7 @@ destroy_trace_option_files(struct trace_option_dentry *topts) | |||
| 6105 | return; | 6338 | return; |
| 6106 | 6339 | ||
| 6107 | for (cnt = 0; topts[cnt].opt; cnt++) | 6340 | for (cnt = 0; topts[cnt].opt; cnt++) |
| 6108 | debugfs_remove(topts[cnt].entry); | 6341 | tracefs_remove(topts[cnt].entry); |
| 6109 | 6342 | ||
| 6110 | kfree(topts); | 6343 | kfree(topts); |
| 6111 | } | 6344 | } |
| @@ -6194,7 +6427,7 @@ static const struct file_operations rb_simple_fops = { | |||
| 6194 | struct dentry *trace_instance_dir; | 6427 | struct dentry *trace_instance_dir; |
| 6195 | 6428 | ||
| 6196 | static void | 6429 | static void |
| 6197 | init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer); | 6430 | init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer); |
| 6198 | 6431 | ||
| 6199 | static int | 6432 | static int |
| 6200 | allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size) | 6433 | allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size) |
| @@ -6271,7 +6504,7 @@ static void free_trace_buffers(struct trace_array *tr) | |||
| 6271 | #endif | 6504 | #endif |
| 6272 | } | 6505 | } |
| 6273 | 6506 | ||
| 6274 | static int new_instance_create(const char *name) | 6507 | static int instance_mkdir(const char *name) |
| 6275 | { | 6508 | { |
| 6276 | struct trace_array *tr; | 6509 | struct trace_array *tr; |
| 6277 | int ret; | 6510 | int ret; |
| @@ -6310,17 +6543,17 @@ static int new_instance_create(const char *name) | |||
| 6310 | if (allocate_trace_buffers(tr, trace_buf_size) < 0) | 6543 | if (allocate_trace_buffers(tr, trace_buf_size) < 0) |
| 6311 | goto out_free_tr; | 6544 | goto out_free_tr; |
| 6312 | 6545 | ||
| 6313 | tr->dir = debugfs_create_dir(name, trace_instance_dir); | 6546 | tr->dir = tracefs_create_dir(name, trace_instance_dir); |
| 6314 | if (!tr->dir) | 6547 | if (!tr->dir) |
| 6315 | goto out_free_tr; | 6548 | goto out_free_tr; |
| 6316 | 6549 | ||
| 6317 | ret = event_trace_add_tracer(tr->dir, tr); | 6550 | ret = event_trace_add_tracer(tr->dir, tr); |
| 6318 | if (ret) { | 6551 | if (ret) { |
| 6319 | debugfs_remove_recursive(tr->dir); | 6552 | tracefs_remove_recursive(tr->dir); |
| 6320 | goto out_free_tr; | 6553 | goto out_free_tr; |
| 6321 | } | 6554 | } |
| 6322 | 6555 | ||
| 6323 | init_tracer_debugfs(tr, tr->dir); | 6556 | init_tracer_tracefs(tr, tr->dir); |
| 6324 | 6557 | ||
| 6325 | list_add(&tr->list, &ftrace_trace_arrays); | 6558 | list_add(&tr->list, &ftrace_trace_arrays); |
| 6326 | 6559 | ||
| @@ -6341,7 +6574,7 @@ static int new_instance_create(const char *name) | |||
| 6341 | 6574 | ||
| 6342 | } | 6575 | } |
| 6343 | 6576 | ||
| 6344 | static int instance_delete(const char *name) | 6577 | static int instance_rmdir(const char *name) |
| 6345 | { | 6578 | { |
| 6346 | struct trace_array *tr; | 6579 | struct trace_array *tr; |
| 6347 | int found = 0; | 6580 | int found = 0; |
| @@ -6382,82 +6615,17 @@ static int instance_delete(const char *name) | |||
| 6382 | return ret; | 6615 | return ret; |
| 6383 | } | 6616 | } |
| 6384 | 6617 | ||
| 6385 | static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t mode) | ||
| 6386 | { | ||
| 6387 | struct dentry *parent; | ||
| 6388 | int ret; | ||
| 6389 | |||
| 6390 | /* Paranoid: Make sure the parent is the "instances" directory */ | ||
| 6391 | parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias); | ||
| 6392 | if (WARN_ON_ONCE(parent != trace_instance_dir)) | ||
| 6393 | return -ENOENT; | ||
| 6394 | |||
| 6395 | /* | ||
| 6396 | * The inode mutex is locked, but debugfs_create_dir() will also | ||
| 6397 | * take the mutex. As the instances directory can not be destroyed | ||
| 6398 | * or changed in any other way, it is safe to unlock it, and | ||
| 6399 | * let the dentry try. If two users try to make the same dir at | ||
| 6400 | * the same time, then the new_instance_create() will determine the | ||
| 6401 | * winner. | ||
| 6402 | */ | ||
| 6403 | mutex_unlock(&inode->i_mutex); | ||
| 6404 | |||
| 6405 | ret = new_instance_create(dentry->d_iname); | ||
| 6406 | |||
| 6407 | mutex_lock(&inode->i_mutex); | ||
| 6408 | |||
| 6409 | return ret; | ||
| 6410 | } | ||
| 6411 | |||
| 6412 | static int instance_rmdir(struct inode *inode, struct dentry *dentry) | ||
| 6413 | { | ||
| 6414 | struct dentry *parent; | ||
| 6415 | int ret; | ||
| 6416 | |||
| 6417 | /* Paranoid: Make sure the parent is the "instances" directory */ | ||
| 6418 | parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias); | ||
| 6419 | if (WARN_ON_ONCE(parent != trace_instance_dir)) | ||
| 6420 | return -ENOENT; | ||
| 6421 | |||
| 6422 | /* The caller did a dget() on dentry */ | ||
| 6423 | mutex_unlock(&dentry->d_inode->i_mutex); | ||
| 6424 | |||
| 6425 | /* | ||
| 6426 | * The inode mutex is locked, but debugfs_create_dir() will also | ||
| 6427 | * take the mutex. As the instances directory can not be destroyed | ||
| 6428 | * or changed in any other way, it is safe to unlock it, and | ||
| 6429 | * let the dentry try. If two users try to make the same dir at | ||
| 6430 | * the same time, then the instance_delete() will determine the | ||
| 6431 | * winner. | ||
| 6432 | */ | ||
| 6433 | mutex_unlock(&inode->i_mutex); | ||
| 6434 | |||
| 6435 | ret = instance_delete(dentry->d_iname); | ||
| 6436 | |||
| 6437 | mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT); | ||
| 6438 | mutex_lock(&dentry->d_inode->i_mutex); | ||
| 6439 | |||
| 6440 | return ret; | ||
| 6441 | } | ||
| 6442 | |||
| 6443 | static const struct inode_operations instance_dir_inode_operations = { | ||
| 6444 | .lookup = simple_lookup, | ||
| 6445 | .mkdir = instance_mkdir, | ||
| 6446 | .rmdir = instance_rmdir, | ||
| 6447 | }; | ||
| 6448 | |||
| 6449 | static __init void create_trace_instances(struct dentry *d_tracer) | 6618 | static __init void create_trace_instances(struct dentry *d_tracer) |
| 6450 | { | 6619 | { |
| 6451 | trace_instance_dir = debugfs_create_dir("instances", d_tracer); | 6620 | trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer, |
| 6621 | instance_mkdir, | ||
| 6622 | instance_rmdir); | ||
| 6452 | if (WARN_ON(!trace_instance_dir)) | 6623 | if (WARN_ON(!trace_instance_dir)) |
| 6453 | return; | 6624 | return; |
| 6454 | |||
| 6455 | /* Hijack the dir inode operations, to allow mkdir */ | ||
| 6456 | trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations; | ||
| 6457 | } | 6625 | } |
| 6458 | 6626 | ||
| 6459 | static void | 6627 | static void |
| 6460 | init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer) | 6628 | init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer) |
| 6461 | { | 6629 | { |
| 6462 | int cpu; | 6630 | int cpu; |
| 6463 | 6631 | ||
| @@ -6511,10 +6679,32 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer) | |||
| 6511 | #endif | 6679 | #endif |
| 6512 | 6680 | ||
| 6513 | for_each_tracing_cpu(cpu) | 6681 | for_each_tracing_cpu(cpu) |
| 6514 | tracing_init_debugfs_percpu(tr, cpu); | 6682 | tracing_init_tracefs_percpu(tr, cpu); |
| 6515 | 6683 | ||
| 6516 | } | 6684 | } |
| 6517 | 6685 | ||
| 6686 | static struct vfsmount *trace_automount(void *ingore) | ||
| 6687 | { | ||
| 6688 | struct vfsmount *mnt; | ||
| 6689 | struct file_system_type *type; | ||
| 6690 | |||
| 6691 | /* | ||
| 6692 | * To maintain backward compatibility for tools that mount | ||
| 6693 | * debugfs to get to the tracing facility, tracefs is automatically | ||
| 6694 | * mounted to the debugfs/tracing directory. | ||
| 6695 | */ | ||
| 6696 | type = get_fs_type("tracefs"); | ||
| 6697 | if (!type) | ||
| 6698 | return NULL; | ||
| 6699 | mnt = vfs_kern_mount(type, 0, "tracefs", NULL); | ||
| 6700 | put_filesystem(type); | ||
| 6701 | if (IS_ERR(mnt)) | ||
| 6702 | return NULL; | ||
| 6703 | mntget(mnt); | ||
| 6704 | |||
| 6705 | return mnt; | ||
| 6706 | } | ||
| 6707 | |||
| 6518 | /** | 6708 | /** |
| 6519 | * tracing_init_dentry - initialize top level trace array | 6709 | * tracing_init_dentry - initialize top level trace array |
| 6520 | * | 6710 | * |
| @@ -6526,23 +6716,112 @@ struct dentry *tracing_init_dentry(void) | |||
| 6526 | { | 6716 | { |
| 6527 | struct trace_array *tr = &global_trace; | 6717 | struct trace_array *tr = &global_trace; |
| 6528 | 6718 | ||
| 6719 | /* The top level trace array uses NULL as parent */ | ||
| 6529 | if (tr->dir) | 6720 | if (tr->dir) |
| 6530 | return tr->dir; | 6721 | return NULL; |
| 6531 | 6722 | ||
| 6532 | if (WARN_ON(!debugfs_initialized())) | 6723 | if (WARN_ON(!debugfs_initialized())) |
| 6533 | return ERR_PTR(-ENODEV); | 6724 | return ERR_PTR(-ENODEV); |
| 6534 | 6725 | ||
| 6535 | tr->dir = debugfs_create_dir("tracing", NULL); | 6726 | /* |
| 6536 | 6727 | * As there may still be users that expect the tracing | |
| 6728 | * files to exist in debugfs/tracing, we must automount | ||
| 6729 | * the tracefs file system there, so older tools still | ||
| 6730 | * work with the newer kernel. | ||
| 6731 | */ | ||
| 6732 | tr->dir = debugfs_create_automount("tracing", NULL, | ||
| 6733 | trace_automount, NULL); | ||
| 6537 | if (!tr->dir) { | 6734 | if (!tr->dir) { |
| 6538 | pr_warn_once("Could not create debugfs directory 'tracing'\n"); | 6735 | pr_warn_once("Could not create debugfs directory 'tracing'\n"); |
| 6539 | return ERR_PTR(-ENOMEM); | 6736 | return ERR_PTR(-ENOMEM); |
| 6540 | } | 6737 | } |
| 6541 | 6738 | ||
| 6542 | return tr->dir; | 6739 | return NULL; |
| 6740 | } | ||
| 6741 | |||
| 6742 | extern struct trace_enum_map *__start_ftrace_enum_maps[]; | ||
| 6743 | extern struct trace_enum_map *__stop_ftrace_enum_maps[]; | ||
| 6744 | |||
| 6745 | static void __init trace_enum_init(void) | ||
| 6746 | { | ||
| 6747 | int len; | ||
| 6748 | |||
| 6749 | len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps; | ||
| 6750 | trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len); | ||
| 6751 | } | ||
| 6752 | |||
| 6753 | #ifdef CONFIG_MODULES | ||
| 6754 | static void trace_module_add_enums(struct module *mod) | ||
| 6755 | { | ||
| 6756 | if (!mod->num_trace_enums) | ||
| 6757 | return; | ||
| 6758 | |||
| 6759 | /* | ||
| 6760 | * Modules with bad taint do not have events created, do | ||
| 6761 | * not bother with enums either. | ||
| 6762 | */ | ||
| 6763 | if (trace_module_has_bad_taint(mod)) | ||
| 6764 | return; | ||
| 6765 | |||
| 6766 | trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums); | ||
| 6543 | } | 6767 | } |
| 6544 | 6768 | ||
| 6545 | static __init int tracer_init_debugfs(void) | 6769 | #ifdef CONFIG_TRACE_ENUM_MAP_FILE |
| 6770 | static void trace_module_remove_enums(struct module *mod) | ||
| 6771 | { | ||
| 6772 | union trace_enum_map_item *map; | ||
| 6773 | union trace_enum_map_item **last = &trace_enum_maps; | ||
| 6774 | |||
| 6775 | if (!mod->num_trace_enums) | ||
| 6776 | return; | ||
| 6777 | |||
| 6778 | mutex_lock(&trace_enum_mutex); | ||
| 6779 | |||
| 6780 | map = trace_enum_maps; | ||
| 6781 | |||
| 6782 | while (map) { | ||
| 6783 | if (map->head.mod == mod) | ||
| 6784 | break; | ||
| 6785 | map = trace_enum_jmp_to_tail(map); | ||
| 6786 | last = &map->tail.next; | ||
| 6787 | map = map->tail.next; | ||
| 6788 | } | ||
| 6789 | if (!map) | ||
| 6790 | goto out; | ||
| 6791 | |||
| 6792 | *last = trace_enum_jmp_to_tail(map)->tail.next; | ||
| 6793 | kfree(map); | ||
| 6794 | out: | ||
| 6795 | mutex_unlock(&trace_enum_mutex); | ||
| 6796 | } | ||
| 6797 | #else | ||
| 6798 | static inline void trace_module_remove_enums(struct module *mod) { } | ||
| 6799 | #endif /* CONFIG_TRACE_ENUM_MAP_FILE */ | ||
| 6800 | |||
| 6801 | static int trace_module_notify(struct notifier_block *self, | ||
| 6802 | unsigned long val, void *data) | ||
| 6803 | { | ||
| 6804 | struct module *mod = data; | ||
| 6805 | |||
| 6806 | switch (val) { | ||
| 6807 | case MODULE_STATE_COMING: | ||
| 6808 | trace_module_add_enums(mod); | ||
| 6809 | break; | ||
| 6810 | case MODULE_STATE_GOING: | ||
| 6811 | trace_module_remove_enums(mod); | ||
| 6812 | break; | ||
| 6813 | } | ||
| 6814 | |||
| 6815 | return 0; | ||
| 6816 | } | ||
| 6817 | |||
| 6818 | static struct notifier_block trace_module_nb = { | ||
| 6819 | .notifier_call = trace_module_notify, | ||
| 6820 | .priority = 0, | ||
| 6821 | }; | ||
| 6822 | #endif /* CONFIG_MODULES */ | ||
| 6823 | |||
| 6824 | static __init int tracer_init_tracefs(void) | ||
| 6546 | { | 6825 | { |
| 6547 | struct dentry *d_tracer; | 6826 | struct dentry *d_tracer; |
| 6548 | 6827 | ||
| @@ -6552,7 +6831,7 @@ static __init int tracer_init_debugfs(void) | |||
| 6552 | if (IS_ERR(d_tracer)) | 6831 | if (IS_ERR(d_tracer)) |
| 6553 | return 0; | 6832 | return 0; |
| 6554 | 6833 | ||
| 6555 | init_tracer_debugfs(&global_trace, d_tracer); | 6834 | init_tracer_tracefs(&global_trace, d_tracer); |
| 6556 | 6835 | ||
| 6557 | trace_create_file("tracing_thresh", 0644, d_tracer, | 6836 | trace_create_file("tracing_thresh", 0644, d_tracer, |
| 6558 | &global_trace, &tracing_thresh_fops); | 6837 | &global_trace, &tracing_thresh_fops); |
| @@ -6566,6 +6845,14 @@ static __init int tracer_init_debugfs(void) | |||
| 6566 | trace_create_file("saved_cmdlines_size", 0644, d_tracer, | 6845 | trace_create_file("saved_cmdlines_size", 0644, d_tracer, |
| 6567 | NULL, &tracing_saved_cmdlines_size_fops); | 6846 | NULL, &tracing_saved_cmdlines_size_fops); |
| 6568 | 6847 | ||
| 6848 | trace_enum_init(); | ||
| 6849 | |||
| 6850 | trace_create_enum_file(d_tracer); | ||
| 6851 | |||
| 6852 | #ifdef CONFIG_MODULES | ||
| 6853 | register_module_notifier(&trace_module_nb); | ||
| 6854 | #endif | ||
| 6855 | |||
| 6569 | #ifdef CONFIG_DYNAMIC_FTRACE | 6856 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 6570 | trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, | 6857 | trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, |
| 6571 | &ftrace_update_tot_cnt, &tracing_dyn_info_fops); | 6858 | &ftrace_update_tot_cnt, &tracing_dyn_info_fops); |
| @@ -6575,6 +6862,10 @@ static __init int tracer_init_debugfs(void) | |||
| 6575 | 6862 | ||
| 6576 | create_trace_options_dir(&global_trace); | 6863 | create_trace_options_dir(&global_trace); |
| 6577 | 6864 | ||
| 6865 | /* If the tracer was started via cmdline, create options for it here */ | ||
| 6866 | if (global_trace.current_trace != &nop_trace) | ||
| 6867 | update_tracer_options(&global_trace, global_trace.current_trace); | ||
| 6868 | |||
| 6578 | return 0; | 6869 | return 0; |
| 6579 | } | 6870 | } |
| 6580 | 6871 | ||
| @@ -6888,7 +7179,7 @@ void __init trace_init(void) | |||
| 6888 | tracepoint_printk = 0; | 7179 | tracepoint_printk = 0; |
| 6889 | } | 7180 | } |
| 6890 | tracer_alloc_buffers(); | 7181 | tracer_alloc_buffers(); |
| 6891 | trace_event_init(); | 7182 | trace_event_init(); |
| 6892 | } | 7183 | } |
| 6893 | 7184 | ||
| 6894 | __init static int clear_boot_tracer(void) | 7185 | __init static int clear_boot_tracer(void) |
| @@ -6910,5 +7201,5 @@ __init static int clear_boot_tracer(void) | |||
| 6910 | return 0; | 7201 | return 0; |
| 6911 | } | 7202 | } |
| 6912 | 7203 | ||
| 6913 | fs_initcall(tracer_init_debugfs); | 7204 | fs_initcall(tracer_init_tracefs); |
| 6914 | late_initcall(clear_boot_tracer); | 7205 | late_initcall(clear_boot_tracer); |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index dd8205a35760..d2612016de94 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
| @@ -334,7 +334,7 @@ struct tracer_flags { | |||
| 334 | 334 | ||
| 335 | 335 | ||
| 336 | /** | 336 | /** |
| 337 | * struct tracer - a specific tracer and its callbacks to interact with debugfs | 337 | * struct tracer - a specific tracer and its callbacks to interact with tracefs |
| 338 | * @name: the name chosen to select it on the available_tracers file | 338 | * @name: the name chosen to select it on the available_tracers file |
| 339 | * @init: called when one switches to this tracer (echo name > current_tracer) | 339 | * @init: called when one switches to this tracer (echo name > current_tracer) |
| 340 | * @reset: called when one switches to another tracer | 340 | * @reset: called when one switches to another tracer |
| @@ -1309,8 +1309,10 @@ static inline void init_ftrace_syscalls(void) { } | |||
| 1309 | 1309 | ||
| 1310 | #ifdef CONFIG_EVENT_TRACING | 1310 | #ifdef CONFIG_EVENT_TRACING |
| 1311 | void trace_event_init(void); | 1311 | void trace_event_init(void); |
| 1312 | void trace_event_enum_update(struct trace_enum_map **map, int len); | ||
| 1312 | #else | 1313 | #else |
| 1313 | static inline void __init trace_event_init(void) { } | 1314 | static inline void __init trace_event_init(void) { } |
| 1315 | static inline void trace_event_enum_update(struct trace_enum_map **map, int len) { } | ||
| 1314 | #endif | 1316 | #endif |
| 1315 | 1317 | ||
| 1316 | extern struct trace_iterator *tracepoint_print_iter; | 1318 | extern struct trace_iterator *tracepoint_print_iter; |
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h index e2d027ac66a2..ee7b94a4810a 100644 --- a/kernel/trace/trace_entries.h +++ b/kernel/trace/trace_entries.h | |||
| @@ -223,7 +223,7 @@ FTRACE_ENTRY(bprint, bprint_entry, | |||
| 223 | __dynamic_array( u32, buf ) | 223 | __dynamic_array( u32, buf ) |
| 224 | ), | 224 | ), |
| 225 | 225 | ||
| 226 | F_printk("%pf: %s", | 226 | F_printk("%ps: %s", |
| 227 | (void *)__entry->ip, __entry->fmt), | 227 | (void *)__entry->ip, __entry->fmt), |
| 228 | 228 | ||
| 229 | FILTER_OTHER | 229 | FILTER_OTHER |
| @@ -238,7 +238,7 @@ FTRACE_ENTRY(print, print_entry, | |||
| 238 | __dynamic_array( char, buf ) | 238 | __dynamic_array( char, buf ) |
| 239 | ), | 239 | ), |
| 240 | 240 | ||
| 241 | F_printk("%pf: %s", | 241 | F_printk("%ps: %s", |
| 242 | (void *)__entry->ip, __entry->buf), | 242 | (void *)__entry->ip, __entry->buf), |
| 243 | 243 | ||
| 244 | FILTER_OTHER | 244 | FILTER_OTHER |
| @@ -253,7 +253,7 @@ FTRACE_ENTRY(bputs, bputs_entry, | |||
| 253 | __field( const char *, str ) | 253 | __field( const char *, str ) |
| 254 | ), | 254 | ), |
| 255 | 255 | ||
| 256 | F_printk("%pf: %s", | 256 | F_printk("%ps: %s", |
| 257 | (void *)__entry->ip, __entry->str), | 257 | (void *)__entry->ip, __entry->str), |
| 258 | 258 | ||
| 259 | FILTER_OTHER | 259 | FILTER_OTHER |
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index db54dda10ccc..7da1dfeb322e 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
| @@ -13,7 +13,7 @@ | |||
| 13 | #include <linux/workqueue.h> | 13 | #include <linux/workqueue.h> |
| 14 | #include <linux/spinlock.h> | 14 | #include <linux/spinlock.h> |
| 15 | #include <linux/kthread.h> | 15 | #include <linux/kthread.h> |
| 16 | #include <linux/debugfs.h> | 16 | #include <linux/tracefs.h> |
| 17 | #include <linux/uaccess.h> | 17 | #include <linux/uaccess.h> |
| 18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
| 19 | #include <linux/ctype.h> | 19 | #include <linux/ctype.h> |
| @@ -480,7 +480,7 @@ static void remove_subsystem(struct ftrace_subsystem_dir *dir) | |||
| 480 | return; | 480 | return; |
| 481 | 481 | ||
| 482 | if (!--dir->nr_events) { | 482 | if (!--dir->nr_events) { |
| 483 | debugfs_remove_recursive(dir->entry); | 483 | tracefs_remove_recursive(dir->entry); |
| 484 | list_del(&dir->list); | 484 | list_del(&dir->list); |
| 485 | __put_system_dir(dir); | 485 | __put_system_dir(dir); |
| 486 | } | 486 | } |
| @@ -499,7 +499,7 @@ static void remove_event_file_dir(struct ftrace_event_file *file) | |||
| 499 | } | 499 | } |
| 500 | spin_unlock(&dir->d_lock); | 500 | spin_unlock(&dir->d_lock); |
| 501 | 501 | ||
| 502 | debugfs_remove_recursive(dir); | 502 | tracefs_remove_recursive(dir); |
| 503 | } | 503 | } |
| 504 | 504 | ||
| 505 | list_del(&file->list); | 505 | list_del(&file->list); |
| @@ -1526,7 +1526,7 @@ event_subsystem_dir(struct trace_array *tr, const char *name, | |||
| 1526 | } else | 1526 | } else |
| 1527 | __get_system(system); | 1527 | __get_system(system); |
| 1528 | 1528 | ||
| 1529 | dir->entry = debugfs_create_dir(name, parent); | 1529 | dir->entry = tracefs_create_dir(name, parent); |
| 1530 | if (!dir->entry) { | 1530 | if (!dir->entry) { |
| 1531 | pr_warn("Failed to create system directory %s\n", name); | 1531 | pr_warn("Failed to create system directory %s\n", name); |
| 1532 | __put_system(system); | 1532 | __put_system(system); |
| @@ -1539,12 +1539,12 @@ event_subsystem_dir(struct trace_array *tr, const char *name, | |||
| 1539 | dir->subsystem = system; | 1539 | dir->subsystem = system; |
| 1540 | file->system = dir; | 1540 | file->system = dir; |
| 1541 | 1541 | ||
| 1542 | entry = debugfs_create_file("filter", 0644, dir->entry, dir, | 1542 | entry = tracefs_create_file("filter", 0644, dir->entry, dir, |
| 1543 | &ftrace_subsystem_filter_fops); | 1543 | &ftrace_subsystem_filter_fops); |
| 1544 | if (!entry) { | 1544 | if (!entry) { |
| 1545 | kfree(system->filter); | 1545 | kfree(system->filter); |
| 1546 | system->filter = NULL; | 1546 | system->filter = NULL; |
| 1547 | pr_warn("Could not create debugfs '%s/filter' entry\n", name); | 1547 | pr_warn("Could not create tracefs '%s/filter' entry\n", name); |
| 1548 | } | 1548 | } |
| 1549 | 1549 | ||
| 1550 | trace_create_file("enable", 0644, dir->entry, dir, | 1550 | trace_create_file("enable", 0644, dir->entry, dir, |
| @@ -1585,9 +1585,9 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file) | |||
| 1585 | d_events = parent; | 1585 | d_events = parent; |
| 1586 | 1586 | ||
| 1587 | name = ftrace_event_name(call); | 1587 | name = ftrace_event_name(call); |
| 1588 | file->dir = debugfs_create_dir(name, d_events); | 1588 | file->dir = tracefs_create_dir(name, d_events); |
| 1589 | if (!file->dir) { | 1589 | if (!file->dir) { |
| 1590 | pr_warn("Could not create debugfs '%s' directory\n", name); | 1590 | pr_warn("Could not create tracefs '%s' directory\n", name); |
| 1591 | return -1; | 1591 | return -1; |
| 1592 | } | 1592 | } |
| 1593 | 1593 | ||
| @@ -1704,6 +1704,125 @@ __register_event(struct ftrace_event_call *call, struct module *mod) | |||
| 1704 | return 0; | 1704 | return 0; |
| 1705 | } | 1705 | } |
| 1706 | 1706 | ||
| 1707 | static char *enum_replace(char *ptr, struct trace_enum_map *map, int len) | ||
| 1708 | { | ||
| 1709 | int rlen; | ||
| 1710 | int elen; | ||
| 1711 | |||
| 1712 | /* Find the length of the enum value as a string */ | ||
| 1713 | elen = snprintf(ptr, 0, "%ld", map->enum_value); | ||
| 1714 | /* Make sure there's enough room to replace the string with the value */ | ||
| 1715 | if (len < elen) | ||
| 1716 | return NULL; | ||
| 1717 | |||
| 1718 | snprintf(ptr, elen + 1, "%ld", map->enum_value); | ||
| 1719 | |||
| 1720 | /* Get the rest of the string of ptr */ | ||
| 1721 | rlen = strlen(ptr + len); | ||
| 1722 | memmove(ptr + elen, ptr + len, rlen); | ||
| 1723 | /* Make sure we end the new string */ | ||
| 1724 | ptr[elen + rlen] = 0; | ||
| 1725 | |||
| 1726 | return ptr + elen; | ||
| 1727 | } | ||
| 1728 | |||
| 1729 | static void update_event_printk(struct ftrace_event_call *call, | ||
| 1730 | struct trace_enum_map *map) | ||
| 1731 | { | ||
| 1732 | char *ptr; | ||
| 1733 | int quote = 0; | ||
| 1734 | int len = strlen(map->enum_string); | ||
| 1735 | |||
| 1736 | for (ptr = call->print_fmt; *ptr; ptr++) { | ||
| 1737 | if (*ptr == '\\') { | ||
| 1738 | ptr++; | ||
| 1739 | /* paranoid */ | ||
| 1740 | if (!*ptr) | ||
| 1741 | break; | ||
| 1742 | continue; | ||
| 1743 | } | ||
| 1744 | if (*ptr == '"') { | ||
| 1745 | quote ^= 1; | ||
| 1746 | continue; | ||
| 1747 | } | ||
| 1748 | if (quote) | ||
| 1749 | continue; | ||
| 1750 | if (isdigit(*ptr)) { | ||
| 1751 | /* skip numbers */ | ||
| 1752 | do { | ||
| 1753 | ptr++; | ||
| 1754 | /* Check for alpha chars like ULL */ | ||
| 1755 | } while (isalnum(*ptr)); | ||
| 1756 | /* | ||
| 1757 | * A number must have some kind of delimiter after | ||
| 1758 | * it, and we can ignore that too. | ||
| 1759 | */ | ||
| 1760 | continue; | ||
| 1761 | } | ||
| 1762 | if (isalpha(*ptr) || *ptr == '_') { | ||
| 1763 | if (strncmp(map->enum_string, ptr, len) == 0 && | ||
| 1764 | !isalnum(ptr[len]) && ptr[len] != '_') { | ||
| 1765 | ptr = enum_replace(ptr, map, len); | ||
| 1766 | /* Hmm, enum string smaller than value */ | ||
| 1767 | if (WARN_ON_ONCE(!ptr)) | ||
| 1768 | return; | ||
| 1769 | /* | ||
| 1770 | * No need to decrement here, as enum_replace() | ||
| 1771 | * returns the pointer to the character passed | ||
| 1772 | * the enum, and two enums can not be placed | ||
| 1773 | * back to back without something in between. | ||
| 1774 | * We can skip that something in between. | ||
| 1775 | */ | ||
| 1776 | continue; | ||
| 1777 | } | ||
| 1778 | skip_more: | ||
| 1779 | do { | ||
| 1780 | ptr++; | ||
| 1781 | } while (isalnum(*ptr) || *ptr == '_'); | ||
| 1782 | /* | ||
| 1783 | * If what comes after this variable is a '.' or | ||
| 1784 | * '->' then we can continue to ignore that string. | ||
| 1785 | */ | ||
| 1786 | if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) { | ||
| 1787 | ptr += *ptr == '.' ? 1 : 2; | ||
| 1788 | goto skip_more; | ||
| 1789 | } | ||
| 1790 | /* | ||
| 1791 | * Once again, we can skip the delimiter that came | ||
| 1792 | * after the string. | ||
| 1793 | */ | ||
| 1794 | continue; | ||
| 1795 | } | ||
| 1796 | } | ||
| 1797 | } | ||
| 1798 | |||
| 1799 | void trace_event_enum_update(struct trace_enum_map **map, int len) | ||
| 1800 | { | ||
| 1801 | struct ftrace_event_call *call, *p; | ||
| 1802 | const char *last_system = NULL; | ||
| 1803 | int last_i; | ||
| 1804 | int i; | ||
| 1805 | |||
| 1806 | down_write(&trace_event_sem); | ||
| 1807 | list_for_each_entry_safe(call, p, &ftrace_events, list) { | ||
| 1808 | /* events are usually grouped together with systems */ | ||
| 1809 | if (!last_system || call->class->system != last_system) { | ||
| 1810 | last_i = 0; | ||
| 1811 | last_system = call->class->system; | ||
| 1812 | } | ||
| 1813 | |||
| 1814 | for (i = last_i; i < len; i++) { | ||
| 1815 | if (call->class->system == map[i]->system) { | ||
| 1816 | /* Save the first system if need be */ | ||
| 1817 | if (!last_i) | ||
| 1818 | last_i = i; | ||
| 1819 | update_event_printk(call, map[i]); | ||
| 1820 | } | ||
| 1821 | } | ||
| 1822 | } | ||
| 1823 | up_write(&trace_event_sem); | ||
| 1824 | } | ||
| 1825 | |||
| 1707 | static struct ftrace_event_file * | 1826 | static struct ftrace_event_file * |
| 1708 | trace_create_new_event(struct ftrace_event_call *call, | 1827 | trace_create_new_event(struct ftrace_event_call *call, |
| 1709 | struct trace_array *tr) | 1828 | struct trace_array *tr) |
| @@ -1915,7 +2034,7 @@ static int trace_module_notify(struct notifier_block *self, | |||
| 1915 | 2034 | ||
| 1916 | static struct notifier_block trace_module_nb = { | 2035 | static struct notifier_block trace_module_nb = { |
| 1917 | .notifier_call = trace_module_notify, | 2036 | .notifier_call = trace_module_notify, |
| 1918 | .priority = 0, | 2037 | .priority = 1, /* higher than trace.c module notify */ |
| 1919 | }; | 2038 | }; |
| 1920 | #endif /* CONFIG_MODULES */ | 2039 | #endif /* CONFIG_MODULES */ |
| 1921 | 2040 | ||
| @@ -2228,7 +2347,7 @@ static inline int register_event_cmds(void) { return 0; } | |||
| 2228 | /* | 2347 | /* |
| 2229 | * The top level array has already had its ftrace_event_file | 2348 | * The top level array has already had its ftrace_event_file |
| 2230 | * descriptors created in order to allow for early events to | 2349 | * descriptors created in order to allow for early events to |
| 2231 | * be recorded. This function is called after the debugfs has been | 2350 | * be recorded. This function is called after the tracefs has been |
| 2232 | * initialized, and we now have to create the files associated | 2351 | * initialized, and we now have to create the files associated |
| 2233 | * to the events. | 2352 | * to the events. |
| 2234 | */ | 2353 | */ |
| @@ -2311,16 +2430,16 @@ create_event_toplevel_files(struct dentry *parent, struct trace_array *tr) | |||
| 2311 | struct dentry *d_events; | 2430 | struct dentry *d_events; |
| 2312 | struct dentry *entry; | 2431 | struct dentry *entry; |
| 2313 | 2432 | ||
| 2314 | entry = debugfs_create_file("set_event", 0644, parent, | 2433 | entry = tracefs_create_file("set_event", 0644, parent, |
| 2315 | tr, &ftrace_set_event_fops); | 2434 | tr, &ftrace_set_event_fops); |
| 2316 | if (!entry) { | 2435 | if (!entry) { |
| 2317 | pr_warn("Could not create debugfs 'set_event' entry\n"); | 2436 | pr_warn("Could not create tracefs 'set_event' entry\n"); |
| 2318 | return -ENOMEM; | 2437 | return -ENOMEM; |
| 2319 | } | 2438 | } |
| 2320 | 2439 | ||
| 2321 | d_events = debugfs_create_dir("events", parent); | 2440 | d_events = tracefs_create_dir("events", parent); |
| 2322 | if (!d_events) { | 2441 | if (!d_events) { |
| 2323 | pr_warn("Could not create debugfs 'events' directory\n"); | 2442 | pr_warn("Could not create tracefs 'events' directory\n"); |
| 2324 | return -ENOMEM; | 2443 | return -ENOMEM; |
| 2325 | } | 2444 | } |
| 2326 | 2445 | ||
| @@ -2412,7 +2531,7 @@ int event_trace_del_tracer(struct trace_array *tr) | |||
| 2412 | 2531 | ||
| 2413 | down_write(&trace_event_sem); | 2532 | down_write(&trace_event_sem); |
| 2414 | __trace_remove_event_dirs(tr); | 2533 | __trace_remove_event_dirs(tr); |
| 2415 | debugfs_remove_recursive(tr->event_dir); | 2534 | tracefs_remove_recursive(tr->event_dir); |
| 2416 | up_write(&trace_event_sem); | 2535 | up_write(&trace_event_sem); |
| 2417 | 2536 | ||
| 2418 | tr->event_dir = NULL; | 2537 | tr->event_dir = NULL; |
| @@ -2534,10 +2653,10 @@ static __init int event_trace_init(void) | |||
| 2534 | if (IS_ERR(d_tracer)) | 2653 | if (IS_ERR(d_tracer)) |
| 2535 | return 0; | 2654 | return 0; |
| 2536 | 2655 | ||
| 2537 | entry = debugfs_create_file("available_events", 0444, d_tracer, | 2656 | entry = tracefs_create_file("available_events", 0444, d_tracer, |
| 2538 | tr, &ftrace_avail_fops); | 2657 | tr, &ftrace_avail_fops); |
| 2539 | if (!entry) | 2658 | if (!entry) |
| 2540 | pr_warn("Could not create debugfs 'available_events' entry\n"); | 2659 | pr_warn("Could not create tracefs 'available_events' entry\n"); |
| 2541 | 2660 | ||
| 2542 | if (trace_define_common_fields()) | 2661 | if (trace_define_common_fields()) |
| 2543 | pr_warn("tracing: Failed to allocate common fields"); | 2662 | pr_warn("tracing: Failed to allocate common fields"); |
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index 12e2b99be862..174a6a71146c 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c | |||
| @@ -177,7 +177,7 @@ struct ftrace_event_call __used event_##call = { \ | |||
| 177 | }, \ | 177 | }, \ |
| 178 | .event.type = etype, \ | 178 | .event.type = etype, \ |
| 179 | .print_fmt = print, \ | 179 | .print_fmt = print, \ |
| 180 | .flags = TRACE_EVENT_FL_IGNORE_ENABLE | TRACE_EVENT_FL_USE_CALL_FILTER, \ | 180 | .flags = TRACE_EVENT_FL_IGNORE_ENABLE, \ |
| 181 | }; \ | 181 | }; \ |
| 182 | struct ftrace_event_call __used \ | 182 | struct ftrace_event_call __used \ |
| 183 | __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call; | 183 | __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call; |
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 2d25ad1526bb..9cfea4c6d314 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
| @@ -6,7 +6,6 @@ | |||
| 6 | * is Copyright (c) Steven Rostedt <srostedt@redhat.com> | 6 | * is Copyright (c) Steven Rostedt <srostedt@redhat.com> |
| 7 | * | 7 | * |
| 8 | */ | 8 | */ |
| 9 | #include <linux/debugfs.h> | ||
| 10 | #include <linux/uaccess.h> | 9 | #include <linux/uaccess.h> |
| 11 | #include <linux/ftrace.h> | 10 | #include <linux/ftrace.h> |
| 12 | #include <linux/slab.h> | 11 | #include <linux/slab.h> |
| @@ -151,7 +150,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth, | |||
| 151 | * The curr_ret_stack is initialized to -1 and get increased | 150 | * The curr_ret_stack is initialized to -1 and get increased |
| 152 | * in this function. So it can be less than -1 only if it was | 151 | * in this function. So it can be less than -1 only if it was |
| 153 | * filtered out via ftrace_graph_notrace_addr() which can be | 152 | * filtered out via ftrace_graph_notrace_addr() which can be |
| 154 | * set from set_graph_notrace file in debugfs by user. | 153 | * set from set_graph_notrace file in tracefs by user. |
| 155 | */ | 154 | */ |
| 156 | if (current->curr_ret_stack < -1) | 155 | if (current->curr_ret_stack < -1) |
| 157 | return -EBUSY; | 156 | return -EBUSY; |
| @@ -1432,7 +1431,7 @@ static const struct file_operations graph_depth_fops = { | |||
| 1432 | .llseek = generic_file_llseek, | 1431 | .llseek = generic_file_llseek, |
| 1433 | }; | 1432 | }; |
| 1434 | 1433 | ||
| 1435 | static __init int init_graph_debugfs(void) | 1434 | static __init int init_graph_tracefs(void) |
| 1436 | { | 1435 | { |
| 1437 | struct dentry *d_tracer; | 1436 | struct dentry *d_tracer; |
| 1438 | 1437 | ||
| @@ -1445,7 +1444,7 @@ static __init int init_graph_debugfs(void) | |||
| 1445 | 1444 | ||
| 1446 | return 0; | 1445 | return 0; |
| 1447 | } | 1446 | } |
| 1448 | fs_initcall(init_graph_debugfs); | 1447 | fs_initcall(init_graph_tracefs); |
| 1449 | 1448 | ||
| 1450 | static __init int init_graph_trace(void) | 1449 | static __init int init_graph_trace(void) |
| 1451 | { | 1450 | { |
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index d73f565b4e06..d0ce590f06e1 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
| @@ -250,7 +250,7 @@ DEFINE_FETCH_symbol(string_size) | |||
| 250 | #define fetch_file_offset_string_size NULL | 250 | #define fetch_file_offset_string_size NULL |
| 251 | 251 | ||
| 252 | /* Fetch type information table */ | 252 | /* Fetch type information table */ |
| 253 | const struct fetch_type kprobes_fetch_type_table[] = { | 253 | static const struct fetch_type kprobes_fetch_type_table[] = { |
| 254 | /* Special types */ | 254 | /* Special types */ |
| 255 | [FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string, | 255 | [FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string, |
| 256 | sizeof(u32), 1, "__data_loc char[]"), | 256 | sizeof(u32), 1, "__data_loc char[]"), |
| @@ -760,7 +760,8 @@ static int create_trace_kprobe(int argc, char **argv) | |||
| 760 | 760 | ||
| 761 | /* Parse fetch argument */ | 761 | /* Parse fetch argument */ |
| 762 | ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg, | 762 | ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg, |
| 763 | is_return, true); | 763 | is_return, true, |
| 764 | kprobes_fetch_type_table); | ||
| 764 | if (ret) { | 765 | if (ret) { |
| 765 | pr_info("Parse error at argument[%d]. (%d)\n", i, ret); | 766 | pr_info("Parse error at argument[%d]. (%d)\n", i, ret); |
| 766 | goto error; | 767 | goto error; |
| @@ -1134,11 +1135,15 @@ static void | |||
| 1134 | kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs) | 1135 | kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs) |
| 1135 | { | 1136 | { |
| 1136 | struct ftrace_event_call *call = &tk->tp.call; | 1137 | struct ftrace_event_call *call = &tk->tp.call; |
| 1138 | struct bpf_prog *prog = call->prog; | ||
| 1137 | struct kprobe_trace_entry_head *entry; | 1139 | struct kprobe_trace_entry_head *entry; |
| 1138 | struct hlist_head *head; | 1140 | struct hlist_head *head; |
| 1139 | int size, __size, dsize; | 1141 | int size, __size, dsize; |
| 1140 | int rctx; | 1142 | int rctx; |
| 1141 | 1143 | ||
| 1144 | if (prog && !trace_call_bpf(prog, regs)) | ||
| 1145 | return; | ||
| 1146 | |||
| 1142 | head = this_cpu_ptr(call->perf_events); | 1147 | head = this_cpu_ptr(call->perf_events); |
| 1143 | if (hlist_empty(head)) | 1148 | if (hlist_empty(head)) |
| 1144 | return; | 1149 | return; |
| @@ -1165,11 +1170,15 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, | |||
| 1165 | struct pt_regs *regs) | 1170 | struct pt_regs *regs) |
| 1166 | { | 1171 | { |
| 1167 | struct ftrace_event_call *call = &tk->tp.call; | 1172 | struct ftrace_event_call *call = &tk->tp.call; |
| 1173 | struct bpf_prog *prog = call->prog; | ||
| 1168 | struct kretprobe_trace_entry_head *entry; | 1174 | struct kretprobe_trace_entry_head *entry; |
| 1169 | struct hlist_head *head; | 1175 | struct hlist_head *head; |
| 1170 | int size, __size, dsize; | 1176 | int size, __size, dsize; |
| 1171 | int rctx; | 1177 | int rctx; |
| 1172 | 1178 | ||
| 1179 | if (prog && !trace_call_bpf(prog, regs)) | ||
| 1180 | return; | ||
| 1181 | |||
| 1173 | head = this_cpu_ptr(call->perf_events); | 1182 | head = this_cpu_ptr(call->perf_events); |
| 1174 | if (hlist_empty(head)) | 1183 | if (hlist_empty(head)) |
| 1175 | return; | 1184 | return; |
| @@ -1286,7 +1295,7 @@ static int register_kprobe_event(struct trace_kprobe *tk) | |||
| 1286 | kfree(call->print_fmt); | 1295 | kfree(call->print_fmt); |
| 1287 | return -ENODEV; | 1296 | return -ENODEV; |
| 1288 | } | 1297 | } |
| 1289 | call->flags = 0; | 1298 | call->flags = TRACE_EVENT_FL_KPROBE; |
| 1290 | call->class->reg = kprobe_register; | 1299 | call->class->reg = kprobe_register; |
| 1291 | call->data = tk; | 1300 | call->data = tk; |
| 1292 | ret = trace_add_event_call(call); | 1301 | ret = trace_add_event_call(call); |
| @@ -1310,7 +1319,7 @@ static int unregister_kprobe_event(struct trace_kprobe *tk) | |||
| 1310 | return ret; | 1319 | return ret; |
| 1311 | } | 1320 | } |
| 1312 | 1321 | ||
| 1313 | /* Make a debugfs interface for controlling probe points */ | 1322 | /* Make a tracefs interface for controlling probe points */ |
| 1314 | static __init int init_kprobe_trace(void) | 1323 | static __init int init_kprobe_trace(void) |
| 1315 | { | 1324 | { |
| 1316 | struct dentry *d_tracer; | 1325 | struct dentry *d_tracer; |
| @@ -1323,20 +1332,20 @@ static __init int init_kprobe_trace(void) | |||
| 1323 | if (IS_ERR(d_tracer)) | 1332 | if (IS_ERR(d_tracer)) |
| 1324 | return 0; | 1333 | return 0; |
| 1325 | 1334 | ||
| 1326 | entry = debugfs_create_file("kprobe_events", 0644, d_tracer, | 1335 | entry = tracefs_create_file("kprobe_events", 0644, d_tracer, |
| 1327 | NULL, &kprobe_events_ops); | 1336 | NULL, &kprobe_events_ops); |
| 1328 | 1337 | ||
| 1329 | /* Event list interface */ | 1338 | /* Event list interface */ |
| 1330 | if (!entry) | 1339 | if (!entry) |
| 1331 | pr_warning("Could not create debugfs " | 1340 | pr_warning("Could not create tracefs " |
| 1332 | "'kprobe_events' entry\n"); | 1341 | "'kprobe_events' entry\n"); |
| 1333 | 1342 | ||
| 1334 | /* Profile interface */ | 1343 | /* Profile interface */ |
| 1335 | entry = debugfs_create_file("kprobe_profile", 0444, d_tracer, | 1344 | entry = tracefs_create_file("kprobe_profile", 0444, d_tracer, |
| 1336 | NULL, &kprobe_profile_ops); | 1345 | NULL, &kprobe_profile_ops); |
| 1337 | 1346 | ||
| 1338 | if (!entry) | 1347 | if (!entry) |
| 1339 | pr_warning("Could not create debugfs " | 1348 | pr_warning("Could not create tracefs " |
| 1340 | "'kprobe_profile' entry\n"); | 1349 | "'kprobe_profile' entry\n"); |
| 1341 | return 0; | 1350 | return 0; |
| 1342 | } | 1351 | } |
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index b983b2fd2ca1..1769a81da8a7 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c | |||
| @@ -356,17 +356,14 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t, | |||
| 356 | 356 | ||
| 357 | /* Recursive argument parser */ | 357 | /* Recursive argument parser */ |
| 358 | static int parse_probe_arg(char *arg, const struct fetch_type *t, | 358 | static int parse_probe_arg(char *arg, const struct fetch_type *t, |
| 359 | struct fetch_param *f, bool is_return, bool is_kprobe) | 359 | struct fetch_param *f, bool is_return, bool is_kprobe, |
| 360 | const struct fetch_type *ftbl) | ||
| 360 | { | 361 | { |
| 361 | const struct fetch_type *ftbl; | ||
| 362 | unsigned long param; | 362 | unsigned long param; |
| 363 | long offset; | 363 | long offset; |
| 364 | char *tmp; | 364 | char *tmp; |
| 365 | int ret = 0; | 365 | int ret = 0; |
| 366 | 366 | ||
| 367 | ftbl = is_kprobe ? kprobes_fetch_type_table : uprobes_fetch_type_table; | ||
| 368 | BUG_ON(ftbl == NULL); | ||
| 369 | |||
| 370 | switch (arg[0]) { | 367 | switch (arg[0]) { |
| 371 | case '$': | 368 | case '$': |
| 372 | ret = parse_probe_vars(arg + 1, t, f, is_return, is_kprobe); | 369 | ret = parse_probe_vars(arg + 1, t, f, is_return, is_kprobe); |
| @@ -447,7 +444,7 @@ static int parse_probe_arg(char *arg, const struct fetch_type *t, | |||
| 447 | dprm->fetch_size = get_fetch_size_function(t, | 444 | dprm->fetch_size = get_fetch_size_function(t, |
| 448 | dprm->fetch, ftbl); | 445 | dprm->fetch, ftbl); |
| 449 | ret = parse_probe_arg(arg, t2, &dprm->orig, is_return, | 446 | ret = parse_probe_arg(arg, t2, &dprm->orig, is_return, |
| 450 | is_kprobe); | 447 | is_kprobe, ftbl); |
| 451 | if (ret) | 448 | if (ret) |
| 452 | kfree(dprm); | 449 | kfree(dprm); |
| 453 | else { | 450 | else { |
| @@ -505,15 +502,12 @@ static int __parse_bitfield_probe_arg(const char *bf, | |||
| 505 | 502 | ||
| 506 | /* String length checking wrapper */ | 503 | /* String length checking wrapper */ |
| 507 | int traceprobe_parse_probe_arg(char *arg, ssize_t *size, | 504 | int traceprobe_parse_probe_arg(char *arg, ssize_t *size, |
| 508 | struct probe_arg *parg, bool is_return, bool is_kprobe) | 505 | struct probe_arg *parg, bool is_return, bool is_kprobe, |
| 506 | const struct fetch_type *ftbl) | ||
| 509 | { | 507 | { |
| 510 | const struct fetch_type *ftbl; | ||
| 511 | const char *t; | 508 | const char *t; |
| 512 | int ret; | 509 | int ret; |
| 513 | 510 | ||
| 514 | ftbl = is_kprobe ? kprobes_fetch_type_table : uprobes_fetch_type_table; | ||
| 515 | BUG_ON(ftbl == NULL); | ||
| 516 | |||
| 517 | if (strlen(arg) > MAX_ARGSTR_LEN) { | 511 | if (strlen(arg) > MAX_ARGSTR_LEN) { |
| 518 | pr_info("Argument is too long.: %s\n", arg); | 512 | pr_info("Argument is too long.: %s\n", arg); |
| 519 | return -ENOSPC; | 513 | return -ENOSPC; |
| @@ -535,7 +529,8 @@ int traceprobe_parse_probe_arg(char *arg, ssize_t *size, | |||
| 535 | } | 529 | } |
| 536 | parg->offset = *size; | 530 | parg->offset = *size; |
| 537 | *size += parg->type->size; | 531 | *size += parg->type->size; |
| 538 | ret = parse_probe_arg(arg, parg->type, &parg->fetch, is_return, is_kprobe); | 532 | ret = parse_probe_arg(arg, parg->type, &parg->fetch, is_return, |
| 533 | is_kprobe, ftbl); | ||
| 539 | 534 | ||
| 540 | if (ret >= 0 && t != NULL) | 535 | if (ret >= 0 && t != NULL) |
| 541 | ret = __parse_bitfield_probe_arg(t, parg->type, &parg->fetch); | 536 | ret = __parse_bitfield_probe_arg(t, parg->type, &parg->fetch); |
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index 4f815fbce16d..ab283e146b70 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h | |||
| @@ -25,7 +25,7 @@ | |||
| 25 | #include <linux/seq_file.h> | 25 | #include <linux/seq_file.h> |
| 26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
| 27 | #include <linux/smp.h> | 27 | #include <linux/smp.h> |
| 28 | #include <linux/debugfs.h> | 28 | #include <linux/tracefs.h> |
| 29 | #include <linux/types.h> | 29 | #include <linux/types.h> |
| 30 | #include <linux/string.h> | 30 | #include <linux/string.h> |
| 31 | #include <linux/ctype.h> | 31 | #include <linux/ctype.h> |
| @@ -229,13 +229,6 @@ ASSIGN_FETCH_FUNC(file_offset, ftype), \ | |||
| 229 | #define FETCH_TYPE_STRING 0 | 229 | #define FETCH_TYPE_STRING 0 |
| 230 | #define FETCH_TYPE_STRSIZE 1 | 230 | #define FETCH_TYPE_STRSIZE 1 |
| 231 | 231 | ||
| 232 | /* | ||
| 233 | * Fetch type information table. | ||
| 234 | * It's declared as a weak symbol due to conditional compilation. | ||
| 235 | */ | ||
| 236 | extern __weak const struct fetch_type kprobes_fetch_type_table[]; | ||
| 237 | extern __weak const struct fetch_type uprobes_fetch_type_table[]; | ||
| 238 | |||
| 239 | #ifdef CONFIG_KPROBE_EVENT | 232 | #ifdef CONFIG_KPROBE_EVENT |
| 240 | struct symbol_cache; | 233 | struct symbol_cache; |
| 241 | unsigned long update_symbol_cache(struct symbol_cache *sc); | 234 | unsigned long update_symbol_cache(struct symbol_cache *sc); |
| @@ -333,7 +326,8 @@ find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file) | |||
| 333 | } | 326 | } |
| 334 | 327 | ||
| 335 | extern int traceprobe_parse_probe_arg(char *arg, ssize_t *size, | 328 | extern int traceprobe_parse_probe_arg(char *arg, ssize_t *size, |
| 336 | struct probe_arg *parg, bool is_return, bool is_kprobe); | 329 | struct probe_arg *parg, bool is_return, bool is_kprobe, |
| 330 | const struct fetch_type *ftbl); | ||
| 337 | 331 | ||
| 338 | extern int traceprobe_conflict_field_name(const char *name, | 332 | extern int traceprobe_conflict_field_name(const char *name, |
| 339 | struct probe_arg *args, int narg); | 333 | struct probe_arg *args, int narg); |
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index c3e4fcfddd45..3f34496244e9 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
| @@ -327,11 +327,11 @@ static void t_stop(struct seq_file *m, void *p) | |||
| 327 | local_irq_enable(); | 327 | local_irq_enable(); |
| 328 | } | 328 | } |
| 329 | 329 | ||
| 330 | static int trace_lookup_stack(struct seq_file *m, long i) | 330 | static void trace_lookup_stack(struct seq_file *m, long i) |
| 331 | { | 331 | { |
| 332 | unsigned long addr = stack_dump_trace[i]; | 332 | unsigned long addr = stack_dump_trace[i]; |
| 333 | 333 | ||
| 334 | return seq_printf(m, "%pS\n", (void *)addr); | 334 | seq_printf(m, "%pS\n", (void *)addr); |
| 335 | } | 335 | } |
| 336 | 336 | ||
| 337 | static void print_disabled(struct seq_file *m) | 337 | static void print_disabled(struct seq_file *m) |
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c index 75e19e86c954..6cf935316769 100644 --- a/kernel/trace/trace_stat.c +++ b/kernel/trace/trace_stat.c | |||
| @@ -12,7 +12,7 @@ | |||
| 12 | #include <linux/list.h> | 12 | #include <linux/list.h> |
| 13 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
| 14 | #include <linux/rbtree.h> | 14 | #include <linux/rbtree.h> |
| 15 | #include <linux/debugfs.h> | 15 | #include <linux/tracefs.h> |
| 16 | #include "trace_stat.h" | 16 | #include "trace_stat.h" |
| 17 | #include "trace.h" | 17 | #include "trace.h" |
| 18 | 18 | ||
| @@ -65,7 +65,7 @@ static void reset_stat_session(struct stat_session *session) | |||
| 65 | 65 | ||
| 66 | static void destroy_session(struct stat_session *session) | 66 | static void destroy_session(struct stat_session *session) |
| 67 | { | 67 | { |
| 68 | debugfs_remove(session->file); | 68 | tracefs_remove(session->file); |
| 69 | __reset_stat_session(session); | 69 | __reset_stat_session(session); |
| 70 | mutex_destroy(&session->stat_mutex); | 70 | mutex_destroy(&session->stat_mutex); |
| 71 | kfree(session); | 71 | kfree(session); |
| @@ -279,9 +279,9 @@ static int tracing_stat_init(void) | |||
| 279 | if (IS_ERR(d_tracing)) | 279 | if (IS_ERR(d_tracing)) |
| 280 | return 0; | 280 | return 0; |
| 281 | 281 | ||
| 282 | stat_dir = debugfs_create_dir("trace_stat", d_tracing); | 282 | stat_dir = tracefs_create_dir("trace_stat", d_tracing); |
| 283 | if (!stat_dir) | 283 | if (!stat_dir) |
| 284 | pr_warning("Could not create debugfs " | 284 | pr_warning("Could not create tracefs " |
| 285 | "'trace_stat' entry\n"); | 285 | "'trace_stat' entry\n"); |
| 286 | return 0; | 286 | return 0; |
| 287 | } | 287 | } |
| @@ -291,7 +291,7 @@ static int init_stat_file(struct stat_session *session) | |||
| 291 | if (!stat_dir && tracing_stat_init()) | 291 | if (!stat_dir && tracing_stat_init()) |
| 292 | return -ENODEV; | 292 | return -ENODEV; |
| 293 | 293 | ||
| 294 | session->file = debugfs_create_file(session->ts->name, 0644, | 294 | session->file = tracefs_create_file(session->ts->name, 0644, |
| 295 | stat_dir, | 295 | stat_dir, |
| 296 | session, &tracing_stat_fops); | 296 | session, &tracing_stat_fops); |
| 297 | if (!session->file) | 297 | if (!session->file) |
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 7dc1c8abecd6..d60fe62ec4fa 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c | |||
| @@ -196,7 +196,7 @@ DEFINE_FETCH_file_offset(string) | |||
| 196 | DEFINE_FETCH_file_offset(string_size) | 196 | DEFINE_FETCH_file_offset(string_size) |
| 197 | 197 | ||
| 198 | /* Fetch type information table */ | 198 | /* Fetch type information table */ |
| 199 | const struct fetch_type uprobes_fetch_type_table[] = { | 199 | static const struct fetch_type uprobes_fetch_type_table[] = { |
| 200 | /* Special types */ | 200 | /* Special types */ |
| 201 | [FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string, | 201 | [FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string, |
| 202 | sizeof(u32), 1, "__data_loc char[]"), | 202 | sizeof(u32), 1, "__data_loc char[]"), |
| @@ -535,7 +535,8 @@ static int create_trace_uprobe(int argc, char **argv) | |||
| 535 | 535 | ||
| 536 | /* Parse fetch argument */ | 536 | /* Parse fetch argument */ |
| 537 | ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg, | 537 | ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg, |
| 538 | is_return, false); | 538 | is_return, false, |
| 539 | uprobes_fetch_type_table); | ||
| 539 | if (ret) { | 540 | if (ret) { |
| 540 | pr_info("Parse error at argument[%d]. (%d)\n", i, ret); | 541 | pr_info("Parse error at argument[%d]. (%d)\n", i, ret); |
| 541 | goto error; | 542 | goto error; |
| @@ -1005,7 +1006,7 @@ __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm) | |||
| 1005 | return true; | 1006 | return true; |
| 1006 | 1007 | ||
| 1007 | list_for_each_entry(event, &filter->perf_events, hw.tp_list) { | 1008 | list_for_each_entry(event, &filter->perf_events, hw.tp_list) { |
| 1008 | if (event->hw.tp_target->mm == mm) | 1009 | if (event->hw.target->mm == mm) |
| 1009 | return true; | 1010 | return true; |
| 1010 | } | 1011 | } |
| 1011 | 1012 | ||
| @@ -1015,7 +1016,7 @@ __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm) | |||
| 1015 | static inline bool | 1016 | static inline bool |
| 1016 | uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event) | 1017 | uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event) |
| 1017 | { | 1018 | { |
| 1018 | return __uprobe_perf_filter(&tu->filter, event->hw.tp_target->mm); | 1019 | return __uprobe_perf_filter(&tu->filter, event->hw.target->mm); |
| 1019 | } | 1020 | } |
| 1020 | 1021 | ||
| 1021 | static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event) | 1022 | static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event) |
| @@ -1023,10 +1024,10 @@ static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event) | |||
| 1023 | bool done; | 1024 | bool done; |
| 1024 | 1025 | ||
| 1025 | write_lock(&tu->filter.rwlock); | 1026 | write_lock(&tu->filter.rwlock); |
| 1026 | if (event->hw.tp_target) { | 1027 | if (event->hw.target) { |
| 1027 | list_del(&event->hw.tp_list); | 1028 | list_del(&event->hw.tp_list); |
| 1028 | done = tu->filter.nr_systemwide || | 1029 | done = tu->filter.nr_systemwide || |
| 1029 | (event->hw.tp_target->flags & PF_EXITING) || | 1030 | (event->hw.target->flags & PF_EXITING) || |
| 1030 | uprobe_filter_event(tu, event); | 1031 | uprobe_filter_event(tu, event); |
| 1031 | } else { | 1032 | } else { |
| 1032 | tu->filter.nr_systemwide--; | 1033 | tu->filter.nr_systemwide--; |
| @@ -1046,7 +1047,7 @@ static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event) | |||
| 1046 | int err; | 1047 | int err; |
| 1047 | 1048 | ||
| 1048 | write_lock(&tu->filter.rwlock); | 1049 | write_lock(&tu->filter.rwlock); |
| 1049 | if (event->hw.tp_target) { | 1050 | if (event->hw.target) { |
| 1050 | /* | 1051 | /* |
| 1051 | * event->parent != NULL means copy_process(), we can avoid | 1052 | * event->parent != NULL means copy_process(), we can avoid |
| 1052 | * uprobe_apply(). current->mm must be probed and we can rely | 1053 | * uprobe_apply(). current->mm must be probed and we can rely |
