diff options
| author | Ingo Molnar <mingo@kernel.org> | 2017-07-30 05:15:13 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2017-07-30 05:15:13 -0400 |
| commit | f5db340f19f14a8df9dfd22d71fba1513e9f1f7e (patch) | |
| tree | 131d3345bc987aee3c922624de816492e7f323a4 /kernel/trace | |
| parent | ee438ec8f33c5af0d4a4ffb935c5b9272e8c2680 (diff) | |
| parent | 38115f2f8cec8087d558c062e779c443a01f87d6 (diff) | |
Merge branch 'perf/urgent' into perf/core, to pick up latest fixes and refresh the tree
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/trace')
| -rw-r--r-- | kernel/trace/Kconfig | 22 | ||||
| -rw-r--r-- | kernel/trace/bpf_trace.c | 66 | ||||
| -rw-r--r-- | kernel/trace/ftrace.c | 411 | ||||
| -rw-r--r-- | kernel/trace/ring_buffer.c | 10 | ||||
| -rw-r--r-- | kernel/trace/trace.c | 470 | ||||
| -rw-r--r-- | kernel/trace/trace.h | 36 | ||||
| -rw-r--r-- | kernel/trace/trace_events.c | 66 | ||||
| -rw-r--r-- | kernel/trace/trace_kprobe.c | 9 | ||||
| -rw-r--r-- | kernel/trace/trace_output.c | 27 | ||||
| -rw-r--r-- | kernel/trace/trace_sched_switch.c | 72 | ||||
| -rw-r--r-- | kernel/trace/trace_stack.c | 6 |
11 files changed, 932 insertions, 263 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 7e06f04e98fe..434c840e2d82 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig | |||
| @@ -667,30 +667,30 @@ config RING_BUFFER_STARTUP_TEST | |||
| 667 | 667 | ||
| 668 | If unsure, say N | 668 | If unsure, say N |
| 669 | 669 | ||
| 670 | config TRACE_ENUM_MAP_FILE | 670 | config TRACE_EVAL_MAP_FILE |
| 671 | bool "Show enum mappings for trace events" | 671 | bool "Show eval mappings for trace events" |
| 672 | depends on TRACING | 672 | depends on TRACING |
| 673 | help | 673 | help |
| 674 | The "print fmt" of the trace events will show the enum names instead | 674 | The "print fmt" of the trace events will show the enum/sizeof names |
| 675 | of their values. This can cause problems for user space tools that | 675 | instead of their values. This can cause problems for user space tools |
| 676 | use this string to parse the raw data as user space does not know | 676 | that use this string to parse the raw data as user space does not know |
| 677 | how to convert the string to its value. | 677 | how to convert the string to its value. |
| 678 | 678 | ||
| 679 | To fix this, there's a special macro in the kernel that can be used | 679 | To fix this, there's a special macro in the kernel that can be used |
| 680 | to convert the enum into its value. If this macro is used, then the | 680 | to convert an enum/sizeof into its value. If this macro is used, then |
| 681 | print fmt strings will have the enums converted to their values. | 681 | the print fmt strings will be converted to their values. |
| 682 | 682 | ||
| 683 | If something does not get converted properly, this option can be | 683 | If something does not get converted properly, this option can be |
| 684 | used to show what enums the kernel tried to convert. | 684 | used to show what enums/sizeof the kernel tried to convert. |
| 685 | 685 | ||
| 686 | This option is for debugging the enum conversions. A file is created | 686 | This option is for debugging the conversions. A file is created |
| 687 | in the tracing directory called "enum_map" that will show the enum | 687 | in the tracing directory called "eval_map" that will show the |
| 688 | names matched with their values and what trace event system they | 688 | names matched with their values and what trace event system they |
| 689 | belong too. | 689 | belong too. |
| 690 | 690 | ||
| 691 | Normally, the mapping of the strings to values will be freed after | 691 | Normally, the mapping of the strings to values will be freed after |
| 692 | boot up or module load. With this option, they will not be freed, as | 692 | boot up or module load. With this option, they will not be freed, as |
| 693 | they are needed for the "enum_map" file. Enabling this option will | 693 | they are needed for the "eval_map" file. Enabling this option will |
| 694 | increase the memory footprint of the running kernel. | 694 | increase the memory footprint of the running kernel. |
| 695 | 695 | ||
| 696 | If unsure, say N | 696 | If unsure, say N |
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 460a031c77e5..37385193a608 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c | |||
| @@ -122,8 +122,8 @@ static const struct bpf_func_proto *bpf_get_probe_write_proto(void) | |||
| 122 | } | 122 | } |
| 123 | 123 | ||
| 124 | /* | 124 | /* |
| 125 | * limited trace_printk() | 125 | * Only limited trace_printk() conversion specifiers allowed: |
| 126 | * only %d %u %x %ld %lu %lx %lld %llu %llx %p %s conversion specifiers allowed | 126 | * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s |
| 127 | */ | 127 | */ |
| 128 | BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1, | 128 | BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1, |
| 129 | u64, arg2, u64, arg3) | 129 | u64, arg2, u64, arg3) |
| @@ -198,7 +198,8 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1, | |||
| 198 | i++; | 198 | i++; |
| 199 | } | 199 | } |
| 200 | 200 | ||
| 201 | if (fmt[i] != 'd' && fmt[i] != 'u' && fmt[i] != 'x') | 201 | if (fmt[i] != 'i' && fmt[i] != 'd' && |
| 202 | fmt[i] != 'u' && fmt[i] != 'x') | ||
| 202 | return -EINVAL; | 203 | return -EINVAL; |
| 203 | fmt_cnt++; | 204 | fmt_cnt++; |
| 204 | } | 205 | } |
| @@ -234,7 +235,8 @@ BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags) | |||
| 234 | unsigned int cpu = smp_processor_id(); | 235 | unsigned int cpu = smp_processor_id(); |
| 235 | u64 index = flags & BPF_F_INDEX_MASK; | 236 | u64 index = flags & BPF_F_INDEX_MASK; |
| 236 | struct bpf_event_entry *ee; | 237 | struct bpf_event_entry *ee; |
| 237 | struct perf_event *event; | 238 | u64 value = 0; |
| 239 | int err; | ||
| 238 | 240 | ||
| 239 | if (unlikely(flags & ~(BPF_F_INDEX_MASK))) | 241 | if (unlikely(flags & ~(BPF_F_INDEX_MASK))) |
| 240 | return -EINVAL; | 242 | return -EINVAL; |
| @@ -247,21 +249,14 @@ BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags) | |||
| 247 | if (!ee) | 249 | if (!ee) |
| 248 | return -ENOENT; | 250 | return -ENOENT; |
| 249 | 251 | ||
| 250 | event = ee->event; | 252 | err = perf_event_read_local(ee->event, &value); |
| 251 | if (unlikely(event->attr.type != PERF_TYPE_HARDWARE && | ||
| 252 | event->attr.type != PERF_TYPE_RAW)) | ||
| 253 | return -EINVAL; | ||
| 254 | |||
| 255 | /* make sure event is local and doesn't have pmu::count */ | ||
| 256 | if (unlikely(event->oncpu != cpu || event->pmu->count)) | ||
| 257 | return -EINVAL; | ||
| 258 | |||
| 259 | /* | 253 | /* |
| 260 | * we don't know if the function is run successfully by the | 254 | * this api is ugly since we miss [-22..-2] range of valid |
| 261 | * return value. It can be judged in other places, such as | 255 | * counter values, but that's uapi |
| 262 | * eBPF programs. | ||
| 263 | */ | 256 | */ |
| 264 | return perf_event_read_local(event); | 257 | if (err) |
| 258 | return err; | ||
| 259 | return value; | ||
| 265 | } | 260 | } |
| 266 | 261 | ||
| 267 | static const struct bpf_func_proto bpf_perf_event_read_proto = { | 262 | static const struct bpf_func_proto bpf_perf_event_read_proto = { |
| @@ -272,14 +267,16 @@ static const struct bpf_func_proto bpf_perf_event_read_proto = { | |||
| 272 | .arg2_type = ARG_ANYTHING, | 267 | .arg2_type = ARG_ANYTHING, |
| 273 | }; | 268 | }; |
| 274 | 269 | ||
| 270 | static DEFINE_PER_CPU(struct perf_sample_data, bpf_sd); | ||
| 271 | |||
| 275 | static __always_inline u64 | 272 | static __always_inline u64 |
| 276 | __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, | 273 | __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, |
| 277 | u64 flags, struct perf_raw_record *raw) | 274 | u64 flags, struct perf_raw_record *raw) |
| 278 | { | 275 | { |
| 279 | struct bpf_array *array = container_of(map, struct bpf_array, map); | 276 | struct bpf_array *array = container_of(map, struct bpf_array, map); |
| 277 | struct perf_sample_data *sd = this_cpu_ptr(&bpf_sd); | ||
| 280 | unsigned int cpu = smp_processor_id(); | 278 | unsigned int cpu = smp_processor_id(); |
| 281 | u64 index = flags & BPF_F_INDEX_MASK; | 279 | u64 index = flags & BPF_F_INDEX_MASK; |
| 282 | struct perf_sample_data sample_data; | ||
| 283 | struct bpf_event_entry *ee; | 280 | struct bpf_event_entry *ee; |
| 284 | struct perf_event *event; | 281 | struct perf_event *event; |
| 285 | 282 | ||
| @@ -300,9 +297,9 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, | |||
| 300 | if (unlikely(event->oncpu != cpu)) | 297 | if (unlikely(event->oncpu != cpu)) |
| 301 | return -EOPNOTSUPP; | 298 | return -EOPNOTSUPP; |
| 302 | 299 | ||
| 303 | perf_sample_data_init(&sample_data, 0, 0); | 300 | perf_sample_data_init(sd, 0, 0); |
| 304 | sample_data.raw = raw; | 301 | sd->raw = raw; |
| 305 | perf_event_output(event, &sample_data, regs); | 302 | perf_event_output(event, sd, regs); |
| 306 | return 0; | 303 | return 0; |
| 307 | } | 304 | } |
| 308 | 305 | ||
| @@ -483,7 +480,7 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func | |||
| 483 | 480 | ||
| 484 | /* bpf+kprobe programs can access fields of 'struct pt_regs' */ | 481 | /* bpf+kprobe programs can access fields of 'struct pt_regs' */ |
| 485 | static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type, | 482 | static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type, |
| 486 | enum bpf_reg_type *reg_type) | 483 | struct bpf_insn_access_aux *info) |
| 487 | { | 484 | { |
| 488 | if (off < 0 || off >= sizeof(struct pt_regs)) | 485 | if (off < 0 || off >= sizeof(struct pt_regs)) |
| 489 | return false; | 486 | return false; |
| @@ -566,7 +563,7 @@ static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id) | |||
| 566 | } | 563 | } |
| 567 | 564 | ||
| 568 | static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type, | 565 | static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type, |
| 569 | enum bpf_reg_type *reg_type) | 566 | struct bpf_insn_access_aux *info) |
| 570 | { | 567 | { |
| 571 | if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE) | 568 | if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE) |
| 572 | return false; | 569 | return false; |
| @@ -585,40 +582,47 @@ const struct bpf_verifier_ops tracepoint_prog_ops = { | |||
| 585 | }; | 582 | }; |
| 586 | 583 | ||
| 587 | static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type, | 584 | static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type, |
| 588 | enum bpf_reg_type *reg_type) | 585 | struct bpf_insn_access_aux *info) |
| 589 | { | 586 | { |
| 587 | const int size_sp = FIELD_SIZEOF(struct bpf_perf_event_data, | ||
| 588 | sample_period); | ||
| 589 | |||
| 590 | if (off < 0 || off >= sizeof(struct bpf_perf_event_data)) | 590 | if (off < 0 || off >= sizeof(struct bpf_perf_event_data)) |
| 591 | return false; | 591 | return false; |
| 592 | if (type != BPF_READ) | 592 | if (type != BPF_READ) |
| 593 | return false; | 593 | return false; |
| 594 | if (off % size != 0) | 594 | if (off % size != 0) |
| 595 | return false; | 595 | return false; |
| 596 | if (off == offsetof(struct bpf_perf_event_data, sample_period)) { | 596 | |
| 597 | if (size != sizeof(u64)) | 597 | switch (off) { |
| 598 | case bpf_ctx_range(struct bpf_perf_event_data, sample_period): | ||
| 599 | bpf_ctx_record_field_size(info, size_sp); | ||
| 600 | if (!bpf_ctx_narrow_access_ok(off, size, size_sp)) | ||
| 598 | return false; | 601 | return false; |
| 599 | } else { | 602 | break; |
| 603 | default: | ||
| 600 | if (size != sizeof(long)) | 604 | if (size != sizeof(long)) |
| 601 | return false; | 605 | return false; |
| 602 | } | 606 | } |
| 607 | |||
| 603 | return true; | 608 | return true; |
| 604 | } | 609 | } |
| 605 | 610 | ||
| 606 | static u32 pe_prog_convert_ctx_access(enum bpf_access_type type, | 611 | static u32 pe_prog_convert_ctx_access(enum bpf_access_type type, |
| 607 | const struct bpf_insn *si, | 612 | const struct bpf_insn *si, |
| 608 | struct bpf_insn *insn_buf, | 613 | struct bpf_insn *insn_buf, |
| 609 | struct bpf_prog *prog) | 614 | struct bpf_prog *prog, u32 *target_size) |
| 610 | { | 615 | { |
| 611 | struct bpf_insn *insn = insn_buf; | 616 | struct bpf_insn *insn = insn_buf; |
| 612 | 617 | ||
| 613 | switch (si->off) { | 618 | switch (si->off) { |
| 614 | case offsetof(struct bpf_perf_event_data, sample_period): | 619 | case offsetof(struct bpf_perf_event_data, sample_period): |
| 615 | BUILD_BUG_ON(FIELD_SIZEOF(struct perf_sample_data, period) != sizeof(u64)); | ||
| 616 | |||
| 617 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, | 620 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, |
| 618 | data), si->dst_reg, si->src_reg, | 621 | data), si->dst_reg, si->src_reg, |
| 619 | offsetof(struct bpf_perf_event_data_kern, data)); | 622 | offsetof(struct bpf_perf_event_data_kern, data)); |
| 620 | *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, | 623 | *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, |
| 621 | offsetof(struct perf_sample_data, period)); | 624 | bpf_target_off(struct perf_sample_data, period, 8, |
| 625 | target_size)); | ||
| 622 | break; | 626 | break; |
| 623 | default: | 627 | default: |
| 624 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, | 628 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index b308be30dfb9..02004ae91860 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
| @@ -113,7 +113,7 @@ static int ftrace_disabled __read_mostly; | |||
| 113 | 113 | ||
| 114 | static DEFINE_MUTEX(ftrace_lock); | 114 | static DEFINE_MUTEX(ftrace_lock); |
| 115 | 115 | ||
| 116 | static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end; | 116 | static struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end; |
| 117 | ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; | 117 | ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; |
| 118 | static struct ftrace_ops global_ops; | 118 | static struct ftrace_ops global_ops; |
| 119 | 119 | ||
| @@ -169,8 +169,11 @@ int ftrace_nr_registered_ops(void) | |||
| 169 | 169 | ||
| 170 | mutex_lock(&ftrace_lock); | 170 | mutex_lock(&ftrace_lock); |
| 171 | 171 | ||
| 172 | for (ops = ftrace_ops_list; | 172 | for (ops = rcu_dereference_protected(ftrace_ops_list, |
| 173 | ops != &ftrace_list_end; ops = ops->next) | 173 | lockdep_is_held(&ftrace_lock)); |
| 174 | ops != &ftrace_list_end; | ||
| 175 | ops = rcu_dereference_protected(ops->next, | ||
| 176 | lockdep_is_held(&ftrace_lock))) | ||
| 174 | cnt++; | 177 | cnt++; |
| 175 | 178 | ||
| 176 | mutex_unlock(&ftrace_lock); | 179 | mutex_unlock(&ftrace_lock); |
| @@ -275,10 +278,11 @@ static void update_ftrace_function(void) | |||
| 275 | * If there's only one ftrace_ops registered, the ftrace_ops_list | 278 | * If there's only one ftrace_ops registered, the ftrace_ops_list |
| 276 | * will point to the ops we want. | 279 | * will point to the ops we want. |
| 277 | */ | 280 | */ |
| 278 | set_function_trace_op = ftrace_ops_list; | 281 | set_function_trace_op = rcu_dereference_protected(ftrace_ops_list, |
| 282 | lockdep_is_held(&ftrace_lock)); | ||
| 279 | 283 | ||
| 280 | /* If there's no ftrace_ops registered, just call the stub function */ | 284 | /* If there's no ftrace_ops registered, just call the stub function */ |
| 281 | if (ftrace_ops_list == &ftrace_list_end) { | 285 | if (set_function_trace_op == &ftrace_list_end) { |
| 282 | func = ftrace_stub; | 286 | func = ftrace_stub; |
| 283 | 287 | ||
| 284 | /* | 288 | /* |
| @@ -286,7 +290,8 @@ static void update_ftrace_function(void) | |||
| 286 | * recursion safe and not dynamic and the arch supports passing ops, | 290 | * recursion safe and not dynamic and the arch supports passing ops, |
| 287 | * then have the mcount trampoline call the function directly. | 291 | * then have the mcount trampoline call the function directly. |
| 288 | */ | 292 | */ |
| 289 | } else if (ftrace_ops_list->next == &ftrace_list_end) { | 293 | } else if (rcu_dereference_protected(ftrace_ops_list->next, |
| 294 | lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) { | ||
| 290 | func = ftrace_ops_get_list_func(ftrace_ops_list); | 295 | func = ftrace_ops_get_list_func(ftrace_ops_list); |
| 291 | 296 | ||
| 292 | } else { | 297 | } else { |
| @@ -348,9 +353,11 @@ int using_ftrace_ops_list_func(void) | |||
| 348 | return ftrace_trace_function == ftrace_ops_list_func; | 353 | return ftrace_trace_function == ftrace_ops_list_func; |
| 349 | } | 354 | } |
| 350 | 355 | ||
| 351 | static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops) | 356 | static void add_ftrace_ops(struct ftrace_ops __rcu **list, |
| 357 | struct ftrace_ops *ops) | ||
| 352 | { | 358 | { |
| 353 | ops->next = *list; | 359 | rcu_assign_pointer(ops->next, *list); |
| 360 | |||
| 354 | /* | 361 | /* |
| 355 | * We are entering ops into the list but another | 362 | * We are entering ops into the list but another |
| 356 | * CPU might be walking that list. We need to make sure | 363 | * CPU might be walking that list. We need to make sure |
| @@ -360,7 +367,8 @@ static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops) | |||
| 360 | rcu_assign_pointer(*list, ops); | 367 | rcu_assign_pointer(*list, ops); |
| 361 | } | 368 | } |
| 362 | 369 | ||
| 363 | static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops) | 370 | static int remove_ftrace_ops(struct ftrace_ops __rcu **list, |
| 371 | struct ftrace_ops *ops) | ||
| 364 | { | 372 | { |
| 365 | struct ftrace_ops **p; | 373 | struct ftrace_ops **p; |
| 366 | 374 | ||
| @@ -368,7 +376,10 @@ static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops) | |||
| 368 | * If we are removing the last function, then simply point | 376 | * If we are removing the last function, then simply point |
| 369 | * to the ftrace_stub. | 377 | * to the ftrace_stub. |
| 370 | */ | 378 | */ |
| 371 | if (*list == ops && ops->next == &ftrace_list_end) { | 379 | if (rcu_dereference_protected(*list, |
| 380 | lockdep_is_held(&ftrace_lock)) == ops && | ||
| 381 | rcu_dereference_protected(ops->next, | ||
| 382 | lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) { | ||
| 372 | *list = &ftrace_list_end; | 383 | *list = &ftrace_list_end; |
| 373 | return 0; | 384 | return 0; |
| 374 | } | 385 | } |
| @@ -1293,6 +1304,28 @@ static void ftrace_hash_clear(struct ftrace_hash *hash) | |||
| 1293 | FTRACE_WARN_ON(hash->count); | 1304 | FTRACE_WARN_ON(hash->count); |
| 1294 | } | 1305 | } |
| 1295 | 1306 | ||
| 1307 | static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod) | ||
| 1308 | { | ||
| 1309 | list_del(&ftrace_mod->list); | ||
| 1310 | kfree(ftrace_mod->module); | ||
| 1311 | kfree(ftrace_mod->func); | ||
| 1312 | kfree(ftrace_mod); | ||
| 1313 | } | ||
| 1314 | |||
| 1315 | static void clear_ftrace_mod_list(struct list_head *head) | ||
| 1316 | { | ||
| 1317 | struct ftrace_mod_load *p, *n; | ||
| 1318 | |||
| 1319 | /* stack tracer isn't supported yet */ | ||
| 1320 | if (!head) | ||
| 1321 | return; | ||
| 1322 | |||
| 1323 | mutex_lock(&ftrace_lock); | ||
| 1324 | list_for_each_entry_safe(p, n, head, list) | ||
| 1325 | free_ftrace_mod(p); | ||
| 1326 | mutex_unlock(&ftrace_lock); | ||
| 1327 | } | ||
| 1328 | |||
| 1296 | static void free_ftrace_hash(struct ftrace_hash *hash) | 1329 | static void free_ftrace_hash(struct ftrace_hash *hash) |
| 1297 | { | 1330 | { |
| 1298 | if (!hash || hash == EMPTY_HASH) | 1331 | if (!hash || hash == EMPTY_HASH) |
| @@ -1346,6 +1379,35 @@ static struct ftrace_hash *alloc_ftrace_hash(int size_bits) | |||
| 1346 | return hash; | 1379 | return hash; |
| 1347 | } | 1380 | } |
| 1348 | 1381 | ||
| 1382 | |||
| 1383 | static int ftrace_add_mod(struct trace_array *tr, | ||
| 1384 | const char *func, const char *module, | ||
| 1385 | int enable) | ||
| 1386 | { | ||
| 1387 | struct ftrace_mod_load *ftrace_mod; | ||
| 1388 | struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace; | ||
| 1389 | |||
| 1390 | ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL); | ||
| 1391 | if (!ftrace_mod) | ||
| 1392 | return -ENOMEM; | ||
| 1393 | |||
| 1394 | ftrace_mod->func = kstrdup(func, GFP_KERNEL); | ||
| 1395 | ftrace_mod->module = kstrdup(module, GFP_KERNEL); | ||
| 1396 | ftrace_mod->enable = enable; | ||
| 1397 | |||
| 1398 | if (!ftrace_mod->func || !ftrace_mod->module) | ||
| 1399 | goto out_free; | ||
| 1400 | |||
| 1401 | list_add(&ftrace_mod->list, mod_head); | ||
| 1402 | |||
| 1403 | return 0; | ||
| 1404 | |||
| 1405 | out_free: | ||
| 1406 | free_ftrace_mod(ftrace_mod); | ||
| 1407 | |||
| 1408 | return -ENOMEM; | ||
| 1409 | } | ||
| 1410 | |||
| 1349 | static struct ftrace_hash * | 1411 | static struct ftrace_hash * |
| 1350 | alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash) | 1412 | alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash) |
| 1351 | { | 1413 | { |
| @@ -1359,6 +1421,9 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash) | |||
| 1359 | if (!new_hash) | 1421 | if (!new_hash) |
| 1360 | return NULL; | 1422 | return NULL; |
| 1361 | 1423 | ||
| 1424 | if (hash) | ||
| 1425 | new_hash->flags = hash->flags; | ||
| 1426 | |||
| 1362 | /* Empty hash? */ | 1427 | /* Empty hash? */ |
| 1363 | if (ftrace_hash_empty(hash)) | 1428 | if (ftrace_hash_empty(hash)) |
| 1364 | return new_hash; | 1429 | return new_hash; |
| @@ -1403,7 +1468,7 @@ __ftrace_hash_move(struct ftrace_hash *src) | |||
| 1403 | /* | 1468 | /* |
| 1404 | * If the new source is empty, just return the empty_hash. | 1469 | * If the new source is empty, just return the empty_hash. |
| 1405 | */ | 1470 | */ |
| 1406 | if (!src->count) | 1471 | if (ftrace_hash_empty(src)) |
| 1407 | return EMPTY_HASH; | 1472 | return EMPTY_HASH; |
| 1408 | 1473 | ||
| 1409 | /* | 1474 | /* |
| @@ -1420,6 +1485,8 @@ __ftrace_hash_move(struct ftrace_hash *src) | |||
| 1420 | if (!new_hash) | 1485 | if (!new_hash) |
| 1421 | return NULL; | 1486 | return NULL; |
| 1422 | 1487 | ||
| 1488 | new_hash->flags = src->flags; | ||
| 1489 | |||
| 1423 | size = 1 << src->size_bits; | 1490 | size = 1 << src->size_bits; |
| 1424 | for (i = 0; i < size; i++) { | 1491 | for (i = 0; i < size; i++) { |
| 1425 | hhd = &src->buckets[i]; | 1492 | hhd = &src->buckets[i]; |
| @@ -1513,8 +1580,8 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) | |||
| 1513 | return 0; | 1580 | return 0; |
| 1514 | #endif | 1581 | #endif |
| 1515 | 1582 | ||
| 1516 | hash.filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash); | 1583 | rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash); |
| 1517 | hash.notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash); | 1584 | rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash); |
| 1518 | 1585 | ||
| 1519 | if (hash_contains_ip(ip, &hash)) | 1586 | if (hash_contains_ip(ip, &hash)) |
| 1520 | ret = 1; | 1587 | ret = 1; |
| @@ -1650,7 +1717,7 @@ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops, | |||
| 1650 | struct dyn_ftrace *rec; | 1717 | struct dyn_ftrace *rec; |
| 1651 | bool update = false; | 1718 | bool update = false; |
| 1652 | int count = 0; | 1719 | int count = 0; |
| 1653 | int all = 0; | 1720 | int all = false; |
| 1654 | 1721 | ||
| 1655 | /* Only update if the ops has been registered */ | 1722 | /* Only update if the ops has been registered */ |
| 1656 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) | 1723 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) |
| @@ -1671,7 +1738,7 @@ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops, | |||
| 1671 | hash = ops->func_hash->filter_hash; | 1738 | hash = ops->func_hash->filter_hash; |
| 1672 | other_hash = ops->func_hash->notrace_hash; | 1739 | other_hash = ops->func_hash->notrace_hash; |
| 1673 | if (ftrace_hash_empty(hash)) | 1740 | if (ftrace_hash_empty(hash)) |
| 1674 | all = 1; | 1741 | all = true; |
| 1675 | } else { | 1742 | } else { |
| 1676 | inc = !inc; | 1743 | inc = !inc; |
| 1677 | hash = ops->func_hash->notrace_hash; | 1744 | hash = ops->func_hash->notrace_hash; |
| @@ -2784,7 +2851,8 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command) | |||
| 2784 | * If there's no more ops registered with ftrace, run a | 2851 | * If there's no more ops registered with ftrace, run a |
| 2785 | * sanity check to make sure all rec flags are cleared. | 2852 | * sanity check to make sure all rec flags are cleared. |
| 2786 | */ | 2853 | */ |
| 2787 | if (ftrace_ops_list == &ftrace_list_end) { | 2854 | if (rcu_dereference_protected(ftrace_ops_list, |
| 2855 | lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) { | ||
| 2788 | struct ftrace_page *pg; | 2856 | struct ftrace_page *pg; |
| 2789 | struct dyn_ftrace *rec; | 2857 | struct dyn_ftrace *rec; |
| 2790 | 2858 | ||
| @@ -3061,6 +3129,7 @@ ftrace_allocate_pages(unsigned long num_to_init) | |||
| 3061 | struct ftrace_iterator { | 3129 | struct ftrace_iterator { |
| 3062 | loff_t pos; | 3130 | loff_t pos; |
| 3063 | loff_t func_pos; | 3131 | loff_t func_pos; |
| 3132 | loff_t mod_pos; | ||
| 3064 | struct ftrace_page *pg; | 3133 | struct ftrace_page *pg; |
| 3065 | struct dyn_ftrace *func; | 3134 | struct dyn_ftrace *func; |
| 3066 | struct ftrace_func_probe *probe; | 3135 | struct ftrace_func_probe *probe; |
| @@ -3068,6 +3137,8 @@ struct ftrace_iterator { | |||
| 3068 | struct trace_parser parser; | 3137 | struct trace_parser parser; |
| 3069 | struct ftrace_hash *hash; | 3138 | struct ftrace_hash *hash; |
| 3070 | struct ftrace_ops *ops; | 3139 | struct ftrace_ops *ops; |
| 3140 | struct trace_array *tr; | ||
| 3141 | struct list_head *mod_list; | ||
| 3071 | int pidx; | 3142 | int pidx; |
| 3072 | int idx; | 3143 | int idx; |
| 3073 | unsigned flags; | 3144 | unsigned flags; |
| @@ -3152,13 +3223,13 @@ static void *t_probe_start(struct seq_file *m, loff_t *pos) | |||
| 3152 | if (!(iter->flags & FTRACE_ITER_DO_PROBES)) | 3223 | if (!(iter->flags & FTRACE_ITER_DO_PROBES)) |
| 3153 | return NULL; | 3224 | return NULL; |
| 3154 | 3225 | ||
| 3155 | if (iter->func_pos > *pos) | 3226 | if (iter->mod_pos > *pos) |
| 3156 | return NULL; | 3227 | return NULL; |
| 3157 | 3228 | ||
| 3158 | iter->probe = NULL; | 3229 | iter->probe = NULL; |
| 3159 | iter->probe_entry = NULL; | 3230 | iter->probe_entry = NULL; |
| 3160 | iter->pidx = 0; | 3231 | iter->pidx = 0; |
| 3161 | for (l = 0; l <= (*pos - iter->func_pos); ) { | 3232 | for (l = 0; l <= (*pos - iter->mod_pos); ) { |
| 3162 | p = t_probe_next(m, &l); | 3233 | p = t_probe_next(m, &l); |
| 3163 | if (!p) | 3234 | if (!p) |
| 3164 | break; | 3235 | break; |
| @@ -3197,6 +3268,82 @@ t_probe_show(struct seq_file *m, struct ftrace_iterator *iter) | |||
| 3197 | } | 3268 | } |
| 3198 | 3269 | ||
| 3199 | static void * | 3270 | static void * |
| 3271 | t_mod_next(struct seq_file *m, loff_t *pos) | ||
| 3272 | { | ||
| 3273 | struct ftrace_iterator *iter = m->private; | ||
| 3274 | struct trace_array *tr = iter->tr; | ||
| 3275 | |||
| 3276 | (*pos)++; | ||
| 3277 | iter->pos = *pos; | ||
| 3278 | |||
| 3279 | iter->mod_list = iter->mod_list->next; | ||
| 3280 | |||
| 3281 | if (iter->mod_list == &tr->mod_trace || | ||
| 3282 | iter->mod_list == &tr->mod_notrace) { | ||
| 3283 | iter->flags &= ~FTRACE_ITER_MOD; | ||
| 3284 | return NULL; | ||
| 3285 | } | ||
| 3286 | |||
| 3287 | iter->mod_pos = *pos; | ||
| 3288 | |||
| 3289 | return iter; | ||
| 3290 | } | ||
| 3291 | |||
| 3292 | static void *t_mod_start(struct seq_file *m, loff_t *pos) | ||
| 3293 | { | ||
| 3294 | struct ftrace_iterator *iter = m->private; | ||
| 3295 | void *p = NULL; | ||
| 3296 | loff_t l; | ||
| 3297 | |||
| 3298 | if (iter->func_pos > *pos) | ||
| 3299 | return NULL; | ||
| 3300 | |||
| 3301 | iter->mod_pos = iter->func_pos; | ||
| 3302 | |||
| 3303 | /* probes are only available if tr is set */ | ||
| 3304 | if (!iter->tr) | ||
| 3305 | return NULL; | ||
| 3306 | |||
| 3307 | for (l = 0; l <= (*pos - iter->func_pos); ) { | ||
| 3308 | p = t_mod_next(m, &l); | ||
| 3309 | if (!p) | ||
| 3310 | break; | ||
| 3311 | } | ||
| 3312 | if (!p) { | ||
| 3313 | iter->flags &= ~FTRACE_ITER_MOD; | ||
| 3314 | return t_probe_start(m, pos); | ||
| 3315 | } | ||
| 3316 | |||
| 3317 | /* Only set this if we have an item */ | ||
| 3318 | iter->flags |= FTRACE_ITER_MOD; | ||
| 3319 | |||
| 3320 | return iter; | ||
| 3321 | } | ||
| 3322 | |||
| 3323 | static int | ||
| 3324 | t_mod_show(struct seq_file *m, struct ftrace_iterator *iter) | ||
| 3325 | { | ||
| 3326 | struct ftrace_mod_load *ftrace_mod; | ||
| 3327 | struct trace_array *tr = iter->tr; | ||
| 3328 | |||
| 3329 | if (WARN_ON_ONCE(!iter->mod_list) || | ||
| 3330 | iter->mod_list == &tr->mod_trace || | ||
| 3331 | iter->mod_list == &tr->mod_notrace) | ||
| 3332 | return -EIO; | ||
| 3333 | |||
| 3334 | ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list); | ||
| 3335 | |||
| 3336 | if (ftrace_mod->func) | ||
| 3337 | seq_printf(m, "%s", ftrace_mod->func); | ||
| 3338 | else | ||
| 3339 | seq_putc(m, '*'); | ||
| 3340 | |||
| 3341 | seq_printf(m, ":mod:%s\n", ftrace_mod->module); | ||
| 3342 | |||
| 3343 | return 0; | ||
| 3344 | } | ||
| 3345 | |||
| 3346 | static void * | ||
| 3200 | t_func_next(struct seq_file *m, loff_t *pos) | 3347 | t_func_next(struct seq_file *m, loff_t *pos) |
| 3201 | { | 3348 | { |
| 3202 | struct ftrace_iterator *iter = m->private; | 3349 | struct ftrace_iterator *iter = m->private; |
| @@ -3237,7 +3384,7 @@ static void * | |||
| 3237 | t_next(struct seq_file *m, void *v, loff_t *pos) | 3384 | t_next(struct seq_file *m, void *v, loff_t *pos) |
| 3238 | { | 3385 | { |
| 3239 | struct ftrace_iterator *iter = m->private; | 3386 | struct ftrace_iterator *iter = m->private; |
| 3240 | loff_t l = *pos; /* t_hash_start() must use original pos */ | 3387 | loff_t l = *pos; /* t_probe_start() must use original pos */ |
| 3241 | void *ret; | 3388 | void *ret; |
| 3242 | 3389 | ||
| 3243 | if (unlikely(ftrace_disabled)) | 3390 | if (unlikely(ftrace_disabled)) |
| @@ -3246,16 +3393,19 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
| 3246 | if (iter->flags & FTRACE_ITER_PROBE) | 3393 | if (iter->flags & FTRACE_ITER_PROBE) |
| 3247 | return t_probe_next(m, pos); | 3394 | return t_probe_next(m, pos); |
| 3248 | 3395 | ||
| 3396 | if (iter->flags & FTRACE_ITER_MOD) | ||
| 3397 | return t_mod_next(m, pos); | ||
| 3398 | |||
| 3249 | if (iter->flags & FTRACE_ITER_PRINTALL) { | 3399 | if (iter->flags & FTRACE_ITER_PRINTALL) { |
| 3250 | /* next must increment pos, and t_probe_start does not */ | 3400 | /* next must increment pos, and t_probe_start does not */ |
| 3251 | (*pos)++; | 3401 | (*pos)++; |
| 3252 | return t_probe_start(m, &l); | 3402 | return t_mod_start(m, &l); |
| 3253 | } | 3403 | } |
| 3254 | 3404 | ||
| 3255 | ret = t_func_next(m, pos); | 3405 | ret = t_func_next(m, pos); |
| 3256 | 3406 | ||
| 3257 | if (!ret) | 3407 | if (!ret) |
| 3258 | return t_probe_start(m, &l); | 3408 | return t_mod_start(m, &l); |
| 3259 | 3409 | ||
| 3260 | return ret; | 3410 | return ret; |
| 3261 | } | 3411 | } |
| @@ -3264,7 +3414,7 @@ static void reset_iter_read(struct ftrace_iterator *iter) | |||
| 3264 | { | 3414 | { |
| 3265 | iter->pos = 0; | 3415 | iter->pos = 0; |
| 3266 | iter->func_pos = 0; | 3416 | iter->func_pos = 0; |
| 3267 | iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE); | 3417 | iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD); |
| 3268 | } | 3418 | } |
| 3269 | 3419 | ||
| 3270 | static void *t_start(struct seq_file *m, loff_t *pos) | 3420 | static void *t_start(struct seq_file *m, loff_t *pos) |
| @@ -3293,15 +3443,15 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
| 3293 | ftrace_hash_empty(iter->hash)) { | 3443 | ftrace_hash_empty(iter->hash)) { |
| 3294 | iter->func_pos = 1; /* Account for the message */ | 3444 | iter->func_pos = 1; /* Account for the message */ |
| 3295 | if (*pos > 0) | 3445 | if (*pos > 0) |
| 3296 | return t_probe_start(m, pos); | 3446 | return t_mod_start(m, pos); |
| 3297 | iter->flags |= FTRACE_ITER_PRINTALL; | 3447 | iter->flags |= FTRACE_ITER_PRINTALL; |
| 3298 | /* reset in case of seek/pread */ | 3448 | /* reset in case of seek/pread */ |
| 3299 | iter->flags &= ~FTRACE_ITER_PROBE; | 3449 | iter->flags &= ~FTRACE_ITER_PROBE; |
| 3300 | return iter; | 3450 | return iter; |
| 3301 | } | 3451 | } |
| 3302 | 3452 | ||
| 3303 | if (iter->flags & FTRACE_ITER_PROBE) | 3453 | if (iter->flags & FTRACE_ITER_MOD) |
| 3304 | return t_probe_start(m, pos); | 3454 | return t_mod_start(m, pos); |
| 3305 | 3455 | ||
| 3306 | /* | 3456 | /* |
| 3307 | * Unfortunately, we need to restart at ftrace_pages_start | 3457 | * Unfortunately, we need to restart at ftrace_pages_start |
| @@ -3317,7 +3467,7 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
| 3317 | } | 3467 | } |
| 3318 | 3468 | ||
| 3319 | if (!p) | 3469 | if (!p) |
| 3320 | return t_probe_start(m, pos); | 3470 | return t_mod_start(m, pos); |
| 3321 | 3471 | ||
| 3322 | return iter; | 3472 | return iter; |
| 3323 | } | 3473 | } |
| @@ -3351,6 +3501,9 @@ static int t_show(struct seq_file *m, void *v) | |||
| 3351 | if (iter->flags & FTRACE_ITER_PROBE) | 3501 | if (iter->flags & FTRACE_ITER_PROBE) |
| 3352 | return t_probe_show(m, iter); | 3502 | return t_probe_show(m, iter); |
| 3353 | 3503 | ||
| 3504 | if (iter->flags & FTRACE_ITER_MOD) | ||
| 3505 | return t_mod_show(m, iter); | ||
| 3506 | |||
| 3354 | if (iter->flags & FTRACE_ITER_PRINTALL) { | 3507 | if (iter->flags & FTRACE_ITER_PRINTALL) { |
| 3355 | if (iter->flags & FTRACE_ITER_NOTRACE) | 3508 | if (iter->flags & FTRACE_ITER_NOTRACE) |
| 3356 | seq_puts(m, "#### no functions disabled ####\n"); | 3509 | seq_puts(m, "#### no functions disabled ####\n"); |
| @@ -3457,6 +3610,8 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag, | |||
| 3457 | { | 3610 | { |
| 3458 | struct ftrace_iterator *iter; | 3611 | struct ftrace_iterator *iter; |
| 3459 | struct ftrace_hash *hash; | 3612 | struct ftrace_hash *hash; |
| 3613 | struct list_head *mod_head; | ||
| 3614 | struct trace_array *tr = ops->private; | ||
| 3460 | int ret = 0; | 3615 | int ret = 0; |
| 3461 | 3616 | ||
| 3462 | ftrace_ops_init(ops); | 3617 | ftrace_ops_init(ops); |
| @@ -3475,21 +3630,29 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag, | |||
| 3475 | 3630 | ||
| 3476 | iter->ops = ops; | 3631 | iter->ops = ops; |
| 3477 | iter->flags = flag; | 3632 | iter->flags = flag; |
| 3633 | iter->tr = tr; | ||
| 3478 | 3634 | ||
| 3479 | mutex_lock(&ops->func_hash->regex_lock); | 3635 | mutex_lock(&ops->func_hash->regex_lock); |
| 3480 | 3636 | ||
| 3481 | if (flag & FTRACE_ITER_NOTRACE) | 3637 | if (flag & FTRACE_ITER_NOTRACE) { |
| 3482 | hash = ops->func_hash->notrace_hash; | 3638 | hash = ops->func_hash->notrace_hash; |
| 3483 | else | 3639 | mod_head = tr ? &tr->mod_notrace : NULL; |
| 3640 | } else { | ||
| 3484 | hash = ops->func_hash->filter_hash; | 3641 | hash = ops->func_hash->filter_hash; |
| 3642 | mod_head = tr ? &tr->mod_trace : NULL; | ||
| 3643 | } | ||
| 3644 | |||
| 3645 | iter->mod_list = mod_head; | ||
| 3485 | 3646 | ||
| 3486 | if (file->f_mode & FMODE_WRITE) { | 3647 | if (file->f_mode & FMODE_WRITE) { |
| 3487 | const int size_bits = FTRACE_HASH_DEFAULT_BITS; | 3648 | const int size_bits = FTRACE_HASH_DEFAULT_BITS; |
| 3488 | 3649 | ||
| 3489 | if (file->f_flags & O_TRUNC) | 3650 | if (file->f_flags & O_TRUNC) { |
| 3490 | iter->hash = alloc_ftrace_hash(size_bits); | 3651 | iter->hash = alloc_ftrace_hash(size_bits); |
| 3491 | else | 3652 | clear_ftrace_mod_list(mod_head); |
| 3653 | } else { | ||
| 3492 | iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash); | 3654 | iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash); |
| 3655 | } | ||
| 3493 | 3656 | ||
| 3494 | if (!iter->hash) { | 3657 | if (!iter->hash) { |
| 3495 | trace_parser_put(&iter->parser); | 3658 | trace_parser_put(&iter->parser); |
| @@ -3665,7 +3828,7 @@ match_records(struct ftrace_hash *hash, char *func, int len, char *mod) | |||
| 3665 | int exclude_mod = 0; | 3828 | int exclude_mod = 0; |
| 3666 | int found = 0; | 3829 | int found = 0; |
| 3667 | int ret; | 3830 | int ret; |
| 3668 | int clear_filter; | 3831 | int clear_filter = 0; |
| 3669 | 3832 | ||
| 3670 | if (func) { | 3833 | if (func) { |
| 3671 | func_g.type = filter_parse_regex(func, len, &func_g.search, | 3834 | func_g.type = filter_parse_regex(func, len, &func_g.search, |
| @@ -3761,6 +3924,165 @@ static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops, | |||
| 3761 | return ret; | 3924 | return ret; |
| 3762 | } | 3925 | } |
| 3763 | 3926 | ||
| 3927 | static bool module_exists(const char *module) | ||
| 3928 | { | ||
| 3929 | /* All modules have the symbol __this_module */ | ||
| 3930 | const char this_mod[] = "__this_module"; | ||
| 3931 | const int modname_size = MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 1; | ||
| 3932 | char modname[modname_size + 1]; | ||
| 3933 | unsigned long val; | ||
| 3934 | int n; | ||
| 3935 | |||
| 3936 | n = snprintf(modname, modname_size + 1, "%s:%s", module, this_mod); | ||
| 3937 | |||
| 3938 | if (n > modname_size) | ||
| 3939 | return false; | ||
| 3940 | |||
| 3941 | val = module_kallsyms_lookup_name(modname); | ||
| 3942 | return val != 0; | ||
| 3943 | } | ||
| 3944 | |||
| 3945 | static int cache_mod(struct trace_array *tr, | ||
| 3946 | const char *func, char *module, int enable) | ||
| 3947 | { | ||
| 3948 | struct ftrace_mod_load *ftrace_mod, *n; | ||
| 3949 | struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace; | ||
| 3950 | int ret; | ||
| 3951 | |||
| 3952 | mutex_lock(&ftrace_lock); | ||
| 3953 | |||
| 3954 | /* We do not cache inverse filters */ | ||
| 3955 | if (func[0] == '!') { | ||
| 3956 | func++; | ||
| 3957 | ret = -EINVAL; | ||
| 3958 | |||
| 3959 | /* Look to remove this hash */ | ||
| 3960 | list_for_each_entry_safe(ftrace_mod, n, head, list) { | ||
| 3961 | if (strcmp(ftrace_mod->module, module) != 0) | ||
| 3962 | continue; | ||
| 3963 | |||
| 3964 | /* no func matches all */ | ||
| 3965 | if (strcmp(func, "*") == 0 || | ||
| 3966 | (ftrace_mod->func && | ||
| 3967 | strcmp(ftrace_mod->func, func) == 0)) { | ||
| 3968 | ret = 0; | ||
| 3969 | free_ftrace_mod(ftrace_mod); | ||
| 3970 | continue; | ||
| 3971 | } | ||
| 3972 | } | ||
| 3973 | goto out; | ||
| 3974 | } | ||
| 3975 | |||
| 3976 | ret = -EINVAL; | ||
| 3977 | /* We only care about modules that have not been loaded yet */ | ||
| 3978 | if (module_exists(module)) | ||
| 3979 | goto out; | ||
| 3980 | |||
| 3981 | /* Save this string off, and execute it when the module is loaded */ | ||
| 3982 | ret = ftrace_add_mod(tr, func, module, enable); | ||
| 3983 | out: | ||
| 3984 | mutex_unlock(&ftrace_lock); | ||
| 3985 | |||
| 3986 | return ret; | ||
| 3987 | } | ||
| 3988 | |||
| 3989 | static int | ||
| 3990 | ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, | ||
| 3991 | int reset, int enable); | ||
| 3992 | |||
| 3993 | #ifdef CONFIG_MODULES | ||
| 3994 | static void process_mod_list(struct list_head *head, struct ftrace_ops *ops, | ||
| 3995 | char *mod, bool enable) | ||
| 3996 | { | ||
| 3997 | struct ftrace_mod_load *ftrace_mod, *n; | ||
| 3998 | struct ftrace_hash **orig_hash, *new_hash; | ||
| 3999 | LIST_HEAD(process_mods); | ||
| 4000 | char *func; | ||
| 4001 | int ret; | ||
| 4002 | |||
| 4003 | mutex_lock(&ops->func_hash->regex_lock); | ||
| 4004 | |||
| 4005 | if (enable) | ||
| 4006 | orig_hash = &ops->func_hash->filter_hash; | ||
| 4007 | else | ||
| 4008 | orig_hash = &ops->func_hash->notrace_hash; | ||
| 4009 | |||
| 4010 | new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, | ||
| 4011 | *orig_hash); | ||
| 4012 | if (!new_hash) | ||
| 4013 | goto out; /* warn? */ | ||
| 4014 | |||
| 4015 | mutex_lock(&ftrace_lock); | ||
| 4016 | |||
| 4017 | list_for_each_entry_safe(ftrace_mod, n, head, list) { | ||
| 4018 | |||
| 4019 | if (strcmp(ftrace_mod->module, mod) != 0) | ||
| 4020 | continue; | ||
| 4021 | |||
| 4022 | if (ftrace_mod->func) | ||
| 4023 | func = kstrdup(ftrace_mod->func, GFP_KERNEL); | ||
| 4024 | else | ||
| 4025 | func = kstrdup("*", GFP_KERNEL); | ||
| 4026 | |||
| 4027 | if (!func) /* warn? */ | ||
| 4028 | continue; | ||
| 4029 | |||
| 4030 | list_del(&ftrace_mod->list); | ||
| 4031 | list_add(&ftrace_mod->list, &process_mods); | ||
| 4032 | |||
| 4033 | /* Use the newly allocated func, as it may be "*" */ | ||
| 4034 | kfree(ftrace_mod->func); | ||
| 4035 | ftrace_mod->func = func; | ||
| 4036 | } | ||
| 4037 | |||
| 4038 | mutex_unlock(&ftrace_lock); | ||
| 4039 | |||
| 4040 | list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) { | ||
| 4041 | |||
| 4042 | func = ftrace_mod->func; | ||
| 4043 | |||
| 4044 | /* Grabs ftrace_lock, which is why we have this extra step */ | ||
| 4045 | match_records(new_hash, func, strlen(func), mod); | ||
| 4046 | free_ftrace_mod(ftrace_mod); | ||
| 4047 | } | ||
| 4048 | |||
| 4049 | if (enable && list_empty(head)) | ||
| 4050 | new_hash->flags &= ~FTRACE_HASH_FL_MOD; | ||
| 4051 | |||
| 4052 | mutex_lock(&ftrace_lock); | ||
| 4053 | |||
| 4054 | ret = ftrace_hash_move_and_update_ops(ops, orig_hash, | ||
| 4055 | new_hash, enable); | ||
| 4056 | mutex_unlock(&ftrace_lock); | ||
| 4057 | |||
| 4058 | out: | ||
| 4059 | mutex_unlock(&ops->func_hash->regex_lock); | ||
| 4060 | |||
| 4061 | free_ftrace_hash(new_hash); | ||
| 4062 | } | ||
| 4063 | |||
| 4064 | static void process_cached_mods(const char *mod_name) | ||
| 4065 | { | ||
| 4066 | struct trace_array *tr; | ||
| 4067 | char *mod; | ||
| 4068 | |||
| 4069 | mod = kstrdup(mod_name, GFP_KERNEL); | ||
| 4070 | if (!mod) | ||
| 4071 | return; | ||
| 4072 | |||
| 4073 | mutex_lock(&trace_types_lock); | ||
| 4074 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { | ||
| 4075 | if (!list_empty(&tr->mod_trace)) | ||
| 4076 | process_mod_list(&tr->mod_trace, tr->ops, mod, true); | ||
| 4077 | if (!list_empty(&tr->mod_notrace)) | ||
| 4078 | process_mod_list(&tr->mod_notrace, tr->ops, mod, false); | ||
| 4079 | } | ||
| 4080 | mutex_unlock(&trace_types_lock); | ||
| 4081 | |||
| 4082 | kfree(mod); | ||
| 4083 | } | ||
| 4084 | #endif | ||
| 4085 | |||
| 3764 | /* | 4086 | /* |
| 3765 | * We register the module command as a template to show others how | 4087 | * We register the module command as a template to show others how |
| 3766 | * to register the a command as well. | 4088 | * to register the a command as well. |
| @@ -3768,10 +4090,16 @@ static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops, | |||
| 3768 | 4090 | ||
| 3769 | static int | 4091 | static int |
| 3770 | ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash, | 4092 | ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash, |
| 3771 | char *func, char *cmd, char *module, int enable) | 4093 | char *func_orig, char *cmd, char *module, int enable) |
| 3772 | { | 4094 | { |
| 4095 | char *func; | ||
| 3773 | int ret; | 4096 | int ret; |
| 3774 | 4097 | ||
| 4098 | /* match_records() modifies func, and we need the original */ | ||
| 4099 | func = kstrdup(func_orig, GFP_KERNEL); | ||
| 4100 | if (!func) | ||
| 4101 | return -ENOMEM; | ||
| 4102 | |||
| 3775 | /* | 4103 | /* |
| 3776 | * cmd == 'mod' because we only registered this func | 4104 | * cmd == 'mod' because we only registered this func |
| 3777 | * for the 'mod' ftrace_func_command. | 4105 | * for the 'mod' ftrace_func_command. |
| @@ -3780,8 +4108,10 @@ ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash, | |||
| 3780 | * parameter. | 4108 | * parameter. |
| 3781 | */ | 4109 | */ |
| 3782 | ret = match_records(hash, func, strlen(func), module); | 4110 | ret = match_records(hash, func, strlen(func), module); |
| 4111 | kfree(func); | ||
| 4112 | |||
| 3783 | if (!ret) | 4113 | if (!ret) |
| 3784 | return -EINVAL; | 4114 | return cache_mod(tr, func_orig, module, enable); |
| 3785 | if (ret < 0) | 4115 | if (ret < 0) |
| 3786 | return ret; | 4116 | return ret; |
| 3787 | return 0; | 4117 | return 0; |
| @@ -4725,9 +5055,11 @@ int ftrace_regex_release(struct inode *inode, struct file *file) | |||
| 4725 | if (file->f_mode & FMODE_WRITE) { | 5055 | if (file->f_mode & FMODE_WRITE) { |
| 4726 | filter_hash = !!(iter->flags & FTRACE_ITER_FILTER); | 5056 | filter_hash = !!(iter->flags & FTRACE_ITER_FILTER); |
| 4727 | 5057 | ||
| 4728 | if (filter_hash) | 5058 | if (filter_hash) { |
| 4729 | orig_hash = &iter->ops->func_hash->filter_hash; | 5059 | orig_hash = &iter->ops->func_hash->filter_hash; |
| 4730 | else | 5060 | if (iter->tr && !list_empty(&iter->tr->mod_trace)) |
| 5061 | iter->hash->flags |= FTRACE_HASH_FL_MOD; | ||
| 5062 | } else | ||
| 4731 | orig_hash = &iter->ops->func_hash->notrace_hash; | 5063 | orig_hash = &iter->ops->func_hash->notrace_hash; |
| 4732 | 5064 | ||
| 4733 | mutex_lock(&ftrace_lock); | 5065 | mutex_lock(&ftrace_lock); |
| @@ -5385,6 +5717,7 @@ void ftrace_release_mod(struct module *mod) | |||
| 5385 | if (pg == ftrace_pages) | 5717 | if (pg == ftrace_pages) |
| 5386 | ftrace_pages = next_to_ftrace_page(last_pg); | 5718 | ftrace_pages = next_to_ftrace_page(last_pg); |
| 5387 | 5719 | ||
| 5720 | ftrace_update_tot_cnt -= pg->index; | ||
| 5388 | *last_pg = pg->next; | 5721 | *last_pg = pg->next; |
| 5389 | order = get_count_order(pg->size / ENTRIES_PER_PAGE); | 5722 | order = get_count_order(pg->size / ENTRIES_PER_PAGE); |
| 5390 | free_pages((unsigned long)pg->records, order); | 5723 | free_pages((unsigned long)pg->records, order); |
| @@ -5463,6 +5796,8 @@ void ftrace_module_enable(struct module *mod) | |||
| 5463 | 5796 | ||
| 5464 | out_unlock: | 5797 | out_unlock: |
| 5465 | mutex_unlock(&ftrace_lock); | 5798 | mutex_unlock(&ftrace_lock); |
| 5799 | |||
| 5800 | process_cached_mods(mod->name); | ||
| 5466 | } | 5801 | } |
| 5467 | 5802 | ||
| 5468 | void ftrace_module_init(struct module *mod) | 5803 | void ftrace_module_init(struct module *mod) |
| @@ -5501,6 +5836,7 @@ void __init ftrace_free_init_mem(void) | |||
| 5501 | if (!rec) | 5836 | if (!rec) |
| 5502 | continue; | 5837 | continue; |
| 5503 | pg->index--; | 5838 | pg->index--; |
| 5839 | ftrace_update_tot_cnt--; | ||
| 5504 | if (!pg->index) { | 5840 | if (!pg->index) { |
| 5505 | *last_pg = pg->next; | 5841 | *last_pg = pg->next; |
| 5506 | order = get_count_order(pg->size / ENTRIES_PER_PAGE); | 5842 | order = get_count_order(pg->size / ENTRIES_PER_PAGE); |
| @@ -5567,6 +5903,8 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops) | |||
| 5567 | void ftrace_init_trace_array(struct trace_array *tr) | 5903 | void ftrace_init_trace_array(struct trace_array *tr) |
| 5568 | { | 5904 | { |
| 5569 | INIT_LIST_HEAD(&tr->func_probes); | 5905 | INIT_LIST_HEAD(&tr->func_probes); |
| 5906 | INIT_LIST_HEAD(&tr->mod_trace); | ||
| 5907 | INIT_LIST_HEAD(&tr->mod_notrace); | ||
| 5570 | } | 5908 | } |
| 5571 | #else | 5909 | #else |
| 5572 | 5910 | ||
| @@ -6127,7 +6465,8 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
| 6127 | if (ftrace_enabled) { | 6465 | if (ftrace_enabled) { |
| 6128 | 6466 | ||
| 6129 | /* we are starting ftrace again */ | 6467 | /* we are starting ftrace again */ |
| 6130 | if (ftrace_ops_list != &ftrace_list_end) | 6468 | if (rcu_dereference_protected(ftrace_ops_list, |
| 6469 | lockdep_is_held(&ftrace_lock)) != &ftrace_list_end) | ||
| 6131 | update_ftrace_function(); | 6470 | update_ftrace_function(); |
| 6132 | 6471 | ||
| 6133 | ftrace_startup_sysctl(); | 6472 | ftrace_startup_sysctl(); |
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 4ae268e687fe..529cc50d7243 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
| @@ -1136,12 +1136,12 @@ static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu) | |||
| 1136 | for (i = 0; i < nr_pages; i++) { | 1136 | for (i = 0; i < nr_pages; i++) { |
| 1137 | struct page *page; | 1137 | struct page *page; |
| 1138 | /* | 1138 | /* |
| 1139 | * __GFP_NORETRY flag makes sure that the allocation fails | 1139 | * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails |
| 1140 | * gracefully without invoking oom-killer and the system is | 1140 | * gracefully without invoking oom-killer and the system is not |
| 1141 | * not destabilized. | 1141 | * destabilized. |
| 1142 | */ | 1142 | */ |
| 1143 | bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), | 1143 | bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), |
| 1144 | GFP_KERNEL | __GFP_NORETRY, | 1144 | GFP_KERNEL | __GFP_RETRY_MAYFAIL, |
| 1145 | cpu_to_node(cpu)); | 1145 | cpu_to_node(cpu)); |
| 1146 | if (!bpage) | 1146 | if (!bpage) |
| 1147 | goto free_pages; | 1147 | goto free_pages; |
| @@ -1149,7 +1149,7 @@ static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu) | |||
| 1149 | list_add(&bpage->list, pages); | 1149 | list_add(&bpage->list, pages); |
| 1150 | 1150 | ||
| 1151 | page = alloc_pages_node(cpu_to_node(cpu), | 1151 | page = alloc_pages_node(cpu_to_node(cpu), |
| 1152 | GFP_KERNEL | __GFP_NORETRY, 0); | 1152 | GFP_KERNEL | __GFP_RETRY_MAYFAIL, 0); |
| 1153 | if (!page) | 1153 | if (!page) |
| 1154 | goto free_pages; | 1154 | goto free_pages; |
| 1155 | bpage->page = page_address(page); | 1155 | bpage->page = page_address(page); |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 091e801145c9..42b9355033d4 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
| @@ -87,7 +87,7 @@ dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) | |||
| 87 | * tracing is active, only save the comm when a trace event | 87 | * tracing is active, only save the comm when a trace event |
| 88 | * occurred. | 88 | * occurred. |
| 89 | */ | 89 | */ |
| 90 | static DEFINE_PER_CPU(bool, trace_cmdline_save); | 90 | static DEFINE_PER_CPU(bool, trace_taskinfo_save); |
| 91 | 91 | ||
| 92 | /* | 92 | /* |
| 93 | * Kill all tracing for good (never come back). | 93 | * Kill all tracing for good (never come back). |
| @@ -120,41 +120,41 @@ enum ftrace_dump_mode ftrace_dump_on_oops; | |||
| 120 | /* When set, tracing will stop when a WARN*() is hit */ | 120 | /* When set, tracing will stop when a WARN*() is hit */ |
| 121 | int __disable_trace_on_warning; | 121 | int __disable_trace_on_warning; |
| 122 | 122 | ||
| 123 | #ifdef CONFIG_TRACE_ENUM_MAP_FILE | 123 | #ifdef CONFIG_TRACE_EVAL_MAP_FILE |
| 124 | /* Map of enums to their values, for "enum_map" file */ | 124 | /* Map of enums to their values, for "eval_map" file */ |
| 125 | struct trace_enum_map_head { | 125 | struct trace_eval_map_head { |
| 126 | struct module *mod; | 126 | struct module *mod; |
| 127 | unsigned long length; | 127 | unsigned long length; |
| 128 | }; | 128 | }; |
| 129 | 129 | ||
| 130 | union trace_enum_map_item; | 130 | union trace_eval_map_item; |
| 131 | 131 | ||
| 132 | struct trace_enum_map_tail { | 132 | struct trace_eval_map_tail { |
| 133 | /* | 133 | /* |
| 134 | * "end" is first and points to NULL as it must be different | 134 | * "end" is first and points to NULL as it must be different |
| 135 | * than "mod" or "enum_string" | 135 | * than "mod" or "eval_string" |
| 136 | */ | 136 | */ |
| 137 | union trace_enum_map_item *next; | 137 | union trace_eval_map_item *next; |
| 138 | const char *end; /* points to NULL */ | 138 | const char *end; /* points to NULL */ |
| 139 | }; | 139 | }; |
| 140 | 140 | ||
| 141 | static DEFINE_MUTEX(trace_enum_mutex); | 141 | static DEFINE_MUTEX(trace_eval_mutex); |
| 142 | 142 | ||
| 143 | /* | 143 | /* |
| 144 | * The trace_enum_maps are saved in an array with two extra elements, | 144 | * The trace_eval_maps are saved in an array with two extra elements, |
| 145 | * one at the beginning, and one at the end. The beginning item contains | 145 | * one at the beginning, and one at the end. The beginning item contains |
| 146 | * the count of the saved maps (head.length), and the module they | 146 | * the count of the saved maps (head.length), and the module they |
| 147 | * belong to if not built in (head.mod). The ending item contains a | 147 | * belong to if not built in (head.mod). The ending item contains a |
| 148 | * pointer to the next array of saved enum_map items. | 148 | * pointer to the next array of saved eval_map items. |
| 149 | */ | 149 | */ |
| 150 | union trace_enum_map_item { | 150 | union trace_eval_map_item { |
| 151 | struct trace_enum_map map; | 151 | struct trace_eval_map map; |
| 152 | struct trace_enum_map_head head; | 152 | struct trace_eval_map_head head; |
| 153 | struct trace_enum_map_tail tail; | 153 | struct trace_eval_map_tail tail; |
| 154 | }; | 154 | }; |
| 155 | 155 | ||
| 156 | static union trace_enum_map_item *trace_enum_maps; | 156 | static union trace_eval_map_item *trace_eval_maps; |
| 157 | #endif /* CONFIG_TRACE_ENUM_MAP_FILE */ | 157 | #endif /* CONFIG_TRACE_EVAL_MAP_FILE */ |
| 158 | 158 | ||
| 159 | static int tracing_set_tracer(struct trace_array *tr, const char *buf); | 159 | static int tracing_set_tracer(struct trace_array *tr, const char *buf); |
| 160 | 160 | ||
| @@ -790,7 +790,7 @@ EXPORT_SYMBOL_GPL(tracing_on); | |||
| 790 | static __always_inline void | 790 | static __always_inline void |
| 791 | __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event) | 791 | __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event) |
| 792 | { | 792 | { |
| 793 | __this_cpu_write(trace_cmdline_save, true); | 793 | __this_cpu_write(trace_taskinfo_save, true); |
| 794 | 794 | ||
| 795 | /* If this is the temp buffer, we need to commit fully */ | 795 | /* If this is the temp buffer, we need to commit fully */ |
| 796 | if (this_cpu_read(trace_buffered_event) == event) { | 796 | if (this_cpu_read(trace_buffered_event) == event) { |
| @@ -1141,9 +1141,9 @@ unsigned long nsecs_to_usecs(unsigned long nsecs) | |||
| 1141 | 1141 | ||
| 1142 | /* | 1142 | /* |
| 1143 | * TRACE_FLAGS is defined as a tuple matching bit masks with strings. | 1143 | * TRACE_FLAGS is defined as a tuple matching bit masks with strings. |
| 1144 | * It uses C(a, b) where 'a' is the enum name and 'b' is the string that | 1144 | * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that |
| 1145 | * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list | 1145 | * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list |
| 1146 | * of strings in the order that the enums were defined. | 1146 | * of strings in the order that the evals (enum) were defined. |
| 1147 | */ | 1147 | */ |
| 1148 | #undef C | 1148 | #undef C |
| 1149 | #define C(a, b) b | 1149 | #define C(a, b) b |
| @@ -1709,6 +1709,8 @@ void tracing_reset_all_online_cpus(void) | |||
| 1709 | } | 1709 | } |
| 1710 | } | 1710 | } |
| 1711 | 1711 | ||
| 1712 | static int *tgid_map; | ||
| 1713 | |||
| 1712 | #define SAVED_CMDLINES_DEFAULT 128 | 1714 | #define SAVED_CMDLINES_DEFAULT 128 |
| 1713 | #define NO_CMDLINE_MAP UINT_MAX | 1715 | #define NO_CMDLINE_MAP UINT_MAX |
| 1714 | static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED; | 1716 | static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED; |
| @@ -1722,7 +1724,7 @@ struct saved_cmdlines_buffer { | |||
| 1722 | static struct saved_cmdlines_buffer *savedcmd; | 1724 | static struct saved_cmdlines_buffer *savedcmd; |
| 1723 | 1725 | ||
| 1724 | /* temporary disable recording */ | 1726 | /* temporary disable recording */ |
| 1725 | static atomic_t trace_record_cmdline_disabled __read_mostly; | 1727 | static atomic_t trace_record_taskinfo_disabled __read_mostly; |
| 1726 | 1728 | ||
| 1727 | static inline char *get_saved_cmdlines(int idx) | 1729 | static inline char *get_saved_cmdlines(int idx) |
| 1728 | { | 1730 | { |
| @@ -1910,13 +1912,15 @@ static void tracing_stop_tr(struct trace_array *tr) | |||
| 1910 | raw_spin_unlock_irqrestore(&tr->start_lock, flags); | 1912 | raw_spin_unlock_irqrestore(&tr->start_lock, flags); |
| 1911 | } | 1913 | } |
| 1912 | 1914 | ||
| 1913 | void trace_stop_cmdline_recording(void); | ||
| 1914 | |||
| 1915 | static int trace_save_cmdline(struct task_struct *tsk) | 1915 | static int trace_save_cmdline(struct task_struct *tsk) |
| 1916 | { | 1916 | { |
| 1917 | unsigned pid, idx; | 1917 | unsigned pid, idx; |
| 1918 | 1918 | ||
| 1919 | if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT)) | 1919 | /* treat recording of idle task as a success */ |
| 1920 | if (!tsk->pid) | ||
| 1921 | return 1; | ||
| 1922 | |||
| 1923 | if (unlikely(tsk->pid > PID_MAX_DEFAULT)) | ||
| 1920 | return 0; | 1924 | return 0; |
| 1921 | 1925 | ||
| 1922 | /* | 1926 | /* |
| @@ -1992,16 +1996,107 @@ void trace_find_cmdline(int pid, char comm[]) | |||
| 1992 | preempt_enable(); | 1996 | preempt_enable(); |
| 1993 | } | 1997 | } |
| 1994 | 1998 | ||
| 1995 | void tracing_record_cmdline(struct task_struct *tsk) | 1999 | int trace_find_tgid(int pid) |
| 1996 | { | 2000 | { |
| 1997 | if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on()) | 2001 | if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT)) |
| 2002 | return 0; | ||
| 2003 | |||
| 2004 | return tgid_map[pid]; | ||
| 2005 | } | ||
| 2006 | |||
| 2007 | static int trace_save_tgid(struct task_struct *tsk) | ||
| 2008 | { | ||
| 2009 | /* treat recording of idle task as a success */ | ||
| 2010 | if (!tsk->pid) | ||
| 2011 | return 1; | ||
| 2012 | |||
| 2013 | if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT)) | ||
| 2014 | return 0; | ||
| 2015 | |||
| 2016 | tgid_map[tsk->pid] = tsk->tgid; | ||
| 2017 | return 1; | ||
| 2018 | } | ||
| 2019 | |||
| 2020 | static bool tracing_record_taskinfo_skip(int flags) | ||
| 2021 | { | ||
| 2022 | if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID)))) | ||
| 2023 | return true; | ||
| 2024 | if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on()) | ||
| 2025 | return true; | ||
| 2026 | if (!__this_cpu_read(trace_taskinfo_save)) | ||
| 2027 | return true; | ||
| 2028 | return false; | ||
| 2029 | } | ||
| 2030 | |||
| 2031 | /** | ||
| 2032 | * tracing_record_taskinfo - record the task info of a task | ||
| 2033 | * | ||
| 2034 | * @task - task to record | ||
| 2035 | * @flags - TRACE_RECORD_CMDLINE for recording comm | ||
| 2036 | * - TRACE_RECORD_TGID for recording tgid | ||
| 2037 | */ | ||
| 2038 | void tracing_record_taskinfo(struct task_struct *task, int flags) | ||
| 2039 | { | ||
| 2040 | bool done; | ||
| 2041 | |||
| 2042 | if (tracing_record_taskinfo_skip(flags)) | ||
| 1998 | return; | 2043 | return; |
| 1999 | 2044 | ||
| 2000 | if (!__this_cpu_read(trace_cmdline_save)) | 2045 | /* |
| 2046 | * Record as much task information as possible. If some fail, continue | ||
| 2047 | * to try to record the others. | ||
| 2048 | */ | ||
| 2049 | done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task); | ||
| 2050 | done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task); | ||
| 2051 | |||
| 2052 | /* If recording any information failed, retry again soon. */ | ||
| 2053 | if (!done) | ||
| 2001 | return; | 2054 | return; |
| 2002 | 2055 | ||
| 2003 | if (trace_save_cmdline(tsk)) | 2056 | __this_cpu_write(trace_taskinfo_save, false); |
| 2004 | __this_cpu_write(trace_cmdline_save, false); | 2057 | } |
| 2058 | |||
| 2059 | /** | ||
| 2060 | * tracing_record_taskinfo_sched_switch - record task info for sched_switch | ||
| 2061 | * | ||
| 2062 | * @prev - previous task during sched_switch | ||
| 2063 | * @next - next task during sched_switch | ||
| 2064 | * @flags - TRACE_RECORD_CMDLINE for recording comm | ||
| 2065 | * TRACE_RECORD_TGID for recording tgid | ||
| 2066 | */ | ||
| 2067 | void tracing_record_taskinfo_sched_switch(struct task_struct *prev, | ||
| 2068 | struct task_struct *next, int flags) | ||
| 2069 | { | ||
| 2070 | bool done; | ||
| 2071 | |||
| 2072 | if (tracing_record_taskinfo_skip(flags)) | ||
| 2073 | return; | ||
| 2074 | |||
| 2075 | /* | ||
| 2076 | * Record as much task information as possible. If some fail, continue | ||
| 2077 | * to try to record the others. | ||
| 2078 | */ | ||
| 2079 | done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev); | ||
| 2080 | done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next); | ||
| 2081 | done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev); | ||
| 2082 | done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next); | ||
| 2083 | |||
| 2084 | /* If recording any information failed, retry again soon. */ | ||
| 2085 | if (!done) | ||
| 2086 | return; | ||
| 2087 | |||
| 2088 | __this_cpu_write(trace_taskinfo_save, false); | ||
| 2089 | } | ||
| 2090 | |||
| 2091 | /* Helpers to record a specific task information */ | ||
| 2092 | void tracing_record_cmdline(struct task_struct *task) | ||
| 2093 | { | ||
| 2094 | tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE); | ||
| 2095 | } | ||
| 2096 | |||
| 2097 | void tracing_record_tgid(struct task_struct *task) | ||
| 2098 | { | ||
| 2099 | tracing_record_taskinfo(task, TRACE_RECORD_TGID); | ||
| 2005 | } | 2100 | } |
| 2006 | 2101 | ||
| 2007 | /* | 2102 | /* |
| @@ -3146,7 +3241,7 @@ static void *s_start(struct seq_file *m, loff_t *pos) | |||
| 3146 | #endif | 3241 | #endif |
| 3147 | 3242 | ||
| 3148 | if (!iter->snapshot) | 3243 | if (!iter->snapshot) |
| 3149 | atomic_inc(&trace_record_cmdline_disabled); | 3244 | atomic_inc(&trace_record_taskinfo_disabled); |
| 3150 | 3245 | ||
| 3151 | if (*pos != iter->pos) { | 3246 | if (*pos != iter->pos) { |
| 3152 | iter->ent = NULL; | 3247 | iter->ent = NULL; |
| @@ -3191,7 +3286,7 @@ static void s_stop(struct seq_file *m, void *p) | |||
| 3191 | #endif | 3286 | #endif |
| 3192 | 3287 | ||
| 3193 | if (!iter->snapshot) | 3288 | if (!iter->snapshot) |
| 3194 | atomic_dec(&trace_record_cmdline_disabled); | 3289 | atomic_dec(&trace_record_taskinfo_disabled); |
| 3195 | 3290 | ||
| 3196 | trace_access_unlock(iter->cpu_file); | 3291 | trace_access_unlock(iter->cpu_file); |
| 3197 | trace_event_read_unlock(); | 3292 | trace_event_read_unlock(); |
| @@ -3248,23 +3343,38 @@ static void print_event_info(struct trace_buffer *buf, struct seq_file *m) | |||
| 3248 | seq_puts(m, "#\n"); | 3343 | seq_puts(m, "#\n"); |
| 3249 | } | 3344 | } |
| 3250 | 3345 | ||
| 3251 | static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m) | 3346 | static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m, |
| 3347 | unsigned int flags) | ||
| 3252 | { | 3348 | { |
| 3349 | bool tgid = flags & TRACE_ITER_RECORD_TGID; | ||
| 3350 | |||
| 3253 | print_event_info(buf, m); | 3351 | print_event_info(buf, m); |
| 3254 | seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n" | 3352 | |
| 3255 | "# | | | | |\n"); | 3353 | seq_printf(m, "# TASK-PID CPU# %s TIMESTAMP FUNCTION\n", tgid ? "TGID " : ""); |
| 3354 | seq_printf(m, "# | | | %s | |\n", tgid ? " | " : ""); | ||
| 3256 | } | 3355 | } |
| 3257 | 3356 | ||
| 3258 | static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m) | 3357 | static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m, |
| 3358 | unsigned int flags) | ||
| 3259 | { | 3359 | { |
| 3260 | print_event_info(buf, m); | 3360 | bool tgid = flags & TRACE_ITER_RECORD_TGID; |
| 3261 | seq_puts(m, "# _-----=> irqs-off\n" | 3361 | const char tgid_space[] = " "; |
| 3262 | "# / _----=> need-resched\n" | 3362 | const char space[] = " "; |
| 3263 | "# | / _---=> hardirq/softirq\n" | 3363 | |
| 3264 | "# || / _--=> preempt-depth\n" | 3364 | seq_printf(m, "# %s _-----=> irqs-off\n", |
| 3265 | "# ||| / delay\n" | 3365 | tgid ? tgid_space : space); |
| 3266 | "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n" | 3366 | seq_printf(m, "# %s / _----=> need-resched\n", |
| 3267 | "# | | | |||| | |\n"); | 3367 | tgid ? tgid_space : space); |
| 3368 | seq_printf(m, "# %s| / _---=> hardirq/softirq\n", | ||
| 3369 | tgid ? tgid_space : space); | ||
| 3370 | seq_printf(m, "# %s|| / _--=> preempt-depth\n", | ||
| 3371 | tgid ? tgid_space : space); | ||
| 3372 | seq_printf(m, "# %s||| / delay\n", | ||
| 3373 | tgid ? tgid_space : space); | ||
| 3374 | seq_printf(m, "# TASK-PID CPU#%s|||| TIMESTAMP FUNCTION\n", | ||
| 3375 | tgid ? " TGID " : space); | ||
| 3376 | seq_printf(m, "# | | | %s|||| | |\n", | ||
| 3377 | tgid ? " | " : space); | ||
| 3268 | } | 3378 | } |
| 3269 | 3379 | ||
| 3270 | void | 3380 | void |
| @@ -3580,9 +3690,11 @@ void trace_default_header(struct seq_file *m) | |||
| 3580 | } else { | 3690 | } else { |
| 3581 | if (!(trace_flags & TRACE_ITER_VERBOSE)) { | 3691 | if (!(trace_flags & TRACE_ITER_VERBOSE)) { |
| 3582 | if (trace_flags & TRACE_ITER_IRQ_INFO) | 3692 | if (trace_flags & TRACE_ITER_IRQ_INFO) |
| 3583 | print_func_help_header_irq(iter->trace_buffer, m); | 3693 | print_func_help_header_irq(iter->trace_buffer, |
| 3694 | m, trace_flags); | ||
| 3584 | else | 3695 | else |
| 3585 | print_func_help_header(iter->trace_buffer, m); | 3696 | print_func_help_header(iter->trace_buffer, m, |
| 3697 | trace_flags); | ||
| 3586 | } | 3698 | } |
| 3587 | } | 3699 | } |
| 3588 | } | 3700 | } |
| @@ -4238,6 +4350,18 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) | |||
| 4238 | if (mask == TRACE_ITER_RECORD_CMD) | 4350 | if (mask == TRACE_ITER_RECORD_CMD) |
| 4239 | trace_event_enable_cmd_record(enabled); | 4351 | trace_event_enable_cmd_record(enabled); |
| 4240 | 4352 | ||
| 4353 | if (mask == TRACE_ITER_RECORD_TGID) { | ||
| 4354 | if (!tgid_map) | ||
| 4355 | tgid_map = kzalloc((PID_MAX_DEFAULT + 1) * sizeof(*tgid_map), | ||
| 4356 | GFP_KERNEL); | ||
| 4357 | if (!tgid_map) { | ||
| 4358 | tr->trace_flags &= ~TRACE_ITER_RECORD_TGID; | ||
| 4359 | return -ENOMEM; | ||
| 4360 | } | ||
| 4361 | |||
| 4362 | trace_event_enable_tgid_record(enabled); | ||
| 4363 | } | ||
| 4364 | |||
| 4241 | if (mask == TRACE_ITER_EVENT_FORK) | 4365 | if (mask == TRACE_ITER_EVENT_FORK) |
| 4242 | trace_event_follow_fork(tr, enabled); | 4366 | trace_event_follow_fork(tr, enabled); |
| 4243 | 4367 | ||
| @@ -4473,7 +4597,8 @@ static const char readme_msg[] = | |||
| 4473 | #endif | 4597 | #endif |
| 4474 | #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) | 4598 | #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) |
| 4475 | "\t accepts: event-definitions (one definition per line)\n" | 4599 | "\t accepts: event-definitions (one definition per line)\n" |
| 4476 | "\t Format: p|r[:[<group>/]<event>] <place> [<args>]\n" | 4600 | "\t Format: p[:[<group>/]<event>] <place> [<args>]\n" |
| 4601 | "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n" | ||
| 4477 | "\t -:[<group>/]<event>\n" | 4602 | "\t -:[<group>/]<event>\n" |
| 4478 | #ifdef CONFIG_KPROBE_EVENTS | 4603 | #ifdef CONFIG_KPROBE_EVENTS |
| 4479 | "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n" | 4604 | "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n" |
| @@ -4597,6 +4722,76 @@ static const struct file_operations tracing_readme_fops = { | |||
| 4597 | .llseek = generic_file_llseek, | 4722 | .llseek = generic_file_llseek, |
| 4598 | }; | 4723 | }; |
| 4599 | 4724 | ||
| 4725 | static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos) | ||
| 4726 | { | ||
| 4727 | int *ptr = v; | ||
| 4728 | |||
| 4729 | if (*pos || m->count) | ||
| 4730 | ptr++; | ||
| 4731 | |||
| 4732 | (*pos)++; | ||
| 4733 | |||
| 4734 | for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) { | ||
| 4735 | if (trace_find_tgid(*ptr)) | ||
| 4736 | return ptr; | ||
| 4737 | } | ||
| 4738 | |||
| 4739 | return NULL; | ||
| 4740 | } | ||
| 4741 | |||
| 4742 | static void *saved_tgids_start(struct seq_file *m, loff_t *pos) | ||
| 4743 | { | ||
| 4744 | void *v; | ||
| 4745 | loff_t l = 0; | ||
| 4746 | |||
| 4747 | if (!tgid_map) | ||
| 4748 | return NULL; | ||
| 4749 | |||
| 4750 | v = &tgid_map[0]; | ||
| 4751 | while (l <= *pos) { | ||
| 4752 | v = saved_tgids_next(m, v, &l); | ||
| 4753 | if (!v) | ||
| 4754 | return NULL; | ||
| 4755 | } | ||
| 4756 | |||
| 4757 | return v; | ||
| 4758 | } | ||
| 4759 | |||
| 4760 | static void saved_tgids_stop(struct seq_file *m, void *v) | ||
| 4761 | { | ||
| 4762 | } | ||
| 4763 | |||
| 4764 | static int saved_tgids_show(struct seq_file *m, void *v) | ||
| 4765 | { | ||
| 4766 | int pid = (int *)v - tgid_map; | ||
| 4767 | |||
| 4768 | seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid)); | ||
| 4769 | return 0; | ||
| 4770 | } | ||
| 4771 | |||
| 4772 | static const struct seq_operations tracing_saved_tgids_seq_ops = { | ||
| 4773 | .start = saved_tgids_start, | ||
| 4774 | .stop = saved_tgids_stop, | ||
| 4775 | .next = saved_tgids_next, | ||
| 4776 | .show = saved_tgids_show, | ||
| 4777 | }; | ||
| 4778 | |||
| 4779 | static int tracing_saved_tgids_open(struct inode *inode, struct file *filp) | ||
| 4780 | { | ||
| 4781 | if (tracing_disabled) | ||
| 4782 | return -ENODEV; | ||
| 4783 | |||
| 4784 | return seq_open(filp, &tracing_saved_tgids_seq_ops); | ||
| 4785 | } | ||
| 4786 | |||
| 4787 | |||
| 4788 | static const struct file_operations tracing_saved_tgids_fops = { | ||
| 4789 | .open = tracing_saved_tgids_open, | ||
| 4790 | .read = seq_read, | ||
| 4791 | .llseek = seq_lseek, | ||
| 4792 | .release = seq_release, | ||
| 4793 | }; | ||
| 4794 | |||
| 4600 | static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos) | 4795 | static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos) |
| 4601 | { | 4796 | { |
| 4602 | unsigned int *ptr = v; | 4797 | unsigned int *ptr = v; |
| @@ -4746,11 +4941,11 @@ static const struct file_operations tracing_saved_cmdlines_size_fops = { | |||
| 4746 | .write = tracing_saved_cmdlines_size_write, | 4941 | .write = tracing_saved_cmdlines_size_write, |
| 4747 | }; | 4942 | }; |
| 4748 | 4943 | ||
| 4749 | #ifdef CONFIG_TRACE_ENUM_MAP_FILE | 4944 | #ifdef CONFIG_TRACE_EVAL_MAP_FILE |
| 4750 | static union trace_enum_map_item * | 4945 | static union trace_eval_map_item * |
| 4751 | update_enum_map(union trace_enum_map_item *ptr) | 4946 | update_eval_map(union trace_eval_map_item *ptr) |
| 4752 | { | 4947 | { |
| 4753 | if (!ptr->map.enum_string) { | 4948 | if (!ptr->map.eval_string) { |
| 4754 | if (ptr->tail.next) { | 4949 | if (ptr->tail.next) { |
| 4755 | ptr = ptr->tail.next; | 4950 | ptr = ptr->tail.next; |
| 4756 | /* Set ptr to the next real item (skip head) */ | 4951 | /* Set ptr to the next real item (skip head) */ |
| @@ -4761,15 +4956,15 @@ update_enum_map(union trace_enum_map_item *ptr) | |||
| 4761 | return ptr; | 4956 | return ptr; |
| 4762 | } | 4957 | } |
| 4763 | 4958 | ||
| 4764 | static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos) | 4959 | static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos) |
| 4765 | { | 4960 | { |
| 4766 | union trace_enum_map_item *ptr = v; | 4961 | union trace_eval_map_item *ptr = v; |
| 4767 | 4962 | ||
| 4768 | /* | 4963 | /* |
| 4769 | * Paranoid! If ptr points to end, we don't want to increment past it. | 4964 | * Paranoid! If ptr points to end, we don't want to increment past it. |
| 4770 | * This really should never happen. | 4965 | * This really should never happen. |
| 4771 | */ | 4966 | */ |
| 4772 | ptr = update_enum_map(ptr); | 4967 | ptr = update_eval_map(ptr); |
| 4773 | if (WARN_ON_ONCE(!ptr)) | 4968 | if (WARN_ON_ONCE(!ptr)) |
| 4774 | return NULL; | 4969 | return NULL; |
| 4775 | 4970 | ||
| @@ -4777,104 +4972,104 @@ static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos) | |||
| 4777 | 4972 | ||
| 4778 | (*pos)++; | 4973 | (*pos)++; |
| 4779 | 4974 | ||
| 4780 | ptr = update_enum_map(ptr); | 4975 | ptr = update_eval_map(ptr); |
| 4781 | 4976 | ||
| 4782 | return ptr; | 4977 | return ptr; |
| 4783 | } | 4978 | } |
| 4784 | 4979 | ||
| 4785 | static void *enum_map_start(struct seq_file *m, loff_t *pos) | 4980 | static void *eval_map_start(struct seq_file *m, loff_t *pos) |
| 4786 | { | 4981 | { |
| 4787 | union trace_enum_map_item *v; | 4982 | union trace_eval_map_item *v; |
| 4788 | loff_t l = 0; | 4983 | loff_t l = 0; |
| 4789 | 4984 | ||
| 4790 | mutex_lock(&trace_enum_mutex); | 4985 | mutex_lock(&trace_eval_mutex); |
| 4791 | 4986 | ||
| 4792 | v = trace_enum_maps; | 4987 | v = trace_eval_maps; |
| 4793 | if (v) | 4988 | if (v) |
| 4794 | v++; | 4989 | v++; |
| 4795 | 4990 | ||
| 4796 | while (v && l < *pos) { | 4991 | while (v && l < *pos) { |
| 4797 | v = enum_map_next(m, v, &l); | 4992 | v = eval_map_next(m, v, &l); |
| 4798 | } | 4993 | } |
| 4799 | 4994 | ||
| 4800 | return v; | 4995 | return v; |
| 4801 | } | 4996 | } |
| 4802 | 4997 | ||
| 4803 | static void enum_map_stop(struct seq_file *m, void *v) | 4998 | static void eval_map_stop(struct seq_file *m, void *v) |
| 4804 | { | 4999 | { |
| 4805 | mutex_unlock(&trace_enum_mutex); | 5000 | mutex_unlock(&trace_eval_mutex); |
| 4806 | } | 5001 | } |
| 4807 | 5002 | ||
| 4808 | static int enum_map_show(struct seq_file *m, void *v) | 5003 | static int eval_map_show(struct seq_file *m, void *v) |
| 4809 | { | 5004 | { |
| 4810 | union trace_enum_map_item *ptr = v; | 5005 | union trace_eval_map_item *ptr = v; |
| 4811 | 5006 | ||
| 4812 | seq_printf(m, "%s %ld (%s)\n", | 5007 | seq_printf(m, "%s %ld (%s)\n", |
| 4813 | ptr->map.enum_string, ptr->map.enum_value, | 5008 | ptr->map.eval_string, ptr->map.eval_value, |
| 4814 | ptr->map.system); | 5009 | ptr->map.system); |
| 4815 | 5010 | ||
| 4816 | return 0; | 5011 | return 0; |
| 4817 | } | 5012 | } |
| 4818 | 5013 | ||
| 4819 | static const struct seq_operations tracing_enum_map_seq_ops = { | 5014 | static const struct seq_operations tracing_eval_map_seq_ops = { |
| 4820 | .start = enum_map_start, | 5015 | .start = eval_map_start, |
| 4821 | .next = enum_map_next, | 5016 | .next = eval_map_next, |
| 4822 | .stop = enum_map_stop, | 5017 | .stop = eval_map_stop, |
| 4823 | .show = enum_map_show, | 5018 | .show = eval_map_show, |
| 4824 | }; | 5019 | }; |
| 4825 | 5020 | ||
| 4826 | static int tracing_enum_map_open(struct inode *inode, struct file *filp) | 5021 | static int tracing_eval_map_open(struct inode *inode, struct file *filp) |
| 4827 | { | 5022 | { |
| 4828 | if (tracing_disabled) | 5023 | if (tracing_disabled) |
| 4829 | return -ENODEV; | 5024 | return -ENODEV; |
| 4830 | 5025 | ||
| 4831 | return seq_open(filp, &tracing_enum_map_seq_ops); | 5026 | return seq_open(filp, &tracing_eval_map_seq_ops); |
| 4832 | } | 5027 | } |
| 4833 | 5028 | ||
| 4834 | static const struct file_operations tracing_enum_map_fops = { | 5029 | static const struct file_operations tracing_eval_map_fops = { |
| 4835 | .open = tracing_enum_map_open, | 5030 | .open = tracing_eval_map_open, |
| 4836 | .read = seq_read, | 5031 | .read = seq_read, |
| 4837 | .llseek = seq_lseek, | 5032 | .llseek = seq_lseek, |
| 4838 | .release = seq_release, | 5033 | .release = seq_release, |
| 4839 | }; | 5034 | }; |
| 4840 | 5035 | ||
| 4841 | static inline union trace_enum_map_item * | 5036 | static inline union trace_eval_map_item * |
| 4842 | trace_enum_jmp_to_tail(union trace_enum_map_item *ptr) | 5037 | trace_eval_jmp_to_tail(union trace_eval_map_item *ptr) |
| 4843 | { | 5038 | { |
| 4844 | /* Return tail of array given the head */ | 5039 | /* Return tail of array given the head */ |
| 4845 | return ptr + ptr->head.length + 1; | 5040 | return ptr + ptr->head.length + 1; |
| 4846 | } | 5041 | } |
| 4847 | 5042 | ||
| 4848 | static void | 5043 | static void |
| 4849 | trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start, | 5044 | trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start, |
| 4850 | int len) | 5045 | int len) |
| 4851 | { | 5046 | { |
| 4852 | struct trace_enum_map **stop; | 5047 | struct trace_eval_map **stop; |
| 4853 | struct trace_enum_map **map; | 5048 | struct trace_eval_map **map; |
| 4854 | union trace_enum_map_item *map_array; | 5049 | union trace_eval_map_item *map_array; |
| 4855 | union trace_enum_map_item *ptr; | 5050 | union trace_eval_map_item *ptr; |
| 4856 | 5051 | ||
| 4857 | stop = start + len; | 5052 | stop = start + len; |
| 4858 | 5053 | ||
| 4859 | /* | 5054 | /* |
| 4860 | * The trace_enum_maps contains the map plus a head and tail item, | 5055 | * The trace_eval_maps contains the map plus a head and tail item, |
| 4861 | * where the head holds the module and length of array, and the | 5056 | * where the head holds the module and length of array, and the |
| 4862 | * tail holds a pointer to the next list. | 5057 | * tail holds a pointer to the next list. |
| 4863 | */ | 5058 | */ |
| 4864 | map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL); | 5059 | map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL); |
| 4865 | if (!map_array) { | 5060 | if (!map_array) { |
| 4866 | pr_warn("Unable to allocate trace enum mapping\n"); | 5061 | pr_warn("Unable to allocate trace eval mapping\n"); |
| 4867 | return; | 5062 | return; |
| 4868 | } | 5063 | } |
| 4869 | 5064 | ||
| 4870 | mutex_lock(&trace_enum_mutex); | 5065 | mutex_lock(&trace_eval_mutex); |
| 4871 | 5066 | ||
| 4872 | if (!trace_enum_maps) | 5067 | if (!trace_eval_maps) |
| 4873 | trace_enum_maps = map_array; | 5068 | trace_eval_maps = map_array; |
| 4874 | else { | 5069 | else { |
| 4875 | ptr = trace_enum_maps; | 5070 | ptr = trace_eval_maps; |
| 4876 | for (;;) { | 5071 | for (;;) { |
| 4877 | ptr = trace_enum_jmp_to_tail(ptr); | 5072 | ptr = trace_eval_jmp_to_tail(ptr); |
| 4878 | if (!ptr->tail.next) | 5073 | if (!ptr->tail.next) |
| 4879 | break; | 5074 | break; |
| 4880 | ptr = ptr->tail.next; | 5075 | ptr = ptr->tail.next; |
| @@ -4892,34 +5087,34 @@ trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start, | |||
| 4892 | } | 5087 | } |
| 4893 | memset(map_array, 0, sizeof(*map_array)); | 5088 | memset(map_array, 0, sizeof(*map_array)); |
| 4894 | 5089 | ||
| 4895 | mutex_unlock(&trace_enum_mutex); | 5090 | mutex_unlock(&trace_eval_mutex); |
| 4896 | } | 5091 | } |
| 4897 | 5092 | ||
| 4898 | static void trace_create_enum_file(struct dentry *d_tracer) | 5093 | static void trace_create_eval_file(struct dentry *d_tracer) |
| 4899 | { | 5094 | { |
| 4900 | trace_create_file("enum_map", 0444, d_tracer, | 5095 | trace_create_file("eval_map", 0444, d_tracer, |
| 4901 | NULL, &tracing_enum_map_fops); | 5096 | NULL, &tracing_eval_map_fops); |
| 4902 | } | 5097 | } |
| 4903 | 5098 | ||
| 4904 | #else /* CONFIG_TRACE_ENUM_MAP_FILE */ | 5099 | #else /* CONFIG_TRACE_EVAL_MAP_FILE */ |
| 4905 | static inline void trace_create_enum_file(struct dentry *d_tracer) { } | 5100 | static inline void trace_create_eval_file(struct dentry *d_tracer) { } |
| 4906 | static inline void trace_insert_enum_map_file(struct module *mod, | 5101 | static inline void trace_insert_eval_map_file(struct module *mod, |
| 4907 | struct trace_enum_map **start, int len) { } | 5102 | struct trace_eval_map **start, int len) { } |
| 4908 | #endif /* !CONFIG_TRACE_ENUM_MAP_FILE */ | 5103 | #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */ |
| 4909 | 5104 | ||
| 4910 | static void trace_insert_enum_map(struct module *mod, | 5105 | static void trace_insert_eval_map(struct module *mod, |
| 4911 | struct trace_enum_map **start, int len) | 5106 | struct trace_eval_map **start, int len) |
| 4912 | { | 5107 | { |
| 4913 | struct trace_enum_map **map; | 5108 | struct trace_eval_map **map; |
| 4914 | 5109 | ||
| 4915 | if (len <= 0) | 5110 | if (len <= 0) |
| 4916 | return; | 5111 | return; |
| 4917 | 5112 | ||
| 4918 | map = start; | 5113 | map = start; |
| 4919 | 5114 | ||
| 4920 | trace_event_enum_update(map, len); | 5115 | trace_event_eval_update(map, len); |
| 4921 | 5116 | ||
| 4922 | trace_insert_enum_map_file(mod, start, len); | 5117 | trace_insert_eval_map_file(mod, start, len); |
| 4923 | } | 5118 | } |
| 4924 | 5119 | ||
| 4925 | static ssize_t | 5120 | static ssize_t |
| @@ -6739,33 +6934,18 @@ static const struct file_operations tracing_stats_fops = { | |||
| 6739 | 6934 | ||
| 6740 | #ifdef CONFIG_DYNAMIC_FTRACE | 6935 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 6741 | 6936 | ||
| 6742 | int __weak ftrace_arch_read_dyn_info(char *buf, int size) | ||
| 6743 | { | ||
| 6744 | return 0; | ||
| 6745 | } | ||
| 6746 | |||
| 6747 | static ssize_t | 6937 | static ssize_t |
| 6748 | tracing_read_dyn_info(struct file *filp, char __user *ubuf, | 6938 | tracing_read_dyn_info(struct file *filp, char __user *ubuf, |
| 6749 | size_t cnt, loff_t *ppos) | 6939 | size_t cnt, loff_t *ppos) |
| 6750 | { | 6940 | { |
| 6751 | static char ftrace_dyn_info_buffer[1024]; | ||
| 6752 | static DEFINE_MUTEX(dyn_info_mutex); | ||
| 6753 | unsigned long *p = filp->private_data; | 6941 | unsigned long *p = filp->private_data; |
| 6754 | char *buf = ftrace_dyn_info_buffer; | 6942 | char buf[64]; /* Not too big for a shallow stack */ |
| 6755 | int size = ARRAY_SIZE(ftrace_dyn_info_buffer); | ||
| 6756 | int r; | 6943 | int r; |
| 6757 | 6944 | ||
| 6758 | mutex_lock(&dyn_info_mutex); | 6945 | r = scnprintf(buf, 63, "%ld", *p); |
| 6759 | r = sprintf(buf, "%ld ", *p); | ||
| 6760 | |||
| 6761 | r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r); | ||
| 6762 | buf[r++] = '\n'; | 6946 | buf[r++] = '\n'; |
| 6763 | 6947 | ||
| 6764 | r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 6948 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
| 6765 | |||
| 6766 | mutex_unlock(&dyn_info_mutex); | ||
| 6767 | |||
| 6768 | return r; | ||
| 6769 | } | 6949 | } |
| 6770 | 6950 | ||
| 6771 | static const struct file_operations tracing_dyn_info_fops = { | 6951 | static const struct file_operations tracing_dyn_info_fops = { |
| @@ -7594,6 +7774,7 @@ static int instance_rmdir(const char *name) | |||
| 7594 | } | 7774 | } |
| 7595 | kfree(tr->topts); | 7775 | kfree(tr->topts); |
| 7596 | 7776 | ||
| 7777 | free_cpumask_var(tr->tracing_cpumask); | ||
| 7597 | kfree(tr->name); | 7778 | kfree(tr->name); |
| 7598 | kfree(tr); | 7779 | kfree(tr); |
| 7599 | 7780 | ||
| @@ -7737,21 +7918,21 @@ struct dentry *tracing_init_dentry(void) | |||
| 7737 | return NULL; | 7918 | return NULL; |
| 7738 | } | 7919 | } |
| 7739 | 7920 | ||
| 7740 | extern struct trace_enum_map *__start_ftrace_enum_maps[]; | 7921 | extern struct trace_eval_map *__start_ftrace_eval_maps[]; |
| 7741 | extern struct trace_enum_map *__stop_ftrace_enum_maps[]; | 7922 | extern struct trace_eval_map *__stop_ftrace_eval_maps[]; |
| 7742 | 7923 | ||
| 7743 | static void __init trace_enum_init(void) | 7924 | static void __init trace_eval_init(void) |
| 7744 | { | 7925 | { |
| 7745 | int len; | 7926 | int len; |
| 7746 | 7927 | ||
| 7747 | len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps; | 7928 | len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps; |
| 7748 | trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len); | 7929 | trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len); |
| 7749 | } | 7930 | } |
| 7750 | 7931 | ||
| 7751 | #ifdef CONFIG_MODULES | 7932 | #ifdef CONFIG_MODULES |
| 7752 | static void trace_module_add_enums(struct module *mod) | 7933 | static void trace_module_add_evals(struct module *mod) |
| 7753 | { | 7934 | { |
| 7754 | if (!mod->num_trace_enums) | 7935 | if (!mod->num_trace_evals) |
| 7755 | return; | 7936 | return; |
| 7756 | 7937 | ||
| 7757 | /* | 7938 | /* |
| @@ -7761,40 +7942,40 @@ static void trace_module_add_enums(struct module *mod) | |||
| 7761 | if (trace_module_has_bad_taint(mod)) | 7942 | if (trace_module_has_bad_taint(mod)) |
| 7762 | return; | 7943 | return; |
| 7763 | 7944 | ||
| 7764 | trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums); | 7945 | trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals); |
| 7765 | } | 7946 | } |
| 7766 | 7947 | ||
| 7767 | #ifdef CONFIG_TRACE_ENUM_MAP_FILE | 7948 | #ifdef CONFIG_TRACE_EVAL_MAP_FILE |
| 7768 | static void trace_module_remove_enums(struct module *mod) | 7949 | static void trace_module_remove_evals(struct module *mod) |
| 7769 | { | 7950 | { |
| 7770 | union trace_enum_map_item *map; | 7951 | union trace_eval_map_item *map; |
| 7771 | union trace_enum_map_item **last = &trace_enum_maps; | 7952 | union trace_eval_map_item **last = &trace_eval_maps; |
| 7772 | 7953 | ||
| 7773 | if (!mod->num_trace_enums) | 7954 | if (!mod->num_trace_evals) |
| 7774 | return; | 7955 | return; |
| 7775 | 7956 | ||
| 7776 | mutex_lock(&trace_enum_mutex); | 7957 | mutex_lock(&trace_eval_mutex); |
| 7777 | 7958 | ||
| 7778 | map = trace_enum_maps; | 7959 | map = trace_eval_maps; |
| 7779 | 7960 | ||
| 7780 | while (map) { | 7961 | while (map) { |
| 7781 | if (map->head.mod == mod) | 7962 | if (map->head.mod == mod) |
| 7782 | break; | 7963 | break; |
| 7783 | map = trace_enum_jmp_to_tail(map); | 7964 | map = trace_eval_jmp_to_tail(map); |
| 7784 | last = &map->tail.next; | 7965 | last = &map->tail.next; |
| 7785 | map = map->tail.next; | 7966 | map = map->tail.next; |
| 7786 | } | 7967 | } |
| 7787 | if (!map) | 7968 | if (!map) |
| 7788 | goto out; | 7969 | goto out; |
| 7789 | 7970 | ||
| 7790 | *last = trace_enum_jmp_to_tail(map)->tail.next; | 7971 | *last = trace_eval_jmp_to_tail(map)->tail.next; |
| 7791 | kfree(map); | 7972 | kfree(map); |
| 7792 | out: | 7973 | out: |
| 7793 | mutex_unlock(&trace_enum_mutex); | 7974 | mutex_unlock(&trace_eval_mutex); |
| 7794 | } | 7975 | } |
| 7795 | #else | 7976 | #else |
| 7796 | static inline void trace_module_remove_enums(struct module *mod) { } | 7977 | static inline void trace_module_remove_evals(struct module *mod) { } |
| 7797 | #endif /* CONFIG_TRACE_ENUM_MAP_FILE */ | 7978 | #endif /* CONFIG_TRACE_EVAL_MAP_FILE */ |
| 7798 | 7979 | ||
| 7799 | static int trace_module_notify(struct notifier_block *self, | 7980 | static int trace_module_notify(struct notifier_block *self, |
| 7800 | unsigned long val, void *data) | 7981 | unsigned long val, void *data) |
| @@ -7803,10 +7984,10 @@ static int trace_module_notify(struct notifier_block *self, | |||
| 7803 | 7984 | ||
| 7804 | switch (val) { | 7985 | switch (val) { |
| 7805 | case MODULE_STATE_COMING: | 7986 | case MODULE_STATE_COMING: |
| 7806 | trace_module_add_enums(mod); | 7987 | trace_module_add_evals(mod); |
| 7807 | break; | 7988 | break; |
| 7808 | case MODULE_STATE_GOING: | 7989 | case MODULE_STATE_GOING: |
| 7809 | trace_module_remove_enums(mod); | 7990 | trace_module_remove_evals(mod); |
| 7810 | break; | 7991 | break; |
| 7811 | } | 7992 | } |
| 7812 | 7993 | ||
| @@ -7844,9 +8025,12 @@ static __init int tracer_init_tracefs(void) | |||
| 7844 | trace_create_file("saved_cmdlines_size", 0644, d_tracer, | 8025 | trace_create_file("saved_cmdlines_size", 0644, d_tracer, |
| 7845 | NULL, &tracing_saved_cmdlines_size_fops); | 8026 | NULL, &tracing_saved_cmdlines_size_fops); |
| 7846 | 8027 | ||
| 7847 | trace_enum_init(); | 8028 | trace_create_file("saved_tgids", 0444, d_tracer, |
| 8029 | NULL, &tracing_saved_tgids_fops); | ||
| 8030 | |||
| 8031 | trace_eval_init(); | ||
| 7848 | 8032 | ||
| 7849 | trace_create_enum_file(d_tracer); | 8033 | trace_create_eval_file(d_tracer); |
| 7850 | 8034 | ||
| 7851 | #ifdef CONFIG_MODULES | 8035 | #ifdef CONFIG_MODULES |
| 7852 | register_module_notifier(&trace_module_nb); | 8036 | register_module_notifier(&trace_module_nb); |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 39fd77330aab..490ba229931d 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
| @@ -263,7 +263,10 @@ struct trace_array { | |||
| 263 | struct ftrace_ops *ops; | 263 | struct ftrace_ops *ops; |
| 264 | struct trace_pid_list __rcu *function_pids; | 264 | struct trace_pid_list __rcu *function_pids; |
| 265 | #ifdef CONFIG_DYNAMIC_FTRACE | 265 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 266 | /* All of these are protected by the ftrace_lock */ | ||
| 266 | struct list_head func_probes; | 267 | struct list_head func_probes; |
| 268 | struct list_head mod_trace; | ||
| 269 | struct list_head mod_notrace; | ||
| 267 | #endif | 270 | #endif |
| 268 | /* function tracing enabled */ | 271 | /* function tracing enabled */ |
| 269 | int function_enabled; | 272 | int function_enabled; |
| @@ -637,6 +640,9 @@ void set_graph_array(struct trace_array *tr); | |||
| 637 | 640 | ||
| 638 | void tracing_start_cmdline_record(void); | 641 | void tracing_start_cmdline_record(void); |
| 639 | void tracing_stop_cmdline_record(void); | 642 | void tracing_stop_cmdline_record(void); |
| 643 | void tracing_start_tgid_record(void); | ||
| 644 | void tracing_stop_tgid_record(void); | ||
| 645 | |||
| 640 | int register_tracer(struct tracer *type); | 646 | int register_tracer(struct tracer *type); |
| 641 | int is_tracing_stopped(void); | 647 | int is_tracing_stopped(void); |
| 642 | 648 | ||
| @@ -697,6 +703,7 @@ static inline void __trace_stack(struct trace_array *tr, unsigned long flags, | |||
| 697 | extern u64 ftrace_now(int cpu); | 703 | extern u64 ftrace_now(int cpu); |
| 698 | 704 | ||
| 699 | extern void trace_find_cmdline(int pid, char comm[]); | 705 | extern void trace_find_cmdline(int pid, char comm[]); |
| 706 | extern int trace_find_tgid(int pid); | ||
| 700 | extern void trace_event_follow_fork(struct trace_array *tr, bool enable); | 707 | extern void trace_event_follow_fork(struct trace_array *tr, bool enable); |
| 701 | 708 | ||
| 702 | #ifdef CONFIG_DYNAMIC_FTRACE | 709 | #ifdef CONFIG_DYNAMIC_FTRACE |
| @@ -761,10 +768,24 @@ enum print_line_t print_trace_line(struct trace_iterator *iter); | |||
| 761 | 768 | ||
| 762 | extern char trace_find_mark(unsigned long long duration); | 769 | extern char trace_find_mark(unsigned long long duration); |
| 763 | 770 | ||
| 771 | struct ftrace_hash; | ||
| 772 | |||
| 773 | struct ftrace_mod_load { | ||
| 774 | struct list_head list; | ||
| 775 | char *func; | ||
| 776 | char *module; | ||
| 777 | int enable; | ||
| 778 | }; | ||
| 779 | |||
| 780 | enum { | ||
| 781 | FTRACE_HASH_FL_MOD = (1 << 0), | ||
| 782 | }; | ||
| 783 | |||
| 764 | struct ftrace_hash { | 784 | struct ftrace_hash { |
| 765 | unsigned long size_bits; | 785 | unsigned long size_bits; |
| 766 | struct hlist_head *buckets; | 786 | struct hlist_head *buckets; |
| 767 | unsigned long count; | 787 | unsigned long count; |
| 788 | unsigned long flags; | ||
| 768 | struct rcu_head rcu; | 789 | struct rcu_head rcu; |
| 769 | }; | 790 | }; |
| 770 | 791 | ||
| @@ -773,7 +794,7 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip); | |||
| 773 | 794 | ||
| 774 | static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash) | 795 | static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash) |
| 775 | { | 796 | { |
| 776 | return !hash || !hash->count; | 797 | return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD)); |
| 777 | } | 798 | } |
| 778 | 799 | ||
| 779 | /* Standard output formatting function used for function return traces */ | 800 | /* Standard output formatting function used for function return traces */ |
| @@ -1107,6 +1128,7 @@ extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf, | |||
| 1107 | C(CONTEXT_INFO, "context-info"), /* Print pid/cpu/time */ \ | 1128 | C(CONTEXT_INFO, "context-info"), /* Print pid/cpu/time */ \ |
| 1108 | C(LATENCY_FMT, "latency-format"), \ | 1129 | C(LATENCY_FMT, "latency-format"), \ |
| 1109 | C(RECORD_CMD, "record-cmd"), \ | 1130 | C(RECORD_CMD, "record-cmd"), \ |
| 1131 | C(RECORD_TGID, "record-tgid"), \ | ||
| 1110 | C(OVERWRITE, "overwrite"), \ | 1132 | C(OVERWRITE, "overwrite"), \ |
| 1111 | C(STOP_ON_FREE, "disable_on_free"), \ | 1133 | C(STOP_ON_FREE, "disable_on_free"), \ |
| 1112 | C(IRQ_INFO, "irq-info"), \ | 1134 | C(IRQ_INFO, "irq-info"), \ |
| @@ -1188,9 +1210,9 @@ struct ftrace_event_field { | |||
| 1188 | struct event_filter { | 1210 | struct event_filter { |
| 1189 | int n_preds; /* Number assigned */ | 1211 | int n_preds; /* Number assigned */ |
| 1190 | int a_preds; /* allocated */ | 1212 | int a_preds; /* allocated */ |
| 1191 | struct filter_pred *preds; | 1213 | struct filter_pred __rcu *preds; |
| 1192 | struct filter_pred *root; | 1214 | struct filter_pred __rcu *root; |
| 1193 | char *filter_string; | 1215 | char *filter_string; |
| 1194 | }; | 1216 | }; |
| 1195 | 1217 | ||
| 1196 | struct event_subsystem { | 1218 | struct event_subsystem { |
| @@ -1423,6 +1445,8 @@ struct ftrace_event_field * | |||
| 1423 | trace_find_event_field(struct trace_event_call *call, char *name); | 1445 | trace_find_event_field(struct trace_event_call *call, char *name); |
| 1424 | 1446 | ||
| 1425 | extern void trace_event_enable_cmd_record(bool enable); | 1447 | extern void trace_event_enable_cmd_record(bool enable); |
| 1448 | extern void trace_event_enable_tgid_record(bool enable); | ||
| 1449 | |||
| 1426 | extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr); | 1450 | extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr); |
| 1427 | extern int event_trace_del_tracer(struct trace_array *tr); | 1451 | extern int event_trace_del_tracer(struct trace_array *tr); |
| 1428 | 1452 | ||
| @@ -1773,10 +1797,10 @@ static inline const char *get_syscall_name(int syscall) | |||
| 1773 | 1797 | ||
| 1774 | #ifdef CONFIG_EVENT_TRACING | 1798 | #ifdef CONFIG_EVENT_TRACING |
| 1775 | void trace_event_init(void); | 1799 | void trace_event_init(void); |
| 1776 | void trace_event_enum_update(struct trace_enum_map **map, int len); | 1800 | void trace_event_eval_update(struct trace_eval_map **map, int len); |
| 1777 | #else | 1801 | #else |
| 1778 | static inline void __init trace_event_init(void) { } | 1802 | static inline void __init trace_event_init(void) { } |
| 1779 | static inline void trace_event_enum_update(struct trace_enum_map **map, int len) { } | 1803 | static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { } |
| 1780 | #endif | 1804 | #endif |
| 1781 | 1805 | ||
| 1782 | extern struct trace_iterator *tracepoint_print_iter; | 1806 | extern struct trace_iterator *tracepoint_print_iter; |
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index e7973e10398c..36132f9280e6 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
| @@ -343,6 +343,28 @@ void trace_event_enable_cmd_record(bool enable) | |||
| 343 | mutex_unlock(&event_mutex); | 343 | mutex_unlock(&event_mutex); |
| 344 | } | 344 | } |
| 345 | 345 | ||
| 346 | void trace_event_enable_tgid_record(bool enable) | ||
| 347 | { | ||
| 348 | struct trace_event_file *file; | ||
| 349 | struct trace_array *tr; | ||
| 350 | |||
| 351 | mutex_lock(&event_mutex); | ||
| 352 | do_for_each_event_file(tr, file) { | ||
| 353 | if (!(file->flags & EVENT_FILE_FL_ENABLED)) | ||
| 354 | continue; | ||
| 355 | |||
| 356 | if (enable) { | ||
| 357 | tracing_start_tgid_record(); | ||
| 358 | set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags); | ||
| 359 | } else { | ||
| 360 | tracing_stop_tgid_record(); | ||
| 361 | clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, | ||
| 362 | &file->flags); | ||
| 363 | } | ||
| 364 | } while_for_each_event_file(); | ||
| 365 | mutex_unlock(&event_mutex); | ||
| 366 | } | ||
| 367 | |||
| 346 | static int __ftrace_event_enable_disable(struct trace_event_file *file, | 368 | static int __ftrace_event_enable_disable(struct trace_event_file *file, |
| 347 | int enable, int soft_disable) | 369 | int enable, int soft_disable) |
| 348 | { | 370 | { |
| @@ -381,6 +403,12 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file, | |||
| 381 | tracing_stop_cmdline_record(); | 403 | tracing_stop_cmdline_record(); |
| 382 | clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags); | 404 | clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags); |
| 383 | } | 405 | } |
| 406 | |||
| 407 | if (file->flags & EVENT_FILE_FL_RECORDED_TGID) { | ||
| 408 | tracing_stop_tgid_record(); | ||
| 409 | clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags); | ||
| 410 | } | ||
| 411 | |||
| 384 | call->class->reg(call, TRACE_REG_UNREGISTER, file); | 412 | call->class->reg(call, TRACE_REG_UNREGISTER, file); |
| 385 | } | 413 | } |
| 386 | /* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */ | 414 | /* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */ |
| @@ -407,18 +435,30 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file, | |||
| 407 | } | 435 | } |
| 408 | 436 | ||
| 409 | if (!(file->flags & EVENT_FILE_FL_ENABLED)) { | 437 | if (!(file->flags & EVENT_FILE_FL_ENABLED)) { |
| 438 | bool cmd = false, tgid = false; | ||
| 410 | 439 | ||
| 411 | /* Keep the event disabled, when going to SOFT_MODE. */ | 440 | /* Keep the event disabled, when going to SOFT_MODE. */ |
| 412 | if (soft_disable) | 441 | if (soft_disable) |
| 413 | set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags); | 442 | set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags); |
| 414 | 443 | ||
| 415 | if (tr->trace_flags & TRACE_ITER_RECORD_CMD) { | 444 | if (tr->trace_flags & TRACE_ITER_RECORD_CMD) { |
| 445 | cmd = true; | ||
| 416 | tracing_start_cmdline_record(); | 446 | tracing_start_cmdline_record(); |
| 417 | set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags); | 447 | set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags); |
| 418 | } | 448 | } |
| 449 | |||
| 450 | if (tr->trace_flags & TRACE_ITER_RECORD_TGID) { | ||
| 451 | tgid = true; | ||
| 452 | tracing_start_tgid_record(); | ||
| 453 | set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags); | ||
| 454 | } | ||
| 455 | |||
| 419 | ret = call->class->reg(call, TRACE_REG_REGISTER, file); | 456 | ret = call->class->reg(call, TRACE_REG_REGISTER, file); |
| 420 | if (ret) { | 457 | if (ret) { |
| 421 | tracing_stop_cmdline_record(); | 458 | if (cmd) |
| 459 | tracing_stop_cmdline_record(); | ||
| 460 | if (tgid) | ||
| 461 | tracing_stop_tgid_record(); | ||
| 422 | pr_info("event trace: Could not enable event " | 462 | pr_info("event trace: Could not enable event " |
| 423 | "%s\n", trace_event_name(call)); | 463 | "%s\n", trace_event_name(call)); |
| 424 | break; | 464 | break; |
| @@ -2067,18 +2107,18 @@ __register_event(struct trace_event_call *call, struct module *mod) | |||
| 2067 | return 0; | 2107 | return 0; |
| 2068 | } | 2108 | } |
| 2069 | 2109 | ||
| 2070 | static char *enum_replace(char *ptr, struct trace_enum_map *map, int len) | 2110 | static char *eval_replace(char *ptr, struct trace_eval_map *map, int len) |
| 2071 | { | 2111 | { |
| 2072 | int rlen; | 2112 | int rlen; |
| 2073 | int elen; | 2113 | int elen; |
| 2074 | 2114 | ||
| 2075 | /* Find the length of the enum value as a string */ | 2115 | /* Find the length of the eval value as a string */ |
| 2076 | elen = snprintf(ptr, 0, "%ld", map->enum_value); | 2116 | elen = snprintf(ptr, 0, "%ld", map->eval_value); |
| 2077 | /* Make sure there's enough room to replace the string with the value */ | 2117 | /* Make sure there's enough room to replace the string with the value */ |
| 2078 | if (len < elen) | 2118 | if (len < elen) |
| 2079 | return NULL; | 2119 | return NULL; |
| 2080 | 2120 | ||
| 2081 | snprintf(ptr, elen + 1, "%ld", map->enum_value); | 2121 | snprintf(ptr, elen + 1, "%ld", map->eval_value); |
| 2082 | 2122 | ||
| 2083 | /* Get the rest of the string of ptr */ | 2123 | /* Get the rest of the string of ptr */ |
| 2084 | rlen = strlen(ptr + len); | 2124 | rlen = strlen(ptr + len); |
| @@ -2090,11 +2130,11 @@ static char *enum_replace(char *ptr, struct trace_enum_map *map, int len) | |||
| 2090 | } | 2130 | } |
| 2091 | 2131 | ||
| 2092 | static void update_event_printk(struct trace_event_call *call, | 2132 | static void update_event_printk(struct trace_event_call *call, |
| 2093 | struct trace_enum_map *map) | 2133 | struct trace_eval_map *map) |
| 2094 | { | 2134 | { |
| 2095 | char *ptr; | 2135 | char *ptr; |
| 2096 | int quote = 0; | 2136 | int quote = 0; |
| 2097 | int len = strlen(map->enum_string); | 2137 | int len = strlen(map->eval_string); |
| 2098 | 2138 | ||
| 2099 | for (ptr = call->print_fmt; *ptr; ptr++) { | 2139 | for (ptr = call->print_fmt; *ptr; ptr++) { |
| 2100 | if (*ptr == '\\') { | 2140 | if (*ptr == '\\') { |
| @@ -2125,16 +2165,16 @@ static void update_event_printk(struct trace_event_call *call, | |||
| 2125 | continue; | 2165 | continue; |
| 2126 | } | 2166 | } |
| 2127 | if (isalpha(*ptr) || *ptr == '_') { | 2167 | if (isalpha(*ptr) || *ptr == '_') { |
| 2128 | if (strncmp(map->enum_string, ptr, len) == 0 && | 2168 | if (strncmp(map->eval_string, ptr, len) == 0 && |
| 2129 | !isalnum(ptr[len]) && ptr[len] != '_') { | 2169 | !isalnum(ptr[len]) && ptr[len] != '_') { |
| 2130 | ptr = enum_replace(ptr, map, len); | 2170 | ptr = eval_replace(ptr, map, len); |
| 2131 | /* Hmm, enum string smaller than value */ | 2171 | /* enum/sizeof string smaller than value */ |
| 2132 | if (WARN_ON_ONCE(!ptr)) | 2172 | if (WARN_ON_ONCE(!ptr)) |
| 2133 | return; | 2173 | return; |
| 2134 | /* | 2174 | /* |
| 2135 | * No need to decrement here, as enum_replace() | 2175 | * No need to decrement here, as eval_replace() |
| 2136 | * returns the pointer to the character passed | 2176 | * returns the pointer to the character passed |
| 2137 | * the enum, and two enums can not be placed | 2177 | * the eval, and two evals can not be placed |
| 2138 | * back to back without something in between. | 2178 | * back to back without something in between. |
| 2139 | * We can skip that something in between. | 2179 | * We can skip that something in between. |
| 2140 | */ | 2180 | */ |
| @@ -2165,7 +2205,7 @@ static void update_event_printk(struct trace_event_call *call, | |||
| 2165 | } | 2205 | } |
| 2166 | } | 2206 | } |
| 2167 | 2207 | ||
| 2168 | void trace_event_enum_update(struct trace_enum_map **map, int len) | 2208 | void trace_event_eval_update(struct trace_eval_map **map, int len) |
| 2169 | { | 2209 | { |
| 2170 | struct trace_event_call *call, *p; | 2210 | struct trace_event_call *call, *p; |
| 2171 | const char *last_system = NULL; | 2211 | const char *last_system = NULL; |
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 2c5221819be5..c9b5aa10fbf9 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
| @@ -598,6 +598,14 @@ static struct notifier_block trace_kprobe_module_nb = { | |||
| 598 | .priority = 1 /* Invoked after kprobe module callback */ | 598 | .priority = 1 /* Invoked after kprobe module callback */ |
| 599 | }; | 599 | }; |
| 600 | 600 | ||
| 601 | /* Convert certain expected symbols into '_' when generating event names */ | ||
| 602 | static inline void sanitize_event_name(char *name) | ||
| 603 | { | ||
| 604 | while (*name++ != '\0') | ||
| 605 | if (*name == ':' || *name == '.') | ||
| 606 | *name = '_'; | ||
| 607 | } | ||
| 608 | |||
| 601 | static int create_trace_kprobe(int argc, char **argv) | 609 | static int create_trace_kprobe(int argc, char **argv) |
| 602 | { | 610 | { |
| 603 | /* | 611 | /* |
| @@ -736,6 +744,7 @@ static int create_trace_kprobe(int argc, char **argv) | |||
| 736 | else | 744 | else |
| 737 | snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p", | 745 | snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p", |
| 738 | is_return ? 'r' : 'p', addr); | 746 | is_return ? 'r' : 'p', addr); |
| 747 | sanitize_event_name(buf); | ||
| 739 | event = buf; | 748 | event = buf; |
| 740 | } | 749 | } |
| 741 | tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive, | 750 | tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive, |
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 08f9bab8089e..bac629af2285 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c | |||
| @@ -340,31 +340,41 @@ static inline const char *kretprobed(const char *name) | |||
| 340 | static void | 340 | static void |
| 341 | seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address) | 341 | seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address) |
| 342 | { | 342 | { |
| 343 | #ifdef CONFIG_KALLSYMS | ||
| 344 | char str[KSYM_SYMBOL_LEN]; | 343 | char str[KSYM_SYMBOL_LEN]; |
| 344 | #ifdef CONFIG_KALLSYMS | ||
| 345 | const char *name; | 345 | const char *name; |
| 346 | 346 | ||
| 347 | kallsyms_lookup(address, NULL, NULL, NULL, str); | 347 | kallsyms_lookup(address, NULL, NULL, NULL, str); |
| 348 | 348 | ||
| 349 | name = kretprobed(str); | 349 | name = kretprobed(str); |
| 350 | 350 | ||
| 351 | trace_seq_printf(s, fmt, name); | 351 | if (name && strlen(name)) { |
| 352 | trace_seq_printf(s, fmt, name); | ||
| 353 | return; | ||
| 354 | } | ||
| 352 | #endif | 355 | #endif |
| 356 | snprintf(str, KSYM_SYMBOL_LEN, "0x%08lx", address); | ||
| 357 | trace_seq_printf(s, fmt, str); | ||
| 353 | } | 358 | } |
| 354 | 359 | ||
| 355 | static void | 360 | static void |
| 356 | seq_print_sym_offset(struct trace_seq *s, const char *fmt, | 361 | seq_print_sym_offset(struct trace_seq *s, const char *fmt, |
| 357 | unsigned long address) | 362 | unsigned long address) |
| 358 | { | 363 | { |
| 359 | #ifdef CONFIG_KALLSYMS | ||
| 360 | char str[KSYM_SYMBOL_LEN]; | 364 | char str[KSYM_SYMBOL_LEN]; |
| 365 | #ifdef CONFIG_KALLSYMS | ||
| 361 | const char *name; | 366 | const char *name; |
| 362 | 367 | ||
| 363 | sprint_symbol(str, address); | 368 | sprint_symbol(str, address); |
| 364 | name = kretprobed(str); | 369 | name = kretprobed(str); |
| 365 | 370 | ||
| 366 | trace_seq_printf(s, fmt, name); | 371 | if (name && strlen(name)) { |
| 372 | trace_seq_printf(s, fmt, name); | ||
| 373 | return; | ||
| 374 | } | ||
| 367 | #endif | 375 | #endif |
| 376 | snprintf(str, KSYM_SYMBOL_LEN, "0x%08lx", address); | ||
| 377 | trace_seq_printf(s, fmt, str); | ||
| 368 | } | 378 | } |
| 369 | 379 | ||
| 370 | #ifndef CONFIG_64BIT | 380 | #ifndef CONFIG_64BIT |
| @@ -587,6 +597,15 @@ int trace_print_context(struct trace_iterator *iter) | |||
| 587 | trace_seq_printf(s, "%16s-%-5d [%03d] ", | 597 | trace_seq_printf(s, "%16s-%-5d [%03d] ", |
| 588 | comm, entry->pid, iter->cpu); | 598 | comm, entry->pid, iter->cpu); |
| 589 | 599 | ||
| 600 | if (tr->trace_flags & TRACE_ITER_RECORD_TGID) { | ||
| 601 | unsigned int tgid = trace_find_tgid(entry->pid); | ||
| 602 | |||
| 603 | if (!tgid) | ||
| 604 | trace_seq_printf(s, "(-----) "); | ||
| 605 | else | ||
| 606 | trace_seq_printf(s, "(%5d) ", tgid); | ||
| 607 | } | ||
| 608 | |||
| 590 | if (tr->trace_flags & TRACE_ITER_IRQ_INFO) | 609 | if (tr->trace_flags & TRACE_ITER_IRQ_INFO) |
| 591 | trace_print_lat_fmt(s, entry); | 610 | trace_print_lat_fmt(s, entry); |
| 592 | 611 | ||
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c index 4c896a0101bd..b341c02730be 100644 --- a/kernel/trace/trace_sched_switch.c +++ b/kernel/trace/trace_sched_switch.c | |||
| @@ -12,27 +12,38 @@ | |||
| 12 | 12 | ||
| 13 | #include "trace.h" | 13 | #include "trace.h" |
| 14 | 14 | ||
| 15 | static int sched_ref; | 15 | #define RECORD_CMDLINE 1 |
| 16 | #define RECORD_TGID 2 | ||
| 17 | |||
| 18 | static int sched_cmdline_ref; | ||
| 19 | static int sched_tgid_ref; | ||
| 16 | static DEFINE_MUTEX(sched_register_mutex); | 20 | static DEFINE_MUTEX(sched_register_mutex); |
| 17 | 21 | ||
| 18 | static void | 22 | static void |
| 19 | probe_sched_switch(void *ignore, bool preempt, | 23 | probe_sched_switch(void *ignore, bool preempt, |
| 20 | struct task_struct *prev, struct task_struct *next) | 24 | struct task_struct *prev, struct task_struct *next) |
| 21 | { | 25 | { |
| 22 | if (unlikely(!sched_ref)) | 26 | int flags; |
| 23 | return; | 27 | |
| 28 | flags = (RECORD_TGID * !!sched_tgid_ref) + | ||
| 29 | (RECORD_CMDLINE * !!sched_cmdline_ref); | ||
| 24 | 30 | ||
| 25 | tracing_record_cmdline(prev); | 31 | if (!flags) |
| 26 | tracing_record_cmdline(next); | 32 | return; |
| 33 | tracing_record_taskinfo_sched_switch(prev, next, flags); | ||
| 27 | } | 34 | } |
| 28 | 35 | ||
| 29 | static void | 36 | static void |
| 30 | probe_sched_wakeup(void *ignore, struct task_struct *wakee) | 37 | probe_sched_wakeup(void *ignore, struct task_struct *wakee) |
| 31 | { | 38 | { |
| 32 | if (unlikely(!sched_ref)) | 39 | int flags; |
| 33 | return; | 40 | |
| 41 | flags = (RECORD_TGID * !!sched_tgid_ref) + | ||
| 42 | (RECORD_CMDLINE * !!sched_cmdline_ref); | ||
| 34 | 43 | ||
| 35 | tracing_record_cmdline(current); | 44 | if (!flags) |
| 45 | return; | ||
| 46 | tracing_record_taskinfo(current, flags); | ||
| 36 | } | 47 | } |
| 37 | 48 | ||
| 38 | static int tracing_sched_register(void) | 49 | static int tracing_sched_register(void) |
| @@ -75,28 +86,61 @@ static void tracing_sched_unregister(void) | |||
| 75 | unregister_trace_sched_wakeup(probe_sched_wakeup, NULL); | 86 | unregister_trace_sched_wakeup(probe_sched_wakeup, NULL); |
| 76 | } | 87 | } |
| 77 | 88 | ||
| 78 | static void tracing_start_sched_switch(void) | 89 | static void tracing_start_sched_switch(int ops) |
| 79 | { | 90 | { |
| 91 | bool sched_register = (!sched_cmdline_ref && !sched_tgid_ref); | ||
| 80 | mutex_lock(&sched_register_mutex); | 92 | mutex_lock(&sched_register_mutex); |
| 81 | if (!(sched_ref++)) | 93 | |
| 94 | switch (ops) { | ||
| 95 | case RECORD_CMDLINE: | ||
| 96 | sched_cmdline_ref++; | ||
| 97 | break; | ||
| 98 | |||
| 99 | case RECORD_TGID: | ||
| 100 | sched_tgid_ref++; | ||
| 101 | break; | ||
| 102 | } | ||
| 103 | |||
| 104 | if (sched_register && (sched_cmdline_ref || sched_tgid_ref)) | ||
| 82 | tracing_sched_register(); | 105 | tracing_sched_register(); |
| 83 | mutex_unlock(&sched_register_mutex); | 106 | mutex_unlock(&sched_register_mutex); |
| 84 | } | 107 | } |
| 85 | 108 | ||
| 86 | static void tracing_stop_sched_switch(void) | 109 | static void tracing_stop_sched_switch(int ops) |
| 87 | { | 110 | { |
| 88 | mutex_lock(&sched_register_mutex); | 111 | mutex_lock(&sched_register_mutex); |
| 89 | if (!(--sched_ref)) | 112 | |
| 113 | switch (ops) { | ||
| 114 | case RECORD_CMDLINE: | ||
| 115 | sched_cmdline_ref--; | ||
| 116 | break; | ||
| 117 | |||
| 118 | case RECORD_TGID: | ||
| 119 | sched_tgid_ref--; | ||
| 120 | break; | ||
| 121 | } | ||
| 122 | |||
| 123 | if (!sched_cmdline_ref && !sched_tgid_ref) | ||
| 90 | tracing_sched_unregister(); | 124 | tracing_sched_unregister(); |
| 91 | mutex_unlock(&sched_register_mutex); | 125 | mutex_unlock(&sched_register_mutex); |
| 92 | } | 126 | } |
| 93 | 127 | ||
| 94 | void tracing_start_cmdline_record(void) | 128 | void tracing_start_cmdline_record(void) |
| 95 | { | 129 | { |
| 96 | tracing_start_sched_switch(); | 130 | tracing_start_sched_switch(RECORD_CMDLINE); |
| 97 | } | 131 | } |
| 98 | 132 | ||
| 99 | void tracing_stop_cmdline_record(void) | 133 | void tracing_stop_cmdline_record(void) |
| 100 | { | 134 | { |
| 101 | tracing_stop_sched_switch(); | 135 | tracing_stop_sched_switch(RECORD_CMDLINE); |
| 136 | } | ||
| 137 | |||
| 138 | void tracing_start_tgid_record(void) | ||
| 139 | { | ||
| 140 | tracing_start_sched_switch(RECORD_TGID); | ||
| 141 | } | ||
| 142 | |||
| 143 | void tracing_stop_tgid_record(void) | ||
| 144 | { | ||
| 145 | tracing_stop_sched_switch(RECORD_TGID); | ||
| 102 | } | 146 | } |
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index b4a751e8f9d6..a4df67cbc711 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
| @@ -406,6 +406,8 @@ static const struct file_operations stack_trace_fops = { | |||
| 406 | .release = seq_release, | 406 | .release = seq_release, |
| 407 | }; | 407 | }; |
| 408 | 408 | ||
| 409 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
| 410 | |||
| 409 | static int | 411 | static int |
| 410 | stack_trace_filter_open(struct inode *inode, struct file *file) | 412 | stack_trace_filter_open(struct inode *inode, struct file *file) |
| 411 | { | 413 | { |
| @@ -423,6 +425,8 @@ static const struct file_operations stack_trace_filter_fops = { | |||
| 423 | .release = ftrace_regex_release, | 425 | .release = ftrace_regex_release, |
| 424 | }; | 426 | }; |
| 425 | 427 | ||
| 428 | #endif /* CONFIG_DYNAMIC_FTRACE */ | ||
| 429 | |||
| 426 | int | 430 | int |
| 427 | stack_trace_sysctl(struct ctl_table *table, int write, | 431 | stack_trace_sysctl(struct ctl_table *table, int write, |
| 428 | void __user *buffer, size_t *lenp, | 432 | void __user *buffer, size_t *lenp, |
| @@ -477,8 +481,10 @@ static __init int stack_trace_init(void) | |||
| 477 | trace_create_file("stack_trace", 0444, d_tracer, | 481 | trace_create_file("stack_trace", 0444, d_tracer, |
| 478 | NULL, &stack_trace_fops); | 482 | NULL, &stack_trace_fops); |
| 479 | 483 | ||
| 484 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
| 480 | trace_create_file("stack_trace_filter", 0444, d_tracer, | 485 | trace_create_file("stack_trace_filter", 0444, d_tracer, |
| 481 | &trace_ops, &stack_trace_filter_fops); | 486 | &trace_ops, &stack_trace_filter_fops); |
| 487 | #endif | ||
| 482 | 488 | ||
| 483 | if (stack_trace_filter_buf[0]) | 489 | if (stack_trace_filter_buf[0]) |
| 484 | ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1); | 490 | ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1); |
