Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--   kernel/trace/trace.c | 309
1 file changed, 211 insertions(+), 98 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 0f0881676dc9..5c75deeefe30 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -169,10 +169,11 @@ static struct trace_array global_trace;
 
 static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
 
-int filter_current_check_discard(struct ftrace_event_call *call, void *rec,
+int filter_current_check_discard(struct ring_buffer *buffer,
+                                 struct ftrace_event_call *call, void *rec,
                                  struct ring_buffer_event *event)
 {
-        return filter_check_discard(call, rec, global_trace.buffer, event);
+        return filter_check_discard(call, rec, buffer, event);
 }
 EXPORT_SYMBOL_GPL(filter_current_check_discard);
 
@@ -263,6 +264,9 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
         TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
         TRACE_ITER_GRAPH_TIME;
 
+static int trace_stop_count;
+static DEFINE_SPINLOCK(tracing_start_lock);
+
 /**
  * trace_wake_up - wake up tasks waiting for trace input
  *
@@ -407,19 +411,22 @@ static void
 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
         struct trace_array_cpu *data = tr->data[cpu];
+        struct trace_array_cpu *max_data = tr->data[cpu];
 
         max_tr.cpu = cpu;
         max_tr.time_start = data->preempt_timestamp;
 
-        data = max_tr.data[cpu];
-        data->saved_latency = tracing_max_latency;
+        max_data = max_tr.data[cpu];
+        max_data->saved_latency = tracing_max_latency;
+        max_data->critical_start = data->critical_start;
+        max_data->critical_end = data->critical_end;
 
         memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
-        data->pid = tsk->pid;
-        data->uid = task_uid(tsk);
-        data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
-        data->policy = tsk->policy;
-        data->rt_priority = tsk->rt_priority;
+        max_data->pid = tsk->pid;
+        max_data->uid = task_uid(tsk);
+        max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
+        max_data->policy = tsk->policy;
+        max_data->rt_priority = tsk->rt_priority;
 
         /* record this tasks comm */
         tracing_record_cmdline(tsk);
@@ -439,16 +446,15 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
         struct ring_buffer *buf = tr->buffer;
 
+        if (trace_stop_count)
+                return;
+
         WARN_ON_ONCE(!irqs_disabled());
         __raw_spin_lock(&ftrace_max_lock);
 
         tr->buffer = max_tr.buffer;
         max_tr.buffer = buf;
 
-        ftrace_disable_cpu();
-        ring_buffer_reset(tr->buffer);
-        ftrace_enable_cpu();
-
         __update_max_tr(tr, tsk, cpu);
         __raw_spin_unlock(&ftrace_max_lock);
 }
@@ -466,17 +472,30 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
         int ret;
 
+        if (trace_stop_count)
+                return;
+
         WARN_ON_ONCE(!irqs_disabled());
         __raw_spin_lock(&ftrace_max_lock);
 
         ftrace_disable_cpu();
 
-        ring_buffer_reset(max_tr.buffer);
         ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
 
+        if (ret == -EBUSY) {
+                /*
+                 * We failed to swap the buffer due to a commit taking
+                 * place on this CPU. We fail to record, but we reset
+                 * the max trace buffer (no one writes directly to it)
+                 * and flag that it failed.
+                 */
+                trace_array_printk(&max_tr, _THIS_IP_,
+                        "Failed to swap buffers due to commit in progress\n");
+        }
+
         ftrace_enable_cpu();
 
-        WARN_ON_ONCE(ret && ret != -EAGAIN);
+        WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
 
         __update_max_tr(tr, tsk, cpu);
         __raw_spin_unlock(&ftrace_max_lock);
@@ -538,7 +557,6 @@ __acquires(kernel_lock)
         if (type->selftest && !tracing_selftest_disabled) {
                 struct tracer *saved_tracer = current_trace;
                 struct trace_array *tr = &global_trace;
-                int i;
 
                 /*
                  * Run a selftest on this tracer.
@@ -547,8 +565,7 @@ __acquires(kernel_lock)
                  * internal tracing to verify that everything is in order.
                  * If we fail, we do not register this tracer.
                  */
-                for_each_tracing_cpu(i)
-                        tracing_reset(tr, i);
+                tracing_reset_online_cpus(tr);
 
                 current_trace = type;
                 /* the test is responsible for initializing and enabling */
@@ -561,8 +578,7 @@ __acquires(kernel_lock)
                         goto out;
                 }
                 /* Only reset on passing, to avoid touching corrupted buffers */
-                for_each_tracing_cpu(i)
-                        tracing_reset(tr, i);
+                tracing_reset_online_cpus(tr);
 
                 printk(KERN_CONT "PASSED\n");
         }
@@ -637,21 +653,42 @@ void unregister_tracer(struct tracer *type)
         mutex_unlock(&trace_types_lock);
 }
 
-void tracing_reset(struct trace_array *tr, int cpu)
+static void __tracing_reset(struct trace_array *tr, int cpu)
 {
         ftrace_disable_cpu();
         ring_buffer_reset_cpu(tr->buffer, cpu);
         ftrace_enable_cpu();
 }
 
+void tracing_reset(struct trace_array *tr, int cpu)
+{
+        struct ring_buffer *buffer = tr->buffer;
+
+        ring_buffer_record_disable(buffer);
+
+        /* Make sure all commits have finished */
+        synchronize_sched();
+        __tracing_reset(tr, cpu);
+
+        ring_buffer_record_enable(buffer);
+}
+
 void tracing_reset_online_cpus(struct trace_array *tr)
 {
+        struct ring_buffer *buffer = tr->buffer;
         int cpu;
 
+        ring_buffer_record_disable(buffer);
+
+        /* Make sure all commits have finished */
+        synchronize_sched();
+
         tr->time_start = ftrace_now(tr->cpu);
 
         for_each_online_cpu(cpu)
-                tracing_reset(tr, cpu);
+                __tracing_reset(tr, cpu);
+
+        ring_buffer_record_enable(buffer);
 }
 
 void tracing_reset_current(int cpu)
@@ -682,9 +719,6 @@ static void trace_init_cmdlines(void)
         cmdline_idx = 0;
 }
 
-static int trace_stop_count;
-static DEFINE_SPINLOCK(tracing_start_lock);
-
 /**
  * ftrace_off_permanent - disable all ftrace code permanently
  *
@@ -865,14 +899,15 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 }
 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
 
-struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
-                                                    int type,
-                                                    unsigned long len,
-                                                    unsigned long flags, int pc)
+struct ring_buffer_event *
+trace_buffer_lock_reserve(struct ring_buffer *buffer,
+                          int type,
+                          unsigned long len,
+                          unsigned long flags, int pc)
 {
         struct ring_buffer_event *event;
 
-        event = ring_buffer_lock_reserve(tr->buffer, len);
+        event = ring_buffer_lock_reserve(buffer, len);
         if (event != NULL) {
                 struct trace_entry *ent = ring_buffer_event_data(event);
 
@@ -883,53 +918,59 @@ struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
         return event;
 }
 
-static inline void __trace_buffer_unlock_commit(struct trace_array *tr,
-                                                struct ring_buffer_event *event,
-                                                unsigned long flags, int pc,
-                                                int wake)
+static inline void
+__trace_buffer_unlock_commit(struct ring_buffer *buffer,
+                             struct ring_buffer_event *event,
+                             unsigned long flags, int pc,
+                             int wake)
 {
-        ring_buffer_unlock_commit(tr->buffer, event);
+        ring_buffer_unlock_commit(buffer, event);
 
-        ftrace_trace_stack(tr, flags, 6, pc);
-        ftrace_trace_userstack(tr, flags, pc);
+        ftrace_trace_stack(buffer, flags, 6, pc);
+        ftrace_trace_userstack(buffer, flags, pc);
 
         if (wake)
                 trace_wake_up();
 }
 
-void trace_buffer_unlock_commit(struct trace_array *tr,
+void trace_buffer_unlock_commit(struct ring_buffer *buffer,
                                 struct ring_buffer_event *event,
                                 unsigned long flags, int pc)
 {
-        __trace_buffer_unlock_commit(tr, event, flags, pc, 1);
+        __trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
 }
 
 struct ring_buffer_event *
-trace_current_buffer_lock_reserve(int type, unsigned long len,
+trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
+                                  int type, unsigned long len,
                                   unsigned long flags, int pc)
 {
-        return trace_buffer_lock_reserve(&global_trace,
+        *current_rb = global_trace.buffer;
+        return trace_buffer_lock_reserve(*current_rb,
                                          type, len, flags, pc);
 }
 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
 
-void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
+void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
+                                        struct ring_buffer_event *event,
                                         unsigned long flags, int pc)
 {
-        __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 1);
+        __trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
 }
 EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
 
-void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
-                                       unsigned long flags, int pc)
+void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
+                                       struct ring_buffer_event *event,
+                                       unsigned long flags, int pc)
 {
-        __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 0);
+        __trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
 }
 EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
 
-void trace_current_buffer_discard_commit(struct ring_buffer_event *event)
+void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
+                                         struct ring_buffer_event *event)
 {
-        ring_buffer_discard_commit(global_trace.buffer, event);
+        ring_buffer_discard_commit(buffer, event);
 }
 EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
 
@@ -939,6 +980,7 @@ trace_function(struct trace_array *tr,
                int pc)
 {
         struct ftrace_event_call *call = &event_function;
+        struct ring_buffer *buffer = tr->buffer;
         struct ring_buffer_event *event;
         struct ftrace_entry *entry;
 
@@ -946,7 +988,7 @@ trace_function(struct trace_array *tr,
         if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
                 return;
 
-        event = trace_buffer_lock_reserve(tr, TRACE_FN, sizeof(*entry),
+        event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
                                           flags, pc);
         if (!event)
                 return;
@@ -954,8 +996,8 @@ trace_function(struct trace_array *tr,
         entry->ip = ip;
         entry->parent_ip = parent_ip;
 
-        if (!filter_check_discard(call, entry, tr->buffer, event))
-                ring_buffer_unlock_commit(tr->buffer, event);
+        if (!filter_check_discard(call, entry, buffer, event))
+                ring_buffer_unlock_commit(buffer, event);
 }
 
 void
@@ -968,7 +1010,7 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
 }
 
 #ifdef CONFIG_STACKTRACE
-static void __ftrace_trace_stack(struct trace_array *tr,
+static void __ftrace_trace_stack(struct ring_buffer *buffer,
                                  unsigned long flags,
                                  int skip, int pc)
 {
@@ -977,7 +1019,7 @@ static void __ftrace_trace_stack(struct trace_array *tr,
         struct stack_entry *entry;
         struct stack_trace trace;
 
-        event = trace_buffer_lock_reserve(tr, TRACE_STACK,
+        event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
                                           sizeof(*entry), flags, pc);
         if (!event)
                 return;
@@ -990,26 +1032,27 @@ static void __ftrace_trace_stack(struct trace_array *tr,
         trace.entries = entry->caller;
 
         save_stack_trace(&trace);
-        if (!filter_check_discard(call, entry, tr->buffer, event))
-                ring_buffer_unlock_commit(tr->buffer, event);
+        if (!filter_check_discard(call, entry, buffer, event))
+                ring_buffer_unlock_commit(buffer, event);
 }
 
-void ftrace_trace_stack(struct trace_array *tr, unsigned long flags, int skip,
-                        int pc)
+void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
+                        int skip, int pc)
 {
         if (!(trace_flags & TRACE_ITER_STACKTRACE))
                 return;
 
-        __ftrace_trace_stack(tr, flags, skip, pc);
+        __ftrace_trace_stack(buffer, flags, skip, pc);
 }
 
 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
                    int pc)
 {
-        __ftrace_trace_stack(tr, flags, skip, pc);
+        __ftrace_trace_stack(tr->buffer, flags, skip, pc);
 }
 
-void ftrace_trace_userstack(struct trace_array *tr, unsigned long flags, int pc)
+void
+ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 {
         struct ftrace_event_call *call = &event_user_stack;
         struct ring_buffer_event *event;
@@ -1019,7 +1062,7 @@ void ftrace_trace_userstack(struct trace_array *tr, unsigned long flags, int pc)
         if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
                 return;
 
-        event = trace_buffer_lock_reserve(tr, TRACE_USER_STACK,
+        event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
                                           sizeof(*entry), flags, pc);
         if (!event)
                 return;
@@ -1033,8 +1076,8 @@ void ftrace_trace_userstack(struct trace_array *tr, unsigned long flags, int pc)
         trace.entries = entry->caller;
 
         save_stack_trace_user(&trace);
-        if (!filter_check_discard(call, entry, tr->buffer, event))
-                ring_buffer_unlock_commit(tr->buffer, event);
+        if (!filter_check_discard(call, entry, buffer, event))
+                ring_buffer_unlock_commit(buffer, event);
 }
 
 #ifdef UNUSED
@@ -1053,9 +1096,10 @@ ftrace_trace_special(void *__tr,
 {
         struct ring_buffer_event *event;
         struct trace_array *tr = __tr;
+        struct ring_buffer *buffer = tr->buffer;
         struct special_entry *entry;
 
-        event = trace_buffer_lock_reserve(tr, TRACE_SPECIAL,
+        event = trace_buffer_lock_reserve(buffer, TRACE_SPECIAL,
                                           sizeof(*entry), 0, pc);
         if (!event)
                 return;
@@ -1063,7 +1107,7 @@ ftrace_trace_special(void *__tr,
         entry->arg1 = arg1;
         entry->arg2 = arg2;
         entry->arg3 = arg3;
-        trace_buffer_unlock_commit(tr, event, 0, pc);
+        trace_buffer_unlock_commit(buffer, event, 0, pc);
 }
 
 void
@@ -1109,6 +1153,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 
         struct ftrace_event_call *call = &event_bprint;
         struct ring_buffer_event *event;
+        struct ring_buffer *buffer;
         struct trace_array *tr = &global_trace;
         struct trace_array_cpu *data;
         struct bprint_entry *entry;
@@ -1141,7 +1186,9 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
                 goto out_unlock;
 
         size = sizeof(*entry) + sizeof(u32) * len;
-        event = trace_buffer_lock_reserve(tr, TRACE_BPRINT, size, flags, pc);
+        buffer = tr->buffer;
+        event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
+                                          flags, pc);
         if (!event)
                 goto out_unlock;
         entry = ring_buffer_event_data(event);
@@ -1149,8 +1196,8 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
         entry->fmt = fmt;
 
         memcpy(entry->buf, trace_buf, sizeof(u32) * len);
-        if (!filter_check_discard(call, entry, tr->buffer, event))
-                ring_buffer_unlock_commit(tr->buffer, event);
+        if (!filter_check_discard(call, entry, buffer, event))
+                ring_buffer_unlock_commit(buffer, event);
 
 out_unlock:
         __raw_spin_unlock(&trace_buf_lock);
@@ -1165,14 +1212,30 @@ out:
 }
 EXPORT_SYMBOL_GPL(trace_vbprintk);
 
-int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
+int trace_array_printk(struct trace_array *tr,
+                       unsigned long ip, const char *fmt, ...)
+{
+        int ret;
+        va_list ap;
+
+        if (!(trace_flags & TRACE_ITER_PRINTK))
+                return 0;
+
+        va_start(ap, fmt);
+        ret = trace_array_vprintk(tr, ip, fmt, ap);
+        va_end(ap);
+        return ret;
+}
+
+int trace_array_vprintk(struct trace_array *tr,
+                        unsigned long ip, const char *fmt, va_list args)
 {
         static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
         static char trace_buf[TRACE_BUF_SIZE];
 
         struct ftrace_event_call *call = &event_print;
         struct ring_buffer_event *event;
-        struct trace_array *tr = &global_trace;
+        struct ring_buffer *buffer;
         struct trace_array_cpu *data;
         int cpu, len = 0, size, pc;
         struct print_entry *entry;
@@ -1200,7 +1263,9 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
         trace_buf[len] = 0;
 
         size = sizeof(*entry) + len + 1;
-        event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, irq_flags, pc);
+        buffer = tr->buffer;
+        event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
+                                          irq_flags, pc);
         if (!event)
                 goto out_unlock;
         entry = ring_buffer_event_data(event);
@@ -1208,8 +1273,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 
         memcpy(&entry->buf, trace_buf, len);
         entry->buf[len] = 0;
-        if (!filter_check_discard(call, entry, tr->buffer, event))
-                ring_buffer_unlock_commit(tr->buffer, event);
+        if (!filter_check_discard(call, entry, buffer, event))
+                ring_buffer_unlock_commit(buffer, event);
 
 out_unlock:
         __raw_spin_unlock(&trace_buf_lock);
@@ -1221,6 +1286,11 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 
         return len;
 }
+
+int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
+{
+        return trace_array_printk(&global_trace, ip, fmt, args);
+}
 EXPORT_SYMBOL_GPL(trace_vprintk);
 
 enum trace_file_type {
@@ -1360,6 +1430,37 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
         return ent;
 }
 
+static void tracing_iter_reset(struct trace_iterator *iter, int cpu)
+{
+        struct trace_array *tr = iter->tr;
+        struct ring_buffer_event *event;
+        struct ring_buffer_iter *buf_iter;
+        unsigned long entries = 0;
+        u64 ts;
+
+        tr->data[cpu]->skipped_entries = 0;
+
+        if (!iter->buffer_iter[cpu])
+                return;
+
+        buf_iter = iter->buffer_iter[cpu];
+        ring_buffer_iter_reset(buf_iter);
+
+        /*
+         * We could have the case with the max latency tracers
+         * that a reset never took place on a cpu. This is evident
+         * by the timestamp being before the start of the buffer.
+         */
+        while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
+                if (ts >= iter->tr->time_start)
+                        break;
+                entries++;
+                ring_buffer_read(buf_iter, NULL);
+        }
+
+        tr->data[cpu]->skipped_entries = entries;
+}
+
 /*
  * No necessary locking here. The worst thing which can
  * happen is loosing events consumed at the same time
@@ -1398,10 +1499,9 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 
         if (cpu_file == TRACE_PIPE_ALL_CPU) {
                 for_each_tracing_cpu(cpu)
-                        ring_buffer_iter_reset(iter->buffer_iter[cpu]);
+                        tracing_iter_reset(iter, cpu);
         } else
-                ring_buffer_iter_reset(iter->buffer_iter[cpu_file]);
-
+                tracing_iter_reset(iter, cpu_file);
 
         ftrace_enable_cpu();
 
@@ -1450,16 +1550,32 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
         struct trace_array *tr = iter->tr;
         struct trace_array_cpu *data = tr->data[tr->cpu];
         struct tracer *type = current_trace;
-        unsigned long total;
-        unsigned long entries;
+        unsigned long entries = 0;
+        unsigned long total = 0;
+        unsigned long count;
         const char *name = "preemption";
+        int cpu;
 
         if (type)
                 name = type->name;
 
-        entries = ring_buffer_entries(iter->tr->buffer);
-        total = entries +
-                ring_buffer_overruns(iter->tr->buffer);
+
+        for_each_tracing_cpu(cpu) {
+                count = ring_buffer_entries_cpu(tr->buffer, cpu);
+                /*
+                 * If this buffer has skipped entries, then we hold all
+                 * entries for the trace and we need to ignore the
+                 * ones before the time stamp.
+                 */
+                if (tr->data[cpu]->skipped_entries) {
+                        count -= tr->data[cpu]->skipped_entries;
+                        /* total is the same as the entries */
+                        total += count;
+                } else
+                        total += count +
+                                ring_buffer_overrun_cpu(tr->buffer, cpu);
+                entries += count;
+        }
 
         seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
                    name, UTS_RELEASE);
@@ -1501,7 +1617,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
                 seq_puts(m, "\n# => ended at: ");
                 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
                 trace_print_seq(m, &iter->seq);
-                seq_puts(m, "#\n");
+                seq_puts(m, "\n#\n");
         }
 
         seq_puts(m, "#\n");
@@ -1520,6 +1636,9 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
         if (cpumask_test_cpu(iter->cpu, iter->started))
                 return;
 
+        if (iter->tr->data[iter->cpu]->skipped_entries)
+                return;
+
         cpumask_set_cpu(iter->cpu, iter->started);
 
         /* Don't print started cpu buffer for the first entry of the trace */
@@ -1782,19 +1901,23 @@ __tracing_open(struct inode *inode, struct file *file)
         if (ring_buffer_overruns(iter->tr->buffer))
                 iter->iter_flags |= TRACE_FILE_ANNOTATE;
 
+        /* stop the trace while dumping */
+        tracing_stop();
+
         if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
                 for_each_tracing_cpu(cpu) {
 
                         iter->buffer_iter[cpu] =
                                 ring_buffer_read_start(iter->tr->buffer, cpu);
+                        tracing_iter_reset(iter, cpu);
                 }
         } else {
                 cpu = iter->cpu_file;
                 iter->buffer_iter[cpu] =
                         ring_buffer_read_start(iter->tr->buffer, cpu);
+                tracing_iter_reset(iter, cpu);
         }
 
-        /* TODO stop tracer */
         ret = seq_open(file, &tracer_seq_ops);
         if (ret < 0) {
                 fail_ret = ERR_PTR(ret);
@@ -1804,9 +1927,6 @@ __tracing_open(struct inode *inode, struct file *file)
         m = file->private_data;
         m->private = iter;
 
-        /* stop the trace while dumping */
-        tracing_stop();
-
         mutex_unlock(&trace_types_lock);
 
         return iter;
@@ -1817,6 +1937,7 @@ __tracing_open(struct inode *inode, struct file *file)
                 ring_buffer_read_finish(iter->buffer_iter[cpu]);
         }
         free_cpumask_var(iter->started);
+        tracing_start();
 fail:
         mutex_unlock(&trace_types_lock);
         kfree(iter->trace);
@@ -3780,17 +3901,9 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
         if (ret < 0)
                 return ret;
 
-        switch (val) {
-        case 0:
-                trace_flags &= ~(1 << index);
-                break;
-        case 1:
-                trace_flags |= 1 << index;
-                break;
-
-        default:
+        if (val != 0 && val != 1)
                 return -EINVAL;
-        }
+        set_tracer_flags(1 << index, val);
 
         *ppos += cnt;
 
