author		Arnaldo Carvalho de Melo <acme@redhat.com>	2012-09-11 18:29:17 -0400
committer	Arnaldo Carvalho de Melo <acme@redhat.com>	2012-09-11 19:39:19 -0400
commit		9ec3f4e437ede2f3b5087d412abe16a0219b3b99
tree		80cdb3f7c1d53fc6209fac2d57ff9a4052e2750e /tools/perf/builtin-sched.c
parent		2b7fcbc5a9c719a306af1c4986a9f5c2cbfcec65
perf sched: Don't read all tracepoint variables in advance
Do it just at the actual consumer of these fields; that way we avoid
needless lookups.
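The shape of the change, as a minimal sketch (the types, accessors and the
sched_wakeup field names are taken from the patch below; this is a fragment,
not a standalone program):

	/* Before: the dispatcher read every field up front, for every event. */
	struct trace_wakeup_event event = {
		.comm	 = perf_evsel__strval(evsel, sample, "comm"),
		.pid	 = perf_evsel__intval(evsel, sample, "pid"),
		.prio	 = perf_evsel__intval(evsel, sample, "prio"),
		.success = perf_evsel__intval(evsel, sample, "success"),
		.cpu	 = perf_evsel__intval(evsel, sample, "cpu"),
	};
	return sched->tp_handler->wakeup_event(sched, &event, machine, evsel, sample);

	/* After: handlers take (evsel, sample) and look up only what they
	 * consume, e.g. latency_wakeup_event() needs just "pid" and "success". */
	const u32 pid	  = perf_evsel__intval(evsel, sample, "pid"),
		  success = perf_evsel__intval(evsel, sample, "success");

Measuring with a 30 second record session: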
[root@sandy ~]# perf sched record sleep 30s
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 8.585 MB perf.data (~375063 samples) ]
Before:
[root@sandy ~]# perf stat -r 10 perf sched lat > /dev/null
Performance counter stats for 'perf sched lat' (10 runs):
103.592215 task-clock # 0.993 CPUs utilized ( +- 0.33% )
12 context-switches # 0.114 K/sec ( +- 3.29% )
0 cpu-migrations # 0.000 K/sec
7,605 page-faults # 0.073 M/sec ( +- 0.00% )
345,796,112 cycles # 3.338 GHz ( +- 0.07% ) [82.90%]
106,876,796 stalled-cycles-frontend # 30.91% frontend cycles idle ( +- 0.38% ) [83.23%]
62,060,877 stalled-cycles-backend # 17.95% backend cycles idle ( +- 0.80% ) [67.14%]
628,246,586 instructions # 1.82 insns per cycle
# 0.17 stalled cycles per insn ( +- 0.04% ) [83.64%]
134,962,057 branches # 1302.820 M/sec ( +- 0.10% ) [83.64%]
1,233,037 branch-misses # 0.91% of all branches ( +- 0.29% ) [83.41%]
0.104333272 seconds time elapsed ( +- 0.33% )
After:
[root@sandy ~]# perf stat -r 10 perf sched lat > /dev/null
Performance counter stats for 'perf sched lat' (10 runs):
98.848272 task-clock # 0.993 CPUs utilized ( +- 0.48% )
11 context-switches # 0.112 K/sec ( +- 2.83% )
0 cpu-migrations # 0.003 K/sec ( +- 50.92% )
7,604 page-faults # 0.077 M/sec ( +- 0.00% )
332,216,085 cycles # 3.361 GHz ( +- 0.14% ) [82.87%]
100,623,710 stalled-cycles-frontend # 30.29% frontend cycles idle ( +- 0.53% ) [82.95%]
58,788,692 stalled-cycles-backend # 17.70% backend cycles idle ( +- 0.59% ) [67.15%]
609,402,433 instructions # 1.83 insns per cycle
# 0.17 stalled cycles per insn ( +- 0.04% ) [83.76%]
131,277,138 branches # 1328.067 M/sec ( +- 0.06% ) [83.77%]
1,117,871 branch-misses # 0.85% of all branches ( +- 0.32% ) [83.51%]
0.099580430 seconds time elapsed ( +- 0.48% )
[root@sandy ~]#
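That is roughly a 4.6% drop in task-clock (103.59 ms -> 98.85 ms per run) and
about 3.9% fewer cycles, from skipping the unused field lookups.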
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Namhyung Kim <namhyung@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-kracdpw8wqlr0xjh75uk8g11@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools/perf/builtin-sched.c')
-rw-r--r--	tools/perf/builtin-sched.c	277
1 file changed, 97 insertions(+), 180 deletions(-)
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 0df5e7a08c63..af305f57bd22 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -97,73 +97,25 @@ struct work_atoms {
 
 typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);
 
-struct trace_switch_event {
-	char *prev_comm;
-	u32 prev_pid;
-	u32 prev_prio;
-	u64 prev_state;
-	char *next_comm;
-	u32 next_pid;
-	u32 next_prio;
-};
-
-struct trace_runtime_event {
-	char *comm;
-	u32 pid;
-	u64 runtime;
-	u64 vruntime;
-};
+struct perf_sched;
 
-struct trace_wakeup_event {
-	char *comm;
-	u32 pid;
-	u32 prio;
-	u32 success;
-	u32 cpu;
-};
+struct trace_sched_handler {
+	int (*switch_event)(struct perf_sched *sched, struct perf_evsel *evsel,
+			    struct perf_sample *sample, struct machine *machine);
 
-struct trace_fork_event {
-	char *parent_comm;
-	u32 parent_pid;
-	char *child_comm;
-	u32 child_pid;
-};
+	int (*runtime_event)(struct perf_sched *sched, struct perf_evsel *evsel,
+			     struct perf_sample *sample, struct machine *machine);
 
-struct trace_migrate_task_event {
-	char *comm;
-	u32 pid;
-	u32 prio;
-	u32 cpu;
-};
+	int (*wakeup_event)(struct perf_sched *sched, struct perf_evsel *evsel,
+			    struct perf_sample *sample, struct machine *machine);
 
-struct perf_sched;
-
-struct trace_sched_handler {
-	int (*switch_event)(struct perf_sched *sched,
-			    struct trace_switch_event *event,
-			    struct machine *machine,
-			    struct perf_evsel *evsel,
-			    struct perf_sample *sample);
-
-	int (*runtime_event)(struct perf_sched *sched,
-			     struct trace_runtime_event *event,
-			     struct machine *machine,
-			     struct perf_sample *sample);
-
-	int (*wakeup_event)(struct perf_sched *sched,
-			    struct trace_wakeup_event *event,
-			    struct machine *machine,
-			    struct perf_evsel *evsel,
-			    struct perf_sample *sample);
-
-	int (*fork_event)(struct perf_sched *sched,
-			  struct trace_fork_event *event,
-			  struct perf_evsel *evsel);
+	int (*fork_event)(struct perf_sched *sched, struct perf_evsel *evsel,
+			  struct perf_sample *sample);
 
 	int (*migrate_task_event)(struct perf_sched *sched,
-				  struct trace_migrate_task_event *event,
-				  struct machine *machine,
-				  struct perf_sample *sample);
+				  struct perf_evsel *evsel,
+				  struct perf_sample *sample,
+				  struct machine *machine);
 };
 
 struct perf_sched {
@@ -700,33 +652,36 @@ static void test_calibrations(struct perf_sched *sched)
 
 static int
 replay_wakeup_event(struct perf_sched *sched,
-		    struct trace_wakeup_event *wakeup_event,
-		    struct machine *machine __maybe_unused,
-		    struct perf_evsel *evsel, struct perf_sample *sample)
+		    struct perf_evsel *evsel, struct perf_sample *sample,
+		    struct machine *machine __maybe_unused)
 {
+	const char *comm = perf_evsel__strval(evsel, sample, "comm");
+	const u32 pid	 = perf_evsel__intval(evsel, sample, "pid");
 	struct task_desc *waker, *wakee;
 
 	if (verbose) {
 		printf("sched_wakeup event %p\n", evsel);
 
-		printf(" ... pid %d woke up %s/%d\n",
-		       sample->tid, wakeup_event->comm, wakeup_event->pid);
+		printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);
 	}
 
 	waker = register_pid(sched, sample->tid, "<unknown>");
-	wakee = register_pid(sched, wakeup_event->pid, wakeup_event->comm);
+	wakee = register_pid(sched, pid, comm);
 
 	add_sched_event_wakeup(sched, waker, sample->time, wakee);
 	return 0;
 }
 
-static int
-replay_switch_event(struct perf_sched *sched,
-		    struct trace_switch_event *switch_event,
-		    struct machine *machine __maybe_unused,
-		    struct perf_evsel *evsel,
-		    struct perf_sample *sample)
+static int replay_switch_event(struct perf_sched *sched,
+			       struct perf_evsel *evsel,
+			       struct perf_sample *sample,
+			       struct machine *machine __maybe_unused)
 {
+	const char *prev_comm  = perf_evsel__strval(evsel, sample, "prev_comm"),
+		   *next_comm  = perf_evsel__strval(evsel, sample, "next_comm");
+	const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
+		  next_pid = perf_evsel__intval(evsel, sample, "next_pid");
+	const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
 	struct task_desc *prev, __maybe_unused *next;
 	u64 timestamp0, timestamp = sample->time;
 	int cpu = sample->cpu;
@@ -749,35 +704,36 @@ replay_switch_event(struct perf_sched *sched,
 		return -1;
 	}
 
-	if (verbose) {
-		printf(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
-			switch_event->prev_comm, switch_event->prev_pid,
-			switch_event->next_comm, switch_event->next_pid,
-			delta);
-	}
+	pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
+		 prev_comm, prev_pid, next_comm, next_pid, delta);
 
-	prev = register_pid(sched, switch_event->prev_pid, switch_event->prev_comm);
-	next = register_pid(sched, switch_event->next_pid, switch_event->next_comm);
+	prev = register_pid(sched, prev_pid, prev_comm);
+	next = register_pid(sched, next_pid, next_comm);
 
 	sched->cpu_last_switched[cpu] = timestamp;
 
 	add_sched_event_run(sched, prev, timestamp, delta);
-	add_sched_event_sleep(sched, prev, timestamp, switch_event->prev_state);
+	add_sched_event_sleep(sched, prev, timestamp, prev_state);
 
 	return 0;
 }
 
-static int
-replay_fork_event(struct perf_sched *sched, struct trace_fork_event *fork_event,
-		  struct perf_evsel *evsel)
+static int replay_fork_event(struct perf_sched *sched, struct perf_evsel *evsel,
+			     struct perf_sample *sample)
 {
+	const char *parent_comm = perf_evsel__strval(evsel, sample, "parent_comm"),
+		   *child_comm  = perf_evsel__strval(evsel, sample, "child_comm");
+	const u32 parent_pid = perf_evsel__intval(evsel, sample, "parent_pid"),
+		  child_pid  = perf_evsel__intval(evsel, sample, "child_pid");
+
 	if (verbose) {
 		printf("sched_fork event %p\n", evsel);
-		printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid);
-		printf("... child: %s/%d\n", fork_event->child_comm, fork_event->child_pid);
+		printf("... parent: %s/%d\n", parent_comm, parent_pid);
+		printf("... child: %s/%d\n", child_comm, child_pid);
 	}
-	register_pid(sched, fork_event->parent_pid, fork_event->parent_comm);
-	register_pid(sched, fork_event->child_pid, fork_event->child_comm);
+
+	register_pid(sched, parent_pid, parent_comm);
+	register_pid(sched, child_pid, child_comm);
 	return 0;
 }
 
@@ -870,18 +826,18 @@ static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
 }
 
 static int latency_fork_event(struct perf_sched *sched __maybe_unused,
-			      struct trace_fork_event *fork_event __maybe_unused,
-			      struct perf_evsel *evsel __maybe_unused)
+			      struct perf_evsel *evsel __maybe_unused,
+			      struct perf_sample *sample __maybe_unused)
 {
 	/* should insert the newcomer */
 	return 0;
 }
 
-static char sched_out_state(struct trace_switch_event *switch_event)
+static char sched_out_state(u64 prev_state)
 {
 	const char *str = TASK_STATE_TO_CHAR_STR;
 
-	return str[switch_event->prev_state];
+	return str[prev_state];
 }
 
 static int
@@ -951,13 +907,14 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
 	atoms->nb_atoms++;
 }
 
-static int
-latency_switch_event(struct perf_sched *sched,
-		     struct trace_switch_event *switch_event,
-		     struct machine *machine,
-		     struct perf_evsel *evsel __maybe_unused,
-		     struct perf_sample *sample)
+static int latency_switch_event(struct perf_sched *sched,
+				struct perf_evsel *evsel,
+				struct perf_sample *sample,
+				struct machine *machine)
 {
+	const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
+		  next_pid = perf_evsel__intval(evsel, sample, "next_pid");
+	const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
 	struct work_atoms *out_events, *in_events;
 	struct thread *sched_out, *sched_in;
 	u64 timestamp0, timestamp = sample->time;
@@ -978,8 +935,8 @@ latency_switch_event(struct perf_sched *sched,
 		return -1;
 	}
 
-	sched_out = machine__findnew_thread(machine, switch_event->prev_pid);
-	sched_in = machine__findnew_thread(machine, switch_event->next_pid);
+	sched_out = machine__findnew_thread(machine, prev_pid);
+	sched_in = machine__findnew_thread(machine, next_pid);
 
 	out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
 	if (!out_events) {
@@ -991,7 +948,7 @@ latency_switch_event(struct perf_sched *sched,
 			return -1;
 		}
 	}
-	if (add_sched_out_event(out_events, sched_out_state(switch_event), timestamp))
+	if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
 		return -1;
 
 	in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
@@ -1015,12 +972,14 @@ latency_switch_event(struct perf_sched *sched,
 	return 0;
 }
 
-static int
-latency_runtime_event(struct perf_sched *sched,
-		      struct trace_runtime_event *runtime_event,
-		      struct machine *machine, struct perf_sample *sample)
+static int latency_runtime_event(struct perf_sched *sched,
+				 struct perf_evsel *evsel,
+				 struct perf_sample *sample,
+				 struct machine *machine)
 {
-	struct thread *thread = machine__findnew_thread(machine, runtime_event->pid);
+	const u32 pid	  = perf_evsel__intval(evsel, sample, "pid");
+	const u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
+	struct thread *thread = machine__findnew_thread(machine, pid);
 	struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
 	u64 timestamp = sample->time;
 	int cpu = sample->cpu;
@@ -1038,27 +997,27 @@ latency_runtime_event(struct perf_sched *sched,
 			return -1;
 	}
 
-	add_runtime_event(atoms, runtime_event->runtime, timestamp);
+	add_runtime_event(atoms, runtime, timestamp);
 	return 0;
 }
 
-static int
-latency_wakeup_event(struct perf_sched *sched,
-		     struct trace_wakeup_event *wakeup_event,
-		     struct machine *machine,
-		     struct perf_evsel *evsel __maybe_unused,
-		     struct perf_sample *sample)
+static int latency_wakeup_event(struct perf_sched *sched,
+				struct perf_evsel *evsel,
+				struct perf_sample *sample,
+				struct machine *machine)
 {
+	const u32 pid	  = perf_evsel__intval(evsel, sample, "pid"),
+		  success = perf_evsel__intval(evsel, sample, "success");
 	struct work_atoms *atoms;
 	struct work_atom *atom;
 	struct thread *wakee;
 	u64 timestamp = sample->time;
 
 	/* Note for later, it may be interesting to observe the failing cases */
-	if (!wakeup_event->success)
+	if (!success)
 		return 0;
 
-	wakee = machine__findnew_thread(machine, wakeup_event->pid);
+	wakee = machine__findnew_thread(machine, pid);
 	atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
 	if (!atoms) {
 		if (thread_atoms_insert(sched, wakee))
@@ -1095,11 +1054,12 @@ latency_wakeup_event(struct perf_sched *sched,
 	return 0;
 }
 
-static int
-latency_migrate_task_event(struct perf_sched *sched,
-			   struct trace_migrate_task_event *migrate_task_event,
-			   struct machine *machine, struct perf_sample *sample)
+static int latency_migrate_task_event(struct perf_sched *sched,
+				      struct perf_evsel *evsel,
+				      struct perf_sample *sample,
+				      struct machine *machine)
 {
+	const u32 pid = perf_evsel__intval(evsel, sample, "pid");
 	u64 timestamp = sample->time;
 	struct work_atoms *atoms;
 	struct work_atom *atom;
@@ -1111,7 +1071,7 @@ latency_migrate_task_event(struct perf_sched *sched,
 	if (sched->profile_cpu == -1)
 		return 0;
 
-	migrant = machine__findnew_thread(machine, migrate_task_event->pid);
+	migrant = machine__findnew_thread(machine, pid);
 	atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
 	if (!atoms) {
 		if (thread_atoms_insert(sched, migrant))
@@ -1296,28 +1256,17 @@ static int process_sched_wakeup_event(struct perf_tool *tool,
 {
 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
 
-	if (sched->tp_handler->wakeup_event) {
-		struct trace_wakeup_event event = {
-			.comm	 = perf_evsel__strval(evsel, sample, "comm"),
-			.pid	 = perf_evsel__intval(evsel, sample, "pid"),
-			.prio	 = perf_evsel__intval(evsel, sample, "prio"),
-			.success = perf_evsel__intval(evsel, sample, "success"),
-			.cpu	 = perf_evsel__intval(evsel, sample, "cpu"),
-		};
-
-		return sched->tp_handler->wakeup_event(sched, &event, machine, evsel, sample);
-	}
+	if (sched->tp_handler->wakeup_event)
+		return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);
 
 	return 0;
 }
 
-static int
-map_switch_event(struct perf_sched *sched,
-		 struct trace_switch_event *switch_event,
-		 struct machine *machine,
-		 struct perf_evsel *evsel __maybe_unused,
-		 struct perf_sample *sample)
+static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
+			    struct perf_sample *sample, struct machine *machine)
 {
+	const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
+		  next_pid = perf_evsel__intval(evsel, sample, "next_pid");
 	struct thread *sched_out __maybe_unused, *sched_in;
 	int new_shortname;
 	u64 timestamp0, timestamp = sample->time;
@@ -1341,8 +1290,8 @@ map_switch_event(struct perf_sched *sched,
 		return -1;
 	}
 
-	sched_out = machine__findnew_thread(machine, switch_event->prev_pid);
-	sched_in = machine__findnew_thread(machine, switch_event->next_pid);
+	sched_out = machine__findnew_thread(machine, prev_pid);
+	sched_in = machine__findnew_thread(machine, next_pid);
 
 	sched->curr_thread[this_cpu] = sched_in;
 
@@ -1411,19 +1360,8 @@ static int process_sched_switch_event(struct perf_tool *tool,
 		sched->nr_context_switch_bugs++;
 	}
 
-	if (sched->tp_handler->switch_event) {
-		struct trace_switch_event event = {
-			.prev_comm  = perf_evsel__strval(evsel, sample, "prev_comm"),
-			.prev_pid   = prev_pid,
-			.prev_prio  = perf_evsel__intval(evsel, sample, "prev_prio"),
-			.prev_state = perf_evsel__intval(evsel, sample, "prev_state"),
-			.next_comm  = perf_evsel__strval(evsel, sample, "next_comm"),
-			.next_pid   = next_pid,
-			.next_prio  = perf_evsel__intval(evsel, sample, "next_prio"),
-		};
-
-		err = sched->tp_handler->switch_event(sched, &event, machine, evsel, sample);
-	}
+	if (sched->tp_handler->switch_event)
+		err = sched->tp_handler->switch_event(sched, evsel, sample, machine);
 
 	sched->curr_pid[this_cpu] = next_pid;
 	return err;
@@ -1436,15 +1374,8 @@ static int process_sched_runtime_event(struct perf_tool *tool,
 {
 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
 
-	if (sched->tp_handler->runtime_event) {
-		struct trace_runtime_event event = {
-			.comm	  = perf_evsel__strval(evsel, sample, "comm"),
-			.pid	  = perf_evsel__intval(evsel, sample, "pid"),
-			.runtime  = perf_evsel__intval(evsel, sample, "runtime"),
-			.vruntime = perf_evsel__intval(evsel, sample, "vruntime"),
-		};
-		return sched->tp_handler->runtime_event(sched, &event, machine, sample);
-	}
+	if (sched->tp_handler->runtime_event)
+		return sched->tp_handler->runtime_event(sched, evsel, sample, machine);
 
 	return 0;
 }
@@ -1456,15 +1387,8 @@ static int process_sched_fork_event(struct perf_tool *tool,
 {
 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
 
-	if (sched->tp_handler->fork_event) {
-		struct trace_fork_event event = {
-			.parent_comm = perf_evsel__strval(evsel, sample, "parent_comm"),
-			.child_comm  = perf_evsel__strval(evsel, sample, "child_comm"),
-			.parent_pid  = perf_evsel__intval(evsel, sample, "parent_pid"),
-			.child_pid   = perf_evsel__intval(evsel, sample, "child_pid"),
-		};
-		return sched->tp_handler->fork_event(sched, &event, evsel);
-	}
+	if (sched->tp_handler->fork_event)
+		return sched->tp_handler->fork_event(sched, evsel, sample);
 
 	return 0;
 }
@@ -1485,15 +1409,8 @@ static int process_sched_migrate_task_event(struct perf_tool *tool,
 {
 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
 
-	if (sched->tp_handler->migrate_task_event) {
-		struct trace_migrate_task_event event = {
-			.comm = perf_evsel__strval(evsel, sample, "comm"),
-			.pid  = perf_evsel__intval(evsel, sample, "pid"),
-			.prio = perf_evsel__intval(evsel, sample, "prio"),
-			.cpu  = perf_evsel__intval(evsel, sample, "cpu"),
-		};
-		return sched->tp_handler->migrate_task_event(sched, &event, machine, sample);
-	}
+	if (sched->tp_handler->migrate_task_event)
+		return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);
 
 	return 0;
 }