author     Arnaldo Carvalho de Melo <acme@redhat.com>  2012-09-08 21:53:06 -0400
committer  Arnaldo Carvalho de Melo <acme@redhat.com>  2012-09-09 10:39:02 -0400
commit     a116e05dcf61c8d758e0f0aed40325534aee2c13 (patch)
tree       14c6d47407cea9cc3947e52631e880930cb995ec /tools/perf/builtin-sched.c
parent     32c7f7383a096a4fc878fdda686c7725945e8a8f (diff)
perf sched: Remove die() calls
Just use pr_err() + return -1 and let perf_session__process_events() abort
when an event handler would have called die(), then let perf's main() exit
doing whatever it needs.
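The conversion is one mechanical pattern applied throughout: a handler that
could die() now returns int, reports the failure with pr_err()/pr_debug()
and returns -1, and every caller checks the result and propagates it. A
minimal sketch of the before/after shape (simplified, hypothetical names,
not the exact builtin-sched.c code):

    #include <stdio.h>
    #include <stdlib.h>

    /* stand-in for perf's pr_err() from util/debug.h */
    #define pr_err(...) fprintf(stderr, __VA_ARGS__)

    /* Before: any failure killed the whole tool on the spot:
     *
     *         if (!atoms)
     *                 die("No memory");
     *
     * After: report the error and let the caller unwind. */
    static int insert_atom(void **slot)
    {
            void *atoms = calloc(1, 64);    /* plays the role of zalloc() */

            if (!atoms) {
                    pr_err("No memory at %s\n", __func__);
                    return -1;              /* was: die("No memory") */
            }
            *slot = atoms;
            return 0;
    }

    int main(void)
    {
            void *slot;

            /* callers now check and propagate instead of relying on die() */
            if (insert_atom(&slot))
                    return EXIT_FAILURE;    /* main() does the exiting */
            free(slot);
            return 0;
    }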
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Namhyung Kim <namhyung@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-88cwdogxqomsy9tfr8r0as58@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools/perf/builtin-sched.c')
-rw-r--r--  tools/perf/builtin-sched.c | 281
1 file changed, 179 insertions(+), 102 deletions(-)
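Most of the 281 changed lines below are the knock-on effect of that
decision: the struct trace_sched_handler callbacks and the
process_sched_*_event() wrappers change from void to int so a handler's
error code can travel back through perf_sched__process_tracepoint_sample()
to perf_session__process_events(). In miniature, the shape of the change
(illustrative stand-ins, not the real perf structures):

    #include <stdio.h>

    struct sample;  /* stand-in for struct perf_sample and friends */

    /* A void callback table gives errors nowhere to go; once the
     * callbacks return int, the dispatcher can simply forward it. */
    struct sched_handler {
            int (*switch_event)(struct sample *sample); /* was void (*)(...) */
    };

    static int replay_switch(struct sample *sample)
    {
            (void)sample;
            return 0;       /* or pr_err(...); return -1; as in the patch */
    }

    static struct sched_handler handler = { .switch_event = replay_switch };

    /* was: static void process_event(...) { handler.switch_event(...); } */
    static int process_event(struct sample *sample)
    {
            int err = 0;

            if (handler.switch_event)
                    err = handler.switch_event(sample);

            return err;     /* bubbles up to the session event loop */
    }

    int main(void)
    {
            struct sample *s = NULL;        /* no real sample needed here */
            return process_event(s) ? 1 : 0;
    }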
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index a25a023965bb..782f66d3610e 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -423,8 +423,8 @@ static int self_open_counters(void)
         fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
 
         if (fd < 0)
-                die("Error: sys_perf_event_open() syscall returned"
-                    "with %d (%s)\n", fd, strerror(errno));
+                pr_debug("Error: sys_perf_event_open() syscall returned"
+                         "with %d (%s)\n", fd, strerror(errno));
         return fd;
 }
 
@@ -450,7 +450,8 @@ static void *thread_func(void *ctx)
         sprintf(comm2, ":%s", this_task->comm);
         prctl(PR_SET_NAME, comm2);
         fd = self_open_counters();
-
+        if (fd < 0)
+                return NULL;
 again:
         ret = sem_post(&this_task->ready_for_work);
         BUG_ON(ret);
@@ -726,30 +727,30 @@ struct trace_migrate_task_event {
 };
 
 struct trace_sched_handler {
-        void (*switch_event)(struct trace_switch_event *,
-                             struct machine *,
-                             struct event_format *,
-                             struct perf_sample *sample);
-
-        void (*runtime_event)(struct trace_runtime_event *,
-                              struct machine *,
-                              struct perf_sample *sample);
+        int (*switch_event)(struct trace_switch_event *event,
+                            struct machine *machine,
+                            struct event_format *tp_format,
+                            struct perf_sample *sample);
 
-        void (*wakeup_event)(struct trace_wakeup_event *,
-                             struct machine *,
-                             struct event_format *,
-                             struct perf_sample *sample);
+        int (*runtime_event)(struct trace_runtime_event *event,
+                             struct machine *machine,
+                             struct perf_sample *sample);
 
-        void (*fork_event)(struct trace_fork_event *,
-                           struct event_format *event);
+        int (*wakeup_event)(struct trace_wakeup_event *event,
+                            struct machine *machine,
+                            struct event_format *tp_format,
+                            struct perf_sample *sample);
 
-        void (*migrate_task_event)(struct trace_migrate_task_event *,
-                                   struct machine *machine,
-                                   struct perf_sample *sample);
+        int (*fork_event)(struct trace_fork_event *event,
+                          struct event_format *tp_format);
+
+        int (*migrate_task_event)(struct trace_migrate_task_event *event,
+                                  struct machine *machine,
+                                  struct perf_sample *sample);
 };
 
 
-static void
+static int
 replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
                     struct machine *machine __used,
                     struct event_format *event, struct perf_sample *sample)
@@ -769,11 +770,12 @@ replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
         wakee = register_pid(wakeup_event->pid, wakeup_event->comm);
 
         add_sched_event_wakeup(waker, sample->time, wakee);
+        return 0;
 }
 
 static u64 cpu_last_switched[MAX_CPUS];
 
-static void
+static int
 replay_switch_event(struct trace_switch_event *switch_event,
                     struct machine *machine __used,
                     struct event_format *event,
@@ -788,7 +790,7 @@ replay_switch_event(struct trace_switch_event *switch_event,
                 printf("sched_switch event %p\n", event);
 
         if (cpu >= MAX_CPUS || cpu < 0)
-                return;
+                return 0;
 
         timestamp0 = cpu_last_switched[cpu];
         if (timestamp0)
@@ -796,8 +798,10 @@ replay_switch_event(struct trace_switch_event *switch_event,
         else
                 delta = 0;
 
-        if (delta < 0)
-                die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
+        if (delta < 0) {
+                pr_debug("hm, delta: %" PRIu64 " < 0 ?\n", delta);
+                return -1;
+        }
 
         if (verbose) {
                 printf(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
@@ -813,10 +817,12 @@ replay_switch_event(struct trace_switch_event *switch_event,
 
         add_sched_event_run(prev, timestamp, delta);
         add_sched_event_sleep(prev, timestamp, switch_event->prev_state);
+
+        return 0;
 }
 
 
-static void
+static int
 replay_fork_event(struct trace_fork_event *fork_event,
                   struct event_format *event)
 {
@@ -827,6 +833,7 @@ replay_fork_event(struct trace_fork_event *fork_event,
         }
         register_pid(fork_event->parent_pid, fork_event->parent_comm);
         register_pid(fork_event->child_pid, fork_event->child_comm);
+        return 0;
 }
 
 static struct trace_sched_handler replay_ops = {
@@ -911,22 +918,26 @@ __thread_latency_insert(struct rb_root *root, struct work_atoms *data,
         rb_insert_color(&data->node, root);
 }
 
-static void thread_atoms_insert(struct thread *thread)
+static int thread_atoms_insert(struct thread *thread)
 {
         struct work_atoms *atoms = zalloc(sizeof(*atoms));
-        if (!atoms)
-                die("No memory");
+        if (!atoms) {
+                pr_err("No memory at %s\n", __func__);
+                return -1;
+        }
 
         atoms->thread = thread;
         INIT_LIST_HEAD(&atoms->work_list);
         __thread_latency_insert(&atom_root, atoms, &cmp_pid);
+        return 0;
 }
 
-static void
+static int
 latency_fork_event(struct trace_fork_event *fork_event __used,
                    struct event_format *event __used)
 {
         /* should insert the newcomer */
+        return 0;
 }
 
 __used
@@ -937,14 +948,16 @@ static char sched_out_state(struct trace_switch_event *switch_event)
         return str[switch_event->prev_state];
 }
 
-static void
+static int
 add_sched_out_event(struct work_atoms *atoms,
                     char run_state,
                     u64 timestamp)
 {
         struct work_atom *atom = zalloc(sizeof(*atom));
-        if (!atom)
-                die("Non memory");
+        if (!atom) {
+                pr_err("Non memory at %s", __func__);
+                return -1;
+        }
 
         atom->sched_out_time = timestamp;
 
@@ -954,6 +967,7 @@ add_sched_out_event(struct work_atoms *atoms,
         }
 
         list_add_tail(&atom->list, &atoms->work_list);
+        return 0;
 }
 
 static void
@@ -1000,7 +1014,7 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
         atoms->nb_atoms++;
 }
 
-static void
+static int
 latency_switch_event(struct trace_switch_event *switch_event,
                      struct machine *machine,
                      struct event_format *event __used,
@@ -1021,38 +1035,49 @@ latency_switch_event(struct trace_switch_event *switch_event,
         else
                 delta = 0;
 
-        if (delta < 0)
-                die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
-
+        if (delta < 0) {
+                pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
+                return -1;
+        }
 
         sched_out = machine__findnew_thread(machine, switch_event->prev_pid);
         sched_in = machine__findnew_thread(machine, switch_event->next_pid);
 
         out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
         if (!out_events) {
-                thread_atoms_insert(sched_out);
+                if (thread_atoms_insert(sched_out))
+                        return -1;
                 out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
-                if (!out_events)
-                        die("out-event: Internal tree error");
+                if (!out_events) {
+                        pr_err("out-event: Internal tree error");
+                        return -1;
+                }
         }
-        add_sched_out_event(out_events, sched_out_state(switch_event), timestamp);
+        if (add_sched_out_event(out_events, sched_out_state(switch_event), timestamp))
+                return -1;
 
         in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
         if (!in_events) {
-                thread_atoms_insert(sched_in);
+                if (thread_atoms_insert(sched_in))
+                        return -1;
                 in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
-                if (!in_events)
-                        die("in-event: Internal tree error");
+                if (!in_events) {
+                        pr_err("in-event: Internal tree error");
+                        return -1;
+                }
                 /*
                  * Take came in we have not heard about yet,
                  * add in an initial atom in runnable state:
                  */
-                add_sched_out_event(in_events, 'R', timestamp);
+                if (add_sched_out_event(in_events, 'R', timestamp))
+                        return -1;
         }
         add_sched_in_event(in_events, timestamp);
+
+        return 0;
 }
 
-static void
+static int
 latency_runtime_event(struct trace_runtime_event *runtime_event,
                       struct machine *machine, struct perf_sample *sample)
 {
@@ -1063,17 +1088,22 @@ latency_runtime_event(struct trace_runtime_event *runtime_event,
 
         BUG_ON(cpu >= MAX_CPUS || cpu < 0);
         if (!atoms) {
-                thread_atoms_insert(thread);
+                if (thread_atoms_insert(thread))
+                        return -1;
                 atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
-                if (!atoms)
-                        die("in-event: Internal tree error");
-                add_sched_out_event(atoms, 'R', timestamp);
+                if (!atoms) {
+                        pr_debug("in-event: Internal tree error");
+                        return -1;
+                }
+                if (add_sched_out_event(atoms, 'R', timestamp))
+                        return -1;
         }
 
         add_runtime_event(atoms, runtime_event->runtime, timestamp);
+        return 0;
 }
 
-static void
+static int
 latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
                      struct machine *machine, struct event_format *event __used,
                      struct perf_sample *sample)
@@ -1085,16 +1115,20 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
 
         /* Note for later, it may be interesting to observe the failing cases */
         if (!wakeup_event->success)
-                return;
+                return 0;
 
         wakee = machine__findnew_thread(machine, wakeup_event->pid);
         atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
         if (!atoms) {
-                thread_atoms_insert(wakee);
+                if (thread_atoms_insert(wakee))
+                        return -1;
                 atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
-                if (!atoms)
-                        die("wakeup-event: Internal tree error");
-                add_sched_out_event(atoms, 'S', timestamp);
+                if (!atoms) {
+                        pr_debug("wakeup-event: Internal tree error");
+                        return -1;
+                }
+                if (add_sched_out_event(atoms, 'S', timestamp))
+                        return -1;
         }
 
         BUG_ON(list_empty(&atoms->work_list));
@@ -1112,14 +1146,15 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
         nr_timestamps++;
         if (atom->sched_out_time > timestamp) {
                 nr_unordered_timestamps++;
-                return;
+                return 0;
         }
 
         atom->state = THREAD_WAIT_CPU;
         atom->wake_up_time = timestamp;
+        return 0;
 }
 
-static void
+static int
 latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
                            struct machine *machine, struct perf_sample *sample)
 {
@@ -1132,17 +1167,21 @@ latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
          * Only need to worry about migration when profiling one CPU.
          */
         if (profile_cpu == -1)
-                return;
+                return 0;
 
         migrant = machine__findnew_thread(machine, migrate_task_event->pid);
         atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
         if (!atoms) {
-                thread_atoms_insert(migrant);
+                if (thread_atoms_insert(migrant))
+                        return -1;
                 register_pid(migrant->pid, migrant->comm);
                 atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
-                if (!atoms)
-                        die("migration-event: Internal tree error");
-                add_sched_out_event(atoms, 'R', timestamp);
+                if (!atoms) {
+                        pr_debug("migration-event: Internal tree error");
+                        return -1;
+                }
+                if (add_sched_out_event(atoms, 'R', timestamp))
+                        return -1;
         }
 
         BUG_ON(list_empty(&atoms->work_list));
@@ -1154,6 +1193,8 @@ latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
 
         if (atom->sched_out_time > timestamp)
                 nr_unordered_timestamps++;
+
+        return 0;
 }
 
 static struct trace_sched_handler lat_ops = {
@@ -1328,7 +1369,7 @@ static void sort_lat(void)
 
 static struct trace_sched_handler *trace_handler;
 
-static void
+static int
 process_sched_wakeup_event(struct perf_tool *tool __used,
                            struct event_format *event,
                            struct perf_sample *sample,
@@ -1337,6 +1378,7 @@ process_sched_wakeup_event(struct perf_tool *tool __used,
 {
         void *data = sample->raw_data;
         struct trace_wakeup_event wakeup_event;
+        int err = 0;
 
         FILL_COMMON_FIELDS(wakeup_event, event, data);
 
@@ -1347,7 +1389,9 @@ process_sched_wakeup_event(struct perf_tool *tool __used,
         FILL_FIELD(wakeup_event, cpu, event, data);
 
         if (trace_handler->wakeup_event)
-                trace_handler->wakeup_event(&wakeup_event, machine, event, sample);
+                err = trace_handler->wakeup_event(&wakeup_event, machine, event, sample);
+
+        return err;
 }
 
 /*
@@ -1363,7 +1407,7 @@ static struct thread *curr_thread[MAX_CPUS];
 static char next_shortname1 = 'A';
 static char next_shortname2 = '0';
 
-static void
+static int
 map_switch_event(struct trace_switch_event *switch_event,
                  struct machine *machine,
                  struct event_format *event __used,
@@ -1387,9 +1431,10 @@ map_switch_event(struct trace_switch_event *switch_event,
         else
                 delta = 0;
 
-        if (delta < 0)
-                die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
-
+        if (delta < 0) {
+                pr_debug("hm, delta: %" PRIu64 " < 0 ?\n", delta);
+                return -1;
+        }
 
         sched_out = machine__findnew_thread(machine, switch_event->prev_pid);
         sched_in = machine__findnew_thread(machine, switch_event->next_pid);
@@ -1438,16 +1483,18 @@ map_switch_event(struct trace_switch_event *switch_event,
         } else {
                 printf("\n");
         }
+
+        return 0;
 }
 
-static void
+static int
 process_sched_switch_event(struct perf_tool *tool __used,
                            struct event_format *event,
                            struct perf_sample *sample,
                            struct machine *machine,
                            struct thread *thread __used)
 {
-        int this_cpu = sample->cpu;
+        int this_cpu = sample->cpu, err = 0;
         void *data = sample->raw_data;
         struct trace_switch_event switch_event;
 
@@ -1470,12 +1517,13 @@ process_sched_switch_event(struct perf_tool *tool __used,
                 nr_context_switch_bugs++;
         }
         if (trace_handler->switch_event)
-                trace_handler->switch_event(&switch_event, machine, event, sample);
+                err = trace_handler->switch_event(&switch_event, machine, event, sample);
 
         curr_pid[this_cpu] = switch_event.next_pid;
+        return err;
 }
 
-static void
+static int
 process_sched_runtime_event(struct perf_tool *tool __used,
                             struct event_format *event,
                             struct perf_sample *sample,
@@ -1484,6 +1532,7 @@ process_sched_runtime_event(struct perf_tool *tool __used,
 {
         void *data = sample->raw_data;
         struct trace_runtime_event runtime_event;
+        int err = 0;
 
         FILL_ARRAY(runtime_event, comm, event, data);
         FILL_FIELD(runtime_event, pid, event, data);
@@ -1491,10 +1540,12 @@ process_sched_runtime_event(struct perf_tool *tool __used,
         FILL_FIELD(runtime_event, vruntime, event, data);
 
         if (trace_handler->runtime_event)
-                trace_handler->runtime_event(&runtime_event, machine, sample);
+                err = trace_handler->runtime_event(&runtime_event, machine, sample);
+
+        return err;
 }
 
-static void
+static int
 process_sched_fork_event(struct perf_tool *tool __used,
                          struct event_format *event,
                          struct perf_sample *sample,
@@ -1503,6 +1554,7 @@ process_sched_fork_event(struct perf_tool *tool __used,
 {
         void *data = sample->raw_data;
         struct trace_fork_event fork_event;
+        int err = 0;
 
         FILL_COMMON_FIELDS(fork_event, event, data);
 
@@ -1512,10 +1564,12 @@ process_sched_fork_event(struct perf_tool *tool __used,
         FILL_FIELD(fork_event, child_pid, event, data);
 
         if (trace_handler->fork_event)
-                trace_handler->fork_event(&fork_event, event);
+                err = trace_handler->fork_event(&fork_event, event);
+
+        return err;
 }
 
-static void
+static int
 process_sched_exit_event(struct perf_tool *tool __used,
                          struct event_format *event,
                          struct perf_sample *sample __used,
@@ -1524,9 +1578,11 @@ process_sched_exit_event(struct perf_tool *tool __used,
 {
         if (verbose)
                 printf("sched_exit event %p\n", event);
+
+        return 0;
 }
 
-static void
+static int
 process_sched_migrate_task_event(struct perf_tool *tool __used,
                                  struct event_format *event,
                                  struct perf_sample *sample,
@@ -1535,6 +1591,7 @@ process_sched_migrate_task_event(struct perf_tool *tool __used,
 {
         void *data = sample->raw_data;
         struct trace_migrate_task_event migrate_task_event;
+        int err = 0;
 
         FILL_COMMON_FIELDS(migrate_task_event, event, data);
 
@@ -1544,13 +1601,16 @@ process_sched_migrate_task_event(struct perf_tool *tool __used,
         FILL_FIELD(migrate_task_event, cpu, event, data);
 
         if (trace_handler->migrate_task_event)
-                trace_handler->migrate_task_event(&migrate_task_event, machine, sample);
+                err = trace_handler->migrate_task_event(&migrate_task_event, machine, sample);
+
+        return err;
 }
 
-typedef void (*tracepoint_handler)(struct perf_tool *tool, struct event_format *event,
-                                   struct perf_sample *sample,
-                                   struct machine *machine,
-                                   struct thread *thread);
+typedef int (*tracepoint_handler)(struct perf_tool *tool,
+                                  struct event_format *tp_format,
+                                  struct perf_sample *sample,
+                                  struct machine *machine,
+                                  struct thread *thread);
 
 static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __used,
                                                  union perf_event *event __used,
@@ -1559,6 +1619,7 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __used,
                                                  struct machine *machine)
 {
         struct thread *thread = machine__findnew_thread(machine, sample->pid);
+        int err = 0;
 
         if (thread == NULL) {
                 pr_debug("problem processing %s event, skipping it.\n",
@@ -1571,10 +1632,10 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __used,
 
         if (evsel->handler.func != NULL) {
                 tracepoint_handler f = evsel->handler.func;
-                f(tool, evsel->tp_format, sample, machine, thread);
+                err = f(tool, evsel->tp_format, sample, machine, thread);
         }
 
-        return 0;
+        return err;
 }
 
 static struct perf_tool perf_sched = {
@@ -1585,9 +1646,8 @@ static struct perf_tool perf_sched = {
         .ordered_samples = true,
 };
 
-static void read_events(bool destroy, struct perf_session **psession)
+static int read_events(bool destroy, struct perf_session **psession)
 {
-        int err = -EINVAL;
         const struct perf_evsel_str_handler handlers[] = {
                 { "sched:sched_switch", process_sched_switch_event, },
                 { "sched:sched_stat_runtime", process_sched_runtime_event, },
@@ -1600,16 +1660,20 @@ static void read_events(bool destroy, struct perf_session **psession)
         struct perf_session *session;
 
         session = perf_session__new(input_name, O_RDONLY, 0, false, &perf_sched);
-        if (session == NULL)
-                die("No Memory");
+        if (session == NULL) {
+                pr_debug("No Memory for session\n");
+                return -1;
+        }
 
-        err = perf_session__set_tracepoints_handlers(session, handlers);
-        assert(err == 0);
+        if (perf_session__set_tracepoints_handlers(session, handlers))
+                goto out_delete;
 
         if (perf_session__has_traces(session, "record -R")) {
-                err = perf_session__process_events(session, &perf_sched);
-                if (err)
-                        die("Failed to process events, error %d", err);
+                int err = perf_session__process_events(session, &perf_sched);
+                if (err) {
+                        pr_err("Failed to process events, error %d", err);
+                        goto out_delete;
+                }
 
                 nr_events = session->hists.stats.nr_events[0];
                 nr_lost_events = session->hists.stats.total_lost;
@@ -1621,6 +1685,12 @@ static void read_events(bool destroy, struct perf_session **psession)
 
         if (psession)
                 *psession = session;
+
+        return 0;
+
+out_delete:
+        perf_session__delete(session);
+        return -1;
 }
 
 static void print_bad_events(void)
@@ -1653,13 +1723,14 @@ static void print_bad_events(void)
         }
 }
 
-static void __cmd_lat(void)
+static int __cmd_lat(void)
 {
         struct rb_node *next;
         struct perf_session *session;
 
         setup_pager();
-        read_events(false, &session);
+        if (read_events(false, &session))
+                return -1;
         sort_lat();
 
         printf("\n ---------------------------------------------------------------------------------------------------------------\n");
@@ -1686,6 +1757,7 @@ static void __cmd_lat(void)
         printf("\n");
 
         perf_session__delete(session);
+        return 0;
 }
 
 static struct trace_sched_handler map_ops = {
@@ -1695,16 +1767,18 @@ static struct trace_sched_handler map_ops = {
         .fork_event = NULL,
 };
 
-static void __cmd_map(void)
+static int __cmd_map(void)
 {
         max_cpu = sysconf(_SC_NPROCESSORS_CONF);
 
         setup_pager();
-        read_events(true, NULL);
+        if (read_events(true, NULL))
+                return -1;
         print_bad_events();
+        return 0;
 }
 
-static void __cmd_replay(void)
+static int __cmd_replay(void)
 {
         unsigned long i;
 
@@ -1713,7 +1787,8 @@ static void __cmd_replay(void)
 
         test_calibrations();
 
-        read_events(true, NULL);
+        if (read_events(true, NULL))
+                return -1;
 
         printf("nr_run_events: %ld\n", nr_run_events);
         printf("nr_sleep_events: %ld\n", nr_sleep_events);
@@ -1734,6 +1809,8 @@ static void __cmd_replay(void)
         printf("------------------------------------------------------------\n");
         for (i = 0; i < replay_repeat; i++)
                 run_one_test();
+
+        return 0;
 }
 
 
@@ -1865,11 +1942,11 @@ int cmd_sched(int argc, const char **argv, const char *prefix __used)
                         usage_with_options(latency_usage, latency_options);
                 }
                 setup_sorting();
-                __cmd_lat();
+                return __cmd_lat();
         } else if (!strcmp(argv[0], "map")) {
                 trace_handler = &map_ops;
                 setup_sorting();
-                __cmd_map();
+                return __cmd_map();
         } else if (!strncmp(argv[0], "rep", 3)) {
                 trace_handler = &replay_ops;
                 if (argc) {
@@ -1877,7 +1954,7 @@ int cmd_sched(int argc, const char **argv, const char *prefix __used)
                         if (argc)
                                 usage_with_options(replay_usage, replay_options);
                 }
-                __cmd_replay();
+                return __cmd_replay();
         } else {
                 usage_with_options(sched_usage, sched_options);
         }