Diffstat (limited to 'tools/perf')
-rw-r--r--  tools/perf/builtin-sched.c  112
1 file changed, 56 insertions(+), 56 deletions(-)
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 727cc5b852c2..7e57a986c056 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -872,7 +872,7 @@ enum thread_state {
 	THREAD_IGNORE
 };
 
-struct lat_snapshot {
+struct work_atom {
 	struct list_head list;
 	enum thread_state state;
 	u64 wake_up_time;
@@ -880,7 +880,7 @@ struct lat_snapshot {
 	u64 runtime;
 };
 
-struct thread_latency {
+struct task_atoms {
 	struct list_head snapshot_list;
 	struct thread *thread;
 	struct rb_node node;
@@ -888,35 +888,35 @@ struct thread_latency {
 
 static struct rb_root lat_snapshot_root;
 
-static struct thread_latency *
-thread_latency_search(struct rb_root *root, struct thread *thread)
+static struct task_atoms *
+thread_atom_list_search(struct rb_root *root, struct thread *thread)
 {
 	struct rb_node *node = root->rb_node;
 
 	while (node) {
-		struct thread_latency *lat;
+		struct task_atoms *atoms;
 
-		lat = container_of(node, struct thread_latency, node);
-		if (thread->pid < lat->thread->pid)
+		atoms = container_of(node, struct task_atoms, node);
+		if (thread->pid < atoms->thread->pid)
 			node = node->rb_left;
-		else if (thread->pid > lat->thread->pid)
+		else if (thread->pid > atoms->thread->pid)
 			node = node->rb_right;
 		else {
-			return lat;
+			return atoms;
 		}
 	}
 	return NULL;
 }
 
 static void
-__thread_latency_insert(struct rb_root *root, struct thread_latency *data)
+__thread_latency_insert(struct rb_root *root, struct task_atoms *data)
 {
 	struct rb_node **new = &(root->rb_node), *parent = NULL;
 
 	while (*new) {
-		struct thread_latency *this;
+		struct task_atoms *this;
 
-		this = container_of(*new, struct thread_latency, node);
+		this = container_of(*new, struct task_atoms, node);
 		parent = *new;
 		if (data->thread->pid < this->thread->pid)
 			new = &((*new)->rb_left);
@@ -930,16 +930,16 @@ __thread_latency_insert(struct rb_root *root, struct thread_latency *data)
 	rb_insert_color(&data->node, root);
 }
 
-static void thread_latency_insert(struct thread *thread)
+static void thread_atom_list_insert(struct thread *thread)
 {
-	struct thread_latency *lat;
-	lat = calloc(sizeof(*lat), 1);
-	if (!lat)
+	struct task_atoms *atoms;
+	atoms = calloc(sizeof(*atoms), 1);
+	if (!atoms)
 		die("No memory");
 
-	lat->thread = thread;
-	INIT_LIST_HEAD(&lat->snapshot_list);
-	__thread_latency_insert(&lat_snapshot_root, lat);
+	atoms->thread = thread;
+	INIT_LIST_HEAD(&atoms->snapshot_list);
+	__thread_latency_insert(&lat_snapshot_root, atoms);
 }
 
 static void
@@ -961,28 +961,28 @@ static char sched_out_state(struct trace_switch_event *switch_event)
 }
 
 static void
-lat_sched_out(struct thread_latency *lat,
+lat_sched_out(struct task_atoms *atoms,
 	      struct trace_switch_event *switch_event __used, u64 delta)
 {
-	struct lat_snapshot *snapshot;
+	struct work_atom *snapshot;
 
 	snapshot = calloc(sizeof(*snapshot), 1);
 	if (!snapshot)
 		die("Non memory");
 
 	snapshot->runtime = delta;
-	list_add_tail(&snapshot->list, &lat->snapshot_list);
+	list_add_tail(&snapshot->list, &atoms->snapshot_list);
 }
 
 static void
-lat_sched_in(struct thread_latency *lat, u64 timestamp)
+lat_sched_in(struct task_atoms *atoms, u64 timestamp)
 {
-	struct lat_snapshot *snapshot;
+	struct work_atom *snapshot;
 
-	if (list_empty(&lat->snapshot_list))
+	if (list_empty(&atoms->snapshot_list))
 		return;
 
-	snapshot = list_entry(lat->snapshot_list.prev, struct lat_snapshot,
+	snapshot = list_entry(atoms->snapshot_list.prev, struct work_atom,
 			      list);
 
 	if (snapshot->state != THREAD_WAKED_UP)
@@ -1004,7 +1004,7 @@ latency_switch_event(struct trace_switch_event *switch_event,
 			 u64 timestamp,
 			 struct thread *thread __used)
 {
-	struct thread_latency *out_lat, *in_lat;
+	struct task_atoms *out_atoms, *in_atoms;
 	struct thread *sched_out, *sched_in;
 	u64 timestamp0;
 	s64 delta;
@@ -1026,24 +1026,24 @@ latency_switch_event(struct trace_switch_event *switch_event,
 	sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
 	sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);
 
-	in_lat = thread_latency_search(&lat_snapshot_root, sched_in);
-	if (!in_lat) {
-		thread_latency_insert(sched_in);
-		in_lat = thread_latency_search(&lat_snapshot_root, sched_in);
-		if (!in_lat)
+	in_atoms = thread_atom_list_search(&lat_snapshot_root, sched_in);
+	if (!in_atoms) {
+		thread_atom_list_insert(sched_in);
+		in_atoms = thread_atom_list_search(&lat_snapshot_root, sched_in);
+		if (!in_atoms)
 			die("Internal latency tree error");
 	}
 
-	out_lat = thread_latency_search(&lat_snapshot_root, sched_out);
-	if (!out_lat) {
-		thread_latency_insert(sched_out);
-		out_lat = thread_latency_search(&lat_snapshot_root, sched_out);
-		if (!out_lat)
+	out_atoms = thread_atom_list_search(&lat_snapshot_root, sched_out);
+	if (!out_atoms) {
+		thread_atom_list_insert(sched_out);
+		out_atoms = thread_atom_list_search(&lat_snapshot_root, sched_out);
+		if (!out_atoms)
 			die("Internal latency tree error");
 	}
 
-	lat_sched_in(in_lat, timestamp);
-	lat_sched_out(out_lat, switch_event, delta);
+	lat_sched_in(in_atoms, timestamp);
+	lat_sched_out(out_atoms, switch_event, delta);
 }
 
 static void
@@ -1053,8 +1053,8 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
 			 u64 timestamp,
 			 struct thread *thread __used)
 {
-	struct thread_latency *lat;
-	struct lat_snapshot *snapshot;
+	struct task_atoms *atoms;
+	struct work_atom *snapshot;
 	struct thread *wakee;
 
 	/* Note for later, it may be interesting to observe the failing cases */
@@ -1062,16 +1062,16 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
 		return;
 
 	wakee = threads__findnew(wakeup_event->pid, &threads, &last_match);
-	lat = thread_latency_search(&lat_snapshot_root, wakee);
-	if (!lat) {
-		thread_latency_insert(wakee);
+	atoms = thread_atom_list_search(&lat_snapshot_root, wakee);
+	if (!atoms) {
+		thread_atom_list_insert(wakee);
 		return;
 	}
 
-	if (list_empty(&lat->snapshot_list))
+	if (list_empty(&atoms->snapshot_list))
 		return;
 
-	snapshot = list_entry(lat->snapshot_list.prev, struct lat_snapshot,
+	snapshot = list_entry(atoms->snapshot_list.prev, struct work_atom,
 			      list);
 
 	if (snapshot->state != THREAD_SLEEPING)
@@ -1090,9 +1090,9 @@ static struct trace_sched_handler lat_ops = {
 static u64 all_runtime;
 static u64 all_count;
 
-static void output_lat_thread(struct thread_latency *lat)
+static void output_lat_thread(struct task_atoms *atom_list)
 {
-	struct lat_snapshot *shot;
+	struct work_atom *atom;
 	int count = 0;
 	int i;
 	int ret;
@@ -1100,15 +1100,15 @@ static void output_lat_thread(struct thread_latency *lat)
 	u64 total = 0, delta;
 	u64 total_runtime = 0;
 
-	list_for_each_entry(shot, &lat->snapshot_list, list) {
-		total_runtime += shot->runtime;
+	list_for_each_entry(atom, &atom_list->snapshot_list, list) {
+		total_runtime += atom->runtime;
 
-		if (shot->state != THREAD_SCHED_IN)
+		if (atom->state != THREAD_SCHED_IN)
 			continue;
 
 		count++;
 
-		delta = shot->sched_in_time - shot->wake_up_time;
+		delta = atom->sched_in_time - atom->wake_up_time;
 		if (delta > max)
 			max = delta;
 		total += delta;
@@ -1120,7 +1120,7 @@ static void output_lat_thread(struct thread_latency *lat)
 	if (!count)
 		return;
 
-	ret = printf(" %s ", lat->thread->comm);
+	ret = printf(" %s ", atom_list->thread->comm);
 
 	for (i = 0; i < 19 - ret; i++)
 		printf(" ");
@@ -1145,10 +1145,10 @@ static void __cmd_lat(void)
 	next = rb_first(&lat_snapshot_root);
 
 	while (next) {
-		struct thread_latency *lat;
+		struct task_atoms *atom_list;
 
-		lat = rb_entry(next, struct thread_latency, node);
-		output_lat_thread(lat);
+		atom_list = rb_entry(next, struct task_atoms, node);
+		output_lat_thread(atom_list);
 		next = rb_next(next);
 	}
 
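For readers following the rename, below is a minimal userspace sketch, not part of the patch, of how the renamed types relate. It is a simplification under stated assumptions: a plain pointer chain and hard-coded sample values stand in for the kernel's list.h/rbtree.h machinery and real trace events, the thread_state filtering is dropped, and the names report() and "example-task" are invented for illustration. It only mirrors the aggregation that output_lat_thread() performs over one task's work atoms.

/*
 * Simplified stand-ins for the renamed structures:
 *   struct work_atom  - one wakeup -> sched-in interval of a task
 *   struct task_atoms - per-task container of such atoms
 * The real code keys task_atoms in an rbtree by thread->pid; a bare
 * singly linked list is used here instead.
 */
#include <stdio.h>

typedef unsigned long long u64;

struct work_atom {
	struct work_atom *next;		/* stand-in for list_head list */
	u64 wake_up_time;
	u64 sched_in_time;
	u64 runtime;
};

struct task_atoms {
	struct work_atom *atoms;	/* stand-in for snapshot_list */
	const char *comm;		/* stand-in for thread->comm */
};

/*
 * Mirrors the loop in output_lat_thread(): sum per-atom runtime and
 * derive the maximum and average wakeup latency (sched_in_time minus
 * wake_up_time) across all atoms of one task.
 */
static void report(const struct task_atoms *t)
{
	u64 total = 0, total_runtime = 0, max = 0;
	int count = 0;
	const struct work_atom *a;

	for (a = t->atoms; a; a = a->next) {
		u64 delta = a->sched_in_time - a->wake_up_time;

		total_runtime += a->runtime;
		if (delta > max)
			max = delta;
		total += delta;
		count++;
	}
	if (count)
		printf("%-16s runtime %llu, avg lat %llu, max lat %llu\n",
		       t->comm, total_runtime, total / count, max);
}

int main(void)
{
	struct work_atom a2 = { NULL, 2000, 2700, 400 };	/* latency 700 */
	struct work_atom a1 = { &a2, 1000, 1500, 300 };		/* latency 500 */
	struct task_atoms t = { &a1, "example-task" };

	report(&t);
	return 0;
}

In the real tool, each task_atoms node is also inserted into the lat_snapshot_root rbtree keyed by thread->pid, which is what thread_atom_list_search() and __thread_latency_insert() maintain.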