path: root/tools
author		Ingo Molnar <mingo@elte.hu>	2009-09-11 06:12:54 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-09-13 04:22:50 -0400
commit		b5fae128e41021889777f8ead810cbd2a8b249fc (patch)
tree		4fb7885dc9d9232c6c8fb4f45b95dfedcdbac175 /tools
parent		b1ffe8f3e0c96f5527a89e24410d6b0e59b3554a (diff)
perf sched: Clean up PID sorting logic
Use a sort list for thread atoms insertion as well - instead of hardcoded for PID.

Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
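In short: thread_atoms_search() no longer open-codes a PID comparison; the rb-tree lookup is keyed by the same comparator list (cmp_pid) that the latency output already uses for sorting, so any chain of sort dimensions can drive both. Below is a minimal, self-contained userspace sketch of that comparator-chain pattern (illustrative only: it uses a plain array rather than the kernel's list_head, and the task_atoms_demo and chain_cmp names are made up for the example).

/* Comparator-chain sketch: the first non-zero result from a chain of
 * named comparators decides the ordering, mirroring thread_lat_cmp(). */
#include <stdio.h>

struct task_atoms_demo {
	int pid;
	unsigned long long total_runtime;
};

typedef int (*sort_fn_t)(const struct task_atoms_demo *,
			 const struct task_atoms_demo *);

static int pid_cmp(const struct task_atoms_demo *l, const struct task_atoms_demo *r)
{
	return (l->pid > r->pid) - (l->pid < r->pid);
}

static int runtime_cmp(const struct task_atoms_demo *l, const struct task_atoms_demo *r)
{
	return (l->total_runtime > r->total_runtime) - (l->total_runtime < r->total_runtime);
}

/* Walk the chain, stopping at the first comparator that differentiates l and r. */
static int chain_cmp(sort_fn_t *chain, int n,
		     const struct task_atoms_demo *l, const struct task_atoms_demo *r)
{
	int ret = 0;

	for (int i = 0; i < n && !ret; i++)
		ret = chain[i](l, r);
	return ret;
}

int main(void)
{
	struct task_atoms_demo a = { .pid = 10, .total_runtime = 500 };
	struct task_atoms_demo b = { .pid = 10, .total_runtime = 900 };
	sort_fn_t chain[] = { pid_cmp, runtime_cmp };

	/* PIDs tie, so the chain falls through to the runtime comparator: prints -1. */
	printf("cmp = %d\n", chain_cmp(chain, 2, &a, &b));
	return 0;
}

Keying the rb-tree search with such a chain (as thread_atoms_search() now does via thread_lat_cmp() and cmp_pid) means the tree's ordering and the report's sort order come from one place instead of two.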
Diffstat (limited to 'tools')
-rw-r--r--	tools/perf/builtin-sched.c	88
-rw-r--r--	tools/perf/util/thread.h	8
2 files changed, 51 insertions(+), 45 deletions(-)
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index cc2dbd5b50eb..b72544f2b964 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -144,7 +144,7 @@ struct task_atoms {
 	u64			total_runtime;
 };
 
-typedef int (*sort_thread_lat)(struct task_atoms *, struct task_atoms *);
+typedef int (*sort_fn_t)(struct task_atoms *, struct task_atoms *);
 
 static struct rb_root atom_root, sorted_atom_root;
 
@@ -869,41 +869,22 @@ static struct trace_sched_handler replay_ops = {
 	.fork_event		= replay_fork_event,
 };
 
-static struct task_atoms *
-thread_atoms_search(struct rb_root *root, struct thread *thread)
-{
-	struct rb_node *node = root->rb_node;
-
-	while (node) {
-		struct task_atoms *atoms;
-
-		atoms = container_of(node, struct task_atoms, node);
-		if (thread->pid > atoms->thread->pid)
-			node = node->rb_left;
-		else if (thread->pid < atoms->thread->pid)
-			node = node->rb_right;
-		else {
-			return atoms;
-		}
-	}
-	return NULL;
-}
-
 struct sort_dimension {
 	const char		*name;
-	sort_thread_lat		cmp;
+	sort_fn_t		cmp;
 	struct list_head	list;
 };
 
 static LIST_HEAD(cmp_pid);
 
 static int
-thread_lat_cmp(struct list_head *list, struct task_atoms *l,
-	       struct task_atoms *r)
+thread_lat_cmp(struct list_head *list, struct task_atoms *l, struct task_atoms *r)
 {
 	struct sort_dimension *sort;
 	int ret = 0;
 
+	BUG_ON(list_empty(list));
+
 	list_for_each_entry(sort, list, list) {
 		ret = sort->cmp(l, r);
 		if (ret)
@@ -913,6 +894,32 @@ thread_lat_cmp(struct list_head *list, struct task_atoms *l,
 	return ret;
 }
 
+static struct task_atoms *
+thread_atoms_search(struct rb_root *root, struct thread *thread,
+		    struct list_head *sort_list)
+{
+	struct rb_node *node = root->rb_node;
+	struct task_atoms key = { .thread = thread };
+
+	while (node) {
+		struct task_atoms *atoms;
+		int cmp;
+
+		atoms = container_of(node, struct task_atoms, node);
+
+		cmp = thread_lat_cmp(sort_list, &key, atoms);
+		if (cmp > 0)
+			node = node->rb_left;
+		else if (cmp < 0)
+			node = node->rb_right;
+		else {
+			BUG_ON(thread != atoms->thread);
+			return atoms;
+		}
+	}
+	return NULL;
+}
+
 static void
 __thread_latency_insert(struct rb_root *root, struct task_atoms *data,
 			struct list_head *sort_list)
@@ -1049,18 +1056,18 @@ latency_switch_event(struct trace_switch_event *switch_event,
 	sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
 	sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);
 
-	in_atoms = thread_atoms_search(&atom_root, sched_in);
+	in_atoms = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
 	if (!in_atoms) {
 		thread_atoms_insert(sched_in);
-		in_atoms = thread_atoms_search(&atom_root, sched_in);
+		in_atoms = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
 		if (!in_atoms)
 			die("in-atom: Internal tree error");
 	}
 
-	out_atoms = thread_atoms_search(&atom_root, sched_out);
+	out_atoms = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
 	if (!out_atoms) {
 		thread_atoms_insert(sched_out);
-		out_atoms = thread_atoms_search(&atom_root, sched_out);
+		out_atoms = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
 		if (!out_atoms)
 			die("out-atom: Internal tree error");
 	}
@@ -1085,7 +1092,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
 		return;
 
 	wakee = threads__findnew(wakeup_event->pid, &threads, &last_match);
-	atoms = thread_atoms_search(&atom_root, wakee);
+	atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
 	if (!atoms) {
 		thread_atoms_insert(wakee);
 		return;
@@ -1136,7 +1143,6 @@ static void output_lat_thread(struct task_atoms *atom_list)
 
 static int pid_cmp(struct task_atoms *l, struct task_atoms *r)
 {
-
 	if (l->thread->pid < r->thread->pid)
 		return -1;
 	if (l->thread->pid > r->thread->pid)
@@ -1146,8 +1152,8 @@ static int pid_cmp(struct task_atoms *l, struct task_atoms *r)
 }
 
 static struct sort_dimension pid_sort_dimension = {
-	.name = "pid",
-	.cmp = pid_cmp,
+	.name			= "pid",
+	.cmp			= pid_cmp,
 };
 
 static int avg_cmp(struct task_atoms *l, struct task_atoms *r)
@@ -1172,8 +1178,8 @@ static int avg_cmp(struct task_atoms *l, struct task_atoms *r)
 }
 
 static struct sort_dimension avg_sort_dimension = {
-	.name = "avg",
-	.cmp = avg_cmp,
+	.name			= "avg",
+	.cmp			= avg_cmp,
 };
 
 static int max_cmp(struct task_atoms *l, struct task_atoms *r)
@@ -1187,8 +1193,8 @@ static int max_cmp(struct task_atoms *l, struct task_atoms *r)
 }
 
 static struct sort_dimension max_sort_dimension = {
-	.name = "max",
-	.cmp = max_cmp,
+	.name			= "max",
+	.cmp			= max_cmp,
 };
 
 static int switch_cmp(struct task_atoms *l, struct task_atoms *r)
@@ -1202,8 +1208,8 @@ static int switch_cmp(struct task_atoms *l, struct task_atoms *r)
 }
 
 static struct sort_dimension switch_sort_dimension = {
-	.name = "switch",
-	.cmp = switch_cmp,
+	.name			= "switch",
+	.cmp			= switch_cmp,
 };
 
 static int runtime_cmp(struct task_atoms *l, struct task_atoms *r)
@@ -1217,8 +1223,8 @@ static int runtime_cmp(struct task_atoms *l, struct task_atoms *r)
 }
 
 static struct sort_dimension runtime_sort_dimension = {
-	.name = "runtime",
-	.cmp = runtime_cmp,
+	.name			= "runtime",
+	.cmp			= runtime_cmp,
 };
 
 static struct sort_dimension *available_sorts[] = {
@@ -1666,8 +1672,8 @@ int cmd_sched(int argc, const char **argv, const char *prefix __used)
 		argc = parse_options(argc, argv, latency_options, latency_usage, 0);
 		if (argc)
 			usage_with_options(latency_usage, latency_options);
-		setup_sorting();
 	}
+	setup_sorting();
 	__cmd_lat();
 } else if (!strncmp(argv[0], "rep", 3)) {
 	trace_handler = &replay_ops;
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 634f2809a342..665d1f3dc977 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -4,10 +4,10 @@
 #include "symbol.h"
 
 struct thread {
-	struct rb_node	 rb_node;
-	struct list_head maps;
-	pid_t		 pid;
-	char		 *comm;
+	struct rb_node		rb_node;
+	struct list_head	maps;
+	pid_t			pid;
+	char			*comm;
 };
 
 int thread__set_comm(struct thread *self, const char *comm);