author    Arnaldo Carvalho de Melo <acme@redhat.com>    2011-01-30 08:59:43 -0500
committer Arnaldo Carvalho de Melo <acme@redhat.com>    2011-01-31 09:40:52 -0500
commit    7e2ed097538c57ff5268e9a6bced7c0b885809c8 (patch)
tree      44f9998cc6054d5bef07d6c2979afb0e81ddf13c /tools/perf/builtin-top.c
parent    f8a9530939ed87b9a1b1a038b90e355098b679a2 (diff)
perf evlist: Store pointer to the cpu and thread maps
So that we don't have to pass them around to the several methods that need them, simplifying usage.

There is one case where we don't have the thread/cpu map in advance: the option parsing routines used by top, stat and record, where we have to wait until all options are parsed to know whether a cpu or thread list was passed, and only then create those maps. For that case, consolidate the cpu and thread map creation via perf_evlist__create_maps(), factored out of the code in top and record.

Also provide a perf_evlist__set_maps() for cases where multiple evlists share maps, or where maps that represent CPU sockets, for instance, are crafted out of topology information, or where only a subset of the threads in a particular application is to be monitored, providing more granularity in specifying which cpus and threads to monitor.

Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Tom Zanussi <tzanussi@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
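As an illustration of the resulting flow, here is a minimal sketch of how a tool could use the two helpers together. The setup_maps() wrapper and other_evlist are hypothetical names for illustration only; the perf_evlist__create_maps() arguments follow the diff below, and the perf_evlist__set_maps() signature is inferred from the commit message rather than quoted from util/evlist.h.

	#include "util/evlist.h"

	/* Hypothetical helper, not part of this commit. */
	static int setup_maps(struct perf_evlist *evlist,
			      struct perf_evlist *other_evlist,
			      pid_t target_pid, pid_t target_tid,
			      const char *cpu_list)
	{
		/* Build evlist->cpus and evlist->threads from the parsed options. */
		if (perf_evlist__create_maps(evlist, target_pid,
					     target_tid, cpu_list) < 0)
			return -1;

		/*
		 * Let a second evlist share the same maps instead of recreating
		 * them (assumed signature: set_maps(evlist, cpus, threads)).
		 */
		perf_evlist__set_maps(other_evlist, evlist->cpus, evlist->threads);
		return 0;
	}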
Diffstat (limited to 'tools/perf/builtin-top.c')
-rw-r--r--	tools/perf/builtin-top.c	47
1 file changed, 19 insertions, 28 deletions
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 2f4d1f244be1..599036b06730 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -73,9 +73,7 @@ static int print_entries;
 
 static int target_pid = -1;
 static int target_tid = -1;
-static struct thread_map *threads;
 static bool inherit = false;
-static struct cpu_map *cpus;
 static int realtime_prio = 0;
 static bool group = false;
 static unsigned int page_size;
@@ -567,12 +565,13 @@ static void print_sym_table(struct perf_session *session)
 	printf(" (all");
 
 	if (cpu_list)
-		printf(", CPU%s: %s)\n", cpus->nr > 1 ? "s" : "", cpu_list);
+		printf(", CPU%s: %s)\n", evsel_list->cpus->nr > 1 ? "s" : "", cpu_list);
 	else {
 		if (target_tid != -1)
 			printf(")\n");
 		else
-			printf(", %d CPU%s)\n", cpus->nr, cpus->nr > 1 ? "s" : "");
+			printf(", %d CPU%s)\n", evsel_list->cpus->nr,
+			       evsel_list->cpus->nr > 1 ? "s" : "");
 	}
 
 	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
@@ -1124,7 +1123,7 @@ static void perf_session__mmap_read(struct perf_session *self)
 {
 	int i;
 
-	for (i = 0; i < cpus->nr; i++)
+	for (i = 0; i < evsel_list->cpus->nr; i++)
 		perf_session__mmap_read_cpu(self, i);
 }
 
@@ -1150,7 +1149,8 @@ static void start_counters(struct perf_evlist *evlist)
 
 	attr->mmap = 1;
 try_again:
-	if (perf_evsel__open(counter, cpus, threads, group, inherit) < 0) {
+	if (perf_evsel__open(counter, evsel_list->cpus,
+			     evsel_list->threads, group, inherit) < 0) {
 		int err = errno;
 
 		if (err == EPERM || err == EACCES)
@@ -1181,7 +1181,7 @@ try_again:
 		}
 	}
 
-	if (perf_evlist__mmap(evlist, cpus, threads, mmap_pages, false) < 0)
+	if (perf_evlist__mmap(evlist, mmap_pages, false) < 0)
 		die("failed to mmap with %d (%s)\n", errno, strerror(errno));
 }
 
@@ -1296,7 +1296,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
 	struct perf_evsel *pos;
 	int status = -ENOMEM;
 
-	evsel_list = perf_evlist__new();
+	evsel_list = perf_evlist__new(NULL, NULL);
 	if (evsel_list == NULL)
 		return -ENOMEM;
 
@@ -1306,15 +1306,6 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
 	if (argc)
 		usage_with_options(top_usage, options);
 
-	if (target_pid != -1)
-		target_tid = target_pid;
-
-	threads = thread_map__new(target_pid, target_tid);
-	if (threads == NULL) {
-		pr_err("Problems finding threads of monitor\n");
-		usage_with_options(top_usage, options);
-	}
-
 	/* CPU and PID are mutually exclusive */
 	if (target_tid > 0 && cpu_list) {
 		printf("WARNING: PID switch overriding CPU\n");
@@ -1322,6 +1313,13 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
 		cpu_list = NULL;
 	}
 
+	if (target_pid != -1)
+		target_tid = target_pid;
+
+	if (perf_evlist__create_maps(evsel_list, target_pid,
+				     target_tid, cpu_list) < 0)
+		usage_with_options(top_usage, options);
+
 	if (!evsel_list->nr_entries &&
 	    perf_evlist__add_default(evsel_list) < 0) {
 		pr_err("Not enough memory for event selector list\n");
@@ -1343,16 +1341,9 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
 		exit(EXIT_FAILURE);
 	}
 
-	if (target_tid != -1)
-		cpus = cpu_map__dummy_new();
-	else
-		cpus = cpu_map__new(cpu_list);
-
-	if (cpus == NULL)
-		usage_with_options(top_usage, options);
-
 	list_for_each_entry(pos, &evsel_list->entries, node) {
-		if (perf_evsel__alloc_fd(pos, cpus->nr, threads->nr) < 0)
+		if (perf_evsel__alloc_fd(pos, evsel_list->cpus->nr,
+					 evsel_list->threads->nr) < 0)
 			goto out_free_fd;
 		/*
 		 * Fill in the ones not specifically initialized via -c:
@@ -1363,8 +1354,8 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
 		pos->attr.sample_period = default_interval;
 	}
 
-	if (perf_evlist__alloc_pollfd(evsel_list, cpus->nr, threads->nr) < 0 ||
-	    perf_evlist__alloc_mmap(evsel_list, cpus->nr) < 0)
+	if (perf_evlist__alloc_pollfd(evsel_list) < 0 ||
+	    perf_evlist__alloc_mmap(evsel_list) < 0)
 		goto out_free_fd;
 
 	sym_evsel = list_entry(evsel_list->entries.next, struct perf_evsel, node);