author      Arnaldo Carvalho de Melo <acme@redhat.com>    2011-01-12 07:52:47 -0500
committer   Arnaldo Carvalho de Melo <acme@redhat.com>    2011-01-22 16:56:29 -0500
commit      72cb7013e08dec29631e0447f9496b7bacd3e14b (patch)
tree        a26ccf04710cdc06f03fffafe5a09f4f6503abf4 /tools/perf/builtin-top.c
parent      9d04f1781772e11bd58806391555fc23ebb54377 (diff)
perf top: Use perf_evsel__open
Now that it handles group_fd and inherit, we can use it here too, sharing it
with 'perf stat'.

Next step: 'perf record' should use it as well, then move the mmap_array out
of ->priv and into perf_evsel, with top and record sharing it, and at the
same time write a 'perf test' stress test.
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Tom Zanussi <tzanussi@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
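
[Editor's note] For readers without the perf tool internals at hand, the sketch below is a minimal, self-contained illustration (plain perf_event_open() syscall usage, not perf tool code) of the group-leader bookkeeping that start_counter() used to do by hand through the removed global group_fd, and that perf_evsel__open() now encapsulates: the first counter in a group is opened with group_fd == -1 and becomes the leader, and each subsequent counter passes the leader's fd. Event choices and error handling here are illustrative only.

/*
 * Sketch of perf_event group-leader semantics: open a leader with
 * group_fd == -1, then attach a second counter to that group by
 * passing the leader's fd.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	int group_fd, fd;

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);
	attr.type   = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* First counter: no leader yet, so group_fd is -1. */
	group_fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (group_fd < 0) {
		perror("perf_event_open (leader)");
		return 1;
	}

	/* Second counter joins the leader's group via group_fd. */
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	fd = perf_event_open(&attr, 0, -1, group_fd, 0);
	if (fd < 0) {
		perror("perf_event_open (member)");
		return 1;
	}

	printf("group leader fd=%d, member fd=%d\n", group_fd, fd);
	close(fd);
	close(group_fd);
	return 0;
}

Built with a plain C compiler and run on a kernel with CONFIG_PERF_EVENTS=y, both counters are scheduled onto the PMU as a single group; this is the pairing that 'perf top' previously tracked per cpu/thread in start_counter() and now delegates to perf_evsel__open().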
Diffstat (limited to 'tools/perf/builtin-top.c')
-rw-r--r--   tools/perf/builtin-top.c   92
1 file changed, 42 insertions(+), 50 deletions(-)
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 1bc465215fc6..15d89bede2fb 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1210,39 +1210,50 @@ static void perf_session__mmap_read(struct perf_session *self)
 	}
 }
 
-int group_fd;
-
 static void start_counter(int i, struct perf_evlist *evlist,
 			  struct perf_evsel *evsel)
 {
 	struct xyarray *mmap_array = evsel->priv;
 	struct mmap_data *mm;
-	struct perf_event_attr *attr;
-	int cpu = -1;
 	int thread_index;
 
-	if (target_tid == -1)
-		cpu = cpus->map[i];
-
-	attr = &evsel->attr;
+	for (thread_index = 0; thread_index < threads->nr; thread_index++) {
+		assert(FD(evsel, i, thread_index) >= 0);
+		fcntl(FD(evsel, i, thread_index), F_SETFL, O_NONBLOCK);
 
-	attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
+		evlist->pollfd[evlist->nr_fds].fd = FD(evsel, i, thread_index);
+		evlist->pollfd[evlist->nr_fds].events = POLLIN;
+		evlist->nr_fds++;
 
-	if (freq) {
-		attr->sample_type |= PERF_SAMPLE_PERIOD;
-		attr->freq = 1;
-		attr->sample_freq = freq;
+		mm = xyarray__entry(mmap_array, i, thread_index);
+		mm->prev = 0;
+		mm->mask = mmap_pages*page_size - 1;
+		mm->base = mmap(NULL, (mmap_pages+1)*page_size,
+				PROT_READ, MAP_SHARED, FD(evsel, i, thread_index), 0);
+		if (mm->base == MAP_FAILED)
+			die("failed to mmap with %d (%s)\n", errno, strerror(errno));
 	}
+}
+
+static void start_counters(struct perf_evlist *evlist)
+{
+	struct perf_evsel *counter;
+	int i;
 
-	attr->inherit = (cpu < 0) && inherit;
-	attr->mmap = 1;
+	list_for_each_entry(counter, &evlist->entries, node) {
+		struct perf_event_attr *attr = &counter->attr;
 
-	for (thread_index = 0; thread_index < threads->nr; thread_index++) {
-try_again:
-		FD(evsel, i, thread_index) = sys_perf_event_open(attr,
-				threads->map[thread_index], cpu, group_fd, 0);
+		attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
+
+		if (freq) {
+			attr->sample_type |= PERF_SAMPLE_PERIOD;
+			attr->freq = 1;
+			attr->sample_freq = freq;
+		}
 
-		if (FD(evsel, i, thread_index) < 0) {
+		attr->mmap = 1;
+try_again:
+		if (perf_evsel__open(counter, cpus, threads, group, inherit) < 0) {
 			int err = errno;
 
 			if (err == EPERM || err == EACCES)
@@ -1254,8 +1265,8 @@ try_again:
 			 * based cpu-clock-tick sw counter, which
 			 * is always available even if no PMU support:
 			 */
-			if (attr->type == PERF_TYPE_HARDWARE
-					&& attr->config == PERF_COUNT_HW_CPU_CYCLES) {
+			if (attr->type == PERF_TYPE_HARDWARE &&
+			    attr->config == PERF_COUNT_HW_CPU_CYCLES) {
 
 				if (verbose)
 					warning(" ... trying to fall back to cpu-clock-ticks\n");
@@ -1265,39 +1276,24 @@ try_again:
 				goto try_again;
 			}
 			printf("\n");
-			error("sys_perf_event_open() syscall returned with %d (%s). /bin/dmesg may provide additional information.\n",
-			      FD(evsel, i, thread_index), strerror(err));
+			error("sys_perf_event_open() syscall returned with %d "
+			      "(%s). /bin/dmesg may provide additional information.\n",
+			      err, strerror(err));
 			die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
 			exit(-1);
 		}
-		assert(FD(evsel, i, thread_index) >= 0);
-		fcntl(FD(evsel, i, thread_index), F_SETFL, O_NONBLOCK);
-
-		/*
-		 * First counter acts as the group leader:
-		 */
-		if (group && group_fd == -1)
-			group_fd = FD(evsel, i, thread_index);
-
-		evlist->pollfd[evlist->nr_fds].fd = FD(evsel, i, thread_index);
-		evlist->pollfd[evlist->nr_fds].events = POLLIN;
-		evlist->nr_fds++;
+	}
 
-		mm = xyarray__entry(mmap_array, i, thread_index);
-		mm->prev = 0;
-		mm->mask = mmap_pages*page_size - 1;
-		mm->base = mmap(NULL, (mmap_pages+1)*page_size,
-				PROT_READ, MAP_SHARED, FD(evsel, i, thread_index), 0);
-		if (mm->base == MAP_FAILED)
-			die("failed to mmap with %d (%s)\n", errno, strerror(errno));
+	for (i = 0; i < cpus->nr; i++) {
+		list_for_each_entry(counter, &evlist->entries, node)
+			start_counter(i, evsel_list, counter);
 	}
 }
 
 static int __cmd_top(void)
 {
 	pthread_t thread;
-	struct perf_evsel *counter;
-	int i, ret;
+	int ret;
 	/*
 	 * FIXME: perf_session__new should allow passing a O_MMAP, so that all this
 	 * mmap reading, etc is encapsulated in it. Use O_WRONLY for now.
@@ -1311,11 +1307,7 @@ static int __cmd_top(void)
 	else
 		event__synthesize_threads(event__process, session);
 
-	for (i = 0; i < cpus->nr; i++) {
-		group_fd = -1;
-		list_for_each_entry(counter, &evsel_list->entries, node)
-			start_counter(i, evsel_list, counter);
-	}
+	start_counters(evsel_list);
 
 	/* Wait for a minimal set of events before starting the snapshot */
 	poll(evsel_list->pollfd, evsel_list->nr_fds, 100);