author      Arnaldo Carvalho de Melo <acme@redhat.com>    2011-05-15 08:39:00 -0400
committer   Arnaldo Carvalho de Melo <acme@redhat.com>    2011-05-15 09:02:14 -0400
commit      aece948f5ddd70d70df2f35855c706ef9a4f62e2
tree        ea2611cea32c492d7b3f4f49ec26df05132d5607
parent      b90194181988063266f3da0b7bf3e57268c627c8

perf evlist: Fix per thread mmap setup
The PERF_EVENT_IOC_SET_OUTPUT ioctl was returning -EINVAL when --pid was
used to monitor multithreaded apps, because a ring buffer can only be
shared by events on the same thread when not doing per-cpu mmaps.
Fix it by using per-thread ring buffers.
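The sharing scheme the fix adopts can be sketched as follows. This is a
simplified, hypothetical helper written only for illustration (the real code
is perf_evlist__mmap_per_thread() in the patch below); it assumes
fds[thread][counter] holds descriptors from perf_event_open() calls made with
cpu == -1, i.e. a --pid/--tid session without a CPU list:

#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/perf_event.h>

static int mmap_one_buffer_per_thread(int **fds, int nr_threads, int nr_counters,
                                      void **bases, size_t mmap_len)
{
        int thread, counter;

        for (thread = 0; thread < nr_threads; thread++) {
                int output = -1;

                for (counter = 0; counter < nr_counters; counter++) {
                        int fd = fds[thread][counter];

                        if (output == -1) {
                                /* The thread's first event owns the ring buffer. */
                                output = fd;
                                bases[thread] = mmap(NULL, mmap_len,
                                                     PROT_READ | PROT_WRITE,
                                                     MAP_SHARED, fd, 0);
                                if (bases[thread] == MAP_FAILED)
                                        return -1;
                        } else if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output)) {
                                /*
                                 * Redirecting into a buffer owned by another
                                 * thread, as the old per-cpu style loop did,
                                 * is what the kernel rejects with -EINVAL.
                                 */
                                return -1;
                        }
                }
        }
        return 0;
}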
Tested with:
[root@felicio ~]# tuna -t 26131 -CP | nl
1 thread ctxt_switches
2 pid SCHED_ rtpri affinity voluntary nonvoluntary cmd
3 26131 OTHER 0 0,1 10814276 2397830 chromium-browse
4 642 OTHER 0 0,1 14688 0 chromium-browse
5 26148 OTHER 0 0,1 713602 115479 chromium-browse
6 26149 OTHER 0 0,1 801958 2262 chromium-browse
7 26150 OTHER 0 0,1 1271128 248 chromium-browse
8 26151 OTHER 0 0,1 3 0 chromium-browse
9 27049 OTHER 0 0,1 36796 9 chromium-browse
10 618 OTHER 0 0,1 14711 0 chromium-browse
11 661 OTHER 0 0,1 14593 0 chromium-browse
12 29048 OTHER 0 0,1 28125 0 chromium-browse
13 26143 OTHER 0 0,1 2202789 781 chromium-browse
[root@felicio ~]#
So 11 threads under pid 26131, then:
[root@felicio ~]# perf record -F 50000 --pid 26131
[root@felicio ~]# grep perf_event /proc/`pidof perf`/maps | nl
1 7fa4a2538000-7fa4a25b9000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
2 7fa4a25b9000-7fa4a263a000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
3 7fa4a263a000-7fa4a26bb000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
4 7fa4a26bb000-7fa4a273c000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
5 7fa4a273c000-7fa4a27bd000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
6 7fa4a27bd000-7fa4a283e000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
7 7fa4a283e000-7fa4a28bf000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
8 7fa4a28bf000-7fa4a2940000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
9 7fa4a2940000-7fa4a29c1000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
10 7fa4a29c1000-7fa4a2a42000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
11 7fa4a2a42000-7fa4a2ac3000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
[root@felicio ~]#
11 mmaps, one per thread: since we didn't specify any CPU list, one ring
buffer per thread is needed. And:
[root@felicio ~]# perf record -F 50000 --pid 26131
^C[ perf record: Woken up 79 times to write data ]
[ perf record: Captured and wrote 20.614 MB perf.data (~900639 samples) ]
[root@felicio ~]# perf report -D | grep PERF_RECORD_SAMPLE | cut -d/ -f2 | cut -d: -f1 | sort -n | uniq -c | sort -nr | nl
1 371310 26131
2 96516 26148
3 95694 26149
4 95203 26150
5 7291 26143
6 87 27049
7 76 661
8 60 29048
9 47 618
10 43 642
[root@felicio ~]#
Ok, one of the threads, 26151, was quiescent, so there are no samples for it,
but all the others are there.
Then, if I specify one CPU:
[root@felicio ~]# perf record -F 50000 --pid 26131 --cpu 1
^C[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.680 MB perf.data (~29730 samples) ]
[root@felicio ~]# perf report -D | grep PERF_RECORD_SAMPLE | cut -d/ -f2 | cut -d: -f1 | sort -n | uniq -c | sort -nr | nl
1 8444 26131
2 2584 26149
3 2518 26148
4 2324 26150
5 123 26143
6 9 661
7 9 29048
[root@felicio ~]#
This machine has two cores, so fewer threads appeared on the radar, and:
[root@felicio ~]# grep perf_event /proc/`pidof perf`/maps | nl
1 7f484b922000-7f484b9a3000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
[root@felicio ~]#
Just one mmap, as now we can use a single per-cpu buffer instead of the
per-thread buffers needed in the previous case.
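The layout choice hinges on the cpu map: a --pid/--tid session without --cpu
gets a dummy cpu map whose single entry is -1, and the patch keys off that
both when sizing the mmap array and when picking the mmap strategy. Condensed
from the evlist.c hunks below (not a standalone function):

evlist->nr_mmaps = evlist->cpus->nr;
if (evlist->cpus->map[0] == -1)         /* no CPU list given */
        evlist->nr_mmaps = evlist->threads->nr;
...
if (evlist->cpus->map[0] == -1)
        return perf_evlist__mmap_per_thread(evlist, prot, mask);

return perf_evlist__mmap_per_cpu(evlist, prot, mask);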
For global profiling:
[root@felicio ~]# perf record -F 50000 -a
^C[ perf record: Woken up 26 times to write data ]
[ perf record: Captured and wrote 7.128 MB perf.data (~311412 samples) ]
[root@felicio ~]# grep perf_event /proc/`pidof perf`/maps | nl
1 7fb49b435000-7fb49b4b6000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
2 7fb49b4b6000-7fb49b537000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
[root@felicio ~]#
It uses per-cpu buffers.
For just one thread:
[root@felicio ~]# perf record -F 50000 --tid 26148
^C[ perf record: Woken up 2 times to write data ]
[ perf record: Captured and wrote 0.330 MB perf.data (~14426 samples) ]
[root@felicio ~]# perf report -D | grep PERF_RECORD_SAMPLE | cut -d/ -f2 | cut -d: -f1 | sort -n | uniq -c | sort -nr | nl
1 9969 26148
[root@felicio ~]#
[root@felicio ~]# grep perf_event /proc/`pidof perf`/maps | nl
1 7f286a51b000-7f286a59c000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
[root@felicio ~]#
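On the reader side nothing needs to know which layout was chosen: the tools
now iterate over the evlist's nr_mmaps (threads->nr for per-thread sessions,
cpus->nr otherwise) instead of cpus->nr. A condensed sketch based on the
builtin-record.c and builtin-top.c hunks below, where process_event() is a
hypothetical stand-in for whatever the tool does with each record:

int i;
union perf_event *event;

for (i = 0; i < evsel_list->nr_mmaps; i++) {
        while ((event = perf_evlist__mmap_read(evsel_list, i)) != NULL)
                process_event(event);   /* hypothetical handler */
}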
Tested-by: David Ahern <dsahern@gmail.com>
Tested-by: Lin Ming <ming.m.lin@intel.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Tom Zanussi <tzanussi@gmail.com>
Link: http://lkml.kernel.org/r/20110426204401.GB1746@ghostprotocols.net
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-rw-r--r--   tools/perf/builtin-record.c    2
-rw-r--r--   tools/perf/builtin-test.c      2
-rw-r--r--   tools/perf/builtin-top.c       8
-rw-r--r--   tools/perf/util/evlist.c     151
-rw-r--r--   tools/perf/util/evlist.h       3
-rw-r--r--   tools/perf/util/python.c       2
6 files changed, 115 insertions, 53 deletions
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 416538248a4b..0974f957b8fa 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -427,7 +427,7 @@ static void mmap_read_all(void)
 {
         int i;
 
-        for (i = 0; i < evsel_list->cpus->nr; i++) {
+        for (i = 0; i < evsel_list->nr_mmaps; i++) {
                 if (evsel_list->mmap[i].base)
                         mmap_read(&evsel_list->mmap[i]);
         }
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index 11e3c8458362..2f9a337b182f 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -549,7 +549,7 @@ static int test__basic_mmap(void)
                 ++foo;
         }
 
-        while ((event = perf_evlist__read_on_cpu(evlist, 0)) != NULL) {
+        while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
                 struct perf_sample sample;
 
                 if (event->header.type != PERF_RECORD_SAMPLE) {
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 7e3d6e310bf8..ebfc7cf5f63b 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -801,12 +801,12 @@ static void perf_event__process_sample(const union perf_event *event,
         }
 }
 
-static void perf_session__mmap_read_cpu(struct perf_session *self, int cpu)
+static void perf_session__mmap_read_idx(struct perf_session *self, int idx)
 {
         struct perf_sample sample;
         union perf_event *event;
 
-        while ((event = perf_evlist__read_on_cpu(top.evlist, cpu)) != NULL) {
+        while ((event = perf_evlist__mmap_read(top.evlist, idx)) != NULL) {
                 perf_session__parse_sample(self, event, &sample);
 
                 if (event->header.type == PERF_RECORD_SAMPLE)
@@ -820,8 +820,8 @@ static void perf_session__mmap_read(struct perf_session *self)
 {
         int i;
 
-        for (i = 0; i < top.evlist->cpus->nr; i++)
-                perf_session__mmap_read_cpu(self, i);
+        for (i = 0; i < top.evlist->nr_mmaps; i++)
+                perf_session__mmap_read_idx(self, i);
 }
 
 static void start_counters(struct perf_evlist *evlist)
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 1884a7c7eb8f..23eb22b05d27 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -166,11 +166,11 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
         return NULL;
 }
 
-union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu)
+union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
 {
         /* XXX Move this to perf.c, making it generally available */
         unsigned int page_size = sysconf(_SC_PAGE_SIZE);
-        struct perf_mmap *md = &evlist->mmap[cpu];
+        struct perf_mmap *md = &evlist->mmap[idx];
         unsigned int head = perf_mmap__read_head(md);
         unsigned int old = md->prev;
         unsigned char *data = md->base + page_size;
@@ -235,31 +235,37 @@ union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu)
 
 void perf_evlist__munmap(struct perf_evlist *evlist)
 {
-        int cpu;
+        int i;
 
-        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
-                if (evlist->mmap[cpu].base != NULL) {
-                        munmap(evlist->mmap[cpu].base, evlist->mmap_len);
-                        evlist->mmap[cpu].base = NULL;
+        for (i = 0; i < evlist->nr_mmaps; i++) {
+                if (evlist->mmap[i].base != NULL) {
+                        munmap(evlist->mmap[i].base, evlist->mmap_len);
+                        evlist->mmap[i].base = NULL;
                 }
         }
+
+        free(evlist->mmap);
+        evlist->mmap = NULL;
 }
 
 int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
 {
-        evlist->mmap = zalloc(evlist->cpus->nr * sizeof(struct perf_mmap));
+        evlist->nr_mmaps = evlist->cpus->nr;
+        if (evlist->cpus->map[0] == -1)
+                evlist->nr_mmaps = evlist->threads->nr;
+        evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
         return evlist->mmap != NULL ? 0 : -ENOMEM;
 }
 
 static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *evsel,
-                               int cpu, int prot, int mask, int fd)
+                               int idx, int prot, int mask, int fd)
 {
-        evlist->mmap[cpu].prev = 0;
-        evlist->mmap[cpu].mask = mask;
-        evlist->mmap[cpu].base = mmap(NULL, evlist->mmap_len, prot,
-                                      MAP_SHARED, fd, 0);
-        if (evlist->mmap[cpu].base == MAP_FAILED) {
-                if (evlist->cpus->map[cpu] == -1 && evsel->attr.inherit)
+        evlist->mmap[idx].prev = 0;
+        evlist->mmap[idx].mask = mask;
+        evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
+                                      MAP_SHARED, fd, 0);
+        if (evlist->mmap[idx].base == MAP_FAILED) {
+                if (evlist->cpus->map[idx] == -1 && evsel->attr.inherit)
                         ui__warning("Inherit is not allowed on per-task "
                                     "events using mmap.\n");
                 return -1;
@@ -269,6 +275,86 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *ev
         return 0;
 }
 
+static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
+{
+        struct perf_evsel *evsel;
+        int cpu, thread;
+
+        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+                int output = -1;
+
+                for (thread = 0; thread < evlist->threads->nr; thread++) {
+                        list_for_each_entry(evsel, &evlist->entries, node) {
+                                int fd = FD(evsel, cpu, thread);
+
+                                if (output == -1) {
+                                        output = fd;
+                                        if (__perf_evlist__mmap(evlist, evsel, cpu,
+                                                                prot, mask, output) < 0)
+                                                goto out_unmap;
+                                } else {
+                                        if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
+                                                goto out_unmap;
+                                }
+
+                                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
+                                    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
+                                        goto out_unmap;
+                        }
+                }
+        }
+
+        return 0;
+
+out_unmap:
+        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+                if (evlist->mmap[cpu].base != NULL) {
+                        munmap(evlist->mmap[cpu].base, evlist->mmap_len);
+                        evlist->mmap[cpu].base = NULL;
+                }
+        }
+        return -1;
+}
+
+static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
+{
+        struct perf_evsel *evsel;
+        int thread;
+
+        for (thread = 0; thread < evlist->threads->nr; thread++) {
+                int output = -1;
+
+                list_for_each_entry(evsel, &evlist->entries, node) {
+                        int fd = FD(evsel, 0, thread);
+
+                        if (output == -1) {
+                                output = fd;
+                                if (__perf_evlist__mmap(evlist, evsel, thread,
+                                                        prot, mask, output) < 0)
+                                        goto out_unmap;
+                        } else {
+                                if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
+                                        goto out_unmap;
+                        }
+
+                        if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
+                            perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
+                                goto out_unmap;
+                }
+        }
+
+        return 0;
+
+out_unmap:
+        for (thread = 0; thread < evlist->threads->nr; thread++) {
+                if (evlist->mmap[thread].base != NULL) {
+                        munmap(evlist->mmap[thread].base, evlist->mmap_len);
+                        evlist->mmap[thread].base = NULL;
+                }
+        }
+        return -1;
+}
+
 /** perf_evlist__mmap - Create per cpu maps to receive events
  *
  * @evlist - list of events
@@ -287,11 +373,11 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *ev
 int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
 {
         unsigned int page_size = sysconf(_SC_PAGE_SIZE);
-        int mask = pages * page_size - 1, cpu;
-        struct perf_evsel *first_evsel, *evsel;
+        int mask = pages * page_size - 1;
+        struct perf_evsel *evsel;
         const struct cpu_map *cpus = evlist->cpus;
         const struct thread_map *threads = evlist->threads;
-        int thread, prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);
+        int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);
 
         if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
                 return -ENOMEM;
@@ -301,43 +387,18 @@ int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
 
         evlist->overwrite = overwrite;
         evlist->mmap_len = (pages + 1) * page_size;
-        first_evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
 
         list_for_each_entry(evsel, &evlist->entries, node) {
                 if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                     evsel->sample_id == NULL &&
                     perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
                         return -ENOMEM;
-
-                for (cpu = 0; cpu < cpus->nr; cpu++) {
-                        for (thread = 0; thread < threads->nr; thread++) {
-                                int fd = FD(evsel, cpu, thread);
-
-                                if (evsel->idx || thread) {
-                                        if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT,
-                                                  FD(first_evsel, cpu, 0)) != 0)
-                                                goto out_unmap;
-                                } else if (__perf_evlist__mmap(evlist, evsel, cpu,
-                                                               prot, mask, fd) < 0)
-                                        goto out_unmap;
-
-                                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
-                                    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
-                                        goto out_unmap;
-                        }
-                }
         }
 
-        return 0;
+        if (evlist->cpus->map[0] == -1)
+                return perf_evlist__mmap_per_thread(evlist, prot, mask);
 
-out_unmap:
-        for (cpu = 0; cpu < cpus->nr; cpu++) {
-                if (evlist->mmap[cpu].base != NULL) {
-                        munmap(evlist->mmap[cpu].base, evlist->mmap_len);
-                        evlist->mmap[cpu].base = NULL;
-                }
-        }
-        return -1;
+        return perf_evlist__mmap_per_cpu(evlist, prot, mask);
 }
 
 int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 8b1cb7a4c5f1..7109d7add14e 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -17,6 +17,7 @@ struct perf_evlist {
         struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
         int nr_entries;
         int nr_fds;
+        int nr_mmaps;
         int mmap_len;
         bool overwrite;
         union perf_event event_copy;
@@ -46,7 +47,7 @@ void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
 
 struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
 
-union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *self, int cpu);
+union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx);
 
 int perf_evlist__alloc_mmap(struct perf_evlist *evlist);
 int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite);
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index f5e38451fdc5..99c722672f84 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -680,7 +680,7 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
                           &cpu, &sample_id_all))
                 return NULL;
 
-        event = perf_evlist__read_on_cpu(evlist, cpu);
+        event = perf_evlist__mmap_read(evlist, cpu);
         if (event != NULL) {
                 struct perf_evsel *first;
                 PyObject *pyevent = pyrf_event__new(event);