diff options
author | Jiri Olsa <jolsa@kernel.org> | 2019-07-27 16:27:55 -0400 |
---|---|---|
committer | Arnaldo Carvalho de Melo <acme@redhat.com> | 2019-09-25 08:51:45 -0400 |
commit | 4fd0cef2c7b6469abfeef1f9bd056265ce369b13 (patch) | |
tree | 3ad0ec5b6f1c5bf19a4e728d93b7f40cb5f786cb /tools | |
parent | 547740f7b357cd91cca1fab5d7bf3a37469f7587 (diff) |
libperf: Add 'mask' to struct perf_mmap
Move 'mask' from tools/perf's mmap to libperf's perf_mmap struct.
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lore.kernel.org/lkml/20190913132355.21634-12-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools')
-rw-r--r-- | tools/perf/lib/include/internal/mmap.h | 1 | ||||
-rw-r--r-- | tools/perf/util/mmap.c | 24 | ||||
-rw-r--r-- | tools/perf/util/mmap.h | 1 |
3 files changed, 13 insertions, 13 deletions
diff --git a/tools/perf/lib/include/internal/mmap.h b/tools/perf/lib/include/internal/mmap.h index 2ef051901f48..1caa1e8ee5c6 100644 --- a/tools/perf/lib/include/internal/mmap.h +++ b/tools/perf/lib/include/internal/mmap.h | |||
@@ -9,6 +9,7 @@ | |||
9 | */ | 9 | */ |
10 | struct perf_mmap { | 10 | struct perf_mmap { |
11 | void *base; | 11 | void *base; |
12 | int mask; | ||
12 | }; | 13 | }; |
13 | 14 | ||
14 | #endif /* __LIBPERF_INTERNAL_MMAP_H */ | 15 | #endif /* __LIBPERF_INTERNAL_MMAP_H */ |
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c index 76190b2edd78..702e8e0b90ea 100644 --- a/tools/perf/util/mmap.c +++ b/tools/perf/util/mmap.c | |||
@@ -24,7 +24,7 @@ | |||
24 | 24 | ||
25 | size_t perf_mmap__mmap_len(struct mmap *map) | 25 | size_t perf_mmap__mmap_len(struct mmap *map) |
26 | { | 26 | { |
27 | return map->mask + 1 + page_size; | 27 | return map->core.mask + 1 + page_size; |
28 | } | 28 | } |
29 | 29 | ||
30 | /* When check_messup is true, 'end' must point to a good entry */ | 30 | /* When check_messup is true, 'end' must point to a good entry */ |
@@ -38,7 +38,7 @@ static union perf_event *perf_mmap__read(struct mmap *map, | |||
38 | if (diff >= (int)sizeof(event->header)) { | 38 | if (diff >= (int)sizeof(event->header)) { |
39 | size_t size; | 39 | size_t size; |
40 | 40 | ||
41 | event = (union perf_event *)&data[*startp & map->mask]; | 41 | event = (union perf_event *)&data[*startp & map->core.mask]; |
42 | size = event->header.size; | 42 | size = event->header.size; |
43 | 43 | ||
44 | if (size < sizeof(event->header) || diff < (int)size) | 44 | if (size < sizeof(event->header) || diff < (int)size) |
@@ -48,14 +48,14 @@ static union perf_event *perf_mmap__read(struct mmap *map, | |||
48 | * Event straddles the mmap boundary -- header should always | 48 | * Event straddles the mmap boundary -- header should always |
49 | * be inside due to u64 alignment of output. | 49 | * be inside due to u64 alignment of output. |
50 | */ | 50 | */ |
51 | if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) { | 51 | if ((*startp & map->core.mask) + size != ((*startp + size) & map->core.mask)) { |
52 | unsigned int offset = *startp; | 52 | unsigned int offset = *startp; |
53 | unsigned int len = min(sizeof(*event), size), cpy; | 53 | unsigned int len = min(sizeof(*event), size), cpy; |
54 | void *dst = map->event_copy; | 54 | void *dst = map->event_copy; |
55 | 55 | ||
56 | do { | 56 | do { |
57 | cpy = min(map->mask + 1 - (offset & map->mask), len); | 57 | cpy = min(map->core.mask + 1 - (offset & map->core.mask), len); |
58 | memcpy(dst, &data[offset & map->mask], cpy); | 58 | memcpy(dst, &data[offset & map->core.mask], cpy); |
59 | offset += cpy; | 59 | offset += cpy; |
60 | dst += cpy; | 60 | dst += cpy; |
61 | len -= cpy; | 61 | len -= cpy; |
@@ -369,7 +369,7 @@ int perf_mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu) | |||
369 | */ | 369 | */ |
370 | refcount_set(&map->refcnt, 2); | 370 | refcount_set(&map->refcnt, 2); |
371 | map->prev = 0; | 371 | map->prev = 0; |
372 | map->mask = mp->mask; | 372 | map->core.mask = mp->mask; |
373 | map->core.base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot, | 373 | map->core.base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot, |
374 | MAP_SHARED, fd, 0); | 374 | MAP_SHARED, fd, 0); |
375 | if (map->core.base == MAP_FAILED) { | 375 | if (map->core.base == MAP_FAILED) { |
@@ -454,7 +454,7 @@ static int __perf_mmap__read_init(struct mmap *md) | |||
454 | return -EAGAIN; | 454 | return -EAGAIN; |
455 | 455 | ||
456 | size = md->end - md->start; | 456 | size = md->end - md->start; |
457 | if (size > (unsigned long)(md->mask) + 1) { | 457 | if (size > (unsigned long)(md->core.mask) + 1) { |
458 | if (!md->overwrite) { | 458 | if (!md->overwrite) { |
459 | WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n"); | 459 | WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n"); |
460 | 460 | ||
@@ -467,7 +467,7 @@ static int __perf_mmap__read_init(struct mmap *md) | |||
467 | * Backward ring buffer is full. We still have a chance to read | 467 | * Backward ring buffer is full. We still have a chance to read |
468 | * most of the data from it. | 468 | * most of the data from it. |
469 | */ | 469 | */ |
470 | if (overwrite_rb_find_range(data, md->mask, &md->start, &md->end)) | 470 | if (overwrite_rb_find_range(data, md->core.mask, &md->start, &md->end)) |
471 | return -EINVAL; | 471 | return -EINVAL; |
472 | } | 472 | } |
473 | 473 | ||
@@ -500,9 +500,9 @@ int perf_mmap__push(struct mmap *md, void *to, | |||
500 | 500 | ||
501 | size = md->end - md->start; | 501 | size = md->end - md->start; |
502 | 502 | ||
503 | if ((md->start & md->mask) + size != (md->end & md->mask)) { | 503 | if ((md->start & md->core.mask) + size != (md->end & md->core.mask)) { |
504 | buf = &data[md->start & md->mask]; | 504 | buf = &data[md->start & md->core.mask]; |
505 | size = md->mask + 1 - (md->start & md->mask); | 505 | size = md->core.mask + 1 - (md->start & md->core.mask); |
506 | md->start += size; | 506 | md->start += size; |
507 | 507 | ||
508 | if (push(md, to, buf, size) < 0) { | 508 | if (push(md, to, buf, size) < 0) { |
@@ -511,7 +511,7 @@ int perf_mmap__push(struct mmap *md, void *to, | |||
511 | } | 511 | } |
512 | } | 512 | } |
513 | 513 | ||
514 | buf = &data[md->start & md->mask]; | 514 | buf = &data[md->start & md->core.mask]; |
515 | size = md->end - md->start; | 515 | size = md->end - md->start; |
516 | md->start += size; | 516 | md->start += size; |
517 | 517 | ||
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h index d2f0ce581e2c..370138e395fc 100644 --- a/tools/perf/util/mmap.h +++ b/tools/perf/util/mmap.h | |||
@@ -22,7 +22,6 @@ struct aiocb; | |||
22 | */ | 22 | */ |
23 | struct mmap { | 23 | struct mmap { |
24 | struct perf_mmap core; | 24 | struct perf_mmap core; |
25 | int mask; | ||
26 | int fd; | 25 | int fd; |
27 | int cpu; | 26 | int cpu; |
28 | refcount_t refcnt; | 27 | refcount_t refcnt; |