aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKan Liang <kan.liang@intel.com>2018-01-18 16:26:25 -0500
committerArnaldo Carvalho de Melo <acme@redhat.com>2018-02-15 07:54:17 -0500
commit3effc2f165a842d640873e29d4c5cc1650143aef (patch)
tree02f60621f18ed7291fce79cf1cce2359faeaab3f
parent600a7cfe88de2c6e44e23d61dd721b996b790eb2 (diff)
perf mmap: Discard legacy interface for mmap read
Discards perf_mmap__read_backward() and perf_mmap__read_catchup(). No tools use them. There are tools that still use perf_mmap__read_forward(). Keep it, but add comments to point to the new interface for future use. Signed-off-by: Kan Liang <kan.liang@intel.com> Acked-by: Jiri Olsa <jolsa@kernel.org> Cc: Andi Kleen <ak@linux.intel.com> Cc: Jin Yao <yao.jin@linux.intel.com> Cc: Namhyung Kim <namhyung@kernel.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Wang Nan <wangnan0@huawei.com> Link: http://lkml.kernel.org/r/1516310792-208685-11-git-send-email-kan.liang@intel.com Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-rw-r--r--tools/perf/util/mmap.c50
-rw-r--r--tools/perf/util/mmap.h3
2 files changed, 4 insertions, 49 deletions
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index f804926778b7..91531a7c8fbf 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -63,6 +63,10 @@ static union perf_event *perf_mmap__read(struct perf_mmap *map,
63 return event; 63 return event;
64} 64}
65 65
66/*
67 * legacy interface for mmap read.
68 * Don't use it. Use perf_mmap__read_event().
69 */
66union perf_event *perf_mmap__read_forward(struct perf_mmap *map) 70union perf_event *perf_mmap__read_forward(struct perf_mmap *map)
67{ 71{
68 u64 head; 72 u64 head;
@@ -78,41 +82,6 @@ union perf_event *perf_mmap__read_forward(struct perf_mmap *map)
78 return perf_mmap__read(map, &map->prev, head); 82 return perf_mmap__read(map, &map->prev, head);
79} 83}
80 84
81union perf_event *perf_mmap__read_backward(struct perf_mmap *map)
82{
83 u64 head, end;
84
85 /*
86 * Check if event was unmapped due to a POLLHUP/POLLERR.
87 */
88 if (!refcount_read(&map->refcnt))
89 return NULL;
90
91 head = perf_mmap__read_head(map);
92 if (!head)
93 return NULL;
94
95 /*
96 * 'head' pointer starts from 0. Kernel minus sizeof(record) form
97 * it each time when kernel writes to it, so in fact 'head' is
98 * negative. 'end' pointer is made manually by adding the size of
99 * the ring buffer to 'head' pointer, means the validate data can
100 * read is the whole ring buffer. If 'end' is positive, the ring
101 * buffer has not fully filled, so we must adjust 'end' to 0.
102 *
103 * However, since both 'head' and 'end' is unsigned, we can't
104 * simply compare 'end' against 0. Here we compare '-head' and
105 * the size of the ring buffer, where -head is the number of bytes
106 * kernel write to the ring buffer.
107 */
108 if (-head < (u64)(map->mask + 1))
109 end = 0;
110 else
111 end = head + map->mask + 1;
112
113 return perf_mmap__read(map, &map->prev, end);
114}
115
116/* 85/*
117 * Read event from ring buffer one by one. 86 * Read event from ring buffer one by one.
118 * Return one event for each call. 87 * Return one event for each call.
@@ -152,17 +121,6 @@ union perf_event *perf_mmap__read_event(struct perf_mmap *map,
152 return event; 121 return event;
153} 122}
154 123
155void perf_mmap__read_catchup(struct perf_mmap *map)
156{
157 u64 head;
158
159 if (!refcount_read(&map->refcnt))
160 return;
161
162 head = perf_mmap__read_head(map);
163 map->prev = head;
164}
165
166static bool perf_mmap__empty(struct perf_mmap *map) 124static bool perf_mmap__empty(struct perf_mmap *map)
167{ 125{
168 return perf_mmap__read_head(map) == map->prev && !map->auxtrace_mmap.base; 126 return perf_mmap__read_head(map) == map->prev && !map->auxtrace_mmap.base;
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index 28718543dd42..ec7d3a24e276 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -65,8 +65,6 @@ void perf_mmap__put(struct perf_mmap *map);
65 65
66void perf_mmap__consume(struct perf_mmap *map, bool overwrite); 66void perf_mmap__consume(struct perf_mmap *map, bool overwrite);
67 67
68void perf_mmap__read_catchup(struct perf_mmap *md);
69
70static inline u64 perf_mmap__read_head(struct perf_mmap *mm) 68static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
71{ 69{
72 struct perf_event_mmap_page *pc = mm->base; 70 struct perf_event_mmap_page *pc = mm->base;
@@ -87,7 +85,6 @@ static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
87} 85}
88 86
89union perf_event *perf_mmap__read_forward(struct perf_mmap *map); 87union perf_event *perf_mmap__read_forward(struct perf_mmap *map);
90union perf_event *perf_mmap__read_backward(struct perf_mmap *map);
91 88
92union perf_event *perf_mmap__read_event(struct perf_mmap *map, 89union perf_event *perf_mmap__read_event(struct perf_mmap *map,
93 bool overwrite, 90 bool overwrite,