Diffstat (limited to 'tools/perf/util/mmap.c')
 -rw-r--r--  tools/perf/util/mmap.c  73
 1 file changed, 25 insertions, 48 deletions
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 9fe5f9c7d577..05076e683938 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -21,33 +21,13 @@ size_t perf_mmap__mmap_len(struct perf_mmap *map)
 }
 
 /* When check_messup is true, 'end' must points to a good entry */
-static union perf_event *perf_mmap__read(struct perf_mmap *map, bool check_messup,
+static union perf_event *perf_mmap__read(struct perf_mmap *map,
 					 u64 start, u64 end, u64 *prev)
 {
 	unsigned char *data = map->base + page_size;
 	union perf_event *event = NULL;
 	int diff = end - start;
 
-	if (check_messup) {
-		/*
-		 * If we're further behind than half the buffer, there's a chance
-		 * the writer will bite our tail and mess up the samples under us.
-		 *
-		 * If we somehow ended up ahead of the 'end', we got messed up.
-		 *
-		 * In either case, truncate and restart at 'end'.
-		 */
-		if (diff > map->mask / 2 || diff < 0) {
-			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
-
-			/*
-			 * 'end' points to a known good entry, start there.
-			 */
-			start = end;
-			diff = 0;
-		}
-	}
-
 	if (diff >= (int)sizeof(event->header)) {
 		size_t size;
 
@@ -89,7 +69,7 @@ broken_event:
 	return event;
 }
 
-union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup)
+union perf_event *perf_mmap__read_forward(struct perf_mmap *map)
 {
 	u64 head;
 	u64 old = map->prev;
@@ -102,7 +82,7 @@ union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_mess
 
 	head = perf_mmap__read_head(map);
 
-	return perf_mmap__read(map, check_messup, old, head, &map->prev);
+	return perf_mmap__read(map, old, head, &map->prev);
 }
 
 union perf_event *perf_mmap__read_backward(struct perf_mmap *map)
@@ -138,7 +118,7 @@ union perf_event *perf_mmap__read_backward(struct perf_mmap *map)
 	else
 		end = head + map->mask + 1;
 
-	return perf_mmap__read(map, false, start, end, &map->prev);
+	return perf_mmap__read(map, start, end, &map->prev);
 }
 
 void perf_mmap__read_catchup(struct perf_mmap *map)
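With check_messup gone, reading the forward buffer reduces to looping over perf_mmap__read_forward() until it returns NULL. A minimal consumer sketch against the post-patch API is below; drain_events() and its handle callback are hypothetical names, not part of this patch.

/* Hypothetical consumer of the simplified forward-read API. */
static int drain_events(struct perf_mmap *map,
			int (*handle)(union perf_event *event))
{
	union perf_event *event;
	int err = 0;

	/*
	 * perf_mmap__read_forward() no longer takes check_messup;
	 * overrun handling for the non-overwrite case now lives in
	 * perf_mmap__push().
	 */
	while ((event = perf_mmap__read_forward(map)) != NULL) {
		err = handle(event);
		if (err)
			break;
	}

	/* Second argument is 'overwrite': false for a forward buffer. */
	perf_mmap__consume(map, false);
	return err;
}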
@@ -254,18 +234,18 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd)
 	return 0;
 }
 
-static int backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
+static int overwrite_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
 {
 	struct perf_event_header *pheader;
 	u64 evt_head = head;
 	int size = mask + 1;
 
-	pr_debug2("backward_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
+	pr_debug2("overwrite_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
 	pheader = (struct perf_event_header *)(buf + (head & mask));
 	*start = head;
 	while (true) {
 		if (evt_head - head >= (unsigned int)size) {
-			pr_debug("Finished reading backward ring buffer: rewind\n");
+			pr_debug("Finished reading overwrite ring buffer: rewind\n");
 			if (evt_head - head > (unsigned int)size)
 				evt_head -= pheader->size;
 			*end = evt_head;
@@ -275,7 +255,7 @@ static int backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64
 		pheader = (struct perf_event_header *)(buf + (evt_head & mask));
 
 		if (pheader->size == 0) {
-			pr_debug("Finished reading backward ring buffer: get start\n");
+			pr_debug("Finished reading overwrite ring buffer: get start\n");
 			*end = evt_head;
 			return 0;
 		}
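The renamed overwrite_rb_find_range() recovers a readable window from a full overwrite buffer by chaining perf_event_header.size fields forward from head until the walk either laps the whole buffer or lands on never-written (zero-sized) memory. The same walk, restated as a free-standing sketch (the function name and stand-alone form are illustrative, assuming a well-formed buffer):

/*
 * Illustrative restatement of the header walk above: return the first
 * position past the last fully valid record, starting from 'head'.
 */
static u64 find_window_end(void *buf, int mask, u64 head)
{
	struct perf_event_header *pheader;
	u64 pos = head;

	while (pos - head < (u64)mask + 1) {
		pheader = (struct perf_event_header *)(buf + (pos & mask));
		if (pheader->size == 0)
			return pos;	/* hit never-written memory */
		pos += pheader->size;	/* hop to the next record */
	}

	/* Lapped the buffer: if the last hop overshot, drop that record. */
	if (pos - head > (u64)mask + 1)
		pos -= pheader->size;
	return pos;
}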
@@ -287,19 +267,7 @@ static int backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64
 	return -1;
 }
 
-static int rb_find_range(void *data, int mask, u64 head, u64 old,
-			 u64 *start, u64 *end, bool backward)
-{
-	if (!backward) {
-		*start = old;
-		*end = head;
-		return 0;
-	}
-
-	return backward_rb_find_range(data, mask, head, start, end);
-}
-
-int perf_mmap__push(struct perf_mmap *md, bool overwrite, bool backward,
+int perf_mmap__push(struct perf_mmap *md, bool overwrite,
 		    void *to, int push(void *to, void *buf, size_t size))
 {
 	u64 head = perf_mmap__read_head(md);
@@ -310,19 +278,28 @@ int perf_mmap__push(struct perf_mmap *md, bool overwrite, bool backward,
 	void *buf;
 	int rc = 0;
 
-	if (rb_find_range(data, md->mask, head, old, &start, &end, backward))
-		return -1;
+	start = overwrite ? head : old;
+	end = overwrite ? old : head;
 
 	if (start == end)
 		return 0;
 
 	size = end - start;
 	if (size > (unsigned long)(md->mask) + 1) {
-		WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
+		if (!overwrite) {
+			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
 
-		md->prev = head;
-		perf_mmap__consume(md, overwrite || backward);
-		return 0;
+			md->prev = head;
+			perf_mmap__consume(md, overwrite);
+			return 0;
+		}
+
+		/*
+		 * Backward ring buffer is full. We still have a chance to read
+		 * most of data from it.
+		 */
+		if (overwrite_rb_find_range(data, md->mask, head, &start, &end))
+			return -1;
 	}
 
 	if ((start & md->mask) + size != (end & md->mask)) {
@@ -346,7 +323,7 @@ int perf_mmap__push(struct perf_mmap *md, bool overwrite, bool backward,
 	}
 
 	md->prev = head;
-	perf_mmap__consume(md, overwrite || backward);
+	perf_mmap__consume(md, overwrite);
 out:
 	return rc;
 }
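With the backward argument folded away, callers pass a single overwrite flag and perf_mmap__push() derives the copy window itself (head..old when overwriting, old..head otherwise). A hedged usage sketch follows; write_chunk() and flush_ring() are hypothetical stand-ins for what a perf-record-style writer does, with partial-write handling omitted for brevity.

#include <unistd.h>

/*
 * Hypothetical push callback: append one contiguous chunk of ring
 * data to an output file descriptor.
 */
static int write_chunk(void *to, void *buf, size_t size)
{
	int fd = *(int *)to;

	return write(fd, buf, size) == (ssize_t)size ? 0 : -1;
}

/*
 * Hypothetical caller: one flag now selects both the window
 * orientation and the consume mode.
 */
static int flush_ring(struct perf_mmap *md, bool overwrite, int out_fd)
{
	return perf_mmap__push(md, overwrite, &out_fd, write_chunk);
}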