aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorWang Nan <wangnan0@huawei.com>2016-04-26 22:19:21 -0400
committerArnaldo Carvalho de Melo <acme@redhat.com>2016-05-05 20:04:04 -0400
commitb6b85dad30ad7e7394990e2317a780577974a4e6 (patch)
tree731d887c8bae23eb0408dcfd2193bc2077f621ee
parent0f4ccd11813f59d766039dfdd13aa98245a67294 (diff)
perf evlist: Rename variable in perf_mmap__read()
In perf_mmap__read(), give better names to pointers. Original names 'old' and 'head' directly related to pointers in ring buffer control page. For backward ring buffer, the meaning of the 'head' pointer is not 'the first byte of free space', but 'the first byte of the last record'. To reduce confusion, rename 'old' to 'start', 'head' to 'end'. 'start' -> 'end' is the direction the records should be read from. Change parameter order. Change 'overwrite' to 'check_messup'. When reading from 'head', no need to check messup for backward ring buffer. Signed-off-by: Wang Nan <wangnan0@huawei.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Zefan Li <lizefan@huawei.com> Cc: pi3orama@163.com Link: http://lkml.kernel.org/r/1461723563-67451-3-git-send-email-wangnan0@huawei.com Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-rw-r--r--tools/perf/util/evlist.c29
1 files changed, 15 insertions, 14 deletions
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 96c71916e367..17cd01421e7f 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -679,30 +679,31 @@ static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
679 return NULL; 679 return NULL;
680} 680}
681 681
682/* When check_messup is true, 'end' must points to a good entry */
682static union perf_event * 683static union perf_event *
683perf_mmap__read(struct perf_mmap *md, bool overwrite, u64 head, 684perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start,
684 u64 old, u64 *prev) 685 u64 end, u64 *prev)
685{ 686{
686 unsigned char *data = md->base + page_size; 687 unsigned char *data = md->base + page_size;
687 union perf_event *event = NULL; 688 union perf_event *event = NULL;
688 int diff = head - old; 689 int diff = end - start;
689 690
690 if (overwrite) { 691 if (check_messup) {
691 /* 692 /*
692 * If we're further behind than half the buffer, there's a chance 693 * If we're further behind than half the buffer, there's a chance
693 * the writer will bite our tail and mess up the samples under us. 694 * the writer will bite our tail and mess up the samples under us.
694 * 695 *
695 * If we somehow ended up ahead of the head, we got messed up. 696 * If we somehow ended up ahead of the 'end', we got messed up.
696 * 697 *
697 * In either case, truncate and restart at head. 698 * In either case, truncate and restart at 'end'.
698 */ 699 */
699 if (diff > md->mask / 2 || diff < 0) { 700 if (diff > md->mask / 2 || diff < 0) {
700 fprintf(stderr, "WARNING: failed to keep up with mmap data.\n"); 701 fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
701 702
702 /* 703 /*
703 * head points to a known good entry, start there. 704 * 'end' points to a known good entry, start there.
704 */ 705 */
705 old = head; 706 start = end;
706 diff = 0; 707 diff = 0;
707 } 708 }
708 } 709 }
@@ -710,7 +711,7 @@ perf_mmap__read(struct perf_mmap *md, bool overwrite, u64 head,
710 if (diff >= (int)sizeof(event->header)) { 711 if (diff >= (int)sizeof(event->header)) {
711 size_t size; 712 size_t size;
712 713
713 event = (union perf_event *)&data[old & md->mask]; 714 event = (union perf_event *)&data[start & md->mask];
714 size = event->header.size; 715 size = event->header.size;
715 716
716 if (size < sizeof(event->header) || diff < (int)size) { 717 if (size < sizeof(event->header) || diff < (int)size) {
@@ -722,8 +723,8 @@ perf_mmap__read(struct perf_mmap *md, bool overwrite, u64 head,
722 * Event straddles the mmap boundary -- header should always 723 * Event straddles the mmap boundary -- header should always
723 * be inside due to u64 alignment of output. 724 * be inside due to u64 alignment of output.
724 */ 725 */
725 if ((old & md->mask) + size != ((old + size) & md->mask)) { 726 if ((start & md->mask) + size != ((start + size) & md->mask)) {
726 unsigned int offset = old; 727 unsigned int offset = start;
727 unsigned int len = min(sizeof(*event), size), cpy; 728 unsigned int len = min(sizeof(*event), size), cpy;
728 void *dst = md->event_copy; 729 void *dst = md->event_copy;
729 730
@@ -738,12 +739,12 @@ perf_mmap__read(struct perf_mmap *md, bool overwrite, u64 head,
738 event = (union perf_event *) md->event_copy; 739 event = (union perf_event *) md->event_copy;
739 } 740 }
740 741
741 old += size; 742 start += size;
742 } 743 }
743 744
744broken_event: 745broken_event:
745 if (prev) 746 if (prev)
746 *prev = old; 747 *prev = start;
747 748
748 return event; 749 return event;
749} 750}
@@ -762,7 +763,7 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
762 763
763 head = perf_mmap__read_head(md); 764 head = perf_mmap__read_head(md);
764 765
765 return perf_mmap__read(md, evlist->overwrite, head, old, &md->prev); 766 return perf_mmap__read(md, evlist->overwrite, old, head, &md->prev);
766} 767}
767 768
768static bool perf_mmap__empty(struct perf_mmap *md) 769static bool perf_mmap__empty(struct perf_mmap *md)