diff options
author | Wang Nan <wangnan0@huawei.com> | 2016-04-26 22:19:20 -0400 |
---|---|---|
committer | Arnaldo Carvalho de Melo <acme@redhat.com> | 2016-05-05 20:04:03 -0400 |
commit | 0f4ccd11813f59d766039dfdd13aa98245a67294 (patch) | |
tree | 94c1b23d7da0458834cafbc5be763c237eb7c5c4 | |
parent | 0b3c2264ae30ed692fd1ffd2b84c5fbdf737cb0d (diff) |
perf evlist: Extract perf_mmap__read()
Extract event reader from perf_evlist__mmap_read() to perf_mmap__read().
Future commit will feed it with manually computed 'head' and 'old'
pointers.
Signed-off-by: Wang Nan <wangnan0@huawei.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Zefan Li <lizefan@huawei.com>
Cc: pi3orama@163.com
Link: http://lkml.kernel.org/r/1461723563-67451-2-git-send-email-wangnan0@huawei.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-rw-r--r-- | tools/perf/util/evlist.c | 39 |
1 file changed, 24 insertions, 15 deletions
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index 85271e54a63b..96c71916e367 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c | |||
@@ -679,24 +679,15 @@ static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist, | |||
679 | return NULL; | 679 | return NULL; |
680 | } | 680 | } |
681 | 681 | ||
682 | union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx) | 682 | static union perf_event * |
683 | perf_mmap__read(struct perf_mmap *md, bool overwrite, u64 head, | ||
684 | u64 old, u64 *prev) | ||
683 | { | 685 | { |
684 | struct perf_mmap *md = &evlist->mmap[idx]; | ||
685 | u64 head; | ||
686 | u64 old = md->prev; | ||
687 | int diff; | ||
688 | unsigned char *data = md->base + page_size; | 686 | unsigned char *data = md->base + page_size; |
689 | union perf_event *event = NULL; | 687 | union perf_event *event = NULL; |
688 | int diff = head - old; | ||
690 | 689 | ||
691 | /* | 690 | if (overwrite) { |
692 | * Check if event was unmapped due to a POLLHUP/POLLERR. | ||
693 | */ | ||
694 | if (!atomic_read(&md->refcnt)) | ||
695 | return NULL; | ||
696 | |||
697 | head = perf_mmap__read_head(md); | ||
698 | diff = head - old; | ||
699 | if (evlist->overwrite) { | ||
700 | /* | 691 | /* |
701 | * If we're further behind than half the buffer, there's a chance | 692 | * If we're further behind than half the buffer, there's a chance |
702 | * the writer will bite our tail and mess up the samples under us. | 693 | * the writer will bite our tail and mess up the samples under us. |
@@ -751,11 +742,29 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx) | |||
751 | } | 742 | } |
752 | 743 | ||
753 | broken_event: | 744 | broken_event: |
754 | md->prev = old; | 745 | if (prev) |
746 | *prev = old; | ||
755 | 747 | ||
756 | return event; | 748 | return event; |
757 | } | 749 | } |
758 | 750 | ||
751 | union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx) | ||
752 | { | ||
753 | struct perf_mmap *md = &evlist->mmap[idx]; | ||
754 | u64 head; | ||
755 | u64 old = md->prev; | ||
756 | |||
757 | /* | ||
758 | * Check if event was unmapped due to a POLLHUP/POLLERR. | ||
759 | */ | ||
760 | if (!atomic_read(&md->refcnt)) | ||
761 | return NULL; | ||
762 | |||
763 | head = perf_mmap__read_head(md); | ||
764 | |||
765 | return perf_mmap__read(md, evlist->overwrite, head, old, &md->prev); | ||
766 | } | ||
767 | |||
759 | static bool perf_mmap__empty(struct perf_mmap *md) | 768 | static bool perf_mmap__empty(struct perf_mmap *md) |
760 | { | 769 | { |
761 | return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base; | 770 | return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base; |