aboutsummaryrefslogtreecommitdiffstats
path: root/tools/perf
diff options
context:
space:
mode:
authorAdrian Hunter <adrian.hunter@intel.com>2014-07-14 06:02:52 -0400
committerArnaldo Carvalho de Melo <acme@redhat.com>2014-07-16 16:57:36 -0400
commita8a8f3eb5de55aeaf007c18572668e8ec463547b (patch)
tree839f1d1bcb5676820218c75b0a552c65e1f45b73 /tools/perf
parent919d86d3a3109d7d4f0d7347f34711ee2f8e6609 (diff)
perf evlist: Pass mmap parameters in a struct
In preparation for adding more mmap parameters, pass existing parameters in a struct. Signed-off-by: Adrian Hunter <adrian.hunter@intel.com> Cc: David Ahern <dsahern@gmail.com> Cc: Frederic Weisbecker <fweisbec@gmail.com> Cc: Jiri Olsa <jolsa@redhat.com> Cc: Namhyung Kim <namhyung@gmail.com> Cc: Paul Mackerras <paulus@samba.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Stephane Eranian <eranian@google.com> Link: http://lkml.kernel.org/r/1405332185-4050-29-git-send-email-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools/perf')
-rw-r--r--tools/perf/util/evlist.c46
1 file changed, 26 insertions, 20 deletions
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index c51223ac25f4..814e954c1318 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -606,12 +606,17 @@ static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
606 return evlist->mmap != NULL ? 0 : -ENOMEM; 606 return evlist->mmap != NULL ? 0 : -ENOMEM;
607} 607}
608 608
609static int __perf_evlist__mmap(struct perf_evlist *evlist, 609struct mmap_params {
610 int idx, int prot, int mask, int fd) 610 int prot;
611 int mask;
612};
613
614static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
615 struct mmap_params *mp, int fd)
611{ 616{
612 evlist->mmap[idx].prev = 0; 617 evlist->mmap[idx].prev = 0;
613 evlist->mmap[idx].mask = mask; 618 evlist->mmap[idx].mask = mp->mask;
614 evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot, 619 evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, mp->prot,
615 MAP_SHARED, fd, 0); 620 MAP_SHARED, fd, 0);
616 if (evlist->mmap[idx].base == MAP_FAILED) { 621 if (evlist->mmap[idx].base == MAP_FAILED) {
617 pr_debug2("failed to mmap perf event ring buffer, error %d\n", 622 pr_debug2("failed to mmap perf event ring buffer, error %d\n",
@@ -625,8 +630,8 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist,
625} 630}
626 631
627static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx, 632static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
628 int prot, int mask, int cpu, int thread, 633 struct mmap_params *mp, int cpu,
629 int *output) 634 int thread, int *output)
630{ 635{
631 struct perf_evsel *evsel; 636 struct perf_evsel *evsel;
632 637
@@ -635,8 +640,7 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
635 640
636 if (*output == -1) { 641 if (*output == -1) {
637 *output = fd; 642 *output = fd;
638 if (__perf_evlist__mmap(evlist, idx, prot, mask, 643 if (__perf_evlist__mmap(evlist, idx, mp, *output) < 0)
639 *output) < 0)
640 return -1; 644 return -1;
641 } else { 645 } else {
642 if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0) 646 if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
@@ -651,8 +655,8 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
651 return 0; 655 return 0;
652} 656}
653 657
654static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, 658static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
655 int mask) 659 struct mmap_params *mp)
656{ 660{
657 int cpu, thread; 661 int cpu, thread;
658 int nr_cpus = cpu_map__nr(evlist->cpus); 662 int nr_cpus = cpu_map__nr(evlist->cpus);
@@ -663,8 +667,8 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot,
663 int output = -1; 667 int output = -1;
664 668
665 for (thread = 0; thread < nr_threads; thread++) { 669 for (thread = 0; thread < nr_threads; thread++) {
666 if (perf_evlist__mmap_per_evsel(evlist, cpu, prot, mask, 670 if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
667 cpu, thread, &output)) 671 thread, &output))
668 goto out_unmap; 672 goto out_unmap;
669 } 673 }
670 } 674 }
@@ -677,8 +681,8 @@ out_unmap:
677 return -1; 681 return -1;
678} 682}
679 683
680static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, 684static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
681 int mask) 685 struct mmap_params *mp)
682{ 686{
683 int thread; 687 int thread;
684 int nr_threads = thread_map__nr(evlist->threads); 688 int nr_threads = thread_map__nr(evlist->threads);
@@ -687,8 +691,8 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot,
687 for (thread = 0; thread < nr_threads; thread++) { 691 for (thread = 0; thread < nr_threads; thread++) {
688 int output = -1; 692 int output = -1;
689 693
690 if (perf_evlist__mmap_per_evsel(evlist, thread, prot, mask, 0, 694 if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
691 thread, &output)) 695 &output))
692 goto out_unmap; 696 goto out_unmap;
693 } 697 }
694 698
@@ -793,7 +797,9 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
793 struct perf_evsel *evsel; 797 struct perf_evsel *evsel;
794 const struct cpu_map *cpus = evlist->cpus; 798 const struct cpu_map *cpus = evlist->cpus;
795 const struct thread_map *threads = evlist->threads; 799 const struct thread_map *threads = evlist->threads;
796 int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask; 800 struct mmap_params mp = {
801 .prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
802 };
797 803
798 if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0) 804 if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
799 return -ENOMEM; 805 return -ENOMEM;
@@ -804,7 +810,7 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
804 evlist->overwrite = overwrite; 810 evlist->overwrite = overwrite;
805 evlist->mmap_len = perf_evlist__mmap_size(pages); 811 evlist->mmap_len = perf_evlist__mmap_size(pages);
806 pr_debug("mmap size %zuB\n", evlist->mmap_len); 812 pr_debug("mmap size %zuB\n", evlist->mmap_len);
807 mask = evlist->mmap_len - page_size - 1; 813 mp.mask = evlist->mmap_len - page_size - 1;
808 814
809 evlist__for_each(evlist, evsel) { 815 evlist__for_each(evlist, evsel) {
810 if ((evsel->attr.read_format & PERF_FORMAT_ID) && 816 if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
@@ -814,9 +820,9 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
814 } 820 }
815 821
816 if (cpu_map__empty(cpus)) 822 if (cpu_map__empty(cpus))
817 return perf_evlist__mmap_per_thread(evlist, prot, mask); 823 return perf_evlist__mmap_per_thread(evlist, &mp);
818 824
819 return perf_evlist__mmap_per_cpu(evlist, prot, mask); 825 return perf_evlist__mmap_per_cpu(evlist, &mp);
820} 826}
821 827
822int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target) 828int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)