author	David S. Miller <davem@davemloft.net>	2009-09-27 23:43:07 -0400
committer	David S. Miller <davem@davemloft.net>	2009-09-27 23:43:07 -0400
commit	01552f765cae873d0ea3cca1e64e41dfd62659e6 (patch)
tree	80887592fb5fb3dd848b06c44a2b0c76770843be /arch/sparc
parent	7eebda60d57a0862a410f45122c73b8bbe6e260c (diff)
sparc64: Add initial perf event conflict resolution and checks.
Cribbed from powerpc code, as usual. :-)

Currently it is only used to validate that all counters have the
same user/kernel/hv attributes.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc')
-rw-r--r--	arch/sparc/kernel/perf_event.c	82
1 file changed, 77 insertions(+), 5 deletions(-)
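As an illustration only (no such test program appears in the patch), here is a
minimal userspace sketch of the kind of event group these checks reject: the
sibling's exclude_kernel setting differs from the leader's, so on a sparc64
kernel carrying this patch the second perf_event_open() is expected to fail.
The perf_open() helper name and the choice of events are hypothetical.

	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <string.h>
	#include <stdio.h>

	static int perf_open(struct perf_event_attr *attr, int group_fd)
	{
		/* pid 0: this task; cpu -1: any cpu; flags 0 */
		return syscall(__NR_perf_event_open, attr, 0, -1, group_fd, 0);
	}

	int main(void)
	{
		struct perf_event_attr leader, sibling;
		int lfd, sfd;

		memset(&leader, 0, sizeof(leader));
		leader.type = PERF_TYPE_HARDWARE;
		leader.size = sizeof(leader);
		leader.config = PERF_COUNT_HW_CPU_CYCLES;

		sibling = leader;
		sibling.config = PERF_COUNT_HW_INSTRUCTIONS;
		sibling.exclude_kernel = 1;	/* disagrees with the leader */

		lfd = perf_open(&leader, -1);
		sfd = perf_open(&sibling, lfd);	/* expected to fail: mixed excludes */
		printf("leader fd = %d, sibling fd = %d\n", lfd, sfd);
		return 0;
	}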
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 9541b456c3ee..919952498155 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -713,12 +713,66 @@ static void hw_perf_event_destroy(struct perf_event *event)
 	perf_event_release_pmc();
 }
 
+static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
+{
+	int eu = 0, ek = 0, eh = 0;
+	struct perf_event *event;
+	int i, n, first;
+
+	n = n_prev + n_new;
+	if (n <= 1)
+		return 0;
+
+	first = 1;
+	for (i = 0; i < n; i++) {
+		event = evts[i];
+		if (first) {
+			eu = event->attr.exclude_user;
+			ek = event->attr.exclude_kernel;
+			eh = event->attr.exclude_hv;
+			first = 0;
+		} else if (event->attr.exclude_user != eu ||
+			   event->attr.exclude_kernel != ek ||
+			   event->attr.exclude_hv != eh) {
+			return -EAGAIN;
+		}
+	}
+
+	return 0;
+}
+
+static int collect_events(struct perf_event *group, int max_count,
+			  struct perf_event *evts[], u64 *events)
+{
+	struct perf_event *event;
+	int n = 0;
+
+	if (!is_software_event(group)) {
+		if (n >= max_count)
+			return -1;
+		evts[n] = group;
+		events[n++] = group->hw.config;
+	}
+	list_for_each_entry(event, &group->sibling_list, group_entry) {
+		if (!is_software_event(event) &&
+		    event->state != PERF_EVENT_STATE_OFF) {
+			if (n >= max_count)
+				return -1;
+			evts[n] = event;
+			events[n++] = event->hw.config;
+		}
+	}
+	return n;
+}
+
 static int __hw_perf_event_init(struct perf_event *event)
 {
 	struct perf_event_attr *attr = &event->attr;
+	struct perf_event *evts[MAX_HWEVENTS];
 	struct hw_perf_event *hwc = &event->hw;
 	const struct perf_event_map *pmap;
-	u64 enc;
+	u64 enc, events[MAX_HWEVENTS];
+	int n;
 
 	if (atomic_read(&nmi_active) < 0)
 		return -ENODEV;
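The two helpers above follow the powerpc scheme: check_excludes() takes the
first event's exclude_user/exclude_kernel/exclude_hv settings as the reference
and returns -EAGAIN as soon as any later event disagrees, while
collect_events() gathers the group leader and its hardware siblings into
evts[]/events[], skipping software events (which never occupy a PMC) and
siblings in the OFF state, and returns -1 if the group would overflow
max_count.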
@@ -734,9 +788,6 @@ static int __hw_perf_event_init(struct perf_event *event)
 	} else
 		return -EOPNOTSUPP;
 
-	perf_event_grab_pmc();
-	event->destroy = hw_perf_event_destroy;
-
 	/* We save the enable bits in the config_base.  So to
 	 * turn off sampling just write 'config', and to enable
 	 * things write 'config | config_base'.
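The perf_event_grab_pmc() and event->destroy assignments removed here are not
lost; the next hunk re-adds them after the new group checks, so that the early
error returns introduced below never leave a PMC reference that would have to
be unwound.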
@@ -749,13 +800,34 @@ static int __hw_perf_event_init(struct perf_event *event)
 	if (!attr->exclude_hv)
 		hwc->config_base |= sparc_pmu->hv_bit;
 
+	enc = pmap->encoding;
+
+	n = 0;
+	if (event->group_leader != event) {
+		n = collect_events(event->group_leader,
+				   perf_max_events - 1,
+				   evts, events);
+		if (n < 0)
+			return -EINVAL;
+	}
+	events[n] = enc;
+	evts[n] = event;
+
+	if (check_excludes(evts, n, 1))
+		return -EINVAL;
+
+	/* Try to do all error checking before this point, as unwinding
+	 * state after grabbing the PMC is difficult.
+	 */
+	perf_event_grab_pmc();
+	event->destroy = hw_perf_event_destroy;
+
 	if (!hwc->sample_period) {
 		hwc->sample_period = MAX_PERIOD;
 		hwc->last_period = hwc->sample_period;
 		atomic64_set(&hwc->period_left, hwc->sample_period);
 	}
 
-	enc = pmap->encoding;
 	if (pmap->pic_mask & PIC_UPPER) {
 		hwc->idx = PIC_UPPER_INDEX;
 		enc <<= sparc_pmu->upper_shift;
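Likewise, enc = pmap->encoding is hoisted from just before the PIC_UPPER test
to just after the exclude bits are set, since the encoding must be stored into
events[] before check_excludes() runs; the PIC_UPPER/PIC_LOWER placement logic
itself is unchanged.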