Diffstat (limited to 'arch/sparc/kernel/perf_event.c')
 arch/sparc/kernel/perf_event.c | 69 ++++++++++++++++++++++++++++++++++++++---
 1 file changed, 64 insertions(+), 5 deletions(-)
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 919952498155..2b7743466ae4 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -68,6 +68,17 @@ struct perf_event_map {
 #define PIC_LOWER	0x02
 };
 
+static unsigned long perf_event_encode(const struct perf_event_map *pmap)
+{
+	return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
+}
+
+static void perf_event_decode(unsigned long val, u16 *enc, u8 *msk)
+{
+	*msk = val & 0xff;
+	*enc = val >> 16;
+}
+
 #define C(x) PERF_COUNT_HW_CACHE_##x
 
 #define CACHE_OP_UNSUPPORTED	0xfffe
@@ -713,6 +724,48 @@ static void hw_perf_event_destroy(struct perf_event *event)
 	perf_event_release_pmc();
 }
 
+/* Make sure all events can be scheduled into the hardware at
+ * the same time. This is simplified by the fact that we only
+ * need to support 2 simultaneous HW events.
+ */
+static int sparc_check_constraints(unsigned long *events, int n_ev)
+{
+	if (n_ev <= perf_max_events) {
+		u8 msk1, msk2;
+		u16 dummy;
+
+		if (n_ev == 1)
+			return 0;
+		BUG_ON(n_ev != 2);
+		perf_event_decode(events[0], &dummy, &msk1);
+		perf_event_decode(events[1], &dummy, &msk2);
+
+		/* If both events can go on any counter, OK. */
+		if (msk1 == (PIC_UPPER | PIC_LOWER) &&
+		    msk2 == (PIC_UPPER | PIC_LOWER))
+			return 0;
+
+		/* If one event is limited to a specific counter,
+		 * and the other can go on both, OK.
+		 */
+		if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
+		    msk2 == (PIC_UPPER | PIC_LOWER))
+			return 0;
+		if ((msk2 == PIC_UPPER || msk2 == PIC_LOWER) &&
+		    msk1 == (PIC_UPPER | PIC_LOWER))
+			return 0;
+
+		/* If the events are fixed to different counters, OK. */
+		if ((msk1 == PIC_UPPER && msk2 == PIC_LOWER) ||
+		    (msk1 == PIC_LOWER && msk2 == PIC_UPPER))
+			return 0;
+
+		/* Otherwise, there is a conflict. */
+	}
+
+	return -1;
+}
+
 static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
 {
 	int eu = 0, ek = 0, eh = 0;
@@ -742,7 +795,7 @@ static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
 }
 
 static int collect_events(struct perf_event *group, int max_count,
-			  struct perf_event *evts[], u64 *events)
+			  struct perf_event *evts[], unsigned long *events)
 {
 	struct perf_event *event;
 	int n = 0;
@@ -751,7 +804,7 @@ static int collect_events(struct perf_event *group, int max_count,
 		if (n >= max_count)
 			return -1;
 		evts[n] = group;
-		events[n++] = group->hw.config;
+		events[n++] = group->hw.event_base;
 	}
 	list_for_each_entry(event, &group->sibling_list, group_entry) {
 		if (!is_software_event(event) &&
@@ -759,7 +812,7 @@ static int collect_events(struct perf_event *group, int max_count,
 			if (n >= max_count)
 				return -1;
 			evts[n] = event;
-			events[n++] = event->hw.config;
+			events[n++] = event->hw.event_base;
 		}
 	}
 	return n;
@@ -770,8 +823,9 @@ static int __hw_perf_event_init(struct perf_event *event)
 	struct perf_event_attr *attr = &event->attr;
 	struct perf_event *evts[MAX_HWEVENTS];
 	struct hw_perf_event *hwc = &event->hw;
+	unsigned long events[MAX_HWEVENTS];
 	const struct perf_event_map *pmap;
-	u64 enc, events[MAX_HWEVENTS];
+	u64 enc;
 	int n;
 
 	if (atomic_read(&nmi_active) < 0)
@@ -800,6 +854,8 @@ static int __hw_perf_event_init(struct perf_event *event)
 	if (!attr->exclude_hv)
 		hwc->config_base |= sparc_pmu->hv_bit;
 
+	hwc->event_base = perf_event_encode(pmap);
+
 	enc = pmap->encoding;
 
 	n = 0;
@@ -810,12 +866,15 @@ static int __hw_perf_event_init(struct perf_event *event)
 		if (n < 0)
 			return -EINVAL;
 	}
-	events[n] = enc;
+	events[n] = hwc->event_base;
 	evts[n] = event;
 
 	if (check_excludes(evts, n, 1))
 		return -EINVAL;
 
+	if (sparc_check_constraints(events, n + 1))
+		return -EINVAL;
+
 	/* Try to do all error checking before this point, as unwinding
 	 * state after grabbing the PMC is difficult.
 	 */
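
For illustration only (not part of the patch): a minimal standalone sketch of the packing scheme this change introduces. perf_event_encode() stores the event encoding in the upper bits and the counter mask in the low byte of hw.event_base, so sparc_check_constraints() only needs the decoded masks to decide whether two events fit on the two counters. The PIC_UPPER value (0x01) and the sample encodings below are assumptions for the demo; only PIC_LOWER (0x02) is visible in the hunks above.

	/* Illustrative user-space C mirroring the kernel logic above. */
	#include <assert.h>
	#include <stdint.h>

	#define PIC_UPPER 0x01	/* assumed value; not shown in this diff */
	#define PIC_LOWER 0x02

	static unsigned long encode(uint16_t enc, uint8_t pic_mask)
	{
		return ((unsigned long) enc << 16) | pic_mask;
	}

	static void decode(unsigned long val, uint16_t *enc, uint8_t *msk)
	{
		*msk = val & 0xff;
		*enc = val >> 16;
	}

	int main(void)
	{
		uint16_t enc;
		uint8_t msk1, msk2;

		/* Round trip: encoding and counter mask survive the packing. */
		decode(encode(0x1234, PIC_UPPER), &enc, &msk1);
		assert(enc == 0x1234 && msk1 == PIC_UPPER);

		/* One event pinned to a counter plus one event that can use
		 * either counter is schedulable, matching the second test in
		 * sparc_check_constraints() above.
		 */
		decode(encode(0x0003, PIC_UPPER | PIC_LOWER), &enc, &msk2);
		assert((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
		       msk2 == (PIC_UPPER | PIC_LOWER));
		return 0;
	}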