author    David S. Miller <davem@davemloft.net>  2010-01-20 05:59:47 -0500
committer David S. Miller <davem@davemloft.net>  2010-01-20 19:23:03 -0500
commit    e7bef6b04ca2e8e4cf667c43d7e2ab3034a869d5 (patch)
tree      b21d045e0b97f6e3f68a97e992e92432b3f91c2f /arch/sparc/kernel
parent    4f6dbe4ac01d2664231d3f3eceadd33a44cde993 (diff)
sparc64: Fully support both performance counters.
Add the rest of the conflict detection and resolution logic necessary to
support more than one counter at a time on sparc64.  The structure and
implementation closely mimics that of powerpc.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc/kernel')
-rw-r--r--  arch/sparc/kernel/perf_event.c   507
1 file changed, 353 insertions(+), 154 deletions(-)
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 2386ac6ec956..e856456ec02f 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -55,16 +55,49 @@
 
 #define PIC_UPPER_INDEX			0
 #define PIC_LOWER_INDEX			1
+#define PIC_NO_INDEX			-1
 
 struct cpu_hw_events {
-	struct perf_event	*events[MAX_HWEVENTS];
-	unsigned long		used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
-	unsigned long		active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
+	/* Number of events currently scheduled onto this cpu.
+	 * This tells how many entries in the arrays below
+	 * are valid.
+	 */
+	int			n_events;
+
+	/* Number of new events added since the last hw_perf_disable().
+	 * This works because the perf event layer always adds new
+	 * events inside of a perf_{disable,enable}() sequence.
+	 */
+	int			n_added;
+
+	/* Array of events current scheduled on this cpu. */
+	struct perf_event	*event[MAX_HWEVENTS];
+
+	/* Array of encoded longs, specifying the %pcr register
+	 * encoding and the mask of PIC counters this even can
+	 * be scheduled on.  See perf_event_encode() et al.
+	 */
+	unsigned long		events[MAX_HWEVENTS];
+
+	/* The current counter index assigned to an event.  When the
+	 * event hasn't been programmed into the cpu yet, this will
+	 * hold PIC_NO_INDEX.  The event->hw.idx value tells us where
+	 * we ought to schedule the event.
+	 */
+	int			current_idx[MAX_HWEVENTS];
+
+	/* Software copy of %pcr register on this cpu. */
 	u64			pcr;
+
+	/* Enabled/disable state. */
 	int			enabled;
 };
 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
 
+/* An event map describes the characteristics of a performance
+ * counter event.  In particular it gives the encoding as well as
+ * a mask telling which counters the event can be measured on.
+ */
 struct perf_event_map {
 	u16	encoding;
 	u8	pic_mask;
@@ -73,15 +106,20 @@ struct perf_event_map {
 #define PIC_LOWER	0x02
 };
 
+/* Encode a perf_event_map entry into a long. */
 static unsigned long perf_event_encode(const struct perf_event_map *pmap)
 {
 	return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
 }
 
-static void perf_event_decode(unsigned long val, u16 *enc, u8 *msk)
+static u8 perf_event_get_msk(unsigned long val)
+{
+	return val & 0xff;
+}
+
+static u64 perf_event_get_enc(unsigned long val)
 {
-	*msk = val & 0xff;
-	*enc = val >> 16;
+	return val >> 16;
 }
 
 #define C(x) PERF_COUNT_HW_CACHE_##x
@@ -495,53 +533,6 @@ static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw
 	pcr_ops->write(cpuc->pcr);
 }
 
-void hw_perf_enable(void)
-{
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	u64 val;
-	int i;
-
-	if (cpuc->enabled)
-		return;
-
-	cpuc->enabled = 1;
-	barrier();
-
-	val = cpuc->pcr;
-
-	for (i = 0; i < MAX_HWEVENTS; i++) {
-		struct perf_event *cp = cpuc->events[i];
-		struct hw_perf_event *hwc;
-
-		if (!cp)
-			continue;
-		hwc = &cp->hw;
-		val |= hwc->config_base;
-	}
-
-	cpuc->pcr = val;
-
-	pcr_ops->write(cpuc->pcr);
-}
-
-void hw_perf_disable(void)
-{
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	u64 val;
-
-	if (!cpuc->enabled)
-		return;
-
-	cpuc->enabled = 0;
-
-	val = cpuc->pcr;
-	val &= ~(PCR_UTRACE | PCR_STRACE |
-		 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
-	cpuc->pcr = val;
-
-	pcr_ops->write(cpuc->pcr);
-}
-
 static u32 read_pmc(int idx)
 {
 	u64 val;
@@ -570,6 +561,30 @@ static void write_pmc(int idx, u64 val)
 	write_pic(pic);
 }
 
+static u64 sparc_perf_event_update(struct perf_event *event,
+				   struct hw_perf_event *hwc, int idx)
+{
+	int shift = 64 - 32;
+	u64 prev_raw_count, new_raw_count;
+	s64 delta;
+
+again:
+	prev_raw_count = atomic64_read(&hwc->prev_count);
+	new_raw_count = read_pmc(idx);
+
+	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
+			     new_raw_count) != prev_raw_count)
+		goto again;
+
+	delta = (new_raw_count << shift) - (prev_raw_count << shift);
+	delta >>= shift;
+
+	atomic64_add(delta, &event->count);
+	atomic64_sub(delta, &hwc->period_left);
+
+	return new_raw_count;
+}
+
 static int sparc_perf_event_set_period(struct perf_event *event,
 				       struct hw_perf_event *hwc, int idx)
 {
@@ -602,81 +617,166 @@ static int sparc_perf_event_set_period(struct perf_event,
 	return ret;
 }
 
-static int sparc_pmu_enable(struct perf_event *event)
+/* If performance event entries have been added, move existing
+ * events around (if necessary) and then assign new entries to
+ * counters.
+ */
+static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	struct hw_perf_event *hwc = &event->hw;
-	int idx = hwc->idx;
+	int i;
 
-	if (test_and_set_bit(idx, cpuc->used_mask))
-		return -EAGAIN;
+	if (!cpuc->n_added)
+		goto out;
 
-	sparc_pmu_disable_event(cpuc, hwc, idx);
+	/* Read in the counters which are moving.  */
+	for (i = 0; i < cpuc->n_events; i++) {
+		struct perf_event *cp = cpuc->event[i];
 
-	cpuc->events[idx] = event;
-	set_bit(idx, cpuc->active_mask);
+		if (cpuc->current_idx[i] != PIC_NO_INDEX &&
+		    cpuc->current_idx[i] != cp->hw.idx) {
+			sparc_perf_event_update(cp, &cp->hw,
+						cpuc->current_idx[i]);
+			cpuc->current_idx[i] = PIC_NO_INDEX;
+		}
+	}
 
-	sparc_perf_event_set_period(event, hwc, idx);
-	sparc_pmu_enable_event(cpuc, hwc, idx);
-	perf_event_update_userpage(event);
-	return 0;
+	/* Assign to counters all unassigned events.  */
+	for (i = 0; i < cpuc->n_events; i++) {
+		struct perf_event *cp = cpuc->event[i];
+		struct hw_perf_event *hwc = &cp->hw;
+		int idx = hwc->idx;
+		u64 enc;
+
+		if (cpuc->current_idx[i] != PIC_NO_INDEX)
+			continue;
+
+		sparc_perf_event_set_period(cp, hwc, idx);
+		cpuc->current_idx[i] = idx;
+
+		enc = perf_event_get_enc(cpuc->events[i]);
+		pcr |= event_encoding(enc, idx);
+	}
+out:
+	return pcr;
 }
 
-static u64 sparc_perf_event_update(struct perf_event *event,
-				   struct hw_perf_event *hwc, int idx)
+void hw_perf_enable(void)
 {
-	int shift = 64 - 32;
-	u64 prev_raw_count, new_raw_count;
-	s64 delta;
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	u64 pcr;
 
-again:
-	prev_raw_count = atomic64_read(&hwc->prev_count);
-	new_raw_count = read_pmc(idx);
+	if (cpuc->enabled)
+		return;
 
-	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
-			     new_raw_count) != prev_raw_count)
-		goto again;
+	cpuc->enabled = 1;
+	barrier();
 
-	delta = (new_raw_count << shift) - (prev_raw_count << shift);
-	delta >>= shift;
+	pcr = cpuc->pcr;
+	if (!cpuc->n_events) {
+		pcr = 0;
+	} else {
+		pcr = maybe_change_configuration(cpuc, pcr);
 
-	atomic64_add(delta, &event->count);
-	atomic64_sub(delta, &hwc->period_left);
+		/* We require that all of the events have the same
+		 * configuration, so just fetch the settings from the
+		 * first entry.
+		 */
+		cpuc->pcr = pcr | cpuc->event[0]->hw.config_base;
+	}
 
-	return new_raw_count;
+	pcr_ops->write(cpuc->pcr);
+}
+
+void hw_perf_disable(void)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	u64 val;
+
+	if (!cpuc->enabled)
+		return;
+
+	cpuc->enabled = 0;
+	cpuc->n_added = 0;
+
+	val = cpuc->pcr;
+	val &= ~(PCR_UTRACE | PCR_STRACE |
+		 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
+	cpuc->pcr = val;
+
+	pcr_ops->write(cpuc->pcr);
 }
 
 static void sparc_pmu_disable(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
-	int idx = hwc->idx;
+	unsigned long flags;
+	int i;
 
-	clear_bit(idx, cpuc->active_mask);
-	sparc_pmu_disable_event(cpuc, hwc, idx);
+	local_irq_save(flags);
+	perf_disable();
+
+	for (i = 0; i < cpuc->n_events; i++) {
+		if (event == cpuc->event[i]) {
+			int idx = cpuc->current_idx[i];
+
+			/* Shift remaining entries down into
+			 * the existing slot.
+			 */
+			while (++i < cpuc->n_events) {
+				cpuc->event[i - 1] = cpuc->event[i];
+				cpuc->events[i - 1] = cpuc->events[i];
+				cpuc->current_idx[i - 1] =
+					cpuc->current_idx[i];
+			}
+
+			/* Absorb the final count and turn off the
+			 * event.
+			 */
+			sparc_pmu_disable_event(cpuc, hwc, idx);
+			barrier();
+			sparc_perf_event_update(event, hwc, idx);
 
-	barrier();
+			perf_event_update_userpage(event);
 
-	sparc_perf_event_update(event, hwc, idx);
-	cpuc->events[idx] = NULL;
-	clear_bit(idx, cpuc->used_mask);
+			cpuc->n_events--;
+			break;
+		}
+	}
 
-	perf_event_update_userpage(event);
+	perf_enable();
+	local_irq_restore(flags);
+}
+
+static int active_event_index(struct cpu_hw_events *cpuc,
+			      struct perf_event *event)
+{
+	int i;
+
+	for (i = 0; i < cpuc->n_events; i++) {
+		if (cpuc->event[i] == event)
+			break;
+	}
+	BUG_ON(i == cpuc->n_events);
+	return cpuc->current_idx[i];
 }
 
 static void sparc_pmu_read(struct perf_event *event)
 {
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	int idx = active_event_index(cpuc, event);
 	struct hw_perf_event *hwc = &event->hw;
 
-	sparc_perf_event_update(event, hwc, hwc->idx);
+	sparc_perf_event_update(event, hwc, idx);
 }
 
 static void sparc_pmu_unthrottle(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	int idx = active_event_index(cpuc, event);
 	struct hw_perf_event *hwc = &event->hw;
 
-	sparc_pmu_enable_event(cpuc, hwc, hwc->idx);
+	sparc_pmu_enable_event(cpuc, hwc, idx);
 }
 
 static atomic_t active_events = ATOMIC_INIT(0);
@@ -754,43 +854,75 @@ static void hw_perf_event_destroy(struct perf_event *event)
 /* Make sure all events can be scheduled into the hardware at
  * the same time.  This is simplified by the fact that we only
  * need to support 2 simultaneous HW events.
+ *
+ * As a side effect, the evts[]->hw.idx values will be assigned
+ * on success.  These are pending indexes.  When the events are
+ * actually programmed into the chip, these values will propagate
+ * to the per-cpu cpuc->current_idx[] slots, see the code in
+ * maybe_change_configuration() for details.
  */
-static int sparc_check_constraints(unsigned long *events, int n_ev)
+static int sparc_check_constraints(struct perf_event **evts,
+				   unsigned long *events, int n_ev)
 {
-	if (n_ev <= perf_max_events) {
-		u8 msk1, msk2;
-		u16 dummy;
-
-		if (n_ev == 1)
-			return 0;
-		BUG_ON(n_ev != 2);
-		perf_event_decode(events[0], &dummy, &msk1);
-		perf_event_decode(events[1], &dummy, &msk2);
-
-		/* If both events can go on any counter, OK.  */
-		if (msk1 == (PIC_UPPER | PIC_LOWER) &&
-		    msk2 == (PIC_UPPER | PIC_LOWER))
-			return 0;
-
-		/* If one event is limited to a specific counter,
-		 * and the other can go on both, OK.
-		 */
-		if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
-		    msk2 == (PIC_UPPER | PIC_LOWER))
-			return 0;
-		if ((msk2 == PIC_UPPER || msk2 == PIC_LOWER) &&
-		    msk1 == (PIC_UPPER | PIC_LOWER))
-			return 0;
-
-		/* If the events are fixed to different counters, OK.  */
-		if ((msk1 == PIC_UPPER && msk2 == PIC_LOWER) ||
-		    (msk1 == PIC_LOWER && msk2 == PIC_UPPER))
-			return 0;
-
-		/* Otherwise, there is a conflict.  */
+	u8 msk0 = 0, msk1 = 0;
+	int idx0 = 0;
+
+	/* This case is possible when we are invoked from
+	 * hw_perf_group_sched_in().
+	 */
+	if (!n_ev)
+		return 0;
+
+	if (n_ev > perf_max_events)
+		return -1;
+
+	msk0 = perf_event_get_msk(events[0]);
+	if (n_ev == 1) {
+		if (msk0 & PIC_LOWER)
+			idx0 = 1;
+		goto success;
+	}
+	BUG_ON(n_ev != 2);
+	msk1 = perf_event_get_msk(events[1]);
+
+	/* If both events can go on any counter, OK.  */
+	if (msk0 == (PIC_UPPER | PIC_LOWER) &&
+	    msk1 == (PIC_UPPER | PIC_LOWER))
+		goto success;
+
+	/* If one event is limited to a specific counter,
+	 * and the other can go on both, OK.
+	 */
+	if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) &&
+	    msk1 == (PIC_UPPER | PIC_LOWER)) {
+		if (msk0 & PIC_LOWER)
+			idx0 = 1;
+		goto success;
 	}
 
+	if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
+	    msk0 == (PIC_UPPER | PIC_LOWER)) {
+		if (msk1 & PIC_UPPER)
+			idx0 = 1;
+		goto success;
+	}
+
+	/* If the events are fixed to different counters, OK.  */
+	if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) ||
+	    (msk0 == PIC_LOWER && msk1 == PIC_UPPER)) {
+		if (msk0 & PIC_LOWER)
+			idx0 = 1;
+		goto success;
+	}
+
+	/* Otherwise, there is a conflict.  */
 	return -1;
+
+success:
+	evts[0]->hw.idx = idx0;
+	if (n_ev == 2)
+		evts[1]->hw.idx = idx0 ^ 1;
+	return 0;
 }
 
 static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
@@ -822,7 +954,8 @@ static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
 }
 
 static int collect_events(struct perf_event *group, int max_count,
-			  struct perf_event *evts[], unsigned long *events)
+			  struct perf_event *evts[], unsigned long *events,
+			  int *current_idx)
 {
 	struct perf_event *event;
 	int n = 0;
@@ -831,7 +964,8 @@ static int collect_events(struct perf_event *group, int max_count,
 		if (n >= max_count)
 			return -1;
 		evts[n] = group;
-		events[n++] = group->hw.event_base;
+		events[n] = group->hw.event_base;
+		current_idx[n++] = PIC_NO_INDEX;
 	}
 	list_for_each_entry(event, &group->sibling_list, group_entry) {
 		if (!is_software_event(event) &&
@@ -839,20 +973,100 @@ static int collect_events(struct perf_event *group, int max_count,
 			if (n >= max_count)
 				return -1;
 			evts[n] = event;
-			events[n++] = event->hw.event_base;
+			events[n] = event->hw.event_base;
+			current_idx[n++] = PIC_NO_INDEX;
 		}
 	}
 	return n;
 }
 
+static void event_sched_in(struct perf_event *event, int cpu)
+{
+	event->state = PERF_EVENT_STATE_ACTIVE;
+	event->oncpu = cpu;
+	event->tstamp_running += event->ctx->time - event->tstamp_stopped;
+	if (is_software_event(event))
+		event->pmu->enable(event);
+}
+
+int hw_perf_group_sched_in(struct perf_event *group_leader,
+			   struct perf_cpu_context *cpuctx,
+			   struct perf_event_context *ctx, int cpu)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct perf_event *sub;
+	int n0, n;
+
+	if (!sparc_pmu)
+		return 0;
+
+	n0 = cpuc->n_events;
+	n = collect_events(group_leader, perf_max_events - n0,
+			   &cpuc->event[n0], &cpuc->events[n0],
+			   &cpuc->current_idx[n0]);
+	if (n < 0)
+		return -EAGAIN;
+	if (check_excludes(cpuc->event, n0, n))
+		return -EINVAL;
+	if (sparc_check_constraints(cpuc->event, cpuc->events, n + n0))
+		return -EAGAIN;
+	cpuc->n_events = n0 + n;
+	cpuc->n_added += n;
+
+	cpuctx->active_oncpu += n;
+	n = 1;
+	event_sched_in(group_leader, cpu);
+	list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
+		if (sub->state != PERF_EVENT_STATE_OFF) {
+			event_sched_in(sub, cpu);
+			n++;
+		}
+	}
+	ctx->nr_active += n;
+
+	return 1;
+}
+
+static int sparc_pmu_enable(struct perf_event *event)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	int n0, ret = -EAGAIN;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	perf_disable();
+
+	n0 = cpuc->n_events;
+	if (n0 >= perf_max_events)
+		goto out;
+
+	cpuc->event[n0] = event;
+	cpuc->events[n0] = event->hw.event_base;
+	cpuc->current_idx[n0] = PIC_NO_INDEX;
+
+	if (check_excludes(cpuc->event, n0, 1))
+		goto out;
+	if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
+		goto out;
+
+	cpuc->n_events++;
+	cpuc->n_added++;
+
+	ret = 0;
+out:
+	perf_enable();
+	local_irq_restore(flags);
+	return ret;
+}
+
 static int __hw_perf_event_init(struct perf_event *event)
 {
 	struct perf_event_attr *attr = &event->attr;
 	struct perf_event *evts[MAX_HWEVENTS];
 	struct hw_perf_event *hwc = &event->hw;
 	unsigned long events[MAX_HWEVENTS];
+	int current_idx_dmy[MAX_HWEVENTS];
 	const struct perf_event_map *pmap;
-	u64 enc;
 	int n;
 
 	if (atomic_read(&nmi_active) < 0)
@@ -869,10 +1083,7 @@ static int __hw_perf_event_init(struct perf_event *event)
 	} else
 		return -EOPNOTSUPP;
 
-	/* We save the enable bits in the config_base.  So to
-	 * turn off sampling just write 'config', and to enable
-	 * things write 'config | config_base'.
-	 */
+	/* We save the enable bits in the config_base.  */
 	hwc->config_base = sparc_pmu->irq_bit;
 	if (!attr->exclude_user)
 		hwc->config_base |= PCR_UTRACE;
@@ -883,13 +1094,11 @@ static int __hw_perf_event_init(struct perf_event *event)
 
 	hwc->event_base = perf_event_encode(pmap);
 
-	enc = pmap->encoding;
-
 	n = 0;
 	if (event->group_leader != event) {
 		n = collect_events(event->group_leader,
 				   perf_max_events - 1,
-				   evts, events);
+				   evts, events, current_idx_dmy);
 		if (n < 0)
 			return -EINVAL;
 	}
@@ -899,9 +1108,11 @@ static int __hw_perf_event_init(struct perf_event *event)
 	if (check_excludes(evts, n, 1))
 		return -EINVAL;
 
-	if (sparc_check_constraints(events, n + 1))
+	if (sparc_check_constraints(evts, events, n + 1))
 		return -EINVAL;
 
+	hwc->idx = PIC_NO_INDEX;
+
 	/* Try to do all error checking before this point, as unwinding
 	 * state after grabbing the PMC is difficult.
 	 */
@@ -914,15 +1125,6 @@ static int __hw_perf_event_init(struct perf_event *event)
 		atomic64_set(&hwc->period_left, hwc->sample_period);
 	}
 
-	if (pmap->pic_mask & PIC_UPPER) {
-		hwc->idx = PIC_UPPER_INDEX;
-		enc <<= sparc_pmu->upper_shift;
-	} else {
-		hwc->idx = PIC_LOWER_INDEX;
-		enc <<= sparc_pmu->lower_shift;
-	}
-
-	hwc->config |= enc;
 	return 0;
 }
 
@@ -972,7 +1174,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
 	struct perf_sample_data data;
 	struct cpu_hw_events *cpuc;
 	struct pt_regs *regs;
-	int idx;
+	int i;
 
 	if (!atomic_read(&active_events))
 		return NOTIFY_DONE;
@@ -1001,13 +1203,12 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
 	if (sparc_pmu->irq_bit)
 		pcr_ops->write(cpuc->pcr);
 
-	for (idx = 0; idx < MAX_HWEVENTS; idx++) {
-		struct perf_event *event = cpuc->events[idx];
+	for (i = 0; i < cpuc->n_events; i++) {
+		struct perf_event *event = cpuc->event[i];
+		int idx = cpuc->current_idx[i];
 		struct hw_perf_event *hwc;
 		u64 val;
 
-		if (!test_bit(idx, cpuc->active_mask))
-			continue;
 		hwc = &event->hw;
 		val = sparc_perf_event_update(event, hwc, idx);
 		if (val & (1ULL << 31))
@@ -1059,10 +1260,8 @@ void __init init_hw_perf_events(void)
 
 	pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
 
-	/* All sparc64 PMUs currently have 2 events.  But this simple
-	 * driver only supports one active event at a time.
-	 */
-	perf_max_events = 1;
+	/* All sparc64 PMUs currently have 2 events.  */
+	perf_max_events = 2;
 
 	register_die_notifier(&perf_event_nmi_notifier);
 }
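
For readers following the scheduling changes above, here is a small standalone sketch (not part of the patch) of the packing scheme the new perf_event_get_enc()/perf_event_get_msk() helpers rely on: the counter-event encoding lives in bit 16 and up of an unsigned long, and the mask of PIC counters the event may use sits in the low byte. The struct layout and helper names mirror the diff; the program itself, its fixed-width types, and the sample encoding value 0x0009 are illustrative assumptions only.

/* Standalone illustration of the encoding/mask packing used by the patch. */
#include <stdio.h>
#include <stdint.h>

#define PIC_UPPER	0x01	/* event may run on the upper PIC counter */
#define PIC_LOWER	0x02	/* event may run on the lower PIC counter */

struct perf_event_map {
	uint16_t encoding;
	uint8_t pic_mask;
};

/* Pack encoding into bits 16+ and the counter mask into the low byte. */
static unsigned long perf_event_encode(const struct perf_event_map *pmap)
{
	return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
}

static uint8_t perf_event_get_msk(unsigned long val)
{
	return val & 0xff;
}

static unsigned long perf_event_get_enc(unsigned long val)
{
	return val >> 16;
}

int main(void)
{
	/* Hypothetical event: encoding 0x0009, schedulable on either counter. */
	struct perf_event_map ev = { 0x0009, PIC_UPPER | PIC_LOWER };
	unsigned long packed = perf_event_encode(&ev);

	printf("packed=0x%lx enc=0x%lx msk=0x%x\n",
	       packed, perf_event_get_enc(packed),
	       (unsigned int) perf_event_get_msk(packed));
	return 0;
}

Because both pieces fit in one unsigned long, the per-cpu cpuc->events[] array introduced by the patch can carry everything the constraint checker needs without dereferencing the perf_event structures again.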