Diffstat (limited to 'arch/sparc/kernel/perf_event.c')
-rw-r--r--	arch/sparc/kernel/perf_event.c	55
1 file changed, 43 insertions(+), 12 deletions(-)
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 46a5e4508752..86eebfa3b158 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -792,6 +792,42 @@ static const struct sparc_pmu niagara4_pmu = {
 	.num_pic_regs	= 4,
 };
 
+static void sparc_m7_write_pmc(int idx, u64 val)
+{
+	u64 pcr;
+
+	pcr = pcr_ops->read_pcr(idx);
+	/* ensure ov and ntc are reset */
+	pcr &= ~(PCR_N4_OV | PCR_N4_NTC);
+
+	pcr_ops->write_pic(idx, val & 0xffffffff);
+
+	pcr_ops->write_pcr(idx, pcr);
+}
+
+static const struct sparc_pmu sparc_m7_pmu = {
+	.event_map	= niagara4_event_map,
+	.cache_map	= &niagara4_cache_map,
+	.max_events	= ARRAY_SIZE(niagara4_perfmon_event_map),
+	.read_pmc	= sparc_vt_read_pmc,
+	.write_pmc	= sparc_m7_write_pmc,
+	.upper_shift	= 5,
+	.lower_shift	= 5,
+	.event_mask	= 0x7ff,
+	.user_bit	= PCR_N4_UTRACE,
+	.priv_bit	= PCR_N4_STRACE,
+
+	/* We explicitly don't support hypervisor tracing. */
+	.hv_bit		= 0,
+
+	.irq_bit	= PCR_N4_TOE,
+	.upper_nop	= 0,
+	.lower_nop	= 0,
+	.flags		= 0,
+	.max_hw_events	= 4,
+	.num_pcrs	= 4,
+	.num_pic_regs	= 4,
+};
 static const struct sparc_pmu *sparc_pmu __read_mostly;
 
 static u64 event_encoding(u64 event_id, int idx)
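The new write_pmc hook clears the overflow bits in the PCR it restores and truncates the value written to the PIC to 32 bits. The snippet below is a standalone illustration, not kernel code, of the usual reload arithmetic behind that masking: perf drivers arm a 32-bit up-counter with the two's complement of the sampling period so the counter overflows, and raises the counter interrupt, after exactly that many events. The variable names are illustrative only.

/*
 * Standalone illustration (not kernel code): a 32-bit counter armed with
 * the two's complement of the sampling period overflows after exactly
 * "period" events, which is why the value handed to write_pmc is
 * truncated with "& 0xffffffff".
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t period = 100000;			/* sample every 100k events */
	uint32_t pic = (uint32_t)(0 - period);		/* reload value written to the counter */
	uint64_t ticks = ((uint64_t)UINT32_MAX - pic) + 1;	/* increments until wrap */

	printf("reload 0x%08x overflows after %llu events\n",
	       (unsigned)pic, (unsigned long long)ticks);	/* prints 100000 */
	return 0;
}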
@@ -960,6 +996,8 @@ out:
 	cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
 }
 
+static void sparc_pmu_start(struct perf_event *event, int flags);
+
 /* On this PMU each PIC has it's own PCR control register. */
 static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
 {
@@ -972,20 +1010,13 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
 		struct perf_event *cp = cpuc->event[i];
 		struct hw_perf_event *hwc = &cp->hw;
 		int idx = hwc->idx;
-		u64 enc;
 
 		if (cpuc->current_idx[i] != PIC_NO_INDEX)
 			continue;
 
-		sparc_perf_event_set_period(cp, hwc, idx);
 		cpuc->current_idx[i] = idx;
 
-		enc = perf_event_get_enc(cpuc->events[i]);
-		cpuc->pcr[idx] &= ~mask_for_index(idx);
-		if (hwc->state & PERF_HES_STOPPED)
-			cpuc->pcr[idx] |= nop_for_index(idx);
-		else
-			cpuc->pcr[idx] |= event_encoding(enc, idx);
+		sparc_pmu_start(cp, PERF_EF_RELOAD);
 	}
 out:
 	for (i = 0; i < cpuc->n_events; i++) {
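calculate_multiple_pcrs() now defers to sparc_pmu_start() (forward-declared above) with PERF_EF_RELOAD instead of open-coding the period setup and PCR encoding. The sketch below shows the general struct pmu ->start() contract that makes this equivalent; it is not the SPARC implementation, and the helper names are hypothetical placeholders.

/*
 * Simplified sketch of the struct pmu ->start() contract, not the SPARC
 * code: PERF_EF_RELOAD asks the driver to reprogram the counter with the
 * current period before re-enabling it.
 */
static void example_pmu_start(struct perf_event *event, int flags)	/* hypothetical */
{
	if (flags & PERF_EF_RELOAD)
		example_reload_period(event);	/* hypothetical: write -period into the counter */

	event->hw.state = 0;			/* mark the event as running */
	example_enable_in_pcr(event);		/* hypothetical: program the event into the PCR */
}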
@@ -1101,7 +1132,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
 	int i;
 
 	local_irq_save(flags);
-	perf_pmu_disable(event->pmu);
 
 	for (i = 0; i < cpuc->n_events; i++) {
 		if (event == cpuc->event[i]) {
@@ -1127,7 +1157,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
 		}
 	}
 
-	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 }
 
@@ -1361,7 +1390,6 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	perf_pmu_disable(event->pmu);
 
 	n0 = cpuc->n_events;
 	if (n0 >= sparc_pmu->max_hw_events)
@@ -1394,7 +1422,6 @@ nocheck:
 
 	ret = 0;
 out:
-	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 	return ret;
 }
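The perf_pmu_disable()/perf_pmu_enable() pairs dropped from sparc_pmu_del() and sparc_pmu_add() in the hunks above are redundant: the perf core already brackets the driver's ->add() and ->del() callbacks itself. The sketch below shows that calling convention in simplified form; it is not the actual kernel/events/core.c code and the function name is hypothetical.

/*
 * Simplified sketch (not the actual kernel/events/core.c code): the
 * disable/enable bracketing is done by the caller, so repeating it inside
 * sparc_pmu_add()/sparc_pmu_del() only nests the disable count.
 */
static int core_event_sched_in(struct perf_event *event)	/* hypothetical name */
{
	int err;

	perf_pmu_disable(event->pmu);
	err = event->pmu->add(event, PERF_EF_START);	/* ends up in sparc_pmu_add() */
	perf_pmu_enable(event->pmu);

	return err;
}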
@@ -1667,6 +1694,10 @@ static bool __init supported_pmu(void)
 		sparc_pmu = &niagara4_pmu;
 		return true;
 	}
+	if (!strcmp(sparc_pmu_type, "sparc-m7")) {
+		sparc_pmu = &sparc_m7_pmu;
+		return true;
+	}
 	return false;
 }
 
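With supported_pmu() now matching the "sparc-m7" cpu type, generic hardware events work on M7 through the normal perf ABI. The following minimal user-space sketch counts CPU cycles over a busy loop with perf_event_open(2); nothing in it is M7-specific, it simply exercises whichever PMU descriptor supported_pmu() selected.

/*
 * Minimal user-space sketch: count CPU cycles using perf_event_open(2).
 * The kernel maps the generic PERF_COUNT_HW_CPU_CYCLES event through the
 * PMU descriptor chosen by supported_pmu().
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	volatile uint64_t spin = 0;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;
	attr.exclude_hv = 1;	/* harmless here; the M7 descriptor sets hv_bit = 0 */

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* this task, any CPU */
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	while (spin < 10000000)			/* workload to measure */
		spin++;
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles: %llu\n", (unsigned long long)count);

	close(fd);
	return 0;
}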
