aboutsummaryrefslogtreecommitdiffstats
path: root/arch/sparc/kernel
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2012-08-17 05:31:10 -0400
committerDavid S. Miller <davem@davemloft.net>2012-08-19 02:26:19 -0400
commitb38e99f5bdf62f37d7552311fef1bff00bec6308 (patch)
treef3c3de99b42a698bd268447ba97bcd3d5de5d688 /arch/sparc/kernel
parent6faaeb8ea30e55c9fd7cf65d05f3ce44973d1d12 (diff)
sparc64: Add perf_event abstractions for orthogonal PMUs.
Starting with SPARC-T4 we have a separate PCR control register for each performance counter, and there are absolutely no restrictions on what events can run on which counters. Add flags that we can use to elide the conflict and dependency logic used to handle older chips. Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc/kernel')
-rw-r--r--arch/sparc/kernel/perf_event.c20
1 file changed, 20 insertions, 0 deletions
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index dd12aa35805d..11b424bb0b2b 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -148,6 +148,9 @@ struct sparc_pmu {
148 int irq_bit; 148 int irq_bit;
149 int upper_nop; 149 int upper_nop;
150 int lower_nop; 150 int lower_nop;
151 unsigned int flags;
152#define SPARC_PMU_ALL_EXCLUDES_SAME 0x00000001
153#define SPARC_PMU_HAS_CONFLICTS 0x00000002
151}; 154};
152 155
153static const struct perf_event_map ultra3_perfmon_event_map[] = { 156static const struct perf_event_map ultra3_perfmon_event_map[] = {
@@ -272,6 +275,8 @@ static const struct sparc_pmu ultra3_pmu = {
272 .event_mask = 0x3f, 275 .event_mask = 0x3f,
273 .upper_nop = 0x1c, 276 .upper_nop = 0x1c,
274 .lower_nop = 0x14, 277 .lower_nop = 0x14,
278 .flags = (SPARC_PMU_ALL_EXCLUDES_SAME |
279 SPARC_PMU_HAS_CONFLICTS),
275}; 280};
276 281
277/* Niagara1 is very limited. The upper PIC is hard-locked to count 282/* Niagara1 is very limited. The upper PIC is hard-locked to count
@@ -401,6 +406,8 @@ static const struct sparc_pmu niagara1_pmu = {
401 .event_mask = 0x7, 406 .event_mask = 0x7,
402 .upper_nop = 0x0, 407 .upper_nop = 0x0,
403 .lower_nop = 0x0, 408 .lower_nop = 0x0,
409 .flags = (SPARC_PMU_ALL_EXCLUDES_SAME |
410 SPARC_PMU_HAS_CONFLICTS),
404}; 411};
405 412
406static const struct perf_event_map niagara2_perfmon_event_map[] = { 413static const struct perf_event_map niagara2_perfmon_event_map[] = {
@@ -529,6 +536,8 @@ static const struct sparc_pmu niagara2_pmu = {
529 .irq_bit = 0x30, 536 .irq_bit = 0x30,
530 .upper_nop = 0x220, 537 .upper_nop = 0x220,
531 .lower_nop = 0x220, 538 .lower_nop = 0x220,
539 .flags = (SPARC_PMU_ALL_EXCLUDES_SAME |
540 SPARC_PMU_HAS_CONFLICTS),
532}; 541};
533 542
534static const struct sparc_pmu *sparc_pmu __read_mostly; 543static const struct sparc_pmu *sparc_pmu __read_mostly;
@@ -944,6 +953,14 @@ static int sparc_check_constraints(struct perf_event **evts,
944 if (n_ev > MAX_HWEVENTS) 953 if (n_ev > MAX_HWEVENTS)
945 return -1; 954 return -1;
946 955
956 if (!(sparc_pmu->flags & SPARC_PMU_HAS_CONFLICTS)) {
957 int i;
958
959 for (i = 0; i < n_ev; i++)
960 evts[i]->hw.idx = i;
961 return 0;
962 }
963
947 msk0 = perf_event_get_msk(events[0]); 964 msk0 = perf_event_get_msk(events[0]);
948 if (n_ev == 1) { 965 if (n_ev == 1) {
949 if (msk0 & PIC_LOWER) 966 if (msk0 & PIC_LOWER)
@@ -999,6 +1016,9 @@ static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
999 struct perf_event *event; 1016 struct perf_event *event;
1000 int i, n, first; 1017 int i, n, first;
1001 1018
1019 if (!(sparc_pmu->flags & SPARC_PMU_ALL_EXCLUDES_SAME))
1020 return 0;
1021
1002 n = n_prev + n_new; 1022 n = n_prev + n_new;
1003 if (n <= 1) 1023 if (n <= 1)
1004 return 0; 1024 return 0;