about | summary | refs | log | tree | commit | diff | stats
path: root/arch/sparc/kernel/perf_event.c
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2012-08-17 05:33:44 -0400
committerDavid S. Miller <davem@davemloft.net>2012-08-19 02:26:19 -0400
commit59660495e80e7eabc726c301ddc46afd2ce1bcac (patch)
tree394bfc9a65e1e236a925d34300e9015cf10e6489 /arch/sparc/kernel/perf_event.c
parentb38e99f5bdf62f37d7552311fef1bff00bec6308 (diff)
sparc64: Allow max hw perf events to be variable.
Now specified in sparc_pmu descriptor. Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc/kernel/perf_event.c')
-rw-r--r--  arch/sparc/kernel/perf_event.c | 10
1 files changed, 7 insertions, 3 deletions
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 11b424bb0b2b..f7b9ae39c264 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -151,6 +151,7 @@ struct sparc_pmu {
 	unsigned int		flags;
 #define SPARC_PMU_ALL_EXCLUDES_SAME	0x00000001
 #define SPARC_PMU_HAS_CONFLICTS		0x00000002
+	int			max_hw_events;
 };
 
 static const struct perf_event_map ultra3_perfmon_event_map[] = {
@@ -277,6 +278,7 @@ static const struct sparc_pmu ultra3_pmu = {
 	.lower_nop	= 0x14,
 	.flags		= (SPARC_PMU_ALL_EXCLUDES_SAME |
 			   SPARC_PMU_HAS_CONFLICTS),
+	.max_hw_events	= 2,
 };
 
 /* Niagara1 is very limited.  The upper PIC is hard-locked to count
@@ -408,6 +410,7 @@ static const struct sparc_pmu niagara1_pmu = {
 	.lower_nop	= 0x0,
 	.flags		= (SPARC_PMU_ALL_EXCLUDES_SAME |
 			   SPARC_PMU_HAS_CONFLICTS),
+	.max_hw_events	= 2,
 };
 
 static const struct perf_event_map niagara2_perfmon_event_map[] = {
@@ -538,6 +541,7 @@ static const struct sparc_pmu niagara2_pmu = {
 	.lower_nop	= 0x220,
 	.flags		= (SPARC_PMU_ALL_EXCLUDES_SAME |
 			   SPARC_PMU_HAS_CONFLICTS),
+	.max_hw_events	= 2,
 };
 
 static const struct sparc_pmu *sparc_pmu __read_mostly;
@@ -950,7 +954,7 @@ static int sparc_check_constraints(struct perf_event **evts,
 	if (!n_ev)
 		return 0;
 
-	if (n_ev > MAX_HWEVENTS)
+	if (n_ev > sparc_pmu->max_hw_events)
 		return -1;
 
 	if (!(sparc_pmu->flags & SPARC_PMU_HAS_CONFLICTS)) {
@@ -1078,7 +1082,7 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
 	perf_pmu_disable(event->pmu);
 
 	n0 = cpuc->n_events;
-	if (n0 >= MAX_HWEVENTS)
+	if (n0 >= sparc_pmu->max_hw_events)
 		goto out;
 
 	cpuc->event[n0] = event;
@@ -1174,7 +1178,7 @@ static int sparc_pmu_event_init(struct perf_event *event)
 	n = 0;
 	if (event->group_leader != event) {
 		n = collect_events(event->group_leader,
-				   sparc_pmu->max_hw_events - 1,
+				   sparc_pmu->max_hw_events - 1,
 				   evts, events, current_idx_dmy);
 		if (n < 0)
 			return -EINVAL;