aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMark Rutland <mark.rutland@arm.com>2011-07-19 04:37:10 -0400
committerWill Deacon <will.deacon@arm.com>2011-08-31 05:50:02 -0400
commitc47f8684baefa2bf52c4320f894e73db08dc8a0a (patch)
tree685df6d09e03620e12d50c31cbf20e69b9e6ee32
parent7b9f72c62ed047a200b1ef8c70bee0b58e880af8 (diff)
ARM: perf: remove active_mask
Currently, pmu_hw_events::active_mask is used to keep track of which events are active in hardware. As we can stop counters and their interrupts, this is unnecessary.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Jamie Iles <jamie@jamieiles.com>
Reviewed-by: Ashwin Chaugule <ashwinc@codeaurora.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r--arch/arm/kernel/perf_event.c8
-rw-r--r--arch/arm/kernel/perf_event_v6.c19
-rw-r--r--arch/arm/kernel/perf_event_v7.c3
-rw-r--r--arch/arm/kernel/perf_event_xscale.c6
4 files changed, 18 insertions, 18 deletions
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index dfde9283aec1..438482ff7498 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -57,12 +57,6 @@ struct cpu_hw_events {
57 * an event. A 0 means that the counter can be used. 57 * an event. A 0 means that the counter can be used.
58 */ 58 */
59 unsigned long used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)]; 59 unsigned long used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
60
61 /*
62 * A 1 bit for an index indicates that the counter is actively being
63 * used.
64 */
65 unsigned long active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
66}; 60};
67static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); 61static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
68 62
@@ -295,7 +289,6 @@ armpmu_del(struct perf_event *event, int flags)
295 289
296 WARN_ON(idx < 0); 290 WARN_ON(idx < 0);
297 291
298 clear_bit(idx, cpuc->active_mask);
299 armpmu_stop(event, PERF_EF_UPDATE); 292 armpmu_stop(event, PERF_EF_UPDATE);
300 cpuc->events[idx] = NULL; 293 cpuc->events[idx] = NULL;
301 clear_bit(idx, cpuc->used_mask); 294 clear_bit(idx, cpuc->used_mask);
@@ -327,7 +320,6 @@ armpmu_add(struct perf_event *event, int flags)
327 event->hw.idx = idx; 320 event->hw.idx = idx;
328 armpmu->disable(hwc, idx); 321 armpmu->disable(hwc, idx);
329 cpuc->events[idx] = event; 322 cpuc->events[idx] = event;
330 set_bit(idx, cpuc->active_mask);
331 323
332 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; 324 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
333 if (flags & PERF_EF_START) 325 if (flags & PERF_EF_START)
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 87f29b553b8f..839012862264 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -462,6 +462,23 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
462 raw_spin_unlock_irqrestore(&pmu_lock, flags); 462 raw_spin_unlock_irqrestore(&pmu_lock, flags);
463} 463}
464 464
465static int counter_is_active(unsigned long pmcr, int idx)
466{
467 unsigned long mask = 0;
468 if (idx == ARMV6_CYCLE_COUNTER)
469 mask = ARMV6_PMCR_CCOUNT_IEN;
470 else if (idx == ARMV6_COUNTER0)
471 mask = ARMV6_PMCR_COUNT0_IEN;
472 else if (idx == ARMV6_COUNTER1)
473 mask = ARMV6_PMCR_COUNT1_IEN;
474
475 if (mask)
476 return pmcr & mask;
477
478 WARN_ONCE(1, "invalid counter number (%d)\n", idx);
479 return 0;
480}
481
465static irqreturn_t 482static irqreturn_t
466armv6pmu_handle_irq(int irq_num, 483armv6pmu_handle_irq(int irq_num,
467 void *dev) 484 void *dev)
@@ -491,7 +508,7 @@ armv6pmu_handle_irq(int irq_num,
491 struct perf_event *event = cpuc->events[idx]; 508 struct perf_event *event = cpuc->events[idx];
492 struct hw_perf_event *hwc; 509 struct hw_perf_event *hwc;
493 510
494 if (!test_bit(idx, cpuc->active_mask)) 511 if (!counter_is_active(pmcr, idx))
495 continue; 512 continue;
496 513
497 /* 514 /*
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index fe6c931d2c4b..f4170fc228b6 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1022,9 +1022,6 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
1022 struct perf_event *event = cpuc->events[idx]; 1022 struct perf_event *event = cpuc->events[idx];
1023 struct hw_perf_event *hwc; 1023 struct hw_perf_event *hwc;
1024 1024
1025 if (!test_bit(idx, cpuc->active_mask))
1026 continue;
1027
1028 /* 1025 /*
1029 * We have a single interrupt for all counters. Check that 1026 * We have a single interrupt for all counters. Check that
1030 * each counter has overflowed before we process it. 1027 * each counter has overflowed before we process it.
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 54312fc45ca3..ca89a06c8e92 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -253,9 +253,6 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
253 struct perf_event *event = cpuc->events[idx]; 253 struct perf_event *event = cpuc->events[idx];
254 struct hw_perf_event *hwc; 254 struct hw_perf_event *hwc;
255 255
256 if (!test_bit(idx, cpuc->active_mask))
257 continue;
258
259 if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx)) 256 if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
260 continue; 257 continue;
261 258
@@ -585,9 +582,6 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
585 struct perf_event *event = cpuc->events[idx]; 582 struct perf_event *event = cpuc->events[idx];
586 struct hw_perf_event *hwc; 583 struct hw_perf_event *hwc;
587 584
588 if (!test_bit(idx, cpuc->active_mask))
589 continue;
590
591 if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx)) 585 if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
592 continue; 586 continue;
593 587