author		Will Deacon <will.deacon@arm.com>	2010-12-02 12:01:49 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2010-12-04 06:18:08 -0500
commit		961ec6daa7b14f376c30d447a830fa4783a2112c (patch)
tree		fd4952bd65fa9e991de7687eaef4b2e5af7a0e70 /arch/arm/kernel/perf_event_v6.c
parent		4d6b7a779be34e1df296abc1dc555134a8cf34af (diff)
ARM: 6521/1: perf: use raw_spinlock_t for pmu_lock
For kernels built with PREEMPT_RT, critical sections protected by standard spinlocks are preemptible. This is not acceptable for perf because (a) we may be scheduled onto a different CPU whilst reading/writing banked PMU registers and (b) the latency when reading the PMU registers becomes unpredictable.

This patch upgrades pmu_lock from a spinlock to a raw_spinlock.

Reported-by: Jamie Iles <jamie@jamieiles.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/kernel/perf_event_v6.c')
-rw-r--r--	arch/arm/kernel/perf_event_v6.c	20
1 file changed, 10 insertions(+), 10 deletions(-)
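For reference, below is a minimal, self-contained sketch of the locking pattern this patch moves to. It is not taken from the kernel sources: example_pmu_lock, example_pmcr and the example_* helpers are hypothetical stand-ins for the real pmu_lock and armv6_pmcr_read()/armv6_pmcr_write(). The point it illustrates is that a raw_spinlock_t never turns into a sleeping lock under PREEMPT_RT, so the read-modify-write of the per-CPU PMU control register cannot be preempted or migrated mid-sequence.

#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical stand-in for pmu_lock; stays a busy-waiting lock on PREEMPT_RT. */
static DEFINE_RAW_SPINLOCK(example_pmu_lock);

/* Hypothetical stand-in for the banked PMU control register. */
static u32 example_pmcr;

static u32 example_pmcr_read(void)
{
	return example_pmcr;
}

static void example_pmcr_write(u32 val)
{
	example_pmcr = val;
}

static void example_enable_event(u32 mask, u32 evt)
{
	unsigned long flags;
	u32 val;

	/*
	 * Interrupts off plus the raw lock: the read-modify-write of the
	 * control register runs to completion on this CPU, with bounded
	 * latency, exactly as the patch description requires.
	 */
	raw_spin_lock_irqsave(&example_pmu_lock, flags);
	val = example_pmcr_read();
	val &= ~mask;
	val |= evt;
	example_pmcr_write(val);
	raw_spin_unlock_irqrestore(&example_pmu_lock, flags);
}

The diff below applies exactly this change: each spin_lock_irqsave()/spin_unlock_irqrestore() pair around the PMCR accesses becomes its raw_* counterpart.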
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 3f427aae211d..c058bfc8532b 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -426,12 +426,12 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
 	 * Mask out the current event and set the counter to count the event
 	 * that we're interested in.
 	 */
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static irqreturn_t
@@ -500,11 +500,11 @@ armv6pmu_start(void)
 {
 	unsigned long flags, val;
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val |= ARMV6_PMCR_ENABLE;
 	armv6_pmcr_write(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static void
@@ -512,11 +512,11 @@ armv6pmu_stop(void)
 {
 	unsigned long flags, val;
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~ARMV6_PMCR_ENABLE;
 	armv6_pmcr_write(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static int
@@ -570,12 +570,12 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
 	 * of ETM bus signal assertion cycles. The external reporting should
 	 * be disabled and so this should never increment.
 	 */
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static void
@@ -599,12 +599,12 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
 	 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
 	 * simply disable the interrupt reporting.
 	 */
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static const struct arm_pmu armv6pmu = {