author     Will Deacon <will.deacon@arm.com>             2010-12-02 12:01:49 -0500
committer  Russell King <rmk+kernel@arm.linux.org.uk>    2010-12-04 06:18:08 -0500
commit     961ec6daa7b14f376c30d447a830fa4783a2112c
tree       fd4952bd65fa9e991de7687eaef4b2e5af7a0e70   /arch/arm/kernel/perf_event_xscale.c
parent     4d6b7a779be34e1df296abc1dc555134a8cf34af
ARM: 6521/1: perf: use raw_spinlock_t for pmu_lock
For kernels built with PREEMPT_RT, critical sections protected
by standard spinlocks are preemptible. This is not acceptable
for perf because (a) we may be scheduled onto a different CPU whilst
reading/writing banked PMU registers, and (b) the latency when
reading the PMU registers becomes unpredictable.
This patch upgrades the pmu_lock spinlock to a raw_spinlock
instead.
Reported-by: Jamie Iles <jamie@jamieiles.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
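
As background for the diff below, here is a minimal, self-contained sketch of
the locking pattern the patch adopts; it is not taken from the kernel source.
The pmu_set_and_clear() function, the read_pmnc()/write_pmnc() helpers and the
local pmu_lock declaration are hypothetical stand-ins for the xscale1pmu_*
helpers and the shared lock used by the real driver (which is declared outside
this file). The point it illustrates: on PREEMPT_RT a plain spinlock_t becomes
a sleeping, preemptible lock, whereas DEFINE_RAW_SPINLOCK() together with
raw_spin_lock_irqsave() keeps the critical section non-preemptible with local
interrupts disabled, so the read-modify-write of the per-CPU (banked) PMU
control register cannot be interrupted or migrated to another CPU mid-sequence.

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_RAW_SPINLOCK(pmu_lock);	/* was: DEFINE_SPINLOCK(pmu_lock) */

/* Placeholder accessors standing in for xscale1pmu_read_pmnc()/_write_pmnc(). */
static u32 pmnc_shadow;
static u32 read_pmnc(void)		{ return pmnc_shadow; }
static void write_pmnc(u32 val)		{ pmnc_shadow = val; }

static void pmu_set_and_clear(u32 set, u32 clear)
{
	unsigned long flags;
	u32 val;

	/*
	 * raw_spin_lock_irqsave() disables local interrupts and busy-waits
	 * even on PREEMPT_RT, so the read-modify-write below can neither be
	 * preempted nor scheduled onto a different CPU mid-sequence.
	 */
	raw_spin_lock_irqsave(&pmu_lock, flags);
	val = read_pmnc();
	val &= ~clear;
	val |= set;
	write_pmnc(val);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}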
Diffstat (limited to 'arch/arm/kernel/perf_event_xscale.c')
 arch/arm/kernel/perf_event_xscale.c | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index f14fbb6c345b..28cd3b025bc3 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -291,12 +291,12 @@ xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~mask;
 	val |= evt;
 	xscale1pmu_write_pmnc(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static void
@@ -322,12 +322,12 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~mask;
 	val |= evt;
 	xscale1pmu_write_pmnc(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static int
@@ -355,11 +355,11 @@ xscale1pmu_start(void)
 {
 	unsigned long flags, val;
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val |= XSCALE_PMU_ENABLE;
 	xscale1pmu_write_pmnc(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static void
@@ -367,11 +367,11 @@ xscale1pmu_stop(void)
 {
 	unsigned long flags, val;
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~XSCALE_PMU_ENABLE;
 	xscale1pmu_write_pmnc(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static inline u32
@@ -635,10 +635,10 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static void
@@ -678,10 +678,10 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static int
@@ -705,11 +705,11 @@ xscale2pmu_start(void)
 {
 	unsigned long flags, val;
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
 	val |= XSCALE_PMU_ENABLE;
 	xscale2pmu_write_pmnc(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static void
@@ -717,11 +717,11 @@ xscale2pmu_stop(void)
 {
 	unsigned long flags, val;
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = xscale2pmu_read_pmnc();
 	val &= ~XSCALE_PMU_ENABLE;
 	xscale2pmu_write_pmnc(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static inline u32