author     Will Deacon <will.deacon@arm.com>           2010-12-02 12:01:49 -0500
committer  Russell King <rmk+kernel@arm.linux.org.uk>  2010-12-04 06:18:08 -0500
commit     961ec6daa7b14f376c30d447a830fa4783a2112c
tree       fd4952bd65fa9e991de7687eaef4b2e5af7a0e70 /arch/arm/kernel
parent     4d6b7a779be34e1df296abc1dc555134a8cf34af
ARM: 6521/1: perf: use raw_spinlock_t for pmu_lock
For kernels built with PREEMPT_RT, critical sections protected
by standard spinlocks are preemptible. This is not acceptable
for perf as (a) we may be scheduled onto a different CPU whilst
reading/writing banked PMU registers and (b) the latency when
reading the PMU registers becomes unpredictable.

This patch upgrades the pmu_lock spinlock to a raw_spinlock.
Reported-by: Jamie Iles <jamie@jamieiles.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
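
For illustration, here is a minimal sketch of the pattern this patch applies
throughout. On PREEMPT_RT a spinlock_t becomes a sleeping lock, so a
read/modify/write of a banked PMU register could be preempted and resumed on
another CPU mid-sequence; raw_spinlock_t remains a true spinning lock on all
configurations. The read_pmu_reg()/write_pmu_reg() accessors below are
hypothetical stand-ins for the per-implementation helpers touched by this
patch (armv6_pmcr_read(), xscale1pmu_write_pmnc(), and so on):

	#include <linux/spinlock.h>

	static DEFINE_RAW_SPINLOCK(pmu_lock);

	/*
	 * Read/modify/write of a banked PMU control register. The raw
	 * spinlock disables preemption (and, with _irqsave, local
	 * interrupts) even on PREEMPT_RT, so the whole sequence runs to
	 * completion on one CPU. read_pmu_reg()/write_pmu_reg() are
	 * hypothetical accessors, not part of this patch.
	 */
	static void pmu_update_reg(u32 mask, u32 evt)
	{
		unsigned long flags;
		u32 val;

		raw_spin_lock_irqsave(&pmu_lock, flags);
		val = read_pmu_reg();
		val &= ~mask;
		val |= evt;
		write_pmu_reg(val);
		raw_spin_unlock_irqrestore(&pmu_lock, flags);
	}

Because every critical section here is short and already runs with interrupts
disabled, the conversion changes no behaviour on mainline kernels; it only
forbids PREEMPT_RT from substituting a sleeping lock.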
Diffstat (limited to 'arch/arm/kernel')
 arch/arm/kernel/perf_event.c        |  2 +-
 arch/arm/kernel/perf_event_v6.c     | 20 ++++++++++----------
 arch/arm/kernel/perf_event_v7.c     | 16 ++++++++--------
 arch/arm/kernel/perf_event_xscale.c | 32 ++++++++++++++++----------------
 4 files changed, 35 insertions(+), 35 deletions(-)
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 50c197bfac0f..624e2a5de2b3 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -32,7 +32,7 @@ static struct platform_device *pmu_device;
  * Hardware lock to serialize accesses to PMU registers. Needed for the
  * read/modify/write sequences.
  */
-static DEFINE_SPINLOCK(pmu_lock);
+static DEFINE_RAW_SPINLOCK(pmu_lock);
 
 /*
  * ARMv6 supports a maximum of 3 events, starting from index 1. If we add
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 3f427aae211d..c058bfc8532b 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -426,12 +426,12 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
 	 * Mask out the current event and set the counter to count the event
 	 * that we're interested in.
 	 */
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static irqreturn_t
@@ -500,11 +500,11 @@ armv6pmu_start(void)
 {
 	unsigned long flags, val;
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val |= ARMV6_PMCR_ENABLE;
 	armv6_pmcr_write(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static void
@@ -512,11 +512,11 @@ armv6pmu_stop(void)
 {
 	unsigned long flags, val;
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~ARMV6_PMCR_ENABLE;
 	armv6_pmcr_write(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static int
@@ -570,12 +570,12 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
 	 * of ETM bus signal assertion cycles. The external reporting should
 	 * be disabled and so this should never increment.
 	 */
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static void
@@ -599,12 +599,12 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
 	 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
 	 * simply disable the interrupt reporting.
 	 */
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static const struct arm_pmu armv6pmu = {
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index a68ff1c10dec..2e1402556fa0 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -689,7 +689,7 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 	 * Enable counter and interrupt, and set the counter to count
 	 * the event that we're interested in.
 	 */
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 
 	/*
 	 * Disable counter
@@ -713,7 +713,7 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 	 */
 	armv7_pmnc_enable_counter(idx);
 
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
@@ -723,7 +723,7 @@ static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	/*
 	 * Disable counter and interrupt
 	 */
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 
 	/*
 	 * Disable counter
@@ -735,7 +735,7 @@ static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	 */
 	armv7_pmnc_disable_intens(idx);
 
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
@@ -805,20 +805,20 @@ static void armv7pmu_start(void)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	/* Enable all counters */
 	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static void armv7pmu_stop(void)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	/* Disable all counters */
 	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index f14fbb6c345b..28cd3b025bc3 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -291,12 +291,12 @@ xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~mask;
 	val |= evt;
 	xscale1pmu_write_pmnc(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static void
@@ -322,12 +322,12 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~mask;
 	val |= evt;
 	xscale1pmu_write_pmnc(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static int
@@ -355,11 +355,11 @@ xscale1pmu_start(void)
 {
 	unsigned long flags, val;
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val |= XSCALE_PMU_ENABLE;
 	xscale1pmu_write_pmnc(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static void
@@ -367,11 +367,11 @@ xscale1pmu_stop(void)
 {
 	unsigned long flags, val;
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~XSCALE_PMU_ENABLE;
 	xscale1pmu_write_pmnc(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static inline u32
@@ -635,10 +635,10 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static void
@@ -678,10 +678,10 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static int
@@ -705,11 +705,11 @@ xscale2pmu_start(void)
 {
 	unsigned long flags, val;
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
 	val |= XSCALE_PMU_ENABLE;
 	xscale2pmu_write_pmnc(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static void
@@ -717,11 +717,11 @@ xscale2pmu_stop(void)
 {
 	unsigned long flags, val;
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = xscale2pmu_read_pmnc();
 	val &= ~XSCALE_PMU_ENABLE;
 	xscale2pmu_write_pmnc(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static inline u32