diff options
author | Mark Rutland <mark.rutland@arm.com> | 2011-04-28 05:17:04 -0400 |
---|---|---|
committer | Will Deacon <will.deacon@arm.com> | 2011-08-31 05:50:07 -0400 |
commit | 0f78d2d5ccf72ec834da6901886a40fd8e3b7615 (patch) | |
tree | da1262d040b2c10d95c6fc313b44e18801bcb4a3 /arch/arm/kernel/perf_event_v6.c | |
parent | 1b69beb7684c79673995607939d8acab51056b63 (diff) |
ARM: perf: lock PMU registers per-CPU
Currently, a single lock serialises access to CPU PMU registers. This
global locking is unnecessary as PMU registers are local to the CPU
they monitor.
This patch replaces the global lock with a per-CPU lock. As the lock is
in struct cpu_hw_events, PMUs providing a single cpu_hw_events instance
can be locked globally.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Jamie Iles <jamie@jamieiles.com>
Reviewed-by: Ashwin Chaugule <ashwinc@codeaurora.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'arch/arm/kernel/perf_event_v6.c')
-rw-r--r-- | arch/arm/kernel/perf_event_v6.c | 25 |
1 file changed, 15 insertions, 10 deletions
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c index 839012862264..68cf70425f2f 100644 --- a/arch/arm/kernel/perf_event_v6.c +++ b/arch/arm/kernel/perf_event_v6.c | |||
@@ -433,6 +433,7 @@ armv6pmu_enable_event(struct hw_perf_event *hwc, | |||
433 | int idx) | 433 | int idx) |
434 | { | 434 | { |
435 | unsigned long val, mask, evt, flags; | 435 | unsigned long val, mask, evt, flags; |
436 | struct cpu_hw_events *events = armpmu->get_hw_events(); | ||
436 | 437 | ||
437 | if (ARMV6_CYCLE_COUNTER == idx) { | 438 | if (ARMV6_CYCLE_COUNTER == idx) { |
438 | mask = 0; | 439 | mask = 0; |
@@ -454,12 +455,12 @@ armv6pmu_enable_event(struct hw_perf_event *hwc, | |||
454 | * Mask out the current event and set the counter to count the event | 455 | * Mask out the current event and set the counter to count the event |
455 | * that we're interested in. | 456 | * that we're interested in. |
456 | */ | 457 | */ |
457 | raw_spin_lock_irqsave(&pmu_lock, flags); | 458 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
458 | val = armv6_pmcr_read(); | 459 | val = armv6_pmcr_read(); |
459 | val &= ~mask; | 460 | val &= ~mask; |
460 | val |= evt; | 461 | val |= evt; |
461 | armv6_pmcr_write(val); | 462 | armv6_pmcr_write(val); |
462 | raw_spin_unlock_irqrestore(&pmu_lock, flags); | 463 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
463 | } | 464 | } |
464 | 465 | ||
465 | static int counter_is_active(unsigned long pmcr, int idx) | 466 | static int counter_is_active(unsigned long pmcr, int idx) |
@@ -544,24 +545,26 @@ static void | |||
544 | armv6pmu_start(void) | 545 | armv6pmu_start(void) |
545 | { | 546 | { |
546 | unsigned long flags, val; | 547 | unsigned long flags, val; |
548 | struct cpu_hw_events *events = armpmu->get_hw_events(); | ||
547 | 549 | ||
548 | raw_spin_lock_irqsave(&pmu_lock, flags); | 550 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
549 | val = armv6_pmcr_read(); | 551 | val = armv6_pmcr_read(); |
550 | val |= ARMV6_PMCR_ENABLE; | 552 | val |= ARMV6_PMCR_ENABLE; |
551 | armv6_pmcr_write(val); | 553 | armv6_pmcr_write(val); |
552 | raw_spin_unlock_irqrestore(&pmu_lock, flags); | 554 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
553 | } | 555 | } |
554 | 556 | ||
555 | static void | 557 | static void |
556 | armv6pmu_stop(void) | 558 | armv6pmu_stop(void) |
557 | { | 559 | { |
558 | unsigned long flags, val; | 560 | unsigned long flags, val; |
561 | struct cpu_hw_events *events = armpmu->get_hw_events(); | ||
559 | 562 | ||
560 | raw_spin_lock_irqsave(&pmu_lock, flags); | 563 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
561 | val = armv6_pmcr_read(); | 564 | val = armv6_pmcr_read(); |
562 | val &= ~ARMV6_PMCR_ENABLE; | 565 | val &= ~ARMV6_PMCR_ENABLE; |
563 | armv6_pmcr_write(val); | 566 | armv6_pmcr_write(val); |
564 | raw_spin_unlock_irqrestore(&pmu_lock, flags); | 567 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
565 | } | 568 | } |
566 | 569 | ||
567 | static int | 570 | static int |
@@ -595,6 +598,7 @@ armv6pmu_disable_event(struct hw_perf_event *hwc, | |||
595 | int idx) | 598 | int idx) |
596 | { | 599 | { |
597 | unsigned long val, mask, evt, flags; | 600 | unsigned long val, mask, evt, flags; |
601 | struct cpu_hw_events *events = armpmu->get_hw_events(); | ||
598 | 602 | ||
599 | if (ARMV6_CYCLE_COUNTER == idx) { | 603 | if (ARMV6_CYCLE_COUNTER == idx) { |
600 | mask = ARMV6_PMCR_CCOUNT_IEN; | 604 | mask = ARMV6_PMCR_CCOUNT_IEN; |
@@ -615,12 +619,12 @@ armv6pmu_disable_event(struct hw_perf_event *hwc, | |||
615 | * of ETM bus signal assertion cycles. The external reporting should | 619 | * of ETM bus signal assertion cycles. The external reporting should |
616 | * be disabled and so this should never increment. | 620 | * be disabled and so this should never increment. |
617 | */ | 621 | */ |
618 | raw_spin_lock_irqsave(&pmu_lock, flags); | 622 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
619 | val = armv6_pmcr_read(); | 623 | val = armv6_pmcr_read(); |
620 | val &= ~mask; | 624 | val &= ~mask; |
621 | val |= evt; | 625 | val |= evt; |
622 | armv6_pmcr_write(val); | 626 | armv6_pmcr_write(val); |
623 | raw_spin_unlock_irqrestore(&pmu_lock, flags); | 627 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
624 | } | 628 | } |
625 | 629 | ||
626 | static void | 630 | static void |
@@ -628,6 +632,7 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc, | |||
628 | int idx) | 632 | int idx) |
629 | { | 633 | { |
630 | unsigned long val, mask, flags, evt = 0; | 634 | unsigned long val, mask, flags, evt = 0; |
635 | struct cpu_hw_events *events = armpmu->get_hw_events(); | ||
631 | 636 | ||
632 | if (ARMV6_CYCLE_COUNTER == idx) { | 637 | if (ARMV6_CYCLE_COUNTER == idx) { |
633 | mask = ARMV6_PMCR_CCOUNT_IEN; | 638 | mask = ARMV6_PMCR_CCOUNT_IEN; |
@@ -644,12 +649,12 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc, | |||
644 | * Unlike UP ARMv6, we don't have a way of stopping the counters. We | 649 | * Unlike UP ARMv6, we don't have a way of stopping the counters. We |
645 | * simply disable the interrupt reporting. | 650 | * simply disable the interrupt reporting. |
646 | */ | 651 | */ |
647 | raw_spin_lock_irqsave(&pmu_lock, flags); | 652 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
648 | val = armv6_pmcr_read(); | 653 | val = armv6_pmcr_read(); |
649 | val &= ~mask; | 654 | val &= ~mask; |
650 | val |= evt; | 655 | val |= evt; |
651 | armv6_pmcr_write(val); | 656 | armv6_pmcr_write(val); |
652 | raw_spin_unlock_irqrestore(&pmu_lock, flags); | 657 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
653 | } | 658 | } |
654 | 659 | ||
655 | static struct arm_pmu armv6pmu = { | 660 | static struct arm_pmu armv6pmu = { |