diff options
author | Mark Rutland <mark.rutland@arm.com> | 2011-04-27 06:20:11 -0400 |
---|---|---|
committer | Will Deacon <will.deacon@arm.com> | 2011-08-31 05:50:03 -0400 |
commit | 03b7898d300de62078cc130fbc83b84b1d1e0f8d (patch) | |
tree | d0956ed5223569c138f1222d3b8897282c1536c7 /arch/arm | |
parent | c47f8684baefa2bf52c4320f894e73db08dc8a0a (diff) |
ARM: perf: move active_events into struct arm_pmu
This patch moves the active_events counter into struct arm_pmu, in
preparation for supporting multiple PMUs. This also moves
pmu_reserve_mutex, as it is used to guard accesses to active_events.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Jamie Iles <jamie@jamieiles.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'arch/arm')
-rw-r--r-- | arch/arm/kernel/perf_event.c | 31 |
1 file changed, 20 insertions, 11 deletions
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 438482ff7498..9874395e7e7a 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c | |||
@@ -82,6 +82,8 @@ struct arm_pmu { | |||
82 | const unsigned (*event_map)[PERF_COUNT_HW_MAX]; | 82 | const unsigned (*event_map)[PERF_COUNT_HW_MAX]; |
83 | u32 raw_event_mask; | 83 | u32 raw_event_mask; |
84 | int num_events; | 84 | int num_events; |
85 | atomic_t active_events; | ||
86 | struct mutex reserve_mutex; | ||
85 | u64 max_period; | 87 | u64 max_period; |
86 | }; | 88 | }; |
87 | 89 | ||
@@ -454,15 +456,15 @@ armpmu_reserve_hardware(void) | |||
454 | return 0; | 456 | return 0; |
455 | } | 457 | } |
456 | 458 | ||
457 | static atomic_t active_events = ATOMIC_INIT(0); | ||
458 | static DEFINE_MUTEX(pmu_reserve_mutex); | ||
459 | |||
460 | static void | 459 | static void |
461 | hw_perf_event_destroy(struct perf_event *event) | 460 | hw_perf_event_destroy(struct perf_event *event) |
462 | { | 461 | { |
463 | if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) { | 462 | atomic_t *active_events = &armpmu->active_events; |
463 | struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex; | ||
464 | |||
465 | if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) { | ||
464 | armpmu_release_hardware(); | 466 | armpmu_release_hardware(); |
465 | mutex_unlock(&pmu_reserve_mutex); | 467 | mutex_unlock(pmu_reserve_mutex); |
466 | } | 468 | } |
467 | } | 469 | } |
468 | 470 | ||
@@ -543,6 +545,7 @@ __hw_perf_event_init(struct perf_event *event) | |||
543 | static int armpmu_event_init(struct perf_event *event) | 545 | static int armpmu_event_init(struct perf_event *event) |
544 | { | 546 | { |
545 | int err = 0; | 547 | int err = 0; |
548 | atomic_t *active_events = &armpmu->active_events; | ||
546 | 549 | ||
547 | switch (event->attr.type) { | 550 | switch (event->attr.type) { |
548 | case PERF_TYPE_RAW: | 551 | case PERF_TYPE_RAW: |
@@ -556,15 +559,14 @@ static int armpmu_event_init(struct perf_event *event) | |||
556 | 559 | ||
557 | event->destroy = hw_perf_event_destroy; | 560 | event->destroy = hw_perf_event_destroy; |
558 | 561 | ||
559 | if (!atomic_inc_not_zero(&active_events)) { | 562 | if (!atomic_inc_not_zero(active_events)) { |
560 | mutex_lock(&pmu_reserve_mutex); | 563 | mutex_lock(&armpmu->reserve_mutex); |
561 | if (atomic_read(&active_events) == 0) { | 564 | if (atomic_read(active_events) == 0) |
562 | err = armpmu_reserve_hardware(); | 565 | err = armpmu_reserve_hardware(); |
563 | } | ||
564 | 566 | ||
565 | if (!err) | 567 | if (!err) |
566 | atomic_inc(&active_events); | 568 | atomic_inc(active_events); |
567 | mutex_unlock(&pmu_reserve_mutex); | 569 | mutex_unlock(&armpmu->reserve_mutex); |
568 | } | 570 | } |
569 | 571 | ||
570 | if (err) | 572 | if (err) |
@@ -613,6 +615,12 @@ static struct pmu pmu = { | |||
613 | .read = armpmu_read, | 615 | .read = armpmu_read, |
614 | }; | 616 | }; |
615 | 617 | ||
618 | static void __init armpmu_init(struct arm_pmu *armpmu) | ||
619 | { | ||
620 | atomic_set(&armpmu->active_events, 0); | ||
621 | mutex_init(&armpmu->reserve_mutex); | ||
622 | } | ||
623 | |||
616 | /* Include the PMU-specific implementations. */ | 624 | /* Include the PMU-specific implementations. */ |
617 | #include "perf_event_xscale.c" | 625 | #include "perf_event_xscale.c" |
618 | #include "perf_event_v6.c" | 626 | #include "perf_event_v6.c" |
@@ -718,6 +726,7 @@ init_hw_perf_events(void) | |||
718 | if (armpmu) { | 726 | if (armpmu) { |
719 | pr_info("enabled with %s PMU driver, %d counters available\n", | 727 | pr_info("enabled with %s PMU driver, %d counters available\n", |
720 | armpmu->name, armpmu->num_events); | 728 | armpmu->name, armpmu->num_events); |
729 | armpmu_init(armpmu); | ||
721 | perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); | 730 | perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); |
722 | } else { | 731 | } else { |
723 | pr_info("no hardware support available\n"); | 732 | pr_info("no hardware support available\n"); |