author | Mark Rutland <mark.rutland@arm.com> | 2011-05-17 04:15:38 -0400
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2011-05-20 17:39:17 -0400
commit | 57ce9bb39b476accf8fba6e16aea67ed76ea523d (patch)
tree | 8fb318ab6ffe5b05b691e452d85469ff7a187e04 /arch/arm/kernel/perf_event.c
parent | 31bee4cf0e74e9c962d481a68452debaf45ed4ac (diff)
ARM: 6902/1: perf: Remove erroneous check on active_events
When initialising a PMU, there is a check to protect against races with
other CPUs filling all of the available event slots. Since armpmu_add
checks that an event can be scheduled, we do not need to do this at
initialisation time. Furthermore, the current code is broken because it
assumes that atomic_inc_not_zero will unconditionally increment
active_events and then tries to decrement it again on failure.
This patch removes the broken, redundant code.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Jamie Iles <jamie@jamieiles.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
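Editorial note on the reasoning above: atomic_inc_not_zero() only increments the counter when it is already non-zero and returns false otherwise, so on the failure path nothing was added and a compensating atomic_dec() (as in the removed branch) would drive the count negative. The sketch below is a minimal user-space illustration of the refcounting pattern the remaining code relies on, using C11 atomics and a pthread mutex in place of the kernel's atomic_t and pmu_reserve_mutex. The helpers inc_not_zero(), reserve_hardware() and event_init() are hypothetical stand-ins for this illustration, not kernel APIs.

```c
/*
 * User-space sketch (assumption: not kernel code) of the "increment if
 * non-zero, otherwise take the mutex and reserve" pattern used by
 * armpmu_event_init() after this patch.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int active_events;
static pthread_mutex_t reserve_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Emulates the kernel's atomic_inc_not_zero(): increment only if the
 * current value is non-zero, and report whether an increment happened.
 */
static bool inc_not_zero(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != 0) {
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return true;	/* incremented */
	}
	return false;			/* value was 0: nothing was incremented */
}

static int reserve_hardware(void)
{
	/* stand-in for armpmu_reserve_hardware() */
	puts("reserving PMU hardware for the first active event");
	return 0;
}

static int event_init(void)
{
	int err = 0;

	if (!inc_not_zero(&active_events)) {
		/*
		 * inc_not_zero() returning false means the count is still 0
		 * and was NOT incremented, so decrementing here (as the
		 * removed check did) would push it negative. Instead, the
		 * first user takes the mutex, reserves the hardware, and
		 * only then bumps the count.
		 */
		pthread_mutex_lock(&reserve_mutex);
		if (atomic_load(&active_events) == 0)
			err = reserve_hardware();
		if (!err)
			atomic_fetch_add(&active_events, 1);
		pthread_mutex_unlock(&reserve_mutex);
	}

	return err;
}

int main(void)
{
	printf("first init: %d, count = %d\n", event_init(),
	       atomic_load(&active_events));
	printf("second init: %d, count = %d\n", event_init(),
	       atomic_load(&active_events));
	return 0;
}
```

Compiled with `cc -std=c11 -pthread`, the first call reserves the hardware under the mutex and the second takes the lock-free fast path, which is why no slot-count check is needed at initialisation time.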
Diffstat (limited to 'arch/arm/kernel/perf_event.c')
-rw-r--r-- | arch/arm/kernel/perf_event.c | 5
1 file changed, 0 insertions, 5 deletions
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 139e3c827369..d53c0abc4dd3 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -560,11 +560,6 @@ static int armpmu_event_init(struct perf_event *event)
 	event->destroy = hw_perf_event_destroy;
 
 	if (!atomic_inc_not_zero(&active_events)) {
-		if (atomic_read(&active_events) > armpmu->num_events) {
-			atomic_dec(&active_events);
-			return -ENOSPC;
-		}
-
 		mutex_lock(&pmu_reserve_mutex);
 		if (atomic_read(&active_events) == 0) {
 			err = armpmu_reserve_hardware();