diff options
-rw-r--r-- | arch/x86/kernel/cpu/perf_counter.c | 14 |
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index d4c0cc9d3263..196b58f04448 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -171,7 +171,7 @@ again: | |||
171 | return new_raw_count; | 171 | return new_raw_count; |
172 | } | 172 | } |
173 | 173 | ||
174 | static atomic_t num_counters; | 174 | static atomic_t active_counters; |
175 | static DEFINE_MUTEX(pmc_reserve_mutex); | 175 | static DEFINE_MUTEX(pmc_reserve_mutex); |
176 | 176 | ||
177 | static bool reserve_pmc_hardware(void) | 177 | static bool reserve_pmc_hardware(void) |
@@ -224,7 +224,7 @@ static void release_pmc_hardware(void) | |||
224 | 224 | ||
225 | static void hw_perf_counter_destroy(struct perf_counter *counter) | 225 | static void hw_perf_counter_destroy(struct perf_counter *counter) |
226 | { | 226 | { |
227 | if (atomic_dec_and_mutex_lock(&num_counters, &pmc_reserve_mutex)) { | 227 | if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) { |
228 | release_pmc_hardware(); | 228 | release_pmc_hardware(); |
229 | mutex_unlock(&pmc_reserve_mutex); | 229 | mutex_unlock(&pmc_reserve_mutex); |
230 | } | 230 | } |
@@ -248,12 +248,12 @@ static int __hw_perf_counter_init(struct perf_counter *counter) | |||
248 | return -ENODEV; | 248 | return -ENODEV; |
249 | 249 | ||
250 | err = 0; | 250 | err = 0; |
251 | if (!atomic_inc_not_zero(&num_counters)) { | 251 | if (!atomic_inc_not_zero(&active_counters)) { |
252 | mutex_lock(&pmc_reserve_mutex); | 252 | mutex_lock(&pmc_reserve_mutex); |
253 | if (atomic_read(&num_counters) == 0 && !reserve_pmc_hardware()) | 253 | if (atomic_read(&active_counters) == 0 && !reserve_pmc_hardware()) |
254 | err = -EBUSY; | 254 | err = -EBUSY; |
255 | else | 255 | else |
256 | atomic_inc(&num_counters); | 256 | atomic_inc(&active_counters); |
257 | mutex_unlock(&pmc_reserve_mutex); | 257 | mutex_unlock(&pmc_reserve_mutex); |
258 | } | 258 | } |
259 | if (err) | 259 | if (err) |
@@ -280,7 +280,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter) | |||
280 | if (capable(CAP_SYS_ADMIN) && hw_event->nmi) | 280 | if (capable(CAP_SYS_ADMIN) && hw_event->nmi) |
281 | hwc->nmi = 1; | 281 | hwc->nmi = 1; |
282 | 282 | ||
283 | hwc->irq_period = hw_event->irq_period; | 283 | hwc->irq_period = hw_event->irq_period; |
284 | if ((s64)hwc->irq_period <= 0 || hwc->irq_period > x86_pmu.max_period) | 284 | if ((s64)hwc->irq_period <= 0 || hwc->irq_period > x86_pmu.max_period) |
285 | hwc->irq_period = x86_pmu.max_period; | 285 | hwc->irq_period = x86_pmu.max_period; |
286 | 286 | ||
@@ -871,7 +871,7 @@ perf_counter_nmi_handler(struct notifier_block *self, | |||
871 | struct pt_regs *regs; | 871 | struct pt_regs *regs; |
872 | int ret; | 872 | int ret; |
873 | 873 | ||
874 | if (!atomic_read(&num_counters)) | 874 | if (!atomic_read(&active_counters)) |
875 | return NOTIFY_DONE; | 875 | return NOTIFY_DONE; |
876 | 876 | ||
877 | switch (cmd) { | 877 | switch (cmd) { |