author     Peter Zijlstra <peterz@infradead.org>    2015-06-24 10:47:50 -0400
committer  Ingo Molnar <mingo@kernel.org>           2015-06-30 07:08:46 -0400
commit     93472aff802fd7b61f2209335207e9bd793012f7
tree       9ad37c5991a24356e7fa626d952e2e7541fdc7d7
parent     2d6dac2fcc796a9a2917d69bcab66f6b157fe51b
perf/x86: Fix 'active_events' imbalance
Commit 1b7b938f1817 ("perf/x86/intel: Fix PMI handling for Intel PT") conditionally
increments active_events in x86_add_exclusive() but unconditionally decrements it in
x86_del_exclusive().
These extra decrements can lead to the situation where
active_events is zero and thus the PMI handler is 'disabled'
while we have active events on the PMU generating PMIs.
This leads to a truckload of:

  Uhhuh. NMI received for unknown reason 21 on CPU 28.
  Do you have a strange power saving mode enabled?
  Dazed and confused, but trying to continue

messages and generally messes up perf.
Remove the condition on the increment; a double increment balanced by a
double decrement is perfectly fine.
Restructure the code a little to make the unconditional increment read
more naturally.
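
To make the imbalance concrete, here is a minimal user-space sketch using C11
atomics; it is an illustration, not the kernel code. The names (active_events,
the add/del pair, the LBR special case) mirror the changelog above, while the
surrounding event lifecycle is a hypothetical simplification.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int active_events = 0;          /* PMI handler only does work when this is non-zero */

/* Pre-fix pairing: the increment is conditional ... */
static void add_exclusive(bool is_lbr)
{
        if (!is_lbr)                          /* LBR events are assumed to be counted at init time */
                atomic_fetch_add(&active_events, 1);
}

/* ... but the matching decrement is not. */
static void del_exclusive(void)
{
        atomic_fetch_sub(&active_events, 1);
}

int main(void)
{
        atomic_fetch_add(&active_events, 1);  /* an unrelated event is active and generating PMIs */

        atomic_fetch_add(&active_events, 1);  /* LBR event: counted once at init time ...          */
        add_exclusive(true);                  /* ... so add_exclusive() skips the increment         */
        del_exclusive();                      /* teardown decrements here ...                       */
        atomic_fetch_sub(&active_events, 1);  /* ... and again when the event is destroyed          */

        /* Prints 0 even though the unrelated event is still live; its PMIs now look "unknown". */
        printf("active_events = %d\n", atomic_load(&active_events));
        return 0;
}

With the patch below, both the increment and the decrement are unconditional,
so every add/del pair is self-balancing regardless of the event type.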
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: alexander.shishkin@linux.intel.com
Cc: brgerst@gmail.com
Cc: dvlasenk@redhat.com
Cc: luto@amacapital.net
Cc: oleg@redhat.com
Fixes: 1b7b938f1817 ("perf/x86/intel: Fix PMI handling for Intel PT")
Link: http://lkml.kernel.org/r/20150624144750.GJ18673@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/kernel/cpu/perf_event.c | 36
1 file changed, 13 insertions(+), 23 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 5801a14f7524..3658de47900f 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -357,34 +357,24 @@ void x86_release_hardware(void)
  */
 int x86_add_exclusive(unsigned int what)
 {
-        int ret = -EBUSY, i;
-
-        if (atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what]))
-                return 0;
+        int i;
 
-        mutex_lock(&pmc_reserve_mutex);
-        for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
-                if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
-                        goto out;
+        if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
+                mutex_lock(&pmc_reserve_mutex);
+                for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
+                        if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
+                                goto fail_unlock;
+                }
+                atomic_inc(&x86_pmu.lbr_exclusive[what]);
+                mutex_unlock(&pmc_reserve_mutex);
         }
 
-        atomic_inc(&x86_pmu.lbr_exclusive[what]);
-        ret = 0;
+        atomic_inc(&active_events);
+        return 0;
 
-out:
+fail_unlock:
         mutex_unlock(&pmc_reserve_mutex);
-
-        /*
-         * Assuming that all exclusive events will share the PMI handler
-         * (which checks active_events for whether there is work to do),
-         * we can bump active_events counter right here, except for
-         * x86_lbr_exclusive_lbr events that go through x86_pmu_event_init()
-         * path, which already bumps active_events for them.
-         */
-        if (!ret && what != x86_lbr_exclusive_lbr)
-                atomic_inc(&active_events);
-
-        return ret;
+        return -EBUSY;
 }
 
 void x86_del_exclusive(unsigned int what)