 arch/x86/events/core.c       | 13 +++++++++++++
 arch/x86/events/intel/core.c | 15 +++++++++++++--
 arch/x86/events/intel/knc.c  |  4 +++-
 3 files changed, 29 insertions(+), 3 deletions(-)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 7402c8182813..5e830d0c95c9 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -597,6 +597,19 @@ void x86_pmu_disable_all(void)
 	}
 }
 
+/*
+ * There may be a PMI landing after enabled=0. The PMI could hit either
+ * before or after disable_all.
+ *
+ * If the PMI hits before disable_all, the PMU will be disabled in the NMI
+ * handler. It will not be re-enabled in the NMI handler again, because
+ * enabled=0. After handling the NMI, disable_all will be called, which
+ * does not change the state either. If the PMI hits after disable_all,
+ * the PMU is already disabled before entering the NMI handler, and the
+ * NMI handler will not change the state either.
+ *
+ * Either way, the situation is harmless.
+ */
 static void x86_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
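
The comment above argues that both PMI orderings leave the PMU off. A minimal user-space sketch of that argument follows; the struct, field, and function names are hypothetical stand-ins, not the real kernel types or APIs:

/* Hypothetical model of the race described in the new comment above:
 * "enabled" is the software flag cleared by x86_pmu_disable(), and
 * "hw_enabled" stands in for the hardware global-control state. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_pmu {
	bool enabled;    /* software flag, cleared before disable_all() */
	bool hw_enabled; /* models the global enable bit in hardware */
};

static void disable_all(struct fake_pmu *p) { p->hw_enabled = false; }
static void enable_all(struct fake_pmu *p)  { p->hw_enabled = true;  }

/* Models the NMI handler: disable, handle, restore only if enabled. */
static void pmi(struct fake_pmu *p)
{
	disable_all(p);
	/* ... handle overflowed counters ... */
	if (p->enabled)          /* the guard this patch adds */
		enable_all(p);
}

int main(void)
{
	/* Case 1: PMI lands after enabled=0 but before disable_all(). */
	struct fake_pmu a = { .enabled = true, .hw_enabled = true };
	a.enabled = false;       /* x86_pmu_disable() clears the flag first */
	pmi(&a);                 /* NMI hits: no re-enable, since enabled=0 */
	disable_all(&a);         /* disable_all() then changes nothing */
	assert(!a.hw_enabled);

	/* Case 2: PMI lands after disable_all(). */
	struct fake_pmu b = { .enabled = true, .hw_enabled = true };
	b.enabled = false;
	disable_all(&b);
	pmi(&b);                 /* PMU already off; handler leaves it off */
	assert(!b.hw_enabled);

	puts("both orderings leave the PMU disabled");
	return 0;
}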
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index a7ec685657a5..b3f6349a33b5 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -1502,7 +1502,15 @@ static __initconst const u64 knl_hw_cache_extra_regs
 };
 
 /*
- * Use from PMIs where the LBRs are already disabled.
+ * Used from PMIs where the LBRs are already disabled.
+ *
+ * This function may be called consecutively. The PMU is required to stay
+ * in the disabled state across consecutive calls.
+ *
+ * During consecutive calls, the same disable value is written to the
+ * related registers, so the PMU state remains unchanged. hw.state in
+ * intel_bts_disable_local likewise remains PERF_HES_STOPPED across
+ * consecutive calls.
  */
 static void __intel_pmu_disable_all(void)
 {
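
The idempotency claim above is easy to check in isolation. Below is a hedged user-space sketch, assuming a single global-control register and an OR-style stop flag; fake_wrmsr, global_ctrl, and bts_state are hypothetical stand-ins for wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0) and the hw.state handling referenced in the comment:

/* Hypothetical model: writing the same disable value twice, and OR-ing
 * the same stop flag twice, cannot change the resulting state. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PERF_HES_STOPPED 0x1         /* mirrors the perf hw.state flag */

static uint64_t global_ctrl = ~0ULL; /* stand-in for the enable MSR */
static int bts_state;                /* stand-in for event->hw.state */

static void fake_wrmsr(uint64_t *msr, uint64_t val) { *msr = val; }

static void pmu_disable_all(void)
{
	fake_wrmsr(&global_ctrl, 0);    /* same disable value every call */
	bts_state |= PERF_HES_STOPPED;  /* idempotent: stays stopped */
}

int main(void)
{
	pmu_disable_all();
	uint64_t ctrl_once = global_ctrl;
	int bts_once = bts_state;

	pmu_disable_all();              /* the consecutive call */
	assert(global_ctrl == ctrl_once);
	assert(bts_state == bts_once);

	puts("PMU state unchanged across consecutive disable calls");
	return 0;
}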
@@ -1929,7 +1937,10 @@ again:
 		goto again;
 
 done:
-	__intel_pmu_enable_all(0, true);
+	/* Only restore PMU state when it's active. See x86_pmu_disable(). */
+	if (cpuc->enabled)
+		__intel_pmu_enable_all(0, true);
+
 	/*
 	 * Only unmask the NMI after the overflow counters
 	 * have been reset. This avoids spurious NMIs on
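
To see why the previous unconditional __intel_pmu_enable_all() call was a problem, consider a PMI that lands after x86_pmu_disable() has fully finished. A minimal sketch under that assumption; fake_pmu and handle_pmi are hypothetical names, not kernel code:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_pmu { bool enabled, hw_enabled; };

/* Models the handler tail; "guarded" selects old vs. patched behavior. */
static void handle_pmi(struct fake_pmu *p, bool guarded)
{
	p->hw_enabled = false;           /* __intel_pmu_disable_all() */
	/* ... process counter overflows ... */
	if (!guarded || p->enabled)      /* guarded == the patched path */
		p->hw_enabled = true;    /* __intel_pmu_enable_all(0, true) */
}

int main(void)
{
	/* PMI after x86_pmu_disable() completed: enabled=0, hardware off. */
	struct fake_pmu old = { .enabled = false, .hw_enabled = false };
	handle_pmi(&old, false);
	assert(old.hw_enabled);          /* old code: PMU silently re-armed */

	struct fake_pmu new = { .enabled = false, .hw_enabled = false };
	handle_pmi(&new, true);
	assert(!new.hw_enabled);         /* patched: PMU stays disabled */

	puts("the cpuc->enabled guard keeps a disabled PMU disabled");
	return 0;
}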
diff --git a/arch/x86/events/intel/knc.c b/arch/x86/events/intel/knc.c
index 206226e08f49..548d5f774b07 100644
--- a/arch/x86/events/intel/knc.c
+++ b/arch/x86/events/intel/knc.c
@@ -263,7 +263,9 @@ again:
 		goto again;
 
 done:
-	knc_pmu_enable_all(0);
+	/* Only restore PMU state when it's active. See x86_pmu_disable(). */
+	if (cpuc->enabled)
+		knc_pmu_enable_all(0);
 
 	return handled;
 }