-rw-r--r--  arch/x86/include/asm/perf_event.h    |  8
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h     |  8
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd.c | 37
-rw-r--r--  arch/x86/kvm/svm.c                   |  5
4 files changed, 54 insertions, 4 deletions
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 096c975e099f..461ce432b1c2 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -242,4 +242,12 @@ static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
 static inline void perf_events_lapic_init(void)	{ }
 #endif
 
+#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
+ extern void amd_pmu_enable_virt(void);
+ extern void amd_pmu_disable_virt(void);
+#else
+ static inline void amd_pmu_enable_virt(void) { }
+ static inline void amd_pmu_disable_virt(void) { }
+#endif
+
 #endif /* _ASM_X86_PERF_EVENT_H */
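The header pairs real declarations with empty inline stubs, so call sites such as svm.c can invoke the hooks unconditionally, with no #ifdef at the call site. A minimal sketch of the same declare-or-stub idiom, using hypothetical names rather than the kernel's:

/* HAVE_WIDGET stands in for the kernel's config symbols; hypothetical. */
#if defined(HAVE_WIDGET)
extern void widget_start(void);            /* real implementation in widget.c */
#else
static inline void widget_start(void) { }  /* compiles to nothing when absent */
#endif

void driver_init(void)
{
	widget_start();  /* always safe to call in either configuration */
}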
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 8944062f46e2..c30c807ddc72 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -147,7 +147,9 @@ struct cpu_hw_events {
 	/*
 	 * AMD specific bits
 	 */
 	struct amd_nb		*amd_nb;
+	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
+	u64			perf_ctr_virt_mask;
 
 	void			*kfree_on_online;
 };
@@ -417,9 +419,11 @@ void x86_pmu_disable_all(void);
 static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
 					  u64 enable_mask)
 {
+	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
+
 	if (hwc->extra_reg.reg)
 		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
-	wrmsrl(hwc->config_base, hwc->config | enable_mask);
+	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
 }
 
 void x86_pmu_enable_all(int added);
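The mask is applied last, after the enable bit is ORed in, so a bit recorded in perf_ctr_virt_mask can never reach the hardware no matter what the event's config requests. A standalone sketch of the arithmetic, assuming the host-only bit sits at bit 41 of the AMD event-select register (the HG field per the APM) and the enable bit at its architectural position 22:

#include <stdio.h>
#include <stdint.h>

#define AMD_PERFMON_EVENTSEL_HOSTONLY	(1ULL << 41)	/* HG field, host half */
#define ARCH_PERFMON_EVENTSEL_ENABLE	(1ULL << 22)	/* EN bit */

int main(void)
{
	/* Event 0x76 (CPU clocks not halted) with the host-only bit requested. */
	uint64_t config = 0x76 | AMD_PERFMON_EVENTSEL_HOSTONLY;
	/* SVM disabled: the per-CPU mask holds the host-only bit. */
	uint64_t disable_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;

	/* The same expression the patch writes to hwc->config_base. */
	uint64_t val = (config | ARCH_PERFMON_EVENTSEL_ENABLE) & ~disable_mask;

	printf("programmed value: %#llx, host-only bit %s\n",
	       (unsigned long long)val,
	       (val & AMD_PERFMON_EVENTSEL_HOSTONLY) ? "kept" : "stripped");
	return 0;
}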
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 0397b23be8e9..67250a52430b 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -1,4 +1,5 @@
 #include <linux/perf_event.h>
+#include <linux/export.h>
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -357,7 +358,9 @@ static void amd_pmu_cpu_starting(int cpu)
 	struct amd_nb *nb;
 	int i, nb_id;
 
-	if (boot_cpu_data.x86_max_cores < 2)
+	cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
+
+	if (boot_cpu_data.x86_max_cores < 2 || boot_cpu_data.x86 == 0x15)
 		return;
 
 	nb_id = amd_get_nb_id(cpu);
@@ -587,9 +590,9 @@ static __initconst const struct x86_pmu amd_pmu_f15h = {
 	.put_event_constraints	= amd_put_event_constraints,
 
 	.cpu_prepare		= amd_pmu_cpu_prepare,
-	.cpu_starting		= amd_pmu_cpu_starting,
 	.cpu_dead		= amd_pmu_cpu_dead,
 #endif
+	.cpu_starting		= amd_pmu_cpu_starting,
 };
 
 __init int amd_pmu_init(void)
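Because amd_pmu_cpu_starting() now seeds perf_ctr_virt_mask, it must be registered in every configuration; moving the initializer below the #endif takes it out of the conditional block it previously shared with the other hotplug callbacks (the opening guard is outside this hunk). A compilable sketch of the pattern, with illustrative names and a made-up config symbol:

struct pmu_ops {
	void (*cpu_prepare)(int cpu);
	void (*cpu_starting)(int cpu);
	void (*cpu_dead)(int cpu);
};

#ifdef CONFIG_EXTRA_SUPPORT
static void prepare_impl(int cpu) { (void)cpu; }
static void dead_impl(int cpu)    { (void)cpu; }
#endif

static void starting_impl(int cpu) { (void)cpu; /* per-CPU init, always needed */ }

static const struct pmu_ops example_ops = {
#ifdef CONFIG_EXTRA_SUPPORT
	.cpu_prepare  = prepare_impl,	/* only matters to the optional code */
	.cpu_dead     = dead_impl,
#endif
	.cpu_starting = starting_impl,	/* outside the guard: always set */
};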
@@ -621,3 +624,33 @@ __init int amd_pmu_init(void)
 
 	return 0;
 }
+
+void amd_pmu_enable_virt(void)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+	cpuc->perf_ctr_virt_mask = 0;
+
+	/* Reload all events */
+	x86_pmu_disable_all();
+	x86_pmu_enable_all(0);
+}
+EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
+
+void amd_pmu_disable_virt(void)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+	/*
+	 * We only mask out the Host-only bit so that host-only counting works
+	 * when SVM is disabled. If someone sets up a guest-only counter when
+	 * SVM is disabled, the Guest-only bit still gets set and the counter
+	 * will not count anything.
+	 */
+	cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
+
+	/* Reload all events */
+	x86_pmu_disable_all();
+	x86_pmu_enable_all(0);
+}
+EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
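Taken together, the mask has exactly two per-CPU states: AMD_PERFMON_EVENTSEL_HOSTONLY while SVM is off (also the boot-time default from amd_pmu_cpu_starting()) and 0 while SVM is on, with every event reprogrammed on each transition. A small simulation of what __x86_pmu_enable_event() ends up writing in each state, assuming bit 41 for host-only and bit 40 for guest-only (the HG field):

#include <stdio.h>
#include <stdint.h>

#define GUESTONLY (1ULL << 40)	/* assumed guest-only bit position */
#define HOSTONLY  (1ULL << 41)	/* assumed host-only bit position */

/* Mimic the masking in __x86_pmu_enable_event(). */
static uint64_t programmed(uint64_t config, uint64_t virt_mask)
{
	return config & ~virt_mask;
}

int main(void)
{
	uint64_t host_cfg  = 0x76 | HOSTONLY;	/* host-only cycle counting */
	uint64_t guest_cfg = 0x76 | GUESTONLY;	/* guest-only cycle counting */

	/* SVM off: host-only bit stripped, so host-only counting works. */
	printf("host-only, svm off:  %#llx\n",
	       (unsigned long long)programmed(host_cfg, HOSTONLY));

	/* SVM off: guest-only bit survives; the counter reads zero (intended). */
	printf("guest-only, svm off: %#llx\n",
	       (unsigned long long)programmed(guest_cfg, HOSTONLY));

	/* SVM on: mask is 0, both bits reach the hardware unchanged. */
	printf("host-only, svm on:   %#llx\n",
	       (unsigned long long)programmed(host_cfg, 0));
	return 0;
}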
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 5fa553babe56..e385214711cb 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -29,6 +29,7 @@
 #include <linux/ftrace_event.h>
 #include <linux/slab.h>
 
+#include <asm/perf_event.h>
 #include <asm/tlbflush.h>
 #include <asm/desc.h>
 #include <asm/kvm_para.h>
@@ -575,6 +576,8 @@ static void svm_hardware_disable(void *garbage)
 		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
 
 	cpu_svm_disable();
+
+	amd_pmu_disable_virt();
 }
 
 static int svm_hardware_enable(void *garbage)
@@ -622,6 +625,8 @@ static int svm_hardware_enable(void *garbage)
 
 	svm_init_erratum_383();
 
+	amd_pmu_enable_virt();
+
 	return 0;
 }
 
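From userspace the fix matters for any event that sets the host-only bit; before this patch such a counter read zero whenever SVM was disabled. An end-to-end check using the real perf_event_open(2) interface, on the assumption that attr.exclude_guest is what maps to the AMD host-only bit (that wiring lives in related commits, not this one); a sketch with minimal error handling:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;
	attr.exclude_guest = 1;	/* assumed to select the AMD host-only bit */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	for (volatile int i = 0; i < 1000000; i++)
		;	/* burn some cycles to count */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) != sizeof(count))
		return 1;
	/* With this patch: nonzero even when SVM is off. Without: zero. */
	printf("host-only cycles: %lld\n", count);
	close(fd);
	return 0;
}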
