author    Joerg Roedel <joerg.roedel@amd.com>    2012-02-29 08:57:32 -0500
committer Ingo Molnar <mingo@elte.hu>            2012-03-02 06:16:39 -0500
commit    1018faa6cf23b256bf25919ef203cd7c129f06f2
tree      16f8f223062c41e29431edfa71e66832101dee4e
parent    5d85d97c9f6973ba854f35a2d5e80fe68272143e
perf/x86/kvm: Fix Host-Only/Guest-Only counting with SVM disabled
It turned out that a performance counter on AMD does not count at all
when the GO (guest-only) or HO (host-only) bit is set in the counter
control register while SVM is disabled in EFER.

This patch works around the issue by masking out the HO bit in the
performance counter control register when SVM is not enabled.

The GO bit is not touched: it is only set when the user asks to count
in guest mode only, so with SVM disabled such a counter should not run
at all, and not counting is the intended behaviour.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Avi Kivity <avi@redhat.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: stable@vger.kernel.org # v3.2
Link: http://lkml.kernel.org/r/1330523852-19566-1-git-send-email-joerg.roedel@amd.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
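For reference, the GO and HO flags sit in bits 40 and 41 of the AMD
event-select (PerfEvtSel) MSRs. A minimal sketch of the masking idea,
using the bit definitions from asm/perf_event.h (the snippet itself is
illustrative, not part of the patch):

	#define AMD_PERFMON_EVENTSEL_GUESTONLY	(1ULL << 40)	/* GO */
	#define AMD_PERFMON_EVENTSEL_HOSTONLY	(1ULL << 41)	/* HO */

	/* With SVM disabled in EFER, a config with HO set counts nothing,
	 * so the workaround strips HO before the value reaches the MSR: */
	u64 val = config | enable_mask;			/* previous behaviour */
	val &= ~AMD_PERFMON_EVENTSEL_HOSTONLY;		/* applied when !SVM  */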
 arch/x86/include/asm/perf_event.h    |  8 ++++++++
 arch/x86/kernel/cpu/perf_event.h     |  8 ++++++--
 arch/x86/kernel/cpu/perf_event_amd.c | 37 +++++++++++++++++++++++++++++++++++--
 arch/x86/kvm/svm.c                   |  5 +++++
 4 files changed, 54 insertions(+), 4 deletions(-)
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 096c975e099f..461ce432b1c2 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -242,4 +242,12 @@ static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
 static inline void perf_events_lapic_init(void)	{ }
 #endif
 
+#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
+ extern void amd_pmu_enable_virt(void);
+ extern void amd_pmu_disable_virt(void);
+#else
+static inline void amd_pmu_enable_virt(void) { }
+static inline void amd_pmu_disable_virt(void) { }
+#endif
+
 #endif /* _ASM_X86_PERF_EVENT_H */
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 8944062f46e2..c30c807ddc72 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -147,7 +147,9 @@ struct cpu_hw_events {
 	/*
 	 * AMD specific bits
 	 */
-	struct amd_nb		*amd_nb;
+	struct amd_nb			*amd_nb;
+	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
+	u64				perf_ctr_virt_mask;
 
 	void				*kfree_on_online;
 };
@@ -417,9 +419,11 @@ void x86_pmu_disable_all(void);
 static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
 					  u64 enable_mask)
 {
+	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
+
 	if (hwc->extra_reg.reg)
 		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
-	wrmsrl(hwc->config_base, hwc->config | enable_mask);
+	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
 }
 
 void x86_pmu_enable_all(int added);
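To see the new per-CPU disable mask at work, here is a small standalone
sketch (userspace C with a hypothetical event selection; not part of the
patch) of the arithmetic __x86_pmu_enable_event() now performs when SVM
is disabled:

	#include <stdio.h>
	#include <stdint.h>

	#define ARCH_PERFMON_EVENTSEL_ENABLE	(1ULL << 22)	/* EN: counter enable  */
	#define AMD_PERFMON_EVENTSEL_HOSTONLY	(1ULL << 41)	/* HO: host-only count */

	int main(void)
	{
		/* Event 0x76 (CPU cycles) with the host-only bit set by the user. */
		uint64_t config       = 0x76ULL | AMD_PERFMON_EVENTSEL_HOSTONLY;
		uint64_t enable_mask  = ARCH_PERFMON_EVENTSEL_ENABLE;
		uint64_t disable_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;	/* SVM off */

		/* Mirrors the new wrmsrl() argument:
		 * (hwc->config | enable_mask) & ~disable_mask */
		uint64_t val = (config | enable_mask) & ~disable_mask;

		printf("PERF_CTL value: %#llx (HO %s)\n", (unsigned long long)val,
		       (val & AMD_PERFMON_EVENTSEL_HOSTONLY) ? "set" : "cleared");
		return 0;
	}

With SVM enabled the mask is zero and the written value is unchanged, so
the fast path pays only one extra per-CPU read.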
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 0397b23be8e9..67250a52430b 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -1,4 +1,5 @@
 #include <linux/perf_event.h>
+#include <linux/export.h>
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -357,7 +358,9 @@ static void amd_pmu_cpu_starting(int cpu)
 	struct amd_nb *nb;
 	int i, nb_id;
 
-	if (boot_cpu_data.x86_max_cores < 2)
+	cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
+
+	if (boot_cpu_data.x86_max_cores < 2 || boot_cpu_data.x86 == 0x15)
 		return;
 
 	nb_id = amd_get_nb_id(cpu);
@@ -587,9 +590,9 @@ static __initconst const struct x86_pmu amd_pmu_f15h = {
 	.put_event_constraints	= amd_put_event_constraints,
 
 	.cpu_prepare		= amd_pmu_cpu_prepare,
-	.cpu_starting		= amd_pmu_cpu_starting,
 	.cpu_dead		= amd_pmu_cpu_dead,
 #endif
+	.cpu_starting		= amd_pmu_cpu_starting,
 };
 
 __init int amd_pmu_init(void)
@@ -621,3 +624,33 @@ __init int amd_pmu_init(void)
 
 	return 0;
 }
+
+void amd_pmu_enable_virt(void)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+	cpuc->perf_ctr_virt_mask = 0;
+
+	/* Reload all events */
+	x86_pmu_disable_all();
+	x86_pmu_enable_all(0);
+}
+EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
+
+void amd_pmu_disable_virt(void)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+	/*
+	 * We only mask out the Host-only bit so that host-only counting works
+	 * when SVM is disabled. If someone sets up a guest-only counter when
+	 * SVM is disabled the Guest-only bit still gets set and the counter
+	 * will not count anything.
+	 */
+	cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
+
+	/* Reload all events */
+	x86_pmu_disable_all();
+	x86_pmu_enable_all(0);
+}
+EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
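Both helpers act on the current CPU only, which is why KVM invokes them
from its per-CPU hardware enable/disable paths (the svm.c hunks below).
A hedged sketch of how another in-kernel caller would apply them across
all CPUs; the on_each_cpu() wrapper here is illustrative and not
something the patch adds:

	static void __amd_pmu_enable_virt(void *unused)
	{
		amd_pmu_enable_virt();	/* clears perf_ctr_virt_mask locally */
	}

	static void example_enable_virt_everywhere(void)
	{
		on_each_cpu(__amd_pmu_enable_virt, NULL, 1);	/* wait = 1 */
	}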
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 5fa553babe56..e385214711cb 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -29,6 +29,7 @@
 #include <linux/ftrace_event.h>
 #include <linux/slab.h>
 
+#include <asm/perf_event.h>
 #include <asm/tlbflush.h>
 #include <asm/desc.h>
 #include <asm/kvm_para.h>
@@ -575,6 +576,8 @@ static void svm_hardware_disable(void *garbage)
 	wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
 
 	cpu_svm_disable();
+
+	amd_pmu_disable_virt();
 }
 
 static int svm_hardware_enable(void *garbage)
@@ -622,6 +625,8 @@ static int svm_hardware_enable(void *garbage)
 
 	svm_init_erratum_383();
 
+	amd_pmu_enable_virt();
+
 	return 0;
 }
 