aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kvm/x86.c
diff options
context:
space:
mode:
authorAndre Przywara <andre.przywara@amd.com>2009-06-12 16:01:29 -0400
committerAvi Kivity <avi@redhat.com>2009-09-10 01:32:54 -0400
commit71db602322b1197e7951655c46339324b6208bf9 (patch)
tree202daeb54a853ea09775d0f66c62a58c025a9997 /arch/x86/kvm/x86.c
parent2920d7285740582d6101f32c37d8d54f82531e1e (diff)
KVM: Move performance counter MSR access interception to generic x86 path
The performance counter MSRs are different for AMD and Intel CPUs and they are chosen mainly by the CPUID vendor string. This patch catches writes to all addresses (regardless of VMX/SVM path) and handles them in the generic MSR handler routine. Writing a 0 into the event select register is something we perfectly emulate ;-), so don't print out a warning to dmesg in this case. This fixes booting a 64bit Windows guest with an AMD CPUID on an Intel host. Signed-off-by: Andre Przywara <andre.przywara@amd.com> Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--arch/x86/kvm/x86.c30
1 file changed, 30 insertions, 0 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 89862a80e32c..30492f0ba4ea 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -886,6 +886,36 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
886 case MSR_IA32_MCG_STATUS: 886 case MSR_IA32_MCG_STATUS:
887 case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1: 887 case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
888 return set_msr_mce(vcpu, msr, data); 888 return set_msr_mce(vcpu, msr, data);
889
890 /* Performance counters are not protected by a CPUID bit,
891 * so we should check all of them in the generic path for the sake of
892 * cross vendor migration.
893 * Writing a zero into the event select MSRs disables them,
894 * which we perfectly emulate ;-). Any other value should be at least
895 * reported, some guests depend on them.
896 */
897 case MSR_P6_EVNTSEL0:
898 case MSR_P6_EVNTSEL1:
899 case MSR_K7_EVNTSEL0:
900 case MSR_K7_EVNTSEL1:
901 case MSR_K7_EVNTSEL2:
902 case MSR_K7_EVNTSEL3:
903 if (data != 0)
904 pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
905 "0x%x data 0x%llx\n", msr, data);
906 break;
907 /* at least RHEL 4 unconditionally writes to the perfctr registers,
908 * so we ignore writes to make it happy.
909 */
910 case MSR_P6_PERFCTR0:
911 case MSR_P6_PERFCTR1:
912 case MSR_K7_PERFCTR0:
913 case MSR_K7_PERFCTR1:
914 case MSR_K7_PERFCTR2:
915 case MSR_K7_PERFCTR3:
916 pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
917 "0x%x data 0x%llx\n", msr, data);
918 break;
889 default: 919 default:
890 pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data); 920 pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data);
891 return 1; 921 return 1;