author     Andre Przywara <andre.przywara@amd.com>    2009-06-12 16:01:29 -0400
committer  Avi Kivity <avi@redhat.com>                2009-09-10 01:32:54 -0400
commit     71db602322b1197e7951655c46339324b6208bf9
tree       202daeb54a853ea09775d0f66c62a58c025a9997
parent     2920d7285740582d6101f32c37d8d54f82531e1e
KVM: Move performance counter MSR access interception to generic x86 path
The performance counter MSRs are different for AMD and Intel CPUs and they
are chosen mainly by the CPUID vendor string. This patch catches writes to
all addresses (regardless of VMX/SVM path) and handles them in the generic
MSR handler routine. Writing a 0 into the event select register is
something we perfectly emulate ;-), so don't print out a warning to dmesg
in this case.

This fixes booting a 64bit Windows guest with an AMD CPUID on an Intel host.

Signed-off-by: Andre Przywara <andre.przywara@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
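For readers who want the net effect without walking the diff, here is a minimal,
userspace-only sketch of the dispatch that kvm_set_msr_common() gains in the
x86.c hunk below. It is illustrative, not the kernel code: the helper name
set_perfctr_msr(), the main() harness and the printf() standing in for
pr_unimpl() are invented for the example, and only a subset of the handled
registers is shown; the MSR numbers themselves are the architectural ones.

#include <stdint.h>
#include <stdio.h>

#define MSR_P6_PERFCTR0 0x000000c1
#define MSR_P6_PERFCTR1 0x000000c2
#define MSR_P6_EVNTSEL0 0x00000186
#define MSR_P6_EVNTSEL1 0x00000187
#define MSR_K7_EVNTSEL0 0xc0010000
#define MSR_K7_PERFCTR0 0xc0010004

/* Mirrors the new generic-path behaviour: every perfctr MSR write is
 * accepted (return 0, so no #GP reaches the guest); event-select writes
 * are only reported when the value is non-zero, because writing zero just
 * disables the counter and that much is emulated correctly. */
static int set_perfctr_msr(uint32_t msr, uint64_t data)
{
        switch (msr) {
        case MSR_P6_EVNTSEL0:
        case MSR_P6_EVNTSEL1:
        case MSR_K7_EVNTSEL0:
                if (data != 0)
                        printf("unimplemented perfctr wrmsr: 0x%x data 0x%llx\n",
                               msr, (unsigned long long)data);
                return 0;
        case MSR_P6_PERFCTR0:
        case MSR_P6_PERFCTR1:
        case MSR_K7_PERFCTR0:
                printf("unimplemented perfctr wrmsr: 0x%x data 0x%llx\n",
                       msr, (unsigned long long)data);
                return 0;
        default:
                return 1;       /* unhandled: the caller would inject #GP */
        }
}

int main(void)
{
        set_perfctr_msr(MSR_K7_EVNTSEL0, 0);       /* silent: counter disabled */
        set_perfctr_msr(MSR_K7_EVNTSEL0, 0x1234);  /* reported, then discarded */
        set_perfctr_msr(MSR_P6_PERFCTR0, 0);       /* reported, then discarded */
        return 0;
}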
-rw-r--r--   arch/x86/kvm/svm.c   16
-rw-r--r--   arch/x86/kvm/vmx.c   12
-rw-r--r--   arch/x86/kvm/x86.c   30
3 files changed, 30 insertions, 28 deletions
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 28b981409a8..060aa9f5571 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2142,22 +2142,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
                 else
                         svm_disable_lbrv(svm);
                 break;
-        case MSR_K7_EVNTSEL0:
-        case MSR_K7_EVNTSEL1:
-        case MSR_K7_EVNTSEL2:
-        case MSR_K7_EVNTSEL3:
-        case MSR_K7_PERFCTR0:
-        case MSR_K7_PERFCTR1:
-        case MSR_K7_PERFCTR2:
-        case MSR_K7_PERFCTR3:
-                /*
-                 * Just discard all writes to the performance counters; this
-                 * should keep both older linux and windows 64-bit guests
-                 * happy
-                 */
-                pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", ecx, data);
-
-                break;
         case MSR_VM_HSAVE_PA:
                 svm->hsave_msr = data;
                 break;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c08bb4cf372..6ee929255a3 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1025,18 +1025,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
                 rdtscll(host_tsc);
                 guest_write_tsc(data, host_tsc);
                 break;
-        case MSR_P6_PERFCTR0:
-        case MSR_P6_PERFCTR1:
-        case MSR_P6_EVNTSEL0:
-        case MSR_P6_EVNTSEL1:
-                /*
-                 * Just discard all writes to the performance counters; this
-                 * should keep both older linux and windows 64-bit guests
-                 * happy
-                 */
-                pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", msr_index, data);
-
-                break;
         case MSR_IA32_CR_PAT:
                 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
                         vmcs_write64(GUEST_IA32_PAT, data);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 89862a80e32..30492f0ba4e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -886,6 +886,36 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
         case MSR_IA32_MCG_STATUS:
         case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
                 return set_msr_mce(vcpu, msr, data);
+
+        /* Performance counters are not protected by a CPUID bit,
+         * so we should check all of them in the generic path for the sake of
+         * cross vendor migration.
+         * Writing a zero into the event select MSRs disables them,
+         * which we perfectly emulate ;-). Any other value should be at least
+         * reported, some guests depend on them.
+         */
+        case MSR_P6_EVNTSEL0:
+        case MSR_P6_EVNTSEL1:
+        case MSR_K7_EVNTSEL0:
+        case MSR_K7_EVNTSEL1:
+        case MSR_K7_EVNTSEL2:
+        case MSR_K7_EVNTSEL3:
+                if (data != 0)
+                        pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
+                                "0x%x data 0x%llx\n", msr, data);
+                break;
+        /* at least RHEL 4 unconditionally writes to the perfctr registers,
+         * so we ignore writes to make it happy.
+         */
+        case MSR_P6_PERFCTR0:
+        case MSR_P6_PERFCTR1:
+        case MSR_K7_PERFCTR0:
+        case MSR_K7_PERFCTR1:
+        case MSR_K7_PERFCTR2:
+        case MSR_K7_PERFCTR3:
+                pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
+                        "0x%x data 0x%llx\n", msr, data);
+                break;
         default:
                 pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data);
                 return 1;
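For context on why the generic-path handling (and the RHEL 4 note above) matters,
the guest-side access that used to fail looks roughly like the sketch below. A
guest that trusts the AMD vendor string in CPUID programs the K7 counter MSRs
early on; before this patch the Intel (VMX) path had no case for those MSRs, the
write was refused, and per the commit message that broke booting a 64-bit Windows
guest with an AMD CPUID on an Intel host. The wrmsr() helper and the MSR numbers
are standard; the surrounding function is hypothetical, shown only to illustrate
the access pattern, and would have to run at CPL 0 inside the guest.

#include <stdint.h>

#define MSR_K7_EVNTSEL0 0xc0010000
#define MSR_K7_PERFCTR0 0xc0010004

/* Standard x86 MSR write; privileged, guest kernel context only. */
static inline void wrmsr(uint32_t msr, uint64_t val)
{
        uint32_t lo = (uint32_t)val, hi = (uint32_t)(val >> 32);

        asm volatile("wrmsr" : : "c"(msr), "a"(lo), "d"(hi) : "memory");
}

/* Hypothetical guest boot code that zeroes AMD's first counter pair.
 * With this patch the host absorbs both writes instead of rejecting them. */
static void disable_k7_counter0(void)
{
        wrmsr(MSR_K7_PERFCTR0, 0);
        wrmsr(MSR_K7_EVNTSEL0, 0);
}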