about · summary · refs · log · tree · commit · diff · stats
path: root/arch/x86/kvm/svm.c
diff options
context:
space:
mode:
authorJoerg Roedel <joerg.roedel@amd.com>2010-02-24 12:59:15 -0500
committerAvi Kivity <avi@redhat.com>2010-04-25 06:53:14 -0400
commit4a810181c8bcd73112f5c62b205b5583fd4a197f (patch)
tree4e5d4e85771d860c85953b7c2ddfa23f0b5df129 /arch/x86/kvm/svm.c
parent2e554e8d67926024b01e97d2fe652810165354e2 (diff)
KVM: SVM: Implement emulation of vm_cr msr
This patch implements the emulation of the vm_cr msr for nested svm.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/svm.c')
-rw-r--r--  arch/x86/kvm/svm.c | 29
1 file changed, 28 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index cac761c6d1dc..b4aac5c7ad87 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -71,6 +71,7 @@ struct kvm_vcpu;
71struct nested_state { 71struct nested_state {
72 struct vmcb *hsave; 72 struct vmcb *hsave;
73 u64 hsave_msr; 73 u64 hsave_msr;
74 u64 vm_cr_msr;
74 u64 vmcb; 75 u64 vmcb;
75 76
76 /* These are the merged vectors */ 77 /* These are the merged vectors */
@@ -2280,7 +2281,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
2280 *data = svm->nested.hsave_msr; 2281 *data = svm->nested.hsave_msr;
2281 break; 2282 break;
2282 case MSR_VM_CR: 2283 case MSR_VM_CR:
2283 *data = 0; 2284 *data = svm->nested.vm_cr_msr;
2284 break; 2285 break;
2285 case MSR_IA32_UCODE_REV: 2286 case MSR_IA32_UCODE_REV:
2286 *data = 0x01000065; 2287 *data = 0x01000065;
@@ -2310,6 +2311,31 @@ static int rdmsr_interception(struct vcpu_svm *svm)
2310 return 1; 2311 return 1;
2311} 2312}
2312 2313
2314static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
2315{
2316 struct vcpu_svm *svm = to_svm(vcpu);
2317 int svm_dis, chg_mask;
2318
2319 if (data & ~SVM_VM_CR_VALID_MASK)
2320 return 1;
2321
2322 chg_mask = SVM_VM_CR_VALID_MASK;
2323
2324 if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
2325 chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
2326
2327 svm->nested.vm_cr_msr &= ~chg_mask;
2328 svm->nested.vm_cr_msr |= (data & chg_mask);
2329
2330 svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
2331
2332 /* check for svm_disable while efer.svme is set */
2333 if (svm_dis && (vcpu->arch.efer & EFER_SVME))
2334 return 1;
2335
2336 return 0;
2337}
2338
2313static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data) 2339static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
2314{ 2340{
2315 struct vcpu_svm *svm = to_svm(vcpu); 2341 struct vcpu_svm *svm = to_svm(vcpu);
@@ -2376,6 +2402,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
2376 svm->nested.hsave_msr = data; 2402 svm->nested.hsave_msr = data;
2377 break; 2403 break;
2378 case MSR_VM_CR: 2404 case MSR_VM_CR:
2405 return svm_set_vm_cr(vcpu, data);
2379 case MSR_VM_IGNNE: 2406 case MSR_VM_IGNNE:
2380 pr_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); 2407 pr_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
2381 break; 2408 break;