about summary refs log tree commit diff stats
path: root/arch/x86/kvm/svm.c
diff options
context:
space:
mode:
authorAvi Kivity <avi@redhat.com>2009-12-29 11:07:30 -0500
committerMarcelo Tosatti <mtosatti@redhat.com>2010-03-01 10:35:50 -0500
commit4d4ec0874583b127caac1d0f84033c8971b2fd2a (patch)
tree2ed5d1edff6a5253561fff0593e89d1c49518b1b /arch/x86/kvm/svm.c
parenta1f83a74feaa9718a5c61587256ea6cc1b993d16 (diff)
KVM: Replace read accesses of vcpu->arch.cr0 by an accessor
Since we'd like to allow the guest to own a few bits of cr0 at times, we need to know when we access those bits.

Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/svm.c')
-rw-r--r--arch/x86/kvm/svm.c9
1 files changed, 5 insertions, 4 deletions
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index cf64fc026e3e..d3246ce70ae8 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -980,7 +980,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
980 if (npt_enabled) 980 if (npt_enabled)
981 goto set; 981 goto set;
982 982
983 if ((vcpu->arch.cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) { 983 if (kvm_read_cr0_bits(vcpu, X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
984 svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR); 984 svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
985 vcpu->fpu_active = 1; 985 vcpu->fpu_active = 1;
986 } 986 }
@@ -1244,7 +1244,7 @@ static int ud_interception(struct vcpu_svm *svm)
1244static int nm_interception(struct vcpu_svm *svm) 1244static int nm_interception(struct vcpu_svm *svm)
1245{ 1245{
1246 svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR); 1246 svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
1247 if (!(svm->vcpu.arch.cr0 & X86_CR0_TS)) 1247 if (!kvm_read_cr0_bits(&svm->vcpu, X86_CR0_TS))
1248 svm->vmcb->save.cr0 &= ~X86_CR0_TS; 1248 svm->vmcb->save.cr0 &= ~X86_CR0_TS;
1249 svm->vcpu.fpu_active = 1; 1249 svm->vcpu.fpu_active = 1;
1250 1250
@@ -1743,7 +1743,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
1743 hsave->save.gdtr = vmcb->save.gdtr; 1743 hsave->save.gdtr = vmcb->save.gdtr;
1744 hsave->save.idtr = vmcb->save.idtr; 1744 hsave->save.idtr = vmcb->save.idtr;
1745 hsave->save.efer = svm->vcpu.arch.shadow_efer; 1745 hsave->save.efer = svm->vcpu.arch.shadow_efer;
1746 hsave->save.cr0 = svm->vcpu.arch.cr0; 1746 hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
1747 hsave->save.cr4 = svm->vcpu.arch.cr4; 1747 hsave->save.cr4 = svm->vcpu.arch.cr4;
1748 hsave->save.rflags = vmcb->save.rflags; 1748 hsave->save.rflags = vmcb->save.rflags;
1749 hsave->save.rip = svm->next_rip; 1749 hsave->save.rip = svm->next_rip;
@@ -2387,7 +2387,8 @@ static int handle_exit(struct kvm_vcpu *vcpu)
2387 2387
2388 if (npt_enabled) { 2388 if (npt_enabled) {
2389 int mmu_reload = 0; 2389 int mmu_reload = 0;
2390 if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) { 2390 if ((kvm_read_cr0_bits(vcpu, X86_CR0_PG) ^ svm->vmcb->save.cr0)
2391 & X86_CR0_PG) {
2391 svm_set_cr0(vcpu, svm->vmcb->save.cr0); 2392 svm_set_cr0(vcpu, svm->vmcb->save.cr0);
2392 mmu_reload = 1; 2393 mmu_reload = 1;
2393 } 2394 }