author		Avi Kivity <avi@redhat.com>	2010-12-05 10:30:00 -0500
committer	Avi Kivity <avi@redhat.com>	2011-01-12 04:31:15 -0500
commit		9f8fe5043fd26627c2fa2e9a41896885e675000b (patch)
tree		e81d03d82d78903bde7c390461d46c8f937cb931 /arch/x86/kvm
parent		e49146dce8c3dc6f4485c1904b6587855f393e71 (diff)
KVM: Replace reads of vcpu->arch.cr3 by an accessor
This allows us to keep cr3 in the VMCS, later on.
Signed-off-by: Avi Kivity <avi@redhat.com>
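
The point of the accessor is that it gives KVM a single place to change where CR3 is actually read from: once every caller goes through kvm_read_cr3(), a later patch can make the accessor pull the value lazily out of hardware state (the VMCS) instead of the vcpu->arch.cr3 cache, without touching the callers again. Below is a minimal, self-contained sketch of that lazy-accessor pattern, not kernel code; demo_vcpu, read_cr3_from_hw and demo_read_cr3 are invented names for illustration.

#include <stdbool.h>
#include <stdio.h>

struct demo_vcpu {
	unsigned long cr3;	/* software cache of guest CR3 */
	bool cr3_cached;	/* does the cache hold a valid value? */
};

/* Stand-in for reading GUEST_CR3 out of hardware state such as the VMCS. */
static unsigned long read_cr3_from_hw(void)
{
	return 0x1000;		/* pretend value */
}

/*
 * Same shape as kvm_read_cr3(): callers never touch the field directly,
 * so the accessor is free to fetch the value lazily on first use.
 */
static unsigned long demo_read_cr3(struct demo_vcpu *vcpu)
{
	if (!vcpu->cr3_cached) {
		vcpu->cr3 = read_cr3_from_hw();
		vcpu->cr3_cached = true;
	}
	return vcpu->cr3;
}

int main(void)
{
	struct demo_vcpu vcpu = { .cr3 = 0, .cr3_cached = false };

	/* First read fills the cache from "hardware"; later reads hit it. */
	printf("cr3 = %#lx\n", demo_read_cr3(&vcpu));
	printf("cr3 = %#lx\n", demo_read_cr3(&vcpu));
	return 0;
}
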
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--	arch/x86/kvm/kvm_cache_regs.h	5
-rw-r--r--	arch/x86/kvm/mmu.c	6
-rw-r--r--	arch/x86/kvm/svm.c	10
-rw-r--r--	arch/x86/kvm/vmx.c	7
-rw-r--r--	arch/x86/kvm/x86.c	19
5 files changed, 27 insertions, 20 deletions
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 95ac3afa6e6f..a6bf8db326f5 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -73,6 +73,11 @@ static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
 	return vcpu->arch.cr4 & mask;
 }
 
+static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.cr3;
+}
+
 static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
 {
 	return kvm_read_cr4_bits(vcpu, ~0UL);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a2127f82e786..e558795fccd5 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2727,13 +2727,13 @@ void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 
 static void paging_new_cr3(struct kvm_vcpu *vcpu)
 {
-	pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
+	pgprintk("%s: cr3 %lx\n", __func__, kvm_read_cr3(vcpu));
 	mmu_free_roots(vcpu);
 }
 
 static unsigned long get_cr3(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.cr3;
+	return kvm_read_cr3(vcpu);
 }
 
 static void inject_page_fault(struct kvm_vcpu *vcpu,
@@ -3637,7 +3637,7 @@ static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
 
 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 {
-	(void)kvm_set_cr3(vcpu, vcpu->arch.cr3);
+	(void)kvm_set_cr3(vcpu, kvm_read_cr3(vcpu));
 	return 1;
 }
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index af4b911a8bed..a7b04c0bd7a5 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1174,7 +1174,7 @@ static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 	switch (reg) {
 	case VCPU_EXREG_PDPTR:
 		BUG_ON(!npt_enabled);
-		load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
+		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
 		break;
 	default:
 		BUG();
@@ -2116,7 +2116,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
 	nested_vmcb->save.idtr = vmcb->save.idtr;
 	nested_vmcb->save.efer = svm->vcpu.arch.efer;
 	nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
-	nested_vmcb->save.cr3 = svm->vcpu.arch.cr3;
+	nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu);
 	nested_vmcb->save.cr2 = vmcb->save.cr2;
 	nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
 	nested_vmcb->save.rflags = vmcb->save.rflags;
@@ -2311,7 +2311,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 	if (npt_enabled)
 		hsave->save.cr3 = vmcb->save.cr3;
 	else
-		hsave->save.cr3 = svm->vcpu.arch.cr3;
+		hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);
 
 	copy_vmcb_control_area(hsave, vmcb);
 
@@ -2715,7 +2715,7 @@ static int cr_interception(struct vcpu_svm *svm)
 			val = svm->vcpu.arch.cr2;
 			break;
 		case 3:
-			val = svm->vcpu.arch.cr3;
+			val = kvm_read_cr3(&svm->vcpu);
 			break;
 		case 4:
 			val = kvm_read_cr4(&svm->vcpu);
@@ -3693,7 +3693,7 @@ static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
 	mark_dirty(svm->vmcb, VMCB_NPT);
 
 	/* Also sync guest cr3 here in case we live migrate */
-	svm->vmcb->save.cr3 = vcpu->arch.cr3;
+	svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
 	mark_dirty(svm->vmcb, VMCB_CR);
 
 	svm_flush_tlb(vcpu);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 67c085273510..141956ebf794 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1989,7 +1989,7 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 	if (enable_ept) {
 		eptp = construct_eptp(cr3);
 		vmcs_write64(EPT_POINTER, eptp);
-		guest_cr3 = is_paging(vcpu) ? vcpu->arch.cr3 :
+		guest_cr3 = is_paging(vcpu) ? kvm_read_cr3(vcpu) :
 			vcpu->kvm->arch.ept_identity_map_addr;
 		ept_load_pdptrs(vcpu);
 	}
@@ -3227,8 +3227,9 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 	case 1: /*mov from cr*/
 		switch (cr) {
 		case 3:
-			kvm_register_write(vcpu, reg, vcpu->arch.cr3);
-			trace_kvm_cr_read(cr, vcpu->arch.cr3);
+			val = kvm_read_cr3(vcpu);
+			kvm_register_write(vcpu, reg, val);
+			trace_kvm_cr_read(cr, val);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		case 8:
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7ad9cda8ff36..6e50314d64fb 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -473,8 +473,8 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
 		      (unsigned long *)&vcpu->arch.regs_avail))
 		return true;
 
-	gfn = (vcpu->arch.cr3 & ~31u) >> PAGE_SHIFT;
-	offset = (vcpu->arch.cr3 & ~31u) & (PAGE_SIZE - 1);
+	gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
+	offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1);
 	r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
 				       PFERR_USER_MASK | PFERR_WRITE_MASK);
 	if (r < 0)
@@ -519,7 +519,7 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 		} else
 #endif
 		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
-						 vcpu->arch.cr3))
+						 kvm_read_cr3(vcpu)))
 			return 1;
 	}
 
@@ -611,7 +611,8 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 			return 1;
 	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
 		   && ((cr4 ^ old_cr4) & pdptr_bits)
-		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3))
+		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
+				   kvm_read_cr3(vcpu)))
 		return 1;
 
 	if (cr4 & X86_CR4_VMXE)
@@ -631,7 +632,7 @@ EXPORT_SYMBOL_GPL(kvm_set_cr4);
 
 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
-	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
+	if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
 		kvm_mmu_sync_roots(vcpu);
 		kvm_mmu_flush_tlb(vcpu);
 		return 0;
@@ -4073,7 +4074,7 @@ static unsigned long emulator_get_cr(int cr, struct kvm_vcpu *vcpu)
 		value = vcpu->arch.cr2;
 		break;
 	case 3:
-		value = vcpu->arch.cr3;
+		value = kvm_read_cr3(vcpu);
 		break;
 	case 4:
 		value = kvm_read_cr4(vcpu);
@@ -5512,7 +5513,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 
 	sregs->cr0 = kvm_read_cr0(vcpu);
 	sregs->cr2 = vcpu->arch.cr2;
-	sregs->cr3 = vcpu->arch.cr3;
+	sregs->cr3 = kvm_read_cr3(vcpu);
 	sregs->cr4 = kvm_read_cr4(vcpu);
 	sregs->cr8 = kvm_get_cr8(vcpu);
 	sregs->efer = vcpu->arch.efer;
@@ -5580,7 +5581,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	kvm_x86_ops->set_gdt(vcpu, &dt);
 
 	vcpu->arch.cr2 = sregs->cr2;
-	mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
+	mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
 	vcpu->arch.cr3 = sregs->cr3;
 
 	kvm_set_cr8(vcpu, sregs->cr8);
@@ -5598,7 +5599,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	if (sregs->cr4 & X86_CR4_OSXSAVE)
 		update_cpuid(vcpu);
 	if (!is_long_mode(vcpu) && is_pae(vcpu)) {
-		load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
+		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
 		mmu_reset_needed = 1;
 	}
 