author	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-03-04 12:22:05 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-03-04 12:22:05 -0500
commit	67171a3f0335f2ecd1723851e75a0af7e2115f25 (patch)
tree	acabcdf01549c7cf3e157573469cd4254b8167a1 /arch/x86/kvm/mmu.c
parent	ce932967b9f77c130d4936d1e20d619a628ae08f (diff)
parent	1a4e3f89c6b2cbe0b26c08ec63a8c34156eaae04 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm:
  x86: disable KVM for Voyager and friends
  KVM: VMX: Avoid rearranging switched guest msrs while they are loaded
  KVM: MMU: Fix race when instantiating a shadow pte
  KVM: Route irq 0 to vcpu 0 exclusively
  KVM: Avoid infinite-frequency local apic timer
  KVM: make MMU_DEBUG compile again
  KVM: move alloc_apic_access_page() outside of non-preemptable region
  KVM: SVM: fix Windows XP 64 bit installation crash
  KVM: remove the usage of the mmap_sem for the protection of the memory slots.
  KVM: emulate access to MSR_IA32_MCG_CTL
  KVM: Make the supported cpuid list a host property rather than a vm property
  KVM: Fix kvm_arch_vcpu_ioctl_set_sregs so that set_cr0 works properly
  KVM: SVM: set NM intercept when enabling CR0.TS in the guest
  KVM: SVM: Fix lazy FPU switching
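The change driving most of the mmu.c diff below is "KVM: remove the usage of the mmap_sem for the protection of the memory slots.": memslot lookups are now serialized by kvm->slots_lock, and current->mm->mmap_sem is held only around the gfn_to_page() call that actually walks host memory. A minimal sketch of the resulting pattern, condensed from the patched nonpaging_map() (example_map() itself is hypothetical, and kvm_mmu_free_some_pages() is elided):

/*
 * Sketch only: example_map() is a hypothetical condensation of the
 * locking pattern nonpaging_map() follows after this merge.
 */
static int example_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
{
	int r;
	struct page *page;

	down_read(&vcpu->kvm->slots_lock);	/* pin the memslot layout */

	down_read(&current->mm->mmap_sem);	/* host-mm walk only */
	page = gfn_to_page(vcpu->kvm, gfn);
	up_read(&current->mm->mmap_sem);	/* dropped before mmu_lock */

	spin_lock(&vcpu->kvm->mmu_lock);
	r = __nonpaging_map(vcpu, v, write, gfn, page);	/* install the spte */
	spin_unlock(&vcpu->kvm->mmu_lock);

	up_read(&vcpu->kvm->slots_lock);
	return r;
}

Narrowing mmap_sem to the gfn_to_page() call avoids holding the host mm lock across the mmu_lock section, while slots_lock keeps the gfn-to-slot translation stable for the whole fault.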
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	38
1 file changed, 25 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8efdcdbebb03..d8172aabc660 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -681,8 +681,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 					     unsigned level,
 					     int metaphysical,
 					     unsigned access,
-					     u64 *parent_pte,
-					     bool *new_page)
+					     u64 *parent_pte)
 {
 	union kvm_mmu_page_role role;
 	unsigned index;
@@ -722,8 +721,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	vcpu->arch.mmu.prefetch_page(vcpu, sp);
 	if (!metaphysical)
 		rmap_write_protect(vcpu->kvm, gfn);
-	if (new_page)
-		*new_page = 1;
 	return sp;
 }
 
@@ -876,11 +873,18 @@ static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
 
 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
 {
+	struct page *page;
+
 	gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
 
 	if (gpa == UNMAPPED_GVA)
 		return NULL;
-	return gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+
+	down_read(&current->mm->mmap_sem);
+	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+	up_read(&current->mm->mmap_sem);
+
+	return page;
 }
 
 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
@@ -999,8 +1003,7 @@ static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write,
 				>> PAGE_SHIFT;
 			new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
 						     v, level - 1,
-						     1, ACC_ALL, &table[index],
-						     NULL);
+						     1, ACC_ALL, &table[index]);
 			if (!new_table) {
 				pgprintk("nonpaging_map: ENOMEM\n");
 				kvm_release_page_clean(page);
@@ -1020,15 +1023,18 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 
 	struct page *page;
 
+	down_read(&vcpu->kvm->slots_lock);
+
 	down_read(&current->mm->mmap_sem);
 	page = gfn_to_page(vcpu->kvm, gfn);
+	up_read(&current->mm->mmap_sem);
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 	kvm_mmu_free_some_pages(vcpu);
 	r = __nonpaging_map(vcpu, v, write, gfn, page);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 
-	up_read(&current->mm->mmap_sem);
+	up_read(&vcpu->kvm->slots_lock);
 
 	return r;
 }
@@ -1090,7 +1096,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 
 		ASSERT(!VALID_PAGE(root));
 		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
-				      PT64_ROOT_LEVEL, 0, ACC_ALL, NULL, NULL);
+				      PT64_ROOT_LEVEL, 0, ACC_ALL, NULL);
 		root = __pa(sp->spt);
 		++sp->root_count;
 		vcpu->arch.mmu.root_hpa = root;
@@ -1111,7 +1117,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 			root_gfn = 0;
 		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
 				      PT32_ROOT_LEVEL, !is_paging(vcpu),
-				      ACC_ALL, NULL, NULL);
+				      ACC_ALL, NULL);
 		root = __pa(sp->spt);
 		++sp->root_count;
 		vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
@@ -1172,7 +1178,7 @@ void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 
 static void paging_new_cr3(struct kvm_vcpu *vcpu)
 {
-	pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
+	pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->arch.cr3);
 	mmu_free_roots(vcpu);
 }
 
@@ -1362,6 +1368,7 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	gfn_t gfn;
 	int r;
 	u64 gpte = 0;
+	struct page *page;
 
 	if (bytes != 4 && bytes != 8)
 		return;
@@ -1389,6 +1396,11 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	if (!is_present_pte(gpte))
 		return;
 	gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
+
+	down_read(&current->mm->mmap_sem);
+	page = gfn_to_page(vcpu->kvm, gfn);
+	up_read(&current->mm->mmap_sem);
+
 	vcpu->arch.update_pte.gfn = gfn;
-	vcpu->arch.update_pte.page = gfn_to_page(vcpu->kvm, gfn);
+	vcpu->arch.update_pte.page = page;
 }
@@ -1496,9 +1508,9 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 	gpa_t gpa;
 	int r;
 
-	down_read(&current->mm->mmap_sem);
+	down_read(&vcpu->kvm->slots_lock);
 	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
-	up_read(&current->mm->mmap_sem);
+	up_read(&vcpu->kvm->slots_lock);
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
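
For context on the kvm_mmu_get_page() signature change at the top of this diff: the dropped bool *new_page out-parameter was passed as NULL by every caller touched here, and the race fix ("KVM: MMU: Fix race when instantiating a shadow pte") removes the remaining need for it. Below is a rough, abbreviated sketch of the lookup-or-allocate behaviour; names follow the 2.6.25-era mmu.c, but the role computation and hash indexing are simplified reconstructions, not quoted code:

/* Abbreviated sketch, not the verbatim kernel function. */
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn, gva_t gaddr,
					     unsigned level, int metaphysical,
					     unsigned access, u64 *parent_pte)
{
	union kvm_mmu_page_role role;
	struct kvm_mmu_page *sp;
	struct hlist_node *node;
	struct hlist_head *bucket;

	role.word = 0;			/* quadrant handling omitted here */
	role.level = level;
	role.metaphysical = metaphysical;
	role.access = access;

	/* hash lookup: reuse an existing shadow page for (gfn, role) */
	bucket = &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)
						% KVM_NUM_MMU_PAGES];
	hlist_for_each_entry(sp, node, bucket, hash_link)
		if (sp->gfn == gfn && sp->role.word == role.word) {
			mmu_page_add_parent_pte(vcpu, sp, parent_pte);
			return sp;	/* existing shadow page: share it */
		}

	/* miss: allocate, insert, write-protect the guest page table */
	sp = kvm_mmu_alloc_page(vcpu, parent_pte);
	if (!sp)
		return sp;
	sp->gfn = gfn;
	sp->role = role;
	hlist_add_head(&sp->hash_link, bucket);
	vcpu->arch.mmu.prefetch_page(vcpu, sp);
	if (!metaphysical)
		rmap_write_protect(vcpu->kvm, gfn);
	return sp;	/* this is where "*new_page = 1" used to live */
}

Because a racing vcpu simply finds the page in the hash and attaches its own parent_pte, callers never needed to branch on whether the page was freshly allocated, which is why the three call sites in this diff just lose their trailing NULL argument.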