diff options
author:    Marcelo Tosatti <marcelo@kvack.org>  2007-12-11 19:12:27 -0500
committer: Avi Kivity <avi@qumranet.com>        2008-01-30 10:53:21 -0500
commit:    7819026eefee53eaaac3fdce1a2f157c7ea943fe (patch)
tree:      e5ee690406a8ebe381ce5d712f010a5a0c706c4c /drivers/kvm/mmu.c
parent:    1d075434149c38d457c30d1f11d9c39210b0bb79 (diff)
KVM: MMU: Fix SMP shadow instantiation race
There is a race where VCPU0 is shadowing a pagetable entry while VCPU1
is updating it, which results in a stale shadow copy.
Fix that by comparing the contents of the cached guest pte with the
current guest pte after write-protecting the guest pagetable.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm/mmu.c')
 drivers/kvm/mmu.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index ba71e8d66761..92ac0d1106b4 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -681,7 +681,8 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 					     unsigned level,
 					     int metaphysical,
 					     unsigned access,
-					     u64 *parent_pte)
+					     u64 *parent_pte,
+					     bool *new_page)
 {
 	union kvm_mmu_page_role role;
 	unsigned index;
@@ -720,6 +721,8 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	vcpu->mmu.prefetch_page(vcpu, sp);
 	if (!metaphysical)
 		rmap_write_protect(vcpu->kvm, gfn);
+	if (new_page)
+		*new_page = 1;
 	return sp;
 }
 
@@ -993,7 +996,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 						     >> PAGE_SHIFT;
 			new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
 						     v, level - 1,
-						     1, ACC_ALL, &table[index]);
+						     1, ACC_ALL, &table[index],
+						     NULL);
 			if (!new_table) {
 				pgprintk("nonpaging_map: ENOMEM\n");
 				return -ENOMEM;
@@ -1059,7 +1063,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 
 		ASSERT(!VALID_PAGE(root));
 		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
-				      PT64_ROOT_LEVEL, 0, ACC_ALL, NULL);
+				      PT64_ROOT_LEVEL, 0, ACC_ALL, NULL, NULL);
 		root = __pa(sp->spt);
 		++sp->root_count;
 		vcpu->mmu.root_hpa = root;
@@ -1080,7 +1084,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 			root_gfn = 0;
 		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
 				      PT32_ROOT_LEVEL, !is_paging(vcpu),
-				      ACC_ALL, NULL);
+				      ACC_ALL, NULL, NULL);
 		root = __pa(sp->spt);
 		++sp->root_count;
 		vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;