author     Avi Kivity <avi@qumranet.com>              2007-01-05 19:36:45 -0500
committer  Linus Torvalds <torvalds@woody.osdl.org>   2007-01-06 02:55:25 -0500
commit     9b7a032567ee1128daeebebfc14d3acedfe28c8c (patch)
tree       292b109ec407041f26371f3cb7de12d06a636592 /drivers/kvm/mmu.c
parent     da4a00f002239f72b0d7d0eeaa3b60100e2b1438 (diff)
[PATCH] KVM: MMU: Zap shadow page table entries on writes to guest page tables
Iterate over all shadow pages which correspond to the given guest page table
and remove the mappings.
A subsequent page fault will reestablish the new mapping.
Signed-off-by: Avi Kivity <avi@qumranet.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
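
For orientation, here is a minimal sketch of how these hooks are meant to bracket an emulated write to guest memory. Only kvm_mmu_pre_write() and kvm_mmu_post_write() come from KVM itself; the surrounding function and the example_gpa_to_hva() helper are illustrative assumptions, not the actual kvm_main.c caller.

/* Sketch only: bracket an emulated guest-physical write with the MMU write
 * hooks so stale shadow PTEs are zapped when the guest page table changes.
 * example_gpa_to_hva() is a hypothetical helper, not a KVM function. */
static int example_emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
				       const void *val, int bytes)
{
	void *host_va = example_gpa_to_hva(vcpu->kvm, gpa);

	if (!host_va)
		return 0;

	kvm_mmu_pre_write(vcpu, gpa, bytes);	/* zap shadow PTEs (this patch) */
	memcpy(host_va, val, bytes);		/* perform the emulated write */
	kvm_mmu_post_write(vcpu, gpa, bytes);
	return 1;
}
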
Diffstat (limited to 'drivers/kvm/mmu.c')
-rw-r--r--  drivers/kvm/mmu.c  36
1 file changed, 36 insertions, 0 deletions
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index bce7eb21f739..6dbd83b86623 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -958,7 +958,43 @@ int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
 
 void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
 {
+	gfn_t gfn = gpa >> PAGE_SHIFT;
+	struct kvm_mmu_page *page;
+	struct kvm_mmu_page *child;
+	struct hlist_node *node;
+	struct hlist_head *bucket;
+	unsigned index;
+	u64 *spte;
+	u64 pte;
+	unsigned offset = offset_in_page(gpa);
+	unsigned page_offset;
+	int level;
+
 	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
+	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+	bucket = &vcpu->kvm->mmu_page_hash[index];
+	hlist_for_each_entry(page, node, bucket, hash_link) {
+		if (page->gfn != gfn || page->role.metaphysical)
+			continue;
+		page_offset = offset;
+		level = page->role.level;
+		if (page->role.glevels == PT32_ROOT_LEVEL) {
+			page_offset <<= 1;	/* 32->64 */
+			page_offset &= ~PAGE_MASK;
+		}
+		spte = __va(page->page_hpa);
+		spte += page_offset / sizeof(*spte);
+		pte = *spte;
+		if (is_present_pte(pte)) {
+			if (level == PT_PAGE_TABLE_LEVEL)
+				rmap_remove(vcpu->kvm, spte);
+			else {
+				child = page_header(pte & PT64_BASE_ADDR_MASK);
+				mmu_page_remove_parent_pte(child, spte);
+			}
+		}
+		*spte = 0;
+	}
 }
 
 void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
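
A note on the offset math in the hunk above: a 32-bit guest PTE is 4 bytes wide, while every shadow PTE is 8 bytes, so for 32-bit guests the byte offset of the written guest entry is doubled (the "32->64" shift) and masked back into a single page before being turned into a shadow-PTE index. The following standalone sketch reproduces just that calculation; the function name and EX_* constants are illustrative, not kernel code.

/* Sketch of the page_offset -> shadow-PTE-index calculation, assuming
 * 4 KiB pages, 4-byte 32-bit guest entries and 8-byte shadow entries. */
#include <stdio.h>

#define EX_PAGE_SIZE	4096u
#define EX_PAGE_MASK	(~(EX_PAGE_SIZE - 1))

static unsigned example_spte_index(unsigned gpa_offset, int guest_is_32bit)
{
	unsigned page_offset = gpa_offset & (EX_PAGE_SIZE - 1);

	if (guest_is_32bit) {
		page_offset <<= 1;		/* 32->64: entries grow from 4 to 8 bytes */
		page_offset &= ~EX_PAGE_MASK;	/* stay within one shadow page */
	}
	return page_offset / sizeof(unsigned long long);	/* 8-byte shadow PTEs */
}

int main(void)
{
	/* Guest entry 5 of a 32-bit page table sits at byte offset 0x14;
	 * its shadow counterpart is also entry 5, at byte offset 0x28. */
	printf("%u\n", example_spte_index(0x14, 1));	/* prints 5 */
	return 0;
}
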