author     Avi Kivity <avi@qumranet.com>             2007-01-05 19:36:43 -0500
committer  Linus Torvalds <torvalds@woody.osdl.org>  2007-01-06 02:55:25 -0500
commit     374cbac0333ddf5cf1c6637efaf7f3adcc67fd75 (patch)
tree       0960a8c54aa6f592f5d1cb8a7dbc116c9cb3836e /drivers/kvm
parent     cea0f0e7ea54753c3265dc77f605a6dad1912cfc (diff)

[PATCH] KVM: MMU: Write protect guest pages when a shadow is created for them

When we cache a guest page table into a shadow page table, we need to prevent
further write access to that page by the guest, as that would render the cache
incoherent.

Signed-off-by: Avi Kivity <avi@qumranet.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
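
As a rough illustration of the idea described above (a stand-alone sketch with
toy types and a hypothetical helper, not the kernel's code): every writable
shadow PTE that maps a guest frame is reachable from a per-frame reverse map,
and creating a shadow for that frame walks the list and clears the writable
bit, so later guest writes fault into the MMU instead of silently diverging
from the cached shadow:

#include <stdint.h>
#include <stdio.h>

#define PT_WRITABLE_MASK (1ULL << 1)   /* the x86 R/W PTE bit */

struct rmap_entry {
        uint64_t *spte;                /* shadow PTE mapping the frame */
        struct rmap_entry *next;
};

struct guest_frame {
        struct rmap_entry *rmap;       /* all writable mappings of this frame */
};

/* Hypothetical helper mirroring what rmap_write_protect() does in the patch. */
static void write_protect_frame(struct guest_frame *gf)
{
        for (struct rmap_entry *e = gf->rmap; e; e = e->next)
                *e->spte &= ~PT_WRITABLE_MASK;
        gf->rmap = NULL;               /* no writable mappings remain */
}

int main(void)
{
        uint64_t spte = 0x1000 | PT_WRITABLE_MASK;
        struct rmap_entry e = { &spte, NULL };
        struct guest_frame gf = { &e };

        write_protect_frame(&gf);      /* done when a shadow page is created */
        printf("spte now %#llx (writable bit cleared)\n",
               (unsigned long long)spte);
        return 0;
}
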
Diffstat (limited to 'drivers/kvm')
 -rw-r--r--  drivers/kvm/mmu.c          | 72
 -rw-r--r--  drivers/kvm/paging_tmpl.h  |  1
 2 files changed, 55 insertions(+), 18 deletions(-)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 47c699c21c08..ba813f49f8aa 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -274,6 +274,35 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
         }
 }
 
+static void rmap_write_protect(struct kvm *kvm, u64 gfn)
+{
+        struct page *page;
+        struct kvm_memory_slot *slot;
+        struct kvm_rmap_desc *desc;
+        u64 *spte;
+
+        slot = gfn_to_memslot(kvm, gfn);
+        BUG_ON(!slot);
+        page = gfn_to_page(slot, gfn);
+
+        while (page->private) {
+                if (!(page->private & 1))
+                        spte = (u64 *)page->private;
+                else {
+                        desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
+                        spte = desc->shadow_ptes[0];
+                }
+                BUG_ON(!spte);
+                BUG_ON((*spte & PT64_BASE_ADDR_MASK) !=
+                       page_to_pfn(page) << PAGE_SHIFT);
+                BUG_ON(!(*spte & PT_PRESENT_MASK));
+                BUG_ON(!(*spte & PT_WRITABLE_MASK));
+                rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
+                rmap_remove(kvm, spte);
+                *spte &= ~(u64)PT_WRITABLE_MASK;
+        }
+}
+
 static void kvm_mmu_free_page(struct kvm_vcpu *vcpu, hpa_t page_hpa)
 {
         struct kvm_mmu_page *page_head = page_header(page_hpa);
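
The loop in rmap_write_protect() above decodes the reverse map kept in
page->private: with bit 0 clear the value is a single spte pointer, with bit 0
set it points to a kvm_rmap_desc holding several sptes, and because
rmap_remove() drops the entry just processed, re-reading slot 0 each iteration
eventually drains the whole chain. A stand-alone sketch of that tagged-pointer
encoding (toy types and helper names, not the kernel's):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Bit 0 of the tagged value: clear = a single u64 *spte, set = a pointer to a
 * descriptor holding several sptes (pointers are at least 2-byte aligned, so
 * the low bit is free). */
#define RMAP_MANY ((uintptr_t)1)

struct toy_rmap_desc {
        uint64_t *shadow_ptes[4];
};

static uintptr_t rmap_one(uint64_t *spte)
{
        return (uintptr_t)spte;                  /* low bit clear: direct spte */
}

static uintptr_t rmap_many(struct toy_rmap_desc *d)
{
        return (uintptr_t)d | RMAP_MANY;         /* low bit set: descriptor */
}

static uint64_t *rmap_first(uintptr_t rmap)
{
        if (!(rmap & RMAP_MANY))
                return (uint64_t *)rmap;
        return ((struct toy_rmap_desc *)(rmap & ~RMAP_MANY))->shadow_ptes[0];
}

int main(void)
{
        uint64_t a = 1, b = 2;
        struct toy_rmap_desc d = { { &a, &b, NULL, NULL } };

        assert(rmap_first(rmap_one(&a)) == &a);
        assert(rmap_first(rmap_many(&d)) == &a);
        puts("tagged-pointer rmap decode ok");
        return 0;
}
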
@@ -444,6 +473,8 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
         page->gfn = gfn;
         page->role = role;
         hlist_add_head(&page->hash_link, bucket);
+        if (!metaphysical)
+                rmap_write_protect(vcpu->kvm, gfn);
         return page;
 }
 
@@ -705,6 +736,7 @@ static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 
 static void paging_new_cr3(struct kvm_vcpu *vcpu)
 {
+        pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
         mmu_free_roots(vcpu);
         mmu_alloc_roots(vcpu);
         kvm_mmu_flush_tlb(vcpu);
@@ -727,24 +759,11 @@ static inline void set_pte_common(struct kvm_vcpu *vcpu,
         *shadow_pte |= access_bits << PT_SHADOW_BITS_OFFSET;
         if (!dirty)
                 access_bits &= ~PT_WRITABLE_MASK;
-        if (access_bits & PT_WRITABLE_MASK) {
-                struct kvm_mmu_page *shadow;
 
-                shadow = kvm_mmu_lookup_page(vcpu, gaddr >> PAGE_SHIFT);
-                if (shadow)
-                        pgprintk("%s: found shadow page for %lx, marking ro\n",
-                                 __FUNCTION__, (gfn_t)(gaddr >> PAGE_SHIFT));
-                if (shadow)
-                        access_bits &= ~PT_WRITABLE_MASK;
-        }
-
-        if (access_bits & PT_WRITABLE_MASK)
-                mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);
+        paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);
 
         *shadow_pte |= access_bits;
 
-        paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);
-
         if (!(*shadow_pte & PT_GLOBAL_MASK))
                 mark_pagetable_nonglobal(shadow_pte);
 
@@ -752,11 +771,28 @@ static inline void set_pte_common(struct kvm_vcpu *vcpu,
                 *shadow_pte |= gaddr;
                 *shadow_pte |= PT_SHADOW_IO_MARK;
                 *shadow_pte &= ~PT_PRESENT_MASK;
-        } else {
-                *shadow_pte |= paddr;
-                page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
-                rmap_add(vcpu->kvm, shadow_pte);
+                return;
         }
+
+        *shadow_pte |= paddr;
+
+        if (access_bits & PT_WRITABLE_MASK) {
+                struct kvm_mmu_page *shadow;
+
+                shadow = kvm_mmu_lookup_page(vcpu, gaddr >> PAGE_SHIFT);
+                if (shadow) {
+                        pgprintk("%s: found shadow page for %lx, marking ro\n",
+                                 __FUNCTION__, (gfn_t)(gaddr >> PAGE_SHIFT));
+                        access_bits &= ~PT_WRITABLE_MASK;
+                        *shadow_pte &= ~PT_WRITABLE_MASK;
+                }
+        }
+
+        if (access_bits & PT_WRITABLE_MASK)
+                mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);
+
+        page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
+        rmap_add(vcpu->kvm, shadow_pte);
 }
 
 static void inject_page_fault(struct kvm_vcpu *vcpu,
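
In the reworked set_pte_common() above, I/O mappings now take an early return,
and the "found shadow page ... marking ro" check runs only after access_bits
has already been merged into the shadow PTE, which appears to be why the new
code clears PT_WRITABLE_MASK in both access_bits and *shadow_pte. A
stand-alone sketch of that ordering point (toy constants and a hypothetical
helper, not the kernel function):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PT_PRESENT_MASK  (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << 1)

/* Toy stand-in for the tail of set_pte_common(): merge the access bits first,
 * then strip write permission again if the target gfn is already shadowed. */
static uint64_t make_spte(uint64_t access_bits, bool gfn_is_shadowed)
{
        uint64_t spte = PT_PRESENT_MASK;

        spte |= access_bits;                      /* merged before the check */

        if ((access_bits & PT_WRITABLE_MASK) && gfn_is_shadowed) {
                access_bits &= ~PT_WRITABLE_MASK; /* as in the patch, before the dirty check */
                spte &= ~PT_WRITABLE_MASK;        /* already merged above, so clear here too */
        }
        return spte;
}

int main(void)
{
        assert(make_spte(PT_WRITABLE_MASK, false) & PT_WRITABLE_MASK);
        assert(!(make_spte(PT_WRITABLE_MASK, true) & PT_WRITABLE_MASK));
        puts("shadowed gfn mapped read-only");
        return 0;
}
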
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index f7cce443ca6f..cd71973c780c 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -133,6 +133,7 @@ static void FNAME(walk_addr)(struct guest_walker *walker,
                                  walker->level - 1, table_gfn);
         }
         walker->ptep = ptep;
+        pgprintk("%s: pte %llx\n", __FUNCTION__, (u64)*ptep);
 }
 
 static void FNAME(release_walker)(struct guest_walker *walker)