author	Izik Eidus <izike@qumranet.com>	2008-03-20 12:17:24 -0400
committer	Avi Kivity <avi@qumranet.com>	2008-04-27 05:00:34 -0400
commit	855149aaa90016c576a0e684361a34f8047307d0 (patch)
tree	bfca7a0e52e4a4e7857a2e9fc0ff9f98e9f26dfa /arch/x86/kvm/mmu.c
parent	69a9f69bb24d6d3dbf3d2ba542ddceeda40536d5 (diff)
KVM: MMU: fix dirty bit setting when removing write permissions
When mmu_set_spte() checks whether a page referenced by a spte should be released as dirty or clean, it checks whether the shadow pte was writable. But once rmap_write_protect() is called, shadow ptes that were writable can become read-only, and mmu_set_spte() will then release those pages as clean. This patch fixes the issue by marking the page as dirty inside rmap_write_protect().

Signed-off-by: Izik Eidus <izike@qumranet.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
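To illustrate the failure mode described above, the following is a minimal standalone C model, not the kernel code itself: PT_WRITABLE_MASK, struct fake_page, release_page() and set_page_dirty() are simplified stand-ins for the real spte writable bit, struct page, the release check in mmu_set_spte() and SetPageDirty(). It shows that once write permission is cleared before the release check runs, the dirty state is lost unless the write-protect path records it, which is exactly what the patch adds.

/*
 * Standalone model of the race described above (not kernel code).
 * The release path decides dirty vs. clean from the spte's writable
 * bit, so clearing that bit first loses the dirty state unless the
 * write-protect path marks the page dirty itself.
 */
#include <stdint.h>
#include <stdio.h>

#define PT_WRITABLE_MASK (1ULL << 1)	/* simplified stand-in for the real bit */

struct fake_page {
	int dirty;
};

/* Simplified stand-in for SetPageDirty(). */
static void set_page_dirty(struct fake_page *page)
{
	page->dirty = 1;
}

/* Models the release check in mmu_set_spte(): dirty only if the spte was writable. */
static void release_page(struct fake_page *page, uint64_t spte)
{
	if (spte & PT_WRITABLE_MASK)
		set_page_dirty(page);
	/* otherwise the page is released "clean" and the guest's writes are lost */
}

int main(void)
{
	struct fake_page page = { .dirty = 0 };
	uint64_t spte = PT_WRITABLE_MASK;	/* guest wrote through a writable spte */

	/* rmap_write_protect() removes write permission before the release... */
	spte &= ~PT_WRITABLE_MASK;

	/* ...so the release path no longer sees the page as dirty. */
	release_page(&page, spte);
	printf("dirty without the fix: %d\n", page.dirty);	/* prints 0: the bug */

	/* The patch marks the page dirty inside rmap_write_protect() itself. */
	set_page_dirty(&page);
	printf("dirty with the fix:    %d\n", page.dirty);	/* prints 1 */
	return 0;
}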
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	8
1 file changed, 8 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a5872b3c466d..dd4b95b3896b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -626,6 +626,14 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
 		}
 		spte = rmap_next(kvm, rmapp, spte);
 	}
+	if (write_protected) {
+		struct page *page;
+
+		spte = rmap_next(kvm, rmapp, NULL);
+		page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
+		SetPageDirty(page);
+	}
+
 	/* check for huge page mappings */
 	rmapp = gfn_to_rmap(kvm, gfn, 1);
 	spte = rmap_next(kvm, rmapp, NULL);