about summary refs log tree commit diff stats
path: root/arch/x86/kvm/mmu.c
diff options
context:
space:
mode:
author	Avi Kivity <avi@redhat.com>	2010-06-06 07:31:27 -0400
committer	Avi Kivity <avi@redhat.com>	2010-08-01 23:40:17 -0400
commit	be38d276b0189fa86231fc311428622a1981ad62 (patch)
tree	4706819e23ade99c43bb676830071da9bd2d0abd /arch/x86/kvm/mmu.c
parent	dd180b3e90253cb4ca95d603a8c17413f8daec69 (diff)
KVM: MMU: Introduce drop_spte()
When we call rmap_remove(), we (almost) always immediately follow it by an __set_spte() to a nonpresent pte. Since we need to perform the two operations atomically, to avoid losing the dirty and accessed bits, introduce a helper drop_spte() and convert all call sites. The operation is still nonatomic at this point. Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	30
1 files changed, 17 insertions, 13 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 70cdf6876b5..1ad39cf70e1 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -658,6 +658,12 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
658 } 658 }
659} 659}
660 660
661static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
662{
	/*
	 * Remove sptep's reverse-map entry, then install new_spte in its
	 * place.  NOTE(review): per the commit message, the pair of
	 * operations is still NOT atomic at this point -- the dirty and
	 * accessed bits can be lost in the window between the two calls;
	 * atomicity is to be added in a later patch.
	 */
663	rmap_remove(kvm, sptep);
664	__set_spte(sptep, new_spte);
665}
666
661static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte) 667static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
662{ 668{
663 struct kvm_rmap_desc *desc; 669 struct kvm_rmap_desc *desc;
@@ -722,9 +728,9 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
722 BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)); 728 BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
723 pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn); 729 pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
724 if (is_writable_pte(*spte)) { 730 if (is_writable_pte(*spte)) {
725 rmap_remove(kvm, spte); 731 drop_spte(kvm, spte,
732 shadow_trap_nonpresent_pte);
726 --kvm->stat.lpages; 733 --kvm->stat.lpages;
727 __set_spte(spte, shadow_trap_nonpresent_pte);
728 spte = NULL; 734 spte = NULL;
729 write_protected = 1; 735 write_protected = 1;
730 } 736 }
@@ -744,8 +750,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
744 while ((spte = rmap_next(kvm, rmapp, NULL))) { 750 while ((spte = rmap_next(kvm, rmapp, NULL))) {
745 BUG_ON(!(*spte & PT_PRESENT_MASK)); 751 BUG_ON(!(*spte & PT_PRESENT_MASK));
746 rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte); 752 rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
747 rmap_remove(kvm, spte); 753 drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
748 __set_spte(spte, shadow_trap_nonpresent_pte);
749 need_tlb_flush = 1; 754 need_tlb_flush = 1;
750 } 755 }
751 return need_tlb_flush; 756 return need_tlb_flush;
@@ -767,8 +772,7 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
767 rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte); 772 rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
768 need_flush = 1; 773 need_flush = 1;
769 if (pte_write(*ptep)) { 774 if (pte_write(*ptep)) {
770 rmap_remove(kvm, spte); 775 drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
771 __set_spte(spte, shadow_trap_nonpresent_pte);
772 spte = rmap_next(kvm, rmapp, NULL); 776 spte = rmap_next(kvm, rmapp, NULL);
773 } else { 777 } else {
774 new_spte = *spte &~ (PT64_BASE_ADDR_MASK); 778 new_spte = *spte &~ (PT64_BASE_ADDR_MASK);
@@ -1464,7 +1468,8 @@ static void kvm_mmu_page_unlink_children(struct kvm *kvm,
1464 } else { 1468 } else {
1465 if (is_large_pte(ent)) 1469 if (is_large_pte(ent))
1466 --kvm->stat.lpages; 1470 --kvm->stat.lpages;
1467 rmap_remove(kvm, &pt[i]); 1471 drop_spte(kvm, &pt[i],
1472 shadow_trap_nonpresent_pte);
1468 } 1473 }
1469 } 1474 }
1470 pt[i] = shadow_trap_nonpresent_pte; 1475 pt[i] = shadow_trap_nonpresent_pte;
@@ -1868,9 +1873,8 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1868 if (level > PT_PAGE_TABLE_LEVEL && 1873 if (level > PT_PAGE_TABLE_LEVEL &&
1869 has_wrprotected_page(vcpu->kvm, gfn, level)) { 1874 has_wrprotected_page(vcpu->kvm, gfn, level)) {
1870 ret = 1; 1875 ret = 1;
1871 rmap_remove(vcpu->kvm, sptep); 1876 drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
1872 spte = shadow_trap_nonpresent_pte; 1877 goto done;
1873 goto set_pte;
1874 } 1878 }
1875 1879
1876 spte |= PT_WRITABLE_MASK; 1880 spte |= PT_WRITABLE_MASK;
@@ -1902,6 +1906,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1902 1906
1903set_pte: 1907set_pte:
1904 __set_spte(sptep, spte); 1908 __set_spte(sptep, spte);
1909done:
1905 return ret; 1910 return ret;
1906} 1911}
1907 1912
@@ -1938,8 +1943,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1938 } else if (pfn != spte_to_pfn(*sptep)) { 1943 } else if (pfn != spte_to_pfn(*sptep)) {
1939 pgprintk("hfn old %lx new %lx\n", 1944 pgprintk("hfn old %lx new %lx\n",
1940 spte_to_pfn(*sptep), pfn); 1945 spte_to_pfn(*sptep), pfn);
1941 rmap_remove(vcpu->kvm, sptep); 1946 drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
1942 __set_spte(sptep, shadow_trap_nonpresent_pte);
1943 kvm_flush_remote_tlbs(vcpu->kvm); 1947 kvm_flush_remote_tlbs(vcpu->kvm);
1944 } else 1948 } else
1945 was_rmapped = 1; 1949 was_rmapped = 1;
@@ -2591,7 +2595,7 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
2591 pte = *spte; 2595 pte = *spte;
2592 if (is_shadow_present_pte(pte)) { 2596 if (is_shadow_present_pte(pte)) {
2593 if (is_last_spte(pte, sp->role.level)) 2597 if (is_last_spte(pte, sp->role.level))
2594 rmap_remove(vcpu->kvm, spte); 2598 drop_spte(vcpu->kvm, spte, shadow_trap_nonpresent_pte);
2595 else { 2599 else {
2596 child = page_header(pte & PT64_BASE_ADDR_MASK); 2600 child = page_header(pte & PT64_BASE_ADDR_MASK);
2597 mmu_page_remove_parent_pte(child, spte); 2601 mmu_page_remove_parent_pte(child, spte);