author     Avi Kivity <avi@redhat.com>  2010-06-06 07:31:27 -0400
committer  Avi Kivity <avi@redhat.com>  2010-08-01 23:40:17 -0400
commit     be38d276b0189fa86231fc311428622a1981ad62 (patch)
tree       4706819e23ade99c43bb676830071da9bd2d0abd /arch
parent     dd180b3e90253cb4ca95d603a8c17413f8daec69 (diff)
KVM: MMU: Introduce drop_spte()
When we call rmap_remove(), we (almost) always immediately follow it with an __set_spte() to a nonpresent pte. Since we need to perform the two operations atomically, to avoid losing the dirty and accessed bits, introduce a helper drop_spte() and convert all call sites.

The operation is still nonatomic at this point.

Signed-off-by: Avi Kivity <avi@redhat.com>
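For illustration, a minimal sketch of the helper follows, together with a hypothetical atomic variant. The atomic variant is not part of this patch: xchg() is used to capture the old spte in a single step, and rmap_remove_spte() is an assumed helper that unlinks the rmap entry from the captured value (the real rmap_remove() re-reads *sptep, so it could not be called after the exchange).

/*
 * The nonatomic helper this patch introduces: two separate writes,
 * so accessed/dirty bits the CPU sets in *sptep between the rmap
 * removal and the store can be lost.
 */
static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
{
	rmap_remove(kvm, sptep);
	__set_spte(sptep, new_spte);
}

/*
 * Hypothetical atomic variant, NOT part of this patch: a single
 * xchg() captures the old spte, including any accessed/dirty bits
 * set by hardware in the meantime, before the slot is torn down.
 * rmap_remove_spte() is an assumed helper keyed off the captured
 * old value rather than *sptep.
 */
static void drop_spte_atomic(struct kvm *kvm, u64 *sptep, u64 new_spte)
{
	u64 old_spte = xchg(sptep, new_spte);	/* atomic read-modify-write */

	if (!is_shadow_present_pte(old_spte))
		return;
	if (old_spte & shadow_dirty_mask)	/* dirty bit set by hardware */
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
	rmap_remove_spte(kvm, sptep, old_spte);	/* hypothetical helper */
}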
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kvm/mmu.c         | 30
-rw-r--r--  arch/x86/kvm/paging_tmpl.h | 13
2 files changed, 23 insertions(+), 20 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 70cdf6876b5f..1ad39cf70e18 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -658,6 +658,12 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 	}
 }
 
+static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
+{
+	rmap_remove(kvm, sptep);
+	__set_spte(sptep, new_spte);
+}
+
 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
 {
 	struct kvm_rmap_desc *desc;
@@ -722,9 +728,9 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 			BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
 			pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
 			if (is_writable_pte(*spte)) {
-				rmap_remove(kvm, spte);
+				drop_spte(kvm, spte,
+					  shadow_trap_nonpresent_pte);
 				--kvm->stat.lpages;
-				__set_spte(spte, shadow_trap_nonpresent_pte);
 				spte = NULL;
 				write_protected = 1;
 			}
@@ -744,8 +750,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
 	while ((spte = rmap_next(kvm, rmapp, NULL))) {
 		BUG_ON(!(*spte & PT_PRESENT_MASK));
 		rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
-		rmap_remove(kvm, spte);
-		__set_spte(spte, shadow_trap_nonpresent_pte);
+		drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
 		need_tlb_flush = 1;
 	}
 	return need_tlb_flush;
@@ -767,8 +772,7 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
 		rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
 		need_flush = 1;
 		if (pte_write(*ptep)) {
-			rmap_remove(kvm, spte);
-			__set_spte(spte, shadow_trap_nonpresent_pte);
+			drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
 			spte = rmap_next(kvm, rmapp, NULL);
 		} else {
 			new_spte = *spte &~ (PT64_BASE_ADDR_MASK);
@@ -1464,7 +1468,8 @@ static void kvm_mmu_page_unlink_children(struct kvm *kvm,
 			} else {
 				if (is_large_pte(ent))
 					--kvm->stat.lpages;
-				rmap_remove(kvm, &pt[i]);
+				drop_spte(kvm, &pt[i],
+					  shadow_trap_nonpresent_pte);
 			}
 		}
 		pt[i] = shadow_trap_nonpresent_pte;
@@ -1868,9 +1873,8 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		if (level > PT_PAGE_TABLE_LEVEL &&
 		    has_wrprotected_page(vcpu->kvm, gfn, level)) {
 			ret = 1;
-			rmap_remove(vcpu->kvm, sptep);
-			spte = shadow_trap_nonpresent_pte;
-			goto set_pte;
+			drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
+			goto done;
 		}
 
 		spte |= PT_WRITABLE_MASK;
@@ -1902,6 +1906,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 
 set_pte:
 	__set_spte(sptep, spte);
+done:
 	return ret;
 }
 
@@ -1938,8 +1943,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		} else if (pfn != spte_to_pfn(*sptep)) {
 			pgprintk("hfn old %lx new %lx\n",
 				 spte_to_pfn(*sptep), pfn);
-			rmap_remove(vcpu->kvm, sptep);
-			__set_spte(sptep, shadow_trap_nonpresent_pte);
+			drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
 			kvm_flush_remote_tlbs(vcpu->kvm);
 		} else
 			was_rmapped = 1;
@@ -2591,7 +2595,7 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
 	pte = *spte;
 	if (is_shadow_present_pte(pte)) {
 		if (is_last_spte(pte, sp->role.level))
-			rmap_remove(vcpu->kvm, spte);
+			drop_spte(vcpu->kvm, spte, shadow_trap_nonpresent_pte);
 		else {
 			child = page_header(pte & PT64_BASE_ADDR_MASK);
 			mmu_page_remove_parent_pte(child, spte);
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 59e750c1a269..796a325c7e59 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -353,8 +353,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 		}
 
 		if (is_large_pte(*sptep)) {
-			rmap_remove(vcpu->kvm, sptep);
-			__set_spte(sptep, shadow_trap_nonpresent_pte);
+			drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
 			kvm_flush_remote_tlbs(vcpu->kvm);
 		}
 
@@ -516,12 +515,13 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 		pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
 		if (is_shadow_present_pte(*sptep)) {
-			rmap_remove(vcpu->kvm, sptep);
 			if (is_large_pte(*sptep))
 				--vcpu->kvm->stat.lpages;
+			drop_spte(vcpu->kvm, sptep,
+				  shadow_trap_nonpresent_pte);
 			need_flush = 1;
-		}
-		__set_spte(sptep, shadow_trap_nonpresent_pte);
+		} else
+			__set_spte(sptep, shadow_trap_nonpresent_pte);
 		break;
 	}
 
@@ -637,12 +637,11 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		    !is_present_gpte(gpte) || !(gpte & PT_ACCESSED_MASK)) {
 			u64 nonpresent;
 
-			rmap_remove(vcpu->kvm, &sp->spt[i]);
 			if (is_present_gpte(gpte) || !clear_unsync)
 				nonpresent = shadow_trap_nonpresent_pte;
 			else
 				nonpresent = shadow_notrap_nonpresent_pte;
-			__set_spte(&sp->spt[i], nonpresent);
+			drop_spte(vcpu->kvm, &sp->spt[i], nonpresent);
 			continue;
 		}
 