Diffstat (limited to 'arch/x86/kvm/mmu.c'):

 arch/x86/kvm/mmu.c | 24 +++++++++++-------------
 1 file changed, 11 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 36c5406b1813..7e7c3969f7a2 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -640,6 +640,7 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
 			rmap_remove(kvm, spte);
 			--kvm->stat.lpages;
 			set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+			spte = NULL;
 			write_protected = 1;
 		}
 		spte = rmap_next(kvm, rmapp, spte);
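
This hunk fixes an iterator invalidation: rmap_remove() unlinks the spte
from the rmap chain, so the just-removed entry must not be passed back to
rmap_next() as the cursor. Resetting the cursor to NULL restarts the walk
from the head of the chain. A minimal sketch of the loop shape, assuming
rmap_next() returns the first spte when called with NULL and the successor
otherwise (simplified from the surrounding function, not quoted from it):

	u64 *spte = rmap_next(kvm, rmapp, NULL);

	while (spte) {
		if (is_writeble_pte(*spte)) {
			rmap_remove(kvm, spte);	/* unlinks spte from the chain */
			--kvm->stat.lpages;
			set_shadow_pte(spte, shadow_trap_nonpresent_pte);
			spte = NULL;		/* cursor is stale: restart walk */
			write_protected = 1;
		}
		spte = rmap_next(kvm, rmapp, spte);
	}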
@@ -658,7 +659,7 @@ static int is_empty_shadow_page(u64 *spt)
 	u64 *end;
 
 	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
-		if (*pos != shadow_trap_nonpresent_pte) {
+		if (is_shadow_present_pte(*pos)) {
 			printk(KERN_ERR "%s: %p %llx\n", __func__,
 			       pos, *pos);
 			return 0;
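
Comparing only against shadow_trap_nonpresent_pte misses the second
"empty" encoding: an entry may also hold shadow_notrap_nonpresent_pte,
which would make a genuinely empty shadow page fail the check. Switching
to is_shadow_present_pte() treats both nonpresent encodings as empty. The
helper, roughly as it was defined elsewhere in this file at the time (a
sketch from context, not part of this patch):

	static int is_shadow_present_pte(u64 pte)
	{
		return pte != shadow_trap_nonpresent_pte
			&& pte != shadow_notrap_nonpresent_pte;
	}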
@@ -1082,10 +1083,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		struct kvm_mmu_page *shadow;
 
 		spte |= PT_WRITABLE_MASK;
-		if (user_fault) {
-			mmu_unshadow(vcpu->kvm, gfn);
-			goto unshadowed;
-		}
 
 		shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
 		if (shadow ||
@@ -1102,8 +1099,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		}
 	}
 
-unshadowed:
-
 	if (pte_access & ACC_WRITE_MASK)
 		mark_page_dirty(vcpu->kvm, gfn);
 
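
These two hunks are one logical change: the user-mode shortcut that
unshadowed the gfn and skipped the write-protection check is dropped, so
every write fault now falls through to the kvm_mmu_lookup_page() path.
With the only "goto unshadowed" gone, the label is dead and is removed
with it.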
@@ -1580,11 +1575,13 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
 				  u64 *spte,
 				  const void *new)
 {
-	if ((sp->role.level != PT_PAGE_TABLE_LEVEL)
-	    && !vcpu->arch.update_pte.largepage) {
-		++vcpu->kvm->stat.mmu_pde_zapped;
-		return;
+	if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
+		if (!vcpu->arch.update_pte.largepage ||
+		    sp->role.glevels == PT32_ROOT_LEVEL) {
+			++vcpu->kvm->stat.mmu_pde_zapped;
+			return;
+		}
 	}
 
 	++vcpu->kvm->stat.mmu_pte_updated;
 	if (sp->role.glevels == PT32_ROOT_LEVEL)
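
The old condition zapped any non-leaf entry unless the update came from a
large-page pte. The new form adds an exception for 32-bit guests: when
sp->role.glevels == PT32_ROOT_LEVEL the entry is zapped even for a
large-page update, presumably because a non-PAE guest's 4 MB large pages
do not map onto the shadow's 2 MB large pages, so the update cannot be
cached in place. Restated as a predicate (a hypothetical helper for
clarity only, not part of the patch):

	static bool pde_update_must_zap(struct kvm_vcpu *vcpu,
					struct kvm_mmu_page *sp)
	{
		return sp->role.level != PT_PAGE_TABLE_LEVEL &&
		       (!vcpu->arch.update_pte.largepage ||
			sp->role.glevels == PT32_ROOT_LEVEL);
	}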
@@ -1858,6 +1855,7 @@ static void free_mmu_pages(struct kvm_vcpu *vcpu)
 		sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
 				  struct kvm_mmu_page, link);
 		kvm_mmu_zap_page(vcpu->kvm, sp);
+		cond_resched();
 	}
 	free_page((unsigned long)vcpu->arch.mmu.pae_root);
 }
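
Tearing down a vcpu can mean zapping every shadow page a large guest has
accumulated, and doing that in a tight loop can monopolize the CPU for a
long stretch. The added cond_resched() gives the scheduler a preemption
point on each iteration.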
@@ -1996,7 +1994,7 @@ static struct shrinker mmu_shrinker = {
 	.seeks = DEFAULT_SEEKS * 10,
 };
 
-void mmu_destroy_caches(void)
+static void mmu_destroy_caches(void)
 {
 	if (pte_chain_cache)
 		kmem_cache_destroy(pte_chain_cache);
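
mmu_destroy_caches() evidently has no users outside this file (the change
would not link otherwise), so marking it static confines the symbol to
mmu.c and lets the compiler warn if it ever becomes unused.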