about summary refs log tree commit diff stats
path: root/arch/x86/kvm/mmu.c
diff options
context:
space:
mode:
authorAvi Kivity <avi@qumranet.com>2008-07-11 11:07:26 -0400
committerAvi Kivity <avi@qumranet.com>2008-10-15 04:15:12 -0400
commit5b5c6a5a60801effb559e787a947885d9850a7da (patch)
treeceb868a5108fa6adf4d49b47cf84b1257b46799d /arch/x86/kvm/mmu.c
parent31aa2b44afd5e73365221b1de66f6081e4616f33 (diff)
KVM: MMU: Simplify kvm_mmu_zap_page()
The twisty maze of conditionals can be reduced. [joerg: fix tlb flushing] Signed-off-by: Joerg Roedel <joerg.roedel@amd.com> Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r-- arch/x86/kvm/mmu.c 14
1 file changed, 5 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 81016a3a6fd3..c3afbfe6b0c1 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -955,7 +955,6 @@ static void kvm_mmu_page_unlink_children(struct kvm *kvm,
955 rmap_remove(kvm, &pt[i]); 955 rmap_remove(kvm, &pt[i]);
956 pt[i] = shadow_trap_nonpresent_pte; 956 pt[i] = shadow_trap_nonpresent_pte;
957 } 957 }
958 kvm_flush_remote_tlbs(kvm);
959 return; 958 return;
960 } 959 }
961 960
@@ -974,7 +973,6 @@ static void kvm_mmu_page_unlink_children(struct kvm *kvm,
974 } 973 }
975 pt[i] = shadow_trap_nonpresent_pte; 974 pt[i] = shadow_trap_nonpresent_pte;
976 } 975 }
977 kvm_flush_remote_tlbs(kvm);
978} 976}
979 977
980static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte) 978static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
@@ -1016,18 +1014,16 @@ static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1016 ++kvm->stat.mmu_shadow_zapped; 1014 ++kvm->stat.mmu_shadow_zapped;
1017 kvm_mmu_page_unlink_children(kvm, sp); 1015 kvm_mmu_page_unlink_children(kvm, sp);
1018 kvm_mmu_unlink_parents(kvm, sp); 1016 kvm_mmu_unlink_parents(kvm, sp);
1017 kvm_flush_remote_tlbs(kvm);
1018 if (!sp->role.invalid && !sp->role.metaphysical)
1019 unaccount_shadowed(kvm, sp->gfn);
1019 if (!sp->root_count) { 1020 if (!sp->root_count) {
1020 if (!sp->role.metaphysical && !sp->role.invalid)
1021 unaccount_shadowed(kvm, sp->gfn);
1022 hlist_del(&sp->hash_link); 1021 hlist_del(&sp->hash_link);
1023 kvm_mmu_free_page(kvm, sp); 1022 kvm_mmu_free_page(kvm, sp);
1024 } else { 1023 } else {
1025 int invalid = sp->role.invalid;
1026 list_move(&sp->link, &kvm->arch.active_mmu_pages);
1027 sp->role.invalid = 1; 1024 sp->role.invalid = 1;
1025 list_move(&sp->link, &kvm->arch.active_mmu_pages);
1028 kvm_reload_remote_mmus(kvm); 1026 kvm_reload_remote_mmus(kvm);
1029 if (!sp->role.metaphysical && !invalid)
1030 unaccount_shadowed(kvm, sp->gfn);
1031 } 1027 }
1032 kvm_mmu_reset_last_pte_updated(kvm); 1028 kvm_mmu_reset_last_pte_updated(kvm);
1033} 1029}
@@ -1842,7 +1838,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1842 index = kvm_page_table_hashfn(gfn); 1838 index = kvm_page_table_hashfn(gfn);
1843 bucket = &vcpu->kvm->arch.mmu_page_hash[index]; 1839 bucket = &vcpu->kvm->arch.mmu_page_hash[index];
1844 hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) { 1840 hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
1845 if (sp->gfn != gfn || sp->role.metaphysical) 1841 if (sp->gfn != gfn || sp->role.metaphysical || sp->role.invalid)
1846 continue; 1842 continue;
1847 pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8; 1843 pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
1848 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1); 1844 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);