diff options
author | Xiao Guangrong <xiaoguangrong@cn.fujitsu.com> | 2011-05-15 11:27:52 -0400 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2011-07-12 04:45:07 -0400 |
commit | 38e3b2b28c5f8fe7914172f4ba631ef4552824d6 (patch) | |
tree | f81e4f6a4fbba9fc0b9cbb873ccda9dec24e59ad /arch/x86/kvm/mmu.c | |
parent | 67052b3508f09956427d6476fd35e8fddde6c618 (diff) |
KVM: MMU: cleanup for kvm_mmu_page_unlink_children
Clean up the operation duplicated between kvm_mmu_page_unlink_children and
mmu_pte_write_zap_pte
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r-- | arch/x86/kvm/mmu.c | 66 |
1 file changed, 23 insertions(+), 43 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 9eaca1c739a6..71eddc4c9810 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -1566,32 +1566,33 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, | |||
1566 | } | 1566 | } |
1567 | } | 1567 | } |
1568 | 1568 | ||
1569 | static void mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, | ||
1570 | u64 *spte) | ||
1571 | { | ||
1572 | u64 pte; | ||
1573 | struct kvm_mmu_page *child; | ||
1574 | |||
1575 | pte = *spte; | ||
1576 | if (is_shadow_present_pte(pte)) { | ||
1577 | if (is_last_spte(pte, sp->role.level)) | ||
1578 | drop_spte(kvm, spte, shadow_trap_nonpresent_pte); | ||
1579 | else { | ||
1580 | child = page_header(pte & PT64_BASE_ADDR_MASK); | ||
1581 | mmu_page_remove_parent_pte(child, spte); | ||
1582 | } | ||
1583 | } | ||
1584 | __set_spte(spte, shadow_trap_nonpresent_pte); | ||
1585 | if (is_large_pte(pte)) | ||
1586 | --kvm->stat.lpages; | ||
1587 | } | ||
1588 | |||
1569 | static void kvm_mmu_page_unlink_children(struct kvm *kvm, | 1589 | static void kvm_mmu_page_unlink_children(struct kvm *kvm, |
1570 | struct kvm_mmu_page *sp) | 1590 | struct kvm_mmu_page *sp) |
1571 | { | 1591 | { |
1572 | unsigned i; | 1592 | unsigned i; |
1573 | u64 *pt; | ||
1574 | u64 ent; | ||
1575 | |||
1576 | pt = sp->spt; | ||
1577 | 1593 | ||
1578 | for (i = 0; i < PT64_ENT_PER_PAGE; ++i) { | 1594 | for (i = 0; i < PT64_ENT_PER_PAGE; ++i) |
1579 | ent = pt[i]; | 1595 | mmu_page_zap_pte(kvm, sp, sp->spt + i); |
1580 | |||
1581 | if (is_shadow_present_pte(ent)) { | ||
1582 | if (!is_last_spte(ent, sp->role.level)) { | ||
1583 | ent &= PT64_BASE_ADDR_MASK; | ||
1584 | mmu_page_remove_parent_pte(page_header(ent), | ||
1585 | &pt[i]); | ||
1586 | } else { | ||
1587 | if (is_large_pte(ent)) | ||
1588 | --kvm->stat.lpages; | ||
1589 | drop_spte(kvm, &pt[i], | ||
1590 | shadow_trap_nonpresent_pte); | ||
1591 | } | ||
1592 | } | ||
1593 | pt[i] = shadow_trap_nonpresent_pte; | ||
1594 | } | ||
1595 | } | 1596 | } |
1596 | 1597 | ||
1597 | static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte) | 1598 | static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte) |
@@ -3069,27 +3070,6 @@ void kvm_mmu_unload(struct kvm_vcpu *vcpu) | |||
3069 | } | 3070 | } |
3070 | EXPORT_SYMBOL_GPL(kvm_mmu_unload); | 3071 | EXPORT_SYMBOL_GPL(kvm_mmu_unload); |
3071 | 3072 | ||
3072 | static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu, | ||
3073 | struct kvm_mmu_page *sp, | ||
3074 | u64 *spte) | ||
3075 | { | ||
3076 | u64 pte; | ||
3077 | struct kvm_mmu_page *child; | ||
3078 | |||
3079 | pte = *spte; | ||
3080 | if (is_shadow_present_pte(pte)) { | ||
3081 | if (is_last_spte(pte, sp->role.level)) | ||
3082 | drop_spte(vcpu->kvm, spte, shadow_trap_nonpresent_pte); | ||
3083 | else { | ||
3084 | child = page_header(pte & PT64_BASE_ADDR_MASK); | ||
3085 | mmu_page_remove_parent_pte(child, spte); | ||
3086 | } | ||
3087 | } | ||
3088 | __set_spte(spte, shadow_trap_nonpresent_pte); | ||
3089 | if (is_large_pte(pte)) | ||
3090 | --vcpu->kvm->stat.lpages; | ||
3091 | } | ||
3092 | |||
3093 | static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu, | 3073 | static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu, |
3094 | struct kvm_mmu_page *sp, u64 *spte, | 3074 | struct kvm_mmu_page *sp, u64 *spte, |
3095 | const void *new) | 3075 | const void *new) |
@@ -3271,7 +3251,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, | |||
3271 | spte = &sp->spt[page_offset / sizeof(*spte)]; | 3251 | spte = &sp->spt[page_offset / sizeof(*spte)]; |
3272 | while (npte--) { | 3252 | while (npte--) { |
3273 | entry = *spte; | 3253 | entry = *spte; |
3274 | mmu_pte_write_zap_pte(vcpu, sp, spte); | 3254 | mmu_page_zap_pte(vcpu->kvm, sp, spte); |
3275 | if (gentry && | 3255 | if (gentry && |
3276 | !((sp->role.word ^ vcpu->arch.mmu.base_role.word) | 3256 | !((sp->role.word ^ vcpu->arch.mmu.base_role.word) |
3277 | & mask.word)) | 3257 | & mask.word)) |