author    Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>  2011-09-22 04:56:06 -0400
committer Avi Kivity <avi@redhat.com>                    2011-12-27 04:16:54 -0500
commit    505aef8f30a95f7e4abf2c07e54ded1521587ba0
tree      47b4c515c5782cbb3428437b4c7c820a7956c312
parent    d01f8d5e02cc79998e3160f7ad545f77891b00e5
KVM: MMU: cleanup FNAME(invlpg)
Directly use mmu_page_zap_pte to zap the spte in FNAME(invlpg), and remove
the code duplicated between FNAME(invlpg) and FNAME(sync_page).
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
 -rw-r--r--  arch/x86/kvm/paging_tmpl.h | 44
 1 file changed, 17 insertions(+), 27 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 92994100638b..d8d3906649da 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -656,6 +656,18 @@ out_unlock:
 	return 0;
 }
 
+static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
+{
+	int offset = 0;
+
+	WARN_ON(sp->role.level != 1);
+
+	if (PTTYPE == 32)
+		offset = sp->role.quadrant << PT64_LEVEL_BITS;
+
+	return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
+}
+
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
 	struct kvm_shadow_walk_iterator iterator;
@@ -663,7 +675,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 	gpa_t pte_gpa = -1;
 	int level;
 	u64 *sptep;
-	int need_flush = 0;
 
 	vcpu_clear_mmio_info(vcpu, gva);
 
@@ -675,36 +686,20 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 
 		sp = page_header(__pa(sptep));
 		if (is_last_spte(*sptep, level)) {
-			int offset, shift;
-
 			if (!sp->unsync)
 				break;
 
-			shift = PAGE_SHIFT -
-				  (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
-			offset = sp->role.quadrant << shift;
-
-			pte_gpa = (sp->gfn << PAGE_SHIFT) + offset;
+			pte_gpa = FNAME(get_level1_sp_gpa)(sp);
 			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
-			if (is_shadow_present_pte(*sptep)) {
-				if (is_large_pte(*sptep))
-					--vcpu->kvm->stat.lpages;
-				drop_spte(vcpu->kvm, sptep);
-				need_flush = 1;
-			} else if (is_mmio_spte(*sptep))
-				mmu_spte_clear_no_track(sptep);
-
-			break;
+			if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
+				kvm_flush_remote_tlbs(vcpu->kvm);
 		}
 
 		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
 			break;
 	}
 
-	if (need_flush)
-		kvm_flush_remote_tlbs(vcpu->kvm);
-
 	atomic_inc(&vcpu->kvm->arch.invlpg_counter);
 
 	spin_unlock(&vcpu->kvm->mmu_lock);
@@ -769,19 +764,14 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
  */
 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
-	int i, offset, nr_present;
+	int i, nr_present = 0;
 	bool host_writable;
 	gpa_t first_pte_gpa;
 
-	offset = nr_present = 0;
-
 	/* direct kvm_mmu_page can not be unsync. */
 	BUG_ON(sp->role.direct);
 
-	if (PTTYPE == 32)
-		offset = sp->role.quadrant << PT64_LEVEL_BITS;
-
-	first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
+	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
 
 	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
 		unsigned pte_access;
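
For context on what the new FNAME(get_level1_sp_gpa) helper computes, here is a minimal standalone sketch (an illustration, not part of the patch): with 32-bit guest paging a guest page table holds 1024 4-byte entries while a level-1 shadow page holds 512 8-byte sptes, so each such shadow page maps only half of the guest table; sp->role.quadrant records which half, and the helper turns that into a byte offset within the guest page. The constants and function names below are hypothetical stand-ins for PT64_LEVEL_BITS (9) and sizeof(pt_element_t) (4 for PTTYPE == 32).

/*
 * Illustration only: simplified, self-contained version of the
 * quadrant-offset arithmetic done by FNAME(get_level1_sp_gpa)
 * when PTTYPE == 32.
 */
#include <stdio.h>

static unsigned long level1_quadrant_offset(unsigned int quadrant)
{
	/* quadrant (0 or 1) selects which 512-entry half of the
	 * 1024-entry guest page table this shadow page covers */
	return (quadrant << 9) * 4;	/* 0 or 2048 bytes into the page */
}

int main(void)
{
	printf("quadrant 0 -> %lu bytes, quadrant 1 -> %lu bytes\n",
	       level1_quadrant_offset(0), level1_quadrant_offset(1));
	return 0;
}

The resulting offset is added to gfn_to_gpa(sp->gfn) in the real helper, giving the guest-physical address of the first guest PTE shadowed by this page; both FNAME(invlpg) and FNAME(sync_page) now share that computation instead of open-coding it.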