about summary refs log tree commit diff stats
path: root/arch/x86
diff options
context:
space:
mode:
authorXiao Guangrong <xiaoguangrong@cn.fujitsu.com>2011-09-22 04:56:06 -0400
committerAvi Kivity <avi@redhat.com>2011-12-27 04:16:54 -0500
commit505aef8f30a95f7e4abf2c07e54ded1521587ba0 (patch)
tree47b4c515c5782cbb3428437b4c7c820a7956c312 /arch/x86
parentd01f8d5e02cc79998e3160f7ad545f77891b00e5 (diff)
KVM: MMU: cleanup FNAME(invlpg)
Directly use mmu_page_zap_pte to zap the spte in FNAME(invlpg); this also removes the code duplicated between FNAME(invlpg) and FNAME(sync_page). Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com> Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/kvm/mmu.c16
-rw-r--r--arch/x86/kvm/paging_tmpl.h44
2 files changed, 27 insertions, 33 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b432a71a1839..d15f908649e7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1809,7 +1809,7 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1809 } 1809 }
1810} 1810}
1811 1811
1812static void mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, 1812static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
1813 u64 *spte) 1813 u64 *spte)
1814{ 1814{
1815 u64 pte; 1815 u64 pte;
@@ -1817,17 +1817,21 @@ static void mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
1817 1817
1818 pte = *spte; 1818 pte = *spte;
1819 if (is_shadow_present_pte(pte)) { 1819 if (is_shadow_present_pte(pte)) {
1820 if (is_last_spte(pte, sp->role.level)) 1820 if (is_last_spte(pte, sp->role.level)) {
1821 drop_spte(kvm, spte); 1821 drop_spte(kvm, spte);
1822 else { 1822 if (is_large_pte(pte))
1823 --kvm->stat.lpages;
1824 } else {
1823 child = page_header(pte & PT64_BASE_ADDR_MASK); 1825 child = page_header(pte & PT64_BASE_ADDR_MASK);
1824 drop_parent_pte(child, spte); 1826 drop_parent_pte(child, spte);
1825 } 1827 }
1826 } else if (is_mmio_spte(pte)) 1828 return true;
1829 }
1830
1831 if (is_mmio_spte(pte))
1827 mmu_spte_clear_no_track(spte); 1832 mmu_spte_clear_no_track(spte);
1828 1833
1829 if (is_large_pte(pte)) 1834 return false;
1830 --kvm->stat.lpages;
1831} 1835}
1832 1836
1833static void kvm_mmu_page_unlink_children(struct kvm *kvm, 1837static void kvm_mmu_page_unlink_children(struct kvm *kvm,
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 92994100638b..d8d3906649da 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -656,6 +656,18 @@ out_unlock:
656 return 0; 656 return 0;
657} 657}
658 658
659static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
660{
661 int offset = 0;
662
663 WARN_ON(sp->role.level != 1);
664
665 if (PTTYPE == 32)
666 offset = sp->role.quadrant << PT64_LEVEL_BITS;
667
668 return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
669}
670
659static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) 671static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
660{ 672{
661 struct kvm_shadow_walk_iterator iterator; 673 struct kvm_shadow_walk_iterator iterator;
@@ -663,7 +675,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
663 gpa_t pte_gpa = -1; 675 gpa_t pte_gpa = -1;
664 int level; 676 int level;
665 u64 *sptep; 677 u64 *sptep;
666 int need_flush = 0;
667 678
668 vcpu_clear_mmio_info(vcpu, gva); 679 vcpu_clear_mmio_info(vcpu, gva);
669 680
@@ -675,36 +686,20 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
675 686
676 sp = page_header(__pa(sptep)); 687 sp = page_header(__pa(sptep));
677 if (is_last_spte(*sptep, level)) { 688 if (is_last_spte(*sptep, level)) {
678 int offset, shift;
679
680 if (!sp->unsync) 689 if (!sp->unsync)
681 break; 690 break;
682 691
683 shift = PAGE_SHIFT - 692 pte_gpa = FNAME(get_level1_sp_gpa)(sp);
684 (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
685 offset = sp->role.quadrant << shift;
686
687 pte_gpa = (sp->gfn << PAGE_SHIFT) + offset;
688 pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t); 693 pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
689 694
690 if (is_shadow_present_pte(*sptep)) { 695 if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
691 if (is_large_pte(*sptep)) 696 kvm_flush_remote_tlbs(vcpu->kvm);
692 --vcpu->kvm->stat.lpages;
693 drop_spte(vcpu->kvm, sptep);
694 need_flush = 1;
695 } else if (is_mmio_spte(*sptep))
696 mmu_spte_clear_no_track(sptep);
697
698 break;
699 } 697 }
700 698
701 if (!is_shadow_present_pte(*sptep) || !sp->unsync_children) 699 if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
702 break; 700 break;
703 } 701 }
704 702
705 if (need_flush)
706 kvm_flush_remote_tlbs(vcpu->kvm);
707
708 atomic_inc(&vcpu->kvm->arch.invlpg_counter); 703 atomic_inc(&vcpu->kvm->arch.invlpg_counter);
709 704
710 spin_unlock(&vcpu->kvm->mmu_lock); 705 spin_unlock(&vcpu->kvm->mmu_lock);
@@ -769,19 +764,14 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
769 */ 764 */
770static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) 765static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
771{ 766{
772 int i, offset, nr_present; 767 int i, nr_present = 0;
773 bool host_writable; 768 bool host_writable;
774 gpa_t first_pte_gpa; 769 gpa_t first_pte_gpa;
775 770
776 offset = nr_present = 0;
777
778 /* direct kvm_mmu_page can not be unsync. */ 771 /* direct kvm_mmu_page can not be unsync. */
779 BUG_ON(sp->role.direct); 772 BUG_ON(sp->role.direct);
780 773
781 if (PTTYPE == 32) 774 first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
782 offset = sp->role.quadrant << PT64_LEVEL_BITS;
783
784 first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
785 775
786 for (i = 0; i < PT64_ENT_PER_PAGE; i++) { 776 for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
787 unsigned pte_access; 777 unsigned pte_access;