author		Paolo Bonzini <pbonzini@redhat.com>	2019-06-24 07:06:21 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>	2019-07-05 07:48:46 -0400
commit		3fcf2d1bdeb6a513523cb2c77012a6b047aa859c (patch)
tree		e47b758e480016d9d4bc4b69409e3ee4342a3207 /arch/x86
parent		43fdcda96e2550c6d1c46fb8a78801aa2f7276ed (diff)
KVM: x86: make FNAME(fetch) and __direct_map more similar
These two functions are basically doing the same thing through
kvm_mmu_get_page, link_shadow_page and mmu_set_spte; yet, for historical
reasons, their code looks very different.  This patch tries to take the
best of each and make them very similar, so that it is easy to understand
changes that apply to both of them.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
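As a rough illustration (not code from the patch itself), the common shape
that both __direct_map() and FNAME(fetch) converge on after this change is
sketched below; "target_level" and "access" are hypothetical stand-ins for
the callers' level/hlevel arguments and ACC_ALL/direct_access bits:

	/*
	 * Illustrative sketch only: the walk structure shared by
	 * __direct_map() and FNAME(fetch) after unification.
	 * "target_level" and "access" are hypothetical placeholders.
	 */
	for_each_shadow_entry(vcpu, gpa, it) {
		/* gfn of the first page covered by this level's entry */
		base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
		if (it.level == target_level)
			break;

		drop_large_spte(vcpu, it.sptep);
		if (!is_shadow_present_pte(*it.sptep)) {
			/* allocate and link a missing intermediate table */
			sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr,
					      it.level - 1, true, access);
			link_shadow_page(vcpu, it.sptep, sp);
		}
	}
	/* install the leaf SPTE once, after the walk */
	ret = mmu_set_spte(vcpu, it.sptep, access, write, target_level,
			   base_gfn, pfn, prefault, map_writable);
	++vcpu->stat.pf_fixed;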
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kvm/mmu.c		53
-rw-r--r--	arch/x86/kvm/paging_tmpl.h	30
2 files changed, 39 insertions(+), 44 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 6fc5c389f5a1..af9dafa54f85 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3181,40 +3181,39 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
 	__direct_pte_prefetch(vcpu, sp, sptep);
 }
 
-static int __direct_map(struct kvm_vcpu *vcpu, int write, int map_writable,
-			int level, gfn_t gfn, kvm_pfn_t pfn, bool prefault)
+static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
+			int map_writable, int level, kvm_pfn_t pfn,
+			bool prefault)
 {
-	struct kvm_shadow_walk_iterator iterator;
+	struct kvm_shadow_walk_iterator it;
 	struct kvm_mmu_page *sp;
-	int emulate = 0;
-	gfn_t pseudo_gfn;
+	int ret;
+	gfn_t gfn = gpa >> PAGE_SHIFT;
+	gfn_t base_gfn = gfn;
 
 	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
-		return 0;
+		return RET_PF_RETRY;
 
-	for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
-		if (iterator.level == level) {
-			emulate = mmu_set_spte(vcpu, iterator.sptep, ACC_ALL,
-					       write, level, gfn, pfn, prefault,
-					       map_writable);
-			direct_pte_prefetch(vcpu, iterator.sptep);
-			++vcpu->stat.pf_fixed;
+	for_each_shadow_entry(vcpu, gpa, it) {
+		base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
+		if (it.level == level)
 			break;
-		}
 
-		drop_large_spte(vcpu, iterator.sptep);
-		if (!is_shadow_present_pte(*iterator.sptep)) {
-			u64 base_addr = iterator.addr;
+		drop_large_spte(vcpu, it.sptep);
+		if (!is_shadow_present_pte(*it.sptep)) {
+			sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr,
+					      it.level - 1, true, ACC_ALL);
 
-			base_addr &= PT64_LVL_ADDR_MASK(iterator.level);
-			pseudo_gfn = base_addr >> PAGE_SHIFT;
-			sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
-					      iterator.level - 1, 1, ACC_ALL);
-
-			link_shadow_page(vcpu, iterator.sptep, sp);
+			link_shadow_page(vcpu, it.sptep, sp);
 		}
 	}
-	return emulate;
+
+	ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL,
+			   write, level, base_gfn, pfn, prefault,
+			   map_writable);
+	direct_pte_prefetch(vcpu, it.sptep);
+	++vcpu->stat.pf_fixed;
+	return ret;
 }
 
 static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
@@ -3538,8 +3537,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
 		goto out_unlock;
 	if (likely(!force_pt_level))
 		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
-	r = __direct_map(vcpu, write, map_writable, level, gfn, pfn, prefault);
-
+	r = __direct_map(vcpu, v, write, map_writable, level, pfn, prefault);
 out_unlock:
 	spin_unlock(&vcpu->kvm->mmu_lock);
 	kvm_release_pfn_clean(pfn);
@@ -4165,8 +4163,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 		goto out_unlock;
 	if (likely(!force_pt_level))
 		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
-	r = __direct_map(vcpu, write, map_writable, level, gfn, pfn, prefault);
-
+	r = __direct_map(vcpu, gpa, write, map_writable, level, pfn, prefault);
 out_unlock:
 	spin_unlock(&vcpu->kvm->mmu_lock);
 	kvm_release_pfn_clean(pfn);
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 2db96401178e..bfd89966832b 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -623,6 +623,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	struct kvm_shadow_walk_iterator it;
 	unsigned direct_access, access = gw->pt_access;
 	int top_level, ret;
+	gfn_t base_gfn;
 
 	direct_access = gw->pte_access;
 
@@ -667,31 +668,29 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 		link_shadow_page(vcpu, it.sptep, sp);
 	}
 
-	for (;
-	     shadow_walk_okay(&it) && it.level > hlevel;
-	     shadow_walk_next(&it)) {
-		gfn_t direct_gfn;
+	base_gfn = gw->gfn;
 
+	for (; shadow_walk_okay(&it); shadow_walk_next(&it)) {
 		clear_sp_write_flooding_count(it.sptep);
+		base_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
+		if (it.level == hlevel)
+			break;
+
 		validate_direct_spte(vcpu, it.sptep, direct_access);
 
 		drop_large_spte(vcpu, it.sptep);
 
-		if (is_shadow_present_pte(*it.sptep))
-			continue;
-
-		direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
-
-		sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
-				      true, direct_access);
-		link_shadow_page(vcpu, it.sptep, sp);
+		if (!is_shadow_present_pte(*it.sptep)) {
+			sp = kvm_mmu_get_page(vcpu, base_gfn, addr,
+					      it.level - 1, true, direct_access);
+			link_shadow_page(vcpu, it.sptep, sp);
+		}
 	}
 
-	clear_sp_write_flooding_count(it.sptep);
 	ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault,
-			   it.level, gw->gfn, pfn, prefault, map_writable);
+			   it.level, base_gfn, pfn, prefault, map_writable);
 	FNAME(pte_prefetch)(vcpu, gw, it.sptep);
-
+	++vcpu->stat.pf_fixed;
 	return ret;
 
 out_gpte_changed:
@@ -854,7 +853,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 		transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
 	r = FNAME(fetch)(vcpu, addr, &walker, write_fault,
 			 level, pfn, map_writable, prefault);
-	++vcpu->stat.pf_fixed;
 	kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
 
 out_unlock: