aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kvm/paging_tmpl.h
diff options
context:
space:
mode:
authorXiao Guangrong <xiaoguangrong@cn.fujitsu.com>2010-11-22 22:08:42 -0500
committerAvi Kivity <avi@redhat.com>2011-01-12 04:29:49 -0500
commit407c61c6bd6a51b56d02f8bbad8aadf19db8c7b5 (patch)
tree47644adc7c216a453b215320d05ce4ee73fb3ea0 /arch/x86/kvm/paging_tmpl.h
parenta4a8e6f76ecf963fa7e4d74b3635655a2033a27b (diff)
KVM: MMU: abstract invalid guest pte mapping
Introduce a common function to map an invalid gpte. Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
-rw-r--r--arch/x86/kvm/paging_tmpl.h71
1 file changed, 37 insertions, 34 deletions
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 60f00dbe327a..a43f4ccd30bb 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -299,25 +299,42 @@ static int FNAME(walk_addr_nested)(struct guest_walker *walker,
299 addr, access); 299 addr, access);
300} 300}
301 301
302static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
303 struct kvm_mmu_page *sp, u64 *spte,
304 pt_element_t gpte)
305{
306 u64 nonpresent = shadow_trap_nonpresent_pte;
307
308 if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
309 goto no_present;
310
311 if (!is_present_gpte(gpte)) {
312 if (!sp->unsync)
313 nonpresent = shadow_notrap_nonpresent_pte;
314 goto no_present;
315 }
316
317 if (!(gpte & PT_ACCESSED_MASK))
318 goto no_present;
319
320 return false;
321
322no_present:
323 drop_spte(vcpu->kvm, spte, nonpresent);
324 return true;
325}
326
302static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, 327static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
303 u64 *spte, const void *pte) 328 u64 *spte, const void *pte)
304{ 329{
305 pt_element_t gpte; 330 pt_element_t gpte;
306 unsigned pte_access; 331 unsigned pte_access;
307 pfn_t pfn; 332 pfn_t pfn;
308 u64 new_spte;
309 333
310 gpte = *(const pt_element_t *)pte; 334 gpte = *(const pt_element_t *)pte;
311 if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) { 335 if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
312 if (!is_present_gpte(gpte)) {
313 if (sp->unsync)
314 new_spte = shadow_trap_nonpresent_pte;
315 else
316 new_spte = shadow_notrap_nonpresent_pte;
317 __set_spte(spte, new_spte);
318 }
319 return; 336 return;
320 } 337
321 pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte); 338 pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
322 pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte); 339 pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
323 if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn) 340 if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
@@ -364,7 +381,6 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
364 u64 *sptep) 381 u64 *sptep)
365{ 382{
366 struct kvm_mmu_page *sp; 383 struct kvm_mmu_page *sp;
367 struct kvm_mmu *mmu = &vcpu->arch.mmu;
368 pt_element_t *gptep = gw->prefetch_ptes; 384 pt_element_t *gptep = gw->prefetch_ptes;
369 u64 *spte; 385 u64 *spte;
370 int i; 386 int i;
@@ -395,16 +411,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
395 411
396 gpte = gptep[i]; 412 gpte = gptep[i];
397 413
398 if (is_rsvd_bits_set(mmu, gpte, PT_PAGE_TABLE_LEVEL)) 414 if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
399 continue;
400
401 if (!is_present_gpte(gpte)) {
402 if (!sp->unsync)
403 __set_spte(spte, shadow_notrap_nonpresent_pte);
404 continue;
405 }
406
407 if (!(gpte & PT_ACCESSED_MASK))
408 continue; 415 continue;
409 416
410 pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte); 417 pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
@@ -761,7 +768,6 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
761 pt_element_t gpte; 768 pt_element_t gpte;
762 gpa_t pte_gpa; 769 gpa_t pte_gpa;
763 gfn_t gfn; 770 gfn_t gfn;
764 bool rsvd_bits_set;
765 771
766 if (!is_shadow_present_pte(sp->spt[i])) 772 if (!is_shadow_present_pte(sp->spt[i]))
767 continue; 773 continue;
@@ -773,18 +779,15 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
773 return -EINVAL; 779 return -EINVAL;
774 780
775 gfn = gpte_to_gfn(gpte); 781 gfn = gpte_to_gfn(gpte);
776 rsvd_bits_set = is_rsvd_bits_set(&vcpu->arch.mmu, gpte, 782
777 PT_PAGE_TABLE_LEVEL); 783 if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
778 if (rsvd_bits_set || gfn != sp->gfns[i] || 784 kvm_flush_remote_tlbs(vcpu->kvm);
779 !is_present_gpte(gpte) || !(gpte & PT_ACCESSED_MASK)) { 785 continue;
780 u64 nonpresent; 786 }
781 787
782 if (rsvd_bits_set || is_present_gpte(gpte) || 788 if (gfn != sp->gfns[i]) {
783 sp->unsync) 789 drop_spte(vcpu->kvm, &sp->spt[i],
784 nonpresent = shadow_trap_nonpresent_pte; 790 shadow_trap_nonpresent_pte);
785 else
786 nonpresent = shadow_notrap_nonpresent_pte;
787 drop_spte(vcpu->kvm, &sp->spt[i], nonpresent);
788 kvm_flush_remote_tlbs(vcpu->kvm); 791 kvm_flush_remote_tlbs(vcpu->kvm);
789 continue; 792 continue;
790 } 793 }