diff options
author:    Marcelo Tosatti <mtosatti@redhat.com>  2008-09-23 12:18:33 -0400
committer: Avi Kivity <avi@redhat.com>  2008-10-15 08:25:19 -0400
commit:    e8bc217aef67d41d767ede6e7a7eb10f1d47c86c (patch)
tree:      f58c99dbf212d25d2fa8145ce46bc7ed5904cb07 /arch/x86/kvm/paging_tmpl.h
parent:    38187c830cab84daecb41169948467f1f19317e3 (diff)
KVM: MMU: mode specific sync_page
Examine guest pagetable and bring the shadow back in sync. Caller is responsible
for local TLB flush before re-entering guest mode.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
-rw-r--r--  arch/x86/kvm/paging_tmpl.h | 54 ++++++++++++++++++++++++++++++++++
1 file changed, 54 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index e9fbaa44d44..776fb6d2fd8 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -507,6 +507,60 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
507 | } | 507 | } |
508 | } | 508 | } |
509 | 509 | ||
510 | /* | ||
511 | * Using the cached information from sp->gfns is safe because: | ||
512 | * - The spte has a reference to the struct page, so the pfn for a given gfn | ||
513 | * can't change unless all sptes pointing to it are nuked first. | ||
514 | * - Alias changes zap the entire shadow cache. | ||
515 | */ | ||
516 | static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) | ||
517 | { | ||
518 | int i, offset, nr_present; | ||
519 | |||
520 | offset = nr_present = 0; | ||
521 | |||
522 | if (PTTYPE == 32) | ||
523 | offset = sp->role.quadrant << PT64_LEVEL_BITS; | ||
524 | |||
525 | for (i = 0; i < PT64_ENT_PER_PAGE; i++) { | ||
526 | unsigned pte_access; | ||
527 | pt_element_t gpte; | ||
528 | gpa_t pte_gpa; | ||
529 | gfn_t gfn = sp->gfns[i]; | ||
530 | |||
531 | if (!is_shadow_present_pte(sp->spt[i])) | ||
532 | continue; | ||
533 | |||
534 | pte_gpa = gfn_to_gpa(sp->gfn); | ||
535 | pte_gpa += (i+offset) * sizeof(pt_element_t); | ||
536 | |||
537 | if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte, | ||
538 | sizeof(pt_element_t))) | ||
539 | return -EINVAL; | ||
540 | |||
541 | if (gpte_to_gfn(gpte) != gfn || !is_present_pte(gpte) || | ||
542 | !(gpte & PT_ACCESSED_MASK)) { | ||
543 | u64 nonpresent; | ||
544 | |||
545 | rmap_remove(vcpu->kvm, &sp->spt[i]); | ||
546 | if (is_present_pte(gpte)) | ||
547 | nonpresent = shadow_trap_nonpresent_pte; | ||
548 | else | ||
549 | nonpresent = shadow_notrap_nonpresent_pte; | ||
550 | set_shadow_pte(&sp->spt[i], nonpresent); | ||
551 | continue; | ||
552 | } | ||
553 | |||
554 | nr_present++; | ||
555 | pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte); | ||
556 | set_spte(vcpu, &sp->spt[i], pte_access, 0, 0, | ||
557 | is_dirty_pte(gpte), 0, gfn, | ||
558 | spte_to_pfn(sp->spt[i]), true); | ||
559 | } | ||
560 | |||
561 | return !nr_present; | ||
562 | } | ||
563 | |||
510 | #undef pt_element_t | 564 | #undef pt_element_t |
511 | #undef guest_walker | 565 | #undef guest_walker |
512 | #undef shadow_walker | 566 | #undef shadow_walker |