author		Marcelo Tosatti <mtosatti@redhat.com>	2008-09-23 12:18:33 -0400
committer	Avi Kivity <avi@redhat.com>	2008-10-15 08:25:19 -0400
commit		e8bc217aef67d41d767ede6e7a7eb10f1d47c86c (patch)
tree		f58c99dbf212d25d2fa8145ce46bc7ed5904cb07 /arch
parent		38187c830cab84daecb41169948467f1f19317e3 (diff)
KVM: MMU: mode specific sync_page
Examine guest pagetable and bring the shadow back in sync. Caller is responsible
for local TLB flush before re-entering guest mode.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
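The return convention implied by the implementations below: sync_page() returns 0 when the shadow page was brought back in sync, and nonzero when the caller should discard the page (the nonpaging variant always returns 1, since there are no guest page tables to sync against). A minimal caller sketch under that contract, assuming helpers present in mmu.c of this era (rmap_write_protect(), kvm_mmu_zap_page(), kvm_x86_ops->tlb_flush()); resync_shadow_page() itself is a hypothetical name, not part of this patch:

/* Hypothetical caller, illustrating the hook's contract only. */
static int resync_shadow_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	/* Keep guest writes to the page table from racing with the resync. */
	rmap_write_protect(vcpu->kvm, sp->gfn);

	/* Nonzero: the page no longer maps anything useful, throw it away. */
	if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
		kvm_mmu_zap_page(vcpu->kvm, sp);
		return 1;
	}

	/* Per the changelog, the caller owns the local TLB flush. */
	kvm_x86_ops->tlb_flush(vcpu);
	return 0;
}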
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kvm/mmu.c	10
-rw-r--r--	arch/x86/kvm/paging_tmpl.h	54
2 files changed, 64 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 731e6fe9cb07..90f01169c8f0 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -871,6 +871,12 @@ static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
 		sp->spt[i] = shadow_trap_nonpresent_pte;
 }
 
+static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
+			       struct kvm_mmu_page *sp)
+{
+	return 1;
+}
+
 static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
 {
 	unsigned index;
@@ -1547,6 +1553,7 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu)
 	context->gva_to_gpa = nonpaging_gva_to_gpa;
 	context->free = nonpaging_free;
 	context->prefetch_page = nonpaging_prefetch_page;
+	context->sync_page = nonpaging_sync_page;
 	context->root_level = 0;
 	context->shadow_root_level = PT32E_ROOT_LEVEL;
 	context->root_hpa = INVALID_PAGE;
@@ -1594,6 +1601,7 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
 	context->page_fault = paging64_page_fault;
 	context->gva_to_gpa = paging64_gva_to_gpa;
 	context->prefetch_page = paging64_prefetch_page;
+	context->sync_page = paging64_sync_page;
 	context->free = paging_free;
 	context->root_level = level;
 	context->shadow_root_level = level;
@@ -1615,6 +1623,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu)
 	context->gva_to_gpa = paging32_gva_to_gpa;
 	context->free = paging_free;
 	context->prefetch_page = paging32_prefetch_page;
+	context->sync_page = paging32_sync_page;
 	context->root_level = PT32_ROOT_LEVEL;
 	context->shadow_root_level = PT32E_ROOT_LEVEL;
 	context->root_hpa = INVALID_PAGE;
@@ -1634,6 +1643,7 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	context->page_fault = tdp_page_fault;
 	context->free = nonpaging_free;
 	context->prefetch_page = nonpaging_prefetch_page;
+	context->sync_page = nonpaging_sync_page;
 	context->shadow_root_level = kvm_x86_ops->get_tdp_level();
 	context->root_hpa = INVALID_PAGE;
 
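Note that the diffstat is limited to arch/, so the hunk adding the function pointer itself to struct kvm_mmu (include/asm-x86/kvm_host.h at this point in the tree) is not shown here. Presumably it is declared next to prefetch_page, roughly as follows; this is an assumed sketch of the header change, not a hunk from this patch:

struct kvm_mmu {
	/* ... existing members (page_fault, gva_to_gpa, free, ...) ... */
	void (*prefetch_page)(struct kvm_vcpu *vcpu,
			      struct kvm_mmu_page *page);
	int (*sync_page)(struct kvm_vcpu *vcpu,
			 struct kvm_mmu_page *sp);
	/* ... */
};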
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index e9fbaa44d444..776fb6d2fd81 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -507,6 +507,60 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
 	}
 }
 
+/*
+ * Using the cached information from sp->gfns is safe because:
+ * - The spte has a reference to the struct page, so the pfn for a given gfn
+ *   can't change unless all sptes pointing to it are nuked first.
+ * - Alias changes zap the entire shadow cache.
+ */
+static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+{
+	int i, offset, nr_present;
+
+	offset = nr_present = 0;
+
+	if (PTTYPE == 32)
+		offset = sp->role.quadrant << PT64_LEVEL_BITS;
+
+	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
+		unsigned pte_access;
+		pt_element_t gpte;
+		gpa_t pte_gpa;
+		gfn_t gfn = sp->gfns[i];
+
+		if (!is_shadow_present_pte(sp->spt[i]))
+			continue;
+
+		pte_gpa = gfn_to_gpa(sp->gfn);
+		pte_gpa += (i+offset) * sizeof(pt_element_t);
+
+		if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
+					  sizeof(pt_element_t)))
+			return -EINVAL;
+
+		if (gpte_to_gfn(gpte) != gfn || !is_present_pte(gpte) ||
+		    !(gpte & PT_ACCESSED_MASK)) {
+			u64 nonpresent;
+
+			rmap_remove(vcpu->kvm, &sp->spt[i]);
+			if (is_present_pte(gpte))
+				nonpresent = shadow_trap_nonpresent_pte;
+			else
+				nonpresent = shadow_notrap_nonpresent_pte;
+			set_shadow_pte(&sp->spt[i], nonpresent);
+			continue;
+		}
+
+		nr_present++;
+		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
+		set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
+			 is_dirty_pte(gpte), 0, gfn,
+			 spte_to_pfn(sp->spt[i]), true);
+	}
+
+	return !nr_present;
+}
+
 #undef pt_element_t
 #undef guest_walker
 #undef shadow_walker
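One subtlety in FNAME(sync_page) above is the quadrant offset: for PTTYPE == 32 a guest page table packs 1024 4-byte entries into one page, while a shadow page holds only 512 64-bit sptes, so each shadow page covers one half of the guest page and sp->role.quadrant selects which half. A standalone sketch of the pte_gpa arithmetic, with the kernel constants restated locally and all input values invented for illustration:

#include <stdio.h>
#include <stdint.h>

/* Values restated from the kernel for the PTTYPE == 32 case: */
#define PT64_LEVEL_BITS	9	/* 512 sptes per shadow page */
#define PAGE_SHIFT	12	/* gfn_to_gpa() is a shift by the 4K page size */

int main(void)
{
	uint64_t sp_gfn = 0x1234;	/* invented: frame holding the guest page table */
	unsigned quadrant = 1;		/* invented: this shadow page covers the 2nd half */
	unsigned i = 10;		/* invented: spte index within the shadow page */

	/* offset = sp->role.quadrant << PT64_LEVEL_BITS: first guest index covered */
	unsigned offset = quadrant << PT64_LEVEL_BITS;	/* 512 */

	/* pte_gpa = gfn_to_gpa(sp->gfn) + (i + offset) * sizeof(pt_element_t) */
	uint64_t pte_gpa = (sp_gfn << PAGE_SHIFT) + (i + offset) * 4;

	printf("guest pte read from gpa 0x%llx\n", (unsigned long long)pte_gpa);
	return 0;
}

With these inputs the guest pte is fetched from 0x1234000 + (10 + 512) * 4 = 0x1234828. The accessed-bit check in the loop reverts an spte to a nonpresent value whenever the guest pte lacks PT_ACCESSED_MASK, presumably so the next guest access takes a fault and the accessed bit is set on the regular page-fault path rather than being silently skipped.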