 arch/x86/kvm/mmu.c         | 36 ++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/x86.c         |  1 +
 include/asm-x86/kvm_host.h |  1 +
 3 files changed, 38 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 90f01169c8f0..9d8c4bb68a81 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1471,6 +1471,41 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 	vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
 }
 
+static void mmu_sync_children(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+{
+}
+
+static void mmu_sync_roots(struct kvm_vcpu *vcpu)
+{
+	int i;
+	struct kvm_mmu_page *sp;
+
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+		return;
+	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+		hpa_t root = vcpu->arch.mmu.root_hpa;
+		sp = page_header(root);
+		mmu_sync_children(vcpu, sp);
+		return;
+	}
+	for (i = 0; i < 4; ++i) {
+		hpa_t root = vcpu->arch.mmu.pae_root[i];
+
+		if (root) {
+			root &= PT64_BASE_ADDR_MASK;
+			sp = page_header(root);
+			mmu_sync_children(vcpu, sp);
+		}
+	}
+}
+
+void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
+{
+	spin_lock(&vcpu->kvm->mmu_lock);
+	mmu_sync_roots(vcpu);
+	spin_unlock(&vcpu->kvm->mmu_lock);
+}
+
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
 {
 	return vaddr;
@@ -1715,6 +1750,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 	spin_lock(&vcpu->kvm->mmu_lock);
 	kvm_mmu_free_some_pages(vcpu);
 	mmu_alloc_roots(vcpu);
+	mmu_sync_roots(vcpu);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 	kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
 	kvm_mmu_flush_tlb(vcpu);
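
The new mmu_sync_roots() has to cope with both shadow root layouts: a 4-level (long-mode) shadow has a single root page behind root_hpa, while a PAE shadow keeps four roots in pae_root[], each of which may be absent and carries low attribute bits that must be masked off before the page-header lookup. Below is a minimal standalone sketch of that dispatch, with the kernel types and constants replaced by made-up stand-ins (struct mmu_state, BASE_ADDR_MASK, and sync_one_root() are illustrative only, not KVM's definitions):

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative stand-ins for the kernel definitions. */
    #define PT64_ROOT_LEVEL   4
    #define PT32E_ROOT_LEVEL  3
    #define INVALID_PAGE      ((uint64_t)-1)
    #define BASE_ADDR_MASK    (~0xfffULL)   /* strip low attribute bits */

    struct mmu_state {
            int      shadow_root_level;
            uint64_t root_hpa;      /* single root in 4-level mode */
            uint64_t pae_root[4];   /* four roots in PAE mode */
    };

    static void sync_one_root(uint64_t root)
    {
            /* Marks where mmu_sync_children() would walk the page. */
            printf("would sync shadow page at %#llx\n",
                   (unsigned long long)root);
    }

    static void sync_roots(struct mmu_state *mmu)
    {
            int i;

            if (mmu->root_hpa == INVALID_PAGE)
                    return;
            if (mmu->shadow_root_level == PT64_ROOT_LEVEL) {
                    sync_one_root(mmu->root_hpa);   /* one 4-level root */
                    return;
            }
            for (i = 0; i < 4; ++i)                 /* four PAE roots */
                    if (mmu->pae_root[i])
                            sync_one_root(mmu->pae_root[i] & BASE_ADDR_MASK);
    }

    int main(void)
    {
            struct mmu_state mmu = {
                    .shadow_root_level = PT32E_ROOT_LEVEL,
                    .root_hpa = 0x1000,
                    .pae_root = { 0x2001, 0, 0x3001, 0 },
            };
            sync_roots(&mmu);
            return 0;
    }

In the real code, page_header() maps the host physical address back to its struct kvm_mmu_page before mmu_sync_children() (still an empty stub in this patch) recurses into it.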
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 08edeabf15e6..88e6d9abbd2b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -594,6 +594,7 @@ EXPORT_SYMBOL_GPL(kvm_set_cr4);
 void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
 	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
+		kvm_mmu_sync_roots(vcpu);
 		kvm_mmu_flush_tlb(vcpu);
 		return;
 	}
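
This hunk covers the guest CR3-reload fast path: when the new CR3 value matches the current one and the cached PDPTRs are unchanged, KVM keeps the existing shadow roots instead of rebuilding them, so any shadow pages marked out of sync must be resynchronized here, before the TLB flush that a CR3 write is architecturally expected to imply. A toy model of that decision (handle_cr3_write(), struct vcpu_model, and pdptrs_dirty are hypothetical simplifications, not the real kvm_set_cr3()):

    #include <stdbool.h>
    #include <stdio.h>

    struct vcpu_model {
            unsigned long cr3;
            bool pdptrs_dirty;
    };

    static void handle_cr3_write(struct vcpu_model *vcpu, unsigned long new_cr3)
    {
            if (new_cr3 == vcpu->cr3 && !vcpu->pdptrs_dirty) {
                    /* Roots are reused: sync out-of-sync shadow pages,
                     * then emulate the TLB flush a CR3 write implies. */
                    printf("fast path: sync roots + flush TLB\n");
                    return;
            }
            vcpu->cr3 = new_cr3;
            printf("slow path: reload shadow roots\n");
    }

    int main(void)
    {
            struct vcpu_model vcpu = { .cr3 = 0x1000, .pdptrs_dirty = false };
            handle_cr3_write(&vcpu, 0x1000);  /* fast path */
            handle_cr3_write(&vcpu, 0x2000);  /* slow path */
            return 0;
    }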
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 8bad9bd9b37e..475d8ab83bff 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -584,6 +584,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
+void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
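
Note that the header exports only kvm_mmu_sync_roots(), the variant that takes mmu_lock itself; kvm_mmu_load(), which already holds the lock when it calls mmu_alloc_roots(), invokes the unlocked mmu_sync_roots() directly. A small userspace sketch of that locked-wrapper convention, with a pthread mutex standing in for the kernel spinlock (all names here are invented for illustration):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;

    static void sync_roots_locked(void)     /* caller must hold mmu_lock */
    {
            printf("syncing roots (lock held)\n");
    }

    void sync_roots(void)                   /* exported entry point */
    {
            pthread_mutex_lock(&mmu_lock);
            sync_roots_locked();
            pthread_mutex_unlock(&mmu_lock);
    }

    int main(void)
    {
            sync_roots();
            return 0;
    }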