author     Marcelo Tosatti <mtosatti@redhat.com>	2008-09-23 12:18:35 -0400
committer  Avi Kivity <avi@redhat.com>	2008-10-15 08:25:21 -0400
commit     a7052897b3bcd568a9f5bfaa558957039e7e7ec0 (patch)
tree       5495e806032f0fddf4de1ad4381c9b3f5d49dfbc /arch/x86/kvm/mmu.c
parent     0ba73cdadb8ac172f396df7e23c4a9cebd59b550 (diff)
KVM: x86: trap invlpg
With pages out of sync, invlpg needs to be trapped. For now, simply nuke the entry.

Untested on AMD.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
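The change is small but structural: each paging mode now supplies an invlpg callback in the per-vcpu MMU context, and a common kvm_mmu_invlpg() entry point takes the MMU lock, dispatches to that callback, flushes the TLB, and bumps the invlpg statistic. The paging64_invlpg/paging32_invlpg handlers referenced below are generated from the companion paging_tmpl.h change in this series, not from this file. What follows is a minimal, self-contained sketch of the dispatch pattern in plain C; every name in it (toy_vcpu, toy_mmu, toy_mmu_invlpg, flush_tlb) is an illustrative placeholder, not a KVM symbol.

#include <stdio.h>

typedef unsigned long gva_t;

struct toy_vcpu;

struct toy_mmu {
	/* per-paging-mode hook, mirroring context->invlpg in the patch */
	void (*invlpg)(struct toy_vcpu *vcpu, gva_t gva);
};

struct toy_vcpu {
	struct toy_mmu mmu;
	unsigned long stat_invlpg;
};

/* Nonpaging/real-mode guests have no shadow entries to zap. */
static void nonpaging_invlpg(struct toy_vcpu *vcpu, gva_t gva)
{
	(void)vcpu;
	(void)gva;
}

/* A paging-mode handler would locate and clear ("nuke") the shadow
 * pte mapping gva; here we only log the intent. */
static void paging_invlpg(struct toy_vcpu *vcpu, gva_t gva)
{
	(void)vcpu;
	printf("zap shadow pte for gva 0x%lx\n", gva);
}

static void flush_tlb(struct toy_vcpu *vcpu)
{
	(void)vcpu;	/* stand-in for kvm_mmu_flush_tlb() */
}

/* Common entry point, shaped like kvm_mmu_invlpg() in the patch:
 * in the real code the mode hook runs under mmu_lock and the TLB
 * flush happens after the lock is dropped. */
static void toy_mmu_invlpg(struct toy_vcpu *vcpu, gva_t gva)
{
	vcpu->mmu.invlpg(vcpu, gva);
	flush_tlb(vcpu);
	++vcpu->stat_invlpg;
}

int main(void)
{
	struct toy_vcpu paged    = { .mmu = { .invlpg = paging_invlpg } };
	struct toy_vcpu realmode = { .mmu = { .invlpg = nonpaging_invlpg } };

	toy_mmu_invlpg(&paged, 0xdeadb000UL);	/* zaps, then flushes */
	toy_mmu_invlpg(&realmode, 0xdeadb000UL);	/* no-op hook, still flushes */
	printf("invlpg exits: %lu + %lu\n",
	       paged.stat_invlpg, realmode.stat_invlpg);
	return 0;
}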
Diffstat (limited to 'arch/x86/kvm/mmu.c'):
 arch/x86/kvm/mmu.c | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9d8c4bb68a81..e89af1df4fcd 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -877,6 +877,10 @@ static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
 	return 1;
 }
 
+static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
+{
+}
+
 static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
 {
 	unsigned index;
@@ -1589,6 +1593,7 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu)
 	context->free = nonpaging_free;
 	context->prefetch_page = nonpaging_prefetch_page;
 	context->sync_page = nonpaging_sync_page;
+	context->invlpg = nonpaging_invlpg;
 	context->root_level = 0;
 	context->shadow_root_level = PT32E_ROOT_LEVEL;
 	context->root_hpa = INVALID_PAGE;
@@ -1637,6 +1642,7 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
 	context->gva_to_gpa = paging64_gva_to_gpa;
 	context->prefetch_page = paging64_prefetch_page;
 	context->sync_page = paging64_sync_page;
+	context->invlpg = paging64_invlpg;
 	context->free = paging_free;
 	context->root_level = level;
 	context->shadow_root_level = level;
@@ -1659,6 +1665,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu)
 	context->free = paging_free;
 	context->prefetch_page = paging32_prefetch_page;
 	context->sync_page = paging32_sync_page;
+	context->invlpg = paging32_invlpg;
 	context->root_level = PT32_ROOT_LEVEL;
 	context->shadow_root_level = PT32E_ROOT_LEVEL;
 	context->root_hpa = INVALID_PAGE;
@@ -1679,6 +1686,7 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	context->free = nonpaging_free;
 	context->prefetch_page = nonpaging_prefetch_page;
 	context->sync_page = nonpaging_sync_page;
+	context->invlpg = nonpaging_invlpg;
 	context->shadow_root_level = kvm_x86_ops->get_tdp_level();
 	context->root_hpa = INVALID_PAGE;
 
@@ -2071,6 +2079,16 @@ out:
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
 
+void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
+{
+	spin_lock(&vcpu->kvm->mmu_lock);
+	vcpu->arch.mmu.invlpg(vcpu, gva);
+	spin_unlock(&vcpu->kvm->mmu_lock);
+	kvm_mmu_flush_tlb(vcpu);
+	++vcpu->stat.invlpg;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
+
 void kvm_enable_tdp(void)
 {
 	tdp_enabled = true;
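
Note that kvm_mmu_invlpg() zaps under mmu_lock but issues the TLB flush only after dropping the lock, then increments the invlpg statistic. The vendor-specific INVLPG intercepts (the "trap" part, added elsewhere in this series rather than in this file) are expected to forward the guest linear address to it. A hypothetical caller shape, where only kvm_mmu_invlpg() is real and the handler name and its gva argument are placeholders:

/* Hypothetical caller sketch, not part of this patch: a vendor
 * INVLPG intercept handler forwards the guest linear address to the
 * new common entry point. */
static int handle_invlpg_intercept(struct kvm_vcpu *vcpu, gva_t gva)
{
	kvm_mmu_invlpg(vcpu, gva);	/* zap the shadow pte(s), then flush */
	return 1;			/* let the vcpu resume */
}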