author		Marcelo Tosatti <mtosatti@redhat.com>	2008-09-23 12:18:35 -0400
committer	Avi Kivity <avi@redhat.com>	2008-10-15 08:25:21 -0400
commit		a7052897b3bcd568a9f5bfaa558957039e7e7ec0 (patch)
tree		5495e806032f0fddf4de1ad4381c9b3f5d49dfbc /arch/x86/kvm
parent		0ba73cdadb8ac172f396df7e23c4a9cebd59b550 (diff)
KVM: x86: trap invlpg

With pages out of sync invlpg needs to be trapped. For now simply nuke
the entry.

Untested on AMD.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
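The per-mode dispatch below relies on a new invlpg callback in the MMU context. The field itself is added to struct kvm_mmu in the kvm_host header, outside arch/x86/kvm, so it does not appear in the diffstat; roughly (an abbreviated sketch, not the full struct):

struct kvm_mmu {
	/* ... existing per-mode callbacks: page_fault, free, gva_to_gpa,
	 * prefetch_page, sync_page, ... */
	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
	/* ... root_hpa, root_level, shadow_root_level, ... */
};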
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--	arch/x86/kvm/mmu.c	18
-rw-r--r--	arch/x86/kvm/paging_tmpl.h	25
-rw-r--r--	arch/x86/kvm/svm.c	13
-rw-r--r--	arch/x86/kvm/vmx.c	19
-rw-r--r--	arch/x86/kvm/x86.c	1
5 files changed, 71 insertions, 5 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9d8c4bb68a81..e89af1df4fcd 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -877,6 +877,10 @@ static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
 	return 1;
 }
 
+static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
+{
+}
+
 static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
 {
 	unsigned index;
@@ -1589,6 +1593,7 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu)
 	context->free = nonpaging_free;
 	context->prefetch_page = nonpaging_prefetch_page;
 	context->sync_page = nonpaging_sync_page;
+	context->invlpg = nonpaging_invlpg;
 	context->root_level = 0;
 	context->shadow_root_level = PT32E_ROOT_LEVEL;
 	context->root_hpa = INVALID_PAGE;
@@ -1637,6 +1642,7 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
 	context->gva_to_gpa = paging64_gva_to_gpa;
 	context->prefetch_page = paging64_prefetch_page;
 	context->sync_page = paging64_sync_page;
+	context->invlpg = paging64_invlpg;
 	context->free = paging_free;
 	context->root_level = level;
 	context->shadow_root_level = level;
@@ -1659,6 +1665,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu)
 	context->free = paging_free;
 	context->prefetch_page = paging32_prefetch_page;
 	context->sync_page = paging32_sync_page;
+	context->invlpg = paging32_invlpg;
 	context->root_level = PT32_ROOT_LEVEL;
 	context->shadow_root_level = PT32E_ROOT_LEVEL;
 	context->root_hpa = INVALID_PAGE;
@@ -1679,6 +1686,7 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	context->free = nonpaging_free;
 	context->prefetch_page = nonpaging_prefetch_page;
 	context->sync_page = nonpaging_sync_page;
+	context->invlpg = nonpaging_invlpg;
 	context->shadow_root_level = kvm_x86_ops->get_tdp_level();
 	context->root_hpa = INVALID_PAGE;
 
@@ -2071,6 +2079,16 @@ out:
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
 
+void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
+{
+	spin_lock(&vcpu->kvm->mmu_lock);
+	vcpu->arch.mmu.invlpg(vcpu, gva);
+	spin_unlock(&vcpu->kvm->mmu_lock);
+	kvm_mmu_flush_tlb(vcpu);
+	++vcpu->stat.invlpg;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
+
 void kvm_enable_tdp(void)
 {
 	tdp_enabled = true;
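Note the ordering in kvm_mmu_invlpg(): mmu_lock is held only around the per-mode shadow-PTE update, while the guest TLB flush and the stat bump happen after the lock is dropped. A hypothetical caller (illustrative only; the real callers are handle_invlpg() in vmx.c and emulate_invlpg() in x86.c below):

/* Hypothetical exit handler: invalidate the shadow mapping for one
 * guest linear address and resume the guest. */
static int example_invlpg_exit(struct kvm_vcpu *vcpu, gva_t linear_addr)
{
	kvm_mmu_invlpg(vcpu, linear_addr);	/* nuke SPTE, flush TLB */
	return 1;				/* re-enter the guest */
}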
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 776fb6d2fd81..dc169e8148b1 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -461,6 +461,31 @@ out_unlock:
 	return 0;
 }
 
+static int FNAME(shadow_invlpg_entry)(struct kvm_shadow_walk *_sw,
+				      struct kvm_vcpu *vcpu, u64 addr,
+				      u64 *sptep, int level)
+{
+
+	if (level == PT_PAGE_TABLE_LEVEL) {
+		if (is_shadow_present_pte(*sptep))
+			rmap_remove(vcpu->kvm, sptep);
+		set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
+		return 1;
+	}
+	if (!is_shadow_present_pte(*sptep))
+		return 1;
+	return 0;
+}
+
+static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
+{
+	struct shadow_walker walker = {
+		.walker = { .entry = FNAME(shadow_invlpg_entry), },
+	};
+
+	walk_shadow(&walker.walker, vcpu, gva);
+}
+
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
 {
 	struct guest_walker walker;
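The return value of shadow_invlpg_entry() follows walk_shadow()'s convention (assuming the generic shadow walker of this series, where a nonzero return from the entry callback stops the walk). Restated as comments:

/* shadow_invlpg_entry(), restated:
 *  - at PT_PAGE_TABLE_LEVEL: drop the rmap for a present SPTE, write the
 *    not-present marker ("nuke the entry"), and stop the walk (return 1);
 *  - a non-present intermediate entry means nothing maps the address,
 *    so also stop (return 1);
 *  - otherwise descend to the next level (return 0).
 */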
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 9b54550fa4d2..9c4ce657d963 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -525,6 +525,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 				(1ULL << INTERCEPT_CPUID) |
 				(1ULL << INTERCEPT_INVD) |
 				(1ULL << INTERCEPT_HLT) |
+				(1ULL << INTERCEPT_INVLPG) |
 				(1ULL << INTERCEPT_INVLPGA) |
 				(1ULL << INTERCEPT_IOIO_PROT) |
 				(1ULL << INTERCEPT_MSR_PROT) |
@@ -589,7 +590,8 @@ static void init_vmcb(struct vcpu_svm *svm)
 	if (npt_enabled) {
 		/* Setup VMCB for Nested Paging */
 		control->nested_ctl = 1;
-		control->intercept &= ~(1ULL << INTERCEPT_TASK_SWITCH);
+		control->intercept &= ~((1ULL << INTERCEPT_TASK_SWITCH) |
+					(1ULL << INTERCEPT_INVLPG));
 		control->intercept_exceptions &= ~(1 << PF_VECTOR);
 		control->intercept_cr_read &= ~(INTERCEPT_CR0_MASK|
 						INTERCEPT_CR3_MASK);
@@ -1164,6 +1166,13 @@ static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return 1;
 }
 
+static int invlpg_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+	if (emulate_instruction(&svm->vcpu, kvm_run, 0, 0, 0) != EMULATE_DONE)
+		pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
+	return 1;
+}
+
 static int emulate_on_interception(struct vcpu_svm *svm,
 				   struct kvm_run *kvm_run)
 {
@@ -1417,7 +1426,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
 	[SVM_EXIT_CPUID]			= cpuid_interception,
 	[SVM_EXIT_INVD]				= emulate_on_interception,
 	[SVM_EXIT_HLT]				= halt_interception,
-	[SVM_EXIT_INVLPG]			= emulate_on_interception,
+	[SVM_EXIT_INVLPG]			= invlpg_interception,
 	[SVM_EXIT_INVLPGA]			= invalid_op_interception,
 	[SVM_EXIT_IOIO]				= io_interception,
 	[SVM_EXIT_MSR]				= msr_interception,
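On SVM the INVLPG exit does not hand the handler the guest linear address in the exit information (unlike VMX's exit qualification), so invlpg_interception() re-runs the instruction through the x86 emulator; the emulator's emulate_invlpg() hook, patched in x86.c below, is what finally reaches the MMU. The call chain, sketched:

/*
 * SVM invlpg path:
 *   #VMEXIT(SVM_EXIT_INVLPG)
 *     -> invlpg_interception()
 *          -> emulate_instruction()           decodes the guest INVLPG
 *               -> emulate_invlpg(vcpu, addr) hook added in x86.c below
 *                    -> kvm_mmu_invlpg()      drops the SPTE, flushes TLB
 */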
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 025bf4011abc..4556cc3715bb 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1130,7 +1130,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 	      CPU_BASED_CR3_STORE_EXITING |
 	      CPU_BASED_USE_IO_BITMAPS |
 	      CPU_BASED_MOV_DR_EXITING |
-	      CPU_BASED_USE_TSC_OFFSETING;
+	      CPU_BASED_USE_TSC_OFFSETING |
+	      CPU_BASED_INVLPG_EXITING;
 	opt = CPU_BASED_TPR_SHADOW |
 	      CPU_BASED_USE_MSR_BITMAPS |
 	      CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
@@ -1159,9 +1160,11 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 		_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
 #endif
 	if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
-		/* CR3 accesses don't need to cause VM Exits when EPT enabled */
+		/* CR3 accesses and invlpg don't need to cause VM Exits when EPT
+		   enabled */
 		min &= ~(CPU_BASED_CR3_LOAD_EXITING |
-			 CPU_BASED_CR3_STORE_EXITING);
+			 CPU_BASED_CR3_STORE_EXITING |
+			 CPU_BASED_INVLPG_EXITING);
 		if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
 					&_cpu_based_exec_control) < 0)
 			return -EIO;
@@ -2790,6 +2793,15 @@ static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	return 1;
 }
 
+static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+	u64 exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
+
+	kvm_mmu_invlpg(vcpu, exit_qualification);
+	skip_emulated_instruction(vcpu);
+	return 1;
+}
+
 static int handle_wbinvd(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	skip_emulated_instruction(vcpu);
@@ -2958,6 +2970,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
 	[EXIT_REASON_MSR_WRITE]               = handle_wrmsr,
 	[EXIT_REASON_PENDING_INTERRUPT]       = handle_interrupt_window,
 	[EXIT_REASON_HLT]                     = handle_halt,
+	[EXIT_REASON_INVLPG]                  = handle_invlpg,
 	[EXIT_REASON_VMCALL]                  = handle_vmcall,
 	[EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
 	[EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
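VMX is simpler: the hardware reports the invalidated linear address in the exit qualification, so handle_invlpg() feeds it straight to the MMU and skips the instruction without any emulation. Sketched:

/*
 * VMX invlpg path:
 *   VM exit (EXIT_REASON_INVLPG), exit qualification = linear address
 *     -> handle_invlpg()
 *          -> kvm_mmu_invlpg(vcpu, gva)       drops the SPTE, flushes TLB
 *          -> skip_emulated_instruction()     advances guest RIP
 */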
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 88e6d9abbd2b..efee85ba07e5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2341,6 +2341,7 @@ static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
 
 int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
 {
+	kvm_mmu_invlpg(vcpu, address);
 	return X86EMUL_CONTINUE;
 }
 