Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r-- | arch/x86/kvm/vmx.c | 712
1 file changed, 488 insertions, 224 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 7041cc52b56..2643b430d83 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -26,6 +26,8 @@ | |||
26 | #include <linux/highmem.h> | 26 | #include <linux/highmem.h> |
27 | #include <linux/sched.h> | 27 | #include <linux/sched.h> |
28 | #include <linux/moduleparam.h> | 28 | #include <linux/moduleparam.h> |
29 | #include "kvm_cache_regs.h" | ||
30 | #include "x86.h" | ||
29 | 31 | ||
30 | #include <asm/io.h> | 32 | #include <asm/io.h> |
31 | #include <asm/desc.h> | 33 | #include <asm/desc.h> |
@@ -47,6 +49,9 @@ module_param(flexpriority_enabled, bool, 0); | |||
47 | static int enable_ept = 1; | 49 | static int enable_ept = 1; |
48 | module_param(enable_ept, bool, 0); | 50 | module_param(enable_ept, bool, 0); |
49 | 51 | ||
52 | static int emulate_invalid_guest_state = 0; | ||
53 | module_param(emulate_invalid_guest_state, bool, 0); | ||
54 | |||
50 | struct vmcs { | 55 | struct vmcs { |
51 | u32 revision_id; | 56 | u32 revision_id; |
52 | u32 abort; | 57 | u32 abort; |
@@ -56,6 +61,7 @@ struct vmcs { | |||
56 | struct vcpu_vmx { | 61 | struct vcpu_vmx { |
57 | struct kvm_vcpu vcpu; | 62 | struct kvm_vcpu vcpu; |
58 | struct list_head local_vcpus_link; | 63 | struct list_head local_vcpus_link; |
64 | unsigned long host_rsp; | ||
59 | int launched; | 65 | int launched; |
60 | u8 fail; | 66 | u8 fail; |
61 | u32 idt_vectoring_info; | 67 | u32 idt_vectoring_info; |
@@ -83,6 +89,7 @@ struct vcpu_vmx { | |||
83 | } irq; | 89 | } irq; |
84 | } rmode; | 90 | } rmode; |
85 | int vpid; | 91 | int vpid; |
92 | bool emulation_required; | ||
86 | }; | 93 | }; |
87 | 94 | ||
88 | static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) | 95 | static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) |
@@ -468,7 +475,7 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu) | |||
468 | if (!vcpu->fpu_active) | 475 | if (!vcpu->fpu_active) |
469 | eb |= 1u << NM_VECTOR; | 476 | eb |= 1u << NM_VECTOR; |
470 | if (vcpu->guest_debug.enabled) | 477 | if (vcpu->guest_debug.enabled) |
471 | eb |= 1u << 1; | 478 | eb |= 1u << DB_VECTOR; |
472 | if (vcpu->arch.rmode.active) | 479 | if (vcpu->arch.rmode.active) |
473 | eb = ~0; | 480 | eb = ~0; |
474 | if (vm_need_ept()) | 481 | if (vm_need_ept()) |
@@ -715,9 +722,9 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu) | |||
715 | unsigned long rip; | 722 | unsigned long rip; |
716 | u32 interruptibility; | 723 | u32 interruptibility; |
717 | 724 | ||
718 | rip = vmcs_readl(GUEST_RIP); | 725 | rip = kvm_rip_read(vcpu); |
719 | rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN); | 726 | rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN); |
720 | vmcs_writel(GUEST_RIP, rip); | 727 | kvm_rip_write(vcpu, rip); |
721 | 728 | ||
722 | /* | 729 | /* |
723 | * We emulated an instruction, so temporary interrupt blocking | 730 | * We emulated an instruction, so temporary interrupt blocking |
@@ -733,19 +740,35 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu) | |||
733 | static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, | 740 | static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, |
734 | bool has_error_code, u32 error_code) | 741 | bool has_error_code, u32 error_code) |
735 | { | 742 | { |
743 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
744 | |||
745 | if (has_error_code) | ||
746 | vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); | ||
747 | |||
748 | if (vcpu->arch.rmode.active) { | ||
749 | vmx->rmode.irq.pending = true; | ||
750 | vmx->rmode.irq.vector = nr; | ||
751 | vmx->rmode.irq.rip = kvm_rip_read(vcpu); | ||
752 | if (nr == BP_VECTOR) | ||
753 | vmx->rmode.irq.rip++; | ||
754 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, | ||
755 | nr | INTR_TYPE_SOFT_INTR | ||
756 | | (has_error_code ? INTR_INFO_DELIVER_CODE_MASK : 0) | ||
757 | | INTR_INFO_VALID_MASK); | ||
758 | vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1); | ||
759 | kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1); | ||
760 | return; | ||
761 | } | ||
762 | |||
736 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, | 763 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, |
737 | nr | INTR_TYPE_EXCEPTION | 764 | nr | INTR_TYPE_EXCEPTION |
738 | | (has_error_code ? INTR_INFO_DELIVER_CODE_MASK : 0) | 765 | | (has_error_code ? INTR_INFO_DELIVER_CODE_MASK : 0) |
739 | | INTR_INFO_VALID_MASK); | 766 | | INTR_INFO_VALID_MASK); |
740 | if (has_error_code) | ||
741 | vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); | ||
742 | } | 767 | } |
743 | 768 | ||
744 | static bool vmx_exception_injected(struct kvm_vcpu *vcpu) | 769 | static bool vmx_exception_injected(struct kvm_vcpu *vcpu) |
745 | { | 770 | { |
746 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 771 | return false; |
747 | |||
748 | return !(vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK); | ||
749 | } | 772 | } |
750 | 773 | ||
751 | /* | 774 | /* |
@@ -947,24 +970,19 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) | |||
947 | return ret; | 970 | return ret; |
948 | } | 971 | } |
949 | 972 | ||
950 | /* | 973 | static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) |
951 | * Sync the rsp and rip registers into the vcpu structure. This allows | ||
952 | * registers to be accessed by indexing vcpu->arch.regs. | ||
953 | */ | ||
954 | static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu) | ||
955 | { | ||
956 | vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); | ||
957 | vcpu->arch.rip = vmcs_readl(GUEST_RIP); | ||
958 | } | ||
959 | |||
960 | /* | ||
961 | * Syncs rsp and rip back into the vmcs. Should be called after possible | ||
962 | * modification. | ||
963 | */ | ||
964 | static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu) | ||
965 | { | 974 | { |
966 | vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); | 975 | __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); |
967 | vmcs_writel(GUEST_RIP, vcpu->arch.rip); | 976 | switch (reg) { |
977 | case VCPU_REGS_RSP: | ||
978 | vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); | ||
979 | break; | ||
980 | case VCPU_REGS_RIP: | ||
981 | vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP); | ||
982 | break; | ||
983 | default: | ||
984 | break; | ||
985 | } | ||
968 | } | 986 | } |
969 | 987 | ||
970 | static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg) | 988 | static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg) |
@@ -1007,17 +1025,9 @@ static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg) | |||
1007 | 1025 | ||
1008 | static int vmx_get_irq(struct kvm_vcpu *vcpu) | 1026 | static int vmx_get_irq(struct kvm_vcpu *vcpu) |
1009 | { | 1027 | { |
1010 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 1028 | if (!vcpu->arch.interrupt.pending) |
1011 | u32 idtv_info_field; | 1029 | return -1; |
1012 | 1030 | return vcpu->arch.interrupt.nr; | |
1013 | idtv_info_field = vmx->idt_vectoring_info; | ||
1014 | if (idtv_info_field & INTR_INFO_VALID_MASK) { | ||
1015 | if (is_external_interrupt(idtv_info_field)) | ||
1016 | return idtv_info_field & VECTORING_INFO_VECTOR_MASK; | ||
1017 | else | ||
1018 | printk(KERN_DEBUG "pending exception: not handled yet\n"); | ||
1019 | } | ||
1020 | return -1; | ||
1021 | } | 1031 | } |
1022 | 1032 | ||
1023 | static __init int cpu_has_kvm_support(void) | 1033 | static __init int cpu_has_kvm_support(void) |
@@ -1031,9 +1041,9 @@ static __init int vmx_disabled_by_bios(void) | |||
1031 | u64 msr; | 1041 | u64 msr; |
1032 | 1042 | ||
1033 | rdmsrl(MSR_IA32_FEATURE_CONTROL, msr); | 1043 | rdmsrl(MSR_IA32_FEATURE_CONTROL, msr); |
1034 | return (msr & (MSR_IA32_FEATURE_CONTROL_LOCKED | | 1044 | return (msr & (FEATURE_CONTROL_LOCKED | |
1035 | MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED)) | 1045 | FEATURE_CONTROL_VMXON_ENABLED)) |
1036 | == MSR_IA32_FEATURE_CONTROL_LOCKED; | 1046 | == FEATURE_CONTROL_LOCKED; |
1037 | /* locked but not enabled */ | 1047 | /* locked but not enabled */ |
1038 | } | 1048 | } |
1039 | 1049 | ||
@@ -1045,14 +1055,14 @@ static void hardware_enable(void *garbage) | |||
1045 | 1055 | ||
1046 | INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu)); | 1056 | INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu)); |
1047 | rdmsrl(MSR_IA32_FEATURE_CONTROL, old); | 1057 | rdmsrl(MSR_IA32_FEATURE_CONTROL, old); |
1048 | if ((old & (MSR_IA32_FEATURE_CONTROL_LOCKED | | 1058 | if ((old & (FEATURE_CONTROL_LOCKED | |
1049 | MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED)) | 1059 | FEATURE_CONTROL_VMXON_ENABLED)) |
1050 | != (MSR_IA32_FEATURE_CONTROL_LOCKED | | 1060 | != (FEATURE_CONTROL_LOCKED | |
1051 | MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED)) | 1061 | FEATURE_CONTROL_VMXON_ENABLED)) |
1052 | /* enable and lock */ | 1062 | /* enable and lock */ |
1053 | wrmsrl(MSR_IA32_FEATURE_CONTROL, old | | 1063 | wrmsrl(MSR_IA32_FEATURE_CONTROL, old | |
1054 | MSR_IA32_FEATURE_CONTROL_LOCKED | | 1064 | FEATURE_CONTROL_LOCKED | |
1055 | MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED); | 1065 | FEATURE_CONTROL_VMXON_ENABLED); |
1056 | write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */ | 1066 | write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */ |
1057 | asm volatile (ASM_VMX_VMXON_RAX | 1067 | asm volatile (ASM_VMX_VMXON_RAX |
1058 | : : "a"(&phys_addr), "m"(phys_addr) | 1068 | : : "a"(&phys_addr), "m"(phys_addr) |
@@ -1120,7 +1130,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) | |||
1120 | CPU_BASED_CR3_STORE_EXITING | | 1130 | CPU_BASED_CR3_STORE_EXITING | |
1121 | CPU_BASED_USE_IO_BITMAPS | | 1131 | CPU_BASED_USE_IO_BITMAPS | |
1122 | CPU_BASED_MOV_DR_EXITING | | 1132 | CPU_BASED_MOV_DR_EXITING | |
1123 | CPU_BASED_USE_TSC_OFFSETING; | 1133 | CPU_BASED_USE_TSC_OFFSETING | |
1134 | CPU_BASED_INVLPG_EXITING; | ||
1124 | opt = CPU_BASED_TPR_SHADOW | | 1135 | opt = CPU_BASED_TPR_SHADOW | |
1125 | CPU_BASED_USE_MSR_BITMAPS | | 1136 | CPU_BASED_USE_MSR_BITMAPS | |
1126 | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; | 1137 | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; |
@@ -1149,9 +1160,11 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) | |||
1149 | _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW; | 1160 | _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW; |
1150 | #endif | 1161 | #endif |
1151 | if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) { | 1162 | if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) { |
1152 | /* CR3 accesses don't need to cause VM Exits when EPT enabled */ | 1163 | /* CR3 accesses and invlpg don't need to cause VM Exits when EPT |
1164 | enabled */ | ||
1153 | min &= ~(CPU_BASED_CR3_LOAD_EXITING | | 1165 | min &= ~(CPU_BASED_CR3_LOAD_EXITING | |
1154 | CPU_BASED_CR3_STORE_EXITING); | 1166 | CPU_BASED_CR3_STORE_EXITING | |
1167 | CPU_BASED_INVLPG_EXITING); | ||
1155 | if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS, | 1168 | if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS, |
1156 | &_cpu_based_exec_control) < 0) | 1169 | &_cpu_based_exec_control) < 0) |
1157 | return -EIO; | 1170 | return -EIO; |
@@ -1288,7 +1301,9 @@ static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save) | |||
1288 | static void enter_pmode(struct kvm_vcpu *vcpu) | 1301 | static void enter_pmode(struct kvm_vcpu *vcpu) |
1289 | { | 1302 | { |
1290 | unsigned long flags; | 1303 | unsigned long flags; |
1304 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
1291 | 1305 | ||
1306 | vmx->emulation_required = 1; | ||
1292 | vcpu->arch.rmode.active = 0; | 1307 | vcpu->arch.rmode.active = 0; |
1293 | 1308 | ||
1294 | vmcs_writel(GUEST_TR_BASE, vcpu->arch.rmode.tr.base); | 1309 | vmcs_writel(GUEST_TR_BASE, vcpu->arch.rmode.tr.base); |
@@ -1305,6 +1320,9 @@ static void enter_pmode(struct kvm_vcpu *vcpu) | |||
1305 | 1320 | ||
1306 | update_exception_bitmap(vcpu); | 1321 | update_exception_bitmap(vcpu); |
1307 | 1322 | ||
1323 | if (emulate_invalid_guest_state) | ||
1324 | return; | ||
1325 | |||
1308 | fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->arch.rmode.es); | 1326 | fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->arch.rmode.es); |
1309 | fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->arch.rmode.ds); | 1327 | fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->arch.rmode.ds); |
1310 | fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->arch.rmode.gs); | 1328 | fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->arch.rmode.gs); |
@@ -1345,7 +1363,9 @@ static void fix_rmode_seg(int seg, struct kvm_save_segment *save) | |||
1345 | static void enter_rmode(struct kvm_vcpu *vcpu) | 1363 | static void enter_rmode(struct kvm_vcpu *vcpu) |
1346 | { | 1364 | { |
1347 | unsigned long flags; | 1365 | unsigned long flags; |
1366 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
1348 | 1367 | ||
1368 | vmx->emulation_required = 1; | ||
1349 | vcpu->arch.rmode.active = 1; | 1369 | vcpu->arch.rmode.active = 1; |
1350 | 1370 | ||
1351 | vcpu->arch.rmode.tr.base = vmcs_readl(GUEST_TR_BASE); | 1371 | vcpu->arch.rmode.tr.base = vmcs_readl(GUEST_TR_BASE); |
@@ -1367,6 +1387,9 @@ static void enter_rmode(struct kvm_vcpu *vcpu) | |||
1367 | vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME); | 1387 | vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME); |
1368 | update_exception_bitmap(vcpu); | 1388 | update_exception_bitmap(vcpu); |
1369 | 1389 | ||
1390 | if (emulate_invalid_guest_state) | ||
1391 | goto continue_rmode; | ||
1392 | |||
1370 | vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4); | 1393 | vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4); |
1371 | vmcs_write32(GUEST_SS_LIMIT, 0xffff); | 1394 | vmcs_write32(GUEST_SS_LIMIT, 0xffff); |
1372 | vmcs_write32(GUEST_SS_AR_BYTES, 0xf3); | 1395 | vmcs_write32(GUEST_SS_AR_BYTES, 0xf3); |
@@ -1382,6 +1405,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu) | |||
1382 | fix_rmode_seg(VCPU_SREG_GS, &vcpu->arch.rmode.gs); | 1405 | fix_rmode_seg(VCPU_SREG_GS, &vcpu->arch.rmode.gs); |
1383 | fix_rmode_seg(VCPU_SREG_FS, &vcpu->arch.rmode.fs); | 1406 | fix_rmode_seg(VCPU_SREG_FS, &vcpu->arch.rmode.fs); |
1384 | 1407 | ||
1408 | continue_rmode: | ||
1385 | kvm_mmu_reset_context(vcpu); | 1409 | kvm_mmu_reset_context(vcpu); |
1386 | init_rmode(vcpu->kvm); | 1410 | init_rmode(vcpu->kvm); |
1387 | } | 1411 | } |
@@ -1715,6 +1739,186 @@ static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) | |||
1715 | vmcs_writel(GUEST_GDTR_BASE, dt->base); | 1739 | vmcs_writel(GUEST_GDTR_BASE, dt->base); |
1716 | } | 1740 | } |
1717 | 1741 | ||
1742 | static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg) | ||
1743 | { | ||
1744 | struct kvm_segment var; | ||
1745 | u32 ar; | ||
1746 | |||
1747 | vmx_get_segment(vcpu, &var, seg); | ||
1748 | ar = vmx_segment_access_rights(&var); | ||
1749 | |||
1750 | if (var.base != (var.selector << 4)) | ||
1751 | return false; | ||
1752 | if (var.limit != 0xffff) | ||
1753 | return false; | ||
1754 | if (ar != 0xf3) | ||
1755 | return false; | ||
1756 | |||
1757 | return true; | ||
1758 | } | ||
1759 | |||
1760 | static bool code_segment_valid(struct kvm_vcpu *vcpu) | ||
1761 | { | ||
1762 | struct kvm_segment cs; | ||
1763 | unsigned int cs_rpl; | ||
1764 | |||
1765 | vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); | ||
1766 | cs_rpl = cs.selector & SELECTOR_RPL_MASK; | ||
1767 | |||
1768 | if (~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_ACCESSES_MASK)) | ||
1769 | return false; | ||
1770 | if (!cs.s) | ||
1771 | return false; | ||
1772 | if (!(~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK))) { | ||
1773 | if (cs.dpl > cs_rpl) | ||
1774 | return false; | ||
1775 | } else if (cs.type & AR_TYPE_CODE_MASK) { | ||
1776 | if (cs.dpl != cs_rpl) | ||
1777 | return false; | ||
1778 | } | ||
1779 | if (!cs.present) | ||
1780 | return false; | ||
1781 | |||
1782 | /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */ | ||
1783 | return true; | ||
1784 | } | ||
1785 | |||
1786 | static bool stack_segment_valid(struct kvm_vcpu *vcpu) | ||
1787 | { | ||
1788 | struct kvm_segment ss; | ||
1789 | unsigned int ss_rpl; | ||
1790 | |||
1791 | vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); | ||
1792 | ss_rpl = ss.selector & SELECTOR_RPL_MASK; | ||
1793 | |||
1794 | if ((ss.type != 3) || (ss.type != 7)) | ||
1795 | return false; | ||
1796 | if (!ss.s) | ||
1797 | return false; | ||
1798 | if (ss.dpl != ss_rpl) /* DPL != RPL */ | ||
1799 | return false; | ||
1800 | if (!ss.present) | ||
1801 | return false; | ||
1802 | |||
1803 | return true; | ||
1804 | } | ||
1805 | |||
1806 | static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg) | ||
1807 | { | ||
1808 | struct kvm_segment var; | ||
1809 | unsigned int rpl; | ||
1810 | |||
1811 | vmx_get_segment(vcpu, &var, seg); | ||
1812 | rpl = var.selector & SELECTOR_RPL_MASK; | ||
1813 | |||
1814 | if (!var.s) | ||
1815 | return false; | ||
1816 | if (!var.present) | ||
1817 | return false; | ||
1818 | if (~var.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK)) { | ||
1819 | if (var.dpl < rpl) /* DPL < RPL */ | ||
1820 | return false; | ||
1821 | } | ||
1822 | |||
1823 | /* TODO: Add other members to kvm_segment_field to allow checking for other access | ||
1824 | * rights flags | ||
1825 | */ | ||
1826 | return true; | ||
1827 | } | ||
1828 | |||
1829 | static bool tr_valid(struct kvm_vcpu *vcpu) | ||
1830 | { | ||
1831 | struct kvm_segment tr; | ||
1832 | |||
1833 | vmx_get_segment(vcpu, &tr, VCPU_SREG_TR); | ||
1834 | |||
1835 | if (tr.selector & SELECTOR_TI_MASK) /* TI = 1 */ | ||
1836 | return false; | ||
1837 | if ((tr.type != 3) || (tr.type != 11)) /* TODO: Check if guest is in IA32e mode */ | ||
1838 | return false; | ||
1839 | if (!tr.present) | ||
1840 | return false; | ||
1841 | |||
1842 | return true; | ||
1843 | } | ||
1844 | |||
1845 | static bool ldtr_valid(struct kvm_vcpu *vcpu) | ||
1846 | { | ||
1847 | struct kvm_segment ldtr; | ||
1848 | |||
1849 | vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR); | ||
1850 | |||
1851 | if (ldtr.selector & SELECTOR_TI_MASK) /* TI = 1 */ | ||
1852 | return false; | ||
1853 | if (ldtr.type != 2) | ||
1854 | return false; | ||
1855 | if (!ldtr.present) | ||
1856 | return false; | ||
1857 | |||
1858 | return true; | ||
1859 | } | ||
1860 | |||
1861 | static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu) | ||
1862 | { | ||
1863 | struct kvm_segment cs, ss; | ||
1864 | |||
1865 | vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); | ||
1866 | vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); | ||
1867 | |||
1868 | return ((cs.selector & SELECTOR_RPL_MASK) == | ||
1869 | (ss.selector & SELECTOR_RPL_MASK)); | ||
1870 | } | ||
1871 | |||
1872 | /* | ||
1873 | * Check if guest state is valid. Returns true if valid, false if | ||
1874 | * not. | ||
1875 | * We assume that registers are always usable | ||
1876 | */ | ||
1877 | static bool guest_state_valid(struct kvm_vcpu *vcpu) | ||
1878 | { | ||
1879 | /* real mode guest state checks */ | ||
1880 | if (!(vcpu->arch.cr0 & X86_CR0_PE)) { | ||
1881 | if (!rmode_segment_valid(vcpu, VCPU_SREG_CS)) | ||
1882 | return false; | ||
1883 | if (!rmode_segment_valid(vcpu, VCPU_SREG_SS)) | ||
1884 | return false; | ||
1885 | if (!rmode_segment_valid(vcpu, VCPU_SREG_DS)) | ||
1886 | return false; | ||
1887 | if (!rmode_segment_valid(vcpu, VCPU_SREG_ES)) | ||
1888 | return false; | ||
1889 | if (!rmode_segment_valid(vcpu, VCPU_SREG_FS)) | ||
1890 | return false; | ||
1891 | if (!rmode_segment_valid(vcpu, VCPU_SREG_GS)) | ||
1892 | return false; | ||
1893 | } else { | ||
1894 | /* protected mode guest state checks */ | ||
1895 | if (!cs_ss_rpl_check(vcpu)) | ||
1896 | return false; | ||
1897 | if (!code_segment_valid(vcpu)) | ||
1898 | return false; | ||
1899 | if (!stack_segment_valid(vcpu)) | ||
1900 | return false; | ||
1901 | if (!data_segment_valid(vcpu, VCPU_SREG_DS)) | ||
1902 | return false; | ||
1903 | if (!data_segment_valid(vcpu, VCPU_SREG_ES)) | ||
1904 | return false; | ||
1905 | if (!data_segment_valid(vcpu, VCPU_SREG_FS)) | ||
1906 | return false; | ||
1907 | if (!data_segment_valid(vcpu, VCPU_SREG_GS)) | ||
1908 | return false; | ||
1909 | if (!tr_valid(vcpu)) | ||
1910 | return false; | ||
1911 | if (!ldtr_valid(vcpu)) | ||
1912 | return false; | ||
1913 | } | ||
1914 | /* TODO: | ||
1915 | * - Add checks on RIP | ||
1916 | * - Add checks on RFLAGS | ||
1917 | */ | ||
1918 | |||
1919 | return true; | ||
1920 | } | ||
1921 | |||
1718 | static int init_rmode_tss(struct kvm *kvm) | 1922 | static int init_rmode_tss(struct kvm *kvm) |
1719 | { | 1923 | { |
1720 | gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT; | 1924 | gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT; |
@@ -1726,7 +1930,8 @@ static int init_rmode_tss(struct kvm *kvm) | |||
1726 | if (r < 0) | 1930 | if (r < 0) |
1727 | goto out; | 1931 | goto out; |
1728 | data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE; | 1932 | data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE; |
1729 | r = kvm_write_guest_page(kvm, fn++, &data, 0x66, sizeof(u16)); | 1933 | r = kvm_write_guest_page(kvm, fn++, &data, |
1934 | TSS_IOPB_BASE_OFFSET, sizeof(u16)); | ||
1730 | if (r < 0) | 1935 | if (r < 0) |
1731 | goto out; | 1936 | goto out; |
1732 | r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE); | 1937 | r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE); |
@@ -1789,7 +1994,7 @@ static void seg_setup(int seg) | |||
1789 | vmcs_write16(sf->selector, 0); | 1994 | vmcs_write16(sf->selector, 0); |
1790 | vmcs_writel(sf->base, 0); | 1995 | vmcs_writel(sf->base, 0); |
1791 | vmcs_write32(sf->limit, 0xffff); | 1996 | vmcs_write32(sf->limit, 0xffff); |
1792 | vmcs_write32(sf->ar_bytes, 0x93); | 1997 | vmcs_write32(sf->ar_bytes, 0xf3); |
1793 | } | 1998 | } |
1794 | 1999 | ||
1795 | static int alloc_apic_access_page(struct kvm *kvm) | 2000 | static int alloc_apic_access_page(struct kvm *kvm) |
@@ -1808,9 +2013,7 @@ static int alloc_apic_access_page(struct kvm *kvm) | |||
1808 | if (r) | 2013 | if (r) |
1809 | goto out; | 2014 | goto out; |
1810 | 2015 | ||
1811 | down_read(¤t->mm->mmap_sem); | ||
1812 | kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00); | 2016 | kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00); |
1813 | up_read(¤t->mm->mmap_sem); | ||
1814 | out: | 2017 | out: |
1815 | up_write(&kvm->slots_lock); | 2018 | up_write(&kvm->slots_lock); |
1816 | return r; | 2019 | return r; |
@@ -1832,10 +2035,8 @@ static int alloc_identity_pagetable(struct kvm *kvm) | |||
1832 | if (r) | 2035 | if (r) |
1833 | goto out; | 2036 | goto out; |
1834 | 2037 | ||
1835 | down_read(¤t->mm->mmap_sem); | ||
1836 | kvm->arch.ept_identity_pagetable = gfn_to_page(kvm, | 2038 | kvm->arch.ept_identity_pagetable = gfn_to_page(kvm, |
1837 | VMX_EPT_IDENTITY_PAGETABLE_ADDR >> PAGE_SHIFT); | 2039 | VMX_EPT_IDENTITY_PAGETABLE_ADDR >> PAGE_SHIFT); |
1838 | up_read(¤t->mm->mmap_sem); | ||
1839 | out: | 2040 | out: |
1840 | up_write(&kvm->slots_lock); | 2041 | up_write(&kvm->slots_lock); |
1841 | return r; | 2042 | return r; |
@@ -1917,7 +2118,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) | |||
1917 | } | 2118 | } |
1918 | if (!vm_need_ept()) | 2119 | if (!vm_need_ept()) |
1919 | exec_control |= CPU_BASED_CR3_STORE_EXITING | | 2120 | exec_control |= CPU_BASED_CR3_STORE_EXITING | |
1920 | CPU_BASED_CR3_LOAD_EXITING; | 2121 | CPU_BASED_CR3_LOAD_EXITING | |
2122 | CPU_BASED_INVLPG_EXITING; | ||
1921 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control); | 2123 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control); |
1922 | 2124 | ||
1923 | if (cpu_has_secondary_exec_ctrls()) { | 2125 | if (cpu_has_secondary_exec_ctrls()) { |
@@ -2019,6 +2221,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu) | |||
2019 | u64 msr; | 2221 | u64 msr; |
2020 | int ret; | 2222 | int ret; |
2021 | 2223 | ||
2224 | vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)); | ||
2022 | down_read(&vcpu->kvm->slots_lock); | 2225 | down_read(&vcpu->kvm->slots_lock); |
2023 | if (!init_rmode(vmx->vcpu.kvm)) { | 2226 | if (!init_rmode(vmx->vcpu.kvm)) { |
2024 | ret = -ENOMEM; | 2227 | ret = -ENOMEM; |
@@ -2036,6 +2239,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu) | |||
2036 | 2239 | ||
2037 | fx_init(&vmx->vcpu); | 2240 | fx_init(&vmx->vcpu); |
2038 | 2241 | ||
2242 | seg_setup(VCPU_SREG_CS); | ||
2039 | /* | 2243 | /* |
2040 | * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode | 2244 | * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode |
2041 | * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4. Sigh. | 2245 | * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4. Sigh. |
@@ -2047,8 +2251,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu) | |||
2047 | vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.arch.sipi_vector << 8); | 2251 | vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.arch.sipi_vector << 8); |
2048 | vmcs_writel(GUEST_CS_BASE, vmx->vcpu.arch.sipi_vector << 12); | 2252 | vmcs_writel(GUEST_CS_BASE, vmx->vcpu.arch.sipi_vector << 12); |
2049 | } | 2253 | } |
2050 | vmcs_write32(GUEST_CS_LIMIT, 0xffff); | ||
2051 | vmcs_write32(GUEST_CS_AR_BYTES, 0x9b); | ||
2052 | 2254 | ||
2053 | seg_setup(VCPU_SREG_DS); | 2255 | seg_setup(VCPU_SREG_DS); |
2054 | seg_setup(VCPU_SREG_ES); | 2256 | seg_setup(VCPU_SREG_ES); |
@@ -2072,10 +2274,10 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu) | |||
2072 | 2274 | ||
2073 | vmcs_writel(GUEST_RFLAGS, 0x02); | 2275 | vmcs_writel(GUEST_RFLAGS, 0x02); |
2074 | if (vmx->vcpu.vcpu_id == 0) | 2276 | if (vmx->vcpu.vcpu_id == 0) |
2075 | vmcs_writel(GUEST_RIP, 0xfff0); | 2277 | kvm_rip_write(vcpu, 0xfff0); |
2076 | else | 2278 | else |
2077 | vmcs_writel(GUEST_RIP, 0); | 2279 | kvm_rip_write(vcpu, 0); |
2078 | vmcs_writel(GUEST_RSP, 0); | 2280 | kvm_register_write(vcpu, VCPU_REGS_RSP, 0); |
2079 | 2281 | ||
2080 | /* todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 */ | 2282 | /* todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 */ |
2081 | vmcs_writel(GUEST_DR7, 0x400); | 2283 | vmcs_writel(GUEST_DR7, 0x400); |
@@ -2125,6 +2327,9 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu) | |||
2125 | 2327 | ||
2126 | ret = 0; | 2328 | ret = 0; |
2127 | 2329 | ||
2330 | /* HACK: Don't enable emulation on guest boot/reset */ | ||
2331 | vmx->emulation_required = 0; | ||
2332 | |||
2128 | out: | 2333 | out: |
2129 | up_read(&vcpu->kvm->slots_lock); | 2334 | up_read(&vcpu->kvm->slots_lock); |
2130 | return ret; | 2335 | return ret; |
@@ -2136,14 +2341,15 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq) | |||
2136 | 2341 | ||
2137 | KVMTRACE_1D(INJ_VIRQ, vcpu, (u32)irq, handler); | 2342 | KVMTRACE_1D(INJ_VIRQ, vcpu, (u32)irq, handler); |
2138 | 2343 | ||
2344 | ++vcpu->stat.irq_injections; | ||
2139 | if (vcpu->arch.rmode.active) { | 2345 | if (vcpu->arch.rmode.active) { |
2140 | vmx->rmode.irq.pending = true; | 2346 | vmx->rmode.irq.pending = true; |
2141 | vmx->rmode.irq.vector = irq; | 2347 | vmx->rmode.irq.vector = irq; |
2142 | vmx->rmode.irq.rip = vmcs_readl(GUEST_RIP); | 2348 | vmx->rmode.irq.rip = kvm_rip_read(vcpu); |
2143 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, | 2349 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, |
2144 | irq | INTR_TYPE_SOFT_INTR | INTR_INFO_VALID_MASK); | 2350 | irq | INTR_TYPE_SOFT_INTR | INTR_INFO_VALID_MASK); |
2145 | vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1); | 2351 | vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1); |
2146 | vmcs_writel(GUEST_RIP, vmx->rmode.irq.rip - 1); | 2352 | kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1); |
2147 | return; | 2353 | return; |
2148 | } | 2354 | } |
2149 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, | 2355 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, |
@@ -2154,7 +2360,6 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu) | |||
2154 | { | 2360 | { |
2155 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, | 2361 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, |
2156 | INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR); | 2362 | INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR); |
2157 | vcpu->arch.nmi_pending = 0; | ||
2158 | } | 2363 | } |
2159 | 2364 | ||
2160 | static void kvm_do_inject_irq(struct kvm_vcpu *vcpu) | 2365 | static void kvm_do_inject_irq(struct kvm_vcpu *vcpu) |
@@ -2166,7 +2371,7 @@ static void kvm_do_inject_irq(struct kvm_vcpu *vcpu) | |||
2166 | clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]); | 2371 | clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]); |
2167 | if (!vcpu->arch.irq_pending[word_index]) | 2372 | if (!vcpu->arch.irq_pending[word_index]) |
2168 | clear_bit(word_index, &vcpu->arch.irq_summary); | 2373 | clear_bit(word_index, &vcpu->arch.irq_summary); |
2169 | vmx_inject_irq(vcpu, irq); | 2374 | kvm_queue_interrupt(vcpu, irq); |
2170 | } | 2375 | } |
2171 | 2376 | ||
2172 | 2377 | ||
@@ -2180,13 +2385,12 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu, | |||
2180 | (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0); | 2385 | (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0); |
2181 | 2386 | ||
2182 | if (vcpu->arch.interrupt_window_open && | 2387 | if (vcpu->arch.interrupt_window_open && |
2183 | vcpu->arch.irq_summary && | 2388 | vcpu->arch.irq_summary && !vcpu->arch.interrupt.pending) |
2184 | !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK)) | ||
2185 | /* | ||
2186 | * If interrupts enabled, and not blocked by sti or mov ss. Good. | ||
2187 | */ | ||
2188 | kvm_do_inject_irq(vcpu); | 2389 | kvm_do_inject_irq(vcpu); |
2189 | 2390 | ||
2391 | if (vcpu->arch.interrupt_window_open && vcpu->arch.interrupt.pending) | ||
2392 | vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr); | ||
2393 | |||
2190 | cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); | 2394 | cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); |
2191 | if (!vcpu->arch.interrupt_window_open && | 2395 | if (!vcpu->arch.interrupt_window_open && |
2192 | (vcpu->arch.irq_summary || kvm_run->request_interrupt_window)) | 2396 | (vcpu->arch.irq_summary || kvm_run->request_interrupt_window)) |
@@ -2237,9 +2441,6 @@ static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu) | |||
2237 | static int handle_rmode_exception(struct kvm_vcpu *vcpu, | 2441 | static int handle_rmode_exception(struct kvm_vcpu *vcpu, |
2238 | int vec, u32 err_code) | 2442 | int vec, u32 err_code) |
2239 | { | 2443 | { |
2240 | if (!vcpu->arch.rmode.active) | ||
2241 | return 0; | ||
2242 | |||
2243 | /* | 2444 | /* |
2244 | * Instruction with address size override prefix opcode 0x67 | 2445 | * Instruction with address size override prefix opcode 0x67 |
2245 | * Cause the #SS fault with 0 error code in VM86 mode. | 2446 | * Cause the #SS fault with 0 error code in VM86 mode. |
@@ -2247,6 +2448,25 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu, | |||
2247 | if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) | 2448 | if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) |
2248 | if (emulate_instruction(vcpu, NULL, 0, 0, 0) == EMULATE_DONE) | 2449 | if (emulate_instruction(vcpu, NULL, 0, 0, 0) == EMULATE_DONE) |
2249 | return 1; | 2450 | return 1; |
2451 | /* | ||
2452 | * Forward all other exceptions that are valid in real mode. | ||
2453 | * FIXME: Breaks guest debugging in real mode, needs to be fixed with | ||
2454 | * the required debugging infrastructure rework. | ||
2455 | */ | ||
2456 | switch (vec) { | ||
2457 | case DE_VECTOR: | ||
2458 | case DB_VECTOR: | ||
2459 | case BP_VECTOR: | ||
2460 | case OF_VECTOR: | ||
2461 | case BR_VECTOR: | ||
2462 | case UD_VECTOR: | ||
2463 | case DF_VECTOR: | ||
2464 | case SS_VECTOR: | ||
2465 | case GP_VECTOR: | ||
2466 | case MF_VECTOR: | ||
2467 | kvm_queue_exception(vcpu, vec); | ||
2468 | return 1; | ||
2469 | } | ||
2250 | return 0; | 2470 | return 0; |
2251 | } | 2471 | } |
2252 | 2472 | ||
@@ -2288,7 +2508,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2288 | } | 2508 | } |
2289 | 2509 | ||
2290 | error_code = 0; | 2510 | error_code = 0; |
2291 | rip = vmcs_readl(GUEST_RIP); | 2511 | rip = kvm_rip_read(vcpu); |
2292 | if (intr_info & INTR_INFO_DELIVER_CODE_MASK) | 2512 | if (intr_info & INTR_INFO_DELIVER_CODE_MASK) |
2293 | error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); | 2513 | error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); |
2294 | if (is_page_fault(intr_info)) { | 2514 | if (is_page_fault(intr_info)) { |
@@ -2298,7 +2518,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2298 | cr2 = vmcs_readl(EXIT_QUALIFICATION); | 2518 | cr2 = vmcs_readl(EXIT_QUALIFICATION); |
2299 | KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2, | 2519 | KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2, |
2300 | (u32)((u64)cr2 >> 32), handler); | 2520 | (u32)((u64)cr2 >> 32), handler); |
2301 | if (vect_info & VECTORING_INFO_VALID_MASK) | 2521 | if (vcpu->arch.interrupt.pending || vcpu->arch.exception.pending) |
2302 | kvm_mmu_unprotect_page_virt(vcpu, cr2); | 2522 | kvm_mmu_unprotect_page_virt(vcpu, cr2); |
2303 | return kvm_mmu_page_fault(vcpu, cr2, error_code); | 2523 | return kvm_mmu_page_fault(vcpu, cr2, error_code); |
2304 | } | 2524 | } |
@@ -2386,27 +2606,25 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2386 | reg = (exit_qualification >> 8) & 15; | 2606 | reg = (exit_qualification >> 8) & 15; |
2387 | switch ((exit_qualification >> 4) & 3) { | 2607 | switch ((exit_qualification >> 4) & 3) { |
2388 | case 0: /* mov to cr */ | 2608 | case 0: /* mov to cr */ |
2389 | KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)vcpu->arch.regs[reg], | 2609 | KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, |
2390 | (u32)((u64)vcpu->arch.regs[reg] >> 32), handler); | 2610 | (u32)kvm_register_read(vcpu, reg), |
2611 | (u32)((u64)kvm_register_read(vcpu, reg) >> 32), | ||
2612 | handler); | ||
2391 | switch (cr) { | 2613 | switch (cr) { |
2392 | case 0: | 2614 | case 0: |
2393 | vcpu_load_rsp_rip(vcpu); | 2615 | kvm_set_cr0(vcpu, kvm_register_read(vcpu, reg)); |
2394 | kvm_set_cr0(vcpu, vcpu->arch.regs[reg]); | ||
2395 | skip_emulated_instruction(vcpu); | 2616 | skip_emulated_instruction(vcpu); |
2396 | return 1; | 2617 | return 1; |
2397 | case 3: | 2618 | case 3: |
2398 | vcpu_load_rsp_rip(vcpu); | 2619 | kvm_set_cr3(vcpu, kvm_register_read(vcpu, reg)); |
2399 | kvm_set_cr3(vcpu, vcpu->arch.regs[reg]); | ||
2400 | skip_emulated_instruction(vcpu); | 2620 | skip_emulated_instruction(vcpu); |
2401 | return 1; | 2621 | return 1; |
2402 | case 4: | 2622 | case 4: |
2403 | vcpu_load_rsp_rip(vcpu); | 2623 | kvm_set_cr4(vcpu, kvm_register_read(vcpu, reg)); |
2404 | kvm_set_cr4(vcpu, vcpu->arch.regs[reg]); | ||
2405 | skip_emulated_instruction(vcpu); | 2624 | skip_emulated_instruction(vcpu); |
2406 | return 1; | 2625 | return 1; |
2407 | case 8: | 2626 | case 8: |
2408 | vcpu_load_rsp_rip(vcpu); | 2627 | kvm_set_cr8(vcpu, kvm_register_read(vcpu, reg)); |
2409 | kvm_set_cr8(vcpu, vcpu->arch.regs[reg]); | ||
2410 | skip_emulated_instruction(vcpu); | 2628 | skip_emulated_instruction(vcpu); |
2411 | if (irqchip_in_kernel(vcpu->kvm)) | 2629 | if (irqchip_in_kernel(vcpu->kvm)) |
2412 | return 1; | 2630 | return 1; |
@@ -2415,7 +2633,6 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2415 | }; | 2633 | }; |
2416 | break; | 2634 | break; |
2417 | case 2: /* clts */ | 2635 | case 2: /* clts */ |
2418 | vcpu_load_rsp_rip(vcpu); | ||
2419 | vmx_fpu_deactivate(vcpu); | 2636 | vmx_fpu_deactivate(vcpu); |
2420 | vcpu->arch.cr0 &= ~X86_CR0_TS; | 2637 | vcpu->arch.cr0 &= ~X86_CR0_TS; |
2421 | vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0); | 2638 | vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0); |
@@ -2426,21 +2643,17 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2426 | case 1: /*mov from cr*/ | 2643 | case 1: /*mov from cr*/ |
2427 | switch (cr) { | 2644 | switch (cr) { |
2428 | case 3: | 2645 | case 3: |
2429 | vcpu_load_rsp_rip(vcpu); | 2646 | kvm_register_write(vcpu, reg, vcpu->arch.cr3); |
2430 | vcpu->arch.regs[reg] = vcpu->arch.cr3; | ||
2431 | vcpu_put_rsp_rip(vcpu); | ||
2432 | KVMTRACE_3D(CR_READ, vcpu, (u32)cr, | 2647 | KVMTRACE_3D(CR_READ, vcpu, (u32)cr, |
2433 | (u32)vcpu->arch.regs[reg], | 2648 | (u32)kvm_register_read(vcpu, reg), |
2434 | (u32)((u64)vcpu->arch.regs[reg] >> 32), | 2649 | (u32)((u64)kvm_register_read(vcpu, reg) >> 32), |
2435 | handler); | 2650 | handler); |
2436 | skip_emulated_instruction(vcpu); | 2651 | skip_emulated_instruction(vcpu); |
2437 | return 1; | 2652 | return 1; |
2438 | case 8: | 2653 | case 8: |
2439 | vcpu_load_rsp_rip(vcpu); | 2654 | kvm_register_write(vcpu, reg, kvm_get_cr8(vcpu)); |
2440 | vcpu->arch.regs[reg] = kvm_get_cr8(vcpu); | ||
2441 | vcpu_put_rsp_rip(vcpu); | ||
2442 | KVMTRACE_2D(CR_READ, vcpu, (u32)cr, | 2655 | KVMTRACE_2D(CR_READ, vcpu, (u32)cr, |
2443 | (u32)vcpu->arch.regs[reg], handler); | 2656 | (u32)kvm_register_read(vcpu, reg), handler); |
2444 | skip_emulated_instruction(vcpu); | 2657 | skip_emulated_instruction(vcpu); |
2445 | return 1; | 2658 | return 1; |
2446 | } | 2659 | } |
@@ -2472,7 +2685,6 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2472 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); | 2685 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
2473 | dr = exit_qualification & 7; | 2686 | dr = exit_qualification & 7; |
2474 | reg = (exit_qualification >> 8) & 15; | 2687 | reg = (exit_qualification >> 8) & 15; |
2475 | vcpu_load_rsp_rip(vcpu); | ||
2476 | if (exit_qualification & 16) { | 2688 | if (exit_qualification & 16) { |
2477 | /* mov from dr */ | 2689 | /* mov from dr */ |
2478 | switch (dr) { | 2690 | switch (dr) { |
@@ -2485,12 +2697,11 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2485 | default: | 2697 | default: |
2486 | val = 0; | 2698 | val = 0; |
2487 | } | 2699 | } |
2488 | vcpu->arch.regs[reg] = val; | 2700 | kvm_register_write(vcpu, reg, val); |
2489 | KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler); | 2701 | KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler); |
2490 | } else { | 2702 | } else { |
2491 | /* mov to dr */ | 2703 | /* mov to dr */ |
2492 | } | 2704 | } |
2493 | vcpu_put_rsp_rip(vcpu); | ||
2494 | skip_emulated_instruction(vcpu); | 2705 | skip_emulated_instruction(vcpu); |
2495 | return 1; | 2706 | return 1; |
2496 | } | 2707 | } |
@@ -2583,6 +2794,15 @@ static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2583 | return 1; | 2794 | return 1; |
2584 | } | 2795 | } |
2585 | 2796 | ||
2797 | static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
2798 | { | ||
2799 | u64 exit_qualification = vmcs_read64(EXIT_QUALIFICATION); | ||
2800 | |||
2801 | kvm_mmu_invlpg(vcpu, exit_qualification); | ||
2802 | skip_emulated_instruction(vcpu); | ||
2803 | return 1; | ||
2804 | } | ||
2805 | |||
2586 | static int handle_wbinvd(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 2806 | static int handle_wbinvd(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) |
2587 | { | 2807 | { |
2588 | skip_emulated_instruction(vcpu); | 2808 | skip_emulated_instruction(vcpu); |
@@ -2695,6 +2915,43 @@ static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2695 | return 1; | 2915 | return 1; |
2696 | } | 2916 | } |
2697 | 2917 | ||
2918 | static void handle_invalid_guest_state(struct kvm_vcpu *vcpu, | ||
2919 | struct kvm_run *kvm_run) | ||
2920 | { | ||
2921 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
2922 | int err; | ||
2923 | |||
2924 | preempt_enable(); | ||
2925 | local_irq_enable(); | ||
2926 | |||
2927 | while (!guest_state_valid(vcpu)) { | ||
2928 | err = emulate_instruction(vcpu, kvm_run, 0, 0, 0); | ||
2929 | |||
2930 | switch (err) { | ||
2931 | case EMULATE_DONE: | ||
2932 | break; | ||
2933 | case EMULATE_DO_MMIO: | ||
2934 | kvm_report_emulation_failure(vcpu, "mmio"); | ||
2935 | /* TODO: Handle MMIO */ | ||
2936 | return; | ||
2937 | default: | ||
2938 | kvm_report_emulation_failure(vcpu, "emulation failure"); | ||
2939 | return; | ||
2940 | } | ||
2941 | |||
2942 | if (signal_pending(current)) | ||
2943 | break; | ||
2944 | if (need_resched()) | ||
2945 | schedule(); | ||
2946 | } | ||
2947 | |||
2948 | local_irq_disable(); | ||
2949 | preempt_disable(); | ||
2950 | |||
2951 | /* Guest state should be valid now, no more emulation should be needed */ | ||
2952 | vmx->emulation_required = 0; | ||
2953 | } | ||
2954 | |||
2698 | /* | 2955 | /* |
2699 | * The exit handlers return 1 if the exit was handled fully and guest execution | 2956 | * The exit handlers return 1 if the exit was handled fully and guest execution |
2700 | * may resume. Otherwise they set the kvm_run parameter to indicate what needs | 2957 | * may resume. Otherwise they set the kvm_run parameter to indicate what needs |
@@ -2714,6 +2971,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu, | |||
2714 | [EXIT_REASON_MSR_WRITE] = handle_wrmsr, | 2971 | [EXIT_REASON_MSR_WRITE] = handle_wrmsr, |
2715 | [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window, | 2972 | [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window, |
2716 | [EXIT_REASON_HLT] = handle_halt, | 2973 | [EXIT_REASON_HLT] = handle_halt, |
2974 | [EXIT_REASON_INVLPG] = handle_invlpg, | ||
2717 | [EXIT_REASON_VMCALL] = handle_vmcall, | 2975 | [EXIT_REASON_VMCALL] = handle_vmcall, |
2718 | [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold, | 2976 | [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold, |
2719 | [EXIT_REASON_APIC_ACCESS] = handle_apic_access, | 2977 | [EXIT_REASON_APIC_ACCESS] = handle_apic_access, |
@@ -2735,8 +2993,8 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
2735 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 2993 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
2736 | u32 vectoring_info = vmx->idt_vectoring_info; | 2994 | u32 vectoring_info = vmx->idt_vectoring_info; |
2737 | 2995 | ||
2738 | KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)vmcs_readl(GUEST_RIP), | 2996 | KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)kvm_rip_read(vcpu), |
2739 | (u32)((u64)vmcs_readl(GUEST_RIP) >> 32), entryexit); | 2997 | (u32)((u64)kvm_rip_read(vcpu) >> 32), entryexit); |
2740 | 2998 | ||
2741 | /* Access CR3 don't cause VMExit in paging mode, so we need | 2999 | /* Access CR3 don't cause VMExit in paging mode, so we need |
2742 | * to sync with guest real CR3. */ | 3000 | * to sync with guest real CR3. */ |
@@ -2829,88 +3087,92 @@ static void enable_intr_window(struct kvm_vcpu *vcpu) | |||
2829 | enable_irq_window(vcpu); | 3087 | enable_irq_window(vcpu); |
2830 | } | 3088 | } |
2831 | 3089 | ||
2832 | static void vmx_intr_assist(struct kvm_vcpu *vcpu) | 3090 | static void vmx_complete_interrupts(struct vcpu_vmx *vmx) |
2833 | { | 3091 | { |
2834 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 3092 | u32 exit_intr_info; |
2835 | u32 idtv_info_field, intr_info_field, exit_intr_info_field; | 3093 | u32 idt_vectoring_info; |
2836 | int vector; | 3094 | bool unblock_nmi; |
3095 | u8 vector; | ||
3096 | int type; | ||
3097 | bool idtv_info_valid; | ||
3098 | u32 error; | ||
2837 | 3099 | ||
2838 | update_tpr_threshold(vcpu); | 3100 | exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); |
2839 | 3101 | if (cpu_has_virtual_nmis()) { | |
2840 | intr_info_field = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD); | 3102 | unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0; |
2841 | exit_intr_info_field = vmcs_read32(VM_EXIT_INTR_INFO); | 3103 | vector = exit_intr_info & INTR_INFO_VECTOR_MASK; |
2842 | idtv_info_field = vmx->idt_vectoring_info; | 3104 | /* |
2843 | if (intr_info_field & INTR_INFO_VALID_MASK) { | 3105 | * SDM 3: 25.7.1.2 |
2844 | if (idtv_info_field & INTR_INFO_VALID_MASK) { | 3106 | * Re-set bit "block by NMI" before VM entry if vmexit caused by |
2845 | /* TODO: fault when IDT_Vectoring */ | 3107 | * a guest IRET fault. |
2846 | if (printk_ratelimit()) | 3108 | */ |
2847 | printk(KERN_ERR "Fault when IDT_Vectoring\n"); | 3109 | if (unblock_nmi && vector != DF_VECTOR) |
2848 | } | 3110 | vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, |
2849 | enable_intr_window(vcpu); | 3111 | GUEST_INTR_STATE_NMI); |
2850 | return; | ||
2851 | } | 3112 | } |
2852 | if (unlikely(idtv_info_field & INTR_INFO_VALID_MASK)) { | ||
2853 | if ((idtv_info_field & VECTORING_INFO_TYPE_MASK) | ||
2854 | == INTR_TYPE_EXT_INTR | ||
2855 | && vcpu->arch.rmode.active) { | ||
2856 | u8 vect = idtv_info_field & VECTORING_INFO_VECTOR_MASK; | ||
2857 | |||
2858 | vmx_inject_irq(vcpu, vect); | ||
2859 | enable_intr_window(vcpu); | ||
2860 | return; | ||
2861 | } | ||
2862 | |||
2863 | KVMTRACE_1D(REDELIVER_EVT, vcpu, idtv_info_field, handler); | ||
2864 | 3113 | ||
3114 | idt_vectoring_info = vmx->idt_vectoring_info; | ||
3115 | idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK; | ||
3116 | vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK; | ||
3117 | type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK; | ||
3118 | if (vmx->vcpu.arch.nmi_injected) { | ||
2865 | /* | 3119 | /* |
2866 | * SDM 3: 25.7.1.2 | 3120 | * SDM 3: 25.7.1.2 |
2867 | * Clear bit "block by NMI" before VM entry if a NMI delivery | 3121 | * Clear bit "block by NMI" before VM entry if a NMI delivery |
2868 | * faulted. | 3122 | * faulted. |
2869 | */ | 3123 | */ |
2870 | if ((idtv_info_field & VECTORING_INFO_TYPE_MASK) | 3124 | if (idtv_info_valid && type == INTR_TYPE_NMI_INTR) |
2871 | == INTR_TYPE_NMI_INTR && cpu_has_virtual_nmis()) | 3125 | vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, |
2872 | vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, | 3126 | GUEST_INTR_STATE_NMI); |
2873 | vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & | 3127 | else |
2874 | ~GUEST_INTR_STATE_NMI); | 3128 | vmx->vcpu.arch.nmi_injected = false; |
2875 | 3129 | } | |
2876 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field | 3130 | kvm_clear_exception_queue(&vmx->vcpu); |
2877 | & ~INTR_INFO_RESVD_BITS_MASK); | 3131 | if (idtv_info_valid && type == INTR_TYPE_EXCEPTION) { |
2878 | vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, | 3132 | if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) { |
2879 | vmcs_read32(VM_EXIT_INSTRUCTION_LEN)); | 3133 | error = vmcs_read32(IDT_VECTORING_ERROR_CODE); |
2880 | 3134 | kvm_queue_exception_e(&vmx->vcpu, vector, error); | |
2881 | if (unlikely(idtv_info_field & INTR_INFO_DELIVER_CODE_MASK)) | 3135 | } else |
2882 | vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, | 3136 | kvm_queue_exception(&vmx->vcpu, vector); |
2883 | vmcs_read32(IDT_VECTORING_ERROR_CODE)); | 3137 | vmx->idt_vectoring_info = 0; |
2884 | enable_intr_window(vcpu); | ||
2885 | return; | ||
2886 | } | 3138 | } |
3139 | kvm_clear_interrupt_queue(&vmx->vcpu); | ||
3140 | if (idtv_info_valid && type == INTR_TYPE_EXT_INTR) { | ||
3141 | kvm_queue_interrupt(&vmx->vcpu, vector); | ||
3142 | vmx->idt_vectoring_info = 0; | ||
3143 | } | ||
3144 | } | ||
3145 | |||
3146 | static void vmx_intr_assist(struct kvm_vcpu *vcpu) | ||
3147 | { | ||
3148 | update_tpr_threshold(vcpu); | ||
3149 | |||
2887 | if (cpu_has_virtual_nmis()) { | 3150 | if (cpu_has_virtual_nmis()) { |
2888 | /* | 3151 | if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) { |
2889 | * SDM 3: 25.7.1.2 | 3152 | if (vmx_nmi_enabled(vcpu)) { |
2890 | * Re-set bit "block by NMI" before VM entry if vmexit caused by | 3153 | vcpu->arch.nmi_pending = false; |
2891 | * a guest IRET fault. | 3154 | vcpu->arch.nmi_injected = true; |
2892 | */ | 3155 | } else { |
2893 | if ((exit_intr_info_field & INTR_INFO_UNBLOCK_NMI) && | 3156 | enable_intr_window(vcpu); |
2894 | (exit_intr_info_field & INTR_INFO_VECTOR_MASK) != 8) | 3157 | return; |
2895 | vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, | 3158 | } |
2896 | vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) | | 3159 | } |
2897 | GUEST_INTR_STATE_NMI); | 3160 | if (vcpu->arch.nmi_injected) { |
2898 | else if (vcpu->arch.nmi_pending) { | 3161 | vmx_inject_nmi(vcpu); |
2899 | if (vmx_nmi_enabled(vcpu)) | ||
2900 | vmx_inject_nmi(vcpu); | ||
2901 | enable_intr_window(vcpu); | 3162 | enable_intr_window(vcpu); |
2902 | return; | 3163 | return; |
2903 | } | 3164 | } |
2904 | |||
2905 | } | 3165 | } |
2906 | if (!kvm_cpu_has_interrupt(vcpu)) | 3166 | if (!vcpu->arch.interrupt.pending && kvm_cpu_has_interrupt(vcpu)) { |
2907 | return; | 3167 | if (vmx_irq_enabled(vcpu)) |
2908 | if (vmx_irq_enabled(vcpu)) { | 3168 | kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu)); |
2909 | vector = kvm_cpu_get_interrupt(vcpu); | 3169 | else |
2910 | vmx_inject_irq(vcpu, vector); | 3170 | enable_irq_window(vcpu); |
2911 | kvm_timer_intr_post(vcpu, vector); | 3171 | } |
2912 | } else | 3172 | if (vcpu->arch.interrupt.pending) { |
2913 | enable_irq_window(vcpu); | 3173 | vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr); |
3174 | kvm_timer_intr_post(vcpu, vcpu->arch.interrupt.nr); | ||
3175 | } | ||
2914 | } | 3176 | } |
2915 | 3177 | ||
2916 | /* | 3178 | /* |
@@ -2922,9 +3184,9 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu) | |||
2922 | static void fixup_rmode_irq(struct vcpu_vmx *vmx) | 3184 | static void fixup_rmode_irq(struct vcpu_vmx *vmx) |
2923 | { | 3185 | { |
2924 | vmx->rmode.irq.pending = 0; | 3186 | vmx->rmode.irq.pending = 0; |
2925 | if (vmcs_readl(GUEST_RIP) + 1 != vmx->rmode.irq.rip) | 3187 | if (kvm_rip_read(&vmx->vcpu) + 1 != vmx->rmode.irq.rip) |
2926 | return; | 3188 | return; |
2927 | vmcs_writel(GUEST_RIP, vmx->rmode.irq.rip); | 3189 | kvm_rip_write(&vmx->vcpu, vmx->rmode.irq.rip); |
2928 | if (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) { | 3190 | if (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) { |
2929 | vmx->idt_vectoring_info &= ~VECTORING_INFO_TYPE_MASK; | 3191 | vmx->idt_vectoring_info &= ~VECTORING_INFO_TYPE_MASK; |
2930 | vmx->idt_vectoring_info |= INTR_TYPE_EXT_INTR; | 3192 | vmx->idt_vectoring_info |= INTR_TYPE_EXT_INTR; |
@@ -2936,11 +3198,30 @@ static void fixup_rmode_irq(struct vcpu_vmx *vmx) | |||
2936 | | vmx->rmode.irq.vector; | 3198 | | vmx->rmode.irq.vector; |
2937 | } | 3199 | } |
2938 | 3200 | ||
3201 | #ifdef CONFIG_X86_64 | ||
3202 | #define R "r" | ||
3203 | #define Q "q" | ||
3204 | #else | ||
3205 | #define R "e" | ||
3206 | #define Q "l" | ||
3207 | #endif | ||
3208 | |||
2939 | static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3209 | static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) |
2940 | { | 3210 | { |
2941 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 3211 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
2942 | u32 intr_info; | 3212 | u32 intr_info; |
2943 | 3213 | ||
3214 | /* Handle invalid guest state instead of entering VMX */ | ||
3215 | if (vmx->emulation_required && emulate_invalid_guest_state) { | ||
3216 | handle_invalid_guest_state(vcpu, kvm_run); | ||
3217 | return; | ||
3218 | } | ||
3219 | |||
3220 | if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) | ||
3221 | vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); | ||
3222 | if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty)) | ||
3223 | vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); | ||
3224 | |||
2944 | /* | 3225 | /* |
2945 | * Loading guest fpu may have cleared host cr0.ts | 3226 | * Loading guest fpu may have cleared host cr0.ts |
2946 | */ | 3227 | */ |
@@ -2948,26 +3229,25 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2948 | 3229 | ||
2949 | asm( | 3230 | asm( |
2950 | /* Store host registers */ | 3231 | /* Store host registers */ |
2951 | #ifdef CONFIG_X86_64 | 3232 | "push %%"R"dx; push %%"R"bp;" |
2952 | "push %%rdx; push %%rbp;" | 3233 | "push %%"R"cx \n\t" |
2953 | "push %%rcx \n\t" | 3234 | "cmp %%"R"sp, %c[host_rsp](%0) \n\t" |
2954 | #else | 3235 | "je 1f \n\t" |
2955 | "push %%edx; push %%ebp;" | 3236 | "mov %%"R"sp, %c[host_rsp](%0) \n\t" |
2956 | "push %%ecx \n\t" | ||
2957 | #endif | ||
2958 | __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t" | 3237 | __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t" |
3238 | "1: \n\t" | ||
2959 | /* Check if vmlaunch of vmresume is needed */ | 3239 | /* Check if vmlaunch of vmresume is needed */ |
2960 | "cmpl $0, %c[launched](%0) \n\t" | 3240 | "cmpl $0, %c[launched](%0) \n\t" |
2961 | /* Load guest registers. Don't clobber flags. */ | 3241 | /* Load guest registers. Don't clobber flags. */ |
3242 | "mov %c[cr2](%0), %%"R"ax \n\t" | ||
3243 | "mov %%"R"ax, %%cr2 \n\t" | ||
3244 | "mov %c[rax](%0), %%"R"ax \n\t" | ||
3245 | "mov %c[rbx](%0), %%"R"bx \n\t" | ||
3246 | "mov %c[rdx](%0), %%"R"dx \n\t" | ||
3247 | "mov %c[rsi](%0), %%"R"si \n\t" | ||
3248 | "mov %c[rdi](%0), %%"R"di \n\t" | ||
3249 | "mov %c[rbp](%0), %%"R"bp \n\t" | ||
2962 | #ifdef CONFIG_X86_64 | 3250 | #ifdef CONFIG_X86_64 |
2963 | "mov %c[cr2](%0), %%rax \n\t" | ||
2964 | "mov %%rax, %%cr2 \n\t" | ||
2965 | "mov %c[rax](%0), %%rax \n\t" | ||
2966 | "mov %c[rbx](%0), %%rbx \n\t" | ||
2967 | "mov %c[rdx](%0), %%rdx \n\t" | ||
2968 | "mov %c[rsi](%0), %%rsi \n\t" | ||
2969 | "mov %c[rdi](%0), %%rdi \n\t" | ||
2970 | "mov %c[rbp](%0), %%rbp \n\t" | ||
2971 | "mov %c[r8](%0), %%r8 \n\t" | 3251 | "mov %c[r8](%0), %%r8 \n\t" |
2972 | "mov %c[r9](%0), %%r9 \n\t" | 3252 | "mov %c[r9](%0), %%r9 \n\t" |
2973 | "mov %c[r10](%0), %%r10 \n\t" | 3253 | "mov %c[r10](%0), %%r10 \n\t" |
@@ -2976,18 +3256,9 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2976 | "mov %c[r13](%0), %%r13 \n\t" | 3256 | "mov %c[r13](%0), %%r13 \n\t" |
2977 | "mov %c[r14](%0), %%r14 \n\t" | 3257 | "mov %c[r14](%0), %%r14 \n\t" |
2978 | "mov %c[r15](%0), %%r15 \n\t" | 3258 | "mov %c[r15](%0), %%r15 \n\t" |
2979 | "mov %c[rcx](%0), %%rcx \n\t" /* kills %0 (rcx) */ | ||
2980 | #else | ||
2981 | "mov %c[cr2](%0), %%eax \n\t" | ||
2982 | "mov %%eax, %%cr2 \n\t" | ||
2983 | "mov %c[rax](%0), %%eax \n\t" | ||
2984 | "mov %c[rbx](%0), %%ebx \n\t" | ||
2985 | "mov %c[rdx](%0), %%edx \n\t" | ||
2986 | "mov %c[rsi](%0), %%esi \n\t" | ||
2987 | "mov %c[rdi](%0), %%edi \n\t" | ||
2988 | "mov %c[rbp](%0), %%ebp \n\t" | ||
2989 | "mov %c[rcx](%0), %%ecx \n\t" /* kills %0 (ecx) */ | ||
2990 | #endif | 3259 | #endif |
3260 | "mov %c[rcx](%0), %%"R"cx \n\t" /* kills %0 (ecx) */ | ||
3261 | |||
2991 | /* Enter guest mode */ | 3262 | /* Enter guest mode */ |
2992 | "jne .Llaunched \n\t" | 3263 | "jne .Llaunched \n\t" |
2993 | __ex(ASM_VMX_VMLAUNCH) "\n\t" | 3264 | __ex(ASM_VMX_VMLAUNCH) "\n\t" |
@@ -2995,15 +3266,15 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2995 | ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t" | 3266 | ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t" |
2996 | ".Lkvm_vmx_return: " | 3267 | ".Lkvm_vmx_return: " |
2997 | /* Save guest registers, load host registers, keep flags */ | 3268 | /* Save guest registers, load host registers, keep flags */ |
3269 | "xchg %0, (%%"R"sp) \n\t" | ||
3270 | "mov %%"R"ax, %c[rax](%0) \n\t" | ||
3271 | "mov %%"R"bx, %c[rbx](%0) \n\t" | ||
3272 | "push"Q" (%%"R"sp); pop"Q" %c[rcx](%0) \n\t" | ||
3273 | "mov %%"R"dx, %c[rdx](%0) \n\t" | ||
3274 | "mov %%"R"si, %c[rsi](%0) \n\t" | ||
3275 | "mov %%"R"di, %c[rdi](%0) \n\t" | ||
3276 | "mov %%"R"bp, %c[rbp](%0) \n\t" | ||
2998 | #ifdef CONFIG_X86_64 | 3277 | #ifdef CONFIG_X86_64 |
2999 | "xchg %0, (%%rsp) \n\t" | ||
3000 | "mov %%rax, %c[rax](%0) \n\t" | ||
3001 | "mov %%rbx, %c[rbx](%0) \n\t" | ||
3002 | "pushq (%%rsp); popq %c[rcx](%0) \n\t" | ||
3003 | "mov %%rdx, %c[rdx](%0) \n\t" | ||
3004 | "mov %%rsi, %c[rsi](%0) \n\t" | ||
3005 | "mov %%rdi, %c[rdi](%0) \n\t" | ||
3006 | "mov %%rbp, %c[rbp](%0) \n\t" | ||
3007 | "mov %%r8, %c[r8](%0) \n\t" | 3278 | "mov %%r8, %c[r8](%0) \n\t" |
3008 | "mov %%r9, %c[r9](%0) \n\t" | 3279 | "mov %%r9, %c[r9](%0) \n\t" |
3009 | "mov %%r10, %c[r10](%0) \n\t" | 3280 | "mov %%r10, %c[r10](%0) \n\t" |
@@ -3012,28 +3283,16 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3012 | "mov %%r13, %c[r13](%0) \n\t" | 3283 | "mov %%r13, %c[r13](%0) \n\t" |
3013 | "mov %%r14, %c[r14](%0) \n\t" | 3284 | "mov %%r14, %c[r14](%0) \n\t" |
3014 | "mov %%r15, %c[r15](%0) \n\t" | 3285 | "mov %%r15, %c[r15](%0) \n\t" |
3015 | "mov %%cr2, %%rax \n\t" | ||
3016 | "mov %%rax, %c[cr2](%0) \n\t" | ||
3017 | |||
3018 | "pop %%rbp; pop %%rbp; pop %%rdx \n\t" | ||
3019 | #else | ||
3020 | "xchg %0, (%%esp) \n\t" | ||
3021 | "mov %%eax, %c[rax](%0) \n\t" | ||
3022 | "mov %%ebx, %c[rbx](%0) \n\t" | ||
3023 | "pushl (%%esp); popl %c[rcx](%0) \n\t" | ||
3024 | "mov %%edx, %c[rdx](%0) \n\t" | ||
3025 | "mov %%esi, %c[rsi](%0) \n\t" | ||
3026 | "mov %%edi, %c[rdi](%0) \n\t" | ||
3027 | "mov %%ebp, %c[rbp](%0) \n\t" | ||
3028 | "mov %%cr2, %%eax \n\t" | ||
3029 | "mov %%eax, %c[cr2](%0) \n\t" | ||
3030 | |||
3031 | "pop %%ebp; pop %%ebp; pop %%edx \n\t" | ||
3032 | #endif | 3286 | #endif |
3287 | "mov %%cr2, %%"R"ax \n\t" | ||
3288 | "mov %%"R"ax, %c[cr2](%0) \n\t" | ||
3289 | |||
3290 | "pop %%"R"bp; pop %%"R"bp; pop %%"R"dx \n\t" | ||
3033 | "setbe %c[fail](%0) \n\t" | 3291 | "setbe %c[fail](%0) \n\t" |
3034 | : : "c"(vmx), "d"((unsigned long)HOST_RSP), | 3292 | : : "c"(vmx), "d"((unsigned long)HOST_RSP), |
3035 | [launched]"i"(offsetof(struct vcpu_vmx, launched)), | 3293 | [launched]"i"(offsetof(struct vcpu_vmx, launched)), |
3036 | [fail]"i"(offsetof(struct vcpu_vmx, fail)), | 3294 | [fail]"i"(offsetof(struct vcpu_vmx, fail)), |
3295 | [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)), | ||
3037 | [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])), | 3296 | [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])), |
3038 | [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])), | 3297 | [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])), |
3039 | [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])), | 3298 | [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])), |
@@ -3053,14 +3312,15 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3053 | #endif | 3312 | #endif |
3054 | [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)) | 3313 | [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)) |
3055 | : "cc", "memory" | 3314 | : "cc", "memory" |
3315 | , R"bx", R"di", R"si" | ||
3056 | #ifdef CONFIG_X86_64 | 3316 | #ifdef CONFIG_X86_64 |
3057 | , "rbx", "rdi", "rsi" | ||
3058 | , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" | 3317 | , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" |
3059 | #else | ||
3060 | , "ebx", "edi", "rsi" | ||
3061 | #endif | 3318 | #endif |
3062 | ); | 3319 | ); |
3063 | 3320 | ||
3321 | vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)); | ||
3322 | vcpu->arch.regs_dirty = 0; | ||
3323 | |||
3064 | vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); | 3324 | vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); |
3065 | if (vmx->rmode.irq.pending) | 3325 | if (vmx->rmode.irq.pending) |
3066 | fixup_rmode_irq(vmx); | 3326 | fixup_rmode_irq(vmx); |
@@ -3080,8 +3340,13 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3080 | KVMTRACE_0D(NMI, vcpu, handler); | 3340 | KVMTRACE_0D(NMI, vcpu, handler); |
3081 | asm("int $2"); | 3341 | asm("int $2"); |
3082 | } | 3342 | } |
3343 | |||
3344 | vmx_complete_interrupts(vmx); | ||
3083 | } | 3345 | } |
3084 | 3346 | ||
3347 | #undef R | ||
3348 | #undef Q | ||
3349 | |||
3085 | static void vmx_free_vmcs(struct kvm_vcpu *vcpu) | 3350 | static void vmx_free_vmcs(struct kvm_vcpu *vcpu) |
3086 | { | 3351 | { |
3087 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 3352 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
@@ -3224,8 +3489,7 @@ static struct kvm_x86_ops vmx_x86_ops = { | |||
3224 | .set_idt = vmx_set_idt, | 3489 | .set_idt = vmx_set_idt, |
3225 | .get_gdt = vmx_get_gdt, | 3490 | .get_gdt = vmx_get_gdt, |
3226 | .set_gdt = vmx_set_gdt, | 3491 | .set_gdt = vmx_set_gdt, |
3227 | .cache_regs = vcpu_load_rsp_rip, | 3492 | .cache_reg = vmx_cache_reg, |
3228 | .decache_regs = vcpu_put_rsp_rip, | ||
3229 | .get_rflags = vmx_get_rflags, | 3493 | .get_rflags = vmx_get_rflags, |
3230 | .set_rflags = vmx_set_rflags, | 3494 | .set_rflags = vmx_set_rflags, |
3231 | 3495 | ||