diff options
| author | Paolo Bonzini <pbonzini@redhat.com> | 2018-03-27 16:46:11 -0400 |
|---|---|---|
| committer | Paolo Bonzini <pbonzini@redhat.com> | 2018-04-16 11:50:23 -0400 |
| commit | d5edb7f8e7ab9fd5fd54a77d957b1733f117a813 (patch) | |
| tree | b402239dcce86b8d49d27b9010b165f2de88af46 | |
| parent | dd259935e4eec844dc3e5b8a7cd951cd658b4fb6 (diff) | |
kvm: selftests: add vmx_tsc_adjust_test
The test checks the behavior of setting MSR_IA32_TSC in a nested guest,
and the TSC_OFFSET VMCS field in general. It also introduces the testing
infrastructure for Intel nested virtualization.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| -rw-r--r-- | tools/testing/selftests/kvm/Makefile | 3 | ||||
| -rw-r--r-- | tools/testing/selftests/kvm/include/kvm_util.h | 15 | ||||
| -rw-r--r-- | tools/testing/selftests/kvm/include/vmx.h | 494 | ||||
| -rw-r--r-- | tools/testing/selftests/kvm/lib/kvm_util.c | 18 | ||||
| -rw-r--r-- | tools/testing/selftests/kvm/lib/vmx.c | 243 | ||||
| -rw-r--r-- | tools/testing/selftests/kvm/vmx_tsc_adjust_test.c | 231 |
6 files changed, 991 insertions, 13 deletions
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 1da541e1ab75..2ddcc96ae456 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile | |||
| @@ -4,10 +4,11 @@ top_srcdir = ../../../../ | |||
| 4 | UNAME_M := $(shell uname -m) | 4 | UNAME_M := $(shell uname -m) |
| 5 | 5 | ||
| 6 | LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c | 6 | LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c |
| 7 | LIBKVM_x86_64 = lib/x86.c | 7 | LIBKVM_x86_64 = lib/x86.c lib/vmx.c |
| 8 | 8 | ||
| 9 | TEST_GEN_PROGS_x86_64 = set_sregs_test | 9 | TEST_GEN_PROGS_x86_64 = set_sregs_test |
| 10 | TEST_GEN_PROGS_x86_64 += sync_regs_test | 10 | TEST_GEN_PROGS_x86_64 += sync_regs_test |
| 11 | TEST_GEN_PROGS_x86_64 += vmx_tsc_adjust_test | ||
| 11 | 12 | ||
| 12 | TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M)) | 13 | TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M)) |
| 13 | LIBKVM += $(LIBKVM_$(UNAME_M)) | 14 | LIBKVM += $(LIBKVM_$(UNAME_M)) |
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index 57974ad46373..637b7017b6ee 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h | |||
| @@ -112,24 +112,27 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, | |||
| 112 | vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, | 112 | vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, |
| 113 | vm_paddr_t paddr_min, uint32_t memslot); | 113 | vm_paddr_t paddr_min, uint32_t memslot); |
| 114 | 114 | ||
| 115 | void kvm_get_supported_cpuid(struct kvm_cpuid2 *cpuid); | 115 | struct kvm_cpuid2 *kvm_get_supported_cpuid(void); |
| 116 | void vcpu_set_cpuid( | 116 | void vcpu_set_cpuid( |
| 117 | struct kvm_vm *vm, uint32_t vcpuid, struct kvm_cpuid2 *cpuid); | 117 | struct kvm_vm *vm, uint32_t vcpuid, struct kvm_cpuid2 *cpuid); |
| 118 | 118 | ||
| 119 | struct kvm_cpuid2 *allocate_kvm_cpuid2(void); | ||
| 120 | struct kvm_cpuid_entry2 * | 119 | struct kvm_cpuid_entry2 * |
| 121 | find_cpuid_index_entry(struct kvm_cpuid2 *cpuid, uint32_t function, | 120 | kvm_get_supported_cpuid_index(uint32_t function, uint32_t index); |
| 122 | uint32_t index); | ||
| 123 | 121 | ||
| 124 | static inline struct kvm_cpuid_entry2 * | 122 | static inline struct kvm_cpuid_entry2 * |
| 125 | find_cpuid_entry(struct kvm_cpuid2 *cpuid, uint32_t function) | 123 | kvm_get_supported_cpuid_entry(uint32_t function) |
| 126 | { | 124 | { |
| 127 | return find_cpuid_index_entry(cpuid, function, 0); | 125 | return kvm_get_supported_cpuid_index(function, 0); |
| 128 | } | 126 | } |
| 129 | 127 | ||
| 130 | struct kvm_vm *vm_create_default(uint32_t vcpuid, void *guest_code); | 128 | struct kvm_vm *vm_create_default(uint32_t vcpuid, void *guest_code); |
| 131 | void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code); | 129 | void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code); |
| 132 | 130 | ||
| 131 | typedef void (*vmx_guest_code_t)(vm_vaddr_t vmxon_vaddr, | ||
| 132 | vm_paddr_t vmxon_paddr, | ||
| 133 | vm_vaddr_t vmcs_vaddr, | ||
| 134 | vm_paddr_t vmcs_paddr); | ||
| 135 | |||
| 133 | struct kvm_userspace_memory_region * | 136 | struct kvm_userspace_memory_region * |
| 134 | kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start, | 137 | kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start, |
| 135 | uint64_t end); | 138 | uint64_t end); |
diff --git a/tools/testing/selftests/kvm/include/vmx.h b/tools/testing/selftests/kvm/include/vmx.h new file mode 100644 index 000000000000..6ed8499807fd --- /dev/null +++ b/tools/testing/selftests/kvm/include/vmx.h | |||
| @@ -0,0 +1,494 @@ | |||
| 1 | /* | ||
| 2 | * tools/testing/selftests/kvm/include/vmx.h | ||
| 3 | * | ||
| 4 | * Copyright (C) 2018, Google LLC. | ||
| 5 | * | ||
| 6 | * This work is licensed under the terms of the GNU GPL, version 2. | ||
| 7 | * | ||
| 8 | */ | ||
| 9 | |||
| 10 | #ifndef SELFTEST_KVM_VMX_H | ||
| 11 | #define SELFTEST_KVM_VMX_H | ||
| 12 | |||
| 13 | #include <stdint.h> | ||
| 14 | #include "x86.h" | ||
| 15 | |||
| 16 | #define CPUID_VMX_BIT 5 | ||
| 17 | |||
| 18 | #define CPUID_VMX (1 << 5) | ||
| 19 | |||
| 20 | /* | ||
| 21 | * Definitions of Primary Processor-Based VM-Execution Controls. | ||
| 22 | */ | ||
| 23 | #define CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004 | ||
| 24 | #define CPU_BASED_USE_TSC_OFFSETING 0x00000008 | ||
| 25 | #define CPU_BASED_HLT_EXITING 0x00000080 | ||
| 26 | #define CPU_BASED_INVLPG_EXITING 0x00000200 | ||
| 27 | #define CPU_BASED_MWAIT_EXITING 0x00000400 | ||
| 28 | #define CPU_BASED_RDPMC_EXITING 0x00000800 | ||
| 29 | #define CPU_BASED_RDTSC_EXITING 0x00001000 | ||
| 30 | #define CPU_BASED_CR3_LOAD_EXITING 0x00008000 | ||
| 31 | #define CPU_BASED_CR3_STORE_EXITING 0x00010000 | ||
| 32 | #define CPU_BASED_CR8_LOAD_EXITING 0x00080000 | ||
| 33 | #define CPU_BASED_CR8_STORE_EXITING 0x00100000 | ||
| 34 | #define CPU_BASED_TPR_SHADOW 0x00200000 | ||
| 35 | #define CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000 | ||
| 36 | #define CPU_BASED_MOV_DR_EXITING 0x00800000 | ||
| 37 | #define CPU_BASED_UNCOND_IO_EXITING 0x01000000 | ||
| 38 | #define CPU_BASED_USE_IO_BITMAPS 0x02000000 | ||
| 39 | #define CPU_BASED_MONITOR_TRAP 0x08000000 | ||
| 40 | #define CPU_BASED_USE_MSR_BITMAPS 0x10000000 | ||
| 41 | #define CPU_BASED_MONITOR_EXITING 0x20000000 | ||
| 42 | #define CPU_BASED_PAUSE_EXITING 0x40000000 | ||
| 43 | #define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000 | ||
| 44 | |||
| 45 | #define CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x0401e172 | ||
| 46 | |||
| 47 | /* | ||
| 48 | * Definitions of Secondary Processor-Based VM-Execution Controls. | ||
| 49 | */ | ||
| 50 | #define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001 | ||
| 51 | #define SECONDARY_EXEC_ENABLE_EPT 0x00000002 | ||
| 52 | #define SECONDARY_EXEC_DESC 0x00000004 | ||
| 53 | #define SECONDARY_EXEC_RDTSCP 0x00000008 | ||
| 54 | #define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE 0x00000010 | ||
| 55 | #define SECONDARY_EXEC_ENABLE_VPID 0x00000020 | ||
| 56 | #define SECONDARY_EXEC_WBINVD_EXITING 0x00000040 | ||
| 57 | #define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080 | ||
| 58 | #define SECONDARY_EXEC_APIC_REGISTER_VIRT 0x00000100 | ||
| 59 | #define SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY 0x00000200 | ||
| 60 | #define SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400 | ||
| 61 | #define SECONDARY_EXEC_RDRAND_EXITING 0x00000800 | ||
| 62 | #define SECONDARY_EXEC_ENABLE_INVPCID 0x00001000 | ||
| 63 | #define SECONDARY_EXEC_ENABLE_VMFUNC 0x00002000 | ||
| 64 | #define SECONDARY_EXEC_SHADOW_VMCS 0x00004000 | ||
| 65 | #define SECONDARY_EXEC_RDSEED_EXITING 0x00010000 | ||
| 66 | #define SECONDARY_EXEC_ENABLE_PML 0x00020000 | ||
| 67 | #define SECONDARY_EPT_VE 0x00040000 | ||
| 68 | #define SECONDARY_ENABLE_XSAV_RESTORE 0x00100000 | ||
| 69 | #define SECONDARY_EXEC_TSC_SCALING 0x02000000 | ||
| 70 | |||
| 71 | #define PIN_BASED_EXT_INTR_MASK 0x00000001 | ||
| 72 | #define PIN_BASED_NMI_EXITING 0x00000008 | ||
| 73 | #define PIN_BASED_VIRTUAL_NMIS 0x00000020 | ||
| 74 | #define PIN_BASED_VMX_PREEMPTION_TIMER 0x00000040 | ||
| 75 | #define PIN_BASED_POSTED_INTR 0x00000080 | ||
| 76 | |||
| 77 | #define PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x00000016 | ||
| 78 | |||
| 79 | #define VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000004 | ||
| 80 | #define VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200 | ||
| 81 | #define VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 0x00001000 | ||
| 82 | #define VM_EXIT_ACK_INTR_ON_EXIT 0x00008000 | ||
| 83 | #define VM_EXIT_SAVE_IA32_PAT 0x00040000 | ||
| 84 | #define VM_EXIT_LOAD_IA32_PAT 0x00080000 | ||
| 85 | #define VM_EXIT_SAVE_IA32_EFER 0x00100000 | ||
| 86 | #define VM_EXIT_LOAD_IA32_EFER 0x00200000 | ||
| 87 | #define VM_EXIT_SAVE_VMX_PREEMPTION_TIMER 0x00400000 | ||
| 88 | |||
| 89 | #define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR 0x00036dff | ||
| 90 | |||
| 91 | #define VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000004 | ||
| 92 | #define VM_ENTRY_IA32E_MODE 0x00000200 | ||
| 93 | #define VM_ENTRY_SMM 0x00000400 | ||
| 94 | #define VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800 | ||
| 95 | #define VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL 0x00002000 | ||
| 96 | #define VM_ENTRY_LOAD_IA32_PAT 0x00004000 | ||
| 97 | #define VM_ENTRY_LOAD_IA32_EFER 0x00008000 | ||
| 98 | |||
| 99 | #define VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR 0x000011ff | ||
| 100 | |||
| 101 | #define VMX_MISC_PREEMPTION_TIMER_RATE_MASK 0x0000001f | ||
| 102 | #define VMX_MISC_SAVE_EFER_LMA 0x00000020 | ||
| 103 | |||
| 104 | #define EXIT_REASON_FAILED_VMENTRY 0x80000000 | ||
| 105 | #define EXIT_REASON_EXCEPTION_NMI 0 | ||
| 106 | #define EXIT_REASON_EXTERNAL_INTERRUPT 1 | ||
| 107 | #define EXIT_REASON_TRIPLE_FAULT 2 | ||
| 108 | #define EXIT_REASON_PENDING_INTERRUPT 7 | ||
| 109 | #define EXIT_REASON_NMI_WINDOW 8 | ||
| 110 | #define EXIT_REASON_TASK_SWITCH 9 | ||
| 111 | #define EXIT_REASON_CPUID 10 | ||
| 112 | #define EXIT_REASON_HLT 12 | ||
| 113 | #define EXIT_REASON_INVD 13 | ||
| 114 | #define EXIT_REASON_INVLPG 14 | ||
| 115 | #define EXIT_REASON_RDPMC 15 | ||
| 116 | #define EXIT_REASON_RDTSC 16 | ||
| 117 | #define EXIT_REASON_VMCALL 18 | ||
| 118 | #define EXIT_REASON_VMCLEAR 19 | ||
| 119 | #define EXIT_REASON_VMLAUNCH 20 | ||
| 120 | #define EXIT_REASON_VMPTRLD 21 | ||
| 121 | #define EXIT_REASON_VMPTRST 22 | ||
| 122 | #define EXIT_REASON_VMREAD 23 | ||
| 123 | #define EXIT_REASON_VMRESUME 24 | ||
| 124 | #define EXIT_REASON_VMWRITE 25 | ||
| 125 | #define EXIT_REASON_VMOFF 26 | ||
| 126 | #define EXIT_REASON_VMON 27 | ||
| 127 | #define EXIT_REASON_CR_ACCESS 28 | ||
| 128 | #define EXIT_REASON_DR_ACCESS 29 | ||
| 129 | #define EXIT_REASON_IO_INSTRUCTION 30 | ||
| 130 | #define EXIT_REASON_MSR_READ 31 | ||
| 131 | #define EXIT_REASON_MSR_WRITE 32 | ||
| 132 | #define EXIT_REASON_INVALID_STATE 33 | ||
| 133 | #define EXIT_REASON_MWAIT_INSTRUCTION 36 | ||
| 134 | #define EXIT_REASON_MONITOR_INSTRUCTION 39 | ||
| 135 | #define EXIT_REASON_PAUSE_INSTRUCTION 40 | ||
| 136 | #define EXIT_REASON_MCE_DURING_VMENTRY 41 | ||
| 137 | #define EXIT_REASON_TPR_BELOW_THRESHOLD 43 | ||
| 138 | #define EXIT_REASON_APIC_ACCESS 44 | ||
| 139 | #define EXIT_REASON_EOI_INDUCED 45 | ||
| 140 | #define EXIT_REASON_EPT_VIOLATION 48 | ||
| 141 | #define EXIT_REASON_EPT_MISCONFIG 49 | ||
| 142 | #define EXIT_REASON_INVEPT 50 | ||
| 143 | #define EXIT_REASON_RDTSCP 51 | ||
| 144 | #define EXIT_REASON_PREEMPTION_TIMER 52 | ||
| 145 | #define EXIT_REASON_INVVPID 53 | ||
| 146 | #define EXIT_REASON_WBINVD 54 | ||
| 147 | #define EXIT_REASON_XSETBV 55 | ||
| 148 | #define EXIT_REASON_APIC_WRITE 56 | ||
| 149 | #define EXIT_REASON_INVPCID 58 | ||
| 150 | #define EXIT_REASON_PML_FULL 62 | ||
| 151 | #define EXIT_REASON_XSAVES 63 | ||
| 152 | #define EXIT_REASON_XRSTORS 64 | ||
| 153 | #define LAST_EXIT_REASON 64 | ||
| 154 | |||
| 155 | enum vmcs_field { | ||
| 156 | VIRTUAL_PROCESSOR_ID = 0x00000000, | ||
| 157 | POSTED_INTR_NV = 0x00000002, | ||
| 158 | GUEST_ES_SELECTOR = 0x00000800, | ||
| 159 | GUEST_CS_SELECTOR = 0x00000802, | ||
| 160 | GUEST_SS_SELECTOR = 0x00000804, | ||
| 161 | GUEST_DS_SELECTOR = 0x00000806, | ||
| 162 | GUEST_FS_SELECTOR = 0x00000808, | ||
| 163 | GUEST_GS_SELECTOR = 0x0000080a, | ||
| 164 | GUEST_LDTR_SELECTOR = 0x0000080c, | ||
| 165 | GUEST_TR_SELECTOR = 0x0000080e, | ||
| 166 | GUEST_INTR_STATUS = 0x00000810, | ||
| 167 | GUEST_PML_INDEX = 0x00000812, | ||
| 168 | HOST_ES_SELECTOR = 0x00000c00, | ||
| 169 | HOST_CS_SELECTOR = 0x00000c02, | ||
| 170 | HOST_SS_SELECTOR = 0x00000c04, | ||
| 171 | HOST_DS_SELECTOR = 0x00000c06, | ||
| 172 | HOST_FS_SELECTOR = 0x00000c08, | ||
| 173 | HOST_GS_SELECTOR = 0x00000c0a, | ||
| 174 | HOST_TR_SELECTOR = 0x00000c0c, | ||
| 175 | IO_BITMAP_A = 0x00002000, | ||
| 176 | IO_BITMAP_A_HIGH = 0x00002001, | ||
| 177 | IO_BITMAP_B = 0x00002002, | ||
| 178 | IO_BITMAP_B_HIGH = 0x00002003, | ||
| 179 | MSR_BITMAP = 0x00002004, | ||
| 180 | MSR_BITMAP_HIGH = 0x00002005, | ||
| 181 | VM_EXIT_MSR_STORE_ADDR = 0x00002006, | ||
| 182 | VM_EXIT_MSR_STORE_ADDR_HIGH = 0x00002007, | ||
| 183 | VM_EXIT_MSR_LOAD_ADDR = 0x00002008, | ||
| 184 | VM_EXIT_MSR_LOAD_ADDR_HIGH = 0x00002009, | ||
| 185 | VM_ENTRY_MSR_LOAD_ADDR = 0x0000200a, | ||
| 186 | VM_ENTRY_MSR_LOAD_ADDR_HIGH = 0x0000200b, | ||
| 187 | PML_ADDRESS = 0x0000200e, | ||
| 188 | PML_ADDRESS_HIGH = 0x0000200f, | ||
| 189 | TSC_OFFSET = 0x00002010, | ||
| 190 | TSC_OFFSET_HIGH = 0x00002011, | ||
| 191 | VIRTUAL_APIC_PAGE_ADDR = 0x00002012, | ||
| 192 | VIRTUAL_APIC_PAGE_ADDR_HIGH = 0x00002013, | ||
| 193 | APIC_ACCESS_ADDR = 0x00002014, | ||
| 194 | APIC_ACCESS_ADDR_HIGH = 0x00002015, | ||
| 195 | POSTED_INTR_DESC_ADDR = 0x00002016, | ||
| 196 | POSTED_INTR_DESC_ADDR_HIGH = 0x00002017, | ||
| 197 | EPT_POINTER = 0x0000201a, | ||
| 198 | EPT_POINTER_HIGH = 0x0000201b, | ||
| 199 | EOI_EXIT_BITMAP0 = 0x0000201c, | ||
| 200 | EOI_EXIT_BITMAP0_HIGH = 0x0000201d, | ||
| 201 | EOI_EXIT_BITMAP1 = 0x0000201e, | ||
| 202 | EOI_EXIT_BITMAP1_HIGH = 0x0000201f, | ||
| 203 | EOI_EXIT_BITMAP2 = 0x00002020, | ||
| 204 | EOI_EXIT_BITMAP2_HIGH = 0x00002021, | ||
| 205 | EOI_EXIT_BITMAP3 = 0x00002022, | ||
| 206 | EOI_EXIT_BITMAP3_HIGH = 0x00002023, | ||
| 207 | VMREAD_BITMAP = 0x00002026, | ||
| 208 | VMREAD_BITMAP_HIGH = 0x00002027, | ||
| 209 | VMWRITE_BITMAP = 0x00002028, | ||
| 210 | VMWRITE_BITMAP_HIGH = 0x00002029, | ||
| 211 | XSS_EXIT_BITMAP = 0x0000202C, | ||
| 212 | XSS_EXIT_BITMAP_HIGH = 0x0000202D, | ||
| 213 | TSC_MULTIPLIER = 0x00002032, | ||
| 214 | TSC_MULTIPLIER_HIGH = 0x00002033, | ||
| 215 | GUEST_PHYSICAL_ADDRESS = 0x00002400, | ||
| 216 | GUEST_PHYSICAL_ADDRESS_HIGH = 0x00002401, | ||
| 217 | VMCS_LINK_POINTER = 0x00002800, | ||
| 218 | VMCS_LINK_POINTER_HIGH = 0x00002801, | ||
| 219 | GUEST_IA32_DEBUGCTL = 0x00002802, | ||
| 220 | GUEST_IA32_DEBUGCTL_HIGH = 0x00002803, | ||
| 221 | GUEST_IA32_PAT = 0x00002804, | ||
| 222 | GUEST_IA32_PAT_HIGH = 0x00002805, | ||
| 223 | GUEST_IA32_EFER = 0x00002806, | ||
| 224 | GUEST_IA32_EFER_HIGH = 0x00002807, | ||
| 225 | GUEST_IA32_PERF_GLOBAL_CTRL = 0x00002808, | ||
| 226 | GUEST_IA32_PERF_GLOBAL_CTRL_HIGH= 0x00002809, | ||
| 227 | GUEST_PDPTR0 = 0x0000280a, | ||
| 228 | GUEST_PDPTR0_HIGH = 0x0000280b, | ||
| 229 | GUEST_PDPTR1 = 0x0000280c, | ||
| 230 | GUEST_PDPTR1_HIGH = 0x0000280d, | ||
| 231 | GUEST_PDPTR2 = 0x0000280e, | ||
| 232 | GUEST_PDPTR2_HIGH = 0x0000280f, | ||
| 233 | GUEST_PDPTR3 = 0x00002810, | ||
| 234 | GUEST_PDPTR3_HIGH = 0x00002811, | ||
| 235 | GUEST_BNDCFGS = 0x00002812, | ||
| 236 | GUEST_BNDCFGS_HIGH = 0x00002813, | ||
| 237 | HOST_IA32_PAT = 0x00002c00, | ||
| 238 | HOST_IA32_PAT_HIGH = 0x00002c01, | ||
| 239 | HOST_IA32_EFER = 0x00002c02, | ||
| 240 | HOST_IA32_EFER_HIGH = 0x00002c03, | ||
| 241 | HOST_IA32_PERF_GLOBAL_CTRL = 0x00002c04, | ||
| 242 | HOST_IA32_PERF_GLOBAL_CTRL_HIGH = 0x00002c05, | ||
| 243 | PIN_BASED_VM_EXEC_CONTROL = 0x00004000, | ||
| 244 | CPU_BASED_VM_EXEC_CONTROL = 0x00004002, | ||
| 245 | EXCEPTION_BITMAP = 0x00004004, | ||
| 246 | PAGE_FAULT_ERROR_CODE_MASK = 0x00004006, | ||
| 247 | PAGE_FAULT_ERROR_CODE_MATCH = 0x00004008, | ||
| 248 | CR3_TARGET_COUNT = 0x0000400a, | ||
| 249 | VM_EXIT_CONTROLS = 0x0000400c, | ||
| 250 | VM_EXIT_MSR_STORE_COUNT = 0x0000400e, | ||
| 251 | VM_EXIT_MSR_LOAD_COUNT = 0x00004010, | ||
| 252 | VM_ENTRY_CONTROLS = 0x00004012, | ||
| 253 | VM_ENTRY_MSR_LOAD_COUNT = 0x00004014, | ||
| 254 | VM_ENTRY_INTR_INFO_FIELD = 0x00004016, | ||
| 255 | VM_ENTRY_EXCEPTION_ERROR_CODE = 0x00004018, | ||
| 256 | VM_ENTRY_INSTRUCTION_LEN = 0x0000401a, | ||
| 257 | TPR_THRESHOLD = 0x0000401c, | ||
| 258 | SECONDARY_VM_EXEC_CONTROL = 0x0000401e, | ||
| 259 | PLE_GAP = 0x00004020, | ||
| 260 | PLE_WINDOW = 0x00004022, | ||
| 261 | VM_INSTRUCTION_ERROR = 0x00004400, | ||
| 262 | VM_EXIT_REASON = 0x00004402, | ||
| 263 | VM_EXIT_INTR_INFO = 0x00004404, | ||
| 264 | VM_EXIT_INTR_ERROR_CODE = 0x00004406, | ||
| 265 | IDT_VECTORING_INFO_FIELD = 0x00004408, | ||
| 266 | IDT_VECTORING_ERROR_CODE = 0x0000440a, | ||
| 267 | VM_EXIT_INSTRUCTION_LEN = 0x0000440c, | ||
| 268 | VMX_INSTRUCTION_INFO = 0x0000440e, | ||
| 269 | GUEST_ES_LIMIT = 0x00004800, | ||
| 270 | GUEST_CS_LIMIT = 0x00004802, | ||
| 271 | GUEST_SS_LIMIT = 0x00004804, | ||
| 272 | GUEST_DS_LIMIT = 0x00004806, | ||
| 273 | GUEST_FS_LIMIT = 0x00004808, | ||
| 274 | GUEST_GS_LIMIT = 0x0000480a, | ||
| 275 | GUEST_LDTR_LIMIT = 0x0000480c, | ||
| 276 | GUEST_TR_LIMIT = 0x0000480e, | ||
| 277 | GUEST_GDTR_LIMIT = 0x00004810, | ||
| 278 | GUEST_IDTR_LIMIT = 0x00004812, | ||
| 279 | GUEST_ES_AR_BYTES = 0x00004814, | ||
| 280 | GUEST_CS_AR_BYTES = 0x00004816, | ||
| 281 | GUEST_SS_AR_BYTES = 0x00004818, | ||
| 282 | GUEST_DS_AR_BYTES = 0x0000481a, | ||
| 283 | GUEST_FS_AR_BYTES = 0x0000481c, | ||
| 284 | GUEST_GS_AR_BYTES = 0x0000481e, | ||
| 285 | GUEST_LDTR_AR_BYTES = 0x00004820, | ||
| 286 | GUEST_TR_AR_BYTES = 0x00004822, | ||
| 287 | GUEST_INTERRUPTIBILITY_INFO = 0x00004824, | ||
| 288 | GUEST_ACTIVITY_STATE = 0X00004826, | ||
| 289 | GUEST_SYSENTER_CS = 0x0000482A, | ||
| 290 | VMX_PREEMPTION_TIMER_VALUE = 0x0000482E, | ||
| 291 | HOST_IA32_SYSENTER_CS = 0x00004c00, | ||
| 292 | CR0_GUEST_HOST_MASK = 0x00006000, | ||
| 293 | CR4_GUEST_HOST_MASK = 0x00006002, | ||
| 294 | CR0_READ_SHADOW = 0x00006004, | ||
| 295 | CR4_READ_SHADOW = 0x00006006, | ||
| 296 | CR3_TARGET_VALUE0 = 0x00006008, | ||
| 297 | CR3_TARGET_VALUE1 = 0x0000600a, | ||
| 298 | CR3_TARGET_VALUE2 = 0x0000600c, | ||
| 299 | CR3_TARGET_VALUE3 = 0x0000600e, | ||
| 300 | EXIT_QUALIFICATION = 0x00006400, | ||
| 301 | GUEST_LINEAR_ADDRESS = 0x0000640a, | ||
| 302 | GUEST_CR0 = 0x00006800, | ||
| 303 | GUEST_CR3 = 0x00006802, | ||
| 304 | GUEST_CR4 = 0x00006804, | ||
| 305 | GUEST_ES_BASE = 0x00006806, | ||
| 306 | GUEST_CS_BASE = 0x00006808, | ||
| 307 | GUEST_SS_BASE = 0x0000680a, | ||
| 308 | GUEST_DS_BASE = 0x0000680c, | ||
| 309 | GUEST_FS_BASE = 0x0000680e, | ||
| 310 | GUEST_GS_BASE = 0x00006810, | ||
| 311 | GUEST_LDTR_BASE = 0x00006812, | ||
| 312 | GUEST_TR_BASE = 0x00006814, | ||
| 313 | GUEST_GDTR_BASE = 0x00006816, | ||
| 314 | GUEST_IDTR_BASE = 0x00006818, | ||
| 315 | GUEST_DR7 = 0x0000681a, | ||
| 316 | GUEST_RSP = 0x0000681c, | ||
| 317 | GUEST_RIP = 0x0000681e, | ||
| 318 | GUEST_RFLAGS = 0x00006820, | ||
| 319 | GUEST_PENDING_DBG_EXCEPTIONS = 0x00006822, | ||
| 320 | GUEST_SYSENTER_ESP = 0x00006824, | ||
| 321 | GUEST_SYSENTER_EIP = 0x00006826, | ||
| 322 | HOST_CR0 = 0x00006c00, | ||
| 323 | HOST_CR3 = 0x00006c02, | ||
| 324 | HOST_CR4 = 0x00006c04, | ||
| 325 | HOST_FS_BASE = 0x00006c06, | ||
| 326 | HOST_GS_BASE = 0x00006c08, | ||
| 327 | HOST_TR_BASE = 0x00006c0a, | ||
| 328 | HOST_GDTR_BASE = 0x00006c0c, | ||
| 329 | HOST_IDTR_BASE = 0x00006c0e, | ||
| 330 | HOST_IA32_SYSENTER_ESP = 0x00006c10, | ||
| 331 | HOST_IA32_SYSENTER_EIP = 0x00006c12, | ||
| 332 | HOST_RSP = 0x00006c14, | ||
| 333 | HOST_RIP = 0x00006c16, | ||
| 334 | }; | ||
| 335 | |||
| 336 | struct vmx_msr_entry { | ||
| 337 | uint32_t index; | ||
| 338 | uint32_t reserved; | ||
| 339 | uint64_t value; | ||
| 340 | } __attribute__ ((aligned(16))); | ||
| 341 | |||
| 342 | static inline int vmxon(uint64_t phys) | ||
| 343 | { | ||
| 344 | uint8_t ret; | ||
| 345 | |||
| 346 | __asm__ __volatile__ ("vmxon %[pa]; setna %[ret]" | ||
| 347 | : [ret]"=rm"(ret) | ||
| 348 | : [pa]"m"(phys) | ||
| 349 | : "cc", "memory"); | ||
| 350 | |||
| 351 | return ret; | ||
| 352 | } | ||
| 353 | |||
| 354 | static inline void vmxoff(void) | ||
| 355 | { | ||
| 356 | __asm__ __volatile__("vmxoff"); | ||
| 357 | } | ||
| 358 | |||
| 359 | static inline int vmclear(uint64_t vmcs_pa) | ||
| 360 | { | ||
| 361 | uint8_t ret; | ||
| 362 | |||
| 363 | __asm__ __volatile__ ("vmclear %[pa]; setna %[ret]" | ||
| 364 | : [ret]"=rm"(ret) | ||
| 365 | : [pa]"m"(vmcs_pa) | ||
| 366 | : "cc", "memory"); | ||
| 367 | |||
| 368 | return ret; | ||
| 369 | } | ||
| 370 | |||
| 371 | static inline int vmptrld(uint64_t vmcs_pa) | ||
| 372 | { | ||
| 373 | uint8_t ret; | ||
| 374 | |||
| 375 | __asm__ __volatile__ ("vmptrld %[pa]; setna %[ret]" | ||
| 376 | : [ret]"=rm"(ret) | ||
| 377 | : [pa]"m"(vmcs_pa) | ||
| 378 | : "cc", "memory"); | ||
| 379 | |||
| 380 | return ret; | ||
| 381 | } | ||
| 382 | |||
| 383 | /* | ||
| 384 | * No guest state (e.g. GPRs) is established by this vmlaunch. | ||
| 385 | */ | ||
| 386 | static inline int vmlaunch(void) | ||
| 387 | { | ||
| 388 | int ret; | ||
| 389 | |||
| 390 | __asm__ __volatile__("push %%rbp;" | ||
| 391 | "push %%rcx;" | ||
| 392 | "push %%rdx;" | ||
| 393 | "push %%rsi;" | ||
| 394 | "push %%rdi;" | ||
| 395 | "push $0;" | ||
| 396 | "vmwrite %%rsp, %[host_rsp];" | ||
| 397 | "lea 1f(%%rip), %%rax;" | ||
| 398 | "vmwrite %%rax, %[host_rip];" | ||
| 399 | "vmlaunch;" | ||
| 400 | "incq (%%rsp);" | ||
| 401 | "1: pop %%rax;" | ||
| 402 | "pop %%rdi;" | ||
| 403 | "pop %%rsi;" | ||
| 404 | "pop %%rdx;" | ||
| 405 | "pop %%rcx;" | ||
| 406 | "pop %%rbp;" | ||
| 407 | : [ret]"=&a"(ret) | ||
| 408 | : [host_rsp]"r"((uint64_t)HOST_RSP), | ||
| 409 | [host_rip]"r"((uint64_t)HOST_RIP) | ||
| 410 | : "memory", "cc", "rbx", "r8", "r9", "r10", | ||
| 411 | "r11", "r12", "r13", "r14", "r15"); | ||
| 412 | return ret; | ||
| 413 | } | ||
| 414 | |||
| 415 | /* | ||
| 416 | * No guest state (e.g. GPRs) is established by this vmresume. | ||
| 417 | */ | ||
| 418 | static inline int vmresume(void) | ||
| 419 | { | ||
| 420 | int ret; | ||
| 421 | |||
| 422 | __asm__ __volatile__("push %%rbp;" | ||
| 423 | "push %%rcx;" | ||
| 424 | "push %%rdx;" | ||
| 425 | "push %%rsi;" | ||
| 426 | "push %%rdi;" | ||
| 427 | "push $0;" | ||
| 428 | "vmwrite %%rsp, %[host_rsp];" | ||
| 429 | "lea 1f(%%rip), %%rax;" | ||
| 430 | "vmwrite %%rax, %[host_rip];" | ||
| 431 | "vmresume;" | ||
| 432 | "incq (%%rsp);" | ||
| 433 | "1: pop %%rax;" | ||
| 434 | "pop %%rdi;" | ||
| 435 | "pop %%rsi;" | ||
| 436 | "pop %%rdx;" | ||
| 437 | "pop %%rcx;" | ||
| 438 | "pop %%rbp;" | ||
| 439 | : [ret]"=&a"(ret) | ||
| 440 | : [host_rsp]"r"((uint64_t)HOST_RSP), | ||
| 441 | [host_rip]"r"((uint64_t)HOST_RIP) | ||
| 442 | : "memory", "cc", "rbx", "r8", "r9", "r10", | ||
| 443 | "r11", "r12", "r13", "r14", "r15"); | ||
| 444 | return ret; | ||
| 445 | } | ||
| 446 | |||
| 447 | static inline int vmread(uint64_t encoding, uint64_t *value) | ||
| 448 | { | ||
| 449 | uint64_t tmp; | ||
| 450 | uint8_t ret; | ||
| 451 | |||
| 452 | __asm__ __volatile__("vmread %[encoding], %[value]; setna %[ret]" | ||
| 453 | : [value]"=rm"(tmp), [ret]"=rm"(ret) | ||
| 454 | : [encoding]"r"(encoding) | ||
| 455 | : "cc", "memory"); | ||
| 456 | |||
| 457 | *value = tmp; | ||
| 458 | return ret; | ||
| 459 | } | ||
| 460 | |||
| 461 | /* | ||
| 462 | * A wrapper around vmread that ignores errors and returns zero if the | ||
| 463 | * vmread instruction fails. | ||
| 464 | */ | ||
| 465 | static inline uint64_t vmreadz(uint64_t encoding) | ||
| 466 | { | ||
| 467 | uint64_t value = 0; | ||
| 468 | vmread(encoding, &value); | ||
| 469 | return value; | ||
| 470 | } | ||
| 471 | |||
| 472 | static inline int vmwrite(uint64_t encoding, uint64_t value) | ||
| 473 | { | ||
| 474 | uint8_t ret; | ||
| 475 | |||
| 476 | __asm__ __volatile__ ("vmwrite %[value], %[encoding]; setna %[ret]" | ||
| 477 | : [ret]"=rm"(ret) | ||
| 478 | : [value]"rm"(value), [encoding]"r"(encoding) | ||
| 479 | : "cc", "memory"); | ||
| 480 | |||
| 481 | return ret; | ||
| 482 | } | ||
| 483 | |||
| 484 | static inline uint32_t vmcs_revision(void) | ||
| 485 | { | ||
| 486 | return rdmsr(MSR_IA32_VMX_BASIC); | ||
| 487 | } | ||
| 488 | |||
| 489 | void prepare_for_vmx_operation(void); | ||
| 490 | void prepare_vmcs(void *guest_rip, void *guest_rsp); | ||
| 491 | struct kvm_vm *vm_create_default_vmx(uint32_t vcpuid, | ||
| 492 | vmx_guest_code_t guest_code); | ||
| 493 | |||
| 494 | #endif /* !SELFTEST_KVM_VMX_H */ | ||
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index e213d513dc61..2cedfda181d4 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c | |||
| @@ -378,7 +378,7 @@ int kvm_memcmp_hva_gva(void *hva, | |||
| 378 | * complicated. This function uses a reasonable default length for | 378 | * complicated. This function uses a reasonable default length for |
| 379 | * the array and performs the appropriate allocation. | 379 | * the array and performs the appropriate allocation. |
| 380 | */ | 380 | */ |
| 381 | struct kvm_cpuid2 *allocate_kvm_cpuid2(void) | 381 | static struct kvm_cpuid2 *allocate_kvm_cpuid2(void) |
| 382 | { | 382 | { |
| 383 | struct kvm_cpuid2 *cpuid; | 383 | struct kvm_cpuid2 *cpuid; |
| 384 | int nent = 100; | 384 | int nent = 100; |
| @@ -402,17 +402,21 @@ struct kvm_cpuid2 *allocate_kvm_cpuid2(void) | |||
| 402 | * Input Args: None | 402 | * Input Args: None |
| 403 | * | 403 | * |
| 404 | * Output Args: | 404 | * Output Args: |
| 405 | * cpuid - The supported KVM CPUID | ||
| 406 | * | 405 | * |
| 407 | * Return: void | 406 | * Return: The supported KVM CPUID |
| 408 | * | 407 | * |
| 409 | * Get the guest CPUID supported by KVM. | 408 | * Get the guest CPUID supported by KVM. |
| 410 | */ | 409 | */ |
| 411 | void kvm_get_supported_cpuid(struct kvm_cpuid2 *cpuid) | 410 | struct kvm_cpuid2 *kvm_get_supported_cpuid(void) |
| 412 | { | 411 | { |
| 412 | static struct kvm_cpuid2 *cpuid; | ||
| 413 | int ret; | 413 | int ret; |
| 414 | int kvm_fd; | 414 | int kvm_fd; |
| 415 | 415 | ||
| 416 | if (cpuid) | ||
| 417 | return cpuid; | ||
| 418 | |||
| 419 | cpuid = allocate_kvm_cpuid2(); | ||
| 416 | kvm_fd = open(KVM_DEV_PATH, O_RDONLY); | 420 | kvm_fd = open(KVM_DEV_PATH, O_RDONLY); |
| 417 | TEST_ASSERT(kvm_fd >= 0, "open %s failed, rc: %i errno: %i", | 421 | TEST_ASSERT(kvm_fd >= 0, "open %s failed, rc: %i errno: %i", |
| 418 | KVM_DEV_PATH, kvm_fd, errno); | 422 | KVM_DEV_PATH, kvm_fd, errno); |
| @@ -422,6 +426,7 @@ void kvm_get_supported_cpuid(struct kvm_cpuid2 *cpuid) | |||
| 422 | ret, errno); | 426 | ret, errno); |
| 423 | 427 | ||
| 424 | close(kvm_fd); | 428 | close(kvm_fd); |
| 429 | return cpuid; | ||
| 425 | } | 430 | } |
| 426 | 431 | ||
| 427 | /* Locate a cpuid entry. | 432 | /* Locate a cpuid entry. |
| @@ -435,12 +440,13 @@ void kvm_get_supported_cpuid(struct kvm_cpuid2 *cpuid) | |||
| 435 | * Return: A pointer to the cpuid entry. Never returns NULL. | 440 | * Return: A pointer to the cpuid entry. Never returns NULL. |
| 436 | */ | 441 | */ |
| 437 | struct kvm_cpuid_entry2 * | 442 | struct kvm_cpuid_entry2 * |
| 438 | find_cpuid_index_entry(struct kvm_cpuid2 *cpuid, uint32_t function, | 443 | kvm_get_supported_cpuid_index(uint32_t function, uint32_t index) |
| 439 | uint32_t index) | ||
| 440 | { | 444 | { |
| 445 | struct kvm_cpuid2 *cpuid; | ||
| 441 | struct kvm_cpuid_entry2 *entry = NULL; | 446 | struct kvm_cpuid_entry2 *entry = NULL; |
| 442 | int i; | 447 | int i; |
| 443 | 448 | ||
| 449 | cpuid = kvm_get_supported_cpuid(); | ||
| 444 | for (i = 0; i < cpuid->nent; i++) { | 450 | for (i = 0; i < cpuid->nent; i++) { |
| 445 | if (cpuid->entries[i].function == function && | 451 | if (cpuid->entries[i].function == function && |
| 446 | cpuid->entries[i].index == index) { | 452 | cpuid->entries[i].index == index) { |
diff --git a/tools/testing/selftests/kvm/lib/vmx.c b/tools/testing/selftests/kvm/lib/vmx.c new file mode 100644 index 000000000000..0231bc0aae7b --- /dev/null +++ b/tools/testing/selftests/kvm/lib/vmx.c | |||
| @@ -0,0 +1,243 @@ | |||
| 1 | /* | ||
| 2 | * tools/testing/selftests/kvm/lib/vmx.c | ||
| 3 | * | ||
| 4 | * Copyright (C) 2018, Google LLC. | ||
| 5 | * | ||
| 6 | * This work is licensed under the terms of the GNU GPL, version 2. | ||
| 7 | */ | ||
| 8 | |||
| 9 | #define _GNU_SOURCE /* for program_invocation_name */ | ||
| 10 | |||
| 11 | #include "test_util.h" | ||
| 12 | #include "kvm_util.h" | ||
| 13 | #include "x86.h" | ||
| 14 | #include "vmx.h" | ||
| 15 | |||
| 16 | /* Create a default VM for VMX tests. | ||
| 17 | * | ||
| 18 | * Input Args: | ||
| 19 | * vcpuid - The id of the single VCPU to add to the VM. | ||
| 20 | * guest_code - The vCPU's entry point | ||
| 21 | * | ||
| 22 | * Output Args: None | ||
| 23 | * | ||
| 24 | * Return: | ||
| 25 | * Pointer to opaque structure that describes the created VM. | ||
| 26 | */ | ||
| 27 | struct kvm_vm * | ||
| 28 | vm_create_default_vmx(uint32_t vcpuid, vmx_guest_code_t guest_code) | ||
| 29 | { | ||
| 30 | struct kvm_cpuid2 *cpuid; | ||
| 31 | struct kvm_vm *vm; | ||
| 32 | vm_vaddr_t vmxon_vaddr; | ||
| 33 | vm_paddr_t vmxon_paddr; | ||
| 34 | vm_vaddr_t vmcs_vaddr; | ||
| 35 | vm_paddr_t vmcs_paddr; | ||
| 36 | |||
| 37 | vm = vm_create_default(vcpuid, (void *) guest_code); | ||
| 38 | |||
| 39 | /* Enable nesting in CPUID */ | ||
| 40 | vcpu_set_cpuid(vm, vcpuid, kvm_get_supported_cpuid()); | ||
| 41 | |||
| 42 | /* Setup of a region of guest memory for the vmxon region. */ | ||
| 43 | vmxon_vaddr = vm_vaddr_alloc(vm, getpagesize(), 0, 0, 0); | ||
| 44 | vmxon_paddr = addr_gva2gpa(vm, vmxon_vaddr); | ||
| 45 | |||
| 46 | /* Setup of a region of guest memory for a vmcs. */ | ||
| 47 | vmcs_vaddr = vm_vaddr_alloc(vm, getpagesize(), 0, 0, 0); | ||
| 48 | vmcs_paddr = addr_gva2gpa(vm, vmcs_vaddr); | ||
| 49 | |||
| 50 | vcpu_args_set(vm, vcpuid, 4, vmxon_vaddr, vmxon_paddr, vmcs_vaddr, | ||
| 51 | vmcs_paddr); | ||
| 52 | |||
| 53 | return vm; | ||
| 54 | } | ||
| 55 | |||
/*
 * Put the (guest) logical processor into a state from which VMXON is
 * legal: adjust CR0/CR4 to the VMX-mandated values, set CR4.VMXE, and
 * make sure IA32_FEATURE_CONTROL permits VMXON outside SMX.
 *
 * Runs inside the guest; must be called before vmxon().
 */
void prepare_for_vmx_operation(void)
{
	uint64_t feature_control;
	uint64_t required;
	unsigned long cr0;
	unsigned long cr4;

	/*
	 * Ensure bits in CR0 and CR4 are valid in VMX operation:
	 * - Bit X is 1 in _FIXED0: bit X is fixed to 1 in CRx.
	 * - Bit X is 0 in _FIXED1: bit X is fixed to 0 in CRx.
	 */
	__asm__ __volatile__("mov %%cr0, %0" : "=r"(cr0) : : "memory");
	cr0 &= rdmsr(MSR_IA32_VMX_CR0_FIXED1);	/* clear bits fixed to 0 */
	cr0 |= rdmsr(MSR_IA32_VMX_CR0_FIXED0);	/* set bits fixed to 1 */
	__asm__ __volatile__("mov %0, %%cr0" : : "r"(cr0) : "memory");

	__asm__ __volatile__("mov %%cr4, %0" : "=r"(cr4) : : "memory");
	cr4 &= rdmsr(MSR_IA32_VMX_CR4_FIXED1);	/* clear bits fixed to 0 */
	cr4 |= rdmsr(MSR_IA32_VMX_CR4_FIXED0);	/* set bits fixed to 1 */
	/* Enable VMX operation */
	cr4 |= X86_CR4_VMXE;
	__asm__ __volatile__("mov %0, %%cr4" : : "r"(cr4) : "memory");

	/*
	 * Configure IA32_FEATURE_CONTROL MSR to allow VMXON:
	 *  Bit 0: Lock bit. If clear, VMXON causes a #GP.
	 *  Bit 2: Enables VMXON outside of SMX operation. If clear, VMXON
	 *    outside of SMX causes a #GP.
	 * Only write the MSR when needed: once the lock bit is set, further
	 * writes would #GP.
	 */
	required = FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
	required |= FEATURE_CONTROL_LOCKED;
	feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
	if ((feature_control & required) != required)
		wrmsr(MSR_IA32_FEATURE_CONTROL, feature_control | required);
}
| 92 | |||
/*
 * Initialize the control fields to the most basic settings possible.
 *
 * The pin-based, processor-based, exit and entry controls are taken
 * straight from the corresponding VMX capability MSRs, so presumably
 * only the controls those MSRs require to be 1 end up set — TODO
 * confirm against the SDM's allowed-0/allowed-1 encoding.  Must run
 * with a VMCS current (vmptrld done).
 */
static inline void init_vmcs_control_fields(void)
{
	vmwrite(VIRTUAL_PROCESSOR_ID, 0);
	vmwrite(POSTED_INTR_NV, 0);

	vmwrite(PIN_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_PINBASED_CTLS));
	vmwrite(CPU_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_PROCBASED_CTLS));
	vmwrite(EXCEPTION_BITMAP, 0);
	vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
	vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, -1); /* Never match */
	vmwrite(CR3_TARGET_COUNT, 0);
	vmwrite(VM_EXIT_CONTROLS, rdmsr(MSR_IA32_VMX_EXIT_CTLS) |
		VM_EXIT_HOST_ADDR_SPACE_SIZE);	  /* 64-bit host */
	vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
	vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
	vmwrite(VM_ENTRY_CONTROLS, rdmsr(MSR_IA32_VMX_ENTRY_CTLS) |
		VM_ENTRY_IA32E_MODE);		  /* 64-bit guest */
	vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);
	vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);	  /* no event injection */
	vmwrite(TPR_THRESHOLD, 0);
	vmwrite(SECONDARY_VM_EXEC_CONTROL, 0);

	/* No CR0/CR4 bits owned by the hypervisor; shadows mirror reality. */
	vmwrite(CR0_GUEST_HOST_MASK, 0);
	vmwrite(CR4_GUEST_HOST_MASK, 0);
	vmwrite(CR0_READ_SHADOW, get_cr0());
	vmwrite(CR4_READ_SHADOW, get_cr4());
}
| 123 | |||
/*
 * Initialize the host state fields based on the current host state, with
 * the exception of HOST_RSP and HOST_RIP, which should be set by vmlaunch
 * or vmresume.
 *
 * Reads VM_EXIT_CONTROLS back from the VMCS, so the control fields must
 * already have been initialized (see prepare_vmcs() ordering).
 */
static inline void init_vmcs_host_state(void)
{
	uint32_t exit_controls = vmreadz(VM_EXIT_CONTROLS);

	vmwrite(HOST_ES_SELECTOR, get_es());
	vmwrite(HOST_CS_SELECTOR, get_cs());
	vmwrite(HOST_SS_SELECTOR, get_ss());
	vmwrite(HOST_DS_SELECTOR, get_ds());
	vmwrite(HOST_FS_SELECTOR, get_fs());
	vmwrite(HOST_GS_SELECTOR, get_gs());
	vmwrite(HOST_TR_SELECTOR, get_tr());

	/* Only fill MSR-backed fields whose load-on-exit control is set. */
	if (exit_controls & VM_EXIT_LOAD_IA32_PAT)
		vmwrite(HOST_IA32_PAT, rdmsr(MSR_IA32_CR_PAT));
	if (exit_controls & VM_EXIT_LOAD_IA32_EFER)
		vmwrite(HOST_IA32_EFER, rdmsr(MSR_EFER));
	if (exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
		vmwrite(HOST_IA32_PERF_GLOBAL_CTRL,
			rdmsr(MSR_CORE_PERF_GLOBAL_CTRL));

	vmwrite(HOST_IA32_SYSENTER_CS, rdmsr(MSR_IA32_SYSENTER_CS));

	vmwrite(HOST_CR0, get_cr0());
	vmwrite(HOST_CR3, get_cr3());
	vmwrite(HOST_CR4, get_cr4());
	vmwrite(HOST_FS_BASE, rdmsr(MSR_FS_BASE));
	vmwrite(HOST_GS_BASE, rdmsr(MSR_GS_BASE));
	/* TR base comes from the current TSS descriptor in the GDT. */
	vmwrite(HOST_TR_BASE,
		get_desc64_base((struct desc64 *)(get_gdt_base() + get_tr())));
	vmwrite(HOST_GDTR_BASE, get_gdt_base());
	vmwrite(HOST_IDTR_BASE, get_idt_base());
	vmwrite(HOST_IA32_SYSENTER_ESP, rdmsr(MSR_IA32_SYSENTER_ESP));
	vmwrite(HOST_IA32_SYSENTER_EIP, rdmsr(MSR_IA32_SYSENTER_EIP));
}
| 163 | |||
/*
 * Initialize the guest state fields essentially as a clone of
 * the host state fields. Some host state fields have fixed
 * values, and we set the corresponding guest state fields accordingly.
 *
 * Reads back the HOST_* fields written by init_vmcs_host_state(), so
 * that function must run first.  rip/rsp become the guest's entry
 * point and initial stack pointer.
 */
static inline void init_vmcs_guest_state(void *rip, void *rsp)
{
	vmwrite(GUEST_ES_SELECTOR, vmreadz(HOST_ES_SELECTOR));
	vmwrite(GUEST_CS_SELECTOR, vmreadz(HOST_CS_SELECTOR));
	vmwrite(GUEST_SS_SELECTOR, vmreadz(HOST_SS_SELECTOR));
	vmwrite(GUEST_DS_SELECTOR, vmreadz(HOST_DS_SELECTOR));
	vmwrite(GUEST_FS_SELECTOR, vmreadz(HOST_FS_SELECTOR));
	vmwrite(GUEST_GS_SELECTOR, vmreadz(HOST_GS_SELECTOR));
	vmwrite(GUEST_LDTR_SELECTOR, 0);
	vmwrite(GUEST_TR_SELECTOR, vmreadz(HOST_TR_SELECTOR));
	vmwrite(GUEST_INTR_STATUS, 0);
	vmwrite(GUEST_PML_INDEX, 0);

	/* -1 means no shadow VMCS is linked. */
	vmwrite(VMCS_LINK_POINTER, -1ll);
	vmwrite(GUEST_IA32_DEBUGCTL, 0);
	vmwrite(GUEST_IA32_PAT, vmreadz(HOST_IA32_PAT));
	vmwrite(GUEST_IA32_EFER, vmreadz(HOST_IA32_EFER));
	vmwrite(GUEST_IA32_PERF_GLOBAL_CTRL,
		vmreadz(HOST_IA32_PERF_GLOBAL_CTRL));

	/* Flat segments: maximum limits except TR (standard 64-bit TSS). */
	vmwrite(GUEST_ES_LIMIT, -1);
	vmwrite(GUEST_CS_LIMIT, -1);
	vmwrite(GUEST_SS_LIMIT, -1);
	vmwrite(GUEST_DS_LIMIT, -1);
	vmwrite(GUEST_FS_LIMIT, -1);
	vmwrite(GUEST_GS_LIMIT, -1);
	vmwrite(GUEST_LDTR_LIMIT, -1);
	vmwrite(GUEST_TR_LIMIT, 0x67);
	vmwrite(GUEST_GDTR_LIMIT, 0xffff);
	vmwrite(GUEST_IDTR_LIMIT, 0xffff);
	/*
	 * Access rights: 0x10000 sets the "unusable" bit for null
	 * selectors; otherwise use a flat read/write data segment
	 * (0xc093), a 64-bit code segment for CS (0xa09b) and a busy
	 * 64-bit TSS for TR (0x8b) — encodings per the SDM's segment
	 * access-rights format.
	 */
	vmwrite(GUEST_ES_AR_BYTES,
		vmreadz(GUEST_ES_SELECTOR) == 0 ? 0x10000 : 0xc093);
	vmwrite(GUEST_CS_AR_BYTES, 0xa09b);
	vmwrite(GUEST_SS_AR_BYTES, 0xc093);
	vmwrite(GUEST_DS_AR_BYTES,
		vmreadz(GUEST_DS_SELECTOR) == 0 ? 0x10000 : 0xc093);
	vmwrite(GUEST_FS_AR_BYTES,
		vmreadz(GUEST_FS_SELECTOR) == 0 ? 0x10000 : 0xc093);
	vmwrite(GUEST_GS_AR_BYTES,
		vmreadz(GUEST_GS_SELECTOR) == 0 ? 0x10000 : 0xc093);
	vmwrite(GUEST_LDTR_AR_BYTES, 0x10000);	/* LDT unusable */
	vmwrite(GUEST_TR_AR_BYTES, 0x8b);
	vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
	vmwrite(GUEST_ACTIVITY_STATE, 0);	/* active, not HLT/shutdown */
	vmwrite(GUEST_SYSENTER_CS, vmreadz(HOST_IA32_SYSENTER_CS));
	vmwrite(VMX_PREEMPTION_TIMER_VALUE, 0);

	vmwrite(GUEST_CR0, vmreadz(HOST_CR0));
	vmwrite(GUEST_CR3, vmreadz(HOST_CR3));
	vmwrite(GUEST_CR4, vmreadz(HOST_CR4));
	vmwrite(GUEST_ES_BASE, 0);
	vmwrite(GUEST_CS_BASE, 0);
	vmwrite(GUEST_SS_BASE, 0);
	vmwrite(GUEST_DS_BASE, 0);
	vmwrite(GUEST_FS_BASE, vmreadz(HOST_FS_BASE));
	vmwrite(GUEST_GS_BASE, vmreadz(HOST_GS_BASE));
	vmwrite(GUEST_LDTR_BASE, 0);
	vmwrite(GUEST_TR_BASE, vmreadz(HOST_TR_BASE));
	vmwrite(GUEST_GDTR_BASE, vmreadz(HOST_GDTR_BASE));
	vmwrite(GUEST_IDTR_BASE, vmreadz(HOST_IDTR_BASE));
	vmwrite(GUEST_DR7, 0x400);	/* DR7 reset value */
	vmwrite(GUEST_RSP, (uint64_t)rsp);
	vmwrite(GUEST_RIP, (uint64_t)rip);
	vmwrite(GUEST_RFLAGS, 2);	/* only the always-1 reserved bit */
	vmwrite(GUEST_PENDING_DBG_EXCEPTIONS, 0);
	vmwrite(GUEST_SYSENTER_ESP, vmreadz(HOST_IA32_SYSENTER_ESP));
	vmwrite(GUEST_SYSENTER_EIP, vmreadz(HOST_IA32_SYSENTER_EIP));
}
| 237 | |||
/*
 * Fully populate the currently loaded VMCS so that a subsequent
 * vmlaunch enters a 64-bit guest at guest_rip with stack guest_rsp.
 * Ordering matters: the host-state setup vmreads VM_EXIT_CONTROLS and
 * the guest-state setup vmreads the HOST_* fields.
 */
void prepare_vmcs(void *guest_rip, void *guest_rsp)
{
	/* 1. Execution/exit/entry controls. */
	init_vmcs_control_fields();
	/* 2. Host state to restore on VM-exit (except RSP/RIP). */
	init_vmcs_host_state();
	/* 3. Guest state cloned from host state, entering at guest_rip. */
	init_vmcs_guest_state(guest_rip, guest_rsp);
}
diff --git a/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c new file mode 100644 index 000000000000..8f7f62093add --- /dev/null +++ b/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c | |||
| @@ -0,0 +1,231 @@ | |||
| 1 | /* | ||
 * tools/testing/selftests/kvm/vmx_tsc_adjust_test.c
| 3 | * | ||
| 4 | * Copyright (C) 2018, Google LLC. | ||
| 5 | * | ||
| 6 | * This work is licensed under the terms of the GNU GPL, version 2. | ||
| 7 | * | ||
| 8 | * | ||
| 9 | * IA32_TSC_ADJUST test | ||
| 10 | * | ||
| 11 | * According to the SDM, "if an execution of WRMSR to the | ||
| 12 | * IA32_TIME_STAMP_COUNTER MSR adds (or subtracts) value X from the TSC, | ||
| 13 | * the logical processor also adds (or subtracts) value X from the | ||
 * IA32_TSC_ADJUST MSR".
| 15 | * | ||
| 16 | * Note that when L1 doesn't intercept writes to IA32_TSC, a | ||
| 17 | * WRMSR(IA32_TSC) from L2 sets L1's TSC value, not L2's perceived TSC | ||
| 18 | * value. | ||
| 19 | * | ||
| 20 | * This test verifies that this unusual case is handled correctly. | ||
| 21 | */ | ||
| 22 | |||
| 23 | #include "test_util.h" | ||
| 24 | #include "kvm_util.h" | ||
| 25 | #include "x86.h" | ||
| 26 | #include "vmx.h" | ||
| 27 | |||
| 28 | #include <string.h> | ||
| 29 | #include <sys/ioctl.h> | ||
| 30 | |||
/* Fallback in case the system headers do not define this MSR. */
#ifndef MSR_IA32_TSC_ADJUST
#define MSR_IA32_TSC_ADJUST 0x3b
#endif

#define PAGE_SIZE 4096
#define VCPU_ID 5

/* Delta subtracted from the TSC by each WRMSR(IA32_TSC) in the test. */
#define TSC_ADJUST_VALUE (1ll << 32)
/* Value written to the TSC_OFFSET VMCS field; shifts L2's TSC view. */
#define TSC_OFFSET_VALUE -(1ll << 48)

/* I/O ports the guest writes to in order to exit back to L0 (main). */
enum {
	PORT_ABORT = 0x1000,
	PORT_REPORT,
	PORT_DONE,
};

/* Guest-virtual and guest-physical address of one allocated page. */
struct vmx_page {
	vm_vaddr_t virt;
	vm_paddr_t phys;
};

/* Indices into the vmx_pages descriptor array. */
enum {
	VMXON_PAGE = 0,
	VMCS_PAGE,
	MSR_BITMAP_PAGE,

	NUM_VMX_PAGES,
};

/* KVM_SET_MSRS payload holding exactly one contiguous MSR entry. */
struct kvm_single_msr {
	struct kvm_msrs header;
	struct kvm_msr_entry entry;
} __attribute__((packed));

/* The virtual machine object. */
static struct kvm_vm *vm;

/* Array of vmx_page descriptors that is shared with the guest. */
struct vmx_page *vmx_pages;
/*
 * Exit from the guest to the L0 userspace harness (main) via a port
 * I/O instruction.  The port selects the action (see the PORT_* enum)
 * and the argument travels in RDI, where main() retrieves it from the
 * saved guest registers.
 */
#define exit_to_l0(_port, _arg) do_exit_to_l0(_port, (unsigned long) (_arg))
static void do_exit_to_l0(uint16_t port, unsigned long arg)
{
	/*
	 * "in" triggers a KVM_EXIT_IO to userspace; the "D" constraint
	 * pins arg in RDI and RAX is clobbered by the byte read into %al.
	 */
	__asm__ __volatile__("in %[port], %%al"
		:
		: [port]"d"(port), "D"(arg)
		: "rax");
}


/* Abort the whole test (via PORT_ABORT) if _condition does not hold. */
#define GUEST_ASSERT(_condition) do { \
	if (!(_condition)) \
		exit_to_l0(PORT_ABORT, "Failed guest assert: " #_condition); \
} while (0)
| 85 | |||
| 86 | static void check_ia32_tsc_adjust(int64_t max) | ||
| 87 | { | ||
| 88 | int64_t adjust; | ||
| 89 | |||
| 90 | adjust = rdmsr(MSR_IA32_TSC_ADJUST); | ||
| 91 | exit_to_l0(PORT_REPORT, adjust); | ||
| 92 | GUEST_ASSERT(adjust <= max); | ||
| 93 | } | ||
| 94 | |||
/*
 * Runs as the L2 (nested) guest.  Reconstructs what L1's TSC value
 * must be by undoing TSC_OFFSET_VALUE, then uses WRMSR(IA32_TSC) to
 * move it back by another TSC_ADJUST_VALUE.  L1 does not intercept
 * the write, so this sets L1's TSC and must also be reflected in
 * IA32_TSC_ADJUST (checked here, and again by L1 after the vmcall).
 */
static void l2_guest_code(void)
{
	/* L2's rdtsc() == L1's TSC + TSC_OFFSET_VALUE; undo the offset. */
	uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;

	wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);
	check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);

	/* Exit to L1 */
	__asm__ __volatile__("vmcall");
}
| 105 | |||
/*
 * Runs as the L1 guest: verifies that WRMSR(IA32_TSC) updates
 * IA32_TSC_ADJUST, then enters VMX operation and launches L2 to repeat
 * the check from nested context.  vmx_pages points at the descriptor
 * array set up by allocate_vmx_pages() in the L0 harness.
 */
static void l1_guest_code(struct vmx_page *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	uint32_t control;
	uintptr_t save_cr3;

	/*
	 * Move the TSC back by TSC_ADJUST_VALUE; per the SDM this must
	 * subtract the same amount from IA32_TSC_ADJUST.  The TSC is
	 * assumed not to have reached 2^32 yet, so the delta is exactly
	 * -TSC_ADJUST_VALUE.
	 */
	GUEST_ASSERT(rdtsc() < TSC_ADJUST_VALUE);
	wrmsr(MSR_IA32_TSC, rdtsc() - TSC_ADJUST_VALUE);
	check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);

	prepare_for_vmx_operation();

	/* Enter VMX root operation. */
	*(uint32_t *)vmx_pages[VMXON_PAGE].virt = vmcs_revision();
	GUEST_ASSERT(!vmxon(vmx_pages[VMXON_PAGE].phys));

	/* Load a VMCS. */
	*(uint32_t *)vmx_pages[VMCS_PAGE].virt = vmcs_revision();
	GUEST_ASSERT(!vmclear(vmx_pages[VMCS_PAGE].phys));
	GUEST_ASSERT(!vmptrld(vmx_pages[VMCS_PAGE].phys));

	/*
	 * Prepare the VMCS for L2 execution.  The MSR bitmap page was
	 * zeroed at allocation, so (per the SDM) L2's WRMSR(IA32_TSC) is
	 * NOT intercepted; TSC offsetting shifts L2's view of the TSC by
	 * TSC_OFFSET_VALUE.
	 */
	prepare_vmcs(l2_guest_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
	control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
	control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETING;
	vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
	vmwrite(MSR_BITMAP, vmx_pages[MSR_BITMAP_PAGE].phys);
	vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE);

	/*
	 * Jump into L2.  First, test failure to load guest CR3: a failed
	 * VM entry must not change IA32_TSC_ADJUST.
	 */
	save_cr3 = vmreadz(GUEST_CR3);
	vmwrite(GUEST_CR3, -1ull);
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) ==
		     (EXIT_REASON_FAILED_VMENTRY | EXIT_REASON_INVALID_STATE));
	check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
	vmwrite(GUEST_CR3, save_cr3);

	/* Now launch L2 for real; it returns via vmcall. */
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	/* L2's WRMSR(IA32_TSC) must have moved IA32_TSC_ADJUST again. */
	check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);

	exit_to_l0(PORT_DONE, 0);
}
| 152 | |||
| 153 | static void allocate_vmx_page(struct vmx_page *page) | ||
| 154 | { | ||
| 155 | vm_vaddr_t virt; | ||
| 156 | |||
| 157 | virt = vm_vaddr_alloc(vm, PAGE_SIZE, 0, 0, 0); | ||
| 158 | memset(addr_gva2hva(vm, virt), 0, PAGE_SIZE); | ||
| 159 | |||
| 160 | page->virt = virt; | ||
| 161 | page->phys = addr_gva2gpa(vm, virt); | ||
| 162 | } | ||
| 163 | |||
| 164 | static vm_vaddr_t allocate_vmx_pages(void) | ||
| 165 | { | ||
| 166 | vm_vaddr_t vmx_pages_vaddr; | ||
| 167 | int i; | ||
| 168 | |||
| 169 | vmx_pages_vaddr = vm_vaddr_alloc( | ||
| 170 | vm, sizeof(struct vmx_page) * NUM_VMX_PAGES, 0, 0, 0); | ||
| 171 | |||
| 172 | vmx_pages = (void *) addr_gva2hva(vm, vmx_pages_vaddr); | ||
| 173 | |||
| 174 | for (i = 0; i < NUM_VMX_PAGES; i++) | ||
| 175 | allocate_vmx_page(&vmx_pages[i]); | ||
| 176 | |||
| 177 | return vmx_pages_vaddr; | ||
| 178 | } | ||
| 179 | |||
| 180 | void report(int64_t val) | ||
| 181 | { | ||
| 182 | printf("IA32_TSC_ADJUST is %ld (%lld * TSC_ADJUST_VALUE + %lld).\n", | ||
| 183 | val, val / TSC_ADJUST_VALUE, val % TSC_ADJUST_VALUE); | ||
| 184 | } | ||
| 185 | |||
| 186 | int main(int argc, char *argv[]) | ||
| 187 | { | ||
| 188 | vm_vaddr_t vmx_pages_vaddr; | ||
| 189 | struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1); | ||
| 190 | |||
| 191 | if (!(entry->ecx & CPUID_VMX)) { | ||
| 192 | printf("nested VMX not enabled, skipping test"); | ||
| 193 | return 0; | ||
| 194 | } | ||
| 195 | |||
| 196 | vm = vm_create_default_vmx(VCPU_ID, (void *) l1_guest_code); | ||
| 197 | |||
| 198 | /* Allocate VMX pages and shared descriptors (vmx_pages). */ | ||
| 199 | vmx_pages_vaddr = allocate_vmx_pages(); | ||
| 200 | vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_vaddr); | ||
| 201 | |||
| 202 | for (;;) { | ||
| 203 | volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID); | ||
| 204 | struct kvm_regs regs; | ||
| 205 | |||
| 206 | vcpu_run(vm, VCPU_ID); | ||
| 207 | TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, | ||
| 208 | "Got exit_reason other than KVM_EXIT_IO: %u (%s),\n", | ||
| 209 | run->exit_reason, | ||
| 210 | exit_reason_str(run->exit_reason)); | ||
| 211 | |||
| 212 | vcpu_regs_get(vm, VCPU_ID, ®s); | ||
| 213 | |||
| 214 | switch (run->io.port) { | ||
| 215 | case PORT_ABORT: | ||
| 216 | TEST_ASSERT(false, "%s", (const char *) regs.rdi); | ||
| 217 | /* NOT REACHED */ | ||
| 218 | case PORT_REPORT: | ||
| 219 | report(regs.rdi); | ||
| 220 | break; | ||
| 221 | case PORT_DONE: | ||
| 222 | goto done; | ||
| 223 | default: | ||
| 224 | TEST_ASSERT(false, "Unknown port 0x%x.", run->io.port); | ||
| 225 | } | ||
| 226 | } | ||
| 227 | |||
| 228 | kvm_vm_free(vm); | ||
| 229 | done: | ||
| 230 | return 0; | ||
| 231 | } | ||
