author	Linus Torvalds <torvalds@linux-foundation.org>	2018-10-25 20:57:35 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-10-25 20:57:35 -0400
commit	0d1e8b8d2bcd3150d51754d8d0fdbf44dc88b0d3 (patch)
tree	2794cb2347daa76b00160a6ffb68663f4138dcc7 /tools
parent	83c4087ce468601501ecde4d0ec5b2abd5f57c31 (diff)
parent	22a7cdcae6a4a3c8974899e62851d270956f58ce (diff)
Merge tag 'kvm-4.20-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Radim Krčmář:
 "ARM:
   - Improved guest IPA space support (32 to 52 bits)
   - RAS event delivery for 32bit
   - PMU fixes
   - Guest entry hardening
   - Various cleanups
   - Port of dirty_log_test selftest

  PPC:
   - Nested HV KVM support for radix guests on POWER9. The performance
     is much better than with PR KVM. Migration and arbitrary level of
     nesting is supported.
   - Disable nested HV-KVM on early POWER9 chips that need a particular
     hardware bug workaround
   - One VM per core mode to prevent potential data leaks
   - PCI pass-through optimization
   - merge ppc-kvm topic branch and kvm-ppc-fixes to get a better base

  s390:
   - Initial version of AP crypto virtualization via vfio-mdev
   - Improvement for vfio-ap
   - Set the host program identifier
   - Optimize page table locking

  x86:
   - Enable nested virtualization by default
   - Implement Hyper-V IPI hypercalls
   - Improve #PF and #DB handling
   - Allow guests to use Enlightened VMCS
   - Add migration selftests for VMCS and Enlightened VMCS
   - Allow coalesced PIO accesses
   - Add an option to perform nested VMCS host state consistency check
     through hardware
   - Automatic tuning of lapic_timer_advance_ns
   - Many fixes, minor improvements, and cleanups"

* tag 'kvm-4.20-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (204 commits)
  KVM/nVMX: Do not validate that posted_intr_desc_addr is page aligned
  Revert "kvm: x86: optimize dr6 restore"
  KVM: PPC: Optimize clearing TCEs for sparse tables
  x86/kvm/nVMX: tweak shadow fields
  selftests/kvm: add missing executables to .gitignore
  KVM: arm64: Safety check PSTATE when entering guest and handle IL
  KVM: PPC: Book3S HV: Don't use streamlined entry path on early POWER9 chips
  arm/arm64: KVM: Enable 32 bits kvm vcpu events support
  arm/arm64: KVM: Rename function kvm_arch_dev_ioctl_check_extension()
  KVM: arm64: Fix caching of host MDCR_EL2 value
  KVM: VMX: enable nested virtualization by default
  KVM/x86: Use 32bit xor to clear registers in svm.c
  kvm: x86: Introduce KVM_CAP_EXCEPTION_PAYLOAD
  kvm: vmx: Defer setting of DR6 until #DB delivery
  kvm: x86: Defer setting of CR2 until #PF delivery
  kvm: x86: Add payload operands to kvm_multiple_exception
  kvm: x86: Add exception payload fields to kvm_vcpu_events
  kvm: x86: Add has_payload and payload to kvm_queued_exception
  KVM: Documentation: Fix omission in struct kvm_vcpu_events
  KVM: selftests: add Enlightened VMCS test
  ...
Diffstat (limited to 'tools')
-rw-r--r-- tools/arch/x86/include/uapi/asm/kvm.h | 10
-rw-r--r-- tools/include/uapi/linux/kvm.h | 5
-rw-r--r-- tools/perf/arch/powerpc/util/book3s_hv_exits.h | 1
-rw-r--r-- tools/testing/selftests/kvm/.gitignore | 14
-rw-r--r-- tools/testing/selftests/kvm/Makefile | 39
-rw-r--r-- tools/testing/selftests/kvm/dirty_log_test.c | 374
-rw-r--r-- tools/testing/selftests/kvm/include/aarch64/processor.h | 55
-rw-r--r-- tools/testing/selftests/kvm/include/evmcs.h | 1098
-rw-r--r-- tools/testing/selftests/kvm/include/kvm_util.h | 169
-rw-r--r-- tools/testing/selftests/kvm/include/sparsebit.h | 6
-rw-r--r-- tools/testing/selftests/kvm/include/test_util.h | 6
-rw-r--r-- tools/testing/selftests/kvm/include/x86_64/processor.h (renamed from tools/testing/selftests/kvm/include/x86.h) | 28
-rw-r--r-- tools/testing/selftests/kvm/include/x86_64/vmx.h (renamed from tools/testing/selftests/kvm/include/vmx.h) | 35
-rw-r--r-- tools/testing/selftests/kvm/lib/aarch64/processor.c | 311
-rw-r--r-- tools/testing/selftests/kvm/lib/assert.c | 2
-rw-r--r-- tools/testing/selftests/kvm/lib/kvm_util.c | 564
-rw-r--r-- tools/testing/selftests/kvm/lib/kvm_util_internal.h | 33
-rw-r--r-- tools/testing/selftests/kvm/lib/ucall.c | 144
-rw-r--r-- tools/testing/selftests/kvm/lib/x86_64/processor.c (renamed from tools/testing/selftests/kvm/lib/x86.c) | 263
-rw-r--r-- tools/testing/selftests/kvm/lib/x86_64/vmx.c (renamed from tools/testing/selftests/kvm/lib/vmx.c) | 55
-rw-r--r-- tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c (renamed from tools/testing/selftests/kvm/cr4_cpuid_sync_test.c) | 14
-rw-r--r-- tools/testing/selftests/kvm/x86_64/evmcs_test.c | 160
-rw-r--r-- tools/testing/selftests/kvm/x86_64/platform_info_test.c (renamed from tools/testing/selftests/kvm/platform_info_test.c) | 14
-rw-r--r-- tools/testing/selftests/kvm/x86_64/set_sregs_test.c (renamed from tools/testing/selftests/kvm/set_sregs_test.c) | 2
-rw-r--r-- tools/testing/selftests/kvm/x86_64/state_test.c (renamed from tools/testing/selftests/kvm/state_test.c) | 47
-rw-r--r-- tools/testing/selftests/kvm/x86_64/sync_regs_test.c (renamed from tools/testing/selftests/kvm/sync_regs_test.c) | 2
-rw-r--r-- tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c (renamed from tools/testing/selftests/kvm/vmx_tsc_adjust_test.c) | 24
27 files changed, 2762 insertions, 713 deletions
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index fd23d5778ea1..8a6eff9c27f3 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -288,6 +288,7 @@ struct kvm_reinject_control {
 #define KVM_VCPUEVENT_VALID_SIPI_VECTOR	0x00000002
 #define KVM_VCPUEVENT_VALID_SHADOW	0x00000004
 #define KVM_VCPUEVENT_VALID_SMM		0x00000008
+#define KVM_VCPUEVENT_VALID_PAYLOAD	0x00000010
 
 /* Interrupt shadow states */
 #define KVM_X86_SHADOW_INT_MOV_SS	0x01
@@ -299,7 +300,10 @@ struct kvm_vcpu_events {
 		__u8 injected;
 		__u8 nr;
 		__u8 has_error_code;
-		__u8 pad;
+		union {
+			__u8 pad;
+			__u8 pending;
+		};
 		__u32 error_code;
 	} exception;
 	struct {
@@ -322,7 +326,9 @@ struct kvm_vcpu_events {
 		__u8 smm_inside_nmi;
 		__u8 latched_init;
 	} smi;
-	__u32 reserved[9];
+	__u8 reserved[27];
+	__u8 exception_has_payload;
+	__u64 exception_payload;
 };
 
 /* for KVM_GET/SET_DEBUGREGS */
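
With the (separate) KVM_CAP_EXCEPTION_PAYLOAD capability enabled, the payload carries the value whose delivery is now deferred: CR2 for a pending #PF, DR6 for a pending #DB. A minimal sketch of how userspace might read it back through the stock KVM_GET_VCPU_EVENTS ioctl (the open vcpu_fd and the enabled capability are assumptions):

#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

static void dump_pending_exception(int vcpu_fd)
{
	struct kvm_vcpu_events events;

	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
		return;

	if (events.exception.pending && events.exception_has_payload)
		/* CR2 for a #PF, DR6 for a #DB. */
		printf("vector %u, payload 0x%llx\n", events.exception.nr,
		       (unsigned long long)events.exception_payload);
}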
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 251be353f950..2875ce85b322 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -719,6 +719,7 @@ struct kvm_ppc_one_seg_page_size {
 
 #define KVM_PPC_PAGE_SIZES_REAL		0x00000001
 #define KVM_PPC_1T_SEGMENTS		0x00000002
+#define KVM_PPC_NO_HASH			0x00000004
 
 struct kvm_ppc_smmu_info {
 	__u64 flags;
@@ -953,6 +954,10 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_NESTED_STATE 157
 #define KVM_CAP_ARM_INJECT_SERROR_ESR 158
 #define KVM_CAP_MSR_PLATFORM_INFO 159
+#define KVM_CAP_PPC_NESTED_HV 160
+#define KVM_CAP_HYPERV_SEND_IPI 161
+#define KVM_CAP_COALESCED_PIO 162
+#define KVM_CAP_HYPERV_ENLIGHTENED_VMCS 163
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
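Each new constant is just a capability number that userspace probes with the stock KVM_CHECK_EXTENSION ioctl on the /dev/kvm (or VM) fd. A self-contained sketch:

#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

int main(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);

	if (kvm_fd < 0)
		return 1;
	/* 0 means unsupported; a positive value means supported. */
	printf("HYPERV_ENLIGHTENED_VMCS: %d\n",
	       ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_HYPERV_ENLIGHTENED_VMCS));
	printf("COALESCED_PIO: %d\n",
	       ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_PIO));
	return 0;
}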
diff --git a/tools/perf/arch/powerpc/util/book3s_hv_exits.h b/tools/perf/arch/powerpc/util/book3s_hv_exits.h
index 853b95d1e139..2011376c7ab5 100644
--- a/tools/perf/arch/powerpc/util/book3s_hv_exits.h
+++ b/tools/perf/arch/powerpc/util/book3s_hv_exits.h
@@ -15,7 +15,6 @@
 	{0x400, "INST_STORAGE"}, \
 	{0x480, "INST_SEGMENT"}, \
 	{0x500, "EXTERNAL"}, \
-	{0x501, "EXTERNAL_LEVEL"}, \
 	{0x502, "EXTERNAL_HV"}, \
 	{0x600, "ALIGNMENT"}, \
 	{0x700, "PROGRAM"}, \
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index 5c34752e1cff..6210ba41c29e 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -1,6 +1,8 @@
-cr4_cpuid_sync_test
-platform_info_test
-set_sregs_test
-sync_regs_test
-vmx_tsc_adjust_test
-state_test
+/x86_64/cr4_cpuid_sync_test
+/x86_64/evmcs_test
+/x86_64/platform_info_test
+/x86_64/set_sregs_test
+/x86_64/sync_regs_test
+/x86_64/vmx_tsc_adjust_test
+/x86_64/state_test
+/dirty_log_test
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index ec32dad3c3f0..01a219229238 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -1,26 +1,30 @@
 all:
 
-top_srcdir = ../../../../
+top_srcdir = ../../../..
 UNAME_M := $(shell uname -m)
 
-LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c
-LIBKVM_x86_64 = lib/x86.c lib/vmx.c
-
-TEST_GEN_PROGS_x86_64 = platform_info_test
-TEST_GEN_PROGS_x86_64 += set_sregs_test
-TEST_GEN_PROGS_x86_64 += sync_regs_test
-TEST_GEN_PROGS_x86_64 += vmx_tsc_adjust_test
-TEST_GEN_PROGS_x86_64 += cr4_cpuid_sync_test
-TEST_GEN_PROGS_x86_64 += state_test
+LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/ucall.c lib/sparsebit.c
+LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c
+LIBKVM_aarch64 = lib/aarch64/processor.c
+
+TEST_GEN_PROGS_x86_64 = x86_64/platform_info_test
+TEST_GEN_PROGS_x86_64 += x86_64/set_sregs_test
+TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
+TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
+TEST_GEN_PROGS_x86_64 += x86_64/cr4_cpuid_sync_test
+TEST_GEN_PROGS_x86_64 += x86_64/state_test
+TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
 TEST_GEN_PROGS_x86_64 += dirty_log_test
 
+TEST_GEN_PROGS_aarch64 += dirty_log_test
+
 TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M))
 LIBKVM += $(LIBKVM_$(UNAME_M))
 
 INSTALL_HDR_PATH = $(top_srcdir)/usr
 LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
-LINUX_TOOL_INCLUDE = $(top_srcdir)tools/include
-CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -I..
+LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include
+CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I..
 LDFLAGS += -pthread
 
 # After inclusion, $(OUTPUT) is defined and
@@ -29,7 +33,7 @@ include ../lib.mk
 
 STATIC_LIBS := $(OUTPUT)/libkvm.a
 LIBKVM_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM))
-EXTRA_CLEAN += $(LIBKVM_OBJ) $(STATIC_LIBS)
+EXTRA_CLEAN += $(LIBKVM_OBJ) $(STATIC_LIBS) cscope.*
 
 x := $(shell mkdir -p $(sort $(dir $(LIBKVM_OBJ))))
 $(LIBKVM_OBJ): $(OUTPUT)/%.o: %.c
@@ -41,3 +45,12 @@ $(OUTPUT)/libkvm.a: $(LIBKVM_OBJ)
 all: $(STATIC_LIBS)
 $(TEST_GEN_PROGS): $(STATIC_LIBS)
 $(STATIC_LIBS):| khdr
+
+cscope: include_paths = $(LINUX_TOOL_INCLUDE) $(LINUX_HDR_PATH) include lib ..
+cscope:
+	$(RM) cscope.*
+	(find $(include_paths) -name '*.h' \
+		-exec realpath --relative-base=$(PWD) {} \;; \
+	find . -name '*.c' \
+		-exec realpath --relative-base=$(PWD) {} \;) | sort -u > cscope.files
+	cscope -b
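
The new target is run from this selftests directory: 'make cscope' rebuilds cscope.files from the tools include paths, the exported headers, and the local sources, then regenerates the database; a subsequent 'cscope -d' browses it without re-indexing.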
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index 0c2cdc105f96..d59820cc2d39 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -5,6 +5,8 @@
  * Copyright (C) 2018, Red Hat, Inc.
  */
 
+#define _GNU_SOURCE /* for program_invocation_name */
+
 #include <stdio.h>
 #include <stdlib.h>
 #include <unistd.h>
@@ -15,76 +17,78 @@
 
 #include "test_util.h"
 #include "kvm_util.h"
+#include "processor.h"
+
+#define DEBUG printf
 
-#define DEBUG printf
+#define VCPU_ID 1
 
-#define VCPU_ID 1
 /* The memory slot index to track dirty pages */
 #define TEST_MEM_SLOT_INDEX 1
-/*
- * GPA offset of the testing memory slot. Must be bigger than the
- * default vm mem slot, which is DEFAULT_GUEST_PHY_PAGES.
- */
-#define TEST_MEM_OFFSET (1ULL << 30) /* 1G */
-/* Size of the testing memory slot */
-#define TEST_MEM_PAGES (1ULL << 18) /* 1G for 4K pages */
+
+/* Default guest test memory offset, 1G */
+#define DEFAULT_GUEST_TEST_MEM 0x40000000
+
 /* How many pages to dirty for each guest loop */
 #define TEST_PAGES_PER_LOOP 1024
+
 /* How many host loops to run (one KVM_GET_DIRTY_LOG for each loop) */
 #define TEST_HOST_LOOP_N 32
+
 /* Interval for each host loop (ms) */
 #define TEST_HOST_LOOP_INTERVAL 10
+
+/*
+ * Guest/Host shared variables. Ensure addr_gva2hva() and/or
+ * sync_global_to/from_guest() are used when accessing from
+ * the host. READ/WRITE_ONCE() should also be used with anything
+ * that may change.
+ */
+static uint64_t host_page_size;
+static uint64_t guest_page_size;
+static uint64_t guest_num_pages;
+static uint64_t random_array[TEST_PAGES_PER_LOOP];
+static uint64_t iteration;
 
 /*
- * Guest variables. We use these variables to share data between host
- * and guest. There are two copies of the variables, one in host memory
- * (which is unused) and one in guest memory. When the host wants to
- * access these variables, it needs to call addr_gva2hva() to access the
- * guest copy.
+ * GPA offset of the testing memory slot. Must be bigger than
+ * DEFAULT_GUEST_PHY_PAGES.
  */
-uint64_t guest_random_array[TEST_PAGES_PER_LOOP];
-uint64_t guest_iteration;
-uint64_t guest_page_size;
+static uint64_t guest_test_mem = DEFAULT_GUEST_TEST_MEM;
 
 /*
- * Writes to the first byte of a random page within the testing memory
- * region continuously.
+ * Continuously write to the first 8 bytes of a random pages within
+ * the testing memory region.
  */
-void guest_code(void)
+static void guest_code(void)
 {
-	int i = 0;
-	uint64_t volatile *array = guest_random_array;
-	uint64_t volatile *guest_addr;
+	int i;
 
 	while (true) {
 		for (i = 0; i < TEST_PAGES_PER_LOOP; i++) {
-			/*
-			 * Write to the first 8 bytes of a random page
-			 * on the testing memory region.
-			 */
-			guest_addr = (uint64_t *)
-				(TEST_MEM_OFFSET +
-				 (array[i] % TEST_MEM_PAGES) * guest_page_size);
-			*guest_addr = guest_iteration;
+			uint64_t addr = guest_test_mem;
+			addr += (READ_ONCE(random_array[i]) % guest_num_pages)
+				* guest_page_size;
+			addr &= ~(host_page_size - 1);
+			*(uint64_t *)addr = READ_ONCE(iteration);
 		}
+
 		/* Tell the host that we need more random numbers */
 		GUEST_SYNC(1);
 	}
 }
 
-/*
- * Host variables. These variables should only be used by the host
- * rather than the guest.
- */
-bool host_quit;
+/* Host variables */
+static bool host_quit;
 
 /* Points to the test VM memory region on which we track dirty logs */
-void *host_test_mem;
+static void *host_test_mem;
+static uint64_t host_num_pages;
 
 /* For statistics only */
-uint64_t host_dirty_count;
-uint64_t host_clear_count;
-uint64_t host_track_next_count;
+static uint64_t host_dirty_count;
+static uint64_t host_clear_count;
+static uint64_t host_track_next_count;
 
 /*
  * We use this bitmap to track some pages that should have its dirty
@@ -93,40 +97,34 @@ uint64_t host_track_next_count;
  * page bit is cleared in the latest bitmap, then the system must
  * report that write in the next get dirty log call.
  */
-unsigned long *host_bmap_track;
+static unsigned long *host_bmap_track;
 
-void generate_random_array(uint64_t *guest_array, uint64_t size)
+static void generate_random_array(uint64_t *guest_array, uint64_t size)
 {
 	uint64_t i;
 
-	for (i = 0; i < size; i++) {
+	for (i = 0; i < size; i++)
 		guest_array[i] = random();
-	}
 }
 
-void *vcpu_worker(void *data)
+static void *vcpu_worker(void *data)
 {
 	int ret;
-	uint64_t loops, *guest_array, pages_count = 0;
 	struct kvm_vm *vm = data;
+	uint64_t *guest_array;
+	uint64_t pages_count = 0;
 	struct kvm_run *run;
-	struct guest_args args;
+	struct ucall uc;
 
 	run = vcpu_state(vm, VCPU_ID);
 
-	/* Retrieve the guest random array pointer and cache it */
-	guest_array = addr_gva2hva(vm, (vm_vaddr_t)guest_random_array);
-
-	DEBUG("VCPU starts\n");
-
+	guest_array = addr_gva2hva(vm, (vm_vaddr_t)random_array);
 	generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
 
 	while (!READ_ONCE(host_quit)) {
-		/* Let the guest to dirty these random pages */
+		/* Let the guest dirty the random pages */
 		ret = _vcpu_run(vm, VCPU_ID);
-		guest_args_read(vm, VCPU_ID, &args);
-		if (run->exit_reason == KVM_EXIT_IO &&
-		    args.port == GUEST_PORT_SYNC) {
+		if (get_ucall(vm, VCPU_ID, &uc) == UCALL_SYNC) {
 			pages_count += TEST_PAGES_PER_LOOP;
 			generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
 		} else {
@@ -137,18 +135,20 @@ void *vcpu_worker(void *data)
 		}
 	}
 
-	DEBUG("VCPU exits, dirtied %"PRIu64" pages\n", pages_count);
+	DEBUG("Dirtied %"PRIu64" pages\n", pages_count);
 
 	return NULL;
 }
 
-void vm_dirty_log_verify(unsigned long *bmap, uint64_t iteration)
+static void vm_dirty_log_verify(unsigned long *bmap)
 {
 	uint64_t page;
-	uint64_t volatile *value_ptr;
+	uint64_t *value_ptr;
+	uint64_t step = host_page_size >= guest_page_size ? 1 :
+			guest_page_size / host_page_size;
 
-	for (page = 0; page < TEST_MEM_PAGES; page++) {
-		value_ptr = host_test_mem + page * getpagesize();
+	for (page = 0; page < host_num_pages; page += step) {
+		value_ptr = host_test_mem + page * host_page_size;
 
 		/* If this is a special page that we were tracking... */
 		if (test_and_clear_bit(page, host_bmap_track)) {
@@ -208,88 +208,117 @@ void vm_dirty_log_verify(unsigned long *bmap, uint64_t iteration)
 	}
 }
 
-void help(char *name)
+static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
+				uint64_t extra_mem_pages, void *guest_code)
 {
-	puts("");
-	printf("usage: %s [-i iterations] [-I interval] [-h]\n", name);
-	puts("");
-	printf(" -i: specify iteration counts (default: %"PRIu64")\n",
-	       TEST_HOST_LOOP_N);
-	printf(" -I: specify interval in ms (default: %"PRIu64" ms)\n",
-	       TEST_HOST_LOOP_INTERVAL);
-	puts("");
-	exit(0);
+	struct kvm_vm *vm;
+	uint64_t extra_pg_pages = extra_mem_pages / 512 * 2;
+
+	vm = vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
+	kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
+#ifdef __x86_64__
+	vm_create_irqchip(vm);
+#endif
+	vm_vcpu_add_default(vm, vcpuid, guest_code);
+	return vm;
 }
 
-int main(int argc, char *argv[])
+static void run_test(enum vm_guest_mode mode, unsigned long iterations,
+		     unsigned long interval, bool top_offset)
 {
+	unsigned int guest_pa_bits, guest_page_shift;
 	pthread_t vcpu_thread;
 	struct kvm_vm *vm;
-	uint64_t volatile *psize, *iteration;
-	unsigned long *bmap, iterations = TEST_HOST_LOOP_N,
-		      interval = TEST_HOST_LOOP_INTERVAL;
-	int opt;
-
-	while ((opt = getopt(argc, argv, "hi:I:")) != -1) {
-		switch (opt) {
-		case 'i':
-			iterations = strtol(optarg, NULL, 10);
-			break;
-		case 'I':
-			interval = strtol(optarg, NULL, 10);
-			break;
-		case 'h':
-		default:
-			help(argv[0]);
-			break;
-		}
+	uint64_t max_gfn;
+	unsigned long *bmap;
+
+	switch (mode) {
+	case VM_MODE_P52V48_4K:
+		guest_pa_bits = 52;
+		guest_page_shift = 12;
+		break;
+	case VM_MODE_P52V48_64K:
+		guest_pa_bits = 52;
+		guest_page_shift = 16;
+		break;
+	case VM_MODE_P40V48_4K:
+		guest_pa_bits = 40;
+		guest_page_shift = 12;
+		break;
+	case VM_MODE_P40V48_64K:
+		guest_pa_bits = 40;
+		guest_page_shift = 16;
+		break;
+	default:
+		TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", mode);
 	}
 
-	TEST_ASSERT(iterations > 2, "Iteration must be bigger than zero\n");
-	TEST_ASSERT(interval > 0, "Interval must be bigger than zero");
+	DEBUG("Testing guest mode: %s\n", vm_guest_mode_string(mode));
 
-	DEBUG("Test iterations: %"PRIu64", interval: %"PRIu64" (ms)\n",
-	      iterations, interval);
+	max_gfn = (1ul << (guest_pa_bits - guest_page_shift)) - 1;
+	guest_page_size = (1ul << guest_page_shift);
+	/* 1G of guest page sized pages */
+	guest_num_pages = (1ul << (30 - guest_page_shift));
+	host_page_size = getpagesize();
+	host_num_pages = (guest_num_pages * guest_page_size) / host_page_size +
+			 !!((guest_num_pages * guest_page_size) % host_page_size);
 
-	srandom(time(0));
+	if (top_offset) {
+		guest_test_mem = (max_gfn - guest_num_pages) * guest_page_size;
+		guest_test_mem &= ~(host_page_size - 1);
+	}
 
-	bmap = bitmap_alloc(TEST_MEM_PAGES);
-	host_bmap_track = bitmap_alloc(TEST_MEM_PAGES);
+	DEBUG("guest test mem offset: 0x%lx\n", guest_test_mem);
 
-	vm = vm_create_default(VCPU_ID, TEST_MEM_PAGES, guest_code);
+	bmap = bitmap_alloc(host_num_pages);
+	host_bmap_track = bitmap_alloc(host_num_pages);
+
+	vm = create_vm(mode, VCPU_ID, guest_num_pages, guest_code);
 
 	/* Add an extra memory slot for testing dirty logging */
 	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
-				    TEST_MEM_OFFSET,
+				    guest_test_mem,
 				    TEST_MEM_SLOT_INDEX,
-				    TEST_MEM_PAGES,
+				    guest_num_pages,
 				    KVM_MEM_LOG_DIRTY_PAGES);
-	/* Cache the HVA pointer of the region */
-	host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)TEST_MEM_OFFSET);
 
 	/* Do 1:1 mapping for the dirty track memory slot */
-	virt_map(vm, TEST_MEM_OFFSET, TEST_MEM_OFFSET,
-		 TEST_MEM_PAGES * getpagesize(), 0);
+	virt_map(vm, guest_test_mem, guest_test_mem,
+		 guest_num_pages * guest_page_size, 0);
+
+	/* Cache the HVA pointer of the region */
+	host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_mem);
 
+#ifdef __x86_64__
 	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+#endif
+#ifdef __aarch64__
+	ucall_init(vm, UCALL_MMIO, NULL);
+#endif
 
-	/* Tell the guest about the page size on the system */
-	psize = addr_gva2hva(vm, (vm_vaddr_t)&guest_page_size);
-	*psize = getpagesize();
+	/* Export the shared variables to the guest */
+	sync_global_to_guest(vm, host_page_size);
+	sync_global_to_guest(vm, guest_page_size);
+	sync_global_to_guest(vm, guest_test_mem);
+	sync_global_to_guest(vm, guest_num_pages);
 
 	/* Start the iterations */
-	iteration = addr_gva2hva(vm, (vm_vaddr_t)&guest_iteration);
-	*iteration = 1;
+	iteration = 1;
+	sync_global_to_guest(vm, iteration);
+	host_quit = false;
+	host_dirty_count = 0;
+	host_clear_count = 0;
+	host_track_next_count = 0;
 
-	/* Start dirtying pages */
 	pthread_create(&vcpu_thread, NULL, vcpu_worker, vm);
 
-	while (*iteration < iterations) {
+	while (iteration < iterations) {
 		/* Give the vcpu thread some time to dirty some pages */
 		usleep(interval * 1000);
 		kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
-		vm_dirty_log_verify(bmap, *iteration);
-		(*iteration)++;
+		vm_dirty_log_verify(bmap);
+		iteration++;
+		sync_global_to_guest(vm, iteration);
 	}
 
 	/* Tell the vcpu thread to quit */
@@ -302,7 +331,118 @@ int main(int argc, char *argv[])
 
 	free(bmap);
 	free(host_bmap_track);
+	ucall_uninit(vm);
 	kvm_vm_free(vm);
+}
+
+static struct vm_guest_modes {
+	enum vm_guest_mode mode;
+	bool supported;
+	bool enabled;
+} vm_guest_modes[NUM_VM_MODES] = {
+#if defined(__x86_64__)
+	{ VM_MODE_P52V48_4K,	1, 1, },
+	{ VM_MODE_P52V48_64K,	0, 0, },
+	{ VM_MODE_P40V48_4K,	0, 0, },
+	{ VM_MODE_P40V48_64K,	0, 0, },
+#elif defined(__aarch64__)
+	{ VM_MODE_P52V48_4K,	0, 0, },
+	{ VM_MODE_P52V48_64K,	0, 0, },
+	{ VM_MODE_P40V48_4K,	1, 1, },
+	{ VM_MODE_P40V48_64K,	1, 1, },
+#endif
+};
+
+static void help(char *name)
+{
+	int i;
+
+	puts("");
+	printf("usage: %s [-h] [-i iterations] [-I interval] "
+	       "[-o offset] [-t] [-m mode]\n", name);
+	puts("");
+	printf(" -i: specify iteration counts (default: %"PRIu64")\n",
+	       TEST_HOST_LOOP_N);
+	printf(" -I: specify interval in ms (default: %"PRIu64" ms)\n",
+	       TEST_HOST_LOOP_INTERVAL);
+	printf(" -o: guest test memory offset (default: 0x%lx)\n",
+	       DEFAULT_GUEST_TEST_MEM);
+	printf(" -t: map guest test memory at the top of the allowed "
+	       "physical address range\n");
+	printf(" -m: specify the guest mode ID to test "
+	       "(default: test all supported modes)\n"
+	       "     This option may be used multiple times.\n"
+	       "     Guest mode IDs:\n");
+	for (i = 0; i < NUM_VM_MODES; ++i) {
+		printf("         %d:    %s%s\n",
+		       vm_guest_modes[i].mode,
+		       vm_guest_mode_string(vm_guest_modes[i].mode),
+		       vm_guest_modes[i].supported ? " (supported)" : "");
+	}
+	puts("");
+	exit(0);
+}
+
+int main(int argc, char *argv[])
+{
+	unsigned long iterations = TEST_HOST_LOOP_N;
+	unsigned long interval = TEST_HOST_LOOP_INTERVAL;
+	bool mode_selected = false;
+	bool top_offset = false;
+	unsigned int mode;
+	int opt, i;
+
+	while ((opt = getopt(argc, argv, "hi:I:o:tm:")) != -1) {
+		switch (opt) {
+		case 'i':
+			iterations = strtol(optarg, NULL, 10);
+			break;
+		case 'I':
+			interval = strtol(optarg, NULL, 10);
+			break;
+		case 'o':
+			guest_test_mem = strtoull(optarg, NULL, 0);
+			break;
+		case 't':
+			top_offset = true;
+			break;
+		case 'm':
+			if (!mode_selected) {
+				for (i = 0; i < NUM_VM_MODES; ++i)
+					vm_guest_modes[i].enabled = 0;
+				mode_selected = true;
+			}
+			mode = strtoul(optarg, NULL, 10);
+			TEST_ASSERT(mode < NUM_VM_MODES,
+				    "Guest mode ID %d too big", mode);
+			vm_guest_modes[mode].enabled = 1;
+			break;
+		case 'h':
+		default:
+			help(argv[0]);
+			break;
+		}
+	}
+
+	TEST_ASSERT(iterations > 2, "Iterations must be greater than two");
+	TEST_ASSERT(interval > 0, "Interval must be greater than zero");
+	TEST_ASSERT(!top_offset || guest_test_mem == DEFAULT_GUEST_TEST_MEM,
+		    "Cannot use both -o [offset] and -t at the same time");
+
+	DEBUG("Test iterations: %"PRIu64", interval: %"PRIu64" (ms)\n",
+	      iterations, interval);
+
+	srandom(time(0));
+
+	for (i = 0; i < NUM_VM_MODES; ++i) {
+		if (!vm_guest_modes[i].enabled)
+			continue;
+		TEST_ASSERT(vm_guest_modes[i].supported,
+			    "Guest mode ID %d (%s) not supported.",
+			    vm_guest_modes[i].mode,
+			    vm_guest_mode_string(vm_guest_modes[i].mode));
+		run_test(vm_guest_modes[i].mode, iterations, interval, top_offset);
+	}
 
 	return 0;
 }
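
The rewritten test communicates through plain globals that are mirrored into guest memory with sync_global_to_guest(), instead of patching guest copies through addr_gva2hva(). A minimal sketch of the pattern, assuming the selftest library's helpers and an already-created vm handle (host_bump_iteration() is a hypothetical name for illustration):

/* Shared by host and guest code; each side works on its own copy. */
static uint64_t iteration;

static void host_bump_iteration(struct kvm_vm *vm)
{
	/* Update the host copy, then mirror it into the guest's copy. */
	iteration++;
	sync_global_to_guest(vm, iteration);
}

Going by the help() text above, a typical invocation would be ./dirty_log_test -i 32 -I 10, optionally with -m <id> to pin a single guest mode.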
diff --git a/tools/testing/selftests/kvm/include/aarch64/processor.h b/tools/testing/selftests/kvm/include/aarch64/processor.h
new file mode 100644
index 000000000000..9ef2ab1a0c08
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/aarch64/processor.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AArch64 processor specific defines
+ *
+ * Copyright (C) 2018, Red Hat, Inc.
+ */
+#ifndef SELFTEST_KVM_PROCESSOR_H
+#define SELFTEST_KVM_PROCESSOR_H
+
+#include "kvm_util.h"
+
+
+#define ARM64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
+			   KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
+
+#define CPACR_EL1	3, 0, 1, 0, 2
+#define TCR_EL1		3, 0, 2, 0, 2
+#define MAIR_EL1	3, 0, 10, 2, 0
+#define TTBR0_EL1	3, 0, 2, 0, 0
+#define SCTLR_EL1	3, 0, 1, 0, 0
+
+/*
+ * Default MAIR
+ *                  index   attribute
+ * DEVICE_nGnRnE      0     0000:0000
+ * DEVICE_nGnRE       1     0000:0100
+ * DEVICE_GRE         2     0000:1100
+ * NORMAL_NC          3     0100:0100
+ * NORMAL             4     1111:1111
+ * NORMAL_WT          5     1011:1011
+ */
+#define DEFAULT_MAIR_EL1 ((0x00ul << (0 * 8)) | \
+			  (0x04ul << (1 * 8)) | \
+			  (0x0cul << (2 * 8)) | \
+			  (0x44ul << (3 * 8)) | \
+			  (0xfful << (4 * 8)) | \
+			  (0xbbul << (5 * 8)))
+
+static inline void get_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id, uint64_t *addr)
+{
+	struct kvm_one_reg reg;
+	reg.id = id;
+	reg.addr = (uint64_t)addr;
+	vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, &reg);
+}
+
+static inline void set_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id, uint64_t val)
+{
+	struct kvm_one_reg reg;
+	reg.id = id;
+	reg.addr = (uint64_t)&val;
+	vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, &reg);
+}
+
+#endif /* SELFTEST_KVM_PROCESSOR_H */
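
get_reg() and set_reg() are thin wrappers around the stock KVM_GET_ONE_REG/KVM_SET_ONE_REG vcpu ioctls, with ARM64_CORE_REG() building the register ID from a struct kvm_regs member. A short sketch of reading and rewinding the guest PC by one instruction (the vm handle and VCPU_ID are assumptions for illustration):

uint64_t pc;

/* Fetch the guest program counter, then step it back 4 bytes. */
get_reg(vm, VCPU_ID, ARM64_CORE_REG(regs.pc), &pc);
set_reg(vm, VCPU_ID, ARM64_CORE_REG(regs.pc), pc - 4);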
diff --git a/tools/testing/selftests/kvm/include/evmcs.h b/tools/testing/selftests/kvm/include/evmcs.h
new file mode 100644
index 000000000000..4059014d93ea
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/evmcs.h
@@ -0,0 +1,1098 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * tools/testing/selftests/kvm/include/vmx.h
+ *
+ * Copyright (C) 2018, Red Hat, Inc.
+ *
+ */
+
+#ifndef SELFTEST_KVM_EVMCS_H
+#define SELFTEST_KVM_EVMCS_H
+
+#include <stdint.h>
+#include "vmx.h"
+
+#define u16 uint16_t
+#define u32 uint32_t
+#define u64 uint64_t
+
+extern bool enable_evmcs;
+
+struct hv_vp_assist_page {
+	__u32 apic_assist;
+	__u32 reserved;
+	__u64 vtl_control[2];
+	__u64 nested_enlightenments_control[2];
+	__u32 enlighten_vmentry;
+	__u64 current_nested_vmcs;
+};
+
+struct hv_enlightened_vmcs {
+	u32 revision_id;
+	u32 abort;
+
+	u16 host_es_selector;
+	u16 host_cs_selector;
+	u16 host_ss_selector;
+	u16 host_ds_selector;
+	u16 host_fs_selector;
+	u16 host_gs_selector;
+	u16 host_tr_selector;
+
+	u64 host_ia32_pat;
+	u64 host_ia32_efer;
+
+	u64 host_cr0;
+	u64 host_cr3;
+	u64 host_cr4;
+
+	u64 host_ia32_sysenter_esp;
+	u64 host_ia32_sysenter_eip;
+	u64 host_rip;
+	u32 host_ia32_sysenter_cs;
+
+	u32 pin_based_vm_exec_control;
+	u32 vm_exit_controls;
+	u32 secondary_vm_exec_control;
+
+	u64 io_bitmap_a;
+	u64 io_bitmap_b;
+	u64 msr_bitmap;
+
+	u16 guest_es_selector;
+	u16 guest_cs_selector;
+	u16 guest_ss_selector;
+	u16 guest_ds_selector;
+	u16 guest_fs_selector;
+	u16 guest_gs_selector;
+	u16 guest_ldtr_selector;
+	u16 guest_tr_selector;
+
+	u32 guest_es_limit;
+	u32 guest_cs_limit;
+	u32 guest_ss_limit;
+	u32 guest_ds_limit;
+	u32 guest_fs_limit;
+	u32 guest_gs_limit;
+	u32 guest_ldtr_limit;
+	u32 guest_tr_limit;
+	u32 guest_gdtr_limit;
+	u32 guest_idtr_limit;
+
+	u32 guest_es_ar_bytes;
+	u32 guest_cs_ar_bytes;
+	u32 guest_ss_ar_bytes;
+	u32 guest_ds_ar_bytes;
+	u32 guest_fs_ar_bytes;
+	u32 guest_gs_ar_bytes;
+	u32 guest_ldtr_ar_bytes;
+	u32 guest_tr_ar_bytes;
+
+	u64 guest_es_base;
+	u64 guest_cs_base;
+	u64 guest_ss_base;
+	u64 guest_ds_base;
+	u64 guest_fs_base;
+	u64 guest_gs_base;
+	u64 guest_ldtr_base;
+	u64 guest_tr_base;
+	u64 guest_gdtr_base;
+	u64 guest_idtr_base;
+
+	u64 padding64_1[3];
+
+	u64 vm_exit_msr_store_addr;
+	u64 vm_exit_msr_load_addr;
+	u64 vm_entry_msr_load_addr;
+
+	u64 cr3_target_value0;
+	u64 cr3_target_value1;
+	u64 cr3_target_value2;
+	u64 cr3_target_value3;
+
+	u32 page_fault_error_code_mask;
+	u32 page_fault_error_code_match;
+
+	u32 cr3_target_count;
+	u32 vm_exit_msr_store_count;
+	u32 vm_exit_msr_load_count;
+	u32 vm_entry_msr_load_count;
+
+	u64 tsc_offset;
+	u64 virtual_apic_page_addr;
+	u64 vmcs_link_pointer;
+
+	u64 guest_ia32_debugctl;
+	u64 guest_ia32_pat;
+	u64 guest_ia32_efer;
+
+	u64 guest_pdptr0;
+	u64 guest_pdptr1;
+	u64 guest_pdptr2;
+	u64 guest_pdptr3;
+
+	u64 guest_pending_dbg_exceptions;
+	u64 guest_sysenter_esp;
+	u64 guest_sysenter_eip;
+
+	u32 guest_activity_state;
+	u32 guest_sysenter_cs;
+
+	u64 cr0_guest_host_mask;
+	u64 cr4_guest_host_mask;
+	u64 cr0_read_shadow;
+	u64 cr4_read_shadow;
+	u64 guest_cr0;
+	u64 guest_cr3;
+	u64 guest_cr4;
+	u64 guest_dr7;
+
+	u64 host_fs_base;
+	u64 host_gs_base;
+	u64 host_tr_base;
+	u64 host_gdtr_base;
+	u64 host_idtr_base;
+	u64 host_rsp;
+
+	u64 ept_pointer;
+
+	u16 virtual_processor_id;
+	u16 padding16[3];
+
+	u64 padding64_2[5];
+	u64 guest_physical_address;
+
+	u32 vm_instruction_error;
+	u32 vm_exit_reason;
+	u32 vm_exit_intr_info;
+	u32 vm_exit_intr_error_code;
+	u32 idt_vectoring_info_field;
+	u32 idt_vectoring_error_code;
+	u32 vm_exit_instruction_len;
+	u32 vmx_instruction_info;
+
+	u64 exit_qualification;
+	u64 exit_io_instruction_ecx;
+	u64 exit_io_instruction_esi;
+	u64 exit_io_instruction_edi;
+	u64 exit_io_instruction_eip;
+
+	u64 guest_linear_address;
+	u64 guest_rsp;
+	u64 guest_rflags;
+
+	u32 guest_interruptibility_info;
+	u32 cpu_based_vm_exec_control;
+	u32 exception_bitmap;
+	u32 vm_entry_controls;
+	u32 vm_entry_intr_info_field;
+	u32 vm_entry_exception_error_code;
+	u32 vm_entry_instruction_len;
+	u32 tpr_threshold;
+
+	u64 guest_rip;
+
+	u32 hv_clean_fields;
+	u32 hv_padding_32;
+	u32 hv_synthetic_controls;
+	struct {
+		u32 nested_flush_hypercall:1;
+		u32 msr_bitmap:1;
+		u32 reserved:30;
+	} hv_enlightenments_control;
+	u32 hv_vp_id;
+
+	u64 hv_vm_id;
+	u64 partition_assist_page;
+	u64 padding64_4[4];
+	u64 guest_bndcfgs;
+	u64 padding64_5[7];
+	u64 xss_exit_bitmap;
+	u64 padding64_6[7];
+};
+
+#define HV_X64_MSR_VP_ASSIST_PAGE		0x40000073
+#define HV_X64_MSR_VP_ASSIST_PAGE_ENABLE	0x00000001
+#define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT	12
+#define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK	\
+		(~((1ull << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT) - 1))
+
+struct hv_enlightened_vmcs *current_evmcs;
+struct hv_vp_assist_page *current_vp_assist;
+
+static inline int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist)
+{
+	u64 val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) |
+		HV_X64_MSR_VP_ASSIST_PAGE_ENABLE;
+
+	wrmsr(HV_X64_MSR_VP_ASSIST_PAGE, val);
+
+	current_vp_assist = vp_assist;
+
+	enable_evmcs = true;
+
+	return 0;
+}
+
+static inline int evmcs_vmptrld(uint64_t vmcs_pa, void *vmcs)
+{
+	current_vp_assist->current_nested_vmcs = vmcs_pa;
+	current_vp_assist->enlighten_vmentry = 1;
+
+	current_evmcs = vmcs;
+
+	return 0;
+}
+
+static inline int evmcs_vmptrst(uint64_t *value)
+{
+	*value = current_vp_assist->current_nested_vmcs &
+		~HV_X64_MSR_VP_ASSIST_PAGE_ENABLE;
+
+	return 0;
+}
+
+static inline int evmcs_vmread(uint64_t encoding, uint64_t *value)
+{
+	switch (encoding) {
+	case GUEST_RIP:
+		*value = current_evmcs->guest_rip;
+		break;
+	case GUEST_RSP:
+		*value = current_evmcs->guest_rsp;
+		break;
+	case GUEST_RFLAGS:
+		*value = current_evmcs->guest_rflags;
+		break;
+	case HOST_IA32_PAT:
+		*value = current_evmcs->host_ia32_pat;
+		break;
+	case HOST_IA32_EFER:
+		*value = current_evmcs->host_ia32_efer;
+		break;
+	case HOST_CR0:
+		*value = current_evmcs->host_cr0;
+		break;
+	case HOST_CR3:
+		*value = current_evmcs->host_cr3;
+		break;
+	case HOST_CR4:
+		*value = current_evmcs->host_cr4;
+		break;
+	case HOST_IA32_SYSENTER_ESP:
+		*value = current_evmcs->host_ia32_sysenter_esp;
+		break;
+	case HOST_IA32_SYSENTER_EIP:
+		*value = current_evmcs->host_ia32_sysenter_eip;
+		break;
+	case HOST_RIP:
+		*value = current_evmcs->host_rip;
+		break;
+	case IO_BITMAP_A:
+		*value = current_evmcs->io_bitmap_a;
+		break;
+	case IO_BITMAP_B:
+		*value = current_evmcs->io_bitmap_b;
+		break;
+	case MSR_BITMAP:
+		*value = current_evmcs->msr_bitmap;
+		break;
+	case GUEST_ES_BASE:
+		*value = current_evmcs->guest_es_base;
+		break;
+	case GUEST_CS_BASE:
+		*value = current_evmcs->guest_cs_base;
+		break;
+	case GUEST_SS_BASE:
+		*value = current_evmcs->guest_ss_base;
+		break;
+	case GUEST_DS_BASE:
+		*value = current_evmcs->guest_ds_base;
+		break;
+	case GUEST_FS_BASE:
+		*value = current_evmcs->guest_fs_base;
+		break;
+	case GUEST_GS_BASE:
+		*value = current_evmcs->guest_gs_base;
+		break;
+	case GUEST_LDTR_BASE:
+		*value = current_evmcs->guest_ldtr_base;
+		break;
+	case GUEST_TR_BASE:
+		*value = current_evmcs->guest_tr_base;
+		break;
+	case GUEST_GDTR_BASE:
+		*value = current_evmcs->guest_gdtr_base;
+		break;
+	case GUEST_IDTR_BASE:
+		*value = current_evmcs->guest_idtr_base;
+		break;
+	case TSC_OFFSET:
+		*value = current_evmcs->tsc_offset;
+		break;
+	case VIRTUAL_APIC_PAGE_ADDR:
+		*value = current_evmcs->virtual_apic_page_addr;
+		break;
+	case VMCS_LINK_POINTER:
+		*value = current_evmcs->vmcs_link_pointer;
+		break;
+	case GUEST_IA32_DEBUGCTL:
+		*value = current_evmcs->guest_ia32_debugctl;
+		break;
+	case GUEST_IA32_PAT:
+		*value = current_evmcs->guest_ia32_pat;
+		break;
+	case GUEST_IA32_EFER:
+		*value = current_evmcs->guest_ia32_efer;
+		break;
+	case GUEST_PDPTR0:
+		*value = current_evmcs->guest_pdptr0;
+		break;
+	case GUEST_PDPTR1:
+		*value = current_evmcs->guest_pdptr1;
+		break;
+	case GUEST_PDPTR2:
+		*value = current_evmcs->guest_pdptr2;
+		break;
+	case GUEST_PDPTR3:
+		*value = current_evmcs->guest_pdptr3;
+		break;
+	case GUEST_PENDING_DBG_EXCEPTIONS:
+		*value = current_evmcs->guest_pending_dbg_exceptions;
+		break;
+	case GUEST_SYSENTER_ESP:
+		*value = current_evmcs->guest_sysenter_esp;
+		break;
+	case GUEST_SYSENTER_EIP:
+		*value = current_evmcs->guest_sysenter_eip;
+		break;
+	case CR0_GUEST_HOST_MASK:
+		*value = current_evmcs->cr0_guest_host_mask;
+		break;
+	case CR4_GUEST_HOST_MASK:
+		*value = current_evmcs->cr4_guest_host_mask;
+		break;
+	case CR0_READ_SHADOW:
+		*value = current_evmcs->cr0_read_shadow;
+		break;
+	case CR4_READ_SHADOW:
+		*value = current_evmcs->cr4_read_shadow;
+		break;
+	case GUEST_CR0:
+		*value = current_evmcs->guest_cr0;
+		break;
+	case GUEST_CR3:
+		*value = current_evmcs->guest_cr3;
+		break;
+	case GUEST_CR4:
+		*value = current_evmcs->guest_cr4;
+		break;
+	case GUEST_DR7:
+		*value = current_evmcs->guest_dr7;
+		break;
+	case HOST_FS_BASE:
+		*value = current_evmcs->host_fs_base;
+		break;
+	case HOST_GS_BASE:
+		*value = current_evmcs->host_gs_base;
+		break;
+	case HOST_TR_BASE:
+		*value = current_evmcs->host_tr_base;
+		break;
+	case HOST_GDTR_BASE:
+		*value = current_evmcs->host_gdtr_base;
+		break;
+	case HOST_IDTR_BASE:
+		*value = current_evmcs->host_idtr_base;
+		break;
+	case HOST_RSP:
+		*value = current_evmcs->host_rsp;
+		break;
+	case EPT_POINTER:
+		*value = current_evmcs->ept_pointer;
+		break;
+	case GUEST_BNDCFGS:
+		*value = current_evmcs->guest_bndcfgs;
+		break;
+	case XSS_EXIT_BITMAP:
+		*value = current_evmcs->xss_exit_bitmap;
+		break;
+	case GUEST_PHYSICAL_ADDRESS:
+		*value = current_evmcs->guest_physical_address;
+		break;
+	case EXIT_QUALIFICATION:
+		*value = current_evmcs->exit_qualification;
+		break;
+	case GUEST_LINEAR_ADDRESS:
+		*value = current_evmcs->guest_linear_address;
+		break;
+	case VM_EXIT_MSR_STORE_ADDR:
+		*value = current_evmcs->vm_exit_msr_store_addr;
+		break;
+	case VM_EXIT_MSR_LOAD_ADDR:
+		*value = current_evmcs->vm_exit_msr_load_addr;
+		break;
+	case VM_ENTRY_MSR_LOAD_ADDR:
+		*value = current_evmcs->vm_entry_msr_load_addr;
+		break;
+	case CR3_TARGET_VALUE0:
+		*value = current_evmcs->cr3_target_value0;
+		break;
+	case CR3_TARGET_VALUE1:
+		*value = current_evmcs->cr3_target_value1;
+		break;
+	case CR3_TARGET_VALUE2:
+		*value = current_evmcs->cr3_target_value2;
+		break;
+	case CR3_TARGET_VALUE3:
+		*value = current_evmcs->cr3_target_value3;
+		break;
+	case TPR_THRESHOLD:
+		*value = current_evmcs->tpr_threshold;
+		break;
+	case GUEST_INTERRUPTIBILITY_INFO:
+		*value = current_evmcs->guest_interruptibility_info;
+		break;
+	case CPU_BASED_VM_EXEC_CONTROL:
+		*value = current_evmcs->cpu_based_vm_exec_control;
+		break;
+	case EXCEPTION_BITMAP:
+		*value = current_evmcs->exception_bitmap;
+		break;
+	case VM_ENTRY_CONTROLS:
+		*value = current_evmcs->vm_entry_controls;
+		break;
+	case VM_ENTRY_INTR_INFO_FIELD:
+		*value = current_evmcs->vm_entry_intr_info_field;
+		break;
+	case VM_ENTRY_EXCEPTION_ERROR_CODE:
+		*value = current_evmcs->vm_entry_exception_error_code;
+		break;
+	case VM_ENTRY_INSTRUCTION_LEN:
+		*value = current_evmcs->vm_entry_instruction_len;
+		break;
+	case HOST_IA32_SYSENTER_CS:
+		*value = current_evmcs->host_ia32_sysenter_cs;
+		break;
+	case PIN_BASED_VM_EXEC_CONTROL:
+		*value = current_evmcs->pin_based_vm_exec_control;
+		break;
+	case VM_EXIT_CONTROLS:
+		*value = current_evmcs->vm_exit_controls;
+		break;
+	case SECONDARY_VM_EXEC_CONTROL:
+		*value = current_evmcs->secondary_vm_exec_control;
+		break;
+	case GUEST_ES_LIMIT:
+		*value = current_evmcs->guest_es_limit;
+		break;
+	case GUEST_CS_LIMIT:
+		*value = current_evmcs->guest_cs_limit;
+		break;
+	case GUEST_SS_LIMIT:
+		*value = current_evmcs->guest_ss_limit;
+		break;
+	case GUEST_DS_LIMIT:
+		*value = current_evmcs->guest_ds_limit;
+		break;
+	case GUEST_FS_LIMIT:
+		*value = current_evmcs->guest_fs_limit;
+		break;
+	case GUEST_GS_LIMIT:
+		*value = current_evmcs->guest_gs_limit;
+		break;
+	case GUEST_LDTR_LIMIT:
+		*value = current_evmcs->guest_ldtr_limit;
+		break;
+	case GUEST_TR_LIMIT:
+		*value = current_evmcs->guest_tr_limit;
+		break;
+	case GUEST_GDTR_LIMIT:
+		*value = current_evmcs->guest_gdtr_limit;
+		break;
+	case GUEST_IDTR_LIMIT:
+		*value = current_evmcs->guest_idtr_limit;
+		break;
+	case GUEST_ES_AR_BYTES:
+		*value = current_evmcs->guest_es_ar_bytes;
+		break;
+	case GUEST_CS_AR_BYTES:
+		*value = current_evmcs->guest_cs_ar_bytes;
+		break;
+	case GUEST_SS_AR_BYTES:
+		*value = current_evmcs->guest_ss_ar_bytes;
+		break;
+	case GUEST_DS_AR_BYTES:
+		*value = current_evmcs->guest_ds_ar_bytes;
+		break;
+	case GUEST_FS_AR_BYTES:
+		*value = current_evmcs->guest_fs_ar_bytes;
+		break;
+	case GUEST_GS_AR_BYTES:
+		*value = current_evmcs->guest_gs_ar_bytes;
+		break;
+	case GUEST_LDTR_AR_BYTES:
+		*value = current_evmcs->guest_ldtr_ar_bytes;
+		break;
+	case GUEST_TR_AR_BYTES:
+		*value = current_evmcs->guest_tr_ar_bytes;
+		break;
+	case GUEST_ACTIVITY_STATE:
+		*value = current_evmcs->guest_activity_state;
+		break;
+	case GUEST_SYSENTER_CS:
+		*value = current_evmcs->guest_sysenter_cs;
+		break;
+	case VM_INSTRUCTION_ERROR:
+		*value = current_evmcs->vm_instruction_error;
+		break;
+	case VM_EXIT_REASON:
+		*value = current_evmcs->vm_exit_reason;
+		break;
+	case VM_EXIT_INTR_INFO:
+		*value = current_evmcs->vm_exit_intr_info;
+		break;
+	case VM_EXIT_INTR_ERROR_CODE:
+		*value = current_evmcs->vm_exit_intr_error_code;
+		break;
+	case IDT_VECTORING_INFO_FIELD:
+		*value = current_evmcs->idt_vectoring_info_field;
+		break;
+	case IDT_VECTORING_ERROR_CODE:
+		*value = current_evmcs->idt_vectoring_error_code;
+		break;
+	case VM_EXIT_INSTRUCTION_LEN:
+		*value = current_evmcs->vm_exit_instruction_len;
+		break;
+	case VMX_INSTRUCTION_INFO:
+		*value = current_evmcs->vmx_instruction_info;
+		break;
+	case PAGE_FAULT_ERROR_CODE_MASK:
+		*value = current_evmcs->page_fault_error_code_mask;
+		break;
+	case PAGE_FAULT_ERROR_CODE_MATCH:
+		*value = current_evmcs->page_fault_error_code_match;
+		break;
+	case CR3_TARGET_COUNT:
+		*value = current_evmcs->cr3_target_count;
+		break;
+	case VM_EXIT_MSR_STORE_COUNT:
+		*value = current_evmcs->vm_exit_msr_store_count;
+		break;
+	case VM_EXIT_MSR_LOAD_COUNT:
+		*value = current_evmcs->vm_exit_msr_load_count;
+		break;
+	case VM_ENTRY_MSR_LOAD_COUNT:
+		*value = current_evmcs->vm_entry_msr_load_count;
+		break;
+	case HOST_ES_SELECTOR:
+		*value = current_evmcs->host_es_selector;
+		break;
+	case HOST_CS_SELECTOR:
+		*value = current_evmcs->host_cs_selector;
+		break;
+	case HOST_SS_SELECTOR:
+		*value = current_evmcs->host_ss_selector;
+		break;
+	case HOST_DS_SELECTOR:
+		*value = current_evmcs->host_ds_selector;
+		break;
+	case HOST_FS_SELECTOR:
+		*value = current_evmcs->host_fs_selector;
+		break;
+	case HOST_GS_SELECTOR:
+		*value = current_evmcs->host_gs_selector;
+		break;
+	case HOST_TR_SELECTOR:
+		*value = current_evmcs->host_tr_selector;
+		break;
+	case GUEST_ES_SELECTOR:
+		*value = current_evmcs->guest_es_selector;
+		break;
+	case GUEST_CS_SELECTOR:
+		*value = current_evmcs->guest_cs_selector;
+		break;
+	case GUEST_SS_SELECTOR:
+		*value = current_evmcs->guest_ss_selector;
+		break;
+	case GUEST_DS_SELECTOR:
+		*value = current_evmcs->guest_ds_selector;
+		break;
+	case GUEST_FS_SELECTOR:
+		*value = current_evmcs->guest_fs_selector;
+		break;
+	case GUEST_GS_SELECTOR:
+		*value = current_evmcs->guest_gs_selector;
+		break;
+	case GUEST_LDTR_SELECTOR:
+		*value = current_evmcs->guest_ldtr_selector;
+		break;
+	case GUEST_TR_SELECTOR:
+		*value = current_evmcs->guest_tr_selector;
+		break;
+	case VIRTUAL_PROCESSOR_ID:
+		*value = current_evmcs->virtual_processor_id;
+		break;
+	default: return 1;
+	}
+
+	return 0;
+}
+
+static inline int evmcs_vmwrite(uint64_t encoding, uint64_t value)
+{
+	switch (encoding) {
+	case GUEST_RIP:
+		current_evmcs->guest_rip = value;
+		break;
+	case GUEST_RSP:
+		current_evmcs->guest_rsp = value;
+		break;
+	case GUEST_RFLAGS:
+		current_evmcs->guest_rflags = value;
+		break;
+	case HOST_IA32_PAT:
+		current_evmcs->host_ia32_pat = value;
+		break;
+	case HOST_IA32_EFER:
+		current_evmcs->host_ia32_efer = value;
+		break;
+	case HOST_CR0:
+		current_evmcs->host_cr0 = value;
+		break;
+	case HOST_CR3:
+		current_evmcs->host_cr3 = value;
+		break;
+	case HOST_CR4:
+		current_evmcs->host_cr4 = value;
+		break;
+	case HOST_IA32_SYSENTER_ESP:
+		current_evmcs->host_ia32_sysenter_esp = value;
+		break;
+	case HOST_IA32_SYSENTER_EIP:
+		current_evmcs->host_ia32_sysenter_eip = value;
+		break;
+	case HOST_RIP:
+		current_evmcs->host_rip = value;
+		break;
+	case IO_BITMAP_A:
+		current_evmcs->io_bitmap_a = value;
+		break;
+	case IO_BITMAP_B:
+		current_evmcs->io_bitmap_b = value;
+		break;
+	case MSR_BITMAP:
+		current_evmcs->msr_bitmap = value;
+		break;
+	case GUEST_ES_BASE:
+		current_evmcs->guest_es_base = value;
+		break;
+	case GUEST_CS_BASE:
+		current_evmcs->guest_cs_base = value;
+		break;
+	case GUEST_SS_BASE:
+		current_evmcs->guest_ss_base = value;
+		break;
+	case GUEST_DS_BASE:
+		current_evmcs->guest_ds_base = value;
+		break;
+	case GUEST_FS_BASE:
+		current_evmcs->guest_fs_base = value;
+		break;
+	case GUEST_GS_BASE:
+		current_evmcs->guest_gs_base = value;
+		break;
+	case GUEST_LDTR_BASE:
+		current_evmcs->guest_ldtr_base = value;
+		break;
+	case GUEST_TR_BASE:
+		current_evmcs->guest_tr_base = value;
+		break;
+	case GUEST_GDTR_BASE:
+		current_evmcs->guest_gdtr_base = value;
+		break;
+	case GUEST_IDTR_BASE:
+		current_evmcs->guest_idtr_base = value;
+		break;
+	case TSC_OFFSET:
+		current_evmcs->tsc_offset = value;
+		break;
+	case VIRTUAL_APIC_PAGE_ADDR:
+		current_evmcs->virtual_apic_page_addr = value;
+		break;
+	case VMCS_LINK_POINTER:
+		current_evmcs->vmcs_link_pointer = value;
+		break;
+	case GUEST_IA32_DEBUGCTL:
+		current_evmcs->guest_ia32_debugctl = value;
+		break;
+	case GUEST_IA32_PAT:
+		current_evmcs->guest_ia32_pat = value;
+		break;
+	case GUEST_IA32_EFER:
+		current_evmcs->guest_ia32_efer = value;
+		break;
+	case GUEST_PDPTR0:
+		current_evmcs->guest_pdptr0 = value;
+		break;
+	case GUEST_PDPTR1:
+		current_evmcs->guest_pdptr1 = value;
+		break;
+	case GUEST_PDPTR2:
+		current_evmcs->guest_pdptr2 = value;
+		break;
+	case GUEST_PDPTR3:
+		current_evmcs->guest_pdptr3 = value;
+		break;
+	case GUEST_PENDING_DBG_EXCEPTIONS:
+		current_evmcs->guest_pending_dbg_exceptions = value;
+		break;
+	case GUEST_SYSENTER_ESP:
+		current_evmcs->guest_sysenter_esp = value;
+		break;
+	case GUEST_SYSENTER_EIP:
+		current_evmcs->guest_sysenter_eip = value;
+		break;
+	case CR0_GUEST_HOST_MASK:
+		current_evmcs->cr0_guest_host_mask = value;
+		break;
+	case CR4_GUEST_HOST_MASK:
+		current_evmcs->cr4_guest_host_mask = value;
+		break;
+	case CR0_READ_SHADOW:
+		current_evmcs->cr0_read_shadow = value;
+		break;
+	case CR4_READ_SHADOW:
+		current_evmcs->cr4_read_shadow = value;
+		break;
+	case GUEST_CR0:
+		current_evmcs->guest_cr0 = value;
+		break;
+	case GUEST_CR3:
+		current_evmcs->guest_cr3 = value;
+		break;
+	case GUEST_CR4:
+		current_evmcs->guest_cr4 = value;
+		break;
+	case GUEST_DR7:
+		current_evmcs->guest_dr7 = value;
+		break;
+	case HOST_FS_BASE:
+		current_evmcs->host_fs_base = value;
+		break;
+	case HOST_GS_BASE:
+		current_evmcs->host_gs_base = value;
+		break;
+	case HOST_TR_BASE:
+		current_evmcs->host_tr_base = value;
+		break;
+	case HOST_GDTR_BASE:
+		current_evmcs->host_gdtr_base = value;
+		break;
+	case HOST_IDTR_BASE:
+		current_evmcs->host_idtr_base = value;
+		break;
+	case HOST_RSP:
+		current_evmcs->host_rsp = value;
+		break;
+	case EPT_POINTER:
+		current_evmcs->ept_pointer = value;
+		break;
+	case GUEST_BNDCFGS:
+		current_evmcs->guest_bndcfgs = value;
+		break;
+	case XSS_EXIT_BITMAP:
+		current_evmcs->xss_exit_bitmap = value;
+		break;
+	case GUEST_PHYSICAL_ADDRESS:
+		current_evmcs->guest_physical_address = value;
+		break;
+	case EXIT_QUALIFICATION:
+		current_evmcs->exit_qualification = value;
+		break;
+	case GUEST_LINEAR_ADDRESS:
+		current_evmcs->guest_linear_address = value;
+		break;
+	case VM_EXIT_MSR_STORE_ADDR:
+		current_evmcs->vm_exit_msr_store_addr = value;
+		break;
+	case VM_EXIT_MSR_LOAD_ADDR:
+		current_evmcs->vm_exit_msr_load_addr = value;
+		break;
+	case VM_ENTRY_MSR_LOAD_ADDR:
+		current_evmcs->vm_entry_msr_load_addr = value;
+		break;
+	case CR3_TARGET_VALUE0:
+		current_evmcs->cr3_target_value0 = value;
+		break;
+	case CR3_TARGET_VALUE1:
+		current_evmcs->cr3_target_value1 = value;
+		break;
+	case CR3_TARGET_VALUE2:
+		current_evmcs->cr3_target_value2 = value;
+		break;
+	case CR3_TARGET_VALUE3:
+		current_evmcs->cr3_target_value3 = value;
+		break;
+	case TPR_THRESHOLD:
+		current_evmcs->tpr_threshold = value;
+		break;
+	case GUEST_INTERRUPTIBILITY_INFO:
+		current_evmcs->guest_interruptibility_info = value;
+		break;
+	case CPU_BASED_VM_EXEC_CONTROL:
+		current_evmcs->cpu_based_vm_exec_control = value;
+		break;
+	case EXCEPTION_BITMAP:
+		current_evmcs->exception_bitmap = value;
+		break;
+	case VM_ENTRY_CONTROLS:
+		current_evmcs->vm_entry_controls = value;
+		break;
+	case VM_ENTRY_INTR_INFO_FIELD:
+		current_evmcs->vm_entry_intr_info_field = value;
+		break;
+	case VM_ENTRY_EXCEPTION_ERROR_CODE:
+		current_evmcs->vm_entry_exception_error_code = value;
+		break;
+	case VM_ENTRY_INSTRUCTION_LEN:
+		current_evmcs->vm_entry_instruction_len = value;
+		break;
+	case HOST_IA32_SYSENTER_CS:
+		current_evmcs->host_ia32_sysenter_cs = value;
+		break;
+	case PIN_BASED_VM_EXEC_CONTROL:
+		current_evmcs->pin_based_vm_exec_control = value;
+		break;
+	case VM_EXIT_CONTROLS:
+		current_evmcs->vm_exit_controls = value;
+		break;
+	case SECONDARY_VM_EXEC_CONTROL:
+		current_evmcs->secondary_vm_exec_control = value;
+		break;
+	case GUEST_ES_LIMIT:
+		current_evmcs->guest_es_limit = value;
+		break;
+	case GUEST_CS_LIMIT:
+		current_evmcs->guest_cs_limit = value;
+		break;
+	case GUEST_SS_LIMIT:
+		current_evmcs->guest_ss_limit = value;
+		break;
+	case GUEST_DS_LIMIT:
+		current_evmcs->guest_ds_limit = value;
+		break;
+	case GUEST_FS_LIMIT:
+		current_evmcs->guest_fs_limit = value;
+		break;
+	case GUEST_GS_LIMIT:
+		current_evmcs->guest_gs_limit = value;
+		break;
+	case GUEST_LDTR_LIMIT:
+		current_evmcs->guest_ldtr_limit = value;
+		break;
+	case GUEST_TR_LIMIT:
+		current_evmcs->guest_tr_limit = value;
+		break;
+	case GUEST_GDTR_LIMIT:
+		current_evmcs->guest_gdtr_limit = value;
+		break;
+	case GUEST_IDTR_LIMIT:
+		current_evmcs->guest_idtr_limit = value;
+		break;
+	case GUEST_ES_AR_BYTES:
+		current_evmcs->guest_es_ar_bytes = value;
+		break;
+	case GUEST_CS_AR_BYTES:
+		current_evmcs->guest_cs_ar_bytes = value;
+		break;
+	case GUEST_SS_AR_BYTES:
+		current_evmcs->guest_ss_ar_bytes = value;
+		break;
+	case GUEST_DS_AR_BYTES:
+		current_evmcs->guest_ds_ar_bytes = value;
+		break;
+	case GUEST_FS_AR_BYTES:
+		current_evmcs->guest_fs_ar_bytes = value;
+		break;
+	case GUEST_GS_AR_BYTES:
+		current_evmcs->guest_gs_ar_bytes = value;
+		break;
+	case GUEST_LDTR_AR_BYTES:
+		current_evmcs->guest_ldtr_ar_bytes = value;
+		break;
+	case GUEST_TR_AR_BYTES:
+		current_evmcs->guest_tr_ar_bytes = value;
+		break;
+	case GUEST_ACTIVITY_STATE:
+		current_evmcs->guest_activity_state = value;
+		break;
+	case GUEST_SYSENTER_CS:
+		current_evmcs->guest_sysenter_cs = value;
+		break;
+	case VM_INSTRUCTION_ERROR:
+		current_evmcs->vm_instruction_error = value;
+		break;
+	case VM_EXIT_REASON:
+		current_evmcs->vm_exit_reason = value;
+		break;
+	case VM_EXIT_INTR_INFO:
+		current_evmcs->vm_exit_intr_info = value;
+		break;
+	case VM_EXIT_INTR_ERROR_CODE:
+		current_evmcs->vm_exit_intr_error_code = value;
+		break;
+	case IDT_VECTORING_INFO_FIELD:
+		current_evmcs->idt_vectoring_info_field = value;
+		break;
+	case IDT_VECTORING_ERROR_CODE:
+		current_evmcs->idt_vectoring_error_code = value;
+		break;
+	case VM_EXIT_INSTRUCTION_LEN:
+		current_evmcs->vm_exit_instruction_len = value;
+		break;
+	case VMX_INSTRUCTION_INFO:
+		current_evmcs->vmx_instruction_info = value;
+		break;
+	case PAGE_FAULT_ERROR_CODE_MASK:
+		current_evmcs->page_fault_error_code_mask = value;
+		break;
+	case PAGE_FAULT_ERROR_CODE_MATCH:
+		current_evmcs->page_fault_error_code_match = value;
+		break;
+	case CR3_TARGET_COUNT:
+		current_evmcs->cr3_target_count = value;
+		break;
+	case VM_EXIT_MSR_STORE_COUNT:
+		current_evmcs->vm_exit_msr_store_count = value;
+		break;
+	case VM_EXIT_MSR_LOAD_COUNT:
+		current_evmcs->vm_exit_msr_load_count = value;
+		break;
+	case VM_ENTRY_MSR_LOAD_COUNT:
+		current_evmcs->vm_entry_msr_load_count = value;
+		break;
+	case HOST_ES_SELECTOR:
+		current_evmcs->host_es_selector = value;
+		break;
+	case HOST_CS_SELECTOR:
+		current_evmcs->host_cs_selector = value;
+		break;
+	case HOST_SS_SELECTOR:
+		current_evmcs->host_ss_selector = value;
+		break;
+	case HOST_DS_SELECTOR:
+		current_evmcs->host_ds_selector = value;
+		break;
+	case HOST_FS_SELECTOR:
+		current_evmcs->host_fs_selector = value;
+		break;
+	case HOST_GS_SELECTOR:
+		current_evmcs->host_gs_selector = value;
+		break;
+	case HOST_TR_SELECTOR:
+		current_evmcs->host_tr_selector = value;
+		break;
+	case GUEST_ES_SELECTOR:
+		current_evmcs->guest_es_selector = value;
+		break;
+	case GUEST_CS_SELECTOR:
+		current_evmcs->guest_cs_selector = value;
+		break;
+	case GUEST_SS_SELECTOR:
+		current_evmcs->guest_ss_selector = value;
+		break;
+	case GUEST_DS_SELECTOR:
+		current_evmcs->guest_ds_selector = value;
+		break;
+	case GUEST_FS_SELECTOR:
+		current_evmcs->guest_fs_selector = value;
+		break;
+	case GUEST_GS_SELECTOR:
+		current_evmcs->guest_gs_selector = value;
+		break;
+	case GUEST_LDTR_SELECTOR:
+		current_evmcs->guest_ldtr_selector = value;
+		break;
+	case GUEST_TR_SELECTOR:
+		current_evmcs->guest_tr_selector = value;
+		break;
+	case VIRTUAL_PROCESSOR_ID:
+		current_evmcs->virtual_processor_id = value;
+		break;
+	default: return 1;
+	}
+
+	return 0;
+}
+
+static inline int evmcs_vmlaunch(void)
+{
+	int ret;
+
+	current_evmcs->hv_clean_fields = 0;
+
+	__asm__ __volatile__("push %%rbp;"
+			     "push %%rcx;"
+			     "push %%rdx;"
+			     "push %%rsi;"
+			     "push %%rdi;"
+			     "push $0;"
+			     "mov %%rsp, (%[host_rsp]);"
+			     "lea 1f(%%rip), %%rax;"
+			     "mov %%rax, (%[host_rip]);"
+			     "vmlaunch;"
+			     "incq (%%rsp);"
+			     "1: pop %%rax;"
+			     "pop %%rdi;"
+			     "pop %%rsi;"
+			     "pop %%rdx;"
+			     "pop %%rcx;"
+			     "pop %%rbp;"
+			     : [ret]"=&a"(ret)
+			     : [host_rsp]"r"
+			       ((uint64_t)&current_evmcs->host_rsp),
+			       [host_rip]"r"
+			       ((uint64_t)&current_evmcs->host_rip)
+			     : "memory", "cc", "rbx", "r8", "r9", "r10",
+			       "r11", "r12", "r13", "r14", "r15");
+	return ret;
+}
+
+/*
+ * No guest state (e.g. GPRs) is established by this vmresume.
+ */
+static inline int evmcs_vmresume(void)
+{
+	int ret;
+
+	current_evmcs->hv_clean_fields = 0;
+
+	__asm__ __volatile__("push %%rbp;"
+			     "push %%rcx;"
+			     "push %%rdx;"
+			     "push %%rsi;"
+			     "push %%rdi;"
+			     "push $0;"
+			     "mov %%rsp, (%[host_rsp]);"
+			     "lea 1f(%%rip), %%rax;"
+			     "mov %%rax, (%[host_rip]);"
+			     "vmresume;"
+			     "incq (%%rsp);"
+			     "1: pop %%rax;"
+			     "pop %%rdi;"
+			     "pop %%rsi;"
+			     "pop %%rdx;"
+			     "pop %%rcx;"
+			     "pop %%rbp;"
+			     : [ret]"=&a"(ret)
1089 : [host_rsp]"r"
1090 ((uint64_t)&current_evmcs->host_rsp),
1091 [host_rip]"r"
1092 ((uint64_t)&current_evmcs->host_rip)
1093 : "memory", "cc", "rbx", "r8", "r9", "r10",
1094 "r11", "r12", "r13", "r14", "r15");
1095 return ret;
1096}
1097
1098#endif /* !SELFTEST_KVM_EVMCS_H */
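Both entry helpers above use the same trick to report failure: a zero is pushed on the stack, host_rsp/host_rip in the eVMCS are pointed at the current stack and at the label after the entry instruction, and if VMLAUNCH/VMRESUME falls through (i.e. the entry failed) the "incq (%rsp)" turns that zero into the nonzero value later popped into the return register. Clearing hv_clean_fields first forces Hyper-V to reload every eVMCS field. A minimal guest-side sketch of driving these helpers; l2_guest_code and l2_stack_top are hypothetical names, and it assumes evmcs_vmwrite() handles GUEST_RIP/GUEST_RSP the same way as the fields shown above:

    static void l1_guest_sketch(void)
    {
            /* Field writes land in the in-memory eVMCS, not in hardware. */
            GUEST_ASSERT(!evmcs_vmwrite(GUEST_RIP, (uint64_t)l2_guest_code));
            GUEST_ASSERT(!evmcs_vmwrite(GUEST_RSP, l2_stack_top));

            /* First entry uses VMLAUNCH ... */
            GUEST_ASSERT(!evmcs_vmlaunch());

            /* ... later entries use VMRESUME; per the comment above
             * evmcs_vmresume(), no guest GPR state is established. */
            GUEST_ASSERT(!evmcs_vmresume());
    }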
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index 3acf9a91704c..a4e59e3b4826 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -7,7 +7,7 @@
7 * 7 *
8 */ 8 */
9#ifndef SELFTEST_KVM_UTIL_H 9#ifndef SELFTEST_KVM_UTIL_H
10#define SELFTEST_KVM_UTIL_H 1 10#define SELFTEST_KVM_UTIL_H
11 11
12#include "test_util.h" 12#include "test_util.h"
13 13
@@ -17,12 +17,6 @@
17 17
18#include "sparsebit.h" 18#include "sparsebit.h"
19 19
20/*
21 * Memslots can't cover the gfn starting at this gpa otherwise vCPUs can't be
22 * created. Only applies to VMs using EPT.
23 */
24#define KVM_DEFAULT_IDENTITY_MAP_ADDRESS 0xfffbc000ul
25
26 20
27/* Callers of kvm_util only have an incomplete/opaque description of the 21/* Callers of kvm_util only have an incomplete/opaque description of the
28 * structure kvm_util is using to maintain the state of a VM. 22 * structure kvm_util is using to maintain the state of a VM.
@@ -33,16 +27,23 @@ typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */
33typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */ 27typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */
34 28
35/* Minimum allocated guest virtual and physical addresses */ 29/* Minimum allocated guest virtual and physical addresses */
36#define KVM_UTIL_MIN_VADDR 0x2000 30#define KVM_UTIL_MIN_VADDR 0x2000
37 31
38#define DEFAULT_GUEST_PHY_PAGES 512 32#define DEFAULT_GUEST_PHY_PAGES 512
39#define DEFAULT_GUEST_STACK_VADDR_MIN 0xab6000 33#define DEFAULT_GUEST_STACK_VADDR_MIN 0xab6000
40#define DEFAULT_STACK_PGS 5 34#define DEFAULT_STACK_PGS 5
41 35
42enum vm_guest_mode { 36enum vm_guest_mode {
43 VM_MODE_FLAT48PG, 37 VM_MODE_P52V48_4K,
38 VM_MODE_P52V48_64K,
39 VM_MODE_P40V48_4K,
40 VM_MODE_P40V48_64K,
41 NUM_VM_MODES,
44}; 42};
45 43
44#define vm_guest_mode_string(m) vm_guest_mode_string[m]
45extern const char * const vm_guest_mode_string[];
46
46enum vm_mem_backing_src_type { 47enum vm_mem_backing_src_type {
47 VM_MEM_SRC_ANONYMOUS, 48 VM_MEM_SRC_ANONYMOUS,
48 VM_MEM_SRC_ANONYMOUS_THP, 49 VM_MEM_SRC_ANONYMOUS_THP,
@@ -58,15 +59,15 @@ void kvm_vm_restart(struct kvm_vm *vmp, int perm);
58void kvm_vm_release(struct kvm_vm *vmp); 59void kvm_vm_release(struct kvm_vm *vmp);
59void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log); 60void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log);
60 61
61int kvm_memcmp_hva_gva(void *hva, 62int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
62 struct kvm_vm *vm, const vm_vaddr_t gva, size_t len); 63 size_t len);
63 64
64void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename, 65void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename,
65 uint32_t data_memslot, uint32_t pgd_memslot); 66 uint32_t data_memslot, uint32_t pgd_memslot);
66 67
67void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent); 68void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
68void vcpu_dump(FILE *stream, struct kvm_vm *vm, 69void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid,
69 uint32_t vcpuid, uint8_t indent); 70 uint8_t indent);
70 71
71void vm_create_irqchip(struct kvm_vm *vm); 72void vm_create_irqchip(struct kvm_vm *vm);
72 73
@@ -75,13 +76,14 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
75 uint64_t guest_paddr, uint32_t slot, uint64_t npages, 76 uint64_t guest_paddr, uint32_t slot, uint64_t npages,
76 uint32_t flags); 77 uint32_t flags);
77 78
78void vcpu_ioctl(struct kvm_vm *vm, 79void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long ioctl,
79 uint32_t vcpuid, unsigned long ioctl, void *arg); 80 void *arg);
80void vm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg); 81void vm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg);
81void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags); 82void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
82void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot, int gdt_memslot); 83void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot,
84 int gdt_memslot);
83vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, 85vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
84 uint32_t data_memslot, uint32_t pgd_memslot); 86 uint32_t data_memslot, uint32_t pgd_memslot);
85void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, 87void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
86 size_t size, uint32_t pgd_memslot); 88 size_t size, uint32_t pgd_memslot);
87void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa); 89void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
@@ -93,56 +95,35 @@ struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid);
93void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid); 95void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
94int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid); 96int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
95void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid, 97void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
96 struct kvm_mp_state *mp_state); 98 struct kvm_mp_state *mp_state);
97void vcpu_regs_get(struct kvm_vm *vm, 99void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs);
98 uint32_t vcpuid, struct kvm_regs *regs); 100void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs);
99void vcpu_regs_set(struct kvm_vm *vm,
100 uint32_t vcpuid, struct kvm_regs *regs);
101void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...); 101void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...);
102void vcpu_sregs_get(struct kvm_vm *vm, 102void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid,
103 uint32_t vcpuid, struct kvm_sregs *sregs); 103 struct kvm_sregs *sregs);
104void vcpu_sregs_set(struct kvm_vm *vm, 104void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid,
105 uint32_t vcpuid, struct kvm_sregs *sregs); 105 struct kvm_sregs *sregs);
106int _vcpu_sregs_set(struct kvm_vm *vm, 106int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid,
107 uint32_t vcpuid, struct kvm_sregs *sregs); 107 struct kvm_sregs *sregs);
108void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid, 108void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
109 struct kvm_vcpu_events *events); 109 struct kvm_vcpu_events *events);
110void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid, 110void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
111 struct kvm_vcpu_events *events); 111 struct kvm_vcpu_events *events);
112uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index);
113void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
114 uint64_t msr_value);
115 112
116const char *exit_reason_str(unsigned int exit_reason); 113const char *exit_reason_str(unsigned int exit_reason);
117 114
118void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot); 115void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot);
119void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, 116void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
120 uint32_t pgd_memslot); 117 uint32_t pgd_memslot);
121vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, 118vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
122 vm_paddr_t paddr_min, uint32_t memslot); 119 uint32_t memslot);
123 120vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
124struct kvm_cpuid2 *kvm_get_supported_cpuid(void); 121 vm_paddr_t paddr_min, uint32_t memslot);
125void vcpu_set_cpuid(
126 struct kvm_vm *vm, uint32_t vcpuid, struct kvm_cpuid2 *cpuid);
127
128struct kvm_cpuid_entry2 *
129kvm_get_supported_cpuid_index(uint32_t function, uint32_t index);
130
131static inline struct kvm_cpuid_entry2 *
132kvm_get_supported_cpuid_entry(uint32_t function)
133{
134 return kvm_get_supported_cpuid_index(function, 0);
135}
136 122
137struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_size, 123struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_size,
138 void *guest_code); 124 void *guest_code);
139void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code); 125void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code);
140 126
141typedef void (*vmx_guest_code_t)(vm_vaddr_t vmxon_vaddr,
142 vm_paddr_t vmxon_paddr,
143 vm_vaddr_t vmcs_vaddr,
144 vm_paddr_t vmcs_paddr);
145
146struct kvm_userspace_memory_region * 127struct kvm_userspace_memory_region *
147kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start, 128kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
148 uint64_t end); 129 uint64_t end);
@@ -152,43 +133,49 @@ allocate_kvm_dirty_log(struct kvm_userspace_memory_region *region);
152 133
153int vm_create_device(struct kvm_vm *vm, struct kvm_create_device *cd); 134int vm_create_device(struct kvm_vm *vm, struct kvm_create_device *cd);
154 135
155#define GUEST_PORT_SYNC 0x1000 136#define sync_global_to_guest(vm, g) ({ \
156#define GUEST_PORT_ABORT 0x1001 137 typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
157#define GUEST_PORT_DONE 0x1002 138 memcpy(_p, &(g), sizeof(g)); \
158 139})
159static inline void __exit_to_l0(uint16_t port, uint64_t arg0, uint64_t arg1) 140
160{ 141#define sync_global_from_guest(vm, g) ({ \
161 __asm__ __volatile__("in %[port], %%al" 142 typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
162 : 143 memcpy(&(g), _p, sizeof(g)); \
163 : [port]"d"(port), "D"(arg0), "S"(arg1) 144})
164 : "rax"); 145
165} 146/* ucall implementation types */
166 147typedef enum {
167/* 148 UCALL_PIO,
168 * Allows to pass three arguments to the host: port is 16bit wide, 149 UCALL_MMIO,
169 * arg0 & arg1 are 64bit wide 150} ucall_type_t;
170 */ 151
171#define GUEST_SYNC_ARGS(_port, _arg0, _arg1) \ 152/* Common ucalls */
172 __exit_to_l0(_port, (uint64_t) (_arg0), (uint64_t) (_arg1)) 153enum {
173 154 UCALL_NONE,
174#define GUEST_ASSERT(_condition) do { \ 155 UCALL_SYNC,
175 if (!(_condition)) \ 156 UCALL_ABORT,
176 GUEST_SYNC_ARGS(GUEST_PORT_ABORT, \ 157 UCALL_DONE,
177 "Failed guest assert: " \ 158};
178 #_condition, __LINE__); \
179 } while (0)
180
181#define GUEST_SYNC(stage) GUEST_SYNC_ARGS(GUEST_PORT_SYNC, "hello", stage)
182 159
183#define GUEST_DONE() GUEST_SYNC_ARGS(GUEST_PORT_DONE, 0, 0) 160#define UCALL_MAX_ARGS 6
184 161
185struct guest_args { 162struct ucall {
186 uint64_t arg0; 163 uint64_t cmd;
187 uint64_t arg1; 164 uint64_t args[UCALL_MAX_ARGS];
188 uint16_t port; 165};
189} __attribute__ ((packed));
190 166
191void guest_args_read(struct kvm_vm *vm, uint32_t vcpu_id, 167void ucall_init(struct kvm_vm *vm, ucall_type_t type, void *arg);
192 struct guest_args *args); 168void ucall_uninit(struct kvm_vm *vm);
169void ucall(uint64_t cmd, int nargs, ...);
170uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc);
171
172#define GUEST_SYNC(stage) ucall(UCALL_SYNC, 2, "hello", stage)
173#define GUEST_DONE() ucall(UCALL_DONE, 0)
174#define GUEST_ASSERT(_condition) do { \
175 if (!(_condition)) \
176 ucall(UCALL_ABORT, 2, \
177 "Failed guest assert: " \
178 #_condition, __LINE__); \
179} while (0)
193 180
194#endif /* SELFTEST_KVM_UTIL_H */ 181#endif /* SELFTEST_KVM_UTIL_H */
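The deleted GUEST_PORT_* protocol on the left was x86-only (PIO via the "in" instruction); the replacement ucall API abstracts the transport, with UCALL_PIO for x86 and UCALL_MMIO for aarch64, and carries up to UCALL_MAX_ARGS arguments instead of two. The new sync_global_to_guest()/sync_global_from_guest() macros copy a named global between the host process and its image in guest memory. A hedged host-side sketch of the usual consumer loop, built only from the functions declared above (VCPU_ID and guest_stage are illustrative names):

    static int guest_stage;    /* global also referenced by the guest code */

    static void run_guest(struct kvm_vm *vm)
    {
            struct ucall uc;

            ucall_init(vm, UCALL_PIO, NULL);        /* UCALL_MMIO on aarch64 */
            sync_global_to_guest(vm, guest_stage);  /* push host value to guest */

            for (;;) {
                    vcpu_run(vm, VCPU_ID);
                    switch (get_ucall(vm, VCPU_ID, &uc)) {
                    case UCALL_SYNC:
                            /* From GUEST_SYNC(stage): args[0] = "hello",
                             * args[1] = stage. */
                            guest_stage++;
                            sync_global_to_guest(vm, guest_stage);
                            break;
                    case UCALL_ABORT:
                            TEST_ASSERT(false, "%s at line %lu",
                                        (const char *)uc.args[0], uc.args[1]);
                    case UCALL_DONE:
                            return;
                    }
            }
    }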
diff --git a/tools/testing/selftests/kvm/include/sparsebit.h b/tools/testing/selftests/kvm/include/sparsebit.h
index 54cfeb6568d3..31e030915c1f 100644
--- a/tools/testing/selftests/kvm/include/sparsebit.h
+++ b/tools/testing/selftests/kvm/include/sparsebit.h
@@ -15,8 +15,8 @@
15 * even in the case where most bits are set. 15 * even in the case where most bits are set.
16 */ 16 */
17 17
18#ifndef _TEST_SPARSEBIT_H_ 18#ifndef SELFTEST_KVM_SPARSEBIT_H
19#define _TEST_SPARSEBIT_H_ 19#define SELFTEST_KVM_SPARSEBIT_H
20 20
21#include <stdbool.h> 21#include <stdbool.h>
22#include <stdint.h> 22#include <stdint.h>
@@ -72,4 +72,4 @@ void sparsebit_validate_internal(struct sparsebit *sbit);
72} 72}
73#endif 73#endif
74 74
75#endif /* _TEST_SPARSEBIT_H_ */ 75#endif /* SELFTEST_KVM_SPARSEBIT_H */
diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h
index 73c3933436ec..c7dafe8bd02c 100644
--- a/tools/testing/selftests/kvm/include/test_util.h
+++ b/tools/testing/selftests/kvm/include/test_util.h
@@ -7,8 +7,8 @@
7 * 7 *
8 */ 8 */
9 9
10#ifndef TEST_UTIL_H 10#ifndef SELFTEST_KVM_TEST_UTIL_H
11#define TEST_UTIL_H 1 11#define SELFTEST_KVM_TEST_UTIL_H
12 12
13#include <stdlib.h> 13#include <stdlib.h>
14#include <stdarg.h> 14#include <stdarg.h>
@@ -41,4 +41,4 @@ void test_assert(bool exp, const char *exp_str,
41 #a, #b, #a, (unsigned long) __a, #b, (unsigned long) __b); \ 41 #a, #b, #a, (unsigned long) __a, #b, (unsigned long) __b); \
42} while (0) 42} while (0)
43 43
44#endif /* TEST_UTIL_H */ 44#endif /* SELFTEST_KVM_TEST_UTIL_H */
diff --git a/tools/testing/selftests/kvm/include/x86.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index 42c3596815b8..e2884c2b81ff 100644
--- a/tools/testing/selftests/kvm/include/x86.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * tools/testing/selftests/kvm/include/x86.h 2 * tools/testing/selftests/kvm/include/x86_64/processor.h
3 * 3 *
4 * Copyright (C) 2018, Google LLC. 4 * Copyright (C) 2018, Google LLC.
5 * 5 *
@@ -7,8 +7,8 @@
7 * 7 *
8 */ 8 */
9 9
10#ifndef SELFTEST_KVM_X86_H 10#ifndef SELFTEST_KVM_PROCESSOR_H
11#define SELFTEST_KVM_X86_H 11#define SELFTEST_KVM_PROCESSOR_H
12 12
13#include <assert.h> 13#include <assert.h>
14#include <stdint.h> 14#include <stdint.h>
@@ -305,7 +305,25 @@ static inline unsigned long get_xmm(int n)
305 305
306struct kvm_x86_state; 306struct kvm_x86_state;
307struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid); 307struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid);
308void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *state); 308void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid,
309 struct kvm_x86_state *state);
310
311struct kvm_cpuid2 *kvm_get_supported_cpuid(void);
312void vcpu_set_cpuid(struct kvm_vm *vm, uint32_t vcpuid,
313 struct kvm_cpuid2 *cpuid);
314
315struct kvm_cpuid_entry2 *
316kvm_get_supported_cpuid_index(uint32_t function, uint32_t index);
317
318static inline struct kvm_cpuid_entry2 *
319kvm_get_supported_cpuid_entry(uint32_t function)
320{
321 return kvm_get_supported_cpuid_index(function, 0);
322}
323
324uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index);
325void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
326 uint64_t msr_value);
309 327
310/* 328/*
311 * Basic CPU control in CR0 329 * Basic CPU control in CR0
@@ -1044,4 +1062,4 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
1044#define MSR_VM_IGNNE 0xc0010115 1062#define MSR_VM_IGNNE 0xc0010115
1045#define MSR_VM_HSAVE_PA 0xc0010117 1063#define MSR_VM_HSAVE_PA 0xc0010117
1046 1064
1047#endif /* !SELFTEST_KVM_X86_H */ 1065#endif /* SELFTEST_KVM_PROCESSOR_H */
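The CPUID/MSR helpers deleted from kvm_util.h reappear here because they are x86-specific, leaving kvm_util.h architecture-neutral. A hedged usage sketch (VCPU_ID is an illustrative name; bit 5 of CPUID.1:ECX is the VMX feature bit, matching CPUID_VMX_BIT in vmx.h):

    struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);

    if (!(entry->ecx & (1 << 5)))   /* host KVM does not expose VMX */
            exit(KSFT_SKIP);

    /* Program the vCPU with everything KVM supports. */
    vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());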
diff --git a/tools/testing/selftests/kvm/include/vmx.h b/tools/testing/selftests/kvm/include/x86_64/vmx.h
index b9ffe1024d3a..c9bd935b939c 100644
--- a/tools/testing/selftests/kvm/include/vmx.h
+++ b/tools/testing/selftests/kvm/include/x86_64/vmx.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * tools/testing/selftests/kvm/include/vmx.h 2 * tools/testing/selftests/kvm/include/x86_64/vmx.h
3 * 3 *
4 * Copyright (C) 2018, Google LLC. 4 * Copyright (C) 2018, Google LLC.
5 * 5 *
@@ -11,7 +11,7 @@
11#define SELFTEST_KVM_VMX_H 11#define SELFTEST_KVM_VMX_H
12 12
13#include <stdint.h> 13#include <stdint.h>
14#include "x86.h" 14#include "processor.h"
15 15
16#define CPUID_VMX_BIT 5 16#define CPUID_VMX_BIT 5
17 17
@@ -339,6 +339,8 @@ struct vmx_msr_entry {
339 uint64_t value; 339 uint64_t value;
340} __attribute__ ((aligned(16))); 340} __attribute__ ((aligned(16)));
341 341
342#include "evmcs.h"
343
342static inline int vmxon(uint64_t phys) 344static inline int vmxon(uint64_t phys)
343{ 345{
344 uint8_t ret; 346 uint8_t ret;
@@ -372,6 +374,9 @@ static inline int vmptrld(uint64_t vmcs_pa)
372{ 374{
373 uint8_t ret; 375 uint8_t ret;
374 376
377 if (enable_evmcs)
378 return -1;
379
375 __asm__ __volatile__ ("vmptrld %[pa]; setna %[ret]" 380 __asm__ __volatile__ ("vmptrld %[pa]; setna %[ret]"
376 : [ret]"=rm"(ret) 381 : [ret]"=rm"(ret)
377 : [pa]"m"(vmcs_pa) 382 : [pa]"m"(vmcs_pa)
@@ -385,6 +390,9 @@ static inline int vmptrst(uint64_t *value)
385 uint64_t tmp; 390 uint64_t tmp;
386 uint8_t ret; 391 uint8_t ret;
387 392
393 if (enable_evmcs)
394 return evmcs_vmptrst(value);
395
388 __asm__ __volatile__("vmptrst %[value]; setna %[ret]" 396 __asm__ __volatile__("vmptrst %[value]; setna %[ret]"
389 : [value]"=m"(tmp), [ret]"=rm"(ret) 397 : [value]"=m"(tmp), [ret]"=rm"(ret)
390 : : "cc", "memory"); 398 : : "cc", "memory");
@@ -411,6 +419,9 @@ static inline int vmlaunch(void)
411{ 419{
412 int ret; 420 int ret;
413 421
422 if (enable_evmcs)
423 return evmcs_vmlaunch();
424
414 __asm__ __volatile__("push %%rbp;" 425 __asm__ __volatile__("push %%rbp;"
415 "push %%rcx;" 426 "push %%rcx;"
416 "push %%rdx;" 427 "push %%rdx;"
@@ -443,6 +454,9 @@ static inline int vmresume(void)
443{ 454{
444 int ret; 455 int ret;
445 456
457 if (enable_evmcs)
458 return evmcs_vmresume();
459
446 __asm__ __volatile__("push %%rbp;" 460 __asm__ __volatile__("push %%rbp;"
447 "push %%rcx;" 461 "push %%rcx;"
448 "push %%rdx;" 462 "push %%rdx;"
@@ -482,6 +496,9 @@ static inline int vmread(uint64_t encoding, uint64_t *value)
482 uint64_t tmp; 496 uint64_t tmp;
483 uint8_t ret; 497 uint8_t ret;
484 498
499 if (enable_evmcs)
500 return evmcs_vmread(encoding, value);
501
485 __asm__ __volatile__("vmread %[encoding], %[value]; setna %[ret]" 502 __asm__ __volatile__("vmread %[encoding], %[value]; setna %[ret]"
486 : [value]"=rm"(tmp), [ret]"=rm"(ret) 503 : [value]"=rm"(tmp), [ret]"=rm"(ret)
487 : [encoding]"r"(encoding) 504 : [encoding]"r"(encoding)
@@ -506,6 +523,9 @@ static inline int vmwrite(uint64_t encoding, uint64_t value)
506{ 523{
507 uint8_t ret; 524 uint8_t ret;
508 525
526 if (enable_evmcs)
527 return evmcs_vmwrite(encoding, value);
528
509 __asm__ __volatile__ ("vmwrite %[value], %[encoding]; setna %[ret]" 529 __asm__ __volatile__ ("vmwrite %[value], %[encoding]; setna %[ret]"
510 : [ret]"=rm"(ret) 530 : [ret]"=rm"(ret)
511 : [value]"rm"(value), [encoding]"r"(encoding) 531 : [value]"rm"(value), [encoding]"r"(encoding)
@@ -543,10 +563,19 @@ struct vmx_pages {
543 void *vmwrite_hva; 563 void *vmwrite_hva;
544 uint64_t vmwrite_gpa; 564 uint64_t vmwrite_gpa;
545 void *vmwrite; 565 void *vmwrite;
566
567 void *vp_assist_hva;
568 uint64_t vp_assist_gpa;
569 void *vp_assist;
570
571 void *enlightened_vmcs_hva;
572 uint64_t enlightened_vmcs_gpa;
573 void *enlightened_vmcs;
546}; 574};
547 575
548struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva); 576struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva);
549bool prepare_for_vmx_operation(struct vmx_pages *vmx); 577bool prepare_for_vmx_operation(struct vmx_pages *vmx);
550void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp); 578void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
579bool load_vmcs(struct vmx_pages *vmx);
551 580
552#endif /* !SELFTEST_KVM_VMX_H */ 581#endif /* SELFTEST_KVM_VMX_H */
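With evmcs.h pulled in after struct vmx_msr_entry, every VMX wrapper now consults the enable_evmcs flag (assumed here to be a global defined in evmcs.h) before touching hardware: reads and writes are redirected to the software eVMCS helpers, vmptrld is rejected outright since an enlightened VMCS is never loaded with VMPTRLD, and the new vp_assist/enlightened_vmcs pages in struct vmx_pages back that mode. The load_vmcs() addition gives tests a single call that does the right thing either way. A guest-side sketch under those assumptions (l2_guest_code and l2_stack_top are hypothetical):

    static void l1_setup_sketch(struct vmx_pages *vmx)
    {
            GUEST_ASSERT(prepare_for_vmx_operation(vmx));
            GUEST_ASSERT(load_vmcs(vmx));   /* VMPTRLD path or eVMCS setup */
            prepare_vmcs(vmx, l2_guest_code, l2_stack_top);
            GUEST_ASSERT(!vmlaunch());      /* evmcs_vmlaunch() when enabled */
    }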
diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c
new file mode 100644
index 000000000000..b6022e2f116e
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c
@@ -0,0 +1,311 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * AArch64 code
4 *
5 * Copyright (C) 2018, Red Hat, Inc.
6 */
7
8#define _GNU_SOURCE /* for program_invocation_name */
9
10#include "kvm_util.h"
11#include "../kvm_util_internal.h"
12#include "processor.h"
13
14#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
15#define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN 0xac0000
16
17static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
18{
19 return (v + vm->page_size) & ~(vm->page_size - 1);
20}
21
22static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
23{
24 unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
25 uint64_t mask = (1UL << (vm->va_bits - shift)) - 1;
26
27 return (gva >> shift) & mask;
28}
29
30static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
31{
32 unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift;
33 uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
34
35 TEST_ASSERT(vm->pgtable_levels == 4,
36 "Mode %d does not have 4 page table levels", vm->mode);
37
38 return (gva >> shift) & mask;
39}
40
41static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva)
42{
43 unsigned int shift = (vm->page_shift - 3) + vm->page_shift;
44 uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
45
46 TEST_ASSERT(vm->pgtable_levels >= 3,
47 "Mode %d does not have >= 3 page table levels", vm->mode);
48
49 return (gva >> shift) & mask;
50}
51
52static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva)
53{
54 uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
55 return (gva >> vm->page_shift) & mask;
56}
57
58static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
59{
60 uint64_t mask = ((1UL << (vm->va_bits - vm->page_shift)) - 1) << vm->page_shift;
61 return entry & mask;
62}
63
64static uint64_t ptrs_per_pgd(struct kvm_vm *vm)
65{
66 unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
67 return 1 << (vm->va_bits - shift);
68}
69
70static uint64_t ptrs_per_pte(struct kvm_vm *vm)
71{
72 return 1 << (vm->page_shift - 3);
73}
74
75void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot)
76{
77 int rc;
78
79 if (!vm->pgd_created) {
80 vm_paddr_t paddr = vm_phy_pages_alloc(vm,
81 page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size,
82 KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
83 vm->pgd = paddr;
84 vm->pgd_created = true;
85 }
86}
87
88void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
89 uint32_t pgd_memslot, uint64_t flags)
90{
91 uint8_t attr_idx = flags & 7;
92 uint64_t *ptep;
93
94 TEST_ASSERT((vaddr % vm->page_size) == 0,
95 "Virtual address not on page boundary,\n"
96 " vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
97 TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
98 (vaddr >> vm->page_shift)),
99 "Invalid virtual address, vaddr: 0x%lx", vaddr);
100 TEST_ASSERT((paddr % vm->page_size) == 0,
101 "Physical address not on page boundary,\n"
102 " paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
103 TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
104 "Physical address beyond beyond maximum supported,\n"
105 " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
106 paddr, vm->max_gfn, vm->page_size);
107
108 ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;
109 if (!*ptep) {
110 *ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
111 *ptep |= 3;
112 }
113
114 switch (vm->pgtable_levels) {
115 case 4:
116 ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
117 if (!*ptep) {
118 *ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
119 *ptep |= 3;
120 }
121 /* fall through */
122 case 3:
123 ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
124 if (!*ptep) {
125 *ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
126 *ptep |= 3;
127 }
128 /* fall through */
129 case 2:
130 ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
131 break;
132 default:
133 TEST_ASSERT(false, "Page table levels must be 2, 3, or 4");
134 }
135
136 *ptep = paddr | 3;
137 *ptep |= (attr_idx << 2) | (1 << 10) /* Access Flag */;
138}
139
140void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
141 uint32_t pgd_memslot)
142{
143 uint64_t attr_idx = 4; /* NORMAL (See DEFAULT_MAIR_EL1) */
144
145 _virt_pg_map(vm, vaddr, paddr, pgd_memslot, attr_idx);
146}
147
148vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
149{
150 uint64_t *ptep;
151
152 if (!vm->pgd_created)
153 goto unmapped_gva;
154
155 ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8;
156 if (!ptep)
157 goto unmapped_gva;
158
159 switch (vm->pgtable_levels) {
160 case 4:
161 ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;
162 if (!ptep)
163 goto unmapped_gva;
164 /* fall through */
165 case 3:
166 ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;
167 if (!ptep)
168 goto unmapped_gva;
169 /* fall through */
170 case 2:
171 ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;
172 if (!ptep)
173 goto unmapped_gva;
174 break;
175 default:
176 TEST_ASSERT(false, "Page table levels must be 2, 3, or 4");
177 }
178
179 return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
180
181unmapped_gva:
182 TEST_ASSERT(false, "No mapping for vm virtual address, "
183 "gva: 0x%lx", gva);
184}
185
186static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
187{
188#ifdef DEBUG_VM
189 static const char * const type[] = { "", "pud", "pmd", "pte" };
190 uint64_t pte, *ptep;
191
192 if (level == 4)
193 return;
194
195 for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
196 ptep = addr_gpa2hva(vm, pte);
197 if (!*ptep)
198 continue;
199 printf("%*s%s: %lx: %lx at %p\n", indent, "", type[level], pte, *ptep, ptep);
200 pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level + 1);
201 }
202#endif
203}
204
205void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
206{
207 int level = 4 - (vm->pgtable_levels - 1);
208 uint64_t pgd, *ptep;
209
210 if (!vm->pgd_created)
211 return;
212
213 for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pgd(vm) * 8; pgd += 8) {
214 ptep = addr_gpa2hva(vm, pgd);
215 if (!*ptep)
216 continue;
217 printf("%*spgd: %lx: %lx at %p\n", indent, "", pgd, *ptep, ptep);
218 pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level);
219 }
220}
221
222struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
223 void *guest_code)
224{
225 uint64_t ptrs_per_4k_pte = 512;
226 uint64_t extra_pg_pages = (extra_mem_pages / ptrs_per_4k_pte) * 2;
227 struct kvm_vm *vm;
228
229 vm = vm_create(VM_MODE_P52V48_4K, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
230
231 kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
232 vm_vcpu_add_default(vm, vcpuid, guest_code);
233
234 return vm;
235}
236
237void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
238{
239 size_t stack_size = vm->page_size == 4096 ?
240 DEFAULT_STACK_PGS * vm->page_size :
241 vm->page_size;
242 uint64_t stack_vaddr = vm_vaddr_alloc(vm, stack_size,
243 DEFAULT_ARM64_GUEST_STACK_VADDR_MIN, 0, 0);
244
245 vm_vcpu_add(vm, vcpuid, 0, 0);
246
247 set_reg(vm, vcpuid, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);
248 set_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
249}
250
251void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot, int gdt_memslot)
252{
253 struct kvm_vcpu_init init;
254 uint64_t sctlr_el1, tcr_el1;
255
256 memset(&init, 0, sizeof(init));
257 init.target = KVM_ARM_TARGET_GENERIC_V8;
258 vcpu_ioctl(vm, vcpuid, KVM_ARM_VCPU_INIT, &init);
259
260 /*
261 * Enable FP/ASIMD to avoid trapping when accessing Q0-Q15
262 * registers, which the variable argument list macros do.
263 */
264 set_reg(vm, vcpuid, ARM64_SYS_REG(CPACR_EL1), 3 << 20);
265
266 get_reg(vm, vcpuid, ARM64_SYS_REG(SCTLR_EL1), &sctlr_el1);
267 get_reg(vm, vcpuid, ARM64_SYS_REG(TCR_EL1), &tcr_el1);
268
269 switch (vm->mode) {
270 case VM_MODE_P52V48_4K:
271 tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
272 tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
273 break;
274 case VM_MODE_P52V48_64K:
275 tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
276 tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
277 break;
278 case VM_MODE_P40V48_4K:
279 tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
280 tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
281 break;
282 case VM_MODE_P40V48_64K:
283 tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
284 tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
285 break;
286 default:
287 TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", vm->mode);
288 }
289
290 sctlr_el1 |= (1 << 0) | (1 << 2) | (1 << 12) /* M | C | I */;
291 /* TCR_EL1 |= IRGN0:WBWA | ORGN0:WBWA | SH0:Inner-Shareable */;
292 tcr_el1 |= (1 << 8) | (1 << 10) | (3 << 12);
293 tcr_el1 |= (64 - vm->va_bits) /* T0SZ */;
294
295 set_reg(vm, vcpuid, ARM64_SYS_REG(SCTLR_EL1), sctlr_el1);
296 set_reg(vm, vcpuid, ARM64_SYS_REG(TCR_EL1), tcr_el1);
297 set_reg(vm, vcpuid, ARM64_SYS_REG(MAIR_EL1), DEFAULT_MAIR_EL1);
298 set_reg(vm, vcpuid, ARM64_SYS_REG(TTBR0_EL1), vm->pgd);
299}
300
301void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
302{
303 uint64_t pstate, pc;
304
305 get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pstate), &pstate);
306 get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), &pc);
307
308 fprintf(stream, "%*spstate: 0x%.16llx pc: 0x%.16llx\n",
309 indent, "", pstate, pc);
310
311}
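The index helpers in this file all follow one rule: each non-top level translates page_shift - 3 address bits (one page of 8-byte descriptors), and the top level gets whatever va_bits leaves over. A worked example for VM_MODE_P52V48_4K (page_shift = 12, pgtable_levels = 4, va_bits = 48):

    /* pgd shift = (4 - 1) * (12 - 3) + 12 = 39; mask = (1 << 9) - 1 = 0x1ff */
    uint64_t gva = 0x0000008040001000UL;    /* = 2^39 + 2^30 + 2^12 */

    /* pgd_index(gva) = (gva >> 39) & 0x1ff = 1 */
    /* pud_index(gva) = (gva >> 30) & 0x1ff = 1 */
    /* pmd_index(gva) = (gva >> 21) & 0x1ff = 0 */
    /* pte_index(gva) = (gva >> 12) & 0x1ff = 1 */

For the 64K modes, pgtable_levels drops to 3 and each level resolves 13 bits instead of 9; vcpu_setup() then derives TG0, IPS and T0SZ in TCR_EL1 from the same page_shift/pa_bits/va_bits triple, so these helpers and the hardware walker agree on the layout.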
diff --git a/tools/testing/selftests/kvm/lib/assert.c b/tools/testing/selftests/kvm/lib/assert.c
index cd01144d27c8..6398efe67885 100644
--- a/tools/testing/selftests/kvm/lib/assert.c
+++ b/tools/testing/selftests/kvm/lib/assert.c
@@ -13,7 +13,7 @@
13#include <execinfo.h> 13#include <execinfo.h>
14#include <sys/syscall.h> 14#include <sys/syscall.h>
15 15
16#include "../../kselftest.h" 16#include "kselftest.h"
17 17
18/* Dumps the current stack trace to stderr. */ 18/* Dumps the current stack trace to stderr. */
19static void __attribute__((noinline)) test_dump_stack(void); 19static void __attribute__((noinline)) test_dump_stack(void);
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 6fd8c089cafc..8c06da4f03db 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -16,10 +16,8 @@
16#include <sys/stat.h> 16#include <sys/stat.h>
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18 18
19#define KVM_DEV_PATH "/dev/kvm"
20
21#define KVM_UTIL_PGS_PER_HUGEPG 512 19#define KVM_UTIL_PGS_PER_HUGEPG 512
22#define KVM_UTIL_MIN_PADDR 0x2000 20#define KVM_UTIL_MIN_PFN 2
23 21
24/* Aligns x up to the next multiple of size. Size must be a power of 2. */ 22/* Aligns x up to the next multiple of size. Size must be a power of 2. */
25static void *align(void *x, size_t size) 23static void *align(void *x, size_t size)
@@ -30,7 +28,8 @@ static void *align(void *x, size_t size)
30 return (void *) (((size_t) x + mask) & ~mask); 28 return (void *) (((size_t) x + mask) & ~mask);
31} 29}
32 30
33/* Capability 31/*
32 * Capability
34 * 33 *
35 * Input Args: 34 * Input Args:
36 * cap - Capability 35 * cap - Capability
@@ -92,16 +91,23 @@ static void vm_open(struct kvm_vm *vm, int perm)
92 if (vm->kvm_fd < 0) 91 if (vm->kvm_fd < 0)
93 exit(KSFT_SKIP); 92 exit(KSFT_SKIP);
94 93
95 /* Create VM. */
96 vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, NULL); 94 vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, NULL);
97 TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, " 95 TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, "
98 "rc: %i errno: %i", vm->fd, errno); 96 "rc: %i errno: %i", vm->fd, errno);
99} 97}
100 98
101/* VM Create 99const char * const vm_guest_mode_string[] = {
100 "PA-bits:52, VA-bits:48, 4K pages",
101 "PA-bits:52, VA-bits:48, 64K pages",
102 "PA-bits:40, VA-bits:48, 4K pages",
103 "PA-bits:40, VA-bits:48, 64K pages",
104};
105
106/*
107 * VM Create
102 * 108 *
103 * Input Args: 109 * Input Args:
104 * mode - VM Mode (e.g. VM_MODE_FLAT48PG) 110 * mode - VM Mode (e.g. VM_MODE_P52V48_4K)
105 * phy_pages - Physical memory pages 111 * phy_pages - Physical memory pages
106 * perm - permission 112 * perm - permission
107 * 113 *
@@ -110,7 +116,7 @@ static void vm_open(struct kvm_vm *vm, int perm)
110 * Return: 116 * Return:
111 * Pointer to opaque structure that describes the created VM. 117 * Pointer to opaque structure that describes the created VM.
112 * 118 *
113 * Creates a VM with the mode specified by mode (e.g. VM_MODE_FLAT48PG). 119 * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K).
114 * When phy_pages is non-zero, a memory region of phy_pages physical pages 120 * When phy_pages is non-zero, a memory region of phy_pages physical pages
115 * is created and mapped starting at guest physical address 0. The file 121 * is created and mapped starting at guest physical address 0. The file
116 * descriptor to control the created VM is created with the permissions 122 * descriptor to control the created VM is created with the permissions
@@ -121,7 +127,6 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
121 struct kvm_vm *vm; 127 struct kvm_vm *vm;
122 int kvm_fd; 128 int kvm_fd;
123 129
124 /* Allocate memory. */
125 vm = calloc(1, sizeof(*vm)); 130 vm = calloc(1, sizeof(*vm));
126 TEST_ASSERT(vm != NULL, "Insufficent Memory"); 131 TEST_ASSERT(vm != NULL, "Insufficent Memory");
127 132
@@ -130,26 +135,48 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
130 135
131 /* Setup mode specific traits. */ 136 /* Setup mode specific traits. */
132 switch (vm->mode) { 137 switch (vm->mode) {
133 case VM_MODE_FLAT48PG: 138 case VM_MODE_P52V48_4K:
139 vm->pgtable_levels = 4;
134 vm->page_size = 0x1000; 140 vm->page_size = 0x1000;
135 vm->page_shift = 12; 141 vm->page_shift = 12;
136 142 vm->va_bits = 48;
137 /* Limit to 48-bit canonical virtual addresses. */ 143 break;
138 vm->vpages_valid = sparsebit_alloc(); 144 case VM_MODE_P52V48_64K:
139 sparsebit_set_num(vm->vpages_valid, 145 vm->pgtable_levels = 3;
140 0, (1ULL << (48 - 1)) >> vm->page_shift); 146 vm->pa_bits = 52;
141 sparsebit_set_num(vm->vpages_valid, 147 vm->page_size = 0x10000;
142 (~((1ULL << (48 - 1)) - 1)) >> vm->page_shift, 148 vm->page_shift = 16;
143 (1ULL << (48 - 1)) >> vm->page_shift); 149 vm->va_bits = 48;
144 150 break;
145 /* Limit physical addresses to 52-bits. */ 151 case VM_MODE_P40V48_4K:
146 vm->max_gfn = ((1ULL << 52) >> vm->page_shift) - 1; 152 vm->pgtable_levels = 4;
153 vm->pa_bits = 40;
154 vm->va_bits = 48;
155 vm->page_size = 0x1000;
156 vm->page_shift = 12;
157 break;
158 case VM_MODE_P40V48_64K:
159 vm->pgtable_levels = 3;
160 vm->pa_bits = 40;
161 vm->va_bits = 48;
162 vm->page_size = 0x10000;
163 vm->page_shift = 16;
147 break; 164 break;
148
149 default: 165 default:
150 TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", mode); 166 TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", mode);
151 } 167 }
152 168
169 /* Limit to VA-bit canonical virtual addresses. */
170 vm->vpages_valid = sparsebit_alloc();
171 sparsebit_set_num(vm->vpages_valid,
172 0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
173 sparsebit_set_num(vm->vpages_valid,
174 (~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift,
175 (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
176
177 /* Limit physical addresses to PA-bits. */
178 vm->max_gfn = ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;
179
153 /* Allocate and setup memory for guest. */ 180 /* Allocate and setup memory for guest. */
154 vm->vpages_mapped = sparsebit_alloc(); 181 vm->vpages_mapped = sparsebit_alloc();
155 if (phy_pages != 0) 182 if (phy_pages != 0)
@@ -159,7 +186,8 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
159 return vm; 186 return vm;
160} 187}
161 188
162/* VM Restart 189/*
190 * VM Restart
163 * 191 *
164 * Input Args: 192 * Input Args:
165 * vm - VM that has been released before 193 * vm - VM that has been released before
@@ -186,7 +214,8 @@ void kvm_vm_restart(struct kvm_vm *vmp, int perm)
186 " rc: %i errno: %i\n" 214 " rc: %i errno: %i\n"
187 " slot: %u flags: 0x%x\n" 215 " slot: %u flags: 0x%x\n"
188 " guest_phys_addr: 0x%lx size: 0x%lx", 216 " guest_phys_addr: 0x%lx size: 0x%lx",
189 ret, errno, region->region.slot, region->region.flags, 217 ret, errno, region->region.slot,
218 region->region.flags,
190 region->region.guest_phys_addr, 219 region->region.guest_phys_addr,
191 region->region.memory_size); 220 region->region.memory_size);
192 } 221 }
@@ -202,7 +231,8 @@ void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
202 strerror(-ret)); 231 strerror(-ret));
203} 232}
204 233
205/* Userspace Memory Region Find 234/*
235 * Userspace Memory Region Find
206 * 236 *
207 * Input Args: 237 * Input Args:
208 * vm - Virtual Machine 238 * vm - Virtual Machine
@@ -220,8 +250,8 @@ void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
220 * of the regions is returned. Null is returned only when no overlapping 250 * of the regions is returned. Null is returned only when no overlapping
221 * region exists. 251 * region exists.
222 */ 252 */
223static struct userspace_mem_region *userspace_mem_region_find( 253static struct userspace_mem_region *
224 struct kvm_vm *vm, uint64_t start, uint64_t end) 254userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
225{ 255{
226 struct userspace_mem_region *region; 256 struct userspace_mem_region *region;
227 257
@@ -237,7 +267,8 @@ static struct userspace_mem_region *userspace_mem_region_find(
237 return NULL; 267 return NULL;
238} 268}
239 269
240/* KVM Userspace Memory Region Find 270/*
271 * KVM Userspace Memory Region Find
241 * 272 *
242 * Input Args: 273 * Input Args:
243 * vm - Virtual Machine 274 * vm - Virtual Machine
@@ -265,7 +296,8 @@ kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
265 return &region->region; 296 return &region->region;
266} 297}
267 298
268/* VCPU Find 299/*
300 * VCPU Find
269 * 301 *
270 * Input Args: 302 * Input Args:
271 * vm - Virtual Machine 303 * vm - Virtual Machine
@@ -280,8 +312,7 @@ kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
280 * returns a pointer to it. Returns NULL if the VM doesn't contain a VCPU 312 * returns a pointer to it. Returns NULL if the VM doesn't contain a VCPU
281 * for the specified vcpuid. 313 * for the specified vcpuid.
282 */ 314 */
283struct vcpu *vcpu_find(struct kvm_vm *vm, 315struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid)
284 uint32_t vcpuid)
285{ 316{
286 struct vcpu *vcpup; 317 struct vcpu *vcpup;
287 318
@@ -293,7 +324,8 @@ struct vcpu *vcpu_find(struct kvm_vm *vm,
293 return NULL; 324 return NULL;
294} 325}
295 326
296/* VM VCPU Remove 327/*
328 * VM VCPU Remove
297 * 329 *
298 * Input Args: 330 * Input Args:
299 * vm - Virtual Machine 331 * vm - Virtual Machine
@@ -330,11 +362,9 @@ void kvm_vm_release(struct kvm_vm *vmp)
330{ 362{
331 int ret; 363 int ret;
332 364
333 /* Free VCPUs. */
334 while (vmp->vcpu_head) 365 while (vmp->vcpu_head)
335 vm_vcpu_rm(vmp, vmp->vcpu_head->id); 366 vm_vcpu_rm(vmp, vmp->vcpu_head->id);
336 367
337 /* Close file descriptor for the VM. */
338 ret = close(vmp->fd); 368 ret = close(vmp->fd);
339 TEST_ASSERT(ret == 0, "Close of vm fd failed,\n" 369 TEST_ASSERT(ret == 0, "Close of vm fd failed,\n"
340 " vmp->fd: %i rc: %i errno: %i", vmp->fd, ret, errno); 370 " vmp->fd: %i rc: %i errno: %i", vmp->fd, ret, errno);
@@ -344,7 +374,8 @@ void kvm_vm_release(struct kvm_vm *vmp)
344 " vmp->kvm_fd: %i rc: %i errno: %i", vmp->kvm_fd, ret, errno); 374 " vmp->kvm_fd: %i rc: %i errno: %i", vmp->kvm_fd, ret, errno);
345} 375}
346 376
347/* Destroys and frees the VM pointed to by vmp. 377/*
378 * Destroys and frees the VM pointed to by vmp.
348 */ 379 */
349void kvm_vm_free(struct kvm_vm *vmp) 380void kvm_vm_free(struct kvm_vm *vmp)
350{ 381{
@@ -383,7 +414,8 @@ void kvm_vm_free(struct kvm_vm *vmp)
383 free(vmp); 414 free(vmp);
384} 415}
385 416
386/* Memory Compare, host virtual to guest virtual 417/*
418 * Memory Compare, host virtual to guest virtual
387 * 419 *
388 * Input Args: 420 * Input Args:
389 * hva - Starting host virtual address 421 * hva - Starting host virtual address
@@ -405,23 +437,25 @@ void kvm_vm_free(struct kvm_vm *vmp)
405 * a length of len, to the guest bytes starting at the guest virtual 437 * a length of len, to the guest bytes starting at the guest virtual
406 * address given by gva. 438 * address given by gva.
407 */ 439 */
408int kvm_memcmp_hva_gva(void *hva, 440int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
409 struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
410{ 441{
411 size_t amt; 442 size_t amt;
412 443
413 /* Compare a batch of bytes until either a match is found 444 /*
445 * Compare a batch of bytes until either a match is found
414 * or all the bytes have been compared. 446 * or all the bytes have been compared.
415 */ 447 */
416 for (uintptr_t offset = 0; offset < len; offset += amt) { 448 for (uintptr_t offset = 0; offset < len; offset += amt) {
417 uintptr_t ptr1 = (uintptr_t)hva + offset; 449 uintptr_t ptr1 = (uintptr_t)hva + offset;
418 450
419 /* Determine host address for guest virtual address 451 /*
452 * Determine host address for guest virtual address
420 * at offset. 453 * at offset.
421 */ 454 */
422 uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset); 455 uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset);
423 456
424 /* Determine amount to compare on this pass. 457 /*
458 * Determine amount to compare on this pass.
425		 * Don't allow the comparison to cross a page boundary. 458		 * Determine amount to compare on this pass.
426		 */ 459		 * Don't allow the comparison to cross a page boundary.
427 amt = len - offset; 461 amt = len - offset;
@@ -433,7 +467,8 @@ int kvm_memcmp_hva_gva(void *hva,
433 assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift)); 467 assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift));
434 assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift)); 468 assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift));
435 469
436 /* Perform the comparison. If there is a difference 470 /*
471 * Perform the comparison. If there is a difference
437 * return that result to the caller, otherwise need 472 * return that result to the caller, otherwise need
438 * to continue on looking for a mismatch. 473 * to continue on looking for a mismatch.
439 */ 474 */
@@ -442,109 +477,15 @@ int kvm_memcmp_hva_gva(void *hva,
442 return ret; 477 return ret;
443 } 478 }
444 479
445 /* No mismatch found. Let the caller know the two memory 480 /*
481 * No mismatch found. Let the caller know the two memory
446 * areas are equal. 482 * areas are equal.
447 */ 483 */
448 return 0; 484 return 0;
449} 485}
450 486
451/* Allocate an instance of struct kvm_cpuid2 487/*
452 * 488 * VM Userspace Memory Region Add
453 * Input Args: None
454 *
455 * Output Args: None
456 *
457 * Return: A pointer to the allocated struct. The caller is responsible
458 * for freeing this struct.
459 *
460 * Since kvm_cpuid2 uses a 0-length array to allow the size of the
461 * array to be decided at allocation time, allocation is slightly
462 * complicated. This function uses a reasonable default length for
463 * the array and performs the appropriate allocation.
464 */
465static struct kvm_cpuid2 *allocate_kvm_cpuid2(void)
466{
467 struct kvm_cpuid2 *cpuid;
468 int nent = 100;
469 size_t size;
470
471 size = sizeof(*cpuid);
472 size += nent * sizeof(struct kvm_cpuid_entry2);
473 cpuid = malloc(size);
474 if (!cpuid) {
475 perror("malloc");
476 abort();
477 }
478
479 cpuid->nent = nent;
480
481 return cpuid;
482}
483
484/* KVM Supported CPUID Get
485 *
486 * Input Args: None
487 *
488 * Output Args:
489 *
490 * Return: The supported KVM CPUID
491 *
492 * Get the guest CPUID supported by KVM.
493 */
494struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
495{
496 static struct kvm_cpuid2 *cpuid;
497 int ret;
498 int kvm_fd;
499
500 if (cpuid)
501 return cpuid;
502
503 cpuid = allocate_kvm_cpuid2();
504 kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
505 if (kvm_fd < 0)
506 exit(KSFT_SKIP);
507
508 ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);
509 TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_CPUID failed %d %d\n",
510 ret, errno);
511
512 close(kvm_fd);
513 return cpuid;
514}
515
516/* Locate a cpuid entry.
517 *
518 * Input Args:
519 * cpuid: The cpuid.
520 * function: The function of the cpuid entry to find.
521 *
522 * Output Args: None
523 *
524 * Return: A pointer to the cpuid entry. Never returns NULL.
525 */
526struct kvm_cpuid_entry2 *
527kvm_get_supported_cpuid_index(uint32_t function, uint32_t index)
528{
529 struct kvm_cpuid2 *cpuid;
530 struct kvm_cpuid_entry2 *entry = NULL;
531 int i;
532
533 cpuid = kvm_get_supported_cpuid();
534 for (i = 0; i < cpuid->nent; i++) {
535 if (cpuid->entries[i].function == function &&
536 cpuid->entries[i].index == index) {
537 entry = &cpuid->entries[i];
538 break;
539 }
540 }
541
542 TEST_ASSERT(entry, "Guest CPUID entry not found: (EAX=%x, ECX=%x).",
543 function, index);
544 return entry;
545}
546
547/* VM Userspace Memory Region Add
548 * 489 *
549 * Input Args: 490 * Input Args:
550 * vm - Virtual Machine 491 * vm - Virtual Machine
@@ -586,7 +527,8 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
586 " vm->max_gfn: 0x%lx vm->page_size: 0x%x", 527 " vm->max_gfn: 0x%lx vm->page_size: 0x%x",
587 guest_paddr, npages, vm->max_gfn, vm->page_size); 528 guest_paddr, npages, vm->max_gfn, vm->page_size);
588 529
589 /* Confirm a mem region with an overlapping address doesn't 530 /*
531 * Confirm a mem region with an overlapping address doesn't
590 * already exist. 532 * already exist.
591 */ 533 */
592 region = (struct userspace_mem_region *) userspace_mem_region_find( 534 region = (struct userspace_mem_region *) userspace_mem_region_find(
@@ -677,7 +619,8 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
677 vm->userspace_mem_region_head = region; 619 vm->userspace_mem_region_head = region;
678} 620}
679 621
680/* Memslot to region 622/*
623 * Memslot to region
681 * 624 *
682 * Input Args: 625 * Input Args:
683 * vm - Virtual Machine 626 * vm - Virtual Machine
@@ -691,8 +634,8 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
691 * on error (e.g. currently no memory region using memslot as a KVM 634 * on error (e.g. currently no memory region using memslot as a KVM
692 * memory slot ID). 635 * memory slot ID).
693 */ 636 */
694static struct userspace_mem_region *memslot2region(struct kvm_vm *vm, 637static struct userspace_mem_region *
695 uint32_t memslot) 638memslot2region(struct kvm_vm *vm, uint32_t memslot)
696{ 639{
697 struct userspace_mem_region *region; 640 struct userspace_mem_region *region;
698 641
@@ -712,7 +655,8 @@ static struct userspace_mem_region *memslot2region(struct kvm_vm *vm,
712 return region; 655 return region;
713} 656}
714 657
715/* VM Memory Region Flags Set 658/*
659 * VM Memory Region Flags Set
716 * 660 *
717 * Input Args: 661 * Input Args:
718 * vm - Virtual Machine 662 * vm - Virtual Machine
@@ -730,7 +674,6 @@ void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
730 int ret; 674 int ret;
731 struct userspace_mem_region *region; 675 struct userspace_mem_region *region;
732 676
733 /* Locate memory region. */
734 region = memslot2region(vm, slot); 677 region = memslot2region(vm, slot);
735 678
736 region->region.flags = flags; 679 region->region.flags = flags;
@@ -742,7 +685,8 @@ void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
742 ret, errno, slot, flags); 685 ret, errno, slot, flags);
743} 686}
744 687
745/* VCPU mmap Size 688/*
689 * VCPU mmap Size
746 * 690 *
747 * Input Args: None 691 * Input Args: None
748 * 692 *
@@ -772,7 +716,8 @@ static int vcpu_mmap_sz(void)
772 return ret; 716 return ret;
773} 717}
774 718
775/* VM VCPU Add 719/*
720 * VM VCPU Add
776 * 721 *
777 * Input Args: 722 * Input Args:
778 * vm - Virtual Machine 723 * vm - Virtual Machine
@@ -785,7 +730,8 @@ static int vcpu_mmap_sz(void)
785 * Creates a virtual CPU with the ID given by vcpuid and adds it 730 * Creates a virtual CPU with the ID given by vcpuid and adds it
786 * to the VM specified by vm. 731 * to the VM specified by vm.
787 */ 732 */
788void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot, int gdt_memslot) 733void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot,
734 int gdt_memslot)
789{ 735{
790 struct vcpu *vcpu; 736 struct vcpu *vcpu;
791 737
@@ -823,7 +769,8 @@ void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot, int gdt_me
823 vcpu_setup(vm, vcpuid, pgd_memslot, gdt_memslot); 769 vcpu_setup(vm, vcpuid, pgd_memslot, gdt_memslot);
824} 770}
825 771
826/* VM Virtual Address Unused Gap 772/*
773 * VM Virtual Address Unused Gap
827 * 774 *
828 * Input Args: 775 * Input Args:
829 * vm - Virtual Machine 776 * vm - Virtual Machine
@@ -843,14 +790,14 @@ void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot, int gdt_me
843 * sz unallocated bytes >= vaddr_min is available. 790 * sz unallocated bytes >= vaddr_min is available.
844 */ 791 */
845static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, 792static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
846 vm_vaddr_t vaddr_min) 793 vm_vaddr_t vaddr_min)
847{ 794{
848 uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift; 795 uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift;
849 796
850 /* Determine lowest permitted virtual page index. */ 797 /* Determine lowest permitted virtual page index. */
851 uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift; 798 uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift;
852 if ((pgidx_start * vm->page_size) < vaddr_min) 799 if ((pgidx_start * vm->page_size) < vaddr_min)
853 goto no_va_found; 800 goto no_va_found;
854 801
855 /* Loop over section with enough valid virtual page indexes. */ 802 /* Loop over section with enough valid virtual page indexes. */
856 if (!sparsebit_is_set_num(vm->vpages_valid, 803 if (!sparsebit_is_set_num(vm->vpages_valid,
@@ -909,7 +856,8 @@ va_found:
909 return pgidx_start * vm->page_size; 856 return pgidx_start * vm->page_size;
910} 857}
911 858
912/* VM Virtual Address Allocate 859/*
860 * VM Virtual Address Allocate
913 * 861 *
914 * Input Args: 862 * Input Args:
915 * vm - Virtual Machine 863 * vm - Virtual Machine
@@ -930,13 +878,14 @@ va_found:
930 * a page. 878 * a page.
931 */ 879 */
932vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, 880vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
933 uint32_t data_memslot, uint32_t pgd_memslot) 881 uint32_t data_memslot, uint32_t pgd_memslot)
934{ 882{
935 uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0); 883 uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
936 884
937 virt_pgd_alloc(vm, pgd_memslot); 885 virt_pgd_alloc(vm, pgd_memslot);
938 886
939 /* Find an unused range of virtual page addresses of at least 887 /*
888 * Find an unused range of virtual page addresses of at least
940 * pages in length. 889 * pages in length.
941 */ 890 */
942 vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min); 891 vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min);
@@ -946,7 +895,8 @@ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
946 pages--, vaddr += vm->page_size) { 895 pages--, vaddr += vm->page_size) {
947 vm_paddr_t paddr; 896 vm_paddr_t paddr;
948 897
949 paddr = vm_phy_page_alloc(vm, KVM_UTIL_MIN_PADDR, data_memslot); 898 paddr = vm_phy_page_alloc(vm,
899 KVM_UTIL_MIN_PFN * vm->page_size, data_memslot);
950 900
951 virt_pg_map(vm, vaddr, paddr, pgd_memslot); 901 virt_pg_map(vm, vaddr, paddr, pgd_memslot);
952 902
@@ -990,7 +940,8 @@ void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
990 } 940 }
991} 941}
992 942
993/* Address VM Physical to Host Virtual 943/*
944 * Address VM Physical to Host Virtual
994 * 945 *
995 * Input Args: 946 * Input Args:
996 * vm - Virtual Machine 947 * vm - Virtual Machine
@@ -1022,7 +973,8 @@ void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
1022 return NULL; 973 return NULL;
1023} 974}
1024 975
1025/* Address Host Virtual to VM Physical 976/*
977 * Address Host Virtual to VM Physical
1026 * 978 *
1027 * Input Args: 979 * Input Args:
1028 * vm - Virtual Machine 980 * vm - Virtual Machine
@@ -1056,7 +1008,8 @@ vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
1056 return -1; 1008 return -1;
1057} 1009}
1058 1010
1059/* VM Create IRQ Chip 1011/*
1012 * VM Create IRQ Chip
1060 * 1013 *
1061 * Input Args: 1014 * Input Args:
1062 * vm - Virtual Machine 1015 * vm - Virtual Machine
@@ -1078,7 +1031,8 @@ void vm_create_irqchip(struct kvm_vm *vm)
1078 vm->has_irqchip = true; 1031 vm->has_irqchip = true;
1079} 1032}
1080 1033
1081/* VM VCPU State 1034/*
1035 * VM VCPU State
1082 * 1036 *
1083 * Input Args: 1037 * Input Args:
1084 * vm - Virtual Machine 1038 * vm - Virtual Machine
@@ -1100,7 +1054,8 @@ struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid)
1100 return vcpu->state; 1054 return vcpu->state;
1101} 1055}
1102 1056
1103/* VM VCPU Run 1057/*
1058 * VM VCPU Run
1104 * 1059 *
1105 * Input Args: 1060 * Input Args:
1106 * vm - Virtual Machine 1061 * vm - Virtual Machine
@@ -1126,13 +1081,14 @@ int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
1126 int rc; 1081 int rc;
1127 1082
1128 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); 1083 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1129 do { 1084 do {
1130 rc = ioctl(vcpu->fd, KVM_RUN, NULL); 1085 rc = ioctl(vcpu->fd, KVM_RUN, NULL);
1131 } while (rc == -1 && errno == EINTR); 1086 } while (rc == -1 && errno == EINTR);
1132 return rc; 1087 return rc;
1133} 1088}
1134 1089
1135/* VM VCPU Set MP State 1090/*
1091 * VM VCPU Set MP State
1136 * 1092 *
1137 * Input Args: 1093 * Input Args:
1138 * vm - Virtual Machine 1094 * vm - Virtual Machine
@@ -1147,7 +1103,7 @@ int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
1147 * by mp_state. 1103 * by mp_state.
1148 */ 1104 */
1149void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid, 1105void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
1150 struct kvm_mp_state *mp_state) 1106 struct kvm_mp_state *mp_state)
1151{ 1107{
1152 struct vcpu *vcpu = vcpu_find(vm, vcpuid); 1108 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1153 int ret; 1109 int ret;
@@ -1159,7 +1115,8 @@ void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
1159 "rc: %i errno: %i", ret, errno); 1115 "rc: %i errno: %i", ret, errno);
1160} 1116}
1161 1117
1162/* VM VCPU Regs Get 1118/*
1119 * VM VCPU Regs Get
1163 * 1120 *
1164 * Input Args: 1121 * Input Args:
1165 * vm - Virtual Machine 1122 * vm - Virtual Machine
@@ -1173,21 +1130,20 @@ void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
1173 * Obtains the current register state for the VCPU specified by vcpuid 1130 * Obtains the current register state for the VCPU specified by vcpuid
1174 * and stores it at the location given by regs. 1131 * and stores it at the location given by regs.
1175 */ 1132 */
1176void vcpu_regs_get(struct kvm_vm *vm, 1133void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs)
1177 uint32_t vcpuid, struct kvm_regs *regs)
1178{ 1134{
1179 struct vcpu *vcpu = vcpu_find(vm, vcpuid); 1135 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1180 int ret; 1136 int ret;
1181 1137
1182 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); 1138 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1183 1139
1184 /* Get the regs. */
1185 ret = ioctl(vcpu->fd, KVM_GET_REGS, regs); 1140 ret = ioctl(vcpu->fd, KVM_GET_REGS, regs);
1186 TEST_ASSERT(ret == 0, "KVM_GET_REGS failed, rc: %i errno: %i", 1141 TEST_ASSERT(ret == 0, "KVM_GET_REGS failed, rc: %i errno: %i",
1187 ret, errno); 1142 ret, errno);
1188} 1143}
1189 1144
1190/* VM VCPU Regs Set 1145/*
1146 * VM VCPU Regs Set
1191 * 1147 *
1192 * Input Args: 1148 * Input Args:
1193 * vm - Virtual Machine 1149 * vm - Virtual Machine
@@ -1201,165 +1157,46 @@ void vcpu_regs_get(struct kvm_vm *vm,
1201 * Sets the regs of the VCPU specified by vcpuid to the values 1157 * Sets the regs of the VCPU specified by vcpuid to the values
1202 * given by regs. 1158 * given by regs.
1203 */ 1159 */
1204void vcpu_regs_set(struct kvm_vm *vm, 1160void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs)
1205 uint32_t vcpuid, struct kvm_regs *regs)
1206{ 1161{
1207 struct vcpu *vcpu = vcpu_find(vm, vcpuid); 1162 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1208 int ret; 1163 int ret;
1209 1164
1210 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); 1165 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1211 1166
1212 /* Set the regs. */
1213 ret = ioctl(vcpu->fd, KVM_SET_REGS, regs); 1167 ret = ioctl(vcpu->fd, KVM_SET_REGS, regs);
1214 TEST_ASSERT(ret == 0, "KVM_SET_REGS failed, rc: %i errno: %i", 1168 TEST_ASSERT(ret == 0, "KVM_SET_REGS failed, rc: %i errno: %i",
1215 ret, errno); 1169 ret, errno);
1216} 1170}
1217 1171
1218void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid, 1172void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
1219 struct kvm_vcpu_events *events) 1173 struct kvm_vcpu_events *events)
1220{ 1174{
1221 struct vcpu *vcpu = vcpu_find(vm, vcpuid); 1175 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1222 int ret; 1176 int ret;
1223 1177
1224 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); 1178 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1225 1179
1226 /* Get the regs. */
1227 ret = ioctl(vcpu->fd, KVM_GET_VCPU_EVENTS, events); 1180 ret = ioctl(vcpu->fd, KVM_GET_VCPU_EVENTS, events);
1228 TEST_ASSERT(ret == 0, "KVM_GET_VCPU_EVENTS failed, rc: %i errno: %i", 1181 TEST_ASSERT(ret == 0, "KVM_GET_VCPU_EVENTS failed, rc: %i errno: %i",
1229 ret, errno); 1182 ret, errno);
1230} 1183}
1231 1184
1232void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid, 1185void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
1233 struct kvm_vcpu_events *events) 1186 struct kvm_vcpu_events *events)
1234{ 1187{
1235 struct vcpu *vcpu = vcpu_find(vm, vcpuid); 1188 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1236 int ret; 1189 int ret;
1237 1190
1238 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); 1191 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1239 1192
1240 /* Set the regs. */
1241 ret = ioctl(vcpu->fd, KVM_SET_VCPU_EVENTS, events); 1193 ret = ioctl(vcpu->fd, KVM_SET_VCPU_EVENTS, events);
1242 TEST_ASSERT(ret == 0, "KVM_SET_VCPU_EVENTS failed, rc: %i errno: %i", 1194 TEST_ASSERT(ret == 0, "KVM_SET_VCPU_EVENTS failed, rc: %i errno: %i",
1243 ret, errno); 1195 ret, errno);
1244} 1196}
1245 1197
1246/* VCPU Get MSR 1198/*
1247 * 1199 * VM VCPU System Regs Get
1248 * Input Args:
1249 * vm - Virtual Machine
1250 * vcpuid - VCPU ID
1251 * msr_index - Index of MSR
1252 *
1253 * Output Args: None
1254 *
1255 * Return: On success, value of the MSR. On failure a TEST_ASSERT is produced.
1256 *
1257 * Get value of MSR for VCPU.
1258 */
1259uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index)
1260{
1261 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1262 struct {
1263 struct kvm_msrs header;
1264 struct kvm_msr_entry entry;
1265 } buffer = {};
1266 int r;
1267
1268 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1269 buffer.header.nmsrs = 1;
1270 buffer.entry.index = msr_index;
1271 r = ioctl(vcpu->fd, KVM_GET_MSRS, &buffer.header);
1272 TEST_ASSERT(r == 1, "KVM_GET_MSRS IOCTL failed,\n"
1273 " rc: %i errno: %i", r, errno);
1274
1275 return buffer.entry.data;
1276}
1277
1278/* VCPU Set MSR
1279 *
1280 * Input Args:
1281 * vm - Virtual Machine
1282 * vcpuid - VCPU ID
1283 * msr_index - Index of MSR
1284 * msr_value - New value of MSR
1285 *
1286 * Output Args: None
1287 *
1288 * Return: On success, nothing. On failure a TEST_ASSERT is produced.
1289 *
1290 * Set value of MSR for VCPU.
1291 */
1292void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
1293 uint64_t msr_value)
1294{
1295 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1296 struct {
1297 struct kvm_msrs header;
1298 struct kvm_msr_entry entry;
1299 } buffer = {};
1300 int r;
1301
1302 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1303 memset(&buffer, 0, sizeof(buffer));
1304 buffer.header.nmsrs = 1;
1305 buffer.entry.index = msr_index;
1306 buffer.entry.data = msr_value;
1307 r = ioctl(vcpu->fd, KVM_SET_MSRS, &buffer.header);
1308 TEST_ASSERT(r == 1, "KVM_SET_MSRS IOCTL failed,\n"
1309 " rc: %i errno: %i", r, errno);
1310}
1311
1312/* VM VCPU Args Set
1313 *
1314 * Input Args:
1315 * vm - Virtual Machine
1316 * vcpuid - VCPU ID
1317 * num - number of arguments
1318 * ... - arguments, each of type uint64_t
1319 *
1320 * Output Args: None
1321 *
1322 * Return: None
1323 *
1324 * Sets the first num function input arguments to the values
1325 * given as variable args. Each of the variable args is expected to
1326 * be of type uint64_t.
1327 */
1328void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
1329{
1330 va_list ap;
1331 struct kvm_regs regs;
1332
1333 TEST_ASSERT(num >= 1 && num <= 6, "Unsupported number of args,\n"
1334 " num: %u\n",
1335 num);
1336
1337 va_start(ap, num);
1338 vcpu_regs_get(vm, vcpuid, &regs);
1339
1340 if (num >= 1)
1341 regs.rdi = va_arg(ap, uint64_t);
1342
1343 if (num >= 2)
1344 regs.rsi = va_arg(ap, uint64_t);
1345
1346 if (num >= 3)
1347 regs.rdx = va_arg(ap, uint64_t);
1348
1349 if (num >= 4)
1350 regs.rcx = va_arg(ap, uint64_t);
1351
1352 if (num >= 5)
1353 regs.r8 = va_arg(ap, uint64_t);
1354
1355 if (num >= 6)
1356 regs.r9 = va_arg(ap, uint64_t);
1357
1358 vcpu_regs_set(vm, vcpuid, &regs);
1359 va_end(ap);
1360}
1361
1362/* VM VCPU System Regs Get
1363 * 1200 *
1364 * Input Args: 1201 * Input Args:
1365 * vm - Virtual Machine 1202 * vm - Virtual Machine
@@ -1373,22 +1210,20 @@ void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
1373 * Obtains the current system register state for the VCPU specified by 1210 * Obtains the current system register state for the VCPU specified by
1374 * vcpuid and stores it at the location given by sregs. 1211 * vcpuid and stores it at the location given by sregs.
1375 */ 1212 */
1376void vcpu_sregs_get(struct kvm_vm *vm, 1213void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
1377 uint32_t vcpuid, struct kvm_sregs *sregs)
1378{ 1214{
1379 struct vcpu *vcpu = vcpu_find(vm, vcpuid); 1215 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1380 int ret; 1216 int ret;
1381 1217
1382 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); 1218 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1383 1219
1384 /* Get the regs. */
1385 /* Get the regs. */
1386 ret = ioctl(vcpu->fd, KVM_GET_SREGS, sregs); 1220 ret = ioctl(vcpu->fd, KVM_GET_SREGS, sregs);
1387 TEST_ASSERT(ret == 0, "KVM_GET_SREGS failed, rc: %i errno: %i", 1221 TEST_ASSERT(ret == 0, "KVM_GET_SREGS failed, rc: %i errno: %i",
1388 ret, errno); 1222 ret, errno);
1389} 1223}
1390 1224
1391/* VM VCPU System Regs Set 1225/*
1226 * VM VCPU System Regs Set
1392 * 1227 *
1393 * Input Args: 1228 * Input Args:
1394 * vm - Virtual Machine 1229 * vm - Virtual Machine
@@ -1402,27 +1237,25 @@ void vcpu_sregs_get(struct kvm_vm *vm,
1402 * Sets the system regs of the VCPU specified by vcpuid to the values 1237 * Sets the system regs of the VCPU specified by vcpuid to the values
1403 * given by sregs. 1238 * given by sregs.
1404 */ 1239 */
1405void vcpu_sregs_set(struct kvm_vm *vm, 1240void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
1406 uint32_t vcpuid, struct kvm_sregs *sregs)
1407{ 1241{
1408 int ret = _vcpu_sregs_set(vm, vcpuid, sregs); 1242 int ret = _vcpu_sregs_set(vm, vcpuid, sregs);
1409 TEST_ASSERT(ret == 0, "KVM_SET_SREGS IOCTL failed, " 1243 TEST_ASSERT(ret == 0, "KVM_SET_SREGS IOCTL failed, "
1410 "rc: %i errno: %i", ret, errno); 1244 "rc: %i errno: %i", ret, errno);
1411} 1245}
1412 1246
1413int _vcpu_sregs_set(struct kvm_vm *vm, 1247int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
1414 uint32_t vcpuid, struct kvm_sregs *sregs)
1415{ 1248{
1416 struct vcpu *vcpu = vcpu_find(vm, vcpuid); 1249 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1417 int ret; 1250 int ret;
1418 1251
1419 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); 1252 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1420 1253
1421 /* Get the regs. */
1422 return ioctl(vcpu->fd, KVM_SET_SREGS, sregs); 1254 return ioctl(vcpu->fd, KVM_SET_SREGS, sregs);
1423} 1255}
1424 1256
1425/* VCPU Ioctl 1257/*
1258 * VCPU Ioctl
1426 * 1259 *
1427 * Input Args: 1260 * Input Args:
1428 * vm - Virtual Machine 1261 * vm - Virtual Machine
@@ -1434,8 +1267,8 @@ int _vcpu_sregs_set(struct kvm_vm *vm,
1434 * 1267 *
1435 * Issues an arbitrary ioctl on a VCPU fd. 1268 * Issues an arbitrary ioctl on a VCPU fd.
1436 */ 1269 */
1437void vcpu_ioctl(struct kvm_vm *vm, 1270void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
1438 uint32_t vcpuid, unsigned long cmd, void *arg) 1271 unsigned long cmd, void *arg)
1439{ 1272{
1440 struct vcpu *vcpu = vcpu_find(vm, vcpuid); 1273 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1441 int ret; 1274 int ret;
@@ -1447,7 +1280,8 @@ void vcpu_ioctl(struct kvm_vm *vm,
1447 cmd, ret, errno, strerror(errno)); 1280 cmd, ret, errno, strerror(errno));
1448} 1281}
1449 1282
1450/* VM Ioctl 1283/*
1284 * VM Ioctl
1451 * 1285 *
1452 * Input Args: 1286 * Input Args:
1453 * vm - Virtual Machine 1287 * vm - Virtual Machine
@@ -1467,7 +1301,8 @@ void vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
1467 cmd, ret, errno, strerror(errno)); 1301 cmd, ret, errno, strerror(errno));
1468} 1302}
1469 1303
1470/* VM Dump 1304/*
1305 * VM Dump
1471 * 1306 *
1472 * Input Args: 1307 * Input Args:
1473 * vm - Virtual Machine 1308 * vm - Virtual Machine
@@ -1514,38 +1349,6 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
1514 vcpu_dump(stream, vm, vcpu->id, indent + 2); 1349 vcpu_dump(stream, vm, vcpu->id, indent + 2);
1515} 1350}
1516 1351
1517/* VM VCPU Dump
1518 *
1519 * Input Args:
1520 * vm - Virtual Machine
1521 * vcpuid - VCPU ID
1522 * indent - Left margin indent amount
1523 *
1524 * Output Args:
1525 * stream - Output FILE stream
1526 *
1527 * Return: None
1528 *
1529 * Dumps the current state of the VCPU specified by vcpuid, within the VM
1530 * given by vm, to the FILE stream given by stream.
1531 */
1532void vcpu_dump(FILE *stream, struct kvm_vm *vm,
1533 uint32_t vcpuid, uint8_t indent)
1534{
1535 struct kvm_regs regs;
1536 struct kvm_sregs sregs;
1537
1538 fprintf(stream, "%*scpuid: %u\n", indent, "", vcpuid);
1539
1540 fprintf(stream, "%*sregs:\n", indent + 2, "");
1541 vcpu_regs_get(vm, vcpuid, &regs);
1542 regs_dump(stream, &regs, indent + 4);
1543
1544 fprintf(stream, "%*ssregs:\n", indent + 2, "");
1545 vcpu_sregs_get(vm, vcpuid, &sregs);
1546 sregs_dump(stream, &sregs, indent + 4);
1547}
1548
1549/* Known KVM exit reasons */ 1352/* Known KVM exit reasons */
1550static struct exit_reason { 1353static struct exit_reason {
1551 unsigned int reason; 1354 unsigned int reason;
@@ -1576,7 +1379,8 @@ static struct exit_reason {
1576#endif 1379#endif
1577}; 1380};
1578 1381
1579/* Exit Reason String 1382/*
1383 * Exit Reason String
1580 * 1384 *
1581 * Input Args: 1385 * Input Args:
1582 * exit_reason - Exit reason 1386 * exit_reason - Exit reason
@@ -1602,10 +1406,12 @@ const char *exit_reason_str(unsigned int exit_reason)
1602 return "Unknown"; 1406 return "Unknown";
1603} 1407}
1604 1408
1605/* Physical Page Allocate 1409/*
1410 * Physical Contiguous Page Allocator
1606 * 1411 *
1607 * Input Args: 1412 * Input Args:
1608 * vm - Virtual Machine 1413 * vm - Virtual Machine
1414 * num - number of pages
1609 * paddr_min - Physical address minimum 1415 * paddr_min - Physical address minimum
1610 * memslot - Memory region to allocate page from 1416 * memslot - Memory region to allocate page from
1611 * 1417 *
@@ -1614,47 +1420,59 @@ const char *exit_reason_str(unsigned int exit_reason)
1614 * Return: 1420 * Return:
1615 * Starting physical address 1421 * Starting physical address
1616 * 1422 *
1617 * Within the VM specified by vm, locates an available physical page 1423 * Within the VM specified by vm, locates a range of available physical
1618 * at or above paddr_min. If found, the page is marked as in use 1424 * pages at or above paddr_min. If found, the pages are marked as in use
1619 * and its address is returned. A TEST_ASSERT failure occurs if no 1425 * and their base address is returned. A TEST_ASSERT failure occurs if
1620 * page is available at or above paddr_min. 1426 * not enough pages are available at or above paddr_min.
1621 */ 1427 */
1622vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, 1428vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
1623 vm_paddr_t paddr_min, uint32_t memslot) 1429 vm_paddr_t paddr_min, uint32_t memslot)
1624{ 1430{
1625 struct userspace_mem_region *region; 1431 struct userspace_mem_region *region;
1626 sparsebit_idx_t pg; 1432 sparsebit_idx_t pg, base;
1433
1434 TEST_ASSERT(num > 0, "Must allocate at least one page");
1627 1435
1628 TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address " 1436 TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
1629 "not divisible by page size.\n" 1437 "not divisible by page size.\n"
1630 " paddr_min: 0x%lx page_size: 0x%x", 1438 " paddr_min: 0x%lx page_size: 0x%x",
1631 paddr_min, vm->page_size); 1439 paddr_min, vm->page_size);
1632 1440
1633 /* Locate memory region. */
1634 region = memslot2region(vm, memslot); 1441 region = memslot2region(vm, memslot);
1442 base = pg = paddr_min >> vm->page_shift;
1635 1443
1636 /* Locate next available physical page at or above paddr_min. */ 1444 do {
1637 pg = paddr_min >> vm->page_shift; 1445 for (; pg < base + num; ++pg) {
1638 1446 if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
1639 if (!sparsebit_is_set(region->unused_phy_pages, pg)) { 1447 base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
1640 pg = sparsebit_next_set(region->unused_phy_pages, pg); 1448 break;
1641 if (pg == 0) { 1449 }
1642 fprintf(stderr, "No guest physical page available, "
1643 "paddr_min: 0x%lx page_size: 0x%x memslot: %u",
1644 paddr_min, vm->page_size, memslot);
1645 fputs("---- vm dump ----\n", stderr);
1646 vm_dump(stderr, vm, 2);
1647 abort();
1648 } 1450 }
1451 } while (pg && pg != base + num);
1452
1453 if (pg == 0) {
1454 fprintf(stderr, "No guest physical page available, "
1455 "paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
1456 paddr_min, vm->page_size, memslot);
1457 fputs("---- vm dump ----\n", stderr);
1458 vm_dump(stderr, vm, 2);
1459 abort();
1649 } 1460 }
1650 1461
1651 /* Specify page as in use and return its address. */ 1462 for (pg = base; pg < base + num; ++pg)
1652 sparsebit_clear(region->unused_phy_pages, pg); 1463 sparsebit_clear(region->unused_phy_pages, pg);
1464
1465 return base * vm->page_size;
1466}
1653 1467
1654 return pg * vm->page_size; 1468vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
1469 uint32_t memslot)
1470{
1471 return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
1655} 1472}
1656 1473
1657/* Address Guest Virtual to Host Virtual 1474/*
1475 * Address Guest Virtual to Host Virtual
1658 * 1476 *
1659 * Input Args: 1477 * Input Args:
1660 * vm - Virtual Machine 1478 * vm - Virtual Machine
@@ -1669,17 +1487,3 @@ void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
1669{ 1487{
1670 return addr_gpa2hva(vm, addr_gva2gpa(vm, gva)); 1488 return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
1671} 1489}
1672
1673void guest_args_read(struct kvm_vm *vm, uint32_t vcpu_id,
1674 struct guest_args *args)
1675{
1676 struct kvm_run *run = vcpu_state(vm, vcpu_id);
1677 struct kvm_regs regs;
1678
1679 memset(&regs, 0, sizeof(regs));
1680 vcpu_regs_get(vm, vcpu_id, &regs);
1681
1682 args->port = run->io.port;
1683 args->arg0 = regs.rdi;
1684 args->arg1 = regs.rsi;
1685}
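
For orientation, a minimal usage sketch of the new contiguous allocator (a hypothetical fragment, not code from the patch; the page count, minimum address, memslot, and 4 KiB page size are illustrative assumptions):

/* Allocate four contiguous guest-physical pages from memslot 0, at or
 * above 1 MiB, then identity-map each page into the guest (illustrative
 * values only).
 */
vm_paddr_t base = vm_phy_pages_alloc(vm, 4, 0x100000, 0);
for (int i = 0; i < 4; i++)
	virt_pg_map(vm, base + i * 4096, base + i * 4096, 0);
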
diff --git a/tools/testing/selftests/kvm/lib/kvm_util_internal.h b/tools/testing/selftests/kvm/lib/kvm_util_internal.h
index 542ed606b338..52701db0f253 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util_internal.h
+++ b/tools/testing/selftests/kvm/lib/kvm_util_internal.h
@@ -1,28 +1,29 @@
1/* 1/*
2 * tools/testing/selftests/kvm/lib/kvm_util.c 2 * tools/testing/selftests/kvm/lib/kvm_util_internal.h
3 * 3 *
4 * Copyright (C) 2018, Google LLC. 4 * Copyright (C) 2018, Google LLC.
5 * 5 *
6 * This work is licensed under the terms of the GNU GPL, version 2. 6 * This work is licensed under the terms of the GNU GPL, version 2.
7 */ 7 */
8 8
9#ifndef KVM_UTIL_INTERNAL_H 9#ifndef SELFTEST_KVM_UTIL_INTERNAL_H
10#define KVM_UTIL_INTERNAL_H 1 10#define SELFTEST_KVM_UTIL_INTERNAL_H
11 11
12#include "sparsebit.h" 12#include "sparsebit.h"
13 13
14#define KVM_DEV_PATH "/dev/kvm"
15
14#ifndef BITS_PER_BYTE 16#ifndef BITS_PER_BYTE
15#define BITS_PER_BYTE 8 17#define BITS_PER_BYTE 8
16#endif 18#endif
17 19
18#ifndef BITS_PER_LONG 20#ifndef BITS_PER_LONG
19#define BITS_PER_LONG (BITS_PER_BYTE * sizeof(long)) 21#define BITS_PER_LONG (BITS_PER_BYTE * sizeof(long))
20#endif 22#endif
21 23
22#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) 24#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
23#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_LONG) 25#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_LONG)
24 26
25/* Concrete definition of struct kvm_vm. */
26struct userspace_mem_region { 27struct userspace_mem_region {
27 struct userspace_mem_region *next, *prev; 28 struct userspace_mem_region *next, *prev;
28 struct kvm_userspace_memory_region region; 29 struct kvm_userspace_memory_region region;
@@ -45,14 +46,16 @@ struct kvm_vm {
45 int mode; 46 int mode;
46 int kvm_fd; 47 int kvm_fd;
47 int fd; 48 int fd;
49 unsigned int pgtable_levels;
48 unsigned int page_size; 50 unsigned int page_size;
49 unsigned int page_shift; 51 unsigned int page_shift;
52 unsigned int pa_bits;
53 unsigned int va_bits;
50 uint64_t max_gfn; 54 uint64_t max_gfn;
51 struct vcpu *vcpu_head; 55 struct vcpu *vcpu_head;
52 struct userspace_mem_region *userspace_mem_region_head; 56 struct userspace_mem_region *userspace_mem_region_head;
53 struct sparsebit *vpages_valid; 57 struct sparsebit *vpages_valid;
54 struct sparsebit *vpages_mapped; 58 struct sparsebit *vpages_mapped;
55
56 bool has_irqchip; 59 bool has_irqchip;
57 bool pgd_created; 60 bool pgd_created;
58 vm_paddr_t pgd; 61 vm_paddr_t pgd;
@@ -60,13 +63,11 @@ struct kvm_vm {
60 vm_vaddr_t tss; 63 vm_vaddr_t tss;
61}; 64};
62 65
63struct vcpu *vcpu_find(struct kvm_vm *vm, 66struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid);
64 uint32_t vcpuid); 67void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot,
65void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot, int gdt_memslot); 68 int gdt_memslot);
66void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent); 69void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
67void regs_dump(FILE *stream, struct kvm_regs *regs, 70void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent);
68 uint8_t indent); 71void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent);
69void sregs_dump(FILE *stream, struct kvm_sregs *sregs,
70 uint8_t indent);
71 72
72#endif 73#endif /* SELFTEST_KVM_UTIL_INTERNAL_H */
diff --git a/tools/testing/selftests/kvm/lib/ucall.c b/tools/testing/selftests/kvm/lib/ucall.c
new file mode 100644
index 000000000000..4777f9bb5194
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/ucall.c
@@ -0,0 +1,144 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * ucall support. A ucall is a "hypercall to userspace".
4 *
5 * Copyright (C) 2018, Red Hat, Inc.
6 */
7#include "kvm_util.h"
8#include "kvm_util_internal.h"
9
10#define UCALL_PIO_PORT ((uint16_t)0x1000)
11
12static ucall_type_t ucall_type;
13static vm_vaddr_t *ucall_exit_mmio_addr;
14
15static bool ucall_mmio_init(struct kvm_vm *vm, vm_paddr_t gpa)
16{
17 if (kvm_userspace_memory_region_find(vm, gpa, gpa + 1))
18 return false;
19
20 virt_pg_map(vm, gpa, gpa, 0);
21
22 ucall_exit_mmio_addr = (vm_vaddr_t *)gpa;
23 sync_global_to_guest(vm, ucall_exit_mmio_addr);
24
25 return true;
26}
27
28void ucall_init(struct kvm_vm *vm, ucall_type_t type, void *arg)
29{
30 ucall_type = type;
31 sync_global_to_guest(vm, ucall_type);
32
33 if (type == UCALL_PIO)
34 return;
35
36 if (type == UCALL_MMIO) {
37 vm_paddr_t gpa, start, end, step;
38 bool ret;
39
40 if (arg) {
41 gpa = (vm_paddr_t)arg;
42 ret = ucall_mmio_init(vm, gpa);
43 TEST_ASSERT(ret, "Can't set ucall mmio address to %lx", gpa);
44 return;
45 }
46
47 /*
48 * Find an address within the allowed virtual address space,
49 * that does _not_ have a KVM memory region associated with it.
50 * Identity mapping an address like this allows the guest to
51 * access it, but as KVM doesn't know what to do with it, it
52 * will assume it's something userspace handles and exit with
53 * KVM_EXIT_MMIO. Well, at least that's how it works for AArch64.
54 * Here we start with a guess that the addresses around two
55 * thirds of the VA space are unmapped and then work both down
56 * and up from there in 1/6 VA space sized steps.
57 */
58 start = 1ul << (vm->va_bits * 2 / 3);
59 end = 1ul << vm->va_bits;
60 step = 1ul << (vm->va_bits / 6);
 61 for (gpa = start; gpa > 0; gpa -= step) {
62 if (ucall_mmio_init(vm, gpa & ~(vm->page_size - 1)))
63 return;
64 }
65 for (gpa = start + step; gpa < end; gpa += step) {
66 if (ucall_mmio_init(vm, gpa & ~(vm->page_size - 1)))
67 return;
68 }
69 TEST_ASSERT(false, "Can't find a ucall mmio address");
70 }
71}
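
To make the search concrete (a worked instance, assuming a guest with va_bits = 48): start = 1ul << 32 (4 GiB), end = 1ul << 48, and step = 1ul << 8. Note that the 2/3 and 1/6 fractions apply to the shift amounts (the number of VA bits), not to the size of the address space itself.
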
72
73void ucall_uninit(struct kvm_vm *vm)
74{
75 ucall_type = 0;
76 sync_global_to_guest(vm, ucall_type);
77 ucall_exit_mmio_addr = 0;
78 sync_global_to_guest(vm, ucall_exit_mmio_addr);
79}
80
81static void ucall_pio_exit(struct ucall *uc)
82{
83#ifdef __x86_64__
84 asm volatile("in %[port], %%al"
85 : : [port] "d" (UCALL_PIO_PORT), "D" (uc) : "rax");
86#endif
87}
88
89static void ucall_mmio_exit(struct ucall *uc)
90{
91 *ucall_exit_mmio_addr = (vm_vaddr_t)uc;
92}
93
94void ucall(uint64_t cmd, int nargs, ...)
95{
96 struct ucall uc = {
97 .cmd = cmd,
98 };
99 va_list va;
100 int i;
101
102 nargs = nargs <= UCALL_MAX_ARGS ? nargs : UCALL_MAX_ARGS;
103
104 va_start(va, nargs);
105 for (i = 0; i < nargs; ++i)
106 uc.args[i] = va_arg(va, uint64_t);
107 va_end(va);
108
109 switch (ucall_type) {
110 case UCALL_PIO:
111 ucall_pio_exit(&uc);
112 break;
113 case UCALL_MMIO:
114 ucall_mmio_exit(&uc);
115 break;
 116 }
117}
118
119uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
120{
121 struct kvm_run *run = vcpu_state(vm, vcpu_id);
122
123 memset(uc, 0, sizeof(*uc));
124
125#ifdef __x86_64__
126 if (ucall_type == UCALL_PIO && run->exit_reason == KVM_EXIT_IO &&
127 run->io.port == UCALL_PIO_PORT) {
128 struct kvm_regs regs;
129 vcpu_regs_get(vm, vcpu_id, &regs);
130 memcpy(uc, addr_gva2hva(vm, (vm_vaddr_t)regs.rdi), sizeof(*uc));
131 return uc->cmd;
132 }
133#endif
134 if (ucall_type == UCALL_MMIO && run->exit_reason == KVM_EXIT_MMIO &&
135 run->mmio.phys_addr == (uint64_t)ucall_exit_mmio_addr) {
136 vm_vaddr_t gva;
137 TEST_ASSERT(run->mmio.is_write && run->mmio.len == 8,
138 "Unexpected ucall exit mmio address access");
139 gva = *(vm_vaddr_t *)run->mmio.data;
140 memcpy(uc, addr_gva2hva(vm, gva), sizeof(*uc));
141 }
142
143 return uc->cmd;
144}
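
Putting the two halves together, a hedged sketch of the ucall round trip (not code from the patch; UCALL_SYNC, UCALL_DONE, struct ucall, and vcpu_run() are declared in kvm_util.h, and VCPU_ID is a test-local constant):

/* Guest side: report stage 1 to the host, then signal completion. */
static void guest_code(void)
{
	ucall(UCALL_SYNC, 2, (uint64_t)0, (uint64_t)1);
	ucall(UCALL_DONE, 0);
}

/* Host side: run the vcpu and decode the resulting exit. */
struct ucall uc;

vcpu_run(vm, VCPU_ID);
switch (get_ucall(vm, VCPU_ID, &uc)) {
case UCALL_SYNC:
	/* uc.args[1] holds the stage number the guest reported. */
	break;
case UCALL_DONE:
	break;
}
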
diff --git a/tools/testing/selftests/kvm/lib/x86.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index a3122f1949a8..f28127f4a3af 100644
--- a/tools/testing/selftests/kvm/lib/x86.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * tools/testing/selftests/kvm/lib/x86.c 2 * tools/testing/selftests/kvm/lib/x86_64/processor.c
3 * 3 *
4 * Copyright (C) 2018, Google LLC. 4 * Copyright (C) 2018, Google LLC.
5 * 5 *
@@ -10,8 +10,8 @@
10 10
11#include "test_util.h" 11#include "test_util.h"
12#include "kvm_util.h" 12#include "kvm_util.h"
13#include "kvm_util_internal.h" 13#include "../kvm_util_internal.h"
14#include "x86.h" 14#include "processor.h"
15 15
16/* Minimum physical address used for virtual translation tables. */ 16/* Minimum physical address used for virtual translation tables. */
17#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000 17#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
@@ -231,7 +231,7 @@ void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot)
231{ 231{
232 int rc; 232 int rc;
233 233
234 TEST_ASSERT(vm->mode == VM_MODE_FLAT48PG, "Attempt to use " 234 TEST_ASSERT(vm->mode == VM_MODE_P52V48_4K, "Attempt to use "
235 "unknown or unsupported guest mode, mode: 0x%x", vm->mode); 235 "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
236 236
237 /* If needed, create page map l4 table. */ 237 /* If needed, create page map l4 table. */
@@ -264,7 +264,7 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
264 uint16_t index[4]; 264 uint16_t index[4];
265 struct pageMapL4Entry *pml4e; 265 struct pageMapL4Entry *pml4e;
266 266
267 TEST_ASSERT(vm->mode == VM_MODE_FLAT48PG, "Attempt to use " 267 TEST_ASSERT(vm->mode == VM_MODE_P52V48_4K, "Attempt to use "
268 "unknown or unsupported guest mode, mode: 0x%x", vm->mode); 268 "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
269 269
270 TEST_ASSERT((vaddr % vm->page_size) == 0, 270 TEST_ASSERT((vaddr % vm->page_size) == 0,
@@ -551,7 +551,7 @@ vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
551 struct pageTableEntry *pte; 551 struct pageTableEntry *pte;
552 void *hva; 552 void *hva;
553 553
554 TEST_ASSERT(vm->mode == VM_MODE_FLAT48PG, "Attempt to use " 554 TEST_ASSERT(vm->mode == VM_MODE_P52V48_4K, "Attempt to use "
555 "unknown or unsupported guest mode, mode: 0x%x", vm->mode); 555 "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
556 556
557 index[0] = (gva >> 12) & 0x1ffu; 557 index[0] = (gva >> 12) & 0x1ffu;
@@ -624,9 +624,9 @@ void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot, int gdt_memslot)
624 kvm_setup_gdt(vm, &sregs.gdt, gdt_memslot, pgd_memslot); 624 kvm_setup_gdt(vm, &sregs.gdt, gdt_memslot, pgd_memslot);
625 625
626 switch (vm->mode) { 626 switch (vm->mode) {
627 case VM_MODE_FLAT48PG: 627 case VM_MODE_P52V48_4K:
628 sregs.cr0 = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG; 628 sregs.cr0 = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG;
629 sregs.cr4 |= X86_CR4_PAE; 629 sregs.cr4 |= X86_CR4_PAE | X86_CR4_OSFXSR;
630 sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX); 630 sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX);
631 631
632 kvm_seg_set_unusable(&sregs.ldt); 632 kvm_seg_set_unusable(&sregs.ldt);
@@ -672,6 +672,102 @@ void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
672 vcpu_set_mp_state(vm, vcpuid, &mp_state); 672 vcpu_set_mp_state(vm, vcpuid, &mp_state);
673} 673}
674 674
675/* Allocate an instance of struct kvm_cpuid2
676 *
677 * Input Args: None
678 *
679 * Output Args: None
680 *
681 * Return: A pointer to the allocated struct. The caller is responsible
682 * for freeing this struct.
683 *
 684 * Since kvm_cpuid2 uses a 0-length array to allow the size of the
685 * array to be decided at allocation time, allocation is slightly
686 * complicated. This function uses a reasonable default length for
687 * the array and performs the appropriate allocation.
688 */
689static struct kvm_cpuid2 *allocate_kvm_cpuid2(void)
690{
691 struct kvm_cpuid2 *cpuid;
692 int nent = 100;
693 size_t size;
694
695 size = sizeof(*cpuid);
696 size += nent * sizeof(struct kvm_cpuid_entry2);
697 cpuid = malloc(size);
698 if (!cpuid) {
699 perror("malloc");
700 abort();
701 }
702
703 cpuid->nent = nent;
704
705 return cpuid;
706}
707
708/* KVM Supported CPUID Get
709 *
710 * Input Args: None
711 *
712 * Output Args:
713 *
714 * Return: The supported KVM CPUID
715 *
716 * Get the guest CPUID supported by KVM.
717 */
718struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
719{
720 static struct kvm_cpuid2 *cpuid;
721 int ret;
722 int kvm_fd;
723
724 if (cpuid)
725 return cpuid;
726
727 cpuid = allocate_kvm_cpuid2();
728 kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
729 if (kvm_fd < 0)
730 exit(KSFT_SKIP);
731
732 ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);
733 TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_CPUID failed %d %d\n",
734 ret, errno);
735
736 close(kvm_fd);
737 return cpuid;
738}
739
740/* Locate a cpuid entry.
741 *
742 * Input Args:
743 * cpuid: The cpuid.
744 * function: The function of the cpuid entry to find.
745 *
746 * Output Args: None
747 *
748 * Return: A pointer to the cpuid entry. Never returns NULL.
749 */
750struct kvm_cpuid_entry2 *
751kvm_get_supported_cpuid_index(uint32_t function, uint32_t index)
752{
753 struct kvm_cpuid2 *cpuid;
754 struct kvm_cpuid_entry2 *entry = NULL;
755 int i;
756
757 cpuid = kvm_get_supported_cpuid();
758 for (i = 0; i < cpuid->nent; i++) {
759 if (cpuid->entries[i].function == function &&
760 cpuid->entries[i].index == index) {
761 entry = &cpuid->entries[i];
762 break;
763 }
764 }
765
766 TEST_ASSERT(entry, "Guest CPUID entry not found: (EAX=%x, ECX=%x).",
767 function, index);
768 return entry;
769}
770
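
A short sketch of how a test might consume these helpers (hypothetical; function 1/index 0 and the XSAVE feature bit mirror the cr4_cpuid_sync_test conversion further down):

/* Skip the test when the host's KVM does not expose XSAVE in CPUID. */
struct kvm_cpuid_entry2 *e = kvm_get_supported_cpuid_index(1, 0);

if (!(e->ecx & (1u << 26)))	/* X86_FEATURE_XSAVE */
	exit(KSFT_SKIP);
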
675/* VM VCPU CPUID Set 771/* VM VCPU CPUID Set
676 * 772 *
677 * Input Args: 773 * Input Args:
@@ -698,6 +794,7 @@ void vcpu_set_cpuid(struct kvm_vm *vm,
698 rc, errno); 794 rc, errno);
699 795
700} 796}
797
701/* Create a VM with reasonable defaults 798/* Create a VM with reasonable defaults
702 * 799 *
703 * Input Args: 800 * Input Args:
@@ -726,7 +823,7 @@ struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
726 uint64_t extra_pg_pages = extra_mem_pages / 512 * 2; 823 uint64_t extra_pg_pages = extra_mem_pages / 512 * 2;
727 824
728 /* Create VM */ 825 /* Create VM */
729 vm = vm_create(VM_MODE_FLAT48PG, 826 vm = vm_create(VM_MODE_P52V48_4K,
730 DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, 827 DEFAULT_GUEST_PHY_PAGES + extra_pg_pages,
731 O_RDWR); 828 O_RDWR);
732 829
@@ -742,6 +839,154 @@ struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
742 return vm; 839 return vm;
743} 840}
744 841
842/* VCPU Get MSR
843 *
844 * Input Args:
845 * vm - Virtual Machine
846 * vcpuid - VCPU ID
847 * msr_index - Index of MSR
848 *
849 * Output Args: None
850 *
851 * Return: On success, value of the MSR. On failure a TEST_ASSERT is produced.
852 *
853 * Get value of MSR for VCPU.
854 */
855uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index)
856{
857 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
858 struct {
859 struct kvm_msrs header;
860 struct kvm_msr_entry entry;
861 } buffer = {};
862 int r;
863
864 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
865 buffer.header.nmsrs = 1;
866 buffer.entry.index = msr_index;
867 r = ioctl(vcpu->fd, KVM_GET_MSRS, &buffer.header);
868 TEST_ASSERT(r == 1, "KVM_GET_MSRS IOCTL failed,\n"
869 " rc: %i errno: %i", r, errno);
870
871 return buffer.entry.data;
872}
873
874/* VCPU Set MSR
875 *
876 * Input Args:
877 * vm - Virtual Machine
878 * vcpuid - VCPU ID
879 * msr_index - Index of MSR
880 * msr_value - New value of MSR
881 *
882 * Output Args: None
883 *
884 * Return: On success, nothing. On failure a TEST_ASSERT is produced.
885 *
886 * Set value of MSR for VCPU.
887 */
888void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
889 uint64_t msr_value)
890{
891 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
892 struct {
893 struct kvm_msrs header;
894 struct kvm_msr_entry entry;
895 } buffer = {};
896 int r;
897
898 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
899 memset(&buffer, 0, sizeof(buffer));
900 buffer.header.nmsrs = 1;
901 buffer.entry.index = msr_index;
902 buffer.entry.data = msr_value;
903 r = ioctl(vcpu->fd, KVM_SET_MSRS, &buffer.header);
904 TEST_ASSERT(r == 1, "KVM_SET_MSRS IOCTL failed,\n"
905 " rc: %i errno: %i", r, errno);
906}
907
908/* VM VCPU Args Set
909 *
910 * Input Args:
911 * vm - Virtual Machine
912 * vcpuid - VCPU ID
913 * num - number of arguments
914 * ... - arguments, each of type uint64_t
915 *
916 * Output Args: None
917 *
918 * Return: None
919 *
920 * Sets the first num function input arguments to the values
921 * given as variable args. Each of the variable args is expected to
922 * be of type uint64_t.
923 */
924void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
925{
926 va_list ap;
927 struct kvm_regs regs;
928
929 TEST_ASSERT(num >= 1 && num <= 6, "Unsupported number of args,\n"
930 " num: %u\n",
931 num);
932
933 va_start(ap, num);
934 vcpu_regs_get(vm, vcpuid, &regs);
935
936 if (num >= 1)
937 regs.rdi = va_arg(ap, uint64_t);
938
939 if (num >= 2)
940 regs.rsi = va_arg(ap, uint64_t);
941
942 if (num >= 3)
943 regs.rdx = va_arg(ap, uint64_t);
944
945 if (num >= 4)
946 regs.rcx = va_arg(ap, uint64_t);
947
948 if (num >= 5)
949 regs.r8 = va_arg(ap, uint64_t);
950
951 if (num >= 6)
952 regs.r9 = va_arg(ap, uint64_t);
953
954 vcpu_regs_set(vm, vcpuid, &regs);
955 va_end(ap);
956}
957
958/*
959 * VM VCPU Dump
960 *
961 * Input Args:
962 * vm - Virtual Machine
963 * vcpuid - VCPU ID
964 * indent - Left margin indent amount
965 *
966 * Output Args:
967 * stream - Output FILE stream
968 *
969 * Return: None
970 *
971 * Dumps the current state of the VCPU specified by vcpuid, within the VM
972 * given by vm, to the FILE stream given by stream.
973 */
974void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
975{
976 struct kvm_regs regs;
977 struct kvm_sregs sregs;
978
979 fprintf(stream, "%*scpuid: %u\n", indent, "", vcpuid);
980
981 fprintf(stream, "%*sregs:\n", indent + 2, "");
982 vcpu_regs_get(vm, vcpuid, &regs);
983 regs_dump(stream, &regs, indent + 4);
984
985 fprintf(stream, "%*ssregs:\n", indent + 2, "");
986 vcpu_sregs_get(vm, vcpuid, &sregs);
987 sregs_dump(stream, &sregs, indent + 4);
988}
989
745struct kvm_x86_state { 990struct kvm_x86_state {
746 struct kvm_vcpu_events events; 991 struct kvm_vcpu_events events;
747 struct kvm_mp_state mp_state; 992 struct kvm_mp_state mp_state;
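
With the MSR and argument helpers now colocated with the rest of the x86 code, a combined usage sketch (hypothetical values; MSR_IA32_TSC_ADJUST is just an example index, defined in vmx_tsc_adjust_test below):

/* Pass two uint64_t arguments to the guest entry point (RDI, RSI) and
 * adjust an MSR before running the vcpu; the constants are arbitrary.
 */
vcpu_args_set(vm, VCPU_ID, 2, (uint64_t)0xc0ffee, (uint64_t)1);

uint64_t adj = vcpu_get_msr(vm, VCPU_ID, MSR_IA32_TSC_ADJUST);
vcpu_set_msr(vm, VCPU_ID, MSR_IA32_TSC_ADJUST, adj + 0x1000);
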
diff --git a/tools/testing/selftests/kvm/lib/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
index b987c3c970eb..771ba6bf751c 100644
--- a/tools/testing/selftests/kvm/lib/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * tools/testing/selftests/kvm/lib/x86.c 2 * tools/testing/selftests/kvm/lib/x86_64/vmx.c
3 * 3 *
4 * Copyright (C) 2018, Google LLC. 4 * Copyright (C) 2018, Google LLC.
5 * 5 *
@@ -10,9 +10,11 @@
10 10
11#include "test_util.h" 11#include "test_util.h"
12#include "kvm_util.h" 12#include "kvm_util.h"
13#include "x86.h" 13#include "processor.h"
14#include "vmx.h" 14#include "vmx.h"
15 15
16bool enable_evmcs;
17
16/* Allocate memory regions for nested VMX tests. 18/* Allocate memory regions for nested VMX tests.
17 * 19 *
18 * Input Args: 20 * Input Args:
@@ -62,6 +64,20 @@ vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva)
62 vmx->vmwrite_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmwrite); 64 vmx->vmwrite_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmwrite);
63 memset(vmx->vmwrite_hva, 0, getpagesize()); 65 memset(vmx->vmwrite_hva, 0, getpagesize());
64 66
67 /* Setup of a region of guest memory for the VP Assist page. */
68 vmx->vp_assist = (void *)vm_vaddr_alloc(vm, getpagesize(),
69 0x10000, 0, 0);
70 vmx->vp_assist_hva = addr_gva2hva(vm, (uintptr_t)vmx->vp_assist);
71 vmx->vp_assist_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vp_assist);
72
73 /* Setup of a region of guest memory for the enlightened VMCS. */
74 vmx->enlightened_vmcs = (void *)vm_vaddr_alloc(vm, getpagesize(),
75 0x10000, 0, 0);
76 vmx->enlightened_vmcs_hva =
77 addr_gva2hva(vm, (uintptr_t)vmx->enlightened_vmcs);
78 vmx->enlightened_vmcs_gpa =
79 addr_gva2gpa(vm, (uintptr_t)vmx->enlightened_vmcs);
80
65 *p_vmx_gva = vmx_gva; 81 *p_vmx_gva = vmx_gva;
66 return vmx; 82 return vmx;
67} 83}
@@ -107,18 +123,31 @@ bool prepare_for_vmx_operation(struct vmx_pages *vmx)
107 if (vmxon(vmx->vmxon_gpa)) 123 if (vmxon(vmx->vmxon_gpa))
108 return false; 124 return false;
109 125
110 /* Load a VMCS. */ 126 return true;
111 *(uint32_t *)(vmx->vmcs) = vmcs_revision(); 127}
112 if (vmclear(vmx->vmcs_gpa))
113 return false;
114
115 if (vmptrld(vmx->vmcs_gpa))
116 return false;
117 128
118 /* Setup shadow VMCS, do not load it yet. */ 129bool load_vmcs(struct vmx_pages *vmx)
119 *(uint32_t *)(vmx->shadow_vmcs) = vmcs_revision() | 0x80000000ul; 130{
120 if (vmclear(vmx->shadow_vmcs_gpa)) 131 if (!enable_evmcs) {
121 return false; 132 /* Load a VMCS. */
133 *(uint32_t *)(vmx->vmcs) = vmcs_revision();
134 if (vmclear(vmx->vmcs_gpa))
135 return false;
136
137 if (vmptrld(vmx->vmcs_gpa))
138 return false;
139
140 /* Setup shadow VMCS, do not load it yet. */
141 *(uint32_t *)(vmx->shadow_vmcs) =
142 vmcs_revision() | 0x80000000ul;
143 if (vmclear(vmx->shadow_vmcs_gpa))
144 return false;
145 } else {
146 if (evmcs_vmptrld(vmx->enlightened_vmcs_gpa,
147 vmx->enlightened_vmcs))
148 return false;
149 current_evmcs->revision_id = vmcs_revision();
150 }
122 151
123 return true; 152 return true;
124} 153}
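
With prepare_for_vmx_operation() now stopping after VMXON, the guest-side setup becomes an explicit two-step handshake; a condensed sketch of the intended call order (l2_guest_code and the stack array are placeholders, and the global enable_evmcs flag selects the enlightened path inside load_vmcs()):

GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));	/* VMXON only */
GUEST_ASSERT(load_vmcs(vmx_pages));	/* VMCLEAR+VMPTRLD, or evmcs_vmptrld */
prepare_vmcs(vmx_pages, l2_guest_code,
	     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
GUEST_ASSERT(!vmlaunch());
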
diff --git a/tools/testing/selftests/kvm/cr4_cpuid_sync_test.c b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
index 11ec358bf969..d503a51fad30 100644
--- a/tools/testing/selftests/kvm/cr4_cpuid_sync_test.c
+++ b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
@@ -17,7 +17,7 @@
17#include "test_util.h" 17#include "test_util.h"
18 18
19#include "kvm_util.h" 19#include "kvm_util.h"
20#include "x86.h" 20#include "processor.h"
21 21
22#define X86_FEATURE_XSAVE (1<<26) 22#define X86_FEATURE_XSAVE (1<<26)
23#define X86_FEATURE_OSXSAVE (1<<27) 23#define X86_FEATURE_OSXSAVE (1<<27)
@@ -67,6 +67,7 @@ int main(int argc, char *argv[])
67 struct kvm_vm *vm; 67 struct kvm_vm *vm;
68 struct kvm_sregs sregs; 68 struct kvm_sregs sregs;
69 struct kvm_cpuid_entry2 *entry; 69 struct kvm_cpuid_entry2 *entry;
70 struct ucall uc;
70 int rc; 71 int rc;
71 72
72 entry = kvm_get_supported_cpuid_entry(1); 73 entry = kvm_get_supported_cpuid_entry(1);
@@ -87,21 +88,20 @@ int main(int argc, char *argv[])
87 rc = _vcpu_run(vm, VCPU_ID); 88 rc = _vcpu_run(vm, VCPU_ID);
88 89
89 if (run->exit_reason == KVM_EXIT_IO) { 90 if (run->exit_reason == KVM_EXIT_IO) {
90 switch (run->io.port) { 91 switch (get_ucall(vm, VCPU_ID, &uc)) {
91 case GUEST_PORT_SYNC: 92 case UCALL_SYNC:
92 /* emulate hypervisor clearing CR4.OSXSAVE */ 93 /* emulate hypervisor clearing CR4.OSXSAVE */
93 vcpu_sregs_get(vm, VCPU_ID, &sregs); 94 vcpu_sregs_get(vm, VCPU_ID, &sregs);
94 sregs.cr4 &= ~X86_CR4_OSXSAVE; 95 sregs.cr4 &= ~X86_CR4_OSXSAVE;
95 vcpu_sregs_set(vm, VCPU_ID, &sregs); 96 vcpu_sregs_set(vm, VCPU_ID, &sregs);
96 break; 97 break;
97 case GUEST_PORT_ABORT: 98 case UCALL_ABORT:
98 TEST_ASSERT(false, "Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit."); 99 TEST_ASSERT(false, "Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit.");
99 break; 100 break;
100 case GUEST_PORT_DONE: 101 case UCALL_DONE:
101 goto done; 102 goto done;
102 default: 103 default:
103 TEST_ASSERT(false, "Unknown port 0x%x.", 104 TEST_ASSERT(false, "Unknown ucall 0x%lx.", uc.cmd);
104 run->io.port);
105 } 105 }
106 } 106 }
107 } 107 }
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
new file mode 100644
index 000000000000..92c2cfd1b182
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
@@ -0,0 +1,160 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2018, Red Hat, Inc.
4 *
5 * Tests for Enlightened VMCS, including nested guest state.
6 */
7#define _GNU_SOURCE /* for program_invocation_short_name */
8#include <fcntl.h>
9#include <stdio.h>
10#include <stdlib.h>
11#include <string.h>
12#include <sys/ioctl.h>
13
14#include "test_util.h"
15
16#include "kvm_util.h"
17
18#include "vmx.h"
19
20#define VCPU_ID 5
21
22static bool have_nested_state;
23
24void l2_guest_code(void)
25{
26 GUEST_SYNC(6);
27
28 GUEST_SYNC(7);
29
30 /* Done, exit to L1 and never come back. */
31 vmcall();
32}
33
34void l1_guest_code(struct vmx_pages *vmx_pages)
35{
36#define L2_GUEST_STACK_SIZE 64
37 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
38
39 enable_vp_assist(vmx_pages->vp_assist_gpa, vmx_pages->vp_assist);
40
41 GUEST_ASSERT(vmx_pages->vmcs_gpa);
42 GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
43 GUEST_SYNC(3);
44 GUEST_ASSERT(load_vmcs(vmx_pages));
45 GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);
46
47 GUEST_SYNC(4);
48 GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);
49
50 prepare_vmcs(vmx_pages, l2_guest_code,
51 &l2_guest_stack[L2_GUEST_STACK_SIZE]);
52
53 GUEST_SYNC(5);
54 GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);
55 GUEST_ASSERT(!vmlaunch());
56 GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);
57 GUEST_SYNC(8);
58 GUEST_ASSERT(!vmresume());
59 GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
60 GUEST_SYNC(9);
61}
62
63void guest_code(struct vmx_pages *vmx_pages)
64{
65 GUEST_SYNC(1);
66 GUEST_SYNC(2);
67
68 if (vmx_pages)
69 l1_guest_code(vmx_pages);
70
71 GUEST_DONE();
72}
73
74int main(int argc, char *argv[])
75{
76 struct vmx_pages *vmx_pages = NULL;
77 vm_vaddr_t vmx_pages_gva = 0;
78
79 struct kvm_regs regs1, regs2;
80 struct kvm_vm *vm;
81 struct kvm_run *run;
82 struct kvm_x86_state *state;
83 struct ucall uc;
84 int stage;
85 uint16_t evmcs_ver;
86 struct kvm_enable_cap enable_evmcs_cap = {
87 .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
88 .args[0] = (unsigned long)&evmcs_ver
89 };
90
91 struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
92
93 /* Create VM */
94 vm = vm_create_default(VCPU_ID, 0, guest_code);
95
96 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
97
98 if (!kvm_check_cap(KVM_CAP_NESTED_STATE) ||
99 !kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
100 printf("capabilities not available, skipping test\n");
101 exit(KSFT_SKIP);
102 }
103
104 vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
105
106 run = vcpu_state(vm, VCPU_ID);
107
108 vcpu_regs_get(vm, VCPU_ID, &regs1);
109
110 vmx_pages = vcpu_alloc_vmx(vm, &vmx_pages_gva);
111 vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
112
113 for (stage = 1;; stage++) {
114 _vcpu_run(vm, VCPU_ID);
115 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
116 "Unexpected exit reason: %u (%s),\n",
117 run->exit_reason,
118 exit_reason_str(run->exit_reason));
119
120 memset(&regs1, 0, sizeof(regs1));
121 vcpu_regs_get(vm, VCPU_ID, &regs1);
122 switch (get_ucall(vm, VCPU_ID, &uc)) {
123 case UCALL_ABORT:
 124 TEST_ASSERT(false, "%s at %s:%ld", (const char *)uc.args[0],
125 __FILE__, uc.args[1]);
126 /* NOT REACHED */
127 case UCALL_SYNC:
128 break;
129 case UCALL_DONE:
130 goto done;
131 default:
 132 TEST_ASSERT(false, "Unknown ucall 0x%lx.", uc.cmd);
133 }
134
135 /* UCALL_SYNC is handled here. */
136 TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
137 uc.args[1] == stage, "Unexpected register values vmexit #%lx, got %lx",
 138 (ulong)stage, (ulong)uc.args[1]);
139
140 state = vcpu_save_state(vm, VCPU_ID);
141 kvm_vm_release(vm);
142
143 /* Restore state in a new VM. */
144 kvm_vm_restart(vm, O_RDWR);
145 vm_vcpu_add(vm, VCPU_ID, 0, 0);
146 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
147 vcpu_load_state(vm, VCPU_ID, state);
148 run = vcpu_state(vm, VCPU_ID);
149 free(state);
150
151 memset(&regs2, 0, sizeof(regs2));
152 vcpu_regs_get(vm, VCPU_ID, &regs2);
153 TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
154 "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
155 (ulong) regs2.rdi, (ulong) regs2.rsi);
156 }
157
158done:
159 kvm_vm_free(vm);
160}
diff --git a/tools/testing/selftests/kvm/platform_info_test.c b/tools/testing/selftests/kvm/x86_64/platform_info_test.c
index 3764e7121265..eb3e7a838cb4 100644
--- a/tools/testing/selftests/kvm/platform_info_test.c
+++ b/tools/testing/selftests/kvm/x86_64/platform_info_test.c
@@ -19,7 +19,7 @@
19 19
20#include "test_util.h" 20#include "test_util.h"
21#include "kvm_util.h" 21#include "kvm_util.h"
22#include "x86.h" 22#include "processor.h"
23 23
24#define VCPU_ID 0 24#define VCPU_ID 0
25#define MSR_PLATFORM_INFO_MAX_TURBO_RATIO 0xff00 25#define MSR_PLATFORM_INFO_MAX_TURBO_RATIO 0xff00
@@ -48,7 +48,7 @@ static void set_msr_platform_info_enabled(struct kvm_vm *vm, bool enable)
48static void test_msr_platform_info_enabled(struct kvm_vm *vm) 48static void test_msr_platform_info_enabled(struct kvm_vm *vm)
49{ 49{
50 struct kvm_run *run = vcpu_state(vm, VCPU_ID); 50 struct kvm_run *run = vcpu_state(vm, VCPU_ID);
51 struct guest_args args; 51 struct ucall uc;
52 52
53 set_msr_platform_info_enabled(vm, true); 53 set_msr_platform_info_enabled(vm, true);
54 vcpu_run(vm, VCPU_ID); 54 vcpu_run(vm, VCPU_ID);
@@ -56,11 +56,11 @@ static void test_msr_platform_info_enabled(struct kvm_vm *vm)
56 "Exit_reason other than KVM_EXIT_IO: %u (%s),\n", 56 "Exit_reason other than KVM_EXIT_IO: %u (%s),\n",
57 run->exit_reason, 57 run->exit_reason,
58 exit_reason_str(run->exit_reason)); 58 exit_reason_str(run->exit_reason));
59 guest_args_read(vm, VCPU_ID, &args); 59 get_ucall(vm, VCPU_ID, &uc);
60 TEST_ASSERT(args.port == GUEST_PORT_SYNC, 60 TEST_ASSERT(uc.cmd == UCALL_SYNC,
61 "Received IO from port other than PORT_HOST_SYNC: %u\n", 61 "Received ucall other than UCALL_SYNC: %u\n",
62 run->io.port); 62 ucall);
63 TEST_ASSERT((args.arg1 & MSR_PLATFORM_INFO_MAX_TURBO_RATIO) == 63 TEST_ASSERT((uc.args[1] & MSR_PLATFORM_INFO_MAX_TURBO_RATIO) ==
64 MSR_PLATFORM_INFO_MAX_TURBO_RATIO, 64 MSR_PLATFORM_INFO_MAX_TURBO_RATIO,
65 "Expected MSR_PLATFORM_INFO to have max turbo ratio mask: %i.", 65 "Expected MSR_PLATFORM_INFO to have max turbo ratio mask: %i.",
66 MSR_PLATFORM_INFO_MAX_TURBO_RATIO); 66 MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
diff --git a/tools/testing/selftests/kvm/set_sregs_test.c b/tools/testing/selftests/kvm/x86_64/set_sregs_test.c
index 881419d5746e..35640e8e95bc 100644
--- a/tools/testing/selftests/kvm/set_sregs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/set_sregs_test.c
@@ -22,7 +22,7 @@
22#include "test_util.h" 22#include "test_util.h"
23 23
24#include "kvm_util.h" 24#include "kvm_util.h"
25#include "x86.h" 25#include "processor.h"
26 26
27#define VCPU_ID 5 27#define VCPU_ID 5
28 28
diff --git a/tools/testing/selftests/kvm/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c
index 900e3e9dfb9f..03da41f0f736 100644
--- a/tools/testing/selftests/kvm/state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/state_test.c
@@ -17,7 +17,7 @@
17#include "test_util.h" 17#include "test_util.h"
18 18
19#include "kvm_util.h" 19#include "kvm_util.h"
20#include "x86.h" 20#include "processor.h"
21#include "vmx.h" 21#include "vmx.h"
22 22
23#define VCPU_ID 5 23#define VCPU_ID 5
@@ -26,20 +26,20 @@ static bool have_nested_state;
26 26
27void l2_guest_code(void) 27void l2_guest_code(void)
28{ 28{
29 GUEST_SYNC(5); 29 GUEST_SYNC(6);
30 30
31 /* Exit to L1 */ 31 /* Exit to L1 */
32 vmcall(); 32 vmcall();
33 33
34 /* L1 has now set up a shadow VMCS for us. */ 34 /* L1 has now set up a shadow VMCS for us. */
35 GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee); 35 GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
36 GUEST_SYNC(9); 36 GUEST_SYNC(10);
37 GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee); 37 GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
38 GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0fffee)); 38 GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0fffee));
39 GUEST_SYNC(10); 39 GUEST_SYNC(11);
40 GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0fffee); 40 GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0fffee);
41 GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0ffffee)); 41 GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0ffffee));
42 GUEST_SYNC(11); 42 GUEST_SYNC(12);
43 43
44 /* Done, exit to L1 and never come back. */ 44 /* Done, exit to L1 and never come back. */
45 vmcall(); 45 vmcall();
@@ -52,15 +52,17 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
52 52
53 GUEST_ASSERT(vmx_pages->vmcs_gpa); 53 GUEST_ASSERT(vmx_pages->vmcs_gpa);
54 GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages)); 54 GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
55 GUEST_SYNC(3);
56 GUEST_ASSERT(load_vmcs(vmx_pages));
55 GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa); 57 GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
56 58
57 GUEST_SYNC(3); 59 GUEST_SYNC(4);
58 GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa); 60 GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
59 61
60 prepare_vmcs(vmx_pages, l2_guest_code, 62 prepare_vmcs(vmx_pages, l2_guest_code,
61 &l2_guest_stack[L2_GUEST_STACK_SIZE]); 63 &l2_guest_stack[L2_GUEST_STACK_SIZE]);
62 64
63 GUEST_SYNC(4); 65 GUEST_SYNC(5);
64 GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa); 66 GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
65 GUEST_ASSERT(!vmlaunch()); 67 GUEST_ASSERT(!vmlaunch());
66 GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa); 68 GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
@@ -72,7 +74,7 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
72 GUEST_ASSERT(!vmresume()); 74 GUEST_ASSERT(!vmresume());
73 GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL); 75 GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
74 76
75 GUEST_SYNC(6); 77 GUEST_SYNC(7);
76 GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL); 78 GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
77 79
78 GUEST_ASSERT(!vmresume()); 80 GUEST_ASSERT(!vmresume());
@@ -85,12 +87,12 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
85 87
86 GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa)); 88 GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
87 GUEST_ASSERT(vmlaunch()); 89 GUEST_ASSERT(vmlaunch());
88 GUEST_SYNC(7); 90 GUEST_SYNC(8);
89 GUEST_ASSERT(vmlaunch()); 91 GUEST_ASSERT(vmlaunch());
90 GUEST_ASSERT(vmresume()); 92 GUEST_ASSERT(vmresume());
91 93
92 vmwrite(GUEST_RIP, 0xc0ffee); 94 vmwrite(GUEST_RIP, 0xc0ffee);
93 GUEST_SYNC(8); 95 GUEST_SYNC(9);
94 GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee); 96 GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
95 97
96 GUEST_ASSERT(!vmptrld(vmx_pages->vmcs_gpa)); 98 GUEST_ASSERT(!vmptrld(vmx_pages->vmcs_gpa));
@@ -101,7 +103,7 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
101 GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee); 103 GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
102 GUEST_ASSERT(vmlaunch()); 104 GUEST_ASSERT(vmlaunch());
103 GUEST_ASSERT(vmresume()); 105 GUEST_ASSERT(vmresume());
104 GUEST_SYNC(12); 106 GUEST_SYNC(13);
105 GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee); 107 GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
106 GUEST_ASSERT(vmlaunch()); 108 GUEST_ASSERT(vmlaunch());
107 GUEST_ASSERT(vmresume()); 109 GUEST_ASSERT(vmresume());
@@ -127,6 +129,7 @@ int main(int argc, char *argv[])
127 struct kvm_vm *vm; 129 struct kvm_vm *vm;
128 struct kvm_run *run; 130 struct kvm_run *run;
129 struct kvm_x86_state *state; 131 struct kvm_x86_state *state;
132 struct ucall uc;
130 int stage; 133 int stage;
131 134
132 struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1); 135 struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
@@ -155,23 +158,23 @@ int main(int argc, char *argv[])
155 158
156 memset(&regs1, 0, sizeof(regs1)); 159 memset(&regs1, 0, sizeof(regs1));
157 vcpu_regs_get(vm, VCPU_ID, &regs1); 160 vcpu_regs_get(vm, VCPU_ID, &regs1);
158 switch (run->io.port) { 161 switch (get_ucall(vm, VCPU_ID, &uc)) {
159 case GUEST_PORT_ABORT: 162 case UCALL_ABORT:
 160 TEST_ASSERT(false, "%s at %s:%d", (const char *) regs1.rdi, 163 TEST_ASSERT(false, "%s at %s:%ld", (const char *)uc.args[0],
161 __FILE__, regs1.rsi); 164 __FILE__, uc.args[1]);
162 /* NOT REACHED */ 165 /* NOT REACHED */
163 case GUEST_PORT_SYNC: 166 case UCALL_SYNC:
164 break; 167 break;
165 case GUEST_PORT_DONE: 168 case UCALL_DONE:
166 goto done; 169 goto done;
167 default: 170 default:
 168 TEST_ASSERT(false, "Unknown port 0x%x.", run->io.port); 171 TEST_ASSERT(false, "Unknown ucall 0x%lx.", uc.cmd);
169 } 172 }
170 173
171 /* PORT_SYNC is handled here. */ 174 /* UCALL_SYNC is handled here. */
172 TEST_ASSERT(!strcmp((const char *)regs1.rdi, "hello") && 175 TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
173 regs1.rsi == stage, "Unexpected register values vmexit #%lx, got %lx", 176 uc.args[1] == stage, "Unexpected register values vmexit #%lx, got %lx",
 174 stage, (ulong) regs1.rsi); 177 (ulong)stage, (ulong)uc.args[1]);
175 178
176 state = vcpu_save_state(vm, VCPU_ID); 179 state = vcpu_save_state(vm, VCPU_ID);
177 kvm_vm_release(vm); 180 kvm_vm_release(vm);
diff --git a/tools/testing/selftests/kvm/sync_regs_test.c b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c
index 213343e5dff9..c8478ce9ea77 100644
--- a/tools/testing/selftests/kvm/sync_regs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c
@@ -19,7 +19,7 @@
19 19
20#include "test_util.h" 20#include "test_util.h"
21#include "kvm_util.h" 21#include "kvm_util.h"
22#include "x86.h" 22#include "processor.h"
23 23
24#define VCPU_ID 5 24#define VCPU_ID 5
25 25
diff --git a/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
index 49bcc68b0235..18fa64db0d7a 100644
--- a/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * gtests/tests/vmx_tsc_adjust_test.c 2 * vmx_tsc_adjust_test
3 * 3 *
4 * Copyright (C) 2018, Google LLC. 4 * Copyright (C) 2018, Google LLC.
5 * 5 *
@@ -22,13 +22,13 @@
22 22
23#include "test_util.h" 23#include "test_util.h"
24#include "kvm_util.h" 24#include "kvm_util.h"
25#include "x86.h" 25#include "processor.h"
26#include "vmx.h" 26#include "vmx.h"
27 27
28#include <string.h> 28#include <string.h>
29#include <sys/ioctl.h> 29#include <sys/ioctl.h>
30 30
31#include "../kselftest.h" 31#include "kselftest.h"
32 32
33#ifndef MSR_IA32_TSC_ADJUST 33#ifndef MSR_IA32_TSC_ADJUST
34#define MSR_IA32_TSC_ADJUST 0x3b 34#define MSR_IA32_TSC_ADJUST 0x3b
@@ -94,6 +94,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
94 check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE); 94 check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
95 95
96 GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages)); 96 GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
97 GUEST_ASSERT(load_vmcs(vmx_pages));
97 98
98 /* Prepare the VMCS for L2 execution. */ 99 /* Prepare the VMCS for L2 execution. */
99 prepare_vmcs(vmx_pages, l2_guest_code, 100 prepare_vmcs(vmx_pages, l2_guest_code,
@@ -146,26 +147,25 @@ int main(int argc, char *argv[])
146 147
147 for (;;) { 148 for (;;) {
148 volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID); 149 volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
149 struct guest_args args; 150 struct ucall uc;
150 151
151 vcpu_run(vm, VCPU_ID); 152 vcpu_run(vm, VCPU_ID);
152 guest_args_read(vm, VCPU_ID, &args);
153 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 153 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
154 "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n", 154 "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
155 run->exit_reason, 155 run->exit_reason,
156 exit_reason_str(run->exit_reason)); 156 exit_reason_str(run->exit_reason));
157 157
158 switch (args.port) { 158 switch (get_ucall(vm, VCPU_ID, &uc)) {
159 case GUEST_PORT_ABORT: 159 case UCALL_ABORT:
160 TEST_ASSERT(false, "%s", (const char *) args.arg0); 160 TEST_ASSERT(false, "%s", (const char *)uc.args[0]);
161 /* NOT REACHED */ 161 /* NOT REACHED */
162 case GUEST_PORT_SYNC: 162 case UCALL_SYNC:
163 report(args.arg1); 163 report(uc.args[1]);
164 break; 164 break;
165 case GUEST_PORT_DONE: 165 case UCALL_DONE:
166 goto done; 166 goto done;
167 default: 167 default:
 168 TEST_ASSERT(false, "Unknown port 0x%x.", args.port); 168 TEST_ASSERT(false, "Unknown ucall 0x%lx.", uc.cmd);
169 } 169 }
170 } 170 }
171 171