author     Linus Torvalds <torvalds@linux-foundation.org>   2018-08-22 16:52:44 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2018-08-22 16:52:44 -0400
commit     b372115311942202346d93849991f07382783ef1 (patch)
tree       14d52554acb0cdba1774be95d3877c47fda8bbff /tools
parent     5bed49adfe899667887db0739830190309c9011b (diff)
parent     0027ff2a75f9dcf0537ac0a65c5840b0e21a4950 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull second set of KVM updates from Paolo Bonzini:

 "ARM:
   - Support for Group0 interrupts in guests
   - Cache management optimizations for ARMv8.4 systems
   - Userspace interface for RAS
   - Fault path optimization
   - Emulated physical timer fixes
   - Random cleanups

  x86:
   - fixes for L1TF
   - a new test case
   - non-support for SGX (inject the right exception in the guest)
   - fix lockdep false positive"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (49 commits)
  KVM: VMX: fixes for vmentry_l1d_flush module parameter
  kvm: selftest: add dirty logging test
  kvm: selftest: pass in extra memory when create vm
  kvm: selftest: include the tools headers
  kvm: selftest: unify the guest port macros
  tools: introduce test_and_clear_bit
  KVM: x86: SVM: Call x86_spec_ctrl_set_guest/host() with interrupts disabled
  KVM: vmx: Inject #UD for SGX ENCLS instruction in guest
  KVM: vmx: Add defines for SGX ENCLS exiting
  x86/kvm/vmx: Fix coding style in vmx_setup_l1d_flush()
  x86: kvm: avoid unused variable warning
  KVM: Documentation: rename the capability of KVM_CAP_ARM_SET_SERROR_ESR
  KVM: arm/arm64: Skip updating PTE entry if no change
  KVM: arm/arm64: Skip updating PMD entry if no change
  KVM: arm: Use true and false for boolean values
  KVM: arm/arm64: vgic: Do not use spin_lock_irqsave/restore with irq disabled
  KVM: arm/arm64: vgic: Move DEBUG_SPINLOCK_BUG_ON to vgic.h
  KVM: arm: vgic-v3: Add support for ICC_SGI0R and ICC_ASGI1R accesses
  KVM: arm64: vgic-v3: Add support for ICC_SGI0R_EL1 and ICC_ASGI1R_EL1 accesses
  KVM: arm/arm64: vgic-v3: Add core support for Group0 SGIs
  ...
Diffstat (limited to 'tools')
-rw-r--r--  tools/include/linux/bitmap.h                       |  17
-rw-r--r--  tools/testing/selftests/kvm/Makefile               |   5
-rw-r--r--  tools/testing/selftests/kvm/cr4_cpuid_sync_test.c  |  32
-rw-r--r--  tools/testing/selftests/kvm/dirty_log_test.c       | 308
-rw-r--r--  tools/testing/selftests/kvm/include/kvm_util.h     |  45
-rw-r--r--  tools/testing/selftests/kvm/include/test_util.h    |   2
-rw-r--r--  tools/testing/selftests/kvm/lib/kvm_util.c         |  58
-rw-r--r--  tools/testing/selftests/kvm/lib/x86.c              |  18
-rw-r--r--  tools/testing/selftests/kvm/set_sregs_test.c       |   2
-rw-r--r--  tools/testing/selftests/kvm/state_test.c           |  32
-rw-r--r--  tools/testing/selftests/kvm/sync_regs_test.c       |  21
-rw-r--r--  tools/testing/selftests/kvm/vmx_tsc_adjust_test.c  |  43
12 files changed, 477 insertions(+), 106 deletions(-)
diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h
index 63440cc8d618..e63662db131b 100644
--- a/tools/include/linux/bitmap.h
+++ b/tools/include/linux/bitmap.h
@@ -97,6 +97,23 @@ static inline int test_and_set_bit(int nr, unsigned long *addr)
 }
 
 /**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ */
+static inline int test_and_clear_bit(int nr, unsigned long *addr)
+{
+        unsigned long mask = BIT_MASK(nr);
+        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+        unsigned long old;
+
+        old = *p;
+        *p = old & ~mask;
+
+        return (old & mask) != 0;
+}
+
+/**
  * bitmap_alloc - Allocate bitmap
  * @nbits: Number of bits
  */
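
The new helper mirrors the existing test_and_set_bit() in the tools headers: a plain, non-atomic read-modify-write, which is enough for the single-threaded verification loop in the dirty logging test added below. A minimal, self-contained sketch of its semantics (not part of the commit; BIT_MASK()/BIT_WORD() are redefined locally here only so the example builds outside tools/include):

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG   (sizeof(unsigned long) * CHAR_BIT)
#define BIT_MASK(nr)    (1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)    ((nr) / BITS_PER_LONG)

static inline int test_and_clear_bit(int nr, unsigned long *addr)
{
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
        unsigned long old = *p;

        *p = old & ~mask;
        return (old & mask) != 0;
}

int main(void)
{
        unsigned long bmap[2] = { 0 };

        bmap[BIT_WORD(70)] |= BIT_MASK(70);             /* mark bit 70 */
        printf("%d\n", test_and_clear_bit(70, bmap));   /* 1: was set, now cleared */
        printf("%d\n", test_and_clear_bit(70, bmap));   /* 0: already clear */
        return 0;
}
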
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index dd0e5163f01f..03b0f551bedf 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -11,13 +11,16 @@ TEST_GEN_PROGS_x86_64 += sync_regs_test
 TEST_GEN_PROGS_x86_64 += vmx_tsc_adjust_test
 TEST_GEN_PROGS_x86_64 += cr4_cpuid_sync_test
 TEST_GEN_PROGS_x86_64 += state_test
+TEST_GEN_PROGS_x86_64 += dirty_log_test
 
 TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M))
 LIBKVM += $(LIBKVM_$(UNAME_M))
 
 INSTALL_HDR_PATH = $(top_srcdir)/usr
 LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
-CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -I..
+LINUX_TOOL_INCLUDE = $(top_srcdir)tools/include
+CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -I..
+LDFLAGS += -lpthread
 
 # After inclusion, $(OUTPUT) is defined and
 # $(TEST_GEN_PROGS) starts with $(OUTPUT)/
diff --git a/tools/testing/selftests/kvm/cr4_cpuid_sync_test.c b/tools/testing/selftests/kvm/cr4_cpuid_sync_test.c
index 8346b33c2073..11ec358bf969 100644
--- a/tools/testing/selftests/kvm/cr4_cpuid_sync_test.c
+++ b/tools/testing/selftests/kvm/cr4_cpuid_sync_test.c
@@ -23,20 +23,6 @@
 #define X86_FEATURE_OSXSAVE (1<<27)
 #define VCPU_ID 1
 
-enum {
-        GUEST_UPDATE_CR4 = 0x1000,
-        GUEST_FAILED,
-        GUEST_DONE,
-};
-
-static void exit_to_hv(uint16_t port)
-{
-        __asm__ __volatile__("in %[port], %%al"
-                             :
-                             : [port]"d"(port)
-                             : "rax");
-}
-
 static inline bool cr4_cpuid_is_sync(void)
 {
         int func, subfunc;
@@ -64,17 +50,15 @@ static void guest_code(void)
         set_cr4(cr4);
 
         /* verify CR4.OSXSAVE == CPUID.OSXSAVE */
-        if (!cr4_cpuid_is_sync())
-                exit_to_hv(GUEST_FAILED);
+        GUEST_ASSERT(cr4_cpuid_is_sync());
 
         /* notify hypervisor to change CR4 */
-        exit_to_hv(GUEST_UPDATE_CR4);
+        GUEST_SYNC(0);
 
         /* check again */
-        if (!cr4_cpuid_is_sync())
-                exit_to_hv(GUEST_FAILED);
+        GUEST_ASSERT(cr4_cpuid_is_sync());
 
-        exit_to_hv(GUEST_DONE);
+        GUEST_DONE();
 }
 
 int main(int argc, char *argv[])
@@ -95,7 +79,7 @@ int main(int argc, char *argv[])
         setbuf(stdout, NULL);
 
         /* Create VM */
-        vm = vm_create_default(VCPU_ID, guest_code);
+        vm = vm_create_default(VCPU_ID, 0, guest_code);
         vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
         run = vcpu_state(vm, VCPU_ID);
 
@@ -104,16 +88,16 @@ int main(int argc, char *argv[])
 
         if (run->exit_reason == KVM_EXIT_IO) {
                 switch (run->io.port) {
-                case GUEST_UPDATE_CR4:
+                case GUEST_PORT_SYNC:
                         /* emulate hypervisor clearing CR4.OSXSAVE */
                         vcpu_sregs_get(vm, VCPU_ID, &sregs);
                         sregs.cr4 &= ~X86_CR4_OSXSAVE;
                         vcpu_sregs_set(vm, VCPU_ID, &sregs);
                         break;
-                case GUEST_FAILED:
+                case GUEST_PORT_ABORT:
                         TEST_ASSERT(false, "Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit.");
                         break;
-                case GUEST_DONE:
+                case GUEST_PORT_DONE:
                         goto done;
                 default:
                         TEST_ASSERT(false, "Unknown port 0x%x.",
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
new file mode 100644
index 000000000000..0c2cdc105f96
--- /dev/null
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KVM dirty page logging test
+ *
+ * Copyright (C) 2018, Red Hat, Inc.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <time.h>
+#include <pthread.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+
+#define DEBUG printf
+
+#define VCPU_ID 1
+/* The memory slot index to track dirty pages */
+#define TEST_MEM_SLOT_INDEX 1
+/*
+ * GPA offset of the testing memory slot. Must be bigger than the
+ * default vm mem slot, which is DEFAULT_GUEST_PHY_PAGES.
+ */
+#define TEST_MEM_OFFSET (1ULL << 30) /* 1G */
+/* Size of the testing memory slot */
+#define TEST_MEM_PAGES (1ULL << 18) /* 1G for 4K pages */
+/* How many pages to dirty for each guest loop */
+#define TEST_PAGES_PER_LOOP 1024
+/* How many host loops to run (one KVM_GET_DIRTY_LOG for each loop) */
+#define TEST_HOST_LOOP_N 32
+/* Interval for each host loop (ms) */
+#define TEST_HOST_LOOP_INTERVAL 10
+
+/*
+ * Guest variables. We use these variables to share data between host
+ * and guest. There are two copies of the variables, one in host memory
+ * (which is unused) and one in guest memory. When the host wants to
+ * access these variables, it needs to call addr_gva2hva() to access the
+ * guest copy.
+ */
+uint64_t guest_random_array[TEST_PAGES_PER_LOOP];
+uint64_t guest_iteration;
+uint64_t guest_page_size;
+
+/*
+ * Writes to the first byte of a random page within the testing memory
+ * region continuously.
+ */
+void guest_code(void)
+{
+        int i = 0;
+        uint64_t volatile *array = guest_random_array;
+        uint64_t volatile *guest_addr;
+
+        while (true) {
+                for (i = 0; i < TEST_PAGES_PER_LOOP; i++) {
+                        /*
+                         * Write to the first 8 bytes of a random page
+                         * on the testing memory region.
+                         */
+                        guest_addr = (uint64_t *)
+                            (TEST_MEM_OFFSET +
+                             (array[i] % TEST_MEM_PAGES) * guest_page_size);
+                        *guest_addr = guest_iteration;
+                }
+                /* Tell the host that we need more random numbers */
+                GUEST_SYNC(1);
+        }
+}
+
+/*
+ * Host variables. These variables should only be used by the host
+ * rather than the guest.
+ */
+bool host_quit;
+
+/* Points to the test VM memory region on which we track dirty logs */
+void *host_test_mem;
+
+/* For statistics only */
+uint64_t host_dirty_count;
+uint64_t host_clear_count;
+uint64_t host_track_next_count;
+
+/*
+ * We use this bitmap to track some pages that should have its dirty
+ * bit set in the _next_ iteration. For example, if we detected the
+ * page value changed to current iteration but at the same time the
+ * page bit is cleared in the latest bitmap, then the system must
+ * report that write in the next get dirty log call.
+ */
+unsigned long *host_bmap_track;
+
+void generate_random_array(uint64_t *guest_array, uint64_t size)
+{
+        uint64_t i;
+
+        for (i = 0; i < size; i++) {
+                guest_array[i] = random();
+        }
+}
+
+void *vcpu_worker(void *data)
+{
+        int ret;
+        uint64_t loops, *guest_array, pages_count = 0;
+        struct kvm_vm *vm = data;
+        struct kvm_run *run;
+        struct guest_args args;
+
+        run = vcpu_state(vm, VCPU_ID);
+
+        /* Retrieve the guest random array pointer and cache it */
+        guest_array = addr_gva2hva(vm, (vm_vaddr_t)guest_random_array);
+
+        DEBUG("VCPU starts\n");
+
+        generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
+
+        while (!READ_ONCE(host_quit)) {
+                /* Let the guest to dirty these random pages */
+                ret = _vcpu_run(vm, VCPU_ID);
+                guest_args_read(vm, VCPU_ID, &args);
+                if (run->exit_reason == KVM_EXIT_IO &&
+                    args.port == GUEST_PORT_SYNC) {
+                        pages_count += TEST_PAGES_PER_LOOP;
+                        generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
+                } else {
+                        TEST_ASSERT(false,
+                                    "Invalid guest sync status: "
+                                    "exit_reason=%s\n",
+                                    exit_reason_str(run->exit_reason));
+                }
+        }
+
+        DEBUG("VCPU exits, dirtied %"PRIu64" pages\n", pages_count);
+
+        return NULL;
+}
+
+void vm_dirty_log_verify(unsigned long *bmap, uint64_t iteration)
+{
+        uint64_t page;
+        uint64_t volatile *value_ptr;
+
+        for (page = 0; page < TEST_MEM_PAGES; page++) {
+                value_ptr = host_test_mem + page * getpagesize();
+
+                /* If this is a special page that we were tracking... */
+                if (test_and_clear_bit(page, host_bmap_track)) {
+                        host_track_next_count++;
+                        TEST_ASSERT(test_bit(page, bmap),
+                                    "Page %"PRIu64" should have its dirty bit "
+                                    "set in this iteration but it is missing",
+                                    page);
+                }
+
+                if (test_bit(page, bmap)) {
+                        host_dirty_count++;
+                        /*
+                         * If the bit is set, the value written onto
+                         * the corresponding page should be either the
+                         * previous iteration number or the current one.
+                         */
+                        TEST_ASSERT(*value_ptr == iteration ||
+                                    *value_ptr == iteration - 1,
+                                    "Set page %"PRIu64" value %"PRIu64
+                                    " incorrect (iteration=%"PRIu64")",
+                                    page, *value_ptr, iteration);
+                } else {
+                        host_clear_count++;
+                        /*
+                         * If cleared, the value written can be any
+                         * value smaller or equals to the iteration
+                         * number. Note that the value can be exactly
+                         * (iteration-1) if that write can happen
+                         * like this:
+                         *
+                         * (1) increase loop count to "iteration-1"
+                         * (2) write to page P happens (with value
+                         *     "iteration-1")
+                         * (3) get dirty log for "iteration-1"; we'll
+                         *     see that page P bit is set (dirtied),
+                         *     and not set the bit in host_bmap_track
+                         * (4) increase loop count to "iteration"
+                         *     (which is current iteration)
+                         * (5) get dirty log for current iteration,
+                         *     we'll see that page P is cleared, with
+                         *     value "iteration-1".
+                         */
+                        TEST_ASSERT(*value_ptr <= iteration,
+                                    "Clear page %"PRIu64" value %"PRIu64
+                                    " incorrect (iteration=%"PRIu64")",
+                                    page, *value_ptr, iteration);
+                        if (*value_ptr == iteration) {
+                                /*
+                                 * This page is _just_ modified; it
+                                 * should report its dirtyness in the
+                                 * next run
+                                 */
+                                set_bit(page, host_bmap_track);
+                        }
+                }
+        }
+}
+
+void help(char *name)
+{
+        puts("");
+        printf("usage: %s [-i iterations] [-I interval] [-h]\n", name);
+        puts("");
+        printf(" -i: specify iteration counts (default: %"PRIu64")\n",
+               TEST_HOST_LOOP_N);
+        printf(" -I: specify interval in ms (default: %"PRIu64" ms)\n",
+               TEST_HOST_LOOP_INTERVAL);
+        puts("");
+        exit(0);
+}
+
+int main(int argc, char *argv[])
+{
+        pthread_t vcpu_thread;
+        struct kvm_vm *vm;
+        uint64_t volatile *psize, *iteration;
+        unsigned long *bmap, iterations = TEST_HOST_LOOP_N,
+                interval = TEST_HOST_LOOP_INTERVAL;
+        int opt;
+
+        while ((opt = getopt(argc, argv, "hi:I:")) != -1) {
+                switch (opt) {
+                case 'i':
+                        iterations = strtol(optarg, NULL, 10);
+                        break;
+                case 'I':
+                        interval = strtol(optarg, NULL, 10);
+                        break;
+                case 'h':
+                default:
+                        help(argv[0]);
+                        break;
+                }
+        }
+
+        TEST_ASSERT(iterations > 2, "Iteration must be bigger than zero\n");
+        TEST_ASSERT(interval > 0, "Interval must be bigger than zero");
+
+        DEBUG("Test iterations: %"PRIu64", interval: %"PRIu64" (ms)\n",
+              iterations, interval);
+
+        srandom(time(0));
+
+        bmap = bitmap_alloc(TEST_MEM_PAGES);
+        host_bmap_track = bitmap_alloc(TEST_MEM_PAGES);
+
+        vm = vm_create_default(VCPU_ID, TEST_MEM_PAGES, guest_code);
+
+        /* Add an extra memory slot for testing dirty logging */
+        vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
+                                    TEST_MEM_OFFSET,
+                                    TEST_MEM_SLOT_INDEX,
+                                    TEST_MEM_PAGES,
+                                    KVM_MEM_LOG_DIRTY_PAGES);
+        /* Cache the HVA pointer of the region */
+        host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)TEST_MEM_OFFSET);
+
+        /* Do 1:1 mapping for the dirty track memory slot */
+        virt_map(vm, TEST_MEM_OFFSET, TEST_MEM_OFFSET,
+                 TEST_MEM_PAGES * getpagesize(), 0);
+
+        vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+
+        /* Tell the guest about the page size on the system */
+        psize = addr_gva2hva(vm, (vm_vaddr_t)&guest_page_size);
+        *psize = getpagesize();
+
+        /* Start the iterations */
+        iteration = addr_gva2hva(vm, (vm_vaddr_t)&guest_iteration);
+        *iteration = 1;
+
+        /* Start dirtying pages */
+        pthread_create(&vcpu_thread, NULL, vcpu_worker, vm);
+
+        while (*iteration < iterations) {
+                /* Give the vcpu thread some time to dirty some pages */
+                usleep(interval * 1000);
+                kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
+                vm_dirty_log_verify(bmap, *iteration);
+                (*iteration)++;
+        }
+
+        /* Tell the vcpu thread to quit */
+        host_quit = true;
+        pthread_join(vcpu_thread, NULL);
+
+        DEBUG("Total bits checked: dirty (%"PRIu64"), clear (%"PRIu64"), "
+              "track_next (%"PRIu64")\n", host_dirty_count, host_clear_count,
+              host_track_next_count);
+
+        free(bmap);
+        free(host_bmap_track);
+        kvm_vm_free(vm);
+
+        return 0;
+}
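
KVM's per-slot dirty bitmap is indexed by a page's offset within the tracked slot, not by absolute guest physical address, which is why the verification loop above walks pages 0..TEST_MEM_PAGES and reads each page through the cached HVA of the slot. A small sketch of that address arithmetic (not part of the commit; the constants mirror the test's layout and a 4 KiB page size is assumed):

#include <stdio.h>
#include <stdint.h>

#define TEST_MEM_OFFSET (1ULL << 30)    /* GPA where the tracked slot starts */
#define PAGE_SIZE_4K    4096ULL

/* Bit index into the slot's dirty bitmap for a GPA inside the slot. */
static inline uint64_t gpa_to_dirty_bit(uint64_t gpa)
{
        return (gpa - TEST_MEM_OFFSET) / PAGE_SIZE_4K;
}

/* Host pointer for the same page, given the cached HVA of the slot start. */
static inline void *gpa_to_hva(void *host_test_mem, uint64_t gpa)
{
        return (uint8_t *)host_test_mem + gpa_to_dirty_bit(gpa) * PAGE_SIZE_4K;
}

int main(void)
{
        uint8_t slot[3 * PAGE_SIZE_4K];                 /* stand-in for the slot's HVA */
        uint64_t gpa = TEST_MEM_OFFSET + 2 * PAGE_SIZE_4K;

        printf("dirty bit %llu, hva offset %td\n",
               (unsigned long long)gpa_to_dirty_bit(gpa),
               (uint8_t *)gpa_to_hva(slot, gpa) - slot);
        return 0;
}
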
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index d32632f71ab8..bb5a25fb82c6 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -55,6 +55,7 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm);
 void kvm_vm_free(struct kvm_vm *vmp);
 void kvm_vm_restart(struct kvm_vm *vmp, int perm);
 void kvm_vm_release(struct kvm_vm *vmp);
+void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log);
 
 int kvm_memcmp_hva_gva(void *hva,
                        struct kvm_vm *vm, const vm_vaddr_t gva, size_t len);
@@ -80,6 +81,8 @@ void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
 void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot, int gdt_memslot);
 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
                           uint32_t data_memslot, uint32_t pgd_memslot);
+void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
+              size_t size, uint32_t pgd_memslot);
 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
 void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
@@ -127,7 +130,8 @@ kvm_get_supported_cpuid_entry(uint32_t function)
         return kvm_get_supported_cpuid_index(function, 0);
 }
 
-struct kvm_vm *vm_create_default(uint32_t vcpuid, void *guest_code);
+struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_size,
+                                 void *guest_code);
 void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code);
 
 typedef void (*vmx_guest_code_t)(vm_vaddr_t vmxon_vaddr,
@@ -144,4 +148,43 @@ allocate_kvm_dirty_log(struct kvm_userspace_memory_region *region);
 
 int vm_create_device(struct kvm_vm *vm, struct kvm_create_device *cd);
 
+#define GUEST_PORT_SYNC         0x1000
+#define GUEST_PORT_ABORT        0x1001
+#define GUEST_PORT_DONE         0x1002
+
+static inline void __exit_to_l0(uint16_t port, uint64_t arg0, uint64_t arg1)
+{
+        __asm__ __volatile__("in %[port], %%al"
+                             :
+                             : [port]"d"(port), "D"(arg0), "S"(arg1)
+                             : "rax");
+}
+
+/*
+ * Allows to pass three arguments to the host: port is 16bit wide,
+ * arg0 & arg1 are 64bit wide
+ */
+#define GUEST_SYNC_ARGS(_port, _arg0, _arg1) \
+        __exit_to_l0(_port, (uint64_t) (_arg0), (uint64_t) (_arg1))
+
+#define GUEST_ASSERT(_condition) do {                           \
+        if (!(_condition))                                      \
+                GUEST_SYNC_ARGS(GUEST_PORT_ABORT,               \
+                                "Failed guest assert: "         \
+                                #_condition, __LINE__);         \
+        } while (0)
+
+#define GUEST_SYNC(stage)  GUEST_SYNC_ARGS(GUEST_PORT_SYNC, "hello", stage)
+
+#define GUEST_DONE()  GUEST_SYNC_ARGS(GUEST_PORT_DONE, 0, 0)
+
+struct guest_args {
+        uint64_t arg0;
+        uint64_t arg1;
+        uint16_t port;
+} __attribute__ ((packed));
+
+void guest_args_read(struct kvm_vm *vm, uint32_t vcpu_id,
+                     struct guest_args *args);
+
 #endif /* SELFTEST_KVM_UTIL_H */
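
With these macros, a guest body reports progress and failures through port I/O instead of per-test helpers, and the host distinguishes the three cases by port number. A minimal guest-side sketch (not a test from this commit, just an illustration assuming the usual selftest build environment):

#include "kvm_util.h"

/* Hypothetical guest entry point for a selftest. */
static void guest_code(void)
{
        int stage;

        for (stage = 0; stage < 3; stage++) {
                GUEST_ASSERT(stage >= 0);       /* exits via GUEST_PORT_ABORT on failure */
                GUEST_SYNC(stage);              /* hands control to the host via GUEST_PORT_SYNC */
        }

        GUEST_DONE();                           /* final exit via GUEST_PORT_DONE */
}
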
diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h
index ac53730b30aa..73c3933436ec 100644
--- a/tools/testing/selftests/kvm/include/test_util.h
+++ b/tools/testing/selftests/kvm/include/test_util.h
@@ -28,8 +28,6 @@ int test_seq_read(const char *path, char **bufp, size_t *sizep);
 void test_assert(bool exp, const char *exp_str,
                  const char *file, unsigned int line, const char *fmt, ...);
 
-#define ARRAY_SIZE(array) (sizeof(array) / sizeof((array)[0]))
-
 #define TEST_ASSERT(e, fmt, ...) \
         test_assert((e), #e, __FILE__, __LINE__, fmt, ##__VA_ARGS__)
 
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 643309d6de74..e9ba389c48db 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -14,6 +14,7 @@
 #include <sys/mman.h>
 #include <sys/types.h>
 #include <sys/stat.h>
+#include <linux/kernel.h>
 
 #define KVM_DEV_PATH "/dev/kvm"
 
@@ -168,6 +169,16 @@ void kvm_vm_restart(struct kvm_vm *vmp, int perm)
         }
 }
 
+void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
+{
+        struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };
+        int ret;
+
+        ret = ioctl(vm->fd, KVM_GET_DIRTY_LOG, &args);
+        TEST_ASSERT(ret == 0, "%s: KVM_GET_DIRTY_LOG failed: %s",
+                    strerror(-ret));
+}
+
 /* Userspace Memory Region Find
  *
  * Input Args:
@@ -923,6 +934,39 @@ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
         return vaddr_start;
 }
 
+/*
+ * Map a range of VM virtual address to the VM's physical address
+ *
+ * Input Args:
+ *   vm - Virtual Machine
+ *   vaddr - Virtuall address to map
+ *   paddr - VM Physical Address
+ *   size - The size of the range to map
+ *   pgd_memslot - Memory region slot for new virtual translation tables
+ *
+ * Output Args: None
+ *
+ * Return: None
+ *
+ * Within the VM given by vm, creates a virtual translation for the
+ * page range starting at vaddr to the page range starting at paddr.
+ */
+void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
+              size_t size, uint32_t pgd_memslot)
+{
+        size_t page_size = vm->page_size;
+        size_t npages = size / page_size;
+
+        TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow");
+        TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
+
+        while (npages--) {
+                virt_pg_map(vm, vaddr, paddr, pgd_memslot);
+                vaddr += page_size;
+                paddr += page_size;
+        }
+}
+
 /* Address VM Physical to Host Virtual
  *
  * Input Args:
@@ -1536,3 +1580,17 @@ void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
 {
         return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
 }
+
+void guest_args_read(struct kvm_vm *vm, uint32_t vcpu_id,
+                     struct guest_args *args)
+{
+        struct kvm_run *run = vcpu_state(vm, vcpu_id);
+        struct kvm_regs regs;
+
+        memset(&regs, 0, sizeof(regs));
+        vcpu_regs_get(vm, vcpu_id, &regs);
+
+        args->port = run->io.port;
+        args->arg0 = regs.rdi;
+        args->arg1 = regs.rsi;
+}
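
On the host side, guest_args_read() turns an "in" exit into a (port, arg0, arg1) triple by reading RDI/RSI, so run loops no longer need to pull raw kvm_regs themselves. A sketch of the resulting host loop (not part of the commit; VCPU_ID and the surrounding VM setup are assumed to match the tests in this series):

#include "kvm_util.h"
#include "test_util.h"

#define VCPU_ID 1       /* assumed to match the vCPU created by the test */

/* Run the guest until it signals GUEST_PORT_DONE, handling sync/abort exits. */
static void run_guest_until_done(struct kvm_vm *vm)
{
        struct kvm_run *run = vcpu_state(vm, VCPU_ID);
        struct guest_args args;

        for (;;) {
                vcpu_run(vm, VCPU_ID);
                TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
                            "Unexpected exit: %s",
                            exit_reason_str(run->exit_reason));

                guest_args_read(vm, VCPU_ID, &args);
                switch (args.port) {
                case GUEST_PORT_SYNC:
                        /* args.arg1 carries the stage passed to GUEST_SYNC() */
                        break;
                case GUEST_PORT_ABORT:
                        /* args.arg0 points at the guest's assertion message */
                        TEST_ASSERT(false, "%s", (const char *)args.arg0);
                        /* NOT REACHED */
                case GUEST_PORT_DONE:
                        return;
                default:
                        TEST_ASSERT(false, "Unknown port 0x%x.", args.port);
                }
        }
}
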
diff --git a/tools/testing/selftests/kvm/lib/x86.c b/tools/testing/selftests/kvm/lib/x86.c
index e38345252df5..a3122f1949a8 100644
--- a/tools/testing/selftests/kvm/lib/x86.c
+++ b/tools/testing/selftests/kvm/lib/x86.c
@@ -702,6 +702,9 @@ void vcpu_set_cpuid(struct kvm_vm *vm,
  *
  * Input Args:
  *   vcpuid - The id of the single VCPU to add to the VM.
+ *   extra_mem_pages - The size of extra memories to add (this will
+ *                     decide how much extra space we will need to
+ *                     setup the page tables using mem slot 0)
  *   guest_code - The vCPU's entry point
  *
  * Output Args: None
@@ -709,12 +712,23 @@ void vcpu_set_cpuid(struct kvm_vm *vm,
  * Return:
  *   Pointer to opaque structure that describes the created VM.
  */
-struct kvm_vm *vm_create_default(uint32_t vcpuid, void *guest_code)
+struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
+                                 void *guest_code)
 {
         struct kvm_vm *vm;
+        /*
+         * For x86 the maximum page table size for a memory region
+         * will be when only 4K pages are used. In that case the
+         * total extra size for page tables (for extra N pages) will
+         * be: N/512+N/512^2+N/512^3+... which is definitely smaller
+         * than N/512*2.
+         */
+        uint64_t extra_pg_pages = extra_mem_pages / 512 * 2;
 
         /* Create VM */
-        vm = vm_create(VM_MODE_FLAT48PG, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
+        vm = vm_create(VM_MODE_FLAT48PG,
+                       DEFAULT_GUEST_PHY_PAGES + extra_pg_pages,
+                       O_RDWR);
 
         /* Setup guest code */
         kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
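
As a worked instance of the estimate in that comment (not text from the commit): for the slot that dirty_log_test.c passes in, extra_mem_pages = 1ULL << 18, i.e. 1 GiB of 4 KiB pages, the series gives 2^18/512 = 512 last-level table pages plus roughly one page apiece for the upper levels, about 514 pages in total, while extra_pg_pages = 2^18/512 * 2 = 1024, so the simpler bound always leaves headroom.
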
diff --git a/tools/testing/selftests/kvm/set_sregs_test.c b/tools/testing/selftests/kvm/set_sregs_test.c
index 090fd3f19352..881419d5746e 100644
--- a/tools/testing/selftests/kvm/set_sregs_test.c
+++ b/tools/testing/selftests/kvm/set_sregs_test.c
@@ -36,7 +36,7 @@ int main(int argc, char *argv[])
         setbuf(stdout, NULL);
 
         /* Create VM */
-        vm = vm_create_default(VCPU_ID, NULL);
+        vm = vm_create_default(VCPU_ID, 0, NULL);
 
         vcpu_sregs_get(vm, VCPU_ID, &sregs);
         sregs.apic_base = 1 << 10;
diff --git a/tools/testing/selftests/kvm/state_test.c b/tools/testing/selftests/kvm/state_test.c
index ecabf25b7077..900e3e9dfb9f 100644
--- a/tools/testing/selftests/kvm/state_test.c
+++ b/tools/testing/selftests/kvm/state_test.c
@@ -21,28 +21,6 @@
 #include "vmx.h"
 
 #define VCPU_ID 5
-#define PORT_SYNC 0x1000
-#define PORT_ABORT 0x1001
-#define PORT_DONE 0x1002
-
-static inline void __exit_to_l0(uint16_t port, uint64_t arg0, uint64_t arg1)
-{
-        __asm__ __volatile__("in %[port], %%al"
-                             :
-                             : [port]"d"(port), "D"(arg0), "S"(arg1)
-                             : "rax");
-}
-
-#define exit_to_l0(_port, _arg0, _arg1) \
-        __exit_to_l0(_port, (uint64_t) (_arg0), (uint64_t) (_arg1))
-
-#define GUEST_ASSERT(_condition) do { \
-        if (!(_condition)) \
-                exit_to_l0(PORT_ABORT, "Failed guest assert: " #_condition, __LINE__);\
-} while (0)
-
-#define GUEST_SYNC(stage) \
-        exit_to_l0(PORT_SYNC, "hello", stage);
 
 static bool have_nested_state;
 
@@ -137,7 +115,7 @@ void guest_code(struct vmx_pages *vmx_pages)
         if (vmx_pages)
                 l1_guest_code(vmx_pages);
 
-        exit_to_l0(PORT_DONE, 0, 0);
+        GUEST_DONE();
 }
 
 int main(int argc, char *argv[])
@@ -154,7 +132,7 @@ int main(int argc, char *argv[])
         struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
 
         /* Create VM */
-        vm = vm_create_default(VCPU_ID, guest_code);
+        vm = vm_create_default(VCPU_ID, 0, guest_code);
         vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
         run = vcpu_state(vm, VCPU_ID);
 
@@ -178,13 +156,13 @@ int main(int argc, char *argv[])
                 memset(&regs1, 0, sizeof(regs1));
                 vcpu_regs_get(vm, VCPU_ID, &regs1);
                 switch (run->io.port) {
-                case PORT_ABORT:
+                case GUEST_PORT_ABORT:
                         TEST_ASSERT(false, "%s at %s:%d", (const char *) regs1.rdi,
                                     __FILE__, regs1.rsi);
                         /* NOT REACHED */
-                case PORT_SYNC:
+                case GUEST_PORT_SYNC:
                         break;
-                case PORT_DONE:
+                case GUEST_PORT_DONE:
                         goto done;
                 default:
                         TEST_ASSERT(false, "Unknown port 0x%x.", run->io.port);
diff --git a/tools/testing/selftests/kvm/sync_regs_test.c b/tools/testing/selftests/kvm/sync_regs_test.c
index eae1ece3c31b..213343e5dff9 100644
--- a/tools/testing/selftests/kvm/sync_regs_test.c
+++ b/tools/testing/selftests/kvm/sync_regs_test.c
@@ -22,28 +22,11 @@
 #include "x86.h"
 
 #define VCPU_ID 5
-#define PORT_HOST_SYNC 0x1000
-
-static void __exit_to_l0(uint16_t port, uint64_t arg0, uint64_t arg1)
-{
-        __asm__ __volatile__("in %[port], %%al"
-                             :
-                             : [port]"d"(port), "D"(arg0), "S"(arg1)
-                             : "rax");
-}
-
-#define exit_to_l0(_port, _arg0, _arg1) \
-        __exit_to_l0(_port, (uint64_t) (_arg0), (uint64_t) (_arg1))
-
-#define GUEST_ASSERT(_condition) do { \
-        if (!(_condition)) \
-                exit_to_l0(PORT_ABORT, "Failed guest assert: " #_condition, 0);\
-} while (0)
 
 void guest_code(void)
 {
         for (;;) {
-                exit_to_l0(PORT_HOST_SYNC, "hello", 0);
+                GUEST_SYNC(0);
                 asm volatile ("inc %r11");
         }
 }
@@ -111,7 +94,7 @@ int main(int argc, char *argv[])
         }
 
         /* Create VM */
-        vm = vm_create_default(VCPU_ID, guest_code);
+        vm = vm_create_default(VCPU_ID, 0, guest_code);
 
         run = vcpu_state(vm, VCPU_ID);
 
diff --git a/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c
index fc414c284368..49bcc68b0235 100644
--- a/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c
+++ b/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c
@@ -62,27 +62,12 @@ struct kvm_single_msr {
 /* The virtual machine object. */
 static struct kvm_vm *vm;
 
-#define exit_to_l0(_port, _arg) do_exit_to_l0(_port, (unsigned long) (_arg))
-static void do_exit_to_l0(uint16_t port, unsigned long arg)
-{
-        __asm__ __volatile__("in %[port], %%al"
-                             :
-                             : [port]"d"(port), "D"(arg)
-                             : "rax");
-}
-
-
-#define GUEST_ASSERT(_condition) do { \
-        if (!(_condition)) \
-                exit_to_l0(PORT_ABORT, "Failed guest assert: " #_condition); \
-} while (0)
-
 static void check_ia32_tsc_adjust(int64_t max)
 {
         int64_t adjust;
 
         adjust = rdmsr(MSR_IA32_TSC_ADJUST);
-        exit_to_l0(PORT_REPORT, adjust);
+        GUEST_SYNC(adjust);
         GUEST_ASSERT(adjust <= max);
 }
 
@@ -132,7 +117,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
 
         check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);
 
-        exit_to_l0(PORT_DONE, 0);
+        GUEST_DONE();
 }
 
 void report(int64_t val)
@@ -152,7 +137,7 @@ int main(int argc, char *argv[])
                 exit(KSFT_SKIP);
         }
 
-        vm = vm_create_default(VCPU_ID, (void *) l1_guest_code);
+        vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
         vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
 
         /* Allocate VMX pages and shared descriptors (vmx_pages). */
@@ -161,26 +146,26 @@ int main(int argc, char *argv[])
 
         for (;;) {
                 volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
-                struct kvm_regs regs;
+                struct guest_args args;
 
                 vcpu_run(vm, VCPU_ID);
-                vcpu_regs_get(vm, VCPU_ID, &regs);
+                guest_args_read(vm, VCPU_ID, &args);
                 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
-                            "Got exit_reason other than KVM_EXIT_IO: %u (%s), rip=%lx\n",
+                            "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
                             run->exit_reason,
-                            exit_reason_str(run->exit_reason), regs.rip);
+                            exit_reason_str(run->exit_reason));
 
-                switch (run->io.port) {
-                case PORT_ABORT:
-                        TEST_ASSERT(false, "%s", (const char *) regs.rdi);
+                switch (args.port) {
+                case GUEST_PORT_ABORT:
+                        TEST_ASSERT(false, "%s", (const char *) args.arg0);
                         /* NOT REACHED */
-                case PORT_REPORT:
-                        report(regs.rdi);
+                case GUEST_PORT_SYNC:
+                        report(args.arg1);
                         break;
-                case PORT_DONE:
+                case GUEST_PORT_DONE:
                         goto done;
                 default:
-                        TEST_ASSERT(false, "Unknown port 0x%x.", run->io.port);
+                        TEST_ASSERT(false, "Unknown port 0x%x.", args.port);
                 }
         }
 