author	Paolo Bonzini <pbonzini@redhat.com>	2019-09-26 09:01:15 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>	2019-09-27 07:13:40 -0400
commit	094444204570a5420d9e6ce3d4558877c3487856 (patch)
tree	0c799260bcfb97716794f3d62c5605e0d97a9486 /tools
parent	1f4e5fc83a4217fc61b23370b07573827329d7bd (diff)
selftests: kvm: add test for dirty logging inside nested guests

Check that accesses by nested guests are logged according to the
L1 physical addresses rather than L2.

Most of the patch is really adding EPT support to the testing
framework.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
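
For illustration, the property being tested can be condensed into a
stand-alone sketch (not part of the patch; ept_translate() is a
hypothetical stand-in for the EPT mapping the test builds, and the
addresses mirror the test's #defines):

#include <assert.h>
#include <stdint.h>

#define GUEST_TEST_MEM   0xc0000000ul	/* memslot base, an L1 GPA */
#define NESTED_TEST_MEM1 0xc0001000ul	/* L2 GPA, EPT-mapped to GUEST_TEST_MEM */

/* Hypothetical stand-in for the EPT walk set up by the test. */
static uint64_t ept_translate(uint64_t l2_gpa)
{
	(void)l2_gpa;
	return GUEST_TEST_MEM;	/* both L2 test pages alias this L1 page */
}

int main(void)
{
	uint64_t l1_gpa = ept_translate(NESTED_TEST_MEM1);
	uint64_t bit = (l1_gpa - GUEST_TEST_MEM) >> 12;	/* page index in slot */

	/* The dirty bitmap must be indexed by L1 GPA, not L2: page 0, not 1. */
	assert(bit == 0);
	return 0;
}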
Diffstat (limited to 'tools')
-rw-r--r--	tools/testing/selftests/kvm/Makefile	1
-rw-r--r--	tools/testing/selftests/kvm/include/x86_64/processor.h	3
-rw-r--r--	tools/testing/selftests/kvm/include/x86_64/vmx.h	14
-rw-r--r--	tools/testing/selftests/kvm/lib/kvm_util.c	2
-rw-r--r--	tools/testing/selftests/kvm/lib/kvm_util_internal.h	3
-rw-r--r--	tools/testing/selftests/kvm/lib/x86_64/vmx.c	201
-rw-r--r--	tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c	156
7 files changed, 377 insertions(+), 3 deletions(-)
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 62c591f87dab..fd84b7a78dcf 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -22,6 +22,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/smm_test
 TEST_GEN_PROGS_x86_64 += x86_64/state_test
 TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
+TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
 TEST_GEN_PROGS_x86_64 += clear_dirty_log_test
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index 0c17f2ee685e..ff234018219c 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -1083,6 +1083,9 @@ void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);
 #define VMX_BASIC_MEM_TYPE_WB	6LLU
 #define VMX_BASIC_INOUT	0x0040000000000000LLU
 
+/* VMX_EPT_VPID_CAP bits */
+#define VMX_EPT_VPID_CAP_AD_BITS	(1ULL << 21)
+
 /* MSR_IA32_VMX_MISC bits */
 #define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS	(1ULL << 29)
 #define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE	0x1F
diff --git a/tools/testing/selftests/kvm/include/x86_64/vmx.h b/tools/testing/selftests/kvm/include/x86_64/vmx.h
index 69b17055f63d..6ae5a47fe067 100644
--- a/tools/testing/selftests/kvm/include/x86_64/vmx.h
+++ b/tools/testing/selftests/kvm/include/x86_64/vmx.h
@@ -569,6 +569,10 @@ struct vmx_pages {
 	void *enlightened_vmcs_hva;
 	uint64_t enlightened_vmcs_gpa;
 	void *enlightened_vmcs;
+
+	void *eptp_hva;
+	uint64_t eptp_gpa;
+	void *eptp;
 };
 
 struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva);
@@ -576,4 +580,14 @@ bool prepare_for_vmx_operation(struct vmx_pages *vmx);
 void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
 bool load_vmcs(struct vmx_pages *vmx);
 
+void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
+		   uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot);
+void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
+		uint64_t nested_paddr, uint64_t paddr, uint64_t size,
+		uint32_t eptp_memslot);
+void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
+			uint32_t memslot, uint32_t eptp_memslot);
+void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
+		  uint32_t eptp_memslot);
+
 #endif /* SELFTEST_KVM_VMX_H */
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 80a338b5403c..41cf45416060 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -705,7 +705,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
  * on error (e.g. currently no memory region using memslot as a KVM
  * memory slot ID).
  */
-static struct userspace_mem_region *
+struct userspace_mem_region *
 memslot2region(struct kvm_vm *vm, uint32_t memslot)
 {
 	struct userspace_mem_region *region;
diff --git a/tools/testing/selftests/kvm/lib/kvm_util_internal.h b/tools/testing/selftests/kvm/lib/kvm_util_internal.h
index f36262e0f655..ac50c42750cf 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util_internal.h
+++ b/tools/testing/selftests/kvm/lib/kvm_util_internal.h
@@ -68,4 +68,7 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
 void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent);
 void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent);
 
+struct userspace_mem_region *
+memslot2region(struct kvm_vm *vm, uint32_t memslot);
+
 #endif /* SELFTEST_KVM_UTIL_INTERNAL_H */
diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
index 9cef0455b819..fab8f6b0bf52 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
@@ -7,11 +7,39 @@
 
 #include "test_util.h"
 #include "kvm_util.h"
+#include "../kvm_util_internal.h"
 #include "processor.h"
 #include "vmx.h"
 
+#define PAGE_SHIFT_4K  12
+
+#define KVM_EPT_PAGE_TABLE_MIN_PADDR 0x1c0000
+
 bool enable_evmcs;
 
+struct eptPageTableEntry {
+	uint64_t readable:1;
+	uint64_t writable:1;
+	uint64_t executable:1;
+	uint64_t memory_type:3;
+	uint64_t ignore_pat:1;
+	uint64_t page_size:1;
+	uint64_t accessed:1;
+	uint64_t dirty:1;
+	uint64_t ignored_11_10:2;
+	uint64_t address:40;
+	uint64_t ignored_62_52:11;
+	uint64_t suppress_ve:1;
+};
+
+struct eptPageTablePointer {
+	uint64_t memory_type:3;
+	uint64_t page_walk_length:3;
+	uint64_t ad_enabled:1;
+	uint64_t reserved_11_07:5;
+	uint64_t address:40;
+	uint64_t reserved_63_52:12;
+};
 int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id)
 {
 	uint16_t evmcs_ver;
@@ -174,15 +202,35 @@ bool load_vmcs(struct vmx_pages *vmx)
  */
 static inline void init_vmcs_control_fields(struct vmx_pages *vmx)
 {
+	uint32_t sec_exec_ctl = 0;
+
 	vmwrite(VIRTUAL_PROCESSOR_ID, 0);
 	vmwrite(POSTED_INTR_NV, 0);
 
 	vmwrite(PIN_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS));
-	if (!vmwrite(SECONDARY_VM_EXEC_CONTROL, 0))
+
+	if (vmx->eptp_gpa) {
+		uint64_t ept_paddr;
+		struct eptPageTablePointer eptp = {
+			.memory_type = VMX_BASIC_MEM_TYPE_WB,
+			.page_walk_length = 3, /* + 1 */
+			.ad_enabled = !!(rdmsr(MSR_IA32_VMX_EPT_VPID_CAP) & VMX_EPT_VPID_CAP_AD_BITS),
+			.address = vmx->eptp_gpa >> PAGE_SHIFT_4K,
+		};
+
+		memcpy(&ept_paddr, &eptp, sizeof(ept_paddr));
+		vmwrite(EPT_POINTER, ept_paddr);
+		sec_exec_ctl |= SECONDARY_EXEC_ENABLE_EPT;
+	}
+
+	if (!vmwrite(SECONDARY_VM_EXEC_CONTROL, sec_exec_ctl))
 		vmwrite(CPU_BASED_VM_EXEC_CONTROL,
 			rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS) | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
-	else
+	else {
 		vmwrite(CPU_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS));
+		GUEST_ASSERT(!sec_exec_ctl);
+	}
+
 	vmwrite(EXCEPTION_BITMAP, 0);
 	vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
 	vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, -1); /* Never match */
@@ -327,3 +375,152 @@ void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp)
 	init_vmcs_host_state();
 	init_vmcs_guest_state(guest_rip, guest_rsp);
 }
+
+void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
+		   uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot)
+{
+	uint16_t index[4];
+	struct eptPageTableEntry *pml4e;
+
+	TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
+		    "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
+
+	TEST_ASSERT((nested_paddr % vm->page_size) == 0,
+		    "Nested physical address not on page boundary,\n"
+		    "  nested_paddr: 0x%lx vm->page_size: 0x%x",
+		    nested_paddr, vm->page_size);
+	TEST_ASSERT((nested_paddr >> vm->page_shift) <= vm->max_gfn,
+		    "Nested physical address beyond maximum supported,\n"
+		    "  nested_paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+		    nested_paddr, vm->max_gfn, vm->page_size);
+	TEST_ASSERT((paddr % vm->page_size) == 0,
+		    "Physical address not on page boundary,\n"
+		    "  paddr: 0x%lx vm->page_size: 0x%x",
+		    paddr, vm->page_size);
+	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
+		    "Physical address beyond maximum supported,\n"
+		    "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+		    paddr, vm->max_gfn, vm->page_size);
+
+	index[0] = (nested_paddr >> 12) & 0x1ffu;
+	index[1] = (nested_paddr >> 21) & 0x1ffu;
+	index[2] = (nested_paddr >> 30) & 0x1ffu;
+	index[3] = (nested_paddr >> 39) & 0x1ffu;
+
+	/* Allocate page directory pointer table if not present. */
+	pml4e = vmx->eptp_hva;
+	if (!pml4e[index[3]].readable) {
+		pml4e[index[3]].address = vm_phy_page_alloc(vm,
+			KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot)
+			>> vm->page_shift;
+		pml4e[index[3]].writable = true;
+		pml4e[index[3]].readable = true;
+		pml4e[index[3]].executable = true;
+	}
+
+	/* Allocate page directory table if not present. */
+	struct eptPageTableEntry *pdpe;
+	pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
+	if (!pdpe[index[2]].readable) {
+		pdpe[index[2]].address = vm_phy_page_alloc(vm,
+			KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot)
+			>> vm->page_shift;
+		pdpe[index[2]].writable = true;
+		pdpe[index[2]].readable = true;
+		pdpe[index[2]].executable = true;
+	}
+
+	/* Allocate page table if not present. */
+	struct eptPageTableEntry *pde;
+	pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
+	if (!pde[index[1]].readable) {
+		pde[index[1]].address = vm_phy_page_alloc(vm,
+			KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot)
+			>> vm->page_shift;
+		pde[index[1]].writable = true;
+		pde[index[1]].readable = true;
+		pde[index[1]].executable = true;
+	}
+
+	/* Fill in page table entry. */
+	struct eptPageTableEntry *pte;
+	pte = addr_gpa2hva(vm, pde[index[1]].address * vm->page_size);
+	pte[index[0]].address = paddr >> vm->page_shift;
+	pte[index[0]].writable = true;
+	pte[index[0]].readable = true;
+	pte[index[0]].executable = true;
+
+	/*
+	 * For now mark these as accessed and dirty because the only
+	 * testcase we have needs that.  Can be reconsidered later.
+	 */
+	pte[index[0]].accessed = true;
+	pte[index[0]].dirty = true;
+}
+
+/*
+ * Map a range of EPT guest physical addresses to the VM's physical address
+ * range
+ *
+ * Input Args:
+ *   vm - Virtual Machine
+ *   nested_paddr - Nested guest physical address to map
+ *   paddr - VM Physical Address
+ *   size - The size of the range to map
+ *   eptp_memslot - Memory region slot for new virtual translation tables
+ *
+ * Output Args: None
+ *
+ * Return: None
+ *
+ * Within the VM given by vm, creates a nested guest translation for the
+ * page range starting at nested_paddr to the page range starting at paddr.
+ */
+void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
+		uint64_t nested_paddr, uint64_t paddr, uint64_t size,
+		uint32_t eptp_memslot)
+{
+	size_t page_size = vm->page_size;
+	size_t npages = size / page_size;
+
+	TEST_ASSERT(nested_paddr + size > nested_paddr, "Nested paddr overflow");
+	TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
+
+	while (npages--) {
+		nested_pg_map(vmx, vm, nested_paddr, paddr, eptp_memslot);
+		nested_paddr += page_size;
+		paddr += page_size;
+	}
+}
+
+/*
+ * Prepare an identity extended page table that maps all the
+ * physical pages in VM.
+ */
+void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
+			uint32_t memslot, uint32_t eptp_memslot)
+{
+	sparsebit_idx_t i, last;
+	struct userspace_mem_region *region =
+		memslot2region(vm, memslot);
+
+	i = (region->region.guest_phys_addr >> vm->page_shift) - 1;
+	last = i + (region->region.memory_size >> vm->page_shift);
+	for (;;) {
+		i = sparsebit_next_clear(region->unused_phy_pages, i);
+		if (i > last)
+			break;
+
+		nested_map(vmx, vm,
+			   (uint64_t)i << vm->page_shift,
+			   (uint64_t)i << vm->page_shift,
+			   1 << vm->page_shift,
+			   eptp_memslot);
+	}
+}
+
+void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
+		  uint32_t eptp_memslot)
+{
+	vmx->eptp = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
+	vmx->eptp_hva = addr_gva2hva(vm, (uintptr_t)vmx->eptp);
+	vmx->eptp_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->eptp);
+}
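
A note on the EPTP encoding used in init_vmcs_control_fields() above: the
eptPageTablePointer bit-fields follow the architectural layout (bits 2:0
memory type, bits 5:3 page-walk length minus 1, bit 6 A/D enable, bits
63:12 PML4 address). A stand-alone sketch, with an assumed eptp_gpa of
0x10000 on a host that supports A/D flags, shows the value that ends up
in EPT_POINTER:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t eptp_gpa = 0x10000;	/* assumed; page-aligned PML4 address */
	uint64_t eptp_val = eptp_gpa	/* bits 63:12: PML4 table address */
			  | (1ull << 6)	/* bit 6: enable accessed/dirty flags */
			  | (3ull << 3)	/* bits 5:3: page-walk length minus 1 (4 levels) */
			  | 6ull;	/* bits 2:0: write-back memory type */

	assert(eptp_val == 0x1005e);
	return 0;
}

As the test below illustrates, the intended call order for the new helpers
is prepare_eptp(), then nested_map_memslot() for the identity map, then
nested_map() for any L2-to-L1 aliases.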
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
new file mode 100644
index 000000000000..0bca1cfe2c1e
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KVM dirty page logging test
+ *
+ * Copyright (C) 2018, Red Hat, Inc.
+ */
+
+#define _GNU_SOURCE /* for program_invocation_name */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "vmx.h"
+
+#define VCPU_ID				1
+
+/* The memory slot index to track dirty pages */
+#define TEST_MEM_SLOT_INDEX		1
+#define TEST_MEM_SIZE			3	/* in pages */
+
+/* L1 guest test virtual memory offset */
+#define GUEST_TEST_MEM			0xc0000000
+
+/* L2 guest test virtual memory offset */
+#define NESTED_TEST_MEM1		0xc0001000
+#define NESTED_TEST_MEM2		0xc0002000
+
+static void l2_guest_code(void)
+{
+	*(volatile uint64_t *)NESTED_TEST_MEM1;
+	*(volatile uint64_t *)NESTED_TEST_MEM1 = 1;
+	GUEST_SYNC(true);
+	GUEST_SYNC(false);
+
+	*(volatile uint64_t *)NESTED_TEST_MEM2 = 1;
+	GUEST_SYNC(true);
+	*(volatile uint64_t *)NESTED_TEST_MEM2 = 1;
+	GUEST_SYNC(true);
+	GUEST_SYNC(false);
+
+	/* Exit to L1 and never come back.  */
+	vmcall();
+}
+
+void l1_guest_code(struct vmx_pages *vmx)
+{
+#define L2_GUEST_STACK_SIZE 64
+	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+
+	GUEST_ASSERT(vmx->vmcs_gpa);
+	GUEST_ASSERT(prepare_for_vmx_operation(vmx));
+	GUEST_ASSERT(load_vmcs(vmx));
+
+	prepare_vmcs(vmx, l2_guest_code,
+		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+	GUEST_SYNC(false);
+	GUEST_ASSERT(!vmlaunch());
+	GUEST_SYNC(false);
+	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
+	GUEST_DONE();
+}
+
+int main(int argc, char *argv[])
+{
+	vm_vaddr_t vmx_pages_gva = 0;
+	struct vmx_pages *vmx;
+	unsigned long *bmap;
+	uint64_t *host_test_mem;
+
+	struct kvm_vm *vm;
+	struct kvm_run *run;
+	struct ucall uc;
+	bool done = false;
+
+	/* Create VM */
+	vm = vm_create_default(VCPU_ID, 0, l1_guest_code);
+	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+	vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
+	vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
+	run = vcpu_state(vm, VCPU_ID);
+
+	/* Add an extra memory slot for testing dirty logging */
+	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
+				    GUEST_TEST_MEM,
+				    TEST_MEM_SLOT_INDEX,
+				    TEST_MEM_SIZE,
+				    KVM_MEM_LOG_DIRTY_PAGES);
+
+	/*
+	 * Add an identity map for GVA range [0xc0000000, 0xc0003000).  This
+	 * affects both L1 and L2.  However...
+	 */
+	virt_map(vm, GUEST_TEST_MEM, GUEST_TEST_MEM,
+		 TEST_MEM_SIZE * 4096, 0);
+
+	/*
+	 * ... pages in the L2 GPA range [0xc0001000, 0xc0003000) will map to
+	 * 0xc0000000.
+	 *
+	 * Note that prepare_eptp should be called only after L1's GPA map is
+	 * done, meaning after the last call to virt_map.
+	 */
+	prepare_eptp(vmx, vm, 0);
+	nested_map_memslot(vmx, vm, 0, 0);
+	nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096, 0);
+	nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096, 0);
+
+	bmap = bitmap_alloc(TEST_MEM_SIZE);
+	host_test_mem = addr_gpa2hva(vm, GUEST_TEST_MEM);
+
+	while (!done) {
+		memset(host_test_mem, 0xaa, TEST_MEM_SIZE * 4096);
+		_vcpu_run(vm, VCPU_ID);
+		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+			    "Unexpected exit reason: %u (%s),\n",
+			    run->exit_reason,
+			    exit_reason_str(run->exit_reason));
+
+		switch (get_ucall(vm, VCPU_ID, &uc)) {
+		case UCALL_ABORT:
+			TEST_ASSERT(false, "%s at %s:%ld", (const char *)uc.args[0],
+				    __FILE__, uc.args[1]);
+			/* NOT REACHED */
+		case UCALL_SYNC:
+			/*
+			 * The nested guest wrote at offset 0x1000 in the memslot, but the
+			 * dirty bitmap must be filled in according to L1 GPA, not L2.
+			 */
+			kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
+			if (uc.args[1]) {
+				TEST_ASSERT(test_bit(0, bmap), "Page 0 incorrectly reported clean\n");
+				TEST_ASSERT(host_test_mem[0] == 1, "Page 0 not written by guest\n");
+			} else {
+				TEST_ASSERT(!test_bit(0, bmap), "Page 0 incorrectly reported dirty\n");
+				TEST_ASSERT(host_test_mem[0] == 0xaaaaaaaaaaaaaaaaULL, "Page 0 written by guest\n");
+			}
+
+			TEST_ASSERT(!test_bit(1, bmap), "Page 1 incorrectly reported dirty\n");
+			TEST_ASSERT(host_test_mem[4096 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 1 written by guest\n");
+			TEST_ASSERT(!test_bit(2, bmap), "Page 2 incorrectly reported dirty\n");
+			TEST_ASSERT(host_test_mem[8192 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 2 written by guest\n");
+			break;
+		case UCALL_DONE:
+			done = true;
+			break;
+		default:
+			TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
+		}
+	}
+}