about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorVitaly Kuznetsov <vkuznets@redhat.com>2018-10-16 12:50:08 -0400
committerPaolo Bonzini <pbonzini@redhat.com>2018-10-16 18:30:18 -0400
commit1e7ecd1b3d21a6302f3ee4a3720f682eb2467a3c (patch)
treee25ed1e05c29d87a2404b48fe8bbc62f72415777
parenta1b0c1c64dfef0cff8555bb708bfc5d7c66c6ca4 (diff)
KVM: selftests: state_test: test bare VMXON migration
Split prepare_for_vmx_operation() into prepare_for_vmx_operation() and load_vmcs() so we can inject GUEST_SYNC() in between.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/vmx.h1
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/vmx.c5
-rw-r--r--tools/testing/selftests/kvm/x86_64/state_test.c22
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c1
4 files changed, 19 insertions, 10 deletions
diff --git a/tools/testing/selftests/kvm/include/x86_64/vmx.h b/tools/testing/selftests/kvm/include/x86_64/vmx.h
index 12ebd836f7ef..4bbee8560292 100644
--- a/tools/testing/selftests/kvm/include/x86_64/vmx.h
+++ b/tools/testing/selftests/kvm/include/x86_64/vmx.h
@@ -548,5 +548,6 @@ struct vmx_pages {
548struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva); 548struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva);
549bool prepare_for_vmx_operation(struct vmx_pages *vmx); 549bool prepare_for_vmx_operation(struct vmx_pages *vmx);
550void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp); 550void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
551bool load_vmcs(struct vmx_pages *vmx);
551 552
552#endif /* SELFTEST_KVM_VMX_H */ 553#endif /* SELFTEST_KVM_VMX_H */
diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
index d7c401472247..cc356da9b3d8 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
@@ -107,6 +107,11 @@ bool prepare_for_vmx_operation(struct vmx_pages *vmx)
107 if (vmxon(vmx->vmxon_gpa)) 107 if (vmxon(vmx->vmxon_gpa))
108 return false; 108 return false;
109 109
110 return true;
111}
112
113bool load_vmcs(struct vmx_pages *vmx)
114{
110 /* Load a VMCS. */ 115 /* Load a VMCS. */
111 *(uint32_t *)(vmx->vmcs) = vmcs_revision(); 116 *(uint32_t *)(vmx->vmcs) = vmcs_revision();
112 if (vmclear(vmx->vmcs_gpa)) 117 if (vmclear(vmx->vmcs_gpa))
diff --git a/tools/testing/selftests/kvm/x86_64/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c
index 43df194a7c1e..03da41f0f736 100644
--- a/tools/testing/selftests/kvm/x86_64/state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/state_test.c
@@ -26,20 +26,20 @@ static bool have_nested_state;
26 26
27void l2_guest_code(void) 27void l2_guest_code(void)
28{ 28{
29 GUEST_SYNC(5); 29 GUEST_SYNC(6);
30 30
31 /* Exit to L1 */ 31 /* Exit to L1 */
32 vmcall(); 32 vmcall();
33 33
34 /* L1 has now set up a shadow VMCS for us. */ 34 /* L1 has now set up a shadow VMCS for us. */
35 GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee); 35 GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
36 GUEST_SYNC(9); 36 GUEST_SYNC(10);
37 GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee); 37 GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
38 GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0fffee)); 38 GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0fffee));
39 GUEST_SYNC(10); 39 GUEST_SYNC(11);
40 GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0fffee); 40 GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0fffee);
41 GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0ffffee)); 41 GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0ffffee));
42 GUEST_SYNC(11); 42 GUEST_SYNC(12);
43 43
44 /* Done, exit to L1 and never come back. */ 44 /* Done, exit to L1 and never come back. */
45 vmcall(); 45 vmcall();
@@ -52,15 +52,17 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
52 52
53 GUEST_ASSERT(vmx_pages->vmcs_gpa); 53 GUEST_ASSERT(vmx_pages->vmcs_gpa);
54 GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages)); 54 GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
55 GUEST_SYNC(3);
56 GUEST_ASSERT(load_vmcs(vmx_pages));
55 GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa); 57 GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
56 58
57 GUEST_SYNC(3); 59 GUEST_SYNC(4);
58 GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa); 60 GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
59 61
60 prepare_vmcs(vmx_pages, l2_guest_code, 62 prepare_vmcs(vmx_pages, l2_guest_code,
61 &l2_guest_stack[L2_GUEST_STACK_SIZE]); 63 &l2_guest_stack[L2_GUEST_STACK_SIZE]);
62 64
63 GUEST_SYNC(4); 65 GUEST_SYNC(5);
64 GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa); 66 GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
65 GUEST_ASSERT(!vmlaunch()); 67 GUEST_ASSERT(!vmlaunch());
66 GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa); 68 GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
@@ -72,7 +74,7 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
72 GUEST_ASSERT(!vmresume()); 74 GUEST_ASSERT(!vmresume());
73 GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL); 75 GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
74 76
75 GUEST_SYNC(6); 77 GUEST_SYNC(7);
76 GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL); 78 GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
77 79
78 GUEST_ASSERT(!vmresume()); 80 GUEST_ASSERT(!vmresume());
@@ -85,12 +87,12 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
85 87
86 GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa)); 88 GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
87 GUEST_ASSERT(vmlaunch()); 89 GUEST_ASSERT(vmlaunch());
88 GUEST_SYNC(7); 90 GUEST_SYNC(8);
89 GUEST_ASSERT(vmlaunch()); 91 GUEST_ASSERT(vmlaunch());
90 GUEST_ASSERT(vmresume()); 92 GUEST_ASSERT(vmresume());
91 93
92 vmwrite(GUEST_RIP, 0xc0ffee); 94 vmwrite(GUEST_RIP, 0xc0ffee);
93 GUEST_SYNC(8); 95 GUEST_SYNC(9);
94 GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee); 96 GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
95 97
96 GUEST_ASSERT(!vmptrld(vmx_pages->vmcs_gpa)); 98 GUEST_ASSERT(!vmptrld(vmx_pages->vmcs_gpa));
@@ -101,7 +103,7 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
101 GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee); 103 GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
102 GUEST_ASSERT(vmlaunch()); 104 GUEST_ASSERT(vmlaunch());
103 GUEST_ASSERT(vmresume()); 105 GUEST_ASSERT(vmresume());
104 GUEST_SYNC(12); 106 GUEST_SYNC(13);
105 GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee); 107 GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
106 GUEST_ASSERT(vmlaunch()); 108 GUEST_ASSERT(vmlaunch());
107 GUEST_ASSERT(vmresume()); 109 GUEST_ASSERT(vmresume());
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
index 38a91a5f04ac..18fa64db0d7a 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
@@ -94,6 +94,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
94 check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE); 94 check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
95 95
96 GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages)); 96 GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
97 GUEST_ASSERT(load_vmcs(vmx_pages));
97 98
98 /* Prepare the VMCS for L2 execution. */ 99 /* Prepare the VMCS for L2 execution. */
99 prepare_vmcs(vmx_pages, l2_guest_code, 100 prepare_vmcs(vmx_pages, l2_guest_code,