Diffstat (limited to 'tools/testing/selftests/kvm/lib/x86.c')
-rw-r--r--  tools/testing/selftests/kvm/lib/x86.c  697
1 file changed, 697 insertions(+), 0 deletions(-)
diff --git a/tools/testing/selftests/kvm/lib/x86.c b/tools/testing/selftests/kvm/lib/x86.c
new file mode 100644
index 000000000000..12df46280b23
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/x86.c
@@ -0,0 +1,697 @@
1/*
2 * tools/testing/selftests/kvm/lib/x86.c
3 *
4 * Copyright (C) 2018, Google LLC.
5 *
6 * This work is licensed under the terms of the GNU GPL, version 2.
7 */
8
9#define _GNU_SOURCE /* for program_invocation_name */
10
11#include "test_util.h"
12#include "kvm_util.h"
13#include "kvm_util_internal.h"
14#include "x86.h"
15
16/* Minimum physical address used for virtual translation tables. */
17#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
18
19/* Virtual translation table structure declarations */
20struct pageMapL4Entry {
21 uint64_t present:1;
22 uint64_t writable:1;
23 uint64_t user:1;
24 uint64_t write_through:1;
25 uint64_t cache_disable:1;
26 uint64_t accessed:1;
27 uint64_t ignored_06:1;
28 uint64_t page_size:1;
29 uint64_t ignored_11_08:4;
30 uint64_t address:40;
31 uint64_t ignored_62_52:11;
32 uint64_t execute_disable:1;
33};
34
35struct pageDirectoryPointerEntry {
36 uint64_t present:1;
37 uint64_t writable:1;
38 uint64_t user:1;
39 uint64_t write_through:1;
40 uint64_t cache_disable:1;
41 uint64_t accessed:1;
42 uint64_t ignored_06:1;
43 uint64_t page_size:1;
44 uint64_t ignored_11_08:4;
45 uint64_t address:40;
46 uint64_t ignored_62_52:11;
47 uint64_t execute_disable:1;
48};
49
50struct pageDirectoryEntry {
51 uint64_t present:1;
52 uint64_t writable:1;
53 uint64_t user:1;
54 uint64_t write_through:1;
55 uint64_t cache_disable:1;
56 uint64_t accessed:1;
57 uint64_t ignored_06:1;
58 uint64_t page_size:1;
59 uint64_t ignored_11_08:4;
60 uint64_t address:40;
61 uint64_t ignored_62_52:11;
62 uint64_t execute_disable:1;
63};
64
65struct pageTableEntry {
66 uint64_t present:1;
67 uint64_t writable:1;
68 uint64_t user:1;
69 uint64_t write_through:1;
70 uint64_t cache_disable:1;
71 uint64_t accessed:1;
72 uint64_t dirty:1;
73 uint64_t reserved_07:1;
74 uint64_t global:1;
75 uint64_t ignored_11_09:3;
76 uint64_t address:40;
77 uint64_t ignored_62_52:11;
78 uint64_t execute_disable:1;
79};
80
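/* Note: the translation table structures declared above implement the
 * 4 KiB, 4-level paging used by VM_MODE_FLAT48PG.  A guest virtual
 * address is decomposed as follows (see the index[] calculations in
 * virt_pg_map() and addr_gva2gpa() below):
 *
 *     bits 47:39 - index into the page map level 4 table (pml4e)
 *     bits 38:30 - index into the page directory pointer table (pdpe)
 *     bits 29:21 - index into the page directory (pde)
 *     bits 20:12 - index into the page table (pte)
 *     bits 11:0  - byte offset within the 4 KiB page
 */
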
81/* Register Dump
82 *
83 * Input Args:
84 * indent - Left margin indent amount
85 * regs - Register state to dump
86 *
87 * Output Args:
88 * stream - Output FILE stream
89 *
90 * Return: None
91 *
92 * Dumps the state of the registers given by regs, to the FILE stream
93 * given by stream.
94 */
95void regs_dump(FILE *stream, struct kvm_regs *regs,
96 uint8_t indent)
97{
98 fprintf(stream, "%*srax: 0x%.16llx rbx: 0x%.16llx "
99 "rcx: 0x%.16llx rdx: 0x%.16llx\n",
100 indent, "",
101 regs->rax, regs->rbx, regs->rcx, regs->rdx);
102 fprintf(stream, "%*srsi: 0x%.16llx rdi: 0x%.16llx "
103 "rsp: 0x%.16llx rbp: 0x%.16llx\n",
104 indent, "",
105 regs->rsi, regs->rdi, regs->rsp, regs->rbp);
106 fprintf(stream, "%*sr8: 0x%.16llx r9: 0x%.16llx "
107 "r10: 0x%.16llx r11: 0x%.16llx\n",
108 indent, "",
109 regs->r8, regs->r9, regs->r10, regs->r11);
110 fprintf(stream, "%*sr12: 0x%.16llx r13: 0x%.16llx "
111 "r14: 0x%.16llx r15: 0x%.16llx\n",
112 indent, "",
113 regs->r12, regs->r13, regs->r14, regs->r15);
114 fprintf(stream, "%*srip: 0x%.16llx rfl: 0x%.16llx\n",
115 indent, "",
116 regs->rip, regs->rflags);
117}
118
119/* Segment Dump
120 *
121 * Input Args:
122 * indent - Left margin indent amount
123 * segment - KVM segment
124 *
125 * Output Args:
126 * stream - Output FILE stream
127 *
128 * Return: None
129 *
130 * Dumps the state of the KVM segment given by segment, to the FILE stream
131 * given by stream.
132 */
133static void segment_dump(FILE *stream, struct kvm_segment *segment,
134 uint8_t indent)
135{
136 fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.8x "
137 "selector: 0x%.4x type: 0x%.2x\n",
138 indent, "", segment->base, segment->limit,
139 segment->selector, segment->type);
140 fprintf(stream, "%*spresent: 0x%.2x dpl: 0x%.2x "
141 "db: 0x%.2x s: 0x%.2x l: 0x%.2x\n",
142 indent, "", segment->present, segment->dpl,
143 segment->db, segment->s, segment->l);
144 fprintf(stream, "%*sg: 0x%.2x avl: 0x%.2x "
145 "unusable: 0x%.2x padding: 0x%.2x\n",
146 indent, "", segment->g, segment->avl,
147 segment->unusable, segment->padding);
148}
149
150/* dtable Dump
151 *
152 * Input Args:
153 * indent - Left margin indent amount
154 * dtable - KVM dtable
155 *
156 * Output Args:
157 * stream - Output FILE stream
158 *
159 * Return: None
160 *
161 * Dumps the state of the KVM dtable given by dtable, to the FILE stream
162 * given by stream.
163 */
164static void dtable_dump(FILE *stream, struct kvm_dtable *dtable,
165 uint8_t indent)
166{
167 fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.4x "
168 "padding: 0x%.4x 0x%.4x 0x%.4x\n",
169 indent, "", dtable->base, dtable->limit,
170 dtable->padding[0], dtable->padding[1], dtable->padding[2]);
171}
172
173/* System Register Dump
174 *
175 * Input Args:
176 * indent - Left margin indent amount
177 * sregs - System registers
178 *
179 * Output Args:
180 * stream - Output FILE stream
181 *
182 * Return: None
183 *
184 * Dumps the state of the system registers given by sregs, to the FILE stream
185 * given by stream.
186 */
187void sregs_dump(FILE *stream, struct kvm_sregs *sregs,
188 uint8_t indent)
189{
190 unsigned int i;
191
192 fprintf(stream, "%*scs:\n", indent, "");
193 segment_dump(stream, &sregs->cs, indent + 2);
194 fprintf(stream, "%*sds:\n", indent, "");
195 segment_dump(stream, &sregs->ds, indent + 2);
196 fprintf(stream, "%*ses:\n", indent, "");
197 segment_dump(stream, &sregs->es, indent + 2);
198 fprintf(stream, "%*sfs:\n", indent, "");
199 segment_dump(stream, &sregs->fs, indent + 2);
200 fprintf(stream, "%*sgs:\n", indent, "");
201 segment_dump(stream, &sregs->gs, indent + 2);
202 fprintf(stream, "%*sss:\n", indent, "");
203 segment_dump(stream, &sregs->ss, indent + 2);
204 fprintf(stream, "%*str:\n", indent, "");
205 segment_dump(stream, &sregs->tr, indent + 2);
206 fprintf(stream, "%*sldt:\n", indent, "");
207 segment_dump(stream, &sregs->ldt, indent + 2);
208
209 fprintf(stream, "%*sgdt:\n", indent, "");
210 dtable_dump(stream, &sregs->gdt, indent + 2);
211 fprintf(stream, "%*sidt:\n", indent, "");
212 dtable_dump(stream, &sregs->idt, indent + 2);
213
214 fprintf(stream, "%*scr0: 0x%.16llx cr2: 0x%.16llx "
215 "cr3: 0x%.16llx cr4: 0x%.16llx\n",
216 indent, "",
217 sregs->cr0, sregs->cr2, sregs->cr3, sregs->cr4);
218 fprintf(stream, "%*scr8: 0x%.16llx efer: 0x%.16llx "
219 "apic_base: 0x%.16llx\n",
220 indent, "",
221 sregs->cr8, sregs->efer, sregs->apic_base);
222
223 fprintf(stream, "%*sinterrupt_bitmap:\n", indent, "");
224 for (i = 0; i < (KVM_NR_INTERRUPTS + 63) / 64; i++) {
225 fprintf(stream, "%*s%.16llx\n", indent + 2, "",
226 sregs->interrupt_bitmap[i]);
227 }
228}
229
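/* VM Virtual Page Directory Allocate
 *
 * Input Args:
 *   vm - Virtual Machine
 *   pgd_memslot - Memory region slot for new virtual translation tables
 *
 * Output Args: None
 *
 * Return: None
 *
 * Allocates the top-level page table (page map level 4) for the VM given
 * by vm, if it has not already been created, and points CR3 of any VCPUs
 * created so far at it.
 */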
230void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot)
231{
234 TEST_ASSERT(vm->mode == VM_MODE_FLAT48PG, "Attempt to use "
235 "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
236
237 /* If needed, create page map l4 table. */
238 if (!vm->pgd_created) {
239 vm_paddr_t paddr = vm_phy_page_alloc(vm,
240 KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
241 vm->pgd = paddr;
242
243 /* Set pointer to pgd tables in all the VCPUs that
244 * have already been created. Future VCPUs will have
245 * the value set as each one is created.
246 */
247 for (struct vcpu *vcpu = vm->vcpu_head; vcpu;
248 vcpu = vcpu->next) {
249 struct kvm_sregs sregs;
250
251 /* Obtain the current system register settings */
252 vcpu_sregs_get(vm, vcpu->id, &sregs);
253
254 /* Set and store the pointer to the start of the
255 * pgd tables.
256 */
257 sregs.cr3 = vm->pgd;
258 vcpu_sregs_set(vm, vcpu->id, &sregs);
259 }
260
261 vm->pgd_created = true;
262 }
263}
264
265/* VM Virtual Page Map
266 *
267 * Input Args:
268 * vm - Virtual Machine
269 * vaddr - VM Virtual Address
270 * paddr - VM Physical Address
271 * pgd_memslot - Memory region slot for new virtual translation tables
272 *
273 * Output Args: None
274 *
275 * Return: None
276 *
277 * Within the VM given by vm, creates a virtual translation that maps the
278 * page starting at vaddr to the page starting at paddr.
279 */
280void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
281 uint32_t pgd_memslot)
282{
283 uint16_t index[4];
284 struct pageMapL4Entry *pml4e;
285
286 TEST_ASSERT(vm->mode == VM_MODE_FLAT48PG, "Attempt to use "
287 "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
288
289 TEST_ASSERT((vaddr % vm->page_size) == 0,
290 "Virtual address not on page boundary,\n"
291 " vaddr: 0x%lx vm->page_size: 0x%x",
292 vaddr, vm->page_size);
293 TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
294 (vaddr >> vm->page_shift)),
295 "Invalid virtual address, vaddr: 0x%lx",
296 vaddr);
297 TEST_ASSERT((paddr % vm->page_size) == 0,
298 "Physical address not on page boundary,\n"
299 " paddr: 0x%lx vm->page_size: 0x%x",
300 paddr, vm->page_size);
301 TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
302 "Physical address beyond maximum supported,\n"
303 " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
304 paddr, vm->max_gfn, vm->page_size);
305
306 index[0] = (vaddr >> 12) & 0x1ffu;
307 index[1] = (vaddr >> 21) & 0x1ffu;
308 index[2] = (vaddr >> 30) & 0x1ffu;
309 index[3] = (vaddr >> 39) & 0x1ffu;
310
311 /* Allocate page directory pointer table if not present. */
312 pml4e = addr_gpa2hva(vm, vm->pgd);
313 if (!pml4e[index[3]].present) {
314 pml4e[index[3]].address = vm_phy_page_alloc(vm,
315 KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot)
316 >> vm->page_shift;
317 pml4e[index[3]].writable = true;
318 pml4e[index[3]].present = true;
319 }
320
321 /* Allocate page directory table if not present. */
322 struct pageDirectoryPointerEntry *pdpe;
323 pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
324 if (!pdpe[index[2]].present) {
325 pdpe[index[2]].address = vm_phy_page_alloc(vm,
326 KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot)
327 >> vm->page_shift;
328 pdpe[index[2]].writable = true;
329 pdpe[index[2]].present = true;
330 }
331
332 /* Allocate page table if not present. */
333 struct pageDirectoryEntry *pde;
334 pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
335 if (!pde[index[1]].present) {
336 pde[index[1]].address = vm_phy_page_alloc(vm,
337 KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot)
338 >> vm->page_shift;
339 pde[index[1]].writable = true;
340 pde[index[1]].present = true;
341 }
342
343 /* Fill in page table entry. */
344 struct pageTableEntry *pte;
345 pte = addr_gpa2hva(vm, pde[index[1]].address * vm->page_size);
346 pte[index[0]].address = paddr >> vm->page_shift;
347 pte[index[0]].writable = true;
348 pte[index[0]].present = true;
349}
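
/* Example usage (illustrative sketch; assumes a test that has already
 * created a VM in VM_MODE_FLAT48PG; the guest addresses and memslot 0
 * are arbitrary choices):
 *
 *     vm_paddr_t paddr = vm_phy_page_alloc(vm, 0x200000, 0);
 *     virt_pg_map(vm, 0x400000, paddr, 0);
 */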
350
351/* Virtual Translation Tables Dump
352 *
353 * Input Args:
354 * vm - Virtual Machine
355 * indent - Left margin indent amount
356 *
357 * Output Args:
358 * stream - Output FILE stream
359 *
360 * Return: None
361 *
362 * Dumps to the FILE stream given by stream, the contents of all the
363 * virtual translation tables for the VM given by vm.
364 */
365void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
366{
367 struct pageMapL4Entry *pml4e, *pml4e_start;
368 struct pageDirectoryPointerEntry *pdpe, *pdpe_start;
369 struct pageDirectoryEntry *pde, *pde_start;
370 struct pageTableEntry *pte, *pte_start;
371
372 if (!vm->pgd_created)
373 return;
374
375 fprintf(stream, "%*s "
376 " no\n", indent, "");
377 fprintf(stream, "%*s index hvaddr gpaddr "
378 "addr w exec dirty\n",
379 indent, "");
380 pml4e_start = (struct pageMapL4Entry *) addr_gpa2hva(vm,
381 vm->pgd);
382 for (uint16_t n1 = 0; n1 <= 0x1ffu; n1++) {
383 pml4e = &pml4e_start[n1];
384 if (!pml4e->present)
385 continue;
386 fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10lx %u "
387 " %u\n",
388 indent, "",
389 pml4e - pml4e_start, pml4e,
390 addr_hva2gpa(vm, pml4e), (uint64_t) pml4e->address,
391 pml4e->writable, pml4e->execute_disable);
392
393 pdpe_start = addr_gpa2hva(vm, pml4e->address
394 * vm->page_size);
395 for (uint16_t n2 = 0; n2 <= 0x1ffu; n2++) {
396 pdpe = &pdpe_start[n2];
397 if (!pdpe->present)
398 continue;
399 fprintf(stream, "%*spdpe 0x%-3zx %p 0x%-12lx 0x%-10lx "
400 "%u %u\n",
401 indent, "",
402 pdpe - pdpe_start, pdpe,
403 addr_hva2gpa(vm, pdpe),
404 (uint64_t) pdpe->address, pdpe->writable,
405 pdpe->execute_disable);
406
407 pde_start = addr_gpa2hva(vm,
408 pdpe->address * vm->page_size);
409 for (uint16_t n3 = 0; n3 <= 0x1ffu; n3++) {
410 pde = &pde_start[n3];
411 if (!pde->present)
412 continue;
413 fprintf(stream, "%*spde 0x%-3zx %p "
414 "0x%-12lx 0x%-10lx %u %u\n",
415 indent, "", pde - pde_start, pde,
416 addr_hva2gpa(vm, pde),
417 (uint64_t) pde->address, pde->writable,
418 pde->execute_disable);
419
420 pte_start = addr_gpa2hva(vm,
421 pde->address * vm->page_size);
422 for (uint16_t n4 = 0; n4 <= 0x1ffu; n4++) {
423 pte = &pte_start[n4];
424 if (!pte->present)
425 continue;
426 fprintf(stream, "%*spte 0x%-3zx %p "
427 "0x%-12lx 0x%-10lx %u %u "
428 " %u 0x%-10lx\n",
429 indent, "",
430 pte - pte_start, pte,
431 addr_hva2gpa(vm, pte),
432 (uint64_t) pte->address,
433 pte->writable,
434 pte->execute_disable,
435 pte->dirty,
436 ((uint64_t) n1 << 27)
437 | ((uint64_t) n2 << 18)
438 | ((uint64_t) n3 << 9)
439 | ((uint64_t) n4));
440 }
441 }
442 }
443 }
444}
445
446/* Set Unusable Segment
447 *
448 * Input Args: None
449 *
450 * Output Args:
451 * segp - Pointer to segment register
452 *
453 * Return: None
454 *
455 * Sets the segment register pointed to by segp to an unusable state.
456 */
457static void kvm_seg_set_unusable(struct kvm_segment *segp)
458{
459 memset(segp, 0, sizeof(*segp));
460 segp->unusable = true;
461}
462
463/* Set Long Mode Flat Kernel Code Segment
464 *
465 * Input Args:
466 * selector - selector value
467 *
468 * Output Args:
469 * segp - Pointer to KVM segment
470 *
471 * Return: None
472 *
473 * Sets up the KVM segment pointed to by segp, to be a code segment
474 * with the selector value given by selector.
475 */
476static void kvm_seg_set_kernel_code_64bit(uint16_t selector,
477 struct kvm_segment *segp)
478{
479 memset(segp, 0, sizeof(*segp));
480 segp->selector = selector;
481 segp->limit = 0xFFFFFFFFu;
482 segp->s = 0x1; /* kTypeCodeData */
483 segp->type = 0x08 | 0x01 | 0x02; /* kFlagCode | kFlagCodeAccessed
484 * | kFlagCodeReadable
485 */
486 segp->g = true;
487 segp->l = true;
488 segp->present = 1;
489}
490
491/* Set Long Mode Flat Kernel Data Segment
492 *
493 * Input Args:
494 * selector - selector value
495 *
496 * Output Args:
497 * segp - Pointer to KVM segment
498 *
499 * Return: None
500 *
501 * Sets up the KVM segment pointed to by segp, to be a data segment
502 * with the selector value given by selector.
503 */
504static void kvm_seg_set_kernel_data_64bit(uint16_t selector,
505 struct kvm_segment *segp)
506{
507 memset(segp, 0, sizeof(*segp));
508 segp->selector = selector;
509 segp->limit = 0xFFFFFFFFu;
510 segp->s = 0x1; /* kTypeCodeData */
511 segp->type = 0x00 | 0x01 | 0x02; /* kFlagData | kFlagDataAccessed
512 * | kFlagDataWritable
513 */
514 segp->g = true;
515 segp->present = true;
516}
517
518/* Address Guest Virtual to Guest Physical
519 *
520 * Input Args:
521 * vm - Virtual Machine
522 * gva - VM virtual address
523 *
524 * Output Args: None
525 *
526 * Return:
527 * Equivalent VM physical address
528 *
529 * Translates the VM virtual address given by gva to a VM physical
530 * address, by walking the virtual translation tables of the VM
531 * given by vm.  The resulting VM physical address is returned.
532 * A TEST_ASSERT failure occurs if the translation tables have not
533 * been set up or if no mapping exists for the given VM virtual
534 * address.
535 */
536vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
537{
538 uint16_t index[4];
539 struct pageMapL4Entry *pml4e;
540 struct pageDirectoryPointerEntry *pdpe;
541 struct pageDirectoryEntry *pde;
542 struct pageTableEntry *pte;
543 void *hva;
544
545 TEST_ASSERT(vm->mode == VM_MODE_FLAT48PG, "Attempt to use "
546 "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
547
548 index[0] = (gva >> 12) & 0x1ffu;
549 index[1] = (gva >> 21) & 0x1ffu;
550 index[2] = (gva >> 30) & 0x1ffu;
551 index[3] = (gva >> 39) & 0x1ffu;
552
553 if (!vm->pgd_created)
554 goto unmapped_gva;
555 pml4e = addr_gpa2hva(vm, vm->pgd);
556 if (!pml4e[index[3]].present)
557 goto unmapped_gva;
558
559 pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
560 if (!pdpe[index[2]].present)
561 goto unmapped_gva;
562
563 pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
564 if (!pde[index[1]].present)
565 goto unmapped_gva;
566
567 pte = addr_gpa2hva(vm, pde[index[1]].address * vm->page_size);
568 if (!pte[index[0]].present)
569 goto unmapped_gva;
570
571 return (pte[index[0]].address * vm->page_size) + (gva & 0xfffu);
572
573unmapped_gva:
574 TEST_ASSERT(false, "No mapping for vm virtual address, "
575 "gva: 0x%lx", gva);
576}
577
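/* VCPU Setup
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the mode specific system registers (control registers, EFER and
 * segment registers) of the VCPU given by vcpuid, and points CR3 at the
 * VM's virtual translation tables if they have already been created.
 */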
578void vcpu_setup(struct kvm_vm *vm, int vcpuid)
579{
580 struct kvm_sregs sregs;
581
582 /* Set mode specific system register values. */
583 vcpu_sregs_get(vm, vcpuid, &sregs);
584
585 switch (vm->mode) {
586 case VM_MODE_FLAT48PG:
587 sregs.cr0 = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG;
588 sregs.cr4 |= X86_CR4_PAE;
589 sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX);
590
591 kvm_seg_set_unusable(&sregs.ldt);
592 kvm_seg_set_kernel_code_64bit(0x8, &sregs.cs);
593 kvm_seg_set_kernel_data_64bit(0x10, &sregs.ds);
594 kvm_seg_set_kernel_data_64bit(0x10, &sregs.es);
595 break;
596
597 default:
598 TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", vm->mode);
599 }
600 vcpu_sregs_set(vm, vcpuid, &sregs);
601
602 /* If virtual translation tables have been set up, set the system
603 * register (CR3) to point to them.  It's okay if they haven't been
604 * set up yet; in that case, the code that sets up the virtual
605 * translation tables will go back through any VCPUs that have
606 * already been created and set their values.
607 */
608 if (vm->pgd_created) {
609 struct kvm_sregs sregs;
610
611 vcpu_sregs_get(vm, vcpuid, &sregs);
612
613 sregs.cr3 = vm->pgd;
614 vcpu_sregs_set(vm, vcpuid, &sregs);
615 }
616}

617/* Adds a vCPU with reasonable defaults (i.e., a stack)
618 *
619 * Input Args:
620 * vcpuid - The id of the VCPU to add to the VM.
621 * guest_code - The vCPU's entry point
622 */
623void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
624{
625 struct kvm_mp_state mp_state;
626 struct kvm_regs regs;
627 vm_vaddr_t stack_vaddr;
628 stack_vaddr = vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
629 DEFAULT_GUEST_STACK_VADDR_MIN, 0, 0);
630
631 /* Create VCPU */
632 vm_vcpu_add(vm, vcpuid);
633
634 /* Setup guest general purpose registers */
635 vcpu_regs_get(vm, vcpuid, &regs);
636 regs.rflags = regs.rflags | 0x2;
637 regs.rsp = stack_vaddr + (DEFAULT_STACK_PGS * getpagesize());
638 regs.rip = (unsigned long) guest_code;
639 vcpu_regs_set(vm, vcpuid, &regs);
640
641 /* Setup the MP state */
642 mp_state.mp_state = 0;
643 vcpu_set_mp_state(vm, vcpuid, &mp_state);
644}
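
/* Example usage (illustrative sketch; assumes an existing VM and a
 * test-supplied guest entry point guest_main()):
 *
 *     vm_vcpu_add_default(vm, 0, guest_main);
 */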
645
646/* VM VCPU CPUID Set
647 *
648 * Input Args:
649 * vm - Virtual Machine
650 * vcpuid - VCPU id
651 * cpuid - The CPUID values to set.
652 *
653 * Output Args: None
654 *
655 * Return: None
656 *
657 * Set the VCPU's CPUID.
658 */
659void vcpu_set_cpuid(struct kvm_vm *vm,
660 uint32_t vcpuid, struct kvm_cpuid2 *cpuid)
661{
662 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
663 int rc;
664
665 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
666
667 rc = ioctl(vcpu->fd, KVM_SET_CPUID2, cpuid);
668 TEST_ASSERT(rc == 0, "KVM_SET_CPUID2 failed, rc: %i errno: %i",
669 rc, errno);
670}
671
672/* Create a VM with reasonable defaults
673 *
674 * Input Args:
675 * vcpuid - The id of the single VCPU to add to the VM.
676 * guest_code - The vCPU's entry point
677 *
678 * Output Args: None
679 *
680 * Return:
681 * Pointer to opaque structure that describes the created VM.
682 */
683struct kvm_vm *vm_create_default(uint32_t vcpuid, void *guest_code)
684{
685 struct kvm_vm *vm;
686
687 /* Create VM */
688 vm = vm_create(VM_MODE_FLAT48PG, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
689
690 /* Setup IRQ Chip */
691 vm_create_irqchip(vm);
692
693 /* Add the first vCPU. */
694 vm_vcpu_add_default(vm, vcpuid, guest_code);
695
696 return vm;
697}
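
/* Example usage (illustrative sketch of a minimal test built on this
 * helper; guest_main() is supplied by the test, and vcpu_run() and
 * kvm_vm_free() are assumed to be the corresponding kvm_util helpers):
 *
 *     struct kvm_vm *vm = vm_create_default(0, guest_main);
 *     vcpu_run(vm, 0);
 *     kvm_vm_free(vm);
 */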