diff options
author | Rusty Russell <rusty@rustcorp.com.au> | 2007-07-27 03:16:56 -0400 |
---|---|---|
committer | Avi Kivity <avi@qumranet.com> | 2007-10-13 04:18:20 -0400 |
commit | fb3f0f51d92d1496f9628ca6f0fb06a48dc9ed2a (patch) | |
tree | 38da1073dae5f30fd8f162669bb5a86959f8ace5 /drivers/kvm/svm.c | |
parent | a2fa3e9f52d875f7d4ca98434603b8756be71ba8 (diff) |
KVM: Dynamically allocate vcpus
This patch converts the vcpus array in "struct kvm" to a pointer
array, and changes the "vcpu_create" and "vcpu_setup" hooks into one
"vcpu_create" call which does the allocation and initialization of the
vcpu (calling back into the kvm_vcpu_init core helper).
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm/svm.c')
-rw-r--r-- | drivers/kvm/svm.c | 177 |
1 file changed, 86 insertions(+), 91 deletions(-)
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c index 32481876d98b..0feec8558599 100644 --- a/drivers/kvm/svm.c +++ b/drivers/kvm/svm.c | |||
@@ -51,7 +51,7 @@ MODULE_LICENSE("GPL"); | |||
51 | 51 | ||
52 | static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu) | 52 | static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu) |
53 | { | 53 | { |
54 | return (struct vcpu_svm*)vcpu->_priv; | 54 | return container_of(vcpu, struct vcpu_svm, vcpu); |
55 | } | 55 | } |
56 | 56 | ||
57 | unsigned long iopm_base; | 57 | unsigned long iopm_base; |
@@ -466,11 +466,6 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type) | |||
466 | seg->base = 0; | 466 | seg->base = 0; |
467 | } | 467 | } |
468 | 468 | ||
469 | static int svm_vcpu_setup(struct kvm_vcpu *vcpu) | ||
470 | { | ||
471 | return 0; | ||
472 | } | ||
473 | |||
474 | static void init_vmcb(struct vmcb *vmcb) | 469 | static void init_vmcb(struct vmcb *vmcb) |
475 | { | 470 | { |
476 | struct vmcb_control_area *control = &vmcb->control; | 471 | struct vmcb_control_area *control = &vmcb->control; |
@@ -576,19 +571,27 @@ static void init_vmcb(struct vmcb *vmcb) | |||
576 | /* rdx = ?? */ | 571 | /* rdx = ?? */ |
577 | } | 572 | } |
578 | 573 | ||
579 | static int svm_create_vcpu(struct kvm_vcpu *vcpu) | 574 | static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) |
580 | { | 575 | { |
581 | struct vcpu_svm *svm; | 576 | struct vcpu_svm *svm; |
582 | struct page *page; | 577 | struct page *page; |
583 | int r; | 578 | int err; |
584 | 579 | ||
585 | r = -ENOMEM; | ||
586 | svm = kzalloc(sizeof *svm, GFP_KERNEL); | 580 | svm = kzalloc(sizeof *svm, GFP_KERNEL); |
587 | if (!svm) | 581 | if (!svm) { |
588 | goto out1; | 582 | err = -ENOMEM; |
583 | goto out; | ||
584 | } | ||
585 | |||
586 | err = kvm_vcpu_init(&svm->vcpu, kvm, id); | ||
587 | if (err) | ||
588 | goto free_svm; | ||
589 | |||
589 | page = alloc_page(GFP_KERNEL); | 590 | page = alloc_page(GFP_KERNEL); |
590 | if (!page) | 591 | if (!page) { |
591 | goto out2; | 592 | err = -ENOMEM; |
593 | goto uninit; | ||
594 | } | ||
592 | 595 | ||
593 | svm->vmcb = page_address(page); | 596 | svm->vmcb = page_address(page); |
594 | clear_page(svm->vmcb); | 597 | clear_page(svm->vmcb); |
@@ -597,33 +600,29 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu) | |||
597 | memset(svm->db_regs, 0, sizeof(svm->db_regs)); | 600 | memset(svm->db_regs, 0, sizeof(svm->db_regs)); |
598 | init_vmcb(svm->vmcb); | 601 | init_vmcb(svm->vmcb); |
599 | 602 | ||
600 | svm->vcpu = vcpu; | 603 | fx_init(&svm->vcpu); |
601 | vcpu->_priv = svm; | 604 | svm->vcpu.fpu_active = 1; |
605 | svm->vcpu.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE; | ||
606 | if (svm->vcpu.vcpu_id == 0) | ||
607 | svm->vcpu.apic_base |= MSR_IA32_APICBASE_BSP; | ||
602 | 608 | ||
603 | fx_init(vcpu); | 609 | return &svm->vcpu; |
604 | vcpu->fpu_active = 1; | ||
605 | vcpu->apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE; | ||
606 | if (vcpu->vcpu_id == 0) | ||
607 | vcpu->apic_base |= MSR_IA32_APICBASE_BSP; | ||
608 | 610 | ||
609 | return 0; | 611 | uninit: |
610 | 612 | kvm_vcpu_uninit(&svm->vcpu); | |
611 | out2: | 613 | free_svm: |
612 | kfree(svm); | 614 | kfree(svm); |
613 | out1: | 615 | out: |
614 | return r; | 616 | return ERR_PTR(err); |
615 | } | 617 | } |
616 | 618 | ||
617 | static void svm_free_vcpu(struct kvm_vcpu *vcpu) | 619 | static void svm_free_vcpu(struct kvm_vcpu *vcpu) |
618 | { | 620 | { |
619 | struct vcpu_svm *svm = to_svm(vcpu); | 621 | struct vcpu_svm *svm = to_svm(vcpu); |
620 | 622 | ||
621 | if (!svm) | 623 | __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT)); |
622 | return; | 624 | kvm_vcpu_uninit(vcpu); |
623 | if (svm->vmcb) | ||
624 | __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT)); | ||
625 | kfree(svm); | 625 | kfree(svm); |
626 | vcpu->_priv = NULL; | ||
627 | } | 626 | } |
628 | 627 | ||
629 | static void svm_vcpu_load(struct kvm_vcpu *vcpu) | 628 | static void svm_vcpu_load(struct kvm_vcpu *vcpu) |
@@ -1591,34 +1590,33 @@ again: | |||
1591 | #endif | 1590 | #endif |
1592 | 1591 | ||
1593 | #ifdef CONFIG_X86_64 | 1592 | #ifdef CONFIG_X86_64 |
1594 | "mov %c[rbx](%[vcpu]), %%rbx \n\t" | 1593 | "mov %c[rbx](%[svm]), %%rbx \n\t" |
1595 | "mov %c[rcx](%[vcpu]), %%rcx \n\t" | 1594 | "mov %c[rcx](%[svm]), %%rcx \n\t" |
1596 | "mov %c[rdx](%[vcpu]), %%rdx \n\t" | 1595 | "mov %c[rdx](%[svm]), %%rdx \n\t" |
1597 | "mov %c[rsi](%[vcpu]), %%rsi \n\t" | 1596 | "mov %c[rsi](%[svm]), %%rsi \n\t" |
1598 | "mov %c[rdi](%[vcpu]), %%rdi \n\t" | 1597 | "mov %c[rdi](%[svm]), %%rdi \n\t" |
1599 | "mov %c[rbp](%[vcpu]), %%rbp \n\t" | 1598 | "mov %c[rbp](%[svm]), %%rbp \n\t" |
1600 | "mov %c[r8](%[vcpu]), %%r8 \n\t" | 1599 | "mov %c[r8](%[svm]), %%r8 \n\t" |
1601 | "mov %c[r9](%[vcpu]), %%r9 \n\t" | 1600 | "mov %c[r9](%[svm]), %%r9 \n\t" |
1602 | "mov %c[r10](%[vcpu]), %%r10 \n\t" | 1601 | "mov %c[r10](%[svm]), %%r10 \n\t" |
1603 | "mov %c[r11](%[vcpu]), %%r11 \n\t" | 1602 | "mov %c[r11](%[svm]), %%r11 \n\t" |
1604 | "mov %c[r12](%[vcpu]), %%r12 \n\t" | 1603 | "mov %c[r12](%[svm]), %%r12 \n\t" |
1605 | "mov %c[r13](%[vcpu]), %%r13 \n\t" | 1604 | "mov %c[r13](%[svm]), %%r13 \n\t" |
1606 | "mov %c[r14](%[vcpu]), %%r14 \n\t" | 1605 | "mov %c[r14](%[svm]), %%r14 \n\t" |
1607 | "mov %c[r15](%[vcpu]), %%r15 \n\t" | 1606 | "mov %c[r15](%[svm]), %%r15 \n\t" |
1608 | #else | 1607 | #else |
1609 | "mov %c[rbx](%[vcpu]), %%ebx \n\t" | 1608 | "mov %c[rbx](%[svm]), %%ebx \n\t" |
1610 | "mov %c[rcx](%[vcpu]), %%ecx \n\t" | 1609 | "mov %c[rcx](%[svm]), %%ecx \n\t" |
1611 | "mov %c[rdx](%[vcpu]), %%edx \n\t" | 1610 | "mov %c[rdx](%[svm]), %%edx \n\t" |
1612 | "mov %c[rsi](%[vcpu]), %%esi \n\t" | 1611 | "mov %c[rsi](%[svm]), %%esi \n\t" |
1613 | "mov %c[rdi](%[vcpu]), %%edi \n\t" | 1612 | "mov %c[rdi](%[svm]), %%edi \n\t" |
1614 | "mov %c[rbp](%[vcpu]), %%ebp \n\t" | 1613 | "mov %c[rbp](%[svm]), %%ebp \n\t" |
1615 | #endif | 1614 | #endif |
1616 | 1615 | ||
1617 | #ifdef CONFIG_X86_64 | 1616 | #ifdef CONFIG_X86_64 |
1618 | /* Enter guest mode */ | 1617 | /* Enter guest mode */ |
1619 | "push %%rax \n\t" | 1618 | "push %%rax \n\t" |
1620 | "mov %c[svm](%[vcpu]), %%rax \n\t" | 1619 | "mov %c[vmcb](%[svm]), %%rax \n\t" |
1621 | "mov %c[vmcb](%%rax), %%rax \n\t" | ||
1622 | SVM_VMLOAD "\n\t" | 1620 | SVM_VMLOAD "\n\t" |
1623 | SVM_VMRUN "\n\t" | 1621 | SVM_VMRUN "\n\t" |
1624 | SVM_VMSAVE "\n\t" | 1622 | SVM_VMSAVE "\n\t" |
@@ -1626,8 +1624,7 @@ again: | |||
1626 | #else | 1624 | #else |
1627 | /* Enter guest mode */ | 1625 | /* Enter guest mode */ |
1628 | "push %%eax \n\t" | 1626 | "push %%eax \n\t" |
1629 | "mov %c[svm](%[vcpu]), %%eax \n\t" | 1627 | "mov %c[vmcb](%[svm]), %%eax \n\t" |
1630 | "mov %c[vmcb](%%eax), %%eax \n\t" | ||
1631 | SVM_VMLOAD "\n\t" | 1628 | SVM_VMLOAD "\n\t" |
1632 | SVM_VMRUN "\n\t" | 1629 | SVM_VMRUN "\n\t" |
1633 | SVM_VMSAVE "\n\t" | 1630 | SVM_VMSAVE "\n\t" |
@@ -1636,55 +1633,54 @@ again: | |||
1636 | 1633 | ||
1637 | /* Save guest registers, load host registers */ | 1634 | /* Save guest registers, load host registers */ |
1638 | #ifdef CONFIG_X86_64 | 1635 | #ifdef CONFIG_X86_64 |
1639 | "mov %%rbx, %c[rbx](%[vcpu]) \n\t" | 1636 | "mov %%rbx, %c[rbx](%[svm]) \n\t" |
1640 | "mov %%rcx, %c[rcx](%[vcpu]) \n\t" | 1637 | "mov %%rcx, %c[rcx](%[svm]) \n\t" |
1641 | "mov %%rdx, %c[rdx](%[vcpu]) \n\t" | 1638 | "mov %%rdx, %c[rdx](%[svm]) \n\t" |
1642 | "mov %%rsi, %c[rsi](%[vcpu]) \n\t" | 1639 | "mov %%rsi, %c[rsi](%[svm]) \n\t" |
1643 | "mov %%rdi, %c[rdi](%[vcpu]) \n\t" | 1640 | "mov %%rdi, %c[rdi](%[svm]) \n\t" |
1644 | "mov %%rbp, %c[rbp](%[vcpu]) \n\t" | 1641 | "mov %%rbp, %c[rbp](%[svm]) \n\t" |
1645 | "mov %%r8, %c[r8](%[vcpu]) \n\t" | 1642 | "mov %%r8, %c[r8](%[svm]) \n\t" |
1646 | "mov %%r9, %c[r9](%[vcpu]) \n\t" | 1643 | "mov %%r9, %c[r9](%[svm]) \n\t" |
1647 | "mov %%r10, %c[r10](%[vcpu]) \n\t" | 1644 | "mov %%r10, %c[r10](%[svm]) \n\t" |
1648 | "mov %%r11, %c[r11](%[vcpu]) \n\t" | 1645 | "mov %%r11, %c[r11](%[svm]) \n\t" |
1649 | "mov %%r12, %c[r12](%[vcpu]) \n\t" | 1646 | "mov %%r12, %c[r12](%[svm]) \n\t" |
1650 | "mov %%r13, %c[r13](%[vcpu]) \n\t" | 1647 | "mov %%r13, %c[r13](%[svm]) \n\t" |
1651 | "mov %%r14, %c[r14](%[vcpu]) \n\t" | 1648 | "mov %%r14, %c[r14](%[svm]) \n\t" |
1652 | "mov %%r15, %c[r15](%[vcpu]) \n\t" | 1649 | "mov %%r15, %c[r15](%[svm]) \n\t" |
1653 | 1650 | ||
1654 | "pop %%r15; pop %%r14; pop %%r13; pop %%r12;" | 1651 | "pop %%r15; pop %%r14; pop %%r13; pop %%r12;" |
1655 | "pop %%r11; pop %%r10; pop %%r9; pop %%r8;" | 1652 | "pop %%r11; pop %%r10; pop %%r9; pop %%r8;" |
1656 | "pop %%rbp; pop %%rdi; pop %%rsi;" | 1653 | "pop %%rbp; pop %%rdi; pop %%rsi;" |
1657 | "pop %%rdx; pop %%rcx; pop %%rbx; \n\t" | 1654 | "pop %%rdx; pop %%rcx; pop %%rbx; \n\t" |
1658 | #else | 1655 | #else |
1659 | "mov %%ebx, %c[rbx](%[vcpu]) \n\t" | 1656 | "mov %%ebx, %c[rbx](%[svm]) \n\t" |
1660 | "mov %%ecx, %c[rcx](%[vcpu]) \n\t" | 1657 | "mov %%ecx, %c[rcx](%[svm]) \n\t" |
1661 | "mov %%edx, %c[rdx](%[vcpu]) \n\t" | 1658 | "mov %%edx, %c[rdx](%[svm]) \n\t" |
1662 | "mov %%esi, %c[rsi](%[vcpu]) \n\t" | 1659 | "mov %%esi, %c[rsi](%[svm]) \n\t" |
1663 | "mov %%edi, %c[rdi](%[vcpu]) \n\t" | 1660 | "mov %%edi, %c[rdi](%[svm]) \n\t" |
1664 | "mov %%ebp, %c[rbp](%[vcpu]) \n\t" | 1661 | "mov %%ebp, %c[rbp](%[svm]) \n\t" |
1665 | 1662 | ||
1666 | "pop %%ebp; pop %%edi; pop %%esi;" | 1663 | "pop %%ebp; pop %%edi; pop %%esi;" |
1667 | "pop %%edx; pop %%ecx; pop %%ebx; \n\t" | 1664 | "pop %%edx; pop %%ecx; pop %%ebx; \n\t" |
1668 | #endif | 1665 | #endif |
1669 | : | 1666 | : |
1670 | : [vcpu]"a"(vcpu), | 1667 | : [svm]"a"(svm), |
1671 | [svm]"i"(offsetof(struct kvm_vcpu, _priv)), | ||
1672 | [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)), | 1668 | [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)), |
1673 | [rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])), | 1669 | [rbx]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RBX])), |
1674 | [rcx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RCX])), | 1670 | [rcx]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RCX])), |
1675 | [rdx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDX])), | 1671 | [rdx]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RDX])), |
1676 | [rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])), | 1672 | [rsi]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RSI])), |
1677 | [rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])), | 1673 | [rdi]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RDI])), |
1678 | [rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP])) | 1674 | [rbp]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RBP])) |
1679 | #ifdef CONFIG_X86_64 | 1675 | #ifdef CONFIG_X86_64 |
1680 | ,[r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])), | 1676 | ,[r8 ]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R8])), |
1681 | [r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])), | 1677 | [r9 ]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R9 ])), |
1682 | [r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])), | 1678 | [r10]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R10])), |
1683 | [r11]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R11])), | 1679 | [r11]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R11])), |
1684 | [r12]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R12])), | 1680 | [r12]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R12])), |
1685 | [r13]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R13])), | 1681 | [r13]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R13])), |
1686 | [r14]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R14])), | 1682 | [r14]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R14])), |
1687 | [r15]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R15])) | 1683 | [r15]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R15])) |
1688 | #endif | 1684 | #endif |
1689 | : "cc", "memory" ); | 1685 | : "cc", "memory" ); |
1690 | 1686 | ||
@@ -1865,7 +1861,6 @@ static struct kvm_arch_ops svm_arch_ops = { | |||
1865 | 1861 | ||
1866 | .run = svm_vcpu_run, | 1862 | .run = svm_vcpu_run, |
1867 | .skip_emulated_instruction = skip_emulated_instruction, | 1863 | .skip_emulated_instruction = skip_emulated_instruction, |
1868 | .vcpu_setup = svm_vcpu_setup, | ||
1869 | .patch_hypercall = svm_patch_hypercall, | 1864 | .patch_hypercall = svm_patch_hypercall, |
1870 | }; | 1865 | }; |
1871 | 1866 | ||