author     Joerg Roedel <joerg.roedel@amd.com>    2009-08-07 05:49:33 -0400
committer  Avi Kivity <avi@redhat.com>            2009-09-10 01:33:24 -0400
commit     e6aa9abd7381557c67be6a9e7240eb132ca00d66 (patch)
tree       59913543c5e774e68fa79af23577ad4af35cad80 /arch/x86/kvm
parent     a5c3832dfe6324862b4fd1d90831266b15d4b58e (diff)
KVM: SVM: move nested svm state into separate struct
This makes it clearer for which purpose these members of struct vcpu_svm
exist.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Acked-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
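
For orientation, a condensed sketch of the layout this patch introduces, pieced together from the diff below; struct vmcb, struct kvm_vcpu and the u32/u64 typedefs come from the kernel headers, and unrelated vcpu_svm members are omitted:

	/* All nested-SVM bookkeeping now lives in one place. */
	struct nested_state {
		struct vmcb *hsave;	/* host save area used across nested VMRUN/#VMEXIT */
		u64 hsave_msr;		/* value the guest wrote to MSR_VM_HSAVE_PA */
		u64 vmcb;		/* gpa of the nested guest's vmcb; zero while not nested */

		/* These are the merged vectors */
		u32 *msrpm;

		/* gpa pointers to the real vectors */
		u64 vmcb_msrpm;
	};

	struct vcpu_svm {
		struct kvm_vcpu vcpu;
		struct vmcb *vmcb;
		/* ... */
		u32 *msrpm;

		struct nested_state nested;	/* replaces nested_vmcb, hsave, nested_msrpm, ... */
	};

Call sites simply switch from the old nested_* members to the new substruct, e.g. is_nested() now tests svm->nested.vmcb instead of svm->nested_vmcb.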
Diffstat (limited to 'arch/x86/kvm')
 arch/x86/kvm/svm.c | 62
 1 file changed, 33 insertions(+), 29 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 825b82540f01..fbadaa7cb27a 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -70,6 +70,18 @@ static const u32 host_save_user_msrs[] = {
 
 struct kvm_vcpu;
 
+struct nested_state {
+	struct vmcb *hsave;
+	u64 hsave_msr;
+	u64 vmcb;
+
+	/* These are the merged vectors */
+	u32 *msrpm;
+
+	/* gpa pointers to the real vectors */
+	u64 vmcb_msrpm;
+};
+
 struct vcpu_svm {
 	struct kvm_vcpu vcpu;
 	struct vmcb *vmcb;
@@ -85,16 +97,8 @@ struct vcpu_svm {
 	u64 host_gs_base;
 
 	u32 *msrpm;
-	struct vmcb *hsave;
-	u64 hsave_msr;
-
-	u64 nested_vmcb;
 
-	/* These are the merged vectors */
-	u32 *nested_msrpm;
-
-	/* gpa pointers to the real vectors */
-	u64 nested_vmcb_msrpm;
+	struct nested_state nested;
 };
 
 /* enable NPT for AMD64 and X86 with PAE */
@@ -127,7 +131,7 @@ static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
 
 static inline bool is_nested(struct vcpu_svm *svm)
 {
-	return svm->nested_vmcb;
+	return svm->nested.vmcb;
 }
 
 static inline void enable_gif(struct vcpu_svm *svm)
@@ -636,7 +640,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 	}
 	force_new_asid(&svm->vcpu);
 
-	svm->nested_vmcb = 0;
+	svm->nested.vmcb = 0;
 	svm->vcpu.arch.hflags = 0;
 
 	enable_gif(svm);
@@ -699,9 +703,9 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	hsave_page = alloc_page(GFP_KERNEL);
 	if (!hsave_page)
 		goto uninit;
-	svm->hsave = page_address(hsave_page);
+	svm->nested.hsave = page_address(hsave_page);
 
-	svm->nested_msrpm = page_address(nested_msrpm_pages);
+	svm->nested.msrpm = page_address(nested_msrpm_pages);
 
 	svm->vmcb = page_address(page);
 	clear_page(svm->vmcb);
@@ -731,8 +735,8 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 
 	__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
 	__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
-	__free_page(virt_to_page(svm->hsave));
-	__free_pages(virt_to_page(svm->nested_msrpm), MSRPM_ALLOC_ORDER);
+	__free_page(virt_to_page(svm->nested.hsave));
+	__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
 	kvm_vcpu_uninit(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, svm);
 }
@@ -1558,13 +1562,13 @@ static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override)
 
 	switch (svm->vmcb->control.exit_code) {
 	case SVM_EXIT_MSR:
-		return nested_svm_do(svm, svm->nested_vmcb,
-				     svm->nested_vmcb_msrpm, NULL,
+		return nested_svm_do(svm, svm->nested.vmcb,
+				     svm->nested.vmcb_msrpm, NULL,
 				     nested_svm_exit_handled_msr);
 	default: break;
 	}
 
-	return nested_svm_do(svm, svm->nested_vmcb, 0, &k,
+	return nested_svm_do(svm, svm->nested.vmcb, 0, &k,
 			     nested_svm_exit_handled_real);
 }
 
@@ -1604,7 +1608,7 @@ static int nested_svm_vmexit_real(struct vcpu_svm *svm, void *arg1,
 				  void *arg2, void *opaque)
 {
 	struct vmcb *nested_vmcb = (struct vmcb *)arg1;
-	struct vmcb *hsave = svm->hsave;
+	struct vmcb *hsave = svm->nested.hsave;
 	struct vmcb *vmcb = svm->vmcb;
 
 	/* Give the current vmcb to the guest */
@@ -1679,7 +1683,7 @@ static int nested_svm_vmexit_real(struct vcpu_svm *svm, void *arg1,
 	svm->vmcb->control.exit_int_info = 0;
 
 	/* Exit nested SVM mode */
-	svm->nested_vmcb = 0;
+	svm->nested.vmcb = 0;
 
 	return 0;
 }
@@ -1687,7 +1691,7 @@ static int nested_svm_vmexit_real(struct vcpu_svm *svm, void *arg1,
 static int nested_svm_vmexit(struct vcpu_svm *svm)
 {
 	nsvm_printk("VMexit\n");
-	if (nested_svm_do(svm, svm->nested_vmcb, 0,
+	if (nested_svm_do(svm, svm->nested.vmcb, 0,
 			  NULL, nested_svm_vmexit_real))
 		return 1;
 
@@ -1703,8 +1707,8 @@ static int nested_svm_vmrun_msrpm(struct vcpu_svm *svm, void *arg1,
 	int i;
 	u32 *nested_msrpm = (u32*)arg1;
 	for (i=0; i< PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++)
-		svm->nested_msrpm[i] = svm->msrpm[i] | nested_msrpm[i];
-	svm->vmcb->control.msrpm_base_pa = __pa(svm->nested_msrpm);
+		svm->nested.msrpm[i] = svm->msrpm[i] | nested_msrpm[i];
+	svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
 
 	return 0;
 }
@@ -1713,11 +1717,11 @@ static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
 			    void *arg2, void *opaque)
 {
 	struct vmcb *nested_vmcb = (struct vmcb *)arg1;
-	struct vmcb *hsave = svm->hsave;
+	struct vmcb *hsave = svm->nested.hsave;
 	struct vmcb *vmcb = svm->vmcb;
 
 	/* nested_vmcb is our indicator if nested SVM is activated */
-	svm->nested_vmcb = svm->vmcb->save.rax;
+	svm->nested.vmcb = svm->vmcb->save.rax;
 
 	/* Clear internal status */
 	kvm_clear_exception_queue(&svm->vcpu);
@@ -1795,7 +1799,7 @@ static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
 
 	svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
 
-	svm->nested_vmcb_msrpm = nested_vmcb->control.msrpm_base_pa;
+	svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa;
 
 	force_new_asid(&svm->vcpu);
 	svm->vmcb->control.exit_int_info = nested_vmcb->control.exit_int_info;
@@ -1897,7 +1901,7 @@ static int vmrun_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 			NULL, nested_svm_vmrun))
 		return 1;
 
-	if (nested_svm_do(svm, svm->nested_vmcb_msrpm, 0,
+	if (nested_svm_do(svm, svm->nested.vmcb_msrpm, 0,
 			  NULL, nested_svm_vmrun_msrpm))
 		return 1;
 
@@ -2107,7 +2111,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
 		*data = svm->vmcb->save.last_excp_to;
 		break;
 	case MSR_VM_HSAVE_PA:
-		*data = svm->hsave_msr;
+		*data = svm->nested.hsave_msr;
 		break;
 	case MSR_VM_CR:
 		*data = 0;
@@ -2195,7 +2199,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 		svm_disable_lbrv(svm);
 		break;
 	case MSR_VM_HSAVE_PA:
-		svm->hsave_msr = data;
+		svm->nested.hsave_msr = data;
 		break;
 	case MSR_VM_CR:
 	case MSR_VM_IGNNE: