author     Joerg Roedel <joerg.roedel@amd.com>    2008-01-31 08:57:39 -0500
committer  Avi Kivity <avi@qumranet.com>          2008-04-27 04:53:18 -0400
commit     9f62e19a1107466b9e9501e23a9dd5acb81fdca1 (patch)
tree       bb99f0372f4ac48a1d341ce340a422abe0c4d9aa /arch/x86/kvm/vmx.c
parent     50a37eb4e05efaa7bac6a948fd4db1a48c728b99 (diff)
KVM: VMX: unifdef the EFER specific code
To allow access to the EFER register in 32-bit KVM, the EFER-specific code has to be exported to the generic x86 code. This patch does this in a backwards-compatible manner.

[avi: add check for EFER-less hosts]

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
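The backwards compatibility rests on one detail visible in the diff below: on a host without EFER, find_msr_entry(vmx, MSR_EFER) yields NULL, and the new early return makes vmx_set_efer() a no-op beyond recording the shadow value, which is what lets the #ifdef CONFIG_X86_64 guards be dropped. Below is a minimal stand-alone C model of that guard pattern, assuming a simple array-backed MSR list; apart from find_msr_entry, MSR_EFER, and the "!msr, early return" shape, every name here is a hypothetical stand-in, not the kernel code.

/* Minimal stand-alone model of the guard pattern added by this patch.
 * Hypothetical names throughout; only find_msr_entry, MSR_EFER and the
 * "!msr -> early return" shape mirror the real code. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define MSR_EFER 0xc0000080u	/* the real EFER MSR index */

struct msr_entry {
	uint32_t index;
	uint64_t data;
};

/* Returns NULL when the host's MSR list has no slot for 'index',
 * e.g. a 32-bit host without EFER. */
static struct msr_entry *find_msr_entry(struct msr_entry *msrs, size_t n,
					uint32_t index)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (msrs[i].index == index)
			return &msrs[i];
	return NULL;
}

static uint64_t shadow_efer;	/* stand-in for vcpu->arch.shadow_efer */

static void set_efer(struct msr_entry *msrs, size_t n, uint64_t efer)
{
	struct msr_entry *msr = find_msr_entry(msrs, n, MSR_EFER);

	shadow_efer = efer;	/* always track the guest-visible value */
	if (!msr)
		return;		/* EFER-less host: safely do nothing more */
	msr->data = efer;	/* otherwise program the real entry */
}

int main(void)
{
	struct msr_entry with_efer[] = { { MSR_EFER, 0 } };

	set_efer(with_efer, 1, 0x500);	/* host has EFER: entry written */
	set_efer(NULL, 0, 0x500);	/* EFER-less host: no-op past shadow */
	printf("shadow=%#llx entry=%#llx\n",
	       (unsigned long long)shadow_efer,
	       (unsigned long long)with_efer[0].data);
	return 0;
}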
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--  arch/x86/kvm/vmx.c  |  10  ++--------
1 file changed, 2 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a509910f6b53..76944f2c883b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1335,14 +1335,14 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	vcpu->arch.cr4 = cr4;
 }
 
-#ifdef CONFIG_X86_64
-
 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
 
 	vcpu->arch.shadow_efer = efer;
+	if (!msr)
+		return;
 	if (efer & EFER_LMA) {
 		vmcs_write32(VM_ENTRY_CONTROLS,
 			     vmcs_read32(VM_ENTRY_CONTROLS) |
@@ -1359,8 +1359,6 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	setup_msrs(vmx);
 }
 
-#endif
-
 static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
 {
 	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
@@ -1775,9 +1773,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	vmx->vcpu.arch.cr0 = 0x60000010;
 	vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */
 	vmx_set_cr4(&vmx->vcpu, 0);
-#ifdef CONFIG_X86_64
 	vmx_set_efer(&vmx->vcpu, 0);
-#endif
 	vmx_fpu_activate(&vmx->vcpu);
 	update_exception_bitmap(&vmx->vcpu);
 
@@ -2668,9 +2664,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.set_cr0 = vmx_set_cr0,
 	.set_cr3 = vmx_set_cr3,
 	.set_cr4 = vmx_set_cr4,
-#ifdef CONFIG_X86_64
 	.set_efer = vmx_set_efer,
-#endif
 	.get_idt = vmx_get_idt,
 	.set_idt = vmx_set_idt,
 	.get_gdt = vmx_get_gdt,
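A note on the last hunk: with the #ifdef removed, the .set_efer member of kvm_x86_ops is populated on 32-bit builds as well, so generic x86 code can call through the ops table unconditionally and the NULL check inside vmx_set_efer() absorbs the EFER-less case. The stand-alone sketch below models that design choice under made-up names (struct x86_ops, vmx_set_efer_model); only the always-populated function pointer idea comes from the patch.

/* Hypothetical sketch of the design choice in the last hunk: once the
 * ops table always carries set_efer, generic callers need no
 * CONFIG_X86_64 guard around the indirect call. Made-up names. */
#include <stdio.h>
#include <stdint.h>

struct x86_ops {
	void (*set_efer)(uint64_t efer);	/* unconditionally present */
};

static void vmx_set_efer_model(uint64_t efer)
{
	printf("set_efer(%#llx)\n", (unsigned long long)efer);
}

static struct x86_ops ops = {
	.set_efer = vmx_set_efer_model,		/* no #ifdef needed */
};

int main(void)
{
	ops.set_efer(0);	/* generic code calls through on any build */
	return 0;
}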