diff options
author | Joerg Roedel <joerg.roedel@amd.com> | 2008-01-31 08:57:40 -0500 |
---|---|---|
committer | Avi Kivity <avi@qumranet.com> | 2008-04-27 04:53:19 -0400 |
commit | 9457a712a2f464c4b21bb7f78998775c69673a0c (patch) | |
tree | 29297b37bb28b1a6255f75da6a5f745fcf3af589 | |
parent | 9f62e19a1107466b9e9501e23a9dd5acb81fdca1 (diff) |
KVM: allow access to EFER in 32bit KVM
This patch makes the EFER register accessible on a 32-bit KVM host. This is
necessary to boot 32-bit PAE guests under SVM.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
-rw-r--r-- | arch/x86/kvm/x86.c | 10 |
1 file changed, 0 insertions, 10 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index db16f2353e4b..38edb2f558ea 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -432,8 +432,6 @@ static u32 emulated_msrs[] = { | |||
432 | MSR_IA32_MISC_ENABLE, | 432 | MSR_IA32_MISC_ENABLE, |
433 | }; | 433 | }; |
434 | 434 | ||
435 | #ifdef CONFIG_X86_64 | ||
436 | |||
437 | static void set_efer(struct kvm_vcpu *vcpu, u64 efer) | 435 | static void set_efer(struct kvm_vcpu *vcpu, u64 efer) |
438 | { | 436 | { |
439 | if (efer & efer_reserved_bits) { | 437 | if (efer & efer_reserved_bits) { |
@@ -458,8 +456,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer) | |||
458 | vcpu->arch.shadow_efer = efer; | 456 | vcpu->arch.shadow_efer = efer; |
459 | } | 457 | } |
460 | 458 | ||
461 | #endif | ||
462 | |||
463 | void kvm_enable_efer_bits(u64 mask) | 459 | void kvm_enable_efer_bits(u64 mask) |
464 | { | 460 | { |
465 | efer_reserved_bits &= ~mask; | 461 | efer_reserved_bits &= ~mask; |
@@ -489,11 +485,9 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) | |||
489 | int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) | 485 | int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) |
490 | { | 486 | { |
491 | switch (msr) { | 487 | switch (msr) { |
492 | #ifdef CONFIG_X86_64 | ||
493 | case MSR_EFER: | 488 | case MSR_EFER: |
494 | set_efer(vcpu, data); | 489 | set_efer(vcpu, data); |
495 | break; | 490 | break; |
496 | #endif | ||
497 | case MSR_IA32_MC0_STATUS: | 491 | case MSR_IA32_MC0_STATUS: |
498 | pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n", | 492 | pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n", |
499 | __FUNCTION__, data); | 493 | __FUNCTION__, data); |
@@ -571,11 +565,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) | |||
571 | case MSR_IA32_MISC_ENABLE: | 565 | case MSR_IA32_MISC_ENABLE: |
572 | data = vcpu->arch.ia32_misc_enable_msr; | 566 | data = vcpu->arch.ia32_misc_enable_msr; |
573 | break; | 567 | break; |
574 | #ifdef CONFIG_X86_64 | ||
575 | case MSR_EFER: | 568 | case MSR_EFER: |
576 | data = vcpu->arch.shadow_efer; | 569 | data = vcpu->arch.shadow_efer; |
577 | break; | 570 | break; |
578 | #endif | ||
579 | default: | 571 | default: |
580 | pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr); | 572 | pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr); |
581 | return 1; | 573 | return 1; |
@@ -2880,9 +2872,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | |||
2880 | set_cr8(vcpu, sregs->cr8); | 2872 | set_cr8(vcpu, sregs->cr8); |
2881 | 2873 | ||
2882 | mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer; | 2874 | mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer; |
2883 | #ifdef CONFIG_X86_64 | ||
2884 | kvm_x86_ops->set_efer(vcpu, sregs->efer); | 2875 | kvm_x86_ops->set_efer(vcpu, sregs->efer); |
2885 | #endif | ||
2886 | kvm_set_apic_base(vcpu, sregs->apic_base); | 2876 | kvm_set_apic_base(vcpu, sregs->apic_base); |
2887 | 2877 | ||
2888 | kvm_x86_ops->decache_cr4_guest_bits(vcpu); | 2878 | kvm_x86_ops->decache_cr4_guest_bits(vcpu); |