diff options
author | Avi Kivity <avi@redhat.com> | 2010-01-21 08:31:50 -0500 |
---|---|---|
committer | Marcelo Tosatti <mtosatti@redhat.com> | 2010-03-01 10:36:04 -0500 |
commit | f6801dff23bd1902473902194667f4ac1eb6ea26 (patch) | |
tree | f0c147b1e5ec8fc67e87e93df08235814f6587cb /arch/x86/kvm/vmx.c | |
parent | 836a1b3c3456042704c86aaa3d837b976de9343b (diff) |
KVM: Rename vcpu->shadow_efer to efer
None of the other registers have the shadow_ prefix.
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r-- | arch/x86/kvm/vmx.c | 14 |
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 2e894954069f..a680d939546f 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -613,7 +613,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) | |||
613 | u64 guest_efer; | 613 | u64 guest_efer; |
614 | u64 ignore_bits; | 614 | u64 ignore_bits; |
615 | 615 | ||
616 | guest_efer = vmx->vcpu.arch.shadow_efer; | 616 | guest_efer = vmx->vcpu.arch.efer; |
617 | 617 | ||
618 | /* | 618 | /* |
619 | * NX is emulated; LMA and LME handled by hardware; SCE meaninless | 619 | * NX is emulated; LMA and LME handled by hardware; SCE meaninless |
@@ -955,7 +955,7 @@ static void setup_msrs(struct vcpu_vmx *vmx) | |||
955 | * if efer.sce is enabled. | 955 | * if efer.sce is enabled. |
956 | */ | 956 | */ |
957 | index = __find_msr_index(vmx, MSR_K6_STAR); | 957 | index = __find_msr_index(vmx, MSR_K6_STAR); |
958 | if ((index >= 0) && (vmx->vcpu.arch.shadow_efer & EFER_SCE)) | 958 | if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE)) |
959 | move_msr_up(vmx, index, save_nmsrs++); | 959 | move_msr_up(vmx, index, save_nmsrs++); |
960 | } | 960 | } |
961 | #endif | 961 | #endif |
@@ -1600,7 +1600,7 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) | |||
1600 | * of this msr depends on is_long_mode(). | 1600 | * of this msr depends on is_long_mode(). |
1601 | */ | 1601 | */ |
1602 | vmx_load_host_state(to_vmx(vcpu)); | 1602 | vmx_load_host_state(to_vmx(vcpu)); |
1603 | vcpu->arch.shadow_efer = efer; | 1603 | vcpu->arch.efer = efer; |
1604 | if (!msr) | 1604 | if (!msr) |
1605 | return; | 1605 | return; |
1606 | if (efer & EFER_LMA) { | 1606 | if (efer & EFER_LMA) { |
@@ -1632,13 +1632,13 @@ static void enter_lmode(struct kvm_vcpu *vcpu) | |||
1632 | (guest_tr_ar & ~AR_TYPE_MASK) | 1632 | (guest_tr_ar & ~AR_TYPE_MASK) |
1633 | | AR_TYPE_BUSY_64_TSS); | 1633 | | AR_TYPE_BUSY_64_TSS); |
1634 | } | 1634 | } |
1635 | vcpu->arch.shadow_efer |= EFER_LMA; | 1635 | vcpu->arch.efer |= EFER_LMA; |
1636 | vmx_set_efer(vcpu, vcpu->arch.shadow_efer); | 1636 | vmx_set_efer(vcpu, vcpu->arch.efer); |
1637 | } | 1637 | } |
1638 | 1638 | ||
1639 | static void exit_lmode(struct kvm_vcpu *vcpu) | 1639 | static void exit_lmode(struct kvm_vcpu *vcpu) |
1640 | { | 1640 | { |
1641 | vcpu->arch.shadow_efer &= ~EFER_LMA; | 1641 | vcpu->arch.efer &= ~EFER_LMA; |
1642 | 1642 | ||
1643 | vmcs_write32(VM_ENTRY_CONTROLS, | 1643 | vmcs_write32(VM_ENTRY_CONTROLS, |
1644 | vmcs_read32(VM_ENTRY_CONTROLS) | 1644 | vmcs_read32(VM_ENTRY_CONTROLS) |
@@ -1745,7 +1745,7 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) | |||
1745 | enter_rmode(vcpu); | 1745 | enter_rmode(vcpu); |
1746 | 1746 | ||
1747 | #ifdef CONFIG_X86_64 | 1747 | #ifdef CONFIG_X86_64 |
1748 | if (vcpu->arch.shadow_efer & EFER_LME) { | 1748 | if (vcpu->arch.efer & EFER_LME) { |
1749 | if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) | 1749 | if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) |
1750 | enter_lmode(vcpu); | 1750 | enter_lmode(vcpu); |
1751 | if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) | 1751 | if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) |