author	Avi Kivity <avi@redhat.com>	2010-01-21 08:31:50 -0500
committer	Marcelo Tosatti <mtosatti@redhat.com>	2010-03-01 10:36:04 -0500
commit	f6801dff23bd1902473902194667f4ac1eb6ea26 (patch)
tree	f0c147b1e5ec8fc67e87e93df08235814f6587cb /arch/x86/kvm
parent	836a1b3c3456042704c86aaa3d837b976de9343b (diff)
KVM: Rename vcpu->shadow_efer to efer
None of the other registers have the shadow_ prefix.

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
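For context: vcpu->arch.efer caches the guest's EFER MSR, and the bits tested throughout this diff gate long mode, NX, SYSCALL and SVM. A minimal sketch of those bit definitions, matching the architectural values (the real defines live in the kernel's msr-index.h; the guest_has_nx() helper is purely illustrative and not part of this patch):

#define EFER_SCE  (1ULL << 0)   /* SYSCALL/SYSRET enable */
#define EFER_LME  (1ULL << 8)   /* long mode enable, set by the guest */
#define EFER_LMA  (1ULL << 10)  /* long mode active, reported by the CPU */
#define EFER_NX   (1ULL << 11)  /* no-execute page protection */
#define EFER_SVME (1ULL << 12)  /* AMD SVM extensions enable */

/* Illustrative reader of the renamed field */
static inline int guest_has_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.efer & EFER_NX;	/* was vcpu->arch.shadow_efer */
}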
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--	arch/x86/kvm/mmu.c	2
-rw-r--r--	arch/x86/kvm/svm.c	12
-rw-r--r--	arch/x86/kvm/vmx.c	14
-rw-r--r--	arch/x86/kvm/x86.c	14
-rw-r--r--	arch/x86/kvm/x86.h	2
5 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 6f7158f4fbfd..599c422c390f 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -237,7 +237,7 @@ static int is_cpuid_PSE36(void)
 
 static int is_nx(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.shadow_efer & EFER_NX;
+	return vcpu->arch.efer & EFER_NX;
 }
 
 static int is_shadow_present_pte(u64 pte)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 800208a60a51..9596cc86d6dd 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -231,7 +231,7 @@ static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 		efer &= ~EFER_LME;
 
 	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
-	vcpu->arch.shadow_efer = efer;
+	vcpu->arch.efer = efer;
 }
 
 static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
@@ -996,14 +996,14 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 #ifdef CONFIG_X86_64
-	if (vcpu->arch.shadow_efer & EFER_LME) {
+	if (vcpu->arch.efer & EFER_LME) {
 		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
-			vcpu->arch.shadow_efer |= EFER_LMA;
+			vcpu->arch.efer |= EFER_LMA;
 			svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
 		}
 
 		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
-			vcpu->arch.shadow_efer &= ~EFER_LMA;
+			vcpu->arch.efer &= ~EFER_LMA;
 			svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
 		}
 	}
@@ -1361,7 +1361,7 @@ static int vmmcall_interception(struct vcpu_svm *svm)
 
 static int nested_svm_check_permissions(struct vcpu_svm *svm)
 {
-	if (!(svm->vcpu.arch.shadow_efer & EFER_SVME)
+	if (!(svm->vcpu.arch.efer & EFER_SVME)
 	    || !is_paging(&svm->vcpu)) {
 		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
 		return 1;
@@ -1764,7 +1764,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 	hsave->save.ds     = vmcb->save.ds;
 	hsave->save.gdtr   = vmcb->save.gdtr;
 	hsave->save.idtr   = vmcb->save.idtr;
-	hsave->save.efer   = svm->vcpu.arch.shadow_efer;
+	hsave->save.efer   = svm->vcpu.arch.efer;
 	hsave->save.cr0    = kvm_read_cr0(&svm->vcpu);
 	hsave->save.cr4    = svm->vcpu.arch.cr4;
 	hsave->save.rflags = vmcb->save.rflags;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 2e894954069f..a680d939546f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -613,7 +613,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
 	u64 guest_efer;
 	u64 ignore_bits;
 
-	guest_efer = vmx->vcpu.arch.shadow_efer;
+	guest_efer = vmx->vcpu.arch.efer;
 
 	/*
 	 * NX is emulated; LMA and LME handled by hardware; SCE meaningless
@@ -955,7 +955,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
 		 * if efer.sce is enabled.
 		 */
 		index = __find_msr_index(vmx, MSR_K6_STAR);
-		if ((index >= 0) && (vmx->vcpu.arch.shadow_efer & EFER_SCE))
+		if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
 			move_msr_up(vmx, index, save_nmsrs++);
 	}
 #endif
@@ -1600,7 +1600,7 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	 * of this msr depends on is_long_mode().
 	 */
 	vmx_load_host_state(to_vmx(vcpu));
-	vcpu->arch.shadow_efer = efer;
+	vcpu->arch.efer = efer;
 	if (!msr)
 		return;
 	if (efer & EFER_LMA) {
@@ -1632,13 +1632,13 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
 			     (guest_tr_ar & ~AR_TYPE_MASK)
 			     | AR_TYPE_BUSY_64_TSS);
 	}
-	vcpu->arch.shadow_efer |= EFER_LMA;
-	vmx_set_efer(vcpu, vcpu->arch.shadow_efer);
+	vcpu->arch.efer |= EFER_LMA;
+	vmx_set_efer(vcpu, vcpu->arch.efer);
 }
 
 static void exit_lmode(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.shadow_efer &= ~EFER_LMA;
+	vcpu->arch.efer &= ~EFER_LMA;
 
 	vmcs_write32(VM_ENTRY_CONTROLS,
 		     vmcs_read32(VM_ENTRY_CONTROLS)
@@ -1745,7 +1745,7 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 		enter_rmode(vcpu);
 
 #ifdef CONFIG_X86_64
-	if (vcpu->arch.shadow_efer & EFER_LME) {
+	if (vcpu->arch.efer & EFER_LME) {
 		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
 			enter_lmode(vcpu);
 		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a4a7d1892f72..27af6e353b06 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -456,7 +456,7 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
 	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
 #ifdef CONFIG_X86_64
-		if ((vcpu->arch.shadow_efer & EFER_LME)) {
+		if ((vcpu->arch.efer & EFER_LME)) {
 			int cs_db, cs_l;
 
 			if (!is_pae(vcpu)) {
@@ -655,7 +655,7 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	}
 
 	if (is_paging(vcpu)
-	    && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
+	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) {
 		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
 		kvm_inject_gp(vcpu, 0);
 		return;
@@ -686,9 +686,9 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	kvm_x86_ops->set_efer(vcpu, efer);
 
 	efer &= ~EFER_LMA;
-	efer |= vcpu->arch.shadow_efer & EFER_LMA;
+	efer |= vcpu->arch.efer & EFER_LMA;
 
-	vcpu->arch.shadow_efer = efer;
+	vcpu->arch.efer = efer;
 
 	vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
 	kvm_mmu_reset_context(vcpu);
@@ -1426,7 +1426,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 		data |= (((uint64_t)4ULL) << 40);
 		break;
 	case MSR_EFER:
-		data = vcpu->arch.shadow_efer;
+		data = vcpu->arch.efer;
 		break;
 	case MSR_KVM_WALL_CLOCK:
 		data = vcpu->kvm->arch.wall_clock;
@@ -4569,7 +4569,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 	sregs->cr3 = vcpu->arch.cr3;
 	sregs->cr4 = kvm_read_cr4(vcpu);
 	sregs->cr8 = kvm_get_cr8(vcpu);
-	sregs->efer = vcpu->arch.shadow_efer;
+	sregs->efer = vcpu->arch.efer;
 	sregs->apic_base = kvm_get_apic_base(vcpu);
 
 	memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
@@ -5059,7 +5059,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
 	kvm_set_cr8(vcpu, sregs->cr8);
 
-	mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
+	mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
 	kvm_x86_ops->set_efer(vcpu, sregs->efer);
 	kvm_set_apic_base(vcpu, sregs->apic_base);
 
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 2dc24a755b6d..2d101639bd8d 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -44,7 +44,7 @@ static inline bool is_protmode(struct kvm_vcpu *vcpu)
 static inline int is_long_mode(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_X86_64
-	return vcpu->arch.shadow_efer & EFER_LMA;
+	return vcpu->arch.efer & EFER_LMA;
 #else
 	return 0;
 #endif
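
Taken together, the svm.c and vmx.c hunks show the invariant behind these bits: the guest sets EFER_LME, and KVM flips EFER_LMA in vcpu->arch.efer only when paging actually turns on or off, which is why is_long_mode() above tests LMA rather than LME. A standalone sketch of that transition, using a hypothetical demo_vcpu struct rather than KVM's real types:

/* Illustrative only: mirrors the LME -> LMA handling that
 * svm_set_cr0()/vmx_set_cr0() perform on CR0.PG changes. */
struct demo_vcpu {
	u64 efer;	/* guest EFER image, as in vcpu->arch.efer */
	int paging;	/* CR0.PG */
};

static void demo_set_paging(struct demo_vcpu *v, int pg)
{
	if ((v->efer & EFER_LME) && !v->paging && pg)
		v->efer |= EFER_LMA;	/* entering long mode */
	if ((v->efer & EFER_LME) && v->paging && !pg)
		v->efer &= ~EFER_LMA;	/* leaving long mode */
	v->paging = pg;
}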