author		Nitin A Kamble <nitin.a.kamble@intel.com>	2009-06-04 18:04:08 -0400
committer	Avi Kivity <avi@redhat.com>			2009-06-10 04:49:00 -0400
commit		56b237e31abf4d6dbc6e2a0214049b9a23be4883
tree		92a50055512d6b95331bae708de740631a04322b /arch/x86/kvm/vmx.c
parent		20f65983e30f222e5383f77206e3f571d1d64610
KVM: VMX: Rename rmode.active to rmode.vm86_active
That way the interpretation of rmode.active becomes clearer with the
unrestricted guest code: once a guest can run real mode natively, the
flag specifically means that vm86 emulation of real mode is in use.
Signed-off-by: Nitin A Kamble <nitin.a.kamble@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
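
[Annotation] For readers without the tree at hand, the renamed flag lives in
the per-vcpu real-mode bookkeeping. Below is a minimal sketch of that state;
the struct and field names other than vm86_active and tr are hypothetical,
inferred from the accesses in this diff rather than copied from
arch/x86/include/asm/kvm_host.h:

	/* Sketch only: layout inferred from the usages below
	 * (vcpu->arch.rmode.vm86_active, vcpu->arch.rmode.tr.*). */
	struct rmode_segment {		/* hypothetical name */
		u16 selector;
		unsigned long base;
		u32 limit;
	};

	struct rmode_state {		/* hypothetical name; embedded as arch.rmode */
		int vm86_active;	/* was "active": nonzero while the guest's
					 * real mode is faked with vm86. The rename
					 * matters because unrestricted guests can
					 * run real mode natively, so "active"
					 * alone would be ambiguous. */
		struct rmode_segment tr; /* TR contents saved across enter_rmode() */
	};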
Diffstat (limited to 'arch/x86/kvm/vmx.c')
 arch/x86/kvm/vmx.c | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index fe2ce2b40504..c379a3472fa9 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -495,7 +495,7 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
 			eb |= 1u << BP_VECTOR;
 	}
-	if (vcpu->arch.rmode.active)
+	if (vcpu->arch.rmode.vm86_active)
 		eb = ~0;
 	if (enable_ept)
 		eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
@@ -731,7 +731,7 @@ static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
 
 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
-	if (vcpu->arch.rmode.active)
+	if (vcpu->arch.rmode.vm86_active)
 		rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
 	vmcs_writel(GUEST_RFLAGS, rflags);
 }
@@ -788,7 +788,7 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
 		intr_info |= INTR_INFO_DELIVER_CODE_MASK;
 	}
 
-	if (vcpu->arch.rmode.active) {
+	if (vcpu->arch.rmode.vm86_active) {
 		vmx->rmode.irq.pending = true;
 		vmx->rmode.irq.vector = nr;
 		vmx->rmode.irq.rip = kvm_rip_read(vcpu);
@@ -1363,7 +1363,7 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	vmx->emulation_required = 1;
-	vcpu->arch.rmode.active = 0;
+	vcpu->arch.rmode.vm86_active = 0;
 
 	vmcs_writel(GUEST_TR_BASE, vcpu->arch.rmode.tr.base);
 	vmcs_write32(GUEST_TR_LIMIT, vcpu->arch.rmode.tr.limit);
@@ -1425,7 +1425,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	vmx->emulation_required = 1;
-	vcpu->arch.rmode.active = 1;
+	vcpu->arch.rmode.vm86_active = 1;
 
 	vcpu->arch.rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
 	vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));
@@ -1594,10 +1594,10 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
 	vmx_fpu_deactivate(vcpu);
 
-	if (vcpu->arch.rmode.active && (cr0 & X86_CR0_PE))
+	if (vcpu->arch.rmode.vm86_active && (cr0 & X86_CR0_PE))
 		enter_pmode(vcpu);
 
-	if (!vcpu->arch.rmode.active && !(cr0 & X86_CR0_PE))
+	if (!vcpu->arch.rmode.vm86_active && !(cr0 & X86_CR0_PE))
 		enter_rmode(vcpu);
 
 #ifdef CONFIG_X86_64
@@ -1655,7 +1655,7 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 
 static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
-	unsigned long hw_cr4 = cr4 | (vcpu->arch.rmode.active ?
+	unsigned long hw_cr4 = cr4 | (vcpu->arch.rmode.vm86_active ?
 		       KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
 
 	vcpu->arch.cr4 = cr4;
@@ -1738,7 +1738,7 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
 	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
 	u32 ar;
 
-	if (vcpu->arch.rmode.active && seg == VCPU_SREG_TR) {
+	if (vcpu->arch.rmode.vm86_active && seg == VCPU_SREG_TR) {
 		vcpu->arch.rmode.tr.selector = var->selector;
 		vcpu->arch.rmode.tr.base = var->base;
 		vcpu->arch.rmode.tr.limit = var->limit;
@@ -1748,7 +1748,7 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
 	vmcs_writel(sf->base, var->base);
 	vmcs_write32(sf->limit, var->limit);
 	vmcs_write16(sf->selector, var->selector);
-	if (vcpu->arch.rmode.active && var->s) {
+	if (vcpu->arch.rmode.vm86_active && var->s) {
 		/*
 		 * Hack real-mode segments into vm86 compatibility.
 		 */
@@ -2317,7 +2317,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 		goto out;
 	}
 
-	vmx->vcpu.arch.rmode.active = 0;
+	vmx->vcpu.arch.rmode.vm86_active = 0;
 
 	vmx->soft_vnmi_blocked = 0;
 
@@ -2455,7 +2455,7 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu)
 	KVMTRACE_1D(INJ_VIRQ, vcpu, (u32)irq, handler);
 
 	++vcpu->stat.irq_injections;
-	if (vcpu->arch.rmode.active) {
+	if (vcpu->arch.rmode.vm86_active) {
 		vmx->rmode.irq.pending = true;
 		vmx->rmode.irq.vector = irq;
 		vmx->rmode.irq.rip = kvm_rip_read(vcpu);
@@ -2493,7 +2493,7 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
 	}
 
 	++vcpu->stat.nmi_injections;
-	if (vcpu->arch.rmode.active) {
+	if (vcpu->arch.rmode.vm86_active) {
 		vmx->rmode.irq.pending = true;
 		vmx->rmode.irq.vector = NMI_VECTOR;
 		vmx->rmode.irq.rip = kvm_rip_read(vcpu);
@@ -2629,7 +2629,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		return kvm_mmu_page_fault(vcpu, cr2, error_code);
 	}
 
-	if (vcpu->arch.rmode.active &&
+	if (vcpu->arch.rmode.vm86_active &&
 	    handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
 				error_code)) {
 		if (vcpu->arch.halt_request) {
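
[Annotation] After the rename, every call site reads as a question about vm86
emulation rather than about real mode in general. A hypothetical helper, not
part of this patch, shows the distinction the new name is meant to preserve:

	/* Sketch only: vmx_emulating_real_mode() is not a function in this
	 * patch or in the tree; it just restates what the flag now means. */
	static inline bool vmx_emulating_real_mode(struct kvm_vcpu *vcpu)
	{
		/* True only while real mode is faked with vm86. With
		 * unrestricted guests, real mode can also run natively,
		 * in which case this is false even though CR0.PE is clear. */
		return vcpu->arch.rmode.vm86_active;
	}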