author		Avi Kivity <avi@redhat.com>	2009-06-09 07:10:45 -0400
committer	Avi Kivity <avi@redhat.com>	2009-09-10 01:32:50 -0400
commit		7ffd92c53c5ebd0ad5a68ac3ca033c3a06374d19 (patch)
tree		c6107ada98bd730a8d681b3cd8c35e1582a6b5fd /arch/x86/kvm/vmx.c
parent		6a4a98397331723dce25a7537270548d91523431 (diff)

KVM: VMX: Move rmode structure to vmx-specific code

rmode is only used in vmx, so move it to vmx.c

Signed-off-by: Avi Kivity <avi@redhat.com>
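For context on why this is a mechanical change: struct vcpu_vmx embeds the generic struct kvm_vcpu, so VMX code can always recover its own container from a vcpu pointer. The sketch below shows the container_of() idiom that the to_vmx() helper in vmx.c is built on (a sketch of the pattern, not text from this patch):

	static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
	{
		/* struct vcpu_vmx embeds its kvm_vcpu as the member named
		 * 'vcpu', so container_of() walks back from the embedded
		 * member to the enclosing VMX-specific structure. */
		return container_of(vcpu, struct vcpu_vmx, vcpu);
	}

With rmode moved into vcpu_vmx, every former vcpu->arch.rmode.* access becomes vmx->rmode.* where a struct vcpu_vmx *vmx is already in scope, or to_vmx(vcpu)->rmode.* otherwise. This keeps VMX-only real-mode emulation state out of the vendor-neutral struct kvm_vcpu_arch shared with SVM.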
Diffstat (limited to 'arch/x86/kvm/vmx.c')

-rw-r--r--	arch/x86/kvm/vmx.c	78

1 file changed, 44 insertions(+), 34 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f0f9773f0b0f..ae682929a642 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -88,6 +88,14 @@ struct vcpu_vmx {
 		int guest_efer_loaded;
 	} host_state;
 	struct {
+		int vm86_active;
+		u8 save_iopl;
+		struct kvm_save_segment {
+			u16 selector;
+			unsigned long base;
+			u32 limit;
+			u32 ar;
+		} tr, es, ds, fs, gs;
 		struct {
 			bool pending;
 			u8 vector;
@@ -516,7 +524,7 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
 			eb |= 1u << BP_VECTOR;
 	}
-	if (vcpu->arch.rmode.vm86_active)
+	if (to_vmx(vcpu)->rmode.vm86_active)
 		eb = ~0;
 	if (enable_ept)
 		eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
@@ -752,7 +760,7 @@ static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
 
 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
-	if (vcpu->arch.rmode.vm86_active)
+	if (to_vmx(vcpu)->rmode.vm86_active)
 		rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
 	vmcs_writel(GUEST_RFLAGS, rflags);
 }
@@ -809,7 +817,7 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
 		intr_info |= INTR_INFO_DELIVER_CODE_MASK;
 	}
 
-	if (vcpu->arch.rmode.vm86_active) {
+	if (vmx->rmode.vm86_active) {
 		vmx->rmode.irq.pending = true;
 		vmx->rmode.irq.vector = nr;
 		vmx->rmode.irq.rip = kvm_rip_read(vcpu);
@@ -1395,15 +1403,15 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	vmx->emulation_required = 1;
-	vcpu->arch.rmode.vm86_active = 0;
+	vmx->rmode.vm86_active = 0;
 
-	vmcs_writel(GUEST_TR_BASE, vcpu->arch.rmode.tr.base);
-	vmcs_write32(GUEST_TR_LIMIT, vcpu->arch.rmode.tr.limit);
-	vmcs_write32(GUEST_TR_AR_BYTES, vcpu->arch.rmode.tr.ar);
+	vmcs_writel(GUEST_TR_BASE, vmx->rmode.tr.base);
+	vmcs_write32(GUEST_TR_LIMIT, vmx->rmode.tr.limit);
+	vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);
 
 	flags = vmcs_readl(GUEST_RFLAGS);
 	flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
-	flags |= (vcpu->arch.rmode.save_iopl << IOPL_SHIFT);
+	flags |= (vmx->rmode.save_iopl << IOPL_SHIFT);
 	vmcs_writel(GUEST_RFLAGS, flags);
 
 	vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
@@ -1414,10 +1422,10 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
 	if (emulate_invalid_guest_state)
 		return;
 
-	fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->arch.rmode.es);
-	fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->arch.rmode.ds);
-	fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->arch.rmode.gs);
-	fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->arch.rmode.fs);
+	fix_pmode_dataseg(VCPU_SREG_ES, &vmx->rmode.es);
+	fix_pmode_dataseg(VCPU_SREG_DS, &vmx->rmode.ds);
+	fix_pmode_dataseg(VCPU_SREG_GS, &vmx->rmode.gs);
+	fix_pmode_dataseg(VCPU_SREG_FS, &vmx->rmode.fs);
 
 	vmcs_write16(GUEST_SS_SELECTOR, 0);
 	vmcs_write32(GUEST_SS_AR_BYTES, 0x93);
@@ -1460,19 +1468,19 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 		return;
 
 	vmx->emulation_required = 1;
-	vcpu->arch.rmode.vm86_active = 1;
+	vmx->rmode.vm86_active = 1;
 
-	vcpu->arch.rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
+	vmx->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
 	vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));
 
-	vcpu->arch.rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
+	vmx->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
 	vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
 
-	vcpu->arch.rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
+	vmx->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
 	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
 
 	flags = vmcs_readl(GUEST_RFLAGS);
-	vcpu->arch.rmode.save_iopl
-		= (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
+	vmx->rmode.save_iopl
+		= (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
 
 	flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
@@ -1494,10 +1502,10 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 	vmcs_writel(GUEST_CS_BASE, 0xf0000);
 	vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);
 
-	fix_rmode_seg(VCPU_SREG_ES, &vcpu->arch.rmode.es);
-	fix_rmode_seg(VCPU_SREG_DS, &vcpu->arch.rmode.ds);
-	fix_rmode_seg(VCPU_SREG_GS, &vcpu->arch.rmode.gs);
-	fix_rmode_seg(VCPU_SREG_FS, &vcpu->arch.rmode.fs);
+	fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.es);
+	fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.ds);
+	fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.gs);
+	fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.fs);
 
 continue_rmode:
 	kvm_mmu_reset_context(vcpu);
@@ -1638,6 +1646,7 @@ static void ept_update_paging_mode_cr4(unsigned long *hw_cr4,
 
 static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long hw_cr0;
 
 	if (enable_unrestricted_guest)
@@ -1648,10 +1657,10 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
 	vmx_fpu_deactivate(vcpu);
 
-	if (vcpu->arch.rmode.vm86_active && (cr0 & X86_CR0_PE))
+	if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
 		enter_pmode(vcpu);
 
-	if (!vcpu->arch.rmode.vm86_active && !(cr0 & X86_CR0_PE))
+	if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
 		enter_rmode(vcpu);
 
 #ifdef CONFIG_X86_64
@@ -1707,7 +1716,7 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 
 static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
-	unsigned long hw_cr4 = cr4 | (vcpu->arch.rmode.vm86_active ?
+	unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ?
 		    KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
 
 	vcpu->arch.cr4 = cr4;
@@ -1787,20 +1796,21 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var)
 static void vmx_set_segment(struct kvm_vcpu *vcpu,
 			    struct kvm_segment *var, int seg)
 {
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
 	u32 ar;
 
-	if (vcpu->arch.rmode.vm86_active && seg == VCPU_SREG_TR) {
-		vcpu->arch.rmode.tr.selector = var->selector;
-		vcpu->arch.rmode.tr.base = var->base;
-		vcpu->arch.rmode.tr.limit = var->limit;
-		vcpu->arch.rmode.tr.ar = vmx_segment_access_rights(var);
+	if (vmx->rmode.vm86_active && seg == VCPU_SREG_TR) {
+		vmx->rmode.tr.selector = var->selector;
+		vmx->rmode.tr.base = var->base;
+		vmx->rmode.tr.limit = var->limit;
+		vmx->rmode.tr.ar = vmx_segment_access_rights(var);
 		return;
 	}
 	vmcs_writel(sf->base, var->base);
 	vmcs_write32(sf->limit, var->limit);
 	vmcs_write16(sf->selector, var->selector);
-	if (vcpu->arch.rmode.vm86_active && var->s) {
+	if (vmx->rmode.vm86_active && var->s) {
 		/*
 		 * Hack real-mode segments into vm86 compatibility.
 		 */
@@ -2394,7 +2404,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 		goto out;
 	}
 
-	vmx->vcpu.arch.rmode.vm86_active = 0;
+	vmx->rmode.vm86_active = 0;
 
 	vmx->soft_vnmi_blocked = 0;
 
@@ -2532,7 +2542,7 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu)
 	KVMTRACE_1D(INJ_VIRQ, vcpu, (u32)irq, handler);
 
 	++vcpu->stat.irq_injections;
-	if (vcpu->arch.rmode.vm86_active) {
+	if (vmx->rmode.vm86_active) {
 		vmx->rmode.irq.pending = true;
 		vmx->rmode.irq.vector = irq;
 		vmx->rmode.irq.rip = kvm_rip_read(vcpu);
@@ -2573,7 +2583,7 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
 	}
 
 	++vcpu->stat.nmi_injections;
-	if (vcpu->arch.rmode.vm86_active) {
+	if (vmx->rmode.vm86_active) {
 		vmx->rmode.irq.pending = true;
 		vmx->rmode.irq.vector = NMI_VECTOR;
 		vmx->rmode.irq.rip = kvm_rip_read(vcpu);
@@ -2737,7 +2747,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		return kvm_mmu_page_fault(vcpu, cr2, error_code);
 	}
 
-	if (vcpu->arch.rmode.vm86_active &&
+	if (vmx->rmode.vm86_active &&
 	    handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
 				   error_code)) {
 		if (vcpu->arch.halt_request) {