about summary refs log tree commit diff stats
path: root/drivers/kvm/x86.c
diff options
context:
space:
mode:
authorHollis Blanchard <hollisb@us.ibm.com>2007-10-31 18:24:24 -0400
committerAvi Kivity <avi@qumranet.com>2008-01-30 10:53:00 -0500
commit8776e5194f7bb847906e3561c4dba12ed66ebfb6 (patch)
treea6c23e44a89873abf711b73dfd7444673d5b6d1b /drivers/kvm/x86.c
parent417bc3041f5e66df1ce7f03d8fc481c3b12f250a (diff)
KVM: Portability: Move x86 instruction emulation code to x86.c
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm/x86.c')
-rw-r--r--  drivers/kvm/x86.c | 175
1 file changed, 175 insertions(+), 0 deletions(-)
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index c1f10e58f4d..2cf7ebab50f 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -1610,3 +1610,178 @@ __init void kvm_arch_init(void)
1610{ 1610{
1611 kvm_init_msr_list(); 1611 kvm_init_msr_list();
1612} 1612}
1613
1614int kvm_emulate_halt(struct kvm_vcpu *vcpu)
1615{
1616 ++vcpu->stat.halt_exits;
1617 if (irqchip_in_kernel(vcpu->kvm)) {
1618 vcpu->mp_state = VCPU_MP_STATE_HALTED;
1619 kvm_vcpu_block(vcpu);
1620 if (vcpu->mp_state != VCPU_MP_STATE_RUNNABLE)
1621 return -EINTR;
1622 return 1;
1623 } else {
1624 vcpu->run->exit_reason = KVM_EXIT_HLT;
1625 return 0;
1626 }
1627}
1628EXPORT_SYMBOL_GPL(kvm_emulate_halt);
1629
1630int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
1631{
1632 unsigned long nr, a0, a1, a2, a3, ret;
1633
1634 kvm_x86_ops->cache_regs(vcpu);
1635
1636 nr = vcpu->regs[VCPU_REGS_RAX];
1637 a0 = vcpu->regs[VCPU_REGS_RBX];
1638 a1 = vcpu->regs[VCPU_REGS_RCX];
1639 a2 = vcpu->regs[VCPU_REGS_RDX];
1640 a3 = vcpu->regs[VCPU_REGS_RSI];
1641
1642 if (!is_long_mode(vcpu)) {
1643 nr &= 0xFFFFFFFF;
1644 a0 &= 0xFFFFFFFF;
1645 a1 &= 0xFFFFFFFF;
1646 a2 &= 0xFFFFFFFF;
1647 a3 &= 0xFFFFFFFF;
1648 }
1649
1650 switch (nr) {
1651 default:
1652 ret = -KVM_ENOSYS;
1653 break;
1654 }
1655 vcpu->regs[VCPU_REGS_RAX] = ret;
1656 kvm_x86_ops->decache_regs(vcpu);
1657 return 0;
1658}
1659EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
1660
1661int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
1662{
1663 char instruction[3];
1664 int ret = 0;
1665
1666 mutex_lock(&vcpu->kvm->lock);
1667
1668 /*
1669 * Blow out the MMU to ensure that no other VCPU has an active mapping
1670 * to ensure that the updated hypercall appears atomically across all
1671 * VCPUs.
1672 */
1673 kvm_mmu_zap_all(vcpu->kvm);
1674
1675 kvm_x86_ops->cache_regs(vcpu);
1676 kvm_x86_ops->patch_hypercall(vcpu, instruction);
1677 if (emulator_write_emulated(vcpu->rip, instruction, 3, vcpu)
1678 != X86EMUL_CONTINUE)
1679 ret = -EFAULT;
1680
1681 mutex_unlock(&vcpu->kvm->lock);
1682
1683 return ret;
1684}
1685
1686static u64 mk_cr_64(u64 curr_cr, u32 new_val)
1687{
1688 return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
1689}
1690
1691void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
1692{
1693 struct descriptor_table dt = { limit, base };
1694
1695 kvm_x86_ops->set_gdt(vcpu, &dt);
1696}
1697
1698void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
1699{
1700 struct descriptor_table dt = { limit, base };
1701
1702 kvm_x86_ops->set_idt(vcpu, &dt);
1703}
1704
1705void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
1706 unsigned long *rflags)
1707{
1708 lmsw(vcpu, msw);
1709 *rflags = kvm_x86_ops->get_rflags(vcpu);
1710}
1711
1712unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
1713{
1714 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
1715 switch (cr) {
1716 case 0:
1717 return vcpu->cr0;
1718 case 2:
1719 return vcpu->cr2;
1720 case 3:
1721 return vcpu->cr3;
1722 case 4:
1723 return vcpu->cr4;
1724 default:
1725 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
1726 return 0;
1727 }
1728}
1729
1730void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
1731 unsigned long *rflags)
1732{
1733 switch (cr) {
1734 case 0:
1735 set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
1736 *rflags = kvm_x86_ops->get_rflags(vcpu);
1737 break;
1738 case 2:
1739 vcpu->cr2 = val;
1740 break;
1741 case 3:
1742 set_cr3(vcpu, val);
1743 break;
1744 case 4:
1745 set_cr4(vcpu, mk_cr_64(vcpu->cr4, val));
1746 break;
1747 default:
1748 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
1749 }
1750}
1751
1752void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
1753{
1754 int i;
1755 u32 function;
1756 struct kvm_cpuid_entry *e, *best;
1757
1758 kvm_x86_ops->cache_regs(vcpu);
1759 function = vcpu->regs[VCPU_REGS_RAX];
1760 vcpu->regs[VCPU_REGS_RAX] = 0;
1761 vcpu->regs[VCPU_REGS_RBX] = 0;
1762 vcpu->regs[VCPU_REGS_RCX] = 0;
1763 vcpu->regs[VCPU_REGS_RDX] = 0;
1764 best = NULL;
1765 for (i = 0; i < vcpu->cpuid_nent; ++i) {
1766 e = &vcpu->cpuid_entries[i];
1767 if (e->function == function) {
1768 best = e;
1769 break;
1770 }
1771 /*
1772 * Both basic or both extended?
1773 */
1774 if (((e->function ^ function) & 0x80000000) == 0)
1775 if (!best || e->function > best->function)
1776 best = e;
1777 }
1778 if (best) {
1779 vcpu->regs[VCPU_REGS_RAX] = best->eax;
1780 vcpu->regs[VCPU_REGS_RBX] = best->ebx;
1781 vcpu->regs[VCPU_REGS_RCX] = best->ecx;
1782 vcpu->regs[VCPU_REGS_RDX] = best->edx;
1783 }
1784 kvm_x86_ops->decache_regs(vcpu);
1785 kvm_x86_ops->skip_emulated_instruction(vcpu);
1786}
1787EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);