author    Hollis Blanchard <hollisb@us.ibm.com>  2007-10-31 18:24:24 -0400
committer Avi Kivity <avi@qumranet.com>          2008-01-30 10:53:00 -0500
commit    8776e5194f7bb847906e3561c4dba12ed66ebfb6 (patch)
tree      a6c23e44a89873abf711b73dfd7444673d5b6d1b /drivers
parent    417bc3041f5e66df1ce7f03d8fc481c3b12f250a (diff)
KVM: Portability: Move x86 instruction emulation code to x86.c
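
Move the x86 instruction emulation entry points out of the portable
kvm_main.c into the architecture-specific x86.c: kvm_emulate_halt(),
kvm_emulate_hypercall(), kvm_fix_hypercall(), kvm_emulate_cpuid(), the
realmode_lgdt()/realmode_lidt()/realmode_lmsw() helpers, the real-mode
control-register accessors, and the mk_cr_64() helper. kvm_vcpu_block()
stays in kvm_main.c but loses its static qualifier and gains a
declaration in kvm.h so the relocated kvm_emulate_halt() can still
reach it.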
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/kvm/kvm.h       |   1
-rw-r--r--  drivers/kvm/kvm_main.c  | 177
-rw-r--r--  drivers/kvm/x86.c       | 175
3 files changed, 177 insertions(+), 176 deletions(-)
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index d030a82966f5..ef2a6a8328ea 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -591,6 +591,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
 
 void fx_init(struct kvm_vcpu *vcpu);
 
+void kvm_vcpu_block(struct kvm_vcpu *vcpu);
 void kvm_resched(struct kvm_vcpu *vcpu);
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 58a5f399ad85..57573ebf02ba 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -789,7 +789,7 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 /*
  * The vCPU has executed a HLT instruction with in-kernel mode enabled.
  */
-static void kvm_vcpu_block(struct kvm_vcpu *vcpu)
+void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 {
 	DECLARE_WAITQUEUE(wait, current);
 
@@ -812,144 +812,6 @@ static void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 	remove_wait_queue(&vcpu->wq, &wait);
 }
 
-int kvm_emulate_halt(struct kvm_vcpu *vcpu)
-{
-	++vcpu->stat.halt_exits;
-	if (irqchip_in_kernel(vcpu->kvm)) {
-		vcpu->mp_state = VCPU_MP_STATE_HALTED;
-		kvm_vcpu_block(vcpu);
-		if (vcpu->mp_state != VCPU_MP_STATE_RUNNABLE)
-			return -EINTR;
-		return 1;
-	} else {
-		vcpu->run->exit_reason = KVM_EXIT_HLT;
-		return 0;
-	}
-}
-EXPORT_SYMBOL_GPL(kvm_emulate_halt);
-
-int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
-{
-	unsigned long nr, a0, a1, a2, a3, ret;
-
-	kvm_x86_ops->cache_regs(vcpu);
-
-	nr = vcpu->regs[VCPU_REGS_RAX];
-	a0 = vcpu->regs[VCPU_REGS_RBX];
-	a1 = vcpu->regs[VCPU_REGS_RCX];
-	a2 = vcpu->regs[VCPU_REGS_RDX];
-	a3 = vcpu->regs[VCPU_REGS_RSI];
-
-	if (!is_long_mode(vcpu)) {
-		nr &= 0xFFFFFFFF;
-		a0 &= 0xFFFFFFFF;
-		a1 &= 0xFFFFFFFF;
-		a2 &= 0xFFFFFFFF;
-		a3 &= 0xFFFFFFFF;
-	}
-
-	switch (nr) {
-	default:
-		ret = -KVM_ENOSYS;
-		break;
-	}
-	vcpu->regs[VCPU_REGS_RAX] = ret;
-	kvm_x86_ops->decache_regs(vcpu);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
-
-int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
-{
-	char instruction[3];
-	int ret = 0;
-
-	mutex_lock(&vcpu->kvm->lock);
-
-	/*
-	 * Blow out the MMU to ensure that no other VCPU has an active mapping
-	 * to ensure that the updated hypercall appears atomically across all
-	 * VCPUs.
-	 */
-	kvm_mmu_zap_all(vcpu->kvm);
-
-	kvm_x86_ops->cache_regs(vcpu);
-	kvm_x86_ops->patch_hypercall(vcpu, instruction);
-	if (emulator_write_emulated(vcpu->rip, instruction, 3, vcpu)
-	    != X86EMUL_CONTINUE)
-		ret = -EFAULT;
-
-	mutex_unlock(&vcpu->kvm->lock);
-
-	return ret;
-}
-
-static u64 mk_cr_64(u64 curr_cr, u32 new_val)
-{
-	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
-}
-
-void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
-{
-	struct descriptor_table dt = { limit, base };
-
-	kvm_x86_ops->set_gdt(vcpu, &dt);
-}
-
-void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
-{
-	struct descriptor_table dt = { limit, base };
-
-	kvm_x86_ops->set_idt(vcpu, &dt);
-}
-
-void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
-		   unsigned long *rflags)
-{
-	lmsw(vcpu, msw);
-	*rflags = kvm_x86_ops->get_rflags(vcpu);
-}
-
-unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
-{
-	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
-	switch (cr) {
-	case 0:
-		return vcpu->cr0;
-	case 2:
-		return vcpu->cr2;
-	case 3:
-		return vcpu->cr3;
-	case 4:
-		return vcpu->cr4;
-	default:
-		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
-		return 0;
-	}
-}
-
-void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
-		     unsigned long *rflags)
-{
-	switch (cr) {
-	case 0:
-		set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
-		*rflags = kvm_x86_ops->get_rflags(vcpu);
-		break;
-	case 2:
-		vcpu->cr2 = val;
-		break;
-	case 3:
-		set_cr3(vcpu, val);
-		break;
-	case 4:
-		set_cr4(vcpu, mk_cr_64(vcpu->cr4, val));
-		break;
-	default:
-		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
-	}
-}
-
 void kvm_resched(struct kvm_vcpu *vcpu)
 {
 	if (!need_resched())
@@ -958,43 +820,6 @@ void kvm_resched(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_resched);
 
-void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
-{
-	int i;
-	u32 function;
-	struct kvm_cpuid_entry *e, *best;
-
-	kvm_x86_ops->cache_regs(vcpu);
-	function = vcpu->regs[VCPU_REGS_RAX];
-	vcpu->regs[VCPU_REGS_RAX] = 0;
-	vcpu->regs[VCPU_REGS_RBX] = 0;
-	vcpu->regs[VCPU_REGS_RCX] = 0;
-	vcpu->regs[VCPU_REGS_RDX] = 0;
-	best = NULL;
-	for (i = 0; i < vcpu->cpuid_nent; ++i) {
-		e = &vcpu->cpuid_entries[i];
-		if (e->function == function) {
-			best = e;
-			break;
-		}
-		/*
-		 * Both basic or both extended?
-		 */
-		if (((e->function ^ function) & 0x80000000) == 0)
-			if (!best || e->function > best->function)
-				best = e;
-	}
-	if (best) {
-		vcpu->regs[VCPU_REGS_RAX] = best->eax;
-		vcpu->regs[VCPU_REGS_RBX] = best->ebx;
-		vcpu->regs[VCPU_REGS_RCX] = best->ecx;
-		vcpu->regs[VCPU_REGS_RDX] = best->edx;
-	}
-	kvm_x86_ops->decache_regs(vcpu);
-	kvm_x86_ops->skip_emulated_instruction(vcpu);
-}
-EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
-
 /*
  * Check if userspace requested an interrupt window, and that the
  * interrupt window is open.
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index c1f10e58f4d2..2cf7ebab50f4 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -1610,3 +1610,178 @@ __init void kvm_arch_init(void)
 {
 	kvm_init_msr_list();
 }
+
+int kvm_emulate_halt(struct kvm_vcpu *vcpu)
+{
+	++vcpu->stat.halt_exits;
+	if (irqchip_in_kernel(vcpu->kvm)) {
+		vcpu->mp_state = VCPU_MP_STATE_HALTED;
+		kvm_vcpu_block(vcpu);
+		if (vcpu->mp_state != VCPU_MP_STATE_RUNNABLE)
+			return -EINTR;
+		return 1;
+	} else {
+		vcpu->run->exit_reason = KVM_EXIT_HLT;
+		return 0;
+	}
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_halt);
+
+int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
+{
+	unsigned long nr, a0, a1, a2, a3, ret;
+
+	kvm_x86_ops->cache_regs(vcpu);
+
+	nr = vcpu->regs[VCPU_REGS_RAX];
+	a0 = vcpu->regs[VCPU_REGS_RBX];
+	a1 = vcpu->regs[VCPU_REGS_RCX];
+	a2 = vcpu->regs[VCPU_REGS_RDX];
+	a3 = vcpu->regs[VCPU_REGS_RSI];
+
+	if (!is_long_mode(vcpu)) {
+		nr &= 0xFFFFFFFF;
+		a0 &= 0xFFFFFFFF;
+		a1 &= 0xFFFFFFFF;
+		a2 &= 0xFFFFFFFF;
+		a3 &= 0xFFFFFFFF;
+	}
+
+	switch (nr) {
+	default:
+		ret = -KVM_ENOSYS;
+		break;
+	}
+	vcpu->regs[VCPU_REGS_RAX] = ret;
+	kvm_x86_ops->decache_regs(vcpu);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
+
+int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
+{
+	char instruction[3];
+	int ret = 0;
+
+	mutex_lock(&vcpu->kvm->lock);
+
+	/*
+	 * Blow out the MMU to ensure that no other VCPU has an active mapping
+	 * to ensure that the updated hypercall appears atomically across all
+	 * VCPUs.
+	 */
+	kvm_mmu_zap_all(vcpu->kvm);
+
+	kvm_x86_ops->cache_regs(vcpu);
+	kvm_x86_ops->patch_hypercall(vcpu, instruction);
+	if (emulator_write_emulated(vcpu->rip, instruction, 3, vcpu)
+	    != X86EMUL_CONTINUE)
+		ret = -EFAULT;
+
+	mutex_unlock(&vcpu->kvm->lock);
+
+	return ret;
+}
+
+static u64 mk_cr_64(u64 curr_cr, u32 new_val)
+{
+	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
+}
+
+void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
+{
+	struct descriptor_table dt = { limit, base };
+
+	kvm_x86_ops->set_gdt(vcpu, &dt);
+}
+
+void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
+{
+	struct descriptor_table dt = { limit, base };
+
+	kvm_x86_ops->set_idt(vcpu, &dt);
+}
+
+void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
+		   unsigned long *rflags)
+{
+	lmsw(vcpu, msw);
+	*rflags = kvm_x86_ops->get_rflags(vcpu);
+}
+
+unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
+{
+	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
+	switch (cr) {
+	case 0:
+		return vcpu->cr0;
+	case 2:
+		return vcpu->cr2;
+	case 3:
+		return vcpu->cr3;
+	case 4:
+		return vcpu->cr4;
+	default:
+		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
+		return 0;
+	}
+}
+
+void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
+		     unsigned long *rflags)
+{
+	switch (cr) {
+	case 0:
+		set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
+		*rflags = kvm_x86_ops->get_rflags(vcpu);
+		break;
+	case 2:
+		vcpu->cr2 = val;
+		break;
+	case 3:
+		set_cr3(vcpu, val);
+		break;
+	case 4:
+		set_cr4(vcpu, mk_cr_64(vcpu->cr4, val));
+		break;
+	default:
+		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
+	}
+}
+
+void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
+{
+	int i;
+	u32 function;
+	struct kvm_cpuid_entry *e, *best;
+
+	kvm_x86_ops->cache_regs(vcpu);
+	function = vcpu->regs[VCPU_REGS_RAX];
+	vcpu->regs[VCPU_REGS_RAX] = 0;
+	vcpu->regs[VCPU_REGS_RBX] = 0;
+	vcpu->regs[VCPU_REGS_RCX] = 0;
+	vcpu->regs[VCPU_REGS_RDX] = 0;
+	best = NULL;
+	for (i = 0; i < vcpu->cpuid_nent; ++i) {
+		e = &vcpu->cpuid_entries[i];
+		if (e->function == function) {
+			best = e;
+			break;
+		}
+		/*
+		 * Both basic or both extended?
+		 */
+		if (((e->function ^ function) & 0x80000000) == 0)
+			if (!best || e->function > best->function)
+				best = e;
+	}
+	if (best) {
+		vcpu->regs[VCPU_REGS_RAX] = best->eax;
+		vcpu->regs[VCPU_REGS_RBX] = best->ebx;
+		vcpu->regs[VCPU_REGS_RCX] = best->ecx;
+		vcpu->regs[VCPU_REGS_RDX] = best->edx;
+	}
+	kvm_x86_ops->decache_regs(vcpu);
+	kvm_x86_ops->skip_emulated_instruction(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
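
For context, a minimal guest-side sketch of the calling convention that
kvm_emulate_hypercall() decodes above: the hypercall number travels in
RAX and up to four arguments in RBX, RCX, RDX and RSI, with the result
returned in RAX. The example_kvm_hypercall1() wrapper below is
hypothetical and not part of this patch; the "vmcall" mnemonic is the
Intel VMX opcode, and kvm_fix_hypercall() exists precisely to patch the
vendor-appropriate 3-byte instruction into the guest at vcpu->rip.

/* Hypothetical guest-side wrapper, for illustration only. */
static inline long example_kvm_hypercall1(unsigned long nr,
					  unsigned long a0)
{
	long ret;

	/* nr in RAX, first argument in RBX; result comes back in RAX. */
	asm volatile("vmcall"
		     : "=a"(ret)
		     : "a"(nr), "b"(a0)
		     : "memory");
	return ret;
}

Note that with only the default case in the switch statement of
kvm_emulate_hypercall(), every hypercall number at this point returns
-KVM_ENOSYS to the guest.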