author     Carsten Otte <cotte@de.ibm.com>            2007-10-30 13:44:21 -0400
committer  Avi Kivity <avi@qumranet.com>              2008-01-30 10:52:58 -0500
commit     bbd9b64e37aff5aa715ec5e168425790f5983bf1 (patch)
tree       ad69fb6e400801430e7a69019bd8fc40599ff20d /drivers/kvm/kvm_main.c
parent     15c4a6406f6c40632260861e1db7c539e79dcf1a (diff)
KVM: Portability: Move x86 emulation and mmio device hook to x86.c
This patch moves the following functions from kvm_main.c to x86.c:
emulator_read/write_std, vcpu_find_pervcpu_dev, vcpu_find_mmio_dev,
emulator_read/write_emulated, emulator_write_phys,
emulator_write_emulated_onepage, emulator_cmpxchg_emulated,
get_segment_base, emulate_invlpg, emulate_clts, emulator_get/set_dr,
kvm_report_emulation_failure, emulate_instruction
The following data structure is also moved to x86.c:
struct x86_emulate_ops emulate_ops
Signed-off-by: Carsten Otte <cotte@de.ibm.com>
Acked-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
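
Among the moved functions, emulator_write_emulated() (visible in the removed hunk below) carries the one non-obvious step: a guest write that straddles a page boundary is split so each chunk stays within a single page, using now = -addr & ~PAGE_MASK for the bytes left in the first page. The following is a minimal standalone sketch of that arithmetic only, not part of the patch; the address is made up and a 4 KiB page size is assumed:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	/* Hypothetical guest virtual address near the end of a page, and a
	 * 16-byte write that spills into the next page. */
	unsigned long addr = 0x1ff8;
	unsigned int bytes = 16;

	/* Same page-crossing test and split as emulator_write_emulated(). */
	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
		unsigned int now = -addr & ~PAGE_MASK; /* bytes left in this page */

		printf("first chunk:  addr=0x%lx len=%u\n", addr, now);
		printf("second chunk: addr=0x%lx len=%u\n", addr + now, bytes - now);
	} else {
		printf("single chunk: addr=0x%lx len=%u\n", addr, bytes);
	}
	return 0;
}

Compiled on its own, this prints an 8-byte chunk ending at the page boundary followed by an 8-byte chunk starting at 0x2000, which is exactly how the real function dispatches two calls to emulator_write_emulated_onepage().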
Diffstat (limited to 'drivers/kvm/kvm_main.c')
-rw-r--r--  drivers/kvm/kvm_main.c  357
1 file changed, 0 insertions(+), 357 deletions(-)
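
Another moved piece worth a second look is the emulator mode selection in emulate_instruction() (in the removed hunk below): EFLAGS.VM forces real-mode emulation, otherwise CS.L selects 64-bit mode and CS.D selects 32-bit vs. 16-bit protected mode. A self-contained sketch of that decision chain follows; the enum values are illustrative stand-ins, not the real X86EMUL_MODE_* constants from the KVM emulator header:

#include <stdio.h>

/* Illustrative stand-ins for the X86EMUL_MODE_* constants used by the
 * removed code below. */
enum emul_mode { MODE_REAL, MODE_PROT16, MODE_PROT32, MODE_PROT64 };

/* Mirrors the mode selection in emulate_instruction(): EFLAGS.VM wins,
 * then CS.L (long mode), then CS.D (default operand size). */
static enum emul_mode pick_mode(int eflags_vm, int cs_l, int cs_db)
{
	return eflags_vm ? MODE_REAL
	     : cs_l      ? MODE_PROT64
	     : cs_db     ? MODE_PROT32
	                 : MODE_PROT16;
}

int main(void)
{
	printf("%d\n", pick_mode(1, 0, 0)); /* vm86 guest   -> MODE_REAL   */
	printf("%d\n", pick_mode(0, 1, 0)); /* 64-bit guest -> MODE_PROT64 */
	printf("%d\n", pick_mode(0, 0, 1)); /* 32-bit guest -> MODE_PROT32 */
	return 0;
}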
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 7acf4cb07793..1a56d76560de 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -827,369 +827,12 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 	}
 }
 
-int emulator_read_std(unsigned long addr,
-		      void *val,
-		      unsigned int bytes,
-		      struct kvm_vcpu *vcpu)
-{
-	void *data = val;
-
-	while (bytes) {
-		gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
-		unsigned offset = addr & (PAGE_SIZE-1);
-		unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
-		int ret;
-
-		if (gpa == UNMAPPED_GVA)
-			return X86EMUL_PROPAGATE_FAULT;
-		ret = kvm_read_guest(vcpu->kvm, gpa, data, tocopy);
-		if (ret < 0)
-			return X86EMUL_UNHANDLEABLE;
-
-		bytes -= tocopy;
-		data += tocopy;
-		addr += tocopy;
-	}
-
-	return X86EMUL_CONTINUE;
-}
-EXPORT_SYMBOL_GPL(emulator_read_std);
-
-static int emulator_write_std(unsigned long addr,
-			      const void *val,
-			      unsigned int bytes,
-			      struct kvm_vcpu *vcpu)
-{
-	pr_unimpl(vcpu, "emulator_write_std: addr %lx n %d\n", addr, bytes);
-	return X86EMUL_UNHANDLEABLE;
-}
-
-/*
- * Only apic need an MMIO device hook, so shortcut now..
- */
-static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
-						   gpa_t addr)
-{
-	struct kvm_io_device *dev;
-
-	if (vcpu->apic) {
-		dev = &vcpu->apic->dev;
-		if (dev->in_range(dev, addr))
-			return dev;
-	}
-	return NULL;
-}
-
-static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
-						gpa_t addr)
-{
-	struct kvm_io_device *dev;
-
-	dev = vcpu_find_pervcpu_dev(vcpu, addr);
-	if (dev == NULL)
-		dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
-	return dev;
-}
-
 static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
 					       gpa_t addr)
 {
 	return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr);
 }
 
-static int emulator_read_emulated(unsigned long addr,
-				  void *val,
-				  unsigned int bytes,
-				  struct kvm_vcpu *vcpu)
-{
-	struct kvm_io_device *mmio_dev;
-	gpa_t gpa;
-
-	if (vcpu->mmio_read_completed) {
-		memcpy(val, vcpu->mmio_data, bytes);
-		vcpu->mmio_read_completed = 0;
-		return X86EMUL_CONTINUE;
-	}
-
-	gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
-
-	/* For APIC access vmexit */
-	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
-		goto mmio;
-
-	if (emulator_read_std(addr, val, bytes, vcpu)
-			== X86EMUL_CONTINUE)
-		return X86EMUL_CONTINUE;
-	if (gpa == UNMAPPED_GVA)
-		return X86EMUL_PROPAGATE_FAULT;
-
-mmio:
-	/*
-	 * Is this MMIO handled locally?
-	 */
-	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
-	if (mmio_dev) {
-		kvm_iodevice_read(mmio_dev, gpa, bytes, val);
-		return X86EMUL_CONTINUE;
-	}
-
-	vcpu->mmio_needed = 1;
-	vcpu->mmio_phys_addr = gpa;
-	vcpu->mmio_size = bytes;
-	vcpu->mmio_is_write = 0;
-
-	return X86EMUL_UNHANDLEABLE;
-}
-
-static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
-			       const void *val, int bytes)
-{
-	int ret;
-
-	ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
-	if (ret < 0)
-		return 0;
-	kvm_mmu_pte_write(vcpu, gpa, val, bytes);
-	return 1;
-}
-
-static int emulator_write_emulated_onepage(unsigned long addr,
-					   const void *val,
-					   unsigned int bytes,
-					   struct kvm_vcpu *vcpu)
-{
-	struct kvm_io_device *mmio_dev;
-	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
-
-	if (gpa == UNMAPPED_GVA) {
-		kvm_x86_ops->inject_page_fault(vcpu, addr, 2);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
-
-	/* For APIC access vmexit */
-	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
-		goto mmio;
-
-	if (emulator_write_phys(vcpu, gpa, val, bytes))
-		return X86EMUL_CONTINUE;
-
-mmio:
-	/*
-	 * Is this MMIO handled locally?
-	 */
-	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
-	if (mmio_dev) {
-		kvm_iodevice_write(mmio_dev, gpa, bytes, val);
-		return X86EMUL_CONTINUE;
-	}
-
-	vcpu->mmio_needed = 1;
-	vcpu->mmio_phys_addr = gpa;
-	vcpu->mmio_size = bytes;
-	vcpu->mmio_is_write = 1;
-	memcpy(vcpu->mmio_data, val, bytes);
-
-	return X86EMUL_CONTINUE;
-}
-
-int emulator_write_emulated(unsigned long addr,
-			    const void *val,
-			    unsigned int bytes,
-			    struct kvm_vcpu *vcpu)
-{
-	/* Crossing a page boundary? */
-	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
-		int rc, now;
-
-		now = -addr & ~PAGE_MASK;
-		rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
-		if (rc != X86EMUL_CONTINUE)
-			return rc;
-		addr += now;
-		val += now;
-		bytes -= now;
-	}
-	return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
-}
-EXPORT_SYMBOL_GPL(emulator_write_emulated);
-
-static int emulator_cmpxchg_emulated(unsigned long addr,
-				     const void *old,
-				     const void *new,
-				     unsigned int bytes,
-				     struct kvm_vcpu *vcpu)
-{
-	static int reported;
-
-	if (!reported) {
-		reported = 1;
-		printk(KERN_WARNING "kvm: emulating exchange as write\n");
-	}
-	return emulator_write_emulated(addr, new, bytes, vcpu);
-}
-
-static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
-{
-	return kvm_x86_ops->get_segment_base(vcpu, seg);
-}
-
-int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
-{
-	return X86EMUL_CONTINUE;
-}
-
-int emulate_clts(struct kvm_vcpu *vcpu)
-{
-	kvm_x86_ops->set_cr0(vcpu, vcpu->cr0 & ~X86_CR0_TS);
-	return X86EMUL_CONTINUE;
-}
-
-int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
-{
-	struct kvm_vcpu *vcpu = ctxt->vcpu;
-
-	switch (dr) {
-	case 0 ... 3:
-		*dest = kvm_x86_ops->get_dr(vcpu, dr);
-		return X86EMUL_CONTINUE;
-	default:
-		pr_unimpl(vcpu, "%s: unexpected dr %u\n", __FUNCTION__, dr);
-		return X86EMUL_UNHANDLEABLE;
-	}
-}
-
-int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
-{
-	unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
-	int exception;
-
-	kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
-	if (exception) {
-		/* FIXME: better handling */
-		return X86EMUL_UNHANDLEABLE;
-	}
-	return X86EMUL_CONTINUE;
-}
-
-void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
-{
-	static int reported;
-	u8 opcodes[4];
-	unsigned long rip = vcpu->rip;
-	unsigned long rip_linear;
-
-	rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
-
-	if (reported)
-		return;
-
-	emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu);
-
-	printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
-	       context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
-	reported = 1;
-}
-EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
-
-struct x86_emulate_ops emulate_ops = {
-	.read_std = emulator_read_std,
-	.write_std = emulator_write_std,
-	.read_emulated = emulator_read_emulated,
-	.write_emulated = emulator_write_emulated,
-	.cmpxchg_emulated = emulator_cmpxchg_emulated,
-};
-
-int emulate_instruction(struct kvm_vcpu *vcpu,
-			struct kvm_run *run,
-			unsigned long cr2,
-			u16 error_code,
-			int no_decode)
-{
-	int r;
-
-	vcpu->mmio_fault_cr2 = cr2;
-	kvm_x86_ops->cache_regs(vcpu);
-
-	vcpu->mmio_is_write = 0;
-	vcpu->pio.string = 0;
-
-	if (!no_decode) {
-		int cs_db, cs_l;
-		kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
-
-		vcpu->emulate_ctxt.vcpu = vcpu;
-		vcpu->emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
-		vcpu->emulate_ctxt.cr2 = cr2;
-		vcpu->emulate_ctxt.mode =
-			(vcpu->emulate_ctxt.eflags & X86_EFLAGS_VM)
-			? X86EMUL_MODE_REAL : cs_l
-			? X86EMUL_MODE_PROT64 : cs_db
-			? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
-
-		if (vcpu->emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
-			vcpu->emulate_ctxt.cs_base = 0;
-			vcpu->emulate_ctxt.ds_base = 0;
-			vcpu->emulate_ctxt.es_base = 0;
-			vcpu->emulate_ctxt.ss_base = 0;
-		} else {
-			vcpu->emulate_ctxt.cs_base =
-				get_segment_base(vcpu, VCPU_SREG_CS);
-			vcpu->emulate_ctxt.ds_base =
-				get_segment_base(vcpu, VCPU_SREG_DS);
-			vcpu->emulate_ctxt.es_base =
-				get_segment_base(vcpu, VCPU_SREG_ES);
-			vcpu->emulate_ctxt.ss_base =
-				get_segment_base(vcpu, VCPU_SREG_SS);
-		}
-
-		vcpu->emulate_ctxt.gs_base =
-			get_segment_base(vcpu, VCPU_SREG_GS);
-		vcpu->emulate_ctxt.fs_base =
-			get_segment_base(vcpu, VCPU_SREG_FS);
-
-		r = x86_decode_insn(&vcpu->emulate_ctxt, &emulate_ops);
-		if (r) {
-			if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
-				return EMULATE_DONE;
-			return EMULATE_FAIL;
-		}
-	}
-
-	r = x86_emulate_insn(&vcpu->emulate_ctxt, &emulate_ops);
-
-	if (vcpu->pio.string)
-		return EMULATE_DO_MMIO;
-
-	if ((r || vcpu->mmio_is_write) && run) {
-		run->exit_reason = KVM_EXIT_MMIO;
-		run->mmio.phys_addr = vcpu->mmio_phys_addr;
-		memcpy(run->mmio.data, vcpu->mmio_data, 8);
-		run->mmio.len = vcpu->mmio_size;
-		run->mmio.is_write = vcpu->mmio_is_write;
-	}
-
-	if (r) {
-		if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
-			return EMULATE_DONE;
-		if (!vcpu->mmio_needed) {
-			kvm_report_emulation_failure(vcpu, "mmio");
-			return EMULATE_FAIL;
-		}
-		return EMULATE_DO_MMIO;
-	}
-
-	kvm_x86_ops->decache_regs(vcpu);
-	kvm_x86_ops->set_rflags(vcpu, vcpu->emulate_ctxt.eflags);
-
-	if (vcpu->mmio_is_write) {
-		vcpu->mmio_needed = 0;
-		return EMULATE_DO_MMIO;
-	}
-
-	return EMULATE_DONE;
-}
-EXPORT_SYMBOL_GPL(emulate_instruction);
-
 /*
  * The vCPU has executed a HLT instruction with in-kernel mode enabled.
  */