author		Rusty Russell <rusty@rustcorp.com.au>	2007-07-30 06:07:08 -0400
committer	Avi Kivity <avi@qumranet.com>	2007-10-13 04:18:21 -0400
commit		e756fc626d7d8a220864dd6bc6634d9d933650b0 (patch)
tree		03ec44448f28c9cf42f0c0e6c514c876102275a6 /drivers/kvm/svm.c
parent		3077c4513c46f66537c1205acc464e49c9847dc0 (diff)
KVM: SVM: de-containization
container_of is wonderful, but not casting at all is better. This patch changes svm.c's internal functions to pass "struct vcpu_svm" directly, instead of passing "struct kvm_vcpu" and using container_of to recover it.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm/svm.c')
-rw-r--r--	drivers/kvm/svm.c	232
1 file changed, 108 insertions(+), 124 deletions(-)
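For context, to_svm() is the container_of-based accessor the handlers below no longer need. Its likely shape, reconstructed from its usage in this diff rather than quoted from the header, is:

	/* Sketch of the accessor; the real definition lives in
	 * drivers/kvm/kvm_svm.h. It derives the SVM-specific vcpu
	 * from the embedded generic one. */
	static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
	{
		return container_of(vcpu, struct vcpu_svm, vcpu);
	}

After the conversion the internal helpers receive the struct vcpu_svm pointer directly and reach the embedded generic state as &svm->vcpu, so this derivation happens once at the entry points instead of at the top of every handler.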
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index c18f0b2d3d3e..504fb50662d9 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -98,9 +98,9 @@ static inline u32 svm_has(u32 feat)
 	return svm_features & feat;
 }
 
-static unsigned get_addr_size(struct kvm_vcpu *vcpu)
+static unsigned get_addr_size(struct vcpu_svm *svm)
 {
-	struct vmcb_save_area *sa = &to_svm(vcpu)->vmcb->save;
+	struct vmcb_save_area *sa = &svm->vmcb->save;
 	u16 cs_attrib;
 
 	if (!(sa->cr0 & X86_CR0_PE) || (sa->rflags & X86_EFLAGS_VM))
@@ -865,17 +865,15 @@ static void save_host_msrs(struct kvm_vcpu *vcpu)
 #endif
 }
 
-static void new_asid(struct kvm_vcpu *vcpu, struct svm_cpu_data *svm_data)
+static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
 {
-	struct vcpu_svm *svm = to_svm(vcpu);
-
 	if (svm_data->next_asid > svm_data->max_asid) {
 		++svm_data->asid_generation;
 		svm_data->next_asid = 1;
 		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
 	}
 
-	vcpu->cpu = svm_data->cpu;
+	svm->vcpu.cpu = svm_data->cpu;
 	svm->asid_generation = svm_data->asid_generation;
 	svm->vmcb->control.asid = svm_data->next_asid++;
 }
@@ -929,42 +927,43 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
 	}
 }
 
-static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-	struct vcpu_svm *svm = to_svm(vcpu);
 	u32 exit_int_info = svm->vmcb->control.exit_int_info;
+	struct kvm *kvm = svm->vcpu.kvm;
 	u64 fault_address;
 	u32 error_code;
 	enum emulation_result er;
 	int r;
 
 	if (is_external_interrupt(exit_int_info))
-		push_irq(vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
+		push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
 
-	mutex_lock(&vcpu->kvm->lock);
+	mutex_lock(&kvm->lock);
 
 	fault_address = svm->vmcb->control.exit_info_2;
 	error_code = svm->vmcb->control.exit_info_1;
-	r = kvm_mmu_page_fault(vcpu, fault_address, error_code);
+	r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
 	if (r < 0) {
-		mutex_unlock(&vcpu->kvm->lock);
+		mutex_unlock(&kvm->lock);
 		return r;
 	}
 	if (!r) {
-		mutex_unlock(&vcpu->kvm->lock);
+		mutex_unlock(&kvm->lock);
 		return 1;
 	}
-	er = emulate_instruction(vcpu, kvm_run, fault_address, error_code);
-	mutex_unlock(&vcpu->kvm->lock);
+	er = emulate_instruction(&svm->vcpu, kvm_run, fault_address,
+				 error_code);
+	mutex_unlock(&kvm->lock);
 
 	switch (er) {
 	case EMULATE_DONE:
 		return 1;
 	case EMULATE_DO_MMIO:
-		++vcpu->stat.mmio_exits;
+		++svm->vcpu.stat.mmio_exits;
 		return 0;
 	case EMULATE_FAIL:
-		vcpu_printf(vcpu, "%s: emulate fail\n", __FUNCTION__);
+		vcpu_printf(&svm->vcpu, "%s: emulate fail\n", __FUNCTION__);
 		break;
 	default:
 		BUG();
@@ -974,21 +973,18 @@ static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	return 0;
 }
 
-static int nm_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-	struct vcpu_svm *svm = to_svm(vcpu);
-
 	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
-	if (!(vcpu->cr0 & X86_CR0_TS))
+	if (!(svm->vcpu.cr0 & X86_CR0_TS))
 		svm->vmcb->save.cr0 &= ~X86_CR0_TS;
-	vcpu->fpu_active = 1;
+	svm->vcpu.fpu_active = 1;
 
 	return 1;
 }
 
-static int shutdown_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-	struct vcpu_svm *svm = to_svm(vcpu);
 	/*
 	 * VMCB is undefined after a SHUTDOWN intercept
 	 * so reinitialize it.
@@ -1000,11 +996,10 @@ static int shutdown_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	return 0;
 }
 
-static int io_get_override(struct kvm_vcpu *vcpu,
+static int io_get_override(struct vcpu_svm *svm,
 			  struct vmcb_seg **seg,
 			  int *addr_override)
 {
-	struct vcpu_svm *svm = to_svm(vcpu);
 	u8 inst[MAX_INST_SIZE];
 	unsigned ins_length;
 	gva_t rip;
@@ -1024,7 +1019,7 @@ static int io_get_override(struct kvm_vcpu *vcpu,
 	       svm->vmcb->control.exit_info_2,
 	       ins_length);
 
-	if (kvm_read_guest(vcpu, rip, ins_length, inst) != ins_length)
+	if (kvm_read_guest(&svm->vcpu, rip, ins_length, inst) != ins_length)
 		/* #PF */
 		return 0;
 
@@ -1065,28 +1060,27 @@ static int io_get_override(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-static unsigned long io_adress(struct kvm_vcpu *vcpu, int ins, gva_t *address)
+static unsigned long io_adress(struct vcpu_svm *svm, int ins, gva_t *address)
 {
 	unsigned long addr_mask;
 	unsigned long *reg;
 	struct vmcb_seg *seg;
 	int addr_override;
-	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb_save_area *save_area = &svm->vmcb->save;
 	u16 cs_attrib = save_area->cs.attrib;
-	unsigned addr_size = get_addr_size(vcpu);
+	unsigned addr_size = get_addr_size(svm);
 
-	if (!io_get_override(vcpu, &seg, &addr_override))
+	if (!io_get_override(svm, &seg, &addr_override))
 		return 0;
 
 	if (addr_override)
 		addr_size = (addr_size == 2) ? 4: (addr_size >> 1);
 
 	if (ins) {
-		reg = &vcpu->regs[VCPU_REGS_RDI];
+		reg = &svm->vcpu.regs[VCPU_REGS_RDI];
 		seg = &svm->vmcb->save.es;
 	} else {
-		reg = &vcpu->regs[VCPU_REGS_RSI];
+		reg = &svm->vcpu.regs[VCPU_REGS_RSI];
 		seg = (seg) ? seg : &svm->vmcb->save.ds;
 	}
 
@@ -1099,7 +1093,7 @@ static unsigned long io_adress(struct kvm_vcpu *vcpu, int ins, gva_t *address)
 	}
 
 	if (!(seg->attrib & SVM_SELECTOR_P_SHIFT)) {
-		svm_inject_gp(vcpu, 0);
+		svm_inject_gp(&svm->vcpu, 0);
 		return 0;
 	}
 
@@ -1107,16 +1101,15 @@ static unsigned long io_adress(struct kvm_vcpu *vcpu, int ins, gva_t *address)
 	return addr_mask;
 }
 
-static int io_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-	struct vcpu_svm *svm = to_svm(vcpu);
 	u32 io_info = svm->vmcb->control.exit_info_1; //address size bug?
 	int size, down, in, string, rep;
 	unsigned port;
 	unsigned long count;
 	gva_t address = 0;
 
-	++vcpu->stat.io_exits;
+	++svm->vcpu.stat.io_exits;
 
 	svm->next_rip = svm->vmcb->control.exit_info_2;
 
@@ -1131,7 +1124,7 @@ static int io_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	if (string) {
 		unsigned addr_mask;
 
-		addr_mask = io_adress(vcpu, in, &address);
+		addr_mask = io_adress(svm, in, &address);
 		if (!addr_mask) {
 			printk(KERN_DEBUG "%s: get io address failed\n",
 			       __FUNCTION__);
@@ -1139,60 +1132,57 @@ static int io_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		}
 
 		if (rep)
-			count = vcpu->regs[VCPU_REGS_RCX] & addr_mask;
+			count = svm->vcpu.regs[VCPU_REGS_RCX] & addr_mask;
 	}
-	return kvm_setup_pio(vcpu, kvm_run, in, size, count, string, down,
-			     address, rep, port);
+	return kvm_setup_pio(&svm->vcpu, kvm_run, in, size, count, string,
+			     down, address, rep, port);
 }
 
-static int nop_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
 	return 1;
 }
 
-static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-	struct vcpu_svm *svm = to_svm(vcpu);
-
 	svm->next_rip = svm->vmcb->save.rip + 1;
-	skip_emulated_instruction(vcpu);
-	return kvm_emulate_halt(vcpu);
+	skip_emulated_instruction(&svm->vcpu);
+	return kvm_emulate_halt(&svm->vcpu);
 }
 
-static int vmmcall_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-	struct vcpu_svm *svm = to_svm(vcpu);
-
 	svm->next_rip = svm->vmcb->save.rip + 3;
-	skip_emulated_instruction(vcpu);
-	return kvm_hypercall(vcpu, kvm_run);
+	skip_emulated_instruction(&svm->vcpu);
+	return kvm_hypercall(&svm->vcpu, kvm_run);
 }
 
-static int invalid_op_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int invalid_op_interception(struct vcpu_svm *svm,
+				   struct kvm_run *kvm_run)
 {
-	inject_ud(vcpu);
+	inject_ud(&svm->vcpu);
 	return 1;
 }
 
-static int task_switch_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int task_switch_interception(struct vcpu_svm *svm,
+				    struct kvm_run *kvm_run)
 {
 	printk(KERN_DEBUG "%s: task swiche is unsupported\n", __FUNCTION__);
 	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
 	return 0;
 }
 
-static int cpuid_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-	struct vcpu_svm *svm = to_svm(vcpu);
-
 	svm->next_rip = svm->vmcb->save.rip + 2;
-	kvm_emulate_cpuid(vcpu);
+	kvm_emulate_cpuid(&svm->vcpu);
 	return 1;
 }
 
-static int emulate_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int emulate_on_interception(struct vcpu_svm *svm,
+				   struct kvm_run *kvm_run)
 {
-	if (emulate_instruction(vcpu, NULL, 0, 0) != EMULATE_DONE)
+	if (emulate_instruction(&svm->vcpu, NULL, 0, 0) != EMULATE_DONE)
 		printk(KERN_ERR "%s: failed\n", __FUNCTION__);
 	return 1;
 }
@@ -1241,19 +1231,18 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
 	return 0;
 }
 
-static int rdmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-	struct vcpu_svm *svm = to_svm(vcpu);
-	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
+	u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
 	u64 data;
 
-	if (svm_get_msr(vcpu, ecx, &data))
-		svm_inject_gp(vcpu, 0);
+	if (svm_get_msr(&svm->vcpu, ecx, &data))
+		svm_inject_gp(&svm->vcpu, 0);
 	else {
 		svm->vmcb->save.rax = data & 0xffffffff;
-		vcpu->regs[VCPU_REGS_RDX] = data >> 32;
+		svm->vcpu.regs[VCPU_REGS_RDX] = data >> 32;
 		svm->next_rip = svm->vmcb->save.rip + 2;
-		skip_emulated_instruction(vcpu);
+		skip_emulated_instruction(&svm->vcpu);
 	}
 	return 1;
 }
@@ -1302,29 +1291,28 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 	return 0;
 }
 
-static int wrmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-	struct vcpu_svm *svm = to_svm(vcpu);
-	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
+	u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
 	u64 data = (svm->vmcb->save.rax & -1u)
-		| ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
+		| ((u64)(svm->vcpu.regs[VCPU_REGS_RDX] & -1u) << 32);
 	svm->next_rip = svm->vmcb->save.rip + 2;
-	if (svm_set_msr(vcpu, ecx, data))
-		svm_inject_gp(vcpu, 0);
+	if (svm_set_msr(&svm->vcpu, ecx, data))
+		svm_inject_gp(&svm->vcpu, 0);
 	else
-		skip_emulated_instruction(vcpu);
+		skip_emulated_instruction(&svm->vcpu);
 	return 1;
 }
 
-static int msr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-	if (to_svm(vcpu)->vmcb->control.exit_info_1)
-		return wrmsr_interception(vcpu, kvm_run);
+	if (svm->vmcb->control.exit_info_1)
+		return wrmsr_interception(svm, kvm_run);
 	else
-		return rdmsr_interception(vcpu, kvm_run);
+		return rdmsr_interception(svm, kvm_run);
 }
 
-static int interrupt_window_interception(struct kvm_vcpu *vcpu,
+static int interrupt_window_interception(struct vcpu_svm *svm,
 					 struct kvm_run *kvm_run)
 {
 	/*
@@ -1332,8 +1320,8 @@ static int interrupt_window_interception(struct kvm_vcpu *vcpu,
 	 * possible
 	 */
 	if (kvm_run->request_interrupt_window &&
-	    !vcpu->irq_summary) {
-		++vcpu->stat.irq_window_exits;
+	    !svm->vcpu.irq_summary) {
+		++svm->vcpu.stat.irq_window_exits;
 		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
 		return 0;
 	}
@@ -1341,7 +1329,7 @@ static int interrupt_window_interception(struct kvm_vcpu *vcpu,
 	return 1;
 }
 
-static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
+static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
 				  struct kvm_run *kvm_run) = {
 	[SVM_EXIT_READ_CR0]		= emulate_on_interception,
 	[SVM_EXIT_READ_CR3]		= emulate_on_interception,
@@ -1388,9 +1376,8 @@ static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
 };
 
 
-static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_exit(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-	struct vcpu_svm *svm = to_svm(vcpu);
 	u32 exit_code = svm->vmcb->control.exit_code;
 
 	if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
@@ -1407,7 +1394,7 @@ static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		return 0;
 	}
 
-	return svm_exit_handlers[exit_code](vcpu, kvm_run);
+	return svm_exit_handlers[exit_code](svm, kvm_run);
 }
 
 static void reload_tss(struct kvm_vcpu *vcpu)
@@ -1419,80 +1406,77 @@ static void reload_tss(struct kvm_vcpu *vcpu)
 	load_TR_desc();
 }
 
-static void pre_svm_run(struct kvm_vcpu *vcpu)
+static void pre_svm_run(struct vcpu_svm *svm)
 {
-	struct vcpu_svm *svm = to_svm(vcpu);
 	int cpu = raw_smp_processor_id();
 
 	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
 
 	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
-	if (vcpu->cpu != cpu ||
+	if (svm->vcpu.cpu != cpu ||
 	    svm->asid_generation != svm_data->asid_generation)
-		new_asid(vcpu, svm_data);
+		new_asid(svm, svm_data);
 }
 
 
-static inline void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
+static inline void kvm_do_inject_irq(struct vcpu_svm *svm)
 {
 	struct vmcb_control_area *control;
 
-	control = &to_svm(vcpu)->vmcb->control;
-	control->int_vector = pop_irq(vcpu);
+	control = &svm->vmcb->control;
+	control->int_vector = pop_irq(&svm->vcpu);
 	control->int_ctl &= ~V_INTR_PRIO_MASK;
 	control->int_ctl |= V_IRQ_MASK |
 		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
 }
 
-static void kvm_reput_irq(struct kvm_vcpu *vcpu)
+static void kvm_reput_irq(struct vcpu_svm *svm)
 {
-	struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
+	struct vmcb_control_area *control = &svm->vmcb->control;
 
 	if (control->int_ctl & V_IRQ_MASK) {
 		control->int_ctl &= ~V_IRQ_MASK;
-		push_irq(vcpu, control->int_vector);
+		push_irq(&svm->vcpu, control->int_vector);
 	}
 
-	vcpu->interrupt_window_open =
+	svm->vcpu.interrupt_window_open =
 		!(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
 }
 
-static void do_interrupt_requests(struct kvm_vcpu *vcpu,
+static void do_interrupt_requests(struct vcpu_svm *svm,
 				  struct kvm_run *kvm_run)
 {
-	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb_control_area *control = &svm->vmcb->control;
 
-	vcpu->interrupt_window_open =
+	svm->vcpu.interrupt_window_open =
 		(!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
 		 (svm->vmcb->save.rflags & X86_EFLAGS_IF));
 
-	if (vcpu->interrupt_window_open && vcpu->irq_summary)
+	if (svm->vcpu.interrupt_window_open && svm->vcpu.irq_summary)
 		/*
 		 * If interrupts enabled, and not blocked by sti or mov ss. Good.
 		 */
-		kvm_do_inject_irq(vcpu);
+		kvm_do_inject_irq(svm);
 
 	/*
 	 * Interrupts blocked.  Wait for unblock.
 	 */
-	if (!vcpu->interrupt_window_open &&
-	    (vcpu->irq_summary || kvm_run->request_interrupt_window)) {
+	if (!svm->vcpu.interrupt_window_open &&
+	    (svm->vcpu.irq_summary || kvm_run->request_interrupt_window)) {
 		control->intercept |= 1ULL << INTERCEPT_VINTR;
 	} else
 		control->intercept &= ~(1ULL << INTERCEPT_VINTR);
 }
 
-static void post_kvm_run_save(struct kvm_vcpu *vcpu,
+static void post_kvm_run_save(struct vcpu_svm *svm,
 			      struct kvm_run *kvm_run)
 {
-	struct vcpu_svm *svm = to_svm(vcpu);
-
-	kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
-						  vcpu->irq_summary == 0);
+	kvm_run->ready_for_interrupt_injection
+		= (svm->vcpu.interrupt_window_open &&
+		   svm->vcpu.irq_summary == 0);
 	kvm_run->if_flag = (svm->vmcb->save.rflags & X86_EFLAGS_IF) != 0;
-	kvm_run->cr8 = vcpu->cr8;
-	kvm_run->apic_base = vcpu->apic_base;
+	kvm_run->cr8 = svm->vcpu.cr8;
+	kvm_run->apic_base = svm->vcpu.apic_base;
 }
 
 /*
@@ -1501,13 +1485,13 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu,
  *
  * No need to exit to userspace if we already have an interrupt queued.
  */
-static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
+static int dm_request_for_irq_injection(struct vcpu_svm *svm,
 					struct kvm_run *kvm_run)
 {
-	return (!vcpu->irq_summary &&
+	return (!svm->vcpu.irq_summary &&
 		kvm_run->request_interrupt_window &&
-		vcpu->interrupt_window_open &&
-		(to_svm(vcpu)->vmcb->save.rflags & X86_EFLAGS_IF));
+		svm->vcpu.interrupt_window_open &&
+		(svm->vmcb->save.rflags & X86_EFLAGS_IF));
 }
 
 static void save_db_regs(unsigned long *db_regs)
@@ -1545,7 +1529,7 @@ again:
 		return r;
 
 	if (!vcpu->mmio_read_completed)
-		do_interrupt_requests(vcpu, kvm_run);
+		do_interrupt_requests(svm, kvm_run);
 
 	clgi();
 
@@ -1554,7 +1538,7 @@ again:
 	if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
 		svm_flush_tlb(vcpu);
 
-	pre_svm_run(vcpu);
+	pre_svm_run(svm);
 
 	save_host_msrs(vcpu);
 	fs_selector = read_fs();
@@ -1714,7 +1698,7 @@ again:
 
 	stgi();
 
-	kvm_reput_irq(vcpu);
+	kvm_reput_irq(svm);
 
 	svm->next_rip = 0;
 
@@ -1722,29 +1706,29 @@ again:
 		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
 		kvm_run->fail_entry.hardware_entry_failure_reason
 			= svm->vmcb->control.exit_code;
-		post_kvm_run_save(vcpu, kvm_run);
+		post_kvm_run_save(svm, kvm_run);
 		return 0;
 	}
 
-	r = handle_exit(vcpu, kvm_run);
+	r = handle_exit(svm, kvm_run);
 	if (r > 0) {
 		if (signal_pending(current)) {
 			++vcpu->stat.signal_exits;
-			post_kvm_run_save(vcpu, kvm_run);
+			post_kvm_run_save(svm, kvm_run);
 			kvm_run->exit_reason = KVM_EXIT_INTR;
 			return -EINTR;
 		}
 
-		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+		if (dm_request_for_irq_injection(svm, kvm_run)) {
 			++vcpu->stat.request_irq_exits;
-			post_kvm_run_save(vcpu, kvm_run);
+			post_kvm_run_save(svm, kvm_run);
 			kvm_run->exit_reason = KVM_EXIT_INTR;
 			return -EINTR;
 		}
 		kvm_resched(vcpu);
 		goto again;
 	}
-	post_kvm_run_save(vcpu, kvm_run);
+	post_kvm_run_save(svm, kvm_run);
 	return r;
 }
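Taken together, the patch leaves the container_of derivation at the entry points (the vcpu run loop above holds both pointers and hands svm down) and types everything below it as struct vcpu_svm. A self-contained toy model of the pattern in plain C; the names mirror the diff, but the struct layouts, handler bodies, and the dispatch_exit() entry point are stand-ins, not kernel code:

	#include <stddef.h>

	/* same pointer arithmetic as the kernel macro */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct kvm_vcpu { int cpu; };			/* generic vcpu state (toy) */
	struct vcpu_svm { struct kvm_vcpu vcpu; };	/* SVM state embeds it */

	/* after the patch: handlers take the derived type directly */
	static int nop_on_interception(struct vcpu_svm *svm)
	{
		(void)svm;
		return 1;
	}

	static int (*handlers[])(struct vcpu_svm *) = {
		nop_on_interception,
	};

	/* the one remaining derivation, done once where the generic layer calls in */
	static int dispatch_exit(struct kvm_vcpu *vcpu, unsigned int exit_code)
	{
		struct vcpu_svm *svm = container_of(vcpu, struct vcpu_svm, vcpu);

		return handlers[exit_code](svm);
	}

	int main(void)
	{
		struct vcpu_svm s = { .vcpu = { .cpu = 0 } };

		return dispatch_exit(&s.vcpu, 0) == 1 ? 0 : 1;
	}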