path: root/arch/x86/kvm/svm.c
Diffstat (limited to 'arch/x86/kvm/svm.c')
-rw-r--r--  arch/x86/kvm/svm.c  102
1 file changed, 54 insertions(+), 48 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index c17404add91f..92048a626d4e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -286,7 +286,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	if (!svm->next_rip) {
-		if (emulate_instruction(vcpu, vcpu->run, 0, 0, EMULTYPE_SKIP) !=
+		if (emulate_instruction(vcpu, 0, 0, EMULTYPE_SKIP) !=
 				EMULATE_DONE)
 			printk(KERN_DEBUG "%s: NOP\n", __func__);
 		return;
@@ -1180,7 +1180,7 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
 	}
 }
 
-static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int pf_interception(struct vcpu_svm *svm)
 {
 	u64 fault_address;
 	u32 error_code;
@@ -1194,8 +1194,10 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
 }
 
-static int db_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int db_interception(struct vcpu_svm *svm)
 {
+	struct kvm_run *kvm_run = svm->vcpu.run;
+
 	if (!(svm->vcpu.guest_debug &
 	      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
 		!svm->vcpu.arch.singlestep) {
@@ -1223,25 +1225,27 @@ static int db_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return 1;
 }
 
-static int bp_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int bp_interception(struct vcpu_svm *svm)
 {
+	struct kvm_run *kvm_run = svm->vcpu.run;
+
 	kvm_run->exit_reason = KVM_EXIT_DEBUG;
 	kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
 	kvm_run->debug.arch.exception = BP_VECTOR;
 	return 0;
 }
 
-static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int ud_interception(struct vcpu_svm *svm)
 {
 	int er;
 
-	er = emulate_instruction(&svm->vcpu, kvm_run, 0, 0, EMULTYPE_TRAP_UD);
+	er = emulate_instruction(&svm->vcpu, 0, 0, EMULTYPE_TRAP_UD);
 	if (er != EMULATE_DONE)
 		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
 	return 1;
 }
 
-static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int nm_interception(struct vcpu_svm *svm)
 {
 	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
 	if (!(svm->vcpu.arch.cr0 & X86_CR0_TS))
@@ -1251,7 +1255,7 @@ static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return 1;
 }
 
-static int mc_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int mc_interception(struct vcpu_svm *svm)
 {
 	/*
 	 * On an #MC intercept the MCE handler is not called automatically in
@@ -1264,8 +1268,10 @@ static int mc_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return 1;
 }
 
-static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int shutdown_interception(struct vcpu_svm *svm)
 {
+	struct kvm_run *kvm_run = svm->vcpu.run;
+
 	/*
 	 * VMCB is undefined after a SHUTDOWN intercept
 	 * so reinitialize it.
@@ -1277,7 +1283,7 @@ static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return 0;
 }
 
-static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int io_interception(struct vcpu_svm *svm)
 {
 	u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
 	int size, in, string;
@@ -1291,7 +1297,7 @@ static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 
 	if (string) {
 		if (emulate_instruction(&svm->vcpu,
-					kvm_run, 0, 0, 0) == EMULATE_DO_MMIO)
+					0, 0, 0) == EMULATE_DO_MMIO)
 			return 0;
 		return 1;
 	}
@@ -1301,33 +1307,33 @@ static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
 
 	skip_emulated_instruction(&svm->vcpu);
-	return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port);
+	return kvm_emulate_pio(&svm->vcpu, in, size, port);
 }
 
-static int nmi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int nmi_interception(struct vcpu_svm *svm)
 {
 	return 1;
 }
 
-static int intr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int intr_interception(struct vcpu_svm *svm)
 {
 	++svm->vcpu.stat.irq_exits;
 	return 1;
 }
 
-static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int nop_on_interception(struct vcpu_svm *svm)
 {
 	return 1;
 }
 
-static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int halt_interception(struct vcpu_svm *svm)
 {
 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
 	skip_emulated_instruction(&svm->vcpu);
 	return kvm_emulate_halt(&svm->vcpu);
 }
 
-static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int vmmcall_interception(struct vcpu_svm *svm)
 {
 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
 	skip_emulated_instruction(&svm->vcpu);
@@ -1837,7 +1843,7 @@ static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
 	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
 }
 
-static int vmload_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int vmload_interception(struct vcpu_svm *svm)
 {
 	struct vmcb *nested_vmcb;
 
@@ -1857,7 +1863,7 @@ static int vmload_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return 1;
 }
 
-static int vmsave_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int vmsave_interception(struct vcpu_svm *svm)
 {
 	struct vmcb *nested_vmcb;
 
@@ -1877,7 +1883,7 @@ static int vmsave_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return 1;
 }
 
-static int vmrun_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int vmrun_interception(struct vcpu_svm *svm)
 {
 	nsvm_printk("VMrun\n");
 
@@ -1907,7 +1913,7 @@ failed:
 	return 1;
 }
 
-static int stgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int stgi_interception(struct vcpu_svm *svm)
 {
 	if (nested_svm_check_permissions(svm))
 		return 1;
@@ -1920,7 +1926,7 @@ static int stgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return 1;
 }
 
-static int clgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int clgi_interception(struct vcpu_svm *svm)
 {
 	if (nested_svm_check_permissions(svm))
 		return 1;
@@ -1937,7 +1943,7 @@ static int clgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return 1;
 }
 
-static int invlpga_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int invlpga_interception(struct vcpu_svm *svm)
 {
 	struct kvm_vcpu *vcpu = &svm->vcpu;
 	nsvm_printk("INVLPGA\n");
@@ -1950,15 +1956,13 @@ static int invlpga_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return 1;
 }
 
-static int invalid_op_interception(struct vcpu_svm *svm,
-				   struct kvm_run *kvm_run)
+static int invalid_op_interception(struct vcpu_svm *svm)
 {
 	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
 	return 1;
 }
 
-static int task_switch_interception(struct vcpu_svm *svm,
-				    struct kvm_run *kvm_run)
+static int task_switch_interception(struct vcpu_svm *svm)
 {
 	u16 tss_selector;
 	int reason;
@@ -2008,14 +2012,14 @@ static int task_switch_interception(struct vcpu_svm *svm,
 	return kvm_task_switch(&svm->vcpu, tss_selector, reason);
 }
 
-static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int cpuid_interception(struct vcpu_svm *svm)
 {
 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
 	kvm_emulate_cpuid(&svm->vcpu);
 	return 1;
 }
 
-static int iret_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int iret_interception(struct vcpu_svm *svm)
 {
 	++svm->vcpu.stat.nmi_window_exits;
 	svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
@@ -2023,26 +2027,27 @@ static int iret_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return 1;
 }
 
-static int invlpg_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int invlpg_interception(struct vcpu_svm *svm)
 {
-	if (emulate_instruction(&svm->vcpu, kvm_run, 0, 0, 0) != EMULATE_DONE)
+	if (emulate_instruction(&svm->vcpu, 0, 0, 0) != EMULATE_DONE)
 		pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
 	return 1;
 }
 
-static int emulate_on_interception(struct vcpu_svm *svm,
-				   struct kvm_run *kvm_run)
+static int emulate_on_interception(struct vcpu_svm *svm)
 {
-	if (emulate_instruction(&svm->vcpu, NULL, 0, 0, 0) != EMULATE_DONE)
+	if (emulate_instruction(&svm->vcpu, 0, 0, 0) != EMULATE_DONE)
 		pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
 	return 1;
 }
 
-static int cr8_write_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int cr8_write_interception(struct vcpu_svm *svm)
 {
+	struct kvm_run *kvm_run = svm->vcpu.run;
+
 	u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
 	/* instruction emulation calls kvm_set_cr8() */
-	emulate_instruction(&svm->vcpu, NULL, 0, 0, 0);
+	emulate_instruction(&svm->vcpu, 0, 0, 0);
 	if (irqchip_in_kernel(svm->vcpu.kvm)) {
 		svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
 		return 1;
@@ -2128,7 +2133,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
 	return 0;
 }
 
-static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int rdmsr_interception(struct vcpu_svm *svm)
 {
 	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
 	u64 data;
@@ -2221,7 +2226,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 	return 0;
 }
 
-static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int wrmsr_interception(struct vcpu_svm *svm)
 {
 	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
 	u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
@@ -2237,17 +2242,18 @@ static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return 1;
 }
 
-static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int msr_interception(struct vcpu_svm *svm)
 {
 	if (svm->vmcb->control.exit_info_1)
-		return wrmsr_interception(svm, kvm_run);
+		return wrmsr_interception(svm);
 	else
-		return rdmsr_interception(svm, kvm_run);
+		return rdmsr_interception(svm);
 }
 
-static int interrupt_window_interception(struct vcpu_svm *svm,
-				   struct kvm_run *kvm_run)
+static int interrupt_window_interception(struct vcpu_svm *svm)
 {
+	struct kvm_run *kvm_run = svm->vcpu.run;
+
 	svm_clear_vintr(svm);
 	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
 	/*
@@ -2265,8 +2271,7 @@ static int interrupt_window_interception(struct vcpu_svm *svm,
 	return 1;
 }
 
-static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
-				  struct kvm_run *kvm_run) = {
+static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
 	[SVM_EXIT_READ_CR0]			= emulate_on_interception,
 	[SVM_EXIT_READ_CR3]			= emulate_on_interception,
 	[SVM_EXIT_READ_CR4]			= emulate_on_interception,
@@ -2321,9 +2326,10 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
 	[SVM_EXIT_NPF]				= pf_interception,
 };
 
-static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+static int handle_exit(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
+	struct kvm_run *kvm_run = vcpu->run;
 	u32 exit_code = svm->vmcb->control.exit_code;
 
 	trace_kvm_exit(exit_code, svm->vmcb->save.rip);
@@ -2383,7 +2389,7 @@ static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		return 0;
 	}
 
-	return svm_exit_handlers[exit_code](svm, kvm_run);
+	return svm_exit_handlers[exit_code](svm);
 }
 
 static void reload_tss(struct kvm_vcpu *vcpu)
@@ -2588,7 +2594,7 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
 #define R "e"
 #endif
 
-static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	u16 fs_selector;
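
For illustration only, a minimal sketch of the calling convention these hunks converge on; the example_interception names below are hypothetical and not part of the patch. Exit handlers drop the explicit kvm_run parameter and, when they still need the run structure, derive it from the vcpu they already receive.

/*
 * Sketch, not from the patch: hypothetical handler showing the old and
 * new handler shapes side by side.
 */

/* Old shape: kvm_run passed explicitly to every exit handler. */
static int example_interception_old(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	kvm_run->exit_reason = KVM_EXIT_DEBUG;
	return 0;
}

/* New shape: only vcpu_svm is passed; kvm_run is one hop away via the vcpu. */
static int example_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;	/* same structure as before */

	kvm_run->exit_reason = KVM_EXIT_DEBUG;
	return 0;
}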