author		Avi Kivity <avi@redhat.com>	2009-08-24 04:10:17 -0400
committer	Avi Kivity <avi@redhat.com>	2009-12-03 02:32:06 -0500
commit		851ba6922ac575b749f63dee0ae072808163ba6a (patch)
tree		665111285e65fea316ce6614f1208261a255fb70 /arch/x86/kvm
parent		d8769fedd4e8323d8afea9a1b2bdebff4f1d2d37 (diff)
KVM: Don't pass kvm_run arguments

They're just copies of vcpu->run, which is readily accessible.

Signed-off-by: Avi Kivity <avi@redhat.com>
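In practice the refactoring replaces argument threading with a lookup: any exit handler that still needs the run structure fetches it from vcpu->run (or svm->vcpu.run) at the top of the function. A minimal standalone sketch of the before/after pattern, using toy stand-in types rather than the kernel's real structures:

	#include <stdio.h>

	/* Toy stand-ins for the kernel structures; illustrative only. */
	struct kvm_run { int exit_reason; };
	struct kvm_vcpu { struct kvm_run *run; };

	enum { TOY_EXIT_DEBUG = 4 };		/* arbitrary toy value */

	/* Before: the run structure was threaded through as an argument,
	 * even though it is always the same object as vcpu->run. */
	static int bp_handler_old(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
	{
		(void)vcpu;
		kvm_run->exit_reason = TOY_EXIT_DEBUG;
		return 0;
	}

	/* After: the handler takes only the vcpu and fetches the run
	 * structure itself when it actually needs it. */
	static int bp_handler_new(struct kvm_vcpu *vcpu)
	{
		struct kvm_run *kvm_run = vcpu->run;

		kvm_run->exit_reason = TOY_EXIT_DEBUG;
		return 0;
	}

	int main(void)
	{
		struct kvm_run run = { 0 };
		struct kvm_vcpu vcpu = { .run = &run };

		bp_handler_old(&vcpu, vcpu.run);	/* old-style call site */
		bp_handler_new(&vcpu);			/* new-style call site */
		printf("exit_reason = %d\n", run.exit_reason);
		return 0;
	}

Since kvm_run is always reachable from the vcpu, dropping the parameter removes one argument from every exit handler and from the run/handle_exit hooks, with no change in behavior.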
Diffstat (limited to 'arch/x86/kvm')

-rw-r--r--	arch/x86/kvm/emulate.c	  6
-rw-r--r--	arch/x86/kvm/mmu.c	  2
-rw-r--r--	arch/x86/kvm/svm.c	102
-rw-r--r--	arch/x86/kvm/vmx.c	113
-rw-r--r--	arch/x86/kvm/x86.c	 50

5 files changed, 136 insertions(+), 137 deletions(-)
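A visible knock-on effect in the diffs below is that the exit-handler dispatch tables (svm_exit_handlers in svm.c, kvm_vmx_exit_handlers in vmx.c) become arrays of single-argument function pointers. A toy model of that table-driven dispatch under the new signature (names and exit codes here are illustrative, not the kernel's):

	#include <stdio.h>

	/* Toy stand-ins; illustrative only. */
	struct kvm_run { int exit_reason; };
	struct kvm_vcpu { struct kvm_run *run; };

	static int toy_handle_halt(struct kvm_vcpu *vcpu)
	{
		(void)vcpu;
		return 1;			/* 1 = keep running the guest */
	}

	static int toy_handle_shutdown(struct kvm_vcpu *vcpu)
	{
		vcpu->run->exit_reason = 8;	/* arbitrary toy value */
		return 0;			/* 0 = exit to userspace */
	}

	/* The table now holds single-argument handlers, indexed by exit code. */
	static int (*toy_exit_handlers[])(struct kvm_vcpu *vcpu) = {
		[0] = toy_handle_halt,
		[1] = toy_handle_shutdown,
	};

	static int toy_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_code)
	{
		return toy_exit_handlers[exit_code](vcpu);	/* no kvm_run passed */
	}

	int main(void)
	{
		struct kvm_run run = { 0 };
		struct kvm_vcpu vcpu = { .run = &run };
		int keep_running;

		keep_running = toy_handle_exit(&vcpu, 0);
		printf("halt: keep_running=%d\n", keep_running);
		keep_running = toy_handle_exit(&vcpu, 1);
		printf("shutdown: keep_running=%d exit_reason=%d\n",
		       keep_running, run.exit_reason);
		return 0;
	}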
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 1f0ff4afa73e..0644d3df621a 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -1826,7 +1826,7 @@ special_insn:
 		break;
 	case 0x6c:		/* insb */
 	case 0x6d:		/* insw/insd */
-		if (kvm_emulate_pio_string(ctxt->vcpu, NULL,
+		if (kvm_emulate_pio_string(ctxt->vcpu,
 				1,
 				(c->d & ByteOp) ? 1 : c->op_bytes,
 				c->rep_prefix ?
@@ -1842,7 +1842,7 @@ special_insn:
 		return 0;
 	case 0x6e:		/* outsb */
 	case 0x6f:		/* outsw/outsd */
-		if (kvm_emulate_pio_string(ctxt->vcpu, NULL,
+		if (kvm_emulate_pio_string(ctxt->vcpu,
 				0,
 				(c->d & ByteOp) ? 1 : c->op_bytes,
 				c->rep_prefix ?
@@ -2135,7 +2135,7 @@ special_insn:
 	case 0xef: /* out (e/r)ax,dx */
 		port = c->regs[VCPU_REGS_RDX];
 		io_dir_in = 0;
-	do_io:	if (kvm_emulate_pio(ctxt->vcpu, NULL, io_dir_in,
+	do_io:	if (kvm_emulate_pio(ctxt->vcpu, io_dir_in,
 			   (c->d & ByteOp) ? 1 : c->op_bytes,
 			   port) != 0) {
 			c->eip = saved_eip;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 818b92ad82cf..a9024797b21f 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2789,7 +2789,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
 	if (r)
 		goto out;
 
-	er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
+	er = emulate_instruction(vcpu, cr2, error_code, 0);
 
 	switch (er) {
 	case EMULATE_DONE:
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index c17404add91f..92048a626d4e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -286,7 +286,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	if (!svm->next_rip) {
-		if (emulate_instruction(vcpu, vcpu->run, 0, 0, EMULTYPE_SKIP) !=
+		if (emulate_instruction(vcpu, 0, 0, EMULTYPE_SKIP) !=
 				EMULATE_DONE)
 			printk(KERN_DEBUG "%s: NOP\n", __func__);
 		return;
@@ -1180,7 +1180,7 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
 	}
 }
 
-static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int pf_interception(struct vcpu_svm *svm)
 {
 	u64 fault_address;
 	u32 error_code;
@@ -1194,8 +1194,10 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
 }
 
-static int db_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int db_interception(struct vcpu_svm *svm)
 {
+	struct kvm_run *kvm_run = svm->vcpu.run;
+
 	if (!(svm->vcpu.guest_debug &
 	      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
 		!svm->vcpu.arch.singlestep) {
@@ -1223,25 +1225,27 @@ static int db_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return 1;
 }
 
-static int bp_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int bp_interception(struct vcpu_svm *svm)
 {
+	struct kvm_run *kvm_run = svm->vcpu.run;
+
 	kvm_run->exit_reason = KVM_EXIT_DEBUG;
 	kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
 	kvm_run->debug.arch.exception = BP_VECTOR;
 	return 0;
 }
 
-static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int ud_interception(struct vcpu_svm *svm)
 {
 	int er;
 
-	er = emulate_instruction(&svm->vcpu, kvm_run, 0, 0, EMULTYPE_TRAP_UD);
+	er = emulate_instruction(&svm->vcpu, 0, 0, EMULTYPE_TRAP_UD);
 	if (er != EMULATE_DONE)
 		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
 	return 1;
 }
 
-static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int nm_interception(struct vcpu_svm *svm)
 {
 	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
 	if (!(svm->vcpu.arch.cr0 & X86_CR0_TS))
@@ -1251,7 +1255,7 @@ static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return 1;
 }
 
-static int mc_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int mc_interception(struct vcpu_svm *svm)
 {
 	/*
 	 * On an #MC intercept the MCE handler is not called automatically in
@@ -1264,8 +1268,10 @@ static int mc_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return 1;
 }
 
-static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int shutdown_interception(struct vcpu_svm *svm)
 {
+	struct kvm_run *kvm_run = svm->vcpu.run;
+
 	/*
 	 * VMCB is undefined after a SHUTDOWN intercept
 	 * so reinitialize it.
@@ -1277,7 +1283,7 @@ static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return 0;
 }
 
-static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int io_interception(struct vcpu_svm *svm)
 {
 	u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
 	int size, in, string;
@@ -1291,7 +1297,7 @@ static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 
 	if (string) {
 		if (emulate_instruction(&svm->vcpu,
-					kvm_run, 0, 0, 0) == EMULATE_DO_MMIO)
+					0, 0, 0) == EMULATE_DO_MMIO)
 			return 0;
 		return 1;
 	}
@@ -1301,33 +1307,33 @@ static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
 
 	skip_emulated_instruction(&svm->vcpu);
-	return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port);
+	return kvm_emulate_pio(&svm->vcpu, in, size, port);
 }
 
-static int nmi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int nmi_interception(struct vcpu_svm *svm)
 {
 	return 1;
 }
 
-static int intr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int intr_interception(struct vcpu_svm *svm)
 {
 	++svm->vcpu.stat.irq_exits;
 	return 1;
 }
 
-static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int nop_on_interception(struct vcpu_svm *svm)
 {
 	return 1;
 }
 
-static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int halt_interception(struct vcpu_svm *svm)
 {
 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
 	skip_emulated_instruction(&svm->vcpu);
 	return kvm_emulate_halt(&svm->vcpu);
 }
 
-static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int vmmcall_interception(struct vcpu_svm *svm)
 {
 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
 	skip_emulated_instruction(&svm->vcpu);
@@ -1837,7 +1843,7 @@ static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
 	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
 }
 
-static int vmload_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int vmload_interception(struct vcpu_svm *svm)
 {
 	struct vmcb *nested_vmcb;
 
@@ -1857,7 +1863,7 @@ static int vmload_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return 1;
 }
 
-static int vmsave_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int vmsave_interception(struct vcpu_svm *svm)
 {
 	struct vmcb *nested_vmcb;
 
@@ -1877,7 +1883,7 @@ static int vmsave_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return 1;
 }
 
-static int vmrun_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int vmrun_interception(struct vcpu_svm *svm)
 {
 	nsvm_printk("VMrun\n");
 
@@ -1907,7 +1913,7 @@ failed:
 	return 1;
 }
 
-static int stgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int stgi_interception(struct vcpu_svm *svm)
 {
 	if (nested_svm_check_permissions(svm))
 		return 1;
@@ -1920,7 +1926,7 @@ static int stgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return 1;
 }
 
-static int clgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int clgi_interception(struct vcpu_svm *svm)
 {
 	if (nested_svm_check_permissions(svm))
 		return 1;
@@ -1937,7 +1943,7 @@ static int clgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return 1;
 }
 
-static int invlpga_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int invlpga_interception(struct vcpu_svm *svm)
 {
 	struct kvm_vcpu *vcpu = &svm->vcpu;
 	nsvm_printk("INVLPGA\n");
@@ -1950,15 +1956,13 @@ static int invlpga_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return 1;
 }
 
-static int invalid_op_interception(struct vcpu_svm *svm,
-				   struct kvm_run *kvm_run)
+static int invalid_op_interception(struct vcpu_svm *svm)
 {
 	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
 	return 1;
 }
 
-static int task_switch_interception(struct vcpu_svm *svm,
-				    struct kvm_run *kvm_run)
+static int task_switch_interception(struct vcpu_svm *svm)
 {
 	u16 tss_selector;
 	int reason;
@@ -2008,14 +2012,14 @@ static int task_switch_interception(struct vcpu_svm *svm,
 	return kvm_task_switch(&svm->vcpu, tss_selector, reason);
 }
 
-static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int cpuid_interception(struct vcpu_svm *svm)
 {
 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
 	kvm_emulate_cpuid(&svm->vcpu);
 	return 1;
 }
 
-static int iret_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int iret_interception(struct vcpu_svm *svm)
 {
 	++svm->vcpu.stat.nmi_window_exits;
 	svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
@@ -2023,26 +2027,27 @@ static int iret_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return 1;
 }
 
-static int invlpg_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int invlpg_interception(struct vcpu_svm *svm)
 {
-	if (emulate_instruction(&svm->vcpu, kvm_run, 0, 0, 0) != EMULATE_DONE)
+	if (emulate_instruction(&svm->vcpu, 0, 0, 0) != EMULATE_DONE)
 		pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
 	return 1;
 }
 
-static int emulate_on_interception(struct vcpu_svm *svm,
-				   struct kvm_run *kvm_run)
+static int emulate_on_interception(struct vcpu_svm *svm)
 {
-	if (emulate_instruction(&svm->vcpu, NULL, 0, 0, 0) != EMULATE_DONE)
+	if (emulate_instruction(&svm->vcpu, 0, 0, 0) != EMULATE_DONE)
 		pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
 	return 1;
 }
 
-static int cr8_write_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int cr8_write_interception(struct vcpu_svm *svm)
 {
+	struct kvm_run *kvm_run = svm->vcpu.run;
+
 	u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
 	/* instruction emulation calls kvm_set_cr8() */
-	emulate_instruction(&svm->vcpu, NULL, 0, 0, 0);
+	emulate_instruction(&svm->vcpu, 0, 0, 0);
 	if (irqchip_in_kernel(svm->vcpu.kvm)) {
 		svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
 		return 1;
@@ -2128,7 +2133,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
 	return 0;
 }
 
-static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int rdmsr_interception(struct vcpu_svm *svm)
 {
 	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
 	u64 data;
@@ -2221,7 +2226,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 	return 0;
 }
 
-static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int wrmsr_interception(struct vcpu_svm *svm)
 {
 	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
 	u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
@@ -2237,17 +2242,18 @@ static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return 1;
 }
 
-static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int msr_interception(struct vcpu_svm *svm)
 {
 	if (svm->vmcb->control.exit_info_1)
-		return wrmsr_interception(svm, kvm_run);
+		return wrmsr_interception(svm);
 	else
-		return rdmsr_interception(svm, kvm_run);
+		return rdmsr_interception(svm);
 }
 
-static int interrupt_window_interception(struct vcpu_svm *svm,
-				   struct kvm_run *kvm_run)
+static int interrupt_window_interception(struct vcpu_svm *svm)
 {
+	struct kvm_run *kvm_run = svm->vcpu.run;
+
 	svm_clear_vintr(svm);
 	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
 	/*
@@ -2265,8 +2271,7 @@ static int interrupt_window_interception(struct vcpu_svm *svm,
 	return 1;
 }
 
-static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
-				  struct kvm_run *kvm_run) = {
+static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
 	[SVM_EXIT_READ_CR0]			= emulate_on_interception,
 	[SVM_EXIT_READ_CR3]			= emulate_on_interception,
 	[SVM_EXIT_READ_CR4]			= emulate_on_interception,
@@ -2321,9 +2326,10 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
 	[SVM_EXIT_NPF]				= pf_interception,
 };
 
-static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+static int handle_exit(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
+	struct kvm_run *kvm_run = vcpu->run;
 	u32 exit_code = svm->vmcb->control.exit_code;
 
 	trace_kvm_exit(exit_code, svm->vmcb->save.rip);
@@ -2383,7 +2389,7 @@ static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		return 0;
 	}
 
-	return svm_exit_handlers[exit_code](svm, kvm_run);
+	return svm_exit_handlers[exit_code](svm);
 }
 
 static void reload_tss(struct kvm_vcpu *vcpu)
@@ -2588,7 +2594,7 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
 #define R "e"
 #endif
 
-static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	u16 fs_selector;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ed53b42caba1..4635298d000a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2659,7 +2659,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
 	 * Cause the #SS fault with 0 error code in VM86 mode.
 	 */
 	if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
-		if (emulate_instruction(vcpu, NULL, 0, 0, 0) == EMULATE_DONE)
+		if (emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE)
 			return 1;
 	/*
 	 * Forward all other exceptions that are valid in real mode.
@@ -2710,15 +2710,16 @@ static void kvm_machine_check(void)
 #endif
 }
 
-static int handle_machine_check(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_machine_check(struct kvm_vcpu *vcpu)
 {
 	/* already handled by vcpu_run */
 	return 1;
 }
 
-static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_exception(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct kvm_run *kvm_run = vcpu->run;
 	u32 intr_info, ex_no, error_code;
 	unsigned long cr2, rip, dr6;
 	u32 vect_info;
@@ -2728,7 +2729,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
 
 	if (is_machine_check(intr_info))
-		return handle_machine_check(vcpu, kvm_run);
+		return handle_machine_check(vcpu);
 
 	if ((vect_info & VECTORING_INFO_VALID_MASK) &&
 						!is_page_fault(intr_info))
@@ -2744,7 +2745,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	}
 
 	if (is_invalid_opcode(intr_info)) {
-		er = emulate_instruction(vcpu, kvm_run, 0, 0, EMULTYPE_TRAP_UD);
+		er = emulate_instruction(vcpu, 0, 0, EMULTYPE_TRAP_UD);
 		if (er != EMULATE_DONE)
 			kvm_queue_exception(vcpu, UD_VECTOR);
 		return 1;
@@ -2803,20 +2804,19 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	return 0;
 }
 
-static int handle_external_interrupt(struct kvm_vcpu *vcpu,
-				     struct kvm_run *kvm_run)
+static int handle_external_interrupt(struct kvm_vcpu *vcpu)
 {
 	++vcpu->stat.irq_exits;
 	return 1;
 }
 
-static int handle_triple_fault(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_triple_fault(struct kvm_vcpu *vcpu)
 {
-	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
+	vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
 	return 0;
 }
 
-static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_io(struct kvm_vcpu *vcpu)
 {
 	unsigned long exit_qualification;
 	int size, in, string;
@@ -2827,8 +2827,7 @@ static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	string = (exit_qualification & 16) != 0;
 
 	if (string) {
-		if (emulate_instruction(vcpu,
-					kvm_run, 0, 0, 0) == EMULATE_DO_MMIO)
+		if (emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DO_MMIO)
 			return 0;
 		return 1;
 	}
@@ -2838,7 +2837,7 @@ static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	port = exit_qualification >> 16;
 
 	skip_emulated_instruction(vcpu);
-	return kvm_emulate_pio(vcpu, kvm_run, in, size, port);
+	return kvm_emulate_pio(vcpu, in, size, port);
 }
 
 static void
@@ -2852,7 +2851,7 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
 	hypercall[2] = 0xc1;
 }
 
-static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_cr(struct kvm_vcpu *vcpu)
 {
 	unsigned long exit_qualification, val;
 	int cr;
@@ -2887,7 +2886,7 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 				return 1;
 			if (cr8_prev <= cr8)
 				return 1;
-			kvm_run->exit_reason = KVM_EXIT_SET_TPR;
+			vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
 			return 0;
 		}
 	};
@@ -2922,13 +2921,13 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	default:
 		break;
 	}
-	kvm_run->exit_reason = 0;
+	vcpu->run->exit_reason = 0;
 	pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
 	       (int)(exit_qualification >> 4) & 3, cr);
 	return 0;
 }
 
-static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_dr(struct kvm_vcpu *vcpu)
 {
 	unsigned long exit_qualification;
 	unsigned long val;
@@ -2944,13 +2943,13 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	 * guest debugging itself.
 	 */
 	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
-		kvm_run->debug.arch.dr6 = vcpu->arch.dr6;
-		kvm_run->debug.arch.dr7 = dr;
-		kvm_run->debug.arch.pc =
+		vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
+		vcpu->run->debug.arch.dr7 = dr;
+		vcpu->run->debug.arch.pc =
 			vmcs_readl(GUEST_CS_BASE) +
 			vmcs_readl(GUEST_RIP);
-		kvm_run->debug.arch.exception = DB_VECTOR;
-		kvm_run->exit_reason = KVM_EXIT_DEBUG;
+		vcpu->run->debug.arch.exception = DB_VECTOR;
+		vcpu->run->exit_reason = KVM_EXIT_DEBUG;
 		return 0;
 	} else {
 		vcpu->arch.dr7 &= ~DR7_GD;
@@ -3016,13 +3015,13 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	return 1;
 }
 
-static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_cpuid(struct kvm_vcpu *vcpu)
 {
 	kvm_emulate_cpuid(vcpu);
 	return 1;
 }
 
-static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_rdmsr(struct kvm_vcpu *vcpu)
 {
 	u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
 	u64 data;
@@ -3041,7 +3040,7 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	return 1;
 }
 
-static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_wrmsr(struct kvm_vcpu *vcpu)
 {
 	u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
 	u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
@@ -3058,14 +3057,12 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	return 1;
 }
 
-static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu,
-				      struct kvm_run *kvm_run)
+static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
 {
 	return 1;
 }
 
-static int handle_interrupt_window(struct kvm_vcpu *vcpu,
-				   struct kvm_run *kvm_run)
+static int handle_interrupt_window(struct kvm_vcpu *vcpu)
 {
 	u32 cpu_based_vm_exec_control;
 
@@ -3081,34 +3078,34 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu,
 	 * possible
 	 */
 	if (!irqchip_in_kernel(vcpu->kvm) &&
-	    kvm_run->request_interrupt_window &&
+	    vcpu->run->request_interrupt_window &&
 	    !kvm_cpu_has_interrupt(vcpu)) {
-		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
+		vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
 		return 0;
 	}
 	return 1;
 }
 
-static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_halt(struct kvm_vcpu *vcpu)
 {
 	skip_emulated_instruction(vcpu);
 	return kvm_emulate_halt(vcpu);
 }
 
-static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_vmcall(struct kvm_vcpu *vcpu)
 {
 	skip_emulated_instruction(vcpu);
 	kvm_emulate_hypercall(vcpu);
 	return 1;
 }
 
-static int handle_vmx_insn(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_vmx_insn(struct kvm_vcpu *vcpu)
 {
 	kvm_queue_exception(vcpu, UD_VECTOR);
 	return 1;
 }
 
-static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_invlpg(struct kvm_vcpu *vcpu)
 {
 	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
 
@@ -3117,14 +3114,14 @@ static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	return 1;
 }
 
-static int handle_wbinvd(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_wbinvd(struct kvm_vcpu *vcpu)
 {
 	skip_emulated_instruction(vcpu);
 	/* TODO: Add support for VT-d/pass-through device */
 	return 1;
 }
 
-static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_apic_access(struct kvm_vcpu *vcpu)
 {
 	unsigned long exit_qualification;
 	enum emulation_result er;
@@ -3133,7 +3130,7 @@ static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
 	offset = exit_qualification & 0xffful;
 
-	er = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
+	er = emulate_instruction(vcpu, 0, 0, 0);
 
 	if (er != EMULATE_DONE) {
 		printk(KERN_ERR
@@ -3144,7 +3141,7 @@ static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	return 1;
 }
 
-static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_task_switch(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long exit_qualification;
@@ -3198,7 +3195,7 @@ static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	return 1;
 }
 
-static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_ept_violation(struct kvm_vcpu *vcpu)
 {
 	unsigned long exit_qualification;
 	gpa_t gpa;
@@ -3219,8 +3216,8 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 			vmcs_readl(GUEST_LINEAR_ADDRESS));
 		printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n",
 			(long unsigned int)exit_qualification);
-		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
-		kvm_run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION;
+		vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
+		vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION;
 		return 0;
 	}
 
@@ -3290,7 +3287,7 @@ static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte,
 	}
 }
 
-static int handle_ept_misconfig(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
 {
 	u64 sptes[4];
 	int nr_sptes, i;
@@ -3306,13 +3303,13 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	for (i = PT64_ROOT_LEVEL; i > PT64_ROOT_LEVEL - nr_sptes; --i)
 		ept_misconfig_inspect_spte(vcpu, sptes[i-1], i);
 
-	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
-	kvm_run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG;
+	vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
+	vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG;
 
 	return 0;
 }
 
-static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_nmi_window(struct kvm_vcpu *vcpu)
 {
 	u32 cpu_based_vm_exec_control;
 
@@ -3325,8 +3322,7 @@ static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	return 1;
 }
 
-static void handle_invalid_guest_state(struct kvm_vcpu *vcpu,
-				       struct kvm_run *kvm_run)
+static void handle_invalid_guest_state(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	enum emulation_result err = EMULATE_DONE;
@@ -3335,7 +3331,7 @@ static void handle_invalid_guest_state(struct kvm_vcpu *vcpu,
 	preempt_enable();
 
 	while (!guest_state_valid(vcpu)) {
-		err = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
+		err = emulate_instruction(vcpu, 0, 0, 0);
 
 		if (err == EMULATE_DO_MMIO)
 			break;
@@ -3362,8 +3358,7 @@ static void handle_invalid_guest_state(struct kvm_vcpu *vcpu,
  * may resume. Otherwise they set the kvm_run parameter to indicate what needs
  * to be done to userspace and return 0.
  */
-static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
-				      struct kvm_run *kvm_run) = {
+static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
 	[EXIT_REASON_EXCEPTION_NMI]           = handle_exception,
 	[EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
 	[EXIT_REASON_TRIPLE_FAULT]            = handle_triple_fault,
@@ -3403,7 +3398,7 @@ static const int kvm_vmx_max_exit_handlers =
  * The guest has exited.  See if we can fix it or if we need userspace
  * assistance.
  */
-static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+static int vmx_handle_exit(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 exit_reason = vmx->exit_reason;
@@ -3425,8 +3420,8 @@ static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
 
 	if (unlikely(vmx->fail)) {
-		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
-		kvm_run->fail_entry.hardware_entry_failure_reason
+		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+		vcpu->run->fail_entry.hardware_entry_failure_reason
 			= vmcs_read32(VM_INSTRUCTION_ERROR);
 		return 0;
 	}
@@ -3459,10 +3454,10 @@ static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
 	if (exit_reason < kvm_vmx_max_exit_handlers
 	    && kvm_vmx_exit_handlers[exit_reason])
-		return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);
+		return kvm_vmx_exit_handlers[exit_reason](vcpu);
 	else {
-		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
-		kvm_run->hw.hardware_exit_reason = exit_reason;
+		vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
+		vcpu->run->hw.hardware_exit_reason = exit_reason;
 	}
 	return 0;
 }
@@ -3600,7 +3595,7 @@ static void fixup_rmode_irq(struct vcpu_vmx *vmx)
 #define Q "l"
 #endif
 
-static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
@@ -3614,7 +3609,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	/* Handle invalid guest state instead of entering VMX */
 	if (vmx->emulation_required && emulate_invalid_guest_state) {
-		handle_invalid_guest_state(vcpu, kvm_run);
+		handle_invalid_guest_state(vcpu);
 		return;
 	}
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ae07d261527c..1687d12b122a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2757,13 +2757,13 @@ static void cache_all_regs(struct kvm_vcpu *vcpu)
 }
 
 int emulate_instruction(struct kvm_vcpu *vcpu,
-			struct kvm_run *run,
 			unsigned long cr2,
 			u16 error_code,
 			int emulation_type)
 {
 	int r, shadow_mask;
 	struct decode_cache *c;
+	struct kvm_run *run = vcpu->run;
 
 	kvm_clear_exception_queue(vcpu);
 	vcpu->arch.mmio_fault_cr2 = cr2;
@@ -2969,8 +2969,7 @@ static int pio_string_write(struct kvm_vcpu *vcpu)
 	return r;
 }
 
-int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
-		    int size, unsigned port)
+int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in, int size, unsigned port)
 {
 	unsigned long val;
 
@@ -2999,7 +2998,7 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_pio);
 
-int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
+int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in,
 		  int size, unsigned long count, int down,
 		  gva_t address, int rep, unsigned port)
 {
@@ -3453,17 +3452,17 @@ EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
  *
  * No need to exit to userspace if we already have an interrupt queued.
  */
-static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
-					struct kvm_run *kvm_run)
+static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
 {
 	return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
-		kvm_run->request_interrupt_window &&
+		vcpu->run->request_interrupt_window &&
 		kvm_arch_interrupt_allowed(vcpu));
 }
 
-static void post_kvm_run_save(struct kvm_vcpu *vcpu,
-			      struct kvm_run *kvm_run)
+static void post_kvm_run_save(struct kvm_vcpu *vcpu)
 {
+	struct kvm_run *kvm_run = vcpu->run;
+
 	kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
 	kvm_run->cr8 = kvm_get_cr8(vcpu);
 	kvm_run->apic_base = kvm_get_apic_base(vcpu);
@@ -3525,7 +3524,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
 }
 
-static void inject_pending_event(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void inject_pending_event(struct kvm_vcpu *vcpu)
 {
 	/* try to reinject previous events if any */
 	if (vcpu->arch.exception.pending) {
@@ -3561,11 +3560,11 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	}
 }
 
-static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 {
 	int r;
 	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
-		kvm_run->request_interrupt_window;
+		vcpu->run->request_interrupt_window;
 
 	if (vcpu->requests)
 		if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
@@ -3586,12 +3585,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 			kvm_x86_ops->tlb_flush(vcpu);
 		if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
 				       &vcpu->requests)) {
-			kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
+			vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
 			r = 0;
 			goto out;
 		}
 		if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
-			kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
+			vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
 			r = 0;
 			goto out;
 		}
@@ -3615,7 +3614,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		goto out;
 	}
 
-	inject_pending_event(vcpu, kvm_run);
+	inject_pending_event(vcpu);
 
 	/* enable NMI/IRQ window open exits if needed */
 	if (vcpu->arch.nmi_pending)
@@ -3641,7 +3640,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	}
 
 	trace_kvm_entry(vcpu->vcpu_id);
-	kvm_x86_ops->run(vcpu, kvm_run);
+	kvm_x86_ops->run(vcpu);
 
 	if (unlikely(vcpu->arch.switch_db_regs || test_thread_flag(TIF_DEBUG))) {
 		set_debugreg(current->thread.debugreg0, 0);
@@ -3682,13 +3681,13 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	kvm_lapic_sync_from_vapic(vcpu);
 
-	r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
+	r = kvm_x86_ops->handle_exit(vcpu);
 out:
 	return r;
 }
 
 
-static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int __vcpu_run(struct kvm_vcpu *vcpu)
 {
 	int r;
 
@@ -3708,7 +3707,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	r = 1;
 	while (r > 0) {
 		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
-			r = vcpu_enter_guest(vcpu, kvm_run);
+			r = vcpu_enter_guest(vcpu);
 		else {
 			up_read(&vcpu->kvm->slots_lock);
 			kvm_vcpu_block(vcpu);
@@ -3736,14 +3735,14 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		if (kvm_cpu_has_pending_timer(vcpu))
 			kvm_inject_pending_timer_irqs(vcpu);
 
-		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+		if (dm_request_for_irq_injection(vcpu)) {
 			r = -EINTR;
-			kvm_run->exit_reason = KVM_EXIT_INTR;
+			vcpu->run->exit_reason = KVM_EXIT_INTR;
 			++vcpu->stat.request_irq_exits;
 		}
 		if (signal_pending(current)) {
 			r = -EINTR;
-			kvm_run->exit_reason = KVM_EXIT_INTR;
+			vcpu->run->exit_reason = KVM_EXIT_INTR;
 			++vcpu->stat.signal_exits;
 		}
 		if (need_resched()) {
@@ -3754,7 +3753,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	}
 
 	up_read(&vcpu->kvm->slots_lock);
-	post_kvm_run_save(vcpu, kvm_run);
+	post_kvm_run_save(vcpu);
 
 	vapic_exit(vcpu);
 
@@ -3794,8 +3793,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		vcpu->mmio_needed = 0;
 
 		down_read(&vcpu->kvm->slots_lock);
-		r = emulate_instruction(vcpu, kvm_run,
-					vcpu->arch.mmio_fault_cr2, 0,
+		r = emulate_instruction(vcpu, vcpu->arch.mmio_fault_cr2, 0,
 					EMULTYPE_NO_DECODE);
 		up_read(&vcpu->kvm->slots_lock);
 		if (r == EMULATE_DO_MMIO) {
@@ -3811,7 +3809,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		kvm_register_write(vcpu, VCPU_REGS_RAX,
 				   kvm_run->hypercall.ret);
 
-	r = __vcpu_run(vcpu, kvm_run);
+	r = __vcpu_run(vcpu);
 
 out:
 	if (vcpu->sigset_active)