author		Marcelo Tosatti <mtosatti@redhat.com>	2008-06-27 13:58:02 -0400
committer	Avi Kivity <avi@qumranet.com>	2008-10-15 04:13:57 -0400
commit		5fdbf9765b7ba6a45100851154768de703d51e76 (patch)
tree		ec34ec9357575dc4190e5228a6eabfd5f81b66a5 /arch
parent		ca60dfbb69afb549e33527cbf676e4daf8febfb5 (diff)
KVM: x86: accessors for guest registers
As suggested by Avi, introduce accessors to read/write guest registers.
This simplifies the ->cache_regs/->decache_regs interface, and improves
register caching which is important for VMX, where the cost of
vmcs_read/vmcs_write is significant.

[avi: fix warnings]

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
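In outline: a guest register is read through kvm_register_read(), which touches hardware state only on a cache miss (tracked per register in the regs_avail bitmap), and written through kvm_register_write(), which marks the register dirty in regs_dirty so the backend can write it back before the next guest entry. The user-space model below illustrates that scheme under stated assumptions: the struct, the enum ordering and the hardware-read stand-in are invented for the sketch and are not the kernel definitions.

#include <stdio.h>

enum kvm_reg { VCPU_REGS_RAX, VCPU_REGS_RSP, VCPU_REGS_RIP, NR_VCPU_REGS };

struct vcpu_model {
	unsigned long regs[NR_VCPU_REGS];
	unsigned long regs_avail;	/* bit set: regs[] holds a valid copy */
	unsigned long regs_dirty;	/* bit set: regs[] must be written back */
};

/* Stand-in for the expensive VMCS access the cache is meant to avoid. */
static unsigned long expensive_hw_read(enum kvm_reg reg)
{
	printf("hardware read of reg %d\n", reg);
	return 0x1000 + reg;
}

static unsigned long register_read(struct vcpu_model *v, enum kvm_reg reg)
{
	if (!(v->regs_avail & (1UL << reg))) {	/* miss: fetch once, cache */
		v->regs[reg] = expensive_hw_read(reg);
		v->regs_avail |= 1UL << reg;
	}
	return v->regs[reg];
}

static void register_write(struct vcpu_model *v, enum kvm_reg reg,
			   unsigned long val)
{
	v->regs[reg] = val;
	v->regs_dirty |= 1UL << reg;	/* flush before next guest entry */
	v->regs_avail |= 1UL << reg;
}

int main(void)
{
	struct vcpu_model v = { {0}, 0, 0 };

	register_read(&v, VCPU_REGS_RIP);	/* one hardware read */
	register_read(&v, VCPU_REGS_RIP);	/* served from the cache */
	register_write(&v, VCPU_REGS_RIP, 0xfff0);
	printf("dirty mask: %#lx\n", v.regs_dirty);
	return 0;
}

Run standalone, the model performs a single "hardware read" for the two RIP reads; that elision is the vmcs_read/vmcs_write saving the message refers to.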
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kvm/kvm_cache_regs.h	 32
-rw-r--r--	arch/x86/kvm/lapic.c	  4
-rw-r--r--	arch/x86/kvm/svm.c	 56
-rw-r--r--	arch/x86/kvm/vmx.c	103
-rw-r--r--	arch/x86/kvm/x86.c	268
-rw-r--r--	arch/x86/kvm/x86_emulate.c	 19
6 files changed, 254 insertions(+), 228 deletions(-)
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
new file mode 100644
index 000000000000..1ff819dce7d3
--- /dev/null
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -0,0 +1,32 @@
+#ifndef ASM_KVM_CACHE_REGS_H
+#define ASM_KVM_CACHE_REGS_H
+
+static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
+					      enum kvm_reg reg)
+{
+	if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
+		kvm_x86_ops->cache_reg(vcpu, reg);
+
+	return vcpu->arch.regs[reg];
+}
+
+static inline void kvm_register_write(struct kvm_vcpu *vcpu,
+				      enum kvm_reg reg,
+				      unsigned long val)
+{
+	vcpu->arch.regs[reg] = val;
+	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
+	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+}
+
+static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
+{
+	return kvm_register_read(vcpu, VCPU_REGS_RIP);
+}
+
+static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
+{
+	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
+}
+
+#endif
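The kvm_rip_read()/kvm_rip_write() wrappers exist because advancing RIP past an emulated instruction is by far the most common register access in the exit handlers rewritten below. A sketch of that pattern, under stated assumptions: kvm_vcpu is left opaque, the externs stand in for the header above, and skip_insn() is a hypothetical helper, not a kernel function.

struct kvm_vcpu;	/* opaque for the sketch */

extern unsigned long kvm_rip_read(struct kvm_vcpu *vcpu);
extern void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val);

/* Hypothetical helper: skip one emulated instruction. On VMX the read
 * costs at most one vmcs_readl() per exit and the write is held in the
 * cache until the next vmentry. */
static inline void skip_insn(struct kvm_vcpu *vcpu, unsigned int insn_len)
{
	kvm_rip_write(vcpu, kvm_rip_read(vcpu) + insn_len);
}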
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 73f43de69f67..9fde0ac24268 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -32,6 +32,7 @@
 #include <asm/current.h>
 #include <asm/apicdef.h>
 #include <asm/atomic.h>
+#include "kvm_cache_regs.h"
 #include "irq.h"
 
 #define PRId64 "d"
@@ -558,8 +559,7 @@ static void __report_tpr_access(struct kvm_lapic *apic, bool write)
 	struct kvm_run *run = vcpu->run;
 
 	set_bit(KVM_REQ_REPORT_TPR_ACCESS, &vcpu->requests);
-	kvm_x86_ops->cache_regs(vcpu);
-	run->tpr_access.rip = vcpu->arch.rip;
+	run->tpr_access.rip = kvm_rip_read(vcpu);
 	run->tpr_access.is_write = write;
 }
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 8233b86c778c..54b0bf33e21e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -18,6 +18,7 @@
 #include "kvm_svm.h"
 #include "irq.h"
 #include "mmu.h"
+#include "kvm_cache_regs.h"
 
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -236,13 +237,11 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 		printk(KERN_DEBUG "%s: NOP\n", __func__);
 		return;
 	}
-	if (svm->next_rip - svm->vmcb->save.rip > MAX_INST_SIZE)
-		printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n",
-		       __func__,
-		       svm->vmcb->save.rip,
-		       svm->next_rip);
+	if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
+		printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
+		       __func__, kvm_rip_read(vcpu), svm->next_rip);
 
-	vcpu->arch.rip = svm->vmcb->save.rip = svm->next_rip;
+	kvm_rip_write(vcpu, svm->next_rip);
 	svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
 
 	vcpu->arch.interrupt_window_open = 1;
@@ -581,6 +580,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 	save->dr7 = 0x400;
 	save->rflags = 2;
 	save->rip = 0x0000fff0;
+	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
 
 	/*
 	 * cr0 val on cpu init should be 0x60000010, we enable cpu
@@ -615,10 +615,12 @@ static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
 	init_vmcb(svm);
 
 	if (vcpu->vcpu_id != 0) {
-		svm->vmcb->save.rip = 0;
+		kvm_rip_write(vcpu, 0);
 		svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
 		svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
 	}
+	vcpu->arch.regs_avail = ~0;
+	vcpu->arch.regs_dirty = ~0;
 
 	return 0;
 }
@@ -721,23 +723,6 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 	rdtscll(vcpu->arch.host_tsc);
 }
 
-static void svm_cache_regs(struct kvm_vcpu *vcpu)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-
-	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
-	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
-	vcpu->arch.rip = svm->vmcb->save.rip;
-}
-
-static void svm_decache_regs(struct kvm_vcpu *vcpu)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
-	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
-	svm->vmcb->save.rip = vcpu->arch.rip;
-}
-
 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
 {
 	return to_svm(vcpu)->vmcb->save.rflags;
@@ -1139,14 +1124,14 @@ static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 
 static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-	svm->next_rip = svm->vmcb->save.rip + 1;
+	svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
 	skip_emulated_instruction(&svm->vcpu);
 	return kvm_emulate_halt(&svm->vcpu);
 }
 
 static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-	svm->next_rip = svm->vmcb->save.rip + 3;
+	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
 	skip_emulated_instruction(&svm->vcpu);
 	kvm_emulate_hypercall(&svm->vcpu);
 	return 1;
@@ -1178,7 +1163,7 @@ static int task_switch_interception(struct vcpu_svm *svm,
 
 static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-	svm->next_rip = svm->vmcb->save.rip + 2;
+	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
 	kvm_emulate_cpuid(&svm->vcpu);
 	return 1;
 }
@@ -1273,9 +1258,9 @@ static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 		KVMTRACE_3D(MSR_READ, &svm->vcpu, ecx, (u32)data,
 			    (u32)(data >> 32), handler);
 
-		svm->vmcb->save.rax = data & 0xffffffff;
+		svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
 		svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
-		svm->next_rip = svm->vmcb->save.rip + 2;
+		svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
 		skip_emulated_instruction(&svm->vcpu);
 	}
 	return 1;
@@ -1359,13 +1344,13 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
 	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
-	u64 data = (svm->vmcb->save.rax & -1u)
+	u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
 		| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
 
 	KVMTRACE_3D(MSR_WRITE, &svm->vcpu, ecx, (u32)data, (u32)(data >> 32),
 		    handler);
 
-	svm->next_rip = svm->vmcb->save.rip + 2;
+	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
 	if (svm_set_msr(&svm->vcpu, ecx, data))
 		kvm_inject_gp(&svm->vcpu, 0);
 	else
@@ -1723,6 +1708,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	u16 gs_selector;
 	u16 ldt_selector;
 
+	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
+	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
+	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
+
 	pre_svm_run(svm);
 
 	sync_lapic_to_cr8(vcpu);
@@ -1858,6 +1847,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		load_db_regs(svm->host_db_regs);
 
 	vcpu->arch.cr2 = svm->vmcb->save.cr2;
+	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
+	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
+	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
 
 	write_dr6(svm->host_dr6);
 	write_dr7(svm->host_dr7);
@@ -1977,8 +1969,6 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.set_gdt = svm_set_gdt,
 	.get_dr = svm_get_dr,
 	.set_dr = svm_set_dr,
-	.cache_regs = svm_cache_regs,
-	.decache_regs = svm_decache_regs,
 	.get_rflags = svm_get_rflags,
 	.set_rflags = svm_set_rflags,
 
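On SVM the guest rax/rsp/rip live in the VMCB and the hardware reads and writes them directly, so the patch syncs eagerly instead of lazily: the three fields are copied into the VMCB before vmrun and copied back out after the exit, and svm_vcpu_reset() leaves regs_avail/regs_dirty at ~0. The generic cache therefore never misses on SVM, which is why svm_x86_ops needs no cache_reg callback. A stand-in model of that two-way copy, with invented struct layouts rather than the real vmcb:

struct vmcb_save_model { unsigned long rax, rsp, rip; };
struct guest_regs_model { unsigned long rax, rsp, rip; };

/* Before vmrun: push the software copies into the hardware-visible area. */
void pre_vmrun_sync(struct vmcb_save_model *save,
		    const struct guest_regs_model *regs)
{
	save->rax = regs->rax;
	save->rsp = regs->rsp;
	save->rip = regs->rip;
}

/* After #VMEXIT: pull whatever the guest changed back out. */
void post_vmexit_sync(const struct vmcb_save_model *save,
		      struct guest_regs_model *regs)
{
	regs->rax = save->rax;
	regs->rsp = save->rsp;
	regs->rip = save->rip;
}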
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 81dac721ec31..5a3a0326c277 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -26,6 +26,7 @@
 #include <linux/highmem.h>
 #include <linux/sched.h>
 #include <linux/moduleparam.h>
+#include "kvm_cache_regs.h"
 
 #include <asm/io.h>
 #include <asm/desc.h>
@@ -715,9 +716,9 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	unsigned long rip;
 	u32 interruptibility;
 
-	rip = vmcs_readl(GUEST_RIP);
+	rip = kvm_rip_read(vcpu);
 	rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
-	vmcs_writel(GUEST_RIP, rip);
+	kvm_rip_write(vcpu, rip);
 
 	/*
 	 * We emulated an instruction, so temporary interrupt blocking
@@ -947,24 +948,19 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 	return ret;
 }
 
-/*
- * Sync the rsp and rip registers into the vcpu structure. This allows
- * registers to be accessed by indexing vcpu->arch.regs.
- */
-static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu)
-{
-	vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
-	vcpu->arch.rip = vmcs_readl(GUEST_RIP);
-}
-
-/*
- * Syncs rsp and rip back into the vmcs. Should be called after possible
- * modification.
- */
-static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu)
+static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 {
-	vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
-	vmcs_writel(GUEST_RIP, vcpu->arch.rip);
+	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+	switch (reg) {
+	case VCPU_REGS_RSP:
+		vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
+		break;
+	case VCPU_REGS_RIP:
+		vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
+		break;
+	default:
+		break;
+	}
 }
 
 static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
@@ -2019,6 +2015,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	u64 msr;
 	int ret;
 
+	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
 	down_read(&vcpu->kvm->slots_lock);
 	if (!init_rmode(vmx->vcpu.kvm)) {
 		ret = -ENOMEM;
@@ -2072,10 +2069,10 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 
 	vmcs_writel(GUEST_RFLAGS, 0x02);
 	if (vmx->vcpu.vcpu_id == 0)
-		vmcs_writel(GUEST_RIP, 0xfff0);
+		kvm_rip_write(vcpu, 0xfff0);
 	else
-		vmcs_writel(GUEST_RIP, 0);
-	vmcs_writel(GUEST_RSP, 0);
+		kvm_rip_write(vcpu, 0);
+	kvm_register_write(vcpu, VCPU_REGS_RSP, 0);
 
 	/* todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 */
 	vmcs_writel(GUEST_DR7, 0x400);
@@ -2139,11 +2136,11 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
 	if (vcpu->arch.rmode.active) {
 		vmx->rmode.irq.pending = true;
 		vmx->rmode.irq.vector = irq;
-		vmx->rmode.irq.rip = vmcs_readl(GUEST_RIP);
+		vmx->rmode.irq.rip = kvm_rip_read(vcpu);
 		vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
 			     irq | INTR_TYPE_SOFT_INTR | INTR_INFO_VALID_MASK);
 		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
-		vmcs_writel(GUEST_RIP, vmx->rmode.irq.rip - 1);
+		kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
 		return;
 	}
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
@@ -2288,7 +2285,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	}
 
 	error_code = 0;
-	rip = vmcs_readl(GUEST_RIP);
+	rip = kvm_rip_read(vcpu);
 	if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
 		error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
 	if (is_page_fault(intr_info)) {
@@ -2386,27 +2383,25 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	reg = (exit_qualification >> 8) & 15;
 	switch ((exit_qualification >> 4) & 3) {
 	case 0: /* mov to cr */
-		KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)vcpu->arch.regs[reg],
-			    (u32)((u64)vcpu->arch.regs[reg] >> 32), handler);
+		KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr,
+			    (u32)kvm_register_read(vcpu, reg),
+			    (u32)((u64)kvm_register_read(vcpu, reg) >> 32),
+			    handler);
 		switch (cr) {
 		case 0:
-			vcpu_load_rsp_rip(vcpu);
-			kvm_set_cr0(vcpu, vcpu->arch.regs[reg]);
+			kvm_set_cr0(vcpu, kvm_register_read(vcpu, reg));
 			skip_emulated_instruction(vcpu);
 			return 1;
 		case 3:
-			vcpu_load_rsp_rip(vcpu);
-			kvm_set_cr3(vcpu, vcpu->arch.regs[reg]);
+			kvm_set_cr3(vcpu, kvm_register_read(vcpu, reg));
 			skip_emulated_instruction(vcpu);
 			return 1;
 		case 4:
-			vcpu_load_rsp_rip(vcpu);
-			kvm_set_cr4(vcpu, vcpu->arch.regs[reg]);
+			kvm_set_cr4(vcpu, kvm_register_read(vcpu, reg));
 			skip_emulated_instruction(vcpu);
 			return 1;
 		case 8:
-			vcpu_load_rsp_rip(vcpu);
-			kvm_set_cr8(vcpu, vcpu->arch.regs[reg]);
+			kvm_set_cr8(vcpu, kvm_register_read(vcpu, reg));
 			skip_emulated_instruction(vcpu);
 			if (irqchip_in_kernel(vcpu->kvm))
 				return 1;
@@ -2415,7 +2410,6 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	};
 		break;
 	case 2: /* clts */
-		vcpu_load_rsp_rip(vcpu);
 		vmx_fpu_deactivate(vcpu);
 		vcpu->arch.cr0 &= ~X86_CR0_TS;
 		vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
@@ -2426,21 +2420,17 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	case 1: /*mov from cr*/
 		switch (cr) {
 		case 3:
-			vcpu_load_rsp_rip(vcpu);
-			vcpu->arch.regs[reg] = vcpu->arch.cr3;
-			vcpu_put_rsp_rip(vcpu);
+			kvm_register_write(vcpu, reg, vcpu->arch.cr3);
 			KVMTRACE_3D(CR_READ, vcpu, (u32)cr,
-				    (u32)vcpu->arch.regs[reg],
-				    (u32)((u64)vcpu->arch.regs[reg] >> 32),
+				    (u32)kvm_register_read(vcpu, reg),
+				    (u32)((u64)kvm_register_read(vcpu, reg) >> 32),
 				    handler);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		case 8:
-			vcpu_load_rsp_rip(vcpu);
-			vcpu->arch.regs[reg] = kvm_get_cr8(vcpu);
-			vcpu_put_rsp_rip(vcpu);
+			kvm_register_write(vcpu, reg, kvm_get_cr8(vcpu));
 			KVMTRACE_2D(CR_READ, vcpu, (u32)cr,
-				    (u32)vcpu->arch.regs[reg], handler);
+				    (u32)kvm_register_read(vcpu, reg), handler);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		}
@@ -2472,7 +2462,6 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
 	dr = exit_qualification & 7;
 	reg = (exit_qualification >> 8) & 15;
-	vcpu_load_rsp_rip(vcpu);
 	if (exit_qualification & 16) {
 		/* mov from dr */
 		switch (dr) {
@@ -2485,12 +2474,11 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		default:
 			val = 0;
 		}
-		vcpu->arch.regs[reg] = val;
+		kvm_register_write(vcpu, reg, val);
 		KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
 	} else {
 		/* mov to dr */
 	}
-	vcpu_put_rsp_rip(vcpu);
 	skip_emulated_instruction(vcpu);
 	return 1;
 }
@@ -2735,8 +2723,8 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 vectoring_info = vmx->idt_vectoring_info;
 
-	KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)vmcs_readl(GUEST_RIP),
-		    (u32)((u64)vmcs_readl(GUEST_RIP) >> 32), entryexit);
+	KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)kvm_rip_read(vcpu),
+		    (u32)((u64)kvm_rip_read(vcpu) >> 32), entryexit);
 
 	/* Access CR3 don't cause VMExit in paging mode, so we need
 	 * to sync with guest real CR3. */
@@ -2922,9 +2910,9 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
 static void fixup_rmode_irq(struct vcpu_vmx *vmx)
 {
 	vmx->rmode.irq.pending = 0;
-	if (vmcs_readl(GUEST_RIP) + 1 != vmx->rmode.irq.rip)
+	if (kvm_rip_read(&vmx->vcpu) + 1 != vmx->rmode.irq.rip)
 		return;
-	vmcs_writel(GUEST_RIP, vmx->rmode.irq.rip);
+	kvm_rip_write(&vmx->vcpu, vmx->rmode.irq.rip);
 	if (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) {
 		vmx->idt_vectoring_info &= ~VECTORING_INFO_TYPE_MASK;
 		vmx->idt_vectoring_info |= INTR_TYPE_EXT_INTR;
@@ -2941,6 +2929,11 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 intr_info;
 
+	if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
+		vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
+	if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
+		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
+
 	/*
 	 * Loading guest fpu may have cleared host cr0.ts
 	 */
@@ -3061,6 +3054,9 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 #endif
 	      );
 
+	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
+	vcpu->arch.regs_dirty = 0;
+
 	vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
 	if (vmx->rmode.irq.pending)
 		fixup_rmode_irq(vmx);
@@ -3224,8 +3220,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.set_idt = vmx_set_idt,
 	.get_gdt = vmx_get_gdt,
 	.set_gdt = vmx_set_gdt,
-	.cache_regs = vcpu_load_rsp_rip,
-	.decache_regs = vcpu_put_rsp_rip,
+	.cache_reg = vmx_cache_reg,
 	.get_rflags = vmx_get_rflags,
 	.set_rflags = vmx_set_rflags,
 
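The VMX side is where the caching pays off: vmx_vcpu_run() writes GUEST_RSP/GUEST_RIP back only when their dirty bits are set, and after the exit it clears exactly those two bits in regs_avail, because the vmentry assembly has already spilled the general-purpose registers into vcpu->arch.regs while RSP and RIP stay in the VMCS until first use. A quick standalone check of the mask arithmetic; the enum ordering here is an assumption for illustration, the real values come from the kvm headers:

#include <stdio.h>

enum kvm_reg { VCPU_REGS_RAX, VCPU_REGS_RCX, VCPU_REGS_RDX, VCPU_REGS_RBX,
	       VCPU_REGS_RSP, VCPU_REGS_RBP, VCPU_REGS_RSI, VCPU_REGS_RDI,
	       VCPU_REGS_RIP, NR_VCPU_REGS };

int main(void)
{
	unsigned long avail = ~((1UL << VCPU_REGS_RIP) | (1UL << VCPU_REGS_RSP));

	/* Every bit except RSP and RIP is set; the first read of either of
	 * those two after a vmexit goes through vmx_cache_reg() and costs
	 * one vmcs_readl(). */
	printf("regs_avail = %#lx\n", avail);
	return 0;
}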
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0d682fc6aeb3..2f0696bc7d2f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -19,6 +19,7 @@
 #include "mmu.h"
 #include "i8254.h"
 #include "tss.h"
+#include "kvm_cache_regs.h"
 
 #include <linux/clocksource.h>
 #include <linux/kvm.h>
@@ -61,6 +62,7 @@ static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
 				    struct kvm_cpuid_entry2 __user *entries);
 
 struct kvm_x86_ops *kvm_x86_ops;
+EXPORT_SYMBOL_GPL(kvm_x86_ops);
 
 struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "pf_fixed", VCPU_STAT(pf_fixed) },
@@ -2080,7 +2082,7 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
 void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
 {
 	u8 opcodes[4];
-	unsigned long rip = vcpu->arch.rip;
+	unsigned long rip = kvm_rip_read(vcpu);
 	unsigned long rip_linear;
 
 	if (!printk_ratelimit())
@@ -2102,6 +2104,14 @@ static struct x86_emulate_ops emulate_ops = {
 	.cmpxchg_emulated    = emulator_cmpxchg_emulated,
 };
 
+static void cache_all_regs(struct kvm_vcpu *vcpu)
+{
+	kvm_register_read(vcpu, VCPU_REGS_RAX);
+	kvm_register_read(vcpu, VCPU_REGS_RSP);
+	kvm_register_read(vcpu, VCPU_REGS_RIP);
+	vcpu->arch.regs_dirty = ~0;
+}
+
 int emulate_instruction(struct kvm_vcpu *vcpu,
 			struct kvm_run *run,
 			unsigned long cr2,
@@ -2112,7 +2122,13 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
 	struct decode_cache *c;
 
 	vcpu->arch.mmio_fault_cr2 = cr2;
-	kvm_x86_ops->cache_regs(vcpu);
+	/*
+	 * TODO: fix x86_emulate.c to use guest_read/write_register
+	 * instead of direct ->regs accesses, can save hundred cycles
+	 * on Intel for instructions that don't read/change RSP, for
+	 * for example.
+	 */
+	cache_all_regs(vcpu);
 
 	vcpu->mmio_is_write = 0;
 	vcpu->arch.pio.string = 0;
@@ -2172,7 +2188,6 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
 		return EMULATE_DO_MMIO;
 	}
 
-	kvm_x86_ops->decache_regs(vcpu);
 	kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
 
 	if (vcpu->mmio_is_write) {
@@ -2225,20 +2240,19 @@ int complete_pio(struct kvm_vcpu *vcpu)
 	struct kvm_pio_request *io = &vcpu->arch.pio;
 	long delta;
 	int r;
-
-	kvm_x86_ops->cache_regs(vcpu);
+	unsigned long val;
 
 	if (!io->string) {
-		if (io->in)
-			memcpy(&vcpu->arch.regs[VCPU_REGS_RAX], vcpu->arch.pio_data,
-			       io->size);
+		if (io->in) {
+			val = kvm_register_read(vcpu, VCPU_REGS_RAX);
+			memcpy(&val, vcpu->arch.pio_data, io->size);
+			kvm_register_write(vcpu, VCPU_REGS_RAX, val);
+		}
 	} else {
 		if (io->in) {
 			r = pio_copy_data(vcpu);
-			if (r) {
-				kvm_x86_ops->cache_regs(vcpu);
+			if (r)
 				return r;
-			}
 		}
 
 		delta = 1;
@@ -2248,19 +2262,24 @@ int complete_pio(struct kvm_vcpu *vcpu)
 		 * The size of the register should really depend on
 		 * current address size.
 		 */
-		vcpu->arch.regs[VCPU_REGS_RCX] -= delta;
+		val = kvm_register_read(vcpu, VCPU_REGS_RCX);
+		val -= delta;
+		kvm_register_write(vcpu, VCPU_REGS_RCX, val);
 	}
 	if (io->down)
 		delta = -delta;
 	delta *= io->size;
-	if (io->in)
-		vcpu->arch.regs[VCPU_REGS_RDI] += delta;
-	else
-		vcpu->arch.regs[VCPU_REGS_RSI] += delta;
+	if (io->in) {
+		val = kvm_register_read(vcpu, VCPU_REGS_RDI);
+		val += delta;
+		kvm_register_write(vcpu, VCPU_REGS_RDI, val);
+	} else {
+		val = kvm_register_read(vcpu, VCPU_REGS_RSI);
+		val += delta;
+		kvm_register_write(vcpu, VCPU_REGS_RSI, val);
+	}
 	}
 
-	kvm_x86_ops->decache_regs(vcpu);
-
 	io->count -= io->cur_count;
 	io->cur_count = 0;
 
@@ -2313,6 +2332,7 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 			 int size, unsigned port)
 {
 	struct kvm_io_device *pio_dev;
+	unsigned long val;
 
 	vcpu->run->exit_reason = KVM_EXIT_IO;
 	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
@@ -2333,8 +2353,8 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 	KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
 		    handler);
 
-	kvm_x86_ops->cache_regs(vcpu);
-	memcpy(vcpu->arch.pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4);
+	val = kvm_register_read(vcpu, VCPU_REGS_RAX);
+	memcpy(vcpu->arch.pio_data, &val, 4);
 
 	kvm_x86_ops->skip_emulated_instruction(vcpu);
 
@@ -2519,13 +2539,11 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 	unsigned long nr, a0, a1, a2, a3, ret;
 	int r = 1;
 
-	kvm_x86_ops->cache_regs(vcpu);
-
-	nr = vcpu->arch.regs[VCPU_REGS_RAX];
-	a0 = vcpu->arch.regs[VCPU_REGS_RBX];
-	a1 = vcpu->arch.regs[VCPU_REGS_RCX];
-	a2 = vcpu->arch.regs[VCPU_REGS_RDX];
-	a3 = vcpu->arch.regs[VCPU_REGS_RSI];
+	nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
+	a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
+	a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
+	a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
+	a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
 
 	KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler);
 
@@ -2548,8 +2566,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 		ret = -KVM_ENOSYS;
 		break;
 	}
-	vcpu->arch.regs[VCPU_REGS_RAX] = ret;
-	kvm_x86_ops->decache_regs(vcpu);
+	kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
 	++vcpu->stat.hypercalls;
 	return r;
 }
@@ -2559,6 +2576,7 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
 {
 	char instruction[3];
 	int ret = 0;
+	unsigned long rip = kvm_rip_read(vcpu);
 
 
 	/*
@@ -2568,9 +2586,8 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
 	 */
 	kvm_mmu_zap_all(vcpu->kvm);
 
-	kvm_x86_ops->cache_regs(vcpu);
 	kvm_x86_ops->patch_hypercall(vcpu, instruction);
-	if (emulator_write_emulated(vcpu->arch.rip, instruction, 3, vcpu)
+	if (emulator_write_emulated(rip, instruction, 3, vcpu)
 	    != X86EMUL_CONTINUE)
 		ret = -EFAULT;
 
@@ -2700,13 +2717,12 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
 	u32 function, index;
 	struct kvm_cpuid_entry2 *e, *best;
 
-	kvm_x86_ops->cache_regs(vcpu);
-	function = vcpu->arch.regs[VCPU_REGS_RAX];
-	index = vcpu->arch.regs[VCPU_REGS_RCX];
-	vcpu->arch.regs[VCPU_REGS_RAX] = 0;
-	vcpu->arch.regs[VCPU_REGS_RBX] = 0;
-	vcpu->arch.regs[VCPU_REGS_RCX] = 0;
-	vcpu->arch.regs[VCPU_REGS_RDX] = 0;
+	function = kvm_register_read(vcpu, VCPU_REGS_RAX);
+	index = kvm_register_read(vcpu, VCPU_REGS_RCX);
+	kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
+	kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
+	kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
+	kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
 	best = NULL;
 	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
 		e = &vcpu->arch.cpuid_entries[i];
@@ -2724,18 +2740,17 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
 			best = e;
 	}
 	if (best) {
-		vcpu->arch.regs[VCPU_REGS_RAX] = best->eax;
-		vcpu->arch.regs[VCPU_REGS_RBX] = best->ebx;
-		vcpu->arch.regs[VCPU_REGS_RCX] = best->ecx;
-		vcpu->arch.regs[VCPU_REGS_RDX] = best->edx;
+		kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
+		kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
+		kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
+		kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
 	}
-	kvm_x86_ops->decache_regs(vcpu);
 	kvm_x86_ops->skip_emulated_instruction(vcpu);
 	KVMTRACE_5D(CPUID, vcpu, function,
-		    (u32)vcpu->arch.regs[VCPU_REGS_RAX],
-		    (u32)vcpu->arch.regs[VCPU_REGS_RBX],
-		    (u32)vcpu->arch.regs[VCPU_REGS_RCX],
-		    (u32)vcpu->arch.regs[VCPU_REGS_RDX], handler);
+		    (u32)kvm_register_read(vcpu, VCPU_REGS_RAX),
+		    (u32)kvm_register_read(vcpu, VCPU_REGS_RBX),
+		    (u32)kvm_register_read(vcpu, VCPU_REGS_RCX),
+		    (u32)kvm_register_read(vcpu, VCPU_REGS_RDX), handler);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
 
@@ -2917,8 +2932,8 @@ again:
 	 * Profile KVM exit RIPs:
 	 */
 	if (unlikely(prof_on == KVM_PROFILING)) {
-		kvm_x86_ops->cache_regs(vcpu);
-		profile_hit(KVM_PROFILING, (void *)vcpu->arch.rip);
+		unsigned long rip = kvm_rip_read(vcpu);
+		profile_hit(KVM_PROFILING, (void *)rip);
 	}
 
 	if (vcpu->arch.exception.pending && kvm_x86_ops->exception_injected(vcpu))
@@ -2999,11 +3014,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		}
 	}
 #endif
-	if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
-		kvm_x86_ops->cache_regs(vcpu);
-		vcpu->arch.regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
-		kvm_x86_ops->decache_regs(vcpu);
-	}
+	if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
+		kvm_register_write(vcpu, VCPU_REGS_RAX,
+				   kvm_run->hypercall.ret);
 
 	r = __vcpu_run(vcpu, kvm_run);
 
@@ -3019,28 +3032,26 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
 	vcpu_load(vcpu);
 
-	kvm_x86_ops->cache_regs(vcpu);
-
-	regs->rax = vcpu->arch.regs[VCPU_REGS_RAX];
-	regs->rbx = vcpu->arch.regs[VCPU_REGS_RBX];
-	regs->rcx = vcpu->arch.regs[VCPU_REGS_RCX];
-	regs->rdx = vcpu->arch.regs[VCPU_REGS_RDX];
-	regs->rsi = vcpu->arch.regs[VCPU_REGS_RSI];
-	regs->rdi = vcpu->arch.regs[VCPU_REGS_RDI];
-	regs->rsp = vcpu->arch.regs[VCPU_REGS_RSP];
-	regs->rbp = vcpu->arch.regs[VCPU_REGS_RBP];
+	regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
+	regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
+	regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
+	regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
+	regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
+	regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
+	regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
+	regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
 #ifdef CONFIG_X86_64
-	regs->r8 = vcpu->arch.regs[VCPU_REGS_R8];
-	regs->r9 = vcpu->arch.regs[VCPU_REGS_R9];
-	regs->r10 = vcpu->arch.regs[VCPU_REGS_R10];
-	regs->r11 = vcpu->arch.regs[VCPU_REGS_R11];
-	regs->r12 = vcpu->arch.regs[VCPU_REGS_R12];
-	regs->r13 = vcpu->arch.regs[VCPU_REGS_R13];
-	regs->r14 = vcpu->arch.regs[VCPU_REGS_R14];
-	regs->r15 = vcpu->arch.regs[VCPU_REGS_R15];
+	regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
+	regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
+	regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
+	regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
+	regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
+	regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
+	regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
+	regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
 #endif
 
-	regs->rip = vcpu->arch.rip;
+	regs->rip = kvm_rip_read(vcpu);
 	regs->rflags = kvm_x86_ops->get_rflags(vcpu);
 
 	/*
@@ -3058,29 +3069,29 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
 	vcpu_load(vcpu);
 
-	vcpu->arch.regs[VCPU_REGS_RAX] = regs->rax;
-	vcpu->arch.regs[VCPU_REGS_RBX] = regs->rbx;
-	vcpu->arch.regs[VCPU_REGS_RCX] = regs->rcx;
-	vcpu->arch.regs[VCPU_REGS_RDX] = regs->rdx;
-	vcpu->arch.regs[VCPU_REGS_RSI] = regs->rsi;
-	vcpu->arch.regs[VCPU_REGS_RDI] = regs->rdi;
-	vcpu->arch.regs[VCPU_REGS_RSP] = regs->rsp;
-	vcpu->arch.regs[VCPU_REGS_RBP] = regs->rbp;
+	kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
+	kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
+	kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
+	kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
+	kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
+	kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
+	kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
+	kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
 #ifdef CONFIG_X86_64
-	vcpu->arch.regs[VCPU_REGS_R8] = regs->r8;
-	vcpu->arch.regs[VCPU_REGS_R9] = regs->r9;
-	vcpu->arch.regs[VCPU_REGS_R10] = regs->r10;
-	vcpu->arch.regs[VCPU_REGS_R11] = regs->r11;
-	vcpu->arch.regs[VCPU_REGS_R12] = regs->r12;
-	vcpu->arch.regs[VCPU_REGS_R13] = regs->r13;
-	vcpu->arch.regs[VCPU_REGS_R14] = regs->r14;
-	vcpu->arch.regs[VCPU_REGS_R15] = regs->r15;
+	kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
+	kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
+	kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
+	kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
+	kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
+	kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
+	kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
+	kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
+
 #endif
 
-	vcpu->arch.rip = regs->rip;
+	kvm_rip_write(vcpu, regs->rip);
 	kvm_x86_ops->set_rflags(vcpu, regs->rflags);
 
-	kvm_x86_ops->decache_regs(vcpu);
 
 	vcpu->arch.exception.pending = false;
 
@@ -3316,17 +3327,16 @@ static void save_state_to_tss32(struct kvm_vcpu *vcpu,
 				struct tss_segment_32 *tss)
 {
 	tss->cr3 = vcpu->arch.cr3;
-	tss->eip = vcpu->arch.rip;
+	tss->eip = kvm_rip_read(vcpu);
 	tss->eflags = kvm_x86_ops->get_rflags(vcpu);
-	tss->eax = vcpu->arch.regs[VCPU_REGS_RAX];
-	tss->ecx = vcpu->arch.regs[VCPU_REGS_RCX];
-	tss->edx = vcpu->arch.regs[VCPU_REGS_RDX];
-	tss->ebx = vcpu->arch.regs[VCPU_REGS_RBX];
-	tss->esp = vcpu->arch.regs[VCPU_REGS_RSP];
-	tss->ebp = vcpu->arch.regs[VCPU_REGS_RBP];
-	tss->esi = vcpu->arch.regs[VCPU_REGS_RSI];
-	tss->edi = vcpu->arch.regs[VCPU_REGS_RDI];
-
+	tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
+	tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
+	tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX);
+	tss->ebx = kvm_register_read(vcpu, VCPU_REGS_RBX);
+	tss->esp = kvm_register_read(vcpu, VCPU_REGS_RSP);
+	tss->ebp = kvm_register_read(vcpu, VCPU_REGS_RBP);
+	tss->esi = kvm_register_read(vcpu, VCPU_REGS_RSI);
+	tss->edi = kvm_register_read(vcpu, VCPU_REGS_RDI);
 	tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
 	tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
 	tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
@@ -3342,17 +3352,17 @@ static int load_state_from_tss32(struct kvm_vcpu *vcpu,
 {
 	kvm_set_cr3(vcpu, tss->cr3);
 
-	vcpu->arch.rip = tss->eip;
+	kvm_rip_write(vcpu, tss->eip);
 	kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2);
 
-	vcpu->arch.regs[VCPU_REGS_RAX] = tss->eax;
-	vcpu->arch.regs[VCPU_REGS_RCX] = tss->ecx;
-	vcpu->arch.regs[VCPU_REGS_RDX] = tss->edx;
-	vcpu->arch.regs[VCPU_REGS_RBX] = tss->ebx;
-	vcpu->arch.regs[VCPU_REGS_RSP] = tss->esp;
-	vcpu->arch.regs[VCPU_REGS_RBP] = tss->ebp;
-	vcpu->arch.regs[VCPU_REGS_RSI] = tss->esi;
-	vcpu->arch.regs[VCPU_REGS_RDI] = tss->edi;
+	kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax);
+	kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx);
+	kvm_register_write(vcpu, VCPU_REGS_RDX, tss->edx);
+	kvm_register_write(vcpu, VCPU_REGS_RBX, tss->ebx);
+	kvm_register_write(vcpu, VCPU_REGS_RSP, tss->esp);
+	kvm_register_write(vcpu, VCPU_REGS_RBP, tss->ebp);
+	kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
+	kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);
 
 	if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
 		return 1;
@@ -3380,16 +3390,16 @@ static int load_state_from_tss32(struct kvm_vcpu *vcpu,
 static void save_state_to_tss16(struct kvm_vcpu *vcpu,
 				struct tss_segment_16 *tss)
 {
-	tss->ip = vcpu->arch.rip;
+	tss->ip = kvm_rip_read(vcpu);
 	tss->flag = kvm_x86_ops->get_rflags(vcpu);
-	tss->ax = vcpu->arch.regs[VCPU_REGS_RAX];
-	tss->cx = vcpu->arch.regs[VCPU_REGS_RCX];
-	tss->dx = vcpu->arch.regs[VCPU_REGS_RDX];
-	tss->bx = vcpu->arch.regs[VCPU_REGS_RBX];
-	tss->sp = vcpu->arch.regs[VCPU_REGS_RSP];
-	tss->bp = vcpu->arch.regs[VCPU_REGS_RBP];
-	tss->si = vcpu->arch.regs[VCPU_REGS_RSI];
-	tss->di = vcpu->arch.regs[VCPU_REGS_RDI];
+	tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX);
+	tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX);
+	tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX);
+	tss->bx = kvm_register_read(vcpu, VCPU_REGS_RBX);
+	tss->sp = kvm_register_read(vcpu, VCPU_REGS_RSP);
+	tss->bp = kvm_register_read(vcpu, VCPU_REGS_RBP);
+	tss->si = kvm_register_read(vcpu, VCPU_REGS_RSI);
+	tss->di = kvm_register_read(vcpu, VCPU_REGS_RDI);
 
 	tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
 	tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
@@ -3402,16 +3412,16 @@ static void save_state_to_tss16(struct kvm_vcpu *vcpu,
 static int load_state_from_tss16(struct kvm_vcpu *vcpu,
 				 struct tss_segment_16 *tss)
 {
-	vcpu->arch.rip = tss->ip;
+	kvm_rip_write(vcpu, tss->ip);
 	kvm_x86_ops->set_rflags(vcpu, tss->flag | 2);
-	vcpu->arch.regs[VCPU_REGS_RAX] = tss->ax;
-	vcpu->arch.regs[VCPU_REGS_RCX] = tss->cx;
-	vcpu->arch.regs[VCPU_REGS_RDX] = tss->dx;
-	vcpu->arch.regs[VCPU_REGS_RBX] = tss->bx;
-	vcpu->arch.regs[VCPU_REGS_RSP] = tss->sp;
-	vcpu->arch.regs[VCPU_REGS_RBP] = tss->bp;
-	vcpu->arch.regs[VCPU_REGS_RSI] = tss->si;
-	vcpu->arch.regs[VCPU_REGS_RDI] = tss->di;
+	kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax);
+	kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx);
+	kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx);
+	kvm_register_write(vcpu, VCPU_REGS_RBX, tss->bx);
+	kvm_register_write(vcpu, VCPU_REGS_RSP, tss->sp);
+	kvm_register_write(vcpu, VCPU_REGS_RBP, tss->bp);
+	kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
+	kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);
 
 	if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
 		return 1;
@@ -3534,7 +3544,6 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 	}
 
 	kvm_x86_ops->skip_emulated_instruction(vcpu);
-	kvm_x86_ops->cache_regs(vcpu);
 
 	if (nseg_desc.type & 8)
 		ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_base,
@@ -3559,7 +3568,6 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 	tr_seg.type = 11;
 	kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
 out:
-	kvm_x86_ops->decache_regs(vcpu);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kvm_task_switch);
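One subtlety in the complete_pio() rewrite above is worth spelling out: for a sub-word "in", the old code memcpy'd into the low bytes of vcpu->arch.regs[VCPU_REGS_RAX] in place, while the new code reads the whole register into a local, overlays only io->size bytes, and writes the merged value back, so the upper bytes of RAX survive exactly as before. A standalone illustration of the overlay; the values and the two-byte size are made up:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned long rax = 0x1122334455667788UL;	/* cached guest RAX */
	unsigned char pio_data[2] = { 0xcd, 0xab };	/* 2-byte inw result */
	unsigned long val = rax;		/* kvm_register_read() step */

	memcpy(&val, pio_data, sizeof(pio_data));	/* overlay low bytes */
	/* On little-endian x86 this prints 0x112233445566abcd: the upper
	 * six bytes of RAX are preserved, matching the old in-place copy. */
	printf("%#lx\n", val);
	return 0;
}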
diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c
index f2f90468f8b1..d5da7f14d536 100644
--- a/arch/x86/kvm/x86_emulate.c
+++ b/arch/x86/kvm/x86_emulate.c
@@ -26,6 +26,7 @@
 #define DPRINTF(_f, _a ...) printf(_f , ## _a)
 #else
 #include <linux/kvm_host.h>
+#include "kvm_cache_regs.h"
 #define DPRINTF(x...) do {} while (0)
 #endif
 #include <linux/module.h>
@@ -839,7 +840,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	/* Shadow copy of register state. Committed on successful emulation. */
 
 	memset(c, 0, sizeof(struct decode_cache));
-	c->eip = ctxt->vcpu->arch.rip;
+	c->eip = kvm_rip_read(ctxt->vcpu);
 	ctxt->cs_base = seg_base(ctxt, VCPU_SREG_CS);
 	memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
 
@@ -1267,7 +1268,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	if (c->rep_prefix && (c->d & String)) {
 		/* All REP prefixes have the same first termination condition */
 		if (c->regs[VCPU_REGS_RCX] == 0) {
-			ctxt->vcpu->arch.rip = c->eip;
+			kvm_rip_write(ctxt->vcpu, c->eip);
 			goto done;
 		}
 		/* The second termination condition only applies for REPE
@@ -1281,17 +1282,17 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 		    (c->b == 0xae) || (c->b == 0xaf)) {
 			if ((c->rep_prefix == REPE_PREFIX) &&
 			    ((ctxt->eflags & EFLG_ZF) == 0)) {
-				ctxt->vcpu->arch.rip = c->eip;
+				kvm_rip_write(ctxt->vcpu, c->eip);
 				goto done;
 			}
 			if ((c->rep_prefix == REPNE_PREFIX) &&
 			    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF)) {
-				ctxt->vcpu->arch.rip = c->eip;
+				kvm_rip_write(ctxt->vcpu, c->eip);
 				goto done;
 			}
 		}
 		c->regs[VCPU_REGS_RCX]--;
-		c->eip = ctxt->vcpu->arch.rip;
+		c->eip = kvm_rip_read(ctxt->vcpu);
 	}
 
 	if (c->src.type == OP_MEM) {
@@ -1768,7 +1769,7 @@ writeback:
 
 	/* Commit shadow register state. */
 	memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs);
-	ctxt->vcpu->arch.rip = c->eip;
+	kvm_rip_write(ctxt->vcpu, c->eip);
 
 done:
 	if (rc == X86EMUL_UNHANDLEABLE) {
@@ -1793,7 +1794,7 @@ twobyte_insn:
 			goto done;
 
 		/* Let the processor re-execute the fixed hypercall */
-		c->eip = ctxt->vcpu->arch.rip;
+		c->eip = kvm_rip_read(ctxt->vcpu);
 		/* Disable writeback. */
 		c->dst.type = OP_NONE;
 		break;
@@ -1889,7 +1890,7 @@ twobyte_insn:
 		rc = kvm_set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data);
 		if (rc) {
 			kvm_inject_gp(ctxt->vcpu, 0);
-			c->eip = ctxt->vcpu->arch.rip;
+			c->eip = kvm_rip_read(ctxt->vcpu);
 		}
 		rc = X86EMUL_CONTINUE;
 		c->dst.type = OP_NONE;
@@ -1899,7 +1900,7 @@ twobyte_insn:
 		rc = kvm_get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data);
 		if (rc) {
 			kvm_inject_gp(ctxt->vcpu, 0);
-			c->eip = ctxt->vcpu->arch.rip;
+			c->eip = kvm_rip_read(ctxt->vcpu);
 		} else {
 			c->regs[VCPU_REGS_RAX] = (u32)msr_data;
 			c->regs[VCPU_REGS_RDX] = msr_data >> 32;