author    Marcelo Tosatti <mtosatti@redhat.com>	2008-06-27 13:58:02 -0400
committer Avi Kivity <avi@qumranet.com>	2008-10-15 04:13:57 -0400
commit    5fdbf9765b7ba6a45100851154768de703d51e76 (patch)
tree      ec34ec9357575dc4190e5228a6eabfd5f81b66a5 /arch/x86/kvm/svm.c
parent    ca60dfbb69afb549e33527cbf676e4daf8febfb5 (diff)
KVM: x86: accessors for guest registers
As suggested by Avi, introduce accessors to read/write guest registers.
This simplifies the ->cache_regs/->decache_regs interface, and improves
register caching, which is important for VMX, where the cost of
vmcs_read/vmcs_write is significant.

[avi: fix warnings]

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
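For context, a minimal sketch of the accessors this patch introduces in the new
kvm_cache_regs.h header (included by the diff below). The regs[], regs_avail and
regs_dirty fields and the kvm_rip_read/kvm_rip_write names are taken from the
diff itself; the enum name, the ->cache_reg backend hook and the exact bitmask
bookkeeping are assumptions inferred from how those fields are used here:

/* Sketch only -- not the verbatim header added by this commit. */
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
					      enum kvm_reg reg)
{
	/* If this register has not yet been pulled out of hardware state
	 * (e.g. the VMCS on VMX), ask the backend to fetch it now. */
	if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, reg);

	return vcpu->arch.regs[reg];
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				      enum kvm_reg reg, unsigned long val)
{
	vcpu->arch.regs[reg] = val;
	/* Mark the register valid and dirty so the backend writes it back
	 * before the next guest entry. */
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}

The lazy-fill path matters for VMX; SVM, as the hunks below show, simply syncs
RAX/RSP/RIP between vcpu->arch.regs[] and the VMCB around each guest entry.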
Diffstat (limited to 'arch/x86/kvm/svm.c')
-rw-r--r--	arch/x86/kvm/svm.c	56
1 file changed, 23 insertions(+), 33 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 8233b86c778c..54b0bf33e21e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -18,6 +18,7 @@
 #include "kvm_svm.h"
 #include "irq.h"
 #include "mmu.h"
+#include "kvm_cache_regs.h"
 
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -236,13 +237,11 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 		printk(KERN_DEBUG "%s: NOP\n", __func__);
 		return;
 	}
-	if (svm->next_rip - svm->vmcb->save.rip > MAX_INST_SIZE)
-		printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n",
-		       __func__,
-		       svm->vmcb->save.rip,
-		       svm->next_rip);
+	if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
+		printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
+		       __func__, kvm_rip_read(vcpu), svm->next_rip);
 
-	vcpu->arch.rip = svm->vmcb->save.rip = svm->next_rip;
+	kvm_rip_write(vcpu, svm->next_rip);
 	svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
 
 	vcpu->arch.interrupt_window_open = 1;
@@ -581,6 +580,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 	save->dr7 = 0x400;
 	save->rflags = 2;
 	save->rip = 0x0000fff0;
+	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
 
 	/*
 	 * cr0 val on cpu init should be 0x60000010, we enable cpu
@@ -615,10 +615,12 @@ static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
 	init_vmcb(svm);
 
 	if (vcpu->vcpu_id != 0) {
-		svm->vmcb->save.rip = 0;
+		kvm_rip_write(vcpu, 0);
 		svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
 		svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
 	}
+	vcpu->arch.regs_avail = ~0;
+	vcpu->arch.regs_dirty = ~0;
 
 	return 0;
 }
@@ -721,23 +723,6 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 	rdtscll(vcpu->arch.host_tsc);
 }
 
-static void svm_cache_regs(struct kvm_vcpu *vcpu)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-
-	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
-	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
-	vcpu->arch.rip = svm->vmcb->save.rip;
-}
-
-static void svm_decache_regs(struct kvm_vcpu *vcpu)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
-	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
-	svm->vmcb->save.rip = vcpu->arch.rip;
-}
-
 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
 {
 	return to_svm(vcpu)->vmcb->save.rflags;
@@ -1139,14 +1124,14 @@ static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 
 static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-	svm->next_rip = svm->vmcb->save.rip + 1;
+	svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
 	skip_emulated_instruction(&svm->vcpu);
 	return kvm_emulate_halt(&svm->vcpu);
 }
 
 static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-	svm->next_rip = svm->vmcb->save.rip + 3;
+	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
 	skip_emulated_instruction(&svm->vcpu);
 	kvm_emulate_hypercall(&svm->vcpu);
 	return 1;
@@ -1178,7 +1163,7 @@ static int task_switch_interception(struct vcpu_svm *svm,
 
 static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-	svm->next_rip = svm->vmcb->save.rip + 2;
+	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
 	kvm_emulate_cpuid(&svm->vcpu);
 	return 1;
 }
@@ -1273,9 +1258,9 @@ static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 		KVMTRACE_3D(MSR_READ, &svm->vcpu, ecx, (u32)data,
 			    (u32)(data >> 32), handler);
 
-		svm->vmcb->save.rax = data & 0xffffffff;
+		svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
 		svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
-		svm->next_rip = svm->vmcb->save.rip + 2;
+		svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
 		skip_emulated_instruction(&svm->vcpu);
 	}
 	return 1;
@@ -1359,13 +1344,13 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
 	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
-	u64 data = (svm->vmcb->save.rax & -1u)
+	u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
 		| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
 
 	KVMTRACE_3D(MSR_WRITE, &svm->vcpu, ecx, (u32)data, (u32)(data >> 32),
 		    handler);
 
-	svm->next_rip = svm->vmcb->save.rip + 2;
+	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
 	if (svm_set_msr(&svm->vcpu, ecx, data))
 		kvm_inject_gp(&svm->vcpu, 0);
 	else
@@ -1723,6 +1708,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	u16 gs_selector;
 	u16 ldt_selector;
 
+	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
+	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
+	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
+
 	pre_svm_run(svm);
 
 	sync_lapic_to_cr8(vcpu);
@@ -1858,6 +1847,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	load_db_regs(svm->host_db_regs);
 
 	vcpu->arch.cr2 = svm->vmcb->save.cr2;
+	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
+	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
+	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
 
 	write_dr6(svm->host_dr6);
 	write_dr7(svm->host_dr7);
@@ -1977,8 +1969,6 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.set_gdt = svm_set_gdt,
 	.get_dr = svm_get_dr,
 	.set_dr = svm_set_dr,
-	.cache_regs = svm_cache_regs,
-	.decache_regs = svm_decache_regs,
 	.get_rflags = svm_get_rflags,
 	.set_rflags = svm_set_rflags,
 
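As a usage note (illustrative only, not code from this patch): with the
accessors in place, common code no longer has to bracket register access with
->cache_regs/->decache_regs; it reads and writes through the per-vcpu cache and
lets the backend sync with hardware state as needed, roughly like this:

/* Hypothetical caller -- shows the intended pattern, names as assumed above. */
unsigned long rip = kvm_rip_read(vcpu);        /* fills the cache on demand */
kvm_register_write(vcpu, VCPU_REGS_RAX, 0);    /* marks RAX dirty for write-back */
kvm_rip_write(vcpu, rip + 2);                  /* e.g. skip a two-byte instruction */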