author     Marcelo Tosatti <mtosatti@redhat.com>   2008-06-27 13:58:02 -0400
committer  Avi Kivity <avi@qumranet.com>           2008-10-15 04:13:57 -0400
commit     5fdbf9765b7ba6a45100851154768de703d51e76 (patch)
tree       ec34ec9357575dc4190e5228a6eabfd5f81b66a5 /arch/x86/kvm/vmx.c
parent     ca60dfbb69afb549e33527cbf676e4daf8febfb5 (diff)
KVM: x86: accessors for guest registers
As suggested by Avi, introduce accessors to read/write guest registers.
This simplifies the ->cache_regs/->decache_regs interface, and improves
register caching, which is important for VMX, where the cost of
vmcs_read/vmcs_write is significant.

[avi: fix warnings]

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
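The accessors named in the diff (kvm_register_read/kvm_register_write and the
kvm_rip_read/kvm_rip_write wrappers) live in the new kvm_cache_regs.h header,
which is outside this file's diffstat. The following is only a rough sketch of
how those helpers plausibly work, inferred from the regs_avail/regs_dirty
bitmaps and the ->cache_reg callback used in the hunks below; it is not a
verbatim copy of the header.

/*
 * Sketch (not shown in this diff): lazy read-through / write-back caching
 * of guest registers on top of vcpu->arch.regs[].
 */
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
                                              enum kvm_reg reg)
{
        /* Fetch from the VMCS only on first use after a vmexit. */
        if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
                kvm_x86_ops->cache_reg(vcpu, reg);

        return vcpu->arch.regs[reg];
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu,
                                      enum kvm_reg reg, unsigned long val)
{
        /*
         * Cache the new value and mark it dirty so it is written back to
         * the VMCS before the next vmentry (see vmx_vcpu_run below).
         */
        vcpu->arch.regs[reg] = val;
        __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
        __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
        return kvm_register_read(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
        kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}

With this scheme the VMCS is touched at most once per register per exit:
reads go through the cache, writes are deferred until vmentry, and
skip_emulated_instruction() becomes a read-modify-write on the cached RIP.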
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--   arch/x86/kvm/vmx.c | 103
1 file changed, 49 insertions(+), 54 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 81dac721ec31..5a3a0326c277 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -26,6 +26,7 @@
 #include <linux/highmem.h>
 #include <linux/sched.h>
 #include <linux/moduleparam.h>
+#include "kvm_cache_regs.h"
 
 #include <asm/io.h>
 #include <asm/desc.h>
@@ -715,9 +716,9 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
         unsigned long rip;
         u32 interruptibility;
 
-        rip = vmcs_readl(GUEST_RIP);
+        rip = kvm_rip_read(vcpu);
         rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
-        vmcs_writel(GUEST_RIP, rip);
+        kvm_rip_write(vcpu, rip);
 
         /*
          * We emulated an instruction, so temporary interrupt blocking
@@ -947,24 +948,19 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
         return ret;
 }
 
-/*
- * Sync the rsp and rip registers into the vcpu structure. This allows
- * registers to be accessed by indexing vcpu->arch.regs.
- */
-static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu)
-{
-        vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
-        vcpu->arch.rip = vmcs_readl(GUEST_RIP);
-}
-
-/*
- * Syncs rsp and rip back into the vmcs. Should be called after possible
- * modification.
- */
-static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu)
+static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 {
-        vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
-        vmcs_writel(GUEST_RIP, vcpu->arch.rip);
+        __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+        switch (reg) {
+        case VCPU_REGS_RSP:
+                vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
+                break;
+        case VCPU_REGS_RIP:
+                vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
+                break;
+        default:
+                break;
+        }
 }
 
 static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
@@ -2019,6 +2015,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
         u64 msr;
         int ret;
 
+        vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
         down_read(&vcpu->kvm->slots_lock);
         if (!init_rmode(vmx->vcpu.kvm)) {
                 ret = -ENOMEM;
@@ -2072,10 +2069,10 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 
         vmcs_writel(GUEST_RFLAGS, 0x02);
         if (vmx->vcpu.vcpu_id == 0)
-                vmcs_writel(GUEST_RIP, 0xfff0);
+                kvm_rip_write(vcpu, 0xfff0);
         else
-                vmcs_writel(GUEST_RIP, 0);
-        vmcs_writel(GUEST_RSP, 0);
+                kvm_rip_write(vcpu, 0);
+        kvm_register_write(vcpu, VCPU_REGS_RSP, 0);
 
         /* todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 */
         vmcs_writel(GUEST_DR7, 0x400);
@@ -2139,11 +2136,11 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
         if (vcpu->arch.rmode.active) {
                 vmx->rmode.irq.pending = true;
                 vmx->rmode.irq.vector = irq;
-                vmx->rmode.irq.rip = vmcs_readl(GUEST_RIP);
+                vmx->rmode.irq.rip = kvm_rip_read(vcpu);
                 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
                              irq | INTR_TYPE_SOFT_INTR | INTR_INFO_VALID_MASK);
                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
-                vmcs_writel(GUEST_RIP, vmx->rmode.irq.rip - 1);
+                kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
                 return;
         }
         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
@@ -2288,7 +2285,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         }
 
         error_code = 0;
-        rip = vmcs_readl(GUEST_RIP);
+        rip = kvm_rip_read(vcpu);
         if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
                 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
         if (is_page_fault(intr_info)) {
@@ -2386,27 +2383,25 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         reg = (exit_qualification >> 8) & 15;
         switch ((exit_qualification >> 4) & 3) {
         case 0: /* mov to cr */
-                KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)vcpu->arch.regs[reg],
-                            (u32)((u64)vcpu->arch.regs[reg] >> 32), handler);
+                KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr,
+                            (u32)kvm_register_read(vcpu, reg),
+                            (u32)((u64)kvm_register_read(vcpu, reg) >> 32),
+                            handler);
                 switch (cr) {
                 case 0:
-                        vcpu_load_rsp_rip(vcpu);
-                        kvm_set_cr0(vcpu, vcpu->arch.regs[reg]);
+                        kvm_set_cr0(vcpu, kvm_register_read(vcpu, reg));
                         skip_emulated_instruction(vcpu);
                         return 1;
                 case 3:
-                        vcpu_load_rsp_rip(vcpu);
-                        kvm_set_cr3(vcpu, vcpu->arch.regs[reg]);
+                        kvm_set_cr3(vcpu, kvm_register_read(vcpu, reg));
                         skip_emulated_instruction(vcpu);
                         return 1;
                 case 4:
-                        vcpu_load_rsp_rip(vcpu);
-                        kvm_set_cr4(vcpu, vcpu->arch.regs[reg]);
+                        kvm_set_cr4(vcpu, kvm_register_read(vcpu, reg));
                         skip_emulated_instruction(vcpu);
                         return 1;
                 case 8:
-                        vcpu_load_rsp_rip(vcpu);
-                        kvm_set_cr8(vcpu, vcpu->arch.regs[reg]);
+                        kvm_set_cr8(vcpu, kvm_register_read(vcpu, reg));
                         skip_emulated_instruction(vcpu);
                         if (irqchip_in_kernel(vcpu->kvm))
                                 return 1;
@@ -2415,7 +2410,6 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                 };
                 break;
         case 2: /* clts */
-                vcpu_load_rsp_rip(vcpu);
                 vmx_fpu_deactivate(vcpu);
                 vcpu->arch.cr0 &= ~X86_CR0_TS;
                 vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
@@ -2426,21 +2420,17 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         case 1: /*mov from cr*/
                 switch (cr) {
                 case 3:
-                        vcpu_load_rsp_rip(vcpu);
-                        vcpu->arch.regs[reg] = vcpu->arch.cr3;
-                        vcpu_put_rsp_rip(vcpu);
+                        kvm_register_write(vcpu, reg, vcpu->arch.cr3);
                         KVMTRACE_3D(CR_READ, vcpu, (u32)cr,
-                                    (u32)vcpu->arch.regs[reg],
-                                    (u32)((u64)vcpu->arch.regs[reg] >> 32),
+                                    (u32)kvm_register_read(vcpu, reg),
+                                    (u32)((u64)kvm_register_read(vcpu, reg) >> 32),
                                     handler);
                         skip_emulated_instruction(vcpu);
                         return 1;
                 case 8:
-                        vcpu_load_rsp_rip(vcpu);
-                        vcpu->arch.regs[reg] = kvm_get_cr8(vcpu);
-                        vcpu_put_rsp_rip(vcpu);
+                        kvm_register_write(vcpu, reg, kvm_get_cr8(vcpu));
                         KVMTRACE_2D(CR_READ, vcpu, (u32)cr,
-                                    (u32)vcpu->arch.regs[reg], handler);
+                                    (u32)kvm_register_read(vcpu, reg), handler);
                         skip_emulated_instruction(vcpu);
                         return 1;
                 }
@@ -2472,7 +2462,6 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
         dr = exit_qualification & 7;
         reg = (exit_qualification >> 8) & 15;
-        vcpu_load_rsp_rip(vcpu);
         if (exit_qualification & 16) {
                 /* mov from dr */
                 switch (dr) {
@@ -2485,12 +2474,11 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                 default:
                         val = 0;
                 }
-                vcpu->arch.regs[reg] = val;
+                kvm_register_write(vcpu, reg, val);
                 KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
         } else {
                 /* mov to dr */
         }
-        vcpu_put_rsp_rip(vcpu);
         skip_emulated_instruction(vcpu);
         return 1;
 }
@@ -2735,8 +2723,8 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
         struct vcpu_vmx *vmx = to_vmx(vcpu);
         u32 vectoring_info = vmx->idt_vectoring_info;
 
-        KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)vmcs_readl(GUEST_RIP),
-                    (u32)((u64)vmcs_readl(GUEST_RIP) >> 32), entryexit);
+        KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)kvm_rip_read(vcpu),
+                    (u32)((u64)kvm_rip_read(vcpu) >> 32), entryexit);
 
         /* Access CR3 don't cause VMExit in paging mode, so we need
          * to sync with guest real CR3. */
@@ -2922,9 +2910,9 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
 static void fixup_rmode_irq(struct vcpu_vmx *vmx)
 {
         vmx->rmode.irq.pending = 0;
-        if (vmcs_readl(GUEST_RIP) + 1 != vmx->rmode.irq.rip)
+        if (kvm_rip_read(&vmx->vcpu) + 1 != vmx->rmode.irq.rip)
                 return;
-        vmcs_writel(GUEST_RIP, vmx->rmode.irq.rip);
+        kvm_rip_write(&vmx->vcpu, vmx->rmode.irq.rip);
         if (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) {
                 vmx->idt_vectoring_info &= ~VECTORING_INFO_TYPE_MASK;
                 vmx->idt_vectoring_info |= INTR_TYPE_EXT_INTR;
@@ -2941,6 +2929,11 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         struct vcpu_vmx *vmx = to_vmx(vcpu);
         u32 intr_info;
 
+        if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
+                vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
+        if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
+                vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
+
         /*
          * Loading guest fpu may have cleared host cr0.ts
          */
@@ -3061,6 +3054,9 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 #endif
               );
 
+        vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
+        vcpu->arch.regs_dirty = 0;
+
         vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
         if (vmx->rmode.irq.pending)
                 fixup_rmode_irq(vmx);
@@ -3224,8 +3220,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
         .set_idt = vmx_set_idt,
         .get_gdt = vmx_get_gdt,
         .set_gdt = vmx_set_gdt,
-        .cache_regs = vcpu_load_rsp_rip,
-        .decache_regs = vcpu_put_rsp_rip,
+        .cache_reg = vmx_cache_reg,
         .get_rflags = vmx_get_rflags,
         .set_rflags = vmx_set_rflags,
 