author	Marcelo Tosatti <mtosatti@redhat.com>	2008-06-27 13:58:02 -0400
committer	Avi Kivity <avi@qumranet.com>	2008-10-15 04:13:57 -0400
commit	5fdbf9765b7ba6a45100851154768de703d51e76 (patch)
tree	ec34ec9357575dc4190e5228a6eabfd5f81b66a5 /arch/x86/kvm/x86.c
parent	ca60dfbb69afb549e33527cbf676e4daf8febfb5 (diff)
KVM: x86: accessors for guest registers
As suggested by Avi, introduce accessors to read/write guest registers.
This simplifies the ->cache_regs/->decache_regs interface, and improves
register caching, which is important for VMX, where the cost of
vmcs_read/vmcs_write is significant.

[avi: fix warnings]

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
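The accessors themselves land in a new header, kvm_cache_regs.h, pulled in by the first hunk below. That header is outside this diffstat, so what follows is only a sketch of the intended shape, reconstructed from the call sites in this patch: a read faults a register in through a per-register vendor callback the first time it is needed, and a write marks the register dirty so it is flushed back to hardware before the next guest entry. The regs_avail bitmap, the cache_reg hook name, and the plain int register-index type are assumptions; only regs_dirty, kvm_register_read/write, and kvm_rip_read/write actually appear in the diff.

/*
 * Sketch only -- reconstructed from the call sites below, not copied
 * from kvm_cache_regs.h. The regs_avail bitmap and the per-register
 * kvm_x86_ops->cache_reg() hook are assumptions.
 */
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
	if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, reg); /* lazy fill from hardware */

	return vcpu->arch.regs[reg];
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu, int reg,
				      unsigned long val)
{
	vcpu->arch.regs[reg] = val;
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty); /* write back on vmentry */
}

/* RIP becomes just another cached register (VCPU_REGS_RIP): */
static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}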
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--	arch/x86/kvm/x86.c	268
1 file changed, 138 insertions(+), 130 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0d682fc6aeb3..2f0696bc7d2f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -19,6 +19,7 @@
 #include "mmu.h"
 #include "i8254.h"
 #include "tss.h"
+#include "kvm_cache_regs.h"
 
 #include <linux/clocksource.h>
 #include <linux/kvm.h>
@@ -61,6 +62,7 @@ static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
 				    struct kvm_cpuid_entry2 __user *entries);
 
 struct kvm_x86_ops *kvm_x86_ops;
+EXPORT_SYMBOL_GPL(kvm_x86_ops);
 
 struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "pf_fixed", VCPU_STAT(pf_fixed) },
@@ -2080,7 +2082,7 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
 void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
 {
 	u8 opcodes[4];
-	unsigned long rip = vcpu->arch.rip;
+	unsigned long rip = kvm_rip_read(vcpu);
 	unsigned long rip_linear;
 
 	if (!printk_ratelimit())
@@ -2102,6 +2104,14 @@ static struct x86_emulate_ops emulate_ops = {
 	.cmpxchg_emulated    = emulator_cmpxchg_emulated,
 };
 
+static void cache_all_regs(struct kvm_vcpu *vcpu)
+{
+	kvm_register_read(vcpu, VCPU_REGS_RAX);
+	kvm_register_read(vcpu, VCPU_REGS_RSP);
+	kvm_register_read(vcpu, VCPU_REGS_RIP);
+	vcpu->arch.regs_dirty = ~0;
+}
+
 int emulate_instruction(struct kvm_vcpu *vcpu,
 			struct kvm_run *run,
 			unsigned long cr2,
@@ -2112,7 +2122,13 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
 	struct decode_cache *c;
 
 	vcpu->arch.mmio_fault_cr2 = cr2;
-	kvm_x86_ops->cache_regs(vcpu);
+	/*
+	 * TODO: fix x86_emulate.c to use guest_read/write_register
+	 * instead of direct ->regs accesses, can save hundred cycles
+	 * on Intel for instructions that don't read/change RSP,
+	 * for example.
+	 */
+	cache_all_regs(vcpu);
 
 	vcpu->mmio_is_write = 0;
 	vcpu->arch.pio.string = 0;
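The cache_all_regs() workaround in the hunk above exists because x86_emulate.c still pokes vcpu->arch.regs[] directly, bypassing the accessors: reading RAX, RSP and RIP forces every lazily-held register into the array (on VMX only RSP and RIP live in the VMCS), and setting regs_dirty to ~0 ensures everything the emulator may have scribbled on is written back. The vendor half of that laziness is the per-register fill hook; below is a hypothetical sketch of the VMX side, which would live in vmx.c and is therefore not part of the x86.c diff shown here.

/*
 * Hypothetical sketch of the VMX fill hook (the real change is in
 * vmx.c, outside this diffstat). Only RSP and RIP are kept in the
 * VMCS; the remaining GPRs are always in vcpu->arch.regs[].
 */
static void vmx_cache_reg(struct kvm_vcpu *vcpu, int reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	switch (reg) {
	case VCPU_REGS_RSP:
		vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
		break;
	case VCPU_REGS_RIP:
		vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
		break;
	default:
		break;
	}
}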
@@ -2172,7 +2188,6 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
 		return EMULATE_DO_MMIO;
 	}
 
-	kvm_x86_ops->decache_regs(vcpu);
 	kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
 
 	if (vcpu->mmio_is_write) {
@@ -2225,20 +2240,19 @@ int complete_pio(struct kvm_vcpu *vcpu)
 	struct kvm_pio_request *io = &vcpu->arch.pio;
 	long delta;
 	int r;
-
-	kvm_x86_ops->cache_regs(vcpu);
+	unsigned long val;
 
 	if (!io->string) {
-		if (io->in)
-			memcpy(&vcpu->arch.regs[VCPU_REGS_RAX], vcpu->arch.pio_data,
-			       io->size);
+		if (io->in) {
+			val = kvm_register_read(vcpu, VCPU_REGS_RAX);
+			memcpy(&val, vcpu->arch.pio_data, io->size);
+			kvm_register_write(vcpu, VCPU_REGS_RAX, val);
+		}
 	} else {
 		if (io->in) {
 			r = pio_copy_data(vcpu);
-			if (r) {
-				kvm_x86_ops->cache_regs(vcpu);
+			if (r)
 				return r;
-			}
 		}
 
 		delta = 1;
@@ -2248,19 +2262,24 @@ int complete_pio(struct kvm_vcpu *vcpu)
 			 * The size of the register should really depend on
 			 * current address size.
 			 */
-			vcpu->arch.regs[VCPU_REGS_RCX] -= delta;
+			val = kvm_register_read(vcpu, VCPU_REGS_RCX);
+			val -= delta;
+			kvm_register_write(vcpu, VCPU_REGS_RCX, val);
 		}
 		if (io->down)
 			delta = -delta;
 		delta *= io->size;
-		if (io->in)
-			vcpu->arch.regs[VCPU_REGS_RDI] += delta;
-		else
-			vcpu->arch.regs[VCPU_REGS_RSI] += delta;
+		if (io->in) {
+			val = kvm_register_read(vcpu, VCPU_REGS_RDI);
+			val += delta;
+			kvm_register_write(vcpu, VCPU_REGS_RDI, val);
+		} else {
+			val = kvm_register_read(vcpu, VCPU_REGS_RSI);
+			val += delta;
+			kvm_register_write(vcpu, VCPU_REGS_RSI, val);
+		}
 	}
 
-	kvm_x86_ops->decache_regs(vcpu);
-
 	io->count -= io->cur_count;
 	io->cur_count = 0;
 
@@ -2313,6 +2332,7 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 		  int size, unsigned port)
 {
 	struct kvm_io_device *pio_dev;
+	unsigned long val;
 
 	vcpu->run->exit_reason = KVM_EXIT_IO;
 	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
@@ -2333,8 +2353,8 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 	KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
 		    handler);
 
-	kvm_x86_ops->cache_regs(vcpu);
-	memcpy(vcpu->arch.pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4);
+	val = kvm_register_read(vcpu, VCPU_REGS_RAX);
+	memcpy(vcpu->arch.pio_data, &val, 4);
 
 	kvm_x86_ops->skip_emulated_instruction(vcpu);
 
@@ -2519,13 +2539,11 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 	unsigned long nr, a0, a1, a2, a3, ret;
 	int r = 1;
 
-	kvm_x86_ops->cache_regs(vcpu);
-
-	nr = vcpu->arch.regs[VCPU_REGS_RAX];
-	a0 = vcpu->arch.regs[VCPU_REGS_RBX];
-	a1 = vcpu->arch.regs[VCPU_REGS_RCX];
-	a2 = vcpu->arch.regs[VCPU_REGS_RDX];
-	a3 = vcpu->arch.regs[VCPU_REGS_RSI];
+	nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
+	a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
+	a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
+	a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
+	a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
 
 	KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler);
 
@@ -2548,8 +2566,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 		ret = -KVM_ENOSYS;
 		break;
 	}
-	vcpu->arch.regs[VCPU_REGS_RAX] = ret;
-	kvm_x86_ops->decache_regs(vcpu);
+	kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
 	++vcpu->stat.hypercalls;
 	return r;
 }
@@ -2559,6 +2576,7 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
 {
 	char instruction[3];
 	int ret = 0;
+	unsigned long rip = kvm_rip_read(vcpu);
 
 
 	/*
@@ -2568,9 +2586,8 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
 	 */
 	kvm_mmu_zap_all(vcpu->kvm);
 
-	kvm_x86_ops->cache_regs(vcpu);
 	kvm_x86_ops->patch_hypercall(vcpu, instruction);
-	if (emulator_write_emulated(vcpu->arch.rip, instruction, 3, vcpu)
+	if (emulator_write_emulated(rip, instruction, 3, vcpu)
 	    != X86EMUL_CONTINUE)
 		ret = -EFAULT;
 
@@ -2700,13 +2717,12 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
 	u32 function, index;
 	struct kvm_cpuid_entry2 *e, *best;
 
-	kvm_x86_ops->cache_regs(vcpu);
-	function = vcpu->arch.regs[VCPU_REGS_RAX];
-	index = vcpu->arch.regs[VCPU_REGS_RCX];
-	vcpu->arch.regs[VCPU_REGS_RAX] = 0;
-	vcpu->arch.regs[VCPU_REGS_RBX] = 0;
-	vcpu->arch.regs[VCPU_REGS_RCX] = 0;
-	vcpu->arch.regs[VCPU_REGS_RDX] = 0;
+	function = kvm_register_read(vcpu, VCPU_REGS_RAX);
+	index = kvm_register_read(vcpu, VCPU_REGS_RCX);
+	kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
+	kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
+	kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
+	kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
 	best = NULL;
 	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
 		e = &vcpu->arch.cpuid_entries[i];
@@ -2724,18 +2740,17 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
 			best = e;
 	}
 	if (best) {
-		vcpu->arch.regs[VCPU_REGS_RAX] = best->eax;
-		vcpu->arch.regs[VCPU_REGS_RBX] = best->ebx;
-		vcpu->arch.regs[VCPU_REGS_RCX] = best->ecx;
-		vcpu->arch.regs[VCPU_REGS_RDX] = best->edx;
+		kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
+		kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
+		kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
+		kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
 	}
-	kvm_x86_ops->decache_regs(vcpu);
 	kvm_x86_ops->skip_emulated_instruction(vcpu);
 	KVMTRACE_5D(CPUID, vcpu, function,
-		    (u32)vcpu->arch.regs[VCPU_REGS_RAX],
-		    (u32)vcpu->arch.regs[VCPU_REGS_RBX],
-		    (u32)vcpu->arch.regs[VCPU_REGS_RCX],
-		    (u32)vcpu->arch.regs[VCPU_REGS_RDX], handler);
+		    (u32)kvm_register_read(vcpu, VCPU_REGS_RAX),
+		    (u32)kvm_register_read(vcpu, VCPU_REGS_RBX),
+		    (u32)kvm_register_read(vcpu, VCPU_REGS_RCX),
+		    (u32)kvm_register_read(vcpu, VCPU_REGS_RDX), handler);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
 
@@ -2917,8 +2932,8 @@ again:
 	 * Profile KVM exit RIPs:
 	 */
 	if (unlikely(prof_on == KVM_PROFILING)) {
-		kvm_x86_ops->cache_regs(vcpu);
-		profile_hit(KVM_PROFILING, (void *)vcpu->arch.rip);
+		unsigned long rip = kvm_rip_read(vcpu);
+		profile_hit(KVM_PROFILING, (void *)rip);
 	}
 
 	if (vcpu->arch.exception.pending && kvm_x86_ops->exception_injected(vcpu))
@@ -2999,11 +3014,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		}
 	}
 #endif
-	if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
-		kvm_x86_ops->cache_regs(vcpu);
-		vcpu->arch.regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
-		kvm_x86_ops->decache_regs(vcpu);
-	}
+	if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
+		kvm_register_write(vcpu, VCPU_REGS_RAX,
+				   kvm_run->hypercall.ret);
 
 	r = __vcpu_run(vcpu, kvm_run);
 
@@ -3019,28 +3032,26 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
 	vcpu_load(vcpu);
 
-	kvm_x86_ops->cache_regs(vcpu);
-
-	regs->rax = vcpu->arch.regs[VCPU_REGS_RAX];
-	regs->rbx = vcpu->arch.regs[VCPU_REGS_RBX];
-	regs->rcx = vcpu->arch.regs[VCPU_REGS_RCX];
-	regs->rdx = vcpu->arch.regs[VCPU_REGS_RDX];
-	regs->rsi = vcpu->arch.regs[VCPU_REGS_RSI];
-	regs->rdi = vcpu->arch.regs[VCPU_REGS_RDI];
-	regs->rsp = vcpu->arch.regs[VCPU_REGS_RSP];
-	regs->rbp = vcpu->arch.regs[VCPU_REGS_RBP];
+	regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
+	regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
+	regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
+	regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
+	regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
+	regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
+	regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
+	regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
 #ifdef CONFIG_X86_64
-	regs->r8 = vcpu->arch.regs[VCPU_REGS_R8];
-	regs->r9 = vcpu->arch.regs[VCPU_REGS_R9];
-	regs->r10 = vcpu->arch.regs[VCPU_REGS_R10];
-	regs->r11 = vcpu->arch.regs[VCPU_REGS_R11];
-	regs->r12 = vcpu->arch.regs[VCPU_REGS_R12];
-	regs->r13 = vcpu->arch.regs[VCPU_REGS_R13];
-	regs->r14 = vcpu->arch.regs[VCPU_REGS_R14];
-	regs->r15 = vcpu->arch.regs[VCPU_REGS_R15];
+	regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
+	regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
+	regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
+	regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
+	regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
+	regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
+	regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
+	regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
 #endif
 
-	regs->rip = vcpu->arch.rip;
+	regs->rip = kvm_rip_read(vcpu);
 	regs->rflags = kvm_x86_ops->get_rflags(vcpu);
 
 	/*
@@ -3058,29 +3069,29 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
 	vcpu_load(vcpu);
 
-	vcpu->arch.regs[VCPU_REGS_RAX] = regs->rax;
-	vcpu->arch.regs[VCPU_REGS_RBX] = regs->rbx;
-	vcpu->arch.regs[VCPU_REGS_RCX] = regs->rcx;
-	vcpu->arch.regs[VCPU_REGS_RDX] = regs->rdx;
-	vcpu->arch.regs[VCPU_REGS_RSI] = regs->rsi;
-	vcpu->arch.regs[VCPU_REGS_RDI] = regs->rdi;
-	vcpu->arch.regs[VCPU_REGS_RSP] = regs->rsp;
-	vcpu->arch.regs[VCPU_REGS_RBP] = regs->rbp;
+	kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
+	kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
+	kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
+	kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
+	kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
+	kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
+	kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
+	kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
 #ifdef CONFIG_X86_64
-	vcpu->arch.regs[VCPU_REGS_R8] = regs->r8;
-	vcpu->arch.regs[VCPU_REGS_R9] = regs->r9;
-	vcpu->arch.regs[VCPU_REGS_R10] = regs->r10;
-	vcpu->arch.regs[VCPU_REGS_R11] = regs->r11;
-	vcpu->arch.regs[VCPU_REGS_R12] = regs->r12;
-	vcpu->arch.regs[VCPU_REGS_R13] = regs->r13;
-	vcpu->arch.regs[VCPU_REGS_R14] = regs->r14;
-	vcpu->arch.regs[VCPU_REGS_R15] = regs->r15;
+	kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
+	kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
+	kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
+	kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
+	kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
+	kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
+	kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
+	kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
+
 #endif
 
-	vcpu->arch.rip = regs->rip;
+	kvm_rip_write(vcpu, regs->rip);
 	kvm_x86_ops->set_rflags(vcpu, regs->rflags);
 
-	kvm_x86_ops->decache_regs(vcpu);
 
 	vcpu->arch.exception.pending = false;
 
@@ -3316,17 +3327,16 @@ static void save_state_to_tss32(struct kvm_vcpu *vcpu,
 				struct tss_segment_32 *tss)
 {
 	tss->cr3 = vcpu->arch.cr3;
-	tss->eip = vcpu->arch.rip;
+	tss->eip = kvm_rip_read(vcpu);
 	tss->eflags = kvm_x86_ops->get_rflags(vcpu);
-	tss->eax = vcpu->arch.regs[VCPU_REGS_RAX];
-	tss->ecx = vcpu->arch.regs[VCPU_REGS_RCX];
-	tss->edx = vcpu->arch.regs[VCPU_REGS_RDX];
-	tss->ebx = vcpu->arch.regs[VCPU_REGS_RBX];
-	tss->esp = vcpu->arch.regs[VCPU_REGS_RSP];
-	tss->ebp = vcpu->arch.regs[VCPU_REGS_RBP];
-	tss->esi = vcpu->arch.regs[VCPU_REGS_RSI];
-	tss->edi = vcpu->arch.regs[VCPU_REGS_RDI];
-
+	tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
+	tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
+	tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX);
+	tss->ebx = kvm_register_read(vcpu, VCPU_REGS_RBX);
+	tss->esp = kvm_register_read(vcpu, VCPU_REGS_RSP);
+	tss->ebp = kvm_register_read(vcpu, VCPU_REGS_RBP);
+	tss->esi = kvm_register_read(vcpu, VCPU_REGS_RSI);
+	tss->edi = kvm_register_read(vcpu, VCPU_REGS_RDI);
 	tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
 	tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
 	tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
@@ -3342,17 +3352,17 @@ static int load_state_from_tss32(struct kvm_vcpu *vcpu,
 {
 	kvm_set_cr3(vcpu, tss->cr3);
 
-	vcpu->arch.rip = tss->eip;
+	kvm_rip_write(vcpu, tss->eip);
 	kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2);
 
-	vcpu->arch.regs[VCPU_REGS_RAX] = tss->eax;
-	vcpu->arch.regs[VCPU_REGS_RCX] = tss->ecx;
-	vcpu->arch.regs[VCPU_REGS_RDX] = tss->edx;
-	vcpu->arch.regs[VCPU_REGS_RBX] = tss->ebx;
-	vcpu->arch.regs[VCPU_REGS_RSP] = tss->esp;
-	vcpu->arch.regs[VCPU_REGS_RBP] = tss->ebp;
-	vcpu->arch.regs[VCPU_REGS_RSI] = tss->esi;
-	vcpu->arch.regs[VCPU_REGS_RDI] = tss->edi;
+	kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax);
+	kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx);
+	kvm_register_write(vcpu, VCPU_REGS_RDX, tss->edx);
+	kvm_register_write(vcpu, VCPU_REGS_RBX, tss->ebx);
+	kvm_register_write(vcpu, VCPU_REGS_RSP, tss->esp);
+	kvm_register_write(vcpu, VCPU_REGS_RBP, tss->ebp);
+	kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
+	kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);
 
 	if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
 		return 1;
@@ -3380,16 +3390,16 @@ static void save_state_to_tss16(struct kvm_vcpu *vcpu,
 static void save_state_to_tss16(struct kvm_vcpu *vcpu,
 				struct tss_segment_16 *tss)
 {
-	tss->ip = vcpu->arch.rip;
+	tss->ip = kvm_rip_read(vcpu);
 	tss->flag = kvm_x86_ops->get_rflags(vcpu);
-	tss->ax = vcpu->arch.regs[VCPU_REGS_RAX];
-	tss->cx = vcpu->arch.regs[VCPU_REGS_RCX];
-	tss->dx = vcpu->arch.regs[VCPU_REGS_RDX];
-	tss->bx = vcpu->arch.regs[VCPU_REGS_RBX];
-	tss->sp = vcpu->arch.regs[VCPU_REGS_RSP];
-	tss->bp = vcpu->arch.regs[VCPU_REGS_RBP];
-	tss->si = vcpu->arch.regs[VCPU_REGS_RSI];
-	tss->di = vcpu->arch.regs[VCPU_REGS_RDI];
+	tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX);
+	tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX);
+	tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX);
+	tss->bx = kvm_register_read(vcpu, VCPU_REGS_RBX);
+	tss->sp = kvm_register_read(vcpu, VCPU_REGS_RSP);
+	tss->bp = kvm_register_read(vcpu, VCPU_REGS_RBP);
+	tss->si = kvm_register_read(vcpu, VCPU_REGS_RSI);
+	tss->di = kvm_register_read(vcpu, VCPU_REGS_RDI);
 
 	tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
 	tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
@@ -3402,16 +3412,16 @@ static int load_state_from_tss16(struct kvm_vcpu *vcpu,
 static int load_state_from_tss16(struct kvm_vcpu *vcpu,
 				 struct tss_segment_16 *tss)
 {
-	vcpu->arch.rip = tss->ip;
+	kvm_rip_write(vcpu, tss->ip);
 	kvm_x86_ops->set_rflags(vcpu, tss->flag | 2);
-	vcpu->arch.regs[VCPU_REGS_RAX] = tss->ax;
-	vcpu->arch.regs[VCPU_REGS_RCX] = tss->cx;
-	vcpu->arch.regs[VCPU_REGS_RDX] = tss->dx;
-	vcpu->arch.regs[VCPU_REGS_RBX] = tss->bx;
-	vcpu->arch.regs[VCPU_REGS_RSP] = tss->sp;
-	vcpu->arch.regs[VCPU_REGS_RBP] = tss->bp;
-	vcpu->arch.regs[VCPU_REGS_RSI] = tss->si;
-	vcpu->arch.regs[VCPU_REGS_RDI] = tss->di;
+	kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax);
+	kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx);
+	kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx);
+	kvm_register_write(vcpu, VCPU_REGS_RBX, tss->bx);
+	kvm_register_write(vcpu, VCPU_REGS_RSP, tss->sp);
+	kvm_register_write(vcpu, VCPU_REGS_RBP, tss->bp);
+	kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
+	kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);
 
 	if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
 		return 1;
@@ -3534,7 +3544,6 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 	}
 
 	kvm_x86_ops->skip_emulated_instruction(vcpu);
-	kvm_x86_ops->cache_regs(vcpu);
 
 	if (nseg_desc.type & 8)
 		ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_base,
@@ -3559,7 +3568,6 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 	tr_seg.type = 11;
 	kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
 out:
-	kvm_x86_ops->decache_regs(vcpu);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kvm_task_switch);