author     Avi Kivity <avi@qumranet.com>   2007-09-10 11:10:54 -0400
committer  Avi Kivity <avi@qumranet.com>   2007-10-13 04:18:28 -0400
commit     04d2cc7780d48a212843e38d46402d97fa1f4774
tree       a209131bad59abcf574abbaae23145db3c4005e0  /drivers/kvm/vmx.c
parent     29bd8a78082f2d7e2165a735f50b5c716ef3213b
KVM: Move main vcpu loop into subarch independent code
This simplifies adding new code and reduces overall code size.

Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm/vmx.c')
 drivers/kvm/vmx.c | 129 ++++-----------------------
 1 file changed, 17 insertions(+), 112 deletions(-)
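To make the move concrete, here is a rough sketch of the loop as it looks on the subarch-independent side after this patch. It is reconstructed from the code removed from vmx_vcpu_run() below, with the direct VMX calls replaced by the kvm_x86_ops hooks registered at the bottom of this diff; the function name and its exact shape in kvm_main.c are illustrative, not copied from the commit.

static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int r;

        if (unlikely(vcpu->mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
                kvm_lapic_reset(vcpu);
                kvm_x86_ops->vcpu_reset(vcpu);          /* was vmx_vcpu_setup() */
                vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
        }

preempted:
        if (vcpu->guest_debug.enabled)
                kvm_x86_ops->guest_debug_pre(vcpu);

again:
        r = kvm_mmu_reload(vcpu);
        if (unlikely(r))
                goto out;

        preempt_disable();
        kvm_x86_ops->prepare_guest_switch(vcpu);        /* was vmx_save_host_state() */
        kvm_load_guest_fpu(vcpu);

        local_irq_disable();
        if (signal_pending(current)) {
                local_irq_enable();
                preempt_enable();
                r = -EINTR;
                kvm_run->exit_reason = KVM_EXIT_INTR;
                ++vcpu->stat.signal_exits;
                goto out;
        }

        if (irqchip_in_kernel(vcpu->kvm))
                kvm_x86_ops->inject_pending_irq(vcpu);  /* was vmx_intr_assist() */
        else if (!vcpu->mmio_read_completed)
                kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);

        vcpu->guest_mode = 1;
        if (vcpu->requests)
                if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
                        kvm_x86_ops->tlb_flush(vcpu);   /* pre-existing hook */

        kvm_x86_ops->run(vcpu, kvm_run);                /* vmx_vcpu_run(), now void */

        vcpu->guest_mode = 0;
        local_irq_enable();
        ++vcpu->stat.exits;
        preempt_enable();

        /*
         * The KVM_PROFILING exit-RIP hit also moves here, which is why the
         * <linux/profile.h> include is dropped from vmx.c below.
         */

        r = kvm_x86_ops->handle_exit(kvm_run, vcpu);    /* was kvm_handle_exit() */
        if (r > 0) {
                /* dm_request_for_irq_injection() moves here along with the loop */
                if (dm_request_for_irq_injection(vcpu, kvm_run)) {
                        r = -EINTR;
                        kvm_run->exit_reason = KVM_EXIT_INTR;
                        ++vcpu->stat.request_irq_exits;
                        goto out;
                }
                if (!need_resched()) {
                        ++vcpu->stat.light_exits;
                        goto again;
                }
        }

out:
        if (r > 0) {
                kvm_resched(vcpu);
                goto preempted;
        }
        post_kvm_run_save(vcpu, kvm_run);               /* also moved out of vmx.c */
        return r;
}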
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 713f78a8959..fa4277d520c 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -25,7 +25,6 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/highmem.h>
-#include <linux/profile.h>
 #include <linux/sched.h>
 
 #include <asm/io.h>
@@ -355,8 +354,10 @@ static void load_transition_efer(struct vcpu_vmx *vmx)
        vmx->vcpu.stat.efer_reload++;
 }
 
-static void vmx_save_host_state(struct vcpu_vmx *vmx)
+static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 {
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+
        if (vmx->host_state.loaded)
                return;
 
@@ -1598,6 +1599,13 @@ out:
        return ret;
 }
 
+static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+       vmx_vcpu_setup(vmx);
+}
+
 static void inject_rmode_irq(struct kvm_vcpu *vcpu, int irq)
 {
        u16 ent[2];
@@ -2019,20 +2027,6 @@ static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu,
        return 1;
 }
 
-static void post_kvm_run_save(struct kvm_vcpu *vcpu,
-                             struct kvm_run *kvm_run)
-{
-       kvm_run->if_flag = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) != 0;
-       kvm_run->cr8 = get_cr8(vcpu);
-       kvm_run->apic_base = kvm_get_apic_base(vcpu);
-       if (irqchip_in_kernel(vcpu->kvm))
-               kvm_run->ready_for_interrupt_injection = 1;
-       else
-               kvm_run->ready_for_interrupt_injection =
-                       (vcpu->interrupt_window_open &&
-                        vcpu->irq_summary == 0);
-}
-
 static int handle_interrupt_window(struct kvm_vcpu *vcpu,
                                   struct kvm_run *kvm_run)
 {
@@ -2123,21 +2117,6 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
        return 0;
 }
 
-/*
- * Check if userspace requested an interrupt window, and that the
- * interrupt window is open.
- *
- * No need to exit to userspace if we already have an interrupt queued.
- */
-static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
-                                       struct kvm_run *kvm_run)
-{
-       return (!vcpu->irq_summary &&
-               kvm_run->request_interrupt_window &&
-               vcpu->interrupt_window_open &&
-               (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF));
-}
-
 static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
 {
 }
@@ -2214,59 +2193,15 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
                enable_irq_window(vcpu);
 }
 
-static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
-       int r;
-
-       if (unlikely(vcpu->mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
-               printk("vcpu %d received sipi with vector # %x\n",
-                      vcpu->vcpu_id, vcpu->sipi_vector);
-               kvm_lapic_reset(vcpu);
-               vmx_vcpu_setup(vmx);
-               vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
-       }
-
-preempted:
-       if (vcpu->guest_debug.enabled)
-               kvm_guest_debug_pre(vcpu);
-
-again:
-       r = kvm_mmu_reload(vcpu);
-       if (unlikely(r))
-               goto out;
-
-       preempt_disable();
-
-       vmx_save_host_state(vmx);
-       kvm_load_guest_fpu(vcpu);
 
        /*
         * Loading guest fpu may have cleared host cr0.ts
         */
        vmcs_writel(HOST_CR0, read_cr0());
 
-       local_irq_disable();
-
-       if (signal_pending(current)) {
-               local_irq_enable();
-               preempt_enable();
-               r = -EINTR;
-               kvm_run->exit_reason = KVM_EXIT_INTR;
-               ++vcpu->stat.signal_exits;
-               goto out;
-       }
-
-       if (irqchip_in_kernel(vcpu->kvm))
-               vmx_intr_assist(vcpu);
-       else if (!vcpu->mmio_read_completed)
-               do_interrupt_requests(vcpu, kvm_run);
-
-       vcpu->guest_mode = 1;
-       if (vcpu->requests)
-               if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
-                       vmx_flush_tlb(vcpu);
-
        asm (
                /* Store host registers */
 #ifdef CONFIG_X86_64
@@ -2383,46 +2318,10 @@ again:
                [cr2]"i"(offsetof(struct kvm_vcpu, cr2))
              : "cc", "memory" );
 
-       vcpu->guest_mode = 0;
-       local_irq_enable();
-
-       ++vcpu->stat.exits;
-
        vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
 
        asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
        vmx->launched = 1;
-
-       preempt_enable();
-
-       /*
-        * Profile KVM exit RIPs:
-        */
-       if (unlikely(prof_on == KVM_PROFILING))
-               profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
-
-       r = kvm_handle_exit(kvm_run, vcpu);
-       if (r > 0) {
-               if (dm_request_for_irq_injection(vcpu, kvm_run)) {
-                       r = -EINTR;
-                       kvm_run->exit_reason = KVM_EXIT_INTR;
-                       ++vcpu->stat.request_irq_exits;
-                       goto out;
-               }
-               if (!need_resched()) {
-                       ++vcpu->stat.light_exits;
-                       goto again;
-               }
-       }
-
-out:
-       if (r > 0) {
-               kvm_resched(vcpu);
-               goto preempted;
-       }
-
-       post_kvm_run_save(vcpu, kvm_run);
-       return r;
 }
 
 static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
@@ -2560,12 +2459,15 @@ static struct kvm_x86_ops vmx_x86_ops = {
 
        .vcpu_create = vmx_create_vcpu,
        .vcpu_free = vmx_free_vcpu,
+       .vcpu_reset = vmx_vcpu_reset,
 
+       .prepare_guest_switch = vmx_save_host_state,
        .vcpu_load = vmx_vcpu_load,
        .vcpu_put = vmx_vcpu_put,
        .vcpu_decache = vmx_vcpu_decache,
 
        .set_guest_debug = set_guest_debug,
+       .guest_debug_pre = kvm_guest_debug_pre,
        .get_msr = vmx_get_msr,
        .set_msr = vmx_set_msr,
        .get_segment_base = vmx_get_segment_base,
@@ -2594,10 +2496,13 @@ static struct kvm_x86_ops vmx_x86_ops = {
        .inject_gp = vmx_inject_gp,
 
        .run = vmx_vcpu_run,
+       .handle_exit = kvm_handle_exit,
        .skip_emulated_instruction = skip_emulated_instruction,
        .patch_hypercall = vmx_patch_hypercall,
        .get_irq = vmx_get_irq,
        .set_irq = vmx_inject_irq,
+       .inject_pending_irq = vmx_intr_assist,
+       .inject_pending_vectors = do_interrupt_requests,
 };
 
 static int __init vmx_init(void)
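For reference, the initializers added to vmx_x86_ops above imply the following new or changed members on struct kvm_x86_ops (a sketch assembled from the signatures visible in this diff, not copied from kvm.h):

        void (*vcpu_reset)(struct kvm_vcpu *vcpu);
        void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
        void (*guest_debug_pre)(struct kvm_vcpu *vcpu);
        void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);   /* was int */
        int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
        void (*inject_pending_irq)(struct kvm_vcpu *vcpu);
        void (*inject_pending_vectors)(struct kvm_vcpu *vcpu, struct kvm_run *run);

The SVM side of this commit (not shown here; the diffstat is limited to vmx.c) has to register its own implementations of the same hooks, so the generic loop never calls into vmx.c directly.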