author    Avi Kivity <avi@qumranet.com>  2007-07-11 11:17:21 -0400
committer Avi Kivity <avi@qumranet.com>  2007-10-13 04:18:20 -0400
commit    15ad71460d75fd7ca41bb248a2310f3f39b302ba
tree      1ea549e5c5629561c121a54def146fb6b706c2d4  /drivers/kvm/vmx.c
parent    519ef35341b4f360f072ea74e398b70a5a2fc270
KVM: Use the scheduler preemption notifiers to make kvm preemptible
Current kvm disables preemption while the new virtualization registers are in use. This of course is not very good for latency sensitive workloads (one use of virtualization is to offload user interface and other latency insensitive stuff to a container, so that it is easier to analyze the remaining workload). This patch re-enables preemption for kvm; preemption is now only disabled when switching the registers in and out, and during the switch to guest mode and back.

Contains fixes from Shaohua Li <shaohua.li@intel.com>.

Signed-off-by: Avi Kivity <avi@qumranet.com>
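The preemption notifiers named in the subject are implemented outside this file (in the generic kvm code and the scheduler, neither of which appears in this diffstat). For orientation only, here is a minimal sketch of the scheduler's preempt_notifier interface as this series uses it; the kvm_sched_in()/kvm_sched_out() names, the vcpu->preempt_notifier field, and the kvm_arch_ops table name are illustrative assumptions, not quotes from kvm_main.c.

/*
 * Sketch only: per-task preemption notifiers let kvm save and restore
 * virtualization state around an involuntary context switch.  The helper
 * names, ops-table name, and struct kvm_vcpu layout are assumptions.
 */
#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/sched.h>

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = container_of(pn, struct kvm_vcpu,
					     preempt_notifier);

	/* We are being preempted: hand the hardware back to the host. */
	kvm_arch_ops->vcpu_put(vcpu);		/* e.g. vmx_vcpu_put() below */
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = container_of(pn, struct kvm_vcpu,
					     preempt_notifier);

	/* We are running again, possibly on another cpu: reload state. */
	kvm_arch_ops->vcpu_load(vcpu, cpu);	/* e.g. vmx_vcpu_load() below */
}

static struct preempt_ops kvm_preempt_ops = {
	.sched_in  = kvm_sched_in,
	.sched_out = kvm_sched_out,
};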
Diffstat (limited to 'drivers/kvm/vmx.c')
-rw-r--r--  drivers/kvm/vmx.c | 22 +++++++++++++++++++---------
1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 18f9b0b3fb1f..8c87d20f8e39 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -396,6 +396,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 static void vmx_load_host_state(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	unsigned long flags;
 
 	if (!vmx->host_state.loaded)
 		return;
@@ -408,12 +409,12 @@ static void vmx_load_host_state(struct kvm_vcpu *vcpu)
 		 * If we have to reload gs, we must take care to
 		 * preserve our gs base.
 		 */
-		local_irq_disable();
+		local_irq_save(flags);
 		load_gs(vmx->host_state.gs_sel);
 #ifdef CONFIG_X86_64
 		wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
 #endif
-		local_irq_enable();
+		local_irq_restore(flags);
 
 		reload_tss();
 	}
@@ -427,15 +428,12 @@ static void vmx_load_host_state(struct kvm_vcpu *vcpu)
  * Switches to specified vcpu, until a matching vcpu_put(), but assumes
  * vcpu mutex is already taken.
  */
-static void vmx_vcpu_load(struct kvm_vcpu *vcpu)
+static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u64 phys_addr = __pa(vmx->vmcs);
-	int cpu;
 	u64 tsc_this, delta;
 
-	cpu = get_cpu();
-
 	if (vcpu->cpu != cpu)
 		vcpu_clear(vcpu);
 
@@ -480,7 +478,6 @@ static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	vmx_load_host_state(vcpu);
 	kvm_put_guest_fpu(vcpu);
-	put_cpu();
 }
 
 static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
@@ -2127,6 +2124,8 @@ again:
 	if (unlikely(r))
 		goto out;
 
+	preempt_disable();
+
 	if (!vcpu->mmio_read_completed)
 		do_interrupt_requests(vcpu, kvm_run);
 
@@ -2269,6 +2268,9 @@ again:
 	vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
 
 	asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
+	vmx->launched = 1;
+
+	preempt_enable();
 
 	if (unlikely(fail)) {
 		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
@@ -2283,7 +2285,6 @@ again:
 	if (unlikely(prof_on == KVM_PROFILING))
 		profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
 
-	vmx->launched = 1;
 	r = kvm_handle_exit(kvm_run, vcpu);
 	if (r > 0) {
 		/* Give scheduler a change to reschedule. */
@@ -2372,6 +2373,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 {
 	int err;
 	struct vcpu_vmx *vmx = kzalloc(sizeof(*vmx), GFP_KERNEL);
+	int cpu;
 
 	if (!vmx)
 		return ERR_PTR(-ENOMEM);
@@ -2396,9 +2398,11 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 
 	vmcs_clear(vmx->vmcs);
 
-	vmx_vcpu_load(&vmx->vcpu);
+	cpu = get_cpu();
+	vmx_vcpu_load(&vmx->vcpu, cpu);
 	err = vmx_vcpu_setup(&vmx->vcpu);
 	vmx_vcpu_put(&vmx->vcpu);
+	put_cpu();
 	if (err)
 		goto free_vmcs;
 
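The diff above removes the get_cpu()/put_cpu() pair from vmx_vcpu_load()/vmx_vcpu_put() and pushes the cpu pinning out to the callers (vmx_create_vcpu() in the last hunk does it explicitly). A rough sketch of how the generic vcpu_load()/vcpu_put() pair is expected to look after this change; this is an assumption about the kvm_main.c side of the patch, which is not covered by this diffstat, and it reuses the hypothetical kvm_arch_ops name from the sketch above.

/*
 * Sketch only: cpu pinning and notifier registration live in the generic
 * load/put wrappers, so the arch hooks themselves stay preemptible.
 */
static void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();			/* briefly non-preemptible ... */
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_ops->vcpu_load(vcpu, cpu);	/* e.g. vmx_vcpu_load() above */
	put_cpu();				/* ... preemption allowed again */
}

static void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_ops->vcpu_put(vcpu);		/* e.g. vmx_vcpu_put() above */
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}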