author     Marcelo Tosatti <mtosatti@redhat.com>    2008-06-06 15:37:35 -0400
committer  Avi Kivity <avi@qumranet.com>            2008-06-24 05:16:52 -0400
commit     d4acf7e7abe45457e751525a2a4d5b693dfdd597
tree       2ff1cf6f59b0591ea39c0457705188d5f46cb118
parent     62786b9e81a2dbe9c073a2ade52d33a2627d6d85
KVM: Fix race between timer migration and vcpu migration
A guest vcpu instance can be scheduled to a different physical CPU
between the test for KVM_REQ_MIGRATE_TIMER and local_irq_disable().
If that happens, the timer will only be migrated to the current pCPU on
the next exit, meaning that a guest LAPIC timer event can be delayed until
a host interrupt is triggered.
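The window is easiest to see in a condensed C sketch of the pre-patch ordering (simplified from the hunks below, not the verbatim function body; the comments are editorial):

        if (vcpu->requests) {
                if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
                        __kvm_migrate_timers(vcpu);     /* timers follow the current pCPU */
                /* ... remaining request bits handled here ... */
        }

        /*
         * Still preemptible here: the vcpu can be scheduled onto another
         * physical CPU, leaving a new KVM_REQ_MIGRATE_TIMER pending, but
         * nothing below re-reads vcpu->requests before VM entry.
         */

        local_irq_disable();

        if (need_resched()) {                           /* requests are not re-checked */
                local_irq_enable();
                preempt_enable();
                r = 1;
                goto out;
        }

        /* VM entry: the LAPIC timer stays armed on the old pCPU until the next exit */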
Fix it by cancelling guest entry if any vcpu request is pending. This
has the side effect of nicely consolidating vcpu->requests checks.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
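With the patch applied, the pending-request test is repeated once interrupts are disabled, so a request raised in the window above simply cancels this entry attempt and is handled on the next pass through the loop. Again a simplified sketch, not the literal hunk (comments are editorial):

        local_irq_disable();

        /*
         * Any request raised since the request block ran -- timer migration,
         * TLB flush, MMU reload -- cancels this entry; the loop is retried
         * and the request is processed before the guest actually runs.
         */
        if (vcpu->requests || need_resched()) {
                local_irq_enable();
                preempt_enable();
                r = 1;
                goto out;
        }

This single test also subsumes the dedicated KVM_REQ_MMU_RELOAD bail-out and lets the KVM_REQ_TLB_FLUSH handling move up into the common request block, which is the consolidation the changelog mentions.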
 arch/x86/kvm/x86.c | 15 +++------------
 1 file changed, 3 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 00acf1301a15..b90744a1dc3a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2759,6 +2759,8 @@ again:
         if (vcpu->requests) {
                 if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
                         __kvm_migrate_timers(vcpu);
+                if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
+                        kvm_x86_ops->tlb_flush(vcpu);
                 if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
                                        &vcpu->requests)) {
                         kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
@@ -2781,21 +2783,13 @@ again:
 
         local_irq_disable();
 
-        if (need_resched()) {
+        if (vcpu->requests || need_resched()) {
                 local_irq_enable();
                 preempt_enable();
                 r = 1;
                 goto out;
         }
 
-        if (vcpu->requests)
-                if (test_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) {
-                        local_irq_enable();
-                        preempt_enable();
-                        r = 1;
-                        goto out;
-                }
-
         if (signal_pending(current)) {
                 local_irq_enable();
                 preempt_enable();
@@ -2825,9 +2819,6 @@ again:
 
         kvm_guest_enter();
 
-        if (vcpu->requests)
-                if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
-                        kvm_x86_ops->tlb_flush(vcpu);
 
         KVMTRACE_0D(VMENTRY, vcpu, entryexit);
         kvm_x86_ops->run(vcpu, kvm_run);