author		Marc Zyngier <marc.zyngier@arm.com>	2018-04-04 09:48:24 -0400
committer	Marc Zyngier <marc.zyngier@arm.com>	2018-04-17 04:18:26 -0400
commit		f0cf47d939d0b4b4f660c5aaa4276fa3488f3391 (patch)
tree		8652a4432798ce84444c66d9ae7c2f0d0330cbd3 /virt
parent		60cc43fc888428bb2f18f08997432d426a243338 (diff)
KVM: arm/arm64: Close VMID generation race
Before entering the guest, we check whether our VMID is still part of the current generation. In order to avoid taking a lock, we start by checking that the generation is still current, and only if it is not do we take the lock, recheck, and update the generation and VMID.

This leaves open a small race: a vcpu can bump up the global generation number as well as the VM's, but not yet have updated the VMID itself.

At that point another vcpu from the same VM comes in, checks the generation (and finds it not needing anything), and jumps into the guest. We end up with two vcpus belonging to the same VM running with two different VMIDs. Eventually, the VMID used by the second vcpu will get reassigned, and things will really go wrong...

A simple solution would be to drop this initial check and always take the lock, but that is likely to cause performance issues. A middle ground is to convert the spinlock to an rwlock, and take only the read lock on the fast path. If the check fails at that point, drop the read lock and acquire the write lock, rechecking the condition. This ensures that the above scenario doesn't occur.

Cc: stable@vger.kernel.org
Reported-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Shannon Zhao <zhaoshenglong@huawei.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
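The read-fast/write-slow pattern the message describes is easiest to see outside the kernel. Below is a minimal userspace sketch of the same idea using POSIX rwlocks; every name in it (global_gen, struct ctx, refresh_id, and so on) is an illustrative stand-in, not an identifier from the kernel sources.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static pthread_rwlock_t id_lock = PTHREAD_RWLOCK_INITIALIZER;
static atomic_ulong global_gen = 1;	/* analogue of kvm_vmid_gen */

struct ctx {
	unsigned long gen;	/* generation this context was stamped with */
	unsigned long id;	/* the per-VM ID (the VMID in the kernel) */
};

static bool needs_refresh(const struct ctx *c)
{
	return c->gen != atomic_load(&global_gen);
}

static void refresh_id(struct ctx *c)
{
	/* Fast path: take only the read lock, just to check the generation. */
	pthread_rwlock_rdlock(&id_lock);
	bool stale = needs_refresh(c);
	pthread_rwlock_unlock(&id_lock);
	if (!stale)
		return;

	/* Slow path: take the write lock and re-check, since another
	 * thread may have refreshed this context while we were unlocked. */
	pthread_rwlock_wrlock(&id_lock);
	if (!needs_refresh(c)) {
		pthread_rwlock_unlock(&id_lock);
		return;
	}

	/* The generation bump and the ID update happen together under the
	 * write lock. Because the fast-path check holds the read lock, no
	 * thread can observe the bumped generation while the ID is still
	 * stale -- the exact race the patch closes. */
	c->gen = atomic_fetch_add(&global_gen, 1) + 1;
	c->id = c->gen & 0xff;	/* stand-in for real VMID allocation */
	pthread_rwlock_unlock(&id_lock);
}

The key property is that the cheap check and the exclusive update are serialized through the same rwlock, so a successful fast-path check implies the whole update is complete, while concurrent readers still never contend with each other.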
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/arm/arm.c	15
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index dba629c5f8ac..a4c1b76240df 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -63,7 +63,7 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
 static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
 static u32 kvm_next_vmid;
 static unsigned int kvm_vmid_bits __read_mostly;
-static DEFINE_SPINLOCK(kvm_vmid_lock);
+static DEFINE_RWLOCK(kvm_vmid_lock);
 
 static bool vgic_present;
 
@@ -473,11 +473,16 @@ static void update_vttbr(struct kvm *kvm)
 {
 	phys_addr_t pgd_phys;
 	u64 vmid;
+	bool new_gen;
 
-	if (!need_new_vmid_gen(kvm))
+	read_lock(&kvm_vmid_lock);
+	new_gen = need_new_vmid_gen(kvm);
+	read_unlock(&kvm_vmid_lock);
+
+	if (!new_gen)
 		return;
 
-	spin_lock(&kvm_vmid_lock);
+	write_lock(&kvm_vmid_lock);
 
 	/*
 	 * We need to re-check the vmid_gen here to ensure that if another vcpu
@@ -485,7 +490,7 @@ static void update_vttbr(struct kvm *kvm)
 	 * use the same vmid.
 	 */
 	if (!need_new_vmid_gen(kvm)) {
-		spin_unlock(&kvm_vmid_lock);
+		write_unlock(&kvm_vmid_lock);
 		return;
 	}
 
@@ -519,7 +524,7 @@ static void update_vttbr(struct kvm *kvm)
 	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
 	kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid;
 
-	spin_unlock(&kvm_vmid_lock);
+	write_unlock(&kvm_vmid_lock);
 }
 
 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
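For reference, the need_new_vmid_gen() check that both paths rely on is not shown in this diff. Around this point in the tree it is essentially a lockless comparison of the VM's stamped generation against the global counter, roughly as below; this is reconstructed from memory, so consult virt/kvm/arm/arm.c at this commit for the authoritative definition.

/* Reconstructed for context; not part of this patch, treat as approximate. */
static bool need_new_vmid_gen(struct kvm *kvm)
{
	return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
}

The comparison itself is harmless; the patch's point is that where it runs relative to the write-locked update determines whether its result can be trusted.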