Diffstat (limited to 'virt')

 virt/kvm/arm/arm.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index dba629c5f8ac..a4c1b76240df 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -63,7 +63,7 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
 static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
 static u32 kvm_next_vmid;
 static unsigned int kvm_vmid_bits __read_mostly;
-static DEFINE_SPINLOCK(kvm_vmid_lock);
+static DEFINE_RWLOCK(kvm_vmid_lock);
 
 static bool vgic_present;
 
@@ -473,11 +473,16 @@ static void update_vttbr(struct kvm *kvm)
 {
 	phys_addr_t pgd_phys;
 	u64 vmid;
+	bool new_gen;
 
-	if (!need_new_vmid_gen(kvm))
+	read_lock(&kvm_vmid_lock);
+	new_gen = need_new_vmid_gen(kvm);
+	read_unlock(&kvm_vmid_lock);
+
+	if (!new_gen)
 		return;
 
-	spin_lock(&kvm_vmid_lock);
+	write_lock(&kvm_vmid_lock);
 
 	/*
 	 * We need to re-check the vmid_gen here to ensure that if another vcpu
@@ -485,7 +490,7 @@ static void update_vttbr(struct kvm *kvm)
 	 * use the same vmid.
 	 */
 	if (!need_new_vmid_gen(kvm)) {
-		spin_unlock(&kvm_vmid_lock);
+		write_unlock(&kvm_vmid_lock);
 		return;
 	}
 
@@ -519,7 +524,7 @@ static void update_vttbr(struct kvm *kvm)
 	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
 	kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid;
 
-	spin_unlock(&kvm_vmid_lock);
+	write_unlock(&kvm_vmid_lock);
 }
 
 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
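
What the change amounts to: need_new_vmid_gen() is now evaluated under a read lock, so vcpus on the common path no longer serialize against each other in update_vttbr(), and the write lock is only taken, with the check repeated, when a new VMID generation actually has to be assigned. Below is a minimal user-space sketch of that double-checked rwlock pattern, not the kernel code itself; it uses POSIX rwlocks, and the names generation_is_stale() and update_if_stale() are made up for illustration only.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t gen_lock = PTHREAD_RWLOCK_INITIALIZER;
static unsigned long current_gen = 1;	/* rough analogue of kvm_vmid_gen */

/* Hypothetical stand-in for need_new_vmid_gen(). */
static bool generation_is_stale(unsigned long seen_gen)
{
	return seen_gen != current_gen;
}

/* Refresh *seen_gen only when it is stale, mirroring the patched flow. */
static void update_if_stale(unsigned long *seen_gen)
{
	bool stale;

	/* Fast path: shared (read) lock, so callers do not serialize here. */
	pthread_rwlock_rdlock(&gen_lock);
	stale = generation_is_stale(*seen_gen);
	pthread_rwlock_unlock(&gen_lock);

	if (!stale)
		return;

	/* Slow path: exclusive (write) lock for the actual update. */
	pthread_rwlock_wrlock(&gen_lock);

	/*
	 * Re-check under the write lock: another thread may already have
	 * refreshed this generation after we dropped the read lock.
	 */
	if (!generation_is_stale(*seen_gen)) {
		pthread_rwlock_unlock(&gen_lock);
		return;
	}

	*seen_gen = current_gen;	/* the "update" step */

	pthread_rwlock_unlock(&gen_lock);
}

int main(void)
{
	unsigned long my_gen = 0;

	update_if_stale(&my_gen);	/* stale: takes the write path */
	update_if_stale(&my_gen);	/* current: stays on the read path */
	printf("generation now %lu\n", my_gen);
	return 0;
}

The recheck after acquiring the write lock is what keeps the optimisation safe: between dropping the read lock and obtaining the write lock, another thread may have done the same update, and redoing it (in the kernel case, rolling the VMID again) must be avoided.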