path: root/virt/kvm
author	Christoffer Dall <christoffer.dall@arm.com>	2018-12-11 09:26:31 -0500
committer	Marc Zyngier <marc.zyngier@arm.com>	2019-02-19 16:05:35 -0500
commit	e329fb75d519e3dc3eb11b22d5bb846516be3521 (patch)
tree	ff5077b7233df0346b04fb23e5408edde1cca00e /virt/kvm
parent	32f139551954512bfdf9d558341af453bb8b12b4 (diff)
KVM: arm/arm64: Factor out VMID into struct kvm_vmid
In preparation for nested virtualization, where we are going to have more than a single VMID per VM, let's factor out the VMID data into a separate VMID data structure and change the VMID allocator to operate on this new structure instead of using a struct kvm.

This also means that update_vttbr now becomes update_vmid, and that the vttbr itself is generated on the fly based on the stage 2 page table base address and the vmid.

We cache the physical address of the pgd when allocating the pgd to avoid doing the calculation on every entry to the guest and to avoid calling into potentially non-hyp-mapped code from hyp/EL2.

If we wanted to merge the VMID allocator with the arm64 ASID allocator at some point in the future, it should actually become easier to do that after this patch.

Note that to avoid mapping the kvm_vmid_bits variable into hyp, we simply forego the masking of the vmid value in kvm_get_vttbr and rely on update_vmid to always assign a valid vmid value (within the supported range).

Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
[maz: minor cleanups]
Reviewed-by: Julien Thierry <julien.thierry@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
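The arch-side definitions are outside this diffstat (which is limited to virt/kvm), but the new structure and the on-the-fly VTTBR construction described above look roughly like the following sketch; field names and the helper signature are illustrative rather than copied from the arch headers:

struct kvm_vmid {
	/* The VMID generation used for the virt. memory system */
	u64	vmid_gen;
	u32	vmid;
};

static inline u64 kvm_get_vttbr(struct kvm *kvm)
{
	struct kvm_vmid *vmid = &kvm->arch.vmid;
	u64 vmid_field, baddr;

	/* pgd_phys is cached when the stage 2 pgd is allocated (see mmu.c below) */
	baddr = kvm->arch.pgd_phys;
	vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
	/*
	 * No masking with the VMID width here: update_vmid() guarantees the
	 * value is already in range, so kvm_vmid_bits need not be mapped at hyp.
	 */
	return kvm_phys_to_vttbr(baddr) | vmid_field;
}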
Diffstat (limited to 'virt/kvm')
-rw-r--r--	virt/kvm/arm/arm.c	57
-rw-r--r--	virt/kvm/arm/mmu.c	7
2 files changed, 27 insertions(+), 37 deletions(-)
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 3dd240ea9e76..b77db673bb03 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -65,7 +65,6 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
 /* The VMID used in the VTTBR */
 static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
 static u32 kvm_next_vmid;
-static unsigned int kvm_vmid_bits __read_mostly;
 static DEFINE_SPINLOCK(kvm_vmid_lock);
 
 static bool vgic_present;
@@ -142,7 +141,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	kvm_vgic_early_init(kvm);
 
 	/* Mark the initial VMID generation invalid */
-	kvm->arch.vmid_gen = 0;
+	kvm->arch.vmid.vmid_gen = 0;
 
 	/* The maximum number of VCPUs is limited by the host's GIC model */
 	kvm->arch.max_vcpus = vgic_present ?
@@ -472,37 +471,31 @@ void force_vm_exit(const cpumask_t *mask)
 
 /**
  * need_new_vmid_gen - check that the VMID is still valid
- * @kvm: The VM's VMID to check
+ * @vmid: The VMID to check
  *
  * return true if there is a new generation of VMIDs being used
  *
- * The hardware supports only 256 values with the value zero reserved for the
- * host, so we check if an assigned value belongs to a previous generation,
- * which which requires us to assign a new value. If we're the first to use a
- * VMID for the new generation, we must flush necessary caches and TLBs on all
- * CPUs.
+ * The hardware supports a limited set of values with the value zero reserved
+ * for the host, so we check if an assigned value belongs to a previous
+ * generation, which requires us to assign a new value. If we're the
+ * first to use a VMID for the new generation, we must flush necessary caches
+ * and TLBs on all CPUs.
  */
-static bool need_new_vmid_gen(struct kvm *kvm)
+static bool need_new_vmid_gen(struct kvm_vmid *vmid)
 {
 	u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
 	smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
-	return unlikely(READ_ONCE(kvm->arch.vmid_gen) != current_vmid_gen);
+	return unlikely(READ_ONCE(vmid->vmid_gen) != current_vmid_gen);
 }
 
 /**
- * update_vttbr - Update the VTTBR with a valid VMID before the guest runs
- * @kvm	The guest that we are about to run
- *
- * Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the
- * VM has a valid VMID, otherwise assigns a new one and flushes corresponding
- * caches and TLBs.
+ * update_vmid - Update the vmid with a valid VMID for the current generation
+ * @kvm: The guest that struct vmid belongs to
+ * @vmid: The stage-2 VMID information struct
  */
-static void update_vttbr(struct kvm *kvm)
+static void update_vmid(struct kvm_vmid *vmid)
 {
-	phys_addr_t pgd_phys;
-	u64 vmid, cnp = kvm_cpu_has_cnp() ? VTTBR_CNP_BIT : 0;
-
-	if (!need_new_vmid_gen(kvm))
+	if (!need_new_vmid_gen(vmid))
 		return;
 
 	spin_lock(&kvm_vmid_lock);
@@ -512,7 +505,7 @@ static void update_vttbr(struct kvm *kvm)
 	 * already allocated a valid vmid for this vm, then this vcpu should
 	 * use the same vmid.
 	 */
-	if (!need_new_vmid_gen(kvm)) {
+	if (!need_new_vmid_gen(vmid)) {
 		spin_unlock(&kvm_vmid_lock);
 		return;
 	}
@@ -536,18 +529,12 @@ static void update_vttbr(struct kvm *kvm)
 		kvm_call_hyp(__kvm_flush_vm_context);
 	}
 
-	kvm->arch.vmid = kvm_next_vmid;
+	vmid->vmid = kvm_next_vmid;
 	kvm_next_vmid++;
-	kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
-
-	/* update vttbr to be used with the new vmid */
-	pgd_phys = virt_to_phys(kvm->arch.pgd);
-	BUG_ON(pgd_phys & ~kvm_vttbr_baddr_mask(kvm));
-	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
-	kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid | cnp;
+	kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1;
 
 	smp_wmb();
-	WRITE_ONCE(kvm->arch.vmid_gen, atomic64_read(&kvm_vmid_gen));
+	WRITE_ONCE(vmid->vmid_gen, atomic64_read(&kvm_vmid_gen));
 
 	spin_unlock(&kvm_vmid_lock);
 }
@@ -690,7 +677,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		 */
 		cond_resched();
 
-		update_vttbr(vcpu->kvm);
+		update_vmid(&vcpu->kvm->arch.vmid);
 
 		check_vcpu_requests(vcpu);
 
@@ -739,7 +726,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		 */
 		smp_store_mb(vcpu->mode, IN_GUEST_MODE);
 
-		if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) ||
+		if (ret <= 0 || need_new_vmid_gen(&vcpu->kvm->arch.vmid) ||
 		    kvm_request_pending(vcpu)) {
 			vcpu->mode = OUTSIDE_GUEST_MODE;
 			isb(); /* Ensure work in x_flush_hwstate is committed */
@@ -1417,10 +1404,6 @@ static inline void hyp_cpu_pm_exit(void)
 
 static int init_common_resources(void)
 {
-	/* set size of VMID supported by CPU */
-	kvm_vmid_bits = kvm_get_vmid_bits();
-	kvm_info("%d-bit VMID\n", kvm_vmid_bits);
-
 	kvm_set_ipa_limit();
 
 	return 0;
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index fbdf3ac2f001..f8dda452ea24 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -908,6 +908,7 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
  */
 int kvm_alloc_stage2_pgd(struct kvm *kvm)
 {
+	phys_addr_t pgd_phys;
 	pgd_t *pgd;
 
 	if (kvm->arch.pgd != NULL) {
@@ -920,7 +921,12 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
 	if (!pgd)
 		return -ENOMEM;
 
+	pgd_phys = virt_to_phys(pgd);
+	if (WARN_ON(pgd_phys & ~kvm_vttbr_baddr_mask(kvm)))
+		return -EINVAL;
+
 	kvm->arch.pgd = pgd;
+	kvm->arch.pgd_phys = pgd_phys;
 	return 0;
 }
 
@@ -1008,6 +1014,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
 		unmap_stage2_range(kvm, 0, kvm_phys_size(kvm));
 		pgd = READ_ONCE(kvm->arch.pgd);
 		kvm->arch.pgd = NULL;
+		kvm->arch.pgd_phys = 0;
 	}
 	spin_unlock(&kvm->mmu_lock);
 
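With the vttbr no longer cached in struct kvm, the hyp-side VM activation (also outside this diffstat) is expected to program VTTBR_EL2 from the helper on each guest entry. A minimal sketch, assuming a hyp-mapped activation routine like the one below (the function name is illustrative):

static void __hyp_text __activate_vm(struct kvm *kvm)
{
	/* Recompute the VTTBR from the cached pgd_phys and current VMID */
	write_sysreg(kvm_get_vttbr(kvm), vttbr_el2);
}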