author		Marc Zyngier <maz@kernel.org>	2019-03-18 06:17:39 -0400
committer	Marc Zyngier <maz@kernel.org>	2019-08-18 13:38:52 -0400
commit		89489ee9ced8924f64f99c0470eae38e9e4e204b (patch)
tree		189c4994752e0ed0879127e2ee3d900fab480514 /virt/kvm
parent		cbfda481d87e92ce635e426099946cd413b251be (diff)
KVM: arm/arm64: vgic-its: Cache successful MSI->LPI translation
On a successful translation, preserve the parameters in the LPI
translation cache. Each new translation reuses the last slot in the
list, naturally evicting the least recently used entry.

Tested-by: Andre Przywara <andre.przywara@arm.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Diffstat (limited to 'virt/kvm')
-rw-r--r--	virt/kvm/arm/vgic/vgic-its.c	86
1 file changed, 86 insertions, 0 deletions
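The patch implements the translation cache as a small LRU list: a lookup hit moves the matching entry to the head of the list, and a new translation always overwrites the tail, which by construction is the least recently used slot. The following self-contained userspace C sketch illustrates the same move-to-front / evict-the-tail policy under simplified assumptions; it uses a fixed-size array instead of the kernel's list_head machinery, and every name in it (cache_entry, cache_lookup, cache_insert, move_to_front) is illustrative only, not part of the patch.

#include <stdbool.h>
#include <stdio.h>

#define CACHE_SIZE 4

/* One cached translation: (devid, eventid) -> intid. Illustrative only. */
struct cache_entry {
	unsigned int devid;
	unsigned int eventid;
	unsigned int intid;
	bool valid;
};

/* entries[0] is the most recently used slot, entries[CACHE_SIZE - 1] the LRU one. */
static struct cache_entry entries[CACHE_SIZE];

/* Promote entries[idx] to the front, shifting the intervening slots down by one. */
static void move_to_front(int idx)
{
	struct cache_entry tmp = entries[idx];
	int i;

	for (i = idx; i > 0; i--)
		entries[i] = entries[i - 1];
	entries[0] = tmp;
}

/* Lookup: a hit promotes the entry, mimicking the list_move() to the head. */
static bool cache_lookup(unsigned int devid, unsigned int eventid,
			 unsigned int *intid)
{
	int i;

	for (i = 0; i < CACHE_SIZE; i++) {
		if (!entries[i].valid)
			break;	/* nothing valid past the first empty slot */
		if (entries[i].devid != devid || entries[i].eventid != eventid)
			continue;
		move_to_front(i);
		*intid = entries[0].intid;
		return true;
	}
	return false;
}

/* Insert: always overwrite the last slot (the LRU entry), then promote it. */
static void cache_insert(unsigned int devid, unsigned int eventid,
			 unsigned int intid)
{
	entries[CACHE_SIZE - 1] = (struct cache_entry){
		.devid = devid, .eventid = eventid, .intid = intid, .valid = true,
	};
	move_to_front(CACHE_SIZE - 1);
}

int main(void)
{
	unsigned int intid;

	cache_insert(0, 1, 8192);
	cache_insert(0, 2, 8193);
	printf("(0,1): %s\n", cache_lookup(0, 1, &intid) ? "hit" : "miss");
	printf("(7,7): %s\n", cache_lookup(7, 7, &intid) ? "hit" : "miss");
	return 0;
}

In the patch itself the same policy is expressed with the kernel list API: a hit promotes the entry with list_move() to the head of dist->lpi_translation_cache, and a miss reuses the entry returned by list_last_entry().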
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index d3e90a9d0a7a..e61d3ea0ab40 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -535,6 +535,90 @@ static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
 	return 0;
 }
 
+static struct vgic_irq *__vgic_its_check_cache(struct vgic_dist *dist,
+					       phys_addr_t db,
+					       u32 devid, u32 eventid)
+{
+	struct vgic_translation_cache_entry *cte;
+
+	list_for_each_entry(cte, &dist->lpi_translation_cache, entry) {
+		/*
+		 * If we hit a NULL entry, there is nothing after this
+		 * point.
+		 */
+		if (!cte->irq)
+			break;
+
+		if (cte->db != db || cte->devid != devid ||
+		    cte->eventid != eventid)
+			continue;
+
+		/*
+		 * Move this entry to the head, as it is the most
+		 * recently used.
+		 */
+		if (!list_is_first(&cte->entry, &dist->lpi_translation_cache))
+			list_move(&cte->entry, &dist->lpi_translation_cache);
+
+		return cte->irq;
+	}
+
+	return NULL;
+}
+
+static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
+				       u32 devid, u32 eventid,
+				       struct vgic_irq *irq)
+{
+	struct vgic_dist *dist = &kvm->arch.vgic;
+	struct vgic_translation_cache_entry *cte;
+	unsigned long flags;
+	phys_addr_t db;
+
+	/* Do not cache a directly injected interrupt */
+	if (irq->hw)
+		return;
+
+	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
+
+	if (unlikely(list_empty(&dist->lpi_translation_cache)))
+		goto out;
+
+	/*
+	 * We could have raced with another CPU caching the same
+	 * translation behind our back, so let's check it is not
+	 * already in the cache.
+	 */
+	db = its->vgic_its_base + GITS_TRANSLATER;
+	if (__vgic_its_check_cache(dist, db, devid, eventid))
+		goto out;
+
+	/* Always reuse the last entry (LRU policy) */
+	cte = list_last_entry(&dist->lpi_translation_cache,
+			      typeof(*cte), entry);
+
+	/*
+	 * Caching the translation implies having an extra reference
+	 * to the interrupt, so drop the potential reference on what
+	 * was in the cache, and increment it on the new interrupt.
+	 */
+	if (cte->irq)
+		__vgic_put_lpi_locked(kvm, cte->irq);
+
+	vgic_get_irq_kref(irq);
+
+	cte->db = db;
+	cte->devid = devid;
+	cte->eventid = eventid;
+	cte->irq = irq;
+
+	/* Move the new translation to the head of the list */
+	list_move(&cte->entry, &dist->lpi_translation_cache);
+
+out:
+	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+}
+
 void vgic_its_invalidate_cache(struct kvm *kvm)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
@@ -578,6 +662,8 @@ int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
 	if (!vcpu->arch.vgic_cpu.lpis_enabled)
 		return -EBUSY;
 
+	vgic_its_cache_translation(kvm, its, devid, eventid, ite->irq);
+
 	*irq = ite->irq;
 	return 0;
 }
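The comment in vgic_its_cache_translation() spells out a reference-counting invariant: an occupied cache slot owns exactly one reference on the interrupt it caches, so replacing the occupant must drop the old reference (__vgic_put_lpi_locked()) and take a new one (vgic_get_irq_kref()). The toy userspace sketch below, with purely hypothetical names (obj, obj_get, obj_put, slot_replace), shows that drop-then-take pattern in isolation; it is an illustration of the idea, not kernel code.

#include <stdio.h>
#include <stdlib.h>

/* A refcounted object standing in for a vgic_irq. Illustrative only. */
struct obj {
	int refcount;
	int id;
};

static struct obj *obj_get(struct obj *o)
{
	o->refcount++;
	return o;
}

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0) {
		printf("freeing obj %d\n", o->id);
		free(o);
	}
}

/* Replace what the slot caches while keeping the one-reference-per-slot rule. */
static void slot_replace(struct obj **slot, struct obj *newobj)
{
	if (*slot)
		obj_put(*slot);		/* drop the reference held on the old occupant */
	*slot = obj_get(newobj);	/* the slot now owns a reference on newobj */
}

int main(void)
{
	struct obj *a = malloc(sizeof(*a));
	struct obj *b = malloc(sizeof(*b));
	struct obj *slot = NULL;

	*a = (struct obj){ .refcount = 1, .id = 1 };	/* caller's reference */
	*b = (struct obj){ .refcount = 1, .id = 2 };

	slot_replace(&slot, a);		/* a: refcount 2 */
	slot_replace(&slot, b);		/* a: refcount 1, b: refcount 2 */

	obj_put(a);			/* caller drops its reference: a is freed */
	obj_put(b);			/* caller drops its reference */
	obj_put(slot);			/* teardown drops the slot's reference: b is freed */
	return 0;
}

The key point is that slot_replace() preserves the "one reference per occupied slot" invariant across replacements.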