author    Linus Torvalds <torvalds@linux-foundation.org> 2019-09-18 12:49:13 -0400
committer Linus Torvalds <torvalds@linux-foundation.org> 2019-09-18 12:49:13 -0400
commit    fe38bd6862074c0a2b9be7f31f043aaa70b2af5f (patch)
tree      34edf3f546188b108c513b3f8499e45afe37aad9 /virt/kvm/arm
parent    404e634fdb96a3c99c7517353bfafbd88e04ab41 (diff)
parent    fb3925d06c285e1acb248addc5d80b33ea771b0f (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:

 "s390:
   - ioctl hardening
   - selftests

  ARM:
   - ITS translation cache
   - support for 512 vCPUs
   - various cleanups and bugfixes

  PPC:
   - various minor fixes and preparation

  x86:
   - bugfixes all over the place (posted interrupts, SVM, emulation
     corner cases, blocked INIT)
   - some IPI optimizations"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (75 commits)
  KVM: X86: Use IPI shorthands in kvm guest when support
  KVM: x86: Fix INIT signal handling in various CPU states
  KVM: VMX: Introduce exit reason for receiving INIT signal on guest-mode
  KVM: VMX: Stop the preemption timer during vCPU reset
  KVM: LAPIC: Micro optimize IPI latency
  kvm: Nested KVM MMUs need PAE root too
  KVM: x86: set ctxt->have_exception in x86_decode_insn()
  KVM: x86: always stop emulation on page fault
  KVM: nVMX: trace nested VM-Enter failures detected by H/W
  KVM: nVMX: add tracepoint for failed nested VM-Enter
  x86: KVM: svm: Fix a check in nested_svm_vmrun()
  KVM: x86: Return to userspace with internal error on unexpected exit reason
  KVM: x86: Add kvm_emulate_{rd,wr}msr() to consolidate VXM/SVM code
  KVM: x86: Refactor up kvm_{g,s}et_msr() to simplify callers
  doc: kvm: Fix return description of KVM_SET_MSRS
  KVM: X86: Tune PLE Window tracepoint
  KVM: VMX: Change ple_window type to unsigned int
  KVM: X86: Remove tailing newline for tracepoints
  KVM: X86: Trace vcpu_id for vmexit
  KVM: x86: Manually calculate reserved bits when loading PDPTRS
  ...
Diffstat (limited to 'virt/kvm/arm')

 virt/kvm/arm/arm.c               |   2
 virt/kvm/arm/vgic/vgic-init.c    |   8
 virt/kvm/arm/vgic/vgic-irqfd.c   |  36
 virt/kvm/arm/vgic/vgic-its.c     | 207
 virt/kvm/arm/vgic/vgic-mmio-v3.c |  85
 virt/kvm/arm/vgic/vgic-v2.c      |   7
 virt/kvm/arm/vgic/vgic-v3.c      |   7
 virt/kvm/arm/vgic/vgic.c         |  26
 virt/kvm/arm/vgic/vgic.h         |   5
 9 files changed, 302 insertions(+), 81 deletions(-)
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 35a069815baf..86c6aa1cb58e 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -196,6 +196,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_MP_STATE:
 	case KVM_CAP_IMMEDIATE_EXIT:
 	case KVM_CAP_VCPU_EVENTS:
+	case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2:
 		r = 1;
 		break;
 	case KVM_CAP_ARM_SET_DEVICE_ADDR:
@@ -888,6 +889,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
 
 	irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
 	vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
+	vcpu_idx += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
 	irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;
 
 	trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);
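
The arm.c hunk above implements KVM_CAP_ARM_IRQ_LINE_LAYOUT_2: a second vcpu field in the KVM_IRQ_LINE irq word lets vcpu_idx exceed 255, which is what the "support for 512 vCPUs" item in the merge message relies on. Below is a minimal standalone sketch of the decoding; the shift/mask values are assumed to mirror the KVM_ARM_IRQ_* constants in the uapi headers (vcpu: bits 23:16, vcpu2: bits 31:28) and are not part of this diff.

#include <stdint.h>
#include <stdio.h>

/* assumed to match the KVM_ARM_IRQ_* uapi constants */
#define IRQ_VCPU_SHIFT	16
#define IRQ_VCPU_MASK	0xff
#define IRQ_VCPU2_SHIFT	28
#define IRQ_VCPU2_MASK	0xf
#define IRQ_NUM_SHIFT	0
#define IRQ_NUM_MASK	0xffff

int main(void)
{
	/* vcpu2 = 1, vcpu = 44 -> vcpu_idx = 1 * 256 + 44 = 300 */
	uint32_t irq = (1u << IRQ_VCPU2_SHIFT) | (44u << IRQ_VCPU_SHIFT) | 27u;
	uint32_t vcpu_idx = (irq >> IRQ_VCPU_SHIFT) & IRQ_VCPU_MASK;

	/* same arithmetic as the added line in kvm_vm_ioctl_irq_line() */
	vcpu_idx += ((irq >> IRQ_VCPU2_SHIFT) & IRQ_VCPU2_MASK) * (IRQ_VCPU_MASK + 1);
	printf("vcpu_idx=%u irq_num=%u\n", vcpu_idx,
	       (irq >> IRQ_NUM_SHIFT) & IRQ_NUM_MASK);
	return 0;
}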
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index e621b5d45b27..6f50c429196d 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -54,6 +54,7 @@ void kvm_vgic_early_init(struct kvm *kvm)
 	struct vgic_dist *dist = &kvm->arch.vgic;
 
 	INIT_LIST_HEAD(&dist->lpi_list_head);
+	INIT_LIST_HEAD(&dist->lpi_translation_cache);
 	raw_spin_lock_init(&dist->lpi_list_lock);
 }
 
@@ -199,7 +200,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 	int i;
 
 	vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
-	vgic_cpu->sgi_iodev.base_addr = VGIC_ADDR_UNDEF;
 
 	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
 	raw_spin_lock_init(&vgic_cpu->ap_list_lock);
@@ -304,6 +304,7 @@ int vgic_init(struct kvm *kvm)
 	}
 
 	if (vgic_has_its(kvm)) {
+		vgic_lpi_translation_cache_init(kvm);
 		ret = vgic_v4_init(kvm);
 		if (ret)
 			goto out;
@@ -345,6 +346,9 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
 		INIT_LIST_HEAD(&dist->rd_regions);
 	}
 
+	if (vgic_has_its(kvm))
+		vgic_lpi_translation_cache_destroy(kvm);
+
 	if (vgic_supports_direct_msis(kvm))
 		vgic_v4_teardown(kvm);
 }
@@ -515,7 +519,7 @@ int kvm_vgic_hyp_init(void)
 		break;
 	default:
 		ret = -ENODEV;
-	};
+	}
 
 	if (ret)
 		return ret;
diff --git a/virt/kvm/arm/vgic/vgic-irqfd.c b/virt/kvm/arm/vgic/vgic-irqfd.c
index c9304b88e720..d8cdfea5cc96 100644
--- a/virt/kvm/arm/vgic/vgic-irqfd.c
+++ b/virt/kvm/arm/vgic/vgic-irqfd.c
@@ -66,6 +66,15 @@ out:
 	return r;
 }
 
+static void kvm_populate_msi(struct kvm_kernel_irq_routing_entry *e,
+			     struct kvm_msi *msi)
+{
+	msi->address_lo = e->msi.address_lo;
+	msi->address_hi = e->msi.address_hi;
+	msi->data = e->msi.data;
+	msi->flags = e->msi.flags;
+	msi->devid = e->msi.devid;
+}
 /**
  * kvm_set_msi: inject the MSI corresponding to the
  * MSI routing entry
@@ -79,21 +88,36 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
 {
 	struct kvm_msi msi;
 
-	msi.address_lo = e->msi.address_lo;
-	msi.address_hi = e->msi.address_hi;
-	msi.data = e->msi.data;
-	msi.flags = e->msi.flags;
-	msi.devid = e->msi.devid;
-
 	if (!vgic_has_its(kvm))
 		return -ENODEV;
 
 	if (!level)
 		return -1;
 
+	kvm_populate_msi(e, &msi);
 	return vgic_its_inject_msi(kvm, &msi);
 }
 
+/**
+ * kvm_arch_set_irq_inatomic: fast-path for irqfd injection
+ *
+ * Currently only direct MSI injection is supported.
+ */
+int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
+			      struct kvm *kvm, int irq_source_id, int level,
+			      bool line_status)
+{
+	if (e->type == KVM_IRQ_ROUTING_MSI && vgic_has_its(kvm) && level) {
+		struct kvm_msi msi;
+
+		kvm_populate_msi(e, &msi);
+		if (!vgic_its_inject_cached_translation(kvm, &msi))
+			return 0;
+	}
+
+	return -EWOULDBLOCK;
+}
+
 int kvm_vgic_setup_default_irq_routing(struct kvm *kvm)
 {
 	struct kvm_irq_routing_entry *entries;
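
The kvm_arch_set_irq_inatomic() added above is the irqfd fast path: it runs in a context that cannot sleep, so it injects only when the doorbell/devid/eventid triple is already in the translation cache, and returns -EWOULDBLOCK on a miss so the generic irqfd code falls back to the schedulable kvm_set_msi() path. A rough userspace model of that contract (the cache_hit flag and function names are illustrative, not kernel API):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool cache_hit = true;	/* stand-in for vgic_its_check_cache() */

static int set_irq_inatomic(void)
{
	/* mirrors the convention above: 0 on a hit, -EWOULDBLOCK on a miss */
	return cache_hit ? 0 : -EWOULDBLOCK;
}

static void inject(void)
{
	if (set_irq_inatomic() == -EWOULDBLOCK)
		printf("miss: defer to the sleeping injection path\n");
	else
		printf("hit: injected without taking the ITS mutex\n");
}

int main(void)
{
	inject();		/* fast path succeeds */
	cache_hit = false;
	inject();		/* falls back */
	return 0;
}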
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 482036612adf..2be6b66b3856 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -138,6 +138,14 @@ struct its_ite {
 	u32 event_id;
 };
 
+struct vgic_translation_cache_entry {
+	struct list_head	entry;
+	phys_addr_t		db;
+	u32			devid;
+	u32			eventid;
+	struct vgic_irq		*irq;
+};
+
 /**
  * struct vgic_its_abi - ITS abi ops and settings
  * @cte_esz: collection table entry size
@@ -527,6 +535,127 @@ static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
 	return 0;
 }
 
+static struct vgic_irq *__vgic_its_check_cache(struct vgic_dist *dist,
+					       phys_addr_t db,
+					       u32 devid, u32 eventid)
+{
+	struct vgic_translation_cache_entry *cte;
+
+	list_for_each_entry(cte, &dist->lpi_translation_cache, entry) {
+		/*
+		 * If we hit a NULL entry, there is nothing after this
+		 * point.
+		 */
+		if (!cte->irq)
+			break;
+
+		if (cte->db != db || cte->devid != devid ||
+		    cte->eventid != eventid)
+			continue;
+
+		/*
+		 * Move this entry to the head, as it is the most
+		 * recently used.
+		 */
+		if (!list_is_first(&cte->entry, &dist->lpi_translation_cache))
+			list_move(&cte->entry, &dist->lpi_translation_cache);
+
+		return cte->irq;
+	}
+
+	return NULL;
+}
+
+static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db,
+					     u32 devid, u32 eventid)
+{
+	struct vgic_dist *dist = &kvm->arch.vgic;
+	struct vgic_irq *irq;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
+	irq = __vgic_its_check_cache(dist, db, devid, eventid);
+	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+
+	return irq;
+}
+
+static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
+				       u32 devid, u32 eventid,
+				       struct vgic_irq *irq)
+{
+	struct vgic_dist *dist = &kvm->arch.vgic;
+	struct vgic_translation_cache_entry *cte;
+	unsigned long flags;
+	phys_addr_t db;
+
+	/* Do not cache a directly injected interrupt */
+	if (irq->hw)
+		return;
+
+	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
+
+	if (unlikely(list_empty(&dist->lpi_translation_cache)))
+		goto out;
+
+	/*
+	 * We could have raced with another CPU caching the same
+	 * translation behind our back, so let's check it is not
+	 * already in there
+	 */
+	db = its->vgic_its_base + GITS_TRANSLATER;
+	if (__vgic_its_check_cache(dist, db, devid, eventid))
+		goto out;
+
+	/* Always reuse the last entry (LRU policy) */
+	cte = list_last_entry(&dist->lpi_translation_cache,
+			      typeof(*cte), entry);
+
+	/*
+	 * Caching the translation implies having an extra reference
+	 * to the interrupt, so drop the potential reference on what
+	 * was in the cache, and increment it on the new interrupt.
+	 */
+	if (cte->irq)
+		__vgic_put_lpi_locked(kvm, cte->irq);
+
+	vgic_get_irq_kref(irq);
+
+	cte->db		= db;
+	cte->devid	= devid;
+	cte->eventid	= eventid;
+	cte->irq	= irq;
+
+	/* Move the new translation to the head of the list */
+	list_move(&cte->entry, &dist->lpi_translation_cache);
+
+out:
+	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+}
+
+void vgic_its_invalidate_cache(struct kvm *kvm)
+{
+	struct vgic_dist *dist = &kvm->arch.vgic;
+	struct vgic_translation_cache_entry *cte;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
+
+	list_for_each_entry(cte, &dist->lpi_translation_cache, entry) {
+		/*
+		 * If we hit a NULL entry, there is nothing after this
+		 * point.
+		 */
+		if (!cte->irq)
+			break;
+
+		__vgic_put_lpi_locked(kvm, cte->irq);
+		cte->irq = NULL;
+	}
+
+	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+}
+
 int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
 			 u32 devid, u32 eventid, struct vgic_irq **irq)
 {
@@ -547,6 +676,8 @@ int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
 	if (!vcpu->arch.vgic_cpu.lpis_enabled)
 		return -EBUSY;
 
+	vgic_its_cache_translation(kvm, its, devid, eventid, ite->irq);
+
 	*irq = ite->irq;
 	return 0;
 }
@@ -608,6 +739,25 @@ static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
 	return 0;
 }
 
+int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi)
+{
+	struct vgic_irq *irq;
+	unsigned long flags;
+	phys_addr_t db;
+
+	db = (u64)msi->address_hi << 32 | msi->address_lo;
+	irq = vgic_its_check_cache(kvm, db, msi->devid, msi->data);
+
+	if (!irq)
+		return -1;
+
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
+	irq->pending_latch = true;
+	vgic_queue_irq_unlock(kvm, irq, flags);
+
+	return 0;
+}
+
 /*
  * Queries the KVM IO bus framework to get the ITS pointer from the given
  * doorbell address.
@@ -619,6 +769,9 @@ int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
 	struct vgic_its *its;
 	int ret;
 
+	if (!vgic_its_inject_cached_translation(kvm, msi))
+		return 1;
+
 	its = vgic_msi_to_its(kvm, msi);
 	if (IS_ERR(its))
 		return PTR_ERR(its);
@@ -691,6 +844,8 @@ static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
 		 * don't bother here since we clear the ITTE anyway and the
 		 * pending state is a property of the ITTE struct.
 		 */
+		vgic_its_invalidate_cache(kvm);
+
 		its_free_ite(kvm, ite);
 		return 0;
 	}
@@ -726,6 +881,8 @@ static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
 	ite->collection = collection;
 	vcpu = kvm_get_vcpu(kvm, collection->target_addr);
 
+	vgic_its_invalidate_cache(kvm);
+
 	return update_affinity(ite->irq, vcpu);
 }
 
@@ -954,6 +1111,8 @@ static void vgic_its_free_device(struct kvm *kvm, struct its_device *device)
 	list_for_each_entry_safe(ite, temp, &device->itt_head, ite_list)
 		its_free_ite(kvm, ite);
 
+	vgic_its_invalidate_cache(kvm);
+
 	list_del(&device->dev_list);
 	kfree(device);
 }
@@ -1059,6 +1218,7 @@ static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
 
 	if (!valid) {
 		vgic_its_free_collection(its, coll_id);
+		vgic_its_invalidate_cache(kvm);
 	} else {
 		collection = find_collection(its, coll_id);
 
@@ -1207,6 +1367,8 @@ static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
 		vgic_put_irq(kvm, irq);
 	}
 
+	vgic_its_invalidate_cache(kvm);
+
 	kfree(intids);
 	return 0;
 }
@@ -1557,6 +1719,8 @@ static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
 		goto out;
 
 	its->enabled = !!(val & GITS_CTLR_ENABLE);
+	if (!its->enabled)
+		vgic_its_invalidate_cache(kvm);
 
 	/*
 	 * Try to process any pending commands. This function bails out early
@@ -1657,6 +1821,47 @@ out:
 	return ret;
 }
 
+/* Default is 16 cached LPIs per vcpu */
+#define LPI_DEFAULT_PCPU_CACHE_SIZE	16
+
+void vgic_lpi_translation_cache_init(struct kvm *kvm)
+{
+	struct vgic_dist *dist = &kvm->arch.vgic;
+	unsigned int sz;
+	int i;
+
+	if (!list_empty(&dist->lpi_translation_cache))
+		return;
+
+	sz = atomic_read(&kvm->online_vcpus) * LPI_DEFAULT_PCPU_CACHE_SIZE;
+
+	for (i = 0; i < sz; i++) {
+		struct vgic_translation_cache_entry *cte;
+
+		/* An allocation failure is not fatal */
+		cte = kzalloc(sizeof(*cte), GFP_KERNEL);
+		if (WARN_ON(!cte))
+			break;
+
+		INIT_LIST_HEAD(&cte->entry);
+		list_add(&cte->entry, &dist->lpi_translation_cache);
+	}
+}
+
+void vgic_lpi_translation_cache_destroy(struct kvm *kvm)
+{
+	struct vgic_dist *dist = &kvm->arch.vgic;
+	struct vgic_translation_cache_entry *cte, *tmp;
+
+	vgic_its_invalidate_cache(kvm);
+
+	list_for_each_entry_safe(cte, tmp,
+				 &dist->lpi_translation_cache, entry) {
+		list_del(&cte->entry);
+		kfree(cte);
+	}
+}
+
 #define INITIAL_BASER_VALUE						  \
 	(GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb)		| \
 	 GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner)	| \
@@ -1685,6 +1890,8 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
 			kfree(its);
 			return ret;
 		}
+
+		vgic_lpi_translation_cache_init(dev->kvm);
 	}
 
 	mutex_init(&its->its_lock);
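
The cache added to vgic-its.c above is a move-to-front LRU over a preallocated pool (LPI_DEFAULT_PCPU_CACHE_SIZE = 16 entries per vCPU): a hit moves the entry to the list head, a new translation recycles the tail, and a NULL irq marks the unused suffix, so lookups stop at the first empty slot. A self-contained userspace sketch of the same policy, using an array in place of the kernel's intrusive list (all names illustrative):

#include <stdio.h>

#define CACHE_SZ 4

struct entry { unsigned devid, eventid; int valid; };
static struct entry cache[CACHE_SZ];	/* index 0 plays the list head */

static void move_to_front(int i)
{
	struct entry e = cache[i];

	for (; i > 0; i--)
		cache[i] = cache[i - 1];
	cache[0] = e;
}

static int lookup(unsigned devid, unsigned eventid)
{
	for (int i = 0; i < CACHE_SZ; i++) {
		if (!cache[i].valid)
			break;	/* nothing valid past the first hole */
		if (cache[i].devid == devid && cache[i].eventid == eventid) {
			move_to_front(i);	/* most recently used */
			return 1;
		}
	}
	return 0;
}

static void insert(unsigned devid, unsigned eventid)
{
	/* recycle the least recently used entry, i.e. the tail */
	cache[CACHE_SZ - 1] = (struct entry){ devid, eventid, 1 };
	move_to_front(CACHE_SZ - 1);
}

int main(void)
{
	insert(0, 1);
	insert(0, 2);
	printf("hit=%d\n", lookup(0, 1));	/* 1; (0,1) moves to the head */
	printf("hit=%d\n", lookup(3, 9));	/* 0; falls back to the full ITS walk */
	return 0;
}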
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index c45e2d7e942f..7dfd15dbb308 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -192,8 +192,10 @@ static void vgic_mmio_write_v3r_ctlr(struct kvm_vcpu *vcpu,
 
 	vgic_cpu->lpis_enabled = val & GICR_CTLR_ENABLE_LPIS;
 
-	if (was_enabled && !vgic_cpu->lpis_enabled)
+	if (was_enabled && !vgic_cpu->lpis_enabled) {
 		vgic_flush_pending_lpis(vcpu);
+		vgic_its_invalidate_cache(vcpu->kvm);
+	}
 
 	if (!was_enabled && vgic_cpu->lpis_enabled)
 		vgic_enable_lpis(vcpu);
@@ -515,7 +517,8 @@ static const struct vgic_register_region vgic_v3_dist_registers[] = {
 		VGIC_ACCESS_32bit),
 };
 
-static const struct vgic_register_region vgic_v3_rdbase_registers[] = {
+static const struct vgic_register_region vgic_v3_rd_registers[] = {
+	/* RD_base registers */
 	REGISTER_DESC_WITH_LENGTH(GICR_CTLR,
 		vgic_mmio_read_v3r_ctlr, vgic_mmio_write_v3r_ctlr, 4,
 		VGIC_ACCESS_32bit),
@@ -540,44 +543,42 @@ static const struct vgic_register_region vgic_v3_rdbase_registers[] = {
 	REGISTER_DESC_WITH_LENGTH(GICR_IDREGS,
 		vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
 		VGIC_ACCESS_32bit),
-};
-
-static const struct vgic_register_region vgic_v3_sgibase_registers[] = {
-	REGISTER_DESC_WITH_LENGTH(GICR_IGROUPR0,
+	/* SGI_base registers */
+	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IGROUPR0,
 		vgic_mmio_read_group, vgic_mmio_write_group, 4,
 		VGIC_ACCESS_32bit),
-	REGISTER_DESC_WITH_LENGTH(GICR_ISENABLER0,
+	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_ISENABLER0,
 		vgic_mmio_read_enable, vgic_mmio_write_senable, 4,
 		VGIC_ACCESS_32bit),
-	REGISTER_DESC_WITH_LENGTH(GICR_ICENABLER0,
+	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_ICENABLER0,
 		vgic_mmio_read_enable, vgic_mmio_write_cenable, 4,
 		VGIC_ACCESS_32bit),
-	REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_ISPENDR0,
+	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISPENDR0,
 		vgic_mmio_read_pending, vgic_mmio_write_spending,
 		vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 4,
 		VGIC_ACCESS_32bit),
-	REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_ICPENDR0,
+	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICPENDR0,
 		vgic_mmio_read_pending, vgic_mmio_write_cpending,
 		vgic_mmio_read_raz, vgic_mmio_uaccess_write_wi, 4,
 		VGIC_ACCESS_32bit),
-	REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_ISACTIVER0,
+	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISACTIVER0,
 		vgic_mmio_read_active, vgic_mmio_write_sactive,
 		NULL, vgic_mmio_uaccess_write_sactive,
 		4, VGIC_ACCESS_32bit),
-	REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_ICACTIVER0,
+	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICACTIVER0,
 		vgic_mmio_read_active, vgic_mmio_write_cactive,
 		NULL, vgic_mmio_uaccess_write_cactive,
 		4, VGIC_ACCESS_32bit),
-	REGISTER_DESC_WITH_LENGTH(GICR_IPRIORITYR0,
+	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IPRIORITYR0,
 		vgic_mmio_read_priority, vgic_mmio_write_priority, 32,
 		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
-	REGISTER_DESC_WITH_LENGTH(GICR_ICFGR0,
+	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_ICFGR0,
 		vgic_mmio_read_config, vgic_mmio_write_config, 8,
 		VGIC_ACCESS_32bit),
-	REGISTER_DESC_WITH_LENGTH(GICR_IGRPMODR0,
+	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IGRPMODR0,
 		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
 		VGIC_ACCESS_32bit),
-	REGISTER_DESC_WITH_LENGTH(GICR_NSACR,
+	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_NSACR,
 		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
 		VGIC_ACCESS_32bit),
 };
@@ -607,9 +608,8 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
 	struct vgic_dist *vgic = &kvm->arch.vgic;
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
-	struct vgic_io_device *sgi_dev = &vcpu->arch.vgic_cpu.sgi_iodev;
 	struct vgic_redist_region *rdreg;
-	gpa_t rd_base, sgi_base;
+	gpa_t rd_base;
 	int ret;
 
 	if (!IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr))
@@ -631,52 +631,31 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
 	vgic_cpu->rdreg = rdreg;
 
 	rd_base = rdreg->base + rdreg->free_index * KVM_VGIC_V3_REDIST_SIZE;
-	sgi_base = rd_base + SZ_64K;
 
 	kvm_iodevice_init(&rd_dev->dev, &kvm_io_gic_ops);
 	rd_dev->base_addr = rd_base;
 	rd_dev->iodev_type = IODEV_REDIST;
-	rd_dev->regions = vgic_v3_rdbase_registers;
-	rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rdbase_registers);
+	rd_dev->regions = vgic_v3_rd_registers;
+	rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rd_registers);
 	rd_dev->redist_vcpu = vcpu;
 
 	mutex_lock(&kvm->slots_lock);
 	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base,
-				      SZ_64K, &rd_dev->dev);
+				      2 * SZ_64K, &rd_dev->dev);
 	mutex_unlock(&kvm->slots_lock);
 
 	if (ret)
 		return ret;
 
-	kvm_iodevice_init(&sgi_dev->dev, &kvm_io_gic_ops);
-	sgi_dev->base_addr = sgi_base;
-	sgi_dev->iodev_type = IODEV_REDIST;
-	sgi_dev->regions = vgic_v3_sgibase_registers;
-	sgi_dev->nr_regions = ARRAY_SIZE(vgic_v3_sgibase_registers);
-	sgi_dev->redist_vcpu = vcpu;
-
-	mutex_lock(&kvm->slots_lock);
-	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, sgi_base,
-				      SZ_64K, &sgi_dev->dev);
-	if (ret) {
-		kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
-					  &rd_dev->dev);
-		goto out;
-	}
-
 	rdreg->free_index++;
-out:
-	mutex_unlock(&kvm->slots_lock);
-	return ret;
+	return 0;
 }
 
 static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
 {
 	struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
-	struct vgic_io_device *sgi_dev = &vcpu->arch.vgic_cpu.sgi_iodev;
 
 	kvm_io_bus_unregister_dev(vcpu->kvm, KVM_MMIO_BUS, &rd_dev->dev);
-	kvm_io_bus_unregister_dev(vcpu->kvm, KVM_MMIO_BUS, &sgi_dev->dev);
 }
 
 static int vgic_register_all_redist_iodevs(struct kvm *kvm)
@@ -826,8 +805,8 @@ int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
 		iodev.base_addr = 0;
 		break;
 	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:{
-		iodev.regions = vgic_v3_rdbase_registers;
-		iodev.nr_regions = ARRAY_SIZE(vgic_v3_rdbase_registers);
+		iodev.regions = vgic_v3_rd_registers;
+		iodev.nr_regions = ARRAY_SIZE(vgic_v3_rd_registers);
 		iodev.base_addr = 0;
 		break;
 	}
@@ -985,21 +964,11 @@ int vgic_v3_redist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
 			   int offset, u32 *val)
 {
 	struct vgic_io_device rd_dev = {
-		.regions = vgic_v3_rdbase_registers,
-		.nr_regions = ARRAY_SIZE(vgic_v3_rdbase_registers),
+		.regions = vgic_v3_rd_registers,
+		.nr_regions = ARRAY_SIZE(vgic_v3_rd_registers),
 	};
 
-	struct vgic_io_device sgi_dev = {
-		.regions = vgic_v3_sgibase_registers,
-		.nr_regions = ARRAY_SIZE(vgic_v3_sgibase_registers),
-	};
-
-	/* SGI_base is the next 64K frame after RD_base */
-	if (offset >= SZ_64K)
-		return vgic_uaccess(vcpu, &sgi_dev, is_write, offset - SZ_64K,
-				    val);
-	else
-		return vgic_uaccess(vcpu, &rd_dev, is_write, offset, val);
+	return vgic_uaccess(vcpu, &rd_dev, is_write, offset, val);
 }
 
 int vgic_v3_line_level_info_uaccess(struct kvm_vcpu *vcpu, bool is_write,
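
The vgic-mmio-v3.c rework above folds the separate SGI_base iodev into the RD_base one: the SGI-frame registers are declared at SZ_64K + offset in a single vgic_v3_rd_registers table and the device is registered over 2 * SZ_64K, so one offset lookup now serves both 64K frames. A small illustration of that dispatch; the two-entry table is a toy, and the 0x0100 offset of GICR_ISENABLER0 within the SGI frame is taken from the GICv3 architecture layout:

#include <stdio.h>

#define SZ_64K 0x10000u

struct region { unsigned offset; const char *name; };

/* one table covering both frames, as in vgic_v3_rd_registers[] */
static const struct region rd_registers[] = {
	{ 0x0000,          "GICR_CTLR" },	/* RD_base frame */
	{ SZ_64K + 0x0100, "GICR_ISENABLER0" },	/* SGI_base frame */
};

static const char *find(unsigned offset)
{
	for (unsigned i = 0; i < sizeof(rd_registers) / sizeof(rd_registers[0]); i++)
		if (rd_registers[i].offset == offset)
			return rd_registers[i].name;
	return "unmapped";
}

int main(void)
{
	printf("%s\n", find(0x0000));		/* hits the RD_base frame */
	printf("%s\n", find(SZ_64K + 0x0100));	/* hits the SGI_base frame */
	return 0;
}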
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index b00aa304c260..621cc168fe3f 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -357,10 +357,11 @@ out:
 DEFINE_STATIC_KEY_FALSE(vgic_v2_cpuif_trap);
 
 /**
- * vgic_v2_probe - probe for a GICv2 compatible interrupt controller in DT
- * @node: pointer to the DT node
+ * vgic_v2_probe - probe for a VGICv2 compatible interrupt controller
+ * @info: pointer to the GIC description
  *
- * Returns 0 if a GICv2 has been found, returns an error code otherwise
+ * Returns 0 if the VGICv2 has been probed successfully, returns an error code
+ * otherwise
  */
 int vgic_v2_probe(const struct gic_kvm_info *info)
 {
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index a4ad431c92a9..8d69f007dd0c 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -573,10 +573,11 @@ static int __init early_gicv4_enable(char *buf)
 early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);
 
 /**
- * vgic_v3_probe - probe for a GICv3 compatible interrupt controller in DT
- * @node: pointer to the DT node
+ * vgic_v3_probe - probe for a VGICv3 compatible interrupt controller
+ * @info: pointer to the GIC description
  *
- * Returns 0 if a GICv3 has been found, returns an error code otherwise
+ * Returns 0 if the VGICv3 has been probed successfully, returns an error code
+ * otherwise
  */
 int vgic_v3_probe(const struct gic_kvm_info *info)
 {
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index e7bde65ba67c..45a870cb63f5 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -119,6 +119,22 @@ static void vgic_irq_release(struct kref *ref)
 {
 }
 
+/*
+ * Drop the refcount on the LPI. Must be called with lpi_list_lock held.
+ */
+void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq)
+{
+	struct vgic_dist *dist = &kvm->arch.vgic;
+
+	if (!kref_put(&irq->refcount, vgic_irq_release))
+		return;
+
+	list_del(&irq->lpi_list);
+	dist->lpi_list_count--;
+
+	kfree(irq);
+}
+
 void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
@@ -128,16 +144,8 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
 		return;
 
 	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
-	if (!kref_put(&irq->refcount, vgic_irq_release)) {
-		raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
-		return;
-	};
-
-	list_del(&irq->lpi_list);
-	dist->lpi_list_count--;
+	__vgic_put_lpi_locked(kvm, irq);
 	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
-
-	kfree(irq);
 }
 
 void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)
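
The vgic.c refactor above splits the refcount drop into __vgic_put_lpi_locked() so that callers already holding lpi_list_lock (the translation cache code) can release references, while vgic_put_irq() keeps acquiring the lock itself. A miniature sketch of that locked-helper pattern, with pthread primitives standing in for the kernel's raw spinlock and kref (all names hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int refcount = 2;

static void put_locked(void)	/* caller must hold 'lock' */
{
	if (--refcount == 0)
		printf("last reference dropped: free the object\n");
}

static void put(void)		/* public API: takes the lock itself */
{
	pthread_mutex_lock(&lock);
	put_locked();
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	put();				/* refcount 2 -> 1 */
	pthread_mutex_lock(&lock);
	put_locked();			/* cache-style caller, lock already held */
	pthread_mutex_unlock(&lock);
	return 0;
}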
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index 797e05004d80..83066a81b16a 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -161,6 +161,7 @@ vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
 			   gpa_t addr, int len);
 struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
 			      u32 intid);
+void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq);
 void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq);
 bool vgic_get_phys_line_level(struct vgic_irq *irq);
 void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending);
@@ -307,6 +308,10 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr);
 int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
 			 u32 devid, u32 eventid, struct vgic_irq **irq);
 struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi);
+int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi);
+void vgic_lpi_translation_cache_init(struct kvm *kvm);
+void vgic_lpi_translation_cache_destroy(struct kvm *kvm);
+void vgic_its_invalidate_cache(struct kvm *kvm);
 
 bool vgic_supports_direct_msis(struct kvm *kvm);
 int vgic_v4_init(struct kvm *kvm);