author		Marc Zyngier <marc.zyngier@arm.com>	2013-01-21 19:36:14 -0500
committer	Marc Zyngier <marc.zyngier@arm.com>	2013-02-11 13:59:20 -0500
commit		9d949dce523df878f1fce9f4d7738a5834650927 (patch)
tree		4d8105c25a5f8dd08afd9fd450be5296a1befc33 /arch/arm
parent		b47ef92af8efc30f4fbdeac041397df01b7134af (diff)
ARM: KVM: VGIC virtual CPU interface management
Add VGIC virtual CPU interface code, picking pending interrupts from the
distributor and stashing them in the VGIC control interface list registers.

Reviewed-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Christoffer Dall <c.dall@virtualopensystems.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
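In short: before entering the guest, kvm_vgic_flush_hwstate() takes the distributor lock and copies pending, enabled interrupts targeting this VCPU into free list registers; after the guest exits, kvm_vgic_sync_hwstate() folds the list register and maintenance interrupt state back into the emulated distributor and recomputes irq_pending_on_cpu. A minimal sketch of how a caller could bracket a guest run with these new entry points follows; the run_vcpu_once() wrapper and the kvm_arm_run_guest() helper are hypothetical stand-ins, since the real call sites (in arch/arm/kvm/arm.c) are not part of this diff.

/*
 * Illustrative sketch only, not part of this patch. Shows the intended
 * ordering of the VGIC entry points around a world switch.
 */
static int run_vcpu_once(struct kvm_vcpu *vcpu)
{
	int ret;

	/* Move pending distributor state into the CPU interface LRs. */
	kvm_vgic_flush_hwstate(vcpu);

	/* Hypothetical helper standing in for the actual world switch. */
	ret = kvm_arm_run_guest(vcpu);

	/* Sync LR/maintenance state back into the emulated distributor. */
	kvm_vgic_sync_hwstate(vcpu);

	return ret;
}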
Diffstat (limited to 'arch/arm')
-rw-r--r--	arch/arm/include/asm/kvm_vgic.h	31
-rw-r--r--	arch/arm/kvm/vgic.c	327
2 files changed, 357 insertions(+), 1 deletion(-)
diff --git a/arch/arm/include/asm/kvm_vgic.h b/arch/arm/include/asm/kvm_vgic.h
index 4d4f47426a4a..c2dc8574ea3a 100644
--- a/arch/arm/include/asm/kvm_vgic.h
+++ b/arch/arm/include/asm/kvm_vgic.h
@@ -33,6 +33,7 @@
 #define VGIC_NR_PRIVATE_IRQS	(VGIC_NR_SGIS + VGIC_NR_PPIS)
 #define VGIC_NR_SHARED_IRQS	(VGIC_NR_IRQS - VGIC_NR_PRIVATE_IRQS)
 #define VGIC_MAX_CPUS		KVM_MAX_VCPUS
+#define VGIC_MAX_LRS		(1 << 6)
 
 /* Sanity checks... */
 #if (VGIC_MAX_CPUS > 8)
@@ -110,8 +111,33 @@ struct vgic_dist {
 };
 
 struct vgic_cpu {
+#ifdef CONFIG_KVM_ARM_VGIC
+	/* per IRQ to LR mapping */
+	u8		vgic_irq_lr_map[VGIC_NR_IRQS];
+
+	/* Pending interrupts on this VCPU */
+	DECLARE_BITMAP(	pending_percpu, VGIC_NR_PRIVATE_IRQS);
+	DECLARE_BITMAP(	pending_shared, VGIC_NR_SHARED_IRQS);
+
+	/* Bitmap of used/free list registers */
+	DECLARE_BITMAP(	lr_used, VGIC_MAX_LRS);
+
+	/* Number of list registers on this CPU */
+	int		nr_lr;
+
+	/* CPU vif control registers for world switch */
+	u32		vgic_hcr;
+	u32		vgic_vmcr;
+	u32		vgic_misr;	/* Saved only */
+	u32		vgic_eisr[2];	/* Saved only */
+	u32		vgic_elrsr[2];	/* Saved only */
+	u32		vgic_apr;
+	u32		vgic_lr[VGIC_MAX_LRS];
+#endif
 };
 
+#define LR_EMPTY	0xff
+
 struct kvm;
 struct kvm_vcpu;
 struct kvm_run;
@@ -119,9 +145,14 @@ struct kvm_exit_mmio;
 
 #ifdef CONFIG_KVM_ARM_VGIC
 int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr);
+void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
+void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
+int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
 bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		      struct kvm_exit_mmio *mmio);
 
+#define irqchip_in_kernel(k)	(!!((k)->arch.vgic.vctrl_base))
+
 #else
 static inline int kvm_vgic_hyp_init(void)
 {
diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c
index 815069f22e8b..8f32702108b2 100644
--- a/arch/arm/kvm/vgic.c
+++ b/arch/arm/kvm/vgic.c
@@ -152,6 +152,34 @@ static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
 	return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
 }
 
+static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq)
+{
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+	return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq);
+}
+
+static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq)
+{
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+	vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1);
+}
+
+static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq)
+{
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+	vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0);
+}
+
+static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
+{
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+	return vgic_bitmap_get_irq_val(&dist->irq_state, vcpu->vcpu_id, irq);
+}
+
 static void vgic_dist_irq_set(struct kvm_vcpu *vcpu, int irq)
 {
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
@@ -723,7 +751,30 @@ static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
 
 static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
 {
-	return 0;
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+	unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
+	unsigned long pending_private, pending_shared;
+	int vcpu_id;
+
+	vcpu_id = vcpu->vcpu_id;
+	pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
+	pend_shared = vcpu->arch.vgic_cpu.pending_shared;
+
+	pending = vgic_bitmap_get_cpu_map(&dist->irq_state, vcpu_id);
+	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
+	bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);
+
+	pending = vgic_bitmap_get_shared_map(&dist->irq_state);
+	enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
+	bitmap_and(pend_shared, pending, enabled, VGIC_NR_SHARED_IRQS);
+	bitmap_and(pend_shared, pend_shared,
+		   vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
+		   VGIC_NR_SHARED_IRQS);
+
+	pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
+	pending_shared = find_first_bit(pend_shared, VGIC_NR_SHARED_IRQS);
+	return (pending_private < VGIC_NR_PRIVATE_IRQS ||
+		pending_shared < VGIC_NR_SHARED_IRQS);
 }
 
 /*
@@ -749,6 +800,280 @@ static void vgic_update_state(struct kvm *kvm)
 	}
 }
 
+#define LR_CPUID(lr)	\
+	(((lr) & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT)
+#define MK_LR_PEND(src, irq)	\
+	(GICH_LR_PENDING_BIT | ((src) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq))
+/*
+ * Queue an interrupt to a CPU virtual interface. Return true on success,
+ * or false if it wasn't possible to queue it.
+ */
+static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
+{
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	int lr;
+
+	/* Sanitize the input... */
+	BUG_ON(sgi_source_id & ~7);
+	BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
+	BUG_ON(irq >= VGIC_NR_IRQS);
+
+	kvm_debug("Queue IRQ%d\n", irq);
+
+	lr = vgic_cpu->vgic_irq_lr_map[irq];
+
+	/* Do we have an active interrupt for the same CPUID? */
+	if (lr != LR_EMPTY &&
+	    (LR_CPUID(vgic_cpu->vgic_lr[lr]) == sgi_source_id)) {
+		kvm_debug("LR%d piggyback for IRQ%d %x\n",
+			  lr, irq, vgic_cpu->vgic_lr[lr]);
+		BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
+		vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;
+
+		goto out;
+	}
+
+	/* Try to use another LR for this interrupt */
+	lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
+				 vgic_cpu->nr_lr);
+	if (lr >= vgic_cpu->nr_lr)
+		return false;
+
+	kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
+	vgic_cpu->vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq);
+	vgic_cpu->vgic_irq_lr_map[irq] = lr;
+	set_bit(lr, vgic_cpu->lr_used);
+
+out:
+	if (!vgic_irq_is_edge(vcpu, irq))
+		vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;
+
+	return true;
+}
+
+static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq)
+{
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+	unsigned long sources;
+	int vcpu_id = vcpu->vcpu_id;
+	int c;
+
+	sources = dist->irq_sgi_sources[vcpu_id][irq];
+
+	for_each_set_bit(c, &sources, VGIC_MAX_CPUS) {
+		if (vgic_queue_irq(vcpu, c, irq))
+			clear_bit(c, &sources);
+	}
+
+	dist->irq_sgi_sources[vcpu_id][irq] = sources;
+
+	/*
+	 * If the sources bitmap has been cleared it means that we
+	 * could queue all the SGIs onto link registers (see the
+	 * clear_bit above), and therefore we are done with them in
+	 * our emulated gic and can get rid of them.
+	 */
+	if (!sources) {
+		vgic_dist_irq_clear(vcpu, irq);
+		vgic_cpu_irq_clear(vcpu, irq);
+		return true;
+	}
+
+	return false;
+}
+
+static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
+{
+	if (vgic_irq_is_active(vcpu, irq))
+		return true; /* level interrupt, already queued */
+
+	if (vgic_queue_irq(vcpu, 0, irq)) {
+		if (vgic_irq_is_edge(vcpu, irq)) {
+			vgic_dist_irq_clear(vcpu, irq);
+			vgic_cpu_irq_clear(vcpu, irq);
+		} else {
+			vgic_irq_set_active(vcpu, irq);
+		}
+
+		return true;
+	}
+
+	return false;
+}
+
+/*
+ * Fill the list registers with pending interrupts before running the
+ * guest.
+ */
+static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
+{
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+	int i, vcpu_id;
+	int overflow = 0;
+
+	vcpu_id = vcpu->vcpu_id;
+
+	/*
+	 * We may not have any pending interrupt, or the interrupts
+	 * may have been serviced from another vcpu. In all cases,
+	 * move along.
+	 */
+	if (!kvm_vgic_vcpu_pending_irq(vcpu)) {
+		pr_debug("CPU%d has no pending interrupt\n", vcpu_id);
+		goto epilog;
+	}
+
+	/* SGIs */
+	for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) {
+		if (!vgic_queue_sgi(vcpu, i))
+			overflow = 1;
+	}
+
+	/* PPIs */
+	for_each_set_bit_from(i, vgic_cpu->pending_percpu, VGIC_NR_PRIVATE_IRQS) {
+		if (!vgic_queue_hwirq(vcpu, i))
+			overflow = 1;
+	}
+
+	/* SPIs */
+	for_each_set_bit(i, vgic_cpu->pending_shared, VGIC_NR_SHARED_IRQS) {
+		if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
+			overflow = 1;
+	}
+
+epilog:
+	if (overflow) {
+		vgic_cpu->vgic_hcr |= GICH_HCR_UIE;
+	} else {
+		vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;
+		/*
+		 * We're about to run this VCPU, and we've consumed
+		 * everything the distributor had in store for
+		 * us. Claim we don't have anything pending. We'll
+		 * adjust that if needed while exiting.
+		 */
+		clear_bit(vcpu_id, &dist->irq_pending_on_cpu);
+	}
+}
+
+static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
+{
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	bool level_pending = false;
+
+	kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr);
+
+	/*
+	 * We do not need to take the distributor lock here, since the only
+	 * action we perform is clearing the irq_active_bit for an EOIed
+	 * level interrupt. There is a potential race with
+	 * the queuing of an interrupt in __kvm_vgic_flush_hwstate(), where we
+	 * check if the interrupt is already active. Two possibilities:
+	 *
+	 * - The queuing is occurring on the same vcpu: cannot happen,
+	 *   as we're already in the context of this vcpu, and
+	 *   executing the handler
+	 * - The interrupt has been migrated to another vcpu, and we
+	 *   ignore this interrupt for this run. Big deal. It is still
+	 *   pending though, and will get considered when this vcpu
+	 *   exits.
+	 */
+	if (vgic_cpu->vgic_misr & GICH_MISR_EOI) {
+		/*
+		 * Some level interrupts have been EOIed. Clear their
+		 * active bit.
+		 */
+		int lr, irq;
+
+		for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_eisr,
+				 vgic_cpu->nr_lr) {
+			irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
+
+			vgic_irq_clear_active(vcpu, irq);
+			vgic_cpu->vgic_lr[lr] &= ~GICH_LR_EOI;
+
+			/* Any additional pending interrupt? */
+			if (vgic_dist_irq_is_pending(vcpu, irq)) {
+				vgic_cpu_irq_set(vcpu, irq);
+				level_pending = true;
+			} else {
+				vgic_cpu_irq_clear(vcpu, irq);
+			}
+		}
+	}
+
+	if (vgic_cpu->vgic_misr & GICH_MISR_U)
+		vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;
+
+	return level_pending;
+}
+
+/*
+ * Sync back the VGIC state after a guest run. We do not really touch
+ * the distributor here (the irq_pending_on_cpu bit is safe to set),
+ * so there is no need for taking its lock.
+ */
+static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
+{
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+	int lr, pending;
+	bool level_pending;
+
+	level_pending = vgic_process_maintenance(vcpu);
+
+	/* Clear mappings for empty LRs */
+	for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr,
+			 vgic_cpu->nr_lr) {
+		int irq;
+
+		if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
+			continue;
+
+		irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
+
+		BUG_ON(irq >= VGIC_NR_IRQS);
+		vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
+	}
+
+	/* Check if we still have something up our sleeve... */
+	pending = find_first_zero_bit((unsigned long *)vgic_cpu->vgic_elrsr,
+				      vgic_cpu->nr_lr);
+	if (level_pending || pending < vgic_cpu->nr_lr)
+		set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
+}
+
+void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
+{
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+	if (!irqchip_in_kernel(vcpu->kvm))
+		return;
+
+	spin_lock(&dist->lock);
+	__kvm_vgic_flush_hwstate(vcpu);
+	spin_unlock(&dist->lock);
+}
+
+void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
+{
+	if (!irqchip_in_kernel(vcpu->kvm))
+		return;
+
+	__kvm_vgic_sync_hwstate(vcpu);
+}
+
+int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
+{
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+	if (!irqchip_in_kernel(vcpu->kvm))
+		return 0;
+
+	return test_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
+}
+
 static bool vgic_ioaddr_overlap(struct kvm *kvm)
 {
 	phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;