aboutsummaryrefslogtreecommitdiffstats
path: root/virt/kvm/arm
diff options
context:
space:
mode:
authorAndre Przywara <andre.przywara@arm.com>2014-06-03 04:13:13 -0400
committerChristoffer Dall <christoffer.dall@linaro.org>2015-01-20 12:25:32 -0500
commit6d52f35af10cf24d59b43f3fd8c938ad23cab543 (patch)
treec20bda289d5090b4a4411a53b0a9a346ffa3df1b /virt/kvm/arm
parent7e5802781c3e109558ddfd8b02155ad24d872ee7 (diff)
arm64: KVM: add SGI generation register emulation
While the generation of a (virtual) inter-processor interrupt (SGI) on a GICv2 works by writing to a MMIO register, GICv3 uses the system register ICC_SGI1R_EL1 to trigger them. Add a trap handler function that calls the new SGI register handler in the GICv3 code. As ICC_SRE_EL1.SRE at this point is still always 0, this will not trap yet, but will only be used later when all the data structures have been initialized properly. Signed-off-by: Andre Przywara <andre.przywara@arm.com> Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org> Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Diffstat (limited to 'virt/kvm/arm')
-rw-r--r--virt/kvm/arm/vgic-v3-emul.c111
1 file changed, 111 insertions, 0 deletions
diff --git a/virt/kvm/arm/vgic-v3-emul.c b/virt/kvm/arm/vgic-v3-emul.c
index 8db1db597223..2d2199d85b74 100644
--- a/virt/kvm/arm/vgic-v3-emul.c
+++ b/virt/kvm/arm/vgic-v3-emul.c
@@ -841,6 +841,117 @@ void vgic_v3_init_emulation(struct kvm *kvm)
841 kvm->arch.max_vcpus = KVM_MAX_VCPUS; 841 kvm->arch.max_vcpus = KVM_MAX_VCPUS;
842} 842}
843 843
844/*
845 * Compare a given affinity (level 1-3 and a level 0 mask, from the SGI
846 * generation register ICC_SGI1R_EL1) with a given VCPU.
847 * If the VCPU's MPIDR matches, return the level0 affinity, otherwise
848 * return -1.
849 */
850static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
851{
852 unsigned long affinity;
853 int level0;
854
855 /*
856 * Split the current VCPU's MPIDR into affinity level 0 and the
857 * rest as this is what we have to compare against.
858 */
859 affinity = kvm_vcpu_get_mpidr_aff(vcpu);
860 level0 = MPIDR_AFFINITY_LEVEL(affinity, 0);
861 affinity &= ~MPIDR_LEVEL_MASK;
862
863 /* bail out if the upper three levels don't match */
864 if (sgi_aff != affinity)
865 return -1;
866
867 /* Is this VCPU's bit set in the mask ? */
868 if (!(sgi_cpu_mask & BIT(level0)))
869 return -1;
870
871 return level0;
872}
873
/*
 * Extract affinity level <level> (1..3) from an ICC_SGI1R_EL1 value and
 * shift it into the position that level occupies within an MPIDR, so the
 * result can be ORed together and compared against a VCPU's MPIDR.
 */
#define SGI_AFFINITY_LEVEL(reg, level) \
	((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
	>> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
877
/**
 * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
 * @vcpu: The VCPU requesting a SGI
 * @reg: The value written into the ICC_SGI1R_EL1 register by that VCPU
 *
 * With GICv3 (and ARE=1) CPUs trigger SGIs by writing to a system register.
 * This will trap in sys_regs.c and call this function.
 * This ICC_SGI1R_EL1 register contains the upper three affinity levels of the
 * target processors as well as a bitmask of 16 Aff0 CPUs.
 * If the interrupt routing mode bit is not set, we iterate over all VCPUs to
 * check for matching ones. If this bit is set, we signal all, but not the
 * calling VCPU.
 */
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *c_vcpu;
	struct vgic_dist *dist = &kvm->arch.vgic;
	u16 target_cpus;
	u64 mpidr;
	int sgi, c;
	int vcpu_id = vcpu->vcpu_id;
	bool broadcast;
	int updated = 0;

	/* Decode the register: SGI number, routing mode, Aff0 target list. */
	sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
	broadcast = reg & BIT(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
	target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT;
	/* Assemble Aff3.Aff2.Aff1 in MPIDR layout for match_mpidr(). */
	mpidr = SGI_AFFINITY_LEVEL(reg, 3);
	mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
	mpidr |= SGI_AFFINITY_LEVEL(reg, 1);

	/*
	 * We take the dist lock here, because we come from the sysregs
	 * code path and not from the MMIO one (which already takes the lock).
	 */
	spin_lock(&dist->lock);

	/*
	 * We iterate over all VCPUs to find the MPIDRs matching the request.
	 * If we have handled one CPU, we clear its bit to detect early
	 * if we are already finished. This avoids iterating through all
	 * VCPUs when most of the times we just signal a single VCPU.
	 */
	kvm_for_each_vcpu(c, c_vcpu, kvm) {

		/* Exit early if we have dealt with all requested CPUs */
		if (!broadcast && target_cpus == 0)
			break;

		/* Don't signal the calling VCPU */
		if (broadcast && c == vcpu_id)
			continue;

		if (!broadcast) {
			int level0;

			level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
			if (level0 == -1)
				continue;

			/* remove this matching VCPU from the mask */
			target_cpus &= ~BIT(level0);
		}

		/* Flag the SGI as pending */
		vgic_dist_irq_set_pending(c_vcpu, sgi);
		updated = 1;
		kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
	}
	/* Recompute distributor state under the lock; kick VCPUs after
	 * releasing it to avoid holding the lock across wakeups. */
	if (updated)
		vgic_update_state(vcpu->kvm);
	spin_unlock(&dist->lock);
	if (updated)
		vgic_kick_vcpus(vcpu->kvm);
}
954
844static int vgic_v3_create(struct kvm_device *dev, u32 type) 955static int vgic_v3_create(struct kvm_device *dev, u32 type)
845{ 956{
846 return kvm_vgic_create(dev->kvm, type); 957 return kvm_vgic_create(dev->kvm, type);