author     Marc Zyngier <marc.zyngier@arm.com>   2016-07-17 16:52:55 -0400
committer  Marc Zyngier <marc.zyngier@arm.com>   2016-07-18 13:15:19 -0400
commit     6d03a68f8054430cba28e49d9e46c1cd4db39a70
tree       02befdebe755a1039ae8c7d69965e2caa8371b18
parent     bb7176449f6da27534a0faf3a67997bf2c3172aa
KVM: arm64: vgic-its: Turn device_id validation into generic ID validation

There is no need to have separate functions to validate devices and
collections, as the architecture doesn't really distinguish the two, and
they are supposed to be managed the same way. Let's turn the DevID
checker into a generic one.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
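The net effect, as a quick reference (this simply restates the two call sites
visible in the diff below), is that both paths now feed their own GITS_BASER
copy to the same helper:

	/* MAPD: device IDs are checked against the device table BASER */
	if (!vgic_its_check_id(its, its->baser_device_table, device_id))
		return E_ITS_MAPD_DEVICE_OOR;

	/* MAPC (via vgic_its_alloc_collection): same check, collection table BASER */
	if (!vgic_its_check_id(its, its->baser_coll_table, coll_id))
		return E_ITS_MAPC_COLLECTION_OOR;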
-rw-r--r--  virt/kvm/arm/vgic/vgic-its.c  | 134
1 file changed, 62 insertions(+), 72 deletions(-)
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index dcae567c522d..996e3e19b53f 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -581,12 +581,73 @@ static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
 	return 0;
 }
 
+/*
+ * Check whether an ID can be stored into the corresponding guest table.
+ * For a direct table this is pretty easy, but gets a bit nasty for
+ * indirect tables. We check whether the resulting guest physical address
+ * is actually valid (covered by a memslot and guest accessible).
+ * For this we have to read the respective first level entry.
+ */
+static bool vgic_its_check_id(struct vgic_its *its, u64 baser, int id)
+{
+	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
+	int index;
+	u64 indirect_ptr;
+	gfn_t gfn;
+
+	if (!(baser & GITS_BASER_INDIRECT)) {
+		phys_addr_t addr;
+
+		if (id >= (l1_tbl_size / GITS_BASER_ENTRY_SIZE(baser)))
+			return false;
+
+		addr = BASER_ADDRESS(baser) + id * GITS_BASER_ENTRY_SIZE(baser);
+		gfn = addr >> PAGE_SHIFT;
+
+		return kvm_is_visible_gfn(its->dev->kvm, gfn);
+	}
+
+	/* calculate and check the index into the 1st level */
+	index = id / (SZ_64K / GITS_BASER_ENTRY_SIZE(baser));
+	if (index >= (l1_tbl_size / sizeof(u64)))
+		return false;
+
+	/* Each 1st level entry is represented by a 64-bit value. */
+	if (kvm_read_guest(its->dev->kvm,
+			   BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
+			   &indirect_ptr, sizeof(indirect_ptr)))
+		return false;
+
+	indirect_ptr = le64_to_cpu(indirect_ptr);
+
+	/* check the valid bit of the first level entry */
+	if (!(indirect_ptr & BIT_ULL(63)))
+		return false;
+
+	/*
+	 * Mask the guest physical address and calculate the frame number.
+	 * Any address beyond our supported 48 bits of PA will be caught
+	 * by the actual check in the final step.
+	 */
+	indirect_ptr &= GENMASK_ULL(51, 16);
+
+	/* Find the address of the actual entry */
+	index = id % (SZ_64K / GITS_BASER_ENTRY_SIZE(baser));
+	indirect_ptr += index * GITS_BASER_ENTRY_SIZE(baser);
+	gfn = indirect_ptr >> PAGE_SHIFT;
+
+	return kvm_is_visible_gfn(its->dev->kvm, gfn);
+}
+
 static int vgic_its_alloc_collection(struct vgic_its *its,
 				     struct its_collection **colp,
 				     u32 coll_id)
 {
 	struct its_collection *collection;
 
+	if (!vgic_its_check_id(its, its->baser_coll_table, coll_id))
+		return E_ITS_MAPC_COLLECTION_OOR;
+
 	collection = kzalloc(sizeof(*collection), GFP_KERNEL);
 
 	collection->collection_id = coll_id;
@@ -709,67 +770,6 @@ static void vgic_its_unmap_device(struct kvm *kvm, struct its_device *device)
 }
 
 /*
- * Check whether a device ID can be stored into the guest device tables.
- * For a direct table this is pretty easy, but gets a bit nasty for
- * indirect tables. We check whether the resulting guest physical address
- * is actually valid (covered by a memslot and guest accessbible).
- * For this we have to read the respective first level entry.
- */
-static bool vgic_its_check_device_id(struct kvm *kvm, struct vgic_its *its,
-				     int device_id)
-{
-	u64 r = its->baser_device_table;
-	int l1_tbl_size = GITS_BASER_NR_PAGES(r) * SZ_64K;
-	int index;
-	u64 indirect_ptr;
-	gfn_t gfn;
-
-
-	if (!(r & GITS_BASER_INDIRECT)) {
-		phys_addr_t addr;
-
-		if (device_id >= (l1_tbl_size / GITS_BASER_ENTRY_SIZE(r)))
-			return false;
-
-		addr = BASER_ADDRESS(r) + device_id * GITS_BASER_ENTRY_SIZE(r);
-		gfn = addr >> PAGE_SHIFT;
-
-		return kvm_is_visible_gfn(kvm, gfn);
-	}
-
-	/* calculate and check the index into the 1st level */
-	index = device_id / (SZ_64K / GITS_BASER_ENTRY_SIZE(r));
-	if (index >= (l1_tbl_size / sizeof(u64)))
-		return false;
-
-	/* Each 1st level entry is represented by a 64-bit value. */
-	if (kvm_read_guest(kvm,
-			   BASER_ADDRESS(r) + index * sizeof(indirect_ptr),
-			   &indirect_ptr, sizeof(indirect_ptr)))
-		return false;
-
-	indirect_ptr = le64_to_cpu(indirect_ptr);
-
-	/* check the valid bit of the first level entry */
-	if (!(indirect_ptr & BIT_ULL(63)))
-		return false;
-
-	/*
-	 * Mask the guest physical address and calculate the frame number.
-	 * Any address beyond our supported 48 bits of PA will be caught
-	 * by the actual check in the final step.
-	 */
-	indirect_ptr &= GENMASK_ULL(51, 16);
-
-	/* Find the address of the actual entry */
-	index = device_id % (SZ_64K / GITS_BASER_ENTRY_SIZE(r));
-	indirect_ptr += index * GITS_BASER_ENTRY_SIZE(r);
-	gfn = indirect_ptr >> PAGE_SHIFT;
-
-	return kvm_is_visible_gfn(kvm, gfn);
-}
-
-/*
  * MAPD maps or unmaps a device ID to Interrupt Translation Tables (ITTs).
  * Must be called with the its_lock mutex held.
  */
@@ -780,7 +780,7 @@ static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
 	bool valid = its_cmd_get_validbit(its_cmd);
 	struct its_device *device;
 
-	if (!vgic_its_check_device_id(kvm, its, device_id))
+	if (!vgic_its_check_id(its, its->baser_device_table, device_id))
 		return E_ITS_MAPD_DEVICE_OOR;
 
 	device = find_its_device(its, device_id);
@@ -812,13 +812,6 @@ static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
 	return 0;
 }
 
-static int vgic_its_nr_collection_ids(struct vgic_its *its)
-{
-	u64 r = its->baser_coll_table;
-
-	return (GITS_BASER_NR_PAGES(r) * SZ_64K) / GITS_BASER_ENTRY_SIZE(r);
-}
-
 /*
  * The MAPC command maps collection IDs to redistributors.
  * Must be called with the its_lock mutex held.
@@ -838,9 +831,6 @@ static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
 	if (target_addr >= atomic_read(&kvm->online_vcpus))
 		return E_ITS_MAPC_PROCNUM_OOR;
 
-	if (coll_id >= vgic_its_nr_collection_ids(its))
-		return E_ITS_MAPC_COLLECTION_OOR;
-
 	if (!valid) {
 		vgic_its_free_collection(its, coll_id);
 	} else {
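
As an aside, here is a minimal standalone sketch (not part of the patch; the
8-byte entry size, two-page level-1 table, and example ID are made-up values)
of the indirect-table index arithmetic that vgic_its_check_id performs before
it validates the resulting guest frame:

	#include <stdint.h>
	#include <stdio.h>

	#define SZ_64K		0x10000ULL
	/* Made-up values for illustration only. */
	#define ENTRY_SIZE	8ULL	/* bytes per table entry */
	#define L1_PAGES	2ULL	/* 64KB pages in the level-1 table */

	int main(void)
	{
		uint64_t id = 10000;
		uint64_t l1_tbl_size = L1_PAGES * SZ_64K;

		/* Which 64-bit level-1 entry covers this ID? */
		uint64_t l1_index = id / (SZ_64K / ENTRY_SIZE);

		if (l1_index >= l1_tbl_size / sizeof(uint64_t)) {
			printf("ID %llu is out of range\n",
			       (unsigned long long)id);
			return 1;
		}

		/* Byte offset of the entry inside the 64KB level-2 page. */
		uint64_t l2_offset = (id % (SZ_64K / ENTRY_SIZE)) * ENTRY_SIZE;

		printf("ID %llu: level-1 entry %llu, level-2 offset 0x%llx\n",
		       (unsigned long long)id, (unsigned long long)l1_index,
		       (unsigned long long)l2_offset);
		return 0;
	}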