author		Will Deacon <will.deacon@arm.com>	2014-11-17 18:31:12 -0500
committer	Will Deacon <will.deacon@arm.com>	2015-01-19 09:46:44 -0500
commit		fe4b991dcd84e0104cf2e29223a819335ed048a7
tree		e5c7b24b7e4e2400023b3a89d4ae4ca63cf75c76 /drivers/iommu
parent		e1d3c0fd701df831169b116cd5c5d6203ac07f70
iommu: add self-consistency tests to ARM LPAE IO page table allocator

This patch adds a series of basic self-consistency tests to the ARM LPAE
IO page table allocator that exercise corner cases in map/unmap, as well
as testing all valid configurations of pagesize, ias and stage.

Signed-off-by: Will Deacon <will.deacon@arm.com>
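In outline, the selftest drives the allocator through the same io_pgtable_ops interface an IOMMU driver would use, but against a throwaway configuration whose TLB callbacks are stubs. Condensed from the hunks below (a sketch, not a literal excerpt), the per-configuration setup is roughly:

	struct io_pgtable_cfg cfg = {
		.tlb		= &dummy_tlb_ops,		/* no-op TLB callbacks */
		.oas		= 48,				/* fixed output address size */
		.ias		= 48,				/* one of 32/36/40/42/44/48 */
		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,	/* one of three granule sets */
	};
	struct io_pgtable_ops *ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, &cfg);

Each of the three pgsize bitmaps is crossed with six ias values (18 configurations in total), and every configuration is run for both the stage-1 (ARM_64_LPAE_S1) and stage-2 (ARM_64_LPAE_S2) formats.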
Diffstat (limited to 'drivers/iommu')
 -rw-r--r--	drivers/iommu/Kconfig		|   9
 -rw-r--r--	drivers/iommu/io-pgtable-arm.c	| 200
 2 files changed, 208 insertions(+), 1 deletion(-)
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 306454fbc52d..9fd9909867cd 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -28,6 +28,15 @@ config IOMMU_IO_PGTABLE_LPAE
 	  sizes at both stage-1 and stage-2, as well as address spaces
 	  up to 48-bits in size.
 
+config IOMMU_IO_PGTABLE_LPAE_SELFTEST
+	bool "LPAE selftests"
+	depends on IOMMU_IO_PGTABLE_LPAE
+	help
+	  Enable self-tests for LPAE page table allocator. This performs
+	  a series of page-table consistency checks during boot.
+
+	  If unsure, say N here.
+
 endmenu
 
 config OF_IOMMU
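The selftests are built in only when the new symbol above is enabled; a minimal config fragment (assuming a kernel where IOMMU support is already selected) might be:

	CONFIG_IOMMU_SUPPORT=y
	CONFIG_IOMMU_IO_PGTABLE_LPAE=y
	CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST=y

Once enabled, the tests run unconditionally during boot via the subsys_initcall() added at the end of the patch; there is no runtime knob.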
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index dbe6178a53e9..52fb21487f02 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -193,6 +193,8 @@ struct arm_lpae_io_pgtable {
 
 typedef u64 arm_lpae_iopte;
 
+static bool selftest_running = false;
+
 static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
 			     unsigned long iova, phys_addr_t paddr,
 			     arm_lpae_iopte prot, int lvl,
@@ -201,8 +203,10 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
 	arm_lpae_iopte pte = prot;
 
 	/* We require an unmap first */
-	if (WARN_ON(iopte_leaf(*ptep, lvl)))
+	if (iopte_leaf(*ptep, lvl)) {
+		WARN_ON(!selftest_running);
 		return -EEXIST;
+	}
 
 	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
 		pte |= ARM_LPAE_PTE_TYPE_PAGE;
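The WARN_ON() relaxation above exists because the selftest deliberately maps on top of a live leaf entry (the "Overlapping mappings" check further down) and wants a quiet -EEXIST rather than a backtrace. Behaviour for ordinary callers is unchanged, as in this hypothetical driver snippet (not part of the patch):

	/* First mapping of the IOVA succeeds. */
	ops->map(ops, iova, paddr, SZ_4K, IOMMU_READ);

	/* A second map of the same IOVA returns -EEXIST and, with
	 * selftest_running false, still triggers the WARN. */
	ops->map(ops, iova, paddr, SZ_4K, IOMMU_READ);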
@@ -779,3 +783,197 @@ struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
 	.alloc	= arm_32_lpae_alloc_pgtable_s2,
 	.free	= arm_lpae_free_pgtable,
 };
+
+#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST
+
+static struct io_pgtable_cfg *cfg_cookie;
+
+static void dummy_tlb_flush_all(void *cookie)
+{
+	WARN_ON(cookie != cfg_cookie);
+}
+
+static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
+				void *cookie)
+{
+	WARN_ON(cookie != cfg_cookie);
+	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
+}
+
+static void dummy_tlb_sync(void *cookie)
+{
+	WARN_ON(cookie != cfg_cookie);
+}
+
+static void dummy_flush_pgtable(void *ptr, size_t size, void *cookie)
+{
+	WARN_ON(cookie != cfg_cookie);
+}
+
+static struct iommu_gather_ops dummy_tlb_ops __initdata = {
+	.tlb_flush_all	= dummy_tlb_flush_all,
+	.tlb_add_flush	= dummy_tlb_add_flush,
+	.tlb_sync	= dummy_tlb_sync,
+	.flush_pgtable	= dummy_flush_pgtable,
+};
+
+static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
+{
+	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+	struct io_pgtable_cfg *cfg = &data->iop.cfg;
+
+	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
+		cfg->pgsize_bitmap, cfg->ias);
+	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
+		data->levels, data->pgd_size, data->pg_shift,
+		data->bits_per_level, data->pgd);
+}
+
+#define __FAIL(ops, i)	({						\
+		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
+		arm_lpae_dump_ops(ops);					\
+		selftest_running = false;				\
+		-EFAULT;						\
+})
+
+static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
+{
+	static const enum io_pgtable_fmt fmts[] = {
+		ARM_64_LPAE_S1,
+		ARM_64_LPAE_S2,
+	};
+
+	int i, j;
+	unsigned long iova;
+	size_t size;
+	struct io_pgtable_ops *ops;
+
+	selftest_running = true;
+
+	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
+		cfg_cookie = cfg;
+		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
+		if (!ops) {
+			pr_err("selftest: failed to allocate io pgtable ops\n");
+			return -ENOMEM;
+		}
+
+		/*
+		 * Initial sanity checks.
+		 * Empty page tables shouldn't provide any translations.
+		 */
+		if (ops->iova_to_phys(ops, 42))
+			return __FAIL(ops, i);
+
+		if (ops->iova_to_phys(ops, SZ_1G + 42))
+			return __FAIL(ops, i);
+
+		if (ops->iova_to_phys(ops, SZ_2G + 42))
+			return __FAIL(ops, i);
+
+		/*
+		 * Distinct mappings of different granule sizes.
+		 */
+		iova = 0;
+		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
+		while (j != BITS_PER_LONG) {
+			size = 1UL << j;
+
+			if (ops->map(ops, iova, iova, size, IOMMU_READ |
+							    IOMMU_WRITE |
+							    IOMMU_NOEXEC |
+							    IOMMU_CACHE))
+				return __FAIL(ops, i);
+
+			/* Overlapping mappings */
+			if (!ops->map(ops, iova, iova + size, size,
+				      IOMMU_READ | IOMMU_NOEXEC))
+				return __FAIL(ops, i);
+
+			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
+				return __FAIL(ops, i);
+
+			iova += SZ_1G;
+			j++;
+			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
+		}
+
+		/* Partial unmap */
+		size = 1UL << __ffs(cfg->pgsize_bitmap);
+		if (ops->unmap(ops, SZ_1G + size, size) != size)
+			return __FAIL(ops, i);
+
+		/* Remap of partial unmap */
+		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
+			return __FAIL(ops, i);
+
+		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
+			return __FAIL(ops, i);
+
+		/* Full unmap */
+		iova = 0;
+		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
+		while (j != BITS_PER_LONG) {
+			size = 1UL << j;
+
+			if (ops->unmap(ops, iova, size) != size)
+				return __FAIL(ops, i);
+
+			if (ops->iova_to_phys(ops, iova + 42))
+				return __FAIL(ops, i);
+
+			/* Remap full block */
+			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
+				return __FAIL(ops, i);
+
+			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
+				return __FAIL(ops, i);
+
+			iova += SZ_1G;
+			j++;
+			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
+		}
+
+		free_io_pgtable_ops(ops);
+	}
+
+	selftest_running = false;
+	return 0;
+}
+
+static int __init arm_lpae_do_selftests(void)
+{
+	static const unsigned long pgsize[] = {
+		SZ_4K | SZ_2M | SZ_1G,
+		SZ_16K | SZ_32M,
+		SZ_64K | SZ_512M,
+	};
+
+	static const unsigned int ias[] = {
+		32, 36, 40, 42, 44, 48,
+	};
+
+	int i, j, pass = 0, fail = 0;
+	struct io_pgtable_cfg cfg = {
+		.tlb = &dummy_tlb_ops,
+		.oas = 48,
+	};
+
+	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
+		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
+			cfg.pgsize_bitmap = pgsize[i];
+			cfg.ias = ias[j];
+			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
+				pgsize[i], ias[j]);
+			if (arm_lpae_run_tests(&cfg))
+				fail++;
+			else
+				pass++;
+		}
+	}
+
+	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
+	return fail ? -EFAULT : 0;
+}
+subsys_initcall(arm_lpae_do_selftests);
+#endif
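Assuming all 18 pgsize/ias combinations pass, and modulo whatever pr_fmt() prefix the file applies, the pr_info() calls above would produce boot output along these lines (illustrative, not captured from a real boot):

	selftest: pgsize_bitmap 0x40201000, IAS 32
	selftest: pgsize_bitmap 0x40201000, IAS 36
	...
	selftest: pgsize_bitmap 0x20010000, IAS 48
	selftest: completed with 18 PASS 0 FAIL

Here 0x40201000 is SZ_4K | SZ_2M | SZ_1G, 0x02004000 is SZ_16K | SZ_32M and 0x20010000 is SZ_64K | SZ_512M.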