about summary refs log tree commit diff stats
path: root/drivers/iommu/arm-smmu.c
diff options
context:
space:
mode:
authorRobin Murphy <robin.murphy@arm.com>2016-04-28 12:12:09 -0400
committerWill Deacon <will.deacon@arm.com>2016-05-03 13:23:03 -0400
commit7602b8710645da48b2937f05fa41d627a0e73dad (patch)
treebf2783d9d2b5d76117b0b319a1e52e03ca03db4e /drivers/iommu/arm-smmu.c
parentf9a05f05b12a42f2c52f3d3b8cc71fe2a6e60bce (diff)
iommu/arm-smmu: Decouple context format from kernel config
The way the driver currently forces an AArch32 or AArch64 context format based on the kernel config and SMMU architecture version is suboptimal, in that it makes it very hard to support oddball mix-and-match cases like the SMMUv1 64KB supplement, or situations where the reduced table depth of an AArch32 short descriptor context may be desirable under an AArch64 kernel. It also only happens to work on current implementations which do support all the relevant formats.

Introduce an explicit notion of context format, so we can manage that independently and get rid of the inflexible #ifdeffery.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'drivers/iommu/arm-smmu.c')
-rw-r--r-- drivers/iommu/arm-smmu.c | 94
1 file changed, 72 insertions, 22 deletions
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index acff3326f818..f2ded69feba7 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -117,6 +117,8 @@
117#define ID0_NTS (1 << 28) 117#define ID0_NTS (1 << 28)
118#define ID0_SMS (1 << 27) 118#define ID0_SMS (1 << 27)
119#define ID0_ATOSNS (1 << 26) 119#define ID0_ATOSNS (1 << 26)
120#define ID0_PTFS_NO_AARCH32 (1 << 25)
121#define ID0_PTFS_NO_AARCH32S (1 << 24)
120#define ID0_CTTW (1 << 14) 122#define ID0_CTTW (1 << 14)
121#define ID0_NUMIRPT_SHIFT 16 123#define ID0_NUMIRPT_SHIFT 16
122#define ID0_NUMIRPT_MASK 0xff 124#define ID0_NUMIRPT_MASK 0xff
@@ -317,6 +319,11 @@ struct arm_smmu_device {
317#define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4) 319#define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4)
318#define ARM_SMMU_FEAT_TRANS_OPS (1 << 5) 320#define ARM_SMMU_FEAT_TRANS_OPS (1 << 5)
319#define ARM_SMMU_FEAT_VMID16 (1 << 6) 321#define ARM_SMMU_FEAT_VMID16 (1 << 6)
322#define ARM_SMMU_FEAT_FMT_AARCH64_4K (1 << 7)
323#define ARM_SMMU_FEAT_FMT_AARCH64_16K (1 << 8)
324#define ARM_SMMU_FEAT_FMT_AARCH64_64K (1 << 9)
325#define ARM_SMMU_FEAT_FMT_AARCH32_L (1 << 10)
326#define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11)
320 u32 features; 327 u32 features;
321 328
322#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0) 329#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
@@ -346,10 +353,18 @@ struct arm_smmu_device {
346 u32 cavium_id_base; /* Specific to Cavium */ 353 u32 cavium_id_base; /* Specific to Cavium */
347}; 354};
348 355
356enum arm_smmu_context_fmt {
357 ARM_SMMU_CTX_FMT_NONE,
358 ARM_SMMU_CTX_FMT_AARCH64,
359 ARM_SMMU_CTX_FMT_AARCH32_L,
360 ARM_SMMU_CTX_FMT_AARCH32_S,
361};
362
349struct arm_smmu_cfg { 363struct arm_smmu_cfg {
350 u8 cbndx; 364 u8 cbndx;
351 u8 irptndx; 365 u8 irptndx;
352 u32 cbar; 366 u32 cbar;
367 enum arm_smmu_context_fmt fmt;
353}; 368};
354#define INVALID_IRPTNDX 0xff 369#define INVALID_IRPTNDX 0xff
355 370
@@ -619,14 +634,13 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
619 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); 634 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
620 reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA; 635 reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
621 636
622 if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) { 637 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
623 iova &= ~12UL; 638 iova &= ~12UL;
624 iova |= ARM_SMMU_CB_ASID(smmu, cfg); 639 iova |= ARM_SMMU_CB_ASID(smmu, cfg);
625 do { 640 do {
626 writel_relaxed(iova, reg); 641 writel_relaxed(iova, reg);
627 iova += granule; 642 iova += granule;
628 } while (size -= granule); 643 } while (size -= granule);
629#ifdef CONFIG_64BIT
630 } else { 644 } else {
631 iova >>= 12; 645 iova >>= 12;
632 iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48; 646 iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
@@ -634,9 +648,7 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
634 writeq_relaxed(iova, reg); 648 writeq_relaxed(iova, reg);
635 iova += granule >> 12; 649 iova += granule >> 12;
636 } while (size -= granule); 650 } while (size -= granule);
637#endif
638 } 651 }
639#ifdef CONFIG_64BIT
640 } else if (smmu->version == ARM_SMMU_V2) { 652 } else if (smmu->version == ARM_SMMU_V2) {
641 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); 653 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
642 reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L : 654 reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
@@ -646,7 +658,6 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
646 smmu_write_atomic_lq(iova, reg); 658 smmu_write_atomic_lq(iova, reg);
647 iova += granule >> 12; 659 iova += granule >> 12;
648 } while (size -= granule); 660 } while (size -= granule);
649#endif
650 } else { 661 } else {
651 reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID; 662 reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
652 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg); 663 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
@@ -745,11 +756,10 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
745 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); 756 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
746 757
747 if (smmu->version > ARM_SMMU_V1) { 758 if (smmu->version > ARM_SMMU_V1) {
748#ifdef CONFIG_64BIT 759 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
749 reg = CBA2R_RW64_64BIT; 760 reg = CBA2R_RW64_64BIT;
750#else 761 else
751 reg = CBA2R_RW64_32BIT; 762 reg = CBA2R_RW64_32BIT;
752#endif
753 /* 16-bit VMIDs live in CBA2R */ 763 /* 16-bit VMIDs live in CBA2R */
754 if (smmu->features & ARM_SMMU_FEAT_VMID16) 764 if (smmu->features & ARM_SMMU_FEAT_VMID16)
755 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT; 765 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
@@ -860,16 +870,40 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
860 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2)) 870 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
861 smmu_domain->stage = ARM_SMMU_DOMAIN_S1; 871 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
862 872
873 /*
874 * Choosing a suitable context format is even more fiddly. Until we
875 * grow some way for the caller to express a preference, and/or move
876 * the decision into the io-pgtable code where it arguably belongs,
877 * just aim for the closest thing to the rest of the system, and hope
878 * that the hardware isn't esoteric enough that we can't assume AArch64
879 * support to be a superset of AArch32 support...
880 */
881 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
882 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
883 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
884 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
885 ARM_SMMU_FEAT_FMT_AARCH64_16K |
886 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
887 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
888
889 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
890 ret = -EINVAL;
891 goto out_unlock;
892 }
893
863 switch (smmu_domain->stage) { 894 switch (smmu_domain->stage) {
864 case ARM_SMMU_DOMAIN_S1: 895 case ARM_SMMU_DOMAIN_S1:
865 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; 896 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
866 start = smmu->num_s2_context_banks; 897 start = smmu->num_s2_context_banks;
867 ias = smmu->va_size; 898 ias = smmu->va_size;
868 oas = smmu->ipa_size; 899 oas = smmu->ipa_size;
869 if (IS_ENABLED(CONFIG_64BIT)) 900 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
870 fmt = ARM_64_LPAE_S1; 901 fmt = ARM_64_LPAE_S1;
871 else 902 } else {
872 fmt = ARM_32_LPAE_S1; 903 fmt = ARM_32_LPAE_S1;
904 ias = min(ias, 32UL);
905 oas = min(oas, 40UL);
906 }
873 break; 907 break;
874 case ARM_SMMU_DOMAIN_NESTED: 908 case ARM_SMMU_DOMAIN_NESTED:
875 /* 909 /*
@@ -881,10 +915,13 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
881 start = 0; 915 start = 0;
882 ias = smmu->ipa_size; 916 ias = smmu->ipa_size;
883 oas = smmu->pa_size; 917 oas = smmu->pa_size;
884 if (IS_ENABLED(CONFIG_64BIT)) 918 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
885 fmt = ARM_64_LPAE_S2; 919 fmt = ARM_64_LPAE_S2;
886 else 920 } else {
887 fmt = ARM_32_LPAE_S2; 921 fmt = ARM_32_LPAE_S2;
922 ias = min(ias, 40UL);
923 oas = min(oas, 40UL);
924 }
888 break; 925 break;
889 default: 926 default:
890 ret = -EINVAL; 927 ret = -EINVAL;
@@ -1670,6 +1707,12 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1670 ID0_NUMSIDB_MASK; 1707 ID0_NUMSIDB_MASK;
1671 } 1708 }
1672 1709
1710 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
1711 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1712 if (!(id & ID0_PTFS_NO_AARCH32S))
1713 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1714 }
1715
1673 /* ID1 */ 1716 /* ID1 */
1674 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1); 1717 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
1675 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12; 1718 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
@@ -1725,22 +1768,29 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1725 1768
1726 if (smmu->version == ARM_SMMU_V1) { 1769 if (smmu->version == ARM_SMMU_V1) {
1727 smmu->va_size = smmu->ipa_size; 1770 smmu->va_size = smmu->ipa_size;
1728 size = SZ_4K | SZ_2M | SZ_1G;
1729 } else { 1771 } else {
1730 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK; 1772 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
1731 smmu->va_size = arm_smmu_id_size_to_bits(size); 1773 smmu->va_size = arm_smmu_id_size_to_bits(size);
1732#ifndef CONFIG_64BIT
1733 smmu->va_size = min(32UL, smmu->va_size);
1734#endif
1735 size = 0;
1736 if (id & ID2_PTFS_4K) 1774 if (id & ID2_PTFS_4K)
1737 size |= SZ_4K | SZ_2M | SZ_1G; 1775 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
1738 if (id & ID2_PTFS_16K) 1776 if (id & ID2_PTFS_16K)
1739 size |= SZ_16K | SZ_32M; 1777 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
1740 if (id & ID2_PTFS_64K) 1778 if (id & ID2_PTFS_64K)
1741 size |= SZ_64K | SZ_512M; 1779 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
1742 } 1780 }
1743 1781
1782 /* Now we've corralled the various formats, what'll it do? */
1783 size = 0;
1784 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
1785 size |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
1786 if (smmu->features &
1787 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
1788 size |= SZ_4K | SZ_2M | SZ_1G;
1789 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
1790 size |= SZ_16K | SZ_32M;
1791 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
1792 size |= SZ_64K | SZ_512M;
1793
1744 arm_smmu_ops.pgsize_bitmap &= size; 1794 arm_smmu_ops.pgsize_bitmap &= size;
1745 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size); 1795 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);
1746 1796