path: root/drivers/iommu/arm-smmu.c
author	Robin Murphy <robin.murphy@arm.com>	2016-08-11 12:44:06 -0400
committer	Will Deacon <will.deacon@arm.com>	2016-09-16 04:34:13 -0400
commit	6070529bebd26e0a80d9b9653a6f53275086603f (patch)
tree	5a1cac9f1b3125b7b5be278d18295b25ba6bb15e /drivers/iommu/arm-smmu.c
parent	b4163fb3333cf2279f5bfa2bb4d2d93aa66a3eac (diff)
iommu/arm-smmu: Support v7s context format
Fill in the last bits of machinery required to drive a stage 1 context bank in v7 short descriptor format. By default we'll prefer to use it only when the CPUs are also using the same format, such that we're guaranteed that everything will be strictly 32-bit.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
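As a rough, self-contained sketch of that default policy (not the kernel code itself; the boolean parameters below stand in for the Kconfig and hardware-feature checks visible in arm_smmu_init_domain_context() further down):

#include <stdbool.h>

enum ctx_fmt { CTX_FMT_NONE, CTX_FMT_AARCH32_L, CTX_FMT_AARCH32_S };

/*
 * Prefer the v7 short-descriptor format for a stage 1 context bank only
 * when the CPUs are also running 32-bit without LPAE, so that everything
 * involved is guaranteed to be strictly 32-bit; otherwise fall back to
 * the AArch32 long-descriptor format when the SMMU supports it.
 */
static enum ctx_fmt pick_stage1_fmt(bool cpu_32bit_non_lpae,
				    bool smmu_has_v7s,
				    bool smmu_has_aarch32_l)
{
	enum ctx_fmt fmt = CTX_FMT_NONE;

	if (smmu_has_aarch32_l)
		fmt = CTX_FMT_AARCH32_L;
	if (smmu_has_v7s && cpu_32bit_non_lpae)
		fmt = CTX_FMT_AARCH32_S;	/* v7s preferred by default */
	return fmt;
}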
Diffstat (limited to 'drivers/iommu/arm-smmu.c')
-rw-r--r--	drivers/iommu/arm-smmu.c	69
1 file changed, 47 insertions(+), 22 deletions(-)
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 069b2ea89113..4b1c87e947fd 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -217,6 +217,7 @@
 #define ARM_SMMU_CB_TTBR0		0x20
 #define ARM_SMMU_CB_TTBR1		0x28
 #define ARM_SMMU_CB_TTBCR		0x30
+#define ARM_SMMU_CB_CONTEXTIDR		0x34
 #define ARM_SMMU_CB_S1_MAIR0		0x38
 #define ARM_SMMU_CB_S1_MAIR1		0x3c
 #define ARM_SMMU_CB_PAR			0x50
@@ -239,7 +240,6 @@
 #define SCTLR_AFE			(1 << 2)
 #define SCTLR_TRE			(1 << 1)
 #define SCTLR_M				(1 << 0)
-#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)
 
 #define ARM_MMU500_ACTLR_CPRE		(1 << 1)
 
@@ -738,7 +738,7 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
 static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
 				       struct io_pgtable_cfg *pgtbl_cfg)
 {
-	u32 reg;
+	u32 reg, reg2;
 	u64 reg64;
 	bool stage1;
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
@@ -781,14 +781,22 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
 
 	/* TTBRs */
 	if (stage1) {
-		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
-
-		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
-		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
-
-		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
-		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
-		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
+		u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);
+
+		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
+			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
+			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
+			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
+			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
+			writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
+		} else {
+			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
+			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
+			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
+			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
+			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
+			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
+		}
 	} else {
 		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
 		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
@@ -796,28 +804,36 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
 
 	/* TTBCR */
 	if (stage1) {
-		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
-		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
-		if (smmu->version > ARM_SMMU_V1) {
-			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
-			reg |= TTBCR2_SEP_UPSTREAM;
-			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
+		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
+			reg = pgtbl_cfg->arm_v7s_cfg.tcr;
+			reg2 = 0;
+		} else {
+			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
+			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
+			reg2 |= TTBCR2_SEP_UPSTREAM;
 		}
+		if (smmu->version > ARM_SMMU_V1)
+			writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
 	} else {
 		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
-		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
 	}
+	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
 
 	/* MAIRs (stage-1 only) */
 	if (stage1) {
-		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
+		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
+			reg = pgtbl_cfg->arm_v7s_cfg.prrr;
+			reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
+		} else {
+			reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
+			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
+		}
 		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
-		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
-		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
+		writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
 	}
 
 	/* SCTLR */
-	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
+	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
 	if (stage1)
 		reg |= SCTLR_S1_ASIDPNE;
 #ifdef __BIG_ENDIAN
@@ -880,6 +896,11 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	 */
 	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
 		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
+	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
+	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
+	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
+	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
+		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
 	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
 	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
 			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
@@ -899,10 +920,14 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 		oas = smmu->ipa_size;
 		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
 			fmt = ARM_64_LPAE_S1;
-		} else {
+		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
 			fmt = ARM_32_LPAE_S1;
 			ias = min(ias, 32UL);
 			oas = min(oas, 40UL);
+		} else {
+			fmt = ARM_V7S;
+			ias = min(ias, 32UL);
+			oas = min(oas, 32UL);
 		}
 		break;
 	case ARM_SMMU_DOMAIN_NESTED: