Diffstat (limited to 'drivers/iommu/arm-smmu.c')
-rw-r--r--	drivers/iommu/arm-smmu.c	48
1 file changed, 19 insertions, 29 deletions
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 085fc8d808a5..acff3326f818 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -34,6 +34,7 @@
 #include <linux/err.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
 #include <linux/iommu.h>
 #include <linux/iopoll.h>
 #include <linux/module.h>
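
For context: on a 32-bit kernel with no native 64-bit MMIO accessors, <linux/io-64-nonatomic-hi-lo.h> supplies readq/writeq variants built from two 32-bit accesses, high word first. A rough sketch of the relaxed helpers it is understood to provide (not the header verbatim):

	static inline void hi_lo_writeq_relaxed(u64 val, volatile void __iomem *addr)
	{
		writel_relaxed(val >> 32, addr + 4);	/* high word first */
		writel_relaxed(val, addr);		/* then low word */
	}

	static inline u64 hi_lo_readq_relaxed(const volatile void __iomem *addr)
	{
		u32 high, low;

		high = readl_relaxed(addr + 4);		/* high word first */
		low  = readl_relaxed(addr);
		return ((u64)high << 32) | low;
	}

The write side matches the open-coded smmu_writeq macro removed in the next hunk, which is what allows the driver to drop it in favour of the generic fallback.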
@@ -71,16 +72,15 @@
 		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
 			? 0x400 : 0))
 
+/*
+ * Some 64-bit registers only make sense to write atomically, but in such
+ * cases all the data relevant to AArch32 formats lies within the lower word,
+ * therefore this actually makes more sense than it might first appear.
+ */
 #ifdef CONFIG_64BIT
-#define smmu_writeq		writeq_relaxed
+#define smmu_write_atomic_lq		writeq_relaxed
 #else
-#define smmu_writeq(reg64, addr)				\
-	do {							\
-		u64 __val = (reg64);				\
-		void __iomem *__addr = (addr);			\
-		writel_relaxed(__val >> 32, __addr + 4);	\
-		writel_relaxed(__val, __addr);			\
-	} while (0)
+#define smmu_write_atomic_lq		writel_relaxed
 #endif
 
 /* Configuration registers */
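
With that fallback in place, 64-bit writes fall into two classes: the TTBRs keep plain writeq_relaxed() and may legitimately be written as two halves on 32-bit, while the registers that only make sense to write atomically (the TLB invalidation address and ATS1PR, per the comments in this file) go through the new smmu_write_atomic_lq(), which always issues a single store; on 32-bit that store covers only the lower word, which is all the AArch32 formats use. A hedged illustration of the convention (example_smmu_writes is a hypothetical function, not part of the patch):

	/* Illustrative only: the two classes of 64-bit register write. */
	static void example_smmu_writes(void __iomem *cb_base, u64 ttbr, u64 iova)
	{
		/* TTBRs tolerate a split hi/lo update, so the generic
		 * writeq_relaxed() fallback is fine on 32-bit kernels. */
		writeq_relaxed(ttbr, cb_base + ARM_SMMU_CB_TTBR0);

		/* TLB invalidation must land as one store: writeq_relaxed
		 * on 64-bit, a single low-word writel_relaxed otherwise. */
		smmu_write_atomic_lq(iova >> 12, cb_base + ARM_SMMU_CB_S2_TLBIIPAS2);
	}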
@@ -211,11 +211,9 @@
 #define ARM_SMMU_CB_TTBCR		0x30
 #define ARM_SMMU_CB_S1_MAIR0		0x38
 #define ARM_SMMU_CB_S1_MAIR1		0x3c
-#define ARM_SMMU_CB_PAR_LO		0x50
-#define ARM_SMMU_CB_PAR_HI		0x54
+#define ARM_SMMU_CB_PAR			0x50
 #define ARM_SMMU_CB_FSR			0x58
-#define ARM_SMMU_CB_FAR_LO		0x60
-#define ARM_SMMU_CB_FAR_HI		0x64
+#define ARM_SMMU_CB_FAR			0x60
 #define ARM_SMMU_CB_FSYNR0		0x68
 #define ARM_SMMU_CB_S1_TLBIVA		0x600
 #define ARM_SMMU_CB_S1_TLBIASID		0x610
@@ -645,7 +643,7 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
 			      ARM_SMMU_CB_S2_TLBIIPAS2;
 		iova >>= 12;
 		do {
-			writeq_relaxed(iova, reg);
+			smmu_write_atomic_lq(iova, reg);
 			iova += granule >> 12;
 		} while (size -= granule);
 #endif
@@ -664,7 +662,7 @@ static struct iommu_gather_ops arm_smmu_gather_ops = {
 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 {
 	int flags, ret;
-	u32 fsr, far, fsynr, resume;
+	u32 fsr, fsynr, resume;
 	unsigned long iova;
 	struct iommu_domain *domain = dev;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -686,13 +684,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
 	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
 
-	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO);
-	iova = far;
-#ifdef CONFIG_64BIT
-	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI);
-	iova |= ((unsigned long)far << 32);
-#endif
-
+	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
 	if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
 		ret = IRQ_HANDLED;
 		resume = RESUME_RETRY;
@@ -788,14 +780,14 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
 		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
 
 		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
-		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);
+		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
 
 		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
 		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
-		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR1);
+		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
 	} else {
 		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
-		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);
+		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
 	}
 
 	/* TTBCR */
@@ -1263,8 +1255,8 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
 	/* ATS1 registers can only be written atomically */
 	va = iova & ~0xfffUL;
 	if (smmu->version == ARM_SMMU_V2)
-		smmu_writeq(va, cb_base + ARM_SMMU_CB_ATS1PR);
-	else
+		smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
+	else /* Register is only 32-bit in v1 */
 		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
 
 	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
@@ -1275,9 +1267,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
 		return ops->iova_to_phys(ops, iova);
 	}
 
-	phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
-	phys |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32;
-
+	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
 	if (phys & CB_PAR_F) {
 		dev_err(dev, "translation fault!\n");
 		dev_err(dev, "PAR = 0x%llx\n", phys);