about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorRobin Murphy <robin.murphy@arm.com>2017-07-06 12:55:30 -0400
committerWill Deacon <will.deacon@arm.com>2017-07-20 05:30:27 -0400
commit98a8f63e56a0bdcf1d0af8d840d011ab90386684 (patch)
treead2d756e8769b73d814d93cc818ce5f8598e0169
parent8e517e762a826d16451fb6ffb0a8722e4265582e (diff)
iommu/mtk: Avoid redundant TLB syncs locally
Under certain circumstances, the io-pgtable code may end up issuing two TLB sync operations without any intervening invalidations. This goes badly for the M4U hardware, since it means the second sync ends up polling for a non-existent operation to finish, and as a result times out and warns. The io_pgtable_tlb_* helpers implement a high-level optimisation to avoid issuing the second sync at all in such cases, but in order to work correctly that requires all pagetable operations to be serialised under a lock, thus is no longer applicable to all io-pgtable users. Since we're the only user actually relying on this flag for correctness, let's reimplement it locally to avoid the headache of trying to make the high-level version concurrency-safe for other users. CC: Yong Wu <yong.wu@mediatek.com> CC: Matthias Brugger <matthias.bgg@gmail.com> Tested-by: Yong Wu <yong.wu@mediatek.com> Signed-off-by: Robin Murphy <robin.murphy@arm.com> Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r--drivers/iommu/mtk_iommu.c6
-rw-r--r--drivers/iommu/mtk_iommu.h1
2 files changed, 7 insertions, 0 deletions
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 5d14cd15198d..91c6d367ab35 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -129,6 +129,7 @@ static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
 	writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
 	writel_relaxed(iova + size - 1, data->base + REG_MMU_INVLD_END_A);
 	writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE);
+	data->tlb_flush_active = true;
 }
 
 static void mtk_iommu_tlb_sync(void *cookie)
@@ -137,6 +138,10 @@ static void mtk_iommu_tlb_sync(void *cookie)
 	int ret;
 	u32 tmp;
 
+	/* Avoid timing out if there's nothing to wait for */
+	if (!data->tlb_flush_active)
+		return;
+
 	ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE, tmp,
 					tmp != 0, 10, 100000);
 	if (ret) {
@@ -146,6 +151,7 @@ static void mtk_iommu_tlb_sync(void *cookie)
 	}
 	/* Clear the CPE status */
 	writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
+	data->tlb_flush_active = false;
 }
 
 static const struct iommu_gather_ops mtk_iommu_gather_ops = {
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
index 2a28eadeea0e..c06cc91b5d9a 100644
--- a/drivers/iommu/mtk_iommu.h
+++ b/drivers/iommu/mtk_iommu.h
@@ -47,6 +47,7 @@ struct mtk_iommu_data {
 	struct iommu_group		*m4u_group;
 	struct mtk_smi_iommu		smi_imu;	/* SMI larb iommu info */
 	bool				enable_4GB;
+	bool				tlb_flush_active;
 
 	struct iommu_device		iommu;
 };