summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
author	Will Deacon <will@kernel.org>	2019-07-02 11:43:34 -0400
committer	Will Deacon <will@kernel.org>	2019-07-24 08:32:33 -0400
commit	298f78895b081911e0b3605f07d79ebd3d4cf7b0 (patch)
tree	f4ba760feb4069d598671753468e716aa223fc41
parent	f71da46719460acd5afa411e52dc8cdf1cb9b0ce (diff)
iommu/io-pgtable: Rename iommu_gather_ops to iommu_flush_ops
In preparation for TLB flush gathering in the IOMMU API, rename the
iommu_gather_ops structure in io-pgtable to iommu_flush_ops, which
better describes its purpose and avoids the potential for confusion
between different levels of the API.

$ find linux/ -type f -name '*.[ch]' | xargs sed -i 's/gather_ops/flush_ops/g'

Signed-off-by: Will Deacon <will@kernel.org>
-rw-r--r--	drivers/gpu/drm/panfrost/panfrost_mmu.c	| 2
-rw-r--r--	drivers/iommu/arm-smmu-v3.c	| 4
-rw-r--r--	drivers/iommu/arm-smmu.c	| 8
-rw-r--r--	drivers/iommu/io-pgtable-arm-v7s.c	| 2
-rw-r--r--	drivers/iommu/io-pgtable-arm.c	| 2
-rw-r--r--	drivers/iommu/ipmmu-vmsa.c	| 4
-rw-r--r--	drivers/iommu/msm_iommu.c	| 4
-rw-r--r--	drivers/iommu/mtk_iommu.c	| 4
-rw-r--r--	drivers/iommu/qcom_iommu.c	| 4
-rw-r--r--	include/linux/io-pgtable.h	| 6
10 files changed, 20 insertions, 20 deletions
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 92ac995dd9c6..17bceb11e708 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -257,7 +257,7 @@ static void mmu_tlb_sync_context(void *cookie)
 	// TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
 }
 
-static const struct iommu_gather_ops mmu_tlb_ops = {
+static const struct iommu_flush_ops mmu_tlb_ops = {
 	.tlb_flush_all = mmu_tlb_inv_context_s1,
 	.tlb_add_flush = mmu_tlb_inv_range_nosync,
 	.tlb_sync = mmu_tlb_sync_context,
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index a9a9fabd3968..7e137e1e28f1 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1603,7 +1603,7 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
 	} while (size -= granule);
 }
 
-static const struct iommu_gather_ops arm_smmu_gather_ops = {
+static const struct iommu_flush_ops arm_smmu_flush_ops = {
 	.tlb_flush_all = arm_smmu_tlb_inv_context,
 	.tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
 	.tlb_sync = arm_smmu_tlb_sync,
@@ -1796,7 +1796,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
 		.ias = ias,
 		.oas = oas,
 		.coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENCY,
-		.tlb = &arm_smmu_gather_ops,
+		.tlb = &arm_smmu_flush_ops,
 		.iommu_dev = smmu->dev,
 	};
 
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 64977c131ee6..dc08db347ef3 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -251,7 +251,7 @@ enum arm_smmu_domain_stage {
 struct arm_smmu_domain {
 	struct arm_smmu_device *smmu;
 	struct io_pgtable_ops *pgtbl_ops;
-	const struct iommu_gather_ops *tlb_ops;
+	const struct iommu_flush_ops *tlb_ops;
 	struct arm_smmu_cfg cfg;
 	enum arm_smmu_domain_stage stage;
 	bool non_strict;
@@ -547,19 +547,19 @@ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
 	writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
 }
 
-static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
+static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = {
 	.tlb_flush_all = arm_smmu_tlb_inv_context_s1,
 	.tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
 	.tlb_sync = arm_smmu_tlb_sync_context,
 };
 
-static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
+static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
 	.tlb_flush_all = arm_smmu_tlb_inv_context_s2,
 	.tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
 	.tlb_sync = arm_smmu_tlb_sync_context,
 };
 
-static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = {
+static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
 	.tlb_flush_all = arm_smmu_tlb_inv_context_s2,
 	.tlb_add_flush = arm_smmu_tlb_inv_vmid_nosync,
 	.tlb_sync = arm_smmu_tlb_sync_vmid,
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index a62733c6a632..116f97ee991e 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -817,7 +817,7 @@ static void dummy_tlb_sync(void *cookie)
 	WARN_ON(cookie != cfg_cookie);
 }
 
-static const struct iommu_gather_ops dummy_tlb_ops = {
+static const struct iommu_flush_ops dummy_tlb_ops = {
 	.tlb_flush_all = dummy_tlb_flush_all,
 	.tlb_add_flush = dummy_tlb_add_flush,
 	.tlb_sync = dummy_tlb_sync,
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 0d6633921c1e..402f913b6f6d 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -1081,7 +1081,7 @@ static void dummy_tlb_sync(void *cookie)
 	WARN_ON(cookie != cfg_cookie);
 }
 
-static const struct iommu_gather_ops dummy_tlb_ops __initconst = {
+static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
 	.tlb_flush_all = dummy_tlb_flush_all,
 	.tlb_add_flush = dummy_tlb_add_flush,
 	.tlb_sync = dummy_tlb_sync,
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index ad0098c0c87c..2c14a2c65b22 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -367,7 +367,7 @@ static void ipmmu_tlb_add_flush(unsigned long iova, size_t size,
 	/* The hardware doesn't support selective TLB flush. */
 }
 
-static const struct iommu_gather_ops ipmmu_gather_ops = {
+static const struct iommu_flush_ops ipmmu_flush_ops = {
 	.tlb_flush_all = ipmmu_tlb_flush_all,
 	.tlb_add_flush = ipmmu_tlb_add_flush,
 	.tlb_sync = ipmmu_tlb_flush_all,
@@ -480,7 +480,7 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
 	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
 	domain->cfg.ias = 32;
 	domain->cfg.oas = 40;
-	domain->cfg.tlb = &ipmmu_gather_ops;
+	domain->cfg.tlb = &ipmmu_flush_ops;
 	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
 	domain->io_domain.geometry.force_aperture = true;
 	/*
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index b25e2eb9e038..8b602384a385 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -178,7 +178,7 @@ static void __flush_iotlb_sync(void *cookie)
 	 */
 }
 
-static const struct iommu_gather_ops msm_iommu_gather_ops = {
+static const struct iommu_flush_ops msm_iommu_flush_ops = {
 	.tlb_flush_all = __flush_iotlb,
 	.tlb_add_flush = __flush_iotlb_range,
 	.tlb_sync = __flush_iotlb_sync,
@@ -345,7 +345,7 @@ static int msm_iommu_domain_config(struct msm_priv *priv)
 		.pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
 		.ias = 32,
 		.oas = 32,
-		.tlb = &msm_iommu_gather_ops,
+		.tlb = &msm_iommu_flush_ops,
 		.iommu_dev = priv->dev,
 	};
 
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 82e4be4dfdaf..fed77658d67e 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -188,7 +188,7 @@ static void mtk_iommu_tlb_sync(void *cookie)
 	}
 }
 
-static const struct iommu_gather_ops mtk_iommu_gather_ops = {
+static const struct iommu_flush_ops mtk_iommu_flush_ops = {
 	.tlb_flush_all = mtk_iommu_tlb_flush_all,
 	.tlb_add_flush = mtk_iommu_tlb_add_flush_nosync,
 	.tlb_sync = mtk_iommu_tlb_sync,
@@ -267,7 +267,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
 		.pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
 		.ias = 32,
 		.oas = 32,
-		.tlb = &mtk_iommu_gather_ops,
+		.tlb = &mtk_iommu_flush_ops,
 		.iommu_dev = data->dev,
 	};
 
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index 34d0b9783b3e..fd9d9f4da735 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -164,7 +164,7 @@ static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
 	}
 }
 
-static const struct iommu_gather_ops qcom_gather_ops = {
+static const struct iommu_flush_ops qcom_flush_ops = {
 	.tlb_flush_all = qcom_iommu_tlb_inv_context,
 	.tlb_add_flush = qcom_iommu_tlb_inv_range_nosync,
 	.tlb_sync = qcom_iommu_tlb_sync,
@@ -215,7 +215,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
 		.pgsize_bitmap = qcom_iommu_ops.pgsize_bitmap,
 		.ias = 32,
 		.oas = 40,
-		.tlb = &qcom_gather_ops,
+		.tlb = &qcom_flush_ops,
 		.iommu_dev = qcom_iommu->dev,
 	};
 
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index b5a450a3bb47..6292ea15d674 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -17,7 +17,7 @@ enum io_pgtable_fmt {
 };
 
 /**
- * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management.
+ * struct iommu_flush_ops - IOMMU callbacks for TLB and page table management.
  *
  * @tlb_flush_all: Synchronously invalidate the entire TLB context.
  * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
@@ -28,7 +28,7 @@ enum io_pgtable_fmt {
  * Note that these can all be called in atomic context and must therefore
  * not block.
  */
-struct iommu_gather_ops {
+struct iommu_flush_ops {
 	void (*tlb_flush_all)(void *cookie);
 	void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
 			      bool leaf, void *cookie);
@@ -84,7 +84,7 @@ struct io_pgtable_cfg {
 	unsigned int ias;
 	unsigned int oas;
 	bool coherent_walk;
-	const struct iommu_gather_ops *tlb;
+	const struct iommu_flush_ops *tlb;
 	struct device *iommu_dev;
 
 	/* Low-level data specific to the table format */