author	Will Deacon <will@kernel.org>	2019-07-02 11:44:16 -0400
committer	Will Deacon <will@kernel.org>	2019-07-29 12:22:55 -0400
commit	3445545b2248300319b6965208e77140c960c3fd (patch)
tree	5e5c54c1f85b904030b86620634bf1fcddf60916
parent	56f8af5e9d38f120cba2c2adb0786fa2dbc901a4 (diff)
iommu/io-pgtable: Introduce tlb_flush_walk() and tlb_flush_leaf()
In preparation for deferring TLB flushes to iommu_tlb_sync(), introduce two new synchronous invalidation helpers to the io-pgtable API, which allow the unmap() code to force invalidation in cases where it cannot be deferred (e.g. when replacing a table with a block or when TLBI_ON_MAP is set).

Signed-off-by: Will Deacon <will@kernel.org>
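As a rough illustration of the intended use, the unmap path could force a synchronous walk-cache flush along these lines. This is a minimal sketch, not code from this patch: example_unmap_table() and the surrounding logic are hypothetical; only iop->cfg.tlb, iop->cookie and the tlb_flush_walk() signature follow the io-pgtable conventions.

#include <linux/io-pgtable.h>

static void example_unmap_table(struct io_pgtable *iop, unsigned long iova,
				size_t size, size_t granule)
{
	const struct iommu_flush_ops *tlb = iop->cfg.tlb;

	/* ... clear the table entry pointing at the freed sub-table ... */

	/*
	 * The walk cache may still reference the freed table, so this
	 * invalidation must happen now rather than being deferred to
	 * iommu_tlb_sync(). (Hypothetical sketch, not from this patch.)
	 */
	tlb->tlb_flush_walk(iova, size, granule, iop->cookie);
}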
-rw-r--r--	include/linux/io-pgtable.h	24
1 file changed, 19 insertions(+), 5 deletions(-)
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index 6292ea15d674..27275575b305 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -19,17 +19,31 @@ enum io_pgtable_fmt {
 /**
  * struct iommu_flush_ops - IOMMU callbacks for TLB and page table management.
  *
  * @tlb_flush_all: Synchronously invalidate the entire TLB context.
- * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
- * @tlb_sync:      Ensure any queued TLB invalidation has taken effect, and
- *                 any corresponding page table updates are visible to the
- *                 IOMMU.
+ * @tlb_flush_walk: Synchronously invalidate all intermediate TLB state
+ *                  (sometimes referred to as the "walk cache") for a virtual
+ *                  address range.
+ * @tlb_flush_leaf: Synchronously invalidate all leaf TLB state for a virtual
+ *                  address range.
+ * @tlb_add_flush: Optional callback to queue up leaf TLB invalidation for a
+ *                 virtual address range. This function exists purely as an
+ *                 optimisation for IOMMUs that cannot batch TLB invalidation
+ *                 operations efficiently and are therefore better suited to
+ *                 issuing them early rather than deferring them until
+ *                 iommu_tlb_sync().
+ * @tlb_sync:      Ensure any queued TLB invalidation has taken effect, and
+ *                 any corresponding page table updates are visible to the
+ *                 IOMMU.
  *
  * Note that these can all be called in atomic context and must therefore
  * not block.
  */
 struct iommu_flush_ops {
 	void (*tlb_flush_all)(void *cookie);
+	void (*tlb_flush_walk)(unsigned long iova, size_t size, size_t granule,
+			       void *cookie);
+	void (*tlb_flush_leaf)(unsigned long iova, size_t size, size_t granule,
+			       void *cookie);
 	void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
 			      bool leaf, void *cookie);
 	void (*tlb_sync)(void *cookie);
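For reference, a driver adopting the extended interface now provides up to five callbacks. The following sketch shows the shape of such an ops table; the dummy_* helpers are illustrative stand-ins, not part of this patch, and their bodies would contain the hardware-specific invalidation commands.

#include <linux/io-pgtable.h>

static void dummy_tlb_flush_all(void *cookie)
{
	/* Invalidate the whole TLB context identified by cookie. */
}

static void dummy_tlb_flush_walk(unsigned long iova, size_t size,
				 size_t granule, void *cookie)
{
	/* Synchronously drop walk-cache entries for [iova, iova + size). */
}

static void dummy_tlb_flush_leaf(unsigned long iova, size_t size,
				 size_t granule, void *cookie)
{
	/* Synchronously drop leaf TLB entries for [iova, iova + size). */
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	/* Optional: queue an invalidation, completed later by ->tlb_sync(). */
}

static void dummy_tlb_sync(void *cookie)
{
	/* Wait for any queued invalidations to take effect. */
}

static const struct iommu_flush_ops dummy_tlb_ops = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_flush_walk	= dummy_tlb_flush_walk,
	.tlb_flush_leaf	= dummy_tlb_flush_leaf,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
};

Such a table would typically be hung off io_pgtable_cfg.tlb before the driver calls alloc_io_pgtable_ops().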