author     Zhen Lei <thunder.leizhen@huawei.com>    2018-09-20 12:10:22 -0400
committer  Will Deacon <will.deacon@arm.com>        2018-10-01 08:01:32 -0400
commit     2da274cdf998a1c12afa6b5975db2df1df01edf1 (patch)
tree       0edeb8424745e5f3f4a5e42d9df6aadd1c737c83
parent     7d321bd3542500caf125249f44dc37cb4e738013 (diff)
iommu/dma: Add support for non-strict mode
With the flush queue infrastructure already abstracted into IOVA
domains, hooking it up in iommu-dma is pretty simple. Since there is a
degree of dependency on the IOMMU driver knowing what to do to play
along, we key the whole thing off a domain attribute which will be set
on default DMA ops domains to request non-strict invalidation. That
way, drivers can indicate the appropriate support by acknowledging the
attribute, and we can easily fall back to strict invalidation otherwise.

The flush queue callback needs a handle on the iommu_domain which owns
our cookie, so we have to add a pointer back to that, but neatly,
that's also sufficient to indicate whether we're using a flush queue or
not, and thus which way to release IOVAs. The only slight subtlety is
switching __iommu_dma_unmap() from calling iommu_unmap() to explicit
iommu_unmap_fast()/iommu_tlb_sync() so that we can elide the sync
entirely in non-strict mode.

Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
[rm: convert to domain attribute, tweak comments and commit message]
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
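To make the driver-side contract concrete, here is a hedged sketch (not
part of this patch) of how a driver might acknowledge the attribute from
its .domain_get_attr callback so that iommu-dma enables the flush queue
for its domains. The foo_* names and the non_strict field are
hypothetical placeholders.

static int foo_domain_get_attr(struct iommu_domain *domain,
			       enum iommu_attr attr, void *data)
{
	struct foo_domain *fd = to_foo_domain(domain);	/* hypothetical */

	switch (attr) {
	case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
		/* Non-zero asks iommu-dma to defer TLB invalidation */
		*(int *)data = fd->non_strict;
		return 0;
	default:
		return -ENODEV;
	}
}

Returning an error (or leaving the attribute unhandled) keeps the domain
on the strict path, matching the fallback described above.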
-rw-r--r--  drivers/iommu/dma-iommu.c  32
-rw-r--r--  include/linux/iommu.h       1
2 files changed, 32 insertions(+), 1 deletion(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 511ff9a1d6d9..cc1bf786cfac 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -55,6 +55,9 @@ struct iommu_dma_cookie {
 	};
 	struct list_head		msi_page_list;
 	spinlock_t			msi_lock;
+
+	/* Domain for flush queue callback; NULL if flush queue not in use */
+	struct iommu_domain		*fq_domain;
 };
 
 static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
@@ -257,6 +260,20 @@ static int iova_reserve_iommu_regions(struct device *dev,
 	return ret;
 }
 
+static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
+{
+	struct iommu_dma_cookie *cookie;
+	struct iommu_domain *domain;
+
+	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
+	domain = cookie->fq_domain;
+	/*
+	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
+	 * implies that ops->flush_iotlb_all must be non-NULL.
+	 */
+	domain->ops->flush_iotlb_all(domain);
+}
+
 /**
  * iommu_dma_init_domain - Initialise a DMA mapping domain
  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
@@ -275,6 +292,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	struct iova_domain *iovad = &cookie->iovad;
 	unsigned long order, base_pfn, end_pfn;
+	int attr;
 
 	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
 		return -EINVAL;
@@ -308,6 +326,13 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 	}
 
 	init_iova_domain(iovad, 1UL << order, base_pfn);
+
+	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
+			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
+		cookie->fq_domain = domain;
+		init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
+	}
+
 	if (!dev)
 		return 0;
 
@@ -393,6 +418,9 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
 	/* The MSI case is only ever cleaning up its most recent allocation */
 	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
 		cookie->msi_iova -= size;
+	else if (cookie->fq_domain)	/* non-strict mode */
+		queue_iova(iovad, iova_pfn(iovad, iova),
+				size >> iova_shift(iovad), 0);
 	else
 		free_iova_fast(iovad, iova_pfn(iovad, iova),
 				size >> iova_shift(iovad));
@@ -408,7 +436,9 @@ static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
 	dma_addr -= iova_off;
 	size = iova_align(iovad, size + iova_off);
 
-	WARN_ON(iommu_unmap(domain, dma_addr, size) != size);
+	WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size);
+	if (!cookie->fq_domain)
+		iommu_tlb_sync(domain);
 	iommu_dma_free_iova(cookie, dma_addr, size);
 }
 
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 87994c265bf5..decabe8e8dbe 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -124,6 +124,7 @@ enum iommu_attr {
 	DOMAIN_ATTR_FSL_PAMU_ENABLE,
 	DOMAIN_ATTR_FSL_PAMUV1,
 	DOMAIN_ATTR_NESTING,	/* two stages of translation */
+	DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
 	DOMAIN_ATTR_MAX,
 };
 
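One further note on the driver contract, per the comment in
iommu_dma_flush_iotlb_all() above: a driver acknowledging
DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE must also implement
ops->flush_iotlb_all, since that is what the flush queue calls to do
the batched invalidation. A hedged sketch, continuing the hypothetical
foo_* driver from earlier:

static void foo_flush_iotlb_all(struct iommu_domain *domain)
{
	struct foo_domain *fd = to_foo_domain(domain);	/* hypothetical */

	/*
	 * One whole-domain invalidation covers every IOVA queued by
	 * queue_iova() since the last flush; the queued IOVAs are only
	 * recycled after this returns.
	 */
	foo_tlb_inv_all(fd);			/* hypothetical */
}

static const struct iommu_ops foo_iommu_ops = {
	/* ... */
	.flush_iotlb_all	= foo_flush_iotlb_all,
	.domain_get_attr	= foo_domain_get_attr,
	/* ... */
};

In strict mode (cookie->fq_domain == NULL) none of this is exercised:
__iommu_dma_unmap() calls iommu_tlb_sync() immediately and the IOVA is
released via free_iova_fast().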