Diffstat (limited to 'drivers/iommu/dma-iommu.c')
-rw-r--r--	drivers/iommu/dma-iommu.c	14
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 08a1e2f3690f..00c8a08d56e7 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -68,7 +68,8 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
 	if (!iovad)
 		return;
 
-	put_iova_domain(iovad);
+	if (iovad->granule)
+		put_iova_domain(iovad);
 	kfree(iovad);
 	domain->iova_cookie = NULL;
 }
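A note on the hunk above: put_iova_domain() is only safe once the iova_domain has actually been initialised, and iovad->granule becomes non-zero only after init_iova_domain() has run, so a cookie that was allocated but never initialised can now be torn down without touching uninitialised state. A minimal sketch of the function with this hunk applied, assuming (from the __alloc_iova() hunk below) that domain->iova_cookie is used directly as the struct iova_domain pointer:

void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	/* Assumed from the later hunk: the cookie is the iova_domain itself */
	struct iova_domain *iovad = domain->iova_cookie;

	if (!iovad)
		return;

	/* granule is only set by init_iova_domain(), so skip the teardown
	 * if the IOVA domain was never fully initialised */
	if (iovad->granule)
		put_iova_domain(iovad);
	kfree(iovad);
	domain->iova_cookie = NULL;
}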
@@ -151,12 +152,15 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
 	}
 }
 
-static struct iova *__alloc_iova(struct iova_domain *iovad, size_t size,
+static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
 		dma_addr_t dma_limit)
 {
+	struct iova_domain *iovad = domain->iova_cookie;
 	unsigned long shift = iova_shift(iovad);
 	unsigned long length = iova_align(iovad, size) >> shift;
 
+	if (domain->geometry.force_aperture)
+		dma_limit = min(dma_limit, domain->geometry.aperture_end);
 	/*
 	 * Enforce size-alignment to be safe - there could perhaps be an
 	 * attribute to control this per-device, or at least per-domain...
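With __alloc_iova() now taking the iommu_domain rather than the bare iova_domain, it can consult domain->geometry: when the hardware enforces its aperture (force_aperture), the allocation limit is clamped to aperture_end, so no IOVA is handed out that the IOMMU cannot translate even if the device's DMA mask is wider. A hypothetical standalone sketch of that clamping, not kernel code and with illustrative names only:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: the effective limit is the smaller of the device's
 * DMA limit and the aperture end whenever the aperture is enforced. */
static uint64_t effective_dma_limit(uint64_t dev_limit, uint64_t aperture_end,
				    int force_aperture)
{
	if (force_aperture && aperture_end < dev_limit)
		return aperture_end;
	return dev_limit;
}

int main(void)
{
	/* e.g. a 64-bit-capable device behind an IOMMU with a 32-bit aperture */
	printf("%#llx\n", (unsigned long long)
	       effective_dma_limit(~0ULL, 0xffffffffULL, 1));
	return 0;
}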
@@ -314,7 +318,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
 	if (!pages)
 		return NULL;
 
-	iova = __alloc_iova(iovad, size, dev->coherent_dma_mask);
+	iova = __alloc_iova(domain, size, dev->coherent_dma_mask);
 	if (!iova)
 		goto out_free_pages;
 
@@ -386,7 +390,7 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 	phys_addr_t phys = page_to_phys(page) + offset;
 	size_t iova_off = iova_offset(iovad, phys);
 	size_t len = iova_align(iovad, size + iova_off);
-	struct iova *iova = __alloc_iova(iovad, len, dma_get_mask(dev));
+	struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev));
 
 	if (!iova)
 		return DMA_ERROR_CODE;
@@ -538,7 +542,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		prev = s;
 	}
 
-	iova = __alloc_iova(iovad, iova_len, dma_get_mask(dev));
+	iova = __alloc_iova(domain, iova_len, dma_get_mask(dev));
 	if (!iova)
 		goto out_restore_sg;
 