aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/iommu
diff options
context:
space:
mode:
authorRobin Murphy <Robin.Murphy@arm.com>2015-09-17 12:42:16 -0400
committerWill Deacon <will.deacon@arm.com>2015-09-22 12:35:33 -0400
commitffcb6d1686ceb4a6b50776fb2597ab0e4dd79040 (patch)
tree55f734b2d6a0ee79b0c0e5ede21e7a94feadd0c4 /drivers/iommu
parent6ff33f3902c3b1c5d0db6b1e2c70b6d76fba357f (diff)
iommu/io-pgtable-arm: Don't use dma_to_phys()
In checking whether DMA addresses differ from physical addresses, using dma_to_phys() is actually the wrong thing to do, since it may hide any DMA offset, which is precisely one of the things we are checking for. Simply casting between the two address types, whilst ugly, is in fact the appropriate course of action. Further care (and ugliness) is also necessary in the comparison to avoid truncation if phys_addr_t and dma_addr_t differ in size. We can also reject any device with a fixed DMA offset up-front at page table creation, leaving the allocation-time check for the more subtle cases like bounce buffering due to an incorrect DMA mask. Furthermore, we can then fix the hackish Kconfig dependency so that architectures without a dma_to_phys() implementation may still COMPILE_TEST (or even use!) the code. The true dependency is on the DMA API, so use the appropriate symbol for that. Signed-off-by: Robin Murphy <robin.murphy@arm.com> [will: folded in selftest fix from Yong Wu] Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'drivers/iommu')
-rw-r--r--drivers/iommu/Kconfig3
-rw-r--r--drivers/iommu/io-pgtable-arm.c24
2 files changed, 14 insertions, 13 deletions
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 4664c2a96c67..3dc1bcb0d01d 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -23,8 +23,7 @@ config IOMMU_IO_PGTABLE
23config IOMMU_IO_PGTABLE_LPAE 23config IOMMU_IO_PGTABLE_LPAE
24 bool "ARMv7/v8 Long Descriptor Format" 24 bool "ARMv7/v8 Long Descriptor Format"
25 select IOMMU_IO_PGTABLE 25 select IOMMU_IO_PGTABLE
26 # SWIOTLB guarantees a dma_to_phys() implementation 26 depends on HAS_DMA && (ARM || ARM64 || COMPILE_TEST)
27 depends on ARM || ARM64 || (COMPILE_TEST && SWIOTLB)
28 help 27 help
29 Enable support for the ARM long descriptor pagetable format. 28 Enable support for the ARM long descriptor pagetable format.
30 This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page 29 This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 73c07482f487..7df97777662d 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -202,9 +202,9 @@ typedef u64 arm_lpae_iopte;
202 202
203static bool selftest_running = false; 203static bool selftest_running = false;
204 204
205static dma_addr_t __arm_lpae_dma_addr(struct device *dev, void *pages) 205static dma_addr_t __arm_lpae_dma_addr(void *pages)
206{ 206{
207 return phys_to_dma(dev, virt_to_phys(pages)); 207 return (dma_addr_t)virt_to_phys(pages);
208} 208}
209 209
210static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp, 210static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
@@ -223,10 +223,10 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
223 goto out_free; 223 goto out_free;
224 /* 224 /*
225 * We depend on the IOMMU being able to work with any physical 225 * We depend on the IOMMU being able to work with any physical
226 * address directly, so if the DMA layer suggests it can't by 226 * address directly, so if the DMA layer suggests otherwise by
227 * giving us back some translation, that bodes very badly... 227 * translating or truncating them, that bodes very badly...
228 */ 228 */
229 if (dma != __arm_lpae_dma_addr(dev, pages)) 229 if (dma != virt_to_phys(pages))
230 goto out_unmap; 230 goto out_unmap;
231 } 231 }
232 232
@@ -243,10 +243,8 @@ out_free:
243static void __arm_lpae_free_pages(void *pages, size_t size, 243static void __arm_lpae_free_pages(void *pages, size_t size,
244 struct io_pgtable_cfg *cfg) 244 struct io_pgtable_cfg *cfg)
245{ 245{
246 struct device *dev = cfg->iommu_dev;
247
248 if (!selftest_running) 246 if (!selftest_running)
249 dma_unmap_single(dev, __arm_lpae_dma_addr(dev, pages), 247 dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
250 size, DMA_TO_DEVICE); 248 size, DMA_TO_DEVICE);
251 free_pages_exact(pages, size); 249 free_pages_exact(pages, size);
252} 250}
@@ -254,12 +252,11 @@ static void __arm_lpae_free_pages(void *pages, size_t size,
254static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte, 252static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
255 struct io_pgtable_cfg *cfg) 253 struct io_pgtable_cfg *cfg)
256{ 254{
257 struct device *dev = cfg->iommu_dev;
258
259 *ptep = pte; 255 *ptep = pte;
260 256
261 if (!selftest_running) 257 if (!selftest_running)
262 dma_sync_single_for_device(dev, __arm_lpae_dma_addr(dev, ptep), 258 dma_sync_single_for_device(cfg->iommu_dev,
259 __arm_lpae_dma_addr(ptep),
263 sizeof(pte), DMA_TO_DEVICE); 260 sizeof(pte), DMA_TO_DEVICE);
264} 261}
265 262
@@ -629,6 +626,11 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
629 if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS) 626 if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
630 return NULL; 627 return NULL;
631 628
629 if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
630 dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
631 return NULL;
632 }
633
632 data = kmalloc(sizeof(*data), GFP_KERNEL); 634 data = kmalloc(sizeof(*data), GFP_KERNEL);
633 if (!data) 635 if (!data)
634 return NULL; 636 return NULL;