aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMarek Szyprowski <m.szyprowski@samsung.com>2017-01-09 07:03:54 -0500
committerJoerg Roedel <jroedel@suse.de>2017-01-10 09:01:21 -0500
commit0d6d3da46ac5949ab5d373fdecf4b4e85a04731c (patch)
tree2b3b85db1b414ccf077394baffeb589e4e73b5c8
parentec5d241b5f8b2d5f002070fd8fdbf71b6664bacb (diff)
iommu/exynos: Fix warnings from DMA-debug
Add simple checks for the dma_map_single() return value to make the DMA-debug checker happy. The Exynos IOMMU on Samsung Exynos SoCs always uses a device which has linear DMA mapping ops (the DMA address is equal to the physical memory address), so no failures are returned from dma_map_single(). Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com> Signed-off-by: Joerg Roedel <jroedel@suse.de>
-rw-r--r--drivers/iommu/exynos-iommu.c12
1 files changed, 11 insertions, 1 deletions
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index ac726e1760de..dda4e5907979 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -744,6 +744,8 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
744 DMA_TO_DEVICE); 744 DMA_TO_DEVICE);
745 /* For mapping page table entries we rely on dma == phys */ 745 /* For mapping page table entries we rely on dma == phys */
746 BUG_ON(handle != virt_to_phys(domain->pgtable)); 746 BUG_ON(handle != virt_to_phys(domain->pgtable));
747 if (dma_mapping_error(dma_dev, handle))
748 goto err_lv2ent;
747 749
748 spin_lock_init(&domain->lock); 750 spin_lock_init(&domain->lock);
749 spin_lock_init(&domain->pgtablelock); 751 spin_lock_init(&domain->pgtablelock);
@@ -755,6 +757,8 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
755 757
756 return &domain->domain; 758 return &domain->domain;
757 759
760err_lv2ent:
761 free_pages((unsigned long)domain->lv2entcnt, 1);
758err_counter: 762err_counter:
759 free_pages((unsigned long)domain->pgtable, 2); 763 free_pages((unsigned long)domain->pgtable, 2);
760err_dma_cookie: 764err_dma_cookie:
@@ -898,6 +902,7 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
898 } 902 }
899 903
900 if (lv1ent_fault(sent)) { 904 if (lv1ent_fault(sent)) {
905 dma_addr_t handle;
901 sysmmu_pte_t *pent; 906 sysmmu_pte_t *pent;
902 bool need_flush_flpd_cache = lv1ent_zero(sent); 907 bool need_flush_flpd_cache = lv1ent_zero(sent);
903 908
@@ -909,7 +914,12 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
909 update_pte(sent, mk_lv1ent_page(virt_to_phys(pent))); 914 update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
910 kmemleak_ignore(pent); 915 kmemleak_ignore(pent);
911 *pgcounter = NUM_LV2ENTRIES; 916 *pgcounter = NUM_LV2ENTRIES;
912 dma_map_single(dma_dev, pent, LV2TABLE_SIZE, DMA_TO_DEVICE); 917 handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
918 DMA_TO_DEVICE);
919 if (dma_mapping_error(dma_dev, handle)) {
920 kmem_cache_free(lv2table_kmem_cache, pent);
921 return ERR_PTR(-EADDRINUSE);
922 }
913 923
914 /* 924 /*
915 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table, 925 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,