author    Marek Szyprowski <m.szyprowski@samsung.com>    2016-02-18 09:12:50 -0500
committer Joerg Roedel <jroedel@suse.de>                 2016-02-25 09:32:09 -0500
commit    5e3435eb7e1d8c9431254f5e0053ce1ad654a591 (patch)
tree      8be5d517c3d188574d3663f95fa88c6fb3e2185c
parent    58c6f6a3dc97ed1a5a5262279a2e9db41bd6d035 (diff)
iommu/exynos: Remove ARM-specific cache flush interface
This patch replaces custom ARM-specific code for performing CPU cache flush
operations with generic code based on DMA-mapping. Domain managing code is
independent of particular SYSMMU device, so the first registered SYSMMU device
is used for DMA-mapping calls. This simplification works fine because all
SYSMMU controllers are in the same address space (where DMA address equals
physical address) and the DMA-mapping calls are done mainly to flush CPU cache
to make changes visible to SYSMMU controllers.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
-rw-r--r--    drivers/iommu/exynos-iommu.c    74
1 file changed, 49 insertions, 25 deletions
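At the heart of the change is the update_pte() helper introduced by this patch:
every page-table entry write is bracketed by dma_sync_single_for_cpu() /
dma_sync_single_for_device() calls against dma_dev, the first registered SYSMMU
device, and the DMA-mapping layer turns these into the required CPU cache
maintenance. A condensed sketch of the pattern, with types and helpers as in
drivers/iommu/exynos-iommu.c:

/* condensed from the diff below; dma_dev is set in exynos_iommu_of_setup()
 * to the first registered SYSMMU platform device */
static struct device *dma_dev;

static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
	/* hand the entry back to the CPU before modifying it */
	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
				DMA_TO_DEVICE);
	*ent = val;
	/* flush the updated entry so the SYSMMU observes the new value */
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
				   DMA_TO_DEVICE);
}

Because the SYSMMU controllers live in an address space where DMA address
equals physical address, virt_to_phys() of a page-table entry can be passed
directly as the DMA handle; the dma_map_single() call in
exynos_iommu_domain_alloc() asserts this with a BUG_ON().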
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 595e0da55db4..8c8a7f7968d1 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -27,9 +27,6 @@
 #include <linux/slab.h>
 #include <linux/dma-iommu.h>
 
-#include <asm/cacheflush.h>
-#include <asm/pgtable.h>
-
 typedef u32 sysmmu_iova_t;
 typedef u32 sysmmu_pte_t;
 
@@ -83,6 +80,7 @@ static u32 lv2ent_offset(sysmmu_iova_t iova)
 	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
 }
 
+#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
 #define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))
 
 #define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
@@ -134,6 +132,7 @@ static u32 lv2ent_offset(sysmmu_iova_t iova)
 
 #define has_sysmmu(dev) (dev->archdata.iommu != NULL)
 
+static struct device *dma_dev;
 static struct kmem_cache *lv2table_kmem_cache;
 static sysmmu_pte_t *zero_lv2_table;
 #define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))
@@ -650,16 +649,19 @@ static struct platform_driver exynos_sysmmu_driver __refdata = {
 	}
 };
 
-static inline void pgtable_flush(void *vastart, void *vaend)
+static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
 {
-	dmac_flush_range(vastart, vaend);
-	outer_flush_range(virt_to_phys(vastart),
-				virt_to_phys(vaend));
+	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
+				DMA_TO_DEVICE);
+	*ent = val;
+	dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
+				   DMA_TO_DEVICE);
 }
 
 static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
 {
 	struct exynos_iommu_domain *domain;
+	dma_addr_t handle;
 	int i;
 
@@ -694,7 +696,10 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
 		domain->pgtable[i + 7] = ZERO_LV2LINK;
 	}
 
-	pgtable_flush(domain->pgtable, domain->pgtable + NUM_LV1ENTRIES);
+	handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
+				DMA_TO_DEVICE);
+	/* For mapping page table entries we rely on dma == phys */
+	BUG_ON(handle != virt_to_phys(domain->pgtable));
 
 	spin_lock_init(&domain->lock);
 	spin_lock_init(&domain->pgtablelock);
@@ -738,10 +743,18 @@ static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
 	if (iommu_domain->type == IOMMU_DOMAIN_DMA)
 		iommu_put_dma_cookie(iommu_domain);
 
+	dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
+			 DMA_TO_DEVICE);
+
 	for (i = 0; i < NUM_LV1ENTRIES; i++)
-		if (lv1ent_page(domain->pgtable + i))
+		if (lv1ent_page(domain->pgtable + i)) {
+			phys_addr_t base = lv2table_base(domain->pgtable + i);
+
+			dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
+					 DMA_TO_DEVICE);
 			kmem_cache_free(lv2table_kmem_cache,
-				phys_to_virt(lv2table_base(domain->pgtable + i)));
+					phys_to_virt(base));
+		}
 
 	free_pages((unsigned long)domain->pgtable, 2);
 	free_pages((unsigned long)domain->lv2entcnt, 1);
@@ -834,11 +847,10 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
 		if (!pent)
 			return ERR_PTR(-ENOMEM);
 
-		*sent = mk_lv1ent_page(virt_to_phys(pent));
+		update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
 		kmemleak_ignore(pent);
 		*pgcounter = NUM_LV2ENTRIES;
-		pgtable_flush(pent, pent + NUM_LV2ENTRIES);
-		pgtable_flush(sent, sent + 1);
+		dma_map_single(dma_dev, pent, LV2TABLE_SIZE, DMA_TO_DEVICE);
 
 		/*
 		 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
@@ -891,9 +903,7 @@ static int lv1set_section(struct exynos_iommu_domain *domain,
 		*pgcnt = 0;
 	}
 
-	*sent = mk_lv1ent_sect(paddr);
-
-	pgtable_flush(sent, sent + 1);
+	update_pte(sent, mk_lv1ent_sect(paddr));
 
 	spin_lock(&domain->lock);
 	if (lv1ent_page_zero(sent)) {
@@ -917,12 +927,15 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
 		if (WARN_ON(!lv2ent_fault(pent)))
 			return -EADDRINUSE;
 
-		*pent = mk_lv2ent_spage(paddr);
-		pgtable_flush(pent, pent + 1);
+		update_pte(pent, mk_lv2ent_spage(paddr));
 		*pgcnt -= 1;
 	} else { /* size == LPAGE_SIZE */
 		int i;
+		dma_addr_t pent_base = virt_to_phys(pent);
 
+		dma_sync_single_for_cpu(dma_dev, pent_base,
+					sizeof(*pent) * SPAGES_PER_LPAGE,
+					DMA_TO_DEVICE);
 		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
 			if (WARN_ON(!lv2ent_fault(pent))) {
 				if (i > 0)
@@ -932,7 +945,9 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
 
 			*pent = mk_lv2ent_lpage(paddr);
 		}
-		pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
+		dma_sync_single_for_device(dma_dev, pent_base,
+					   sizeof(*pent) * SPAGES_PER_LPAGE,
+					   DMA_TO_DEVICE);
 		*pgcnt -= SPAGES_PER_LPAGE;
 	}
 
@@ -1042,8 +1057,7 @@ static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
 		}
 
 		/* workaround for h/w bug in System MMU v3.3 */
-		*ent = ZERO_LV2LINK;
-		pgtable_flush(ent, ent + 1);
+		update_pte(ent, ZERO_LV2LINK);
 		size = SECT_SIZE;
 		goto done;
 	}
@@ -1064,9 +1078,8 @@ static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
 	}
 
 	if (lv2ent_small(ent)) {
-		*ent = 0;
+		update_pte(ent, 0);
 		size = SPAGE_SIZE;
-		pgtable_flush(ent, ent + 1);
 		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
 		goto done;
 	}
@@ -1077,9 +1090,13 @@ static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
 		goto err;
 	}
 
+	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
+				sizeof(*ent) * SPAGES_PER_LPAGE,
+				DMA_TO_DEVICE);
 	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
-	pgtable_flush(ent, ent + SPAGES_PER_LPAGE);
-
+	dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
+				   sizeof(*ent) * SPAGES_PER_LPAGE,
+				   DMA_TO_DEVICE);
 	size = LPAGE_SIZE;
 	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
 done:
@@ -1261,6 +1278,13 @@ static int __init exynos_iommu_of_setup(struct device_node *np)
 	if (IS_ERR(pdev))
 		return PTR_ERR(pdev);
 
+	/*
+	 * use the first registered sysmmu device for performing
+	 * dma mapping operations on iommu page tables (cpu cache flush)
+	 */
+	if (!dma_dev)
+		dma_dev = &pdev->dev;
+
 	of_iommu_set_ops(np, &exynos_iommu_ops);
 	return 0;
 }