author		Ohad Ben-Cohen <ohad@wizery.com>	2011-06-01 18:46:12 -0400
committer	Joerg Roedel <joerg.roedel@amd.com>	2011-08-26 05:46:00 -0400
commit		f626b52d4a568d4315cd152123ef2d1ea406def2 (patch)
tree		3592dfe99e9fd2713609cef9fbb8d0dfc8653a52 /arch/arm
parent		fcb8ce5cfe30ca9ca5c9a79cdfe26d1993e65e0c (diff)
omap: iommu: migrate to the generic IOMMU API
Migrate OMAP's iommu driver to the generic IOMMU API, so users can stay
generic, and any generic IOMMU functionality can be developed once in the
generic framework.

Migrate omap's iovmm (virtual memory manager) to the generic IOMMU API,
and adapt omap3isp as needed, so the latter won't break.

The plan is to eventually remove iovmm completely by replacing it with
the (upcoming) IOMMU-based DMA-API.

Tested on OMAP3 (with omap3isp) and OMAP4 (with rpmsg/remoteproc).

Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Acked-by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
Acked-by: Tony Lindgren <tony@atomide.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
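[Editor's note] For readers following the omap3isp/remoteproc adaptation, the client-side flow after this patch is roughly the sketch below: allocate a generic iommu_domain, look up the OMAP IOMMU device with the interim omap_find_iommu_device() helper added here, attach it, then map through the generic API (or through iovmm). This is a minimal sketch, not code from the patch; the "isp" alias, the prot value and the error handling are placeholders, and iommu_domain_alloc() is shown with the no-argument signature the generic API had at this point.

#include <linux/types.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <plat/iommu.h>

/* hypothetical client setup mirroring the omap3isp adaptation */
static struct iommu_domain *example_attach(u32 da, phys_addr_t pa, int prot)
{
	struct iommu_domain *domain;
	struct device *dev;
	int err;

	/* interim helper added by this patch; "isp" is only an example alias */
	dev = omap_find_iommu_device("isp");
	if (!dev)
		return ERR_PTR(-ENODEV);

	domain = iommu_domain_alloc();
	if (!domain)
		return ERR_PTR(-ENOMEM);

	err = iommu_attach_device(domain, dev);	/* ends up in omap_iommu_attach_dev() */
	if (err)
		goto err_attach;

	/* map one page (gfp_order 0 == one PAGE_SIZE page) */
	err = iommu_map(domain, da, pa, 0, prot);
	if (err)
		goto err_map;

	return domain;

err_map:
	iommu_detach_device(domain, dev);
err_attach:
	iommu_domain_free(domain);
	return ERR_PTR(err);
}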
Diffstat (limited to 'arch/arm')
-rw-r--r--	arch/arm/plat-omap/Kconfig			4
-rw-r--r--	arch/arm/plat-omap/include/plat/iommu.h		5
-rw-r--r--	arch/arm/plat-omap/include/plat/iovmm.h		27
-rw-r--r--	arch/arm/plat-omap/iommu.c			308
-rw-r--r--	arch/arm/plat-omap/iopgtable.h			18
-rw-r--r--	arch/arm/plat-omap/iovmm.c			115
6 files changed, 372 insertions(+), 105 deletions(-)
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index bb8f4a6b3e37..e1e954d7486d 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -132,8 +132,10 @@ config OMAP_MBOX_KFIFO_SIZE
 	  This can also be changed at runtime (via the mbox_kfifo_size
 	  module parameter).
 
+#can't be tristate; iommu api doesn't support un-registration
 config OMAP_IOMMU
-	tristate
+	bool
+	select IOMMU_API
 
 config OMAP_IOMMU_DEBUG
 	tristate "Export OMAP IOMMU internals in DebugFS"
diff --git a/arch/arm/plat-omap/include/plat/iommu.h b/arch/arm/plat-omap/include/plat/iommu.h
index 174f1b9c8c03..dcb757b87fca 100644
--- a/arch/arm/plat-omap/include/plat/iommu.h
+++ b/arch/arm/plat-omap/include/plat/iommu.h
@@ -34,7 +34,7 @@ struct iommu {
 	void *isr_priv;
 
 	unsigned int refcount;
-	struct mutex	iommu_lock;	/* global for this whole object */
+	spinlock_t	iommu_lock;	/* global for this whole object */
 
 	/*
 	 * We don't change iopgd for a situation like pgd for a task,
@@ -167,8 +167,6 @@ extern void iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd,
 extern size_t iopgtable_clear_entry(struct iommu *obj, u32 iova);
 
 extern int iommu_set_da_range(struct iommu *obj, u32 start, u32 end);
-extern struct iommu *iommu_get(const char *name);
-extern void iommu_put(struct iommu *obj);
 extern int iommu_set_isr(const char *name,
 			 int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs,
 				    void *priv),
@@ -185,5 +183,6 @@ extern int foreach_iommu_device(void *data,
 
 extern ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t len);
 extern size_t dump_tlb_entries(struct iommu *obj, char *buf, ssize_t len);
+struct device *omap_find_iommu_device(const char *name);
 
 #endif /* __MACH_IOMMU_H */
diff --git a/arch/arm/plat-omap/include/plat/iovmm.h b/arch/arm/plat-omap/include/plat/iovmm.h
index e992b9655fbc..e2f0b38a0265 100644
--- a/arch/arm/plat-omap/include/plat/iovmm.h
+++ b/arch/arm/plat-omap/include/plat/iovmm.h
@@ -13,6 +13,8 @@
 #ifndef __IOMMU_MMAP_H
 #define __IOMMU_MMAP_H
 
+#include <linux/iommu.h>
+
 struct iovm_struct {
 	struct iommu		*iommu;		/* iommu object which this belongs to */
 	u32			da_start;	/* area definition */
@@ -71,18 +73,21 @@ struct iovm_struct {
 
 
 extern struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da);
-extern u32 iommu_vmap(struct iommu *obj, u32 da,
+extern u32 iommu_vmap(struct iommu_domain *domain, struct iommu *obj, u32 da,
 			const struct sg_table *sgt, u32 flags);
-extern struct sg_table *iommu_vunmap(struct iommu *obj, u32 da);
-extern u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes,
-				u32 flags);
-extern void iommu_vfree(struct iommu *obj, const u32 da);
-extern u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
-				u32 flags);
-extern void iommu_kunmap(struct iommu *obj, u32 da);
-extern u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes,
-				u32 flags);
-extern void iommu_kfree(struct iommu *obj, u32 da);
+extern struct sg_table *iommu_vunmap(struct iommu_domain *domain,
+				struct iommu *obj, u32 da);
+extern u32 iommu_vmalloc(struct iommu_domain *domain, struct iommu *obj,
+				u32 da, size_t bytes, u32 flags);
+extern void iommu_vfree(struct iommu_domain *domain, struct iommu *obj,
+				const u32 da);
+extern u32 iommu_kmap(struct iommu_domain *domain, struct iommu *obj, u32 da,
+				u32 pa, size_t bytes, u32 flags);
+extern void iommu_kunmap(struct iommu_domain *domain, struct iommu *obj,
+				u32 da);
+extern u32 iommu_kmalloc(struct iommu_domain *domain, struct iommu *obj,
+				u32 da, size_t bytes, u32 flags);
+extern void iommu_kfree(struct iommu_domain *domain, struct iommu *obj, u32 da);
 
 extern void *da_to_va(struct iommu *obj, u32 da);
 
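[Editor's note] For iovmm users the visible change above is an extra leading argument: every entry point now takes the generic iommu_domain as well as the omap iommu handle. A hedged call-site sketch follows (the domain, obj, sgt and flags are assumed to come from the caller, as they do in struct isp_device after the omap3isp adaptation):

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/iommu.h>
#include <plat/iommu.h>
#include <plat/iovmm.h>

/* hypothetical caller; da == 0 lets iovmm pick a device address */
static int example_remap(struct iommu_domain *domain, struct iommu *obj,
			 const struct sg_table *sgt, u32 flags)
{
	u32 da;

	/* before this patch: da = iommu_vmap(obj, 0, sgt, flags); */
	da = iommu_vmap(domain, obj, 0, sgt, flags);
	if (IS_ERR_VALUE(da))
		return da;

	/* ... use the mapping ... */

	iommu_vunmap(domain, obj, da);
	return 0;
}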
diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c
index 34fc31ee9081..51aa008d8223 100644
--- a/arch/arm/plat-omap/iommu.c
+++ b/arch/arm/plat-omap/iommu.c
@@ -18,6 +18,9 @@
 #include <linux/ioport.h>
 #include <linux/clk.h>
 #include <linux/platform_device.h>
+#include <linux/iommu.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
 
 #include <asm/cacheflush.h>
 
@@ -30,6 +33,19 @@
 		(__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
 		__i++)
 
+/**
+ * struct omap_iommu_domain - omap iommu domain
+ * @pgtable:	the page table
+ * @iommu_dev:	an omap iommu device attached to this domain. only a single
+ *		iommu device can be attached for now.
+ * @lock:	domain lock, should be taken when attaching/detaching
+ */
+struct omap_iommu_domain {
+	u32 *pgtable;
+	struct iommu *iommu_dev;
+	spinlock_t lock;
+};
+
 /* accommodate the difference between omap1 and omap2/3 */
 static const struct iommu_functions *arch_iommu;
 
@@ -852,35 +868,55 @@ int iommu_set_da_range(struct iommu *obj, u32 start, u32 end)
 EXPORT_SYMBOL_GPL(iommu_set_da_range);
 
 /**
- * iommu_get - Get iommu handler
- * @name:	target iommu name
+ * omap_find_iommu_device() - find an omap iommu device by name
+ * @name:	name of the iommu device
+ *
+ * The generic iommu API requires the caller to provide the device
+ * he wishes to attach to a certain iommu domain.
+ *
+ * Drivers generally should not bother with this as it should just
+ * be taken care of by the DMA-API using dev_archdata.
+ *
+ * This function is provided as an interim solution until the latter
+ * materializes, and omap3isp is fully migrated to the DMA-API.
+ */
+struct device *omap_find_iommu_device(const char *name)
+{
+	return driver_find_device(&omap_iommu_driver.driver, NULL,
+				(void *)name,
+				device_match_by_alias);
+}
+EXPORT_SYMBOL_GPL(omap_find_iommu_device);
+
+/**
+ * omap_iommu_attach() - attach iommu device to an iommu domain
+ * @dev:	target omap iommu device
+ * @iopgd:	page table
 **/
-struct iommu *iommu_get(const char *name)
+static struct iommu *omap_iommu_attach(struct device *dev, u32 *iopgd)
 {
 	int err = -ENOMEM;
-	struct device *dev;
-	struct iommu *obj;
-
-	dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
-				 device_match_by_alias);
-	if (!dev)
-		return ERR_PTR(-ENODEV);
-
-	obj = to_iommu(dev);
+	struct iommu *obj = to_iommu(dev);
 
-	mutex_lock(&obj->iommu_lock);
+	spin_lock(&obj->iommu_lock);
 
-	if (obj->refcount++ == 0) {
-		err = iommu_enable(obj);
-		if (err)
-			goto err_enable;
-		flush_iotlb_all(obj);
+	/* an iommu device can only be attached once */
+	if (++obj->refcount > 1) {
+		dev_err(dev, "%s: already attached!\n", obj->name);
+		err = -EBUSY;
+		goto err_enable;
 	}
 
+	obj->iopgd = iopgd;
+	err = iommu_enable(obj);
+	if (err)
+		goto err_enable;
+	flush_iotlb_all(obj);
+
 	if (!try_module_get(obj->owner))
 		goto err_module;
 
-	mutex_unlock(&obj->iommu_lock);
+	spin_unlock(&obj->iommu_lock);
 
 	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
 	return obj;
@@ -890,32 +926,32 @@ err_module:
 	iommu_disable(obj);
 err_enable:
 	obj->refcount--;
-	mutex_unlock(&obj->iommu_lock);
+	spin_unlock(&obj->iommu_lock);
 	return ERR_PTR(err);
 }
-EXPORT_SYMBOL_GPL(iommu_get);
 
 /**
- * iommu_put - Put back iommu handler
+ * omap_iommu_detach - release iommu device
  * @obj:	target iommu
  **/
-void iommu_put(struct iommu *obj)
+static void omap_iommu_detach(struct iommu *obj)
 {
 	if (!obj || IS_ERR(obj))
 		return;
 
-	mutex_lock(&obj->iommu_lock);
+	spin_lock(&obj->iommu_lock);
 
 	if (--obj->refcount == 0)
 		iommu_disable(obj);
 
 	module_put(obj->owner);
 
-	mutex_unlock(&obj->iommu_lock);
+	obj->iopgd = NULL;
+
+	spin_unlock(&obj->iommu_lock);
 
 	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
 }
-EXPORT_SYMBOL_GPL(iommu_put);
 
 int iommu_set_isr(const char *name,
 		  int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs,
@@ -950,7 +986,6 @@ EXPORT_SYMBOL_GPL(iommu_set_isr);
 static int __devinit omap_iommu_probe(struct platform_device *pdev)
 {
 	int err = -ENODEV;
-	void *p;
 	int irq;
 	struct iommu *obj;
 	struct resource *res;
@@ -974,7 +1009,7 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev)
 	obj->da_start = pdata->da_start;
 	obj->da_end = pdata->da_end;
 
-	mutex_init(&obj->iommu_lock);
+	spin_lock_init(&obj->iommu_lock);
 	mutex_init(&obj->mmap_lock);
 	spin_lock_init(&obj->page_table_lock);
 	INIT_LIST_HEAD(&obj->mmap);
@@ -1009,22 +1044,9 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev)
 		goto err_irq;
 	platform_set_drvdata(pdev, obj);
 
-	p = (void *)__get_free_pages(GFP_KERNEL, get_order(IOPGD_TABLE_SIZE));
-	if (!p) {
-		err = -ENOMEM;
-		goto err_pgd;
-	}
-	memset(p, 0, IOPGD_TABLE_SIZE);
-	clean_dcache_area(p, IOPGD_TABLE_SIZE);
-	obj->iopgd = p;
-
-	BUG_ON(!IS_ALIGNED((unsigned long)obj->iopgd, IOPGD_TABLE_SIZE));
-
 	dev_info(&pdev->dev, "%s registered\n", obj->name);
 	return 0;
 
-err_pgd:
-	free_irq(irq, obj);
 err_irq:
 	iounmap(obj->regbase);
 err_ioremap:
@@ -1045,7 +1067,6 @@ static int __devexit omap_iommu_remove(struct platform_device *pdev)
 	platform_set_drvdata(pdev, NULL);
 
 	iopgtable_clear_entry_all(obj);
-	free_pages((unsigned long)obj->iopgd, get_order(IOPGD_TABLE_SIZE));
 
 	irq = platform_get_irq(pdev, 0);
 	free_irq(irq, obj);
@@ -1072,6 +1093,207 @@ static void iopte_cachep_ctor(void *iopte)
 	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
 }
 
+static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
+			 phys_addr_t pa, int order, int prot)
+{
+	struct omap_iommu_domain *omap_domain = domain->priv;
+	struct iommu *oiommu = omap_domain->iommu_dev;
+	struct device *dev = oiommu->dev;
+	size_t bytes = PAGE_SIZE << order;
+	struct iotlb_entry e;
+	int omap_pgsz;
+	u32 ret, flags;
+
+	/* we only support mapping a single iommu page for now */
+	omap_pgsz = bytes_to_iopgsz(bytes);
+	if (omap_pgsz < 0) {
+		dev_err(dev, "invalid size to map: %d\n", bytes);
+		return -EINVAL;
+	}
+
+	dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%x\n", da, pa, bytes);
+
+	flags = omap_pgsz | prot;
+
+	iotlb_init_entry(&e, da, pa, flags);
+
+	ret = iopgtable_store_entry(oiommu, &e);
+	if (ret) {
+		dev_err(dev, "iopgtable_store_entry failed: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
+			    int order)
+{
+	struct omap_iommu_domain *omap_domain = domain->priv;
+	struct iommu *oiommu = omap_domain->iommu_dev;
+	struct device *dev = oiommu->dev;
+	size_t bytes = PAGE_SIZE << order;
+	size_t ret;
+
+	dev_dbg(dev, "unmapping da 0x%lx size 0x%x\n", da, bytes);
+
+	ret = iopgtable_clear_entry(oiommu, da);
+	if (ret != bytes) {
+		dev_err(dev, "entry @ 0x%lx was %d; not %d\n", da, ret, bytes);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
+{
+	struct omap_iommu_domain *omap_domain = domain->priv;
+	struct iommu *oiommu;
+	int ret = 0;
+
+	spin_lock(&omap_domain->lock);
+
+	/* only a single device is supported per domain for now */
+	if (omap_domain->iommu_dev) {
+		dev_err(dev, "iommu domain is already attached\n");
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/* get a handle to and enable the omap iommu */
+	oiommu = omap_iommu_attach(dev, omap_domain->pgtable);
+	if (IS_ERR(oiommu)) {
+		ret = PTR_ERR(oiommu);
+		dev_err(dev, "can't get omap iommu: %d\n", ret);
+		goto out;
+	}
+
+	omap_domain->iommu_dev = oiommu;
+
+out:
+	spin_unlock(&omap_domain->lock);
+	return ret;
+}
+
+static void omap_iommu_detach_dev(struct iommu_domain *domain,
+				 struct device *dev)
+{
+	struct omap_iommu_domain *omap_domain = domain->priv;
+	struct iommu *oiommu = to_iommu(dev);
+
+	spin_lock(&omap_domain->lock);
+
+	/* only a single device is supported per domain for now */
+	if (omap_domain->iommu_dev != oiommu) {
+		dev_err(dev, "invalid iommu device\n");
+		goto out;
+	}
+
+	iopgtable_clear_entry_all(oiommu);
+
+	omap_iommu_detach(oiommu);
+
+	omap_domain->iommu_dev = NULL;
+
+out:
+	spin_unlock(&omap_domain->lock);
+}
+
+static int omap_iommu_domain_init(struct iommu_domain *domain)
+{
+	struct omap_iommu_domain *omap_domain;
+
+	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
+	if (!omap_domain) {
+		pr_err("kzalloc failed\n");
+		goto out;
+	}
+
+	omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
+	if (!omap_domain->pgtable) {
+		pr_err("kzalloc failed\n");
+		goto fail_nomem;
+	}
+
+	/*
+	 * should never fail, but please keep this around to ensure
+	 * we keep the hardware happy
+	 */
+	BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));
+
+	clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
+	spin_lock_init(&omap_domain->lock);
+
+	domain->priv = omap_domain;
+
+	return 0;
+
+fail_nomem:
+	kfree(omap_domain);
+out:
+	return -ENOMEM;
+}
+
+/* assume device was already detached */
+static void omap_iommu_domain_destroy(struct iommu_domain *domain)
+{
+	struct omap_iommu_domain *omap_domain = domain->priv;
+
+	domain->priv = NULL;
+
+	kfree(omap_domain->pgtable);
+	kfree(omap_domain);
+}
+
+static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
+					  unsigned long da)
+{
+	struct omap_iommu_domain *omap_domain = domain->priv;
+	struct iommu *oiommu = omap_domain->iommu_dev;
+	struct device *dev = oiommu->dev;
+	u32 *pgd, *pte;
+	phys_addr_t ret = 0;
+
+	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);
+
+	if (pte) {
+		if (iopte_is_small(*pte))
+			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
+		else if (iopte_is_large(*pte))
+			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
+		else
+			dev_err(dev, "bogus pte 0x%x", *pte);
+	} else {
+		if (iopgd_is_section(*pgd))
+			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
+		else if (iopgd_is_super(*pgd))
+			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
+		else
+			dev_err(dev, "bogus pgd 0x%x", *pgd);
+	}
+
+	return ret;
+}
+
+static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
+				    unsigned long cap)
+{
+	return 0;
+}
+
+static struct iommu_ops omap_iommu_ops = {
+	.domain_init	= omap_iommu_domain_init,
+	.domain_destroy	= omap_iommu_domain_destroy,
+	.attach_dev	= omap_iommu_attach_dev,
+	.detach_dev	= omap_iommu_detach_dev,
+	.map		= omap_iommu_map,
+	.unmap		= omap_iommu_unmap,
+	.iova_to_phys	= omap_iommu_iova_to_phys,
+	.domain_has_cap	= omap_iommu_domain_has_cap,
+};
+
 static int __init omap_iommu_init(void)
 {
 	struct kmem_cache *p;
@@ -1084,6 +1306,8 @@ static int __init omap_iommu_init(void)
 		return -ENOMEM;
 	iopte_cachep = p;
 
+	register_iommu(&omap_iommu_ops);
+
 	return platform_driver_register(&omap_iommu_driver);
 }
 module_init(omap_iommu_init);
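[Editor's note] omap_iommu_map() above only accepts sizes that match a single OMAP MMU entry (4KiB, 64KiB, 1MiB or 16MiB), which is why the iovmm side checks bytes_to_iopgsz() before computing the order passed to iommu_map(). A hedged sketch of that order bookkeeping, assuming 4KiB pages (PAGE_SHIFT == 12); example_order_for() is not part of the patch:

#include <linux/errno.h>
#include <linux/mm.h>

/*
 * get_order() rounds bytes up to a power-of-two number of pages;
 * omap_iommu_map() turns PAGE_SIZE << order back into bytes and rejects
 * anything that is not exactly one OMAP MMU page.
 */
static int example_order_for(size_t bytes)
{
	int order = get_order(bytes);	/* 64KiB -> 4, 1MiB -> 8, 16MiB -> 12 */

	if (((size_t)PAGE_SIZE << order) != bytes)
		return -EINVAL;		/* not a single MMU-page-sized chunk */

	return order;
}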
diff --git a/arch/arm/plat-omap/iopgtable.h b/arch/arm/plat-omap/iopgtable.h
index c3e93bb0911f..33c7aa986f53 100644
--- a/arch/arm/plat-omap/iopgtable.h
+++ b/arch/arm/plat-omap/iopgtable.h
@@ -56,6 +56,19 @@
 
 #define IOPAGE_MASK		IOPTE_MASK
 
+/**
+ * omap_iommu_translate() - va to pa translation
+ * @d:		omap iommu descriptor
+ * @va:		virtual address
+ * @mask:	omap iommu descriptor mask
+ *
+ * va to pa translation
+ */
+static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
+{
+	return (d & mask) | (va & (~mask));
+}
+
 /*
  * some descriptor attributes.
  */
@@ -64,10 +77,15 @@
 #define IOPGD_SUPER		(1 << 18 | 2 << 0)
 
 #define iopgd_is_table(x)	(((x) & 3) == IOPGD_TABLE)
+#define iopgd_is_section(x)	(((x) & (1 << 18 | 3)) == IOPGD_SECTION)
+#define iopgd_is_super(x)	(((x) & (1 << 18 | 3)) == IOPGD_SUPER)
 
 #define IOPTE_SMALL		(2 << 0)
 #define IOPTE_LARGE		(1 << 0)
 
+#define iopte_is_small(x)	(((x) & 2) == IOPTE_SMALL)
+#define iopte_is_large(x)	(((x) & 3) == IOPTE_LARGE)
+
 /* to find an entry in a page-table-directory */
 #define iopgd_index(da)		(((da) >> IOPGD_SHIFT) & (PTRS_PER_IOPGD - 1))
 #define iopgd_offset(obj, da)	((obj)->iopgd + iopgd_index(da))
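[Editor's note] omap_iommu_translate() simply splices the physical base bits from the descriptor with the offset bits from the device address. A quick worked example with illustrative numbers only, using the 4KiB small-page mask (IOPTE_MASK, 0xfffff000):

#include <linux/types.h>

#define EX_SMALL_PAGE_MASK	0xfffff000	/* same value as IOPTE_MASK */

/* same arithmetic as omap_iommu_translate(), with made-up values */
static inline u32 example_translate(void)
{
	u32 pte = 0x81234002;	/* small-page descriptor: base 0x81234000, type bits 0x2 */
	u32 da  = 0x00005678;	/* device address inside that page */

	/* (pte & mask) | (da & ~mask) == 0x81234678 */
	return (pte & EX_SMALL_PAGE_MASK) | (da & ~EX_SMALL_PAGE_MASK);
}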
diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c
index 79e7fedb8602..aa2c47893b02 100644
--- a/arch/arm/plat-omap/iovmm.c
+++ b/arch/arm/plat-omap/iovmm.c
@@ -15,6 +15,7 @@
 #include <linux/vmalloc.h>
 #include <linux/device.h>
 #include <linux/scatterlist.h>
+#include <linux/iommu.h>
 
 #include <asm/cacheflush.h>
 #include <asm/mach/map.h>
@@ -453,39 +454,38 @@ static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
 }
 
 /* create 'da' <-> 'pa' mapping from 'sgt' */
-static int map_iovm_area(struct iommu *obj, struct iovm_struct *new,
+static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
 			 const struct sg_table *sgt, u32 flags)
 {
 	int err;
 	unsigned int i, j;
 	struct scatterlist *sg;
 	u32 da = new->da_start;
+	int order;
 
-	if (!obj || !sgt)
+	if (!domain || !sgt)
 		return -EINVAL;
 
 	BUG_ON(!sgtable_ok(sgt));
 
 	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
 		u32 pa;
-		int pgsz;
 		size_t bytes;
-		struct iotlb_entry e;
 
 		pa = sg_phys(sg);
 		bytes = sg->length;
 
 		flags &= ~IOVMF_PGSZ_MASK;
-		pgsz = bytes_to_iopgsz(bytes);
-		if (pgsz < 0)
+
+		if (bytes_to_iopgsz(bytes) < 0)
 			goto err_out;
-		flags |= pgsz;
+
+		order = get_order(bytes);
 
 		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
 			 i, da, pa, bytes);
 
-		iotlb_init_entry(&e, da, pa, flags);
-		err = iopgtable_store_entry(obj, &e);
+		err = iommu_map(domain, da, pa, order, flags);
 		if (err)
 			goto err_out;
 
@@ -499,9 +499,11 @@ err_out:
 	for_each_sg(sgt->sgl, sg, i, j) {
 		size_t bytes;
 
-		bytes = iopgtable_clear_entry(obj, da);
+		bytes = sg->length;
+		order = get_order(bytes);
 
-		BUG_ON(!iopgsz_ok(bytes));
+		/* ignore failures.. we're already handling one */
+		iommu_unmap(domain, da, order);
 
 		da += bytes;
 	}
@@ -509,22 +511,31 @@ err_out:
 }
 
 /* release 'da' <-> 'pa' mapping */
-static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area)
+static void unmap_iovm_area(struct iommu_domain *domain, struct iommu *obj,
+			struct iovm_struct *area)
 {
 	u32 start;
 	size_t total = area->da_end - area->da_start;
+	const struct sg_table *sgt = area->sgt;
+	struct scatterlist *sg;
+	int i, err;
 
+	BUG_ON(!sgtable_ok(sgt));
 	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));
 
 	start = area->da_start;
-	while (total > 0) {
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
 		size_t bytes;
+		int order;
+
+		bytes = sg->length;
+		order = get_order(bytes);
+
+		err = iommu_unmap(domain, start, order);
+		if (err)
+			break;
 
-		bytes = iopgtable_clear_entry(obj, start);
-		if (bytes == 0)
-			bytes = PAGE_SIZE;
-		else
-			dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
+		dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
 				__func__, start, bytes, area->flags);
 
 		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
@@ -536,7 +547,8 @@ static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area)
 }
 
 /* template function for all unmapping */
-static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da,
+static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
+			struct iommu *obj, const u32 da,
 			void (*fn)(const void *), u32 flags)
 {
 	struct sg_table *sgt = NULL;
@@ -562,7 +574,7 @@ static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da,
 	}
 	sgt = (struct sg_table *)area->sgt;
 
-	unmap_iovm_area(obj, area);
+	unmap_iovm_area(domain, obj, area);
 
 	fn(area->va);
 
@@ -577,8 +589,9 @@ out:
 	return sgt;
 }
 
-static u32 map_iommu_region(struct iommu *obj, u32 da,
-	      const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
+static u32 map_iommu_region(struct iommu_domain *domain, struct iommu *obj,
+				u32 da, const struct sg_table *sgt, void *va,
+				size_t bytes, u32 flags)
 {
 	int err = -ENOMEM;
 	struct iovm_struct *new;
@@ -593,7 +606,7 @@ static u32 map_iommu_region(struct iommu *obj, u32 da,
 	new->va = va;
 	new->sgt = sgt;
 
-	if (map_iovm_area(obj, new, sgt, new->flags))
+	if (map_iovm_area(domain, new, sgt, new->flags))
 		goto err_map;
 
 	mutex_unlock(&obj->mmap_lock);
@@ -610,10 +623,11 @@ err_alloc_iovma:
 	return err;
 }
 
-static inline u32 __iommu_vmap(struct iommu *obj, u32 da,
-	 const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
+static inline u32 __iommu_vmap(struct iommu_domain *domain, struct iommu *obj,
+				u32 da, const struct sg_table *sgt,
+				void *va, size_t bytes, u32 flags)
 {
-	return map_iommu_region(obj, da, sgt, va, bytes, flags);
+	return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
 }
 
 /**
@@ -625,8 +639,8 @@ static inline u32 __iommu_vmap(struct iommu *obj, u32 da,
  * Creates 1-n-1 mapping with given @sgt and returns @da.
  * All @sgt element must be io page size aligned.
  */
-u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
-		 u32 flags)
+u32 iommu_vmap(struct iommu_domain *domain, struct iommu *obj, u32 da,
+		const struct sg_table *sgt, u32 flags)
 {
 	size_t bytes;
 	void *va = NULL;
@@ -648,7 +662,7 @@ u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
 	flags |= IOVMF_DISCONT;
 	flags |= IOVMF_MMIO;
 
-	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
+	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
 	if (IS_ERR_VALUE(da))
 		vunmap_sg(va);
 
@@ -664,14 +678,16 @@ EXPORT_SYMBOL_GPL(iommu_vmap);
  * Free the iommu virtually contiguous memory area starting at
  * @da, which was returned by 'iommu_vmap()'.
  */
-struct sg_table *iommu_vunmap(struct iommu *obj, u32 da)
+struct sg_table *
+iommu_vunmap(struct iommu_domain *domain, struct iommu *obj, u32 da)
 {
 	struct sg_table *sgt;
 	/*
 	 * 'sgt' is allocated before 'iommu_vmalloc()' is called.
 	 * Just returns 'sgt' to the caller to free
 	 */
-	sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO);
+	sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
+				IOVMF_DISCONT | IOVMF_MMIO);
 	if (!sgt)
 		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
 	return sgt;
@@ -688,7 +704,8 @@ EXPORT_SYMBOL_GPL(iommu_vunmap);
  * Allocate @bytes linearly and creates 1-n-1 mapping and returns
  * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
  */
-u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
+u32 iommu_vmalloc(struct iommu_domain *domain, struct iommu *obj, u32 da,
+				size_t bytes, u32 flags)
 {
 	void *va;
 	struct sg_table *sgt;
@@ -712,7 +729,7 @@ u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
 	}
 	sgtable_fill_vmalloc(sgt, va);
 
-	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
+	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
 	if (IS_ERR_VALUE(da))
 		goto err_iommu_vmap;
 
@@ -735,19 +752,20 @@ EXPORT_SYMBOL_GPL(iommu_vmalloc);
  * Frees the iommu virtually continuous memory area starting at
  * @da, as obtained from 'iommu_vmalloc()'.
  */
-void iommu_vfree(struct iommu *obj, const u32 da)
+void iommu_vfree(struct iommu_domain *domain, struct iommu *obj, const u32 da)
 {
 	struct sg_table *sgt;
 
-	sgt = unmap_vm_area(obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC);
+	sgt = unmap_vm_area(domain, obj, da, vfree,
+				IOVMF_DISCONT | IOVMF_ALLOC);
 	if (!sgt)
 		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
 	sgtable_free(sgt);
 }
 EXPORT_SYMBOL_GPL(iommu_vfree);
 
-static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
-			size_t bytes, u32 flags)
+static u32 __iommu_kmap(struct iommu_domain *domain, struct iommu *obj,
+			u32 da, u32 pa, void *va, size_t bytes, u32 flags)
 {
 	struct sg_table *sgt;
 
@@ -757,7 +775,7 @@ static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
 
 	sgtable_fill_kmalloc(sgt, pa, da, bytes);
 
-	da = map_iommu_region(obj, da, sgt, va, bytes, flags);
+	da = map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
 	if (IS_ERR_VALUE(da)) {
 		sgtable_drain_kmalloc(sgt);
 		sgtable_free(sgt);
@@ -776,8 +794,8 @@ static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
  * Creates 1-1-1 mapping and returns @da again, which can be
  * adjusted if 'IOVMF_DA_FIXED' is not set.
  */
-u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
-		 u32 flags)
+u32 iommu_kmap(struct iommu_domain *domain, struct iommu *obj, u32 da, u32 pa,
+		size_t bytes, u32 flags)
 {
 	void *va;
 
@@ -793,7 +811,7 @@ u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
 	flags |= IOVMF_LINEAR;
 	flags |= IOVMF_MMIO;
 
-	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
+	da = __iommu_kmap(domain, obj, da, pa, va, bytes, flags);
 	if (IS_ERR_VALUE(da))
 		iounmap(va);
 
@@ -809,12 +827,12 @@ EXPORT_SYMBOL_GPL(iommu_kmap);
  * Frees the iommu virtually contiguous memory area starting at
  * @da, which was passed to and was returned by'iommu_kmap()'.
  */
-void iommu_kunmap(struct iommu *obj, u32 da)
+void iommu_kunmap(struct iommu_domain *domain, struct iommu *obj, u32 da)
 {
 	struct sg_table *sgt;
 	typedef void (*func_t)(const void *);
 
-	sgt = unmap_vm_area(obj, da, (func_t)iounmap,
+	sgt = unmap_vm_area(domain, obj, da, (func_t)iounmap,
 			    IOVMF_LINEAR | IOVMF_MMIO);
 	if (!sgt)
 		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
@@ -832,7 +850,8 @@ EXPORT_SYMBOL_GPL(iommu_kunmap);
  * Allocate @bytes linearly and creates 1-1-1 mapping and returns
  * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
  */
-u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
+u32 iommu_kmalloc(struct iommu_domain *domain, struct iommu *obj, u32 da,
+				size_t bytes, u32 flags)
 {
 	void *va;
 	u32 pa;
@@ -850,7 +869,7 @@ u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
 	flags |= IOVMF_LINEAR;
 	flags |= IOVMF_ALLOC;
 
-	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
+	da = __iommu_kmap(domain, obj, da, pa, va, bytes, flags);
 	if (IS_ERR_VALUE(da))
 		kfree(va);
 
@@ -866,11 +885,11 @@ EXPORT_SYMBOL_GPL(iommu_kmalloc);
  * Frees the iommu virtually contiguous memory area starting at
  * @da, which was passed to and was returned by'iommu_kmalloc()'.
  */
-void iommu_kfree(struct iommu *obj, u32 da)
+void iommu_kfree(struct iommu_domain *domain, struct iommu *obj, u32 da)
 {
 	struct sg_table *sgt;
 
-	sgt = unmap_vm_area(obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
+	sgt = unmap_vm_area(domain, obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
 	if (!sgt)
 		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
 	sgtable_free(sgt);