author	Ohad Ben-Cohen <ohad@wizery.com>	2011-06-01 18:46:12 -0400
committer	Joerg Roedel <joerg.roedel@amd.com>	2011-08-26 05:46:00 -0400
commit	f626b52d4a568d4315cd152123ef2d1ea406def2 (patch)
tree	3592dfe99e9fd2713609cef9fbb8d0dfc8653a52
parent	fcb8ce5cfe30ca9ca5c9a79cdfe26d1993e65e0c (diff)
omap: iommu: migrate to the generic IOMMU API
Migrate OMAP's iommu driver to the generic IOMMU API, so users can stay
generic, and any generic IOMMU functionality can be developed once in
the generic framework.

Migrate omap's iovmm (virtual memory manager) to the generic IOMMU API,
and adapt omap3isp as needed, so the latter won't break.

The plan is to eventually remove iovmm completely by replacing it with
the (upcoming) IOMMU-based DMA-API.

Tested on OMAP3 (with omap3isp) and OMAP4 (with rpmsg/remoteproc).

Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Acked-by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
Acked-by: Tony Lindgren <tony@atomide.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
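For context, the consumer-side flow this migration enables looks roughly as
follows. This is only an illustrative sketch mirroring the omap3isp changes
below; example_attach() is a hypothetical name, and "isp" is simply the
alias omap3isp uses:

	#include <linux/iommu.h>
	#include <plat/iommu.h>

	static int example_attach(void)
	{
		struct iommu_domain *domain;
		struct device *dev;
		int ret;

		/* interim helper added by this patch; goes away once
		 * the DMA-API handles this via dev_archdata */
		dev = omap_find_iommu_device("isp");
		if (!dev)
			return -ENODEV;

		domain = iommu_domain_alloc();
		if (!domain)
			return -ENOMEM;

		ret = iommu_attach_device(domain, dev);
		if (ret) {
			iommu_domain_free(domain);
			return ret;
		}

		/* mappings now go through the generic API (iommu_map() etc.);
		 * tear down with iommu_detach_device() + iommu_domain_free() */
		return 0;
	}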
 arch/arm/plat-omap/Kconfig               |   4
 arch/arm/plat-omap/include/plat/iommu.h  |   5
 arch/arm/plat-omap/include/plat/iovmm.h  |  27
 arch/arm/plat-omap/iommu.c               | 308
 arch/arm/plat-omap/iopgtable.h           |  18
 arch/arm/plat-omap/iovmm.c               | 115
 drivers/media/video/omap3isp/isp.c       |  41
 drivers/media/video/omap3isp/isp.h       |   3
 drivers/media/video/omap3isp/ispccdc.c   |  16
 drivers/media/video/omap3isp/ispstat.c   |   6
 drivers/media/video/omap3isp/ispvideo.c  |   4
 11 files changed, 422 insertions(+), 125 deletions(-)
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index bb8f4a6b3e3..e1e954d7486 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -132,8 +132,10 @@ config OMAP_MBOX_KFIFO_SIZE
 	  This can also be changed at runtime (via the mbox_kfifo_size
 	  module parameter).
 
+#can't be tristate; iommu api doesn't support un-registration
 config OMAP_IOMMU
-	tristate
+	bool
+	select IOMMU_API
 
 config OMAP_IOMMU_DEBUG
 	tristate "Export OMAP IOMMU internals in DebugFS"
diff --git a/arch/arm/plat-omap/include/plat/iommu.h b/arch/arm/plat-omap/include/plat/iommu.h
index 174f1b9c8c0..dcb757b87fc 100644
--- a/arch/arm/plat-omap/include/plat/iommu.h
+++ b/arch/arm/plat-omap/include/plat/iommu.h
@@ -34,7 +34,7 @@ struct iommu {
 	void *isr_priv;
 
 	unsigned int refcount;
-	struct mutex iommu_lock;	/* global for this whole object */
+	spinlock_t iommu_lock;	/* global for this whole object */
 
 	/*
 	 * We don't change iopgd for a situation like pgd for a task,
@@ -167,8 +167,6 @@ extern void iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd,
 extern size_t iopgtable_clear_entry(struct iommu *obj, u32 iova);
 
 extern int iommu_set_da_range(struct iommu *obj, u32 start, u32 end);
-extern struct iommu *iommu_get(const char *name);
-extern void iommu_put(struct iommu *obj);
 extern int iommu_set_isr(const char *name,
 			 int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs,
 				    void *priv),
@@ -185,5 +183,6 @@ extern int foreach_iommu_device(void *data,
 
 extern ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t len);
 extern size_t dump_tlb_entries(struct iommu *obj, char *buf, ssize_t len);
+struct device *omap_find_iommu_device(const char *name);
 
 #endif /* __MACH_IOMMU_H */
diff --git a/arch/arm/plat-omap/include/plat/iovmm.h b/arch/arm/plat-omap/include/plat/iovmm.h
index e992b9655fb..e2f0b38a026 100644
--- a/arch/arm/plat-omap/include/plat/iovmm.h
+++ b/arch/arm/plat-omap/include/plat/iovmm.h
@@ -13,6 +13,8 @@
 #ifndef __IOMMU_MMAP_H
 #define __IOMMU_MMAP_H
 
+#include <linux/iommu.h>
+
 struct iovm_struct {
 	struct iommu		*iommu;	/* iommu object which this belongs to */
 	u32			da_start; /* area definition */
@@ -71,18 +73,21 @@ struct iovm_struct {
 
 
 extern struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da);
-extern u32 iommu_vmap(struct iommu *obj, u32 da,
+extern u32 iommu_vmap(struct iommu_domain *domain, struct iommu *obj, u32 da,
 		      const struct sg_table *sgt, u32 flags);
-extern struct sg_table *iommu_vunmap(struct iommu *obj, u32 da);
-extern u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes,
-			 u32 flags);
-extern void iommu_vfree(struct iommu *obj, const u32 da);
-extern u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
-		      u32 flags);
-extern void iommu_kunmap(struct iommu *obj, u32 da);
-extern u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes,
-			 u32 flags);
-extern void iommu_kfree(struct iommu *obj, u32 da);
+extern struct sg_table *iommu_vunmap(struct iommu_domain *domain,
+			struct iommu *obj, u32 da);
+extern u32 iommu_vmalloc(struct iommu_domain *domain, struct iommu *obj,
+			u32 da, size_t bytes, u32 flags);
+extern void iommu_vfree(struct iommu_domain *domain, struct iommu *obj,
+			const u32 da);
+extern u32 iommu_kmap(struct iommu_domain *domain, struct iommu *obj, u32 da,
+			u32 pa, size_t bytes, u32 flags);
+extern void iommu_kunmap(struct iommu_domain *domain, struct iommu *obj,
+			u32 da);
+extern u32 iommu_kmalloc(struct iommu_domain *domain, struct iommu *obj,
+			u32 da, size_t bytes, u32 flags);
+extern void iommu_kfree(struct iommu_domain *domain, struct iommu *obj, u32 da);
 
 extern void *da_to_va(struct iommu *obj, u32 da);
 
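Every iovmm entry point now takes the iommu_domain as a new first argument.
A before/after sketch of a typical caller (mirroring the omap3isp updates
later in this patch; isp->domain and isp->iommu are the struct isp_device
fields added below):

	/* before: */
	da = iommu_vmalloc(isp->iommu, 0, size, IOMMU_FLAG);
	iommu_vfree(isp->iommu, da);

	/* after: */
	da = iommu_vmalloc(isp->domain, isp->iommu, 0, size, IOMMU_FLAG);
	iommu_vfree(isp->domain, isp->iommu, da);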
diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c
index 34fc31ee908..51aa008d822 100644
--- a/arch/arm/plat-omap/iommu.c
+++ b/arch/arm/plat-omap/iommu.c
@@ -18,6 +18,9 @@
 #include <linux/ioport.h>
 #include <linux/clk.h>
 #include <linux/platform_device.h>
+#include <linux/iommu.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
 
 #include <asm/cacheflush.h>
 
@@ -30,6 +33,19 @@
 		(__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
 		__i++)
 
+/**
+ * struct omap_iommu_domain - omap iommu domain
+ * @pgtable:	the page table
+ * @iommu_dev:	an omap iommu device attached to this domain. only a single
+ *		iommu device can be attached for now.
+ * @lock:	domain lock, should be taken when attaching/detaching
+ */
+struct omap_iommu_domain {
+	u32 *pgtable;
+	struct iommu *iommu_dev;
+	spinlock_t lock;
+};
+
 /* accommodate the difference between omap1 and omap2/3 */
 static const struct iommu_functions *arch_iommu;
 
@@ -852,35 +868,55 @@ int iommu_set_da_range(struct iommu *obj, u32 start, u32 end)
 EXPORT_SYMBOL_GPL(iommu_set_da_range);
 
 /**
- * iommu_get - Get iommu handler
- * @name:	target iommu name
+ * omap_find_iommu_device() - find an omap iommu device by name
+ * @name:	name of the iommu device
+ *
+ * The generic iommu API requires the caller to provide the device
+ * he wishes to attach to a certain iommu domain.
+ *
+ * Drivers generally should not bother with this as it should just
+ * be taken care of by the DMA-API using dev_archdata.
+ *
+ * This function is provided as an interim solution until the latter
+ * materializes, and omap3isp is fully migrated to the DMA-API.
+ */
+struct device *omap_find_iommu_device(const char *name)
+{
+	return driver_find_device(&omap_iommu_driver.driver, NULL,
+				(void *)name,
+				device_match_by_alias);
+}
+EXPORT_SYMBOL_GPL(omap_find_iommu_device);
+
+/**
+ * omap_iommu_attach() - attach iommu device to an iommu domain
+ * @dev:	target omap iommu device
+ * @iopgd:	page table
  **/
-struct iommu *iommu_get(const char *name)
+static struct iommu *omap_iommu_attach(struct device *dev, u32 *iopgd)
 {
 	int err = -ENOMEM;
-	struct device *dev;
-	struct iommu *obj;
-
-	dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
-				 device_match_by_alias);
-	if (!dev)
-		return ERR_PTR(-ENODEV);
-
-	obj = to_iommu(dev);
+	struct iommu *obj = to_iommu(dev);
 
-	mutex_lock(&obj->iommu_lock);
+	spin_lock(&obj->iommu_lock);
 
-	if (obj->refcount++ == 0) {
-		err = iommu_enable(obj);
-		if (err)
-			goto err_enable;
-		flush_iotlb_all(obj);
+	/* an iommu device can only be attached once */
+	if (++obj->refcount > 1) {
+		dev_err(dev, "%s: already attached!\n", obj->name);
+		err = -EBUSY;
+		goto err_enable;
 	}
 
+	obj->iopgd = iopgd;
+	err = iommu_enable(obj);
+	if (err)
+		goto err_enable;
+	flush_iotlb_all(obj);
+
 	if (!try_module_get(obj->owner))
 		goto err_module;
 
-	mutex_unlock(&obj->iommu_lock);
+	spin_unlock(&obj->iommu_lock);
 
 	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
 	return obj;
@@ -890,32 +926,32 @@ err_module:
 	iommu_disable(obj);
 err_enable:
 	obj->refcount--;
-	mutex_unlock(&obj->iommu_lock);
+	spin_unlock(&obj->iommu_lock);
 	return ERR_PTR(err);
 }
-EXPORT_SYMBOL_GPL(iommu_get);
 
 /**
- * iommu_put - Put back iommu handler
+ * omap_iommu_detach - release iommu device
  * @obj:	target iommu
  **/
-void iommu_put(struct iommu *obj)
+static void omap_iommu_detach(struct iommu *obj)
 {
 	if (!obj || IS_ERR(obj))
 		return;
 
-	mutex_lock(&obj->iommu_lock);
+	spin_lock(&obj->iommu_lock);
 
 	if (--obj->refcount == 0)
 		iommu_disable(obj);
 
 	module_put(obj->owner);
 
-	mutex_unlock(&obj->iommu_lock);
+	obj->iopgd = NULL;
+
+	spin_unlock(&obj->iommu_lock);
 
 	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
 }
-EXPORT_SYMBOL_GPL(iommu_put);
 
 int iommu_set_isr(const char *name,
 		  int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs,
@@ -950,7 +986,6 @@ EXPORT_SYMBOL_GPL(iommu_set_isr);
 static int __devinit omap_iommu_probe(struct platform_device *pdev)
 {
 	int err = -ENODEV;
-	void *p;
 	int irq;
 	struct iommu *obj;
 	struct resource *res;
@@ -974,7 +1009,7 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev)
 	obj->da_start = pdata->da_start;
 	obj->da_end = pdata->da_end;
 
-	mutex_init(&obj->iommu_lock);
+	spin_lock_init(&obj->iommu_lock);
 	mutex_init(&obj->mmap_lock);
 	spin_lock_init(&obj->page_table_lock);
 	INIT_LIST_HEAD(&obj->mmap);
@@ -1009,22 +1044,9 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev)
 		goto err_irq;
 	platform_set_drvdata(pdev, obj);
 
-	p = (void *)__get_free_pages(GFP_KERNEL, get_order(IOPGD_TABLE_SIZE));
-	if (!p) {
-		err = -ENOMEM;
-		goto err_pgd;
-	}
-	memset(p, 0, IOPGD_TABLE_SIZE);
-	clean_dcache_area(p, IOPGD_TABLE_SIZE);
-	obj->iopgd = p;
-
-	BUG_ON(!IS_ALIGNED((unsigned long)obj->iopgd, IOPGD_TABLE_SIZE));
-
 	dev_info(&pdev->dev, "%s registered\n", obj->name);
 	return 0;
 
-err_pgd:
-	free_irq(irq, obj);
 err_irq:
 	iounmap(obj->regbase);
 err_ioremap:
@@ -1045,7 +1067,6 @@ static int __devexit omap_iommu_remove(struct platform_device *pdev)
 	platform_set_drvdata(pdev, NULL);
 
 	iopgtable_clear_entry_all(obj);
-	free_pages((unsigned long)obj->iopgd, get_order(IOPGD_TABLE_SIZE));
 
 	irq = platform_get_irq(pdev, 0);
 	free_irq(irq, obj);
@@ -1072,6 +1093,207 @@ static void iopte_cachep_ctor(void *iopte)
 	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
 }
 
+static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
+			 phys_addr_t pa, int order, int prot)
+{
+	struct omap_iommu_domain *omap_domain = domain->priv;
+	struct iommu *oiommu = omap_domain->iommu_dev;
+	struct device *dev = oiommu->dev;
+	size_t bytes = PAGE_SIZE << order;
+	struct iotlb_entry e;
+	int omap_pgsz;
+	u32 ret, flags;
+
+	/* we only support mapping a single iommu page for now */
+	omap_pgsz = bytes_to_iopgsz(bytes);
+	if (omap_pgsz < 0) {
+		dev_err(dev, "invalid size to map: %d\n", bytes);
+		return -EINVAL;
+	}
+
+	dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%x\n", da, pa, bytes);
+
+	flags = omap_pgsz | prot;
+
+	iotlb_init_entry(&e, da, pa, flags);
+
+	ret = iopgtable_store_entry(oiommu, &e);
+	if (ret) {
+		dev_err(dev, "iopgtable_store_entry failed: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
+			 int order)
+{
+	struct omap_iommu_domain *omap_domain = domain->priv;
+	struct iommu *oiommu = omap_domain->iommu_dev;
+	struct device *dev = oiommu->dev;
+	size_t bytes = PAGE_SIZE << order;
+	size_t ret;
+
+	dev_dbg(dev, "unmapping da 0x%lx size 0x%x\n", da, bytes);
+
+	ret = iopgtable_clear_entry(oiommu, da);
+	if (ret != bytes) {
+		dev_err(dev, "entry @ 0x%lx was %d; not %d\n", da, ret, bytes);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
+{
+	struct omap_iommu_domain *omap_domain = domain->priv;
+	struct iommu *oiommu;
+	int ret = 0;
+
+	spin_lock(&omap_domain->lock);
+
+	/* only a single device is supported per domain for now */
+	if (omap_domain->iommu_dev) {
+		dev_err(dev, "iommu domain is already attached\n");
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/* get a handle to and enable the omap iommu */
+	oiommu = omap_iommu_attach(dev, omap_domain->pgtable);
+	if (IS_ERR(oiommu)) {
+		ret = PTR_ERR(oiommu);
+		dev_err(dev, "can't get omap iommu: %d\n", ret);
+		goto out;
+	}
+
+	omap_domain->iommu_dev = oiommu;
+
+out:
+	spin_unlock(&omap_domain->lock);
+	return ret;
+}
+
+static void omap_iommu_detach_dev(struct iommu_domain *domain,
+				 struct device *dev)
+{
+	struct omap_iommu_domain *omap_domain = domain->priv;
+	struct iommu *oiommu = to_iommu(dev);
+
+	spin_lock(&omap_domain->lock);
+
+	/* only a single device is supported per domain for now */
+	if (omap_domain->iommu_dev != oiommu) {
+		dev_err(dev, "invalid iommu device\n");
+		goto out;
+	}
+
+	iopgtable_clear_entry_all(oiommu);
+
+	omap_iommu_detach(oiommu);
+
+	omap_domain->iommu_dev = NULL;
+
+out:
+	spin_unlock(&omap_domain->lock);
+}
+
+static int omap_iommu_domain_init(struct iommu_domain *domain)
+{
+	struct omap_iommu_domain *omap_domain;
+
+	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
+	if (!omap_domain) {
+		pr_err("kzalloc failed\n");
+		goto out;
+	}
+
+	omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
+	if (!omap_domain->pgtable) {
+		pr_err("kzalloc failed\n");
+		goto fail_nomem;
+	}
+
+	/*
+	 * should never fail, but please keep this around to ensure
+	 * we keep the hardware happy
+	 */
+	BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));
+
+	clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
+	spin_lock_init(&omap_domain->lock);
+
+	domain->priv = omap_domain;
+
+	return 0;
+
+fail_nomem:
+	kfree(omap_domain);
+out:
+	return -ENOMEM;
+}
+
+/* assume device was already detached */
+static void omap_iommu_domain_destroy(struct iommu_domain *domain)
+{
+	struct omap_iommu_domain *omap_domain = domain->priv;
+
+	domain->priv = NULL;
+
+	kfree(omap_domain->pgtable);
+	kfree(omap_domain);
+}
+
+static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
+					  unsigned long da)
+{
+	struct omap_iommu_domain *omap_domain = domain->priv;
+	struct iommu *oiommu = omap_domain->iommu_dev;
+	struct device *dev = oiommu->dev;
+	u32 *pgd, *pte;
+	phys_addr_t ret = 0;
+
+	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);
+
+	if (pte) {
+		if (iopte_is_small(*pte))
+			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
+		else if (iopte_is_large(*pte))
+			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
+		else
+			dev_err(dev, "bogus pte 0x%x", *pte);
+	} else {
+		if (iopgd_is_section(*pgd))
+			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
+		else if (iopgd_is_super(*pgd))
+			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
+		else
+			dev_err(dev, "bogus pgd 0x%x", *pgd);
+	}
+
+	return ret;
+}
+
+static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
+				    unsigned long cap)
+{
+	return 0;
+}
+
+static struct iommu_ops omap_iommu_ops = {
+	.domain_init	= omap_iommu_domain_init,
+	.domain_destroy	= omap_iommu_domain_destroy,
+	.attach_dev	= omap_iommu_attach_dev,
+	.detach_dev	= omap_iommu_detach_dev,
+	.map		= omap_iommu_map,
+	.unmap		= omap_iommu_unmap,
+	.iova_to_phys	= omap_iommu_iova_to_phys,
+	.domain_has_cap	= omap_iommu_domain_has_cap,
+};
+
 static int __init omap_iommu_init(void)
 {
 	struct kmem_cache *p;
@@ -1084,6 +1306,8 @@ static int __init omap_iommu_init(void)
 		return -ENOMEM;
 	iopte_cachep = p;
 
+	register_iommu(&omap_iommu_ops);
+
 	return platform_driver_register(&omap_iommu_driver);
 }
 module_init(omap_iommu_init);
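Note how the generic map/unmap path expresses sizes as page orders:
omap_iommu_map() converts PAGE_SIZE << order back to bytes and insists the
result be exactly one OMAP IOMMU page. A sketch of the accepted calls,
assuming a 4KB PAGE_SIZE:

	iommu_map(domain, da, pa, 0, flags);	/*  4KB small page   */
	iommu_map(domain, da, pa, 4, flags);	/* 64KB large page   */
	iommu_map(domain, da, pa, 8, flags);	/*  1MB section      */
	iommu_map(domain, da, pa, 12, flags);	/* 16MB supersection */
	/* any other order makes bytes_to_iopgsz() return a negative
	 * value, so the map fails with -EINVAL */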
diff --git a/arch/arm/plat-omap/iopgtable.h b/arch/arm/plat-omap/iopgtable.h
index c3e93bb0911..33c7aa986f5 100644
--- a/arch/arm/plat-omap/iopgtable.h
+++ b/arch/arm/plat-omap/iopgtable.h
@@ -56,6 +56,19 @@
 
 #define IOPAGE_MASK		IOPTE_MASK
 
+/**
+ * omap_iommu_translate() - va to pa translation
+ * @d:		omap iommu descriptor
+ * @va:	virtual address
+ * @mask:	omap iommu descriptor mask
+ *
+ * va to pa translation
+ */
+static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
+{
+	return (d & mask) | (va & (~mask));
+}
+
 /*
  * some descriptor attributes.
  */
@@ -64,10 +77,15 @@
 #define IOPGD_SUPER		(1 << 18 | 2 << 0)
 
 #define iopgd_is_table(x)	(((x) & 3) == IOPGD_TABLE)
+#define iopgd_is_section(x)	(((x) & (1 << 18 | 3)) == IOPGD_SECTION)
+#define iopgd_is_super(x)	(((x) & (1 << 18 | 3)) == IOPGD_SUPER)
 
 #define IOPTE_SMALL		(2 << 0)
 #define IOPTE_LARGE		(1 << 0)
 
+#define iopte_is_small(x)	(((x) & 2) == IOPTE_SMALL)
+#define iopte_is_large(x)	(((x) & 3) == IOPTE_LARGE)
+
 /* to find an entry in a page-table-directory */
 #define iopgd_index(da)		(((da) >> IOPGD_SHIFT) & (PTRS_PER_IOPGD - 1))
 #define iopgd_offset(obj, da)	((obj)->iopgd + iopgd_index(da))
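A quick worked example of omap_iommu_translate(): the physical address is
the descriptor's base bits OR'd with the va's offset bits under the mask.

	/* given IOSECTION_MASK == 0xfff00000 (assumed: top 12 bits
	 * of a 1MB section descriptor):
	 *   d  & mask  = 0x80200000  (section base from the descriptor)
	 *   va & ~mask = 0x00123456  (offset inside the 1MB section)
	 *   pa         = 0x80200000 | 0x00123456 = 0x80323456
	 */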
diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c
index 79e7fedb860..aa2c47893b0 100644
--- a/arch/arm/plat-omap/iovmm.c
+++ b/arch/arm/plat-omap/iovmm.c
@@ -15,6 +15,7 @@
 #include <linux/vmalloc.h>
 #include <linux/device.h>
 #include <linux/scatterlist.h>
+#include <linux/iommu.h>
 
 #include <asm/cacheflush.h>
 #include <asm/mach/map.h>
@@ -453,39 +454,38 @@ static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
 }
 
 /* create 'da' <-> 'pa' mapping from 'sgt' */
-static int map_iovm_area(struct iommu *obj, struct iovm_struct *new,
+static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
 			 const struct sg_table *sgt, u32 flags)
 {
 	int err;
 	unsigned int i, j;
 	struct scatterlist *sg;
 	u32 da = new->da_start;
+	int order;
 
-	if (!obj || !sgt)
+	if (!domain || !sgt)
 		return -EINVAL;
 
 	BUG_ON(!sgtable_ok(sgt));
 
 	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
 		u32 pa;
-		int pgsz;
 		size_t bytes;
-		struct iotlb_entry e;
 
 		pa = sg_phys(sg);
 		bytes = sg->length;
 
 		flags &= ~IOVMF_PGSZ_MASK;
-		pgsz = bytes_to_iopgsz(bytes);
-		if (pgsz < 0)
+
+		if (bytes_to_iopgsz(bytes) < 0)
 			goto err_out;
-		flags |= pgsz;
+
+		order = get_order(bytes);
 
 		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
 			 i, da, pa, bytes);
 
-		iotlb_init_entry(&e, da, pa, flags);
-		err = iopgtable_store_entry(obj, &e);
+		err = iommu_map(domain, da, pa, order, flags);
 		if (err)
 			goto err_out;
 
@@ -499,9 +499,11 @@ err_out:
 	for_each_sg(sgt->sgl, sg, i, j) {
 		size_t bytes;
 
-		bytes = iopgtable_clear_entry(obj, da);
+		bytes = sg->length;
+		order = get_order(bytes);
 
-		BUG_ON(!iopgsz_ok(bytes));
+		/* ignore failures.. we're already handling one */
+		iommu_unmap(domain, da, order);
 
 		da += bytes;
 	}
@@ -509,22 +511,31 @@ err_out:
 }
 
 /* release 'da' <-> 'pa' mapping */
-static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area)
+static void unmap_iovm_area(struct iommu_domain *domain, struct iommu *obj,
+			struct iovm_struct *area)
 {
 	u32 start;
 	size_t total = area->da_end - area->da_start;
+	const struct sg_table *sgt = area->sgt;
+	struct scatterlist *sg;
+	int i, err;
 
+	BUG_ON(!sgtable_ok(sgt));
 	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));
 
 	start = area->da_start;
-	while (total > 0) {
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
 		size_t bytes;
+		int order;
+
+		bytes = sg->length;
+		order = get_order(bytes);
+
+		err = iommu_unmap(domain, start, order);
+		if (err)
+			break;
 
-		bytes = iopgtable_clear_entry(obj, start);
-		if (bytes == 0)
-			bytes = PAGE_SIZE;
-		else
-			dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
+		dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
 				__func__, start, bytes, area->flags);
 
 		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
@@ -536,7 +547,8 @@ static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area)
 }
 
 /* template function for all unmapping */
-static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da,
+static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
+			struct iommu *obj, const u32 da,
 			 void (*fn)(const void *), u32 flags)
 {
 	struct sg_table *sgt = NULL;
@@ -562,7 +574,7 @@ static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da,
 	}
 	sgt = (struct sg_table *)area->sgt;
 
-	unmap_iovm_area(obj, area);
+	unmap_iovm_area(domain, obj, area);
 
 	fn(area->va);
 
@@ -577,8 +589,9 @@ out:
 	return sgt;
 }
 
-static u32 map_iommu_region(struct iommu *obj, u32 da,
-	      const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
+static u32 map_iommu_region(struct iommu_domain *domain, struct iommu *obj,
+				u32 da, const struct sg_table *sgt, void *va,
+				size_t bytes, u32 flags)
 {
 	int err = -ENOMEM;
 	struct iovm_struct *new;
@@ -593,7 +606,7 @@ static u32 map_iommu_region(struct iommu *obj, u32 da,
 	new->va = va;
 	new->sgt = sgt;
 
-	if (map_iovm_area(obj, new, sgt, new->flags))
+	if (map_iovm_area(domain, new, sgt, new->flags))
 		goto err_map;
 
 	mutex_unlock(&obj->mmap_lock);
@@ -610,10 +623,11 @@ err_alloc_iovma:
 	return err;
 }
 
-static inline u32 __iommu_vmap(struct iommu *obj, u32 da,
-		 const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
+static inline u32 __iommu_vmap(struct iommu_domain *domain, struct iommu *obj,
+				u32 da, const struct sg_table *sgt,
+				void *va, size_t bytes, u32 flags)
 {
-	return map_iommu_region(obj, da, sgt, va, bytes, flags);
+	return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
 }
 
 /**
@@ -625,8 +639,8 @@ static inline u32 __iommu_vmap(struct iommu *obj, u32 da,
  * Creates 1-n-1 mapping with given @sgt and returns @da.
  * All @sgt elements must be io page size aligned.
  */
-u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
-		 u32 flags)
+u32 iommu_vmap(struct iommu_domain *domain, struct iommu *obj, u32 da,
+		const struct sg_table *sgt, u32 flags)
 {
 	size_t bytes;
 	void *va = NULL;
@@ -648,7 +662,7 @@ u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
 	flags |= IOVMF_DISCONT;
 	flags |= IOVMF_MMIO;
 
-	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
+	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
 	if (IS_ERR_VALUE(da))
 		vunmap_sg(va);
 
@@ -664,14 +678,16 @@ EXPORT_SYMBOL_GPL(iommu_vmap);
  * Free the iommu virtually contiguous memory area starting at
  * @da, which was returned by 'iommu_vmap()'.
  */
-struct sg_table *iommu_vunmap(struct iommu *obj, u32 da)
+struct sg_table *
+iommu_vunmap(struct iommu_domain *domain, struct iommu *obj, u32 da)
 {
 	struct sg_table *sgt;
 	/*
 	 * 'sgt' is allocated before 'iommu_vmalloc()' is called.
 	 * Just returns 'sgt' to the caller to free
 	 */
-	sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO);
+	sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
+				IOVMF_DISCONT | IOVMF_MMIO);
 	if (!sgt)
 		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
 	return sgt;
@@ -688,7 +704,8 @@ EXPORT_SYMBOL_GPL(iommu_vunmap);
  * Allocate @bytes linearly and creates 1-n-1 mapping and returns
  * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
  */
-u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
+u32 iommu_vmalloc(struct iommu_domain *domain, struct iommu *obj, u32 da,
+			size_t bytes, u32 flags)
 {
 	void *va;
 	struct sg_table *sgt;
@@ -712,7 +729,7 @@ u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
 	}
 	sgtable_fill_vmalloc(sgt, va);
 
-	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
+	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
 	if (IS_ERR_VALUE(da))
 		goto err_iommu_vmap;
 
@@ -735,19 +752,20 @@ EXPORT_SYMBOL_GPL(iommu_vmalloc);
  * Frees the iommu virtually contiguous memory area starting at
  * @da, as obtained from 'iommu_vmalloc()'.
  */
-void iommu_vfree(struct iommu *obj, const u32 da)
+void iommu_vfree(struct iommu_domain *domain, struct iommu *obj, const u32 da)
 {
 	struct sg_table *sgt;
 
-	sgt = unmap_vm_area(obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC);
+	sgt = unmap_vm_area(domain, obj, da, vfree,
+				IOVMF_DISCONT | IOVMF_ALLOC);
 	if (!sgt)
 		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
 	sgtable_free(sgt);
 }
 EXPORT_SYMBOL_GPL(iommu_vfree);
 
-static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
-		 size_t bytes, u32 flags)
+static u32 __iommu_kmap(struct iommu_domain *domain, struct iommu *obj,
+			u32 da, u32 pa, void *va, size_t bytes, u32 flags)
 {
 	struct sg_table *sgt;
 
@@ -757,7 +775,7 @@ static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
 
 	sgtable_fill_kmalloc(sgt, pa, da, bytes);
 
-	da = map_iommu_region(obj, da, sgt, va, bytes, flags);
+	da = map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
 	if (IS_ERR_VALUE(da)) {
 		sgtable_drain_kmalloc(sgt);
 		sgtable_free(sgt);
@@ -776,8 +794,8 @@ static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
  * Creates 1-1-1 mapping and returns @da again, which can be
  * adjusted if 'IOVMF_DA_FIXED' is not set.
  */
-u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
-		 u32 flags)
+u32 iommu_kmap(struct iommu_domain *domain, struct iommu *obj, u32 da, u32 pa,
+		size_t bytes, u32 flags)
 {
 	void *va;
 
@@ -793,7 +811,7 @@ u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
 	flags |= IOVMF_LINEAR;
 	flags |= IOVMF_MMIO;
 
-	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
+	da = __iommu_kmap(domain, obj, da, pa, va, bytes, flags);
 	if (IS_ERR_VALUE(da))
 		iounmap(va);
 
@@ -809,12 +827,12 @@ EXPORT_SYMBOL_GPL(iommu_kmap);
  * Frees the iommu virtually contiguous memory area starting at
  * @da, which was passed to and was returned by 'iommu_kmap()'.
  */
-void iommu_kunmap(struct iommu *obj, u32 da)
+void iommu_kunmap(struct iommu_domain *domain, struct iommu *obj, u32 da)
 {
 	struct sg_table *sgt;
 	typedef void (*func_t)(const void *);
 
-	sgt = unmap_vm_area(obj, da, (func_t)iounmap,
+	sgt = unmap_vm_area(domain, obj, da, (func_t)iounmap,
 		    IOVMF_LINEAR | IOVMF_MMIO);
 	if (!sgt)
 		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
@@ -832,7 +850,8 @@ EXPORT_SYMBOL_GPL(iommu_kunmap);
  * Allocate @bytes linearly and creates 1-1-1 mapping and returns
  * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
  */
-u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
+u32 iommu_kmalloc(struct iommu_domain *domain, struct iommu *obj, u32 da,
+			size_t bytes, u32 flags)
 {
 	void *va;
 	u32 pa;
@@ -850,7 +869,7 @@ u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
 	flags |= IOVMF_LINEAR;
 	flags |= IOVMF_ALLOC;
 
-	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
+	da = __iommu_kmap(domain, obj, da, pa, va, bytes, flags);
 	if (IS_ERR_VALUE(da))
 		kfree(va);
 
@@ -866,11 +885,11 @@ EXPORT_SYMBOL_GPL(iommu_kmalloc);
  * Frees the iommu virtually contiguous memory area starting at
  * @da, which was passed to and was returned by 'iommu_kmalloc()'.
  */
-void iommu_kfree(struct iommu *obj, u32 da)
+void iommu_kfree(struct iommu_domain *domain, struct iommu *obj, u32 da)
 {
 	struct sg_table *sgt;
 
-	sgt = unmap_vm_area(obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
+	sgt = unmap_vm_area(domain, obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
 	if (!sgt)
 		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
 	sgtable_free(sgt);
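The reworked unmap path above leans on a simple invariant: map_iovm_area()
mapped each sg element as exactly one OMAP page, so sg->length hands back
the order that iommu_unmap() needs. A sketch of that round trip:

	/* map: one generic-API call per sg element */
	err = iommu_map(domain, da, sg_phys(sg), get_order(sg->length), flags);
	/* unmap: same walk, same order recovered from the same length */
	err = iommu_unmap(domain, start, get_order(sg->length));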
diff --git a/drivers/media/video/omap3isp/isp.c b/drivers/media/video/omap3isp/isp.c
index 5cea2bbd701..0db45ac7489 100644
--- a/drivers/media/video/omap3isp/isp.c
+++ b/drivers/media/video/omap3isp/isp.c
@@ -80,6 +80,13 @@
 #include "isph3a.h"
 #include "isphist.h"
 
+/*
+ * this is provided as an interim solution until omap3isp doesn't need
+ * any omap-specific iommu API
+ */
+#define to_iommu(dev)							\
+	(struct iommu *)platform_get_drvdata(to_platform_device(dev))
+
 static unsigned int autoidle;
 module_param(autoidle, int, 0444);
 MODULE_PARM_DESC(autoidle, "Enable OMAP3ISP AUTOIDLE support");
@@ -1975,7 +1982,8 @@ static int isp_remove(struct platform_device *pdev)
 	isp_cleanup_modules(isp);
 
 	omap3isp_get(isp);
-	iommu_put(isp->iommu);
+	iommu_detach_device(isp->domain, isp->iommu_dev);
+	iommu_domain_free(isp->domain);
 	omap3isp_put(isp);
 
 	free_irq(isp->irq_num, isp);
@@ -2123,25 +2131,41 @@ static int isp_probe(struct platform_device *pdev)
 	}
 
 	/* IOMMU */
-	isp->iommu = iommu_get("isp");
-	if (IS_ERR_OR_NULL(isp->iommu)) {
-		isp->iommu = NULL;
+	isp->iommu_dev = omap_find_iommu_device("isp");
+	if (!isp->iommu_dev) {
+		dev_err(isp->dev, "omap_find_iommu_device failed\n");
 		ret = -ENODEV;
 		goto error_isp;
 	}
 
+	/* to be removed once iommu migration is complete */
+	isp->iommu = to_iommu(isp->iommu_dev);
+
+	isp->domain = iommu_domain_alloc();
+	if (!isp->domain) {
+		dev_err(isp->dev, "can't alloc iommu domain\n");
+		ret = -ENOMEM;
+		goto error_isp;
+	}
+
+	ret = iommu_attach_device(isp->domain, isp->iommu_dev);
+	if (ret) {
+		dev_err(&pdev->dev, "can't attach iommu device: %d\n", ret);
+		goto free_domain;
+	}
+
 	/* Interrupt */
 	isp->irq_num = platform_get_irq(pdev, 0);
 	if (isp->irq_num <= 0) {
 		dev_err(isp->dev, "No IRQ resource\n");
 		ret = -ENODEV;
-		goto error_isp;
+		goto detach_dev;
 	}
 
 	if (request_irq(isp->irq_num, isp_isr, IRQF_SHARED, "OMAP3 ISP", isp)) {
 		dev_err(isp->dev, "Unable to request IRQ\n");
 		ret = -EINVAL;
-		goto error_isp;
+		goto detach_dev;
 	}
 
 	/* Entities */
@@ -2162,8 +2186,11 @@ error_modules:
 	isp_cleanup_modules(isp);
 error_irq:
 	free_irq(isp->irq_num, isp);
+detach_dev:
+	iommu_detach_device(isp->domain, isp->iommu_dev);
+free_domain:
+	iommu_domain_free(isp->domain);
 error_isp:
-	iommu_put(isp->iommu);
 	omap3isp_put(isp);
 error:
 	isp_put_clocks(isp);
diff --git a/drivers/media/video/omap3isp/isp.h b/drivers/media/video/omap3isp/isp.h
index 529e582ef94..c9ec7a2e53e 100644
--- a/drivers/media/video/omap3isp/isp.h
+++ b/drivers/media/video/omap3isp/isp.h
@@ -32,6 +32,7 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/wait.h>
+#include <linux/iommu.h>
 #include <plat/iommu.h>
 #include <plat/iovmm.h>
 
@@ -295,6 +296,8 @@ struct isp_device {
 	unsigned int subclk_resources;
 
 	struct iommu *iommu;
+	struct iommu_domain *domain;
+	struct device *iommu_dev;
 
 	struct isp_platform_callback platform_cb;
 };
diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c
index 9d3459de04b..de254741373 100644
--- a/drivers/media/video/omap3isp/ispccdc.c
+++ b/drivers/media/video/omap3isp/ispccdc.c
@@ -365,7 +365,7 @@ static void ccdc_lsc_free_request(struct isp_ccdc_device *ccdc,
 		dma_unmap_sg(isp->dev, req->iovm->sgt->sgl,
 			     req->iovm->sgt->nents, DMA_TO_DEVICE);
 	if (req->table)
-		iommu_vfree(isp->iommu, req->table);
+		iommu_vfree(isp->domain, isp->iommu, req->table);
 	kfree(req);
 }
 
@@ -437,8 +437,8 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
 
 	req->enable = 1;
 
-	req->table = iommu_vmalloc(isp->iommu, 0, req->config.size,
-				   IOMMU_FLAG);
+	req->table = iommu_vmalloc(isp->domain, isp->iommu, 0,
+					req->config.size, IOMMU_FLAG);
 	if (IS_ERR_VALUE(req->table)) {
 		req->table = 0;
 		ret = -ENOMEM;
@@ -733,15 +733,15 @@ static int ccdc_config(struct isp_ccdc_device *ccdc,
 		 * already done by iommu_vmalloc().
 		 */
 		size = ccdc->fpc.fpnum * 4;
-		table_new = iommu_vmalloc(isp->iommu, 0, size,
-					  IOMMU_FLAG);
+		table_new = iommu_vmalloc(isp->domain, isp->iommu, 0,
+						size, IOMMU_FLAG);
 		if (IS_ERR_VALUE(table_new))
 			return -ENOMEM;
 
 		if (copy_from_user(da_to_va(isp->iommu, table_new),
 				   (__force void __user *)
 				   ccdc->fpc.fpcaddr, size)) {
-			iommu_vfree(isp->iommu, table_new);
+			iommu_vfree(isp->domain, isp->iommu, table_new);
 			return -EFAULT;
 		}
 
@@ -751,7 +751,7 @@ static int ccdc_config(struct isp_ccdc_device *ccdc,
 
 		ccdc_configure_fpc(ccdc);
 		if (table_old != 0)
-			iommu_vfree(isp->iommu, table_old);
+			iommu_vfree(isp->domain, isp->iommu, table_old);
 	}
 
 	return ccdc_lsc_config(ccdc, ccdc_struct);
@@ -2286,5 +2286,5 @@ void omap3isp_ccdc_cleanup(struct isp_device *isp)
 	ccdc_lsc_free_queue(ccdc, &ccdc->lsc.free_queue);
 
 	if (ccdc->fpc.fpcaddr != 0)
-		iommu_vfree(isp->iommu, ccdc->fpc.fpcaddr);
+		iommu_vfree(isp->domain, isp->iommu, ccdc->fpc.fpcaddr);
 }
diff --git a/drivers/media/video/omap3isp/ispstat.c b/drivers/media/video/omap3isp/ispstat.c
index 808065948ac..98af736b9a9 100644
--- a/drivers/media/video/omap3isp/ispstat.c
+++ b/drivers/media/video/omap3isp/ispstat.c
@@ -366,7 +366,7 @@ static void isp_stat_bufs_free(struct ispstat *stat)
 			dma_unmap_sg(isp->dev, buf->iovm->sgt->sgl,
 				     buf->iovm->sgt->nents,
 				     DMA_FROM_DEVICE);
-			iommu_vfree(isp->iommu, buf->iommu_addr);
+			iommu_vfree(isp->domain, isp->iommu, buf->iommu_addr);
 		} else {
 			if (!buf->virt_addr)
 				continue;
@@ -399,8 +399,8 @@ static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size)
 		struct iovm_struct *iovm;
 
 		WARN_ON(buf->dma_addr);
-		buf->iommu_addr = iommu_vmalloc(isp->iommu, 0, size,
-						IOMMU_FLAG);
+		buf->iommu_addr = iommu_vmalloc(isp->domain, isp->iommu, 0,
+						size, IOMMU_FLAG);
 		if (IS_ERR((void *)buf->iommu_addr)) {
 			dev_err(stat->isp->dev,
 				"%s: Can't acquire memory for "
diff --git a/drivers/media/video/omap3isp/ispvideo.c b/drivers/media/video/omap3isp/ispvideo.c
index fd965adfd59..023b5028a27 100644
--- a/drivers/media/video/omap3isp/ispvideo.c
+++ b/drivers/media/video/omap3isp/ispvideo.c
@@ -446,7 +446,7 @@ ispmmu_vmap(struct isp_device *isp, const struct scatterlist *sglist, int sglen)
 	sgt->nents = sglen;
 	sgt->orig_nents = sglen;
 
-	da = iommu_vmap(isp->iommu, 0, sgt, IOMMU_FLAG);
+	da = iommu_vmap(isp->domain, isp->iommu, 0, sgt, IOMMU_FLAG);
 	if (IS_ERR_VALUE(da))
 		kfree(sgt);
 
@@ -462,7 +462,7 @@ static void ispmmu_vunmap(struct isp_device *isp, dma_addr_t da)
 {
 	struct sg_table *sgt;
 
-	sgt = iommu_vunmap(isp->iommu, (u32)da);
+	sgt = iommu_vunmap(isp->domain, isp->iommu, (u32)da);
 	kfree(sgt);
 }
 