path: root/arch/arm/plat-omap/iommu.c
author	Ohad Ben-Cohen <ohad@wizery.com>	2011-06-01 18:46:12 -0400
committer	Joerg Roedel <joerg.roedel@amd.com>	2011-08-26 05:46:00 -0400
commit	f626b52d4a568d4315cd152123ef2d1ea406def2
tree	3592dfe99e9fd2713609cef9fbb8d0dfc8653a52 /arch/arm/plat-omap/iommu.c
parent	fcb8ce5cfe30ca9ca5c9a79cdfe26d1993e65e0c
omap: iommu: migrate to the generic IOMMU API
Migrate OMAP's iommu driver to the generic IOMMU API, so users can stay
generic, and any generic IOMMU functionality can be developed once in the
generic framework.

Migrate omap's iovmm (virtual memory manager) to the generic IOMMU API,
and adapt omap3isp as needed, so the latter won't break.

The plan is to eventually remove iovmm completely by replacing it with the
(upcoming) IOMMU-based DMA-API.

Tested on OMAP3 (with omap3isp) and OMAP4 (with rpmsg/remoteproc).

Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Acked-by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
Acked-by: Tony Lindgren <tony@atomide.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
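For orientation, this is roughly what a client of the migrated driver looks
like. A minimal sketch, not code from this patch: the "isp" instance name and
the da/pa values are assumptions, and it uses the generic API as it stood at
this point (iommu_domain_alloc() takes no argument, and iommu_map() takes a
gfp order rather than a byte size):

	#include <linux/iommu.h>
	#include <linux/err.h>

	static int example_map_one_page(void)
	{
		struct iommu_domain *domain;
		struct device *dev;
		int err;

		/* interim name-based lookup added by this patch */
		dev = omap_find_iommu_device("isp");
		if (!dev)
			return -ENODEV;

		domain = iommu_domain_alloc();	/* -> omap_iommu_domain_init() */
		if (!domain)
			return -ENOMEM;

		err = iommu_attach_device(domain, dev);	/* -> omap_iommu_attach_dev() */
		if (err)
			goto free_domain;

		/* map one 4KiB page (order 0) at an illustrative da/pa pair */
		err = iommu_map(domain, 0x10000000, 0x88000000, 0,
				IOMMU_READ | IOMMU_WRITE);
		if (err)
			goto detach;

		/* ... the device can now DMA through da 0x10000000 ... */

		iommu_unmap(domain, 0x10000000, 0);
	detach:
		iommu_detach_device(domain, dev);
	free_domain:
		iommu_domain_free(domain);
		return err;
	}

Note that omap_iommu_map() below simply ORs prot into the OMAP page
attributes, so an in-tree caller like iovmm passes OMAP's own MMU_RAM_*
flags through prot rather than the generic IOMMU_* bits used above.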
Diffstat (limited to 'arch/arm/plat-omap/iommu.c')
-rw-r--r--	arch/arm/plat-omap/iommu.c	308
1 file changed, 266 insertions(+), 42 deletions(-)
diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c
index 34fc31ee9081..51aa008d8223 100644
--- a/arch/arm/plat-omap/iommu.c
+++ b/arch/arm/plat-omap/iommu.c
@@ -18,6 +18,9 @@
 #include <linux/ioport.h>
 #include <linux/clk.h>
 #include <linux/platform_device.h>
+#include <linux/iommu.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
 
 #include <asm/cacheflush.h>
 
@@ -30,6 +33,19 @@
 	     (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
 	     __i++)
 
+/**
+ * struct omap_iommu_domain - omap iommu domain
+ * @pgtable:	the page table
+ * @iommu_dev:	an omap iommu device attached to this domain. only a single
+ *		iommu device can be attached for now.
+ * @lock:	domain lock, should be taken when attaching/detaching
+ */
+struct omap_iommu_domain {
+	u32 *pgtable;
+	struct iommu *iommu_dev;
+	spinlock_t lock;
+};
+
 /* accommodate the difference between omap1 and omap2/3 */
 static const struct iommu_functions *arch_iommu;
 
@@ -852,35 +868,55 @@ int iommu_set_da_range(struct iommu *obj, u32 start, u32 end)
 EXPORT_SYMBOL_GPL(iommu_set_da_range);
 
 /**
- * iommu_get - Get iommu handler
- * @name:	target iommu name
+ * omap_find_iommu_device() - find an omap iommu device by name
+ * @name:	name of the iommu device
+ *
+ * The generic iommu API requires the caller to provide the device
+ * he wishes to attach to a certain iommu domain.
+ *
+ * Drivers generally should not bother with this as it should just
+ * be taken care of by the DMA-API using dev_archdata.
+ *
+ * This function is provided as an interim solution until the latter
+ * materializes, and omap3isp is fully migrated to the DMA-API.
+ */
+struct device *omap_find_iommu_device(const char *name)
+{
+	return driver_find_device(&omap_iommu_driver.driver, NULL,
+				(void *)name,
+				device_match_by_alias);
+}
+EXPORT_SYMBOL_GPL(omap_find_iommu_device);
+
+/**
+ * omap_iommu_attach() - attach iommu device to an iommu domain
+ * @dev:	target omap iommu device
+ * @iopgd:	page table
  **/
-struct iommu *iommu_get(const char *name)
+static struct iommu *omap_iommu_attach(struct device *dev, u32 *iopgd)
 {
 	int err = -ENOMEM;
-	struct device *dev;
-	struct iommu *obj;
-
-	dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
-				 device_match_by_alias);
-	if (!dev)
-		return ERR_PTR(-ENODEV);
-
-	obj = to_iommu(dev);
+	struct iommu *obj = to_iommu(dev);
 
-	mutex_lock(&obj->iommu_lock);
+	spin_lock(&obj->iommu_lock);
 
-	if (obj->refcount++ == 0) {
-		err = iommu_enable(obj);
-		if (err)
-			goto err_enable;
-		flush_iotlb_all(obj);
-	}
+	/* an iommu device can only be attached once */
+	if (++obj->refcount > 1) {
+		dev_err(dev, "%s: already attached!\n", obj->name);
+		err = -EBUSY;
+		goto err_enable;
+	}
 
+	obj->iopgd = iopgd;
+	err = iommu_enable(obj);
+	if (err)
+		goto err_enable;
+	flush_iotlb_all(obj);
+
 	if (!try_module_get(obj->owner))
 		goto err_module;
 
-	mutex_unlock(&obj->iommu_lock);
+	spin_unlock(&obj->iommu_lock);
 
 	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
 	return obj;
@@ -890,32 +926,32 @@ err_module:
 		iommu_disable(obj);
 err_enable:
 	obj->refcount--;
-	mutex_unlock(&obj->iommu_lock);
+	spin_unlock(&obj->iommu_lock);
 	return ERR_PTR(err);
 }
-EXPORT_SYMBOL_GPL(iommu_get);
 
 /**
- * iommu_put - Put back iommu handler
+ * omap_iommu_detach - release iommu device
  * @obj:	target iommu
  **/
-void iommu_put(struct iommu *obj)
+static void omap_iommu_detach(struct iommu *obj)
 {
 	if (!obj || IS_ERR(obj))
 		return;
 
-	mutex_lock(&obj->iommu_lock);
+	spin_lock(&obj->iommu_lock);
 
 	if (--obj->refcount == 0)
 		iommu_disable(obj);
 
 	module_put(obj->owner);
 
-	mutex_unlock(&obj->iommu_lock);
+	obj->iopgd = NULL;
+
+	spin_unlock(&obj->iommu_lock);
 
 	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
 }
-EXPORT_SYMBOL_GPL(iommu_put);
 
 int iommu_set_isr(const char *name,
 		  int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs,
@@ -950,7 +986,6 @@ EXPORT_SYMBOL_GPL(iommu_set_isr);
 static int __devinit omap_iommu_probe(struct platform_device *pdev)
 {
 	int err = -ENODEV;
-	void *p;
 	int irq;
 	struct iommu *obj;
 	struct resource *res;
@@ -974,7 +1009,7 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev)
 	obj->da_start = pdata->da_start;
 	obj->da_end = pdata->da_end;
 
-	mutex_init(&obj->iommu_lock);
+	spin_lock_init(&obj->iommu_lock);
 	mutex_init(&obj->mmap_lock);
 	spin_lock_init(&obj->page_table_lock);
 	INIT_LIST_HEAD(&obj->mmap);
@@ -1009,22 +1044,9 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev)
 		goto err_irq;
 	platform_set_drvdata(pdev, obj);
 
-	p = (void *)__get_free_pages(GFP_KERNEL, get_order(IOPGD_TABLE_SIZE));
-	if (!p) {
-		err = -ENOMEM;
-		goto err_pgd;
-	}
-	memset(p, 0, IOPGD_TABLE_SIZE);
-	clean_dcache_area(p, IOPGD_TABLE_SIZE);
-	obj->iopgd = p;
-
-	BUG_ON(!IS_ALIGNED((unsigned long)obj->iopgd, IOPGD_TABLE_SIZE));
-
 	dev_info(&pdev->dev, "%s registered\n", obj->name);
 	return 0;
 
-err_pgd:
-	free_irq(irq, obj);
 err_irq:
 	iounmap(obj->regbase);
 err_ioremap:
@@ -1045,7 +1067,6 @@ static int __devexit omap_iommu_remove(struct platform_device *pdev)
 	platform_set_drvdata(pdev, NULL);
 
 	iopgtable_clear_entry_all(obj);
-	free_pages((unsigned long)obj->iopgd, get_order(IOPGD_TABLE_SIZE));
 
 	irq = platform_get_irq(pdev, 0);
 	free_irq(irq, obj);
@@ -1072,6 +1093,207 @@ static void iopte_cachep_ctor(void *iopte)
 	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
 }
 
+static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
+			 phys_addr_t pa, int order, int prot)
+{
+	struct omap_iommu_domain *omap_domain = domain->priv;
+	struct iommu *oiommu = omap_domain->iommu_dev;
+	struct device *dev = oiommu->dev;
+	size_t bytes = PAGE_SIZE << order;
+	struct iotlb_entry e;
+	int omap_pgsz;
+	u32 ret, flags;
+
+	/* we only support mapping a single iommu page for now */
+	omap_pgsz = bytes_to_iopgsz(bytes);
+	if (omap_pgsz < 0) {
+		dev_err(dev, "invalid size to map: %d\n", bytes);
+		return -EINVAL;
+	}
+
+	dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%x\n", da, pa, bytes);
+
+	flags = omap_pgsz | prot;
+
+	iotlb_init_entry(&e, da, pa, flags);
+
+	ret = iopgtable_store_entry(oiommu, &e);
+	if (ret) {
+		dev_err(dev, "iopgtable_store_entry failed: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
+			    int order)
+{
+	struct omap_iommu_domain *omap_domain = domain->priv;
+	struct iommu *oiommu = omap_domain->iommu_dev;
+	struct device *dev = oiommu->dev;
+	size_t bytes = PAGE_SIZE << order;
+	size_t ret;
+
+	dev_dbg(dev, "unmapping da 0x%lx size 0x%x\n", da, bytes);
+
+	ret = iopgtable_clear_entry(oiommu, da);
+	if (ret != bytes) {
+		dev_err(dev, "entry @ 0x%lx was %d; not %d\n", da, ret, bytes);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
+{
+	struct omap_iommu_domain *omap_domain = domain->priv;
+	struct iommu *oiommu;
+	int ret = 0;
+
+	spin_lock(&omap_domain->lock);
+
+	/* only a single device is supported per domain for now */
+	if (omap_domain->iommu_dev) {
+		dev_err(dev, "iommu domain is already attached\n");
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/* get a handle to and enable the omap iommu */
+	oiommu = omap_iommu_attach(dev, omap_domain->pgtable);
+	if (IS_ERR(oiommu)) {
+		ret = PTR_ERR(oiommu);
+		dev_err(dev, "can't get omap iommu: %d\n", ret);
+		goto out;
+	}
+
+	omap_domain->iommu_dev = oiommu;
+
+out:
+	spin_unlock(&omap_domain->lock);
+	return ret;
+}
+
+static void omap_iommu_detach_dev(struct iommu_domain *domain,
+				 struct device *dev)
+{
+	struct omap_iommu_domain *omap_domain = domain->priv;
+	struct iommu *oiommu = to_iommu(dev);
+
+	spin_lock(&omap_domain->lock);
+
+	/* only a single device is supported per domain for now */
+	if (omap_domain->iommu_dev != oiommu) {
+		dev_err(dev, "invalid iommu device\n");
+		goto out;
+	}
+
+	iopgtable_clear_entry_all(oiommu);
+
+	omap_iommu_detach(oiommu);
+
+	omap_domain->iommu_dev = NULL;
+
+out:
+	spin_unlock(&omap_domain->lock);
+}
+
+static int omap_iommu_domain_init(struct iommu_domain *domain)
+{
+	struct omap_iommu_domain *omap_domain;
+
+	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
+	if (!omap_domain) {
+		pr_err("kzalloc failed\n");
+		goto out;
+	}
+
+	omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
+	if (!omap_domain->pgtable) {
+		pr_err("kzalloc failed\n");
+		goto fail_nomem;
+	}
+
+	/*
+	 * should never fail, but please keep this around to ensure
+	 * we keep the hardware happy
+	 */
+	BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));
+
+	clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
+	spin_lock_init(&omap_domain->lock);
+
+	domain->priv = omap_domain;
+
+	return 0;
+
+fail_nomem:
+	kfree(omap_domain);
+out:
+	return -ENOMEM;
+}
+
+/* assume device was already detached */
+static void omap_iommu_domain_destroy(struct iommu_domain *domain)
+{
+	struct omap_iommu_domain *omap_domain = domain->priv;
+
+	domain->priv = NULL;
+
+	kfree(omap_domain->pgtable);
+	kfree(omap_domain);
+}
+
+static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
+					  unsigned long da)
+{
+	struct omap_iommu_domain *omap_domain = domain->priv;
+	struct iommu *oiommu = omap_domain->iommu_dev;
+	struct device *dev = oiommu->dev;
+	u32 *pgd, *pte;
+	phys_addr_t ret = 0;
+
+	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);
+
+	if (pte) {
+		if (iopte_is_small(*pte))
+			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
+		else if (iopte_is_large(*pte))
+			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
+		else
+			dev_err(dev, "bogus pte 0x%x", *pte);
+	} else {
+		if (iopgd_is_section(*pgd))
+			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
+		else if (iopgd_is_super(*pgd))
+			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
+		else
+			dev_err(dev, "bogus pgd 0x%x", *pgd);
+	}
+
+	return ret;
+}
+
+static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
+				    unsigned long cap)
+{
+	return 0;
+}
+
+static struct iommu_ops omap_iommu_ops = {
+	.domain_init	= omap_iommu_domain_init,
+	.domain_destroy	= omap_iommu_domain_destroy,
+	.attach_dev	= omap_iommu_attach_dev,
+	.detach_dev	= omap_iommu_detach_dev,
+	.map		= omap_iommu_map,
+	.unmap		= omap_iommu_unmap,
+	.iova_to_phys	= omap_iommu_iova_to_phys,
+	.domain_has_cap	= omap_iommu_domain_has_cap,
+};
+
 static int __init omap_iommu_init(void)
 {
 	struct kmem_cache *p;
@@ -1084,6 +1306,8 @@ static int __init omap_iommu_init(void)
 		return -ENOMEM;
 	iopte_cachep = p;
 
+	register_iommu(&omap_iommu_ops);
+
 	return platform_driver_register(&omap_iommu_driver);
 }
 module_init(omap_iommu_init);
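A closing note on the map path: iommu_map() at this stage sizes the operation
as PAGE_SIZE << order, and omap_iommu_map() then rejects, via
bytes_to_iopgsz(), anything that is not a native OMAP MMU page size (4KiB,
64KiB, 1MiB or 16MiB). A hypothetical helper, for illustration only and not
part of the patch, makes the valid orders explicit (assuming 4KiB PAGE_SIZE):

	#include <asm/page.h>	/* PAGE_SIZE, get_order() */
	#include <asm/sizes.h>	/* SZ_4K, SZ_64K, SZ_1M, SZ_16M */

	/*
	 * Pick the 'order' argument for iommu_map()/iommu_unmap().
	 * With 4KiB pages the OMAP-supported sizes correspond to
	 * orders 0 (4KiB), 4 (64KiB), 8 (1MiB) and 12 (16MiB); any
	 * other size would make omap_iommu_map() fail with -EINVAL.
	 */
	static int example_size_to_order(size_t bytes)
	{
		if (bytes != SZ_4K && bytes != SZ_64K &&
		    bytes != SZ_1M && bytes != SZ_16M)
			return -EINVAL;

		return get_order(bytes);	/* log2(bytes / PAGE_SIZE) */
	}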