aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorSuman Anna <s-anna@ti.com>2017-09-05 18:56:18 -0400
committerJoerg Roedel <jroedel@suse.de>2017-09-19 05:32:05 -0400
commit9d5018deec86673ef8418546a3ac43e47dbff3b9 (patch)
tree7968986b50db80f3d6cc57a66abdc6fea7b8c8d6 /drivers
parent0d3642883b092ccfc0b044c6581ee2c1f32ab165 (diff)
iommu/omap: Add support to program multiple iommus
A client user instantiates and attaches to an iommu_domain to program the OMAP IOMMU associated with the domain. The iommus programmed by a client user are bound with the iommu_domain through the user's device archdata. The OMAP IOMMU driver currently supports only one IOMMU per IOMMU domain per user. The OMAP IOMMU driver has been enhanced to support allowing multiple IOMMUs to be programmed by a single client user. This support is being added mainly to handle the DSP subsystems on the DRA7xx SoCs, which have two MMUs within the same subsystem. These MMUs provide translations for a processor core port and an internal EDMA port. This support allows both the MMUs to be programmed together, but with each one retaining its own internal state objects. The internal EDMA block is managed by the software running on the DSPs, and this design provides on-par functionality with previous generation OMAP DSPs where the EDMA and the DSP core shared the same MMU. The multiple iommus are expected to be provided through a sentinel terminated array of omap_iommu_arch_data objects through the client user's device archdata. The OMAP driver core is enhanced to loop through the array of attached iommus and program them for all common operations. The sentinel-terminated logic is used so as to not change the omap_iommu_arch_data structure. NOTE: 1. The IOMMU group and IOMMU core registration is done only for the DSP processor core MMU even though both MMUs are represented by their own platform device and are probed individually. The IOMMU device linking uses this registered MMU device. The struct iommu_device for the second MMU is not used even though memory for it is allocated. 2. The OMAP IOMMU debugfs code still continues to operate on individual IOMMU objects. Signed-off-by: Suman Anna <s-anna@ti.com> [t-kristo@ti.com: ported support to 4.13 based kernel] Signed-off-by: Tero Kristo <t-kristo@ti.com> Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/iommu/omap-iommu.c358
-rw-r--r--drivers/iommu/omap-iommu.h30
2 files changed, 285 insertions, 103 deletions
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 81ef729994ce..e135ab830ebf 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -2,6 +2,7 @@
2 * omap iommu: tlb and pagetable primitives 2 * omap iommu: tlb and pagetable primitives
3 * 3 *
4 * Copyright (C) 2008-2010 Nokia Corporation 4 * Copyright (C) 2008-2010 Nokia Corporation
5 * Copyright (C) 2013-2017 Texas Instruments Incorporated - http://www.ti.com/
5 * 6 *
6 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>, 7 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
7 * Paul Mundt and Toshihiro Kobayashi 8 * Paul Mundt and Toshihiro Kobayashi
@@ -71,13 +72,23 @@ static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
71 **/ 72 **/
72void omap_iommu_save_ctx(struct device *dev) 73void omap_iommu_save_ctx(struct device *dev)
73{ 74{
74 struct omap_iommu *obj = dev_to_omap_iommu(dev); 75 struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
75 u32 *p = obj->ctx; 76 struct omap_iommu *obj;
77 u32 *p;
76 int i; 78 int i;
77 79
78 for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) { 80 if (!arch_data)
79 p[i] = iommu_read_reg(obj, i * sizeof(u32)); 81 return;
80 dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]); 82
83 while (arch_data->iommu_dev) {
84 obj = arch_data->iommu_dev;
85 p = obj->ctx;
86 for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
87 p[i] = iommu_read_reg(obj, i * sizeof(u32));
88 dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
89 p[i]);
90 }
91 arch_data++;
81 } 92 }
82} 93}
83EXPORT_SYMBOL_GPL(omap_iommu_save_ctx); 94EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
@@ -88,13 +99,23 @@ EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
88 **/ 99 **/
89void omap_iommu_restore_ctx(struct device *dev) 100void omap_iommu_restore_ctx(struct device *dev)
90{ 101{
91 struct omap_iommu *obj = dev_to_omap_iommu(dev); 102 struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
92 u32 *p = obj->ctx; 103 struct omap_iommu *obj;
104 u32 *p;
93 int i; 105 int i;
94 106
95 for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) { 107 if (!arch_data)
96 iommu_write_reg(obj, p[i], i * sizeof(u32)); 108 return;
97 dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]); 109
110 while (arch_data->iommu_dev) {
111 obj = arch_data->iommu_dev;
112 p = obj->ctx;
113 for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
114 iommu_write_reg(obj, p[i], i * sizeof(u32));
115 dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
116 p[i]);
117 }
118 arch_data++;
98 } 119 }
99} 120}
100EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx); 121EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
@@ -893,6 +914,24 @@ static void omap_iommu_detach(struct omap_iommu *obj)
893 dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); 914 dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
894} 915}
895 916
917static bool omap_iommu_can_register(struct platform_device *pdev)
918{
919 struct device_node *np = pdev->dev.of_node;
920
921 if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
922 return true;
923
924 /*
925 * restrict IOMMU core registration only for processor-port MDMA MMUs
926 * on DRA7 DSPs
927 */
928 if ((!strcmp(dev_name(&pdev->dev), "40d01000.mmu")) ||
929 (!strcmp(dev_name(&pdev->dev), "41501000.mmu")))
930 return true;
931
932 return false;
933}
934
896static int omap_iommu_dra7_get_dsp_system_cfg(struct platform_device *pdev, 935static int omap_iommu_dra7_get_dsp_system_cfg(struct platform_device *pdev,
897 struct omap_iommu *obj) 936 struct omap_iommu *obj)
898{ 937{
@@ -984,19 +1023,22 @@ static int omap_iommu_probe(struct platform_device *pdev)
984 return err; 1023 return err;
985 platform_set_drvdata(pdev, obj); 1024 platform_set_drvdata(pdev, obj);
986 1025
987 obj->group = iommu_group_alloc(); 1026 if (omap_iommu_can_register(pdev)) {
988 if (IS_ERR(obj->group)) 1027 obj->group = iommu_group_alloc();
989 return PTR_ERR(obj->group); 1028 if (IS_ERR(obj->group))
1029 return PTR_ERR(obj->group);
990 1030
991 err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL, obj->name); 1031 err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL,
992 if (err) 1032 obj->name);
993 goto out_group; 1033 if (err)
1034 goto out_group;
994 1035
995 iommu_device_set_ops(&obj->iommu, &omap_iommu_ops); 1036 iommu_device_set_ops(&obj->iommu, &omap_iommu_ops);
996 1037
997 err = iommu_device_register(&obj->iommu); 1038 err = iommu_device_register(&obj->iommu);
998 if (err) 1039 if (err)
999 goto out_sysfs; 1040 goto out_sysfs;
1041 }
1000 1042
1001 pm_runtime_irq_safe(obj->dev); 1043 pm_runtime_irq_safe(obj->dev);
1002 pm_runtime_enable(obj->dev); 1044 pm_runtime_enable(obj->dev);
@@ -1018,11 +1060,13 @@ static int omap_iommu_remove(struct platform_device *pdev)
1018{ 1060{
1019 struct omap_iommu *obj = platform_get_drvdata(pdev); 1061 struct omap_iommu *obj = platform_get_drvdata(pdev);
1020 1062
1021 iommu_group_put(obj->group); 1063 if (obj->group) {
1022 obj->group = NULL; 1064 iommu_group_put(obj->group);
1065 obj->group = NULL;
1023 1066
1024 iommu_device_sysfs_remove(&obj->iommu); 1067 iommu_device_sysfs_remove(&obj->iommu);
1025 iommu_device_unregister(&obj->iommu); 1068 iommu_device_unregister(&obj->iommu);
1069 }
1026 1070
1027 omap_iommu_debugfs_remove(obj); 1071 omap_iommu_debugfs_remove(obj);
1028 1072
@@ -1068,11 +1112,13 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
1068 phys_addr_t pa, size_t bytes, int prot) 1112 phys_addr_t pa, size_t bytes, int prot)
1069{ 1113{
1070 struct omap_iommu_domain *omap_domain = to_omap_domain(domain); 1114 struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
1071 struct omap_iommu *oiommu = omap_domain->iommu_dev; 1115 struct device *dev = omap_domain->dev;
1072 struct device *dev = oiommu->dev; 1116 struct omap_iommu_device *iommu;
1117 struct omap_iommu *oiommu;
1073 struct iotlb_entry e; 1118 struct iotlb_entry e;
1074 int omap_pgsz; 1119 int omap_pgsz;
1075 u32 ret; 1120 u32 ret = -EINVAL;
1121 int i;
1076 1122
1077 omap_pgsz = bytes_to_iopgsz(bytes); 1123 omap_pgsz = bytes_to_iopgsz(bytes);
1078 if (omap_pgsz < 0) { 1124 if (omap_pgsz < 0) {
@@ -1084,9 +1130,24 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
1084 1130
1085 iotlb_init_entry(&e, da, pa, omap_pgsz); 1131 iotlb_init_entry(&e, da, pa, omap_pgsz);
1086 1132
1087 ret = omap_iopgtable_store_entry(oiommu, &e); 1133 iommu = omap_domain->iommus;
1088 if (ret) 1134 for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
1089 dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret); 1135 oiommu = iommu->iommu_dev;
1136 ret = omap_iopgtable_store_entry(oiommu, &e);
1137 if (ret) {
1138 dev_err(dev, "omap_iopgtable_store_entry failed: %d\n",
1139 ret);
1140 break;
1141 }
1142 }
1143
1144 if (ret) {
1145 while (i--) {
1146 iommu--;
1147 oiommu = iommu->iommu_dev;
1148 iopgtable_clear_entry(oiommu, da);
1149 }
1150 }
1090 1151
1091 return ret; 1152 return ret;
1092} 1153}
@@ -1095,12 +1156,90 @@ static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
1095 size_t size) 1156 size_t size)
1096{ 1157{
1097 struct omap_iommu_domain *omap_domain = to_omap_domain(domain); 1158 struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
1098 struct omap_iommu *oiommu = omap_domain->iommu_dev; 1159 struct device *dev = omap_domain->dev;
1099 struct device *dev = oiommu->dev; 1160 struct omap_iommu_device *iommu;
1161 struct omap_iommu *oiommu;
1162 bool error = false;
1163 size_t bytes = 0;
1164 int i;
1100 1165
1101 dev_dbg(dev, "unmapping da 0x%lx size %u\n", da, size); 1166 dev_dbg(dev, "unmapping da 0x%lx size %u\n", da, size);
1102 1167
1103 return iopgtable_clear_entry(oiommu, da); 1168 iommu = omap_domain->iommus;
1169 for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
1170 oiommu = iommu->iommu_dev;
1171 bytes = iopgtable_clear_entry(oiommu, da);
1172 if (!bytes)
1173 error = true;
1174 }
1175
1176 /*
1177 * simplify return - we are only checking if any of the iommus
1178 * reported an error, but not if all of them are unmapping the
1179 * same number of entries. This should not occur due to the
1180 * mirror programming.
1181 */
1182 return error ? 0 : bytes;
1183}
1184
1185static int omap_iommu_count(struct device *dev)
1186{
1187 struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
1188 int count = 0;
1189
1190 while (arch_data->iommu_dev) {
1191 count++;
1192 arch_data++;
1193 }
1194
1195 return count;
1196}
1197
1198/* caller should call cleanup if this function fails */
1199static int omap_iommu_attach_init(struct device *dev,
1200 struct omap_iommu_domain *odomain)
1201{
1202 struct omap_iommu_device *iommu;
1203 int i;
1204
1205 odomain->num_iommus = omap_iommu_count(dev);
1206 if (!odomain->num_iommus)
1207 return -EINVAL;
1208
1209 odomain->iommus = kcalloc(odomain->num_iommus, sizeof(*iommu),
1210 GFP_ATOMIC);
1211 if (!odomain->iommus)
1212 return -ENOMEM;
1213
1214 iommu = odomain->iommus;
1215 for (i = 0; i < odomain->num_iommus; i++, iommu++) {
1216 iommu->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_ATOMIC);
1217 if (!iommu->pgtable)
1218 return -ENOMEM;
1219
1220 /*
1221 * should never fail, but please keep this around to ensure
1222 * we keep the hardware happy
1223 */
1224 if (WARN_ON(!IS_ALIGNED((long)iommu->pgtable,
1225 IOPGD_TABLE_SIZE)))
1226 return -EINVAL;
1227 }
1228
1229 return 0;
1230}
1231
1232static void omap_iommu_detach_fini(struct omap_iommu_domain *odomain)
1233{
1234 int i;
1235 struct omap_iommu_device *iommu = odomain->iommus;
1236
1237 for (i = 0; iommu && i < odomain->num_iommus; i++, iommu++)
1238 kfree(iommu->pgtable);
1239
1240 kfree(odomain->iommus);
1241 odomain->num_iommus = 0;
1242 odomain->iommus = NULL;
1104} 1243}
1105 1244
1106static int 1245static int
@@ -1108,8 +1247,10 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
1108{ 1247{
1109 struct omap_iommu_domain *omap_domain = to_omap_domain(domain); 1248 struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
1110 struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; 1249 struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
1250 struct omap_iommu_device *iommu;
1111 struct omap_iommu *oiommu; 1251 struct omap_iommu *oiommu;
1112 int ret = 0; 1252 int ret = 0;
1253 int i;
1113 1254
1114 if (!arch_data || !arch_data->iommu_dev) { 1255 if (!arch_data || !arch_data->iommu_dev) {
1115 dev_err(dev, "device doesn't have an associated iommu\n"); 1256 dev_err(dev, "device doesn't have an associated iommu\n");
@@ -1125,19 +1266,42 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
1125 goto out; 1266 goto out;
1126 } 1267 }
1127 1268
1128 oiommu = arch_data->iommu_dev; 1269 ret = omap_iommu_attach_init(dev, omap_domain);
1129
1130 /* get a handle to and enable the omap iommu */
1131 ret = omap_iommu_attach(oiommu, omap_domain->pgtable);
1132 if (ret) { 1270 if (ret) {
1133 dev_err(dev, "can't get omap iommu: %d\n", ret); 1271 dev_err(dev, "failed to allocate required iommu data %d\n",
1134 goto out; 1272 ret);
1273 goto init_fail;
1274 }
1275
1276 iommu = omap_domain->iommus;
1277 for (i = 0; i < omap_domain->num_iommus; i++, iommu++, arch_data++) {
1278 /* configure and enable the omap iommu */
1279 oiommu = arch_data->iommu_dev;
1280 ret = omap_iommu_attach(oiommu, iommu->pgtable);
1281 if (ret) {
1282 dev_err(dev, "can't get omap iommu: %d\n", ret);
1283 goto attach_fail;
1284 }
1285
1286 oiommu->domain = domain;
1287 iommu->iommu_dev = oiommu;
1135 } 1288 }
1136 1289
1137 omap_domain->iommu_dev = oiommu;
1138 omap_domain->dev = dev; 1290 omap_domain->dev = dev;
1139 oiommu->domain = domain;
1140 1291
1292 goto out;
1293
1294attach_fail:
1295 while (i--) {
1296 iommu--;
1297 arch_data--;
1298 oiommu = iommu->iommu_dev;
1299 omap_iommu_detach(oiommu);
1300 iommu->iommu_dev = NULL;
1301 oiommu->domain = NULL;
1302 }
1303init_fail:
1304 omap_iommu_detach_fini(omap_domain);
1141out: 1305out:
1142 spin_unlock(&omap_domain->lock); 1306 spin_unlock(&omap_domain->lock);
1143 return ret; 1307 return ret;
@@ -1146,7 +1310,10 @@ out:
1146static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain, 1310static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
1147 struct device *dev) 1311 struct device *dev)
1148{ 1312{
1149 struct omap_iommu *oiommu = dev_to_omap_iommu(dev); 1313 struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
1314 struct omap_iommu_device *iommu = omap_domain->iommus;
1315 struct omap_iommu *oiommu;
1316 int i;
1150 1317
1151 if (!omap_domain->dev) { 1318 if (!omap_domain->dev) {
1152 dev_err(dev, "domain has no attached device\n"); 1319 dev_err(dev, "domain has no attached device\n");
@@ -1159,13 +1326,24 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
1159 return; 1326 return;
1160 } 1327 }
1161 1328
1162 iopgtable_clear_entry_all(oiommu); 1329 /*
1330 * cleanup in the reverse order of attachment - this addresses
1331 * any h/w dependencies between multiple instances, if any
1332 */
1333 iommu += (omap_domain->num_iommus - 1);
1334 arch_data += (omap_domain->num_iommus - 1);
1335 for (i = 0; i < omap_domain->num_iommus; i++, iommu--, arch_data--) {
1336 oiommu = iommu->iommu_dev;
1337 iopgtable_clear_entry_all(oiommu);
1338
1339 omap_iommu_detach(oiommu);
1340 iommu->iommu_dev = NULL;
1341 oiommu->domain = NULL;
1342 }
1163 1343
1164 omap_iommu_detach(oiommu); 1344 omap_iommu_detach_fini(omap_domain);
1165 1345
1166 omap_domain->iommu_dev = NULL;
1167 omap_domain->dev = NULL; 1346 omap_domain->dev = NULL;
1168 oiommu->domain = NULL;
1169} 1347}
1170 1348
1171static void omap_iommu_detach_dev(struct iommu_domain *domain, 1349static void omap_iommu_detach_dev(struct iommu_domain *domain,
@@ -1187,18 +1365,7 @@ static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
1187 1365
1188 omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL); 1366 omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
1189 if (!omap_domain) 1367 if (!omap_domain)
1190 goto out; 1368 return NULL;
1191
1192 omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
1193 if (!omap_domain->pgtable)
1194 goto fail_nomem;
1195
1196 /*
1197 * should never fail, but please keep this around to ensure
1198 * we keep the hardware happy
1199 */
1200 if (WARN_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE)))
1201 goto fail_align;
1202 1369
1203 spin_lock_init(&omap_domain->lock); 1370 spin_lock_init(&omap_domain->lock);
1204 1371
@@ -1207,13 +1374,6 @@ static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
1207 omap_domain->domain.geometry.force_aperture = true; 1374 omap_domain->domain.geometry.force_aperture = true;
1208 1375
1209 return &omap_domain->domain; 1376 return &omap_domain->domain;
1210
1211fail_align:
1212 kfree(omap_domain->pgtable);
1213fail_nomem:
1214 kfree(omap_domain);
1215out:
1216 return NULL;
1217} 1377}
1218 1378
1219static void omap_iommu_domain_free(struct iommu_domain *domain) 1379static void omap_iommu_domain_free(struct iommu_domain *domain)
@@ -1227,7 +1387,6 @@ static void omap_iommu_domain_free(struct iommu_domain *domain)
1227 if (omap_domain->dev) 1387 if (omap_domain->dev)
1228 _omap_iommu_detach_dev(omap_domain, omap_domain->dev); 1388 _omap_iommu_detach_dev(omap_domain, omap_domain->dev);
1229 1389
1230 kfree(omap_domain->pgtable);
1231 kfree(omap_domain); 1390 kfree(omap_domain);
1232} 1391}
1233 1392
@@ -1235,11 +1394,16 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
1235 dma_addr_t da) 1394 dma_addr_t da)
1236{ 1395{
1237 struct omap_iommu_domain *omap_domain = to_omap_domain(domain); 1396 struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
1238 struct omap_iommu *oiommu = omap_domain->iommu_dev; 1397 struct omap_iommu_device *iommu = omap_domain->iommus;
1398 struct omap_iommu *oiommu = iommu->iommu_dev;
1239 struct device *dev = oiommu->dev; 1399 struct device *dev = oiommu->dev;
1240 u32 *pgd, *pte; 1400 u32 *pgd, *pte;
1241 phys_addr_t ret = 0; 1401 phys_addr_t ret = 0;
1242 1402
1403 /*
1404 * all the iommus within the domain will have identical programming,
1405 * so perform the lookup using just the first iommu
1406 */
1243 iopgtable_lookup_entry(oiommu, da, &pgd, &pte); 1407 iopgtable_lookup_entry(oiommu, da, &pgd, &pte);
1244 1408
1245 if (pte) { 1409 if (pte) {
@@ -1265,11 +1429,12 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
1265 1429
1266static int omap_iommu_add_device(struct device *dev) 1430static int omap_iommu_add_device(struct device *dev)
1267{ 1431{
1268 struct omap_iommu_arch_data *arch_data; 1432 struct omap_iommu_arch_data *arch_data, *tmp;
1269 struct omap_iommu *oiommu; 1433 struct omap_iommu *oiommu;
1270 struct iommu_group *group; 1434 struct iommu_group *group;
1271 struct device_node *np; 1435 struct device_node *np;
1272 struct platform_device *pdev; 1436 struct platform_device *pdev;
1437 int num_iommus, i;
1273 int ret; 1438 int ret;
1274 1439
1275 /* 1440 /*
@@ -1281,36 +1446,57 @@ static int omap_iommu_add_device(struct device *dev)
1281 if (!dev->of_node) 1446 if (!dev->of_node)
1282 return 0; 1447 return 0;
1283 1448
1284 np = of_parse_phandle(dev->of_node, "iommus", 0); 1449 /*
1285 if (!np) 1450 * retrieve the count of IOMMU nodes using phandle size as element size
1451 * since #iommu-cells = 0 for OMAP
1452 */
1453 num_iommus = of_property_count_elems_of_size(dev->of_node, "iommus",
1454 sizeof(phandle));
1455 if (num_iommus < 0)
1286 return 0; 1456 return 0;
1287 1457
1288 pdev = of_find_device_by_node(np); 1458 arch_data = kzalloc((num_iommus + 1) * sizeof(*arch_data), GFP_KERNEL);
1289 if (WARN_ON(!pdev)) { 1459 if (!arch_data)
1290 of_node_put(np); 1460 return -ENOMEM;
1291 return -EINVAL;
1292 }
1293 1461
1294 oiommu = platform_get_drvdata(pdev); 1462 for (i = 0, tmp = arch_data; i < num_iommus; i++, tmp++) {
1295 if (!oiommu) { 1463 np = of_parse_phandle(dev->of_node, "iommus", i);
1296 of_node_put(np); 1464 if (!np) {
1297 return -EINVAL; 1465 kfree(arch_data);
1298 } 1466 return -EINVAL;
1467 }
1468
1469 pdev = of_find_device_by_node(np);
1470 if (WARN_ON(!pdev)) {
1471 of_node_put(np);
1472 kfree(arch_data);
1473 return -EINVAL;
1474 }
1475
1476 oiommu = platform_get_drvdata(pdev);
1477 if (!oiommu) {
1478 of_node_put(np);
1479 kfree(arch_data);
1480 return -EINVAL;
1481 }
1482
1483 tmp->iommu_dev = oiommu;
1299 1484
1300 arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL);
1301 if (!arch_data) {
1302 of_node_put(np); 1485 of_node_put(np);
1303 return -ENOMEM;
1304 } 1486 }
1305 1487
1488 /*
1489 * use the first IOMMU alone for the sysfs device linking.
1490 * TODO: Evaluate if a single iommu_group needs to be
1491 * maintained for both IOMMUs
1492 */
1493 oiommu = arch_data->iommu_dev;
1306 ret = iommu_device_link(&oiommu->iommu, dev); 1494 ret = iommu_device_link(&oiommu->iommu, dev);
1307 if (ret) { 1495 if (ret) {
1308 kfree(arch_data); 1496 kfree(arch_data);
1309 of_node_put(np);
1310 return ret; 1497 return ret;
1311 } 1498 }
1312 1499
1313 arch_data->iommu_dev = oiommu;
1314 dev->archdata.iommu = arch_data; 1500 dev->archdata.iommu = arch_data;
1315 1501
1316 /* 1502 /*
@@ -1326,8 +1512,6 @@ static int omap_iommu_add_device(struct device *dev)
1326 } 1512 }
1327 iommu_group_put(group); 1513 iommu_group_put(group);
1328 1514
1329 of_node_put(np);
1330
1331 return 0; 1515 return 0;
1332} 1516}
1333 1517
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h
index a675af29a6ec..1703159ef5af 100644
--- a/drivers/iommu/omap-iommu.h
+++ b/drivers/iommu/omap-iommu.h
@@ -29,17 +29,26 @@ struct iotlb_entry {
29}; 29};
30 30
31/** 31/**
32 * struct omap_iommu_device - omap iommu device data
33 * @pgtable: page table used by an omap iommu attached to a domain
34 * @iommu_dev: pointer to store an omap iommu instance attached to a domain
35 */
36struct omap_iommu_device {
37 u32 *pgtable;
38 struct omap_iommu *iommu_dev;
39};
40
41/**
32 * struct omap_iommu_domain - omap iommu domain 42 * struct omap_iommu_domain - omap iommu domain
33 * @pgtable: the page table 43 * @num_iommus: number of iommus in this domain
34 * @iommu_dev: an omap iommu device attached to this domain. only a single 44 * @iommus: omap iommu device data for all iommus in this domain
35 * iommu device can be attached for now.
36 * @dev: Device using this domain. 45 * @dev: Device using this domain.
37 * @lock: domain lock, should be taken when attaching/detaching 46 * @lock: domain lock, should be taken when attaching/detaching
38 * @domain: generic domain handle used by iommu core code 47 * @domain: generic domain handle used by iommu core code
39 */ 48 */
40struct omap_iommu_domain { 49struct omap_iommu_domain {
41 u32 *pgtable; 50 u32 num_iommus;
42 struct omap_iommu *iommu_dev; 51 struct omap_iommu_device *iommus;
43 struct device *dev; 52 struct device *dev;
44 spinlock_t lock; 53 spinlock_t lock;
45 struct iommu_domain domain; 54 struct iommu_domain domain;
@@ -97,17 +106,6 @@ struct iotlb_lock {
97 short vict; 106 short vict;
98}; 107};
99 108
100/**
101 * dev_to_omap_iommu() - retrieves an omap iommu object from a user device
102 * @dev: iommu client device
103 */
104static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
105{
106 struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
107
108 return arch_data->iommu_dev;
109}
110
111/* 109/*
112 * MMU Register offsets 110 * MMU Register offsets
113 */ 111 */