about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorRobin Murphy <robin.murphy@arm.com>2016-09-12 12:13:55 -0400
committerWill Deacon <will.deacon@arm.com>2016-09-16 04:34:20 -0400
commitadfec2e709d2a48dbd756d65fe4fa8e4aae529a3 (patch)
treee77fc536a77f4aa7c658b9c34b8c177b3e7ecefa
parent588888a7399db352d2b1a41c9d5b3bf0fd482390 (diff)
iommu/arm-smmu: Convert to iommu_fwspec
In the final step of preparation for full generic configuration support, swap our fixed-size master_cfg for the generic iommu_fwspec. For the legacy DT bindings, the driver simply gets to act as its own 'firmware'. Farewell, arbitrary MAX_MASTER_STREAMIDS! Signed-off-by: Robin Murphy <robin.murphy@arm.com> Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r-- drivers/iommu/arm-smmu.c | 140
1 file changed, 78 insertions, 62 deletions
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 7a2bd60a54da..9dbb6a37e625 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -42,6 +42,7 @@
42#include <linux/of.h> 42#include <linux/of.h>
43#include <linux/of_address.h> 43#include <linux/of_address.h>
44#include <linux/of_device.h> 44#include <linux/of_device.h>
45#include <linux/of_iommu.h>
45#include <linux/pci.h> 46#include <linux/pci.h>
46#include <linux/platform_device.h> 47#include <linux/platform_device.h>
47#include <linux/slab.h> 48#include <linux/slab.h>
@@ -51,9 +52,6 @@
51 52
52#include "io-pgtable.h" 53#include "io-pgtable.h"
53 54
54/* Maximum number of stream IDs assigned to a single device */
55#define MAX_MASTER_STREAMIDS 128
56
57/* Maximum number of context banks per SMMU */ 55/* Maximum number of context banks per SMMU */
58#define ARM_SMMU_MAX_CBS 128 56#define ARM_SMMU_MAX_CBS 128
59 57
@@ -321,13 +319,13 @@ struct arm_smmu_smr {
321 319
322struct arm_smmu_master_cfg { 320struct arm_smmu_master_cfg {
323 struct arm_smmu_device *smmu; 321 struct arm_smmu_device *smmu;
324 int num_streamids; 322 s16 smendx[];
325 u16 streamids[MAX_MASTER_STREAMIDS];
326 s16 smendx[MAX_MASTER_STREAMIDS];
327}; 323};
328#define INVALID_SMENDX -1 324#define INVALID_SMENDX -1
329#define for_each_cfg_sme(cfg, i, idx) \ 325#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
330 for (i = 0; idx = cfg->smendx[i], i < cfg->num_streamids; ++i) 326#define fwspec_smmu(fw) (__fwspec_cfg(fw)->smmu)
327#define for_each_cfg_sme(fw, i, idx) \
328 for (i = 0; idx = __fwspec_cfg(fw)->smendx[i], i < fw->num_ids; ++i)
331 329
332struct arm_smmu_device { 330struct arm_smmu_device {
333 struct device *dev; 331 struct device *dev;
@@ -480,14 +478,16 @@ static int __find_legacy_master_phandle(struct device *dev, void *data)
480} 478}
481 479
482static struct platform_driver arm_smmu_driver; 480static struct platform_driver arm_smmu_driver;
481static struct iommu_ops arm_smmu_ops;
483 482
484static int arm_smmu_register_legacy_master(struct device *dev) 483static int arm_smmu_register_legacy_master(struct device *dev,
484 struct arm_smmu_device **smmu)
485{ 485{
486 struct arm_smmu_device *smmu; 486 struct device *smmu_dev;
487 struct arm_smmu_master_cfg *cfg;
488 struct device_node *np; 487 struct device_node *np;
489 struct of_phandle_iterator it; 488 struct of_phandle_iterator it;
490 void *data = &it; 489 void *data = &it;
490 u32 *sids;
491 __be32 pci_sid; 491 __be32 pci_sid;
492 int err; 492 int err;
493 493
@@ -500,20 +500,13 @@ static int arm_smmu_register_legacy_master(struct device *dev)
500 it.node = np; 500 it.node = np;
501 err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data, 501 err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
502 __find_legacy_master_phandle); 502 __find_legacy_master_phandle);
503 smmu_dev = data;
503 of_node_put(np); 504 of_node_put(np);
504 if (err == 0) 505 if (err == 0)
505 return -ENODEV; 506 return -ENODEV;
506 if (err < 0) 507 if (err < 0)
507 return err; 508 return err;
508 509
509 smmu = dev_get_drvdata(data);
510
511 if (it.cur_count > MAX_MASTER_STREAMIDS) {
512 dev_err(smmu->dev,
513 "reached maximum number (%d) of stream IDs for master device %s\n",
514 MAX_MASTER_STREAMIDS, dev_name(dev));
515 return -ENOSPC;
516 }
517 if (dev_is_pci(dev)) { 510 if (dev_is_pci(dev)) {
518 /* "mmu-masters" assumes Stream ID == Requester ID */ 511 /* "mmu-masters" assumes Stream ID == Requester ID */
519 pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid, 512 pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
@@ -522,17 +515,20 @@ static int arm_smmu_register_legacy_master(struct device *dev)
522 it.cur_count = 1; 515 it.cur_count = 1;
523 } 516 }
524 517
525 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); 518 err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
526 if (!cfg) 519 &arm_smmu_ops);
527 return -ENOMEM; 520 if (err)
528 521 return err;
529 cfg->smmu = smmu;
530 dev->archdata.iommu = cfg;
531 522
532 while (it.cur_count--) 523 sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
533 cfg->streamids[cfg->num_streamids++] = be32_to_cpup(it.cur++); 524 if (!sids)
525 return -ENOMEM;
534 526
535 return 0; 527 *smmu = dev_get_drvdata(smmu_dev);
528 of_phandle_iterator_args(&it, sids, it.cur_count);
529 err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
530 kfree(sids);
531 return err;
536} 532}
537 533
538static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end) 534static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
@@ -1127,7 +1123,8 @@ static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
1127 1123
1128static int arm_smmu_master_alloc_smes(struct device *dev) 1124static int arm_smmu_master_alloc_smes(struct device *dev)
1129{ 1125{
1130 struct arm_smmu_master_cfg *cfg = dev->archdata.iommu; 1126 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1127 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
1131 struct arm_smmu_device *smmu = cfg->smmu; 1128 struct arm_smmu_device *smmu = cfg->smmu;
1132 struct arm_smmu_smr *smrs = smmu->smrs; 1129 struct arm_smmu_smr *smrs = smmu->smrs;
1133 struct iommu_group *group; 1130 struct iommu_group *group;
@@ -1135,19 +1132,19 @@ static int arm_smmu_master_alloc_smes(struct device *dev)
1135 1132
1136 mutex_lock(&smmu->stream_map_mutex); 1133 mutex_lock(&smmu->stream_map_mutex);
1137 /* Figure out a viable stream map entry allocation */ 1134 /* Figure out a viable stream map entry allocation */
1138 for_each_cfg_sme(cfg, i, idx) { 1135 for_each_cfg_sme(fwspec, i, idx) {
1139 if (idx != INVALID_SMENDX) { 1136 if (idx != INVALID_SMENDX) {
1140 ret = -EEXIST; 1137 ret = -EEXIST;
1141 goto out_err; 1138 goto out_err;
1142 } 1139 }
1143 1140
1144 ret = arm_smmu_find_sme(smmu, cfg->streamids[i], 0); 1141 ret = arm_smmu_find_sme(smmu, fwspec->ids[i], 0);
1145 if (ret < 0) 1142 if (ret < 0)
1146 goto out_err; 1143 goto out_err;
1147 1144
1148 idx = ret; 1145 idx = ret;
1149 if (smrs && smmu->s2crs[idx].count == 0) { 1146 if (smrs && smmu->s2crs[idx].count == 0) {
1150 smrs[idx].id = cfg->streamids[i]; 1147 smrs[idx].id = fwspec->ids[i];
1151 smrs[idx].mask = 0; /* We don't currently share SMRs */ 1148 smrs[idx].mask = 0; /* We don't currently share SMRs */
1152 smrs[idx].valid = true; 1149 smrs[idx].valid = true;
1153 } 1150 }
@@ -1165,7 +1162,7 @@ static int arm_smmu_master_alloc_smes(struct device *dev)
1165 iommu_group_put(group); 1162 iommu_group_put(group);
1166 1163
1167 /* It worked! Now, poke the actual hardware */ 1164 /* It worked! Now, poke the actual hardware */
1168 for_each_cfg_sme(cfg, i, idx) { 1165 for_each_cfg_sme(fwspec, i, idx) {
1169 arm_smmu_write_sme(smmu, idx); 1166 arm_smmu_write_sme(smmu, idx);
1170 smmu->s2crs[idx].group = group; 1167 smmu->s2crs[idx].group = group;
1171 } 1168 }
@@ -1182,13 +1179,14 @@ out_err:
1182 return ret; 1179 return ret;
1183} 1180}
1184 1181
1185static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg) 1182static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
1186{ 1183{
1187 struct arm_smmu_device *smmu = cfg->smmu; 1184 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
1185 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
1188 int i, idx; 1186 int i, idx;
1189 1187
1190 mutex_lock(&smmu->stream_map_mutex); 1188 mutex_lock(&smmu->stream_map_mutex);
1191 for_each_cfg_sme(cfg, i, idx) { 1189 for_each_cfg_sme(fwspec, i, idx) {
1192 if (arm_smmu_free_sme(smmu, idx)) 1190 if (arm_smmu_free_sme(smmu, idx))
1193 arm_smmu_write_sme(smmu, idx); 1191 arm_smmu_write_sme(smmu, idx);
1194 cfg->smendx[i] = INVALID_SMENDX; 1192 cfg->smendx[i] = INVALID_SMENDX;
@@ -1197,7 +1195,7 @@ static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg)
1197} 1195}
1198 1196
1199static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, 1197static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
1200 struct arm_smmu_master_cfg *cfg) 1198 struct iommu_fwspec *fwspec)
1201{ 1199{
1202 struct arm_smmu_device *smmu = smmu_domain->smmu; 1200 struct arm_smmu_device *smmu = smmu_domain->smmu;
1203 struct arm_smmu_s2cr *s2cr = smmu->s2crs; 1201 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
@@ -1214,7 +1212,7 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
1214 if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA) 1212 if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
1215 type = S2CR_TYPE_BYPASS; 1213 type = S2CR_TYPE_BYPASS;
1216 1214
1217 for_each_cfg_sme(cfg, i, idx) { 1215 for_each_cfg_sme(fwspec, i, idx) {
1218 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx) 1216 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
1219 continue; 1217 continue;
1220 1218
@@ -1229,16 +1227,18 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
1229static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) 1227static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1230{ 1228{
1231 int ret; 1229 int ret;
1230 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1231 struct arm_smmu_device *smmu;
1232 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); 1232 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1233 struct arm_smmu_master_cfg *cfg = dev->archdata.iommu;
1234 1233
1235 if (!cfg) { 1234 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
1236 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n"); 1235 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1237 return -ENXIO; 1236 return -ENXIO;
1238 } 1237 }
1239 1238
1239 smmu = fwspec_smmu(fwspec);
1240 /* Ensure that the domain is finalised */ 1240 /* Ensure that the domain is finalised */
1241 ret = arm_smmu_init_domain_context(domain, cfg->smmu); 1241 ret = arm_smmu_init_domain_context(domain, smmu);
1242 if (ret < 0) 1242 if (ret < 0)
1243 return ret; 1243 return ret;
1244 1244
@@ -1246,15 +1246,15 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1246 * Sanity check the domain. We don't support domains across 1246 * Sanity check the domain. We don't support domains across
1247 * different SMMUs. 1247 * different SMMUs.
1248 */ 1248 */
1249 if (smmu_domain->smmu != cfg->smmu) { 1249 if (smmu_domain->smmu != smmu) {
1250 dev_err(dev, 1250 dev_err(dev,
1251 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n", 1251 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
1252 dev_name(smmu_domain->smmu->dev), dev_name(cfg->smmu->dev)); 1252 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
1253 return -EINVAL; 1253 return -EINVAL;
1254 } 1254 }
1255 1255
1256 /* Looks ok, so add the device to the domain */ 1256 /* Looks ok, so add the device to the domain */
1257 return arm_smmu_domain_add_master(smmu_domain, cfg); 1257 return arm_smmu_domain_add_master(smmu_domain, fwspec);
1258} 1258}
1259 1259
1260static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, 1260static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
@@ -1375,57 +1375,72 @@ static bool arm_smmu_capable(enum iommu_cap cap)
1375 1375
1376static int arm_smmu_add_device(struct device *dev) 1376static int arm_smmu_add_device(struct device *dev)
1377{ 1377{
1378 struct arm_smmu_device *smmu;
1378 struct arm_smmu_master_cfg *cfg; 1379 struct arm_smmu_master_cfg *cfg;
1380 struct iommu_fwspec *fwspec;
1379 int i, ret; 1381 int i, ret;
1380 1382
1381 ret = arm_smmu_register_legacy_master(dev); 1383 ret = arm_smmu_register_legacy_master(dev, &smmu);
1382 cfg = dev->archdata.iommu; 1384 fwspec = dev->iommu_fwspec;
1383 if (ret) 1385 if (ret)
1384 goto out_free; 1386 goto out_free;
1385 1387
1386 ret = -EINVAL; 1388 ret = -EINVAL;
1387 for (i = 0; i < cfg->num_streamids; i++) { 1389 for (i = 0; i < fwspec->num_ids; i++) {
1388 u16 sid = cfg->streamids[i]; 1390 u16 sid = fwspec->ids[i];
1389 1391
1390 if (sid & ~cfg->smmu->streamid_mask) { 1392 if (sid & ~smmu->streamid_mask) {
1391 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n", 1393 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
1392 sid, cfg->smmu->streamid_mask); 1394 sid, cfg->smmu->streamid_mask);
1393 goto out_free; 1395 goto out_free;
1394 } 1396 }
1395 cfg->smendx[i] = INVALID_SMENDX;
1396 } 1397 }
1397 1398
1399 ret = -ENOMEM;
1400 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
1401 GFP_KERNEL);
1402 if (!cfg)
1403 goto out_free;
1404
1405 cfg->smmu = smmu;
1406 fwspec->iommu_priv = cfg;
1407 while (i--)
1408 cfg->smendx[i] = INVALID_SMENDX;
1409
1398 ret = arm_smmu_master_alloc_smes(dev); 1410 ret = arm_smmu_master_alloc_smes(dev);
1399 if (!ret) 1411 if (ret)
1400 return ret; 1412 goto out_free;
1413
1414 return 0;
1401 1415
1402out_free: 1416out_free:
1403 kfree(cfg); 1417 if (fwspec)
1404 dev->archdata.iommu = NULL; 1418 kfree(fwspec->iommu_priv);
1419 iommu_fwspec_free(dev);
1405 return ret; 1420 return ret;
1406} 1421}
1407 1422
1408static void arm_smmu_remove_device(struct device *dev) 1423static void arm_smmu_remove_device(struct device *dev)
1409{ 1424{
1410 struct arm_smmu_master_cfg *cfg = dev->archdata.iommu; 1425 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1411 1426
1412 if (!cfg) 1427 if (!fwspec || fwspec->ops != &arm_smmu_ops)
1413 return; 1428 return;
1414 1429
1415 arm_smmu_master_free_smes(cfg); 1430 arm_smmu_master_free_smes(fwspec);
1416 iommu_group_remove_device(dev); 1431 iommu_group_remove_device(dev);
1417 kfree(cfg); 1432 kfree(fwspec->iommu_priv);
1418 dev->archdata.iommu = NULL; 1433 iommu_fwspec_free(dev);
1419} 1434}
1420 1435
1421static struct iommu_group *arm_smmu_device_group(struct device *dev) 1436static struct iommu_group *arm_smmu_device_group(struct device *dev)
1422{ 1437{
1423 struct arm_smmu_master_cfg *cfg = dev->archdata.iommu; 1438 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1424 struct arm_smmu_device *smmu = cfg->smmu; 1439 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
1425 struct iommu_group *group = NULL; 1440 struct iommu_group *group = NULL;
1426 int i, idx; 1441 int i, idx;
1427 1442
1428 for_each_cfg_sme(cfg, i, idx) { 1443 for_each_cfg_sme(fwspec, i, idx) {
1429 if (group && smmu->s2crs[idx].group && 1444 if (group && smmu->s2crs[idx].group &&
1430 group != smmu->s2crs[idx].group) 1445 group != smmu->s2crs[idx].group)
1431 return ERR_PTR(-EINVAL); 1446 return ERR_PTR(-EINVAL);
@@ -1936,6 +1951,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
1936 } 1951 }
1937 } 1952 }
1938 1953
1954 of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
1939 platform_set_drvdata(pdev, smmu); 1955 platform_set_drvdata(pdev, smmu);
1940 arm_smmu_device_reset(smmu); 1956 arm_smmu_device_reset(smmu);
1941 return 0; 1957 return 0;