aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/iommu/arm-smmu.c
diff options
context:
space:
mode:
author: Robin Murphy <robin.murphy@arm.com> 2016-09-14 10:26:46 -0400
committer: Will Deacon <will.deacon@arm.com> 2016-09-16 04:34:21 -0400
commit021bb8420d44cf56102d44fca9af628625e75482 (patch)
tree3a2322080bb7676f4be08e3238c33e261e1df253 /drivers/iommu/arm-smmu.c
parentd0acbb750a22bc8961c746bc9cad5937a9d9a83d (diff)
iommu/arm-smmu: Wire up generic configuration support
With everything else now in place, fill in an of_xlate callback and the appropriate registration to plumb into the generic configuration machinery, and watch everything just work.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'drivers/iommu/arm-smmu.c')
-rw-r--r--drivers/iommu/arm-smmu.c168
1 file changed, 108 insertions(+), 60 deletions(-)
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 9dbb6a37e625..fd6cc19c4ced 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -418,6 +418,8 @@ struct arm_smmu_option_prop {
418 418
419static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0); 419static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);
420 420
421static bool using_legacy_binding, using_generic_binding;
422
421static struct arm_smmu_option_prop arm_smmu_options[] = { 423static struct arm_smmu_option_prop arm_smmu_options[] = {
422 { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" }, 424 { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
423 { 0, NULL}, 425 { 0, NULL},
@@ -817,12 +819,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
817 if (smmu_domain->smmu) 819 if (smmu_domain->smmu)
818 goto out_unlock; 820 goto out_unlock;
819 821
820 /* We're bypassing these SIDs, so don't allocate an actual context */
821 if (domain->type == IOMMU_DOMAIN_DMA) {
822 smmu_domain->smmu = smmu;
823 goto out_unlock;
824 }
825
826 /* 822 /*
827 * Mapping the requested stage onto what we support is surprisingly 823 * Mapping the requested stage onto what we support is surprisingly
828 * complicated, mainly because the spec allows S1+S2 SMMUs without 824 * complicated, mainly because the spec allows S1+S2 SMMUs without
@@ -981,7 +977,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
981 void __iomem *cb_base; 977 void __iomem *cb_base;
982 int irq; 978 int irq;
983 979
984 if (!smmu || domain->type == IOMMU_DOMAIN_DMA) 980 if (!smmu)
985 return; 981 return;
986 982
987 /* 983 /*
@@ -1015,8 +1011,8 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
1015 if (!smmu_domain) 1011 if (!smmu_domain)
1016 return NULL; 1012 return NULL;
1017 1013
1018 if (type == IOMMU_DOMAIN_DMA && 1014 if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
1019 iommu_get_dma_cookie(&smmu_domain->domain)) { 1015 iommu_get_dma_cookie(&smmu_domain->domain))) {
1020 kfree(smmu_domain); 1016 kfree(smmu_domain);
1021 return NULL; 1017 return NULL;
1022 } 1018 }
@@ -1133,19 +1129,22 @@ static int arm_smmu_master_alloc_smes(struct device *dev)
1133 mutex_lock(&smmu->stream_map_mutex); 1129 mutex_lock(&smmu->stream_map_mutex);
1134 /* Figure out a viable stream map entry allocation */ 1130 /* Figure out a viable stream map entry allocation */
1135 for_each_cfg_sme(fwspec, i, idx) { 1131 for_each_cfg_sme(fwspec, i, idx) {
1132 u16 sid = fwspec->ids[i];
1133 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
1134
1136 if (idx != INVALID_SMENDX) { 1135 if (idx != INVALID_SMENDX) {
1137 ret = -EEXIST; 1136 ret = -EEXIST;
1138 goto out_err; 1137 goto out_err;
1139 } 1138 }
1140 1139
1141 ret = arm_smmu_find_sme(smmu, fwspec->ids[i], 0); 1140 ret = arm_smmu_find_sme(smmu, sid, mask);
1142 if (ret < 0) 1141 if (ret < 0)
1143 goto out_err; 1142 goto out_err;
1144 1143
1145 idx = ret; 1144 idx = ret;
1146 if (smrs && smmu->s2crs[idx].count == 0) { 1145 if (smrs && smmu->s2crs[idx].count == 0) {
1147 smrs[idx].id = fwspec->ids[i]; 1146 smrs[idx].id = sid;
1148 smrs[idx].mask = 0; /* We don't currently share SMRs */ 1147 smrs[idx].mask = mask;
1149 smrs[idx].valid = true; 1148 smrs[idx].valid = true;
1150 } 1149 }
1151 smmu->s2crs[idx].count++; 1150 smmu->s2crs[idx].count++;
@@ -1203,15 +1202,6 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
1203 u8 cbndx = smmu_domain->cfg.cbndx; 1202 u8 cbndx = smmu_domain->cfg.cbndx;
1204 int i, idx; 1203 int i, idx;
1205 1204
1206 /*
1207 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
1208 * for all devices behind the SMMU. Note that we need to take
1209 * care configuring SMRs for devices both a platform_device and
1210 * and a PCI device (i.e. a PCI host controller)
1211 */
1212 if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
1213 type = S2CR_TYPE_BYPASS;
1214
1215 for_each_cfg_sme(fwspec, i, idx) { 1205 for_each_cfg_sme(fwspec, i, idx) {
1216 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx) 1206 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
1217 continue; 1207 continue;
@@ -1373,25 +1363,50 @@ static bool arm_smmu_capable(enum iommu_cap cap)
1373 } 1363 }
1374} 1364}
1375 1365
1366static int arm_smmu_match_node(struct device *dev, void *data)
1367{
1368 return dev->of_node == data;
1369}
1370
1371static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
1372{
1373 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
1374 np, arm_smmu_match_node);
1375 put_device(dev);
1376 return dev ? dev_get_drvdata(dev) : NULL;
1377}
1378
1376static int arm_smmu_add_device(struct device *dev) 1379static int arm_smmu_add_device(struct device *dev)
1377{ 1380{
1378 struct arm_smmu_device *smmu; 1381 struct arm_smmu_device *smmu;
1379 struct arm_smmu_master_cfg *cfg; 1382 struct arm_smmu_master_cfg *cfg;
1380 struct iommu_fwspec *fwspec; 1383 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1381 int i, ret; 1384 int i, ret;
1382 1385
1383 ret = arm_smmu_register_legacy_master(dev, &smmu); 1386 if (using_legacy_binding) {
1384 fwspec = dev->iommu_fwspec; 1387 ret = arm_smmu_register_legacy_master(dev, &smmu);
1385 if (ret) 1388 fwspec = dev->iommu_fwspec;
1386 goto out_free; 1389 if (ret)
1390 goto out_free;
1391 } else if (fwspec) {
1392 smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
1393 } else {
1394 return -ENODEV;
1395 }
1387 1396
1388 ret = -EINVAL; 1397 ret = -EINVAL;
1389 for (i = 0; i < fwspec->num_ids; i++) { 1398 for (i = 0; i < fwspec->num_ids; i++) {
1390 u16 sid = fwspec->ids[i]; 1399 u16 sid = fwspec->ids[i];
1400 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
1391 1401
1392 if (sid & ~smmu->streamid_mask) { 1402 if (sid & ~smmu->streamid_mask) {
1393 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n", 1403 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
1394 sid, cfg->smmu->streamid_mask); 1404 sid, smmu->streamid_mask);
1405 goto out_free;
1406 }
1407 if (mask & ~smmu->smr_mask_mask) {
1408 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
1409 sid, smmu->smr_mask_mask);
1395 goto out_free; 1410 goto out_free;
1396 } 1411 }
1397 } 1412 }
@@ -1503,6 +1518,19 @@ out_unlock:
1503 return ret; 1518 return ret;
1504} 1519}
1505 1520
1521static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
1522{
1523 u32 fwid = 0;
1524
1525 if (args->args_count > 0)
1526 fwid |= (u16)args->args[0];
1527
1528 if (args->args_count > 1)
1529 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
1530
1531 return iommu_fwspec_add_ids(dev, &fwid, 1);
1532}
1533
1506static struct iommu_ops arm_smmu_ops = { 1534static struct iommu_ops arm_smmu_ops = {
1507 .capable = arm_smmu_capable, 1535 .capable = arm_smmu_capable,
1508 .domain_alloc = arm_smmu_domain_alloc, 1536 .domain_alloc = arm_smmu_domain_alloc,
@@ -1517,6 +1545,7 @@ static struct iommu_ops arm_smmu_ops = {
1517 .device_group = arm_smmu_device_group, 1545 .device_group = arm_smmu_device_group,
1518 .domain_get_attr = arm_smmu_domain_get_attr, 1546 .domain_get_attr = arm_smmu_domain_get_attr,
1519 .domain_set_attr = arm_smmu_domain_set_attr, 1547 .domain_set_attr = arm_smmu_domain_set_attr,
1548 .of_xlate = arm_smmu_of_xlate,
1520 .pgsize_bitmap = -1UL, /* Restricted during device attach */ 1549 .pgsize_bitmap = -1UL, /* Restricted during device attach */
1521}; 1550};
1522 1551
@@ -1870,6 +1899,19 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
1870 struct arm_smmu_device *smmu; 1899 struct arm_smmu_device *smmu;
1871 struct device *dev = &pdev->dev; 1900 struct device *dev = &pdev->dev;
1872 int num_irqs, i, err; 1901 int num_irqs, i, err;
1902 bool legacy_binding;
1903
1904 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
1905 if (legacy_binding && !using_generic_binding) {
1906 if (!using_legacy_binding)
1907 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
1908 using_legacy_binding = true;
1909 } else if (!legacy_binding && !using_legacy_binding) {
1910 using_generic_binding = true;
1911 } else {
1912 dev_err(dev, "not probing due to mismatched DT properties\n");
1913 return -ENODEV;
1914 }
1873 1915
1874 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); 1916 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
1875 if (!smmu) { 1917 if (!smmu) {
@@ -1954,6 +1996,20 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
1954 of_iommu_set_ops(dev->of_node, &arm_smmu_ops); 1996 of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
1955 platform_set_drvdata(pdev, smmu); 1997 platform_set_drvdata(pdev, smmu);
1956 arm_smmu_device_reset(smmu); 1998 arm_smmu_device_reset(smmu);
1999
2000 /* Oh, for a proper bus abstraction */
2001 if (!iommu_present(&platform_bus_type))
2002 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
2003#ifdef CONFIG_ARM_AMBA
2004 if (!iommu_present(&amba_bustype))
2005 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
2006#endif
2007#ifdef CONFIG_PCI
2008 if (!iommu_present(&pci_bus_type)) {
2009 pci_request_acs();
2010 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
2011 }
2012#endif
1957 return 0; 2013 return 0;
1958} 2014}
1959 2015
@@ -1983,41 +2039,14 @@ static struct platform_driver arm_smmu_driver = {
1983 2039
1984static int __init arm_smmu_init(void) 2040static int __init arm_smmu_init(void)
1985{ 2041{
1986 struct device_node *np; 2042 static bool registered;
1987 int ret; 2043 int ret = 0;
1988
1989 /*
1990 * Play nice with systems that don't have an ARM SMMU by checking that
1991 * an ARM SMMU exists in the system before proceeding with the driver
1992 * and IOMMU bus operation registration.
1993 */
1994 np = of_find_matching_node(NULL, arm_smmu_of_match);
1995 if (!np)
1996 return 0;
1997
1998 of_node_put(np);
1999
2000 ret = platform_driver_register(&arm_smmu_driver);
2001 if (ret)
2002 return ret;
2003
2004 /* Oh, for a proper bus abstraction */
2005 if (!iommu_present(&platform_bus_type))
2006 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
2007
2008#ifdef CONFIG_ARM_AMBA
2009 if (!iommu_present(&amba_bustype))
2010 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
2011#endif
2012 2044
2013#ifdef CONFIG_PCI 2045 if (!registered) {
2014 if (!iommu_present(&pci_bus_type)) { 2046 ret = platform_driver_register(&arm_smmu_driver);
2015 pci_request_acs(); 2047 registered = !ret;
2016 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
2017 } 2048 }
2018#endif 2049 return ret;
2019
2020 return 0;
2021} 2050}
2022 2051
2023static void __exit arm_smmu_exit(void) 2052static void __exit arm_smmu_exit(void)
@@ -2028,6 +2057,25 @@ static void __exit arm_smmu_exit(void)
2028subsys_initcall(arm_smmu_init); 2057subsys_initcall(arm_smmu_init);
2029module_exit(arm_smmu_exit); 2058module_exit(arm_smmu_exit);
2030 2059
2060static int __init arm_smmu_of_init(struct device_node *np)
2061{
2062 int ret = arm_smmu_init();
2063
2064 if (ret)
2065 return ret;
2066
2067 if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
2068 return -ENODEV;
2069
2070 return 0;
2071}
2072IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
2073IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
2074IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
2075IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
2076IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
2077IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
2078
2031MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations"); 2079MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
2032MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>"); 2080MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
2033MODULE_LICENSE("GPL v2"); 2081MODULE_LICENSE("GPL v2");