about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorRobin Murphy <robin.murphy@arm.com>2016-09-14 10:21:39 -0400
committerWill Deacon <will.deacon@arm.com>2016-09-16 04:34:19 -0400
commitf80cd885fcdd05dff769d62a116313927a03d480 (patch)
tree1fdf9a87fac9fbce98a107cdf64ccfbed0e38920
parent8e8b203eabd8b9e96d02d6339e4abce3e5a7ea4b (diff)
iommu/arm-smmu: Refactor mmu-masters handling
To be able to support the generic bindings and handle of_xlate() calls, we need to be able to associate SMMUs and stream IDs directly with devices *before* allocating IOMMU groups. Furthermore, to support real default domains with multi-device groups we also have to handle domain attach on a per-device basis, as the "whole group at a time" assumption fails to properly handle subsequent devices added to a group after the first has already triggered default domain creation and attachment.

To that end, use the now-vacant dev->archdata.iommu field for easy config and SMMU instance lookup, and unify config management by chopping down the platform-device-specific tree and probing the "mmu-masters" property on-demand instead. This may add a bit of one-off overhead to initially adding a new device, but we're about to deprecate that binding in favour of the inherently-more-efficient generic ones anyway.

For the sake of simplicity, this patch does temporarily regress the case of aliasing PCI devices by losing the duplicate stream ID detection that the previous per-group config had. Stay tuned, because we'll be back to fix that in a better and more general way momentarily...

Tested-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r--drivers/iommu/arm-smmu.c370
1 files changed, 101 insertions, 269 deletions
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 69b6cab65421..2023a77015a0 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -317,18 +317,13 @@ struct arm_smmu_smr {
317}; 317};
318 318
319struct arm_smmu_master_cfg { 319struct arm_smmu_master_cfg {
320 struct arm_smmu_device *smmu;
320 int num_streamids; 321 int num_streamids;
321 u16 streamids[MAX_MASTER_STREAMIDS]; 322 u16 streamids[MAX_MASTER_STREAMIDS];
322 s16 smendx[MAX_MASTER_STREAMIDS]; 323 s16 smendx[MAX_MASTER_STREAMIDS];
323}; 324};
324#define INVALID_SMENDX -1 325#define INVALID_SMENDX -1
325 326
326struct arm_smmu_master {
327 struct device_node *of_node;
328 struct rb_node node;
329 struct arm_smmu_master_cfg cfg;
330};
331
332struct arm_smmu_device { 327struct arm_smmu_device {
333 struct device *dev; 328 struct device *dev;
334 329
@@ -376,7 +371,6 @@ struct arm_smmu_device {
376 unsigned int *irqs; 371 unsigned int *irqs;
377 372
378 struct list_head list; 373 struct list_head list;
379 struct rb_root masters;
380 374
381 u32 cavium_id_base; /* Specific to Cavium */ 375 u32 cavium_id_base; /* Specific to Cavium */
382}; 376};
@@ -415,12 +409,6 @@ struct arm_smmu_domain {
415 struct iommu_domain domain; 409 struct iommu_domain domain;
416}; 410};
417 411
418struct arm_smmu_phandle_args {
419 struct device_node *np;
420 int args_count;
421 uint32_t args[MAX_MASTER_STREAMIDS];
422};
423
424static DEFINE_SPINLOCK(arm_smmu_devices_lock); 412static DEFINE_SPINLOCK(arm_smmu_devices_lock);
425static LIST_HEAD(arm_smmu_devices); 413static LIST_HEAD(arm_smmu_devices);
426 414
@@ -462,132 +450,89 @@ static struct device_node *dev_get_dev_node(struct device *dev)
462 450
463 while (!pci_is_root_bus(bus)) 451 while (!pci_is_root_bus(bus))
464 bus = bus->parent; 452 bus = bus->parent;
465 return bus->bridge->parent->of_node; 453 return of_node_get(bus->bridge->parent->of_node);
466 } 454 }
467 455
468 return dev->of_node; 456 return of_node_get(dev->of_node);
469} 457}
470 458
471static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu, 459static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
472 struct device_node *dev_node)
473{ 460{
474 struct rb_node *node = smmu->masters.rb_node; 461 *((__be32 *)data) = cpu_to_be32(alias);
475 462 return 0; /* Continue walking */
476 while (node) {
477 struct arm_smmu_master *master;
478
479 master = container_of(node, struct arm_smmu_master, node);
480
481 if (dev_node < master->of_node)
482 node = node->rb_left;
483 else if (dev_node > master->of_node)
484 node = node->rb_right;
485 else
486 return master;
487 }
488
489 return NULL;
490} 463}
491 464
492static struct arm_smmu_master_cfg * 465static int __find_legacy_master_phandle(struct device *dev, void *data)
493find_smmu_master_cfg(struct device *dev)
494{ 466{
495 struct arm_smmu_master_cfg *cfg = NULL; 467 struct of_phandle_iterator *it = *(void **)data;
496 struct iommu_group *group = iommu_group_get(dev); 468 struct device_node *np = it->node;
497 469 int err;
498 if (group) { 470
499 cfg = iommu_group_get_iommudata(group); 471 of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
500 iommu_group_put(group); 472 "#stream-id-cells", 0)
501 } 473 if (it->node == np) {
502 474 *(void **)data = dev;
503 return cfg; 475 return 1;
476 }
477 it->node = np;
478 return err == -ENOENT ? 0 : err;
504} 479}
505 480
506static int insert_smmu_master(struct arm_smmu_device *smmu, 481static int arm_smmu_register_legacy_master(struct device *dev)
507 struct arm_smmu_master *master)
508{ 482{
509 struct rb_node **new, *parent; 483 struct arm_smmu_device *smmu;
510 484 struct arm_smmu_master_cfg *cfg;
511 new = &smmu->masters.rb_node; 485 struct device_node *np;
512 parent = NULL; 486 struct of_phandle_iterator it;
513 while (*new) { 487 void *data = &it;
514 struct arm_smmu_master *this 488 __be32 pci_sid;
515 = container_of(*new, struct arm_smmu_master, node); 489 int err;
516
517 parent = *new;
518 if (master->of_node < this->of_node)
519 new = &((*new)->rb_left);
520 else if (master->of_node > this->of_node)
521 new = &((*new)->rb_right);
522 else
523 return -EEXIST;
524 }
525
526 rb_link_node(&master->node, parent, new);
527 rb_insert_color(&master->node, &smmu->masters);
528 return 0;
529}
530 490
531static int register_smmu_master(struct arm_smmu_device *smmu, 491 np = dev_get_dev_node(dev);
532 struct device *dev, 492 if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
533 struct arm_smmu_phandle_args *masterspec) 493 of_node_put(np);
534{ 494 return -ENODEV;
535 int i; 495 }
536 struct arm_smmu_master *master;
537 496
538 master = find_smmu_master(smmu, masterspec->np); 497 it.node = np;
539 if (master) { 498 spin_lock(&arm_smmu_devices_lock);
540 dev_err(dev, 499 list_for_each_entry(smmu, &arm_smmu_devices, list) {
541 "rejecting multiple registrations for master device %s\n", 500 err = __find_legacy_master_phandle(smmu->dev, &data);
542 masterspec->np->name); 501 if (err)
543 return -EBUSY; 502 break;
544 } 503 }
504 spin_unlock(&arm_smmu_devices_lock);
505 of_node_put(np);
506 if (err == 0)
507 return -ENODEV;
508 if (err < 0)
509 return err;
545 510
546 if (masterspec->args_count > MAX_MASTER_STREAMIDS) { 511 if (it.cur_count > MAX_MASTER_STREAMIDS) {
547 dev_err(dev, 512 dev_err(smmu->dev,
548 "reached maximum number (%d) of stream IDs for master device %s\n", 513 "reached maximum number (%d) of stream IDs for master device %s\n",
549 MAX_MASTER_STREAMIDS, masterspec->np->name); 514 MAX_MASTER_STREAMIDS, dev_name(dev));
550 return -ENOSPC; 515 return -ENOSPC;
551 } 516 }
517 if (dev_is_pci(dev)) {
518 /* "mmu-masters" assumes Stream ID == Requester ID */
519 pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
520 &pci_sid);
521 it.cur = &pci_sid;
522 it.cur_count = 1;
523 }
552 524
553 master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL); 525 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
554 if (!master) 526 if (!cfg)
555 return -ENOMEM; 527 return -ENOMEM;
556 528
557 master->of_node = masterspec->np; 529 cfg->smmu = smmu;
558 master->cfg.num_streamids = masterspec->args_count; 530 dev->archdata.iommu = cfg;
559
560 for (i = 0; i < master->cfg.num_streamids; ++i) {
561 u16 streamid = masterspec->args[i];
562
563 if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
564 (streamid >= smmu->num_mapping_groups)) {
565 dev_err(dev,
566 "stream ID for master device %s greater than maximum allowed (%d)\n",
567 masterspec->np->name, smmu->num_mapping_groups);
568 return -ERANGE;
569 }
570 master->cfg.streamids[i] = streamid;
571 master->cfg.smendx[i] = INVALID_SMENDX;
572 }
573 return insert_smmu_master(smmu, master);
574}
575
576static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
577{
578 struct arm_smmu_device *smmu;
579 struct arm_smmu_master *master = NULL;
580 struct device_node *dev_node = dev_get_dev_node(dev);
581 531
582 spin_lock(&arm_smmu_devices_lock); 532 while (it.cur_count--)
583 list_for_each_entry(smmu, &arm_smmu_devices, list) { 533 cfg->streamids[cfg->num_streamids++] = be32_to_cpup(it.cur++);
584 master = find_smmu_master(smmu, dev_node);
585 if (master)
586 break;
587 }
588 spin_unlock(&arm_smmu_devices_lock);
589 534
590 return master ? smmu : NULL; 535 return 0;
591} 536}
592 537
593static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end) 538static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
@@ -1119,8 +1064,7 @@ static void arm_smmu_free_smr(struct arm_smmu_device *smmu, int idx)
1119static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx) 1064static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
1120{ 1065{
1121 struct arm_smmu_smr *smr = smmu->smrs + idx; 1066 struct arm_smmu_smr *smr = smmu->smrs + idx;
1122 u32 reg = (smr->id & smmu->streamid_mask) << SMR_ID_SHIFT | 1067 u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
1123 (smr->mask & smmu->smr_mask_mask) << SMR_MASK_SHIFT;
1124 1068
1125 if (smr->valid) 1069 if (smr->valid)
1126 reg |= SMR_VALID; 1070 reg |= SMR_VALID;
@@ -1189,9 +1133,9 @@ err_free_smrs:
1189 return -ENOSPC; 1133 return -ENOSPC;
1190} 1134}
1191 1135
1192static void arm_smmu_master_free_smes(struct arm_smmu_device *smmu, 1136static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg)
1193 struct arm_smmu_master_cfg *cfg)
1194{ 1137{
1138 struct arm_smmu_device *smmu = cfg->smmu;
1195 int i; 1139 int i;
1196 1140
1197 /* 1141 /*
@@ -1262,17 +1206,15 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1262{ 1206{
1263 int ret; 1207 int ret;
1264 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); 1208 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1265 struct arm_smmu_device *smmu; 1209 struct arm_smmu_master_cfg *cfg = dev->archdata.iommu;
1266 struct arm_smmu_master_cfg *cfg;
1267 1210
1268 smmu = find_smmu_for_device(dev); 1211 if (!cfg) {
1269 if (!smmu) {
1270 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n"); 1212 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1271 return -ENXIO; 1213 return -ENXIO;
1272 } 1214 }
1273 1215
1274 /* Ensure that the domain is finalised */ 1216 /* Ensure that the domain is finalised */
1275 ret = arm_smmu_init_domain_context(domain, smmu); 1217 ret = arm_smmu_init_domain_context(domain, cfg->smmu);
1276 if (ret < 0) 1218 if (ret < 0)
1277 return ret; 1219 return ret;
1278 1220
@@ -1280,18 +1222,14 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1280 * Sanity check the domain. We don't support domains across 1222 * Sanity check the domain. We don't support domains across
1281 * different SMMUs. 1223 * different SMMUs.
1282 */ 1224 */
1283 if (smmu_domain->smmu != smmu) { 1225 if (smmu_domain->smmu != cfg->smmu) {
1284 dev_err(dev, 1226 dev_err(dev,
1285 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n", 1227 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
1286 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev)); 1228 dev_name(smmu_domain->smmu->dev), dev_name(cfg->smmu->dev));
1287 return -EINVAL; 1229 return -EINVAL;
1288 } 1230 }
1289 1231
1290 /* Looks ok, so add the device to the domain */ 1232 /* Looks ok, so add the device to the domain */
1291 cfg = find_smmu_master_cfg(dev);
1292 if (!cfg)
1293 return -ENODEV;
1294
1295 return arm_smmu_domain_add_master(smmu_domain, cfg); 1233 return arm_smmu_domain_add_master(smmu_domain, cfg);
1296} 1234}
1297 1235
@@ -1411,120 +1349,65 @@ static bool arm_smmu_capable(enum iommu_cap cap)
1411 } 1349 }
1412} 1350}
1413 1351
1414static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data) 1352static int arm_smmu_add_device(struct device *dev)
1415{
1416 *((u16 *)data) = alias;
1417 return 0; /* Continue walking */
1418}
1419
1420static void __arm_smmu_release_pci_iommudata(void *data)
1421{
1422 kfree(data);
1423}
1424
1425static int arm_smmu_init_pci_device(struct pci_dev *pdev,
1426 struct iommu_group *group)
1427{ 1353{
1428 struct arm_smmu_master_cfg *cfg; 1354 struct arm_smmu_master_cfg *cfg;
1429 u16 sid; 1355 struct iommu_group *group;
1430 int i; 1356 int i, ret;
1431
1432 cfg = iommu_group_get_iommudata(group);
1433 if (!cfg) {
1434 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
1435 if (!cfg)
1436 return -ENOMEM;
1437
1438 iommu_group_set_iommudata(group, cfg,
1439 __arm_smmu_release_pci_iommudata);
1440 }
1441 1357
1442 if (cfg->num_streamids >= MAX_MASTER_STREAMIDS) 1358 ret = arm_smmu_register_legacy_master(dev);
1443 return -ENOSPC; 1359 cfg = dev->archdata.iommu;
1360 if (ret)
1361 goto out_free;
1444 1362
1445 /* 1363 ret = -EINVAL;
1446 * Assume Stream ID == Requester ID for now. 1364 for (i = 0; i < cfg->num_streamids; i++) {
1447 * We need a way to describe the ID mappings in FDT. 1365 u16 sid = cfg->streamids[i];
1448 */
1449 pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
1450 for (i = 0; i < cfg->num_streamids; ++i)
1451 if (cfg->streamids[i] == sid)
1452 break;
1453 1366
1454 /* Avoid duplicate SIDs, as this can lead to SMR conflicts */ 1367 if (sid & ~cfg->smmu->streamid_mask) {
1455 if (i == cfg->num_streamids) { 1368 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
1456 cfg->streamids[i] = sid; 1369 sid, cfg->smmu->streamid_mask);
1370 goto out_free;
1371 }
1457 cfg->smendx[i] = INVALID_SMENDX; 1372 cfg->smendx[i] = INVALID_SMENDX;
1458 cfg->num_streamids++;
1459 } 1373 }
1460 1374
1461 return 0;
1462}
1463
1464static int arm_smmu_init_platform_device(struct device *dev,
1465 struct iommu_group *group)
1466{
1467 struct arm_smmu_device *smmu = find_smmu_for_device(dev);
1468 struct arm_smmu_master *master;
1469
1470 if (!smmu)
1471 return -ENODEV;
1472
1473 master = find_smmu_master(smmu, dev->of_node);
1474 if (!master)
1475 return -ENODEV;
1476
1477 iommu_group_set_iommudata(group, &master->cfg, NULL);
1478
1479 return 0;
1480}
1481
1482static int arm_smmu_add_device(struct device *dev)
1483{
1484 struct iommu_group *group;
1485
1486 group = iommu_group_get_for_dev(dev); 1375 group = iommu_group_get_for_dev(dev);
1487 if (IS_ERR(group)) 1376 if (IS_ERR(group)) {
1488 return PTR_ERR(group); 1377 ret = PTR_ERR(group);
1489 1378 goto out_free;
1379 }
1490 iommu_group_put(group); 1380 iommu_group_put(group);
1491 return 0; 1381 return 0;
1382
1383out_free:
1384 kfree(cfg);
1385 dev->archdata.iommu = NULL;
1386 return ret;
1492} 1387}
1493 1388
1494static void arm_smmu_remove_device(struct device *dev) 1389static void arm_smmu_remove_device(struct device *dev)
1495{ 1390{
1496 struct arm_smmu_device *smmu = find_smmu_for_device(dev); 1391 struct arm_smmu_master_cfg *cfg = dev->archdata.iommu;
1497 struct arm_smmu_master_cfg *cfg = find_smmu_master_cfg(dev);
1498 1392
1499 if (smmu && cfg) 1393 if (!cfg)
1500 arm_smmu_master_free_smes(smmu, cfg); 1394 return;
1501 1395
1396 arm_smmu_master_free_smes(cfg);
1502 iommu_group_remove_device(dev); 1397 iommu_group_remove_device(dev);
1398 kfree(cfg);
1399 dev->archdata.iommu = NULL;
1503} 1400}
1504 1401
1505static struct iommu_group *arm_smmu_device_group(struct device *dev) 1402static struct iommu_group *arm_smmu_device_group(struct device *dev)
1506{ 1403{
1507 struct iommu_group *group; 1404 struct iommu_group *group;
1508 int ret;
1509 1405
1510 if (dev_is_pci(dev)) 1406 if (dev_is_pci(dev))
1511 group = pci_device_group(dev); 1407 group = pci_device_group(dev);
1512 else 1408 else
1513 group = generic_device_group(dev); 1409 group = generic_device_group(dev);
1514 1410
1515 if (IS_ERR(group))
1516 return group;
1517
1518 if (dev_is_pci(dev))
1519 ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
1520 else
1521 ret = arm_smmu_init_platform_device(dev, group);
1522
1523 if (ret) {
1524 iommu_group_put(group);
1525 group = ERR_PTR(ret);
1526 }
1527
1528 return group; 1411 return group;
1529} 1412}
1530 1413
@@ -1938,9 +1821,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
1938 struct resource *res; 1821 struct resource *res;
1939 struct arm_smmu_device *smmu; 1822 struct arm_smmu_device *smmu;
1940 struct device *dev = &pdev->dev; 1823 struct device *dev = &pdev->dev;
1941 struct rb_node *node;
1942 struct of_phandle_iterator it;
1943 struct arm_smmu_phandle_args *masterspec;
1944 int num_irqs, i, err; 1824 int num_irqs, i, err;
1945 1825
1946 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); 1826 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
@@ -2001,37 +1881,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
2001 if (err) 1881 if (err)
2002 return err; 1882 return err;
2003 1883
2004 i = 0;
2005 smmu->masters = RB_ROOT;
2006
2007 err = -ENOMEM;
2008 /* No need to zero the memory for masterspec */
2009 masterspec = kmalloc(sizeof(*masterspec), GFP_KERNEL);
2010 if (!masterspec)
2011 goto out_put_masters;
2012
2013 of_for_each_phandle(&it, err, dev->of_node,
2014 "mmu-masters", "#stream-id-cells", 0) {
2015 int count = of_phandle_iterator_args(&it, masterspec->args,
2016 MAX_MASTER_STREAMIDS);
2017 masterspec->np = of_node_get(it.node);
2018 masterspec->args_count = count;
2019
2020 err = register_smmu_master(smmu, dev, masterspec);
2021 if (err) {
2022 dev_err(dev, "failed to add master %s\n",
2023 masterspec->np->name);
2024 kfree(masterspec);
2025 goto out_put_masters;
2026 }
2027
2028 i++;
2029 }
2030
2031 dev_notice(dev, "registered %d master devices\n", i);
2032
2033 kfree(masterspec);
2034
2035 parse_driver_options(smmu); 1884 parse_driver_options(smmu);
2036 1885
2037 if (smmu->version == ARM_SMMU_V2 && 1886 if (smmu->version == ARM_SMMU_V2 &&
@@ -2039,8 +1888,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
2039 dev_err(dev, 1888 dev_err(dev,
2040 "found only %d context interrupt(s) but %d required\n", 1889 "found only %d context interrupt(s) but %d required\n",
2041 smmu->num_context_irqs, smmu->num_context_banks); 1890 smmu->num_context_irqs, smmu->num_context_banks);
2042 err = -ENODEV; 1891 return -ENODEV;
2043 goto out_put_masters;
2044 } 1892 }
2045 1893
2046 for (i = 0; i < smmu->num_global_irqs; ++i) { 1894 for (i = 0; i < smmu->num_global_irqs; ++i) {
@@ -2052,7 +1900,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
2052 if (err) { 1900 if (err) {
2053 dev_err(dev, "failed to request global IRQ %d (%u)\n", 1901 dev_err(dev, "failed to request global IRQ %d (%u)\n",
2054 i, smmu->irqs[i]); 1902 i, smmu->irqs[i]);
2055 goto out_put_masters; 1903 return err;
2056 } 1904 }
2057 } 1905 }
2058 1906
@@ -2063,22 +1911,12 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
2063 1911
2064 arm_smmu_device_reset(smmu); 1912 arm_smmu_device_reset(smmu);
2065 return 0; 1913 return 0;
2066
2067out_put_masters:
2068 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
2069 struct arm_smmu_master *master
2070 = container_of(node, struct arm_smmu_master, node);
2071 of_node_put(master->of_node);
2072 }
2073
2074 return err;
2075} 1914}
2076 1915
2077static int arm_smmu_device_remove(struct platform_device *pdev) 1916static int arm_smmu_device_remove(struct platform_device *pdev)
2078{ 1917{
2079 struct device *dev = &pdev->dev; 1918 struct device *dev = &pdev->dev;
2080 struct arm_smmu_device *curr, *smmu = NULL; 1919 struct arm_smmu_device *curr, *smmu = NULL;
2081 struct rb_node *node;
2082 1920
2083 spin_lock(&arm_smmu_devices_lock); 1921 spin_lock(&arm_smmu_devices_lock);
2084 list_for_each_entry(curr, &arm_smmu_devices, list) { 1922 list_for_each_entry(curr, &arm_smmu_devices, list) {
@@ -2093,12 +1931,6 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
2093 if (!smmu) 1931 if (!smmu)
2094 return -ENODEV; 1932 return -ENODEV;
2095 1933
2096 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
2097 struct arm_smmu_master *master
2098 = container_of(node, struct arm_smmu_master, node);
2099 of_node_put(master->of_node);
2100 }
2101
2102 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS)) 1934 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
2103 dev_err(dev, "removing device with active domains!\n"); 1935 dev_err(dev, "removing device with active domains!\n");
2104 1936