-rw-r--r--  drivers/iommu/arm-smmu.c  108
1 file changed, 98 insertions(+), 10 deletions(-)
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 602b67d4f2d6..1917d214c4d9 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -270,6 +270,20 @@ static struct arm_smmu_option_prop arm_smmu_options[] = {
 	{ 0, NULL},
 };
 
+static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
+{
+	if (pm_runtime_enabled(smmu->dev))
+		return pm_runtime_get_sync(smmu->dev);
+
+	return 0;
+}
+
+static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
+{
+	if (pm_runtime_enabled(smmu->dev))
+		pm_runtime_put(smmu->dev);
+}
+
 static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
 {
 	return container_of(dom, struct arm_smmu_domain, domain);
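Note: the two helpers above only touch runtime PM when it has actually been enabled for the SMMU, so they are cheap no-ops otherwise. The caller pattern used throughout the rest of this patch looks roughly like the sketch below (illustrative only, not part of the change; example_smmu_op is a hypothetical name). pm_runtime_get_sync() returns a negative errno on failure and 0 or 1 on success, which is why callers only treat ret < 0 as an error.

static int example_smmu_op(struct arm_smmu_device *smmu)
{
	int ret;

	/* Power the SMMU up (clocks/power domain) if runtime PM is in use */
	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return ret;

	/* ... safe to touch SMMU registers here ... */

	/* Drop the reference so the SMMU may runtime-suspend again */
	arm_smmu_rpm_put(smmu);

	return 0;
}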
@@ -929,11 +943,15 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
-	int irq;
+	int ret, irq;
 
 	if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
 		return;
 
+	ret = arm_smmu_rpm_get(smmu);
+	if (ret < 0)
+		return;
+
 	/*
 	 * Disable the context bank and free the page tables before freeing
 	 * it.
@@ -948,6 +966,8 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
 
 	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
 	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
+
+	arm_smmu_rpm_put(smmu);
 }
 
 static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
@@ -1229,10 +1249,15 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 		return -ENODEV;
 
 	smmu = fwspec_smmu(fwspec);
+
+	ret = arm_smmu_rpm_get(smmu);
+	if (ret < 0)
+		return ret;
+
 	/* Ensure that the domain is finalised */
 	ret = arm_smmu_init_domain_context(domain, smmu);
 	if (ret < 0)
-		return ret;
+		goto rpm_put;
 
 	/*
 	 * Sanity check the domain. We don't support domains across
@@ -1242,49 +1267,74 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 		dev_err(dev,
 			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
 			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
-		return -EINVAL;
+		ret = -EINVAL;
+		goto rpm_put;
 	}
 
 	/* Looks ok, so add the device to the domain */
-	return arm_smmu_domain_add_master(smmu_domain, fwspec);
+	ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
+
+rpm_put:
+	arm_smmu_rpm_put(smmu);
+	return ret;
 }
 
 static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
 			phys_addr_t paddr, size_t size, int prot)
 {
 	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
+	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
+	int ret;
 
 	if (!ops)
 		return -ENODEV;
 
-	return ops->map(ops, iova, paddr, size, prot);
+	arm_smmu_rpm_get(smmu);
+	ret = ops->map(ops, iova, paddr, size, prot);
+	arm_smmu_rpm_put(smmu);
+
+	return ret;
 }
 
 static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
 			     size_t size)
 {
 	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
+	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
+	size_t ret;
 
 	if (!ops)
 		return 0;
 
-	return ops->unmap(ops, iova, size);
+	arm_smmu_rpm_get(smmu);
+	ret = ops->unmap(ops, iova, size);
+	arm_smmu_rpm_put(smmu);
+
+	return ret;
 }
 
 static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
 {
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
 
-	if (smmu_domain->tlb_ops)
+	if (smmu_domain->tlb_ops) {
+		arm_smmu_rpm_get(smmu);
 		smmu_domain->tlb_ops->tlb_flush_all(smmu_domain);
+		arm_smmu_rpm_put(smmu);
+	}
 }
 
 static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
 {
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
 
-	if (smmu_domain->tlb_ops)
+	if (smmu_domain->tlb_ops) {
+		arm_smmu_rpm_get(smmu);
 		smmu_domain->tlb_ops->tlb_sync(smmu_domain);
+		arm_smmu_rpm_put(smmu);
+	}
 }
 
 static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
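Note: with the map/unmap and TLB paths wrapped as above, a master driver no longer needs to keep the SMMU powered itself; a mapping request made while the SMMU is runtime-suspended powers it up around the page-table update. A minimal usage sketch from a hypothetical master driver's point of view, using the generic IOMMU API of this kernel generation (example_master_map_buffer is an assumed name, not from this patch):

#include <linux/iommu.h>

/* The SMMU's power state is handled transparently inside
 * arm_smmu_map()/arm_smmu_unmap() via the rpm helpers. */
static int example_master_map_buffer(struct iommu_domain *domain,
				     unsigned long iova, phys_addr_t pa,
				     size_t size)
{
	return iommu_map(domain, iova, pa, size, IOMMU_READ | IOMMU_WRITE);
}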
@@ -1299,6 +1349,11 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
 	u32 tmp;
 	u64 phys;
 	unsigned long va, flags;
+	int ret;
+
+	ret = arm_smmu_rpm_get(smmu);
+	if (ret < 0)
+		return 0;
 
 	cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
 
@@ -1327,6 +1382,8 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
 		return 0;
 	}
 
+	arm_smmu_rpm_put(smmu);
+
 	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
 }
 
@@ -1431,7 +1488,13 @@ static int arm_smmu_add_device(struct device *dev)
 	while (i--)
 		cfg->smendx[i] = INVALID_SMENDX;
 
+	ret = arm_smmu_rpm_get(smmu);
+	if (ret < 0)
+		goto out_cfg_free;
+
 	ret = arm_smmu_master_alloc_smes(dev);
+	arm_smmu_rpm_put(smmu);
+
 	if (ret)
 		goto out_cfg_free;
 
@@ -1451,7 +1514,7 @@ static void arm_smmu_remove_device(struct device *dev)
 	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
 	struct arm_smmu_master_cfg *cfg;
 	struct arm_smmu_device *smmu;
-
+	int ret;
 
 	if (!fwspec || fwspec->ops != &arm_smmu_ops)
 		return;
@@ -1459,8 +1522,15 @@ static void arm_smmu_remove_device(struct device *dev)
 	cfg = fwspec->iommu_priv;
 	smmu = cfg->smmu;
 
+	ret = arm_smmu_rpm_get(smmu);
+	if (ret < 0)
+		return;
+
 	iommu_device_unlink(&smmu->iommu, dev);
 	arm_smmu_master_free_smes(fwspec);
+
+	arm_smmu_rpm_put(smmu);
+
 	iommu_group_remove_device(dev);
 	kfree(fwspec->iommu_priv);
 	iommu_fwspec_free(dev);
@@ -2214,6 +2284,17 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
 	arm_smmu_test_smr_masks(smmu);
 
 	/*
+	 * We want to avoid touching dev->power.lock in fastpaths unless
+	 * it's really going to do something useful - pm_runtime_enabled()
+	 * can serve as an ideal proxy for that decision. So, conditionally
+	 * enable pm_runtime.
+	 */
+	if (dev->pm_domain) {
+		pm_runtime_set_active(dev);
+		pm_runtime_enable(dev);
+	}
+
+	/*
 	 * For ACPI and generic DT bindings, an SMMU will be probed before
 	 * any device which might need it, so we want the bus ops in place
 	 * ready to handle default domain setup as soon as any SMMU exists.
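Note: runtime PM is only enabled when the SMMU sits in a power domain, so pm_runtime_enabled() stays false elsewhere and the rpm helpers remain no-ops on systems that don't need this. For the enable to do anything useful, the driver also needs runtime-PM callbacks that gate the SMMU clocks; those are added by a companion patch in this series, so the exact shape below is a sketch under that assumption rather than part of this diff:

static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
{
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);

	/* Re-enable the (already prepared) SMMU bulk clocks */
	return clk_bulk_enable(smmu->num_clks, smmu->clks);
}

static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
{
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);

	/* Clocks are only disabled here; unprepare happens at remove time */
	clk_bulk_disable(smmu->num_clks, smmu->clks);

	return 0;
}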
@@ -2248,10 +2329,17 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
 	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
 		dev_err(&pdev->dev, "removing device with active domains!\n");
 
+	arm_smmu_rpm_get(smmu);
 	/* Turn the thing off */
 	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
+	arm_smmu_rpm_put(smmu);
+
+	if (pm_runtime_enabled(smmu->dev))
+		pm_runtime_force_suspend(smmu->dev);
+	else
+		clk_bulk_disable(smmu->num_clks, smmu->clks);
 
-	clk_bulk_disable_unprepare(smmu->num_clks, smmu->clks);
+	clk_bulk_unprepare(smmu->num_clks, smmu->clks);
 
 	return 0;
 }