Diffstat (limited to 'drivers/iommu/arm-smmu.c')
 drivers/iommu/arm-smmu.c | 66 ++++++++++++++++++++++++++++--------------------------------------
 1 file changed, 28 insertions(+), 38 deletions(-)
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 1abfb5684ab7..e46a88700b68 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -392,7 +392,7 @@ struct arm_smmu_domain {
 	struct arm_smmu_cfg		root_cfg;
 	phys_addr_t			output_mask;
 
-	spinlock_t			lock;
+	struct mutex			lock;
 };
 
 static DEFINE_SPINLOCK(arm_smmu_devices_lock);
@@ -900,7 +900,7 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
 		goto out_free_domain;
 	smmu_domain->root_cfg.pgd = pgd;
 
-	spin_lock_init(&smmu_domain->lock);
+	mutex_init(&smmu_domain->lock);
 	domain->priv = smmu_domain;
 	return 0;
 
@@ -1137,7 +1137,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	 * Sanity check the domain. We don't currently support domains
 	 * that cross between different SMMU chains.
 	 */
-	spin_lock(&smmu_domain->lock);
+	mutex_lock(&smmu_domain->lock);
 	if (!smmu_domain->leaf_smmu) {
 		/* Now that we have a master, we can finalise the domain */
 		ret = arm_smmu_init_domain_context(domain, dev);
@@ -1152,7 +1152,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 			dev_name(device_smmu->dev));
 		goto err_unlock;
 	}
-	spin_unlock(&smmu_domain->lock);
+	mutex_unlock(&smmu_domain->lock);
 
 	/* Looks ok, so add the device to the domain */
 	master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
@@ -1162,7 +1162,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	return arm_smmu_domain_add_master(smmu_domain, master);
 
 err_unlock:
-	spin_unlock(&smmu_domain->lock);
+	mutex_unlock(&smmu_domain->lock);
 	return ret;
 }
 
@@ -1394,7 +1394,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
 	if (paddr & ~output_mask)
 		return -ERANGE;
 
-	spin_lock(&smmu_domain->lock);
+	mutex_lock(&smmu_domain->lock);
 	pgd += pgd_index(iova);
 	end = iova + size;
 	do {
@@ -1410,7 +1410,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
 	} while (pgd++, iova != end);
 
 out_unlock:
-	spin_unlock(&smmu_domain->lock);
+	mutex_unlock(&smmu_domain->lock);
 
 	/* Ensure new page tables are visible to the hardware walker */
 	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
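The hunks above convert smmu_domain->lock from a spinlock to a mutex, which lets the attach and map paths do work that may sleep (such as allocating page tables) while holding the lock; that rationale is my reading of the change, not something stated in the hunks themselves. Below is a minimal user-space analogue of the pattern using pthreads, with invented demo_* names; it is only a sketch of why a sleeping lock is needed here, not kernel code.

/*
 * User-space analogue of the spinlock -> mutex conversion above.
 * All names are invented for illustration; the point is only that a
 * blocking operation is legal while holding a mutex, whereas it would
 * not be while holding a spinlock.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_domain {
	pthread_mutex_t lock;	/* plays the role of smmu_domain->lock */
	void *pgd;		/* stands in for the page-table root */
};

static int demo_attach(struct demo_domain *dom)
{
	int ret = 0;

	pthread_mutex_lock(&dom->lock);
	if (!dom->pgd) {
		/* May block: fine under a mutex, forbidden under a spinlock. */
		dom->pgd = calloc(1, 4096);
		if (!dom->pgd)
			ret = -1;
	}
	pthread_mutex_unlock(&dom->lock);
	return ret;
}

int main(void)
{
	struct demo_domain dom = { .lock = PTHREAD_MUTEX_INITIALIZER, .pgd = NULL };

	printf("attach: %d\n", demo_attach(&dom));
	free(dom.pgd);
	return 0;
}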
@@ -1423,9 +1423,8 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
 			phys_addr_t paddr, size_t size, int flags)
 {
 	struct arm_smmu_domain *smmu_domain = domain->priv;
-	struct arm_smmu_device *smmu = smmu_domain->leaf_smmu;
 
-	if (!smmu_domain || !smmu)
+	if (!smmu_domain)
 		return -ENODEV;
 
 	/* Check for silent address truncation up the SMMU chain. */
@@ -1449,44 +1448,34 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
 static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
 					 dma_addr_t iova)
 {
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
+	pgd_t *pgdp, pgd;
+	pud_t pud;
+	pmd_t pmd;
+	pte_t pte;
 	struct arm_smmu_domain *smmu_domain = domain->priv;
 	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
-	struct arm_smmu_device *smmu = root_cfg->smmu;
 
-	spin_lock(&smmu_domain->lock);
-	pgd = root_cfg->pgd;
-	if (!pgd)
-		goto err_unlock;
+	pgdp = root_cfg->pgd;
+	if (!pgdp)
+		return 0;
 
-	pgd += pgd_index(iova);
-	if (pgd_none_or_clear_bad(pgd))
-		goto err_unlock;
+	pgd = *(pgdp + pgd_index(iova));
+	if (pgd_none(pgd))
+		return 0;
 
-	pud = pud_offset(pgd, iova);
-	if (pud_none_or_clear_bad(pud))
-		goto err_unlock;
+	pud = *pud_offset(&pgd, iova);
+	if (pud_none(pud))
+		return 0;
 
-	pmd = pmd_offset(pud, iova);
-	if (pmd_none_or_clear_bad(pmd))
-		goto err_unlock;
+	pmd = *pmd_offset(&pud, iova);
+	if (pmd_none(pmd))
+		return 0;
 
-	pte = pmd_page_vaddr(*pmd) + pte_index(iova);
+	pte = *(pmd_page_vaddr(pmd) + pte_index(iova));
 	if (pte_none(pte))
-		goto err_unlock;
-
-	spin_unlock(&smmu_domain->lock);
-	return __pfn_to_phys(pte_pfn(*pte)) | (iova & ~PAGE_MASK);
+		return 0;
 
-err_unlock:
-	spin_unlock(&smmu_domain->lock);
-	dev_warn(smmu->dev,
-		 "invalid (corrupt?) page tables detected for iova 0x%llx\n",
-		 (unsigned long long)iova);
-	return -EINVAL;
+	return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK);
 }
 
 static int arm_smmu_domain_has_cap(struct iommu_domain *domain,
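The rewritten arm_smmu_iova_to_phys() above snapshots each table entry into a local copy before testing it, drops the domain lock entirely, and simply returns 0 for a hole at any level instead of warning and returning -EINVAL. Below is a stand-alone model of that by-value walk pattern; the demo_root/demo_table types and index arithmetic are invented for the sketch and are not the kernel's pgd/pud/pmd/pte helpers.

/*
 * Stand-alone model of a by-value table walk: read each level once into
 * a local, test the copy, and return 0 on any missing entry.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_ENTRIES	16
#define DEMO_PAGE_SHIFT	12
#define DEMO_PAGE_MASK	((1u << DEMO_PAGE_SHIFT) - 1)

typedef uint64_t demo_pte_t;

struct demo_table { demo_pte_t entry[DEMO_ENTRIES]; };

/* Top level points at second-level tables; NULL means "not present". */
struct demo_root { struct demo_table *table[DEMO_ENTRIES]; };

static uint64_t demo_iova_to_phys(struct demo_root *root, uint64_t iova)
{
	struct demo_table *tbl;
	demo_pte_t pte;

	/* Snapshot each level by value before testing it. */
	tbl = root->table[(iova >> (DEMO_PAGE_SHIFT + 4)) % DEMO_ENTRIES];
	if (!tbl)
		return 0;

	pte = tbl->entry[(iova >> DEMO_PAGE_SHIFT) % DEMO_ENTRIES];
	if (!pte)
		return 0;

	return (pte & ~(uint64_t)DEMO_PAGE_MASK) | (iova & DEMO_PAGE_MASK);
}

int main(void)
{
	static struct demo_table leaf;
	static struct demo_root root;
	uint64_t iova = 0x3123;	/* leaf index 3, page offset 0x123 */

	root.table[0] = &leaf;
	leaf.entry[3] = 0x80000000u;	/* pretend physical page at 2 GiB */

	printf("phys = 0x%llx\n", (unsigned long long)demo_iova_to_phys(&root, iova));
	printf("miss = 0x%llx\n", (unsigned long long)demo_iova_to_phys(&root, 1u << 20));
	return 0;
}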
@@ -1863,6 +1852,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 		dev_err(dev,
 			"found only %d context interrupt(s) but %d required\n",
 			smmu->num_context_irqs, smmu->num_context_banks);
+		err = -ENODEV;
 		goto out_put_parent;
 	}
 
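The added err = -ENODEV makes the "too few context interrupts" path return a real error code; before this change the goto presumably unwound with whatever err still held from the preceding successful step, quite possibly 0, so probe could report success. A tiny sketch of the "set the error code before jumping to the unwind label" pattern, with invented demo_* names:

#include <stdio.h>

static int demo_count_irqs(void)    { return 1; }	/* fewer IRQs than needed */
static int demo_required_irqs(void) { return 2; }
static void demo_put_parent(void)   { /* release resources taken earlier */ }

static int demo_probe(void)
{
	int err = 0;	/* earlier steps succeeded */

	if (demo_count_irqs() < demo_required_irqs()) {
		fprintf(stderr, "not enough context interrupts\n");
		err = -19;	/* -ENODEV: without this, we would unwind with err == 0 */
		goto out_put_parent;
	}

	return 0;

out_put_parent:
	demo_put_parent();
	return err;
}

int main(void)
{
	printf("probe returned %d\n", demo_probe());
	return 0;
}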