author		Will Deacon <will.deacon@arm.com>	2013-11-07 13:47:50 -0500
committer	Will Deacon <will.deacon@arm.com>	2013-12-06 11:44:13 -0500
commit		a44a9791e778d9ccda50d5534028ed4057a9a45b
tree		5d90175829600719209f730c5fdf3f4bc8e43363 /drivers/iommu/arm-smmu.c
parent		dc1ccc48159d63eca5089e507c82c7d22ef60839
iommu/arm-smmu: use mutex instead of spinlock for locking page tables
When creating IO mappings, we lazily allocate our page tables using
the standard, non-atomic allocator functions. This presents us with a
problem, since our page tables are protected with a spinlock.

This patch reworks the smmu_domain lock to use a mutex instead of a
spinlock. iova_to_phys is then reworked so that it only reads the page
tables, and can run in a lockless fashion, leaving the mutex to guard
against concurrent mapping threads.

Cc: <stable@vger.kernel.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
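The underlying bug is the classic sleep-in-atomic pattern: a GFP_KERNEL
allocation may sleep, and sleeping is forbidden while a spinlock is held.
A minimal sketch of the before/after locking (illustrative only:
alloc_pte_table() is a hypothetical helper, and get_zeroed_page() merely
stands in for the driver's lazy page-table allocation path):

	/* Hypothetical helper, not the driver code. */
	static pte_t *alloc_pte_table(struct arm_smmu_domain *smmu_domain)
	{
		pte_t *table;

		/*
		 * With the old spin_lock() here, a sleeping allocation
		 * triggers "BUG: sleeping function called from invalid
		 * context". A mutex holder is allowed to sleep.
		 */
		mutex_lock(&smmu_domain->lock);
		table = (pte_t *)get_zeroed_page(GFP_KERNEL); /* may sleep */
		mutex_unlock(&smmu_domain->lock);

		return table;
	}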
Diffstat (limited to 'drivers/iommu/arm-smmu.c')
-rw-r--r--	drivers/iommu/arm-smmu.c	62
1 file changed, 26 insertions(+), 36 deletions(-)
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 1abfb5684ab7..6dbcaa4433cd 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -392,7 +392,7 @@ struct arm_smmu_domain {
 	struct arm_smmu_cfg		root_cfg;
 	phys_addr_t			output_mask;
 
-	spinlock_t			lock;
+	struct mutex			lock;
 };
 
 static DEFINE_SPINLOCK(arm_smmu_devices_lock);
@@ -900,7 +900,7 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
 		goto out_free_domain;
 	smmu_domain->root_cfg.pgd = pgd;
 
-	spin_lock_init(&smmu_domain->lock);
+	mutex_init(&smmu_domain->lock);
 	domain->priv = smmu_domain;
 	return 0;
 
@@ -1137,7 +1137,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	 * Sanity check the domain. We don't currently support domains
 	 * that cross between different SMMU chains.
 	 */
-	spin_lock(&smmu_domain->lock);
+	mutex_lock(&smmu_domain->lock);
 	if (!smmu_domain->leaf_smmu) {
 		/* Now that we have a master, we can finalise the domain */
 		ret = arm_smmu_init_domain_context(domain, dev);
@@ -1152,7 +1152,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 			dev_name(device_smmu->dev));
 		goto err_unlock;
 	}
-	spin_unlock(&smmu_domain->lock);
+	mutex_unlock(&smmu_domain->lock);
 
 	/* Looks ok, so add the device to the domain */
 	master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
@@ -1162,7 +1162,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	return arm_smmu_domain_add_master(smmu_domain, master);
 
 err_unlock:
-	spin_unlock(&smmu_domain->lock);
+	mutex_unlock(&smmu_domain->lock);
 	return ret;
 }
 
@@ -1394,7 +1394,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
 	if (paddr & ~output_mask)
 		return -ERANGE;
 
-	spin_lock(&smmu_domain->lock);
+	mutex_lock(&smmu_domain->lock);
 	pgd += pgd_index(iova);
 	end = iova + size;
 	do {
@@ -1410,7 +1410,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
 	} while (pgd++, iova != end);
 
 out_unlock:
-	spin_unlock(&smmu_domain->lock);
+	mutex_unlock(&smmu_domain->lock);
 
 	/* Ensure new page tables are visible to the hardware walker */
 	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
@@ -1449,44 +1449,34 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
 static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
 					 dma_addr_t iova)
 {
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
+	pgd_t *pgdp, pgd;
+	pud_t pud;
+	pmd_t pmd;
+	pte_t pte;
 	struct arm_smmu_domain *smmu_domain = domain->priv;
 	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
-	struct arm_smmu_device *smmu = root_cfg->smmu;
 
-	spin_lock(&smmu_domain->lock);
-	pgd = root_cfg->pgd;
-	if (!pgd)
-		goto err_unlock;
+	pgdp = root_cfg->pgd;
+	if (!pgdp)
+		return 0;
 
-	pgd += pgd_index(iova);
-	if (pgd_none_or_clear_bad(pgd))
-		goto err_unlock;
+	pgd = *(pgdp + pgd_index(iova));
+	if (pgd_none(pgd))
+		return 0;
 
-	pud = pud_offset(pgd, iova);
-	if (pud_none_or_clear_bad(pud))
-		goto err_unlock;
+	pud = *pud_offset(&pgd, iova);
+	if (pud_none(pud))
+		return 0;
 
-	pmd = pmd_offset(pud, iova);
-	if (pmd_none_or_clear_bad(pmd))
-		goto err_unlock;
+	pmd = *pmd_offset(&pud, iova);
+	if (pmd_none(pmd))
+		return 0;
 
-	pte = pmd_page_vaddr(*pmd) + pte_index(iova);
+	pte = *(pmd_page_vaddr(pmd) + pte_index(iova));
 	if (pte_none(pte))
-		goto err_unlock;
-
-	spin_unlock(&smmu_domain->lock);
-	return __pfn_to_phys(pte_pfn(*pte)) | (iova & ~PAGE_MASK);
+		return 0;
 
-err_unlock:
-	spin_unlock(&smmu_domain->lock);
-	dev_warn(smmu->dev,
-		 "invalid (corrupt?) page tables detected for iova 0x%llx\n",
-		 (unsigned long long)iova);
-	return -EINVAL;
+	return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK);
 }
 
 static int arm_smmu_domain_has_cap(struct iommu_domain *domain,
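Note the caller-visible change in the lockless rework above: iova_to_phys
now reads each table entry by value and returns 0 for an unmapped address,
instead of warning and returning -EINVAL. A hedged caller sketch through
the generic IOMMU API (iommu_iova_to_phys() dispatches to this op):

	phys_addr_t phys = iommu_iova_to_phys(domain, iova);

	if (!phys)
		/* Unmapped (or tables still being built): 0, not -EINVAL. */
		pr_debug("no translation for iova 0x%llx\n",
			 (unsigned long long)iova);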