author		Will Deacon <will.deacon@arm.com>	2014-02-04 17:12:42 -0500
committer	Will Deacon <will.deacon@arm.com>	2014-02-10 12:00:49 -0500
commit		c9d09e2748eaa55cac2af274574baa6368189bc1
tree		ff3b8dc58a149f1949083a0a89cf497611bb0905
parent		97a644208d1a08b7104d1fe2ace8cef011222711
iommu/arm-smmu: really fix page table locking
Commit a44a9791e778 ("iommu/arm-smmu: use mutex instead of spinlock for
locking page tables") replaced the page table spinlock with a mutex, to
allow blocking allocations to satisfy lazy mapping requests.

Unfortunately, it turns out that IOMMU mappings are created from atomic
context (e.g. spinlock held during a dma_map), so this change doesn't
really help us in practice.

This patch is a partial revert of the offending commit, bringing back
the original spinlock but replacing our page table allocations for any
levels below the pgd (which is allocated during domain init) with
GFP_ATOMIC instead of GFP_KERNEL.

Cc: <stable@vger.kernel.org>
Reported-by: Andreas Herrmann <andreas.herrmann@calxeda.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
 drivers/iommu/arm-smmu.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)
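For context, here is a minimal, hypothetical sketch of the calling pattern the commit message refers to: a driver maps DMA with one of its own spinlocks held, so the SMMU map path it lands in must not perform blocking (GFP_KERNEL) allocations, which is why the hunks below switch to GFP_ATOMIC. The demo_ring structure and demo_queue_buf() are illustrative only and are not part of this patch or the arm-smmu driver.

#include <linux/dma-mapping.h>
#include <linux/spinlock.h>

/* Hypothetical driver state, used only to illustrate the constraint. */
struct demo_ring {
	spinlock_t	lock;
	struct device	*dev;
};

static int demo_queue_buf(struct demo_ring *ring, void *buf, size_t len)
{
	unsigned long flags;
	dma_addr_t dma;

	spin_lock_irqsave(&ring->lock, flags);
	/*
	 * dma_map_single() may reach the IOMMU map path. Because the
	 * caller holds a spinlock here, that path must not sleep, so any
	 * page table allocation it performs has to be non-blocking.
	 */
	dma = dma_map_single(ring->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(ring->dev, dma)) {
		spin_unlock_irqrestore(&ring->lock, flags);
		return -ENOMEM;
	}

	/* ... hand 'dma' to the hardware ... */

	spin_unlock_irqrestore(&ring->lock, flags);
	return 0;
}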
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 9f210de6537e..6eb54ae97470 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -393,7 +393,7 @@ struct arm_smmu_domain {
 	struct arm_smmu_cfg		root_cfg;
 	phys_addr_t			output_mask;
 
-	struct mutex			lock;
+	spinlock_t			lock;
 };
 
 static DEFINE_SPINLOCK(arm_smmu_devices_lock);
@@ -901,7 +901,7 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
 		goto out_free_domain;
 	smmu_domain->root_cfg.pgd = pgd;
 
-	mutex_init(&smmu_domain->lock);
+	spin_lock_init(&smmu_domain->lock);
 	domain->priv = smmu_domain;
 	return 0;
 
@@ -1138,7 +1138,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	 * Sanity check the domain. We don't currently support domains
 	 * that cross between different SMMU chains.
 	 */
-	mutex_lock(&smmu_domain->lock);
+	spin_lock(&smmu_domain->lock);
 	if (!smmu_domain->leaf_smmu) {
 		/* Now that we have a master, we can finalise the domain */
 		ret = arm_smmu_init_domain_context(domain, dev);
@@ -1153,7 +1153,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 			dev_name(device_smmu->dev));
 		goto err_unlock;
 	}
-	mutex_unlock(&smmu_domain->lock);
+	spin_unlock(&smmu_domain->lock);
 
 	/* Looks ok, so add the device to the domain */
 	master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
@@ -1163,7 +1163,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	return arm_smmu_domain_add_master(smmu_domain, master);
 
 err_unlock:
-	mutex_unlock(&smmu_domain->lock);
+	spin_unlock(&smmu_domain->lock);
 	return ret;
 }
 
@@ -1210,7 +1210,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
 
 	if (pmd_none(*pmd)) {
 		/* Allocate a new set of tables */
-		pgtable_t table = alloc_page(PGALLOC_GFP);
+		pgtable_t table = alloc_page(GFP_ATOMIC|__GFP_ZERO);
 		if (!table)
 			return -ENOMEM;
 
@@ -1317,7 +1317,7 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
 
 #ifndef __PAGETABLE_PMD_FOLDED
 	if (pud_none(*pud)) {
-		pmd = pmd_alloc_one(NULL, addr);
+		pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
 		if (!pmd)
 			return -ENOMEM;
 
@@ -1349,7 +1349,7 @@ static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
 
 #ifndef __PAGETABLE_PUD_FOLDED
 	if (pgd_none(*pgd)) {
-		pud = pud_alloc_one(NULL, addr);
+		pud = (pud_t *)get_zeroed_page(GFP_ATOMIC);
 		if (!pud)
 			return -ENOMEM;
 
@@ -1403,7 +1403,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
 	if (paddr & ~output_mask)
 		return -ERANGE;
 
-	mutex_lock(&smmu_domain->lock);
+	spin_lock(&smmu_domain->lock);
 	pgd += pgd_index(iova);
 	end = iova + size;
 	do {
@@ -1419,7 +1419,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
 	} while (pgd++, iova != end);
 
 out_unlock:
-	mutex_unlock(&smmu_domain->lock);
+	spin_unlock(&smmu_domain->lock);
 
 	/* Ensure new page tables are visible to the hardware walker */
 	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
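For completeness, the non-sleeping allocation pattern the hunks above adopt can be reduced to a standalone sketch. Here demo_lock and demo_alloc_level() are hypothetical stand-ins for smmu_domain->lock and the per-level table allocations; they are not taken from the patch.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* stand-in for smmu_domain->lock */

static int demo_alloc_level(unsigned long **table)
{
	unsigned long *page;

	spin_lock(&demo_lock);
	/*
	 * GFP_ATOMIC never sleeps, so it is safe while the spinlock is
	 * held; the trade-off is that it can fail under memory pressure,
	 * which the caller must handle (here by returning -ENOMEM).
	 */
	page = (unsigned long *)get_zeroed_page(GFP_ATOMIC);
	if (!page) {
		spin_unlock(&demo_lock);
		return -ENOMEM;
	}

	*table = page;
	spin_unlock(&demo_lock);
	return 0;
}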