author     Will Deacon <will.deacon@arm.com>    2014-02-05 12:49:34 -0500
committer  Will Deacon <will.deacon@arm.com>    2014-02-10 12:02:17 -0500
commit     6dd35f45b8dac827b6f9dd86f5aca6436cdd2410 (patch)
tree       325f901f3c1603ced5a26bb5191c8bd59c920a9a /drivers/iommu/arm-smmu.c
parent     c9d09e2748eaa55cac2af274574baa6368189bc1 (diff)
iommu/arm-smmu: fix table flushing during initial allocations
Now that we populate page tables as we traverse them ("iommu/arm-smmu: fix
pud/pmd entry fill sequence"), we need to ensure that we flush out our zeroed
tables after initial allocation, to prevent speculative TLB fills using
bogus data.

This patch adds additional calls to arm_smmu_flush_pgtable during initial
table allocation, and moves the dsb required by coherent table walkers into
the helper.

Cc: <stable@vger.kernel.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
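For reference, the ordering the message describes is: flush the freshly zeroed table before it becomes reachable by the hardware walker, then flush the entry that makes it reachable. The sketch below is illustrative only, not part of the patch; the wrapper function name example_install_pte_table is hypothetical, while arm_smmu_flush_pgtable, alloc_page, page_address and pmd_populate are the helpers actually used in the diff.

/*
 * Illustrative sketch (not part of the patch): install a newly allocated,
 * zeroed PTE table under a pmd entry. The ordering mirrors what the patch
 * enforces on the table-allocation paths.
 */
static int example_install_pte_table(struct arm_smmu_device *smmu, pmd_t *pmd)
{
        struct page *table = alloc_page(GFP_ATOMIC | __GFP_ZERO);

        if (!table)
                return -ENOMEM;

        /*
         * 1. Flush the zeroed table before the walker can reach it, so a
         *    speculative walk cannot pick up stale data from the new page.
         */
        arm_smmu_flush_pgtable(smmu, page_address(table), PAGE_SIZE);

        /* 2. Only then hook it into the parent level ... */
        pmd_populate(NULL, pmd, table);

        /* 3. ... and flush the parent entry that now points at it. */
        arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd));
        return 0;
}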
Diffstat (limited to 'drivers/iommu/arm-smmu.c')
-rw-r--r--  drivers/iommu/arm-smmu.c  51
1 file changed, 27 insertions(+), 24 deletions(-)
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 6eb54ae97470..509f01f054d9 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -79,7 +79,6 @@
 
 #define ARM_SMMU_PTE_CONT_SIZE         (PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES)
 #define ARM_SMMU_PTE_CONT_MASK         (~(ARM_SMMU_PTE_CONT_SIZE - 1))
-#define ARM_SMMU_PTE_HWTABLE_SIZE      (PTRS_PER_PTE * sizeof(pte_t))
 
 /* Stage-1 PTE */
 #define ARM_SMMU_PTE_AP_UNPRIV         (((pteval_t)1) << 6)
@@ -632,6 +631,28 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
         return IRQ_HANDLED;
 }
 
+static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
+                                   size_t size)
+{
+        unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+
+
+        /* Ensure new page tables are visible to the hardware walker */
+        if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
+                dsb();
+        } else {
+                /*
+                 * If the SMMU can't walk tables in the CPU caches, treat them
+                 * like non-coherent DMA since we need to flush the new entries
+                 * all the way out to memory. There's no possibility of
+                 * recursion here as the SMMU table walker will not be wired
+                 * through another SMMU.
+                 */
+                dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
+                             DMA_TO_DEVICE);
+        }
+}
+
 static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
 {
         u32 reg;
@@ -715,6 +736,8 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
         }
 
         /* TTBR0 */
+        arm_smmu_flush_pgtable(smmu, root_cfg->pgd,
+                               PTRS_PER_PGD * sizeof(pgd_t));
         reg = __pa(root_cfg->pgd);
         writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
         reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32;
@@ -1177,23 +1200,6 @@ static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
         arm_smmu_domain_remove_master(smmu_domain, master);
 }
 
-static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
-                                   size_t size)
-{
-        unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
-
-        /*
-         * If the SMMU can't walk tables in the CPU caches, treat them
-         * like non-coherent DMA since we need to flush the new entries
-         * all the way out to memory. There's no possibility of recursion
-         * here as the SMMU table walker will not be wired through another
-         * SMMU.
-         */
-        if (!(smmu->features & ARM_SMMU_FEAT_COHERENT_WALK))
-                dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
-                             DMA_TO_DEVICE);
-}
-
 static bool arm_smmu_pte_is_contiguous_range(unsigned long addr,
                                              unsigned long end)
 {
@@ -1214,8 +1220,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
                 if (!table)
                         return -ENOMEM;
 
-                arm_smmu_flush_pgtable(smmu, page_address(table),
-                                       ARM_SMMU_PTE_HWTABLE_SIZE);
+                arm_smmu_flush_pgtable(smmu, page_address(table), PAGE_SIZE);
                 if (!pgtable_page_ctor(table)) {
                         __free_page(table);
                         return -ENOMEM;
@@ -1321,6 +1326,7 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
                 if (!pmd)
                         return -ENOMEM;
 
+                arm_smmu_flush_pgtable(smmu, pmd, PAGE_SIZE);
                 pud_populate(NULL, pud, pmd);
                 arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud));
 
@@ -1353,6 +1359,7 @@ static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
                 if (!pud)
                         return -ENOMEM;
 
+                arm_smmu_flush_pgtable(smmu, pud, PAGE_SIZE);
                 pgd_populate(NULL, pgd, pud);
                 arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd));
 
@@ -1421,10 +1428,6 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
 out_unlock:
         spin_unlock(&smmu_domain->lock);
 
-        /* Ensure new page tables are visible to the hardware walker */
-        if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
-                dsb();
-
         return ret;
 }
 