author		Joerg Roedel <joro@8bytes.org>	2014-02-18 06:26:36 -0500
committer	Joerg Roedel <joro@8bytes.org>	2014-02-18 06:26:36 -0500
commit		15eeb2e925c091b999195d0102ac39a271dbcb7e (patch)
tree		99b68df42b2e053244820c0ca9260d2fc0fcf4b3
parent		6d0abeca3242a88cab8232e4acd7e2bf088f3bc2 (diff)
parent		d123cf82d339c5cc4ffe2a481e0caa23a501d4ac (diff)
Merge branch 'for-joerg/arm-smmu/fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into iommu/fixes
-rw-r--r--	drivers/iommu/arm-smmu.c	103
1 files changed, 61 insertions, 42 deletions
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 8911850c9444..6fe7922ecc1d 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -79,7 +79,6 @@
 
 #define ARM_SMMU_PTE_CONT_SIZE		(PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES)
 #define ARM_SMMU_PTE_CONT_MASK		(~(ARM_SMMU_PTE_CONT_SIZE - 1))
-#define ARM_SMMU_PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(pte_t))
 
 /* Stage-1 PTE */
 #define ARM_SMMU_PTE_AP_UNPRIV		(((pteval_t)1) << 6)
@@ -191,6 +190,9 @@
 #define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
 #define CBAR_VMID_SHIFT			0
 #define CBAR_VMID_MASK			0xff
+#define CBAR_S1_BPSHCFG_SHIFT		8
+#define CBAR_S1_BPSHCFG_MASK		3
+#define CBAR_S1_BPSHCFG_NSH		3
 #define CBAR_S1_MEMATTR_SHIFT		12
 #define CBAR_S1_MEMATTR_MASK		0xf
 #define CBAR_S1_MEMATTR_WB		0xf
@@ -393,7 +395,7 @@ struct arm_smmu_domain {
 	struct arm_smmu_cfg		root_cfg;
 	phys_addr_t			output_mask;
 
-	struct mutex			lock;
+	spinlock_t			lock;
 };
 
 static DEFINE_SPINLOCK(arm_smmu_devices_lock);
@@ -632,6 +634,28 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
 	return IRQ_HANDLED;
 }
 
+static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
+				   size_t size)
+{
+	unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+
+
+	/* Ensure new page tables are visible to the hardware walker */
+	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
+		dsb();
+	} else {
+		/*
+		 * If the SMMU can't walk tables in the CPU caches, treat them
+		 * like non-coherent DMA since we need to flush the new entries
+		 * all the way out to memory. There's no possibility of
+		 * recursion here as the SMMU table walker will not be wired
+		 * through another SMMU.
+		 */
+		dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
+			     DMA_TO_DEVICE);
+	}
+}
+
 static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
 {
 	u32 reg;
@@ -650,11 +674,16 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
 	if (smmu->version == 1)
 		reg |= root_cfg->irptndx << CBAR_IRPTNDX_SHIFT;
 
-	/* Use the weakest memory type, so it is overridden by the pte */
-	if (stage1)
-		reg |= (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
-	else
+	/*
+	 * Use the weakest shareability/memory types, so they are
+	 * overridden by the ttbcr/pte.
+	 */
+	if (stage1) {
+		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
+			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
+	} else {
 		reg |= ARM_SMMU_CB_VMID(root_cfg) << CBAR_VMID_SHIFT;
+	}
 	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(root_cfg->cbndx));
 
 	if (smmu->version > 1) {
@@ -715,6 +744,8 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
 	}
 
 	/* TTBR0 */
+	arm_smmu_flush_pgtable(smmu, root_cfg->pgd,
+			       PTRS_PER_PGD * sizeof(pgd_t));
 	reg = __pa(root_cfg->pgd);
 	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
 	reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32;
@@ -901,7 +932,7 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
 		goto out_free_domain;
 	smmu_domain->root_cfg.pgd = pgd;
 
-	mutex_init(&smmu_domain->lock);
+	spin_lock_init(&smmu_domain->lock);
 	domain->priv = smmu_domain;
 	return 0;
 
@@ -1138,7 +1169,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	 * Sanity check the domain. We don't currently support domains
 	 * that cross between different SMMU chains.
 	 */
-	mutex_lock(&smmu_domain->lock);
+	spin_lock(&smmu_domain->lock);
 	if (!smmu_domain->leaf_smmu) {
 		/* Now that we have a master, we can finalise the domain */
 		ret = arm_smmu_init_domain_context(domain, dev);
@@ -1153,7 +1184,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 			dev_name(device_smmu->dev));
 		goto err_unlock;
 	}
-	mutex_unlock(&smmu_domain->lock);
+	spin_unlock(&smmu_domain->lock);
 
 	/* Looks ok, so add the device to the domain */
 	master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
@@ -1163,7 +1194,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	return arm_smmu_domain_add_master(smmu_domain, master);
 
 err_unlock:
-	mutex_unlock(&smmu_domain->lock);
+	spin_unlock(&smmu_domain->lock);
 	return ret;
 }
 
@@ -1177,23 +1208,6 @@ static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
 	arm_smmu_domain_remove_master(smmu_domain, master);
 }
 
-static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
-				   size_t size)
-{
-	unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
-
-	/*
-	 * If the SMMU can't walk tables in the CPU caches, treat them
-	 * like non-coherent DMA since we need to flush the new entries
-	 * all the way out to memory. There's no possibility of recursion
-	 * here as the SMMU table walker will not be wired through another
-	 * SMMU.
-	 */
-	if (!(smmu->features & ARM_SMMU_FEAT_COHERENT_WALK))
-		dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
-			     DMA_TO_DEVICE);
-}
-
 static bool arm_smmu_pte_is_contiguous_range(unsigned long addr,
 					     unsigned long end)
 {
@@ -1210,12 +1224,11 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
 
 	if (pmd_none(*pmd)) {
 		/* Allocate a new set of tables */
-		pgtable_t table = alloc_page(PGALLOC_GFP);
+		pgtable_t table = alloc_page(GFP_ATOMIC|__GFP_ZERO);
 		if (!table)
 			return -ENOMEM;
 
-		arm_smmu_flush_pgtable(smmu, page_address(table),
-				       ARM_SMMU_PTE_HWTABLE_SIZE);
+		arm_smmu_flush_pgtable(smmu, page_address(table), PAGE_SIZE);
 		if (!pgtable_page_ctor(table)) {
 			__free_page(table);
 			return -ENOMEM;
@@ -1317,9 +1330,15 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
 
 #ifndef __PAGETABLE_PMD_FOLDED
 	if (pud_none(*pud)) {
-		pmd = pmd_alloc_one(NULL, addr);
+		pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
 		if (!pmd)
 			return -ENOMEM;
+
+		arm_smmu_flush_pgtable(smmu, pmd, PAGE_SIZE);
+		pud_populate(NULL, pud, pmd);
+		arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud));
+
+		pmd += pmd_index(addr);
 	} else
 #endif
 		pmd = pmd_offset(pud, addr);
@@ -1328,8 +1347,6 @@
 		next = pmd_addr_end(addr, end);
 		ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, end, pfn,
 					      flags, stage);
-		pud_populate(NULL, pud, pmd);
-		arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud));
 		phys += next - addr;
 	} while (pmd++, addr = next, addr < end);
 
@@ -1346,9 +1363,15 @@ static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
 
 #ifndef __PAGETABLE_PUD_FOLDED
 	if (pgd_none(*pgd)) {
-		pud = pud_alloc_one(NULL, addr);
+		pud = (pud_t *)get_zeroed_page(GFP_ATOMIC);
 		if (!pud)
 			return -ENOMEM;
+
+		arm_smmu_flush_pgtable(smmu, pud, PAGE_SIZE);
+		pgd_populate(NULL, pgd, pud);
+		arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd));
+
+		pud += pud_index(addr);
 	} else
 #endif
 		pud = pud_offset(pgd, addr);
@@ -1357,8 +1380,6 @@ static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
 		next = pud_addr_end(addr, end);
 		ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys,
 					      flags, stage);
-		pgd_populate(NULL, pud, pgd);
-		arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd));
 		phys += next - addr;
 	} while (pud++, addr = next, addr < end);
 
@@ -1397,7 +1418,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
 	if (paddr & ~output_mask)
 		return -ERANGE;
 
-	mutex_lock(&smmu_domain->lock);
+	spin_lock(&smmu_domain->lock);
 	pgd += pgd_index(iova);
 	end = iova + size;
 	do {
@@ -1413,11 +1434,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
 	} while (pgd++, iova != end);
 
 out_unlock:
-	mutex_unlock(&smmu_domain->lock);
-
-	/* Ensure new page tables are visible to the hardware walker */
-	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
-		dsb();
+	spin_unlock(&smmu_domain->lock);
 
 	return ret;
 }
@@ -1987,8 +2004,10 @@ static int __init arm_smmu_init(void)
 	if (!iommu_present(&platform_bus_type))
 		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
 
+#ifdef CONFIG_ARM_AMBA
 	if (!iommu_present(&amba_bustype))
 		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
+#endif
 
 	return 0;
 }
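
A recurring pattern in the merged fixes is that a newly allocated page table is made visible to the SMMU's hardware walker (dsb() on coherent-walk hardware, dma_map_page() otherwise) before the parent entry pointing at it is written, and the parent entry is then flushed as well. Below is a minimal, stand-alone C11 sketch of that publish ordering; it is only an analogy, not kernel code: the release/acquire pair stands in for arm_smmu_flush_pgtable()/dsb(), and every name in it (fake_table, parent_slot, publish_table) is invented for the example.

/*
 * Illustrative sketch only: mimics the "initialise the table, make it
 * visible, then publish the pointer to it" ordering that the SMMU fixes
 * enforce, using C11 release/acquire in place of the driver's cache
 * maintenance. All identifiers here are hypothetical.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define FAKE_PTRS_PER_TABLE 512

struct fake_table {
	unsigned long entries[FAKE_PTRS_PER_TABLE];
};

/* "Parent" slot that a concurrent walker might read at any time. */
static _Atomic(struct fake_table *) parent_slot;

static struct fake_table *alloc_fake_table(void)
{
	/* Like get_zeroed_page(GFP_ATOMIC): the new table starts out zeroed. */
	return calloc(1, sizeof(struct fake_table));
}

static void publish_table(struct fake_table *tbl)
{
	/*
	 * Release ordering makes the table contents visible before the
	 * pointer to it, mirroring "flush the new table, populate the
	 * parent entry, then flush the parent entry" in the driver.
	 */
	atomic_store_explicit(&parent_slot, tbl, memory_order_release);
}

int main(void)
{
	struct fake_table *tbl = alloc_fake_table();

	if (!tbl)
		return EXIT_FAILURE;

	tbl->entries[0] = 0x1234;	/* fill in a descriptor */
	publish_table(tbl);		/* only now make it reachable */

	struct fake_table *seen =
		atomic_load_explicit(&parent_slot, memory_order_acquire);
	printf("walker sees entry[0] = %#lx\n", seen->entries[0]);

	free(tbl);
	return EXIT_SUCCESS;
}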