author		Joerg Roedel <joro@8bytes.org>	2014-03-04 08:54:27 -0500
committer	Joerg Roedel <joro@8bytes.org>	2014-03-04 08:54:27 -0500
commit		dc03753b98e21d73699c38b208ad39d9839519ee (patch)
tree		0b7bbfba26ea0c75098c0276897653f6a87fb6fa
parent		0414855fdc4a40da05221fc6062cccbc0c30f169 (diff)
parent		34fb4b37b7b6da7dc34797d1abf234dd30b091d8 (diff)
Merge branch 'for-joerg/arm-smmu/updates' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into arm/smmu
-rw-r--r--	Documentation/devicetree/bindings/iommu/arm,smmu.txt	|   6
-rw-r--r--	drivers/iommu/arm-smmu.c				| 105
2 files changed, 77 insertions(+), 34 deletions(-)
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index e34c6cdd8ba8..f284b99402bc 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -48,6 +48,12 @@ conditions.
                   from the mmu-masters towards memory) node for this
                   SMMU.
 
+- calxeda,smmu-secure-config-access : Enable proper handling of buggy
+                  implementations that always use secure access to
+                  SMMU configuration registers. In this case non-secure
+                  aliases of secure registers have to be used during
+                  SMMU configuration.
+
 Example:
 
         smmu {
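The new property is a boolean flag on the SMMU node, not a compatible string. A minimal sketch of how an affected platform might set it (node name, unit address and reg value are illustrative and not taken from the binding; required properties such as mmu-masters are omitted for brevity):

        smmu@b0000 {
                compatible = "arm,smmu-v1";
                reg = <0xb0000 0x10000>;
                /* buggy implementation: always uses secure config access */
                calxeda,smmu-secure-config-access;
        };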
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 1d9ab39af29f..8b89e33a89fe 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -48,7 +48,7 @@
 #include <asm/pgalloc.h>
 
 /* Maximum number of stream IDs assigned to a single device */
-#define MAX_MASTER_STREAMIDS		8
+#define MAX_MASTER_STREAMIDS		MAX_PHANDLE_ARGS
 
 /* Maximum number of context banks per SMMU */
 #define ARM_SMMU_MAX_CBS		128
@@ -60,6 +60,16 @@
 #define ARM_SMMU_GR0(smmu)		((smmu)->base)
 #define ARM_SMMU_GR1(smmu)		((smmu)->base + (smmu)->pagesize)
 
+/*
+ * SMMU global address space with conditional offset to access secure
+ * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
+ * nsGFSYNR0: 0x450)
+ */
+#define ARM_SMMU_GR0_NS(smmu)						\
+	((smmu)->base +							\
+		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
+			? 0x400 : 0))
+
 /* Page table bits */
 #define ARM_SMMU_PTE_XN			(((pteval_t)3) << 53)
 #define ARM_SMMU_PTE_CONT		(((pteval_t)1) << 52)
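A quick illustration of what the conditional offset buys (a sketch, not part of the patch; the offsets follow the comment above):

	/*
	 * Sketch: with ARM_SMMU_OPT_SECURE_CFG_ACCESS set, every GR0
	 * access through this macro is shifted by 0x400 into the
	 * non-secure alias page, so reading sGFSR (offset 0x48)
	 * really reads nsGFSR (offset 0x448).
	 */
	void __iomem *gr0 = ARM_SMMU_GR0_NS(smmu);
	u32 gfsr = readl_relaxed(gr0 + ARM_SMMU_GR0_sGFSR);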
@@ -351,6 +361,9 @@ struct arm_smmu_device {
 #define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
 #define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
 	u32				features;
+
+#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
+	u32				options;
 	int				version;
 
 	u32				num_context_banks;
@@ -401,6 +414,29 @@ struct arm_smmu_domain {
 static DEFINE_SPINLOCK(arm_smmu_devices_lock);
 static LIST_HEAD(arm_smmu_devices);
 
+struct arm_smmu_option_prop {
+	u32 opt;
+	const char *prop;
+};
+
+static struct arm_smmu_option_prop arm_smmu_options[] = {
+	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
+	{ 0, NULL},
+};
+
+static void parse_driver_options(struct arm_smmu_device *smmu)
+{
+	int i = 0;
+	do {
+		if (of_property_read_bool(smmu->dev->of_node,
+						arm_smmu_options[i].prop)) {
+			smmu->options |= arm_smmu_options[i].opt;
+			dev_notice(smmu->dev, "option %s\n",
+				arm_smmu_options[i].prop);
+		}
+	} while (arm_smmu_options[++i].opt);
+}
+
 static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
 						struct device_node *dev_node)
 {
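The option table is sentinel-terminated, so the do/while loop in parse_driver_options() walks entries until it hits { 0, NULL }. Wiring up a future quirk would only mean appending a row before the sentinel; a sketch with a purely hypothetical option and property name:

/* Hypothetical follow-on quirk, not part of this patch: */
#define ARM_SMMU_OPT_EXAMPLE_QUIRK	(1 << 1)

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ ARM_SMMU_OPT_EXAMPLE_QUIRK, "vendor,example-smmu-quirk" },
	{ 0, NULL},
};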
@@ -614,16 +650,16 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
 {
 	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
 	struct arm_smmu_device *smmu = dev;
-	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
 
 	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
-	if (!gfsr)
-		return IRQ_NONE;
-
 	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
 	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
 	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
 
+	if (!gfsr)
+		return IRQ_NONE;
+
 	dev_err_ratelimited(smmu->dev,
 		"Unexpected global fault, this could be serious\n");
 	dev_err_ratelimited(smmu->dev,
@@ -642,7 +678,7 @@ static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
 
 	/* Ensure new page tables are visible to the hardware walker */
 	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
-		dsb();
+		dsb(ishst);
 	} else {
 		/*
 		 * If the SMMU can't walk tables in the CPU caches, treat them
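The barrier change is a scope reduction: dsb() with no argument is a full-system barrier ordering both loads and stores, while dsb(ishst) only orders stores within the inner-shareable domain, which is sufficient for making page-table writes visible to a coherent table walker. Roughly (a sketch of the ARMv7/ARMv8 semantics, not driver code):

	dsb();		/* old: DSB SY    - full system, loads and stores */
	dsb(ishst);	/* new: DSB ISHST - stores only, inner-shareable  */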
@@ -990,9 +1026,8 @@ static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain)
 
 	/*
 	 * Recursively free the page tables for this domain. We don't
-	 * care about speculative TLB filling, because the TLB will be
-	 * nuked next time this context bank is re-allocated and no devices
-	 * currently map to these tables.
+	 * care about speculative TLB filling because the tables should
+	 * not be active in any context bank at this point (SCTLR.M is 0).
 	 */
 	pgd = pgd_base;
 	for (i = 0; i < PTRS_PER_PGD; ++i) {
@@ -1218,7 +1253,7 @@ static bool arm_smmu_pte_is_contiguous_range(unsigned long addr,
 
 static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
 				   unsigned long addr, unsigned long end,
-				   unsigned long pfn, int flags, int stage)
+				   unsigned long pfn, int prot, int stage)
 {
 	pte_t *pte, *start;
 	pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN;
@@ -1240,28 +1275,28 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
 
 	if (stage == 1) {
 		pteval |= ARM_SMMU_PTE_AP_UNPRIV | ARM_SMMU_PTE_nG;
-		if (!(flags & IOMMU_WRITE) && (flags & IOMMU_READ))
+		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
 			pteval |= ARM_SMMU_PTE_AP_RDONLY;
 
-		if (flags & IOMMU_CACHE)
+		if (prot & IOMMU_CACHE)
 			pteval |= (MAIR_ATTR_IDX_CACHE <<
 				   ARM_SMMU_PTE_ATTRINDX_SHIFT);
 	} else {
 		pteval |= ARM_SMMU_PTE_HAP_FAULT;
-		if (flags & IOMMU_READ)
+		if (prot & IOMMU_READ)
 			pteval |= ARM_SMMU_PTE_HAP_READ;
-		if (flags & IOMMU_WRITE)
+		if (prot & IOMMU_WRITE)
 			pteval |= ARM_SMMU_PTE_HAP_WRITE;
-		if (flags & IOMMU_CACHE)
+		if (prot & IOMMU_CACHE)
 			pteval |= ARM_SMMU_PTE_MEMATTR_OIWB;
 		else
 			pteval |= ARM_SMMU_PTE_MEMATTR_NC;
 	}
 
 	/* If no access, create a faulting entry to avoid TLB fills */
-	if (flags & IOMMU_EXEC)
+	if (prot & IOMMU_EXEC)
 		pteval &= ~ARM_SMMU_PTE_XN;
-	else if (!(flags & (IOMMU_READ | IOMMU_WRITE)))
+	else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
 		pteval &= ~ARM_SMMU_PTE_PAGE;
 
 	pteval |= ARM_SMMU_PTE_SH_IS;
@@ -1323,7 +1358,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
 
 static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
 				   unsigned long addr, unsigned long end,
-				   phys_addr_t phys, int flags, int stage)
+				   phys_addr_t phys, int prot, int stage)
 {
 	int ret;
 	pmd_t *pmd;
@@ -1347,7 +1382,7 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
 	do {
 		next = pmd_addr_end(addr, end);
 		ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, end, pfn,
-					      flags, stage);
+					      prot, stage);
 		phys += next - addr;
 	} while (pmd++, addr = next, addr < end);
 
@@ -1356,7 +1391,7 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
 
 static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
 				   unsigned long addr, unsigned long end,
-				   phys_addr_t phys, int flags, int stage)
+				   phys_addr_t phys, int prot, int stage)
 {
 	int ret = 0;
 	pud_t *pud;
@@ -1380,7 +1415,7 @@ static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
 	do {
 		next = pud_addr_end(addr, end);
 		ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys,
-					      flags, stage);
+					      prot, stage);
 		phys += next - addr;
 	} while (pud++, addr = next, addr < end);
 
@@ -1389,7 +1424,7 @@ static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
 
 static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
 				   unsigned long iova, phys_addr_t paddr,
-				   size_t size, int flags)
+				   size_t size, int prot)
 {
 	int ret, stage;
 	unsigned long end;
@@ -1397,7 +1432,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
 	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
 	pgd_t *pgd = root_cfg->pgd;
 	struct arm_smmu_device *smmu = root_cfg->smmu;
-	unsigned long irqflags;
+	unsigned long flags;
 
 	if (root_cfg->cbar == CBAR_TYPE_S2_TRANS) {
 		stage = 2;
@@ -1420,14 +1455,14 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
 	if (paddr & ~output_mask)
 		return -ERANGE;
 
-	spin_lock_irqsave(&smmu_domain->lock, irqflags);
+	spin_lock_irqsave(&smmu_domain->lock, flags);
 	pgd += pgd_index(iova);
 	end = iova + size;
 	do {
 		unsigned long next = pgd_addr_end(iova, end);
 
 		ret = arm_smmu_alloc_init_pud(smmu, pgd, iova, next, paddr,
-					      flags, stage);
+					      prot, stage);
 		if (ret)
 			goto out_unlock;
 
@@ -1436,13 +1471,13 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
 	} while (pgd++, iova != end);
 
 out_unlock:
-	spin_unlock_irqrestore(&smmu_domain->lock, irqflags);
+	spin_unlock_irqrestore(&smmu_domain->lock, flags);
 
 	return ret;
 }
 
 static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
-			phys_addr_t paddr, size_t size, int flags)
+			phys_addr_t paddr, size_t size, int prot)
 {
 	struct arm_smmu_domain *smmu_domain = domain->priv;
 
@@ -1453,7 +1488,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
 	if ((phys_addr_t)iova & ~smmu_domain->output_mask)
 		return -ERANGE;
 
-	return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, flags);
+	return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, prot);
 }
 
 static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
@@ -1597,9 +1632,9 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 	int i = 0;
 	u32 reg;
 
-	/* Clear Global FSR */
-	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
-	writel(reg, gr0_base + ARM_SMMU_GR0_sGFSR);
+	/* clear global FSR */
+	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
+	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
 
 	/* Mark all SMRn as invalid and all S2CRn as bypass */
 	for (i = 0; i < smmu->num_mapping_groups; ++i) {
@@ -1619,7 +1654,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
 	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
 
-	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0);
+	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
 
 	/* Enable fault reporting */
 	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
@@ -1638,7 +1673,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 
 	/* Push the button */
 	arm_smmu_tlb_sync(smmu);
-	writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sCR0);
+	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
 }
 
 static int arm_smmu_id_size_to_bits(int size)
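Note also the switch from writel_relaxed() to writel() for the final sCR0 write: on ARM, writel() is essentially a write barrier followed by the relaxed store, so the preceding TLB invalidations and configuration writes are guaranteed to be ordered before the SMMU is enabled. A sketch of the equivalence (assuming the usual ARM I/O accessor definitions):

	/* writel(v, addr) expands to roughly: */
	wmb();				/* order all prior writes ... */
	writel_relaxed(v, addr);	/* ... before this store */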
@@ -1885,6 +1920,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 	if (err)
 		goto out_put_parent;
 
+	parse_driver_options(smmu);
+
 	if (smmu->version > 1 &&
 	    smmu->num_context_banks != smmu->num_context_irqs) {
 		dev_err(dev,
@@ -1969,7 +2006,7 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
 		free_irq(smmu->irqs[i], smmu);
 
 	/* Turn the thing off */
-	writel_relaxed(sCR0_CLIENTPD, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_sCR0);
+	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
 	return 0;
 }
 