author		Will Deacon <will.deacon@arm.com>	2014-06-25 06:29:12 -0400
committer	Will Deacon <will.deacon@arm.com>	2014-07-03 10:50:22 -0400
commit		44680eedf9409daf0fed618ae101f35d1f83d1a4 (patch)
tree		5951a5e3af529803ead15a7885a12f0008fabb70 /drivers/iommu/arm-smmu.c
parent		d0948945638635487111d0851218080e81de5104 (diff)
iommu/arm-smmu: remove support for chained SMMUs
The ARM SMMU driver has supported chained SMMUs (i.e. SMMUs connected
back-to-back in series) via the smmu-parent property in device tree. This
was in anticipation of somebody building such a configuration; however,
that seems not to be the case.

This patch removes the unused chained-SMMU hack from the driver. We can
consider adding it back later if somebody decides they need it, but for
the time being it's just pointless mess that we're carrying in mainline.
Removal of the feature also makes migration to the generic IOMMU bindings
easier.

Signed-off-by: Will Deacon <will.deacon@arm.com>
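For reference, a chained configuration was described in device tree by
giving a downstream SMMU an smmu-parent phandle naming the SMMU behind it.
The fragment below is a hypothetical sketch of such a topology under the
old binding; the labels, unit addresses and remaining properties are
illustrative only and are not taken from this patch:

	/* Hypothetical chained topology: smmu1 sits behind smmu0 and
	 * pointed at it via the now-removed smmu-parent property. */
	smmu0: smmu@2b500000 {
		compatible = "arm,smmu-v1";
		/* reg, #global-interrupts, interrupts, mmu-masters, ... */
	};

	smmu1: smmu@2b600000 {
		compatible = "arm,smmu-v1";
		smmu-parent = <&smmu0>;	/* support removed by this patch */
		/* reg, #global-interrupts, interrupts, mmu-masters, ... */
	};

With the property gone, find_smmu_for_device() in the diff below reduces
to a flat walk of the arm_smmu_devices list instead of a search for the
root of a chain.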
Diffstat (limited to 'drivers/iommu/arm-smmu.c')
-rw-r--r--	drivers/iommu/arm-smmu.c	263
1 file changed, 77 insertions(+), 186 deletions(-)
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 3ae50be49269..2961b8c474eb 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -333,28 +333,17 @@ struct arm_smmu_smr {
 struct arm_smmu_master_cfg {
 	int num_streamids;
 	u16 streamids[MAX_MASTER_STREAMIDS];
-
-	/*
-	 * We only need to allocate these on the root SMMU, as we
-	 * configure unmatched streams to bypass translation.
-	 */
 	struct arm_smmu_smr *smrs;
 };
 
 struct arm_smmu_master {
 	struct device_node *of_node;
-
-	/*
-	 * The following is specific to the master's position in the
-	 * SMMU chain.
-	 */
 	struct rb_node node;
 	struct arm_smmu_master_cfg cfg;
 };
 
 struct arm_smmu_device {
 	struct device *dev;
-	struct device_node *parent_of_node;
 
 	void __iomem *base;
 	unsigned long size;
@@ -392,7 +381,6 @@ struct arm_smmu_device {
 };
 
 struct arm_smmu_cfg {
-	struct arm_smmu_device *smmu;
 	u8 cbndx;
 	u8 irptndx;
 	u32 cbar;
@@ -404,15 +392,8 @@ struct arm_smmu_cfg {
 #define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1)
 
 struct arm_smmu_domain {
-	/*
-	 * A domain can span across multiple, chained SMMUs and requires
-	 * all devices within the domain to follow the same translation
-	 * path.
-	 */
-	struct arm_smmu_device *leaf_smmu;
-	struct arm_smmu_cfg root_cfg;
-	phys_addr_t output_mask;
-
+	struct arm_smmu_device *smmu;
+	struct arm_smmu_cfg cfg;
 	spinlock_t lock;
 };
 
@@ -546,59 +527,20 @@ static int register_smmu_master(struct arm_smmu_device *smmu,
 	return insert_smmu_master(smmu, master);
 }
 
-static struct arm_smmu_device *find_parent_smmu(struct arm_smmu_device *smmu)
-{
-	struct arm_smmu_device *parent;
-
-	if (!smmu->parent_of_node)
-		return NULL;
-
-	spin_lock(&arm_smmu_devices_lock);
-	list_for_each_entry(parent, &arm_smmu_devices, list)
-		if (parent->dev->of_node == smmu->parent_of_node)
-			goto out_unlock;
-
-	parent = NULL;
-	dev_warn(smmu->dev,
-		 "Failed to find SMMU parent despite parent in DT\n");
-out_unlock:
-	spin_unlock(&arm_smmu_devices_lock);
-	return parent;
-}
-
-static struct arm_smmu_device *find_parent_smmu_for_device(struct device *dev)
+static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
 {
-	struct arm_smmu_device *child, *parent, *smmu;
+	struct arm_smmu_device *smmu;
 	struct arm_smmu_master *master = NULL;
 	struct device_node *dev_node = dev_get_master_dev(dev)->of_node;
 
 	spin_lock(&arm_smmu_devices_lock);
-	list_for_each_entry(parent, &arm_smmu_devices, list) {
-		smmu = parent;
-
-		/* Try to find a child of the current SMMU. */
-		list_for_each_entry(child, &arm_smmu_devices, list) {
-			if (child->parent_of_node == parent->dev->of_node) {
-				/* Does the child sit above our master? */
-				master = find_smmu_master(child, dev_node);
-				if (master) {
-					smmu = NULL;
-					break;
-				}
-			}
-		}
-
-		/* We found some children, so keep searching. */
-		if (!smmu) {
-			master = NULL;
-			continue;
-		}
-
-		master = find_smmu_master(smmu, dev_node);
+	list_for_each_entry(smmu, &arm_smmu_devices, list) {
+		master = find_smmu_master(smmu, dev_node);
 		if (master)
 			break;
 	}
 	spin_unlock(&arm_smmu_devices_lock);
+
 	return master ? smmu : NULL;
 }
 
@@ -639,9 +581,10 @@ static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
 	}
 }
 
-static void arm_smmu_tlb_inv_context(struct arm_smmu_cfg *cfg)
+static void arm_smmu_tlb_inv_context(struct arm_smmu_domain *smmu_domain)
 {
-	struct arm_smmu_device *smmu = cfg->smmu;
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	void __iomem *base = ARM_SMMU_GR0(smmu);
 	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
 
@@ -665,11 +608,11 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 	unsigned long iova;
 	struct iommu_domain *domain = dev;
 	struct arm_smmu_domain *smmu_domain = domain->priv;
-	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
-	struct arm_smmu_device *smmu = root_cfg->smmu;
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	void __iomem *cb_base;
 
-	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx);
+	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
 	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
 
 	if (!(fsr & FSR_FAULT))
@@ -696,7 +639,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 	} else {
 		dev_err_ratelimited(smmu->dev,
 			"Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
-			iova, fsynr, root_cfg->cbndx);
+			iova, fsynr, cfg->cbndx);
 		ret = IRQ_NONE;
 		resume = RESUME_TERMINATE;
 	}
@@ -761,19 +704,19 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
 {
 	u32 reg;
 	bool stage1;
-	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
-	struct arm_smmu_device *smmu = root_cfg->smmu;
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	void __iomem *cb_base, *gr0_base, *gr1_base;
 
 	gr0_base = ARM_SMMU_GR0(smmu);
 	gr1_base = ARM_SMMU_GR1(smmu);
-	stage1 = root_cfg->cbar != CBAR_TYPE_S2_TRANS;
-	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx);
+	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
+	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
 
 	/* CBAR */
-	reg = root_cfg->cbar;
+	reg = cfg->cbar;
 	if (smmu->version == 1)
-		reg |= root_cfg->irptndx << CBAR_IRPTNDX_SHIFT;
+		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
 
 	/*
 	 * Use the weakest shareability/memory types, so they are
@@ -783,9 +726,9 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
 		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
 			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
 	} else {
-		reg |= ARM_SMMU_CB_VMID(root_cfg) << CBAR_VMID_SHIFT;
+		reg |= ARM_SMMU_CB_VMID(cfg) << CBAR_VMID_SHIFT;
 	}
-	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(root_cfg->cbndx));
+	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
 
 	if (smmu->version > 1) {
 		/* CBA2R */
@@ -795,7 +738,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
 		reg = CBA2R_RW64_32BIT;
 #endif
 		writel_relaxed(reg,
-			       gr1_base + ARM_SMMU_GR1_CBA2R(root_cfg->cbndx));
+			       gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
 
 		/* TTBCR2 */
 		switch (smmu->input_size) {
@@ -845,13 +788,13 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
 	}
 
 	/* TTBR0 */
-	arm_smmu_flush_pgtable(smmu, root_cfg->pgd,
+	arm_smmu_flush_pgtable(smmu, cfg->pgd,
 			       PTRS_PER_PGD * sizeof(pgd_t));
-	reg = __pa(root_cfg->pgd);
+	reg = __pa(cfg->pgd);
 	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
-	reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32;
+	reg = (phys_addr_t)__pa(cfg->pgd) >> 32;
 	if (stage1)
-		reg |= ARM_SMMU_CB_ASID(root_cfg) << TTBRn_HI_ASID_SHIFT;
+		reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT;
 	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
 
 	/*
@@ -920,44 +863,24 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
 }
 
 static int arm_smmu_init_domain_context(struct iommu_domain *domain,
-					struct device *dev,
-					struct arm_smmu_device *device_smmu)
+					struct arm_smmu_device *smmu)
 {
 	int irq, ret, start;
 	struct arm_smmu_domain *smmu_domain = domain->priv;
-	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
-	struct arm_smmu_device *smmu, *parent;
-
-	/*
-	 * Walk the SMMU chain to find the root device for this chain.
-	 * We assume that no masters have translations which terminate
-	 * early, and therefore check that the root SMMU does indeed have
-	 * a StreamID for the master in question.
-	 */
-	parent = device_smmu;
-	smmu_domain->output_mask = -1;
-	do {
-		smmu = parent;
-		smmu_domain->output_mask &= (1ULL << smmu->s2_output_size) - 1;
-	} while ((parent = find_parent_smmu(smmu)));
-
-	if (!find_smmu_master_cfg(smmu, dev)) {
-		dev_err(dev, "unable to find root SMMU config for device\n");
-		return -ENODEV;
-	}
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 
 	if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
 		/*
 		 * We will likely want to change this if/when KVM gets
 		 * involved.
 		 */
-		root_cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
+		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
 		start = smmu->num_s2_context_banks;
 	} else if (smmu->features & ARM_SMMU_FEAT_TRANS_S2) {
-		root_cfg->cbar = CBAR_TYPE_S2_TRANS;
+		cfg->cbar = CBAR_TYPE_S2_TRANS;
 		start = 0;
 	} else {
-		root_cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
+		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
 		start = smmu->num_s2_context_banks;
 	}
 
@@ -966,39 +889,38 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	if (IS_ERR_VALUE(ret))
 		return ret;
 
-	root_cfg->cbndx = ret;
+	cfg->cbndx = ret;
 	if (smmu->version == 1) {
-		root_cfg->irptndx = atomic_inc_return(&smmu->irptndx);
-		root_cfg->irptndx %= smmu->num_context_irqs;
+		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
+		cfg->irptndx %= smmu->num_context_irqs;
 	} else {
-		root_cfg->irptndx = root_cfg->cbndx;
+		cfg->irptndx = cfg->cbndx;
 	}
 
-	irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx];
+	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
 	ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
 			  "arm-smmu-context-fault", domain);
 	if (IS_ERR_VALUE(ret)) {
 		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
-			root_cfg->irptndx, irq);
-		root_cfg->irptndx = INVALID_IRPTNDX;
+			cfg->irptndx, irq);
+		cfg->irptndx = INVALID_IRPTNDX;
 		goto out_free_context;
 	}
 
-	root_cfg->smmu = smmu;
+	smmu_domain->smmu = smmu;
 	arm_smmu_init_context_bank(smmu_domain);
-	smmu_domain->leaf_smmu = device_smmu;
 	return 0;
 
 out_free_context:
-	__arm_smmu_free_bitmap(smmu->context_map, root_cfg->cbndx);
+	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
 	return ret;
 }
 
 static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
 {
 	struct arm_smmu_domain *smmu_domain = domain->priv;
-	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
-	struct arm_smmu_device *smmu = root_cfg->smmu;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 	void __iomem *cb_base;
 	int irq;
 
@@ -1006,16 +928,16 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
 		return;
 
 	/* Disable the context bank and nuke the TLB before freeing it. */
-	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx);
+	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
 	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
-	arm_smmu_tlb_inv_context(root_cfg);
+	arm_smmu_tlb_inv_context(smmu_domain);
 
-	if (root_cfg->irptndx != INVALID_IRPTNDX) {
-		irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx];
+	if (cfg->irptndx != INVALID_IRPTNDX) {
+		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
 		free_irq(irq, domain);
 	}
 
-	__arm_smmu_free_bitmap(smmu->context_map, root_cfg->cbndx);
+	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
 }
 
 static int arm_smmu_domain_init(struct iommu_domain *domain)
@@ -1035,7 +957,7 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
 	pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
 	if (!pgd)
 		goto out_free_domain;
-	smmu_domain->root_cfg.pgd = pgd;
+	smmu_domain->cfg.pgd = pgd;
 
 	spin_lock_init(&smmu_domain->lock);
 	domain->priv = smmu_domain;
@@ -1090,8 +1012,8 @@ static void arm_smmu_free_puds(pgd_t *pgd)
 static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain)
 {
 	int i;
-	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
-	pgd_t *pgd, *pgd_base = root_cfg->pgd;
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	pgd_t *pgd, *pgd_base = cfg->pgd;
 
 	/*
 	 * Recursively free the page tables for this domain. We don't
@@ -1142,7 +1064,7 @@ static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
 		return -ENOMEM;
 	}
 
-	/* Allocate the SMRs on the root SMMU */
+	/* Allocate the SMRs on the SMMU */
 	for (i = 0; i < cfg->num_streamids; ++i) {
 		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
 						  smmu->num_mapping_groups);
@@ -1210,34 +1132,18 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
 				  struct arm_smmu_master_cfg *cfg)
 {
 	int i, ret;
-	struct arm_smmu_device *parent, *smmu = smmu_domain->root_cfg.smmu;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
 
 	ret = arm_smmu_master_configure_smrs(smmu, cfg);
 	if (ret)
 		return ret;
 
-	/* Bypass the leaves */
-	smmu = smmu_domain->leaf_smmu;
-	while ((parent = find_parent_smmu(smmu))) {
-		/*
-		 * We won't have a StreamID match for anything but the root
-		 * smmu, so we only need to worry about StreamID indexing,
-		 * where we must install bypass entries in the S2CRs.
-		 */
-		if (smmu->features & ARM_SMMU_FEAT_STREAM_MATCH)
-			continue;
-
-		arm_smmu_bypass_stream_mapping(smmu, cfg);
-		smmu = parent;
-	}
-
-	/* Now we're at the root, time to point at our context bank */
 	for (i = 0; i < cfg->num_streamids; ++i) {
 		u32 idx, s2cr;
 		idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
 		s2cr = S2CR_TYPE_TRANS |
-		       (smmu_domain->root_cfg.cbndx << S2CR_CBNDX_SHIFT);
+		       (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
 		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
 	}
 
@@ -1247,7 +1153,7 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
 static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
 					  struct arm_smmu_master_cfg *cfg)
 {
-	struct arm_smmu_device *smmu = smmu_domain->root_cfg.smmu;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
 
 	/*
 	 * We *must* clear the S2CR first, because freeing the SMR means
@@ -1261,37 +1167,37 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
 	int ret = -EINVAL;
 	struct arm_smmu_domain *smmu_domain = domain->priv;
-	struct arm_smmu_device *device_smmu;
+	struct arm_smmu_device *smmu;
 	struct arm_smmu_master_cfg *cfg;
 	unsigned long flags;
 
-	device_smmu = dev_get_master_dev(dev)->archdata.iommu;
-	if (!device_smmu) {
+	smmu = dev_get_master_dev(dev)->archdata.iommu;
+	if (!smmu) {
 		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
 		return -ENXIO;
 	}
 
 	/*
-	 * Sanity check the domain. We don't currently support domains
-	 * that cross between different SMMU chains.
+	 * Sanity check the domain. We don't support domains across
+	 * different SMMUs.
 	 */
 	spin_lock_irqsave(&smmu_domain->lock, flags);
-	if (!smmu_domain->leaf_smmu) {
+	if (!smmu_domain->smmu) {
 		/* Now that we have a master, we can finalise the domain */
-		ret = arm_smmu_init_domain_context(domain, dev, device_smmu);
+		ret = arm_smmu_init_domain_context(domain, smmu);
 		if (IS_ERR_VALUE(ret))
 			goto err_unlock;
-	} else if (smmu_domain->leaf_smmu != device_smmu) {
+	} else if (smmu_domain->smmu != smmu) {
 		dev_err(dev,
 			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
-			dev_name(smmu_domain->leaf_smmu->dev),
-			dev_name(device_smmu->dev));
+			dev_name(smmu_domain->smmu->dev),
+			dev_name(smmu->dev));
 		goto err_unlock;
 	}
 	spin_unlock_irqrestore(&smmu_domain->lock, flags);
 
 	/* Looks ok, so add the device to the domain */
-	cfg = find_smmu_master_cfg(smmu_domain->leaf_smmu, dev);
+	cfg = find_smmu_master_cfg(smmu_domain->smmu, dev);
 	if (!cfg)
 		return -ENODEV;
 
@@ -1307,7 +1213,7 @@ static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
 	struct arm_smmu_domain *smmu_domain = domain->priv;
 	struct arm_smmu_master_cfg *cfg;
 
-	cfg = find_smmu_master_cfg(smmu_domain->leaf_smmu, dev);
+	cfg = find_smmu_master_cfg(smmu_domain->smmu, dev);
 	if (cfg)
 		arm_smmu_domain_remove_master(smmu_domain, cfg);
 }
@@ -1497,12 +1403,12 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
 	int ret, stage;
 	unsigned long end;
 	phys_addr_t input_mask, output_mask;
-	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
-	pgd_t *pgd = root_cfg->pgd;
-	struct arm_smmu_device *smmu = root_cfg->smmu;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	pgd_t *pgd = cfg->pgd;
 	unsigned long flags;
 
-	if (root_cfg->cbar == CBAR_TYPE_S2_TRANS) {
+	if (cfg->cbar == CBAR_TYPE_S2_TRANS) {
 		stage = 2;
 		output_mask = (1ULL << smmu->s2_output_size) - 1;
 	} else {
@@ -1552,10 +1458,6 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
 	if (!smmu_domain)
 		return -ENODEV;
 
-	/* Check for silent address truncation up the SMMU chain. */
-	if ((phys_addr_t)iova & ~smmu_domain->output_mask)
-		return -ERANGE;
-
 	return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, prot);
 }
 
@@ -1566,7 +1468,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
 	struct arm_smmu_domain *smmu_domain = domain->priv;
 
 	ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0);
-	arm_smmu_tlb_inv_context(&smmu_domain->root_cfg);
+	arm_smmu_tlb_inv_context(smmu_domain);
 	return ret ? 0 : size;
 }
 
@@ -1578,9 +1480,9 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
 	pmd_t pmd;
 	pte_t pte;
 	struct arm_smmu_domain *smmu_domain = domain->priv;
-	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 
-	pgdp = root_cfg->pgd;
+	pgdp = cfg->pgd;
 	if (!pgdp)
 		return 0;
 
@@ -1607,7 +1509,7 @@ static int arm_smmu_domain_has_cap(struct iommu_domain *domain,
 				   unsigned long cap)
 {
 	struct arm_smmu_domain *smmu_domain = domain->priv;
-	u32 features = smmu_domain->root_cfg.smmu->features;
+	u32 features = smmu_domain->smmu->features;
 
 	switch (cap) {
 	case IOMMU_CAP_CACHE_COHERENCY:
@@ -1636,7 +1538,7 @@ static int arm_smmu_add_device(struct device *dev)
 		return -EINVAL;
 	}
 
-	smmu = find_parent_smmu_for_device(dev);
+	smmu = find_smmu_for_device(dev);
 	if (!smmu)
 		return -ENODEV;
 
@@ -1918,7 +1820,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 {
 	struct resource *res;
 	struct arm_smmu_device *smmu;
-	struct device_node *dev_node;
 	struct device *dev = &pdev->dev;
 	struct rb_node *node;
 	struct of_phandle_args masterspec;
@@ -1988,12 +1889,9 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 	}
 	dev_notice(dev, "registered %d master devices\n", i);
 
-	if ((dev_node = of_parse_phandle(dev->of_node, "smmu-parent", 0)))
-		smmu->parent_of_node = dev_node;
-
 	err = arm_smmu_device_cfg_probe(smmu);
 	if (err)
-		goto out_put_parent;
+		goto out_put_masters;
 
 	parse_driver_options(smmu);
 
@@ -2003,7 +1901,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 			"found only %d context interrupt(s) but %d required\n",
 			smmu->num_context_irqs, smmu->num_context_banks);
 		err = -ENODEV;
-		goto out_put_parent;
+		goto out_put_masters;
 	}
 
 	for (i = 0; i < smmu->num_global_irqs; ++i) {
@@ -2031,10 +1929,6 @@ out_free_irqs:
 	while (i--)
 		free_irq(smmu->irqs[i], smmu);
 
-out_put_parent:
-	if (smmu->parent_of_node)
-		of_node_put(smmu->parent_of_node);
-
 out_put_masters:
 	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
 		struct arm_smmu_master *master;
@@ -2065,9 +1959,6 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
 	if (!smmu)
 		return -ENODEV;
 
-	if (smmu->parent_of_node)
-		of_node_put(smmu->parent_of_node);
-
 	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
 		struct arm_smmu_master *master;
 		master = container_of(node, struct arm_smmu_master, node);