about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
author	Will Deacon <will.deacon@arm.com>	2013-07-31 14:21:27 -0400
committer	Joerg Roedel <joro@8bytes.org>	2013-08-14 06:09:42 -0400
commit	1463fe44fd0f87af0404e2c147ab9724081b7235 (patch)
tree	be7cfd49660f28164d41499348900f5987b81908
parent	adaba320916d246af56821a1aab81a715091e7e5 (diff)
iommu/arm-smmu: Don't use VMIDs for stage-1 translations
Although permitted by the architecture, using VMIDs for stage-1 translations causes a complete nightmare for hypervisors, who end up having to virtualise the VMID space across VMs, which may be using multiple VMIDs each.

To make life easier for hypervisors (which might just decide not to support this VMID virtualisation), this patch reworks the stage-1 context-bank TLB invalidation so that:

- Stage-1 mappings are marked non-global in the ptes
- Each stage-1 context-bank is assigned an ASID in TTBR0
- VMID 0 is reserved for stage-1 context-banks

This allows the hypervisor to overwrite the stage-1 VMID in the CBAR when trapping the write from the guest.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Joerg Roedel <joro@8bytes.org>
-rw-r--r--	drivers/iommu/arm-smmu.c	59
1 file changed, 46 insertions(+), 13 deletions(-)
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 3a595bb5b824..07a03087db33 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -87,6 +87,7 @@
87#define ARM_SMMU_PTE_AP_UNPRIV (((pteval_t)1) << 6) 87#define ARM_SMMU_PTE_AP_UNPRIV (((pteval_t)1) << 6)
88#define ARM_SMMU_PTE_AP_RDONLY (((pteval_t)2) << 6) 88#define ARM_SMMU_PTE_AP_RDONLY (((pteval_t)2) << 6)
89#define ARM_SMMU_PTE_ATTRINDX_SHIFT 2 89#define ARM_SMMU_PTE_ATTRINDX_SHIFT 2
90#define ARM_SMMU_PTE_nG (((pteval_t)1) << 11)
90 91
91/* Stage-2 PTE */ 92/* Stage-2 PTE */
92#define ARM_SMMU_PTE_HAP_FAULT (((pteval_t)0) << 6) 93#define ARM_SMMU_PTE_HAP_FAULT (((pteval_t)0) << 6)
@@ -223,6 +224,7 @@
223#define ARM_SMMU_CB_FAR_LO 0x60 224#define ARM_SMMU_CB_FAR_LO 0x60
224#define ARM_SMMU_CB_FAR_HI 0x64 225#define ARM_SMMU_CB_FAR_HI 0x64
225#define ARM_SMMU_CB_FSYNR0 0x68 226#define ARM_SMMU_CB_FSYNR0 0x68
227#define ARM_SMMU_CB_S1_TLBIASID 0x610
226 228
227#define SCTLR_S1_ASIDPNE (1 << 12) 229#define SCTLR_S1_ASIDPNE (1 << 12)
228#define SCTLR_CFCFG (1 << 7) 230#define SCTLR_CFCFG (1 << 7)
@@ -282,6 +284,8 @@
282#define TTBCR2_ADDR_44 4 284#define TTBCR2_ADDR_44 4
283#define TTBCR2_ADDR_48 5 285#define TTBCR2_ADDR_48 5
284 286
287#define TTBRn_HI_ASID_SHIFT 16
288
285#define MAIR_ATTR_SHIFT(n) ((n) << 3) 289#define MAIR_ATTR_SHIFT(n) ((n) << 3)
286#define MAIR_ATTR_MASK 0xff 290#define MAIR_ATTR_MASK 0xff
287#define MAIR_ATTR_DEVICE 0x04 291#define MAIR_ATTR_DEVICE 0x04
@@ -533,6 +537,23 @@ static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
533 } 537 }
534} 538}
535 539
540static void arm_smmu_tlb_inv_context(struct arm_smmu_cfg *cfg)
541{
542 struct arm_smmu_device *smmu = cfg->smmu;
543 void __iomem *base = ARM_SMMU_GR0(smmu);
544 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
545
546 if (stage1) {
547 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
548 writel_relaxed(cfg->vmid, base + ARM_SMMU_CB_S1_TLBIASID);
549 } else {
550 base = ARM_SMMU_GR0(smmu);
551 writel_relaxed(cfg->vmid, base + ARM_SMMU_GR0_TLBIVMID);
552 }
553
554 arm_smmu_tlb_sync(smmu);
555}
556
536static irqreturn_t arm_smmu_context_fault(int irq, void *dev) 557static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
537{ 558{
538 int flags, ret; 559 int flags, ret;
@@ -621,14 +642,15 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
621 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx); 642 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx);
622 643
623 /* CBAR */ 644 /* CBAR */
624 reg = root_cfg->cbar | 645 reg = root_cfg->cbar;
625 (root_cfg->vmid << CBAR_VMID_SHIFT);
626 if (smmu->version == 1) 646 if (smmu->version == 1)
627 reg |= root_cfg->irptndx << CBAR_IRPTNDX_SHIFT; 647 reg |= root_cfg->irptndx << CBAR_IRPTNDX_SHIFT;
628 648
629 /* Use the weakest memory type, so it is overridden by the pte */ 649 /* Use the weakest memory type, so it is overridden by the pte */
630 if (stage1) 650 if (stage1)
631 reg |= (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT); 651 reg |= (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
652 else
653 reg |= root_cfg->vmid << CBAR_VMID_SHIFT;
632 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(root_cfg->cbndx)); 654 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(root_cfg->cbndx));
633 655
634 if (smmu->version > 1) { 656 if (smmu->version > 1) {
@@ -692,6 +714,8 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
692 reg = __pa(root_cfg->pgd); 714 reg = __pa(root_cfg->pgd);
693 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO); 715 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
694 reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32; 716 reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32;
717 if (stage1)
718 reg |= root_cfg->vmid << TTBRn_HI_ASID_SHIFT;
695 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI); 719 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
696 720
697 /* 721 /*
@@ -747,10 +771,6 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
747 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0); 771 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
748 } 772 }
749 773
750 /* Nuke the TLB */
751 writel_relaxed(root_cfg->vmid, gr0_base + ARM_SMMU_GR0_TLBIVMID);
752 arm_smmu_tlb_sync(smmu);
753
754 /* SCTLR */ 774 /* SCTLR */
755 reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP; 775 reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
756 if (stage1) 776 if (stage1)
@@ -787,7 +807,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
787 return -ENODEV; 807 return -ENODEV;
788 } 808 }
789 809
790 ret = __arm_smmu_alloc_bitmap(smmu->vmid_map, 0, ARM_SMMU_NUM_VMIDS); 810 /* VMID zero is reserved for stage-1 mappings */
811 ret = __arm_smmu_alloc_bitmap(smmu->vmid_map, 1, ARM_SMMU_NUM_VMIDS);
791 if (IS_ERR_VALUE(ret)) 812 if (IS_ERR_VALUE(ret))
792 return ret; 813 return ret;
793 814
@@ -847,11 +868,17 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
847 struct arm_smmu_domain *smmu_domain = domain->priv; 868 struct arm_smmu_domain *smmu_domain = domain->priv;
848 struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; 869 struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
849 struct arm_smmu_device *smmu = root_cfg->smmu; 870 struct arm_smmu_device *smmu = root_cfg->smmu;
871 void __iomem *cb_base;
850 int irq; 872 int irq;
851 873
852 if (!smmu) 874 if (!smmu)
853 return; 875 return;
854 876
877 /* Disable the context bank and nuke the TLB before freeing it. */
878 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx);
879 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
880 arm_smmu_tlb_inv_context(root_cfg);
881
855 if (root_cfg->irptndx != -1) { 882 if (root_cfg->irptndx != -1) {
856 irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx]; 883 irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx];
857 free_irq(irq, domain); 884 free_irq(irq, domain);
@@ -956,6 +983,11 @@ static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain)
956static void arm_smmu_domain_destroy(struct iommu_domain *domain) 983static void arm_smmu_domain_destroy(struct iommu_domain *domain)
957{ 984{
958 struct arm_smmu_domain *smmu_domain = domain->priv; 985 struct arm_smmu_domain *smmu_domain = domain->priv;
986
987 /*
988 * Free the domain resources. We assume that all devices have
989 * already been detached.
990 */
959 arm_smmu_destroy_domain_context(domain); 991 arm_smmu_destroy_domain_context(domain);
960 arm_smmu_free_pgtables(smmu_domain); 992 arm_smmu_free_pgtables(smmu_domain);
961 kfree(smmu_domain); 993 kfree(smmu_domain);
@@ -1196,7 +1228,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
1196 } 1228 }
1197 1229
1198 if (stage == 1) { 1230 if (stage == 1) {
1199 pteval |= ARM_SMMU_PTE_AP_UNPRIV; 1231 pteval |= ARM_SMMU_PTE_AP_UNPRIV | ARM_SMMU_PTE_nG;
1200 if (!(flags & IOMMU_WRITE) && (flags & IOMMU_READ)) 1232 if (!(flags & IOMMU_WRITE) && (flags & IOMMU_READ))
1201 pteval |= ARM_SMMU_PTE_AP_RDONLY; 1233 pteval |= ARM_SMMU_PTE_AP_RDONLY;
1202 1234
@@ -1412,13 +1444,9 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1412{ 1444{
1413 int ret; 1445 int ret;
1414 struct arm_smmu_domain *smmu_domain = domain->priv; 1446 struct arm_smmu_domain *smmu_domain = domain->priv;
1415 struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
1416 struct arm_smmu_device *smmu = root_cfg->smmu;
1417 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1418 1447
1419 ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0); 1448 ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0);
1420 writel_relaxed(root_cfg->vmid, gr0_base + ARM_SMMU_GR0_TLBIVMID); 1449 arm_smmu_tlb_inv_context(&smmu_domain->root_cfg);
1421 arm_smmu_tlb_sync(smmu);
1422 return ret ? ret : size; 1450 return ret ? ret : size;
1423} 1451}
1424 1452
@@ -1541,6 +1569,7 @@ static struct iommu_ops arm_smmu_ops = {
1541static void arm_smmu_device_reset(struct arm_smmu_device *smmu) 1569static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1542{ 1570{
1543 void __iomem *gr0_base = ARM_SMMU_GR0(smmu); 1571 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1572 void __iomem *sctlr_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB_SCTLR;
1544 int i = 0; 1573 int i = 0;
1545 u32 scr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0); 1574 u32 scr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0);
1546 1575
@@ -1550,6 +1579,10 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1550 writel_relaxed(S2CR_TYPE_BYPASS, gr0_base + ARM_SMMU_GR0_S2CR(i)); 1579 writel_relaxed(S2CR_TYPE_BYPASS, gr0_base + ARM_SMMU_GR0_S2CR(i));
1551 } 1580 }
1552 1581
1582 /* Make sure all context banks are disabled */
1583 for (i = 0; i < smmu->num_context_banks; ++i)
1584 writel_relaxed(0, sctlr_base + ARM_SMMU_CB(smmu, i));
1585
1553 /* Invalidate the TLB, just in case */ 1586 /* Invalidate the TLB, just in case */
1554 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL); 1587 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL);
1555 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH); 1588 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);