author    Linus Torvalds <torvalds@linux-foundation.org>    2013-09-12 14:29:26 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2013-09-12 14:29:26 -0400
commit    e5d0c874391a500be7643d3eef9fb07171eee129 (patch)
tree      e584dda865c5628fbb8e59a50096a0f4c21bf2bd
parent    d5adf7e2db897f9d4a00be59262875ae5d9574f4 (diff)
parent    d6a60fc1a8187004792a01643d8af1d06a465026 (diff)
Merge tag 'iommu-updates-v3.12' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull IOMMU Updates from Joerg Roedel:
 "This round the updates contain:

   - A new driver for the Freescale PAMU IOMMU from Varun Sethi. This
     driver has cooked for a while and required changes to the IOMMU-API
     and infrastructure that were already merged before.

   - Updates for the ARM-SMMU driver from Will Deacon

   - Various fixes, the most important one is probably a fix from Alex
     Williamson for a memory leak in the VT-d page-table freeing code

  In summary not all that much. The biggest part in the diffstat is the
  new PAMU driver"

* tag 'iommu-updates-v3.12' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  intel-iommu: Fix leaks in pagetable freeing
  iommu/amd: Fix resource leak in iommu_init_device()
  iommu/amd: Clean up unnecessary MSI/MSI-X capability find
  iommu/arm-smmu: Simplify VMID and ASID allocation
  iommu/arm-smmu: Don't use VMIDs for stage-1 translations
  iommu/arm-smmu: Tighten up global fault reporting
  iommu/arm-smmu: Remove broken big-endian check
  iommu/fsl: Remove unnecessary 'fsl-pamu' prefixes
  iommu/fsl: Fix whitespace problems noticed by git-am
  iommu/fsl: Freescale PAMU driver and iommu implementation.
  iommu/fsl: Add additional iommu attributes required by the PAMU driver.
  powerpc: Add iommu domain pointer to device archdata
  iommu/exynos: Remove dead code (set_prefbuf)
-rw-r--r--  arch/powerpc/include/asm/device.h          |    3
-rw-r--r--  arch/powerpc/include/asm/fsl_pamu_stash.h  |   39
-rw-r--r--  arch/powerpc/sysdev/fsl_pci.h              |    5
-rw-r--r--  drivers/iommu/Kconfig                      |   10
-rw-r--r--  drivers/iommu/Makefile                     |    1
-rw-r--r--  drivers/iommu/amd_iommu.c                  |    4
-rw-r--r--  drivers/iommu/amd_iommu_init.c             |    2
-rw-r--r--  drivers/iommu/arm-smmu.c                   |   93
-rw-r--r--  drivers/iommu/exynos-iommu.c               |   44
-rw-r--r--  drivers/iommu/fsl_pamu.c                   | 1309
-rw-r--r--  drivers/iommu/fsl_pamu.h                   |  410
-rw-r--r--  drivers/iommu/fsl_pamu_domain.c            | 1172
-rw-r--r--  drivers/iommu/fsl_pamu_domain.h            |   85
-rw-r--r--  drivers/iommu/intel-iommu.c                |   72
-rw-r--r--  include/linux/iommu.h                      |   16
15 files changed, 3145 insertions, 120 deletions
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index 77e97dd0c15d..38faeded7d59 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -28,6 +28,9 @@ struct dev_archdata {
 		void *iommu_table_base;
 	} dma_data;
 
+#ifdef CONFIG_IOMMU_API
+	void *iommu_domain;
+#endif
 #ifdef CONFIG_SWIOTLB
 	dma_addr_t		max_direct_dma_addr;
 #endif
diff --git a/arch/powerpc/include/asm/fsl_pamu_stash.h b/arch/powerpc/include/asm/fsl_pamu_stash.h
new file mode 100644
index 000000000000..caa1b21c25cd
--- /dev/null
+++ b/arch/powerpc/include/asm/fsl_pamu_stash.h
@@ -0,0 +1,39 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright (C) 2013 Freescale Semiconductor, Inc.
16 *
17 */
18
19#ifndef __FSL_PAMU_STASH_H
20#define __FSL_PAMU_STASH_H
21
22/* cache stash targets */
23enum pamu_stash_target {
24 PAMU_ATTR_CACHE_L1 = 1,
25 PAMU_ATTR_CACHE_L2,
26 PAMU_ATTR_CACHE_L3,
27};
28
29/*
30 * This attribute allows configuring stashing-specific parameters
31 * in the PAMU hardware.
32 */
33
34struct pamu_stash_attribute {
35 u32 cpu; /* cpu number */
36 u32 cache; /* cache to stash to: L1,L2,L3 */
37};
38
39#endif /* __FSL_PAMU_STASH_H */
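
The stash attribute above is consumed through the generic IOMMU attribute interface; the include/linux/iommu.h hunk in this series adds DOMAIN_ATTR_FSL_PAMU_STASH for exactly this purpose. A minimal usage sketch follows; the function name and the cpu/cache values are hypothetical:

	#include <linux/iommu.h>
	#include <asm/fsl_pamu_stash.h>

	/* Direct a domain's DMA traffic into CPU 0's L1 cache. */
	static int example_enable_stashing(struct iommu_domain *dom)
	{
		struct pamu_stash_attribute stash = {
			.cpu	= 0,
			.cache	= PAMU_ATTR_CACHE_L1,
		};

		return iommu_domain_set_attr(dom, DOMAIN_ATTR_FSL_PAMU_STASH,
					     &stash);
	}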
diff --git a/arch/powerpc/sysdev/fsl_pci.h b/arch/powerpc/sysdev/fsl_pci.h
index defc422a375f..8d455df58471 100644
--- a/arch/powerpc/sysdev/fsl_pci.h
+++ b/arch/powerpc/sysdev/fsl_pci.h
@@ -16,6 +16,11 @@
 
 struct platform_device;
 
+
+/* FSL PCI controller BRR1 register */
+#define PCI_FSL_BRR1		0xbf8
+#define PCI_FSL_BRR1_VER	0xffff
+
 #define PCIE_LTSSM	0x0404		/* PCIE Link Training and Status */
 #define PCIE_LTSSM_L0	0x16		/* L0 state */
 #define PCIE_IP_REV_2_2	0x02080202	/* PCIE IP block version Rev2.2 */
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 820d85c4a4a0..fe302e33f72e 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -17,6 +17,16 @@ config OF_IOMMU
 	def_bool y
 	depends on OF
 
+config FSL_PAMU
+	bool "Freescale IOMMU support"
+	depends on PPC_E500MC
+	select IOMMU_API
+	select GENERIC_ALLOCATOR
+	help
+	  Freescale PAMU support. PAMU is the IOMMU present on Freescale QorIQ platforms.
+	  PAMU can authorize memory access, remap the memory address, and remap I/O
+	  transaction types.
+
 # MSM IOMMU support
 config MSM_IOMMU
 	bool "MSM IOMMU Support"
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index bbe7041212dd..14c1f474cf11 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -16,3 +16,4 @@ obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
 obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
 obj-$(CONFIG_SHMOBILE_IOMMU) += shmobile-iommu.o
 obj-$(CONFIG_SHMOBILE_IPMMU) += shmobile-ipmmu.o
+obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 6dc659426a51..72531f008a5e 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -456,8 +456,10 @@ static int iommu_init_device(struct device *dev)
 	}
 
 	ret = init_iommu_group(dev);
-	if (ret)
+	if (ret) {
+		free_dev_data(dev_data);
 		return ret;
+	}
 
 	if (pci_iommuv2_capable(pdev)) {
 		struct amd_iommu *iommu;
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 7acbf351e9af..8f798be6e398 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1384,7 +1384,7 @@ static int iommu_init_msi(struct amd_iommu *iommu)
 	if (iommu->int_enabled)
 		goto enable_faults;
 
-	if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
+	if (iommu->dev->msi_cap)
 		ret = iommu_setup_msi(iommu);
 	else
 		ret = -ENODEV;
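
The capability-find cleanup above works because the PCI core caches the MSI capability offset in struct pci_dev at enumeration time, so a fresh config-space walk is redundant. A sketch of the equivalence (illustrative, not part of the patch):

	/* pdev->msi_cap is filled in during enumeration; it is non-zero
	 * exactly when pci_find_capability(pdev, PCI_CAP_ID_MSI) would
	 * return a non-zero offset. */
	static bool example_has_msi(struct pci_dev *pdev)
	{
		return pdev->msi_cap != 0;
	}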
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index ebd0a4cff049..f417e89e1e7e 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -56,9 +56,6 @@
 /* Maximum number of mapping groups per SMMU */
 #define ARM_SMMU_MAX_SMRS		128
 
-/* Number of VMIDs per SMMU */
-#define ARM_SMMU_NUM_VMIDS		256
-
 /* SMMU global address space */
 #define ARM_SMMU_GR0(smmu)		((smmu)->base)
 #define ARM_SMMU_GR1(smmu)		((smmu)->base + (smmu)->pagesize)
@@ -87,6 +84,7 @@
 #define ARM_SMMU_PTE_AP_UNPRIV		(((pteval_t)1) << 6)
 #define ARM_SMMU_PTE_AP_RDONLY		(((pteval_t)2) << 6)
 #define ARM_SMMU_PTE_ATTRINDX_SHIFT	2
+#define ARM_SMMU_PTE_nG			(((pteval_t)1) << 11)
 
 /* Stage-2 PTE */
 #define ARM_SMMU_PTE_HAP_FAULT		(((pteval_t)0) << 6)
@@ -223,6 +221,7 @@
 #define ARM_SMMU_CB_FAR_LO		0x60
 #define ARM_SMMU_CB_FAR_HI		0x64
 #define ARM_SMMU_CB_FSYNR0		0x68
+#define ARM_SMMU_CB_S1_TLBIASID		0x610
 
 #define SCTLR_S1_ASIDPNE		(1 << 12)
 #define SCTLR_CFCFG			(1 << 7)
@@ -282,6 +281,8 @@
 #define TTBCR2_ADDR_44			4
 #define TTBCR2_ADDR_48			5
 
+#define TTBRn_HI_ASID_SHIFT		16
+
 #define MAIR_ATTR_SHIFT(n)		((n) << 3)
 #define MAIR_ATTR_MASK			0xff
 #define MAIR_ATTR_DEVICE		0x04
@@ -305,7 +306,7 @@
 #define FSR_IGN				(FSR_AFF | FSR_ASF | FSR_TLBMCF |	\
 					 FSR_TLBLKF)
 #define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT |		\
-					 FSR_EF | FSR_PF | FSR_TF)
+					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
 
 #define FSYNR0_WNR			(1 << 4)
 
@@ -365,21 +366,21 @@ struct arm_smmu_device {
 	u32				num_context_irqs;
 	unsigned int			*irqs;
 
-	DECLARE_BITMAP(vmid_map, ARM_SMMU_NUM_VMIDS);
-
 	struct list_head		list;
 	struct rb_root			masters;
 };
 
 struct arm_smmu_cfg {
 	struct arm_smmu_device		*smmu;
-	u8				vmid;
 	u8				cbndx;
 	u8				irptndx;
 	u32				cbar;
 	pgd_t				*pgd;
 };
 
+#define ARM_SMMU_CB_ASID(cfg)		((cfg)->cbndx)
+#define ARM_SMMU_CB_VMID(cfg)		((cfg)->cbndx + 1)
+
 struct arm_smmu_domain {
 	/*
 	 * A domain can span across multiple, chained SMMUs and requires
@@ -533,6 +534,25 @@ static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
 	}
 }
 
+static void arm_smmu_tlb_inv_context(struct arm_smmu_cfg *cfg)
+{
+	struct arm_smmu_device *smmu = cfg->smmu;
+	void __iomem *base = ARM_SMMU_GR0(smmu);
+	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
+
+	if (stage1) {
+		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+		writel_relaxed(ARM_SMMU_CB_ASID(cfg),
+			       base + ARM_SMMU_CB_S1_TLBIASID);
+	} else {
+		base = ARM_SMMU_GR0(smmu);
+		writel_relaxed(ARM_SMMU_CB_VMID(cfg),
+			       base + ARM_SMMU_GR0_TLBIVMID);
+	}
+
+	arm_smmu_tlb_sync(smmu);
+}
+
 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 {
 	int flags, ret;
@@ -590,6 +610,9 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
 	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
 
 	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
+	if (!gfsr)
+		return IRQ_NONE;
+
 	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
 	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
 	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
@@ -601,7 +624,7 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
 		gfsr, gfsynr0, gfsynr1, gfsynr2);
 
 	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
-	return IRQ_NONE;
+	return IRQ_HANDLED;
 }
 
 static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
@@ -618,14 +641,15 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
 	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx);
 
 	/* CBAR */
-	reg = root_cfg->cbar |
-	      (root_cfg->vmid << CBAR_VMID_SHIFT);
+	reg = root_cfg->cbar;
 	if (smmu->version == 1)
 		reg |= root_cfg->irptndx << CBAR_IRPTNDX_SHIFT;
 
 	/* Use the weakest memory type, so it is overridden by the pte */
 	if (stage1)
 		reg |= (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
+	else
+		reg |= ARM_SMMU_CB_VMID(root_cfg) << CBAR_VMID_SHIFT;
 	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(root_cfg->cbndx));
 
 	if (smmu->version > 1) {
@@ -687,15 +711,11 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
 
 	/* TTBR0 */
 	reg = __pa(root_cfg->pgd);
-#ifndef __BIG_ENDIAN
 	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
 	reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32;
+	if (stage1)
+		reg |= ARM_SMMU_CB_ASID(root_cfg) << TTBRn_HI_ASID_SHIFT;
 	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
-#else
-	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
-	reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32;
-	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
-#endif
 
 	/*
 	 * TTBCR
@@ -750,10 +770,6 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
 		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
 	}
 
-	/* Nuke the TLB */
-	writel_relaxed(root_cfg->vmid, gr0_base + ARM_SMMU_GR0_TLBIVMID);
-	arm_smmu_tlb_sync(smmu);
-
 	/* SCTLR */
 	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
 	if (stage1)
@@ -790,11 +806,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 		return -ENODEV;
 	}
 
-	ret = __arm_smmu_alloc_bitmap(smmu->vmid_map, 0, ARM_SMMU_NUM_VMIDS);
-	if (IS_ERR_VALUE(ret))
-		return ret;
-
-	root_cfg->vmid = ret;
 	if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
 		/*
 		 * We will likely want to change this if/when KVM gets
@@ -813,10 +824,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
 				      smmu->num_context_banks);
 	if (IS_ERR_VALUE(ret))
-		goto out_free_vmid;
+		return ret;
 
 	root_cfg->cbndx = ret;
-
 	if (smmu->version == 1) {
 		root_cfg->irptndx = atomic_inc_return(&smmu->irptndx);
 		root_cfg->irptndx %= smmu->num_context_irqs;
@@ -840,8 +850,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 
 out_free_context:
 	__arm_smmu_free_bitmap(smmu->context_map, root_cfg->cbndx);
-out_free_vmid:
-	__arm_smmu_free_bitmap(smmu->vmid_map, root_cfg->vmid);
 	return ret;
 }
 
@@ -850,17 +858,22 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
 	struct arm_smmu_domain *smmu_domain = domain->priv;
 	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
 	struct arm_smmu_device *smmu = root_cfg->smmu;
+	void __iomem *cb_base;
 	int irq;
 
 	if (!smmu)
 		return;
 
+	/* Disable the context bank and nuke the TLB before freeing it. */
+	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx);
+	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
+	arm_smmu_tlb_inv_context(root_cfg);
+
 	if (root_cfg->irptndx != -1) {
 		irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx];
 		free_irq(irq, domain);
 	}
 
-	__arm_smmu_free_bitmap(smmu->vmid_map, root_cfg->vmid);
 	__arm_smmu_free_bitmap(smmu->context_map, root_cfg->cbndx);
 }
 
@@ -959,6 +972,11 @@ static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain)
 static void arm_smmu_domain_destroy(struct iommu_domain *domain)
 {
 	struct arm_smmu_domain *smmu_domain = domain->priv;
+
+	/*
+	 * Free the domain resources. We assume that all devices have
+	 * already been detached.
+	 */
 	arm_smmu_destroy_domain_context(domain);
 	arm_smmu_free_pgtables(smmu_domain);
 	kfree(smmu_domain);
@@ -1199,7 +1217,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
 	}
 
 	if (stage == 1) {
-		pteval |= ARM_SMMU_PTE_AP_UNPRIV;
+		pteval |= ARM_SMMU_PTE_AP_UNPRIV | ARM_SMMU_PTE_nG;
 		if (!(flags & IOMMU_WRITE) && (flags & IOMMU_READ))
 			pteval |= ARM_SMMU_PTE_AP_RDONLY;
 
@@ -1415,13 +1433,9 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
 {
 	int ret;
 	struct arm_smmu_domain *smmu_domain = domain->priv;
-	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
-	struct arm_smmu_device *smmu = root_cfg->smmu;
-	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
 
 	ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0);
-	writel_relaxed(root_cfg->vmid, gr0_base + ARM_SMMU_GR0_TLBIVMID);
-	arm_smmu_tlb_sync(smmu);
+	arm_smmu_tlb_inv_context(&smmu_domain->root_cfg);
 	return ret ? ret : size;
 }
 
@@ -1544,6 +1558,7 @@ static struct iommu_ops arm_smmu_ops = {
 static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 {
 	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+	void __iomem *sctlr_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB_SCTLR;
 	int i = 0;
 	u32 scr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0);
 
@@ -1553,6 +1568,10 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 		writel_relaxed(S2CR_TYPE_BYPASS, gr0_base + ARM_SMMU_GR0_S2CR(i));
 	}
 
+	/* Make sure all context banks are disabled */
+	for (i = 0; i < smmu->num_context_banks; ++i)
+		writel_relaxed(0, sctlr_base + ARM_SMMU_CB(smmu, i));
+
	/* Invalidate the TLB, just in case */
 	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL);
 	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
@@ -1906,7 +1925,7 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
 		of_node_put(master->of_node);
 	}
 
-	if (!bitmap_empty(smmu->vmid_map, ARM_SMMU_NUM_VMIDS))
+	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
 		dev_err(dev, "removing device with active domains!\n");
 
 	for (i = 0; i < smmu->num_global_irqs; ++i)
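
(Editorial aside on the hunks above, not part of the patch: the per-SMMU vmid_map is gone, and both TLB tags are now derived from the context-bank index — ASID = cbndx for stage 1, VMID = cbndx + 1 for stage 2 — so an identifier lives exactly as long as its context bank. A trivial sketch of the invariant:

	/* cbndx is bounded by ARM_SMMU_MAX_CBS, small enough that
	 * cbndx + 1 fits in a u8, and VMID 0 is never handed to a
	 * translation context. */
	static inline u8 example_cb_vmid(u8 cbndx)
	{
		return cbndx + 1;
	}
)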
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 3f32d64ab87a..074018979cdf 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -247,50 +247,6 @@ static void __sysmmu_set_prefbuf(void __iomem *sfrbase, unsigned long base,
 	__raw_writel(size - 1 + base, sfrbase + REG_PB0_EADDR + idx * 8);
 }
 
-void exynos_sysmmu_set_prefbuf(struct device *dev,
-				unsigned long base0, unsigned long size0,
-				unsigned long base1, unsigned long size1)
-{
-	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
-	unsigned long flags;
-	int i;
-
-	BUG_ON((base0 + size0) <= base0);
-	BUG_ON((size1 > 0) && ((base1 + size1) <= base1));
-
-	read_lock_irqsave(&data->lock, flags);
-	if (!is_sysmmu_active(data))
-		goto finish;
-
-	for (i = 0; i < data->nsfrs; i++) {
-		if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
-			if (!sysmmu_block(data->sfrbases[i]))
-				continue;
-
-			if (size1 == 0) {
-				if (size0 <= SZ_128K) {
-					base1 = base0;
-					size1 = size0;
-				} else {
-					size1 = size0 -
-						ALIGN(size0 / 2, SZ_64K);
-					size0 = size0 - size1;
-					base1 = base0 + size0;
-				}
-			}
-
-			__sysmmu_set_prefbuf(
-					data->sfrbases[i], base0, size0, 0);
-			__sysmmu_set_prefbuf(
-					data->sfrbases[i], base1, size1, 1);
-
-			sysmmu_unblock(data->sfrbases[i]);
-		}
-	}
-finish:
-	read_unlock_irqrestore(&data->lock, flags);
-}
-
 static void __set_fault_handler(struct sysmmu_drvdata *data,
 				sysmmu_fault_handler_t handler)
 {
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
new file mode 100644
index 000000000000..cba0498eb011
--- /dev/null
+++ b/drivers/iommu/fsl_pamu.c
@@ -0,0 +1,1309 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright (C) 2013 Freescale Semiconductor, Inc.
16 *
17 */
18
19#define pr_fmt(fmt) "fsl-pamu: %s: " fmt, __func__
20
21#include <linux/init.h>
22#include <linux/iommu.h>
23#include <linux/slab.h>
24#include <linux/module.h>
25#include <linux/types.h>
26#include <linux/mm.h>
27#include <linux/interrupt.h>
28#include <linux/device.h>
29#include <linux/of_platform.h>
30#include <linux/bootmem.h>
31#include <linux/genalloc.h>
32#include <asm/io.h>
33#include <asm/bitops.h>
34#include <asm/fsl_guts.h>
35
36#include "fsl_pamu.h"
37
38/* define indexes for each operation mapping scenario */
39#define OMI_QMAN 0x00
40#define OMI_FMAN 0x01
41#define OMI_QMAN_PRIV 0x02
42#define OMI_CAAM 0x03
43
44#define make64(high, low) (((u64)(high) << 32) | (low))
45
46struct pamu_isr_data {
47 void __iomem *pamu_reg_base; /* Base address of PAMU regs*/
48 unsigned int count; /* The number of PAMUs */
49};
50
51static struct paace *ppaact;
52static struct paace *spaact;
53static struct ome *omt;
54
55/*
56 * Table for matching compatible strings, for device tree
57 * guts node, for QorIQ SOCs.
58 * "fsl,qoriq-device-config-2.0" corresponds to T4 & B4
59 * SOCs. For the older SOCs "fsl,qoriq-device-config-1.0"
60 * string would be used.
61*/
62static const struct of_device_id guts_device_ids[] = {
63 { .compatible = "fsl,qoriq-device-config-1.0", },
64 { .compatible = "fsl,qoriq-device-config-2.0", },
65 {}
66};
67
68
69/*
70 * Table for matching compatible strings, for device tree
71 * L3 cache controller node.
72 * "fsl,t4240-l3-cache-controller" corresponds to T4,
73 * "fsl,b4860-l3-cache-controller" corresponds to B4 &
74 * "fsl,p4080-l3-cache-controller" corresponds to other,
75 * SOCs.
76*/
77static const struct of_device_id l3_device_ids[] = {
78 { .compatible = "fsl,t4240-l3-cache-controller", },
79 { .compatible = "fsl,b4860-l3-cache-controller", },
80 { .compatible = "fsl,p4080-l3-cache-controller", },
81 {}
82};
83
84/* maximum subwindows permitted per liodn */
85static u32 max_subwindow_count;
86
87/* Pool for fspi allocation */
88struct gen_pool *spaace_pool;
89
90/**
91 * pamu_get_max_subwin_cnt() - Return the maximum supported
92 * subwindow count per liodn.
93 *
94 */
95u32 pamu_get_max_subwin_cnt(void)
96{
97 return max_subwindow_count;
98}
99
100/**
101 * pamu_get_ppaace() - Return the primary PAACE
102 * @liodn: liodn PAACT index for desired PAACE
103 *
104 * Returns the ppaace pointer upon success, else returns
105 * null.
106 */
107static struct paace *pamu_get_ppaace(int liodn)
108{
109 if (!ppaact || liodn >= PAACE_NUMBER_ENTRIES) {
110 pr_debug("PPAACT doesn't exist\n");
111 return NULL;
112 }
113
114 return &ppaact[liodn];
115}
116
117/**
118 * pamu_enable_liodn() - Set valid bit of PAACE
119 * @liodn: liodn PAACT index for desired PAACE
120 *
121 * Returns 0 upon success, else an error code < 0
122 */
123int pamu_enable_liodn(int liodn)
124{
125 struct paace *ppaace;
126
127 ppaace = pamu_get_ppaace(liodn);
128 if (!ppaace) {
129 pr_debug("Invalid primary paace entry\n");
130 return -ENOENT;
131 }
132
133 if (!get_bf(ppaace->addr_bitfields, PPAACE_AF_WSE)) {
134 pr_debug("liodn %d not configured\n", liodn);
135 return -EINVAL;
136 }
137
138 /* Ensure that all other stores to the ppaace complete first */
139 mb();
140
141 set_bf(ppaace->addr_bitfields, PAACE_AF_V, PAACE_V_VALID);
142 mb();
143
144 return 0;
145}
146
147/**
148 * pamu_disable_liodn() - Clears valid bit of PAACE
149 * @liodn: liodn PAACT index for desired PAACE
150 *
151 * Returns 0 upon success, else an error code < 0
152 */
153int pamu_disable_liodn(int liodn)
154{
155 struct paace *ppaace;
156
157 ppaace = pamu_get_ppaace(liodn);
158 if (!ppaace) {
159 pr_debug("Invalid primary paace entry\n");
160 return -ENOENT;
161 }
162
163 set_bf(ppaace->addr_bitfields, PAACE_AF_V, PAACE_V_INVALID);
164 mb();
165
166 return 0;
167}
168
169/* Derive the window size encoding for a particular PAACE entry */
170static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
171{
172 /* Bug if not a power of 2 */
173 BUG_ON(!is_power_of_2(addrspace_size));
174
175 /* window size is 2^(WSE+1) bytes */
176 return __ffs(addrspace_size) - 1;
177}
178
179/* Derive the PAACE window count encoding for the subwindow count */
180static unsigned int map_subwindow_cnt_to_wce(u32 subwindow_cnt)
181{
182 /* window count is 2^(WCE+1) */
183 return __ffs(subwindow_cnt) - 1;
184}
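/* Worked example for the two encodings above (editorial aside, not part
 * of the patch): a 4 KiB window gives __ffs(0x1000) = 12, so WSE = 11
 * and the size decodes back to 2^(11+1) = 4096 bytes; likewise 256
 * subwindows give __ffs(256) = 8, so WCE = 7 and 2^(7+1) = 256.
 */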
185
186/*
187 * Set the PAACE type as primary and set the coherency required domain
188 * attribute
189 */
190static void pamu_init_ppaace(struct paace *ppaace)
191{
192 set_bf(ppaace->addr_bitfields, PAACE_AF_PT, PAACE_PT_PRIMARY);
193
194 set_bf(ppaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
195 PAACE_M_COHERENCE_REQ);
196}
197
198/*
199 * Set the PAACE type as secondary and set the coherency required domain
200 * attribute.
201 */
202static void pamu_init_spaace(struct paace *spaace)
203{
204 set_bf(spaace->addr_bitfields, PAACE_AF_PT, PAACE_PT_SECONDARY);
205 set_bf(spaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
206 PAACE_M_COHERENCE_REQ);
207}
208
209/*
210 * Return the spaace (corresponding to the secondary window index)
211 * for a particular ppaace.
212 */
213static struct paace *pamu_get_spaace(struct paace *paace, u32 wnum)
214{
215 u32 subwin_cnt;
216 struct paace *spaace = NULL;
217
218 subwin_cnt = 1UL << (get_bf(paace->impl_attr, PAACE_IA_WCE) + 1);
219
220 if (wnum < subwin_cnt)
221 spaace = &spaact[paace->fspi + wnum];
222 else
223 pr_debug("secondary paace out of bounds\n");
224
225 return spaace;
226}
227
228/**
229 * pamu_get_fspi_and_allocate() - Allocates fspi index and reserves subwindows
230 * required for primary PAACE in the secondary
231 * PAACE table.
232 * @subwin_cnt: Number of subwindows to be reserved.
233 *
234 * A PPAACE entry may have a number of associated subwindows. A subwindow
235 * corresponds to a SPAACE entry in the SPAACT table. Each PAACE entry stores
236 * the index (fspi) of the first SPAACE entry in the SPAACT table. This
237 * function returns the index of the first SPAACE entry. The remaining
238 * SPAACE entries are reserved contiguously from that index.
239 *
240 * Returns a valid fspi index in the range of 0 - SPAACE_NUMBER_ENTRIES on success.
241 * If no SPAACE entry is available or the allocator cannot reserve the required
242 * number of contiguous entries, the function returns ULONG_MAX indicating failure.
243 *
244*/
245static unsigned long pamu_get_fspi_and_allocate(u32 subwin_cnt)
246{
247 unsigned long spaace_addr;
248
249 spaace_addr = gen_pool_alloc(spaace_pool, subwin_cnt * sizeof(struct paace));
250 if (!spaace_addr)
251 return ULONG_MAX;
252
253 return (spaace_addr - (unsigned long)spaact) / (sizeof(struct paace));
254}
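/* Usage sketch for the allocator above (hypothetical values, editorial
 * aside): reserving 255 subwindows pulls 255 * sizeof(struct paace)
 * bytes out of spaace_pool and converts the block's address back into
 * an SPAACT index:
 *
 *	unsigned long fspi = pamu_get_fspi_and_allocate(255);
 *
 *	if (fspi != ULONG_MAX)
 *		spaact[fspi] ... spaact[fspi + 254] now belong to the LIODN
 */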
255
256/* Release the subwindows reserved for a particular LIODN */
257void pamu_free_subwins(int liodn)
258{
259 struct paace *ppaace;
260 u32 subwin_cnt, size;
261
262 ppaace = pamu_get_ppaace(liodn);
263 if (!ppaace) {
264 pr_debug("Invalid liodn entry\n");
265 return;
266 }
267
268 if (get_bf(ppaace->addr_bitfields, PPAACE_AF_MW)) {
269 subwin_cnt = 1UL << (get_bf(ppaace->impl_attr, PAACE_IA_WCE) + 1);
270 size = (subwin_cnt - 1) * sizeof(struct paace);
271 gen_pool_free(spaace_pool, (unsigned long)&spaact[ppaace->fspi], size);
272 set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0);
273 }
274}
275
276/*
277 * Function used for updating the stash destination for the corresponding
278 * LIODN.
279 */
280int pamu_update_paace_stash(int liodn, u32 subwin, u32 value)
281{
282 struct paace *paace;
283
284 paace = pamu_get_ppaace(liodn);
285 if (!paace) {
286 pr_debug("Invalid liodn entry\n");
287 return -ENOENT;
288 }
289 if (subwin) {
290 paace = pamu_get_spaace(paace, subwin - 1);
291 if (!paace) {
292 return -ENOENT;
293 }
294 }
295 set_bf(paace->impl_attr, PAACE_IA_CID, value);
296
297 mb();
298
299 return 0;
300}
301
302/* Disable a subwindow corresponding to the LIODN */
303int pamu_disable_spaace(int liodn, u32 subwin)
304{
305 struct paace *paace;
306
307 paace = pamu_get_ppaace(liodn);
308 if (!paace) {
309 pr_debug("Invalid liodn entry\n");
310 return -ENOENT;
311 }
312 if (subwin) {
313 paace = pamu_get_spaace(paace, subwin - 1);
314 if (!paace) {
315 return -ENOENT;
316 }
317 set_bf(paace->addr_bitfields, PAACE_AF_V,
318 PAACE_V_INVALID);
319 } else {
320 set_bf(paace->addr_bitfields, PAACE_AF_AP,
321 PAACE_AP_PERMS_DENIED);
322 }
323
324 mb();
325
326 return 0;
327}
328
329
330/**
331 * pamu_config_ppaace() - Sets up PPAACE entry for specified liodn
332 *
333 * @liodn: Logical IO device number
334 * @win_addr: starting address of DSA window
335 * @win_size: size of DSA window
336 * @omi: Operation mapping index -- if ~omi == 0 then omi not defined
337 * @rpn: real (true physical) page number
338 * @stashid: cache stash id for associated cpu -- if ~stashid == 0 then
339 * stashid not defined
340 * @snoopid: snoop id for hardware coherency -- if ~snoopid == 0 then
341 * snoopid not defined
342 * @subwin_cnt: number of sub-windows
343 * @prot: window permissions
344 *
345 * Returns 0 upon success, else an error code < 0
346 */
347int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
348 u32 omi, unsigned long rpn, u32 snoopid, u32 stashid,
349 u32 subwin_cnt, int prot)
350{
351 struct paace *ppaace;
352 unsigned long fspi;
353
354 if (!is_power_of_2(win_size) || win_size < PAMU_PAGE_SIZE) {
355 pr_debug("window size too small or not a power of two %llx\n", win_size);
356 return -EINVAL;
357 }
358
359 if (win_addr & (win_size - 1)) {
360 pr_debug("window address is not aligned with window size\n");
361 return -EINVAL;
362 }
363
364 ppaace = pamu_get_ppaace(liodn);
365 if (!ppaace) {
366 return -ENOENT;
367 }
368
369 /* window size is 2^(WSE+1) bytes */
370 set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE,
371 map_addrspace_size_to_wse(win_size));
372
373 pamu_init_ppaace(ppaace);
374
375 ppaace->wbah = win_addr >> (PAMU_PAGE_SHIFT + 20);
376 set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL,
377 (win_addr >> PAMU_PAGE_SHIFT));
378
379 /* set up operation mapping if it's configured */
380 if (omi < OME_NUMBER_ENTRIES) {
381 set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
382 ppaace->op_encode.index_ot.omi = omi;
383 } else if (~omi != 0) {
384 pr_debug("bad operation mapping index: %d\n", omi);
385 return -EINVAL;
386 }
387
388 /* configure stash id */
389 if (~stashid != 0)
390 set_bf(ppaace->impl_attr, PAACE_IA_CID, stashid);
391
392 /* configure snoop id */
393 if (~snoopid != 0)
394 ppaace->domain_attr.to_host.snpid = snoopid;
395
396 if (subwin_cnt) {
397 /* The first entry is in the primary PAACE instead */
398 fspi = pamu_get_fspi_and_allocate(subwin_cnt - 1);
399 if (fspi == ULONG_MAX) {
400 pr_debug("spaace indexes exhausted\n");
401 return -EINVAL;
402 }
403
404 /* window count is 2^(WCE+1) */
405 set_bf(ppaace->impl_attr, PAACE_IA_WCE,
406 map_subwindow_cnt_to_wce(subwin_cnt));
407 set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0x1);
408 ppaace->fspi = fspi;
409 } else {
410 set_bf(ppaace->impl_attr, PAACE_IA_ATM, PAACE_ATM_WINDOW_XLATE);
411 ppaace->twbah = rpn >> 20;
412 set_bf(ppaace->win_bitfields, PAACE_WIN_TWBAL, rpn);
413 set_bf(ppaace->addr_bitfields, PAACE_AF_AP, prot);
414 set_bf(ppaace->impl_attr, PAACE_IA_WCE, 0);
415 set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0);
416 }
417 mb();
418
419 return 0;
420}
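/* Usage sketch (hypothetical values, editorial aside): program LIODN 23
 * with a single 1 GiB window at bus address 0, translated 1:1 (rpn 0),
 * full permissions and no subwindows, leaving omi/snoopid/stashid at ~0,
 * i.e. "not defined" per the conventions documented above:
 *
 *	pamu_config_ppaace(23, 0, 1ULL << 30, ~(u32)0, 0,
 *			   ~(u32)0, ~(u32)0, 0, PAACE_AP_PERMS_ALL);
 */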
421
422/**
423 * pamu_config_spaace() - Sets up SPAACE entry for specified subwindow
424 *
425 * @liodn: Logical IO device number
426 * @subwin_cnt: number of sub-windows associated with dma-window
427 * @subwin: subwindow index
428 * @subwin_size: size of subwindow
429 * @omi: Operation mapping index
430 * @rpn: real (true physical) page number
431 * @snoopid: snoop id for hardware coherency -- if ~snoopid == 0 then
432 * snoopid not defined
433 * @stashid: cache stash id for associated cpu
434 * @enable: enable/disable subwindow after reconfiguration
435 * @prot: sub window permissions
436 *
437 * Returns 0 upon success, else an error code < 0
438 */
439int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin,
440 phys_addr_t subwin_size, u32 omi, unsigned long rpn,
441 u32 snoopid, u32 stashid, int enable, int prot)
442{
443 struct paace *paace;
444
445
446 /* setup sub-windows */
447 if (!subwin_cnt) {
448 pr_debug("Invalid subwindow count\n");
449 return -EINVAL;
450 }
451
452 paace = pamu_get_ppaace(liodn);
453 if (subwin > 0 && subwin < subwin_cnt && paace) {
454 paace = pamu_get_spaace(paace, subwin - 1);
455
456 if (paace && !(paace->addr_bitfields & PAACE_V_VALID)) {
457 pamu_init_spaace(paace);
458 set_bf(paace->addr_bitfields, SPAACE_AF_LIODN, liodn);
459 }
460 }
461
462 if (!paace) {
463 pr_debug("Invalid liodn entry\n");
464 return -ENOENT;
465 }
466
467 if (!is_power_of_2(subwin_size) || subwin_size < PAMU_PAGE_SIZE) {
468 pr_debug("subwindow size out of range, or not a power of 2\n");
469 return -EINVAL;
470 }
471
472 if (rpn == ULONG_MAX) {
473 pr_debug("real page number out of range\n");
474 return -EINVAL;
475 }
476
477 /* window size is 2^(WSE+1) bytes */
478 set_bf(paace->win_bitfields, PAACE_WIN_SWSE,
479 map_addrspace_size_to_wse(subwin_size));
480
481 set_bf(paace->impl_attr, PAACE_IA_ATM, PAACE_ATM_WINDOW_XLATE);
482 paace->twbah = rpn >> 20;
483 set_bf(paace->win_bitfields, PAACE_WIN_TWBAL, rpn);
484 set_bf(paace->addr_bitfields, PAACE_AF_AP, prot);
485
486 /* configure snoop id */
487 if (~snoopid != 0)
488 paace->domain_attr.to_host.snpid = snoopid;
489
490 /* set up operation mapping if it's configured */
491 if (omi < OME_NUMBER_ENTRIES) {
492 set_bf(paace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
493 paace->op_encode.index_ot.omi = omi;
494 } else if (~omi != 0) {
495 pr_debug("bad operation mapping index: %d\n", omi);
496 return -EINVAL;
497 }
498
499 if (~stashid != 0)
500 set_bf(paace->impl_attr, PAACE_IA_CID, stashid);
501
502 smp_wmb();
503
504 if (enable)
505 set_bf(paace->addr_bitfields, PAACE_AF_V, PAACE_V_VALID);
506
507 mb();
508
509 return 0;
510}
511
512/**
513 * get_ome_index() - Returns the index in the operation mapping table
514 * for device.
515 * @omi_index: pointer for storing the index value
516 * @dev: target device
517 */
518void get_ome_index(u32 *omi_index, struct device *dev)
519{
520 if (of_device_is_compatible(dev->of_node, "fsl,qman-portal"))
521 *omi_index = OMI_QMAN;
522 if (of_device_is_compatible(dev->of_node, "fsl,qman"))
523 *omi_index = OMI_QMAN_PRIV;
524}
525
526/**
527 * get_stash_id - Returns stash destination id corresponding to a
528 * cache type and vcpu.
529 * @stash_dest_hint: L1, L2 or L3
530 * @vcpu: vcpu target for a particular cache type.
531 *
532 * Returns stash id on success or ~(u32)0 on failure.
533 *
534 */
535u32 get_stash_id(u32 stash_dest_hint, u32 vcpu)
536{
537 const u32 *prop;
538 struct device_node *node;
539 u32 cache_level;
540 int len, found = 0;
541 int i;
542
543 /* Fastpath, exit early if L3/CPC cache is target for stashing */
544 if (stash_dest_hint == PAMU_ATTR_CACHE_L3) {
545 node = of_find_matching_node(NULL, l3_device_ids);
546 if (node) {
547 prop = of_get_property(node, "cache-stash-id", 0);
548 if (!prop) {
549 pr_debug("missing cache-stash-id at %s\n", node->full_name);
550 of_node_put(node);
551 return ~(u32)0;
552 }
553 of_node_put(node);
554 return be32_to_cpup(prop);
555 }
556 return ~(u32)0;
557 }
558
559 for_each_node_by_type(node, "cpu") {
560 prop = of_get_property(node, "reg", &len);
561 for (i = 0; i < len / sizeof(u32); i++) {
562 if (be32_to_cpup(&prop[i]) == vcpu) {
563 found = 1;
564 goto found_cpu_node;
565 }
566 }
567 }
568found_cpu_node:
569
570 /* find the hwnode that represents the cache */
571 for (cache_level = PAMU_ATTR_CACHE_L1; (cache_level < PAMU_ATTR_CACHE_L3) && found; cache_level++) {
572 if (stash_dest_hint == cache_level) {
573 prop = of_get_property(node, "cache-stash-id", 0);
574 if (!prop) {
575 pr_debug("missing cache-stash-id at %s\n", node->full_name);
576 of_node_put(node);
577 return ~(u32)0;
578 }
579 of_node_put(node);
580 return be32_to_cpup(prop);
581 }
582
583 prop = of_get_property(node, "next-level-cache", 0);
584 if (!prop) {
585 pr_debug("can't find next-level-cache at %s\n",
586 node->full_name);
587 of_node_put(node);
588 return ~(u32)0; /* can't traverse any further */
589 }
590 of_node_put(node);
591
592 /* advance to next node in cache hierarchy */
593 node = of_find_node_by_phandle(*prop);
594 if (!node) {
595 pr_debug("Invalid node for cache hierarchy %s\n",
596 node->full_name);
597 return ~(u32)0;
598 }
599 }
600
601 pr_debug("stash dest not found for %d on vcpu %d\n",
602 stash_dest_hint, vcpu);
603 return ~(u32)0;
604}
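/* The walk above expects the device tree to describe the cache
 * hierarchy roughly as follows (an illustrative fragment; labels and
 * stash-id values are hypothetical, not taken from the patch):
 *
 *	cpu@0 {
 *		reg = <0 1>;
 *		cache-stash-id = <1>;		(L1 stash target)
 *		next-level-cache = <&L2_0>;
 *	};
 *	L2_0: l2-cache {
 *		cache-stash-id = <9>;
 *		next-level-cache = <&cpc>;	(L3/CPC, matched via
 *						 l3_device_ids above)
 *	};
 */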
605
606/* Identify if the PAACT table entry belongs to QMAN, BMAN or QMAN Portal */
607#define QMAN_PAACE 1
608#define QMAN_PORTAL_PAACE 2
609#define BMAN_PAACE 3
610
611/**
612 * Setup operation mapping and stash destinations for QMAN and QMAN portal.
613 * Memory accesses to QMAN and BMAN private memory need not be coherent, so
614 * clear the PAACE entry coherency attribute for them.
615 */
616static void setup_qbman_paace(struct paace *ppaace, int paace_type)
617{
618 switch (paace_type) {
619 case QMAN_PAACE:
620 set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
621 ppaace->op_encode.index_ot.omi = OMI_QMAN_PRIV;
622 /* setup QMAN Private data stashing for the L3 cache */
623 set_bf(ppaace->impl_attr, PAACE_IA_CID, get_stash_id(PAMU_ATTR_CACHE_L3, 0));
624 set_bf(ppaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
625 0);
626 break;
627 case QMAN_PORTAL_PAACE:
628 set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
629 ppaace->op_encode.index_ot.omi = OMI_QMAN;
630 /*Set DQRR and Frame stashing for the L3 cache */
631 set_bf(ppaace->impl_attr, PAACE_IA_CID, get_stash_id(PAMU_ATTR_CACHE_L3, 0));
632 break;
633 case BMAN_PAACE:
634 set_bf(ppaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
635 0);
636 break;
637 }
638}
639
640/**
641 * Setup the operation mapping table for various devices. This is a static
642 * table where each table index corresponds to a particular device. PAMU uses
643 * this table to translate device transaction to appropriate corenet
644 * transaction.
645 */
646static void __init setup_omt(struct ome *omt)
647{
648 struct ome *ome;
649
650 /* Configure OMI_QMAN */
651 ome = &omt[OMI_QMAN];
652
653 ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_READ;
654 ome->moe[IOE_EREAD0_IDX] = EOE_VALID | EOE_RSA;
655 ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
656 ome->moe[IOE_EWRITE0_IDX] = EOE_VALID | EOE_WWSAO;
657
658 ome->moe[IOE_DIRECT0_IDX] = EOE_VALID | EOE_LDEC;
659 ome->moe[IOE_DIRECT1_IDX] = EOE_VALID | EOE_LDECPE;
660
661 /* Configure OMI_FMAN */
662 ome = &omt[OMI_FMAN];
663 ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_READI;
664 ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
665
666 /* Configure OMI_QMAN private */
667 ome = &omt[OMI_QMAN_PRIV];
668 ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_READ;
669 ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
670 ome->moe[IOE_EREAD0_IDX] = EOE_VALID | EOE_RSA;
671 ome->moe[IOE_EWRITE0_IDX] = EOE_VALID | EOE_WWSA;
672
673 /* Configure OMI_CAAM */
674 ome = &omt[OMI_CAAM];
675 ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_READI;
676 ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
677}
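/* Reading the table above (editorial aside): for a QMAN portal mapped
 * to OMI_QMAN, an enhanced read (IOE_EREAD0_IDX) is rewritten to
 * EOE_RSA, a read with stash allocate, which is what lets DQRR and
 * frame data land directly in the cache selected by the stash id; see
 * the IOE_ and EOE_ encodings in fsl_pamu.h.
 */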
678
679/*
680 * Get the maximum number of PAACT table entries
681 * and subwindows supported by PAMU
682 */
683static void get_pamu_cap_values(unsigned long pamu_reg_base)
684{
685 u32 pc_val;
686
687 pc_val = in_be32((u32 *)(pamu_reg_base + PAMU_PC3));
688 /* Maximum number of subwindows per liodn */
689 max_subwindow_count = 1 << (1 + PAMU_PC3_MWCE(pc_val));
690}
691
692/* Setup PAMU registers pointing to PAACT, SPAACT and OMT */
693int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
694 phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
695 phys_addr_t omt_phys)
696{
697 u32 *pc;
698 struct pamu_mmap_regs *pamu_regs;
699
700 pc = (u32 *) (pamu_reg_base + PAMU_PC);
701 pamu_regs = (struct pamu_mmap_regs *)
702 (pamu_reg_base + PAMU_MMAP_REGS_BASE);
703
704 /* set up pointers to corenet control blocks */
705
706 out_be32(&pamu_regs->ppbah, upper_32_bits(ppaact_phys));
707 out_be32(&pamu_regs->ppbal, lower_32_bits(ppaact_phys));
708 ppaact_phys = ppaact_phys + PAACT_SIZE;
709 out_be32(&pamu_regs->pplah, upper_32_bits(ppaact_phys));
710 out_be32(&pamu_regs->pplal, lower_32_bits(ppaact_phys));
711
712 out_be32(&pamu_regs->spbah, upper_32_bits(spaact_phys));
713 out_be32(&pamu_regs->spbal, lower_32_bits(spaact_phys));
714 spaact_phys = spaact_phys + SPAACT_SIZE;
715 out_be32(&pamu_regs->splah, upper_32_bits(spaact_phys));
716 out_be32(&pamu_regs->splal, lower_32_bits(spaact_phys));
717
718 out_be32(&pamu_regs->obah, upper_32_bits(omt_phys));
719 out_be32(&pamu_regs->obal, lower_32_bits(omt_phys));
720 omt_phys = omt_phys + OMT_SIZE;
721 out_be32(&pamu_regs->olah, upper_32_bits(omt_phys));
722 out_be32(&pamu_regs->olal, lower_32_bits(omt_phys));
723
724 /*
725 * set PAMU enable bit,
726 * allow ppaact & omt to be cached
727 * & enable PAMU access violation interrupts.
728 */
729
730 out_be32((u32 *)(pamu_reg_base + PAMU_PICS),
731 PAMU_ACCESS_VIOLATION_ENABLE);
732 out_be32(pc, PAMU_PC_PE | PAMU_PC_OCE | PAMU_PC_SPCC | PAMU_PC_PPCC);
733 return 0;
734}
735
736/* Enable all device LIODNS */
737static void __init setup_liodns(void)
738{
739 int i, len;
740 struct paace *ppaace;
741 struct device_node *node = NULL;
742 const u32 *prop;
743
744 for_each_node_with_property(node, "fsl,liodn") {
745 prop = of_get_property(node, "fsl,liodn", &len);
746 for (i = 0; i < len / sizeof(u32); i++) {
747 int liodn;
748
749 liodn = be32_to_cpup(&prop[i]);
750 if (liodn >= PAACE_NUMBER_ENTRIES) {
751 pr_debug("Invalid LIODN value %d\n", liodn);
752 continue;
753 }
754 ppaace = pamu_get_ppaace(liodn);
755 pamu_init_ppaace(ppaace);
756 /* window size is 2^(WSE+1) bytes */
757 set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE, 35);
758 ppaace->wbah = 0;
759 set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL, 0);
760 set_bf(ppaace->impl_attr, PAACE_IA_ATM,
761 PAACE_ATM_NO_XLATE);
762 set_bf(ppaace->addr_bitfields, PAACE_AF_AP,
763 PAACE_AP_PERMS_ALL);
764 if (of_device_is_compatible(node, "fsl,qman-portal"))
765 setup_qbman_paace(ppaace, QMAN_PORTAL_PAACE);
766 if (of_device_is_compatible(node, "fsl,qman"))
767 setup_qbman_paace(ppaace, QMAN_PAACE);
768 if (of_device_is_compatible(node, "fsl,bman"))
769 setup_qbman_paace(ppaace, BMAN_PAACE);
770 mb();
771 pamu_enable_liodn(liodn);
772 }
773 }
774}
775
776irqreturn_t pamu_av_isr(int irq, void *arg)
777{
778 struct pamu_isr_data *data = arg;
779 phys_addr_t phys;
780 unsigned int i, j, ret;
781
782 pr_emerg("access violation interrupt\n");
783
784 for (i = 0; i < data->count; i++) {
785 void __iomem *p = data->pamu_reg_base + i * PAMU_OFFSET;
786 u32 pics = in_be32(p + PAMU_PICS);
787
788 if (pics & PAMU_ACCESS_VIOLATION_STAT) {
789 u32 avs1 = in_be32(p + PAMU_AVS1);
790 struct paace *paace;
791
792 pr_emerg("POES1=%08x\n", in_be32(p + PAMU_POES1));
793 pr_emerg("POES2=%08x\n", in_be32(p + PAMU_POES2));
794 pr_emerg("AVS1=%08x\n", avs1);
795 pr_emerg("AVS2=%08x\n", in_be32(p + PAMU_AVS2));
796 pr_emerg("AVA=%016llx\n", make64(in_be32(p + PAMU_AVAH),
797 in_be32(p + PAMU_AVAL)));
798 pr_emerg("UDAD=%08x\n", in_be32(p + PAMU_UDAD));
799 pr_emerg("POEA=%016llx\n", make64(in_be32(p + PAMU_POEAH),
800 in_be32(p + PAMU_POEAL)));
801
802 phys = make64(in_be32(p + PAMU_POEAH),
803 in_be32(p + PAMU_POEAL));
804
805 /* Assume that POEA points to a PAACE */
806 if (phys) {
807 u32 *paace = phys_to_virt(phys);
808
809 /* Only the first four words are relevant */
810 for (j = 0; j < 4; j++)
811 pr_emerg("PAACE[%u]=%08x\n", j, in_be32(paace + j));
812 }
813
814 /* clear access violation condition */
815 out_be32((p + PAMU_AVS1), avs1 & PAMU_AV_MASK);
816 paace = pamu_get_ppaace(avs1 >> PAMU_AVS1_LIODN_SHIFT);
817 BUG_ON(!paace);
818 /* check if we got a violation for a disabled LIODN */
819 if (!get_bf(paace->addr_bitfields, PAACE_AF_V)) {
820 /*
821 * As per hardware erratum A-003638, access
822 * violation can be reported for a disabled
823 * LIODN. If we hit that condition, disable
824 * access violation reporting.
825 */
826 pics &= ~PAMU_ACCESS_VIOLATION_ENABLE;
827 } else {
828 /* Disable the LIODN */
829 ret = pamu_disable_liodn(avs1 >> PAMU_AVS1_LIODN_SHIFT);
830 BUG_ON(ret);
831 pr_emerg("Disabling liodn %x\n", avs1 >> PAMU_AVS1_LIODN_SHIFT);
832 }
833 out_be32((p + PAMU_PICS), pics);
834 }
835 }
836
837
838 return IRQ_HANDLED;
839}
840
841#define LAWAR_EN 0x80000000
842#define LAWAR_TARGET_MASK 0x0FF00000
843#define LAWAR_TARGET_SHIFT 20
844#define LAWAR_SIZE_MASK 0x0000003F
845#define LAWAR_CSDID_MASK 0x000FF000
846#define LAWAR_CSDID_SHIFT 12
847
848#define LAW_SIZE_4K 0xb
849
850struct ccsr_law {
851 u32 lawbarh; /* LAWn base address high */
852 u32 lawbarl; /* LAWn base address low */
853 u32 lawar; /* LAWn attributes */
854 u32 reserved;
855};
856
857/*
858 * Create a coherence subdomain for a given memory block.
859 */
860static int __init create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
861{
862 struct device_node *np;
863 const __be32 *iprop;
864 void __iomem *lac = NULL; /* Local Access Control registers */
865 struct ccsr_law __iomem *law;
866 void __iomem *ccm = NULL;
867 u32 __iomem *csdids;
868 unsigned int i, num_laws, num_csds;
869 u32 law_target = 0;
870 u32 csd_id = 0;
871 int ret = 0;
872
873 np = of_find_compatible_node(NULL, NULL, "fsl,corenet-law");
874 if (!np)
875 return -ENODEV;
876
877 iprop = of_get_property(np, "fsl,num-laws", NULL);
878 if (!iprop) {
879 ret = -ENODEV;
880 goto error;
881 }
882
883 num_laws = be32_to_cpup(iprop);
884 if (!num_laws) {
885 ret = -ENODEV;
886 goto error;
887 }
888
889 lac = of_iomap(np, 0);
890 if (!lac) {
891 ret = -ENODEV;
892 goto error;
893 }
894
895 /* LAW registers are at offset 0xC00 */
896 law = lac + 0xC00;
897
898 of_node_put(np);
899
900 np = of_find_compatible_node(NULL, NULL, "fsl,corenet-cf");
901 if (!np) {
902 ret = -ENODEV;
903 goto error;
904 }
905
906 iprop = of_get_property(np, "fsl,ccf-num-csdids", NULL);
907 if (!iprop) {
908 ret = -ENODEV;
909 goto error;
910 }
911
912 num_csds = be32_to_cpup(iprop);
913 if (!num_csds) {
914 ret = -ENODEV;
915 goto error;
916 }
917
918 ccm = of_iomap(np, 0);
919 if (!ccm) {
920 ret = -ENOMEM;
921 goto error;
922 }
923
924 /* The undocumented CSDID registers are at offset 0x600 */
925 csdids = ccm + 0x600;
926
927 of_node_put(np);
928 np = NULL;
929
930 /* Find an unused coherence subdomain ID */
931 for (csd_id = 0; csd_id < num_csds; csd_id++) {
932 if (!csdids[csd_id])
933 break;
934 }
935
936 /* Store the Port ID in the (undocumented) proper CIDMRxx register */
937 csdids[csd_id] = csd_port_id;
938
939 /* Find the DDR LAW that maps to our buffer. */
940 for (i = 0; i < num_laws; i++) {
941 if (law[i].lawar & LAWAR_EN) {
942 phys_addr_t law_start, law_end;
943
944 law_start = make64(law[i].lawbarh, law[i].lawbarl);
945 law_end = law_start +
946 (2ULL << (law[i].lawar & LAWAR_SIZE_MASK));
947
948 if (law_start <= phys && phys < law_end) {
949 law_target = law[i].lawar & LAWAR_TARGET_MASK;
950 break;
951 }
952 }
953 }
954
955 if (i == 0 || i == num_laws) {
956 /* This should never happen */
957 ret = -ENOENT;
958 goto error;
959 }
960
961 /* Find a free LAW entry */
962 while (law[--i].lawar & LAWAR_EN) {
963 if (i == 0) {
964 /* No higher priority LAW slots available */
965 ret = -ENOENT;
966 goto error;
967 }
968 }
969
970 law[i].lawbarh = upper_32_bits(phys);
971 law[i].lawbarl = lower_32_bits(phys);
972 wmb();
973 law[i].lawar = LAWAR_EN | law_target | (csd_id << LAWAR_CSDID_SHIFT) |
974 (LAW_SIZE_4K + get_order(size));
975 wmb();
976
977error:
978 if (ccm)
979 iounmap(ccm);
980
981 if (lac)
982 iounmap(lac);
983
984 if (np)
985 of_node_put(np);
986
987 return ret;
988}
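/* Worked example for the LAW programming above (editorial aside):
 * LAWAR[SIZE] encodes a window of 2^(SIZE+1) bytes, so LAW_SIZE_4K
 * (0xb) decodes to 2^12 = 4 KiB, and LAW_SIZE_4K + get_order(size)
 * covers the (PAGE_SIZE << order)-byte block holding PAACT/SPAACT/OMT.
 */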
989
990/*
991 * Table of SVRs and the corresponding PORT_ID values. Port ID corresponds to a
992 * bit map of snoopers for a given range of memory mapped by a LAW.
993 *
994 * All future CoreNet-enabled SOCs will have this erratum (A-004510) fixed, so this
995 * table should never need to be updated. SVRs are guaranteed to be unique, so
996 * there is no worry that a future SOC will inadvertently have one of these
997 * values.
998 */
999static const struct {
1000 u32 svr;
1001 u32 port_id;
1002} port_id_map[] = {
1003 {0x82100010, 0xFF000000}, /* P2040 1.0 */
1004 {0x82100011, 0xFF000000}, /* P2040 1.1 */
1005 {0x82100110, 0xFF000000}, /* P2041 1.0 */
1006 {0x82100111, 0xFF000000}, /* P2041 1.1 */
1007 {0x82110310, 0xFF000000}, /* P3041 1.0 */
1008 {0x82110311, 0xFF000000}, /* P3041 1.1 */
1009 {0x82010020, 0xFFF80000}, /* P4040 2.0 */
1010 {0x82000020, 0xFFF80000}, /* P4080 2.0 */
1011 {0x82210010, 0xFC000000}, /* P5010 1.0 */
1012 {0x82210020, 0xFC000000}, /* P5010 2.0 */
1013 {0x82200010, 0xFC000000}, /* P5020 1.0 */
1014 {0x82050010, 0xFF800000}, /* P5021 1.0 */
1015 {0x82040010, 0xFF800000}, /* P5040 1.0 */
1016};
1017
1018#define SVR_SECURITY 0x80000 /* The Security (E) bit */
1019
1020static int __init fsl_pamu_probe(struct platform_device *pdev)
1021{
1022 void __iomem *pamu_regs = NULL;
1023 struct ccsr_guts __iomem *guts_regs = NULL;
1024 u32 pamubypenr, pamu_counter;
1025 unsigned long pamu_reg_off;
1026 unsigned long pamu_reg_base;
1027 struct pamu_isr_data *data = NULL;
1028 struct device_node *guts_node;
1029 u64 size;
1030 struct page *p;
1031 int ret = 0;
1032 int irq;
1033 phys_addr_t ppaact_phys;
1034 phys_addr_t spaact_phys;
1035 phys_addr_t omt_phys;
1036 size_t mem_size = 0;
1037 unsigned int order = 0;
1038 u32 csd_port_id = 0;
1039 unsigned i;
1040 /*
1041 * enumerate all PAMUs and allocate and setup PAMU tables
1042 * for each of them,
1043 * NOTE : All PAMUs share the same LIODN tables.
1044 */
1045
1046 pamu_regs = of_iomap(pdev->dev.of_node, 0);
1047 if (!pamu_regs) {
1048 dev_err(&pdev->dev, "ioremap of PAMU node failed\n");
1049 return -ENOMEM;
1050 }
1051 of_get_address(pdev->dev.of_node, 0, &size, NULL);
1052
1053 irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
1054 if (irq == NO_IRQ) {
1055 dev_warn(&pdev->dev, "no interrupts listed in PAMU node\n");
1056 goto error;
1057 }
1058
1059 data = kzalloc(sizeof(struct pamu_isr_data), GFP_KERNEL);
1060 if (!data) {
1061 dev_err(&pdev->dev, "PAMU isr data memory allocation failed\n");
1062 ret = -ENOMEM;
1063 goto error;
1064 }
1065 data->pamu_reg_base = pamu_regs;
1066 data->count = size / PAMU_OFFSET;
1067
1068 /* The ISR needs access to the regs, so we won't iounmap them */
1069 ret = request_irq(irq, pamu_av_isr, 0, "pamu", data);
1070 if (ret < 0) {
1071 dev_err(&pdev->dev, "error %i installing ISR for irq %i\n",
1072 ret, irq);
1073 goto error;
1074 }
1075
1076 guts_node = of_find_matching_node(NULL, guts_device_ids);
1077 if (!guts_node) {
1078 dev_err(&pdev->dev, "could not find GUTS node %s\n",
1079 pdev->dev.of_node->full_name);
1080 ret = -ENODEV;
1081 goto error;
1082 }
1083
1084 guts_regs = of_iomap(guts_node, 0);
1085 of_node_put(guts_node);
1086 if (!guts_regs) {
1087 dev_err(&pdev->dev, "ioremap of GUTS node failed\n");
1088 ret = -ENODEV;
1089 goto error;
1090 }
1091
1092 /* read in the PAMU capability registers */
1093 get_pamu_cap_values((unsigned long)pamu_regs);
1094 /*
1095 * To simplify the allocation of a coherency domain, we allocate the
1096 * PAACT and the OMT in the same memory buffer. Unfortunately, this
1097 * wastes more memory compared to allocating the buffers separately.
1098 */
1099 /* Determine how much memory we need */
1100 mem_size = (PAGE_SIZE << get_order(PAACT_SIZE)) +
1101 (PAGE_SIZE << get_order(SPAACT_SIZE)) +
1102 (PAGE_SIZE << get_order(OMT_SIZE));
1103 order = get_order(mem_size);
1104
1105 p = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
1106 if (!p) {
1107 dev_err(&pdev->dev, "unable to allocate PAACT/SPAACT/OMT block\n");
1108 ret = -ENOMEM;
1109 goto error;
1110 }
1111
1112 ppaact = page_address(p);
1113 ppaact_phys = page_to_phys(p);
1114
1115 /* Make sure the memory is naturally aligned */
1116 if (ppaact_phys & ((PAGE_SIZE << order) - 1)) {
1117 dev_err(&pdev->dev, "PAACT/OMT block is unaligned\n");
1118 ret = -ENOMEM;
1119 goto error;
1120 }
1121
1122 spaact = (void *)ppaact + (PAGE_SIZE << get_order(PAACT_SIZE));
1123 omt = (void *)spaact + (PAGE_SIZE << get_order(SPAACT_SIZE));
1124
1125 dev_dbg(&pdev->dev, "ppaact virt=%p phys=0x%llx\n", ppaact,
1126 (unsigned long long) ppaact_phys);
1127
1128 /* Check to see if we need to implement the work-around on this SOC */
1129
1130 /* Determine the Port ID for our coherence subdomain */
1131 for (i = 0; i < ARRAY_SIZE(port_id_map); i++) {
1132 if (port_id_map[i].svr == (mfspr(SPRN_SVR) & ~SVR_SECURITY)) {
1133 csd_port_id = port_id_map[i].port_id;
1134 dev_dbg(&pdev->dev, "found matching SVR %08x\n",
1135 port_id_map[i].svr);
1136 break;
1137 }
1138 }
1139
1140 if (csd_port_id) {
1141 dev_dbg(&pdev->dev, "creating coherency subdomain at address "
1142 "0x%llx, size %zu, port id 0x%08x", ppaact_phys,
1143 mem_size, csd_port_id);
1144
1145 ret = create_csd(ppaact_phys, mem_size, csd_port_id);
1146 if (ret) {
1147 dev_err(&pdev->dev, "could not create coherence "
1148 "subdomain\n");
1149			goto error;
1150 }
1151 }
1152
1153 spaact_phys = virt_to_phys(spaact);
1154 omt_phys = virt_to_phys(omt);
1155
1156 spaace_pool = gen_pool_create(ilog2(sizeof(struct paace)), -1);
1157 if (!spaace_pool) {
1158 ret = -ENOMEM;
1159 dev_err(&pdev->dev, "PAMU : failed to allocate spaace gen pool\n");
1160 goto error;
1161 }
1162
1163 ret = gen_pool_add(spaace_pool, (unsigned long)spaact, SPAACT_SIZE, -1);
1164 if (ret)
1165 goto error_genpool;
1166
1167 pamubypenr = in_be32(&guts_regs->pamubypenr);
1168
1169 for (pamu_reg_off = 0, pamu_counter = 0x80000000; pamu_reg_off < size;
1170 pamu_reg_off += PAMU_OFFSET, pamu_counter >>= 1) {
1171
1172 pamu_reg_base = (unsigned long) pamu_regs + pamu_reg_off;
1173 setup_one_pamu(pamu_reg_base, pamu_reg_off, ppaact_phys,
1174 spaact_phys, omt_phys);
1175 /* Disable PAMU bypass for this PAMU */
1176 pamubypenr &= ~pamu_counter;
1177 }
1178
1179 setup_omt(omt);
1180
1181 /* Enable all relevant PAMU(s) */
1182 out_be32(&guts_regs->pamubypenr, pamubypenr);
1183
1184 iounmap(guts_regs);
1185
1186	/* Enable DMA for the LIODNs in the device tree */
1187
1188 setup_liodns();
1189
1190 return 0;
1191
1192error_genpool:
1193 gen_pool_destroy(spaace_pool);
1194
1195error:
1196 if (irq != NO_IRQ)
1197 free_irq(irq, data);
1198
1199 if (data) {
1200 memset(data, 0, sizeof(struct pamu_isr_data));
1201 kfree(data);
1202 }
1203
1204 if (pamu_regs)
1205 iounmap(pamu_regs);
1206
1207 if (guts_regs)
1208 iounmap(guts_regs);
1209
1210 if (ppaact)
1211 free_pages((unsigned long)ppaact, order);
1212
1213 ppaact = NULL;
1214
1215 return ret;
1216}
1217
1218static const struct of_device_id fsl_of_pamu_ids[] = {
1219 {
1220 .compatible = "fsl,p4080-pamu",
1221 },
1222 {
1223 .compatible = "fsl,pamu",
1224 },
1225 {},
1226};
1227
1228static struct platform_driver fsl_of_pamu_driver = {
1229 .driver = {
1230 .name = "fsl-of-pamu",
1231 .owner = THIS_MODULE,
1232 },
1233 .probe = fsl_pamu_probe,
1234};
1235
1236static __init int fsl_pamu_init(void)
1237{
1238 struct platform_device *pdev = NULL;
1239 struct device_node *np;
1240 int ret;
1241
1242 /*
1243 * The normal OF process calls the probe function at some
1244 * indeterminate later time, after most drivers have loaded. This is
1245 * too late for us, because PAMU clients (like the Qman driver)
1246 * depend on PAMU being initialized early.
1247 *
1248 * So instead, we "manually" call our probe function by creating the
1249 * platform devices ourselves.
1250 */
1251
1252 /*
1253 * We assume that there is only one PAMU node in the device tree. A
1254 * single PAMU node represents all of the PAMU devices in the SOC
1255 * already. Everything else already makes that assumption, and the
1256 * binding for the PAMU nodes doesn't allow for any parent-child
1257 * relationships anyway. In other words, support for more than one
1258 * PAMU node would require significant changes to a lot of code.
1259 */
1260
1261 np = of_find_compatible_node(NULL, NULL, "fsl,pamu");
1262 if (!np) {
1263 pr_err("could not find a PAMU node\n");
1264 return -ENODEV;
1265 }
1266
1267 ret = platform_driver_register(&fsl_of_pamu_driver);
1268 if (ret) {
1269 pr_err("could not register driver (err=%i)\n", ret);
1270 goto error_driver_register;
1271 }
1272
1273 pdev = platform_device_alloc("fsl-of-pamu", 0);
1274 if (!pdev) {
1275 pr_err("could not allocate device %s\n",
1276 np->full_name);
1277 ret = -ENOMEM;
1278 goto error_device_alloc;
1279 }
1280 pdev->dev.of_node = of_node_get(np);
1281
1282 ret = pamu_domain_init();
1283 if (ret)
1284 goto error_device_add;
1285
1286 ret = platform_device_add(pdev);
1287 if (ret) {
1288 pr_err("could not add device %s (err=%i)\n",
1289 np->full_name, ret);
1290 goto error_device_add;
1291 }
1292
1293 return 0;
1294
1295error_device_add:
1296 of_node_put(pdev->dev.of_node);
1297 pdev->dev.of_node = NULL;
1298
1299 platform_device_put(pdev);
1300
1301error_device_alloc:
1302 platform_driver_unregister(&fsl_of_pamu_driver);
1303
1304error_driver_register:
1305 of_node_put(np);
1306
1307 return ret;
1308}
1309arch_initcall(fsl_pamu_init);
diff --git a/drivers/iommu/fsl_pamu.h b/drivers/iommu/fsl_pamu.h
new file mode 100644
index 000000000000..8fc1a125b16e
--- /dev/null
+++ b/drivers/iommu/fsl_pamu.h
@@ -0,0 +1,410 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright (C) 2013 Freescale Semiconductor, Inc.
16 *
17 */
18
19#ifndef __FSL_PAMU_H
20#define __FSL_PAMU_H
21
22#include <asm/fsl_pamu_stash.h>
23
24/* Bit Field macros
25 * v = bit field variable; m = mask, m##_SHIFT = shift, x = value to load
26 */
27#define set_bf(v, m, x) (v = ((v) & ~(m)) | (((x) << (m##_SHIFT)) & (m)))
28#define get_bf(v, m) (((v) & (m)) >> (m##_SHIFT))
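
/*
 * Usage sketch (illustrative, not part of the original header): these
 * macros assume that every mask FOO has a matching FOO_SHIFT constant,
 * e.g. PPAACE_AF_WSE / PPAACE_AF_WSE_SHIFT below.  To program and read
 * back the window-size-encoding field of a primary PAACE:
 *
 *	set_bf(paace->addr_bitfields, PPAACE_AF_WSE, PAACE_WSE_4K);
 *	wse = get_bf(paace->addr_bitfields, PPAACE_AF_WSE);
 */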
29
30/* PAMU CCSR space */
31#define PAMU_PGC 0x00000000 /* Allows all peripheral accesses */
32#define PAMU_PE 0x40000000 /* enable PAMU */
33
34/* PAMU_OFFSET to the next pamu space in ccsr */
35#define PAMU_OFFSET 0x1000
36
37#define PAMU_MMAP_REGS_BASE 0
38
39struct pamu_mmap_regs {
40 u32 ppbah;
41 u32 ppbal;
42 u32 pplah;
43 u32 pplal;
44 u32 spbah;
45 u32 spbal;
46 u32 splah;
47 u32 splal;
48 u32 obah;
49 u32 obal;
50 u32 olah;
51 u32 olal;
52};
53
54/* PAMU Error Registers */
55#define PAMU_POES1 0x0040
56#define PAMU_POES2 0x0044
57#define PAMU_POEAH 0x0048
58#define PAMU_POEAL 0x004C
59#define PAMU_AVS1 0x0050
60#define PAMU_AVS1_AV 0x1
61#define PAMU_AVS1_OTV 0x6
62#define PAMU_AVS1_APV 0x78
63#define PAMU_AVS1_WAV 0x380
64#define PAMU_AVS1_LAV 0x1c00
65#define PAMU_AVS1_GCV 0x2000
66#define PAMU_AVS1_PDV 0x4000
67#define PAMU_AV_MASK (PAMU_AVS1_AV | PAMU_AVS1_OTV | PAMU_AVS1_APV | PAMU_AVS1_WAV \
68 | PAMU_AVS1_LAV | PAMU_AVS1_GCV | PAMU_AVS1_PDV)
69#define PAMU_AVS1_LIODN_SHIFT 16
70#define PAMU_LAV_LIODN_NOT_IN_PPAACT 0x400
71
72#define PAMU_AVS2 0x0054
73#define PAMU_AVAH 0x0058
74#define PAMU_AVAL 0x005C
75#define PAMU_EECTL 0x0060
76#define PAMU_EEDIS 0x0064
77#define PAMU_EEINTEN 0x0068
78#define PAMU_EEDET 0x006C
79#define PAMU_EEATTR 0x0070
80#define PAMU_EEAHI 0x0074
81#define PAMU_EEALO 0x0078
82#define PAMU_EEDHI 0X007C
83#define PAMU_EEDLO 0x0080
84#define PAMU_EECC 0x0084
85#define PAMU_UDAD 0x0090
86
87/* PAMU Revision Registers */
88#define PAMU_PR1 0x0BF8
89#define PAMU_PR2 0x0BFC
90
91/* PAMU version mask */
92#define PAMU_PR1_MASK 0xffff
93
94/* PAMU Capabilities Registers */
95#define PAMU_PC1 0x0C00
96#define PAMU_PC2 0x0C04
97#define PAMU_PC3 0x0C08
98#define PAMU_PC4 0x0C0C
99
100/* PAMU Control Register */
101#define PAMU_PC 0x0C10
102
103/* PAMU control defs */
104#define PAMU_CONTROL 0x0C10
105#define PAMU_PC_PGC 0x80000000 /* PAMU gate closed bit */
106#define PAMU_PC_PE 0x40000000 /* PAMU enable bit */
107#define PAMU_PC_SPCC 0x00000010 /* sPAACE cache enable */
108#define PAMU_PC_PPCC 0x00000001 /* pPAACE cache enable */
109#define PAMU_PC_OCE 0x00001000 /* OMT cache enable */
110
111#define PAMU_PFA1 0x0C14
112#define PAMU_PFA2 0x0C18
113
114#define PAMU_PC2_MLIODN(X) ((X) >> 16)
115#define PAMU_PC3_MWCE(X) (((X) >> 21) & 0xf)
116
117/* PAMU Interrupt control and Status Register */
118#define PAMU_PICS 0x0C1C
119#define PAMU_ACCESS_VIOLATION_STAT 0x8
120#define PAMU_ACCESS_VIOLATION_ENABLE 0x4
121
122/* PAMU Debug Registers */
123#define PAMU_PD1 0x0F00
124#define PAMU_PD2 0x0F04
125#define PAMU_PD3 0x0F08
126#define PAMU_PD4 0x0F0C
127
128#define PAACE_AP_PERMS_DENIED 0x0
129#define PAACE_AP_PERMS_QUERY 0x1
130#define PAACE_AP_PERMS_UPDATE 0x2
131#define PAACE_AP_PERMS_ALL 0x3
132
133#define PAACE_DD_TO_HOST 0x0
134#define PAACE_DD_TO_IO 0x1
135#define PAACE_PT_PRIMARY 0x0
136#define PAACE_PT_SECONDARY 0x1
137#define PAACE_V_INVALID 0x0
138#define PAACE_V_VALID 0x1
139#define PAACE_MW_SUBWINDOWS 0x1
140
141#define PAACE_WSE_4K 0xB
142#define PAACE_WSE_8K 0xC
143#define PAACE_WSE_16K 0xD
144#define PAACE_WSE_32K 0xE
145#define PAACE_WSE_64K 0xF
146#define PAACE_WSE_128K 0x10
147#define PAACE_WSE_256K 0x11
148#define PAACE_WSE_512K 0x12
149#define PAACE_WSE_1M 0x13
150#define PAACE_WSE_2M 0x14
151#define PAACE_WSE_4M 0x15
152#define PAACE_WSE_8M 0x16
153#define PAACE_WSE_16M 0x17
154#define PAACE_WSE_32M 0x18
155#define PAACE_WSE_64M 0x19
156#define PAACE_WSE_128M 0x1A
157#define PAACE_WSE_256M 0x1B
158#define PAACE_WSE_512M 0x1C
159#define PAACE_WSE_1G 0x1D
160#define PAACE_WSE_2G 0x1E
161#define PAACE_WSE_4G 0x1F
162
163#define PAACE_DID_PCI_EXPRESS_1 0x00
164#define PAACE_DID_PCI_EXPRESS_2 0x01
165#define PAACE_DID_PCI_EXPRESS_3 0x02
166#define PAACE_DID_PCI_EXPRESS_4 0x03
167#define PAACE_DID_LOCAL_BUS 0x04
168#define PAACE_DID_SRIO 0x0C
169#define PAACE_DID_MEM_1 0x10
170#define PAACE_DID_MEM_2 0x11
171#define PAACE_DID_MEM_3 0x12
172#define PAACE_DID_MEM_4 0x13
173#define PAACE_DID_MEM_1_2 0x14
174#define PAACE_DID_MEM_3_4 0x15
175#define PAACE_DID_MEM_1_4 0x16
176#define PAACE_DID_BM_SW_PORTAL 0x18
177#define PAACE_DID_PAMU 0x1C
178#define PAACE_DID_CAAM 0x21
179#define PAACE_DID_QM_SW_PORTAL 0x3C
180#define PAACE_DID_CORE0_INST 0x80
181#define PAACE_DID_CORE0_DATA 0x81
182#define PAACE_DID_CORE1_INST 0x82
183#define PAACE_DID_CORE1_DATA 0x83
184#define PAACE_DID_CORE2_INST 0x84
185#define PAACE_DID_CORE2_DATA 0x85
186#define PAACE_DID_CORE3_INST 0x86
187#define PAACE_DID_CORE3_DATA 0x87
188#define PAACE_DID_CORE4_INST 0x88
189#define PAACE_DID_CORE4_DATA 0x89
190#define PAACE_DID_CORE5_INST 0x8A
191#define PAACE_DID_CORE5_DATA 0x8B
192#define PAACE_DID_CORE6_INST 0x8C
193#define PAACE_DID_CORE6_DATA 0x8D
194#define PAACE_DID_CORE7_INST 0x8E
195#define PAACE_DID_CORE7_DATA 0x8F
196#define PAACE_DID_BROADCAST 0xFF
197
198#define PAACE_ATM_NO_XLATE 0x00
199#define PAACE_ATM_WINDOW_XLATE 0x01
200#define PAACE_ATM_PAGE_XLATE 0x02
201#define PAACE_ATM_WIN_PG_XLATE \
202 (PAACE_ATM_WINDOW_XLATE | PAACE_ATM_PAGE_XLATE)
203#define PAACE_OTM_NO_XLATE 0x00
204#define PAACE_OTM_IMMEDIATE 0x01
205#define PAACE_OTM_INDEXED 0x02
206#define PAACE_OTM_RESERVED 0x03
207
208#define PAACE_M_COHERENCE_REQ 0x01
209
210#define PAACE_PID_0 0x0
211#define PAACE_PID_1 0x1
212#define PAACE_PID_2 0x2
213#define PAACE_PID_3 0x3
214#define PAACE_PID_4 0x4
215#define PAACE_PID_5 0x5
216#define PAACE_PID_6 0x6
217#define PAACE_PID_7 0x7
218
219#define PAACE_TCEF_FORMAT0_8B 0x00
220#define PAACE_TCEF_FORMAT1_RSVD 0x01
221/*
222 * Hard coded value for the PAACT size to accommodate
223 * maximum LIODN value generated by u-boot.
224 */
225#define PAACE_NUMBER_ENTRIES 0x500
226/* Hard coded value for the SPAACT size */
227#define SPAACE_NUMBER_ENTRIES 0x800
228
229#define OME_NUMBER_ENTRIES 16
230
231/* PAACE Bit Field Defines */
232#define PPAACE_AF_WBAL 0xfffff000
233#define PPAACE_AF_WBAL_SHIFT 12
234#define PPAACE_AF_WSE 0x00000fc0
235#define PPAACE_AF_WSE_SHIFT 6
236#define PPAACE_AF_MW 0x00000020
237#define PPAACE_AF_MW_SHIFT 5
238
239#define SPAACE_AF_LIODN 0xffff0000
240#define SPAACE_AF_LIODN_SHIFT 16
241
242#define PAACE_AF_AP 0x00000018
243#define PAACE_AF_AP_SHIFT 3
244#define PAACE_AF_DD 0x00000004
245#define PAACE_AF_DD_SHIFT 2
246#define PAACE_AF_PT 0x00000002
247#define PAACE_AF_PT_SHIFT 1
248#define PAACE_AF_V 0x00000001
249#define PAACE_AF_V_SHIFT 0
250
251#define PAACE_DA_HOST_CR 0x80
252#define PAACE_DA_HOST_CR_SHIFT 7
253
254#define PAACE_IA_CID 0x00FF0000
255#define PAACE_IA_CID_SHIFT 16
256#define PAACE_IA_WCE 0x000000F0
257#define PAACE_IA_WCE_SHIFT 4
258#define PAACE_IA_ATM 0x0000000C
259#define PAACE_IA_ATM_SHIFT 2
260#define PAACE_IA_OTM 0x00000003
261#define PAACE_IA_OTM_SHIFT 0
262
263#define PAACE_WIN_TWBAL 0xfffff000
264#define PAACE_WIN_TWBAL_SHIFT 12
265#define PAACE_WIN_SWSE 0x00000fc0
266#define PAACE_WIN_SWSE_SHIFT 6
267
268/* PAMU Data Structures */
269/* primary / secondary paact structure */
270struct paace {
271 /* PAACE Offset 0x00 */
272 u32 wbah; /* only valid for Primary PAACE */
273 u32 addr_bitfields; /* See P/S PAACE_AF_* */
274
275 /* PAACE Offset 0x08 */
276 /* Interpretation of first 32 bits dependent on DD above */
277 union {
278 struct {
279 /* Destination ID, see PAACE_DID_* defines */
280 u8 did;
281 /* Partition ID */
282 u8 pid;
283 /* Snoop ID */
284 u8 snpid;
285 /* coherency_required : 1 reserved : 7 */
286 u8 coherency_required; /* See PAACE_DA_* */
287 } to_host;
288 struct {
289 /* Destination ID, see PAACE_DID_* defines */
290 u8 did;
291 u8 reserved1;
292 u16 reserved2;
293 } to_io;
294 } domain_attr;
295
296 /* Implementation attributes + window count + address & operation translation modes */
297 u32 impl_attr; /* See PAACE_IA_* */
298
299 /* PAACE Offset 0x10 */
300 /* Translated window base address */
301 u32 twbah;
302 u32 win_bitfields; /* See PAACE_WIN_* */
303
304 /* PAACE Offset 0x18 */
305 /* first secondary paace entry */
306 u32 fspi; /* only valid for Primary PAACE */
307 union {
308 struct {
309 u8 ioea;
310 u8 moea;
311 u8 ioeb;
312 u8 moeb;
313 } immed_ot;
314 struct {
315 u16 reserved;
316 u16 omi;
317 } index_ot;
318 } op_encode;
319
320 /* PAACE Offsets 0x20-0x38 */
321 u32 reserved[8]; /* not currently implemented */
322};
323
324/* OME : Operation Mapping Entry
325 * MOE : Mapped Operation Encoding
326 * The operation mapping table (OMT) is a table of operation mapping entries
327 * (OMEs).  The index of a particular OME is programmed into the PAACE entry
328 * that translates inbound I/O operations for an LIODN; the OMT is consulted
329 * only in the indexed operation-translation mode.  Each OME holds 128
330 * one-byte mapped operation encodings (MOEs).
331 */
332#define NUM_MOE 128
333struct ome {
334 u8 moe[NUM_MOE];
335} __attribute__((packed));
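
/*
 * Illustrative sketch (not part of the original header): filling in a
 * pass-through OME for plain reads and writes, using the IOE_*_IDX and
 * EOE_* encodings defined below, might look like:
 *
 *	struct ome *entry = &omt[omi];
 *
 *	entry->moe[IOE_READ_IDX]  = EOE_VALID | EOE_READ;
 *	entry->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
 *
 * where omi is the index that get_ome_index() reports for the device.
 */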
336
337#define PAACT_SIZE (sizeof(struct paace) * PAACE_NUMBER_ENTRIES)
338#define SPAACT_SIZE (sizeof(struct paace) * SPAACE_NUMBER_ENTRIES)
339#define OMT_SIZE (sizeof(struct ome) * OME_NUMBER_ENTRIES)
340
341#define PAMU_PAGE_SHIFT 12
342#define PAMU_PAGE_SIZE 4096ULL
343
344#define IOE_READ 0x00
345#define IOE_READ_IDX 0x00
346#define IOE_WRITE 0x81
347#define IOE_WRITE_IDX 0x01
348#define IOE_EREAD0 0x82 /* Enhanced read type 0 */
349#define IOE_EREAD0_IDX 0x02 /* Enhanced read type 0 */
350#define IOE_EWRITE0 0x83 /* Enhanced write type 0 */
351#define IOE_EWRITE0_IDX 0x03 /* Enhanced write type 0 */
352#define IOE_DIRECT0 0x84 /* Directive type 0 */
353#define IOE_DIRECT0_IDX 0x04 /* Directive type 0 */
354#define IOE_EREAD1 0x85 /* Enhanced read type 1 */
355#define IOE_EREAD1_IDX 0x05 /* Enhanced read type 1 */
356#define IOE_EWRITE1 0x86 /* Enhanced write type 1 */
357#define IOE_EWRITE1_IDX 0x06 /* Enhanced write type 1 */
358#define IOE_DIRECT1 0x87 /* Directive type 1 */
359#define IOE_DIRECT1_IDX 0x07 /* Directive type 1 */
360#define IOE_RAC 0x8c /* Read with Atomic clear */
361#define IOE_RAC_IDX 0x0c /* Read with Atomic clear */
362#define IOE_RAS 0x8d /* Read with Atomic set */
363#define IOE_RAS_IDX 0x0d /* Read with Atomic set */
364#define IOE_RAD 0x8e /* Read with Atomic decrement */
365#define IOE_RAD_IDX 0x0e /* Read with Atomic decrement */
366#define IOE_RAI 0x8f /* Read with Atomic increment */
367#define IOE_RAI_IDX 0x0f /* Read with Atomic increment */
368
369#define EOE_READ 0x00
370#define EOE_WRITE 0x01
371#define EOE_RAC 0x0c /* Read with Atomic clear */
372#define EOE_RAS 0x0d /* Read with Atomic set */
373#define EOE_RAD 0x0e /* Read with Atomic decrement */
374#define EOE_RAI 0x0f /* Read with Atomic increment */
375#define EOE_LDEC 0x10 /* Load external cache */
376#define EOE_LDECL 0x11 /* Load external cache with stash lock */
377#define EOE_LDECPE 0x12 /* Load external cache with preferred exclusive */
378#define EOE_LDECPEL 0x13 /* Load external cache with preferred exclusive and lock */
379#define EOE_LDECFE 0x14 /* Load external cache with forced exclusive */
380#define EOE_LDECFEL 0x15 /* Load external cache with forced exclusive and lock */
381#define EOE_RSA 0x16 /* Read with stash allocate */
382#define EOE_RSAU 0x17 /* Read with stash allocate and unlock */
383#define EOE_READI 0x18 /* Read with invalidate */
384#define EOE_RWNITC 0x19 /* Read with no intention to cache */
385#define EOE_WCI 0x1a /* Write cache inhibited */
386#define EOE_WWSA 0x1b /* Write with stash allocate */
387#define EOE_WWSAL 0x1c /* Write with stash allocate and lock */
388#define EOE_WWSAO 0x1d /* Write with stash allocate only */
389#define EOE_WWSAOL 0x1e /* Write with stash allocate only and lock */
390#define EOE_VALID 0x80
391
392/* Function prototypes */
393int pamu_domain_init(void);
394int pamu_enable_liodn(int liodn);
395int pamu_disable_liodn(int liodn);
396void pamu_free_subwins(int liodn);
397int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
398 u32 omi, unsigned long rpn, u32 snoopid, uint32_t stashid,
399 u32 subwin_cnt, int prot);
400int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin_addr,
401 phys_addr_t subwin_size, u32 omi, unsigned long rpn,
402 uint32_t snoopid, u32 stashid, int enable, int prot);
403
404u32 get_stash_id(u32 stash_dest_hint, u32 vcpu);
405void get_ome_index(u32 *omi_index, struct device *dev);
406int pamu_update_paace_stash(int liodn, u32 subwin, u32 value);
407int pamu_disable_spaace(int liodn, u32 subwin);
408u32 pamu_get_max_subwin_cnt(void);
409
410#endif /* __FSL_PAMU_H */
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
new file mode 100644
index 000000000000..c857c30da979
--- /dev/null
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -0,0 +1,1172 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright (C) 2013 Freescale Semiconductor, Inc.
16 * Author: Varun Sethi <varun.sethi@freescale.com>
17 *
18 */
19
20#define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__
21
22#include <linux/init.h>
23#include <linux/iommu.h>
24#include <linux/notifier.h>
25#include <linux/slab.h>
26#include <linux/module.h>
27#include <linux/types.h>
28#include <linux/mm.h>
29#include <linux/interrupt.h>
30#include <linux/device.h>
31#include <linux/of_platform.h>
32#include <linux/bootmem.h>
33#include <linux/err.h>
34#include <asm/io.h>
35#include <asm/bitops.h>
36
37#include <asm/pci-bridge.h>
38#include <sysdev/fsl_pci.h>
39
40#include "fsl_pamu_domain.h"
41#include "pci.h"
42
43/*
44 * Global spinlock that needs to be held while
45 * configuring PAMU.
46 */
47static DEFINE_SPINLOCK(iommu_lock);
48
49static struct kmem_cache *fsl_pamu_domain_cache;
50static struct kmem_cache *iommu_devinfo_cache;
51static DEFINE_SPINLOCK(device_domain_lock);
52
53static int __init iommu_init_mempool(void)
54{
55
56 fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
57 sizeof(struct fsl_dma_domain),
58 0,
59 SLAB_HWCACHE_ALIGN,
61						 NULL);
62 if (!fsl_pamu_domain_cache) {
63 pr_debug("Couldn't create fsl iommu_domain cache\n");
64 return -ENOMEM;
65 }
66
67 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
68 sizeof(struct device_domain_info),
69 0,
70 SLAB_HWCACHE_ALIGN,
71 NULL);
72 if (!iommu_devinfo_cache) {
73 pr_debug("Couldn't create devinfo cache\n");
74 kmem_cache_destroy(fsl_pamu_domain_cache);
75 return -ENOMEM;
76 }
77
78 return 0;
79}
80
81static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova)
82{
83 u32 win_cnt = dma_domain->win_cnt;
84 struct dma_window *win_ptr =
85 &dma_domain->win_arr[0];
86 struct iommu_domain_geometry *geom;
87
88 geom = &dma_domain->iommu_domain->geometry;
89
90 if (!win_cnt || !dma_domain->geom_size) {
91 pr_debug("Number of windows/geometry not configured for the domain\n");
92 return 0;
93 }
94
95 if (win_cnt > 1) {
96 u64 subwin_size;
97 dma_addr_t subwin_iova;
98 u32 wnd;
99
100 subwin_size = dma_domain->geom_size >> ilog2(win_cnt);
101 subwin_iova = iova & ~(subwin_size - 1);
102 wnd = (subwin_iova - geom->aperture_start) >> ilog2(subwin_size);
103 win_ptr = &dma_domain->win_arr[wnd];
104 }
105
106 if (win_ptr->valid)
107 return (win_ptr->paddr + (iova & (win_ptr->size - 1)));
108
109 return 0;
110}
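
/*
 * Worked example for get_phys_addr() (illustration only): with a 1 GiB
 * geometry starting at iova 0 and win_cnt = 16, subwin_size is 64 MiB.
 * For iova 0x0c000000, subwin_iova is also 0x0c000000, wnd becomes
 * 0x0c000000 >> 26 = 3, and the result is
 * win_arr[3].paddr + (iova & (64 MiB - 1)).
 */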
111
112static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
113{
114 struct dma_window *sub_win_ptr =
115 &dma_domain->win_arr[0];
116	int i, ret = 0;
117 unsigned long rpn, flags;
118
119 for (i = 0; i < dma_domain->win_cnt; i++) {
120 if (sub_win_ptr[i].valid) {
121 rpn = sub_win_ptr[i].paddr >>
122 PAMU_PAGE_SHIFT;
123 spin_lock_irqsave(&iommu_lock, flags);
124 ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i,
125 sub_win_ptr[i].size,
126 ~(u32)0,
127 rpn,
128 dma_domain->snoop_id,
129 dma_domain->stash_id,
130 (i > 0) ? 1 : 0,
131 sub_win_ptr[i].prot);
132 spin_unlock_irqrestore(&iommu_lock, flags);
133 if (ret) {
134 pr_debug("PAMU SPAACE configuration failed for liodn %d\n",
135 liodn);
136 return ret;
137 }
138 }
139 }
140
141 return ret;
142}
143
144static int map_win(int liodn, struct fsl_dma_domain *dma_domain)
145{
146 int ret;
147 struct dma_window *wnd = &dma_domain->win_arr[0];
148 phys_addr_t wnd_addr = dma_domain->iommu_domain->geometry.aperture_start;
149 unsigned long flags;
150
151 spin_lock_irqsave(&iommu_lock, flags);
152 ret = pamu_config_ppaace(liodn, wnd_addr,
153 wnd->size,
154 ~(u32)0,
155 wnd->paddr >> PAMU_PAGE_SHIFT,
156 dma_domain->snoop_id, dma_domain->stash_id,
157 0, wnd->prot);
158 spin_unlock_irqrestore(&iommu_lock, flags);
159 if (ret)
160 pr_debug("PAMU PAACE configuration failed for liodn %d\n",
161 liodn);
162
163 return ret;
164}
165
166/* Map the DMA window corresponding to the LIODN */
167static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain)
168{
169 if (dma_domain->win_cnt > 1)
170 return map_subwins(liodn, dma_domain);
171 else
172 return map_win(liodn, dma_domain);
173
174}
175
176/* Update window/subwindow mapping for the LIODN */
177static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr)
178{
179 int ret;
180 struct dma_window *wnd = &dma_domain->win_arr[wnd_nr];
181 unsigned long flags;
182
183 spin_lock_irqsave(&iommu_lock, flags);
184 if (dma_domain->win_cnt > 1) {
185 ret = pamu_config_spaace(liodn, dma_domain->win_cnt, wnd_nr,
186 wnd->size,
187 ~(u32)0,
188 wnd->paddr >> PAMU_PAGE_SHIFT,
189 dma_domain->snoop_id,
190 dma_domain->stash_id,
191 (wnd_nr > 0) ? 1 : 0,
192 wnd->prot);
193 if (ret)
194 pr_debug("Subwindow reconfiguration failed for liodn %d\n", liodn);
195 } else {
196 phys_addr_t wnd_addr;
197
198 wnd_addr = dma_domain->iommu_domain->geometry.aperture_start;
199
200 ret = pamu_config_ppaace(liodn, wnd_addr,
201 wnd->size,
202 ~(u32)0,
203 wnd->paddr >> PAMU_PAGE_SHIFT,
204 dma_domain->snoop_id, dma_domain->stash_id,
205 0, wnd->prot);
206 if (ret)
207 pr_debug("Window reconfiguration failed for liodn %d\n", liodn);
208 }
209
210 spin_unlock_irqrestore(&iommu_lock, flags);
211
212 return ret;
213}
214
215static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
216 u32 val)
217{
218 int ret = 0, i;
219 unsigned long flags;
220
221 spin_lock_irqsave(&iommu_lock, flags);
222 if (!dma_domain->win_arr) {
223 pr_debug("Windows not configured, stash destination update failed for liodn %d\n", liodn);
224 spin_unlock_irqrestore(&iommu_lock, flags);
225 return -EINVAL;
226 }
227
228 for (i = 0; i < dma_domain->win_cnt; i++) {
229 ret = pamu_update_paace_stash(liodn, i, val);
230 if (ret) {
231 pr_debug("Failed to update SPAACE %d field for liodn %d\n ", i, liodn);
232 spin_unlock_irqrestore(&iommu_lock, flags);
233 return ret;
234 }
235 }
236
237 spin_unlock_irqrestore(&iommu_lock, flags);
238
239 return ret;
240}
241
242/* Set the geometry parameters for a LIODN */
243static int pamu_set_liodn(int liodn, struct device *dev,
244 struct fsl_dma_domain *dma_domain,
245 struct iommu_domain_geometry *geom_attr,
246 u32 win_cnt)
247{
248 phys_addr_t window_addr, window_size;
249 phys_addr_t subwin_size;
250 int ret = 0, i;
251 u32 omi_index = ~(u32)0;
252 unsigned long flags;
253
254 /*
255 * Configure the omi_index at the geometry setup time.
256 * This is a static value which depends on the type of
257 * device and would not change thereafter.
258 */
259 get_ome_index(&omi_index, dev);
260
261 window_addr = geom_attr->aperture_start;
262 window_size = dma_domain->geom_size;
263
264 spin_lock_irqsave(&iommu_lock, flags);
265 ret = pamu_disable_liodn(liodn);
266 if (!ret)
267 ret = pamu_config_ppaace(liodn, window_addr, window_size, omi_index,
268 0, dma_domain->snoop_id,
269 dma_domain->stash_id, win_cnt, 0);
270 spin_unlock_irqrestore(&iommu_lock, flags);
271 if (ret) {
272 pr_debug("PAMU PAACE configuration failed for liodn %d, win_cnt =%d\n", liodn, win_cnt);
273 return ret;
274 }
275
276 if (win_cnt > 1) {
277 subwin_size = window_size >> ilog2(win_cnt);
278 for (i = 0; i < win_cnt; i++) {
279 spin_lock_irqsave(&iommu_lock, flags);
280 ret = pamu_disable_spaace(liodn, i);
281 if (!ret)
282 ret = pamu_config_spaace(liodn, win_cnt, i,
283 subwin_size, omi_index,
284 0, dma_domain->snoop_id,
285 dma_domain->stash_id,
286 0, 0);
287 spin_unlock_irqrestore(&iommu_lock, flags);
288 if (ret) {
289 pr_debug("PAMU SPAACE configuration failed for liodn %d\n", liodn);
290 return ret;
291 }
292 }
293 }
294
295 return ret;
296}
297
298static int check_size(u64 size, dma_addr_t iova)
299{
300 /*
301 * Size must be a power of two and at least be equal
302 * to PAMU page size.
303 */
304 if (!is_power_of_2(size) || size < PAMU_PAGE_SIZE) {
305 pr_debug("%s: size too small or not a power of two\n", __func__);
306 return -EINVAL;
307 }
308
309	/* The iova must be aligned to the window size */
310 if (iova & (size - 1)) {
311 pr_debug("%s: address is not aligned with window size\n", __func__);
312 return -EINVAL;
313 }
314
315 return 0;
316}
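
/*
 * Example (illustration only): check_size(0x100000, iova) accepts any
 * iova that is a multiple of 1 MiB, while a 0x900-byte window is rejected
 * because it is neither a power of two nor at least PAMU_PAGE_SIZE.
 */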
317
318static struct fsl_dma_domain *iommu_alloc_dma_domain(void)
319{
320 struct fsl_dma_domain *domain;
321
322 domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
323 if (!domain)
324 return NULL;
325
326 domain->stash_id = ~(u32)0;
327 domain->snoop_id = ~(u32)0;
328 domain->win_cnt = pamu_get_max_subwin_cnt();
329 domain->geom_size = 0;
330
331 INIT_LIST_HEAD(&domain->devices);
332
333 spin_lock_init(&domain->domain_lock);
334
335 return domain;
336}
337
338static inline struct device_domain_info *find_domain(struct device *dev)
339{
340 return dev->archdata.iommu_domain;
341}
342
343static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
344{
345 unsigned long flags;
346
347 list_del(&info->link);
348 spin_lock_irqsave(&iommu_lock, flags);
349 if (win_cnt > 1)
350 pamu_free_subwins(info->liodn);
351 pamu_disable_liodn(info->liodn);
352 spin_unlock_irqrestore(&iommu_lock, flags);
353 spin_lock_irqsave(&device_domain_lock, flags);
354 info->dev->archdata.iommu_domain = NULL;
355 kmem_cache_free(iommu_devinfo_cache, info);
356 spin_unlock_irqrestore(&device_domain_lock, flags);
357}
358
359static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
360{
361 struct device_domain_info *info, *tmp;
362 unsigned long flags;
363
364 spin_lock_irqsave(&dma_domain->domain_lock, flags);
365 /* Remove the device from the domain device list */
366 list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
367 if (!dev || (info->dev == dev))
368 remove_device_ref(info, dma_domain->win_cnt);
369 }
370 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
371}
372
373static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
374{
375 struct device_domain_info *info, *old_domain_info;
376 unsigned long flags;
377
378 spin_lock_irqsave(&device_domain_lock, flags);
379	/*
380	 * Check if the device is already attached to a domain; if it
381	 * is attached to a different domain, detach it from that domain first.
382	 */
383 old_domain_info = find_domain(dev);
384 if (old_domain_info && old_domain_info->domain != dma_domain) {
385 spin_unlock_irqrestore(&device_domain_lock, flags);
386 detach_device(dev, old_domain_info->domain);
387 spin_lock_irqsave(&device_domain_lock, flags);
388 }
389
390 info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
391
392 info->dev = dev;
393 info->liodn = liodn;
394 info->domain = dma_domain;
395
396 list_add(&info->link, &dma_domain->devices);
397 /*
398 * In case of devices with multiple LIODNs just store
399 * the info for the first LIODN as all
400 * LIODNs share the same domain
401 */
402 if (!old_domain_info)
403 dev->archdata.iommu_domain = info;
404 spin_unlock_irqrestore(&device_domain_lock, flags);
405
406}
407
408static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
409 dma_addr_t iova)
410{
411 struct fsl_dma_domain *dma_domain = domain->priv;
412
413 if ((iova < domain->geometry.aperture_start) ||
414 iova > (domain->geometry.aperture_end))
415 return 0;
416
417 return get_phys_addr(dma_domain, iova);
418}
419
420static int fsl_pamu_domain_has_cap(struct iommu_domain *domain,
421 unsigned long cap)
422{
423 return cap == IOMMU_CAP_CACHE_COHERENCY;
424}
425
426static void fsl_pamu_domain_destroy(struct iommu_domain *domain)
427{
428 struct fsl_dma_domain *dma_domain = domain->priv;
429
430 domain->priv = NULL;
431
432 /* remove all the devices from the device list */
433 detach_device(NULL, dma_domain);
434
435 dma_domain->enabled = 0;
436 dma_domain->mapped = 0;
437
438 kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
439}
440
441static int fsl_pamu_domain_init(struct iommu_domain *domain)
442{
443 struct fsl_dma_domain *dma_domain;
444
445 dma_domain = iommu_alloc_dma_domain();
446 if (!dma_domain) {
447 pr_debug("dma_domain allocation failed\n");
448 return -ENOMEM;
449 }
450 domain->priv = dma_domain;
451 dma_domain->iommu_domain = domain;
452	/* default geometry: 64 GB, i.e. the maximum system address */
453 domain->geometry.aperture_start = 0;
454 domain->geometry.aperture_end = (1ULL << 36) - 1;
455 domain->geometry.force_aperture = true;
456
457 return 0;
458}
459
460/* Configure geometry settings for all LIODNs associated with domain */
461static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain,
462 struct iommu_domain_geometry *geom_attr,
463 u32 win_cnt)
464{
465 struct device_domain_info *info;
466 int ret = 0;
467
468 list_for_each_entry(info, &dma_domain->devices, link) {
469 ret = pamu_set_liodn(info->liodn, info->dev, dma_domain,
470 geom_attr, win_cnt);
471 if (ret)
472 break;
473 }
474
475 return ret;
476}
477
478/* Update stash destination for all LIODNs associated with the domain */
479static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
480{
481 struct device_domain_info *info;
482 int ret = 0;
483
484 list_for_each_entry(info, &dma_domain->devices, link) {
485 ret = update_liodn_stash(info->liodn, dma_domain, val);
486 if (ret)
487 break;
488 }
489
490 return ret;
491}
492
493/* Update domain mappings for all LIODNs associated with the domain */
494static int update_domain_mapping(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
495{
496 struct device_domain_info *info;
497 int ret = 0;
498
499 list_for_each_entry(info, &dma_domain->devices, link) {
500 ret = update_liodn(info->liodn, dma_domain, wnd_nr);
501 if (ret)
502 break;
503 }
504 return ret;
505}
506
507static int disable_domain_win(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
508{
509 struct device_domain_info *info;
510 int ret = 0;
511
512 list_for_each_entry(info, &dma_domain->devices, link) {
513 if (dma_domain->win_cnt == 1 && dma_domain->enabled) {
514 ret = pamu_disable_liodn(info->liodn);
515 if (!ret)
516 dma_domain->enabled = 0;
517 } else {
518 ret = pamu_disable_spaace(info->liodn, wnd_nr);
519 }
520 }
521
522 return ret;
523}
524
525static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
526{
527 struct fsl_dma_domain *dma_domain = domain->priv;
528 unsigned long flags;
529 int ret;
530
531 spin_lock_irqsave(&dma_domain->domain_lock, flags);
532 if (!dma_domain->win_arr) {
533 pr_debug("Number of windows not configured\n");
534 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
535 return;
536 }
537
538 if (wnd_nr >= dma_domain->win_cnt) {
539 pr_debug("Invalid window index\n");
540 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
541 return;
542 }
543
544 if (dma_domain->win_arr[wnd_nr].valid) {
545 ret = disable_domain_win(dma_domain, wnd_nr);
546 if (!ret) {
547 dma_domain->win_arr[wnd_nr].valid = 0;
548 dma_domain->mapped--;
549 }
550 }
551
552 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
553
554}
555
556static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
557 phys_addr_t paddr, u64 size, int prot)
558{
559 struct fsl_dma_domain *dma_domain = domain->priv;
560 struct dma_window *wnd;
561 int pamu_prot = 0;
562 int ret;
563 unsigned long flags;
564 u64 win_size;
565
566 if (prot & IOMMU_READ)
567 pamu_prot |= PAACE_AP_PERMS_QUERY;
568 if (prot & IOMMU_WRITE)
569 pamu_prot |= PAACE_AP_PERMS_UPDATE;
570
571 spin_lock_irqsave(&dma_domain->domain_lock, flags);
572 if (!dma_domain->win_arr) {
573 pr_debug("Number of windows not configured\n");
574 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
575 return -ENODEV;
576 }
577
578 if (wnd_nr >= dma_domain->win_cnt) {
579 pr_debug("Invalid window index\n");
580 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
581 return -EINVAL;
582 }
583
584 win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt);
585 if (size > win_size) {
586 pr_debug("Invalid window size \n");
587 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
588 return -EINVAL;
589 }
590
591 if (dma_domain->win_cnt == 1) {
592 if (dma_domain->enabled) {
593 pr_debug("Disable the window before updating the mapping\n");
594 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
595 return -EBUSY;
596 }
597
598 ret = check_size(size, domain->geometry.aperture_start);
599 if (ret) {
600 pr_debug("Aperture start not aligned to the size\n");
601 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
602 return -EINVAL;
603 }
604 }
605
606 wnd = &dma_domain->win_arr[wnd_nr];
607 if (!wnd->valid) {
608 wnd->paddr = paddr;
609 wnd->size = size;
610 wnd->prot = pamu_prot;
611
612 ret = update_domain_mapping(dma_domain, wnd_nr);
613 if (!ret) {
614 wnd->valid = 1;
615 dma_domain->mapped++;
616 }
617 } else {
618 pr_debug("Disable the window before updating the mapping\n");
619 ret = -EBUSY;
620 }
621
622 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
623
624 return ret;
625}
626
627/*
628 * Attach the LIODN to the DMA domain and configure the geometry
629 * and window mappings.
630 */
631static int handle_attach_device(struct fsl_dma_domain *dma_domain,
632 struct device *dev, const u32 *liodn,
633 int num)
634{
635 unsigned long flags;
636 struct iommu_domain *domain = dma_domain->iommu_domain;
637 int ret = 0;
638 int i;
639
640 spin_lock_irqsave(&dma_domain->domain_lock, flags);
641 for (i = 0; i < num; i++) {
642
643 /* Ensure that LIODN value is valid */
644 if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
645 pr_debug("Invalid liodn %d, attach device failed for %s\n",
646 liodn[i], dev->of_node->full_name);
647 ret = -EINVAL;
648 break;
649 }
650
651 attach_device(dma_domain, liodn[i], dev);
652 /*
653 * Check if geometry has already been configured
654 * for the domain. If yes, set the geometry for
655 * the LIODN.
656 */
657 if (dma_domain->win_arr) {
658 u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0;
659 ret = pamu_set_liodn(liodn[i], dev, dma_domain,
660 &domain->geometry,
661 win_cnt);
662 if (ret)
663 break;
664 if (dma_domain->mapped) {
665 /*
666 * Create window/subwindow mapping for
667 * the LIODN.
668 */
669 ret = map_liodn(liodn[i], dma_domain);
670 if (ret)
671 break;
672 }
673 }
674 }
675 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
676
677 return ret;
678}
679
680static int fsl_pamu_attach_device(struct iommu_domain *domain,
681 struct device *dev)
682{
683 struct fsl_dma_domain *dma_domain = domain->priv;
684 const u32 *liodn;
685 u32 liodn_cnt;
686 int len, ret = 0;
687 struct pci_dev *pdev = NULL;
688 struct pci_controller *pci_ctl;
689
690 /*
691 * Use LIODN of the PCI controller while attaching a
692 * PCI device.
693 */
694 if (dev->bus == &pci_bus_type) {
695 pdev = to_pci_dev(dev);
696 pci_ctl = pci_bus_to_host(pdev->bus);
697 /*
698 * make dev point to pci controller device
699 * so we can get the LIODN programmed by
700 * u-boot.
701 */
702 dev = pci_ctl->parent;
703 }
704
705 liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
706 if (liodn) {
707 liodn_cnt = len / sizeof(u32);
708 ret = handle_attach_device(dma_domain, dev,
709 liodn, liodn_cnt);
710 } else {
711 pr_debug("missing fsl,liodn property at %s\n",
712 dev->of_node->full_name);
713 ret = -EINVAL;
714 }
715
716 return ret;
717}
718
719static void fsl_pamu_detach_device(struct iommu_domain *domain,
720 struct device *dev)
721{
722 struct fsl_dma_domain *dma_domain = domain->priv;
723 const u32 *prop;
724 int len;
725 struct pci_dev *pdev = NULL;
726 struct pci_controller *pci_ctl;
727
728 /*
729 * Use LIODN of the PCI controller while detaching a
730 * PCI device.
731 */
732 if (dev->bus == &pci_bus_type) {
733 pdev = to_pci_dev(dev);
734 pci_ctl = pci_bus_to_host(pdev->bus);
735 /*
736 * make dev point to pci controller device
737 * so we can get the LIODN programmed by
738 * u-boot.
739 */
740 dev = pci_ctl->parent;
741 }
742
743 prop = of_get_property(dev->of_node, "fsl,liodn", &len);
744 if (prop)
745 detach_device(dev, dma_domain);
746 else
747 pr_debug("missing fsl,liodn property at %s\n",
748 dev->of_node->full_name);
749}
750
751static int configure_domain_geometry(struct iommu_domain *domain, void *data)
752{
753 struct iommu_domain_geometry *geom_attr = data;
754 struct fsl_dma_domain *dma_domain = domain->priv;
755 dma_addr_t geom_size;
756 unsigned long flags;
757
758 geom_size = geom_attr->aperture_end - geom_attr->aperture_start + 1;
759 /*
760 * Sanity check the geometry size. Also, we do not support
761 * DMA outside of the geometry.
762 */
763 if (check_size(geom_size, geom_attr->aperture_start) ||
764 !geom_attr->force_aperture) {
765 pr_debug("Invalid PAMU geometry attributes\n");
766 return -EINVAL;
767 }
768
769 spin_lock_irqsave(&dma_domain->domain_lock, flags);
770 if (dma_domain->enabled) {
771 pr_debug("Can't set geometry attributes as domain is active\n");
772 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
773 return -EBUSY;
774 }
775
776 /* Copy the domain geometry information */
777 memcpy(&domain->geometry, geom_attr,
778 sizeof(struct iommu_domain_geometry));
779 dma_domain->geom_size = geom_size;
780
781 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
782
783 return 0;
784}
785
786/* Set the domain stash attribute */
787static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
788{
789 struct pamu_stash_attribute *stash_attr = data;
790 unsigned long flags;
791 int ret;
792
793 spin_lock_irqsave(&dma_domain->domain_lock, flags);
794
795 memcpy(&dma_domain->dma_stash, stash_attr,
796 sizeof(struct pamu_stash_attribute));
797
798 dma_domain->stash_id = get_stash_id(stash_attr->cache,
799 stash_attr->cpu);
800 if (dma_domain->stash_id == ~(u32)0) {
801 pr_debug("Invalid stash attributes\n");
802 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
803 return -EINVAL;
804 }
805
806 ret = update_domain_stash(dma_domain, dma_domain->stash_id);
807
808 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
809
810 return ret;
811}
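
/*
 * Caller-side sketch for the stash attribute (illustrative only; it
 * assumes the pamu_stash_attribute layout and the PAMU_ATTR_CACHE_L1
 * value from asm/fsl_pamu_stash.h):
 *
 *	struct pamu_stash_attribute stash = {
 *		.cpu   = 0,			(stash into CPU 0's cache)
 *		.cache = PAMU_ATTR_CACHE_L1,	(target the L1 cache)
 *	};
 *
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_FSL_PAMU_STASH, &stash);
 */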
812
813/* Configure the domain DMA state, i.e. enable/disable DMA */
814static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable)
815{
816 struct device_domain_info *info;
817 unsigned long flags;
818 int ret;
819
820 spin_lock_irqsave(&dma_domain->domain_lock, flags);
821
822 if (enable && !dma_domain->mapped) {
823 pr_debug("Can't enable DMA domain without valid mapping\n");
824 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
825 return -ENODEV;
826 }
827
828 dma_domain->enabled = enable;
829 list_for_each_entry(info, &dma_domain->devices,
830 link) {
831 ret = (enable) ? pamu_enable_liodn(info->liodn) :
832 pamu_disable_liodn(info->liodn);
833 if (ret)
834 pr_debug("Unable to set dma state for liodn %d",
835 info->liodn);
836 }
837 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
838
839 return 0;
840}
841
842static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
843 enum iommu_attr attr_type, void *data)
844{
845 struct fsl_dma_domain *dma_domain = domain->priv;
846 int ret = 0;
847
849 switch (attr_type) {
850 case DOMAIN_ATTR_GEOMETRY:
851 ret = configure_domain_geometry(domain, data);
852 break;
853 case DOMAIN_ATTR_FSL_PAMU_STASH:
854 ret = configure_domain_stash(dma_domain, data);
855 break;
856 case DOMAIN_ATTR_FSL_PAMU_ENABLE:
857 ret = configure_domain_dma_state(dma_domain, *(int *)data);
858 break;
859 default:
860 pr_debug("Unsupported attribute type\n");
861 ret = -EINVAL;
862 break;
863	}
864
865 return ret;
866}
867
868static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
869 enum iommu_attr attr_type, void *data)
870{
871 struct fsl_dma_domain *dma_domain = domain->priv;
872 int ret = 0;
873
875 switch (attr_type) {
876 case DOMAIN_ATTR_FSL_PAMU_STASH:
877 memcpy((struct pamu_stash_attribute *) data, &dma_domain->dma_stash,
878 sizeof(struct pamu_stash_attribute));
879 break;
880 case DOMAIN_ATTR_FSL_PAMU_ENABLE:
881 *(int *)data = dma_domain->enabled;
882 break;
883 case DOMAIN_ATTR_FSL_PAMUV1:
884 *(int *)data = DOMAIN_ATTR_FSL_PAMUV1;
885 break;
886 default:
887 pr_debug("Unsupported attribute type\n");
888 ret = -EINVAL;
889 break;
890	}
891
892 return ret;
893}
894
895#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
896
897static struct iommu_group *get_device_iommu_group(struct device *dev)
898{
899 struct iommu_group *group;
900
901 group = iommu_group_get(dev);
902 if (!group)
903 group = iommu_group_alloc();
904
905 return group;
906}
907
908static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
909{
910 u32 version;
911
912	/* Check the PCI controller version number by reading the BRR1 register */
913 version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
914 version &= PCI_FSL_BRR1_VER;
915	/* If the PCI controller version is >= 0x204 we can partition endpoints */
916 if (version >= 0x204)
917 return 1;
918
919 return 0;
920}
921
922/* Get iommu group information from peer devices or devices on the parent bus */
923static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
924{
925 struct pci_dev *tmp;
926 struct iommu_group *group;
927 struct pci_bus *bus = pdev->bus;
928
929 /*
930	 * Traverse the pci bus device list to get
931 * the shared iommu group.
932 */
933 while (bus) {
934 list_for_each_entry(tmp, &bus->devices, bus_list) {
935 if (tmp == pdev)
936 continue;
937 group = iommu_group_get(&tmp->dev);
938 if (group)
939 return group;
940 }
941
942 bus = bus->parent;
943 }
944
945 return NULL;
946}
947
948static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
949{
950 struct pci_controller *pci_ctl;
951 bool pci_endpt_partioning;
952 struct iommu_group *group = NULL;
953 struct pci_dev *bridge, *dma_pdev = NULL;
954
955 pci_ctl = pci_bus_to_host(pdev->bus);
956 pci_endpt_partioning = check_pci_ctl_endpt_part(pci_ctl);
957	/* We can partition PCIe devices, so assign a device group to the device */
958 if (pci_endpt_partioning) {
959 bridge = pci_find_upstream_pcie_bridge(pdev);
960 if (bridge) {
961 if (pci_is_pcie(bridge))
962 dma_pdev = pci_get_domain_bus_and_slot(
963 pci_domain_nr(pdev->bus),
964 bridge->subordinate->number, 0);
965 if (!dma_pdev)
966 dma_pdev = pci_dev_get(bridge);
967 } else
968 dma_pdev = pci_dev_get(pdev);
969
970 /* Account for quirked devices */
971 swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
972
973 /*
974 * If it's a multifunction device that does not support our
975		 * required ACS flags, add it to the same group as the lowest
976		 * numbered function that also does not support the required ACS flags.
977 */
978 if (dma_pdev->multifunction &&
979 !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
980 u8 i, slot = PCI_SLOT(dma_pdev->devfn);
981
982 for (i = 0; i < 8; i++) {
983 struct pci_dev *tmp;
984
985 tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
986 if (!tmp)
987 continue;
988
989 if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
990 swap_pci_ref(&dma_pdev, tmp);
991 break;
992 }
993 pci_dev_put(tmp);
994 }
995 }
996
997 /*
998 * Devices on the root bus go through the iommu. If that's not us,
999 * find the next upstream device and test ACS up to the root bus.
1000 * Finding the next device may require skipping virtual buses.
1001 */
1002 while (!pci_is_root_bus(dma_pdev->bus)) {
1003 struct pci_bus *bus = dma_pdev->bus;
1004
1005 while (!bus->self) {
1006 if (!pci_is_root_bus(bus))
1007 bus = bus->parent;
1008 else
1009 goto root_bus;
1010 }
1011
1012 if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
1013 break;
1014
1015 swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
1016 }
1017
1018root_bus:
1019 group = get_device_iommu_group(&dma_pdev->dev);
1020 pci_dev_put(dma_pdev);
1021		/*
1022		 * The PCIe controller is not a partitionable entity;
1023		 * free the controller device's iommu_group.
1024		 */
1025 if (pci_ctl->parent->iommu_group)
1026 iommu_group_remove_device(pci_ctl->parent);
1027 } else {
1028 /*
1029 * All devices connected to the controller will share the
1030 * PCI controllers device group. If this is the first
1031 * device to be probed for the pci controller, copy the
1032 * device group information from the PCI controller device
1033 * node and remove the PCI controller iommu group.
1034 * For subsequent devices, the iommu group information can
1035 * be obtained from sibling devices (i.e. from the bus_devices
1036 * link list).
1037 */
1038 if (pci_ctl->parent->iommu_group) {
1039 group = get_device_iommu_group(pci_ctl->parent);
1040 iommu_group_remove_device(pci_ctl->parent);
1041 } else
1042 group = get_shared_pci_device_group(pdev);
1043 }
1044
1045 return group;
1046}
1047
1048static int fsl_pamu_add_device(struct device *dev)
1049{
1050 struct iommu_group *group = NULL;
1051 struct pci_dev *pdev;
1052 const u32 *prop;
1053 int ret, len;
1054
1055 /*
1056 * For platform devices we allocate a separate group for
1057 * each of the devices.
1058 */
1059 if (dev->bus == &pci_bus_type) {
1060 pdev = to_pci_dev(dev);
1061 /* Don't create device groups for virtual PCI bridges */
1062 if (pdev->subordinate)
1063 return 0;
1064
1065 group = get_pci_device_group(pdev);
1066
1067 } else {
1068 prop = of_get_property(dev->of_node, "fsl,liodn", &len);
1069 if (prop)
1070 group = get_device_iommu_group(dev);
1071 }
1072
1073 if (!group || IS_ERR(group))
1074 return PTR_ERR(group);
1075
1076 ret = iommu_group_add_device(group, dev);
1077
1078 iommu_group_put(group);
1079 return ret;
1080}
1081
1082static void fsl_pamu_remove_device(struct device *dev)
1083{
1084 iommu_group_remove_device(dev);
1085}
1086
1087static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
1088{
1089 struct fsl_dma_domain *dma_domain = domain->priv;
1090 unsigned long flags;
1091 int ret;
1092
1093 spin_lock_irqsave(&dma_domain->domain_lock, flags);
1094 /* Ensure domain is inactive i.e. DMA should be disabled for the domain */
1095 if (dma_domain->enabled) {
1096 pr_debug("Can't set geometry attributes as domain is active\n");
1097 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
1098 return -EBUSY;
1099 }
1100
1101 /* Ensure that the geometry has been set for the domain */
1102 if (!dma_domain->geom_size) {
1103 pr_debug("Please configure geometry before setting the number of windows\n");
1104 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
1105 return -EINVAL;
1106 }
1107
1108	/*
1109	 * Ensure we have a valid window count, i.e. it must not exceed
1110	 * the maximum permissible limit and must be a power of two.
1111	 */
1112 if (w_count > pamu_get_max_subwin_cnt() || !is_power_of_2(w_count)) {
1113 pr_debug("Invalid window count\n");
1114 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
1115 return -EINVAL;
1116 }
1117
1118 ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
1119 ((w_count > 1) ? w_count : 0));
1120 if (!ret) {
1121		/* kfree(NULL) is a no-op, so no check is needed */
1122		kfree(dma_domain->win_arr);
1123 dma_domain->win_arr = kzalloc(sizeof(struct dma_window) *
1124 w_count, GFP_ATOMIC);
1125 if (!dma_domain->win_arr) {
1126 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
1127 return -ENOMEM;
1128 }
1129 dma_domain->win_cnt = w_count;
1130 }
1131 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
1132
1133 return ret;
1134}
1135
1136static u32 fsl_pamu_get_windows(struct iommu_domain *domain)
1137{
1138 struct fsl_dma_domain *dma_domain = domain->priv;
1139
1140 return dma_domain->win_cnt;
1141}
1142
1143static struct iommu_ops fsl_pamu_ops = {
1144 .domain_init = fsl_pamu_domain_init,
1145 .domain_destroy = fsl_pamu_domain_destroy,
1146 .attach_dev = fsl_pamu_attach_device,
1147 .detach_dev = fsl_pamu_detach_device,
1148 .domain_window_enable = fsl_pamu_window_enable,
1149 .domain_window_disable = fsl_pamu_window_disable,
1150 .domain_get_windows = fsl_pamu_get_windows,
1151 .domain_set_windows = fsl_pamu_set_windows,
1152 .iova_to_phys = fsl_pamu_iova_to_phys,
1153 .domain_has_cap = fsl_pamu_domain_has_cap,
1154 .domain_set_attr = fsl_pamu_set_domain_attr,
1155 .domain_get_attr = fsl_pamu_get_domain_attr,
1156 .add_device = fsl_pamu_add_device,
1157 .remove_device = fsl_pamu_remove_device,
1158};
1159
1160int pamu_domain_init(void)
1161{
1162 int ret = 0;
1163
1164 ret = iommu_init_mempool();
1165 if (ret)
1166 return ret;
1167
1168 bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
1169 bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);
1170
1171 return ret;
1172}
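
For reference, a hedged sketch of how a client could drive this window-based API through the generic IOMMU interfaces. The calls (iommu_domain_alloc, iommu_domain_set_attr, iommu_attach_device, iommu_domain_window_enable) are the standard IOMMU-API entry points that dispatch to the fsl_pamu_ops above; the device pointer dev, the physical base phys_base, and the sizes are illustrative assumptions, not part of the patch:

	/* a minimal sketch, assuming "dev" is a PAMU-translated device */
	struct iommu_domain *dom = iommu_domain_alloc(&platform_bus_type);
	struct iommu_domain_geometry geom = {
		.aperture_start = 0,
		.aperture_end   = (1ULL << 30) - 1,	/* 1 GiB aperture */
		.force_aperture = true,
	};
	u32 wins = 1;
	int enable = 1;

	iommu_domain_set_attr(dom, DOMAIN_ATTR_GEOMETRY, &geom);
	iommu_domain_set_attr(dom, DOMAIN_ATTR_WINDOWS, &wins);	/* allocates win_arr */
	iommu_attach_device(dom, dev);
	/* back window 0 with 1 GiB of contiguous memory at phys_base */
	iommu_domain_window_enable(dom, 0, phys_base, 1ULL << 30,
				   IOMMU_READ | IOMMU_WRITE);
	iommu_domain_set_attr(dom, DOMAIN_ATTR_FSL_PAMU_ENABLE, &enable);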
diff --git a/drivers/iommu/fsl_pamu_domain.h b/drivers/iommu/fsl_pamu_domain.h
new file mode 100644
index 000000000000..c90293f99709
--- /dev/null
+++ b/drivers/iommu/fsl_pamu_domain.h
@@ -0,0 +1,85 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright (C) 2013 Freescale Semiconductor, Inc.
16 *
17 */
18
19#ifndef __FSL_PAMU_DOMAIN_H
20#define __FSL_PAMU_DOMAIN_H
21
22#include "fsl_pamu.h"
23
24struct dma_window {
25 phys_addr_t paddr;
26 u64 size;
27 int valid;
28 int prot;
29};
30
31struct fsl_dma_domain {
32 /*
33 * Indicates the geometry size for the domain.
34 * This would be set when the geometry is
35 * configured for the domain.
36 */
37 dma_addr_t geom_size;
38	/*
39	 * Number of windows associated with this domain.
40	 * During domain initialization, it is set to the
41	 * maximum number of subwindows allowed for a LIODN.
42	 * The minimum value is 1, indicating a single PAMU
43	 * window without any subwindows.  The value can be set or
44	 * queried via the set_attr/get_attr API for DOMAIN_ATTR_WINDOWS,
45	 * but only once the geometry has been configured.
46	 */
47 u32 win_cnt;
48 /*
49 * win_arr contains information of the configured
50 * windows for a domain. This is allocated only
51 * when the number of windows for the domain are
52 * set.
53 */
54 struct dma_window *win_arr;
55 /* list of devices associated with the domain */
56 struct list_head devices;
57 /* dma_domain states:
58 * mapped - A particular mapping has been created
59 * within the configured geometry.
60 * enabled - DMA has been enabled for the given
61 * domain. This translates to setting of the
62 * valid bit for the primary PAACE in the PAMU
63 * PAACT table. Domain geometry should be set and
64 * it must have a valid mapping before DMA can be
65 * enabled for it.
66 *
67 */
68 int mapped;
69 int enabled;
70 /* stash_id obtained from the stash attribute details */
71 u32 stash_id;
72 struct pamu_stash_attribute dma_stash;
73 u32 snoop_id;
74 struct iommu_domain *iommu_domain;
75 spinlock_t domain_lock;
76};
77
78/* domain-device relationship */
79struct device_domain_info {
80 struct list_head link; /* link to domain siblings */
81 struct device *dev;
82 u32 liodn;
83 struct fsl_dma_domain *domain; /* pointer to domain */
84};
85#endif /* __FSL_PAMU_DOMAIN_H */
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index eec0d3e04bf5..15e9b57e9cf0 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -890,56 +890,54 @@ static int dma_pte_clear_range(struct dmar_domain *domain,
890 return order; 890 return order;
891} 891}
892 892
893static void dma_pte_free_level(struct dmar_domain *domain, int level,
894 struct dma_pte *pte, unsigned long pfn,
895 unsigned long start_pfn, unsigned long last_pfn)
896{
897 pfn = max(start_pfn, pfn);
898 pte = &pte[pfn_level_offset(pfn, level)];
899
900 do {
901 unsigned long level_pfn;
902 struct dma_pte *level_pte;
903
904 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
905 goto next;
906
907 level_pfn = pfn & level_mask(level - 1);
908 level_pte = phys_to_virt(dma_pte_addr(pte));
909
910 if (level > 2)
911 dma_pte_free_level(domain, level - 1, level_pte,
912 level_pfn, start_pfn, last_pfn);
913
914 /* If range covers entire pagetable, free it */
915 if (!(start_pfn > level_pfn ||
916 last_pfn < level_pfn + level_size(level))) {
917 dma_clear_pte(pte);
918 domain_flush_cache(domain, pte, sizeof(*pte));
919 free_pgtable_page(level_pte);
920 }
921next:
922 pfn += level_size(level);
923 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
924}
925
893/* free page table pages. last level pte should already be cleared */ 926/* free page table pages. last level pte should already be cleared */
894static void dma_pte_free_pagetable(struct dmar_domain *domain, 927static void dma_pte_free_pagetable(struct dmar_domain *domain,
895 unsigned long start_pfn, 928 unsigned long start_pfn,
896 unsigned long last_pfn) 929 unsigned long last_pfn)
897{ 930{
898 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; 931 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
899 struct dma_pte *first_pte, *pte;
900 int total = agaw_to_level(domain->agaw);
901 int level;
902 unsigned long tmp;
903 int large_page = 2;
904 932
905 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); 933 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
906 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); 934 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
907 BUG_ON(start_pfn > last_pfn); 935 BUG_ON(start_pfn > last_pfn);
908 936
909 /* We don't need lock here; nobody else touches the iova range */ 937 /* We don't need lock here; nobody else touches the iova range */
910 level = 2; 938 dma_pte_free_level(domain, agaw_to_level(domain->agaw),
911 while (level <= total) { 939 domain->pgd, 0, start_pfn, last_pfn);
912 tmp = align_to_level(start_pfn, level);
913
914 /* If we can't even clear one PTE at this level, we're done */
915 if (tmp + level_size(level) - 1 > last_pfn)
916 return;
917
918 do {
919 large_page = level;
920 first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
921 if (large_page > level)
922 level = large_page + 1;
923 if (!pte) {
924 tmp = align_to_level(tmp + 1, level + 1);
925 continue;
926 }
927 do {
928 if (dma_pte_present(pte)) {
929 free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
930 dma_clear_pte(pte);
931 }
932 pte++;
933 tmp += level_size(level);
934 } while (!first_pte_in_page(pte) &&
935 tmp + level_size(level) - 1 <= last_pfn);
936 940
937 domain_flush_cache(domain, first_pte,
938 (void *)pte - (void *)first_pte);
939
940 } while (tmp && tmp + level_size(level) - 1 <= last_pfn);
941 level++;
942 }
943 /* free pgd */ 941 /* free pgd */
944 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { 942 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
945 free_pgtable_page(domain->pgd); 943 free_pgtable_page(domain->pgd);
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 3aeb7305e2f5..7ea319e95b47 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -58,10 +58,26 @@ struct iommu_domain {
58#define IOMMU_CAP_CACHE_COHERENCY 0x1 58#define IOMMU_CAP_CACHE_COHERENCY 0x1
59#define IOMMU_CAP_INTR_REMAP 0x2 /* isolates device intrs */ 59#define IOMMU_CAP_INTR_REMAP 0x2 /* isolates device intrs */
60 60
61/*
62 * The following constraints are specific to FSL_PAMUV1:
63 * -aperture must be a power of 2, and naturally aligned
64 * -number of windows must be a power of 2, and the address space size
65 * of each window is determined by aperture size / # of windows
66 * -the actual size of the mapped region of a window must be a power
67 * of 2 starting with 4KB, and the physical address must be naturally
68 * aligned
69 * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above-mentioned constraints.
70 * The caller can invoke iommu_domain_get_attr to check if the underlying
71 * iommu implementation supports these constraints.
72 */
73
61enum iommu_attr { 74enum iommu_attr {
62 DOMAIN_ATTR_GEOMETRY, 75 DOMAIN_ATTR_GEOMETRY,
63 DOMAIN_ATTR_PAGING, 76 DOMAIN_ATTR_PAGING,
64 DOMAIN_ATTR_WINDOWS, 77 DOMAIN_ATTR_WINDOWS,
78 DOMAIN_ATTR_FSL_PAMU_STASH,
79 DOMAIN_ATTR_FSL_PAMU_ENABLE,
80 DOMAIN_ATTR_FSL_PAMUV1,
65 DOMAIN_ATTR_MAX, 81 DOMAIN_ATTR_MAX,
66}; 82};
67 83
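
As the comment above suggests, a caller can verify it is talking to a PAMU v1 before relying on these constraints. A minimal sketch (the check value mirrors what fsl_pamu_get_domain_attr in fsl_pamu_domain.c returns for this attribute; "domain" is an already-allocated iommu_domain):

	int attr = 0;

	if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_FSL_PAMUV1, &attr) &&
	    attr == DOMAIN_ATTR_FSL_PAMUV1) {
		/* the window-based PAMU v1 constraints described above apply */
	}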