Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/Kconfig           |   12
-rw-r--r--  drivers/iommu/amd_iommu.c       |    1
-rw-r--r--  drivers/iommu/arm-smmu.c        |    1
-rw-r--r--  drivers/iommu/exynos-iommu.c    |    1
-rw-r--r--  drivers/iommu/intel-iommu.c     |    1
-rw-r--r--  drivers/iommu/iommu.c           |   50
-rw-r--r--  drivers/iommu/ipmmu-vmsa.c      |    1
-rw-r--r--  drivers/iommu/irq_remapping.c   |    8
-rw-r--r--  drivers/iommu/msm_iommu.c       |    1
-rw-r--r--  drivers/iommu/omap-iommu.c      |    1
-rw-r--r--  drivers/iommu/shmobile-iommu.c  |    1
-rw-r--r--  drivers/iommu/tegra-smmu.c      | 1609
12 files changed, 585 insertions, 1102 deletions
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index dd5112265cc9..6dbfbc209491 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -163,14 +163,14 @@ config TEGRA_IOMMU_GART
 	  hardware included on Tegra SoCs.
 
 config TEGRA_IOMMU_SMMU
-	bool "Tegra SMMU IOMMU Support"
-	depends on ARCH_TEGRA && TEGRA_AHB
+	bool "NVIDIA Tegra SMMU Support"
+	depends on ARCH_TEGRA
+	depends on TEGRA_AHB
+	depends on TEGRA_MC
 	select IOMMU_API
 	help
-	  Enables support for remapping discontiguous physical memory
-	  shared with the operating system into contiguous I/O virtual
-	  space through the SMMU (System Memory Management Unit)
-	  hardware included on Tegra SoCs.
+	  This driver supports the IOMMU hardware (SMMU) found on NVIDIA Tegra
+	  SoCs (Tegra30 up to Tegra124).
 
 config EXYNOS_IOMMU
 	bool "Exynos IOMMU Support"
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 505a9adac2d5..2d84c9edf3b8 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3424,6 +3424,7 @@ static const struct iommu_ops amd_iommu_ops = {
 	.detach_dev = amd_iommu_detach_device,
 	.map = amd_iommu_map,
 	.unmap = amd_iommu_unmap,
+	.map_sg = default_iommu_map_sg,
 	.iova_to_phys = amd_iommu_iova_to_phys,
 	.pgsize_bitmap = AMD_IOMMU_PGSIZES,
 };
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 60558f794922..e393ae01b5d2 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1652,6 +1652,7 @@ static const struct iommu_ops arm_smmu_ops = {
 	.detach_dev = arm_smmu_detach_dev,
 	.map = arm_smmu_map,
 	.unmap = arm_smmu_unmap,
+	.map_sg = default_iommu_map_sg,
 	.iova_to_phys = arm_smmu_iova_to_phys,
 	.add_device = arm_smmu_add_device,
 	.remove_device = arm_smmu_remove_device,
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 74233186f6f7..28372b85d8da 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1178,6 +1178,7 @@ static const struct iommu_ops exynos_iommu_ops = {
 	.detach_dev = exynos_iommu_detach_device,
 	.map = exynos_iommu_map,
 	.unmap = exynos_iommu_unmap,
+	.map_sg = default_iommu_map_sg,
 	.iova_to_phys = exynos_iommu_iova_to_phys,
 	.add_device = exynos_iommu_add_device,
 	.remove_device = exynos_iommu_remove_device,
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a27d6cb1a793..02cd26a17fe0 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4467,6 +4467,7 @@ static const struct iommu_ops intel_iommu_ops = {
 	.detach_dev = intel_iommu_detach_device,
 	.map = intel_iommu_map,
 	.unmap = intel_iommu_unmap,
+	.map_sg = default_iommu_map_sg,
 	.iova_to_phys = intel_iommu_iova_to_phys,
 	.add_device = intel_iommu_add_device,
 	.remove_device = intel_iommu_remove_device,
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index ed8b04867b1f..02e4313e937c 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -818,7 +818,15 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
 		kfree(nb);
 		return err;
 	}
-	return bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
+
+	err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
+	if (err) {
+		bus_unregister_notifier(bus, nb);
+		kfree(nb);
+		return err;
+	}
+
+	return 0;
 }
 
 /**
@@ -836,13 +844,19 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
  */
 int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
 {
+	int err;
+
 	if (bus->iommu_ops != NULL)
 		return -EBUSY;
 
 	bus->iommu_ops = ops;
 
 	/* Do IOMMU specific setup for this bus-type */
-	return iommu_bus_init(bus, ops);
+	err = iommu_bus_init(bus, ops);
+	if (err)
+		bus->iommu_ops = NULL;
+
+	return err;
 }
 EXPORT_SYMBOL_GPL(bus_set_iommu);
 
@@ -1124,6 +1138,38 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
 }
 EXPORT_SYMBOL_GPL(iommu_unmap);
 
+size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+			    struct scatterlist *sg, unsigned int nents, int prot)
+{
+	struct scatterlist *s;
+	size_t mapped = 0;
+	unsigned int i;
+	int ret;
+
+	for_each_sg(sg, s, nents, i) {
+		phys_addr_t phys = page_to_phys(sg_page(s));
+
+		/* We are mapping on page boundaries, so offset must be 0 */
+		if (s->offset)
+			goto out_err;
+
+		ret = iommu_map(domain, iova + mapped, phys, s->length, prot);
+		if (ret)
+			goto out_err;
+
+		mapped += s->length;
+	}
+
+	return mapped;
+
+out_err:
+	/* undo mappings already done */
+	iommu_unmap(domain, iova, mapped);
+
+	return 0;
+
+}
+EXPORT_SYMBOL_GPL(default_iommu_map_sg);
 
 int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
 			       phys_addr_t paddr, u64 size, int prot)
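The default_iommu_map_sg() helper above walks the scatterlist one entry at a time and backs everything out on failure. A minimal caller-side sketch, assuming the iommu_map_sg() wrapper that the same series adds to include/linux/iommu.h (the function and names below are illustrative, not part of this patch):

	/*
	 * Sketch only: map a page-aligned scatterlist into an IOMMU domain.
	 * iommu_map_sg() dispatches to domain->ops->map_sg, i.e. to
	 * default_iommu_map_sg() for the drivers converted in this patch.
	 */
	static int example_map_buffer(struct iommu_domain *domain,
				      unsigned long iova,
				      struct scatterlist *sgl,
				      unsigned int nents)
	{
		size_t mapped;

		/* default_iommu_map_sg() rejects entries with sg->offset != 0 */
		mapped = iommu_map_sg(domain, iova, sgl, nents,
				      IOMMU_READ | IOMMU_WRITE);
		if (mapped == 0)
			return -ENOMEM;	/* the helper already unwound */

		return 0;
	}

On failure the helper returns 0 after unmapping whatever it had mapped, so the caller never has to clean up a partial mapping itself.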
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 7dab5cbcc775..e509c58eee92 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -1127,6 +1127,7 @@ static const struct iommu_ops ipmmu_ops = {
 	.detach_dev = ipmmu_detach_device,
 	.map = ipmmu_map,
 	.unmap = ipmmu_unmap,
+	.map_sg = default_iommu_map_sg,
 	.iova_to_phys = ipmmu_iova_to_phys,
 	.add_device = ipmmu_add_device,
 	.remove_device = ipmmu_remove_device,
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 74a1767c89b5..2c3f5ad01098 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -56,19 +56,13 @@ static int do_setup_msi_irqs(struct pci_dev *dev, int nvec)
 	unsigned int irq;
 	struct msi_desc *msidesc;
 
-	WARN_ON(!list_is_singular(&dev->msi_list));
 	msidesc = list_entry(dev->msi_list.next, struct msi_desc, list);
-	WARN_ON(msidesc->irq);
-	WARN_ON(msidesc->msi_attrib.multiple);
-	WARN_ON(msidesc->nvec_used);
 
 	irq = irq_alloc_hwirqs(nvec, dev_to_node(&dev->dev));
 	if (irq == 0)
 		return -ENOSPC;
 
 	nvec_pow2 = __roundup_pow_of_two(nvec);
-	msidesc->nvec_used = nvec;
-	msidesc->msi_attrib.multiple = ilog2(nvec_pow2);
 	for (sub_handle = 0; sub_handle < nvec; sub_handle++) {
 		if (!sub_handle) {
 			index = msi_alloc_remapped_irq(dev, irq, nvec_pow2);
@@ -96,8 +90,6 @@ error:
 	 * IRQs from tearing down again in default_teardown_msi_irqs()
 	 */
 	msidesc->irq = 0;
-	msidesc->nvec_used = 0;
-	msidesc->msi_attrib.multiple = 0;
 
 	return ret;
 }
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 6e3dcc289d59..1c7b78ecf3e3 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -681,6 +681,7 @@ static const struct iommu_ops msm_iommu_ops = {
 	.detach_dev = msm_iommu_detach_dev,
 	.map = msm_iommu_map,
 	.unmap = msm_iommu_unmap,
+	.map_sg = default_iommu_map_sg,
 	.iova_to_phys = msm_iommu_iova_to_phys,
 	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
 };
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 36278870e84a..18003c044454 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1288,6 +1288,7 @@ static const struct iommu_ops omap_iommu_ops = {
 	.detach_dev = omap_iommu_detach_dev,
 	.map = omap_iommu_map,
 	.unmap = omap_iommu_unmap,
+	.map_sg = default_iommu_map_sg,
 	.iova_to_phys = omap_iommu_iova_to_phys,
 	.add_device = omap_iommu_add_device,
 	.remove_device = omap_iommu_remove_device,
diff --git a/drivers/iommu/shmobile-iommu.c b/drivers/iommu/shmobile-iommu.c
index 1333e6fb3405..f1b00774e4de 100644
--- a/drivers/iommu/shmobile-iommu.c
+++ b/drivers/iommu/shmobile-iommu.c
@@ -361,6 +361,7 @@ static const struct iommu_ops shmobile_iommu_ops = {
 	.detach_dev = shmobile_iommu_detach_device,
 	.map = shmobile_iommu_map,
 	.unmap = shmobile_iommu_unmap,
+	.map_sg = default_iommu_map_sg,
 	.iova_to_phys = shmobile_iommu_iova_to_phys,
 	.add_device = shmobile_iommu_add_device,
 	.pgsize_bitmap = SZ_1M | SZ_64K | SZ_4K,
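Every driver hunk above wires the same default callback, so the one constraint worth keeping in mind is the page-boundary rule enforced by the core helper. A hedged sketch of preparing a suitable scatterlist with sg_alloc_table_from_pages() (an existing lib/scatterlist helper; the function below is illustrative only, not part of this patch):

	/*
	 * Sketch only: build a page-aligned sg_table from an array of pages.
	 * With offset 0 and whole pages, every resulting entry satisfies the
	 * sg->offset == 0 requirement of default_iommu_map_sg().
	 */
	static struct sg_table *example_build_sgt(struct page **pages,
						  unsigned int count)
	{
		struct sg_table *sgt;

		sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
		if (!sgt)
			return ERR_PTR(-ENOMEM);

		if (sg_alloc_table_from_pages(sgt, pages, count, 0,
					      (unsigned long)count << PAGE_SHIFT,
					      GFP_KERNEL)) {
			kfree(sgt);
			return ERR_PTR(-ENOMEM);
		}

		return sgt;
	}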
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 3afdf43f732a..6e134c7c227f 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -1,1295 +1,732 @@
1/* 1/*
2 * IOMMU API for SMMU in Tegra30 2 * Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Copyright (c) 2011-2013, NVIDIA CORPORATION. All rights reserved. 4 * This program is free software; you can redistribute it and/or modify
5 * 5 * it under the terms of the GNU General Public License version 2 as
6 * This program is free software; you can redistribute it and/or modify it 6 * published by the Free Software Foundation.
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 */ 7 */
19 8
20#define pr_fmt(fmt) "%s(): " fmt, __func__
21
22#include <linux/err.h> 9#include <linux/err.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
25#include <linux/spinlock.h>
26#include <linux/slab.h>
27#include <linux/vmalloc.h>
28#include <linux/mm.h>
29#include <linux/pagemap.h>
30#include <linux/device.h>
31#include <linux/sched.h>
32#include <linux/iommu.h> 10#include <linux/iommu.h>
33#include <linux/io.h> 11#include <linux/kernel.h>
34#include <linux/of.h> 12#include <linux/of.h>
35#include <linux/of_iommu.h> 13#include <linux/of_device.h>
36#include <linux/debugfs.h> 14#include <linux/platform_device.h>
37#include <linux/seq_file.h> 15#include <linux/slab.h>
38 16
39#include <soc/tegra/ahb.h> 17#include <soc/tegra/ahb.h>
18#include <soc/tegra/mc.h>
40 19
41#include <asm/page.h> 20struct tegra_smmu {
42#include <asm/cacheflush.h> 21 void __iomem *regs;
43 22 struct device *dev;
44enum smmu_hwgrp {
45 HWGRP_AFI,
46 HWGRP_AVPC,
47 HWGRP_DC,
48 HWGRP_DCB,
49 HWGRP_EPP,
50 HWGRP_G2,
51 HWGRP_HC,
52 HWGRP_HDA,
53 HWGRP_ISP,
54 HWGRP_MPE,
55 HWGRP_NV,
56 HWGRP_NV2,
57 HWGRP_PPCS,
58 HWGRP_SATA,
59 HWGRP_VDE,
60 HWGRP_VI,
61
62 HWGRP_COUNT,
63
64 HWGRP_END = ~0,
65};
66 23
67#define HWG_AFI (1 << HWGRP_AFI) 24 struct tegra_mc *mc;
68#define HWG_AVPC (1 << HWGRP_AVPC) 25 const struct tegra_smmu_soc *soc;
69#define HWG_DC (1 << HWGRP_DC)
70#define HWG_DCB (1 << HWGRP_DCB)
71#define HWG_EPP (1 << HWGRP_EPP)
72#define HWG_G2 (1 << HWGRP_G2)
73#define HWG_HC (1 << HWGRP_HC)
74#define HWG_HDA (1 << HWGRP_HDA)
75#define HWG_ISP (1 << HWGRP_ISP)
76#define HWG_MPE (1 << HWGRP_MPE)
77#define HWG_NV (1 << HWGRP_NV)
78#define HWG_NV2 (1 << HWGRP_NV2)
79#define HWG_PPCS (1 << HWGRP_PPCS)
80#define HWG_SATA (1 << HWGRP_SATA)
81#define HWG_VDE (1 << HWGRP_VDE)
82#define HWG_VI (1 << HWGRP_VI)
83
84/* bitmap of the page sizes currently supported */
85#define SMMU_IOMMU_PGSIZES (SZ_4K)
86
87#define SMMU_CONFIG 0x10
88#define SMMU_CONFIG_DISABLE 0
89#define SMMU_CONFIG_ENABLE 1
90
91/* REVISIT: To support multiple MCs */
92enum {
93 _MC = 0,
94};
95 26
96enum { 27 unsigned long *asids;
97 _TLB = 0, 28 struct mutex lock;
98 _PTC,
99};
100 29
101#define SMMU_CACHE_CONFIG_BASE 0x14 30 struct list_head list;
102#define __SMMU_CACHE_CONFIG(mc, cache) (SMMU_CACHE_CONFIG_BASE + 4 * cache)
103#define SMMU_CACHE_CONFIG(cache) __SMMU_CACHE_CONFIG(_MC, cache)
104
105#define SMMU_CACHE_CONFIG_STATS_SHIFT 31
106#define SMMU_CACHE_CONFIG_STATS_ENABLE (1 << SMMU_CACHE_CONFIG_STATS_SHIFT)
107#define SMMU_CACHE_CONFIG_STATS_TEST_SHIFT 30
108#define SMMU_CACHE_CONFIG_STATS_TEST (1 << SMMU_CACHE_CONFIG_STATS_TEST_SHIFT)
109
110#define SMMU_TLB_CONFIG_HIT_UNDER_MISS__ENABLE (1 << 29)
111#define SMMU_TLB_CONFIG_ACTIVE_LINES__VALUE 0x10
112#define SMMU_TLB_CONFIG_RESET_VAL 0x20000010
113
114#define SMMU_PTC_CONFIG_CACHE__ENABLE (1 << 29)
115#define SMMU_PTC_CONFIG_INDEX_MAP__PATTERN 0x3f
116#define SMMU_PTC_CONFIG_RESET_VAL 0x2000003f
117
118#define SMMU_PTB_ASID 0x1c
119#define SMMU_PTB_ASID_CURRENT_SHIFT 0
120
121#define SMMU_PTB_DATA 0x20
122#define SMMU_PTB_DATA_RESET_VAL 0
123#define SMMU_PTB_DATA_ASID_NONSECURE_SHIFT 29
124#define SMMU_PTB_DATA_ASID_WRITABLE_SHIFT 30
125#define SMMU_PTB_DATA_ASID_READABLE_SHIFT 31
126
127#define SMMU_TLB_FLUSH 0x30
128#define SMMU_TLB_FLUSH_VA_MATCH_ALL 0
129#define SMMU_TLB_FLUSH_VA_MATCH_SECTION 2
130#define SMMU_TLB_FLUSH_VA_MATCH_GROUP 3
131#define SMMU_TLB_FLUSH_ASID_SHIFT 29
132#define SMMU_TLB_FLUSH_ASID_MATCH_DISABLE 0
133#define SMMU_TLB_FLUSH_ASID_MATCH_ENABLE 1
134#define SMMU_TLB_FLUSH_ASID_MATCH_SHIFT 31
135
136#define SMMU_PTC_FLUSH 0x34
137#define SMMU_PTC_FLUSH_TYPE_ALL 0
138#define SMMU_PTC_FLUSH_TYPE_ADR 1
139#define SMMU_PTC_FLUSH_ADR_SHIFT 4
140
141#define SMMU_ASID_SECURITY 0x38
142
143#define SMMU_STATS_CACHE_COUNT_BASE 0x1f0
144
145#define SMMU_STATS_CACHE_COUNT(mc, cache, hitmiss) \
146 (SMMU_STATS_CACHE_COUNT_BASE + 8 * cache + 4 * hitmiss)
147
148#define SMMU_TRANSLATION_ENABLE_0 0x228
149#define SMMU_TRANSLATION_ENABLE_1 0x22c
150#define SMMU_TRANSLATION_ENABLE_2 0x230
151
152#define SMMU_AFI_ASID 0x238 /* PCIE */
153#define SMMU_AVPC_ASID 0x23c /* AVP */
154#define SMMU_DC_ASID 0x240 /* Display controller */
155#define SMMU_DCB_ASID 0x244 /* Display controller B */
156#define SMMU_EPP_ASID 0x248 /* Encoder pre-processor */
157#define SMMU_G2_ASID 0x24c /* 2D engine */
158#define SMMU_HC_ASID 0x250 /* Host1x */
159#define SMMU_HDA_ASID 0x254 /* High-def audio */
160#define SMMU_ISP_ASID 0x258 /* Image signal processor */
161#define SMMU_MPE_ASID 0x264 /* MPEG encoder */
162#define SMMU_NV_ASID 0x268 /* (3D) */
163#define SMMU_NV2_ASID 0x26c /* (3D) */
164#define SMMU_PPCS_ASID 0x270 /* AHB */
165#define SMMU_SATA_ASID 0x278 /* SATA */
166#define SMMU_VDE_ASID 0x27c /* Video decoder */
167#define SMMU_VI_ASID 0x280 /* Video input */
168
169#define SMMU_PDE_NEXT_SHIFT 28
170
171#define SMMU_TLB_FLUSH_VA_SECTION__MASK 0xffc00000
172#define SMMU_TLB_FLUSH_VA_SECTION__SHIFT 12 /* right shift */
173#define SMMU_TLB_FLUSH_VA_GROUP__MASK 0xffffc000
174#define SMMU_TLB_FLUSH_VA_GROUP__SHIFT 12 /* right shift */
175#define SMMU_TLB_FLUSH_VA(iova, which) \
176 ((((iova) & SMMU_TLB_FLUSH_VA_##which##__MASK) >> \
177 SMMU_TLB_FLUSH_VA_##which##__SHIFT) | \
178 SMMU_TLB_FLUSH_VA_MATCH_##which)
179#define SMMU_PTB_ASID_CUR(n) \
180 ((n) << SMMU_PTB_ASID_CURRENT_SHIFT)
181#define SMMU_TLB_FLUSH_ASID_MATCH_disable \
182 (SMMU_TLB_FLUSH_ASID_MATCH_DISABLE << \
183 SMMU_TLB_FLUSH_ASID_MATCH_SHIFT)
184#define SMMU_TLB_FLUSH_ASID_MATCH__ENABLE \
185 (SMMU_TLB_FLUSH_ASID_MATCH_ENABLE << \
186 SMMU_TLB_FLUSH_ASID_MATCH_SHIFT)
187
188#define SMMU_PAGE_SHIFT 12
189#define SMMU_PAGE_SIZE (1 << SMMU_PAGE_SHIFT)
190#define SMMU_PAGE_MASK ((1 << SMMU_PAGE_SHIFT) - 1)
191
192#define SMMU_PDIR_COUNT 1024
193#define SMMU_PDIR_SIZE (sizeof(unsigned long) * SMMU_PDIR_COUNT)
194#define SMMU_PTBL_COUNT 1024
195#define SMMU_PTBL_SIZE (sizeof(unsigned long) * SMMU_PTBL_COUNT)
196#define SMMU_PDIR_SHIFT 12
197#define SMMU_PDE_SHIFT 12
198#define SMMU_PTE_SHIFT 12
199#define SMMU_PFN_MASK 0x000fffff
200
201#define SMMU_ADDR_TO_PFN(addr) ((addr) >> 12)
202#define SMMU_ADDR_TO_PDN(addr) ((addr) >> 22)
203#define SMMU_PDN_TO_ADDR(pdn) ((pdn) << 22)
204
205#define _READABLE (1 << SMMU_PTB_DATA_ASID_READABLE_SHIFT)
206#define _WRITABLE (1 << SMMU_PTB_DATA_ASID_WRITABLE_SHIFT)
207#define _NONSECURE (1 << SMMU_PTB_DATA_ASID_NONSECURE_SHIFT)
208#define _PDE_NEXT (1 << SMMU_PDE_NEXT_SHIFT)
209#define _MASK_ATTR (_READABLE | _WRITABLE | _NONSECURE)
210
211#define _PDIR_ATTR (_READABLE | _WRITABLE | _NONSECURE)
212
213#define _PDE_ATTR (_READABLE | _WRITABLE | _NONSECURE)
214#define _PDE_ATTR_N (_PDE_ATTR | _PDE_NEXT)
215#define _PDE_VACANT(pdn) (((pdn) << 10) | _PDE_ATTR)
216
217#define _PTE_ATTR (_READABLE | _WRITABLE | _NONSECURE)
218#define _PTE_VACANT(addr) (((addr) >> SMMU_PAGE_SHIFT) | _PTE_ATTR)
219
220#define SMMU_MK_PDIR(page, attr) \
221 ((page_to_phys(page) >> SMMU_PDIR_SHIFT) | (attr))
222#define SMMU_MK_PDE(page, attr) \
223 (unsigned long)((page_to_phys(page) >> SMMU_PDE_SHIFT) | (attr))
224#define SMMU_EX_PTBL_PAGE(pde) \
225 pfn_to_page((unsigned long)(pde) & SMMU_PFN_MASK)
226#define SMMU_PFN_TO_PTE(pfn, attr) (unsigned long)((pfn) | (attr))
227
228#define SMMU_ASID_ENABLE(asid) ((asid) | (1 << 31))
229#define SMMU_ASID_DISABLE 0
230#define SMMU_ASID_ASID(n) ((n) & ~SMMU_ASID_ENABLE(0))
231
232#define NUM_SMMU_REG_BANKS 3
233
234#define smmu_client_enable_hwgrp(c, m) smmu_client_set_hwgrp(c, m, 1)
235#define smmu_client_disable_hwgrp(c) smmu_client_set_hwgrp(c, 0, 0)
236#define __smmu_client_enable_hwgrp(c, m) __smmu_client_set_hwgrp(c, m, 1)
237#define __smmu_client_disable_hwgrp(c) __smmu_client_set_hwgrp(c, 0, 0)
238
239#define HWGRP_INIT(client) [HWGRP_##client] = SMMU_##client##_ASID
240
241static const u32 smmu_hwgrp_asid_reg[] = {
242 HWGRP_INIT(AFI),
243 HWGRP_INIT(AVPC),
244 HWGRP_INIT(DC),
245 HWGRP_INIT(DCB),
246 HWGRP_INIT(EPP),
247 HWGRP_INIT(G2),
248 HWGRP_INIT(HC),
249 HWGRP_INIT(HDA),
250 HWGRP_INIT(ISP),
251 HWGRP_INIT(MPE),
252 HWGRP_INIT(NV),
253 HWGRP_INIT(NV2),
254 HWGRP_INIT(PPCS),
255 HWGRP_INIT(SATA),
256 HWGRP_INIT(VDE),
257 HWGRP_INIT(VI),
258}; 31};
259#define HWGRP_ASID_REG(x) (smmu_hwgrp_asid_reg[x])
260 32
261/* 33struct tegra_smmu_as {
262 * Per client for address space 34 struct iommu_domain *domain;
263 */ 35 struct tegra_smmu *smmu;
264struct smmu_client { 36 unsigned int use_count;
265 struct device *dev; 37 struct page *count;
266 struct list_head list; 38 struct page *pd;
267 struct smmu_as *as; 39 unsigned id;
268 u32 hwgrp; 40 u32 attr;
269}; 41};
270 42
271/* 43static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
272 * Per address space 44 unsigned long offset)
273 */ 45{
274struct smmu_as { 46 writel(value, smmu->regs + offset);
275 struct smmu_device *smmu; /* back pointer to container */ 47}
276 unsigned int asid;
277 spinlock_t lock; /* for pagetable */
278 struct page *pdir_page;
279 unsigned long pdir_attr;
280 unsigned long pde_attr;
281 unsigned long pte_attr;
282 unsigned int *pte_count;
283
284 struct list_head client;
285 spinlock_t client_lock; /* for client list */
286};
287 48
288struct smmu_debugfs_info { 49static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
289 struct smmu_device *smmu; 50{
290 int mc; 51 return readl(smmu->regs + offset);
291 int cache; 52}
292};
293 53
294/* 54#define SMMU_CONFIG 0x010
295 * Per SMMU device - IOMMU device 55#define SMMU_CONFIG_ENABLE (1 << 0)
296 */
297struct smmu_device {
298 void __iomem *regbase; /* register offset base */
299 void __iomem **regs; /* register block start address array */
300 void __iomem **rege; /* register block end address array */
301 int nregs; /* number of register blocks */
302
303 unsigned long iovmm_base; /* remappable base address */
304 unsigned long page_count; /* total remappable size */
305 spinlock_t lock;
306 char *name;
307 struct device *dev;
308 struct page *avp_vector_page; /* dummy page shared by all AS's */
309 56
310 /* 57#define SMMU_TLB_CONFIG 0x14
311 * Register image savers for suspend/resume 58#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
312 */ 59#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
313 unsigned long translation_enable_0; 60#define SMMU_TLB_CONFIG_ACTIVE_LINES(x) ((x) & 0x3f)
314 unsigned long translation_enable_1;
315 unsigned long translation_enable_2;
316 unsigned long asid_security;
317 61
318 struct dentry *debugfs_root; 62#define SMMU_PTC_CONFIG 0x18
319 struct smmu_debugfs_info *debugfs_info; 63#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
64#define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
65#define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)
320 66
321 struct device_node *ahb; 67#define SMMU_PTB_ASID 0x01c
68#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)
322 69
323 int num_as; 70#define SMMU_PTB_DATA 0x020
324 struct smmu_as as[0]; /* Run-time allocated array */ 71#define SMMU_PTB_DATA_VALUE(page, attr) (page_to_phys(page) >> 12 | (attr))
325};
326 72
327static struct smmu_device *smmu_handle; /* unique for a system */ 73#define SMMU_MK_PDE(page, attr) (page_to_phys(page) >> SMMU_PTE_SHIFT | (attr))
328 74
329/* 75#define SMMU_TLB_FLUSH 0x030
330 * SMMU register accessors 76#define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
331 */ 77#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
332static bool inline smmu_valid_reg(struct smmu_device *smmu, 78#define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0)
333 void __iomem *addr) 79#define SMMU_TLB_FLUSH_ASID(x) (((x) & 0x7f) << 24)
334{ 80#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
335 int i; 81 SMMU_TLB_FLUSH_VA_MATCH_SECTION)
82#define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \
83 SMMU_TLB_FLUSH_VA_MATCH_GROUP)
84#define SMMU_TLB_FLUSH_ASID_MATCH (1 << 31)
336 85
337 for (i = 0; i < smmu->nregs; i++) { 86#define SMMU_PTC_FLUSH 0x034
338 if (addr < smmu->regs[i]) 87#define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
339 break; 88#define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)
340 if (addr <= smmu->rege[i])
341 return true;
342 }
343 89
344 return false; 90#define SMMU_PTC_FLUSH_HI 0x9b8
345} 91#define SMMU_PTC_FLUSH_HI_MASK 0x3
346 92
347static inline u32 smmu_read(struct smmu_device *smmu, size_t offs) 93/* per-SWGROUP SMMU_*_ASID register */
348{ 94#define SMMU_ASID_ENABLE (1 << 31)
349 void __iomem *addr = smmu->regbase + offs; 95#define SMMU_ASID_MASK 0x7f
96#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)
350 97
351 BUG_ON(!smmu_valid_reg(smmu, addr)); 98/* page table definitions */
99#define SMMU_NUM_PDE 1024
100#define SMMU_NUM_PTE 1024
352 101
353 return readl(addr); 102#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
354} 103#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)
355 104
356static inline void smmu_write(struct smmu_device *smmu, u32 val, size_t offs) 105#define SMMU_PDE_SHIFT 22
357{ 106#define SMMU_PTE_SHIFT 12
358 void __iomem *addr = smmu->regbase + offs;
359 107
360 BUG_ON(!smmu_valid_reg(smmu, addr)); 108#define SMMU_PFN_MASK 0x000fffff
361 109
362 writel(val, addr); 110#define SMMU_PD_READABLE (1 << 31)
363} 111#define SMMU_PD_WRITABLE (1 << 30)
112#define SMMU_PD_NONSECURE (1 << 29)
364 113
365#define VA_PAGE_TO_PA(va, page) \ 114#define SMMU_PDE_READABLE (1 << 31)
366 (page_to_phys(page) + ((unsigned long)(va) & ~PAGE_MASK)) 115#define SMMU_PDE_WRITABLE (1 << 30)
116#define SMMU_PDE_NONSECURE (1 << 29)
117#define SMMU_PDE_NEXT (1 << 28)
367 118
368#define FLUSH_CPU_DCACHE(va, page, size) \ 119#define SMMU_PTE_READABLE (1 << 31)
369 do { \ 120#define SMMU_PTE_WRITABLE (1 << 30)
370 unsigned long _pa_ = VA_PAGE_TO_PA(va, page); \ 121#define SMMU_PTE_NONSECURE (1 << 29)
371 __cpuc_flush_dcache_area((void *)(va), (size_t)(size)); \
372 outer_flush_range(_pa_, _pa_+(size_t)(size)); \
373 } while (0)
374 122
375/* 123#define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
376 * Any interaction between any block on PPSB and a block on APB or AHB 124 SMMU_PDE_NONSECURE)
377 * must have these read-back barriers to ensure the APB/AHB bus 125#define SMMU_PTE_ATTR (SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
378 * transaction is complete before initiating activity on the PPSB 126 SMMU_PTE_NONSECURE)
379 * block.
380 */
381#define FLUSH_SMMU_REGS(smmu) smmu_read(smmu, SMMU_CONFIG)
382 127
383#define smmu_client_hwgrp(c) (u32)((c)->dev->platform_data) 128static inline void smmu_flush_ptc(struct tegra_smmu *smmu, struct page *page,
384 129 unsigned long offset)
385static int __smmu_client_set_hwgrp(struct smmu_client *c,
386 unsigned long map, int on)
387{ 130{
388 int i; 131 phys_addr_t phys = page ? page_to_phys(page) : 0;
389 struct smmu_as *as = c->as; 132 u32 value;
390 u32 val, offs, mask = SMMU_ASID_ENABLE(as->asid); 133
391 struct smmu_device *smmu = as->smmu; 134 if (page) {
392 135 offset &= ~(smmu->mc->soc->atom_size - 1);
393 WARN_ON(!on && map); 136
394 if (on && !map) 137 if (smmu->mc->soc->num_address_bits > 32) {
395 return -EINVAL; 138#ifdef CONFIG_PHYS_ADDR_T_64BIT
396 if (!on) 139 value = (phys >> 32) & SMMU_PTC_FLUSH_HI_MASK;
397 map = smmu_client_hwgrp(c); 140#else
398 141 value = 0;
399 for_each_set_bit(i, &map, HWGRP_COUNT) { 142#endif
400 offs = HWGRP_ASID_REG(i); 143 smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
401 val = smmu_read(smmu, offs);
402 if (on) {
403 if (WARN_ON(val & mask))
404 goto err_hw_busy;
405 val |= mask;
406 } else {
407 WARN_ON((val & mask) == mask);
408 val &= ~mask;
409 } 144 }
410 smmu_write(smmu, val, offs);
411 }
412 FLUSH_SMMU_REGS(smmu);
413 c->hwgrp = map;
414 return 0;
415 145
416err_hw_busy: 146 value = (phys + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
417 for_each_set_bit(i, &map, HWGRP_COUNT) { 147 } else {
418 offs = HWGRP_ASID_REG(i); 148 value = SMMU_PTC_FLUSH_TYPE_ALL;
419 val = smmu_read(smmu, offs);
420 val &= ~mask;
421 smmu_write(smmu, val, offs);
422 } 149 }
423 return -EBUSY; 150
151 smmu_writel(smmu, value, SMMU_PTC_FLUSH);
424} 152}
425 153
426static int smmu_client_set_hwgrp(struct smmu_client *c, u32 map, int on) 154static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
427{ 155{
428 u32 val; 156 smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
429 unsigned long flags;
430 struct smmu_as *as = c->as;
431 struct smmu_device *smmu = as->smmu;
432
433 spin_lock_irqsave(&smmu->lock, flags);
434 val = __smmu_client_set_hwgrp(c, map, on);
435 spin_unlock_irqrestore(&smmu->lock, flags);
436 return val;
437} 157}
438 158
439/* 159static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
440 * Flush all TLB entries and all PTC entries 160 unsigned long asid)
441 * Caller must lock smmu
442 */
443static void smmu_flush_regs(struct smmu_device *smmu, int enable)
444{ 161{
445 u32 val; 162 u32 value;
446
447 smmu_write(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
448 FLUSH_SMMU_REGS(smmu);
449 val = SMMU_TLB_FLUSH_VA_MATCH_ALL |
450 SMMU_TLB_FLUSH_ASID_MATCH_disable;
451 smmu_write(smmu, val, SMMU_TLB_FLUSH);
452 163
453 if (enable) 164 value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
454 smmu_write(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG); 165 SMMU_TLB_FLUSH_VA_MATCH_ALL;
455 FLUSH_SMMU_REGS(smmu); 166 smmu_writel(smmu, value, SMMU_TLB_FLUSH);
456} 167}
457 168
458static int smmu_setup_regs(struct smmu_device *smmu) 169static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
170 unsigned long asid,
171 unsigned long iova)
459{ 172{
460 int i; 173 u32 value;
461 u32 val;
462 174
463 for (i = 0; i < smmu->num_as; i++) { 175 value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
464 struct smmu_as *as = &smmu->as[i]; 176 SMMU_TLB_FLUSH_VA_SECTION(iova);
465 struct smmu_client *c; 177 smmu_writel(smmu, value, SMMU_TLB_FLUSH);
466
467 smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
468 val = as->pdir_page ?
469 SMMU_MK_PDIR(as->pdir_page, as->pdir_attr) :
470 SMMU_PTB_DATA_RESET_VAL;
471 smmu_write(smmu, val, SMMU_PTB_DATA);
472
473 list_for_each_entry(c, &as->client, list)
474 __smmu_client_set_hwgrp(c, c->hwgrp, 1);
475 }
476
477 smmu_write(smmu, smmu->translation_enable_0, SMMU_TRANSLATION_ENABLE_0);
478 smmu_write(smmu, smmu->translation_enable_1, SMMU_TRANSLATION_ENABLE_1);
479 smmu_write(smmu, smmu->translation_enable_2, SMMU_TRANSLATION_ENABLE_2);
480 smmu_write(smmu, smmu->asid_security, SMMU_ASID_SECURITY);
481 smmu_write(smmu, SMMU_TLB_CONFIG_RESET_VAL, SMMU_CACHE_CONFIG(_TLB));
482 smmu_write(smmu, SMMU_PTC_CONFIG_RESET_VAL, SMMU_CACHE_CONFIG(_PTC));
483
484 smmu_flush_regs(smmu, 1);
485
486 return tegra_ahb_enable_smmu(smmu->ahb);
487} 178}
488 179
489static void flush_ptc_and_tlb(struct smmu_device *smmu, 180static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
490 struct smmu_as *as, dma_addr_t iova, 181 unsigned long asid,
491 unsigned long *pte, struct page *page, int is_pde) 182 unsigned long iova)
492{ 183{
493 u32 val; 184 u32 value;
494 unsigned long tlb_flush_va = is_pde
495 ? SMMU_TLB_FLUSH_VA(iova, SECTION)
496 : SMMU_TLB_FLUSH_VA(iova, GROUP);
497
498 val = SMMU_PTC_FLUSH_TYPE_ADR | VA_PAGE_TO_PA(pte, page);
499 smmu_write(smmu, val, SMMU_PTC_FLUSH);
500 FLUSH_SMMU_REGS(smmu);
501 val = tlb_flush_va |
502 SMMU_TLB_FLUSH_ASID_MATCH__ENABLE |
503 (as->asid << SMMU_TLB_FLUSH_ASID_SHIFT);
504 smmu_write(smmu, val, SMMU_TLB_FLUSH);
505 FLUSH_SMMU_REGS(smmu);
506}
507 185
508static void free_ptbl(struct smmu_as *as, dma_addr_t iova) 186 value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
509{ 187 SMMU_TLB_FLUSH_VA_GROUP(iova);
510 unsigned long pdn = SMMU_ADDR_TO_PDN(iova); 188 smmu_writel(smmu, value, SMMU_TLB_FLUSH);
511 unsigned long *pdir = (unsigned long *)page_address(as->pdir_page);
512
513 if (pdir[pdn] != _PDE_VACANT(pdn)) {
514 dev_dbg(as->smmu->dev, "pdn: %lx\n", pdn);
515
516 ClearPageReserved(SMMU_EX_PTBL_PAGE(pdir[pdn]));
517 __free_page(SMMU_EX_PTBL_PAGE(pdir[pdn]));
518 pdir[pdn] = _PDE_VACANT(pdn);
519 FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
520 flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
521 as->pdir_page, 1);
522 }
523} 189}
524 190
525static void free_pdir(struct smmu_as *as) 191static inline void smmu_flush(struct tegra_smmu *smmu)
526{ 192{
527 unsigned addr; 193 smmu_readl(smmu, SMMU_CONFIG);
528 int count;
529 struct device *dev = as->smmu->dev;
530
531 if (!as->pdir_page)
532 return;
533
534 addr = as->smmu->iovmm_base;
535 count = as->smmu->page_count;
536 while (count-- > 0) {
537 free_ptbl(as, addr);
538 addr += SMMU_PAGE_SIZE * SMMU_PTBL_COUNT;
539 }
540 ClearPageReserved(as->pdir_page);
541 __free_page(as->pdir_page);
542 as->pdir_page = NULL;
543 devm_kfree(dev, as->pte_count);
544 as->pte_count = NULL;
545} 194}
546 195
547/* 196static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
548 * Maps PTBL for given iova and returns the PTE address
549 * Caller must unmap the mapped PTBL returned in *ptbl_page_p
550 */
551static unsigned long *locate_pte(struct smmu_as *as,
552 dma_addr_t iova, bool allocate,
553 struct page **ptbl_page_p,
554 unsigned int **count)
555{ 197{
556 unsigned long ptn = SMMU_ADDR_TO_PFN(iova); 198 unsigned long id;
557 unsigned long pdn = SMMU_ADDR_TO_PDN(iova);
558 unsigned long *pdir = page_address(as->pdir_page);
559 unsigned long *ptbl;
560
561 if (pdir[pdn] != _PDE_VACANT(pdn)) {
562 /* Mapped entry table already exists */
563 *ptbl_page_p = SMMU_EX_PTBL_PAGE(pdir[pdn]);
564 ptbl = page_address(*ptbl_page_p);
565 } else if (!allocate) {
566 return NULL;
567 } else {
568 int pn;
569 unsigned long addr = SMMU_PDN_TO_ADDR(pdn);
570 199
571 /* Vacant - allocate a new page table */ 200 mutex_lock(&smmu->lock);
572 dev_dbg(as->smmu->dev, "New PTBL pdn: %lx\n", pdn);
573 201
574 *ptbl_page_p = alloc_page(GFP_ATOMIC); 202 id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
575 if (!*ptbl_page_p) { 203 if (id >= smmu->soc->num_asids) {
576 dev_err(as->smmu->dev, 204 mutex_unlock(&smmu->lock);
577 "failed to allocate smmu_device page table\n"); 205 return -ENOSPC;
578 return NULL;
579 }
580 SetPageReserved(*ptbl_page_p);
581 ptbl = (unsigned long *)page_address(*ptbl_page_p);
582 for (pn = 0; pn < SMMU_PTBL_COUNT;
583 pn++, addr += SMMU_PAGE_SIZE) {
584 ptbl[pn] = _PTE_VACANT(addr);
585 }
586 FLUSH_CPU_DCACHE(ptbl, *ptbl_page_p, SMMU_PTBL_SIZE);
587 pdir[pdn] = SMMU_MK_PDE(*ptbl_page_p,
588 as->pde_attr | _PDE_NEXT);
589 FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
590 flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
591 as->pdir_page, 1);
592 } 206 }
593 *count = &as->pte_count[pdn];
594 207
595 return &ptbl[ptn % SMMU_PTBL_COUNT]; 208 set_bit(id, smmu->asids);
209 *idp = id;
210
211 mutex_unlock(&smmu->lock);
212 return 0;
596} 213}
597 214
598#ifdef CONFIG_SMMU_SIG_DEBUG 215static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
599static void put_signature(struct smmu_as *as,
600 dma_addr_t iova, unsigned long pfn)
601{ 216{
602 struct page *page; 217 mutex_lock(&smmu->lock);
603 unsigned long *vaddr; 218 clear_bit(id, smmu->asids);
604 219 mutex_unlock(&smmu->lock);
605 page = pfn_to_page(pfn);
606 vaddr = page_address(page);
607 if (!vaddr)
608 return;
609
610 vaddr[0] = iova;
611 vaddr[1] = pfn << PAGE_SHIFT;
612 FLUSH_CPU_DCACHE(vaddr, page, sizeof(vaddr[0]) * 2);
613} 220}
614#else 221
615static inline void put_signature(struct smmu_as *as, 222static bool tegra_smmu_capable(enum iommu_cap cap)
616 unsigned long addr, unsigned long pfn)
617{ 223{
224 return false;
618} 225}
619#endif
620 226
621/* 227static int tegra_smmu_domain_init(struct iommu_domain *domain)
622 * Caller must not hold as->lock
623 */
624static int alloc_pdir(struct smmu_as *as)
625{ 228{
626 unsigned long *pdir, flags; 229 struct tegra_smmu_as *as;
627 int pdn, err = 0; 230 unsigned int i;
628 u32 val; 231 uint32_t *pd;
629 struct smmu_device *smmu = as->smmu;
630 struct page *page;
631 unsigned int *cnt;
632 232
633 /* 233 as = kzalloc(sizeof(*as), GFP_KERNEL);
634 * do the allocation, then grab as->lock 234 if (!as)
635 */ 235 return -ENOMEM;
636 cnt = devm_kzalloc(smmu->dev,
637 sizeof(cnt[0]) * SMMU_PDIR_COUNT,
638 GFP_KERNEL);
639 page = alloc_page(GFP_KERNEL | __GFP_DMA);
640 236
641 spin_lock_irqsave(&as->lock, flags); 237 as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;
238 as->domain = domain;
642 239
643 if (as->pdir_page) { 240 as->pd = alloc_page(GFP_KERNEL | __GFP_DMA);
644 /* We raced, free the redundant */ 241 if (!as->pd) {
645 err = -EAGAIN; 242 kfree(as);
646 goto err_out; 243 return -ENOMEM;
647 } 244 }
648 245
649 if (!page || !cnt) { 246 as->count = alloc_page(GFP_KERNEL);
650 dev_err(smmu->dev, "failed to allocate at %s\n", __func__); 247 if (!as->count) {
651 err = -ENOMEM; 248 __free_page(as->pd);
652 goto err_out; 249 kfree(as);
250 return -ENOMEM;
653 } 251 }
654 252
655 as->pdir_page = page; 253 /* clear PDEs */
656 as->pte_count = cnt; 254 pd = page_address(as->pd);
255 SetPageReserved(as->pd);
657 256
658 SetPageReserved(as->pdir_page); 257 for (i = 0; i < SMMU_NUM_PDE; i++)
659 pdir = page_address(as->pdir_page); 258 pd[i] = 0;
660 259
661 for (pdn = 0; pdn < SMMU_PDIR_COUNT; pdn++) 260 /* clear PDE usage counters */
662 pdir[pdn] = _PDE_VACANT(pdn); 261 pd = page_address(as->count);
663 FLUSH_CPU_DCACHE(pdir, as->pdir_page, SMMU_PDIR_SIZE); 262 SetPageReserved(as->count);
664 val = SMMU_PTC_FLUSH_TYPE_ADR | VA_PAGE_TO_PA(pdir, as->pdir_page);
665 smmu_write(smmu, val, SMMU_PTC_FLUSH);
666 FLUSH_SMMU_REGS(as->smmu);
667 val = SMMU_TLB_FLUSH_VA_MATCH_ALL |
668 SMMU_TLB_FLUSH_ASID_MATCH__ENABLE |
669 (as->asid << SMMU_TLB_FLUSH_ASID_SHIFT);
670 smmu_write(smmu, val, SMMU_TLB_FLUSH);
671 FLUSH_SMMU_REGS(as->smmu);
672 263
673 spin_unlock_irqrestore(&as->lock, flags); 264 for (i = 0; i < SMMU_NUM_PDE; i++)
674 265 pd[i] = 0;
675 return 0;
676 266
677err_out: 267 domain->priv = as;
678 spin_unlock_irqrestore(&as->lock, flags);
679 268
680 devm_kfree(smmu->dev, cnt); 269 return 0;
681 if (page)
682 __free_page(page);
683 return err;
684} 270}
685 271
686static void __smmu_iommu_unmap(struct smmu_as *as, dma_addr_t iova) 272static void tegra_smmu_domain_destroy(struct iommu_domain *domain)
687{ 273{
688 unsigned long *pte; 274 struct tegra_smmu_as *as = domain->priv;
689 struct page *page;
690 unsigned int *count;
691 275
692 pte = locate_pte(as, iova, false, &page, &count); 276 /* TODO: free page directory and page tables */
693 if (WARN_ON(!pte)) 277 ClearPageReserved(as->pd);
694 return;
695 278
696 if (WARN_ON(*pte == _PTE_VACANT(iova))) 279 kfree(as);
697 return;
698
699 *pte = _PTE_VACANT(iova);
700 FLUSH_CPU_DCACHE(pte, page, sizeof(*pte));
701 flush_ptc_and_tlb(as->smmu, as, iova, pte, page, 0);
702 if (!--(*count))
703 free_ptbl(as, iova);
704} 280}
705 281
706static void __smmu_iommu_map_pfn(struct smmu_as *as, dma_addr_t iova, 282static const struct tegra_smmu_swgroup *
707 unsigned long pfn) 283tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
708{ 284{
709 struct smmu_device *smmu = as->smmu; 285 const struct tegra_smmu_swgroup *group = NULL;
710 unsigned long *pte; 286 unsigned int i;
711 unsigned int *count;
712 struct page *page;
713 287
714 pte = locate_pte(as, iova, true, &page, &count); 288 for (i = 0; i < smmu->soc->num_swgroups; i++) {
715 if (WARN_ON(!pte)) 289 if (smmu->soc->swgroups[i].swgroup == swgroup) {
716 return; 290 group = &smmu->soc->swgroups[i];
291 break;
292 }
293 }
717 294
718 if (*pte == _PTE_VACANT(iova)) 295 return group;
719 (*count)++;
720 *pte = SMMU_PFN_TO_PTE(pfn, as->pte_attr);
721 if (unlikely((*pte == _PTE_VACANT(iova))))
722 (*count)--;
723 FLUSH_CPU_DCACHE(pte, page, sizeof(*pte));
724 flush_ptc_and_tlb(smmu, as, iova, pte, page, 0);
725 put_signature(as, iova, pfn);
726} 296}
727 297
728static int smmu_iommu_map(struct iommu_domain *domain, unsigned long iova, 298static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
729 phys_addr_t pa, size_t bytes, int prot) 299 unsigned int asid)
730{ 300{
731 struct smmu_as *as = domain->priv; 301 const struct tegra_smmu_swgroup *group;
732 unsigned long pfn = __phys_to_pfn(pa); 302 unsigned int i;
733 unsigned long flags; 303 u32 value;
734 304
735 dev_dbg(as->smmu->dev, "[%d] %08lx:%pa\n", as->asid, iova, &pa); 305 for (i = 0; i < smmu->soc->num_clients; i++) {
306 const struct tegra_mc_client *client = &smmu->soc->clients[i];
736 307
737 if (!pfn_valid(pfn)) 308 if (client->swgroup != swgroup)
738 return -ENOMEM; 309 continue;
739
740 spin_lock_irqsave(&as->lock, flags);
741 __smmu_iommu_map_pfn(as, iova, pfn);
742 spin_unlock_irqrestore(&as->lock, flags);
743 return 0;
744}
745
746static size_t smmu_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
747 size_t bytes)
748{
749 struct smmu_as *as = domain->priv;
750 unsigned long flags;
751 310
752 dev_dbg(as->smmu->dev, "[%d] %08lx\n", as->asid, iova); 311 value = smmu_readl(smmu, client->smmu.reg);
312 value |= BIT(client->smmu.bit);
313 smmu_writel(smmu, value, client->smmu.reg);
314 }
753 315
754 spin_lock_irqsave(&as->lock, flags); 316 group = tegra_smmu_find_swgroup(smmu, swgroup);
755 __smmu_iommu_unmap(as, iova); 317 if (group) {
756 spin_unlock_irqrestore(&as->lock, flags); 318 value = smmu_readl(smmu, group->reg);
757 return SMMU_PAGE_SIZE; 319 value &= ~SMMU_ASID_MASK;
320 value |= SMMU_ASID_VALUE(asid);
321 value |= SMMU_ASID_ENABLE;
322 smmu_writel(smmu, value, group->reg);
323 }
758} 324}
759 325
760static phys_addr_t smmu_iommu_iova_to_phys(struct iommu_domain *domain, 326static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
761 dma_addr_t iova) 327 unsigned int asid)
762{ 328{
763 struct smmu_as *as = domain->priv; 329 const struct tegra_smmu_swgroup *group;
764 unsigned long *pte; 330 unsigned int i;
765 unsigned int *count; 331 u32 value;
766 struct page *page;
767 unsigned long pfn;
768 unsigned long flags;
769 332
770 spin_lock_irqsave(&as->lock, flags); 333 group = tegra_smmu_find_swgroup(smmu, swgroup);
334 if (group) {
335 value = smmu_readl(smmu, group->reg);
336 value &= ~SMMU_ASID_MASK;
337 value |= SMMU_ASID_VALUE(asid);
338 value &= ~SMMU_ASID_ENABLE;
339 smmu_writel(smmu, value, group->reg);
340 }
771 341
772 pte = locate_pte(as, iova, true, &page, &count); 342 for (i = 0; i < smmu->soc->num_clients; i++) {
773 pfn = *pte & SMMU_PFN_MASK; 343 const struct tegra_mc_client *client = &smmu->soc->clients[i];
774 WARN_ON(!pfn_valid(pfn));
775 dev_dbg(as->smmu->dev,
776 "iova:%08llx pfn:%08lx asid:%d\n", (unsigned long long)iova,
777 pfn, as->asid);
778 344
779 spin_unlock_irqrestore(&as->lock, flags); 345 if (client->swgroup != swgroup)
780 return PFN_PHYS(pfn); 346 continue;
781}
782 347
783static bool smmu_iommu_capable(enum iommu_cap cap) 348 value = smmu_readl(smmu, client->smmu.reg);
784{ 349 value &= ~BIT(client->smmu.bit);
785 return false; 350 smmu_writel(smmu, value, client->smmu.reg);
351 }
786} 352}
787 353
788static int smmu_iommu_attach_dev(struct iommu_domain *domain, 354static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
789 struct device *dev) 355 struct tegra_smmu_as *as)
790{ 356{
791 struct smmu_as *as = domain->priv; 357 u32 value;
792 struct smmu_device *smmu = as->smmu;
793 struct smmu_client *client, *c;
794 u32 map;
795 int err; 358 int err;
796 359
797 client = devm_kzalloc(smmu->dev, sizeof(*c), GFP_KERNEL); 360 if (as->use_count > 0) {
798 if (!client) 361 as->use_count++;
799 return -ENOMEM; 362 return 0;
800 client->dev = dev;
801 client->as = as;
802 map = (unsigned long)dev->platform_data;
803 if (!map)
804 return -EINVAL;
805
806 err = smmu_client_enable_hwgrp(client, map);
807 if (err)
808 goto err_hwgrp;
809
810 spin_lock(&as->client_lock);
811 list_for_each_entry(c, &as->client, list) {
812 if (c->dev == dev) {
813 dev_err(smmu->dev,
814 "%s is already attached\n", dev_name(c->dev));
815 err = -EINVAL;
816 goto err_client;
817 }
818 } 363 }
819 list_add(&client->list, &as->client);
820 spin_unlock(&as->client_lock);
821 364
822 /* 365 err = tegra_smmu_alloc_asid(smmu, &as->id);
823 * Reserve "page zero" for AVP vectors using a common dummy 366 if (err < 0)
824 * page. 367 return err;
825 */
826 if (map & HWG_AVPC) {
827 struct page *page;
828 368
829 page = as->smmu->avp_vector_page; 369 smmu->soc->ops->flush_dcache(as->pd, 0, SMMU_SIZE_PD);
830 __smmu_iommu_map_pfn(as, 0, page_to_pfn(page)); 370 smmu_flush_ptc(smmu, as->pd, 0);
371 smmu_flush_tlb_asid(smmu, as->id);
831 372
832 pr_info("Reserve \"page zero\" for AVP vectors using a common dummy\n"); 373 smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
833 } 374 value = SMMU_PTB_DATA_VALUE(as->pd, as->attr);
375 smmu_writel(smmu, value, SMMU_PTB_DATA);
376 smmu_flush(smmu);
834 377
835 dev_dbg(smmu->dev, "%s is attached\n", dev_name(dev)); 378 as->smmu = smmu;
836 return 0; 379 as->use_count++;
837 380
838err_client: 381 return 0;
839 smmu_client_disable_hwgrp(client);
840 spin_unlock(&as->client_lock);
841err_hwgrp:
842 devm_kfree(smmu->dev, client);
843 return err;
844} 382}
845 383
846static void smmu_iommu_detach_dev(struct iommu_domain *domain, 384static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
847 struct device *dev) 385 struct tegra_smmu_as *as)
848{ 386{
849 struct smmu_as *as = domain->priv; 387 if (--as->use_count > 0)
850 struct smmu_device *smmu = as->smmu; 388 return;
851 struct smmu_client *c; 389
852 390 tegra_smmu_free_asid(smmu, as->id);
853 spin_lock(&as->client_lock); 391 as->smmu = NULL;
854
855 list_for_each_entry(c, &as->client, list) {
856 if (c->dev == dev) {
857 smmu_client_disable_hwgrp(c);
858 list_del(&c->list);
859 devm_kfree(smmu->dev, c);
860 c->as = NULL;
861 dev_dbg(smmu->dev,
862 "%s is detached\n", dev_name(c->dev));
863 goto out;
864 }
865 }
866 dev_err(smmu->dev, "Couldn't find %s\n", dev_name(dev));
867out:
868 spin_unlock(&as->client_lock);
869} 392}
870 393
871static int smmu_iommu_domain_init(struct iommu_domain *domain) 394static int tegra_smmu_attach_dev(struct iommu_domain *domain,
395 struct device *dev)
872{ 396{
873 int i, err = -EAGAIN; 397 struct tegra_smmu *smmu = dev->archdata.iommu;
874 unsigned long flags; 398 struct tegra_smmu_as *as = domain->priv;
875 struct smmu_as *as; 399 struct device_node *np = dev->of_node;
876 struct smmu_device *smmu = smmu_handle; 400 struct of_phandle_args args;
401 unsigned int index = 0;
402 int err = 0;
877 403
878 /* Look for a free AS with lock held */ 404 while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
879 for (i = 0; i < smmu->num_as; i++) { 405 &args)) {
880 as = &smmu->as[i]; 406 unsigned int swgroup = args.args[0];
881 407
882 if (as->pdir_page) 408 if (args.np != smmu->dev->of_node) {
409 of_node_put(args.np);
883 continue; 410 continue;
411 }
884 412
885 err = alloc_pdir(as); 413 of_node_put(args.np);
886 if (!err)
887 goto found;
888 414
889 if (err != -EAGAIN) 415 err = tegra_smmu_as_prepare(smmu, as);
890 break; 416 if (err < 0)
417 return err;
418
419 tegra_smmu_enable(smmu, swgroup, as->id);
420 index++;
891 } 421 }
892 if (i == smmu->num_as)
893 dev_err(smmu->dev, "no free AS\n");
894 return err;
895 422
896found: 423 if (index == 0)
897 spin_lock_irqsave(&smmu->lock, flags); 424 return -ENODEV;
898 425
899 /* Update PDIR register */ 426 return 0;
900 smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID); 427}
901 smmu_write(smmu,
902 SMMU_MK_PDIR(as->pdir_page, as->pdir_attr), SMMU_PTB_DATA);
903 FLUSH_SMMU_REGS(smmu);
904 428
905 spin_unlock_irqrestore(&smmu->lock, flags); 429static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
430{
431 struct tegra_smmu_as *as = domain->priv;
432 struct device_node *np = dev->of_node;
433 struct tegra_smmu *smmu = as->smmu;
434 struct of_phandle_args args;
435 unsigned int index = 0;
906 436
907 domain->priv = as; 437 while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
438 &args)) {
439 unsigned int swgroup = args.args[0];
908 440
909 domain->geometry.aperture_start = smmu->iovmm_base; 441 if (args.np != smmu->dev->of_node) {
910 domain->geometry.aperture_end = smmu->iovmm_base + 442 of_node_put(args.np);
911 smmu->page_count * SMMU_PAGE_SIZE - 1; 443 continue;
912 domain->geometry.force_aperture = true; 444 }
913 445
914 dev_dbg(smmu->dev, "smmu_as@%p\n", as); 446 of_node_put(args.np);
915 447
916 return 0; 448 tegra_smmu_disable(smmu, swgroup, as->id);
449 tegra_smmu_as_unprepare(smmu, as);
450 index++;
451 }
917} 452}
918 453
919static void smmu_iommu_domain_destroy(struct iommu_domain *domain) 454static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
455 struct page **pagep)
920{ 456{
921 struct smmu_as *as = domain->priv; 457 u32 *pd = page_address(as->pd), *pt, *count;
922 struct smmu_device *smmu = as->smmu; 458 u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff;
923 unsigned long flags; 459 u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff;
460 struct tegra_smmu *smmu = as->smmu;
461 struct page *page;
462 unsigned int i;
463
464 if (pd[pde] == 0) {
465 page = alloc_page(GFP_KERNEL | __GFP_DMA);
466 if (!page)
467 return NULL;
924 468
925 spin_lock_irqsave(&as->lock, flags); 469 pt = page_address(page);
470 SetPageReserved(page);
926 471
927 if (as->pdir_page) { 472 for (i = 0; i < SMMU_NUM_PTE; i++)
928 spin_lock(&smmu->lock); 473 pt[i] = 0;
929 smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
930 smmu_write(smmu, SMMU_PTB_DATA_RESET_VAL, SMMU_PTB_DATA);
931 FLUSH_SMMU_REGS(smmu);
932 spin_unlock(&smmu->lock);
933 474
934 free_pdir(as); 475 smmu->soc->ops->flush_dcache(page, 0, SMMU_SIZE_PT);
935 }
936 476
937 if (!list_empty(&as->client)) { 477 pd[pde] = SMMU_MK_PDE(page, SMMU_PDE_ATTR | SMMU_PDE_NEXT);
938 struct smmu_client *c;
939 478
940 list_for_each_entry(c, &as->client, list) 479 smmu->soc->ops->flush_dcache(as->pd, pde << 2, 4);
941 smmu_iommu_detach_dev(domain, c->dev); 480 smmu_flush_ptc(smmu, as->pd, pde << 2);
481 smmu_flush_tlb_section(smmu, as->id, iova);
482 smmu_flush(smmu);
483 } else {
484 page = pfn_to_page(pd[pde] & SMMU_PFN_MASK);
485 pt = page_address(page);
942 } 486 }
943 487
944 spin_unlock_irqrestore(&as->lock, flags); 488 *pagep = page;
945 489
946 domain->priv = NULL; 490 /* Keep track of entries in this page table. */
947 dev_dbg(smmu->dev, "smmu_as@%p\n", as); 491 count = page_address(as->count);
948} 492 if (pt[pte] == 0)
493 count[pde]++;
949 494
950static const struct iommu_ops smmu_iommu_ops = { 495 return &pt[pte];
951 .capable = smmu_iommu_capable, 496}
952 .domain_init = smmu_iommu_domain_init,
953 .domain_destroy = smmu_iommu_domain_destroy,
954 .attach_dev = smmu_iommu_attach_dev,
955 .detach_dev = smmu_iommu_detach_dev,
956 .map = smmu_iommu_map,
957 .unmap = smmu_iommu_unmap,
958 .iova_to_phys = smmu_iommu_iova_to_phys,
959 .pgsize_bitmap = SMMU_IOMMU_PGSIZES,
960};
961
962/* Should be in the order of enum */
963static const char * const smmu_debugfs_mc[] = { "mc", };
964static const char * const smmu_debugfs_cache[] = { "tlb", "ptc", };
965 497
966static ssize_t smmu_debugfs_stats_write(struct file *file, 498static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova)
967 const char __user *buffer,
968 size_t count, loff_t *pos)
969{ 499{
970 struct smmu_debugfs_info *info; 500 u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff;
971 struct smmu_device *smmu; 501 u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff;
972 int i; 502 u32 *count = page_address(as->count);
973 enum { 503 u32 *pd = page_address(as->pd), *pt;
974 _OFF = 0, 504 struct page *page;
975 _ON,
976 _RESET,
977 };
978 const char * const command[] = {
979 [_OFF] = "off",
980 [_ON] = "on",
981 [_RESET] = "reset",
982 };
983 char str[] = "reset";
984 u32 val;
985 size_t offs;
986 505
987 count = min_t(size_t, count, sizeof(str)); 506 page = pfn_to_page(pd[pde] & SMMU_PFN_MASK);
988 if (copy_from_user(str, buffer, count)) 507 pt = page_address(page);
989 return -EINVAL;
990 508
991 for (i = 0; i < ARRAY_SIZE(command); i++) 509 /*
992 if (strncmp(str, command[i], 510 * When no entries in this page table are used anymore, return the
993 strlen(command[i])) == 0) 511 * memory page to the system.
994 break; 512 */
513 if (pt[pte] != 0) {
514 if (--count[pde] == 0) {
515 ClearPageReserved(page);
516 __free_page(page);
517 pd[pde] = 0;
518 }
995 519
996 if (i == ARRAY_SIZE(command)) 520 pt[pte] = 0;
997 return -EINVAL;
998
999 info = file_inode(file)->i_private;
1000 smmu = info->smmu;
1001
1002 offs = SMMU_CACHE_CONFIG(info->cache);
1003 val = smmu_read(smmu, offs);
1004 switch (i) {
1005 case _OFF:
1006 val &= ~SMMU_CACHE_CONFIG_STATS_ENABLE;
1007 val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
1008 smmu_write(smmu, val, offs);
1009 break;
1010 case _ON:
1011 val |= SMMU_CACHE_CONFIG_STATS_ENABLE;
1012 val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
1013 smmu_write(smmu, val, offs);
1014 break;
1015 case _RESET:
1016 val |= SMMU_CACHE_CONFIG_STATS_TEST;
1017 smmu_write(smmu, val, offs);
1018 val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
1019 smmu_write(smmu, val, offs);
1020 break;
1021 default:
1022 BUG();
1023 break;
1024 } 521 }
1025
1026 dev_dbg(smmu->dev, "%s() %08x, %08x @%08x\n", __func__,
1027 val, smmu_read(smmu, offs), offs);
1028
1029 return count;
1030} 522}
1031 523
1032static int smmu_debugfs_stats_show(struct seq_file *s, void *v) 524static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
525 phys_addr_t paddr, size_t size, int prot)
1033{ 526{
1034 struct smmu_debugfs_info *info = s->private; 527 struct tegra_smmu_as *as = domain->priv;
1035 struct smmu_device *smmu = info->smmu; 528 struct tegra_smmu *smmu = as->smmu;
1036 int i; 529 unsigned long offset;
1037 const char * const stats[] = { "hit", "miss", }; 530 struct page *page;
531 u32 *pte;
1038 532
533 pte = as_get_pte(as, iova, &page);
534 if (!pte)
535 return -ENOMEM;
1039 536
1040 for (i = 0; i < ARRAY_SIZE(stats); i++) { 537 *pte = __phys_to_pfn(paddr) | SMMU_PTE_ATTR;
1041 u32 val; 538 offset = offset_in_page(pte);
1042 size_t offs;
1043 539
1044 offs = SMMU_STATS_CACHE_COUNT(info->mc, info->cache, i); 540 smmu->soc->ops->flush_dcache(page, offset, 4);
1045 val = smmu_read(smmu, offs); 541 smmu_flush_ptc(smmu, page, offset);
1046 seq_printf(s, "%s:%08x ", stats[i], val); 542 smmu_flush_tlb_group(smmu, as->id, iova);
543 smmu_flush(smmu);
1047 544
1048 dev_dbg(smmu->dev, "%s() %s %08x @%08x\n", __func__,
1049 stats[i], val, offs);
1050 }
1051 seq_printf(s, "\n");
1052 return 0; 545 return 0;
1053} 546}
1054 547
1055static int smmu_debugfs_stats_open(struct inode *inode, struct file *file) 548static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
549 size_t size)
1056{ 550{
1057 return single_open(file, smmu_debugfs_stats_show, inode->i_private); 551 struct tegra_smmu_as *as = domain->priv;
1058} 552 struct tegra_smmu *smmu = as->smmu;
553 unsigned long offset;
554 struct page *page;
555 u32 *pte;
1059 556
1060static const struct file_operations smmu_debugfs_stats_fops = { 557 pte = as_get_pte(as, iova, &page);
1061 .open = smmu_debugfs_stats_open, 558 if (!pte)
1062 .read = seq_read, 559 return 0;
1063 .llseek = seq_lseek,
1064 .release = single_release,
1065 .write = smmu_debugfs_stats_write,
1066};
1067 560
1068static void smmu_debugfs_delete(struct smmu_device *smmu) 561 offset = offset_in_page(pte);
1069{ 562 as_put_pte(as, iova);
1070 debugfs_remove_recursive(smmu->debugfs_root); 563
1071 kfree(smmu->debugfs_info); 564 smmu->soc->ops->flush_dcache(page, offset, 4);
565 smmu_flush_ptc(smmu, page, offset);
566 smmu_flush_tlb_group(smmu, as->id, iova);
567 smmu_flush(smmu);
568
569 return size;
1072} 570}
1073 571
-static void smmu_debugfs_create(struct smmu_device *smmu)
-{
-	int i;
-	size_t bytes;
-	struct dentry *root;
-
-	bytes = ARRAY_SIZE(smmu_debugfs_mc) * ARRAY_SIZE(smmu_debugfs_cache) *
-		sizeof(*smmu->debugfs_info);
-	smmu->debugfs_info = kmalloc(bytes, GFP_KERNEL);
-	if (!smmu->debugfs_info)
-		return;
-
-	root = debugfs_create_dir(dev_name(smmu->dev), NULL);
-	if (!root)
-		goto err_out;
-	smmu->debugfs_root = root;
-
-	for (i = 0; i < ARRAY_SIZE(smmu_debugfs_mc); i++) {
-		int j;
-		struct dentry *mc;
-
-		mc = debugfs_create_dir(smmu_debugfs_mc[i], root);
-		if (!mc)
-			goto err_out;
-
-		for (j = 0; j < ARRAY_SIZE(smmu_debugfs_cache); j++) {
-			struct dentry *cache;
-			struct smmu_debugfs_info *info;
-
-			info = smmu->debugfs_info;
-			info += i * ARRAY_SIZE(smmu_debugfs_mc) + j;
-			info->smmu = smmu;
-			info->mc = i;
-			info->cache = j;
-
-			cache = debugfs_create_file(smmu_debugfs_cache[j],
-						    S_IWUGO | S_IRUGO, mc,
-						    (void *)info,
-						    &smmu_debugfs_stats_fops);
-			if (!cache)
-				goto err_out;
-		}
-	}
-
-	return;
-
-err_out:
-	smmu_debugfs_delete(smmu);
-}
-
+static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
+					   dma_addr_t iova)
+{
+	struct tegra_smmu_as *as = domain->priv;
+	struct page *page;
+	unsigned long pfn;
+	u32 *pte;
+
+	pte = as_get_pte(as, iova, &page);
+	pfn = *pte & SMMU_PFN_MASK;
+
+	return PFN_PHYS(pfn);
+}
+
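tegra_smmu_iova_to_phys() relies on the PTE encoding used by tegra_smmu_map(): the attribute bits (SMMU_PTE_ATTR) are ORed onto a page frame number, and SMMU_PFN_MASK strips them off again before PFN_PHYS() shifts the PFN back into a physical address. A standalone sketch of that round trip, with illustrative mask values (the real definitions live elsewhere in this file):

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative values only: in the driver, SMMU_PTE_ATTR carries the
 * attribute bits of a PTE and SMMU_PFN_MASK selects its page-frame-
 * number field.
 */
#define EX_PAGE_SHIFT	12
#define EX_PTE_ATTR	0xe0000000u	/* assumed attribute bits */
#define EX_PFN_MASK	0x000fffffu	/* assumed PFN field */

int main(void)
{
	uint64_t paddr = 0x12345000;	/* page-aligned physical address */
	uint32_t pte = (uint32_t)(paddr >> EX_PAGE_SHIFT) | EX_PTE_ATTR;
	uint64_t recovered = (uint64_t)(pte & EX_PFN_MASK) << EX_PAGE_SHIFT;

	/* prints: pte=0xe0012345 phys=0x12345000 */
	printf("pte=0x%08x phys=0x%llx\n", (unsigned int)pte,
	       (unsigned long long)recovered);
	return 0;
}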
-static int tegra_smmu_suspend(struct device *dev)
-{
-	struct smmu_device *smmu = dev_get_drvdata(dev);
-
-	smmu->translation_enable_0 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_0);
-	smmu->translation_enable_1 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_1);
-	smmu->translation_enable_2 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_2);
-	smmu->asid_security = smmu_read(smmu, SMMU_ASID_SECURITY);
-	return 0;
-}
-
+static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
+{
+	struct platform_device *pdev;
+	struct tegra_mc *mc;
+
+	pdev = of_find_device_by_node(np);
+	if (!pdev)
+		return NULL;
+
+	mc = platform_get_drvdata(pdev);
+	if (!mc)
+		return NULL;
+
+	return mc->smmu;
+}
+
-static int tegra_smmu_resume(struct device *dev)
-{
-	struct smmu_device *smmu = dev_get_drvdata(dev);
-	unsigned long flags;
-	int err;
-
-	spin_lock_irqsave(&smmu->lock, flags);
-	err = smmu_setup_regs(smmu);
-	spin_unlock_irqrestore(&smmu->lock, flags);
-	return err;
-}
-
+static int tegra_smmu_add_device(struct device *dev)
+{
+	struct device_node *np = dev->of_node;
+	struct of_phandle_args args;
+	unsigned int index = 0;
+
+	while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
+					  &args) == 0) {
+		struct tegra_smmu *smmu;
+
+		smmu = tegra_smmu_find(args.np);
+		if (smmu) {
+			/*
+			 * Only a single IOMMU master interface is currently
+			 * supported by the Linux kernel, so abort after the
+			 * first match.
+			 */
+			dev->archdata.iommu = smmu;
+			break;
+		}
+
+		index++;
+	}
+
+	return 0;
+}
+
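The add_device callback above implements the generic device tree binding for IOMMU masters: each client device carries an "iommus" property whose entries are a phandle to an IOMMU provider plus provider-specific specifier cells (the provider's #iommu-cells value determines how many), and of_parse_phandle_with_args() resolves them one at a time until tegra_smmu_find() recognizes one of the providers as a Tegra memory controller. A client node would carry something like iommus = <&mc ...>; the exact specifier cells are defined by the memory controller binding, not by this patch.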
-static int tegra_smmu_probe(struct platform_device *pdev)
-{
-	struct smmu_device *smmu;
-	struct device *dev = &pdev->dev;
-	int i, asids, err = 0;
-	dma_addr_t uninitialized_var(base);
-	size_t bytes, uninitialized_var(size);
-
-	if (smmu_handle)
-		return -EIO;
-
-	BUILD_BUG_ON(PAGE_SHIFT != SMMU_PAGE_SHIFT);
-
+static void tegra_smmu_remove_device(struct device *dev)
+{
+	dev->archdata.iommu = NULL;
+}
+
+static const struct iommu_ops tegra_smmu_ops = {
+	.capable = tegra_smmu_capable,
+	.domain_init = tegra_smmu_domain_init,
+	.domain_destroy = tegra_smmu_domain_destroy,
+	.attach_dev = tegra_smmu_attach_dev,
+	.detach_dev = tegra_smmu_detach_dev,
+	.add_device = tegra_smmu_add_device,
+	.remove_device = tegra_smmu_remove_device,
+	.map = tegra_smmu_map,
+	.unmap = tegra_smmu_unmap,
+	.map_sg = default_iommu_map_sg,
+	.iova_to_phys = tegra_smmu_iova_to_phys,
+
+	.pgsize_bitmap = SZ_4K,
+};
+
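Note the .map_sg entry: the driver points it at the generic default_iommu_map_sg() helper from drivers/iommu/iommu.c rather than carrying its own scatterlist loop. A rough sketch, with illustrative names, of what such a helper boils down to; the real helper additionally validates segment alignment against the domain's minimum page size:

#include <linux/iommu.h>
#include <linux/scatterlist.h>

/*
 * Rough sketch only: map each scatterlist segment through the domain's
 * .map callback and unwind the partial mapping on failure.
 */
static size_t example_map_sg(struct iommu_domain *domain, unsigned long iova,
			     struct scatterlist *sg, unsigned int nents,
			     int prot)
{
	struct scatterlist *s;
	size_t mapped = 0;
	unsigned int i;

	for_each_sg(sg, s, nents, i) {
		phys_addr_t phys = page_to_phys(sg_page(s));

		if (iommu_map(domain, iova + mapped, phys, s->length, prot)) {
			/* undo everything mapped so far */
			iommu_unmap(domain, iova, mapped);
			return 0;
		}

		mapped += s->length;
	}

	return mapped;
}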
-	if (of_property_read_u32(dev->of_node, "nvidia,#asids", &asids))
-		return -ENODEV;
-
-	bytes = sizeof(*smmu) + asids * sizeof(*smmu->as);
-	smmu = devm_kzalloc(dev, bytes, GFP_KERNEL);
-	if (!smmu) {
-		dev_err(dev, "failed to allocate smmu_device\n");
-		return -ENOMEM;
-	}
-
+static void tegra_smmu_ahb_enable(void)
+{
+	static const struct of_device_id ahb_match[] = {
+		{ .compatible = "nvidia,tegra30-ahb", },
+		{ }
+	};
+	struct device_node *ahb;
+
+	ahb = of_find_matching_node(NULL, ahb_match);
+	if (ahb) {
+		tegra_ahb_enable_smmu(ahb);
+		of_node_put(ahb);
+	}
+}
+
-	smmu->nregs = pdev->num_resources;
-	smmu->regs = devm_kzalloc(dev, 2 * smmu->nregs * sizeof(*smmu->regs),
-				  GFP_KERNEL);
-	smmu->rege = smmu->regs + smmu->nregs;
-	if (!smmu->regs)
-		return -ENOMEM;
-	for (i = 0; i < smmu->nregs; i++) {
-		struct resource *res;
-
-		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
-		smmu->regs[i] = devm_ioremap_resource(&pdev->dev, res);
-		if (IS_ERR(smmu->regs[i]))
-			return PTR_ERR(smmu->regs[i]);
-		smmu->rege[i] = smmu->regs[i] + resource_size(res) - 1;
-	}
-	/* Same as "mc" 1st register block start address */
-	smmu->regbase = (void __iomem *)((u32)smmu->regs[0] & PAGE_MASK);
-
-	err = of_get_dma_window(dev->of_node, NULL, 0, NULL, &base, &size);
-	if (err)
-		return -ENODEV;
-
-	if (size & SMMU_PAGE_MASK)
-		return -EINVAL;
-
-	size >>= SMMU_PAGE_SHIFT;
-	if (!size)
-		return -EINVAL;
-
-	smmu->ahb = of_parse_phandle(dev->of_node, "nvidia,ahb", 0);
-	if (!smmu->ahb)
-		return -ENODEV;
-
-	smmu->dev = dev;
-	smmu->num_as = asids;
-	smmu->iovmm_base = base;
-	smmu->page_count = size;
-
-	smmu->translation_enable_0 = ~0;
-	smmu->translation_enable_1 = ~0;
-	smmu->translation_enable_2 = ~0;
-	smmu->asid_security = 0;
-
-	for (i = 0; i < smmu->num_as; i++) {
-		struct smmu_as *as = &smmu->as[i];
-
-		as->smmu = smmu;
-		as->asid = i;
-		as->pdir_attr = _PDIR_ATTR;
-		as->pde_attr = _PDE_ATTR;
-		as->pte_attr = _PTE_ATTR;
-
-		spin_lock_init(&as->lock);
-		spin_lock_init(&as->client_lock);
-		INIT_LIST_HEAD(&as->client);
-	}
-	spin_lock_init(&smmu->lock);
-	err = smmu_setup_regs(smmu);
-	if (err)
-		return err;
-	platform_set_drvdata(pdev, smmu);
-
-	smmu->avp_vector_page = alloc_page(GFP_KERNEL);
-	if (!smmu->avp_vector_page)
-		return -ENOMEM;
-
-	smmu_debugfs_create(smmu);
-	smmu_handle = smmu;
-	bus_set_iommu(&platform_bus_type, &smmu_iommu_ops);
-	return 0;
-}
-
-static int tegra_smmu_remove(struct platform_device *pdev)
-{
-	struct smmu_device *smmu = platform_get_drvdata(pdev);
-	int i;
-
-	smmu_debugfs_delete(smmu);
-
-	smmu_write(smmu, SMMU_CONFIG_DISABLE, SMMU_CONFIG);
-	for (i = 0; i < smmu->num_as; i++)
-		free_pdir(&smmu->as[i]);
-	__free_page(smmu->avp_vector_page);
-	smmu_handle = NULL;
-	return 0;
-}
-
-static const struct dev_pm_ops tegra_smmu_pm_ops = {
-	.suspend = tegra_smmu_suspend,
-	.resume = tegra_smmu_resume,
-};
-
-static const struct of_device_id tegra_smmu_of_match[] = {
-	{ .compatible = "nvidia,tegra30-smmu", },
-	{ },
-};
-MODULE_DEVICE_TABLE(of, tegra_smmu_of_match);
-
-static struct platform_driver tegra_smmu_driver = {
-	.probe = tegra_smmu_probe,
-	.remove = tegra_smmu_remove,
-	.driver = {
-		.owner = THIS_MODULE,
-		.name = "tegra-smmu",
-		.pm = &tegra_smmu_pm_ops,
-		.of_match_table = tegra_smmu_of_match,
-	},
-};
-
-static int tegra_smmu_init(void)
-{
-	return platform_driver_register(&tegra_smmu_driver);
-}
-
-static void __exit tegra_smmu_exit(void)
-{
-	platform_driver_unregister(&tegra_smmu_driver);
-}
-
-subsys_initcall(tegra_smmu_init);
-module_exit(tegra_smmu_exit);
-
-MODULE_DESCRIPTION("IOMMU API for SMMU in Tegra30");
-MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
-MODULE_ALIAS("platform:tegra-smmu");
-MODULE_LICENSE("GPL v2");
+struct tegra_smmu *tegra_smmu_probe(struct device *dev,
+				    const struct tegra_smmu_soc *soc,
+				    struct tegra_mc *mc)
+{
+	struct tegra_smmu *smmu;
+	size_t size;
+	u32 value;
+	int err;
+
+	/* This can happen on Tegra20 which doesn't have an SMMU */
+	if (!soc)
+		return NULL;
+
+	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
+	if (!smmu)
+		return ERR_PTR(-ENOMEM);
+
+	/*
+	 * This is a bit of a hack. Ideally we'd want to simply return this
+	 * value. However the IOMMU registration process will attempt to add
+	 * all devices to the IOMMU when bus_set_iommu() is called. In order
+	 * not to rely on global variables to track the IOMMU instance, we
+	 * set it here so that it can be looked up from the .add_device()
+	 * callback via the IOMMU device's .drvdata field.
+	 */
+	mc->smmu = smmu;
+
+	size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);
+
+	smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
+	if (!smmu->asids)
+		return ERR_PTR(-ENOMEM);
+
+	mutex_init(&smmu->lock);
+
+	smmu->regs = mc->regs;
+	smmu->soc = soc;
+	smmu->dev = dev;
+	smmu->mc = mc;
+
+	value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);
+
+	if (soc->supports_request_limit)
+		value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);
+
+	smmu_writel(smmu, value, SMMU_PTC_CONFIG);
+
+	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
+		SMMU_TLB_CONFIG_ACTIVE_LINES(0x20);
+
+	if (soc->supports_round_robin_arbitration)
+		value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;
+
+	smmu_writel(smmu, value, SMMU_TLB_CONFIG);
+
+	smmu_flush_ptc(smmu, NULL, 0);
+	smmu_flush_tlb(smmu);
+	smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
+	smmu_flush(smmu);
+
+	tegra_smmu_ahb_enable();
+
+	err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
+	if (err < 0)
+		return ERR_PTR(err);
+
+	return smmu;
+}
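One detail worth calling out in the new probe routine above: smmu->asids is sized as BITS_TO_LONGS(soc->num_asids) * sizeof(long), that is, it is a plain kernel bitmap with one bit per hardware ASID. A minimal sketch of how such a bitmap hands out and reclaims ASIDs (the function names are illustrative and not part of this patch; the driver does the equivalent when an address space is attached):

#include <linux/bitmap.h>
#include <linux/errno.h>

/*
 * Illustrative only: allocate the first free ASID from a bitmap like
 * smmu->asids, or release one. num_asids mirrors soc->num_asids.
 */
static int example_asid_alloc(unsigned long *asids, unsigned int num_asids)
{
	unsigned int id = find_first_zero_bit(asids, num_asids);

	if (id >= num_asids)
		return -ENOSPC;

	set_bit(id, asids);
	return id;
}

static void example_asid_free(unsigned long *asids, unsigned int id)
{
	clear_bit(id, asids);
}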