Diffstat (limited to 'drivers/iommu/tegra-smmu.c')
-rw-r--r-- | drivers/iommu/tegra-smmu.c | 1610
1 file changed, 523 insertions(+), 1087 deletions(-)
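Note: the rewrite below turns the driver from a standalone platform driver into a library entered through tegra_smmu_probe(), which the Tegra memory-controller (MC) driver is expected to call with its per-SoC SMMU data; tegra_smmu_find() later recovers the handle through the MC's platform drvdata (the diff itself only defines the entry point, so the actual call site lives outside this file). A rough, hypothetical sketch of that hand-off follows; the mc->soc->smmu field, the helper name, and the ERR_PTR-based error handling are assumptions not shown in this hunk, while mc->smmu and mc->soc are taken from the diff:

	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <soc/tegra/mc.h>

	/* Sketch only: wire the rewritten SMMU up from the MC probe path. */
	static int tegra_mc_smmu_init(struct platform_device *pdev)
	{
		struct tegra_mc *mc = platform_get_drvdata(pdev);

		/* Build the SMMU around the MC's per-SoC data (assumed field). */
		mc->smmu = tegra_smmu_probe(&pdev->dev, mc->soc->smmu, mc);
		if (IS_ERR(mc->smmu))
			return PTR_ERR(mc->smmu);

		return 0;
	}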
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 73e845a66925..6e134c7c227f 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -1,1296 +1,732 @@
1 | /* | 1 | /* |
2 | * IOMMU API for SMMU in Tegra30 | 2 | * Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved. |
3 | * | 3 | * |
4 | * Copyright (c) 2011-2013, NVIDIA CORPORATION. All rights reserved. | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * | 5 | * it under the terms of the GNU General Public License version 2 as |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * published by the Free Software Foundation. |
7 | * under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License along with | ||
16 | * this program; if not, write to the Free Software Foundation, Inc., | ||
17 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | */ | 7 | */ |
19 | 8 | ||
20 | #define pr_fmt(fmt) "%s(): " fmt, __func__ | ||
21 | |||
22 | #include <linux/err.h> | 9 | #include <linux/err.h> |
23 | #include <linux/module.h> | ||
24 | #include <linux/platform_device.h> | ||
25 | #include <linux/spinlock.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include <linux/vmalloc.h> | ||
28 | #include <linux/mm.h> | ||
29 | #include <linux/pagemap.h> | ||
30 | #include <linux/device.h> | ||
31 | #include <linux/sched.h> | ||
32 | #include <linux/iommu.h> | 10 | #include <linux/iommu.h> |
33 | #include <linux/io.h> | 11 | #include <linux/kernel.h> |
34 | #include <linux/of.h> | 12 | #include <linux/of.h> |
35 | #include <linux/of_iommu.h> | 13 | #include <linux/of_device.h> |
36 | #include <linux/debugfs.h> | 14 | #include <linux/platform_device.h> |
37 | #include <linux/seq_file.h> | 15 | #include <linux/slab.h> |
38 | 16 | ||
39 | #include <soc/tegra/ahb.h> | 17 | #include <soc/tegra/ahb.h> |
18 | #include <soc/tegra/mc.h> | ||
40 | 19 | ||
41 | #include <asm/page.h> | 20 | struct tegra_smmu { |
42 | #include <asm/cacheflush.h> | 21 | void __iomem *regs; |
43 | 22 | struct device *dev; | |
44 | enum smmu_hwgrp { | ||
45 | HWGRP_AFI, | ||
46 | HWGRP_AVPC, | ||
47 | HWGRP_DC, | ||
48 | HWGRP_DCB, | ||
49 | HWGRP_EPP, | ||
50 | HWGRP_G2, | ||
51 | HWGRP_HC, | ||
52 | HWGRP_HDA, | ||
53 | HWGRP_ISP, | ||
54 | HWGRP_MPE, | ||
55 | HWGRP_NV, | ||
56 | HWGRP_NV2, | ||
57 | HWGRP_PPCS, | ||
58 | HWGRP_SATA, | ||
59 | HWGRP_VDE, | ||
60 | HWGRP_VI, | ||
61 | |||
62 | HWGRP_COUNT, | ||
63 | |||
64 | HWGRP_END = ~0, | ||
65 | }; | ||
66 | 23 | ||
67 | #define HWG_AFI (1 << HWGRP_AFI) | 24 | struct tegra_mc *mc; |
68 | #define HWG_AVPC (1 << HWGRP_AVPC) | 25 | const struct tegra_smmu_soc *soc; |
69 | #define HWG_DC (1 << HWGRP_DC) | ||
70 | #define HWG_DCB (1 << HWGRP_DCB) | ||
71 | #define HWG_EPP (1 << HWGRP_EPP) | ||
72 | #define HWG_G2 (1 << HWGRP_G2) | ||
73 | #define HWG_HC (1 << HWGRP_HC) | ||
74 | #define HWG_HDA (1 << HWGRP_HDA) | ||
75 | #define HWG_ISP (1 << HWGRP_ISP) | ||
76 | #define HWG_MPE (1 << HWGRP_MPE) | ||
77 | #define HWG_NV (1 << HWGRP_NV) | ||
78 | #define HWG_NV2 (1 << HWGRP_NV2) | ||
79 | #define HWG_PPCS (1 << HWGRP_PPCS) | ||
80 | #define HWG_SATA (1 << HWGRP_SATA) | ||
81 | #define HWG_VDE (1 << HWGRP_VDE) | ||
82 | #define HWG_VI (1 << HWGRP_VI) | ||
83 | |||
84 | /* bitmap of the page sizes currently supported */ | ||
85 | #define SMMU_IOMMU_PGSIZES (SZ_4K) | ||
86 | |||
87 | #define SMMU_CONFIG 0x10 | ||
88 | #define SMMU_CONFIG_DISABLE 0 | ||
89 | #define SMMU_CONFIG_ENABLE 1 | ||
90 | |||
91 | /* REVISIT: To support multiple MCs */ | ||
92 | enum { | ||
93 | _MC = 0, | ||
94 | }; | ||
95 | 26 | ||
96 | enum { | 27 | unsigned long *asids; |
97 | _TLB = 0, | 28 | struct mutex lock; |
98 | _PTC, | ||
99 | }; | ||
100 | 29 | ||
101 | #define SMMU_CACHE_CONFIG_BASE 0x14 | 30 | struct list_head list; |
102 | #define __SMMU_CACHE_CONFIG(mc, cache) (SMMU_CACHE_CONFIG_BASE + 4 * cache) | ||
103 | #define SMMU_CACHE_CONFIG(cache) __SMMU_CACHE_CONFIG(_MC, cache) | ||
104 | |||
105 | #define SMMU_CACHE_CONFIG_STATS_SHIFT 31 | ||
106 | #define SMMU_CACHE_CONFIG_STATS_ENABLE (1 << SMMU_CACHE_CONFIG_STATS_SHIFT) | ||
107 | #define SMMU_CACHE_CONFIG_STATS_TEST_SHIFT 30 | ||
108 | #define SMMU_CACHE_CONFIG_STATS_TEST (1 << SMMU_CACHE_CONFIG_STATS_TEST_SHIFT) | ||
109 | |||
110 | #define SMMU_TLB_CONFIG_HIT_UNDER_MISS__ENABLE (1 << 29) | ||
111 | #define SMMU_TLB_CONFIG_ACTIVE_LINES__VALUE 0x10 | ||
112 | #define SMMU_TLB_CONFIG_RESET_VAL 0x20000010 | ||
113 | |||
114 | #define SMMU_PTC_CONFIG_CACHE__ENABLE (1 << 29) | ||
115 | #define SMMU_PTC_CONFIG_INDEX_MAP__PATTERN 0x3f | ||
116 | #define SMMU_PTC_CONFIG_RESET_VAL 0x2000003f | ||
117 | |||
118 | #define SMMU_PTB_ASID 0x1c | ||
119 | #define SMMU_PTB_ASID_CURRENT_SHIFT 0 | ||
120 | |||
121 | #define SMMU_PTB_DATA 0x20 | ||
122 | #define SMMU_PTB_DATA_RESET_VAL 0 | ||
123 | #define SMMU_PTB_DATA_ASID_NONSECURE_SHIFT 29 | ||
124 | #define SMMU_PTB_DATA_ASID_WRITABLE_SHIFT 30 | ||
125 | #define SMMU_PTB_DATA_ASID_READABLE_SHIFT 31 | ||
126 | |||
127 | #define SMMU_TLB_FLUSH 0x30 | ||
128 | #define SMMU_TLB_FLUSH_VA_MATCH_ALL 0 | ||
129 | #define SMMU_TLB_FLUSH_VA_MATCH_SECTION 2 | ||
130 | #define SMMU_TLB_FLUSH_VA_MATCH_GROUP 3 | ||
131 | #define SMMU_TLB_FLUSH_ASID_SHIFT 29 | ||
132 | #define SMMU_TLB_FLUSH_ASID_MATCH_DISABLE 0 | ||
133 | #define SMMU_TLB_FLUSH_ASID_MATCH_ENABLE 1 | ||
134 | #define SMMU_TLB_FLUSH_ASID_MATCH_SHIFT 31 | ||
135 | |||
136 | #define SMMU_PTC_FLUSH 0x34 | ||
137 | #define SMMU_PTC_FLUSH_TYPE_ALL 0 | ||
138 | #define SMMU_PTC_FLUSH_TYPE_ADR 1 | ||
139 | #define SMMU_PTC_FLUSH_ADR_SHIFT 4 | ||
140 | |||
141 | #define SMMU_ASID_SECURITY 0x38 | ||
142 | |||
143 | #define SMMU_STATS_CACHE_COUNT_BASE 0x1f0 | ||
144 | |||
145 | #define SMMU_STATS_CACHE_COUNT(mc, cache, hitmiss) \ | ||
146 | (SMMU_STATS_CACHE_COUNT_BASE + 8 * cache + 4 * hitmiss) | ||
147 | |||
148 | #define SMMU_TRANSLATION_ENABLE_0 0x228 | ||
149 | #define SMMU_TRANSLATION_ENABLE_1 0x22c | ||
150 | #define SMMU_TRANSLATION_ENABLE_2 0x230 | ||
151 | |||
152 | #define SMMU_AFI_ASID 0x238 /* PCIE */ | ||
153 | #define SMMU_AVPC_ASID 0x23c /* AVP */ | ||
154 | #define SMMU_DC_ASID 0x240 /* Display controller */ | ||
155 | #define SMMU_DCB_ASID 0x244 /* Display controller B */ | ||
156 | #define SMMU_EPP_ASID 0x248 /* Encoder pre-processor */ | ||
157 | #define SMMU_G2_ASID 0x24c /* 2D engine */ | ||
158 | #define SMMU_HC_ASID 0x250 /* Host1x */ | ||
159 | #define SMMU_HDA_ASID 0x254 /* High-def audio */ | ||
160 | #define SMMU_ISP_ASID 0x258 /* Image signal processor */ | ||
161 | #define SMMU_MPE_ASID 0x264 /* MPEG encoder */ | ||
162 | #define SMMU_NV_ASID 0x268 /* (3D) */ | ||
163 | #define SMMU_NV2_ASID 0x26c /* (3D) */ | ||
164 | #define SMMU_PPCS_ASID 0x270 /* AHB */ | ||
165 | #define SMMU_SATA_ASID 0x278 /* SATA */ | ||
166 | #define SMMU_VDE_ASID 0x27c /* Video decoder */ | ||
167 | #define SMMU_VI_ASID 0x280 /* Video input */ | ||
168 | |||
169 | #define SMMU_PDE_NEXT_SHIFT 28 | ||
170 | |||
171 | #define SMMU_TLB_FLUSH_VA_SECTION__MASK 0xffc00000 | ||
172 | #define SMMU_TLB_FLUSH_VA_SECTION__SHIFT 12 /* right shift */ | ||
173 | #define SMMU_TLB_FLUSH_VA_GROUP__MASK 0xffffc000 | ||
174 | #define SMMU_TLB_FLUSH_VA_GROUP__SHIFT 12 /* right shift */ | ||
175 | #define SMMU_TLB_FLUSH_VA(iova, which) \ | ||
176 | ((((iova) & SMMU_TLB_FLUSH_VA_##which##__MASK) >> \ | ||
177 | SMMU_TLB_FLUSH_VA_##which##__SHIFT) | \ | ||
178 | SMMU_TLB_FLUSH_VA_MATCH_##which) | ||
179 | #define SMMU_PTB_ASID_CUR(n) \ | ||
180 | ((n) << SMMU_PTB_ASID_CURRENT_SHIFT) | ||
181 | #define SMMU_TLB_FLUSH_ASID_MATCH_disable \ | ||
182 | (SMMU_TLB_FLUSH_ASID_MATCH_DISABLE << \ | ||
183 | SMMU_TLB_FLUSH_ASID_MATCH_SHIFT) | ||
184 | #define SMMU_TLB_FLUSH_ASID_MATCH__ENABLE \ | ||
185 | (SMMU_TLB_FLUSH_ASID_MATCH_ENABLE << \ | ||
186 | SMMU_TLB_FLUSH_ASID_MATCH_SHIFT) | ||
187 | |||
188 | #define SMMU_PAGE_SHIFT 12 | ||
189 | #define SMMU_PAGE_SIZE (1 << SMMU_PAGE_SHIFT) | ||
190 | #define SMMU_PAGE_MASK ((1 << SMMU_PAGE_SHIFT) - 1) | ||
191 | |||
192 | #define SMMU_PDIR_COUNT 1024 | ||
193 | #define SMMU_PDIR_SIZE (sizeof(unsigned long) * SMMU_PDIR_COUNT) | ||
194 | #define SMMU_PTBL_COUNT 1024 | ||
195 | #define SMMU_PTBL_SIZE (sizeof(unsigned long) * SMMU_PTBL_COUNT) | ||
196 | #define SMMU_PDIR_SHIFT 12 | ||
197 | #define SMMU_PDE_SHIFT 12 | ||
198 | #define SMMU_PTE_SHIFT 12 | ||
199 | #define SMMU_PFN_MASK 0x000fffff | ||
200 | |||
201 | #define SMMU_ADDR_TO_PFN(addr) ((addr) >> 12) | ||
202 | #define SMMU_ADDR_TO_PDN(addr) ((addr) >> 22) | ||
203 | #define SMMU_PDN_TO_ADDR(pdn) ((pdn) << 22) | ||
204 | |||
205 | #define _READABLE (1 << SMMU_PTB_DATA_ASID_READABLE_SHIFT) | ||
206 | #define _WRITABLE (1 << SMMU_PTB_DATA_ASID_WRITABLE_SHIFT) | ||
207 | #define _NONSECURE (1 << SMMU_PTB_DATA_ASID_NONSECURE_SHIFT) | ||
208 | #define _PDE_NEXT (1 << SMMU_PDE_NEXT_SHIFT) | ||
209 | #define _MASK_ATTR (_READABLE | _WRITABLE | _NONSECURE) | ||
210 | |||
211 | #define _PDIR_ATTR (_READABLE | _WRITABLE | _NONSECURE) | ||
212 | |||
213 | #define _PDE_ATTR (_READABLE | _WRITABLE | _NONSECURE) | ||
214 | #define _PDE_ATTR_N (_PDE_ATTR | _PDE_NEXT) | ||
215 | #define _PDE_VACANT(pdn) (((pdn) << 10) | _PDE_ATTR) | ||
216 | |||
217 | #define _PTE_ATTR (_READABLE | _WRITABLE | _NONSECURE) | ||
218 | #define _PTE_VACANT(addr) (((addr) >> SMMU_PAGE_SHIFT) | _PTE_ATTR) | ||
219 | |||
220 | #define SMMU_MK_PDIR(page, attr) \ | ||
221 | ((page_to_phys(page) >> SMMU_PDIR_SHIFT) | (attr)) | ||
222 | #define SMMU_MK_PDE(page, attr) \ | ||
223 | (unsigned long)((page_to_phys(page) >> SMMU_PDE_SHIFT) | (attr)) | ||
224 | #define SMMU_EX_PTBL_PAGE(pde) \ | ||
225 | pfn_to_page((unsigned long)(pde) & SMMU_PFN_MASK) | ||
226 | #define SMMU_PFN_TO_PTE(pfn, attr) (unsigned long)((pfn) | (attr)) | ||
227 | |||
228 | #define SMMU_ASID_ENABLE(asid) ((asid) | (1 << 31)) | ||
229 | #define SMMU_ASID_DISABLE 0 | ||
230 | #define SMMU_ASID_ASID(n) ((n) & ~SMMU_ASID_ENABLE(0)) | ||
231 | |||
232 | #define NUM_SMMU_REG_BANKS 3 | ||
233 | |||
234 | #define smmu_client_enable_hwgrp(c, m) smmu_client_set_hwgrp(c, m, 1) | ||
235 | #define smmu_client_disable_hwgrp(c) smmu_client_set_hwgrp(c, 0, 0) | ||
236 | #define __smmu_client_enable_hwgrp(c, m) __smmu_client_set_hwgrp(c, m, 1) | ||
237 | #define __smmu_client_disable_hwgrp(c) __smmu_client_set_hwgrp(c, 0, 0) | ||
238 | |||
239 | #define HWGRP_INIT(client) [HWGRP_##client] = SMMU_##client##_ASID | ||
240 | |||
241 | static const u32 smmu_hwgrp_asid_reg[] = { | ||
242 | HWGRP_INIT(AFI), | ||
243 | HWGRP_INIT(AVPC), | ||
244 | HWGRP_INIT(DC), | ||
245 | HWGRP_INIT(DCB), | ||
246 | HWGRP_INIT(EPP), | ||
247 | HWGRP_INIT(G2), | ||
248 | HWGRP_INIT(HC), | ||
249 | HWGRP_INIT(HDA), | ||
250 | HWGRP_INIT(ISP), | ||
251 | HWGRP_INIT(MPE), | ||
252 | HWGRP_INIT(NV), | ||
253 | HWGRP_INIT(NV2), | ||
254 | HWGRP_INIT(PPCS), | ||
255 | HWGRP_INIT(SATA), | ||
256 | HWGRP_INIT(VDE), | ||
257 | HWGRP_INIT(VI), | ||
258 | }; | 31 | }; |
259 | #define HWGRP_ASID_REG(x) (smmu_hwgrp_asid_reg[x]) | ||
260 | 32 | ||
261 | /* | 33 | struct tegra_smmu_as { |
262 | * Per client for address space | 34 | struct iommu_domain *domain; |
263 | */ | 35 | struct tegra_smmu *smmu; |
264 | struct smmu_client { | 36 | unsigned int use_count; |
265 | struct device *dev; | 37 | struct page *count; |
266 | struct list_head list; | 38 | struct page *pd; |
267 | struct smmu_as *as; | 39 | unsigned id; |
268 | u32 hwgrp; | 40 | u32 attr; |
269 | }; | 41 | }; |
270 | 42 | ||
271 | /* | 43 | static inline void smmu_writel(struct tegra_smmu *smmu, u32 value, |
272 | * Per address space | 44 | unsigned long offset) |
273 | */ | 45 | { |
274 | struct smmu_as { | 46 | writel(value, smmu->regs + offset); |
275 | struct smmu_device *smmu; /* back pointer to container */ | 47 | } |
276 | unsigned int asid; | ||
277 | spinlock_t lock; /* for pagetable */ | ||
278 | struct page *pdir_page; | ||
279 | unsigned long pdir_attr; | ||
280 | unsigned long pde_attr; | ||
281 | unsigned long pte_attr; | ||
282 | unsigned int *pte_count; | ||
283 | |||
284 | struct list_head client; | ||
285 | spinlock_t client_lock; /* for client list */ | ||
286 | }; | ||
287 | 48 | ||
288 | struct smmu_debugfs_info { | 49 | static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset) |
289 | struct smmu_device *smmu; | 50 | { |
290 | int mc; | 51 | return readl(smmu->regs + offset); |
291 | int cache; | 52 | } |
292 | }; | ||
293 | 53 | ||
294 | /* | 54 | #define SMMU_CONFIG 0x010 |
295 | * Per SMMU device - IOMMU device | 55 | #define SMMU_CONFIG_ENABLE (1 << 0) |
296 | */ | ||
297 | struct smmu_device { | ||
298 | void __iomem *regbase; /* register offset base */ | ||
299 | void __iomem **regs; /* register block start address array */ | ||
300 | void __iomem **rege; /* register block end address array */ | ||
301 | int nregs; /* number of register blocks */ | ||
302 | |||
303 | unsigned long iovmm_base; /* remappable base address */ | ||
304 | unsigned long page_count; /* total remappable size */ | ||
305 | spinlock_t lock; | ||
306 | char *name; | ||
307 | struct device *dev; | ||
308 | struct page *avp_vector_page; /* dummy page shared by all AS's */ | ||
309 | 56 | ||
310 | /* | 57 | #define SMMU_TLB_CONFIG 0x14 |
311 | * Register image savers for suspend/resume | 58 | #define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29) |
312 | */ | 59 | #define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28) |
313 | unsigned long translation_enable_0; | 60 | #define SMMU_TLB_CONFIG_ACTIVE_LINES(x) ((x) & 0x3f) |
314 | unsigned long translation_enable_1; | ||
315 | unsigned long translation_enable_2; | ||
316 | unsigned long asid_security; | ||
317 | 61 | ||
318 | struct dentry *debugfs_root; | 62 | #define SMMU_PTC_CONFIG 0x18 |
319 | struct smmu_debugfs_info *debugfs_info; | 63 | #define SMMU_PTC_CONFIG_ENABLE (1 << 29) |
64 | #define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24) | ||
65 | #define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f) | ||
320 | 66 | ||
321 | struct device_node *ahb; | 67 | #define SMMU_PTB_ASID 0x01c |
68 | #define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f) | ||
322 | 69 | ||
323 | int num_as; | 70 | #define SMMU_PTB_DATA 0x020 |
324 | struct smmu_as as[0]; /* Run-time allocated array */ | 71 | #define SMMU_PTB_DATA_VALUE(page, attr) (page_to_phys(page) >> 12 | (attr)) |
325 | }; | ||
326 | 72 | ||
327 | static struct smmu_device *smmu_handle; /* unique for a system */ | 73 | #define SMMU_MK_PDE(page, attr) (page_to_phys(page) >> SMMU_PTE_SHIFT | (attr)) |
328 | 74 | ||
329 | /* | 75 | #define SMMU_TLB_FLUSH 0x030 |
330 | * SMMU register accessors | 76 | #define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0) |
331 | */ | 77 | #define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0) |
332 | static bool inline smmu_valid_reg(struct smmu_device *smmu, | 78 | #define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0) |
333 | void __iomem *addr) | 79 | #define SMMU_TLB_FLUSH_ASID(x) (((x) & 0x7f) << 24) |
334 | { | 80 | #define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \ |
335 | int i; | 81 | SMMU_TLB_FLUSH_VA_MATCH_SECTION) |
82 | #define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \ | ||
83 | SMMU_TLB_FLUSH_VA_MATCH_GROUP) | ||
84 | #define SMMU_TLB_FLUSH_ASID_MATCH (1 << 31) | ||
336 | 85 | ||
337 | for (i = 0; i < smmu->nregs; i++) { | 86 | #define SMMU_PTC_FLUSH 0x034 |
338 | if (addr < smmu->regs[i]) | 87 | #define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0) |
339 | break; | 88 | #define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0) |
340 | if (addr <= smmu->rege[i]) | ||
341 | return true; | ||
342 | } | ||
343 | 89 | ||
344 | return false; | 90 | #define SMMU_PTC_FLUSH_HI 0x9b8 |
345 | } | 91 | #define SMMU_PTC_FLUSH_HI_MASK 0x3 |
346 | 92 | ||
347 | static inline u32 smmu_read(struct smmu_device *smmu, size_t offs) | 93 | /* per-SWGROUP SMMU_*_ASID register */ |
348 | { | 94 | #define SMMU_ASID_ENABLE (1 << 31) |
349 | void __iomem *addr = smmu->regbase + offs; | 95 | #define SMMU_ASID_MASK 0x7f |
96 | #define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK) | ||
350 | 97 | ||
351 | BUG_ON(!smmu_valid_reg(smmu, addr)); | 98 | /* page table definitions */ |
99 | #define SMMU_NUM_PDE 1024 | ||
100 | #define SMMU_NUM_PTE 1024 | ||
352 | 101 | ||
353 | return readl(addr); | 102 | #define SMMU_SIZE_PD (SMMU_NUM_PDE * 4) |
354 | } | 103 | #define SMMU_SIZE_PT (SMMU_NUM_PTE * 4) |
355 | 104 | ||
356 | static inline void smmu_write(struct smmu_device *smmu, u32 val, size_t offs) | 105 | #define SMMU_PDE_SHIFT 22 |
357 | { | 106 | #define SMMU_PTE_SHIFT 12 |
358 | void __iomem *addr = smmu->regbase + offs; | ||
359 | 107 | ||
360 | BUG_ON(!smmu_valid_reg(smmu, addr)); | 108 | #define SMMU_PFN_MASK 0x000fffff |
361 | 109 | ||
362 | writel(val, addr); | 110 | #define SMMU_PD_READABLE (1 << 31) |
363 | } | 111 | #define SMMU_PD_WRITABLE (1 << 30) |
112 | #define SMMU_PD_NONSECURE (1 << 29) | ||
364 | 113 | ||
365 | #define VA_PAGE_TO_PA(va, page) \ | 114 | #define SMMU_PDE_READABLE (1 << 31) |
366 | (page_to_phys(page) + ((unsigned long)(va) & ~PAGE_MASK)) | 115 | #define SMMU_PDE_WRITABLE (1 << 30) |
116 | #define SMMU_PDE_NONSECURE (1 << 29) | ||
117 | #define SMMU_PDE_NEXT (1 << 28) | ||
367 | 118 | ||
368 | #define FLUSH_CPU_DCACHE(va, page, size) \ | 119 | #define SMMU_PTE_READABLE (1 << 31) |
369 | do { \ | 120 | #define SMMU_PTE_WRITABLE (1 << 30) |
370 | unsigned long _pa_ = VA_PAGE_TO_PA(va, page); \ | 121 | #define SMMU_PTE_NONSECURE (1 << 29) |
371 | __cpuc_flush_dcache_area((void *)(va), (size_t)(size)); \ | ||
372 | outer_flush_range(_pa_, _pa_+(size_t)(size)); \ | ||
373 | } while (0) | ||
374 | 122 | ||
375 | /* | 123 | #define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \ |
376 | * Any interaction between any block on PPSB and a block on APB or AHB | 124 | SMMU_PDE_NONSECURE) |
377 | * must have these read-back barriers to ensure the APB/AHB bus | 125 | #define SMMU_PTE_ATTR (SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \ |
378 | * transaction is complete before initiating activity on the PPSB | 126 | SMMU_PTE_NONSECURE) |
379 | * block. | ||
380 | */ | ||
381 | #define FLUSH_SMMU_REGS(smmu) smmu_read(smmu, SMMU_CONFIG) | ||
382 | 127 | ||
383 | #define smmu_client_hwgrp(c) (u32)((c)->dev->platform_data) | 128 | static inline void smmu_flush_ptc(struct tegra_smmu *smmu, struct page *page, |
384 | 129 | unsigned long offset) | |
385 | static int __smmu_client_set_hwgrp(struct smmu_client *c, | ||
386 | unsigned long map, int on) | ||
387 | { | 130 | { |
388 | int i; | 131 | phys_addr_t phys = page ? page_to_phys(page) : 0; |
389 | struct smmu_as *as = c->as; | 132 | u32 value; |
390 | u32 val, offs, mask = SMMU_ASID_ENABLE(as->asid); | 133 | |
391 | struct smmu_device *smmu = as->smmu; | 134 | if (page) { |
392 | 135 | offset &= ~(smmu->mc->soc->atom_size - 1); | |
393 | WARN_ON(!on && map); | 136 | |
394 | if (on && !map) | 137 | if (smmu->mc->soc->num_address_bits > 32) { |
395 | return -EINVAL; | 138 | #ifdef CONFIG_PHYS_ADDR_T_64BIT |
396 | if (!on) | 139 | value = (phys >> 32) & SMMU_PTC_FLUSH_HI_MASK; |
397 | map = smmu_client_hwgrp(c); | 140 | #else |
398 | 141 | value = 0; | |
399 | for_each_set_bit(i, &map, HWGRP_COUNT) { | 142 | #endif |
400 | offs = HWGRP_ASID_REG(i); | 143 | smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI); |
401 | val = smmu_read(smmu, offs); | ||
402 | if (on) { | ||
403 | if (WARN_ON(val & mask)) | ||
404 | goto err_hw_busy; | ||
405 | val |= mask; | ||
406 | } else { | ||
407 | WARN_ON((val & mask) == mask); | ||
408 | val &= ~mask; | ||
409 | } | 144 | } |
410 | smmu_write(smmu, val, offs); | ||
411 | } | ||
412 | FLUSH_SMMU_REGS(smmu); | ||
413 | c->hwgrp = map; | ||
414 | return 0; | ||
415 | 145 | ||
416 | err_hw_busy: | 146 | value = (phys + offset) | SMMU_PTC_FLUSH_TYPE_ADR; |
417 | for_each_set_bit(i, &map, HWGRP_COUNT) { | 147 | } else { |
418 | offs = HWGRP_ASID_REG(i); | 148 | value = SMMU_PTC_FLUSH_TYPE_ALL; |
419 | val = smmu_read(smmu, offs); | ||
420 | val &= ~mask; | ||
421 | smmu_write(smmu, val, offs); | ||
422 | } | 149 | } |
423 | return -EBUSY; | 150 | |
151 | smmu_writel(smmu, value, SMMU_PTC_FLUSH); | ||
424 | } | 152 | } |
425 | 153 | ||
426 | static int smmu_client_set_hwgrp(struct smmu_client *c, u32 map, int on) | 154 | static inline void smmu_flush_tlb(struct tegra_smmu *smmu) |
427 | { | 155 | { |
428 | u32 val; | 156 | smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH); |
429 | unsigned long flags; | ||
430 | struct smmu_as *as = c->as; | ||
431 | struct smmu_device *smmu = as->smmu; | ||
432 | |||
433 | spin_lock_irqsave(&smmu->lock, flags); | ||
434 | val = __smmu_client_set_hwgrp(c, map, on); | ||
435 | spin_unlock_irqrestore(&smmu->lock, flags); | ||
436 | return val; | ||
437 | } | 157 | } |
438 | 158 | ||
439 | /* | 159 | static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu, |
440 | * Flush all TLB entries and all PTC entries | 160 | unsigned long asid) |
441 | * Caller must lock smmu | ||
442 | */ | ||
443 | static void smmu_flush_regs(struct smmu_device *smmu, int enable) | ||
444 | { | 161 | { |
445 | u32 val; | 162 | u32 value; |
446 | |||
447 | smmu_write(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH); | ||
448 | FLUSH_SMMU_REGS(smmu); | ||
449 | val = SMMU_TLB_FLUSH_VA_MATCH_ALL | | ||
450 | SMMU_TLB_FLUSH_ASID_MATCH_disable; | ||
451 | smmu_write(smmu, val, SMMU_TLB_FLUSH); | ||
452 | 163 | ||
453 | if (enable) | 164 | value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) | |
454 | smmu_write(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG); | 165 | SMMU_TLB_FLUSH_VA_MATCH_ALL; |
455 | FLUSH_SMMU_REGS(smmu); | 166 | smmu_writel(smmu, value, SMMU_TLB_FLUSH); |
456 | } | 167 | } |
457 | 168 | ||
458 | static int smmu_setup_regs(struct smmu_device *smmu) | 169 | static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu, |
170 | unsigned long asid, | ||
171 | unsigned long iova) | ||
459 | { | 172 | { |
460 | int i; | 173 | u32 value; |
461 | u32 val; | ||
462 | 174 | ||
463 | for (i = 0; i < smmu->num_as; i++) { | 175 | value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) | |
464 | struct smmu_as *as = &smmu->as[i]; | 176 | SMMU_TLB_FLUSH_VA_SECTION(iova); |
465 | struct smmu_client *c; | 177 | smmu_writel(smmu, value, SMMU_TLB_FLUSH); |
466 | |||
467 | smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID); | ||
468 | val = as->pdir_page ? | ||
469 | SMMU_MK_PDIR(as->pdir_page, as->pdir_attr) : | ||
470 | SMMU_PTB_DATA_RESET_VAL; | ||
471 | smmu_write(smmu, val, SMMU_PTB_DATA); | ||
472 | |||
473 | list_for_each_entry(c, &as->client, list) | ||
474 | __smmu_client_set_hwgrp(c, c->hwgrp, 1); | ||
475 | } | ||
476 | |||
477 | smmu_write(smmu, smmu->translation_enable_0, SMMU_TRANSLATION_ENABLE_0); | ||
478 | smmu_write(smmu, smmu->translation_enable_1, SMMU_TRANSLATION_ENABLE_1); | ||
479 | smmu_write(smmu, smmu->translation_enable_2, SMMU_TRANSLATION_ENABLE_2); | ||
480 | smmu_write(smmu, smmu->asid_security, SMMU_ASID_SECURITY); | ||
481 | smmu_write(smmu, SMMU_TLB_CONFIG_RESET_VAL, SMMU_CACHE_CONFIG(_TLB)); | ||
482 | smmu_write(smmu, SMMU_PTC_CONFIG_RESET_VAL, SMMU_CACHE_CONFIG(_PTC)); | ||
483 | |||
484 | smmu_flush_regs(smmu, 1); | ||
485 | |||
486 | return tegra_ahb_enable_smmu(smmu->ahb); | ||
487 | } | 178 | } |
488 | 179 | ||
489 | static void flush_ptc_and_tlb(struct smmu_device *smmu, | 180 | static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu, |
490 | struct smmu_as *as, dma_addr_t iova, | 181 | unsigned long asid, |
491 | unsigned long *pte, struct page *page, int is_pde) | 182 | unsigned long iova) |
492 | { | 183 | { |
493 | u32 val; | 184 | u32 value; |
494 | unsigned long tlb_flush_va = is_pde | ||
495 | ? SMMU_TLB_FLUSH_VA(iova, SECTION) | ||
496 | : SMMU_TLB_FLUSH_VA(iova, GROUP); | ||
497 | |||
498 | val = SMMU_PTC_FLUSH_TYPE_ADR | VA_PAGE_TO_PA(pte, page); | ||
499 | smmu_write(smmu, val, SMMU_PTC_FLUSH); | ||
500 | FLUSH_SMMU_REGS(smmu); | ||
501 | val = tlb_flush_va | | ||
502 | SMMU_TLB_FLUSH_ASID_MATCH__ENABLE | | ||
503 | (as->asid << SMMU_TLB_FLUSH_ASID_SHIFT); | ||
504 | smmu_write(smmu, val, SMMU_TLB_FLUSH); | ||
505 | FLUSH_SMMU_REGS(smmu); | ||
506 | } | ||
507 | 185 | ||
508 | static void free_ptbl(struct smmu_as *as, dma_addr_t iova) | 186 | value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) | |
509 | { | 187 | SMMU_TLB_FLUSH_VA_GROUP(iova); |
510 | unsigned long pdn = SMMU_ADDR_TO_PDN(iova); | 188 | smmu_writel(smmu, value, SMMU_TLB_FLUSH); |
511 | unsigned long *pdir = (unsigned long *)page_address(as->pdir_page); | ||
512 | |||
513 | if (pdir[pdn] != _PDE_VACANT(pdn)) { | ||
514 | dev_dbg(as->smmu->dev, "pdn: %lx\n", pdn); | ||
515 | |||
516 | ClearPageReserved(SMMU_EX_PTBL_PAGE(pdir[pdn])); | ||
517 | __free_page(SMMU_EX_PTBL_PAGE(pdir[pdn])); | ||
518 | pdir[pdn] = _PDE_VACANT(pdn); | ||
519 | FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]); | ||
520 | flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn], | ||
521 | as->pdir_page, 1); | ||
522 | } | ||
523 | } | 189 | } |
524 | 190 | ||
525 | static void free_pdir(struct smmu_as *as) | 191 | static inline void smmu_flush(struct tegra_smmu *smmu) |
526 | { | 192 | { |
527 | unsigned addr; | 193 | smmu_readl(smmu, SMMU_CONFIG); |
528 | int count; | ||
529 | struct device *dev = as->smmu->dev; | ||
530 | |||
531 | if (!as->pdir_page) | ||
532 | return; | ||
533 | |||
534 | addr = as->smmu->iovmm_base; | ||
535 | count = as->smmu->page_count; | ||
536 | while (count-- > 0) { | ||
537 | free_ptbl(as, addr); | ||
538 | addr += SMMU_PAGE_SIZE * SMMU_PTBL_COUNT; | ||
539 | } | ||
540 | ClearPageReserved(as->pdir_page); | ||
541 | __free_page(as->pdir_page); | ||
542 | as->pdir_page = NULL; | ||
543 | devm_kfree(dev, as->pte_count); | ||
544 | as->pte_count = NULL; | ||
545 | } | 194 | } |
546 | 195 | ||
547 | /* | 196 | static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp) |
548 | * Maps PTBL for given iova and returns the PTE address | ||
549 | * Caller must unmap the mapped PTBL returned in *ptbl_page_p | ||
550 | */ | ||
551 | static unsigned long *locate_pte(struct smmu_as *as, | ||
552 | dma_addr_t iova, bool allocate, | ||
553 | struct page **ptbl_page_p, | ||
554 | unsigned int **count) | ||
555 | { | 197 | { |
556 | unsigned long ptn = SMMU_ADDR_TO_PFN(iova); | 198 | unsigned long id; |
557 | unsigned long pdn = SMMU_ADDR_TO_PDN(iova); | ||
558 | unsigned long *pdir = page_address(as->pdir_page); | ||
559 | unsigned long *ptbl; | ||
560 | |||
561 | if (pdir[pdn] != _PDE_VACANT(pdn)) { | ||
562 | /* Mapped entry table already exists */ | ||
563 | *ptbl_page_p = SMMU_EX_PTBL_PAGE(pdir[pdn]); | ||
564 | ptbl = page_address(*ptbl_page_p); | ||
565 | } else if (!allocate) { | ||
566 | return NULL; | ||
567 | } else { | ||
568 | int pn; | ||
569 | unsigned long addr = SMMU_PDN_TO_ADDR(pdn); | ||
570 | 199 | ||
571 | /* Vacant - allocate a new page table */ | 200 | mutex_lock(&smmu->lock); |
572 | dev_dbg(as->smmu->dev, "New PTBL pdn: %lx\n", pdn); | ||
573 | 201 | ||
574 | *ptbl_page_p = alloc_page(GFP_ATOMIC); | 202 | id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids); |
575 | if (!*ptbl_page_p) { | 203 | if (id >= smmu->soc->num_asids) { |
576 | dev_err(as->smmu->dev, | 204 | mutex_unlock(&smmu->lock); |
577 | "failed to allocate smmu_device page table\n"); | 205 | return -ENOSPC; |
578 | return NULL; | ||
579 | } | ||
580 | SetPageReserved(*ptbl_page_p); | ||
581 | ptbl = (unsigned long *)page_address(*ptbl_page_p); | ||
582 | for (pn = 0; pn < SMMU_PTBL_COUNT; | ||
583 | pn++, addr += SMMU_PAGE_SIZE) { | ||
584 | ptbl[pn] = _PTE_VACANT(addr); | ||
585 | } | ||
586 | FLUSH_CPU_DCACHE(ptbl, *ptbl_page_p, SMMU_PTBL_SIZE); | ||
587 | pdir[pdn] = SMMU_MK_PDE(*ptbl_page_p, | ||
588 | as->pde_attr | _PDE_NEXT); | ||
589 | FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]); | ||
590 | flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn], | ||
591 | as->pdir_page, 1); | ||
592 | } | 206 | } |
593 | *count = &as->pte_count[pdn]; | ||
594 | 207 | ||
595 | return &ptbl[ptn % SMMU_PTBL_COUNT]; | 208 | set_bit(id, smmu->asids); |
209 | *idp = id; | ||
210 | |||
211 | mutex_unlock(&smmu->lock); | ||
212 | return 0; | ||
596 | } | 213 | } |
597 | 214 | ||
598 | #ifdef CONFIG_SMMU_SIG_DEBUG | 215 | static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id) |
599 | static void put_signature(struct smmu_as *as, | ||
600 | dma_addr_t iova, unsigned long pfn) | ||
601 | { | 216 | { |
602 | struct page *page; | 217 | mutex_lock(&smmu->lock); |
603 | unsigned long *vaddr; | 218 | clear_bit(id, smmu->asids); |
604 | 219 | mutex_unlock(&smmu->lock); | |
605 | page = pfn_to_page(pfn); | ||
606 | vaddr = page_address(page); | ||
607 | if (!vaddr) | ||
608 | return; | ||
609 | |||
610 | vaddr[0] = iova; | ||
611 | vaddr[1] = pfn << PAGE_SHIFT; | ||
612 | FLUSH_CPU_DCACHE(vaddr, page, sizeof(vaddr[0]) * 2); | ||
613 | } | 220 | } |
614 | #else | 221 | |
615 | static inline void put_signature(struct smmu_as *as, | 222 | static bool tegra_smmu_capable(enum iommu_cap cap) |
616 | unsigned long addr, unsigned long pfn) | ||
617 | { | 223 | { |
224 | return false; | ||
618 | } | 225 | } |
619 | #endif | ||
620 | 226 | ||
621 | /* | 227 | static int tegra_smmu_domain_init(struct iommu_domain *domain) |
622 | * Caller must not hold as->lock | ||
623 | */ | ||
624 | static int alloc_pdir(struct smmu_as *as) | ||
625 | { | 228 | { |
626 | unsigned long *pdir, flags; | 229 | struct tegra_smmu_as *as; |
627 | int pdn, err = 0; | 230 | unsigned int i; |
628 | u32 val; | 231 | uint32_t *pd; |
629 | struct smmu_device *smmu = as->smmu; | ||
630 | struct page *page; | ||
631 | unsigned int *cnt; | ||
632 | 232 | ||
633 | /* | 233 | as = kzalloc(sizeof(*as), GFP_KERNEL); |
634 | * do the allocation, then grab as->lock | 234 | if (!as) |
635 | */ | 235 | return -ENOMEM; |
636 | cnt = devm_kzalloc(smmu->dev, | ||
637 | sizeof(cnt[0]) * SMMU_PDIR_COUNT, | ||
638 | GFP_KERNEL); | ||
639 | page = alloc_page(GFP_KERNEL | __GFP_DMA); | ||
640 | 236 | ||
641 | spin_lock_irqsave(&as->lock, flags); | 237 | as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE; |
238 | as->domain = domain; | ||
642 | 239 | ||
643 | if (as->pdir_page) { | 240 | as->pd = alloc_page(GFP_KERNEL | __GFP_DMA); |
644 | /* We raced, free the redundant */ | 241 | if (!as->pd) { |
645 | err = -EAGAIN; | 242 | kfree(as); |
646 | goto err_out; | 243 | return -ENOMEM; |
647 | } | 244 | } |
648 | 245 | ||
649 | if (!page || !cnt) { | 246 | as->count = alloc_page(GFP_KERNEL); |
650 | dev_err(smmu->dev, "failed to allocate at %s\n", __func__); | 247 | if (!as->count) { |
651 | err = -ENOMEM; | 248 | __free_page(as->pd); |
652 | goto err_out; | 249 | kfree(as); |
250 | return -ENOMEM; | ||
653 | } | 251 | } |
654 | 252 | ||
655 | as->pdir_page = page; | 253 | /* clear PDEs */ |
656 | as->pte_count = cnt; | 254 | pd = page_address(as->pd); |
255 | SetPageReserved(as->pd); | ||
657 | 256 | ||
658 | SetPageReserved(as->pdir_page); | 257 | for (i = 0; i < SMMU_NUM_PDE; i++) |
659 | pdir = page_address(as->pdir_page); | 258 | pd[i] = 0; |
660 | 259 | ||
661 | for (pdn = 0; pdn < SMMU_PDIR_COUNT; pdn++) | 260 | /* clear PDE usage counters */ |
662 | pdir[pdn] = _PDE_VACANT(pdn); | 261 | pd = page_address(as->count); |
663 | FLUSH_CPU_DCACHE(pdir, as->pdir_page, SMMU_PDIR_SIZE); | 262 | SetPageReserved(as->count); |
664 | val = SMMU_PTC_FLUSH_TYPE_ADR | VA_PAGE_TO_PA(pdir, as->pdir_page); | ||
665 | smmu_write(smmu, val, SMMU_PTC_FLUSH); | ||
666 | FLUSH_SMMU_REGS(as->smmu); | ||
667 | val = SMMU_TLB_FLUSH_VA_MATCH_ALL | | ||
668 | SMMU_TLB_FLUSH_ASID_MATCH__ENABLE | | ||
669 | (as->asid << SMMU_TLB_FLUSH_ASID_SHIFT); | ||
670 | smmu_write(smmu, val, SMMU_TLB_FLUSH); | ||
671 | FLUSH_SMMU_REGS(as->smmu); | ||
672 | 263 | ||
673 | spin_unlock_irqrestore(&as->lock, flags); | 264 | for (i = 0; i < SMMU_NUM_PDE; i++) |
674 | 265 | pd[i] = 0; | |
675 | return 0; | ||
676 | 266 | ||
677 | err_out: | 267 | domain->priv = as; |
678 | spin_unlock_irqrestore(&as->lock, flags); | ||
679 | 268 | ||
680 | devm_kfree(smmu->dev, cnt); | 269 | return 0; |
681 | if (page) | ||
682 | __free_page(page); | ||
683 | return err; | ||
684 | } | 270 | } |
685 | 271 | ||
686 | static void __smmu_iommu_unmap(struct smmu_as *as, dma_addr_t iova) | 272 | static void tegra_smmu_domain_destroy(struct iommu_domain *domain) |
687 | { | 273 | { |
688 | unsigned long *pte; | 274 | struct tegra_smmu_as *as = domain->priv; |
689 | struct page *page; | ||
690 | unsigned int *count; | ||
691 | 275 | ||
692 | pte = locate_pte(as, iova, false, &page, &count); | 276 | /* TODO: free page directory and page tables */ |
693 | if (WARN_ON(!pte)) | 277 | ClearPageReserved(as->pd); |
694 | return; | ||
695 | 278 | ||
696 | if (WARN_ON(*pte == _PTE_VACANT(iova))) | 279 | kfree(as); |
697 | return; | ||
698 | |||
699 | *pte = _PTE_VACANT(iova); | ||
700 | FLUSH_CPU_DCACHE(pte, page, sizeof(*pte)); | ||
701 | flush_ptc_and_tlb(as->smmu, as, iova, pte, page, 0); | ||
702 | if (!--(*count)) | ||
703 | free_ptbl(as, iova); | ||
704 | } | 280 | } |
705 | 281 | ||
706 | static void __smmu_iommu_map_pfn(struct smmu_as *as, dma_addr_t iova, | 282 | static const struct tegra_smmu_swgroup * |
707 | unsigned long pfn) | 283 | tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup) |
708 | { | 284 | { |
709 | struct smmu_device *smmu = as->smmu; | 285 | const struct tegra_smmu_swgroup *group = NULL; |
710 | unsigned long *pte; | 286 | unsigned int i; |
711 | unsigned int *count; | ||
712 | struct page *page; | ||
713 | 287 | ||
714 | pte = locate_pte(as, iova, true, &page, &count); | 288 | for (i = 0; i < smmu->soc->num_swgroups; i++) { |
715 | if (WARN_ON(!pte)) | 289 | if (smmu->soc->swgroups[i].swgroup == swgroup) { |
716 | return; | 290 | group = &smmu->soc->swgroups[i]; |
291 | break; | ||
292 | } | ||
293 | } | ||
717 | 294 | ||
718 | if (*pte == _PTE_VACANT(iova)) | 295 | return group; |
719 | (*count)++; | ||
720 | *pte = SMMU_PFN_TO_PTE(pfn, as->pte_attr); | ||
721 | if (unlikely((*pte == _PTE_VACANT(iova)))) | ||
722 | (*count)--; | ||
723 | FLUSH_CPU_DCACHE(pte, page, sizeof(*pte)); | ||
724 | flush_ptc_and_tlb(smmu, as, iova, pte, page, 0); | ||
725 | put_signature(as, iova, pfn); | ||
726 | } | 296 | } |
727 | 297 | ||
728 | static int smmu_iommu_map(struct iommu_domain *domain, unsigned long iova, | 298 | static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup, |
729 | phys_addr_t pa, size_t bytes, int prot) | 299 | unsigned int asid) |
730 | { | 300 | { |
731 | struct smmu_as *as = domain->priv; | 301 | const struct tegra_smmu_swgroup *group; |
732 | unsigned long pfn = __phys_to_pfn(pa); | 302 | unsigned int i; |
733 | unsigned long flags; | 303 | u32 value; |
734 | 304 | ||
735 | dev_dbg(as->smmu->dev, "[%d] %08lx:%pa\n", as->asid, iova, &pa); | 305 | for (i = 0; i < smmu->soc->num_clients; i++) { |
306 | const struct tegra_mc_client *client = &smmu->soc->clients[i]; | ||
736 | 307 | ||
737 | if (!pfn_valid(pfn)) | 308 | if (client->swgroup != swgroup) |
738 | return -ENOMEM; | 309 | continue; |
739 | |||
740 | spin_lock_irqsave(&as->lock, flags); | ||
741 | __smmu_iommu_map_pfn(as, iova, pfn); | ||
742 | spin_unlock_irqrestore(&as->lock, flags); | ||
743 | return 0; | ||
744 | } | ||
745 | |||
746 | static size_t smmu_iommu_unmap(struct iommu_domain *domain, unsigned long iova, | ||
747 | size_t bytes) | ||
748 | { | ||
749 | struct smmu_as *as = domain->priv; | ||
750 | unsigned long flags; | ||
751 | 310 | ||
752 | dev_dbg(as->smmu->dev, "[%d] %08lx\n", as->asid, iova); | 311 | value = smmu_readl(smmu, client->smmu.reg); |
312 | value |= BIT(client->smmu.bit); | ||
313 | smmu_writel(smmu, value, client->smmu.reg); | ||
314 | } | ||
753 | 315 | ||
754 | spin_lock_irqsave(&as->lock, flags); | 316 | group = tegra_smmu_find_swgroup(smmu, swgroup); |
755 | __smmu_iommu_unmap(as, iova); | 317 | if (group) { |
756 | spin_unlock_irqrestore(&as->lock, flags); | 318 | value = smmu_readl(smmu, group->reg); |
757 | return SMMU_PAGE_SIZE; | 319 | value &= ~SMMU_ASID_MASK; |
320 | value |= SMMU_ASID_VALUE(asid); | ||
321 | value |= SMMU_ASID_ENABLE; | ||
322 | smmu_writel(smmu, value, group->reg); | ||
323 | } | ||
758 | } | 324 | } |
759 | 325 | ||
760 | static phys_addr_t smmu_iommu_iova_to_phys(struct iommu_domain *domain, | 326 | static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup, |
761 | dma_addr_t iova) | 327 | unsigned int asid) |
762 | { | 328 | { |
763 | struct smmu_as *as = domain->priv; | 329 | const struct tegra_smmu_swgroup *group; |
764 | unsigned long *pte; | 330 | unsigned int i; |
765 | unsigned int *count; | 331 | u32 value; |
766 | struct page *page; | ||
767 | unsigned long pfn; | ||
768 | unsigned long flags; | ||
769 | 332 | ||
770 | spin_lock_irqsave(&as->lock, flags); | 333 | group = tegra_smmu_find_swgroup(smmu, swgroup); |
334 | if (group) { | ||
335 | value = smmu_readl(smmu, group->reg); | ||
336 | value &= ~SMMU_ASID_MASK; | ||
337 | value |= SMMU_ASID_VALUE(asid); | ||
338 | value &= ~SMMU_ASID_ENABLE; | ||
339 | smmu_writel(smmu, value, group->reg); | ||
340 | } | ||
771 | 341 | ||
772 | pte = locate_pte(as, iova, true, &page, &count); | 342 | for (i = 0; i < smmu->soc->num_clients; i++) { |
773 | pfn = *pte & SMMU_PFN_MASK; | 343 | const struct tegra_mc_client *client = &smmu->soc->clients[i]; |
774 | WARN_ON(!pfn_valid(pfn)); | ||
775 | dev_dbg(as->smmu->dev, | ||
776 | "iova:%08llx pfn:%08lx asid:%d\n", (unsigned long long)iova, | ||
777 | pfn, as->asid); | ||
778 | 344 | ||
779 | spin_unlock_irqrestore(&as->lock, flags); | 345 | if (client->swgroup != swgroup) |
780 | return PFN_PHYS(pfn); | 346 | continue; |
781 | } | ||
782 | 347 | ||
783 | static bool smmu_iommu_capable(enum iommu_cap cap) | 348 | value = smmu_readl(smmu, client->smmu.reg); |
784 | { | 349 | value &= ~BIT(client->smmu.bit); |
785 | return false; | 350 | smmu_writel(smmu, value, client->smmu.reg); |
351 | } | ||
786 | } | 352 | } |
787 | 353 | ||
788 | static int smmu_iommu_attach_dev(struct iommu_domain *domain, | 354 | static int tegra_smmu_as_prepare(struct tegra_smmu *smmu, |
789 | struct device *dev) | 355 | struct tegra_smmu_as *as) |
790 | { | 356 | { |
791 | struct smmu_as *as = domain->priv; | 357 | u32 value; |
792 | struct smmu_device *smmu = as->smmu; | ||
793 | struct smmu_client *client, *c; | ||
794 | u32 map; | ||
795 | int err; | 358 | int err; |
796 | 359 | ||
797 | client = devm_kzalloc(smmu->dev, sizeof(*c), GFP_KERNEL); | 360 | if (as->use_count > 0) { |
798 | if (!client) | 361 | as->use_count++; |
799 | return -ENOMEM; | 362 | return 0; |
800 | client->dev = dev; | ||
801 | client->as = as; | ||
802 | map = (unsigned long)dev->platform_data; | ||
803 | if (!map) | ||
804 | return -EINVAL; | ||
805 | |||
806 | err = smmu_client_enable_hwgrp(client, map); | ||
807 | if (err) | ||
808 | goto err_hwgrp; | ||
809 | |||
810 | spin_lock(&as->client_lock); | ||
811 | list_for_each_entry(c, &as->client, list) { | ||
812 | if (c->dev == dev) { | ||
813 | dev_err(smmu->dev, | ||
814 | "%s is already attached\n", dev_name(c->dev)); | ||
815 | err = -EINVAL; | ||
816 | goto err_client; | ||
817 | } | ||
818 | } | 363 | } |
819 | list_add(&client->list, &as->client); | ||
820 | spin_unlock(&as->client_lock); | ||
821 | 364 | ||
822 | /* | 365 | err = tegra_smmu_alloc_asid(smmu, &as->id); |
823 | * Reserve "page zero" for AVP vectors using a common dummy | 366 | if (err < 0) |
824 | * page. | 367 | return err; |
825 | */ | ||
826 | if (map & HWG_AVPC) { | ||
827 | struct page *page; | ||
828 | 368 | ||
829 | page = as->smmu->avp_vector_page; | 369 | smmu->soc->ops->flush_dcache(as->pd, 0, SMMU_SIZE_PD); |
830 | __smmu_iommu_map_pfn(as, 0, page_to_pfn(page)); | 370 | smmu_flush_ptc(smmu, as->pd, 0); |
371 | smmu_flush_tlb_asid(smmu, as->id); | ||
831 | 372 | ||
832 | pr_info("Reserve \"page zero\" for AVP vectors using a common dummy\n"); | 373 | smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID); |
833 | } | 374 | value = SMMU_PTB_DATA_VALUE(as->pd, as->attr); |
375 | smmu_writel(smmu, value, SMMU_PTB_DATA); | ||
376 | smmu_flush(smmu); | ||
834 | 377 | ||
835 | dev_dbg(smmu->dev, "%s is attached\n", dev_name(dev)); | 378 | as->smmu = smmu; |
836 | return 0; | 379 | as->use_count++; |
837 | 380 | ||
838 | err_client: | 381 | return 0; |
839 | smmu_client_disable_hwgrp(client); | ||
840 | spin_unlock(&as->client_lock); | ||
841 | err_hwgrp: | ||
842 | devm_kfree(smmu->dev, client); | ||
843 | return err; | ||
844 | } | 382 | } |
845 | 383 | ||
846 | static void smmu_iommu_detach_dev(struct iommu_domain *domain, | 384 | static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu, |
847 | struct device *dev) | 385 | struct tegra_smmu_as *as) |
848 | { | 386 | { |
849 | struct smmu_as *as = domain->priv; | 387 | if (--as->use_count > 0) |
850 | struct smmu_device *smmu = as->smmu; | 388 | return; |
851 | struct smmu_client *c; | 389 | |
852 | 390 | tegra_smmu_free_asid(smmu, as->id); | |
853 | spin_lock(&as->client_lock); | 391 | as->smmu = NULL; |
854 | |||
855 | list_for_each_entry(c, &as->client, list) { | ||
856 | if (c->dev == dev) { | ||
857 | smmu_client_disable_hwgrp(c); | ||
858 | list_del(&c->list); | ||
859 | devm_kfree(smmu->dev, c); | ||
860 | c->as = NULL; | ||
861 | dev_dbg(smmu->dev, | ||
862 | "%s is detached\n", dev_name(c->dev)); | ||
863 | goto out; | ||
864 | } | ||
865 | } | ||
866 | dev_err(smmu->dev, "Couldn't find %s\n", dev_name(dev)); | ||
867 | out: | ||
868 | spin_unlock(&as->client_lock); | ||
869 | } | 392 | } |
870 | 393 | ||
871 | static int smmu_iommu_domain_init(struct iommu_domain *domain) | 394 | static int tegra_smmu_attach_dev(struct iommu_domain *domain, |
395 | struct device *dev) | ||
872 | { | 396 | { |
873 | int i, err = -EAGAIN; | 397 | struct tegra_smmu *smmu = dev->archdata.iommu; |
874 | unsigned long flags; | 398 | struct tegra_smmu_as *as = domain->priv; |
875 | struct smmu_as *as; | 399 | struct device_node *np = dev->of_node; |
876 | struct smmu_device *smmu = smmu_handle; | 400 | struct of_phandle_args args; |
401 | unsigned int index = 0; | ||
402 | int err = 0; | ||
877 | 403 | ||
878 | /* Look for a free AS with lock held */ | 404 | while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index, |
879 | for (i = 0; i < smmu->num_as; i++) { | 405 | &args)) { |
880 | as = &smmu->as[i]; | 406 | unsigned int swgroup = args.args[0]; |
881 | 407 | ||
882 | if (as->pdir_page) | 408 | if (args.np != smmu->dev->of_node) { |
409 | of_node_put(args.np); | ||
883 | continue; | 410 | continue; |
411 | } | ||
884 | 412 | ||
885 | err = alloc_pdir(as); | 413 | of_node_put(args.np); |
886 | if (!err) | ||
887 | goto found; | ||
888 | 414 | ||
889 | if (err != -EAGAIN) | 415 | err = tegra_smmu_as_prepare(smmu, as); |
890 | break; | 416 | if (err < 0) |
417 | return err; | ||
418 | |||
419 | tegra_smmu_enable(smmu, swgroup, as->id); | ||
420 | index++; | ||
891 | } | 421 | } |
892 | if (i == smmu->num_as) | ||
893 | dev_err(smmu->dev, "no free AS\n"); | ||
894 | return err; | ||
895 | 422 | ||
896 | found: | 423 | if (index == 0) |
897 | spin_lock_irqsave(&smmu->lock, flags); | 424 | return -ENODEV; |
898 | 425 | ||
899 | /* Update PDIR register */ | 426 | return 0; |
900 | smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID); | 427 | } |
901 | smmu_write(smmu, | ||
902 | SMMU_MK_PDIR(as->pdir_page, as->pdir_attr), SMMU_PTB_DATA); | ||
903 | FLUSH_SMMU_REGS(smmu); | ||
904 | 428 | ||
905 | spin_unlock_irqrestore(&smmu->lock, flags); | 429 | static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev) |
430 | { | ||
431 | struct tegra_smmu_as *as = domain->priv; | ||
432 | struct device_node *np = dev->of_node; | ||
433 | struct tegra_smmu *smmu = as->smmu; | ||
434 | struct of_phandle_args args; | ||
435 | unsigned int index = 0; | ||
906 | 436 | ||
907 | domain->priv = as; | 437 | while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index, |
438 | &args)) { | ||
439 | unsigned int swgroup = args.args[0]; | ||
908 | 440 | ||
909 | domain->geometry.aperture_start = smmu->iovmm_base; | 441 | if (args.np != smmu->dev->of_node) { |
910 | domain->geometry.aperture_end = smmu->iovmm_base + | 442 | of_node_put(args.np); |
911 | smmu->page_count * SMMU_PAGE_SIZE - 1; | 443 | continue; |
912 | domain->geometry.force_aperture = true; | 444 | } |
913 | 445 | ||
914 | dev_dbg(smmu->dev, "smmu_as@%p\n", as); | 446 | of_node_put(args.np); |
915 | 447 | ||
916 | return 0; | 448 | tegra_smmu_disable(smmu, swgroup, as->id); |
449 | tegra_smmu_as_unprepare(smmu, as); | ||
450 | index++; | ||
451 | } | ||
917 | } | 452 | } |
918 | 453 | ||
919 | static void smmu_iommu_domain_destroy(struct iommu_domain *domain) | 454 | static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova, |
455 | struct page **pagep) | ||
920 | { | 456 | { |
921 | struct smmu_as *as = domain->priv; | 457 | u32 *pd = page_address(as->pd), *pt, *count; |
922 | struct smmu_device *smmu = as->smmu; | 458 | u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff; |
923 | unsigned long flags; | 459 | u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff; |
460 | struct tegra_smmu *smmu = as->smmu; | ||
461 | struct page *page; | ||
462 | unsigned int i; | ||
463 | |||
464 | if (pd[pde] == 0) { | ||
465 | page = alloc_page(GFP_KERNEL | __GFP_DMA); | ||
466 | if (!page) | ||
467 | return NULL; | ||
924 | 468 | ||
925 | spin_lock_irqsave(&as->lock, flags); | 469 | pt = page_address(page); |
470 | SetPageReserved(page); | ||
926 | 471 | ||
927 | if (as->pdir_page) { | 472 | for (i = 0; i < SMMU_NUM_PTE; i++) |
928 | spin_lock(&smmu->lock); | 473 | pt[i] = 0; |
929 | smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID); | ||
930 | smmu_write(smmu, SMMU_PTB_DATA_RESET_VAL, SMMU_PTB_DATA); | ||
931 | FLUSH_SMMU_REGS(smmu); | ||
932 | spin_unlock(&smmu->lock); | ||
933 | 474 | ||
934 | free_pdir(as); | 475 | smmu->soc->ops->flush_dcache(page, 0, SMMU_SIZE_PT); |
935 | } | ||
936 | 476 | ||
937 | if (!list_empty(&as->client)) { | 477 | pd[pde] = SMMU_MK_PDE(page, SMMU_PDE_ATTR | SMMU_PDE_NEXT); |
938 | struct smmu_client *c; | ||
939 | 478 | ||
940 | list_for_each_entry(c, &as->client, list) | 479 | smmu->soc->ops->flush_dcache(as->pd, pde << 2, 4); |
941 | smmu_iommu_detach_dev(domain, c->dev); | 480 | smmu_flush_ptc(smmu, as->pd, pde << 2); |
481 | smmu_flush_tlb_section(smmu, as->id, iova); | ||
482 | smmu_flush(smmu); | ||
483 | } else { | ||
484 | page = pfn_to_page(pd[pde] & SMMU_PFN_MASK); | ||
485 | pt = page_address(page); | ||
942 | } | 486 | } |
943 | 487 | ||
944 | spin_unlock_irqrestore(&as->lock, flags); | 488 | *pagep = page; |
945 | 489 | ||
946 | domain->priv = NULL; | 490 | /* Keep track of entries in this page table. */ |
947 | dev_dbg(smmu->dev, "smmu_as@%p\n", as); | 491 | count = page_address(as->count); |
948 | } | 492 | if (pt[pte] == 0) |
493 | count[pde]++; | ||
949 | 494 | ||
950 | static const struct iommu_ops smmu_iommu_ops = { | 495 | return &pt[pte]; |
951 | .capable = smmu_iommu_capable, | 496 | } |
952 | .domain_init = smmu_iommu_domain_init, | ||
953 | .domain_destroy = smmu_iommu_domain_destroy, | ||
954 | .attach_dev = smmu_iommu_attach_dev, | ||
955 | .detach_dev = smmu_iommu_detach_dev, | ||
956 | .map = smmu_iommu_map, | ||
957 | .unmap = smmu_iommu_unmap, | ||
958 | .map_sg = default_iommu_map_sg, | ||
959 | .iova_to_phys = smmu_iommu_iova_to_phys, | ||
960 | .pgsize_bitmap = SMMU_IOMMU_PGSIZES, | ||
961 | }; | ||
962 | |||
963 | /* Should be in the order of enum */ | ||
964 | static const char * const smmu_debugfs_mc[] = { "mc", }; | ||
965 | static const char * const smmu_debugfs_cache[] = { "tlb", "ptc", }; | ||
966 | 497 | ||
967 | static ssize_t smmu_debugfs_stats_write(struct file *file, | 498 | static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova) |
968 | const char __user *buffer, | ||
969 | size_t count, loff_t *pos) | ||
970 | { | 499 | { |
971 | struct smmu_debugfs_info *info; | 500 | u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff; |
972 | struct smmu_device *smmu; | 501 | u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff; |
973 | int i; | 502 | u32 *count = page_address(as->count); |
974 | enum { | 503 | u32 *pd = page_address(as->pd), *pt; |
975 | _OFF = 0, | 504 | struct page *page; |
976 | _ON, | ||
977 | _RESET, | ||
978 | }; | ||
979 | const char * const command[] = { | ||
980 | [_OFF] = "off", | ||
981 | [_ON] = "on", | ||
982 | [_RESET] = "reset", | ||
983 | }; | ||
984 | char str[] = "reset"; | ||
985 | u32 val; | ||
986 | size_t offs; | ||
987 | 505 | ||
988 | count = min_t(size_t, count, sizeof(str)); | 506 | page = pfn_to_page(pd[pde] & SMMU_PFN_MASK); |
989 | if (copy_from_user(str, buffer, count)) | 507 | pt = page_address(page); |
990 | return -EINVAL; | ||
991 | 508 | ||
992 | for (i = 0; i < ARRAY_SIZE(command); i++) | 509 | /* |
993 | if (strncmp(str, command[i], | 510 | * When no entries in this page table are used anymore, return the |
994 | strlen(command[i])) == 0) | 511 | * memory page to the system. |
995 | break; | 512 | */ |
513 | if (pt[pte] != 0) { | ||
514 | if (--count[pde] == 0) { | ||
515 | ClearPageReserved(page); | ||
516 | __free_page(page); | ||
517 | pd[pde] = 0; | ||
518 | } | ||
996 | 519 | ||
997 | if (i == ARRAY_SIZE(command)) | 520 | pt[pte] = 0; |
998 | return -EINVAL; | ||
999 | |||
1000 | info = file_inode(file)->i_private; | ||
1001 | smmu = info->smmu; | ||
1002 | |||
1003 | offs = SMMU_CACHE_CONFIG(info->cache); | ||
1004 | val = smmu_read(smmu, offs); | ||
1005 | switch (i) { | ||
1006 | case _OFF: | ||
1007 | val &= ~SMMU_CACHE_CONFIG_STATS_ENABLE; | ||
1008 | val &= ~SMMU_CACHE_CONFIG_STATS_TEST; | ||
1009 | smmu_write(smmu, val, offs); | ||
1010 | break; | ||
1011 | case _ON: | ||
1012 | val |= SMMU_CACHE_CONFIG_STATS_ENABLE; | ||
1013 | val &= ~SMMU_CACHE_CONFIG_STATS_TEST; | ||
1014 | smmu_write(smmu, val, offs); | ||
1015 | break; | ||
1016 | case _RESET: | ||
1017 | val |= SMMU_CACHE_CONFIG_STATS_TEST; | ||
1018 | smmu_write(smmu, val, offs); | ||
1019 | val &= ~SMMU_CACHE_CONFIG_STATS_TEST; | ||
1020 | smmu_write(smmu, val, offs); | ||
1021 | break; | ||
1022 | default: | ||
1023 | BUG(); | ||
1024 | break; | ||
1025 | } | 521 | } |
1026 | |||
1027 | dev_dbg(smmu->dev, "%s() %08x, %08x @%08x\n", __func__, | ||
1028 | val, smmu_read(smmu, offs), offs); | ||
1029 | |||
1030 | return count; | ||
1031 | } | 522 | } |
1032 | 523 | ||
1033 | static int smmu_debugfs_stats_show(struct seq_file *s, void *v) | 524 | static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova, |
525 | phys_addr_t paddr, size_t size, int prot) | ||
1034 | { | 526 | { |
1035 | struct smmu_debugfs_info *info = s->private; | 527 | struct tegra_smmu_as *as = domain->priv; |
1036 | struct smmu_device *smmu = info->smmu; | 528 | struct tegra_smmu *smmu = as->smmu; |
1037 | int i; | 529 | unsigned long offset; |
1038 | const char * const stats[] = { "hit", "miss", }; | 530 | struct page *page; |
531 | u32 *pte; | ||
1039 | 532 | ||
533 | pte = as_get_pte(as, iova, &page); | ||
534 | if (!pte) | ||
535 | return -ENOMEM; | ||
1040 | 536 | ||
1041 | for (i = 0; i < ARRAY_SIZE(stats); i++) { | 537 | *pte = __phys_to_pfn(paddr) | SMMU_PTE_ATTR; |
1042 | u32 val; | 538 | offset = offset_in_page(pte); |
1043 | size_t offs; | ||
1044 | 539 | ||
1045 | offs = SMMU_STATS_CACHE_COUNT(info->mc, info->cache, i); | 540 | smmu->soc->ops->flush_dcache(page, offset, 4); |
1046 | val = smmu_read(smmu, offs); | 541 | smmu_flush_ptc(smmu, page, offset); |
1047 | seq_printf(s, "%s:%08x ", stats[i], val); | 542 | smmu_flush_tlb_group(smmu, as->id, iova); |
543 | smmu_flush(smmu); | ||
1048 | 544 | ||
1049 | dev_dbg(smmu->dev, "%s() %s %08x @%08x\n", __func__, | ||
1050 | stats[i], val, offs); | ||
1051 | } | ||
1052 | seq_printf(s, "\n"); | ||
1053 | return 0; | 545 | return 0; |
1054 | } | 546 | } |
1055 | 547 | ||
1056 | static int smmu_debugfs_stats_open(struct inode *inode, struct file *file) | 548 | static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova, |
549 | size_t size) | ||
1057 | { | 550 | { |
1058 | return single_open(file, smmu_debugfs_stats_show, inode->i_private); | 551 | struct tegra_smmu_as *as = domain->priv; |
1059 | } | 552 | struct tegra_smmu *smmu = as->smmu; |
553 | unsigned long offset; | ||
554 | struct page *page; | ||
555 | u32 *pte; | ||
1060 | 556 | ||
1061 | static const struct file_operations smmu_debugfs_stats_fops = { | 557 | pte = as_get_pte(as, iova, &page); |
1062 | .open = smmu_debugfs_stats_open, | 558 | if (!pte) |
1063 | .read = seq_read, | 559 | return 0; |
1064 | .llseek = seq_lseek, | ||
1065 | .release = single_release, | ||
1066 | .write = smmu_debugfs_stats_write, | ||
1067 | }; | ||
1068 | 560 | ||
1069 | static void smmu_debugfs_delete(struct smmu_device *smmu) | 561 | offset = offset_in_page(pte); |
1070 | { | 562 | as_put_pte(as, iova); |
1071 | debugfs_remove_recursive(smmu->debugfs_root); | 563 | |
1072 | kfree(smmu->debugfs_info); | 564 | smmu->soc->ops->flush_dcache(page, offset, 4); |
565 | smmu_flush_ptc(smmu, page, offset); | ||
566 | smmu_flush_tlb_group(smmu, as->id, iova); | ||
567 | smmu_flush(smmu); | ||
568 | |||
569 | return size; | ||
1073 | } | 570 | } |
1074 | 571 | ||
1075 | static void smmu_debugfs_create(struct smmu_device *smmu) | 572 | static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain, |
573 | dma_addr_t iova) | ||
1076 | { | 574 | { |
1077 | int i; | 575 | struct tegra_smmu_as *as = domain->priv; |
1078 | size_t bytes; | 576 | struct page *page; |
1079 | struct dentry *root; | 577 | unsigned long pfn; |
1080 | 578 | u32 *pte; | |
1081 | bytes = ARRAY_SIZE(smmu_debugfs_mc) * ARRAY_SIZE(smmu_debugfs_cache) * | ||
1082 | sizeof(*smmu->debugfs_info); | ||
1083 | smmu->debugfs_info = kmalloc(bytes, GFP_KERNEL); | ||
1084 | if (!smmu->debugfs_info) | ||
1085 | return; | ||
1086 | |||
1087 | root = debugfs_create_dir(dev_name(smmu->dev), NULL); | ||
1088 | if (!root) | ||
1089 | goto err_out; | ||
1090 | smmu->debugfs_root = root; | ||
1091 | |||
1092 | for (i = 0; i < ARRAY_SIZE(smmu_debugfs_mc); i++) { | ||
1093 | int j; | ||
1094 | struct dentry *mc; | ||
1095 | |||
1096 | mc = debugfs_create_dir(smmu_debugfs_mc[i], root); | ||
1097 | if (!mc) | ||
1098 | goto err_out; | ||
1099 | |||
1100 | for (j = 0; j < ARRAY_SIZE(smmu_debugfs_cache); j++) { | ||
1101 | struct dentry *cache; | ||
1102 | struct smmu_debugfs_info *info; | ||
1103 | |||
1104 | info = smmu->debugfs_info; | ||
1105 | info += i * ARRAY_SIZE(smmu_debugfs_mc) + j; | ||
1106 | info->smmu = smmu; | ||
1107 | info->mc = i; | ||
1108 | info->cache = j; | ||
1109 | |||
1110 | cache = debugfs_create_file(smmu_debugfs_cache[j], | ||
1111 | S_IWUGO | S_IRUGO, mc, | ||
1112 | (void *)info, | ||
1113 | &smmu_debugfs_stats_fops); | ||
1114 | if (!cache) | ||
1115 | goto err_out; | ||
1116 | } | ||
1117 | } | ||
1118 | 579 | ||
1119 | return; | 580 | pte = as_get_pte(as, iova, &page); |
581 | pfn = *pte & SMMU_PFN_MASK; | ||
1120 | 582 | ||
1121 | err_out: | 583 | return PFN_PHYS(pfn); |
1122 | smmu_debugfs_delete(smmu); | ||
1123 | } | 584 | } |
1124 | 585 | ||
1125 | static int tegra_smmu_suspend(struct device *dev) | 586 | static struct tegra_smmu *tegra_smmu_find(struct device_node *np) |
1126 | { | 587 | { |
1127 | struct smmu_device *smmu = dev_get_drvdata(dev); | 588 | struct platform_device *pdev; |
589 | struct tegra_mc *mc; | ||
1128 | 590 | ||
1129 | smmu->translation_enable_0 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_0); | 591 | pdev = of_find_device_by_node(np); |
1130 | smmu->translation_enable_1 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_1); | 592 | if (!pdev) |
1131 | smmu->translation_enable_2 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_2); | 593 | return NULL; |
1132 | smmu->asid_security = smmu_read(smmu, SMMU_ASID_SECURITY); | 594 | |
1133 | return 0; | 595 | mc = platform_get_drvdata(pdev); |
596 | if (!mc) | ||
597 | return NULL; | ||
598 | |||
599 | return mc->smmu; | ||
1134 | } | 600 | } |
1135 | 601 | ||
1136 | static int tegra_smmu_resume(struct device *dev) | 602 | static int tegra_smmu_add_device(struct device *dev) |
1137 | { | 603 | { |
1138 | struct smmu_device *smmu = dev_get_drvdata(dev); | 604 | struct device_node *np = dev->of_node; |
1139 | unsigned long flags; | 605 | struct of_phandle_args args; |
1140 | int err; | 606 | unsigned int index = 0; |
1141 | 607 | ||
1142 | spin_lock_irqsave(&smmu->lock, flags); | 608 | while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index, |
1143 | err = smmu_setup_regs(smmu); | 609 | &args) == 0) { |
1144 | spin_unlock_irqrestore(&smmu->lock, flags); | 610 | struct tegra_smmu *smmu; |
1145 | return err; | 611 | |
612 | smmu = tegra_smmu_find(args.np); | ||
613 | if (smmu) { | ||
614 | /* | ||
615 | * Only a single IOMMU master interface is currently | ||
616 | * supported by the Linux kernel, so abort after the | ||
617 | * first match. | ||
618 | */ | ||
619 | dev->archdata.iommu = smmu; | ||
620 | break; | ||
621 | } | ||
622 | |||
623 | index++; | ||
624 | } | ||
625 | |||
626 | return 0; | ||
1146 | } | 627 | } |
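
The loop in tegra_smmu_add_device() consumes the standard iommus device-tree property, one specifier per iteration. A sketch of the idiom follows; the DT fragment in the comment is hypothetical (the node name, unit address, and TEGRA_SWGROUP_DC value are assumptions, not taken from this patch), and unlike the driver code above the sketch drops the node reference it receives.

/*
 * Hypothetical consumer node:
 *
 *     display@54200000 {
 *             iommus = <&smmu TEGRA_SWGROUP_DC>;
 *     };
 *
 * with #iommu-cells = <1> in the smmu node, so each specifier carries
 * one extra cell identifying the master.
 */
#include <linux/of.h>

static unsigned int example_count_iommu_masters(struct device_node *np)
{
        struct of_phandle_args args;
        unsigned int index = 0;

        while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells",
                                          index, &args) == 0) {
                /* args.np is the IOMMU node, args.args[0] the master ID. */
                of_node_put(args.np);
                index++;
        }

        return index;
}
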
1147 | 628 | ||
1148 | static int tegra_smmu_probe(struct platform_device *pdev) | 629 | static void tegra_smmu_remove_device(struct device *dev) |
1149 | { | 630 | { |
1150 | struct smmu_device *smmu; | 631 | dev->archdata.iommu = NULL; |
1151 | struct device *dev = &pdev->dev; | 632 | } |
1152 | int i, asids, err = 0; | ||
1153 | dma_addr_t uninitialized_var(base); | ||
1154 | size_t bytes, uninitialized_var(size); | ||
1155 | 633 | ||
1156 | if (smmu_handle) | 634 | static const struct iommu_ops tegra_smmu_ops = { |
1157 | return -EIO; | 635 | .capable = tegra_smmu_capable, |
636 | .domain_init = tegra_smmu_domain_init, | ||
637 | .domain_destroy = tegra_smmu_domain_destroy, | ||
638 | .attach_dev = tegra_smmu_attach_dev, | ||
639 | .detach_dev = tegra_smmu_detach_dev, | ||
640 | .add_device = tegra_smmu_add_device, | ||
641 | .remove_device = tegra_smmu_remove_device, | ||
642 | .map = tegra_smmu_map, | ||
643 | .unmap = tegra_smmu_unmap, | ||
644 | .map_sg = default_iommu_map_sg, | ||
645 | .iova_to_phys = tegra_smmu_iova_to_phys, | ||
1158 | 646 | ||
1159 | BUILD_BUG_ON(PAGE_SHIFT != SMMU_PAGE_SHIFT); | 647 | .pgsize_bitmap = SZ_4K, |
648 | }; | ||
1160 | 649 | ||
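
With .pgsize_bitmap set to SZ_4K, the IOMMU core will only ever ask this driver for 4 KiB mappings. A minimal sketch of driving the ops table through the generic API of this kernel generation; names and addresses are placeholders, and teardown via iommu_detach_device()/iommu_domain_free() is left to the caller:

#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

static int example_map_one(struct device *dev, dma_addr_t iova,
                           phys_addr_t phys)
{
        struct iommu_domain *domain;
        int err;

        /* Routed to tegra_smmu_domain_init() via the ops above. */
        domain = iommu_domain_alloc(&platform_bus_type);
        if (!domain)
                return -ENOMEM;

        err = iommu_attach_device(domain, dev);
        if (err < 0) {
                iommu_domain_free(domain);
                return err;
        }

        /* 4 KiB is the only granule advertised in pgsize_bitmap. */
        return iommu_map(domain, iova, phys, SZ_4K,
                         IOMMU_READ | IOMMU_WRITE);
}
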
1161 | if (of_property_read_u32(dev->of_node, "nvidia,#asids", &asids)) | 650 | static void tegra_smmu_ahb_enable(void) |
1162 | return -ENODEV; | 651 | { |
652 | static const struct of_device_id ahb_match[] = { | ||
653 | { .compatible = "nvidia,tegra30-ahb", }, | ||
654 | { } | ||
655 | }; | ||
656 | struct device_node *ahb; | ||
1163 | 657 | ||
1164 | bytes = sizeof(*smmu) + asids * sizeof(*smmu->as); | 658 | ahb = of_find_matching_node(NULL, ahb_match); |
1165 | smmu = devm_kzalloc(dev, bytes, GFP_KERNEL); | 659 | if (ahb) { |
1166 | if (!smmu) { | 660 | tegra_ahb_enable_smmu(ahb); |
1167 | dev_err(dev, "failed to allocate smmu_device\n"); | 661 | of_node_put(ahb); |
1168 | return -ENOMEM; | ||
1169 | } | 662 | } |
663 | } | ||
1170 | 664 | ||
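
tegra_smmu_ahb_enable() follows the usual OF lookup discipline: of_find_matching_node() returns a node with an elevated reference count, which must be dropped with of_node_put() once the node has been used. The same pattern in isolation, with a hypothetical compatible string:

#include <linux/of.h>

static void example_find_and_use(void)
{
        static const struct of_device_id match[] = {
                { .compatible = "vendor,example-block", }, /* hypothetical */
                { }
        };
        struct device_node *np;

        np = of_find_matching_node(NULL, match);
        if (np) {
                /* ... operate on np ... */
                of_node_put(np); /* drop the lookup's reference */
        }
}
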
1171 | smmu->nregs = pdev->num_resources; | 665 | struct tegra_smmu *tegra_smmu_probe(struct device *dev, |
1172 | smmu->regs = devm_kzalloc(dev, 2 * smmu->nregs * sizeof(*smmu->regs), | 666 | const struct tegra_smmu_soc *soc, |
1173 | GFP_KERNEL); | 667 | struct tegra_mc *mc) |
1174 | smmu->rege = smmu->regs + smmu->nregs; | 668 | { |
1175 | if (!smmu->regs) | 669 | struct tegra_smmu *smmu; |
1176 | return -ENOMEM; | 670 | size_t size; |
1177 | for (i = 0; i < smmu->nregs; i++) { | 671 | u32 value; |
1178 | struct resource *res; | 672 | int err; |
1179 | |||
1180 | res = platform_get_resource(pdev, IORESOURCE_MEM, i); | ||
1181 | smmu->regs[i] = devm_ioremap_resource(&pdev->dev, res); | ||
1182 | if (IS_ERR(smmu->regs[i])) | ||
1183 | return PTR_ERR(smmu->regs[i]); | ||
1184 | smmu->rege[i] = smmu->regs[i] + resource_size(res) - 1; | ||
1185 | } | ||
1186 | /* Same as the start address of the first "mc" register block */ | ||
1187 | smmu->regbase = (void __iomem *)((u32)smmu->regs[0] & PAGE_MASK); | ||
1188 | 673 | ||
1189 | err = of_get_dma_window(dev->of_node, NULL, 0, NULL, &base, &size); | 674 | /* This can happen on Tegra20 which doesn't have an SMMU */ |
1190 | if (err) | 675 | if (!soc) |
1191 | return -ENODEV; | 676 | return NULL; |
1192 | 677 | ||
1193 | if (size & SMMU_PAGE_MASK) | 678 | smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); |
1194 | return -EINVAL; | 679 | if (!smmu) |
680 | return ERR_PTR(-ENOMEM); | ||
1195 | 681 | ||
1196 | size >>= SMMU_PAGE_SHIFT; | 682 | /* |
1197 | if (!size) | 683 | * This is a bit of a hack. Ideally we'd want to simply return this |
1198 | return -EINVAL; | 684 | * value. However, the IOMMU registration process will attempt to add |
685 | * all devices to the IOMMU when bus_set_iommu() is called. In order | ||
686 | * not to rely on global variables to track the IOMMU instance, we | ||
687 | * set it here so that it can be looked up from the .add_device() | ||
688 | * callback via the IOMMU device's .drvdata field. | ||
689 | */ | ||
690 | mc->smmu = smmu; | ||
1199 | 691 | ||
1200 | smmu->ahb = of_parse_phandle(dev->of_node, "nvidia,ahb", 0); | 692 | size = BITS_TO_LONGS(soc->num_asids) * sizeof(long); |
1201 | if (!smmu->ahb) | ||
1202 | return -ENODEV; | ||
1203 | 693 | ||
1204 | smmu->dev = dev; | 694 | smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL); |
1205 | smmu->num_as = asids; | 695 | if (!smmu->asids) |
1206 | smmu->iovmm_base = base; | 696 | return ERR_PTR(-ENOMEM); |
1207 | smmu->page_count = size; | ||
1208 | |||
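
The ASID pool on the new side is a bare bitmap of BITS_TO_LONGS(soc->num_asids) longs; for a hypothetical SoC with 128 ASIDs on a 64-bit kernel that is 2 longs, i.e. a 16-byte allocation. The driver's own allocation path lies outside this hunk, so the helper below is illustrative only, showing how such a bitmap typically hands out IDs:

#include <linux/bitmap.h>
#include <linux/errno.h>

static int example_alloc_asid(unsigned long *asids, unsigned int num_asids)
{
        unsigned int id = find_first_zero_bit(asids, num_asids);

        if (id >= num_asids)
                return -ENOSPC; /* every ASID is in use */

        set_bit(id, asids); /* claim it */
        return id;
}
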
1209 | smmu->translation_enable_0 = ~0; | ||
1210 | smmu->translation_enable_1 = ~0; | ||
1211 | smmu->translation_enable_2 = ~0; | ||
1212 | smmu->asid_security = 0; | ||
1213 | |||
1214 | for (i = 0; i < smmu->num_as; i++) { | ||
1215 | struct smmu_as *as = &smmu->as[i]; | ||
1216 | |||
1217 | as->smmu = smmu; | ||
1218 | as->asid = i; | ||
1219 | as->pdir_attr = _PDIR_ATTR; | ||
1220 | as->pde_attr = _PDE_ATTR; | ||
1221 | as->pte_attr = _PTE_ATTR; | ||
1222 | |||
1223 | spin_lock_init(&as->lock); | ||
1224 | spin_lock_init(&as->client_lock); | ||
1225 | INIT_LIST_HEAD(&as->client); | ||
1226 | } | ||
1227 | spin_lock_init(&smmu->lock); | ||
1228 | err = smmu_setup_regs(smmu); | ||
1229 | if (err) | ||
1230 | return err; | ||
1231 | platform_set_drvdata(pdev, smmu); | ||
1232 | 697 | ||
1233 | smmu->avp_vector_page = alloc_page(GFP_KERNEL); | 698 | mutex_init(&smmu->lock); |
1234 | if (!smmu->avp_vector_page) | ||
1235 | return -ENOMEM; | ||
1236 | 699 | ||
1237 | smmu_debugfs_create(smmu); | 700 | smmu->regs = mc->regs; |
1238 | smmu_handle = smmu; | 701 | smmu->soc = soc; |
1239 | bus_set_iommu(&platform_bus_type, &smmu_iommu_ops); | 702 | smmu->dev = dev; |
1240 | return 0; | 703 | smmu->mc = mc; |
1241 | } | ||
1242 | 704 | ||
1243 | static int tegra_smmu_remove(struct platform_device *pdev) | 705 | value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f); |
1244 | { | ||
1245 | struct smmu_device *smmu = platform_get_drvdata(pdev); | ||
1246 | int i; | ||
1247 | 706 | ||
1248 | smmu_debugfs_delete(smmu); | 707 | if (soc->supports_request_limit) |
708 | value |= SMMU_PTC_CONFIG_REQ_LIMIT(8); | ||
1249 | 709 | ||
1250 | smmu_write(smmu, SMMU_CONFIG_DISABLE, SMMU_CONFIG); | 710 | smmu_writel(smmu, value, SMMU_PTC_CONFIG); |
1251 | for (i = 0; i < smmu->num_as; i++) | ||
1252 | free_pdir(&smmu->as[i]); | ||
1253 | __free_page(smmu->avp_vector_page); | ||
1254 | smmu_handle = NULL; | ||
1255 | return 0; | ||
1256 | } | ||
1257 | 711 | ||
1258 | static const struct dev_pm_ops tegra_smmu_pm_ops = { | 712 | value = SMMU_TLB_CONFIG_HIT_UNDER_MISS | |
1259 | .suspend = tegra_smmu_suspend, | 713 | SMMU_TLB_CONFIG_ACTIVE_LINES(0x20); |
1260 | .resume = tegra_smmu_resume, | ||
1261 | }; | ||
1262 | 714 | ||
1263 | static const struct of_device_id tegra_smmu_of_match[] = { | 715 | if (soc->supports_round_robin_arbitration) |
1264 | { .compatible = "nvidia,tegra30-smmu", }, | 716 | value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION; |
1265 | { }, | ||
1266 | }; | ||
1267 | MODULE_DEVICE_TABLE(of, tegra_smmu_of_match); | ||
1268 | |||
1269 | static struct platform_driver tegra_smmu_driver = { | ||
1270 | .probe = tegra_smmu_probe, | ||
1271 | .remove = tegra_smmu_remove, | ||
1272 | .driver = { | ||
1273 | .owner = THIS_MODULE, | ||
1274 | .name = "tegra-smmu", | ||
1275 | .pm = &tegra_smmu_pm_ops, | ||
1276 | .of_match_table = tegra_smmu_of_match, | ||
1277 | }, | ||
1278 | }; | ||
1279 | 717 | ||
1280 | static int tegra_smmu_init(void) | 718 | smmu_writel(smmu, value, SMMU_TLB_CONFIG); |
1281 | { | ||
1282 | return platform_driver_register(&tegra_smmu_driver); | ||
1283 | } | ||
1284 | 719 | ||
1285 | static void __exit tegra_smmu_exit(void) | 720 | smmu_flush_ptc(smmu, NULL, 0); |
1286 | { | 721 | smmu_flush_tlb(smmu); |
1287 | platform_driver_unregister(&tegra_smmu_driver); | 722 | smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG); |
1288 | } | 723 | smmu_flush(smmu); |
724 | |||
725 | tegra_smmu_ahb_enable(); | ||
1289 | 726 | ||
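
The bring-up order on the new side is deliberate: both caches are invalidated while translation is still disabled, SMMU_CONFIG_ENABLE is written, and the final smmu_flush() read-back ensures the posted MMIO writes have reached the hardware before the AHB is switched over to the SMMU path. The read-back idiom in isolation; register names and offsets here are placeholders, not this driver's:

#include <linux/io.h>

static void example_enable_block(void __iomem *regs, u32 enable,
                                 unsigned long ctrl)
{
        writel(enable, regs + ctrl);
        /*
         * MMIO writes may be posted; reading any register back from the
         * same device guarantees they have landed before the CPU moves on.
         */
        readl(regs + ctrl);
}
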
1290 | subsys_initcall(tegra_smmu_init); | 727 | err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops); |
1291 | module_exit(tegra_smmu_exit); | 728 | if (err < 0) |
729 | return ERR_PTR(err); | ||
1292 | 730 | ||
1293 | MODULE_DESCRIPTION("IOMMU API for SMMU in Tegra30"); | 731 | return smmu; |
1294 | MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>"); | 732 | } |
1295 | MODULE_ALIAS("platform:tegra-smmu"); | ||
1296 | MODULE_LICENSE("GPL v2"); | ||