diff options
Diffstat (limited to 'arch/arm/mach-tegra/iovmm-smmu.c')
-rw-r--r-- | arch/arm/mach-tegra/iovmm-smmu.c | 1350 |
1 files changed, 1350 insertions, 0 deletions
diff --git a/arch/arm/mach-tegra/iovmm-smmu.c b/arch/arm/mach-tegra/iovmm-smmu.c new file mode 100644 index 00000000000..170afd15f1c --- /dev/null +++ b/arch/arm/mach-tegra/iovmm-smmu.c | |||
@@ -0,0 +1,1350 @@ | |||
1 | /* | ||
2 | * arch/arm/mach-tegra/iovmm-smmu.c | ||
3 | * | ||
4 | * Tegra I/O VMM implementation for SMMU devices for Tegra 3 series | ||
5 | * systems-on-a-chip. | ||
6 | * | ||
7 | * Copyright (c) 2010-2012, NVIDIA CORPORATION. All rights reserved. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2 of the License, or | ||
12 | * (at your option) any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
15 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
17 | * more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License along | ||
20 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
21 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
22 | */ | ||
23 | |||
24 | #include <linux/module.h> | ||
25 | #include <linux/platform_device.h> | ||
26 | #include <linux/spinlock.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/vmalloc.h> | ||
29 | #include <linux/mm.h> | ||
30 | #include <linux/pagemap.h> | ||
31 | #include <linux/sysfs.h> | ||
32 | #include <linux/device.h> | ||
33 | #include <linux/sched.h> | ||
34 | #include <linux/io.h> | ||
35 | |||
36 | #include <asm/page.h> | ||
37 | #include <asm/cacheflush.h> | ||
38 | |||
39 | #include <mach/iovmm.h> | ||
40 | #include <mach/iomap.h> | ||
41 | #include <mach/tegra_smmu.h> | ||
42 | |||
43 | #ifndef CONFIG_ARCH_TEGRA_2x_SOC | ||
44 | /* | ||
45 | * ALL-CAP macros copied from armc.h | ||
46 | */ | ||
47 | #define MC_SMMU_CONFIG_0 0x10 | ||
48 | #define MC_SMMU_CONFIG_0_SMMU_ENABLE_DISABLE 0 | ||
49 | #define MC_SMMU_CONFIG_0_SMMU_ENABLE_ENABLE 1 | ||
50 | |||
51 | #define MC_SMMU_TLB_CONFIG_0 0x14 | ||
52 | #define MC_SMMU_TLB_CONFIG_0_TLB_STATS__MASK (1 << 31) | ||
53 | #define MC_SMMU_TLB_CONFIG_0_TLB_STATS__ENABLE (1 << 31) | ||
54 | #define MC_SMMU_TLB_CONFIG_0_TLB_HIT_UNDER_MISS__ENABLE (1 << 29) | ||
55 | #define MC_SMMU_TLB_CONFIG_0_TLB_ACTIVE_LINES__VALUE 0x10 | ||
56 | #define MC_SMMU_TLB_CONFIG_0_RESET_VAL 0x20000010 | ||
57 | |||
58 | #define MC_SMMU_PTC_CONFIG_0 0x18 | ||
59 | #define MC_SMMU_PTC_CONFIG_0_PTC_STATS__MASK (1 << 31) | ||
60 | #define MC_SMMU_PTC_CONFIG_0_PTC_STATS__ENABLE (1 << 31) | ||
61 | #define MC_SMMU_PTC_CONFIG_0_PTC_CACHE__ENABLE (1 << 29) | ||
62 | #define MC_SMMU_PTC_CONFIG_0_PTC_INDEX_MAP__PATTERN 0x3f | ||
63 | #define MC_SMMU_PTC_CONFIG_0_RESET_VAL 0x2000003f | ||
64 | |||
65 | #define MC_SMMU_PTB_ASID_0 0x1c | ||
66 | #define MC_SMMU_PTB_ASID_0_CURRENT_ASID_SHIFT 0 | ||
67 | |||
68 | #define MC_SMMU_PTB_DATA_0 0x20 | ||
69 | #define MC_SMMU_PTB_DATA_0_RESET_VAL 0 | ||
70 | #define MC_SMMU_PTB_DATA_0_ASID_NONSECURE_SHIFT 29 | ||
71 | #define MC_SMMU_PTB_DATA_0_ASID_WRITABLE_SHIFT 30 | ||
72 | #define MC_SMMU_PTB_DATA_0_ASID_READABLE_SHIFT 31 | ||
73 | |||
74 | #define MC_SMMU_TLB_FLUSH_0 0x30 | ||
75 | #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_ALL 0 | ||
76 | #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_SECTION 2 | ||
77 | #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_GROUP 3 | ||
78 | #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_SHIFT 29 | ||
79 | #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_DISABLE 0 | ||
80 | #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_ENABLE 1 | ||
81 | #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_SHIFT 31 | ||
82 | |||
83 | #define MC_SMMU_PTC_FLUSH_0 0x34 | ||
84 | #define MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_TYPE_ALL 0 | ||
85 | #define MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_TYPE_ADR 1 | ||
86 | #define MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_ADR_SHIFT 4 | ||
87 | |||
88 | #define MC_SMMU_ASID_SECURITY_0 0x38 | ||
89 | |||
90 | #define MC_SMMU_STATS_TLB_HIT_COUNT_0 0x1f0 | ||
91 | #define MC_SMMU_STATS_TLB_MISS_COUNT_0 0x1f4 | ||
92 | #define MC_SMMU_STATS_PTC_HIT_COUNT_0 0x1f8 | ||
93 | #define MC_SMMU_STATS_PTC_MISS_COUNT_0 0x1fc | ||
94 | |||
95 | #define MC_SMMU_TRANSLATION_ENABLE_0_0 0x228 | ||
96 | #define MC_SMMU_TRANSLATION_ENABLE_1_0 0x22c | ||
97 | #define MC_SMMU_TRANSLATION_ENABLE_2_0 0x230 | ||
98 | |||
99 | #define MC_SMMU_AFI_ASID_0 0x238 /* PCIE */ | ||
100 | #define MC_SMMU_AVPC_ASID_0 0x23c /* AVP */ | ||
101 | #define MC_SMMU_DC_ASID_0 0x240 /* Display controller */ | ||
102 | #define MC_SMMU_DCB_ASID_0 0x244 /* Display controller B */ | ||
103 | #define MC_SMMU_EPP_ASID_0 0x248 /* Encoder pre-processor */ | ||
104 | #define MC_SMMU_G2_ASID_0 0x24c /* 2D engine */ | ||
105 | #define MC_SMMU_HC_ASID_0 0x250 /* Host1x */ | ||
106 | #define MC_SMMU_HDA_ASID_0 0x254 /* High-def audio */ | ||
107 | #define MC_SMMU_ISP_ASID_0 0x258 /* Image signal processor */ | ||
108 | #define MC_SMMU_MPE_ASID_0 0x264 /* MPEG encoder */ | ||
109 | #define MC_SMMU_NV_ASID_0 0x268 /* (3D) */ | ||
110 | #define MC_SMMU_NV2_ASID_0 0x26c /* (3D) */ | ||
111 | #define MC_SMMU_PPCS_ASID_0 0x270 /* AHB */ | ||
112 | #define MC_SMMU_SATA_ASID_0 0x278 /* SATA */ | ||
113 | #define MC_SMMU_VDE_ASID_0 0x27c /* Video decoder */ | ||
114 | #define MC_SMMU_VI_ASID_0 0x280 /* Video input */ | ||
115 | |||
116 | #define SMMU_PDE_NEXT_SHIFT 28 | ||
117 | |||
118 | /* Copied from arahb_arbc.h */ | ||
119 | #define AHB_ARBITRATION_XBAR_CTRL_0 0xe0 | ||
120 | #define AHB_ARBITRATION_XBAR_CTRL_0_SMMU_INIT_DONE_DONE 1 | ||
121 | #define AHB_ARBITRATION_XBAR_CTRL_0_SMMU_INIT_DONE_SHIFT 17 | ||
122 | |||
123 | #endif | ||
124 | |||
125 | #define MC_SMMU_NUM_ASIDS 4 | ||
126 | #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_SECTION__MASK 0xffc00000 | ||
127 | #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_SECTION__SHIFT 12 /* right shift */ | ||
128 | #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_GROUP__MASK 0xffffc000 | ||
129 | #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_GROUP__SHIFT 12 /* right shift */ | ||
130 | #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA(iova, which) \ | ||
131 | ((((iova) & MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_##which##__MASK) >> \ | ||
132 | MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_##which##__SHIFT) | \ | ||
133 | MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_##which) | ||
134 | #define MC_SMMU_PTB_ASID_0_CURRENT_ASID(n) \ | ||
135 | ((n) << MC_SMMU_PTB_ASID_0_CURRENT_ASID_SHIFT) | ||
136 | #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_disable \ | ||
137 | (MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_DISABLE << \ | ||
138 | MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_SHIFT) | ||
139 | #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH__ENABLE \ | ||
140 | (MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_ENABLE << \ | ||
141 | MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_SHIFT) | ||
142 | |||
143 | #define VMM_NAME "iovmm-smmu" | ||
144 | #define DRIVER_NAME "tegra_smmu" | ||
145 | |||
146 | #define SMMU_PAGE_SHIFT 12 | ||
147 | #define SMMU_PAGE_SIZE (1 << SMMU_PAGE_SHIFT) | ||
148 | |||
149 | #define SMMU_PDIR_COUNT 1024 | ||
150 | #define SMMU_PDIR_SIZE (sizeof(unsigned long) * SMMU_PDIR_COUNT) | ||
151 | #define SMMU_PTBL_COUNT 1024 | ||
152 | #define SMMU_PTBL_SIZE (sizeof(unsigned long) * SMMU_PTBL_COUNT) | ||
153 | #define SMMU_PDIR_SHIFT 12 | ||
154 | #define SMMU_PDE_SHIFT 12 | ||
155 | #define SMMU_PTE_SHIFT 12 | ||
156 | #define SMMU_PFN_MASK 0x000fffff | ||
157 | |||
/* Convert an I/O virtual address to a page frame number (4KB pages) */
#define SMMU_ADDR_TO_PFN(addr)	((addr) >> 12)
/* Convert an I/O virtual address to a page directory entry number (4MB) */
#define SMMU_ADDR_TO_PDN(addr)	((addr) >> 22)
/*
 * Convert a page directory entry number back to the base address of
 * its 4MB section.  The parameter was misnamed "addr" while the body
 * referenced "pdn"; it only compiled because the sole caller happened
 * to pass a variable literally named pdn.
 */
#define SMMU_PDN_TO_ADDR(pdn)	((pdn) << 22)
161 | |||
162 | #define _READABLE (1 << MC_SMMU_PTB_DATA_0_ASID_READABLE_SHIFT) | ||
163 | #define _WRITABLE (1 << MC_SMMU_PTB_DATA_0_ASID_WRITABLE_SHIFT) | ||
164 | #define _NONSECURE (1 << MC_SMMU_PTB_DATA_0_ASID_NONSECURE_SHIFT) | ||
165 | #define _PDE_NEXT (1 << SMMU_PDE_NEXT_SHIFT) | ||
166 | #define _MASK_ATTR (_READABLE | _WRITABLE | _NONSECURE) | ||
167 | |||
168 | #define _PDIR_ATTR (_READABLE | _WRITABLE | _NONSECURE) | ||
169 | |||
170 | #define _PDE_ATTR (_READABLE | _WRITABLE | _NONSECURE) | ||
171 | #define _PDE_ATTR_N (_PDE_ATTR | _PDE_NEXT) | ||
172 | #define _PDE_VACANT(pdn) (((pdn) << 10) | _PDE_ATTR) | ||
173 | |||
174 | #define _PTE_ATTR (_READABLE | _WRITABLE | _NONSECURE) | ||
175 | #define _PTE_VACANT(addr) (((addr) >> SMMU_PAGE_SHIFT) | _PTE_ATTR) | ||
176 | |||
177 | #define SMMU_MK_PDIR(page, attr) \ | ||
178 | ((page_to_phys(page) >> SMMU_PDIR_SHIFT) | (attr)) | ||
179 | #define SMMU_MK_PDE(page, attr) \ | ||
180 | (unsigned long)((page_to_phys(page) >> SMMU_PDE_SHIFT) | (attr)) | ||
181 | #define SMMU_EX_PTBL_PAGE(pde) \ | ||
182 | pfn_to_page((unsigned long)(pde) & SMMU_PFN_MASK) | ||
183 | #define SMMU_PFN_TO_PTE(pfn, attr) (unsigned long)((pfn) | (attr)) | ||
184 | |||
185 | #define SMMU_ASID_ENABLE(asid) ((asid) | (1 << 31)) | ||
186 | #define SMMU_ASID_DISABLE 0 | ||
187 | #define SMMU_ASID_ASID(n) ((n) & ~SMMU_ASID_ENABLE(0)) | ||
188 | |||
/*
 * Hardware (DMA) clients that can be routed through the SMMU.
 * Each entry corresponds to an MC_SMMU_<client>_ASID_0 register above.
 * Keep this as a "natural" enumeration (no assignments) — HWC_COUNT
 * and the HWC_INIT() table initializer depend on contiguous values.
 */
enum smmu_hwclient {
	HWC_AFI,	/* PCIE */
	HWC_AVPC,	/* AVP */
	HWC_DC,		/* Display controller */
	HWC_DCB,	/* Display controller B */
	HWC_EPP,	/* Encoder pre-processor */
	HWC_G2,		/* 2D engine */
	HWC_HC,		/* Host1x */
	HWC_HDA,	/* High-def audio */
	HWC_ISP,	/* Image signal processor */
	HWC_MPE,	/* MPEG encoder */
	HWC_NV,		/* 3D */
	HWC_NV2,	/* 3D */
	HWC_PPCS,	/* AHB */
	HWC_SATA,	/* SATA */
	HWC_VDE,	/* Video decoder */
	HWC_VI,		/* Video input */

	HWC_COUNT	/* number of clients, not a real client */
};
210 | |||
211 | struct smmu_hwc_state { | ||
212 | unsigned long reg; | ||
213 | unsigned long enable_disable; | ||
214 | }; | ||
215 | |||
216 | /* Hardware client mapping initializer */ | ||
217 | #define HWC_INIT(client) \ | ||
218 | [HWC_##client] = {MC_SMMU_##client##_ASID_0, SMMU_ASID_DISABLE}, | ||
219 | |||
220 | static const struct smmu_hwc_state smmu_hwc_state_init[] = { | ||
221 | HWC_INIT(AFI) | ||
222 | HWC_INIT(AVPC) | ||
223 | HWC_INIT(DC) | ||
224 | HWC_INIT(DCB) | ||
225 | HWC_INIT(EPP) | ||
226 | HWC_INIT(G2) | ||
227 | HWC_INIT(HC) | ||
228 | HWC_INIT(HDA) | ||
229 | HWC_INIT(ISP) | ||
230 | HWC_INIT(MPE) | ||
231 | HWC_INIT(NV) | ||
232 | HWC_INIT(NV2) | ||
233 | HWC_INIT(PPCS) | ||
234 | HWC_INIT(SATA) | ||
235 | HWC_INIT(VDE) | ||
236 | HWC_INIT(VI) | ||
237 | }; | ||
238 | |||
239 | |||
240 | struct domain_hwc_map { | ||
241 | const char *dev_name; | ||
242 | const enum smmu_hwclient *hwcs; | ||
243 | const unsigned int nr_hwcs; | ||
244 | }; | ||
245 | |||
246 | /* Enable all hardware clients for SMMU translation */ | ||
247 | static const enum smmu_hwclient nvmap_hwcs[] = { | ||
248 | HWC_AFI, | ||
249 | HWC_AVPC, | ||
250 | HWC_DC, | ||
251 | HWC_DCB, | ||
252 | HWC_EPP, | ||
253 | HWC_G2, | ||
254 | HWC_HC, | ||
255 | HWC_HDA, | ||
256 | HWC_ISP, | ||
257 | HWC_MPE, | ||
258 | HWC_NV, | ||
259 | HWC_NV2, | ||
260 | HWC_PPCS, | ||
261 | HWC_SATA, | ||
262 | HWC_VDE, | ||
263 | HWC_VI | ||
264 | }; | ||
265 | |||
266 | static const struct domain_hwc_map smmu_hwc_map[] = { | ||
267 | { | ||
268 | .dev_name = "nvmap", | ||
269 | .hwcs = nvmap_hwcs, | ||
270 | .nr_hwcs = ARRAY_SIZE(nvmap_hwcs), | ||
271 | }, | ||
272 | }; | ||
273 | |||
/*
 * Per address space (one hardware ASID)
 */
struct smmu_as {
	struct smmu_device *smmu;	/* back pointer to container */
	unsigned int asid;		/* hardware ASID number */
	const struct domain_hwc_map *hwclients;	/* non-NULL while in use */
	struct mutex lock;		/* for pagetable */
	struct tegra_iovmm_domain domain;	/* generic IOVMM domain */
	struct page *pdir_page;		/* first-level page directory */
	unsigned long pdir_attr;	/* attribute bits for the PDIR entry */
	unsigned long pde_attr;		/* attribute bits for new PDEs */
	unsigned long pte_attr;		/* attribute bits for new PTEs */
	unsigned int *pte_count;	/* per-PDE count of in-use PTEs */
	struct device sysfs_dev;	/* sysfs node for this AS */
	int sysfs_use_count;
};
291 | |||
/*
 * Per SMMU device
 */
struct smmu_device {
	void __iomem *regs, *regs_ahbarb;	/* MC and AHB arbiter MMIO */
	tegra_iovmm_addr_t iovmm_base;	/* remappable base address */
	unsigned long page_count;	/* total remappable size, in pages */
	spinlock_t lock;		/* serializes register access */
	char *name;
	struct tegra_iovmm_device iovmm_dev;
	int num_ases;
	struct smmu_as *as;		/* Run-time allocated array */
	struct smmu_hwc_state hwc_state[HWC_COUNT];	/* ASID per client */
	struct device sysfs_dev;
	int sysfs_use_count;
	bool enable;
	struct page *avp_vector_page;	/* dummy page shared by all AS's */

	/*
	 * Register image savers for suspend/resume
	 * (captured in smmu_suspend(), rewritten by smmu_setup_regs())
	 */
	unsigned long translation_enable_0_0;
	unsigned long translation_enable_1_0;
	unsigned long translation_enable_2_0;
	unsigned long asid_security_0;

	unsigned long lowest_asid;	/* Variables for hardware testing */
	unsigned long debug_asid;
	unsigned long signature_pid;	/* For debugging aid */
};
322 | |||
323 | #define VA_PAGE_TO_PA(va, page) \ | ||
324 | (page_to_phys(page) + ((unsigned long)(va) & ~PAGE_MASK)) | ||
325 | |||
326 | #define FLUSH_CPU_DCACHE(va, page, size) \ | ||
327 | do { \ | ||
328 | unsigned long _pa_ = VA_PAGE_TO_PA(va, page); \ | ||
329 | __cpuc_flush_dcache_area((void *)(va), (size_t)(size)); \ | ||
330 | outer_flush_range(_pa_, _pa_+(size_t)(size)); \ | ||
331 | } while (0) | ||
332 | |||
333 | /* | ||
334 | * Any interaction between any block on PPSB and a block on APB or AHB | ||
335 | * must have these read-back to ensure the APB/AHB bus transaction is | ||
336 | * complete before initiating activity on the PPSB block. | ||
337 | */ | ||
338 | #define FLUSH_SMMU_REGS(smmu) (void)readl((smmu)->regs + MC_SMMU_CONFIG_0) | ||
339 | |||
340 | /* | ||
341 | * Flush all TLB entries and all PTC entries | ||
342 | * Caller must lock smmu | ||
343 | */ | ||
static void smmu_flush_regs(struct smmu_device *smmu, int enable)
{
	/* Invalidate the entire page-table cache first */
	writel(MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_TYPE_ALL,
		smmu->regs + MC_SMMU_PTC_FLUSH_0);
	/* Read back so the PTC flush completes before the TLB flush */
	FLUSH_SMMU_REGS(smmu);
	/* Invalidate every TLB entry, ignoring ASID */
	writel(MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_ALL |
			MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_disable,
		smmu->regs + MC_SMMU_TLB_FLUSH_0);

	/* Optionally (re-)enable translation once the caches are clean */
	if (enable)
		writel(MC_SMMU_CONFIG_0_SMMU_ENABLE_ENABLE,
			smmu->regs + MC_SMMU_CONFIG_0);
	FLUSH_SMMU_REGS(smmu);
}
358 | |||
/*
 * Program (or re-program, on resume) the full SMMU register state:
 * per-AS page directories, per-client ASID routing, the saved
 * translation-enable/security images, and the TLB/PTC configuration.
 * Caller must hold smmu->lock.
 */
static void smmu_setup_regs(struct smmu_device *smmu)
{
	int i;

	if (smmu->as) {
		int asid;

		/* Set/restore page directory for each AS */
		for (asid = 0; asid < smmu->num_ases; asid++) {
			struct smmu_as *as = &smmu->as[asid];

			/* Select the ASID, then write its PTB pointer */
			writel(MC_SMMU_PTB_ASID_0_CURRENT_ASID(as->asid),
				as->smmu->regs + MC_SMMU_PTB_ASID_0);
			writel(as->pdir_page
				? SMMU_MK_PDIR(as->pdir_page, as->pdir_attr)
				: MC_SMMU_PTB_DATA_0_RESET_VAL,
				as->smmu->regs + MC_SMMU_PTB_DATA_0);
		}
	}

	/* Set/restore ASID for each hardware client */
	for (i = 0; i < HWC_COUNT; i++) {
		struct smmu_hwc_state *hwcst = &smmu->hwc_state[i];
		writel(hwcst->enable_disable, smmu->regs + hwcst->reg);
	}

	/* Restore the images captured by smmu_suspend() (reset values
	 * on first-time setup) */
	writel(smmu->translation_enable_0_0,
		smmu->regs + MC_SMMU_TRANSLATION_ENABLE_0_0);
	writel(smmu->translation_enable_1_0,
		smmu->regs + MC_SMMU_TRANSLATION_ENABLE_1_0);
	writel(smmu->translation_enable_2_0,
		smmu->regs + MC_SMMU_TRANSLATION_ENABLE_2_0);
	writel(smmu->asid_security_0,
		smmu->regs + MC_SMMU_ASID_SECURITY_0);
	writel(MC_SMMU_TLB_CONFIG_0_RESET_VAL,
		smmu->regs + MC_SMMU_TLB_CONFIG_0);
	writel(MC_SMMU_PTC_CONFIG_0_RESET_VAL,
		smmu->regs + MC_SMMU_PTC_CONFIG_0);

	/* Flush caches and enable translation */
	smmu_flush_regs(smmu, 1);
	/* Tell the AHB arbiter that SMMU initialization is complete */
	writel(
		readl(smmu->regs_ahbarb + AHB_ARBITRATION_XBAR_CTRL_0) |
		(AHB_ARBITRATION_XBAR_CTRL_0_SMMU_INIT_DONE_DONE <<
			AHB_ARBITRATION_XBAR_CTRL_0_SMMU_INIT_DONE_SHIFT),
		smmu->regs_ahbarb + AHB_ARBITRATION_XBAR_CTRL_0);
}
405 | |||
/*
 * Save the register images that smmu_setup_regs() will rewrite on
 * resume.  Always succeeds (returns 0).
 */
static int smmu_suspend(struct tegra_iovmm_device *dev)
{
	struct smmu_device *smmu =
		container_of(dev, struct smmu_device, iovmm_dev);

	smmu->translation_enable_0_0 =
		readl(smmu->regs + MC_SMMU_TRANSLATION_ENABLE_0_0);
	smmu->translation_enable_1_0 =
		readl(smmu->regs + MC_SMMU_TRANSLATION_ENABLE_1_0);
	smmu->translation_enable_2_0 =
		readl(smmu->regs + MC_SMMU_TRANSLATION_ENABLE_2_0);
	smmu->asid_security_0 =
		readl(smmu->regs + MC_SMMU_ASID_SECURITY_0);
	return 0;
}
421 | |||
422 | static void smmu_resume(struct tegra_iovmm_device *dev) | ||
423 | { | ||
424 | struct smmu_device *smmu = | ||
425 | container_of(dev, struct smmu_device, iovmm_dev); | ||
426 | |||
427 | if (!smmu->enable) | ||
428 | return; | ||
429 | |||
430 | spin_lock(&smmu->lock); | ||
431 | smmu_setup_regs(smmu); | ||
432 | spin_unlock(&smmu->lock); | ||
433 | } | ||
434 | |||
/*
 * Invalidate the PTC line caching *pte and the TLB entries covering
 * iova in this AS.  is_pde selects the TLB match granularity: a 4MB
 * section (PDE change) versus a 16KB group (PTE change), per the
 * SECTION/GROUP VA masks above.
 */
static void flush_ptc_and_tlb(struct smmu_device *smmu,
		struct smmu_as *as, unsigned long iova,
		unsigned long *pte, struct page *ptpage, int is_pde)
{
	unsigned long tlb_flush_va = is_pde
		? MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA(iova, SECTION)
		: MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA(iova, GROUP);

	/* Flush the PTC line by the physical address of the PTE */
	writel(MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_TYPE_ADR |
		VA_PAGE_TO_PA(pte, ptpage),
		smmu->regs + MC_SMMU_PTC_FLUSH_0);
	/* Read back so the PTC flush lands before the TLB flush */
	FLUSH_SMMU_REGS(smmu);
	/* Flush only this AS's TLB entries for the affected VA range */
	writel(tlb_flush_va |
		MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH__ENABLE |
		(as->asid << MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_SHIFT),
		smmu->regs + MC_SMMU_TLB_FLUSH_0);
	FLUSH_SMMU_REGS(smmu);
}
453 | |||
/*
 * Free the second-level page table covering iova, if one is present,
 * and mark its PDE vacant again.  No-op when the PDE is already vacant.
 * NOTE(review): callers appear to serialize via as->lock (smmu_map/
 * smmu_unmap hold it); free_pdir() calls this during teardown — confirm
 * no concurrent access is possible there.
 */
static void free_ptbl(struct smmu_as *as, unsigned long iova)
{
	unsigned long pdn = SMMU_ADDR_TO_PDN(iova);
	unsigned long *pdir = (unsigned long *)kmap(as->pdir_page);

	if (pdir[pdn] != _PDE_VACANT(pdn)) {
		pr_debug("%s:%d pdn=%lx\n", __func__, __LINE__, pdn);

		/* Release the page-table page itself */
		ClearPageReserved(SMMU_EX_PTBL_PAGE(pdir[pdn]));
		__free_page(SMMU_EX_PTBL_PAGE(pdir[pdn]));
		pdir[pdn] = _PDE_VACANT(pdn);
		/* Push the cleared PDE to memory, then drop cached copies */
		FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
		flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
				as->pdir_page, 1);
	}
	kunmap(as->pdir_page);
}
471 | |||
472 | static void free_pdir(struct smmu_as *as) | ||
473 | { | ||
474 | if (as->pdir_page) { | ||
475 | unsigned addr = as->smmu->iovmm_base; | ||
476 | int count = as->smmu->page_count; | ||
477 | |||
478 | while (count-- > 0) { | ||
479 | free_ptbl(as, addr); | ||
480 | addr += SMMU_PAGE_SIZE * SMMU_PTBL_COUNT; | ||
481 | } | ||
482 | ClearPageReserved(as->pdir_page); | ||
483 | __free_page(as->pdir_page); | ||
484 | as->pdir_page = NULL; | ||
485 | kfree(as->pte_count); | ||
486 | as->pte_count = NULL; | ||
487 | } | ||
488 | } | ||
489 | |||
/*
 * Platform-driver remove: disable translation, free all AS resources,
 * unmap registers and unregister from the IOVMM core.
 */
static int smmu_remove(struct platform_device *pdev)
{
	struct smmu_device *smmu = platform_get_drvdata(pdev);

	if (!smmu)
		return 0;

	/* Turn translation off before tearing down page tables */
	if (smmu->enable) {
		writel(MC_SMMU_CONFIG_0_SMMU_ENABLE_DISABLE,
			smmu->regs + MC_SMMU_CONFIG_0);
		smmu->enable = 0;
	}
	platform_set_drvdata(pdev, NULL);

	if (smmu->as) {
		int asid;

		for (asid = 0; asid < smmu->num_ases; asid++)
			free_pdir(&smmu->as[asid]);
		kfree(smmu->as);
	}

	if (smmu->avp_vector_page)
		__free_page(smmu->avp_vector_page);
	if (smmu->regs)
		iounmap(smmu->regs);
	if (smmu->regs_ahbarb)
		iounmap(smmu->regs_ahbarb);
	tegra_iovmm_unregister(&smmu->iovmm_dev);
	kfree(smmu);
	return 0;
}
522 | |||
/*
 * Maps PTBL for given iova and returns the PTE address.
 * Caller must unmap the mapped PTBL returned in *ptbl_page_p.
 *
 * If the covering PDE is vacant and @allocate is true, a new page
 * table is allocated, initialized to all-vacant PTEs, and hooked into
 * the page directory; otherwise NULL is returned.  On success
 * *pte_counter points at the in-use counter for the covering PDE.
 * NOTE(review): callers appear to hold as->lock — confirm.
 */
static unsigned long *locate_pte(struct smmu_as *as,
		unsigned long iova, bool allocate,
		struct page **ptbl_page_p,
		unsigned int **pte_counter)
{
	unsigned long ptn = SMMU_ADDR_TO_PFN(iova);
	unsigned long pdn = SMMU_ADDR_TO_PDN(iova);
	unsigned long *pdir = kmap(as->pdir_page);
	unsigned long *ptbl;

	if (pdir[pdn] != _PDE_VACANT(pdn)) {
		/* Mapped entry table already exists */
		*ptbl_page_p = SMMU_EX_PTBL_PAGE(pdir[pdn]);
		ptbl = kmap(*ptbl_page_p);
	} else if (!allocate) {
		kunmap(as->pdir_page);
		return NULL;
	} else {
		/* Vacant - allocate a new page table */
		pr_debug("%s:%d new PTBL pdn=%lx\n", __func__, __LINE__, pdn);

		*ptbl_page_p = alloc_page(GFP_KERNEL | __GFP_DMA);
		if (!*ptbl_page_p) {
			kunmap(as->pdir_page);
			pr_err(DRIVER_NAME
			": failed to allocate tegra_iovmm_device page table\n");
			return NULL;
		}
		SetPageReserved(*ptbl_page_p);
		ptbl = (unsigned long *)kmap(*ptbl_page_p);
		{
			/* Pre-fill every PTE with the vacant pattern for
			 * its own address */
			int pn;
			unsigned long addr = SMMU_PDN_TO_ADDR(pdn);
			for (pn = 0; pn < SMMU_PTBL_COUNT;
				pn++, addr += SMMU_PAGE_SIZE) {
				ptbl[pn] = _PTE_VACANT(addr);
			}
		}
		/* Make the table visible to the SMMU before the PDE */
		FLUSH_CPU_DCACHE(ptbl, *ptbl_page_p, SMMU_PTBL_SIZE);
		pdir[pdn] = SMMU_MK_PDE(*ptbl_page_p,
				as->pde_attr | _PDE_NEXT);
		FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
		flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
				as->pdir_page, 1);
	}
	*pte_counter = &as->pte_count[pdn];

	kunmap(as->pdir_page);
	return &ptbl[ptn % SMMU_PTBL_COUNT];
}
577 | |||
578 | static void put_signature(struct smmu_as *as, | ||
579 | unsigned long addr, unsigned long pfn) | ||
580 | { | ||
581 | if (as->smmu->signature_pid == current->pid) { | ||
582 | struct page *page = pfn_to_page(pfn); | ||
583 | unsigned long *vaddr = kmap(page); | ||
584 | if (vaddr) { | ||
585 | vaddr[0] = addr; | ||
586 | vaddr[1] = pfn << PAGE_SHIFT; | ||
587 | FLUSH_CPU_DCACHE(vaddr, page, sizeof(vaddr[0]) * 2); | ||
588 | kunmap(page); | ||
589 | } | ||
590 | } | ||
591 | } | ||
592 | |||
/*
 * Map every page of an IOVMM area into this AS, pinning each backing
 * page via the area's lock_makeresident() callback.  On failure, all
 * pages mapped so far are unmapped and released; returns -ENOMEM.
 */
static int smmu_map(struct tegra_iovmm_domain *domain,
		struct tegra_iovmm_area *iovma)
{
	struct smmu_as *as = container_of(domain, struct smmu_as, domain);
	unsigned long addr = iovma->iovm_start;
	unsigned long pcount = iovma->iovm_length >> SMMU_PAGE_SHIFT;
	int i;

	pr_debug("%s:%d iova=%lx asid=%d\n", __func__, __LINE__,
		addr, as - as->smmu->as);

	for (i = 0; i < pcount; i++) {
		unsigned long pfn;
		unsigned long *pte;
		unsigned int *pte_counter;
		struct page *ptpage;

		/* Pin the backing page; pfn is its frame number */
		pfn = iovma->ops->lock_makeresident(iovma, i << PAGE_SHIFT);
		if (!pfn_valid(pfn))
			goto fail;

		/* Lock is taken per page so long maps don't starve others */
		mutex_lock(&as->lock);

		pte = locate_pte(as, addr, true, &ptpage, &pte_counter);
		if (!pte)
			goto fail2;

		pr_debug("%s:%d iova=%lx pfn=%lx asid=%d\n",
			__func__, __LINE__, addr, pfn, as - as->smmu->as);

		/* Maintain the per-PDE count of non-vacant PTEs */
		if (*pte == _PTE_VACANT(addr))
			(*pte_counter)++;
		*pte = SMMU_PFN_TO_PTE(pfn, as->pte_attr);
		if (unlikely((*pte == _PTE_VACANT(addr))))
			(*pte_counter)--;
		FLUSH_CPU_DCACHE(pte, ptpage, sizeof *pte);
		flush_ptc_and_tlb(as->smmu, as, addr, pte, ptpage, 0);
		kunmap(ptpage);
		mutex_unlock(&as->lock);
		put_signature(as, addr, pfn);
		addr += SMMU_PAGE_SIZE;
	}
	return 0;

fail:
	/* lock_makeresident failed for index i: nothing extra to release */
	mutex_lock(&as->lock);
fail2:
	/*
	 * NOTE(review): when arriving via fail2, index i's page WAS made
	 * resident but the loop below only releases indices i-1..0 —
	 * looks like a missed ops->release(iovma, i << PAGE_SHIFT);
	 * verify against lock_makeresident's contract.
	 */

	/* Roll back: unmap and release every page mapped so far */
	while (i-- > 0) {
		unsigned long *pte;
		unsigned int *pte_counter;
		struct page *page;

		iovma->ops->release(iovma, i<<PAGE_SHIFT);
		addr -= SMMU_PAGE_SIZE;
		pte = locate_pte(as, addr, false, &page, &pte_counter);
		if (pte) {
			if (*pte != _PTE_VACANT(addr)) {
				*pte = _PTE_VACANT(addr);
				FLUSH_CPU_DCACHE(pte, page, sizeof *pte);
				flush_ptc_and_tlb(as->smmu, as, addr, pte,
						page, 0);
				kunmap(page);
				/* Drop the page table once it empties */
				if (!--(*pte_counter))
					free_ptbl(as, addr);
			} else {
				kunmap(page);
			}
		}
	}
	mutex_unlock(&as->lock);
	return -ENOMEM;
}
666 | |||
/*
 * Unmap every page of an IOVMM area, releasing the backing pages via
 * the area's release() callback.  When @decommit is set, page tables
 * that become empty are freed as well.
 */
static void smmu_unmap(struct tegra_iovmm_domain *domain,
	struct tegra_iovmm_area *iovma, bool decommit)
{
	struct smmu_as *as = container_of(domain, struct smmu_as, domain);
	unsigned long addr = iovma->iovm_start;
	unsigned int pcount = iovma->iovm_length >> SMMU_PAGE_SHIFT;
	unsigned int i, *pte_counter;

	pr_debug("%s:%d iova=%lx asid=%d\n", __func__, __LINE__,
		addr, as - as->smmu->as);

	mutex_lock(&as->lock);
	for (i = 0; i < pcount; i++) {
		unsigned long *pte;
		struct page *page;

		/* ops/release may be absent for pfn-mapped areas */
		if (iovma->ops && iovma->ops->release)
			iovma->ops->release(iovma, i << PAGE_SHIFT);

		pte = locate_pte(as, addr, false, &page, &pte_counter);
		if (pte) {
			if (*pte != _PTE_VACANT(addr)) {
				*pte = _PTE_VACANT(addr);
				FLUSH_CPU_DCACHE(pte, page, sizeof *pte);
				flush_ptc_and_tlb(as->smmu, as, addr, pte,
						page, 0);
				kunmap(page);
				/* Free an emptied page table on decommit */
				if (!--(*pte_counter) && decommit) {
					free_ptbl(as, addr);
					smmu_flush_regs(as->smmu, 0);
				}
			}
		}
		addr += SMMU_PAGE_SIZE;
	}
	mutex_unlock(&as->lock);
}
704 | |||
/*
 * Map a single pfn at the given IOVA in this AS.  Unlike smmu_map(),
 * the caller supplies the pfn directly (iovma is unused apart from the
 * signature) — used e.g. to map the shared AVP vector page at IOVA 0.
 */
static void smmu_map_pfn(struct tegra_iovmm_domain *domain,
	struct tegra_iovmm_area *iovma, tegra_iovmm_addr_t addr,
	unsigned long pfn)
{
	struct smmu_as *as = container_of(domain, struct smmu_as, domain);
	struct smmu_device *smmu = as->smmu;
	unsigned long *pte;
	unsigned int *pte_counter;
	struct page *ptpage;

	pr_debug("%s:%d iova=%lx pfn=%lx asid=%d\n", __func__, __LINE__,
		(unsigned long)addr, pfn, as - as->smmu->as);

	BUG_ON(!pfn_valid(pfn));
	mutex_lock(&as->lock);
	pte = locate_pte(as, addr, true, &ptpage, &pte_counter);
	if (pte) {
		/* Maintain the per-PDE count of non-vacant PTEs */
		if (*pte == _PTE_VACANT(addr))
			(*pte_counter)++;
		*pte = SMMU_PFN_TO_PTE(pfn, as->pte_attr);
		if (unlikely((*pte == _PTE_VACANT(addr))))
			(*pte_counter)--;
		FLUSH_CPU_DCACHE(pte, ptpage, sizeof *pte);
		flush_ptc_and_tlb(smmu, as, addr, pte, ptpage, 0);
		kunmap(ptpage);
		put_signature(as, addr, pfn);
	}
	mutex_unlock(&as->lock);
}
734 | |||
735 | /* | ||
736 | * Caller must lock/unlock as | ||
737 | */ | ||
738 | static int alloc_pdir(struct smmu_as *as) | ||
739 | { | ||
740 | unsigned long *pdir; | ||
741 | int pdn; | ||
742 | |||
743 | if (as->pdir_page) | ||
744 | return 0; | ||
745 | |||
746 | as->pte_count = kzalloc(sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT, | ||
747 | GFP_KERNEL); | ||
748 | if (!as->pte_count) { | ||
749 | pr_err(DRIVER_NAME | ||
750 | ": failed to allocate tegra_iovmm_device PTE cunters\n"); | ||
751 | return -ENOMEM; | ||
752 | } | ||
753 | as->pdir_page = alloc_page(GFP_KERNEL | __GFP_DMA); | ||
754 | if (!as->pdir_page) { | ||
755 | pr_err(DRIVER_NAME | ||
756 | ": failed to allocate tegra_iovmm_device page directory\n"); | ||
757 | kfree(as->pte_count); | ||
758 | as->pte_count = NULL; | ||
759 | return -ENOMEM; | ||
760 | } | ||
761 | SetPageReserved(as->pdir_page); | ||
762 | pdir = kmap(as->pdir_page); | ||
763 | |||
764 | for (pdn = 0; pdn < SMMU_PDIR_COUNT; pdn++) | ||
765 | pdir[pdn] = _PDE_VACANT(pdn); | ||
766 | FLUSH_CPU_DCACHE(pdir, as->pdir_page, SMMU_PDIR_SIZE); | ||
767 | writel(MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_TYPE_ADR | | ||
768 | VA_PAGE_TO_PA(pdir, as->pdir_page), | ||
769 | as->smmu->regs + MC_SMMU_PTC_FLUSH_0); | ||
770 | FLUSH_SMMU_REGS(as->smmu); | ||
771 | writel(MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_ALL | | ||
772 | MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH__ENABLE | | ||
773 | (as->asid << MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_SHIFT), | ||
774 | as->smmu->regs + MC_SMMU_TLB_FLUSH_0); | ||
775 | FLUSH_SMMU_REGS(as->smmu); | ||
776 | kunmap(as->pdir_page); | ||
777 | |||
778 | return 0; | ||
779 | } | ||
780 | |||
781 | static void _sysfs_create(struct smmu_as *as, struct device *sysfs_parent); | ||
782 | |||
783 | /* | ||
784 | * Allocate resources for an AS | ||
785 | * TODO: split into "alloc" and "lock" | ||
786 | */ | ||
787 | static struct tegra_iovmm_domain *smmu_alloc_domain( | ||
788 | struct tegra_iovmm_device *dev, struct tegra_iovmm_client *client) | ||
789 | { | ||
790 | struct smmu_device *smmu = | ||
791 | container_of(dev, struct smmu_device, iovmm_dev); | ||
792 | struct smmu_as *as = NULL; | ||
793 | const struct domain_hwc_map *map = NULL; | ||
794 | int asid, i; | ||
795 | |||
796 | /* Look for a free AS */ | ||
797 | for (asid = smmu->lowest_asid; asid < smmu->num_ases; asid++) { | ||
798 | mutex_lock(&smmu->as[asid].lock); | ||
799 | if (!smmu->as[asid].hwclients) { | ||
800 | as = &smmu->as[asid]; | ||
801 | break; | ||
802 | } | ||
803 | mutex_unlock(&smmu->as[asid].lock); | ||
804 | } | ||
805 | |||
806 | if (!as) { | ||
807 | pr_err(DRIVER_NAME ": no free AS\n"); | ||
808 | return NULL; | ||
809 | } | ||
810 | |||
811 | if (alloc_pdir(as) < 0) | ||
812 | goto bad3; | ||
813 | |||
814 | /* Look for a matching hardware client group */ | ||
815 | for (i = 0; ARRAY_SIZE(smmu_hwc_map); i++) { | ||
816 | if (!strcmp(smmu_hwc_map[i].dev_name, client->misc_dev->name)) { | ||
817 | map = &smmu_hwc_map[i]; | ||
818 | break; | ||
819 | } | ||
820 | } | ||
821 | |||
822 | if (!map) { | ||
823 | pr_err(DRIVER_NAME ": no SMMU resource for %s (%s)\n", | ||
824 | client->name, client->misc_dev->name); | ||
825 | goto bad2; | ||
826 | } | ||
827 | |||
828 | spin_lock(&smmu->lock); | ||
829 | /* Update PDIR register */ | ||
830 | writel(MC_SMMU_PTB_ASID_0_CURRENT_ASID(as->asid), | ||
831 | as->smmu->regs + MC_SMMU_PTB_ASID_0); | ||
832 | writel(SMMU_MK_PDIR(as->pdir_page, as->pdir_attr), | ||
833 | as->smmu->regs + MC_SMMU_PTB_DATA_0); | ||
834 | FLUSH_SMMU_REGS(smmu); | ||
835 | |||
836 | /* Put each hardware client in the group into the address space */ | ||
837 | for (i = 0; i < map->nr_hwcs; i++) { | ||
838 | struct smmu_hwc_state *hwcst = &smmu->hwc_state[map->hwcs[i]]; | ||
839 | |||
840 | /* Is the hardware client busy? */ | ||
841 | if (hwcst->enable_disable != SMMU_ASID_DISABLE && | ||
842 | hwcst->enable_disable != SMMU_ASID_ENABLE(as->asid)) { | ||
843 | pr_err(DRIVER_NAME | ||
844 | ": HW 0x%lx busy for ASID %ld (client!=%s)\n", | ||
845 | hwcst->reg, | ||
846 | SMMU_ASID_ASID(hwcst->enable_disable), | ||
847 | client->name); | ||
848 | goto bad; | ||
849 | } | ||
850 | hwcst->enable_disable = SMMU_ASID_ENABLE(as->asid); | ||
851 | writel(hwcst->enable_disable, smmu->regs + hwcst->reg); | ||
852 | } | ||
853 | FLUSH_SMMU_REGS(smmu); | ||
854 | spin_unlock(&smmu->lock); | ||
855 | as->hwclients = map; | ||
856 | _sysfs_create(as, client->misc_dev->this_device); | ||
857 | mutex_unlock(&as->lock); | ||
858 | |||
859 | /* Reserve "page zero" for AVP vectors using a common dummy page */ | ||
860 | smmu_map_pfn(&as->domain, NULL, 0, | ||
861 | page_to_phys(as->smmu->avp_vector_page) >> SMMU_PAGE_SHIFT); | ||
862 | return &as->domain; | ||
863 | |||
864 | bad: | ||
865 | /* Reset hardware clients that have been enabled */ | ||
866 | while (--i >= 0) { | ||
867 | struct smmu_hwc_state *hwcst = &smmu->hwc_state[map->hwcs[i]]; | ||
868 | |||
869 | hwcst->enable_disable = SMMU_ASID_DISABLE; | ||
870 | writel(hwcst->enable_disable, smmu->regs + hwcst->reg); | ||
871 | } | ||
872 | FLUSH_SMMU_REGS(smmu); | ||
873 | spin_unlock(&as->smmu->lock); | ||
874 | bad2: | ||
875 | free_pdir(as); | ||
876 | bad3: | ||
877 | mutex_unlock(&as->lock); | ||
878 | return NULL; | ||
879 | |||
880 | } | ||
881 | |||
882 | /* | ||
883 | * Release resources for an AS | ||
884 | * TODO: split into "unlock" and "free" | ||
885 | */ | ||
static void smmu_free_domain(
	struct tegra_iovmm_domain *domain, struct tegra_iovmm_client *client)
{
	struct smmu_as *as = container_of(domain, struct smmu_as, domain);
	struct smmu_device *smmu = as->smmu;
	const struct domain_hwc_map *map = NULL;
	int i;

	mutex_lock(&as->lock);
	/* NOTE(review): assumes as->hwclients is non-NULL here, i.e. free
	 * is only ever called on a successfully allocated AS -- confirm. */
	map = as->hwclients;

	/* Detach every hardware client in the group from this ASID */
	spin_lock(&smmu->lock);
	for (i = 0; i < map->nr_hwcs; i++) {
		struct smmu_hwc_state *hwcst = &smmu->hwc_state[map->hwcs[i]];

		hwcst->enable_disable = SMMU_ASID_DISABLE;
		writel(SMMU_ASID_DISABLE, smmu->regs + hwcst->reg);
	}
	FLUSH_SMMU_REGS(smmu);
	spin_unlock(&smmu->lock);

	as->hwclients = NULL;
	if (as->pdir_page) {
		/* Reset the page-directory pointer for this ASID, then
		 * release the page tables themselves */
		spin_lock(&smmu->lock);
		writel(MC_SMMU_PTB_ASID_0_CURRENT_ASID(as->asid),
			smmu->regs + MC_SMMU_PTB_ASID_0);
		writel(MC_SMMU_PTB_DATA_0_RESET_VAL,
			smmu->regs + MC_SMMU_PTB_DATA_0);
		FLUSH_SMMU_REGS(smmu);
		spin_unlock(&smmu->lock);

		free_pdir(as);
	}
	mutex_unlock(&as->lock);
}
921 | |||
/* Operation table handed to the tegra_iovmm core at registration. */
static struct tegra_iovmm_device_ops tegra_iovmm_smmu_ops = {
	.map = smmu_map,
	.unmap = smmu_unmap,
	.map_pfn = smmu_map_pfn,
	.alloc_domain = smmu_alloc_domain,
	.free_domain = smmu_free_domain,
	.suspend = smmu_suspend,
	.resume = smmu_resume,
};
931 | |||
932 | static int smmu_probe(struct platform_device *pdev) | ||
933 | { | ||
934 | struct smmu_device *smmu; | ||
935 | struct resource *regs, *regs2; | ||
936 | struct tegra_smmu_window *window; | ||
937 | int e, asid; | ||
938 | |||
939 | BUILD_BUG_ON(PAGE_SHIFT != SMMU_PAGE_SHIFT); | ||
940 | BUILD_BUG_ON(ARRAY_SIZE(smmu_hwc_state_init) != HWC_COUNT); | ||
941 | |||
942 | regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mc"); | ||
943 | regs2 = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ahbarb"); | ||
944 | window = tegra_smmu_window(0); | ||
945 | |||
946 | if (!regs || !regs2 || !window) { | ||
947 | pr_err(DRIVER_NAME ": No SMMU resources\n"); | ||
948 | return -ENODEV; | ||
949 | } | ||
950 | |||
951 | smmu = kzalloc(sizeof(*smmu), GFP_KERNEL); | ||
952 | if (!smmu) { | ||
953 | pr_err(DRIVER_NAME ": failed to allocate smmu_device\n"); | ||
954 | return -ENOMEM; | ||
955 | } | ||
956 | |||
957 | smmu->num_ases = MC_SMMU_NUM_ASIDS; | ||
958 | smmu->iovmm_base = (tegra_iovmm_addr_t)window->start; | ||
959 | smmu->page_count = (window->end + 1 - window->start) >> SMMU_PAGE_SHIFT; | ||
960 | smmu->regs = ioremap(regs->start, regs->end + 1 - regs->start); | ||
961 | smmu->regs_ahbarb = | ||
962 | ioremap(regs2->start, regs2->end + 1 - regs2->start); | ||
963 | if (!smmu->regs || !smmu->regs_ahbarb) { | ||
964 | pr_err(DRIVER_NAME ": failed to remap SMMU registers\n"); | ||
965 | e = -ENXIO; | ||
966 | goto fail; | ||
967 | } | ||
968 | |||
969 | smmu->translation_enable_0_0 = ~0; | ||
970 | smmu->translation_enable_1_0 = ~0; | ||
971 | smmu->translation_enable_2_0 = ~0; | ||
972 | smmu->asid_security_0 = 0; | ||
973 | |||
974 | memcpy(smmu->hwc_state, smmu_hwc_state_init, sizeof(smmu->hwc_state)); | ||
975 | |||
976 | smmu->iovmm_dev.name = VMM_NAME; | ||
977 | smmu->iovmm_dev.ops = &tegra_iovmm_smmu_ops; | ||
978 | smmu->iovmm_dev.pgsize_bits = SMMU_PAGE_SHIFT; | ||
979 | |||
980 | e = tegra_iovmm_register(&smmu->iovmm_dev); | ||
981 | if (e) | ||
982 | goto fail; | ||
983 | |||
984 | smmu->as = kzalloc(sizeof(smmu->as[0]) * smmu->num_ases, GFP_KERNEL); | ||
985 | if (!smmu->as) { | ||
986 | pr_err(DRIVER_NAME ": failed to allocate smmu_as\n"); | ||
987 | e = -ENOMEM; | ||
988 | goto fail; | ||
989 | } | ||
990 | |||
991 | /* Initialize address space structure array */ | ||
992 | for (asid = 0; asid < smmu->num_ases; asid++) { | ||
993 | struct smmu_as *as = &smmu->as[asid]; | ||
994 | |||
995 | as->smmu = smmu; | ||
996 | as->asid = asid; | ||
997 | as->pdir_attr = _PDIR_ATTR; | ||
998 | as->pde_attr = _PDE_ATTR; | ||
999 | as->pte_attr = _PTE_ATTR; | ||
1000 | |||
1001 | mutex_init(&as->lock); | ||
1002 | |||
1003 | e = tegra_iovmm_domain_init(&as->domain, &smmu->iovmm_dev, | ||
1004 | smmu->iovmm_base, | ||
1005 | smmu->iovmm_base + | ||
1006 | (smmu->page_count << SMMU_PAGE_SHIFT)); | ||
1007 | if (e) | ||
1008 | goto fail; | ||
1009 | } | ||
1010 | spin_lock_init(&smmu->lock); | ||
1011 | smmu_setup_regs(smmu); | ||
1012 | smmu->enable = 1; | ||
1013 | platform_set_drvdata(pdev, smmu); | ||
1014 | |||
1015 | smmu->avp_vector_page = alloc_page(GFP_KERNEL); | ||
1016 | if (!smmu->avp_vector_page) | ||
1017 | goto fail; | ||
1018 | return 0; | ||
1019 | |||
1020 | fail: | ||
1021 | if (smmu->avp_vector_page) | ||
1022 | __free_page(smmu->avp_vector_page); | ||
1023 | if (smmu->regs) | ||
1024 | iounmap(smmu->regs); | ||
1025 | if (smmu->regs_ahbarb) | ||
1026 | iounmap(smmu->regs_ahbarb); | ||
1027 | if (smmu && smmu->as) { | ||
1028 | for (asid = 0; asid < smmu->num_ases; asid++) { | ||
1029 | if (smmu->as[asid].pdir_page) { | ||
1030 | ClearPageReserved(smmu->as[asid].pdir_page); | ||
1031 | __free_page(smmu->as[asid].pdir_page); | ||
1032 | } | ||
1033 | } | ||
1034 | kfree(smmu->as); | ||
1035 | } | ||
1036 | kfree(smmu); | ||
1037 | return e; | ||
1038 | } | ||
1039 | |||
/* Platform driver glue; smmu_remove is defined earlier in this file. */
static struct platform_driver tegra_iovmm_smmu_drv = {
	.probe = smmu_probe,
	.remove = smmu_remove,
	.driver = {
		.name = DRIVER_NAME,
	},
};
1047 | |||
1048 | static int __devinit smmu_init(void) | ||
1049 | { | ||
1050 | return platform_driver_register(&tegra_iovmm_smmu_drv); | ||
1051 | } | ||
1052 | |||
/* Module exit: unregister the platform driver. */
static void __exit smmu_exit(void)
{
	platform_driver_unregister(&tegra_iovmm_smmu_drv);
}

/* Register at subsystem level so clients can attach early in boot */
subsys_initcall(smmu_init);
module_exit(smmu_exit);
1060 | |||
1061 | /* | ||
1062 | * SMMU-global sysfs interface for debugging | ||
1063 | */ | ||
/* Shared show/store handlers used by every register-backed attribute */
static ssize_t _sysfs_show_reg(struct device *d,
			struct device_attribute *da, char *buf);
static ssize_t _sysfs_store_reg(struct device *d,
			struct device_attribute *da, const char *buf,
			size_t count);

/*
 * Build one _smmu_reg_name_map entry: the stringified register name,
 * its _0 offset constant, and a read/write sysfs attribute wired to
 * the shared show/store handlers above.
 */
#define _NAME_MAP(_name) { \
	.name = __stringify(_name), \
	.offset = _name##_0, \
	.dev_attr = __ATTR(_name, S_IRUGO | S_IWUSR, \
			_sysfs_show_reg, _sysfs_store_reg) \
}
1076 | |||
/*
 * Name -> register-offset table; each entry also carries the sysfs
 * attribute that exposes the register (built by _NAME_MAP above).
 */
static
struct _reg_name_map {
	const char *name;	/* register name, e.g. "MC_SMMU_CONFIG" */
	unsigned offset;	/* byte offset from smmu->regs */
	struct device_attribute dev_attr;	/* sysfs attribute */
} _smmu_reg_name_map[] = {
	_NAME_MAP(MC_SMMU_CONFIG),
	_NAME_MAP(MC_SMMU_TLB_CONFIG),
	_NAME_MAP(MC_SMMU_PTC_CONFIG),
	_NAME_MAP(MC_SMMU_PTB_ASID),
	_NAME_MAP(MC_SMMU_PTB_DATA),
	_NAME_MAP(MC_SMMU_TLB_FLUSH),
	_NAME_MAP(MC_SMMU_PTC_FLUSH),
	_NAME_MAP(MC_SMMU_ASID_SECURITY),
	_NAME_MAP(MC_SMMU_STATS_TLB_HIT_COUNT),
	_NAME_MAP(MC_SMMU_STATS_TLB_MISS_COUNT),
	_NAME_MAP(MC_SMMU_STATS_PTC_HIT_COUNT),
	_NAME_MAP(MC_SMMU_STATS_PTC_MISS_COUNT),
	_NAME_MAP(MC_SMMU_TRANSLATION_ENABLE_0),
	_NAME_MAP(MC_SMMU_TRANSLATION_ENABLE_1),
	_NAME_MAP(MC_SMMU_TRANSLATION_ENABLE_2),
	_NAME_MAP(MC_SMMU_AFI_ASID),
	_NAME_MAP(MC_SMMU_AVPC_ASID),
	_NAME_MAP(MC_SMMU_DC_ASID),
	_NAME_MAP(MC_SMMU_DCB_ASID),
	_NAME_MAP(MC_SMMU_EPP_ASID),
	_NAME_MAP(MC_SMMU_G2_ASID),
	_NAME_MAP(MC_SMMU_HC_ASID),
	_NAME_MAP(MC_SMMU_HDA_ASID),
	_NAME_MAP(MC_SMMU_ISP_ASID),
	_NAME_MAP(MC_SMMU_MPE_ASID),
	_NAME_MAP(MC_SMMU_NV_ASID),
	_NAME_MAP(MC_SMMU_NV2_ASID),
	_NAME_MAP(MC_SMMU_PPCS_ASID),
	_NAME_MAP(MC_SMMU_SATA_ASID),
	_NAME_MAP(MC_SMMU_VDE_ASID),
	_NAME_MAP(MC_SMMU_VI_ASID),
};
1115 | |||
1116 | static ssize_t lookup_reg(struct device_attribute *da) | ||
1117 | { | ||
1118 | int i; | ||
1119 | for (i = 0; i < ARRAY_SIZE(_smmu_reg_name_map); i++) { | ||
1120 | if (!strcmp(_smmu_reg_name_map[i].name, da->attr.name)) | ||
1121 | return _smmu_reg_name_map[i].offset; | ||
1122 | } | ||
1123 | return -ENODEV; | ||
1124 | } | ||
1125 | |||
/*
 * sysfs "show": read the SMMU register named by the attribute and
 * print it as eight hex digits.
 */
static ssize_t _sysfs_show_reg(struct device *d,
			struct device_attribute *da, char *buf)
{
	struct smmu_device *smmu =
		container_of(d, struct smmu_device, sysfs_dev);
	ssize_t offset = lookup_reg(da);

	if (offset < 0)
		return offset;	/* unknown attribute: -ENODEV */
	return sprintf(buf, "%08lx\n",
		(unsigned long)readl(smmu->regs + offset));
}
1138 | |||
/*
 * sysfs "store": write a hex value to the SMMU register named by the
 * attribute.  Without CONFIG_TEGRA_IOVMM_SMMU_SYSFS only the TLB/PTC
 * statistics-enable bits may be changed; other writes are ignored.
 */
static ssize_t _sysfs_store_reg(struct device *d,
			struct device_attribute *da,
			const char *buf, size_t count)
{
	struct smmu_device *smmu =
		container_of(d, struct smmu_device, sysfs_dev);
	ssize_t offset = lookup_reg(da);
	u32 value;
	int err;

	if (offset < 0)
		return offset;

	err = kstrtou32(buf, 16, &value);
	if (err)
		return err;

#ifdef CONFIG_TEGRA_IOVMM_SMMU_SYSFS
	writel(value, smmu->regs + offset);
#else
	/* Allow writing to reg only for TLB/PTC stats enabling/disabling */
	{
		unsigned long mask = 0;
		switch (offset) {
		case MC_SMMU_TLB_CONFIG_0:
			mask = MC_SMMU_TLB_CONFIG_0_TLB_STATS__MASK;
			break;
		case MC_SMMU_PTC_CONFIG_0:
			mask = MC_SMMU_PTC_CONFIG_0_PTC_STATS__MASK;
			break;
		default:
			break;
		}

		if (mask) {
			/* Merge only the stats bits into the live value */
			unsigned long currval = readl(smmu->regs + offset);
			currval &= ~mask;
			value &= mask;
			value |= currval;
			writel(value, smmu->regs + offset);
		}
	}
#endif
	return count;
}
1184 | |||
/* sysfs "show_smmu": human-readable dump of the smmu_device state. */
static ssize_t _sysfs_show_smmu(struct device *d,
			struct device_attribute *da, char *buf)
{
	struct smmu_device *smmu =
		container_of(d, struct smmu_device, sysfs_dev);
	ssize_t rv = 0;

	rv += sprintf(buf + rv , "      regs: %p\n", smmu->regs);
	rv += sprintf(buf + rv , "iovmm_base: %p\n", (void *)smmu->iovmm_base);
	rv += sprintf(buf + rv , "page_count: %lx\n", smmu->page_count);
	rv += sprintf(buf + rv , "  num_ases: %d\n", smmu->num_ases);
	rv += sprintf(buf + rv , "        as: %p\n", smmu->as);
	rv += sprintf(buf + rv , "    enable: %s\n",
			smmu->enable ? "yes" : "no");
	return rv;
}
1201 | |||
/* Read-only "show_smmu" attribute backed by the dump above */
static struct device_attribute _attr_show_smmu
	= __ATTR(show_smmu, S_IRUGO, _sysfs_show_smmu, NULL);

/*
 * Generate a sysfs "show" handler that prints one smmu_device field
 * with the given printf format.
 */
#define _SYSFS_SHOW_VALUE(name, field, fmt)		\
static ssize_t _sysfs_show_##name(struct device *d,	\
		struct device_attribute *da, char *buf)	\
{							\
	struct smmu_device *smmu =			\
		container_of(d, struct smmu_device, sysfs_dev);	\
	ssize_t rv = 0;					\
	rv += sprintf(buf + rv, fmt "\n", smmu->field);	\
	return rv;					\
}

/* Placeholder callback meaning "no post-store action" */
static void (*_sysfs_null_callback)(struct smmu_device *, unsigned long *) =
	NULL;
1218 | |||
/*
 * Generate a sysfs "store" handler that parses the input in the given
 * base, range-checks it against [0, ceil), assigns it to the smmu
 * field, and invokes the optional callback afterwards.
 * Note: "value" is u32, so the former "0 <= value" conjunct was a
 * tautology (flagged by -Wtype-limits); the upper-bound check alone
 * is equivalent.
 */
#define _SYSFS_SET_VALUE_DO(name, field, base, ceil, callback)	\
static ssize_t _sysfs_set_##name(struct device *d,		\
		struct device_attribute *da, const char *buf, size_t count) \
{								\
	int err;						\
	u32 value;						\
	struct smmu_device *smmu =				\
		container_of(d, struct smmu_device, sysfs_dev);	\
	err = kstrtou32(buf, base, &value);			\
	if (err)						\
		return err;					\
	if (value < ceil) {					\
		smmu->field = value;				\
		if (callback)					\
			callback(smmu, &smmu->field);		\
	}							\
	return count;						\
}
#ifdef CONFIG_TEGRA_IOVMM_SMMU_SYSFS
#define _SYSFS_SET_VALUE _SYSFS_SET_VALUE_DO
#else
/* Without the debug sysfs config, stores are accepted but ignored. */
#define _SYSFS_SET_VALUE(name, field, base, ceil, callback)	\
static ssize_t _sysfs_set_##name(struct device *d,		\
		struct device_attribute *da, const char *buf, size_t count) \
{								\
	return count;						\
}
#endif
1247 | |||
/* lowest_asid: first ASID the allocator will consider (decimal) */
_SYSFS_SHOW_VALUE(lowest_asid, lowest_asid, "%lu")
_SYSFS_SET_VALUE(lowest_asid, lowest_asid, 10,
		MC_SMMU_NUM_ASIDS, _sysfs_null_callback)
/* debug_asid: ASID the pdir/pde/pte attribute files operate on */
_SYSFS_SHOW_VALUE(debug_asid, debug_asid, "%lu")
_SYSFS_SET_VALUE(debug_asid, debug_asid, 10,
		MC_SMMU_NUM_ASIDS, _sysfs_null_callback)
/* signature_pid: always writable (uses _DO even without the config) */
_SYSFS_SHOW_VALUE(signature_pid, signature_pid, "%lu")
_SYSFS_SET_VALUE_DO(signature_pid, signature_pid, 10, PID_MAX_LIMIT + 1,
		_sysfs_null_callback)
1257 | |||
#ifdef CONFIG_TEGRA_IOVMM_SMMU_SYSFS
/* Clamp a stored attribute value to the legal attribute bits */
static void _sysfs_mask_attr(struct smmu_device *smmu, unsigned long *field)
{
	*field &= _MASK_ATTR;
}

/*
 * Clamp the PDIR attribute bits and push them into the PTB_DATA
 * register of the ASID currently selected by debug_asid.
 * NOTE(review): these register accesses are not under smmu->lock,
 * unlike similar sequences elsewhere in the file -- verify this
 * debug-only path cannot race an allocation.
 */
static void _sysfs_mask_pdir_attr(struct smmu_device *smmu,
	unsigned long *field)
{
	unsigned long pdir;

	_sysfs_mask_attr(smmu, field);
	writel(MC_SMMU_PTB_ASID_0_CURRENT_ASID(smmu->debug_asid),
		smmu->regs + MC_SMMU_PTB_ASID_0);
	pdir = readl(smmu->regs + MC_SMMU_PTB_DATA_0);
	pdir &= ~_MASK_ATTR;
	pdir |= *field;
	writel(pdir, smmu->regs + MC_SMMU_PTB_DATA_0);
	FLUSH_SMMU_REGS(smmu);
}

/* Callback pointers; NULL (compiled out) when sysfs writes are off */
static void (*_sysfs_mask_attr_callback)(struct smmu_device *,
		unsigned long *field) = &_sysfs_mask_attr;
static void (*_sysfs_mask_pdir_attr_callback)(struct smmu_device *,
		unsigned long *field) = &_sysfs_mask_pdir_attr;
#endif
1284 | |||
/* Per-debug_asid page-table attribute knobs (hex); stores are masked
 * and, for pdir, propagated to hardware via the callbacks above */
_SYSFS_SHOW_VALUE(pdir_attr, as[smmu->debug_asid].pdir_attr, "%lx")
_SYSFS_SET_VALUE(pdir_attr, as[smmu->debug_asid].pdir_attr, 16,
		_PDIR_ATTR + 1, _sysfs_mask_pdir_attr_callback)
_SYSFS_SHOW_VALUE(pde_attr, as[smmu->debug_asid].pde_attr, "%lx")
_SYSFS_SET_VALUE(pde_attr, as[smmu->debug_asid].pde_attr, 16,
		_PDE_ATTR + 1, _sysfs_mask_attr_callback)
_SYSFS_SHOW_VALUE(pte_attr, as[smmu->debug_asid].pte_attr, "%lx")
_SYSFS_SET_VALUE(pte_attr, as[smmu->debug_asid].pte_attr, 16,
		_PTE_ATTR + 1, _sysfs_mask_attr_callback)
1294 | |||
/* Writable tuning/debug attributes exposed alongside the registers */
static struct device_attribute _attr_values[] = {
	__ATTR(lowest_asid, S_IRUGO | S_IWUSR,
		_sysfs_show_lowest_asid, _sysfs_set_lowest_asid),
	__ATTR(debug_asid, S_IRUGO | S_IWUSR,
		_sysfs_show_debug_asid, _sysfs_set_debug_asid),
	__ATTR(signature_pid, S_IRUGO | S_IWUSR,
		_sysfs_show_signature_pid, _sysfs_set_signature_pid),

	__ATTR(pdir_attr, S_IRUGO | S_IWUSR,
		_sysfs_show_pdir_attr, _sysfs_set_pdir_attr),
	__ATTR(pde_attr, S_IRUGO | S_IWUSR,
		_sysfs_show_pde_attr, _sysfs_set_pde_attr),
	__ATTR(pte_attr, S_IRUGO | S_IWUSR,
		_sysfs_show_pte_attr, _sysfs_set_pte_attr),
};
1310 | |||
/*
 * Flat attribute array filled by _sysfs_smmu(): register attributes,
 * value attributes, the show_smmu dump, and a NULL terminator (the
 * "+ 3" leaves one spare slot beyond those two extras).
 */
static struct attribute *_smmu_attrs[
	ARRAY_SIZE(_smmu_reg_name_map) + ARRAY_SIZE(_attr_values) + 3];
static struct attribute_group _smmu_attr_group = {
	.attrs = _smmu_attrs
};
1316 | |||
/*
 * Register the SMMU-wide "smmu" sysfs node under the given parent.
 * Reference-counted: only the first caller performs the registration;
 * later calls just bump sysfs_use_count.
 */
static void _sysfs_smmu(struct smmu_device *smmu, struct device *parent)
{
	int i, j;

	if (smmu->sysfs_use_count++ > 0)
		return;
	/* Populate the flat attribute array: registers, values, dump */
	for (i = 0; i < ARRAY_SIZE(_smmu_reg_name_map); i++)
		_smmu_attrs[i] = &_smmu_reg_name_map[i].dev_attr.attr;
	for (j = 0; j < ARRAY_SIZE(_attr_values); j++)
		_smmu_attrs[i++] = &_attr_values[j].attr;
	_smmu_attrs[i++] = &_attr_show_smmu.attr;
	_smmu_attrs[i] = NULL;	/* sysfs terminator */

	dev_set_name(&smmu->sysfs_dev, "smmu");
	smmu->sysfs_dev.parent = parent;
	smmu->sysfs_dev.driver = NULL;
	smmu->sysfs_dev.release = NULL;
	if (device_register(&smmu->sysfs_dev)) {
		/*
		 * NOTE(review): the device model expects put_device() after
		 * a failed device_register(); omitted here, and release is
		 * NULL so adding it would WARN -- confirm before changing.
		 */
		pr_err("%s: failed to register smmu_sysfs_dev\n", __func__);
		smmu->sysfs_use_count--;
		return;
	}
	if (sysfs_create_group(&smmu->sysfs_dev.kobj, &_smmu_attr_group)) {
		/*
		 * NOTE(review): the device stays registered when the group
		 * creation fails -- likely wants device_unregister(); same
		 * release-is-NULL caveat as above applies.
		 */
		pr_err("%s: failed to create group for smmu_sysfs_dev\n",
			__func__);
		smmu->sysfs_use_count--;
		return;
	}
}
1346 | |||
/* Called when an AS is allocated: expose the shared SMMU sysfs node. */
static void _sysfs_create(struct smmu_as *as, struct device *parent)
{
	_sysfs_smmu(as->smmu, parent);
}