diff options
author | Robin Murphy <robin.murphy@arm.com> | 2016-01-26 12:13:13 -0500 |
---|---|---|
committer | Will Deacon <will.deacon@arm.com> | 2016-02-17 09:14:57 -0500 |
commit | e5fc9753b1a831466be9b45d0bf926eeaa8b84a0 (patch) | |
tree | 1ad4075104603c576ba895ab766f79a3222fdd46 | |
parent | 18558cae0272f8fd9647e69d3fec1565a7949865 (diff) |
iommu/io-pgtable: Add ARMv7 short descriptor support
Add a nearly-complete ARMv7 short descriptor implementation, omitting
only a few legacy and CPU-centric aspects which shouldn't be necessary
for IOMMU API use anyway.
Reviewed-by: Yong Wu <yong.wu@mediatek.com>
Tested-by: Yong Wu <yong.wu@mediatek.com>
Signed-off-by: Yong Wu <yong.wu@mediatek.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r-- | drivers/iommu/Kconfig | 19 | ||||
-rw-r--r-- | drivers/iommu/Makefile | 1 | ||||
-rw-r--r-- | drivers/iommu/io-pgtable-arm-v7s.c | 849 | ||||
-rw-r--r-- | drivers/iommu/io-pgtable.c | 3 | ||||
-rw-r--r-- | drivers/iommu/io-pgtable.h | 14 |
5 files changed, 885 insertions, 1 deletions
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index a1e75cba18e0..dc1aaa5d53e8 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig | |||
@@ -39,6 +39,25 @@ config IOMMU_IO_PGTABLE_LPAE_SELFTEST | |||
39 | 39 | ||
40 | If unsure, say N here. | 40 | If unsure, say N here. |
41 | 41 | ||
42 | config IOMMU_IO_PGTABLE_ARMV7S | ||
43 | bool "ARMv7/v8 Short Descriptor Format" | ||
44 | select IOMMU_IO_PGTABLE | ||
45 | depends on HAS_DMA && (ARM || ARM64 || COMPILE_TEST) | ||
46 | help | ||
47 | Enable support for the ARM Short-descriptor pagetable format. | ||
48 | This supports 32-bit virtual and physical addresses mapped using | ||
49 | 2-level tables with 4KB pages/1MB sections, and contiguous entries | ||
50 | for 64KB pages/16MB supersections if indicated by the IOMMU driver. | ||
51 | |||
52 | config IOMMU_IO_PGTABLE_ARMV7S_SELFTEST | ||
53 | bool "ARMv7s selftests" | ||
54 | depends on IOMMU_IO_PGTABLE_ARMV7S | ||
55 | help | ||
56 | Enable self-tests for ARMv7s page table allocator. This performs | ||
57 | a series of page-table consistency checks during boot. | ||
58 | |||
59 | If unsure, say N here. | ||
60 | |||
42 | endmenu | 61 | endmenu |
43 | 62 | ||
44 | config IOMMU_IOVA | 63 | config IOMMU_IOVA |
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 42fc0c25cf1a..2f9bfbc8cfd1 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile | |||
@@ -3,6 +3,7 @@ obj-$(CONFIG_IOMMU_API) += iommu-traces.o | |||
3 | obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o | 3 | obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o |
4 | obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o | 4 | obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o |
5 | obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o | 5 | obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o |
6 | obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o | ||
6 | obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o | 7 | obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o |
7 | obj-$(CONFIG_IOMMU_IOVA) += iova.o | 8 | obj-$(CONFIG_IOMMU_IOVA) += iova.o |
8 | obj-$(CONFIG_OF_IOMMU) += of_iommu.o | 9 | obj-$(CONFIG_OF_IOMMU) += of_iommu.o |
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c new file mode 100644 index 000000000000..efc7d1ede825 --- /dev/null +++ b/drivers/iommu/io-pgtable-arm-v7s.c | |||
@@ -0,0 +1,849 @@ | |||
1 | /* | ||
2 | * CPU-agnostic ARM page table allocator. | ||
3 | * | ||
4 | * ARMv7 Short-descriptor format, supporting | ||
5 | * - Basic memory attributes | ||
6 | * - Simplified access permissions (AP[2:1] model) | ||
7 | * - Backwards-compatible TEX remap | ||
8 | * - Large pages/supersections (if indicated by the caller) | ||
9 | * | ||
10 | * Not supporting: | ||
11 | * - Legacy access permissions (AP[2:0] model) | ||
12 | * | ||
13 | * Almost certainly never supporting: | ||
14 | * - PXN | ||
15 | * - Domains | ||
16 | * | ||
17 | * This program is free software; you can redistribute it and/or modify | ||
18 | * it under the terms of the GNU General Public License version 2 as | ||
19 | * published by the Free Software Foundation. | ||
20 | * | ||
21 | * This program is distributed in the hope that it will be useful, | ||
22 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
23 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
24 | * GNU General Public License for more details. | ||
25 | * | ||
26 | * You should have received a copy of the GNU General Public License | ||
27 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
28 | * | ||
29 | * Copyright (C) 2014-2015 ARM Limited | ||
30 | * Copyright (c) 2014-2015 MediaTek Inc. | ||
31 | */ | ||
32 | |||
33 | #define pr_fmt(fmt) "arm-v7s io-pgtable: " fmt | ||
34 | |||
35 | #include <linux/dma-mapping.h> | ||
36 | #include <linux/gfp.h> | ||
37 | #include <linux/iommu.h> | ||
38 | #include <linux/kernel.h> | ||
39 | #include <linux/kmemleak.h> | ||
40 | #include <linux/sizes.h> | ||
41 | #include <linux/slab.h> | ||
42 | #include <linux/types.h> | ||
43 | |||
44 | #include <asm/barrier.h> | ||
45 | |||
46 | #include "io-pgtable.h" | ||
47 | |||
48 | /* Struct accessors */ | ||
49 | #define io_pgtable_to_data(x) \ | ||
50 | container_of((x), struct arm_v7s_io_pgtable, iop) | ||
51 | |||
52 | #define io_pgtable_ops_to_data(x) \ | ||
53 | io_pgtable_to_data(io_pgtable_ops_to_pgtable(x)) | ||
54 | |||
55 | /* | ||
56 | * We have 32 bits total; 12 bits resolved at level 1, 8 bits at level 2, | ||
57 | * and 12 bits in a page. With some carefully-chosen coefficients we can | ||
58 | * hide the ugly inconsistencies behind these macros and at least let the | ||
59 | * rest of the code pretend to be somewhat sane. | ||
60 | */ | ||
61 | #define ARM_V7S_ADDR_BITS 32 | ||
62 | #define _ARM_V7S_LVL_BITS(lvl) (16 - (lvl) * 4) | ||
63 | #define ARM_V7S_LVL_SHIFT(lvl) (ARM_V7S_ADDR_BITS - (4 + 8 * (lvl))) | ||
64 | #define ARM_V7S_TABLE_SHIFT 10 | ||
65 | |||
66 | #define ARM_V7S_PTES_PER_LVL(lvl) (1 << _ARM_V7S_LVL_BITS(lvl)) | ||
67 | #define ARM_V7S_TABLE_SIZE(lvl) \ | ||
68 | (ARM_V7S_PTES_PER_LVL(lvl) * sizeof(arm_v7s_iopte)) | ||
69 | |||
70 | #define ARM_V7S_BLOCK_SIZE(lvl) (1UL << ARM_V7S_LVL_SHIFT(lvl)) | ||
71 | #define ARM_V7S_LVL_MASK(lvl) ((u32)(~0U << ARM_V7S_LVL_SHIFT(lvl))) | ||
72 | #define ARM_V7S_TABLE_MASK ((u32)(~0U << ARM_V7S_TABLE_SHIFT)) | ||
73 | #define _ARM_V7S_IDX_MASK(lvl) (ARM_V7S_PTES_PER_LVL(lvl) - 1) | ||
74 | #define ARM_V7S_LVL_IDX(addr, lvl) ({ \ | ||
75 | int _l = lvl; \ | ||
76 | ((u32)(addr) >> ARM_V7S_LVL_SHIFT(_l)) & _ARM_V7S_IDX_MASK(_l); \ | ||
77 | }) | ||
78 | |||
79 | /* | ||
80 | * Large page/supersection entries are effectively a block of 16 page/section | ||
81 | * entries, along the lines of the LPAE contiguous hint, but all with the | ||
82 | * same output address. For want of a better common name we'll call them | ||
83 | * "contiguous" versions of their respective page/section entries here, but | ||
84 | * noting the distinction (WRT to TLB maintenance) that they represent *one* | ||
85 | * entry repeated 16 times, not 16 separate entries (as in the LPAE case). | ||
86 | */ | ||
87 | #define ARM_V7S_CONT_PAGES 16 | ||
88 | |||
89 | /* PTE type bits: these are all mixed up with XN/PXN bits in most cases */ | ||
90 | #define ARM_V7S_PTE_TYPE_TABLE 0x1 | ||
91 | #define ARM_V7S_PTE_TYPE_PAGE 0x2 | ||
92 | #define ARM_V7S_PTE_TYPE_CONT_PAGE 0x1 | ||
93 | |||
94 | #define ARM_V7S_PTE_IS_VALID(pte) (((pte) & 0x3) != 0) | ||
95 | #define ARM_V7S_PTE_IS_TABLE(pte, lvl) (lvl == 1 && ((pte) & ARM_V7S_PTE_TYPE_TABLE)) | ||
96 | |||
97 | /* Page table bits */ | ||
98 | #define ARM_V7S_ATTR_XN(lvl) BIT(4 * (2 - (lvl))) | ||
99 | #define ARM_V7S_ATTR_B BIT(2) | ||
100 | #define ARM_V7S_ATTR_C BIT(3) | ||
101 | #define ARM_V7S_ATTR_NS_TABLE BIT(3) | ||
102 | #define ARM_V7S_ATTR_NS_SECTION BIT(19) | ||
103 | |||
104 | #define ARM_V7S_CONT_SECTION BIT(18) | ||
105 | #define ARM_V7S_CONT_PAGE_XN_SHIFT 15 | ||
106 | |||
107 | /* | ||
108 | * The attribute bits are consistently ordered*, but occupy bits [17:10] of | ||
109 | * a level 1 PTE vs. bits [11:4] at level 2. Thus we define the individual | ||
110 | * fields relative to that 8-bit block, plus a total shift relative to the PTE. | ||
111 | */ | ||
112 | #define ARM_V7S_ATTR_SHIFT(lvl) (16 - (lvl) * 6) | ||
113 | |||
114 | #define ARM_V7S_ATTR_MASK 0xff | ||
115 | #define ARM_V7S_ATTR_AP0 BIT(0) | ||
116 | #define ARM_V7S_ATTR_AP1 BIT(1) | ||
117 | #define ARM_V7S_ATTR_AP2 BIT(5) | ||
118 | #define ARM_V7S_ATTR_S BIT(6) | ||
119 | #define ARM_V7S_ATTR_NG BIT(7) | ||
120 | #define ARM_V7S_TEX_SHIFT 2 | ||
121 | #define ARM_V7S_TEX_MASK 0x7 | ||
122 | #define ARM_V7S_ATTR_TEX(val) (((val) & ARM_V7S_TEX_MASK) << ARM_V7S_TEX_SHIFT) | ||
123 | |||
124 | /* *well, except for TEX on level 2 large pages, of course :( */ | ||
125 | #define ARM_V7S_CONT_PAGE_TEX_SHIFT 6 | ||
126 | #define ARM_V7S_CONT_PAGE_TEX_MASK (ARM_V7S_TEX_MASK << ARM_V7S_CONT_PAGE_TEX_SHIFT) | ||
127 | |||
128 | /* Simplified access permissions */ | ||
129 | #define ARM_V7S_PTE_AF ARM_V7S_ATTR_AP0 | ||
130 | #define ARM_V7S_PTE_AP_UNPRIV ARM_V7S_ATTR_AP1 | ||
131 | #define ARM_V7S_PTE_AP_RDONLY ARM_V7S_ATTR_AP2 | ||
132 | |||
133 | /* Register bits */ | ||
134 | #define ARM_V7S_RGN_NC 0 | ||
135 | #define ARM_V7S_RGN_WBWA 1 | ||
136 | #define ARM_V7S_RGN_WT 2 | ||
137 | #define ARM_V7S_RGN_WB 3 | ||
138 | |||
139 | #define ARM_V7S_PRRR_TYPE_DEVICE 1 | ||
140 | #define ARM_V7S_PRRR_TYPE_NORMAL 2 | ||
141 | #define ARM_V7S_PRRR_TR(n, type) (((type) & 0x3) << ((n) * 2)) | ||
142 | #define ARM_V7S_PRRR_DS0 BIT(16) | ||
143 | #define ARM_V7S_PRRR_DS1 BIT(17) | ||
144 | #define ARM_V7S_PRRR_NS0 BIT(18) | ||
145 | #define ARM_V7S_PRRR_NS1 BIT(19) | ||
146 | #define ARM_V7S_PRRR_NOS(n) BIT((n) + 24) | ||
147 | |||
148 | #define ARM_V7S_NMRR_IR(n, attr) (((attr) & 0x3) << ((n) * 2)) | ||
149 | #define ARM_V7S_NMRR_OR(n, attr) (((attr) & 0x3) << ((n) * 2 + 16)) | ||
150 | |||
151 | #define ARM_V7S_TTBR_S BIT(1) | ||
152 | #define ARM_V7S_TTBR_NOS BIT(5) | ||
153 | #define ARM_V7S_TTBR_ORGN_ATTR(attr) (((attr) & 0x3) << 3) | ||
154 | #define ARM_V7S_TTBR_IRGN_ATTR(attr) \ | ||
155 | ((((attr) & 0x1) << 6) | (((attr) & 0x2) >> 1)) | ||
156 | |||
157 | #define ARM_V7S_TCR_PD1 BIT(5) | ||
158 | |||
159 | typedef u32 arm_v7s_iopte; | ||
160 | |||
161 | static bool selftest_running; | ||
162 | |||
163 | struct arm_v7s_io_pgtable { | ||
164 | struct io_pgtable iop; | ||
165 | |||
166 | arm_v7s_iopte *pgd; | ||
167 | struct kmem_cache *l2_tables; | ||
168 | }; | ||
169 | |||
170 | static dma_addr_t __arm_v7s_dma_addr(void *pages) | ||
171 | { | ||
172 | return (dma_addr_t)virt_to_phys(pages); | ||
173 | } | ||
174 | |||
175 | static arm_v7s_iopte *iopte_deref(arm_v7s_iopte pte, int lvl) | ||
176 | { | ||
177 | if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) | ||
178 | pte &= ARM_V7S_TABLE_MASK; | ||
179 | else | ||
180 | pte &= ARM_V7S_LVL_MASK(lvl); | ||
181 | return phys_to_virt(pte); | ||
182 | } | ||
183 | |||
/*
 * Allocate and DMA-map a table for the given level: the level-1 table
 * (16KB: 4096 x 4-byte entries) comes from the DMA-zone page allocator,
 * level-2 tables (1KB) from the naturally-aligned kmem_cache.
 * Returns NULL on allocation or DMA-mapping failure.
 */
static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
				   struct arm_v7s_io_pgtable *data)
{
	struct device *dev = data->iop.cfg.iommu_dev;
	dma_addr_t dma;
	size_t size = ARM_V7S_TABLE_SIZE(lvl);
	void *table = NULL;

	if (lvl == 1)
		table = (void *)__get_dma_pages(__GFP_ZERO, get_order(size));
	else if (lvl == 2)
		table = kmem_cache_zalloc(data->l2_tables, gfp);
	/* Selftests run without a real device, so skip DMA mapping then */
	if (table && !selftest_running) {
		dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(table))
			goto out_unmap;
	}
	/* Table memory is held by the hardware walker; not a leak */
	kmemleak_ignore(table);
	return table;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	if (lvl == 1)
		free_pages((unsigned long)table, get_order(size));
	else
		kmem_cache_free(data->l2_tables, table);
	return NULL;
}
221 | |||
222 | static void __arm_v7s_free_table(void *table, int lvl, | ||
223 | struct arm_v7s_io_pgtable *data) | ||
224 | { | ||
225 | struct device *dev = data->iop.cfg.iommu_dev; | ||
226 | size_t size = ARM_V7S_TABLE_SIZE(lvl); | ||
227 | |||
228 | if (!selftest_running) | ||
229 | dma_unmap_single(dev, __arm_v7s_dma_addr(table), size, | ||
230 | DMA_TO_DEVICE); | ||
231 | if (lvl == 1) | ||
232 | free_pages((unsigned long)table, get_order(size)); | ||
233 | else | ||
234 | kmem_cache_free(data->l2_tables, table); | ||
235 | } | ||
236 | |||
237 | static void __arm_v7s_pte_sync(arm_v7s_iopte *ptep, int num_entries, | ||
238 | struct io_pgtable_cfg *cfg) | ||
239 | { | ||
240 | if (selftest_running) | ||
241 | return; | ||
242 | |||
243 | dma_sync_single_for_device(cfg->iommu_dev, __arm_v7s_dma_addr(ptep), | ||
244 | num_entries * sizeof(*ptep), DMA_TO_DEVICE); | ||
245 | } | ||
246 | static void __arm_v7s_set_pte(arm_v7s_iopte *ptep, arm_v7s_iopte pte, | ||
247 | int num_entries, struct io_pgtable_cfg *cfg) | ||
248 | { | ||
249 | int i; | ||
250 | |||
251 | for (i = 0; i < num_entries; i++) | ||
252 | ptep[i] = pte; | ||
253 | |||
254 | __arm_v7s_pte_sync(ptep, num_entries, cfg); | ||
255 | } | ||
256 | |||
257 | static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl, | ||
258 | struct io_pgtable_cfg *cfg) | ||
259 | { | ||
260 | bool ap = !(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS); | ||
261 | arm_v7s_iopte pte = ARM_V7S_ATTR_NG | ARM_V7S_ATTR_S | | ||
262 | ARM_V7S_ATTR_TEX(1); | ||
263 | |||
264 | if (ap) { | ||
265 | pte |= ARM_V7S_PTE_AF | ARM_V7S_PTE_AP_UNPRIV; | ||
266 | if (!(prot & IOMMU_WRITE)) | ||
267 | pte |= ARM_V7S_PTE_AP_RDONLY; | ||
268 | } | ||
269 | pte <<= ARM_V7S_ATTR_SHIFT(lvl); | ||
270 | |||
271 | if ((prot & IOMMU_NOEXEC) && ap) | ||
272 | pte |= ARM_V7S_ATTR_XN(lvl); | ||
273 | if (prot & IOMMU_CACHE) | ||
274 | pte |= ARM_V7S_ATTR_B | ARM_V7S_ATTR_C; | ||
275 | |||
276 | return pte; | ||
277 | } | ||
278 | |||
279 | static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl) | ||
280 | { | ||
281 | int prot = IOMMU_READ; | ||
282 | |||
283 | if (pte & (ARM_V7S_PTE_AP_RDONLY << ARM_V7S_ATTR_SHIFT(lvl))) | ||
284 | prot |= IOMMU_WRITE; | ||
285 | if (pte & ARM_V7S_ATTR_C) | ||
286 | prot |= IOMMU_CACHE; | ||
287 | |||
288 | return prot; | ||
289 | } | ||
290 | |||
/*
 * Convert a page/section PTE into its "contiguous" (large page /
 * supersection) equivalent. At level 2 the XN and TEX fields live at
 * different bit positions in a large page, so they must be relocated
 * along with swapping the type bits.
 */
static arm_v7s_iopte arm_v7s_pte_to_cont(arm_v7s_iopte pte, int lvl)
{
	if (lvl == 1) {
		/* Sections just gain the supersection bit */
		pte |= ARM_V7S_CONT_SECTION;
	} else if (lvl == 2) {
		arm_v7s_iopte xn = pte & ARM_V7S_ATTR_XN(lvl);
		arm_v7s_iopte tex = pte & ARM_V7S_CONT_PAGE_TEX_MASK;

		/* XOR clears the small-page XN/TEX/type bits... */
		pte ^= xn | tex | ARM_V7S_PTE_TYPE_PAGE;
		/* ...then set them again at their large-page positions */
		pte |= (xn << ARM_V7S_CONT_PAGE_XN_SHIFT) |
		       (tex << ARM_V7S_CONT_PAGE_TEX_SHIFT) |
		       ARM_V7S_PTE_TYPE_CONT_PAGE;
	}
	return pte;
}
306 | |||
/*
 * Convert a "contiguous" (large page/supersection) PTE back into an
 * individual page/section PTE; exact inverse of arm_v7s_pte_to_cont().
 */
static arm_v7s_iopte arm_v7s_cont_to_pte(arm_v7s_iopte pte, int lvl)
{
	if (lvl == 1) {
		/* Sections just lose the supersection bit */
		pte &= ~ARM_V7S_CONT_SECTION;
	} else if (lvl == 2) {
		arm_v7s_iopte xn = pte & BIT(ARM_V7S_CONT_PAGE_XN_SHIFT);
		arm_v7s_iopte tex = pte & (ARM_V7S_CONT_PAGE_TEX_MASK <<
					   ARM_V7S_CONT_PAGE_TEX_SHIFT);

		/* Clear the large-page XN/TEX/type bits... */
		pte ^= xn | tex | ARM_V7S_PTE_TYPE_CONT_PAGE;
		/* ...and move them back to their small-page positions */
		pte |= (xn >> ARM_V7S_CONT_PAGE_XN_SHIFT) |
		       (tex >> ARM_V7S_CONT_PAGE_TEX_SHIFT) |
		       ARM_V7S_PTE_TYPE_PAGE;
	}
	return pte;
}
323 | |||
324 | static bool arm_v7s_pte_is_cont(arm_v7s_iopte pte, int lvl) | ||
325 | { | ||
326 | if (lvl == 1 && !ARM_V7S_PTE_IS_TABLE(pte, lvl)) | ||
327 | return pte & ARM_V7S_CONT_SECTION; | ||
328 | else if (lvl == 2) | ||
329 | return !(pte & ARM_V7S_PTE_TYPE_PAGE); | ||
330 | return false; | ||
331 | } | ||
332 | |||
333 | static int __arm_v7s_unmap(struct arm_v7s_io_pgtable *, unsigned long, | ||
334 | size_t, int, arm_v7s_iopte *); | ||
335 | |||
/*
 * Install num_entries identical leaf entries mapping iova -> paddr at
 * this level. Any subtable already in the way is unmapped and freed
 * first; an existing leaf entry is an error (callers must unmap first).
 * Returns 0 on success or a negative errno.
 */
static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data,
			    unsigned long iova, phys_addr_t paddr, int prot,
			    int lvl, int num_entries, arm_v7s_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_v7s_iopte pte = arm_v7s_prot_to_pte(prot, lvl, cfg);
	int i;

	for (i = 0; i < num_entries; i++)
		if (ARM_V7S_PTE_IS_TABLE(ptep[i], lvl)) {
			/*
			 * We need to unmap and free the old table before
			 * overwriting it with a block entry.
			 */
			arm_v7s_iopte *tblp;
			size_t sz = ARM_V7S_BLOCK_SIZE(lvl);

			/* Rewind to the base of this level's table */
			tblp = ptep - ARM_V7S_LVL_IDX(iova, lvl);
			if (WARN_ON(__arm_v7s_unmap(data, iova + i * sz,
						    sz, lvl, tblp) != sz))
				return -EINVAL;
		} else if (ptep[i]) {
			/* We require an unmap first */
			WARN_ON(!selftest_running);
			return -EEXIST;
		}

	/* 0x2 is both the small-page and the section type encoding */
	pte |= ARM_V7S_PTE_TYPE_PAGE;
	if (lvl == 1 && (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS))
		pte |= ARM_V7S_ATTR_NS_SECTION;

	/* >1 entry means a large page/supersection ("contiguous") run */
	if (num_entries > 1)
		pte = arm_v7s_pte_to_cont(pte, lvl);

	pte |= paddr & ARM_V7S_LVL_MASK(lvl);

	__arm_v7s_set_pte(ptep, pte, num_entries, cfg);
	return 0;
}
375 | |||
/*
 * Recursively install a mapping of 'size' bytes at iova -> paddr,
 * walking down from the given level and allocating an intermediate
 * level-2 table where needed.
 */
static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot,
			 int lvl, arm_v7s_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_v7s_iopte pte, *cptep;
	/* Non-zero iff 'size' is at least one block at this level */
	int num_entries = size >> ARM_V7S_LVL_SHIFT(lvl);

	/* Find our entry at the current level */
	ptep += ARM_V7S_LVL_IDX(iova, lvl);

	/* If we can install a leaf entry at this level, then do so */
	if (num_entries)
		return arm_v7s_init_pte(data, iova, paddr, prot,
					lvl, num_entries, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl == 2))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = *ptep;
	if (!pte) {
		cptep = __arm_v7s_alloc_table(lvl + 1, GFP_ATOMIC, data);
		if (!cptep)
			return -ENOMEM;

		pte = virt_to_phys(cptep) | ARM_V7S_PTE_TYPE_TABLE;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
			pte |= ARM_V7S_ATTR_NS_TABLE;

		__arm_v7s_set_pte(ptep, pte, 1, cfg);
	} else {
		cptep = iopte_deref(pte, lvl);
	}

	/* Rinse, repeat */
	return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}
415 | |||
/*
 * io_pgtable_ops::map entry point: map 'size' bytes at iova -> paddr
 * with the given IOMMU_* prot flags. 'size' is expected to be one of
 * the sizes in cfg->pgsize_bitmap.
 */
static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
		       phys_addr_t paddr, size_t size, int prot)
{
	struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	const struct iommu_gather_ops *tlb = cfg->tlb;
	void *cookie = data->iop.cookie;
	int ret;

	/* If no access, then nothing to do */
	if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	if (cfg->quirks & IO_PGTABLE_QUIRK_TLBI_ON_MAP) {
		/* Some IOMMUs cache invalid entries; flush them out */
		tlb->tlb_add_flush(iova, size, ARM_V7S_BLOCK_SIZE(2), false,
				   cookie);
		tlb->tlb_sync(cookie);
	} else {
		wmb();
	}

	return ret;
}
444 | |||
445 | static void arm_v7s_free_pgtable(struct io_pgtable *iop) | ||
446 | { | ||
447 | struct arm_v7s_io_pgtable *data = io_pgtable_to_data(iop); | ||
448 | int i; | ||
449 | |||
450 | for (i = 0; i < ARM_V7S_PTES_PER_LVL(1); i++) { | ||
451 | arm_v7s_iopte pte = data->pgd[i]; | ||
452 | |||
453 | if (ARM_V7S_PTE_IS_TABLE(pte, 1)) | ||
454 | __arm_v7s_free_table(iopte_deref(pte, 1), 2, data); | ||
455 | } | ||
456 | __arm_v7s_free_table(data->pgd, 1, data); | ||
457 | kmem_cache_destroy(data->l2_tables); | ||
458 | kfree(data); | ||
459 | } | ||
460 | |||
/*
 * Split a contiguous run (large page/supersection) back into its 16
 * individual page/section entries covering the same addresses, then
 * flush the TLB for the whole contiguous region.
 */
static void arm_v7s_split_cont(struct arm_v7s_io_pgtable *data,
			       unsigned long iova, int idx, int lvl,
			       arm_v7s_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	void *cookie = data->iop.cookie;
	arm_v7s_iopte pte;
	size_t size = ARM_V7S_BLOCK_SIZE(lvl);
	int i;

	/* Rewind to the first entry of the contiguous run */
	ptep -= idx & (ARM_V7S_CONT_PAGES - 1);
	pte = arm_v7s_cont_to_pte(*ptep, lvl);
	for (i = 0; i < ARM_V7S_CONT_PAGES; i++) {
		ptep[i] = pte;
		/* Advance the output address by one block per entry */
		pte += size;
	}

	__arm_v7s_pte_sync(ptep, ARM_V7S_CONT_PAGES, cfg);

	/* The old TLB entry covered the whole contiguous region */
	size *= ARM_V7S_CONT_PAGES;
	cfg->tlb->tlb_add_flush(iova, size, size, true, cookie);
	cfg->tlb->tlb_sync(cookie);
}
484 | |||
/*
 * Unmap part of a level-1 (super)section by building a level-2 table
 * that remaps the whole block except the part being unmapped, then
 * swapping it in for the old section entry.
 * Returns the number of bytes unmapped (0 on failure).
 */
static int arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
				   unsigned long iova, size_t size,
				   arm_v7s_iopte *ptep)
{
	unsigned long blk_start, blk_end, blk_size;
	phys_addr_t blk_paddr;
	arm_v7s_iopte table = 0;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	/* Carry the old section's permissions over to the new entries */
	int prot = arm_v7s_pte_to_prot(*ptep, 1);

	blk_size = ARM_V7S_BLOCK_SIZE(1);
	blk_start = iova & ARM_V7S_LVL_MASK(1);
	blk_end = blk_start + ARM_V7S_BLOCK_SIZE(1);
	blk_paddr = *ptep & ARM_V7S_LVL_MASK(1);

	for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
		arm_v7s_iopte *tablep;

		/* Unmap! (skip remapping the region being removed) */
		if (blk_start == iova)
			continue;

		/* __arm_v7s_map expects a pointer to the start of the table */
		tablep = &table - ARM_V7S_LVL_IDX(blk_start, 1);
		if (__arm_v7s_map(data, blk_start, blk_paddr, size, prot, 1,
				  tablep) < 0) {
			if (table) {
				/* Free the table we allocated */
				tablep = iopte_deref(table, 1);
				__arm_v7s_free_table(tablep, 2, data);
			}
			return 0; /* Bytes unmapped */
		}
	}

	/* Swap in the new table entry and flush the old section walk */
	__arm_v7s_set_pte(ptep, table, 1, cfg);
	iova &= ~(blk_size - 1);
	cfg->tlb->tlb_add_flush(iova, blk_size, blk_size, true, data->iop.cookie);
	return size;
}
525 | |||
/*
 * Recursively unmap 'size' bytes at 'iova' starting from the given
 * level. Handles whole leaf entries (including contiguous runs),
 * splitting contiguous runs, and splitting level-1 sections when only
 * part of them is unmapped.
 * Returns the number of bytes unmapped (0 on failure).
 */
static int __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
			   unsigned long iova, size_t size, int lvl,
			   arm_v7s_iopte *ptep)
{
	arm_v7s_iopte pte[ARM_V7S_CONT_PAGES];
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	const struct iommu_gather_ops *tlb = cfg->tlb;
	void *cookie = data->iop.cookie;
	int idx, i = 0, num_entries = size >> ARM_V7S_LVL_SHIFT(lvl);

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl > 2))
		return 0;

	idx = ARM_V7S_LVL_IDX(iova, lvl);
	ptep += idx;
	do {
		/* Snapshot the PTEs; every one must be valid to proceed */
		if (WARN_ON(!ARM_V7S_PTE_IS_VALID(ptep[i])))
			return 0;
		pte[i] = ptep[i];
	} while (++i < num_entries);

	/*
	 * If we've hit a contiguous 'large page' entry at this level, it
	 * needs splitting first, unless we're unmapping the whole lot.
	 */
	if (num_entries <= 1 && arm_v7s_pte_is_cont(pte[0], lvl))
		arm_v7s_split_cont(data, iova, idx, lvl, ptep);

	/* If the size matches this level, we're in the right place */
	if (num_entries) {
		size_t blk_size = ARM_V7S_BLOCK_SIZE(lvl);

		__arm_v7s_set_pte(ptep, 0, num_entries, cfg);

		for (i = 0; i < num_entries; i++) {
			if (ARM_V7S_PTE_IS_TABLE(pte[i], lvl)) {
				/* Also flush any partial walks */
				tlb->tlb_add_flush(iova, blk_size,
					ARM_V7S_BLOCK_SIZE(lvl + 1),
					false, cookie);
				tlb->tlb_sync(cookie);
				/* Safe to free now the walk is flushed */
				ptep = iopte_deref(pte[i], lvl);
				__arm_v7s_free_table(ptep, lvl + 1, data);
			} else {
				tlb->tlb_add_flush(iova, blk_size, blk_size,
						   true, cookie);
			}
			iova += blk_size;
		}
		return size;
	} else if (lvl == 1 && !ARM_V7S_PTE_IS_TABLE(pte[0], lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_v7s_split_blk_unmap(data, iova, size, ptep);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte[0], lvl);
	return __arm_v7s_unmap(data, iova, size, lvl + 1, ptep);
}
589 | |||
590 | static int arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova, | ||
591 | size_t size) | ||
592 | { | ||
593 | size_t unmapped; | ||
594 | struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops); | ||
595 | struct io_pgtable *iop = &data->iop; | ||
596 | |||
597 | unmapped = __arm_v7s_unmap(data, iova, size, 1, data->pgd); | ||
598 | if (unmapped) | ||
599 | iop->cfg.tlb->tlb_sync(iop->cookie); | ||
600 | |||
601 | return unmapped; | ||
602 | } | ||
603 | |||
/*
 * Walk the tables to translate an IOVA to a physical address;
 * returns 0 if the address is not mapped.
 */
static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops,
					unsigned long iova)
{
	struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_v7s_iopte *ptep = data->pgd, pte;
	int lvl = 0;
	u32 mask;

	do {
		pte = ptep[ARM_V7S_LVL_IDX(iova, ++lvl)];
		ptep = iopte_deref(pte, lvl);
	} while (ARM_V7S_PTE_IS_TABLE(pte, lvl));

	if (!ARM_V7S_PTE_IS_VALID(pte))
		return 0;

	mask = ARM_V7S_LVL_MASK(lvl);
	/* Contiguous entries cover 16x the block size at this level */
	if (arm_v7s_pte_is_cont(pte, lvl))
		mask *= ARM_V7S_CONT_PAGES;
	return (pte & mask) | (iova & ~mask);
}
625 | |||
/*
 * Allocate and initialise an ARMv7 short-descriptor pgtable instance:
 * sets up the level-2 table cache, fills in the register values the
 * IOMMU driver should program (TCR/PRRR/NMRR/TTBRs), and allocates an
 * empty level-1 table. Returns NULL on failure.
 */
static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
						void *cookie)
{
	struct arm_v7s_io_pgtable *data;

	/* The short-descriptor format covers at most 32 bits of VA/PA */
	if (cfg->ias > ARM_V7S_ADDR_BITS || cfg->oas > ARM_V7S_ADDR_BITS)
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	/* L2 tables must be naturally aligned and DMA-addressable */
	data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
					    ARM_V7S_TABLE_SIZE(2),
					    ARM_V7S_TABLE_SIZE(2),
					    SLAB_CACHE_DMA, NULL);
	if (!data->l2_tables)
		goto out_free_data;

	data->iop.ops = (struct io_pgtable_ops) {
		.map = arm_v7s_map,
		.unmap = arm_v7s_unmap,
		.iova_to_phys = arm_v7s_iova_to_phys,
	};

	/* We have to do this early for __arm_v7s_alloc_table to work... */
	data->iop.cfg = *cfg;

	/*
	 * Unless the IOMMU driver indicates supersection support by
	 * having SZ_16M set in the initial bitmap, they won't be used.
	 */
	cfg->pgsize_bitmap &= SZ_4K | SZ_64K | SZ_1M | SZ_16M;

	/* TCR: T0SZ=0, disable TTBR1 */
	cfg->arm_v7s_cfg.tcr = ARM_V7S_TCR_PD1;

	/*
	 * TEX remap: the indices used map to the closest equivalent types
	 * under the non-TEX-remap interpretation of those attribute bits,
	 * excepting various implementation-defined aspects of shareability.
	 */
	cfg->arm_v7s_cfg.prrr = ARM_V7S_PRRR_TR(1, ARM_V7S_PRRR_TYPE_DEVICE) |
				ARM_V7S_PRRR_TR(4, ARM_V7S_PRRR_TYPE_NORMAL) |
				ARM_V7S_PRRR_TR(7, ARM_V7S_PRRR_TYPE_NORMAL) |
				ARM_V7S_PRRR_DS0 | ARM_V7S_PRRR_DS1 |
				ARM_V7S_PRRR_NS1 | ARM_V7S_PRRR_NOS(7);
	cfg->arm_v7s_cfg.nmrr = ARM_V7S_NMRR_IR(7, ARM_V7S_RGN_WBWA) |
				ARM_V7S_NMRR_OR(7, ARM_V7S_RGN_WBWA);

	/* Looking good; allocate a pgd */
	data->pgd = __arm_v7s_alloc_table(1, GFP_KERNEL, data);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBRs */
	cfg->arm_v7s_cfg.ttbr[0] = virt_to_phys(data->pgd) |
				   ARM_V7S_TTBR_S | ARM_V7S_TTBR_NOS |
				   ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
				   ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA);
	cfg->arm_v7s_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kmem_cache_destroy(data->l2_tables);
	kfree(data);
	return NULL;
}
697 | |||
/* Format hooks registered with the core io-pgtable code (ARM_V7S slot) */
struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns = {
	.alloc = arm_v7s_alloc_pgtable,
	.free = arm_v7s_free_pgtable,
};
702 | |||
703 | #ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S_SELFTEST | ||
704 | |||
/* Cookie handed to (and checked by) the dummy TLB ops during selftests */
static struct io_pgtable_cfg *cfg_cookie;

/* Selftest TLB hooks: no real hardware, just sanity-check the arguments */
static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	/* Flush sizes should always be one of the supported page sizes */
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static struct iommu_gather_ops dummy_tlb_ops = {
	.tlb_flush_all = dummy_tlb_flush_all,
	.tlb_add_flush = dummy_tlb_add_flush,
	.tlb_sync = dummy_tlb_sync,
};
729 | |||
/* Report a selftest failure and evaluate to -EFAULT */
#define __FAIL(ops)	({ \
		WARN(1, "selftest: test failed\n"); \
		selftest_running = false; \
		-EFAULT; \
})
735 | |||
/*
 * Boot-time consistency checks: exercise map/unmap/iova_to_phys for
 * every supported granule size against a dummy pgtable instance.
 * Returns 0 on success, -EINVAL/-EFAULT on failure.
 */
static int __init arm_v7s_do_selftests(void)
{
	struct io_pgtable_ops *ops;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 32,
		.ias = 32,
		.quirks = IO_PGTABLE_QUIRK_ARM_NS,
		.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
	};
	unsigned int iova, size, iova_start;
	unsigned int i, loopnr = 0;

	/* Tells the allocator paths to skip real DMA mapping */
	selftest_running = true;

	cfg_cookie = &cfg;

	ops = alloc_io_pgtable_ops(ARM_V7S, &cfg, &cfg);
	if (!ops) {
		pr_err("selftest: failed to allocate io pgtable ops\n");
		return -EINVAL;
	}

	/*
	 * Initial sanity checks.
	 * Empty page tables shouldn't provide any translations.
	 */
	if (ops->iova_to_phys(ops, 42))
		return __FAIL(ops);

	if (ops->iova_to_phys(ops, SZ_1G + 42))
		return __FAIL(ops);

	if (ops->iova_to_phys(ops, SZ_2G + 42))
		return __FAIL(ops);

	/*
	 * Distinct mappings of different granule sizes.
	 */
	iova = 0;
	i = find_first_bit(&cfg.pgsize_bitmap, BITS_PER_LONG);
	while (i != BITS_PER_LONG) {
		size = 1UL << i;
		/* Identity-map one region per granule size */
		if (ops->map(ops, iova, iova, size, IOMMU_READ |
						    IOMMU_WRITE |
						    IOMMU_NOEXEC |
						    IOMMU_CACHE))
			return __FAIL(ops);

		/* Overlapping mappings (must be rejected) */
		if (!ops->map(ops, iova, iova + size, size,
			      IOMMU_READ | IOMMU_NOEXEC))
			return __FAIL(ops);

		if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
			return __FAIL(ops);

		iova += SZ_16M;
		i++;
		i = find_next_bit(&cfg.pgsize_bitmap, BITS_PER_LONG, i);
		loopnr++;
	}

	/* Partial unmap */
	i = 1;
	size = 1UL << __ffs(cfg.pgsize_bitmap);
	while (i < loopnr) {
		iova_start = i * SZ_16M;
		/* Punch the smallest page out of each larger mapping */
		if (ops->unmap(ops, iova_start + size, size) != size)
			return __FAIL(ops);

		/* Remap of partial unmap */
		if (ops->map(ops, iova_start + size, size, size, IOMMU_READ))
			return __FAIL(ops);

		if (ops->iova_to_phys(ops, iova_start + size + 42)
		    != (size + 42))
			return __FAIL(ops);
		i++;
	}

	/* Full unmap */
	iova = 0;
	i = find_first_bit(&cfg.pgsize_bitmap, BITS_PER_LONG);
	while (i != BITS_PER_LONG) {
		size = 1UL << i;

		if (ops->unmap(ops, iova, size) != size)
			return __FAIL(ops);

		if (ops->iova_to_phys(ops, iova + 42))
			return __FAIL(ops);

		/* Remap full block */
		if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
			return __FAIL(ops);

		if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
			return __FAIL(ops);

		iova += SZ_16M;
		i++;
		i = find_next_bit(&cfg.pgsize_bitmap, BITS_PER_LONG, i);
	}

	free_io_pgtable_ops(ops);

	selftest_running = false;

	pr_info("self test ok\n");
	return 0;
}
subsys_initcall(arm_v7s_do_selftests);
849 | #endif | ||
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c index 6f2e319d4f04..8c615b77bcc7 100644 --- a/drivers/iommu/io-pgtable.c +++ b/drivers/iommu/io-pgtable.c | |||
@@ -33,6 +33,9 @@ io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = | |||
33 | [ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns, | 33 | [ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns, |
34 | [ARM_64_LPAE_S2] = &io_pgtable_arm_64_lpae_s2_init_fns, | 34 | [ARM_64_LPAE_S2] = &io_pgtable_arm_64_lpae_s2_init_fns, |
35 | #endif | 35 | #endif |
36 | #ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S | ||
37 | [ARM_V7S] = &io_pgtable_arm_v7s_init_fns, | ||
38 | #endif | ||
36 | }; | 39 | }; |
37 | 40 | ||
38 | struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt, | 41 | struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt, |
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h index 36673c83de58..aa57073b1d8d 100644 --- a/drivers/iommu/io-pgtable.h +++ b/drivers/iommu/io-pgtable.h | |||
@@ -1,5 +1,6 @@ | |||
1 | #ifndef __IO_PGTABLE_H | 1 | #ifndef __IO_PGTABLE_H |
2 | #define __IO_PGTABLE_H | 2 | #define __IO_PGTABLE_H |
3 | #include <linux/bitops.h> | ||
3 | 4 | ||
4 | /* | 5 | /* |
5 | * Public API for use by IOMMU drivers | 6 | * Public API for use by IOMMU drivers |
@@ -9,6 +10,7 @@ enum io_pgtable_fmt { | |||
9 | ARM_32_LPAE_S2, | 10 | ARM_32_LPAE_S2, |
10 | ARM_64_LPAE_S1, | 11 | ARM_64_LPAE_S1, |
11 | ARM_64_LPAE_S2, | 12 | ARM_64_LPAE_S2, |
13 | ARM_V7S, | ||
12 | IO_PGTABLE_NUM_FMTS, | 14 | IO_PGTABLE_NUM_FMTS, |
13 | }; | 15 | }; |
14 | 16 | ||
@@ -45,7 +47,9 @@ struct iommu_gather_ops { | |||
45 | * page table walker. | 47 | * page table walker. |
46 | */ | 48 | */ |
47 | struct io_pgtable_cfg { | 49 | struct io_pgtable_cfg { |
48 | #define IO_PGTABLE_QUIRK_ARM_NS (1 << 0) /* Set NS bit in PTEs */ | 50 | #define IO_PGTABLE_QUIRK_ARM_NS BIT(0) /* Set NS bit in PTEs */ |
51 | #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1) /* No AP/XN bits */ | ||
52 | #define IO_PGTABLE_QUIRK_TLBI_ON_MAP BIT(2) /* TLB Inv. on map */ | ||
49 | int quirks; | 53 | int quirks; |
50 | unsigned long pgsize_bitmap; | 54 | unsigned long pgsize_bitmap; |
51 | unsigned int ias; | 55 | unsigned int ias; |
@@ -65,6 +69,13 @@ struct io_pgtable_cfg { | |||
65 | u64 vttbr; | 69 | u64 vttbr; |
66 | u64 vtcr; | 70 | u64 vtcr; |
67 | } arm_lpae_s2_cfg; | 71 | } arm_lpae_s2_cfg; |
72 | |||
73 | struct { | ||
74 | u32 ttbr[2]; | ||
75 | u32 tcr; | ||
76 | u32 nmrr; | ||
77 | u32 prrr; | ||
78 | } arm_v7s_cfg; | ||
68 | }; | 79 | }; |
69 | }; | 80 | }; |
70 | 81 | ||
@@ -149,5 +160,6 @@ extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns; | |||
149 | extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns; | 160 | extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns; |
150 | extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns; | 161 | extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns; |
151 | extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns; | 162 | extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns; |
163 | extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns; | ||
152 | 164 | ||
153 | #endif /* __IO_PGTABLE_H */ | 165 | #endif /* __IO_PGTABLE_H */ |