Diffstat (limited to 'arch/arm/plat-omap/iommu.c')
-rw-r--r--  arch/arm/plat-omap/iommu.c | 996
1 file changed, 996 insertions, 0 deletions
diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c
new file mode 100644
index 000000000000..4cf449fa2cb5
--- /dev/null
+++ b/arch/arm/plat-omap/iommu.c
@@ -0,0 +1,996 @@
/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/clk.h>
#include <linux/platform_device.h>

#include <asm/cacheflush.h>

#include <mach/iommu.h>

#include "iopgtable.h"

/* accommodate the difference between omap1 and omap2/3 */
static const struct iommu_functions *arch_iommu;

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;

/**
 * install_iommu_arch - Install architecture specific iommu functions
 * @ops: a pointer to architecture specific iommu functions
 *
 * There are several kinds of iommu algorithms (tlb, pagetable) among the
 * omap series. This interface installs such an iommu algorithm.
 **/
int install_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu)
		return -EBUSY;

	arch_iommu = ops;
	return 0;
}
EXPORT_SYMBOL_GPL(install_iommu_arch);

/**
 * uninstall_iommu_arch - Uninstall architecture specific iommu functions
 * @ops: a pointer to architecture specific iommu functions
 *
 * This interface uninstalls the iommu algorithm installed previously.
 **/
void uninstall_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu != ops)
		pr_err("%s: not your arch\n", __func__);

	arch_iommu = NULL;
}
EXPORT_SYMBOL_GPL(uninstall_iommu_arch);
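
/*
 * A minimal registration sketch for an architecture backend. The callback
 * names (omap2_iommu_enable() etc.) are hypothetical; only the
 * iommu_functions hooks themselves come from this interface:
 *
 *	static const struct iommu_functions omap2_iommu_ops = {
 *		.version	= 1,
 *		.enable		= omap2_iommu_enable,
 *		.disable	= omap2_iommu_disable,
 *		.fault_isr	= omap2_iommu_fault_isr,
 *		.tlb_read_cr	= omap2_tlb_read_cr,
 *		.tlb_load_cr	= omap2_tlb_load_cr,
 *		.cr_to_e	= omap2_cr_to_e,
 *		.cr_to_virt	= omap2_cr_to_virt,
 *		.alloc_cr	= omap2_alloc_cr,
 *		.cr_valid	= omap2_cr_valid,
 *		.get_pte_attr	= omap2_get_pte_attr,
 *		.save_ctx	= omap2_iommu_save_ctx,
 *		.restore_ctx	= omap2_iommu_restore_ctx,
 *		.dump_cr	= omap2_cr_dump,
 *		.dump_ctx	= omap2_iommu_dump_ctx,
 *	};
 *
 *	err = install_iommu_arch(&omap2_iommu_ops);
 *
 * Only one backend can be live at a time; a second install returns -EBUSY
 * until uninstall_iommu_arch() releases the slot.
 */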

/**
 * iommu_save_ctx - Save registers for pm off-mode support
 * @obj: target iommu
 **/
void iommu_save_ctx(struct iommu *obj)
{
	arch_iommu->save_ctx(obj);
}
EXPORT_SYMBOL_GPL(iommu_save_ctx);

/**
 * iommu_restore_ctx - Restore registers for pm off-mode support
 * @obj: target iommu
 **/
void iommu_restore_ctx(struct iommu *obj)
{
	arch_iommu->restore_ctx(obj);
}
EXPORT_SYMBOL_GPL(iommu_restore_ctx);

/**
 * iommu_arch_version - Return running iommu arch version
 **/
u32 iommu_arch_version(void)
{
	return arch_iommu->version;
}
EXPORT_SYMBOL_GPL(iommu_arch_version);

static int iommu_enable(struct iommu *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	clk_enable(obj->clk);

	err = arch_iommu->enable(obj);

	clk_disable(obj->clk);
	return err;
}

static void iommu_disable(struct iommu *obj)
{
	if (!obj)
		return;

	clk_enable(obj->clk);

	arch_iommu->disable(obj);

	clk_disable(obj->clk);
}

/*
 * TLB operations
 */
void iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
	BUG_ON(!cr || !e);

	arch_iommu->cr_to_e(cr, e);
}
EXPORT_SYMBOL_GPL(iotlb_cr_to_e);

static inline int iotlb_cr_valid(struct cr_regs *cr)
{
	if (!cr)
		return -EINVAL;

	return arch_iommu->cr_valid(cr);
}

static inline struct cr_regs *iotlb_alloc_cr(struct iommu *obj,
					     struct iotlb_entry *e)
{
	if (!e)
		return NULL;

	return arch_iommu->alloc_cr(obj, e);
}

u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	return arch_iommu->cr_to_virt(cr);
}
EXPORT_SYMBOL_GPL(iotlb_cr_to_virt);

static u32 get_iopte_attr(struct iotlb_entry *e)
{
	return arch_iommu->get_pte_attr(e);
}

static u32 iommu_report_fault(struct iommu *obj, u32 *da)
{
	return arch_iommu->fault_isr(obj, da);
}

static void iotlb_lock_get(struct iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);

	BUG_ON(l->base != 0); /* Currently no preservation is used */
}

static void iotlb_lock_set(struct iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	BUG_ON(l->base != 0); /* Currently no preservation is used */

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}

static void iotlb_read_cr(struct iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_read_cr(obj, cr);
}

static void iotlb_load_cr(struct iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_load_cr(obj, cr);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}

/**
 * iotlb_dump_cr - Dump an iommu tlb entry into buf
 * @obj: target iommu
 * @cr: contents of the cam and ram registers
 * @buf: output buffer
 **/
static inline ssize_t iotlb_dump_cr(struct iommu *obj, struct cr_regs *cr,
				    char *buf)
{
	BUG_ON(!cr || !buf);

	return arch_iommu->dump_cr(obj, cr, buf);
}

/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj: target iommu
 * @e: an iommu tlb entry info
 **/
int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
{
	int i;
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	clk_enable(obj->clk);

	for (i = 0; i < obj->nr_tlb_entries; i++) {
		struct cr_regs tmp;

		iotlb_lock_get(obj, &l);
		l.vict = i;
		iotlb_lock_set(obj, &l);
		iotlb_read_cr(obj, &tmp);
		if (!iotlb_cr_valid(&tmp))
			break;
	}

	if (i == obj->nr_tlb_entries) {
		dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
		err = -EBUSY;
		goto out;
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		err = PTR_ERR(cr);
		goto out;
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = 0;
	iotlb_lock_set(obj, &l);
out:
	clk_disable(obj->clk);
	return err;
}
EXPORT_SYMBOL_GPL(load_iotlb_entry);
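
/*
 * A usage sketch: pin one 4 KiB mapping into the TLB. Only the da, pa and
 * pgsz fields are consumed here; any attribute bits are derived by the
 * arch backend through alloc_cr()/get_pte_attr(). The addresses are
 * illustrative and "obj" would come from iommu_get() below:
 *
 *	struct iotlb_entry e = {
 *		.da	= 0x00100000,
 *		.pa	= 0x80100000,
 *		.pgsz	= MMU_CAM_PGSZ_4K,
 *	};
 *
 *	err = load_iotlb_entry(obj, &e);
 *
 * A return of -EBUSY means all nr_tlb_entries slots hold valid entries
 * and nothing is free.
 */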

/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj: target iommu
 * @da: iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
void flush_iotlb_page(struct iommu *obj, u32 da)
{
	struct iotlb_lock l;
	int i;

	clk_enable(obj->clk);

	for (i = 0; i < obj->nr_tlb_entries; i++) {
		struct cr_regs cr;
		u32 start;
		size_t bytes;

		iotlb_lock_get(obj, &l);
		l.vict = i;
		iotlb_lock_set(obj, &l);
		iotlb_read_cr(obj, &cr);
		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
				__func__, start, da, bytes);

			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
			break;
		}
	}
	clk_disable(obj->clk);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}
EXPORT_SYMBOL_GPL(flush_iotlb_page);

/**
 * flush_iotlb_range - Clear iommu tlb entries
 * @obj: target iommu
 * @start: iommu device virtual address (start)
 * @end: iommu device virtual address (end)
 *
 * Clear the iommu tlb entries within the 'start' to 'end' address range.
 **/
void flush_iotlb_range(struct iommu *obj, u32 start, u32 end)
{
	u32 da = start;

	while (da < end) {
		flush_iotlb_page(obj, da);
		/* FIXME: Optimize for multiple page sizes */
		da += IOPTE_SIZE;
	}
}
EXPORT_SYMBOL_GPL(flush_iotlb_range);
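
/*
 * For example, after tearing down a 64 KiB region mapped at 0x00200000
 * for a device, the stale TLB entries can be dropped with:
 *
 *	flush_iotlb_range(obj, 0x00200000, 0x00200000 + SZ_64K);
 *
 * The walk steps in IOPTE_SIZE (4 KiB) increments, so large and
 * supersection entries are simply hit multiple times.
 */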

/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj: target iommu
 **/
void flush_iotlb_all(struct iommu *obj)
{
	struct iotlb_lock l;

	clk_enable(obj->clk);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	clk_disable(obj->clk);
}
EXPORT_SYMBOL_GPL(flush_iotlb_all);

#if defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)

ssize_t iommu_dump_ctx(struct iommu *obj, char *buf)
{
	ssize_t bytes;

	if (!obj || !buf)
		return -EINVAL;

	clk_enable(obj->clk);

	bytes = arch_iommu->dump_ctx(obj, buf);

	clk_disable(obj->clk);

	return bytes;
}
EXPORT_SYMBOL_GPL(iommu_dump_ctx);

static int __dump_tlb_entries(struct iommu *obj, struct cr_regs *crs)
{
	int i;
	struct iotlb_lock saved, l;
	struct cr_regs *p = crs;

	clk_enable(obj->clk);

	iotlb_lock_get(obj, &saved);
	memcpy(&l, &saved, sizeof(saved));

	for (i = 0; i < obj->nr_tlb_entries; i++) {
		struct cr_regs tmp;

		iotlb_lock_get(obj, &l);
		l.vict = i;
		iotlb_lock_set(obj, &l);
		iotlb_read_cr(obj, &tmp);
		if (!iotlb_cr_valid(&tmp))
			continue;

		*p++ = tmp;
	}
	iotlb_lock_set(obj, &saved);
	clk_disable(obj->clk);

	return p - crs;
}

/**
 * dump_tlb_entries - dump cr arrays to given buffer
 * @obj: target iommu
 * @buf: output buffer
 **/
size_t dump_tlb_entries(struct iommu *obj, char *buf)
{
	int i, n;
	struct cr_regs *cr;
	char *p = buf;

	cr = kcalloc(obj->nr_tlb_entries, sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return 0;

	n = __dump_tlb_entries(obj, cr);
	for (i = 0; i < n; i++)
		p += iotlb_dump_cr(obj, cr + i, p);
	kfree(cr);

	return p - buf;
}
EXPORT_SYMBOL_GPL(dump_tlb_entries);
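
/*
 * A debug consumer might drain the TLB state into a page-sized buffer;
 * the buffer size is a guess, but each dumped line is short, so one page
 * comfortably holds a 32-entry TLB:
 *
 *	char *buf = (char *)__get_free_page(GFP_KERNEL);
 *	if (buf) {
 *		size_t len = dump_tlb_entries(obj, buf);
 *		pr_info("%.*s", (int)len, buf);
 *		free_page((unsigned long)buf);
 *	}
 */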

int foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
{
	return driver_for_each_device(&omap_iommu_driver.driver,
				      NULL, data, fn);
}
EXPORT_SYMBOL_GPL(foreach_iommu_device);

#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */

/*
 * H/W pagetable operations
 */
static void flush_iopgd_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pgd"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void flush_iopte_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pte"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void iopte_free(u32 *iopte)
{
	/* Note: freed ioptes must be clean, ready for re-use */
	kmem_cache_free(iopte_cachep, iopte);
}

static u32 *iopte_alloc(struct iommu *obj, u32 *iopgd, u32 da)
{
	u32 *iopte;

	/* a table already exists */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
		flush_iopgd_range(iopgd, iopgd);

		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced; free the redundant table */
		iopte_free(iopte);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);

	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}

static int iopgd_alloc_section(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopgd_range(iopgd, iopgd);
	return 0;
}

static int iopgd_alloc_super(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	int i;

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopgd_range(iopgd, iopgd + 15);
	return 0;
}

static int iopte_alloc_page(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(iopte, iopte);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}

static int iopte_alloc_large(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);
	int i;

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(iopte, iopte + 15);
	return 0;
}

static int iopgtable_store_entry_core(struct iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		BUG();
		break;
	}

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}

/**
 * iopgtable_store_entry - Make an iommu pte entry
 * @obj: target iommu
 * @e: an iommu tlb entry info
 **/
int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
#ifdef PREFETCH_IOTLB
	if (!err)
		load_iotlb_entry(obj, e);
#endif
	return err;
}
EXPORT_SYMBOL_GPL(iopgtable_store_entry);
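
/*
 * Mapping a physically contiguous buffer then becomes a loop over
 * iopgtable_store_entry(). A 1 MiB-aligned, 1 MiB-sized chunk takes a
 * single section entry; smaller leftovers would use 64K/4K entries:
 *
 *	struct iotlb_entry e = {
 *		.da	= da,
 *		.pa	= pa,
 *		.pgsz	= MMU_CAM_PGSZ_1M,
 *	};
 *
 *	err = iopgtable_store_entry(obj, &e);
 *
 * With PREFETCH_IOTLB defined, the new entry is also loaded into the TLB
 * right away instead of waiting for the first fault.
 */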

/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj: target iommu
 * @da: iommu device virtual address
 * @ppgd: iommu pgd entry pointer to be returned
 * @ppte: iommu pte entry pointer to be returned
 **/
void iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (*iopgd & IOPGD_TABLE)
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}
EXPORT_SYMBOL_GPL(iopgtable_lookup_entry);
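
/*
 * A caller inspecting a translation might do the following; *ppte stays
 * NULL for section/supersection mappings, which live entirely in the pgd:
 *
 *	u32 *pgd, *pte;
 *
 *	iopgtable_lookup_entry(obj, da, &pgd, &pte);
 *	if (pte)
 *		pr_debug("da %08x -> pte %08x\n", da, *pte);
 *	else
 *		pr_debug("da %08x -> pgd %08x\n", da, *pgd);
 */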

static size_t iopgtable_clear_entry_core(struct iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;

	if (!*iopgd)
		return 0;

	if (*iopgd & IOPGD_TABLE) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		flush_iopte_range(iopte, iopte + nent - 1);

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(iopte);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if (*iopgd & IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopgd_range(iopgd, iopgd + nent - 1);
out:
	return bytes;
}

/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj: target iommu
 * @da: iommu device virtual address
 **/
size_t iopgtable_clear_entry(struct iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}
EXPORT_SYMBOL_GPL(iopgtable_clear_entry);

static void iopgtable_clear_entry_all(struct iommu *obj)
{
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);

		if (!*iopgd)
			continue;

		if (*iopgd & IOPGD_TABLE)
			iopte_free(iopte_offset(iopgd, 0));

		*iopgd = 0;
		flush_iopgd_range(iopgd, iopgd);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}

/*
 * Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 stat, da;
	u32 *iopgd, *iopte;
	int err = -EIO;
	struct iommu *obj = data;

	if (!obj->refcount)
		return IRQ_NONE;

	/* Dynamic loading of a TLB or PTE entry */
	if (obj->isr)
		err = obj->isr(obj);

	if (!err)
		return IRQ_HANDLED;

	clk_enable(obj->clk);
	stat = iommu_report_fault(obj, &da);
	clk_disable(obj->clk);
	if (!stat)
		return IRQ_HANDLED;

	iopgd = iopgd_offset(obj, da);

	if (!(*iopgd & IOPGD_TABLE)) {
		dev_err(obj->dev, "%s: da:%08x pgd:%p *pgd:%08x\n", __func__,
			da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		__func__, da, iopgd, *iopgd, iopte, *iopte);

	return IRQ_NONE;
}

static int device_match_by_alias(struct device *dev, void *data)
{
	struct iommu *obj = to_iommu(dev);
	const char *name = data;

	pr_debug("%s: %s %s\n", __func__, obj->name, name);

	return strcmp(obj->name, name) == 0;
}

/**
 * iommu_get - Get iommu handle
 * @name: target iommu name
 **/
struct iommu *iommu_get(const char *name)
{
	int err = -ENOMEM;
	struct device *dev;
	struct iommu *obj;

	dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
				 device_match_by_alias);
	if (!dev)
		return ERR_PTR(-ENODEV);

	obj = to_iommu(dev);

	mutex_lock(&obj->iommu_lock);

	if (obj->refcount++ == 0) {
		err = iommu_enable(obj);
		if (err)
			goto err_enable;
		flush_iotlb_all(obj);
	}

	if (!try_module_get(obj->owner))
		goto err_module;

	mutex_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
	return obj;

err_module:
	if (obj->refcount == 1)
		iommu_disable(obj);
err_enable:
	obj->refcount--;
	mutex_unlock(&obj->iommu_lock);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(iommu_get);

/**
 * iommu_put - Put back iommu handle
 * @obj: target iommu
 **/
void iommu_put(struct iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	mutex_lock(&obj->iommu_lock);

	if (--obj->refcount == 0)
		iommu_disable(obj);

	module_put(obj->owner);

	mutex_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}
EXPORT_SYMBOL_GPL(iommu_put);
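
/*
 * The typical client lifecycle, with "isp" standing in for whatever name
 * the platform code registered the instance under. Between get and put,
 * mappings are managed with iopgtable_store_entry()/iopgtable_clear_entry():
 *
 *	struct iommu *obj = iommu_get("isp");
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	...
 *	iommu_put(obj);
 *
 * The first iommu_get() enables the MMU and flushes stale TLB state; the
 * last iommu_put() disables it again.
 */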

/*
 * OMAP Device MMU(IOMMU) detection
 */
static int __devinit omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	void *p;
	int irq;
	struct iommu *obj;
	struct resource *res;
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

	if (pdev->num_resources != 2)
		return -EINVAL;

	obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->clk = clk_get(&pdev->dev, pdata->clk_name);
	if (IS_ERR(obj->clk))
		goto err_clk;

	obj->nr_tlb_entries = pdata->nr_tlb_entries;
	obj->name = pdata->name;
	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);

	mutex_init(&obj->iommu_lock);
	mutex_init(&obj->mmap_lock);
	spin_lock_init(&obj->page_table_lock);
	INIT_LIST_HEAD(&obj->mmap);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -ENODEV;
		goto err_mem;
	}
	obj->regbase = ioremap(res->start, resource_size(res));
	if (!obj->regbase) {
		err = -ENOMEM;
		goto err_mem;
	}

	res = request_mem_region(res->start, resource_size(res),
				 dev_name(&pdev->dev));
	if (!res) {
		err = -EIO;
		goto err_ioremap;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = -ENODEV;
		goto err_irq;
	}
	err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
			  dev_name(&pdev->dev), obj);
	if (err < 0)
		goto err_irq;
	platform_set_drvdata(pdev, obj);

	p = (void *)__get_free_pages(GFP_KERNEL, get_order(IOPGD_TABLE_SIZE));
	if (!p) {
		err = -ENOMEM;
		goto err_pgd;
	}
	memset(p, 0, IOPGD_TABLE_SIZE);
	clean_dcache_area(p, IOPGD_TABLE_SIZE);
	obj->iopgd = p;

	BUG_ON(!IS_ALIGNED((unsigned long)obj->iopgd, IOPGD_TABLE_SIZE));

	dev_info(&pdev->dev, "%s registered\n", obj->name);
	return 0;

err_pgd:
	free_irq(irq, obj);
err_irq:
	release_mem_region(res->start, resource_size(res));
err_ioremap:
	iounmap(obj->regbase);
err_mem:
	clk_put(obj->clk);
err_clk:
	kfree(obj);
	return err;
}

static int __devexit omap_iommu_remove(struct platform_device *pdev)
{
	int irq;
	struct resource *res;
	struct iommu *obj = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	iopgtable_clear_entry_all(obj);
	free_pages((unsigned long)obj->iopgd, get_order(IOPGD_TABLE_SIZE));

	irq = platform_get_irq(pdev, 0);
	free_irq(irq, obj);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	iounmap(obj->regbase);

	clk_put(obj->clk);
	dev_info(&pdev->dev, "%s removed\n", obj->name);
	kfree(obj);
	return 0;
}

static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= __devexit_p(omap_iommu_remove),
	.driver	= {
		.name	= "omap-iommu",
	},
};
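
/*
 * The driver binds by name, so the platform code is expected to register
 * a matching device carrying one MEM and one IRQ resource plus an
 * iommu_platform_data. A sketch, with the addresses, irq number and data
 * values purely illustrative:
 *
 *	static struct resource isp_iommu_res[] = {
 *		{ .start = 0x480bd400, .end = 0x480bd4ff,
 *		  .flags = IORESOURCE_MEM, },
 *		{ .start = 24, .flags = IORESOURCE_IRQ, },
 *	};
 *
 *	static struct iommu_platform_data isp_iommu_data = {
 *		.name		= "isp",
 *		.clk_name	= "cam_ick",
 *		.nr_tlb_entries	= 8,
 *	};
 *
 * omap_iommu_probe() then picks these up via platform_get_resource(),
 * platform_get_irq() and pdev->dev.platform_data.
 */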

static void iopte_cachep_ctor(void *iopte)
{
	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}

static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      iopte_cachep_ctor);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	return platform_driver_register(&omap_iommu_driver);
}
module_init(omap_iommu_init);

static void __exit omap_iommu_exit(void)
{
	kmem_cache_destroy(iopte_cachep);

	platform_driver_unregister(&omap_iommu_driver);
}
module_exit(omap_iommu_exit);

MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
MODULE_ALIAS("platform:omap-iommu");
MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
MODULE_LICENSE("GPL v2");