path: root/arch/arm/plat-omap/iommu.c
author	Ohad Ben-Cohen <ohad@wizery.com>	2011-08-15 16:21:41 -0400
committer	Joerg Roedel <joerg.roedel@amd.com>	2011-08-26 05:46:00 -0400
commit	fcf3a6ef4a588c9f06ad7b01c83534ab81985a3f (patch)
tree	d73b98dda1ad4def8eb2f4cc012eb931ef881e1b /arch/arm/plat-omap/iommu.c
parent	f626b52d4a568d4315cd152123ef2d1ea406def2 (diff)
omap: iommu/iovmm: move to dedicated iommu folder
Move OMAP's iommu drivers to the dedicated iommu drivers folder. While OMAP's iovmm (virtual memory manager) driver does not strictly belong to the iommu drivers folder, move it there as well, because it's by no means OMAP-specific in concept (technically, it is still coupled with OMAP's iommu). Eventually, iovmm will be completely replaced with the generic, iommu-based, dma-mapping API.

Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Acked-by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
Acked-by: Tony Lindgren <tony@atomide.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
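For context on the "generic, iommu-based, dma-mapping API" direction mentioned above, the sketch below illustrates how a client driver could program a device MMU through the generic IOMMU API rather than iovmm. It is a minimal, hypothetical example and not part of this commit: the device pointer and the addresses are placeholders, and the signatures (the argument-less iommu_domain_alloc() and the gfp_order argument of iommu_map()) are those of kernels contemporary with this commit and changed in later releases.

#include <linux/iommu.h>
#include <linux/device.h>
#include <linux/errno.h>

/*
 * Hypothetical sketch only: maps one 4KiB page for a device behind an
 * IOMMU using the generic API. The device pointer and addresses are
 * placeholders; signatures follow kernels of this commit's era.
 */
static int example_iommu_map_one_page(struct device *client_dev)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc();		/* empty translation domain */
	if (!domain)
		return -ENOMEM;

	/* for an OMAP device this ends up in omap_iommu_attach_dev() below */
	ret = iommu_attach_device(domain, client_dev);
	if (ret)
		goto free_domain;

	/* device address 0x20000000 -> physical 0x80000000, order 0 (one page) */
	ret = iommu_map(domain, 0x20000000, 0x80000000, 0,
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto detach;

	return 0;

detach:
	iommu_detach_device(domain, client_dev);
free_domain:
	iommu_domain_free(domain);
	return ret;
}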
Diffstat (limited to 'arch/arm/plat-omap/iommu.c')
-rw-r--r--	arch/arm/plat-omap/iommu.c | 1326
1 file changed, 0 insertions(+), 1326 deletions(-)
diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c
deleted file mode 100644
index 51aa008d8223..000000000000
--- a/arch/arm/plat-omap/iommu.c
+++ /dev/null
@@ -1,1326 +0,0 @@
1/*
2 * omap iommu: tlb and pagetable primitives
3 *
4 * Copyright (C) 2008-2010 Nokia Corporation
5 *
6 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
7 * Paul Mundt and Toshihiro Kobayashi
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/err.h>
15#include <linux/module.h>
16#include <linux/slab.h>
17#include <linux/interrupt.h>
18#include <linux/ioport.h>
19#include <linux/clk.h>
20#include <linux/platform_device.h>
21#include <linux/iommu.h>
22#include <linux/mutex.h>
23#include <linux/spinlock.h>
24
25#include <asm/cacheflush.h>
26
27#include <plat/iommu.h>
28
29#include "iopgtable.h"
30
31#define for_each_iotlb_cr(obj, n, __i, cr) \
32 for (__i = 0; \
33 (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true); \
34 __i++)
35
36/**
37 * struct omap_iommu_domain - omap iommu domain
38 * @pgtable: the page table
39 * @iommu_dev: an omap iommu device attached to this domain. only a single
40 * iommu device can be attached for now.
41 * @lock: domain lock, should be taken when attaching/detaching
42 */
43struct omap_iommu_domain {
44 u32 *pgtable;
45 struct iommu *iommu_dev;
46 spinlock_t lock;
47};
48
49/* accommodate the difference between omap1 and omap2/3 */
50static const struct iommu_functions *arch_iommu;
51
52static struct platform_driver omap_iommu_driver;
53static struct kmem_cache *iopte_cachep;
54
55/**
56 * install_iommu_arch - Install architecture specific iommu functions
57 * @ops: a pointer to architecture specific iommu functions
58 *
59 * There are several kinds of iommu algorithms (tlb, pagetable) among the
60 * omap series. This interface installs such an iommu algorithm.
61 **/
62int install_iommu_arch(const struct iommu_functions *ops)
63{
64 if (arch_iommu)
65 return -EBUSY;
66
67 arch_iommu = ops;
68 return 0;
69}
70EXPORT_SYMBOL_GPL(install_iommu_arch);
71
72/**
73 * uninstall_iommu_arch - Uninstall architecture specific iommu functions
74 * @ops: a pointer to architecture specific iommu functions
75 *
76 * This interface uninstalls the iommu algorithm installed previously.
77 **/
78void uninstall_iommu_arch(const struct iommu_functions *ops)
79{
80 if (arch_iommu != ops)
81 pr_err("%s: not your arch\n", __func__);
82
83 arch_iommu = NULL;
84}
85EXPORT_SYMBOL_GPL(uninstall_iommu_arch);
86
87/**
88 * iommu_save_ctx - Save registers for pm off-mode support
89 * @obj: target iommu
90 **/
91void iommu_save_ctx(struct iommu *obj)
92{
93 arch_iommu->save_ctx(obj);
94}
95EXPORT_SYMBOL_GPL(iommu_save_ctx);
96
97/**
98 * iommu_restore_ctx - Restore registers for pm off-mode support
99 * @obj: target iommu
100 **/
101void iommu_restore_ctx(struct iommu *obj)
102{
103 arch_iommu->restore_ctx(obj);
104}
105EXPORT_SYMBOL_GPL(iommu_restore_ctx);
106
107/**
108 * iommu_arch_version - Return running iommu arch version
109 **/
110u32 iommu_arch_version(void)
111{
112 return arch_iommu->version;
113}
114EXPORT_SYMBOL_GPL(iommu_arch_version);
115
116static int iommu_enable(struct iommu *obj)
117{
118 int err;
119
120 if (!obj)
121 return -EINVAL;
122
123 if (!arch_iommu)
124 return -ENODEV;
125
126 clk_enable(obj->clk);
127
128 err = arch_iommu->enable(obj);
129
130 clk_disable(obj->clk);
131 return err;
132}
133
134static void iommu_disable(struct iommu *obj)
135{
136 if (!obj)
137 return;
138
139 clk_enable(obj->clk);
140
141 arch_iommu->disable(obj);
142
143 clk_disable(obj->clk);
144}
145
146/*
147 * TLB operations
148 */
149void iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
150{
151 BUG_ON(!cr || !e);
152
153 arch_iommu->cr_to_e(cr, e);
154}
155EXPORT_SYMBOL_GPL(iotlb_cr_to_e);
156
157static inline int iotlb_cr_valid(struct cr_regs *cr)
158{
159 if (!cr)
160 return -EINVAL;
161
162 return arch_iommu->cr_valid(cr);
163}
164
165static inline struct cr_regs *iotlb_alloc_cr(struct iommu *obj,
166 struct iotlb_entry *e)
167{
168 if (!e)
169 return NULL;
170
171 return arch_iommu->alloc_cr(obj, e);
172}
173
174u32 iotlb_cr_to_virt(struct cr_regs *cr)
175{
176 return arch_iommu->cr_to_virt(cr);
177}
178EXPORT_SYMBOL_GPL(iotlb_cr_to_virt);
179
180static u32 get_iopte_attr(struct iotlb_entry *e)
181{
182 return arch_iommu->get_pte_attr(e);
183}
184
185static u32 iommu_report_fault(struct iommu *obj, u32 *da)
186{
187 return arch_iommu->fault_isr(obj, da);
188}
189
190static void iotlb_lock_get(struct iommu *obj, struct iotlb_lock *l)
191{
192 u32 val;
193
194 val = iommu_read_reg(obj, MMU_LOCK);
195
196 l->base = MMU_LOCK_BASE(val);
197 l->vict = MMU_LOCK_VICT(val);
198
199}
200
201static void iotlb_lock_set(struct iommu *obj, struct iotlb_lock *l)
202{
203 u32 val;
204
205 val = (l->base << MMU_LOCK_BASE_SHIFT);
206 val |= (l->vict << MMU_LOCK_VICT_SHIFT);
207
208 iommu_write_reg(obj, val, MMU_LOCK);
209}
210
211static void iotlb_read_cr(struct iommu *obj, struct cr_regs *cr)
212{
213 arch_iommu->tlb_read_cr(obj, cr);
214}
215
216static void iotlb_load_cr(struct iommu *obj, struct cr_regs *cr)
217{
218 arch_iommu->tlb_load_cr(obj, cr);
219
220 iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
221 iommu_write_reg(obj, 1, MMU_LD_TLB);
222}
223
224/**
225 * iotlb_dump_cr - Dump an iommu tlb entry into buf
226 * @obj: target iommu
227 * @cr: contents of cam and ram register
228 * @buf: output buffer
229 **/
230static inline ssize_t iotlb_dump_cr(struct iommu *obj, struct cr_regs *cr,
231 char *buf)
232{
233 BUG_ON(!cr || !buf);
234
235 return arch_iommu->dump_cr(obj, cr, buf);
236}
237
238/* only used in iotlb iteration for-loop */
239static struct cr_regs __iotlb_read_cr(struct iommu *obj, int n)
240{
241 struct cr_regs cr;
242 struct iotlb_lock l;
243
244 iotlb_lock_get(obj, &l);
245 l.vict = n;
246 iotlb_lock_set(obj, &l);
247 iotlb_read_cr(obj, &cr);
248
249 return cr;
250}
251
252/**
253 * load_iotlb_entry - Set an iommu tlb entry
254 * @obj: target iommu
255 * @e: an iommu tlb entry info
256 **/
257int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
258{
259 int err = 0;
260 struct iotlb_lock l;
261 struct cr_regs *cr;
262
263 if (!obj || !obj->nr_tlb_entries || !e)
264 return -EINVAL;
265
266 clk_enable(obj->clk);
267
268 iotlb_lock_get(obj, &l);
269 if (l.base == obj->nr_tlb_entries) {
270 dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
271 err = -EBUSY;
272 goto out;
273 }
274 if (!e->prsvd) {
275 int i;
276 struct cr_regs tmp;
277
278 for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
279 if (!iotlb_cr_valid(&tmp))
280 break;
281
282 if (i == obj->nr_tlb_entries) {
283 dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
284 err = -EBUSY;
285 goto out;
286 }
287
288 iotlb_lock_get(obj, &l);
289 } else {
290 l.vict = l.base;
291 iotlb_lock_set(obj, &l);
292 }
293
294 cr = iotlb_alloc_cr(obj, e);
295 if (IS_ERR(cr)) {
296 clk_disable(obj->clk);
297 return PTR_ERR(cr);
298 }
299
300 iotlb_load_cr(obj, cr);
301 kfree(cr);
302
303 if (e->prsvd)
304 l.base++;
305 /* increment victim for next tlb load */
306 if (++l.vict == obj->nr_tlb_entries)
307 l.vict = l.base;
308 iotlb_lock_set(obj, &l);
309out:
310 clk_disable(obj->clk);
311 return err;
312}
313EXPORT_SYMBOL_GPL(load_iotlb_entry);
314
315/**
316 * flush_iotlb_page - Clear an iommu tlb entry
317 * @obj: target iommu
318 * @da: iommu device virtual address
319 *
320 * Clear an iommu tlb entry which includes 'da' address.
321 **/
322void flush_iotlb_page(struct iommu *obj, u32 da)
323{
324 int i;
325 struct cr_regs cr;
326
327 clk_enable(obj->clk);
328
329 for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
330 u32 start;
331 size_t bytes;
332
333 if (!iotlb_cr_valid(&cr))
334 continue;
335
336 start = iotlb_cr_to_virt(&cr);
337 bytes = iopgsz_to_bytes(cr.cam & 3);
338
339 if ((start <= da) && (da < start + bytes)) {
340 dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
341 __func__, start, da, bytes);
342 iotlb_load_cr(obj, &cr);
343 iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
344 }
345 }
346 clk_disable(obj->clk);
347
348 if (i == obj->nr_tlb_entries)
349 dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
350}
351EXPORT_SYMBOL_GPL(flush_iotlb_page);
352
353/**
354 * flush_iotlb_range - Clear iommu tlb entries in a range
355 * @obj: target iommu
356 * @start: iommu device virtual address(start)
357 * @end: iommu device virtual address(end)
358 *
359 * Clear the iommu tlb entries that cover the range [start, end).
360 **/
361void flush_iotlb_range(struct iommu *obj, u32 start, u32 end)
362{
363 u32 da = start;
364
365 while (da < end) {
366 flush_iotlb_page(obj, da);
367 /* FIXME: Optimize for multiple page size */
368 da += IOPTE_SIZE;
369 }
370}
371EXPORT_SYMBOL_GPL(flush_iotlb_range);
372
373/**
374 * flush_iotlb_all - Clear all iommu tlb entries
375 * @obj: target iommu
376 **/
377void flush_iotlb_all(struct iommu *obj)
378{
379 struct iotlb_lock l;
380
381 clk_enable(obj->clk);
382
383 l.base = 0;
384 l.vict = 0;
385 iotlb_lock_set(obj, &l);
386
387 iommu_write_reg(obj, 1, MMU_GFLUSH);
388
389 clk_disable(obj->clk);
390}
391EXPORT_SYMBOL_GPL(flush_iotlb_all);
392
393/**
394 * iommu_set_twl - enable/disable table walking logic
395 * @obj: target iommu
396 * @on: enable/disable
397 *
398 * Function used to enable/disable TWL. If one wants to work
399 * exclusively with locked TLB entries and receive notifications
400 * for TLB miss then call this function to disable TWL.
401 */
402void iommu_set_twl(struct iommu *obj, bool on)
403{
404 clk_enable(obj->clk);
405 arch_iommu->set_twl(obj, on);
406 clk_disable(obj->clk);
407}
408EXPORT_SYMBOL_GPL(iommu_set_twl);
409
410#if defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)
411
412ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t bytes)
413{
414 if (!obj || !buf)
415 return -EINVAL;
416
417 clk_enable(obj->clk);
418
419 bytes = arch_iommu->dump_ctx(obj, buf, bytes);
420
421 clk_disable(obj->clk);
422
423 return bytes;
424}
425EXPORT_SYMBOL_GPL(iommu_dump_ctx);
426
427static int __dump_tlb_entries(struct iommu *obj, struct cr_regs *crs, int num)
428{
429 int i;
430 struct iotlb_lock saved;
431 struct cr_regs tmp;
432 struct cr_regs *p = crs;
433
434 clk_enable(obj->clk);
435 iotlb_lock_get(obj, &saved);
436
437 for_each_iotlb_cr(obj, num, i, tmp) {
438 if (!iotlb_cr_valid(&tmp))
439 continue;
440 *p++ = tmp;
441 }
442
443 iotlb_lock_set(obj, &saved);
444 clk_disable(obj->clk);
445
446 return p - crs;
447}
448
449/**
450 * dump_tlb_entries - dump cr arrays to given buffer
451 * @obj: target iommu
452 * @buf: output buffer
453 **/
454size_t dump_tlb_entries(struct iommu *obj, char *buf, ssize_t bytes)
455{
456 int i, num;
457 struct cr_regs *cr;
458 char *p = buf;
459
460 num = bytes / sizeof(*cr);
461 num = min(obj->nr_tlb_entries, num);
462
463 cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
464 if (!cr)
465 return 0;
466
467 num = __dump_tlb_entries(obj, cr, num);
468 for (i = 0; i < num; i++)
469 p += iotlb_dump_cr(obj, cr + i, p);
470 kfree(cr);
471
472 return p - buf;
473}
474EXPORT_SYMBOL_GPL(dump_tlb_entries);
475
476int foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
477{
478 return driver_for_each_device(&omap_iommu_driver.driver,
479 NULL, data, fn);
480}
481EXPORT_SYMBOL_GPL(foreach_iommu_device);
482
483#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */
484
485/*
486 * H/W pagetable operations
487 */
488static void flush_iopgd_range(u32 *first, u32 *last)
489{
490 /* FIXME: L2 cache should be taken care of if it exists */
491 do {
492 asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pgd"
493 : : "r" (first));
494 first += L1_CACHE_BYTES / sizeof(*first);
495 } while (first <= last);
496}
497
498static void flush_iopte_range(u32 *first, u32 *last)
499{
500 /* FIXME: L2 cache should be taken care of if it exists */
501 do {
502 asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pte"
503 : : "r" (first));
504 first += L1_CACHE_BYTES / sizeof(*first);
505 } while (first <= last);
506}
507
508static void iopte_free(u32 *iopte)
509{
510 /* Note: freed iopte's must be clean and ready for re-use */
511 kmem_cache_free(iopte_cachep, iopte);
512}
513
514static u32 *iopte_alloc(struct iommu *obj, u32 *iopgd, u32 da)
515{
516 u32 *iopte;
517
518 /* a table already exists */
519 if (*iopgd)
520 goto pte_ready;
521
522 /*
523 * do the allocation outside the page table lock
524 */
525 spin_unlock(&obj->page_table_lock);
526 iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
527 spin_lock(&obj->page_table_lock);
528
529 if (!*iopgd) {
530 if (!iopte)
531 return ERR_PTR(-ENOMEM);
532
533 *iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
534 flush_iopgd_range(iopgd, iopgd);
535
536 dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
537 } else {
538 /* We raced, free the redundant table */
539 iopte_free(iopte);
540 }
541
542pte_ready:
543 iopte = iopte_offset(iopgd, da);
544
545 dev_vdbg(obj->dev,
546 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
547 __func__, da, iopgd, *iopgd, iopte, *iopte);
548
549 return iopte;
550}
551
552static int iopgd_alloc_section(struct iommu *obj, u32 da, u32 pa, u32 prot)
553{
554 u32 *iopgd = iopgd_offset(obj, da);
555
556 if ((da | pa) & ~IOSECTION_MASK) {
557 dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
558 __func__, da, pa, IOSECTION_SIZE);
559 return -EINVAL;
560 }
561
562 *iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
563 flush_iopgd_range(iopgd, iopgd);
564 return 0;
565}
566
567static int iopgd_alloc_super(struct iommu *obj, u32 da, u32 pa, u32 prot)
568{
569 u32 *iopgd = iopgd_offset(obj, da);
570 int i;
571
572 if ((da | pa) & ~IOSUPER_MASK) {
573 dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
574 __func__, da, pa, IOSUPER_SIZE);
575 return -EINVAL;
576 }
577
578 for (i = 0; i < 16; i++)
579 *(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
580 flush_iopgd_range(iopgd, iopgd + 15);
581 return 0;
582}
583
584static int iopte_alloc_page(struct iommu *obj, u32 da, u32 pa, u32 prot)
585{
586 u32 *iopgd = iopgd_offset(obj, da);
587 u32 *iopte = iopte_alloc(obj, iopgd, da);
588
589 if (IS_ERR(iopte))
590 return PTR_ERR(iopte);
591
592 *iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
593 flush_iopte_range(iopte, iopte);
594
595 dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
596 __func__, da, pa, iopte, *iopte);
597
598 return 0;
599}
600
601static int iopte_alloc_large(struct iommu *obj, u32 da, u32 pa, u32 prot)
602{
603 u32 *iopgd = iopgd_offset(obj, da);
604 u32 *iopte = iopte_alloc(obj, iopgd, da);
605 int i;
606
607 if ((da | pa) & ~IOLARGE_MASK) {
608 dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
609 __func__, da, pa, IOLARGE_SIZE);
610 return -EINVAL;
611 }
612
613 if (IS_ERR(iopte))
614 return PTR_ERR(iopte);
615
616 for (i = 0; i < 16; i++)
617 *(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
618 flush_iopte_range(iopte, iopte + 15);
619 return 0;
620}
621
622static int iopgtable_store_entry_core(struct iommu *obj, struct iotlb_entry *e)
623{
624 int (*fn)(struct iommu *, u32, u32, u32);
625 u32 prot;
626 int err;
627
628 if (!obj || !e)
629 return -EINVAL;
630
631 switch (e->pgsz) {
632 case MMU_CAM_PGSZ_16M:
633 fn = iopgd_alloc_super;
634 break;
635 case MMU_CAM_PGSZ_1M:
636 fn = iopgd_alloc_section;
637 break;
638 case MMU_CAM_PGSZ_64K:
639 fn = iopte_alloc_large;
640 break;
641 case MMU_CAM_PGSZ_4K:
642 fn = iopte_alloc_page;
643 break;
644 default:
645 fn = NULL;
646 BUG();
647 break;
648 }
649
650 prot = get_iopte_attr(e);
651
652 spin_lock(&obj->page_table_lock);
653 err = fn(obj, e->da, e->pa, prot);
654 spin_unlock(&obj->page_table_lock);
655
656 return err;
657}
658
659/**
660 * iopgtable_store_entry - Make an iommu pte entry
661 * @obj: target iommu
662 * @e: an iommu tlb entry info
663 **/
664int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e)
665{
666 int err;
667
668 flush_iotlb_page(obj, e->da);
669 err = iopgtable_store_entry_core(obj, e);
670#ifdef PREFETCH_IOTLB
671 if (!err)
672 load_iotlb_entry(obj, e);
673#endif
674 return err;
675}
676EXPORT_SYMBOL_GPL(iopgtable_store_entry);
677
678/**
679 * iopgtable_lookup_entry - Lookup an iommu pte entry
680 * @obj: target iommu
681 * @da: iommu device virtual address
682 * @ppgd: iommu pgd entry pointer to be returned
683 * @ppte: iommu pte entry pointer to be returned
684 **/
685void iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
686{
687 u32 *iopgd, *iopte = NULL;
688
689 iopgd = iopgd_offset(obj, da);
690 if (!*iopgd)
691 goto out;
692
693 if (iopgd_is_table(*iopgd))
694 iopte = iopte_offset(iopgd, da);
695out:
696 *ppgd = iopgd;
697 *ppte = iopte;
698}
699EXPORT_SYMBOL_GPL(iopgtable_lookup_entry);
700
701static size_t iopgtable_clear_entry_core(struct iommu *obj, u32 da)
702{
703 size_t bytes;
704 u32 *iopgd = iopgd_offset(obj, da);
705 int nent = 1;
706
707 if (!*iopgd)
708 return 0;
709
710 if (iopgd_is_table(*iopgd)) {
711 int i;
712 u32 *iopte = iopte_offset(iopgd, da);
713
714 bytes = IOPTE_SIZE;
715 if (*iopte & IOPTE_LARGE) {
716 nent *= 16;
717 /* rewind to the 1st entry */
718 iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
719 }
720 bytes *= nent;
721 memset(iopte, 0, nent * sizeof(*iopte));
722 flush_iopte_range(iopte, iopte + (nent - 1) * sizeof(*iopte));
723
724 /*
725 * do table walk to check if this table is necessary or not
726 */
727 iopte = iopte_offset(iopgd, 0);
728 for (i = 0; i < PTRS_PER_IOPTE; i++)
729 if (iopte[i])
730 goto out;
731
732 iopte_free(iopte);
733 nent = 1; /* for the next L1 entry */
734 } else {
735 bytes = IOPGD_SIZE;
736 if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
737 nent *= 16;
738 /* rewind to the 1st entry */
739 iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
740 }
741 bytes *= nent;
742 }
743 memset(iopgd, 0, nent * sizeof(*iopgd));
744 flush_iopgd_range(iopgd, iopgd + (nent - 1) * sizeof(*iopgd));
745out:
746 return bytes;
747}
748
749/**
750 * iopgtable_clear_entry - Remove an iommu pte entry
751 * @obj: target iommu
752 * @da: iommu device virtual address
753 **/
754size_t iopgtable_clear_entry(struct iommu *obj, u32 da)
755{
756 size_t bytes;
757
758 spin_lock(&obj->page_table_lock);
759
760 bytes = iopgtable_clear_entry_core(obj, da);
761 flush_iotlb_page(obj, da);
762
763 spin_unlock(&obj->page_table_lock);
764
765 return bytes;
766}
767EXPORT_SYMBOL_GPL(iopgtable_clear_entry);
768
769static void iopgtable_clear_entry_all(struct iommu *obj)
770{
771 int i;
772
773 spin_lock(&obj->page_table_lock);
774
775 for (i = 0; i < PTRS_PER_IOPGD; i++) {
776 u32 da;
777 u32 *iopgd;
778
779 da = i << IOPGD_SHIFT;
780 iopgd = iopgd_offset(obj, da);
781
782 if (!*iopgd)
783 continue;
784
785 if (iopgd_is_table(*iopgd))
786 iopte_free(iopte_offset(iopgd, 0));
787
788 *iopgd = 0;
789 flush_iopgd_range(iopgd, iopgd);
790 }
791
792 flush_iotlb_all(obj);
793
794 spin_unlock(&obj->page_table_lock);
795}
796
797/*
798 * Device IOMMU generic operations
799 */
800static irqreturn_t iommu_fault_handler(int irq, void *data)
801{
802 u32 da, errs;
803 u32 *iopgd, *iopte;
804 struct iommu *obj = data;
805
806 if (!obj->refcount)
807 return IRQ_NONE;
808
809 clk_enable(obj->clk);
810 errs = iommu_report_fault(obj, &da);
811 clk_disable(obj->clk);
812 if (errs == 0)
813 return IRQ_HANDLED;
814
815 /* Fault callback or TLB/PTE Dynamic loading */
816 if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv))
817 return IRQ_HANDLED;
818
819 iommu_disable(obj);
820
821 iopgd = iopgd_offset(obj, da);
822
823 if (!iopgd_is_table(*iopgd)) {
824 dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p "
825 "*pgd:px%08x\n", obj->name, errs, da, iopgd, *iopgd);
826 return IRQ_NONE;
827 }
828
829 iopte = iopte_offset(iopgd, da);
830
831 dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x "
832 "pte:0x%p *pte:0x%08x\n", obj->name, errs, da, iopgd, *iopgd,
833 iopte, *iopte);
834
835 return IRQ_NONE;
836}
837
838static int device_match_by_alias(struct device *dev, void *data)
839{
840 struct iommu *obj = to_iommu(dev);
841 const char *name = data;
842
843 pr_debug("%s: %s %s\n", __func__, obj->name, name);
844
845 return strcmp(obj->name, name) == 0;
846}
847
848/**
849 * iommu_set_da_range - Set a valid device address range
850 * @obj: target iommu
851 * @start: Start of valid range
852 * @end: End of valid range
853 **/
854int iommu_set_da_range(struct iommu *obj, u32 start, u32 end)
855{
856
857 if (!obj)
858 return -EFAULT;
859
860 if (end < start || !PAGE_ALIGN(start | end))
861 return -EINVAL;
862
863 obj->da_start = start;
864 obj->da_end = end;
865
866 return 0;
867}
868EXPORT_SYMBOL_GPL(iommu_set_da_range);
869
870/**
871 * omap_find_iommu_device() - find an omap iommu device by name
872 * @name: name of the iommu device
873 *
874 * The generic iommu API requires the caller to provide the device
875 * it wishes to attach to a certain iommu domain.
876 *
877 * Drivers generally should not bother with this as it should just
878 * be taken care of by the DMA-API using dev_archdata.
879 *
880 * This function is provided as an interim solution until the latter
881 * materializes, and omap3isp is fully migrated to the DMA-API.
882 */
883struct device *omap_find_iommu_device(const char *name)
884{
885 return driver_find_device(&omap_iommu_driver.driver, NULL,
886 (void *)name,
887 device_match_by_alias);
888}
889EXPORT_SYMBOL_GPL(omap_find_iommu_device);
890
891/**
892 * omap_iommu_attach() - attach iommu device to an iommu domain
893 * @dev: target omap iommu device
894 * @iopgd: page table
895 **/
896static struct iommu *omap_iommu_attach(struct device *dev, u32 *iopgd)
897{
898 int err = -ENOMEM;
899 struct iommu *obj = to_iommu(dev);
900
901 spin_lock(&obj->iommu_lock);
902
903 /* an iommu device can only be attached once */
904 if (++obj->refcount > 1) {
905 dev_err(dev, "%s: already attached!\n", obj->name);
906 err = -EBUSY;
907 goto err_enable;
908 }
909
910 obj->iopgd = iopgd;
911 err = iommu_enable(obj);
912 if (err)
913 goto err_enable;
914 flush_iotlb_all(obj);
915
916 if (!try_module_get(obj->owner))
917 goto err_module;
918
919 spin_unlock(&obj->iommu_lock);
920
921 dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
922 return obj;
923
924err_module:
925 if (obj->refcount == 1)
926 iommu_disable(obj);
927err_enable:
928 obj->refcount--;
929 spin_unlock(&obj->iommu_lock);
930 return ERR_PTR(err);
931}
932
933/**
934 * omap_iommu_detach - release iommu device
935 * @obj: target iommu
936 **/
937static void omap_iommu_detach(struct iommu *obj)
938{
939 if (!obj || IS_ERR(obj))
940 return;
941
942 spin_lock(&obj->iommu_lock);
943
944 if (--obj->refcount == 0)
945 iommu_disable(obj);
946
947 module_put(obj->owner);
948
949 obj->iopgd = NULL;
950
951 spin_unlock(&obj->iommu_lock);
952
953 dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
954}
955
956int iommu_set_isr(const char *name,
957 int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs,
958 void *priv),
959 void *isr_priv)
960{
961 struct device *dev;
962 struct iommu *obj;
963
964 dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
965 device_match_by_alias);
966 if (!dev)
967 return -ENODEV;
968
969 obj = to_iommu(dev);
970 mutex_lock(&obj->iommu_lock);
971 if (obj->refcount != 0) {
972 mutex_unlock(&obj->iommu_lock);
973 return -EBUSY;
974 }
975 obj->isr = isr;
976 obj->isr_priv = isr_priv;
977 mutex_unlock(&obj->iommu_lock);
978
979 return 0;
980}
981EXPORT_SYMBOL_GPL(iommu_set_isr);
982
983/*
984 * OMAP Device MMU(IOMMU) detection
985 */
986static int __devinit omap_iommu_probe(struct platform_device *pdev)
987{
988 int err = -ENODEV;
989 int irq;
990 struct iommu *obj;
991 struct resource *res;
992 struct iommu_platform_data *pdata = pdev->dev.platform_data;
993
994 if (pdev->num_resources != 2)
995 return -EINVAL;
996
997 obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
998 if (!obj)
999 return -ENOMEM;
1000
1001 obj->clk = clk_get(&pdev->dev, pdata->clk_name);
1002 if (IS_ERR(obj->clk))
1003 goto err_clk;
1004
1005 obj->nr_tlb_entries = pdata->nr_tlb_entries;
1006 obj->name = pdata->name;
1007 obj->dev = &pdev->dev;
1008 obj->ctx = (void *)obj + sizeof(*obj);
1009 obj->da_start = pdata->da_start;
1010 obj->da_end = pdata->da_end;
1011
1012 spin_lock_init(&obj->iommu_lock);
1013 mutex_init(&obj->mmap_lock);
1014 spin_lock_init(&obj->page_table_lock);
1015 INIT_LIST_HEAD(&obj->mmap);
1016
1017 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1018 if (!res) {
1019 err = -ENODEV;
1020 goto err_mem;
1021 }
1022
1023 res = request_mem_region(res->start, resource_size(res),
1024 dev_name(&pdev->dev));
1025 if (!res) {
1026 err = -EIO;
1027 goto err_mem;
1028 }
1029
1030 obj->regbase = ioremap(res->start, resource_size(res));
1031 if (!obj->regbase) {
1032 err = -ENOMEM;
1033 goto err_ioremap;
1034 }
1035
1036 irq = platform_get_irq(pdev, 0);
1037 if (irq < 0) {
1038 err = -ENODEV;
1039 goto err_irq;
1040 }
1041 err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
1042 dev_name(&pdev->dev), obj);
1043 if (err < 0)
1044 goto err_irq;
1045 platform_set_drvdata(pdev, obj);
1046
1047 dev_info(&pdev->dev, "%s registered\n", obj->name);
1048 return 0;
1049
1050err_irq:
1051 iounmap(obj->regbase);
1052err_ioremap:
1053 release_mem_region(res->start, resource_size(res));
1054err_mem:
1055 clk_put(obj->clk);
1056err_clk:
1057 kfree(obj);
1058 return err;
1059}
1060
1061static int __devexit omap_iommu_remove(struct platform_device *pdev)
1062{
1063 int irq;
1064 struct resource *res;
1065 struct iommu *obj = platform_get_drvdata(pdev);
1066
1067 platform_set_drvdata(pdev, NULL);
1068
1069 iopgtable_clear_entry_all(obj);
1070
1071 irq = platform_get_irq(pdev, 0);
1072 free_irq(irq, obj);
1073 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1074 release_mem_region(res->start, resource_size(res));
1075 iounmap(obj->regbase);
1076
1077 clk_put(obj->clk);
1078 dev_info(&pdev->dev, "%s removed\n", obj->name);
1079 kfree(obj);
1080 return 0;
1081}
1082
1083static struct platform_driver omap_iommu_driver = {
1084 .probe = omap_iommu_probe,
1085 .remove = __devexit_p(omap_iommu_remove),
1086 .driver = {
1087 .name = "omap-iommu",
1088 },
1089};
1090
1091static void iopte_cachep_ctor(void *iopte)
1092{
1093 clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
1094}
1095
1096static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
1097 phys_addr_t pa, int order, int prot)
1098{
1099 struct omap_iommu_domain *omap_domain = domain->priv;
1100 struct iommu *oiommu = omap_domain->iommu_dev;
1101 struct device *dev = oiommu->dev;
1102 size_t bytes = PAGE_SIZE << order;
1103 struct iotlb_entry e;
1104 int omap_pgsz;
1105 u32 ret, flags;
1106
1107 /* we only support mapping a single iommu page for now */
1108 omap_pgsz = bytes_to_iopgsz(bytes);
1109 if (omap_pgsz < 0) {
1110 dev_err(dev, "invalid size to map: %d\n", bytes);
1111 return -EINVAL;
1112 }
1113
1114 dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%x\n", da, pa, bytes);
1115
1116 flags = omap_pgsz | prot;
1117
1118 iotlb_init_entry(&e, da, pa, flags);
1119
1120 ret = iopgtable_store_entry(oiommu, &e);
1121 if (ret) {
1122 dev_err(dev, "iopgtable_store_entry failed: %d\n", ret);
1123 return ret;
1124 }
1125
1126 return 0;
1127}
1128
1129static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
1130 int order)
1131{
1132 struct omap_iommu_domain *omap_domain = domain->priv;
1133 struct iommu *oiommu = omap_domain->iommu_dev;
1134 struct device *dev = oiommu->dev;
1135 size_t bytes = PAGE_SIZE << order;
1136 size_t ret;
1137
1138 dev_dbg(dev, "unmapping da 0x%lx size 0x%x\n", da, bytes);
1139
1140 ret = iopgtable_clear_entry(oiommu, da);
1141 if (ret != bytes) {
1142 dev_err(dev, "entry @ 0x%lx was %d; not %d\n", da, ret, bytes);
1143 return -EINVAL;
1144 }
1145
1146 return 0;
1147}
1148
1149static int
1150omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
1151{
1152 struct omap_iommu_domain *omap_domain = domain->priv;
1153 struct iommu *oiommu;
1154 int ret = 0;
1155
1156 spin_lock(&omap_domain->lock);
1157
1158 /* only a single device is supported per domain for now */
1159 if (omap_domain->iommu_dev) {
1160 dev_err(dev, "iommu domain is already attached\n");
1161 ret = -EBUSY;
1162 goto out;
1163 }
1164
1165 /* get a handle to and enable the omap iommu */
1166 oiommu = omap_iommu_attach(dev, omap_domain->pgtable);
1167 if (IS_ERR(oiommu)) {
1168 ret = PTR_ERR(oiommu);
1169 dev_err(dev, "can't get omap iommu: %d\n", ret);
1170 goto out;
1171 }
1172
1173 omap_domain->iommu_dev = oiommu;
1174
1175out:
1176 spin_unlock(&omap_domain->lock);
1177 return ret;
1178}
1179
1180static void omap_iommu_detach_dev(struct iommu_domain *domain,
1181 struct device *dev)
1182{
1183 struct omap_iommu_domain *omap_domain = domain->priv;
1184 struct iommu *oiommu = to_iommu(dev);
1185
1186 spin_lock(&omap_domain->lock);
1187
1188 /* only a single device is supported per domain for now */
1189 if (omap_domain->iommu_dev != oiommu) {
1190 dev_err(dev, "invalid iommu device\n");
1191 goto out;
1192 }
1193
1194 iopgtable_clear_entry_all(oiommu);
1195
1196 omap_iommu_detach(oiommu);
1197
1198 omap_domain->iommu_dev = NULL;
1199
1200out:
1201 spin_unlock(&omap_domain->lock);
1202}
1203
1204static int omap_iommu_domain_init(struct iommu_domain *domain)
1205{
1206 struct omap_iommu_domain *omap_domain;
1207
1208 omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
1209 if (!omap_domain) {
1210 pr_err("kzalloc failed\n");
1211 goto out;
1212 }
1213
1214 omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
1215 if (!omap_domain->pgtable) {
1216 pr_err("kzalloc failed\n");
1217 goto fail_nomem;
1218 }
1219
1220 /*
1221 * should never fail, but please keep this around to ensure
1222 * we keep the hardware happy
1223 */
1224 BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));
1225
1226 clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
1227 spin_lock_init(&omap_domain->lock);
1228
1229 domain->priv = omap_domain;
1230
1231 return 0;
1232
1233fail_nomem:
1234 kfree(omap_domain);
1235out:
1236 return -ENOMEM;
1237}
1238
1239/* assume device was already detached */
1240static void omap_iommu_domain_destroy(struct iommu_domain *domain)
1241{
1242 struct omap_iommu_domain *omap_domain = domain->priv;
1243
1244 domain->priv = NULL;
1245
1246 kfree(omap_domain->pgtable);
1247 kfree(omap_domain);
1248}
1249
1250static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
1251 unsigned long da)
1252{
1253 struct omap_iommu_domain *omap_domain = domain->priv;
1254 struct iommu *oiommu = omap_domain->iommu_dev;
1255 struct device *dev = oiommu->dev;
1256 u32 *pgd, *pte;
1257 phys_addr_t ret = 0;
1258
1259 iopgtable_lookup_entry(oiommu, da, &pgd, &pte);
1260
1261 if (pte) {
1262 if (iopte_is_small(*pte))
1263 ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
1264 else if (iopte_is_large(*pte))
1265 ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
1266 else
1267 dev_err(dev, "bogus pte 0x%x", *pte);
1268 } else {
1269 if (iopgd_is_section(*pgd))
1270 ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
1271 else if (iopgd_is_super(*pgd))
1272 ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
1273 else
1274 dev_err(dev, "bogus pgd 0x%x", *pgd);
1275 }
1276
1277 return ret;
1278}
1279
1280static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
1281 unsigned long cap)
1282{
1283 return 0;
1284}
1285
1286static struct iommu_ops omap_iommu_ops = {
1287 .domain_init = omap_iommu_domain_init,
1288 .domain_destroy = omap_iommu_domain_destroy,
1289 .attach_dev = omap_iommu_attach_dev,
1290 .detach_dev = omap_iommu_detach_dev,
1291 .map = omap_iommu_map,
1292 .unmap = omap_iommu_unmap,
1293 .iova_to_phys = omap_iommu_iova_to_phys,
1294 .domain_has_cap = omap_iommu_domain_has_cap,
1295};
1296
1297static int __init omap_iommu_init(void)
1298{
1299 struct kmem_cache *p;
1300 const unsigned long flags = SLAB_HWCACHE_ALIGN;
1301 size_t align = 1 << 10; /* L2 pagetable alignment */
1302
1303 p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
1304 iopte_cachep_ctor);
1305 if (!p)
1306 return -ENOMEM;
1307 iopte_cachep = p;
1308
1309 register_iommu(&omap_iommu_ops);
1310
1311 return platform_driver_register(&omap_iommu_driver);
1312}
1313module_init(omap_iommu_init);
1314
1315static void __exit omap_iommu_exit(void)
1316{
1317 kmem_cache_destroy(iopte_cachep);
1318
1319 platform_driver_unregister(&omap_iommu_driver);
1320}
1321module_exit(omap_iommu_exit);
1322
1323MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
1324MODULE_ALIAS("platform:omap-iommu");
1325MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
1326MODULE_LICENSE("GPL v2");