author    Ohad Ben-Cohen <ohad@wizery.com>      2011-08-15 16:21:41 -0400
committer Joerg Roedel <joerg.roedel@amd.com>   2011-08-26 05:46:00 -0400
commit    fcf3a6ef4a588c9f06ad7b01c83534ab81985a3f (patch)
tree      d73b98dda1ad4def8eb2f4cc012eb931ef881e1b /drivers/iommu/omap-iovmm.c
parent    f626b52d4a568d4315cd152123ef2d1ea406def2 (diff)
omap: iommu/iovmm: move to dedicated iommu folder
Move OMAP's iommu drivers to the dedicated iommu drivers folder.

While OMAP's iovmm (virtual memory manager) driver does not strictly belong to the iommu drivers folder, move it there as well, because it is by no means OMAP-specific in concept (technically, it is still coupled with OMAP's iommu).

Eventually, iovmm will be completely replaced with the generic, iommu-based dma-mapping API.

Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Acked-by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
Acked-by: Tony Lindgren <tony@atomide.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Diffstat (limited to 'drivers/iommu/omap-iovmm.c')
-rw-r--r--  drivers/iommu/omap-iovmm.c | 923
1 file changed, 923 insertions, 0 deletions
diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c
new file mode 100644
index 000000000000..809ca124196e
--- /dev/null
+++ b/drivers/iommu/omap-iovmm.c
@@ -0,0 +1,923 @@
1/*
2 * omap iommu: simple virtual address space management
3 *
4 * Copyright (C) 2008-2009 Nokia Corporation
5 *
6 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/err.h>
14#include <linux/slab.h>
15#include <linux/vmalloc.h>
16#include <linux/device.h>
17#include <linux/scatterlist.h>
18#include <linux/iommu.h>
19
20#include <asm/cacheflush.h>
21#include <asm/mach/map.h>
22
23#include <plat/iommu.h>
24#include <plat/iovmm.h>
25
26#include <plat/iopgtable.h>
27
28/*
29 * A device driver needs to create address mappings between:
30 *
31 * - iommu/device address
32 * - physical address
33 * - mpu virtual address
34 *
35 * There are 4 possible patterns for them:
36 *
37 * |iova/ mapping iommu_ page
38 * | da pa va (d)-(p)-(v) function type
39 * ---------------------------------------------------------------------------
40 * 1 | c c c 1 - 1 - 1 _kmap() / _kunmap() s
41 * 2 | c c,a c 1 - 1 - 1 _kmalloc()/ _kfree() s
42 * 3 | c d c 1 - n - 1 _vmap() / _vunmap() s
43 * 4 | c d,a c 1 - n - 1 _vmalloc()/ _vfree() n*
44 *
45 *
46 * 'iova': device iommu virtual address
47 * 'da': alias of 'iova'
48 * 'pa': physical address
49 * 'va': mpu virtual address
50 *
51 * 'c': contiguous memory area
52 * 'd': discontiguous memory area
53 * 'a': anonymous memory allocation
54 * '()': optional feature
55 *
56 * 'n': a normal page(4KB) size is used.
57 * 's': multiple iommu superpage(16MB, 1MB, 64KB, 4KB) size is used.
58 *
59 * '*': not yet, but feasible.
60 */
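/*
 * Illustrative sketch (not in the original driver): a minimal example of
 * pattern 4 above, assuming a caller that already holds valid 'domain' and
 * 'obj' handles.  iommu_vmalloc() allocates discontiguous memory, maps it
 * 1-n-1 at a device address and returns that 'da'; iommu_vfree() releases
 * both the mapping and the memory.  The helper name is hypothetical.
 */
#if 0	/* usage sketch only, kept out of the build */
static int example_vmalloc_pattern(struct iommu_domain *domain,
				   struct iommu *obj)
{
	u32 da;

	/* let iovmm choose the device address (IOVMF_DA_FIXED not set) */
	da = iommu_vmalloc(domain, obj, 0, SZ_1M, 0);
	if (IS_ERR_VALUE(da))
		return (int)da;

	/* ... hand 'da' to the remote processor or DMA engine ... */

	iommu_vfree(domain, obj, da);
	return 0;
}
#endif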
61
62static struct kmem_cache *iovm_area_cachep;
63
64/* return total bytes of sg buffers */
65static size_t sgtable_len(const struct sg_table *sgt)
66{
67 unsigned int i, total = 0;
68 struct scatterlist *sg;
69
70 if (!sgt)
71 return 0;
72
73 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
74 size_t bytes;
75
76 bytes = sg->length;
77
78 if (!iopgsz_ok(bytes)) {
79 pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
80 __func__, i, bytes);
81 return 0;
82 }
83
84 total += bytes;
85 }
86
87 return total;
88}
89#define sgtable_ok(x) (!!sgtable_len(x))
90
91static unsigned max_alignment(u32 addr)
92{
93 int i;
94 unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
95 for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
96 ;
97 return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
98}
99
100/*
101 * calculate the optimal number of sg elements from total bytes based on
102 * iommu superpages
103 */
104static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
105{
106 unsigned nr_entries = 0, ent_sz;
107
108 if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
109 pr_err("%s: wrong size %08x\n", __func__, bytes);
110 return 0;
111 }
112
113 while (bytes) {
114 ent_sz = max_alignment(da | pa);
115 ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
116 nr_entries++;
117 da += ent_sz;
118 pa += ent_sz;
119 bytes -= ent_sz;
120 }
121
122 return nr_entries;
123}
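/*
 * Illustrative note (not in the original driver): a worked example of the
 * loop above, assuming da = pa = 0 and bytes = 17MB.  First pass: both
 * addresses are 16MB-aligned and a 16MB superpage fits, so one 16MB entry
 * is used; second pass: the remaining 1MB is covered by one 1MB entry.
 * That gives nr_entries = 2 instead of the 4352 entries a plain 4KB split
 * would require.
 */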
124
125/* allocate and initialize sg_table header(a kind of 'superblock') */
126static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
127 u32 da, u32 pa)
128{
129 unsigned int nr_entries;
130 int err;
131 struct sg_table *sgt;
132
133 if (!bytes)
134 return ERR_PTR(-EINVAL);
135
136 if (!IS_ALIGNED(bytes, PAGE_SIZE))
137 return ERR_PTR(-EINVAL);
138
139 if (flags & IOVMF_LINEAR) {
140 nr_entries = sgtable_nents(bytes, da, pa);
141 if (!nr_entries)
142 return ERR_PTR(-EINVAL);
143 } else
144 nr_entries = bytes / PAGE_SIZE;
145
146 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
147 if (!sgt)
148 return ERR_PTR(-ENOMEM);
149
150 err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
151 if (err) {
152 kfree(sgt);
153 return ERR_PTR(err);
154 }
155
156 pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);
157
158 return sgt;
159}
160
161/* free sg_table header(a kind of superblock) */
162static void sgtable_free(struct sg_table *sgt)
163{
164 if (!sgt)
165 return;
166
167 sg_free_table(sgt);
168 kfree(sgt);
169
170 pr_debug("%s: sgt:%p\n", __func__, sgt);
171}
172
173/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
174static void *vmap_sg(const struct sg_table *sgt)
175{
176 u32 va;
177 size_t total;
178 unsigned int i;
179 struct scatterlist *sg;
180 struct vm_struct *new;
181 const struct mem_type *mtype;
182
183 mtype = get_mem_type(MT_DEVICE);
184 if (!mtype)
185 return ERR_PTR(-EINVAL);
186
187 total = sgtable_len(sgt);
188 if (!total)
189 return ERR_PTR(-EINVAL);
190
191 new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
192 if (!new)
193 return ERR_PTR(-ENOMEM);
194 va = (u32)new->addr;
195
196 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
197 size_t bytes;
198 u32 pa;
199 int err;
200
201 pa = sg_phys(sg);
202 bytes = sg->length;
203
204 BUG_ON(bytes != PAGE_SIZE);
205
206 err = ioremap_page(va, pa, mtype);
207 if (err)
208 goto err_out;
209
210 va += bytes;
211 }
212
213 flush_cache_vmap((unsigned long)new->addr,
214 (unsigned long)(new->addr + total));
215 return new->addr;
216
217err_out:
218 WARN_ON(1); /* FIXME: cleanup some mpu mappings */
219 vunmap(new->addr);
220 return ERR_PTR(-EAGAIN);
221}
222
223static inline void vunmap_sg(const void *va)
224{
225 vunmap(va);
226}
227
228static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
229{
230 struct iovm_struct *tmp;
231
232 list_for_each_entry(tmp, &obj->mmap, list) {
233 if ((da >= tmp->da_start) && (da < tmp->da_end)) {
234 size_t len;
235
236 len = tmp->da_end - tmp->da_start;
237
238 dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
239 __func__, tmp->da_start, da, tmp->da_end, len,
240 tmp->flags);
241
242 return tmp;
243 }
244 }
245
246 return NULL;
247}
248
249/**
250 * find_iovm_area - find iovma which includes @da
251 * @da: iommu device virtual address
252 *
253 * Find the existing iovma starting at @da
254 */
255struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
256{
257 struct iovm_struct *area;
258
259 mutex_lock(&obj->mmap_lock);
260 area = __find_iovm_area(obj, da);
261 mutex_unlock(&obj->mmap_lock);
262
263 return area;
264}
265EXPORT_SYMBOL_GPL(find_iovm_area);
266
267/*
268 * This finds the hole (area) that fits the requested address and length
269 * in the iovma mmap list, and returns the newly allocated iovma.
270 */
271static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
272 size_t bytes, u32 flags)
273{
274 struct iovm_struct *new, *tmp;
275 u32 start, prev_end, alignment;
276
277 if (!obj || !bytes)
278 return ERR_PTR(-EINVAL);
279
280 start = da;
281 alignment = PAGE_SIZE;
282
283 if (~flags & IOVMF_DA_FIXED) {
284 /* Don't map address 0 */
285 start = obj->da_start ? obj->da_start : alignment;
286
287 if (flags & IOVMF_LINEAR)
288 alignment = iopgsz_max(bytes);
289 start = roundup(start, alignment);
290 } else if (start < obj->da_start || start > obj->da_end ||
291 obj->da_end - start < bytes) {
292 return ERR_PTR(-EINVAL);
293 }
294
295 tmp = NULL;
296 if (list_empty(&obj->mmap))
297 goto found;
298
299 prev_end = 0;
300 list_for_each_entry(tmp, &obj->mmap, list) {
301
302 if (prev_end > start)
303 break;
304
305 if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
306 goto found;
307
308 if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
309 start = roundup(tmp->da_end + 1, alignment);
310
311 prev_end = tmp->da_end;
312 }
313
314 if ((start >= prev_end) && (obj->da_end - start >= bytes))
315 goto found;
316
317 dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
318 __func__, da, bytes, flags);
319
320 return ERR_PTR(-EINVAL);
321
322found:
323 new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
324 if (!new)
325 return ERR_PTR(-ENOMEM);
326
327 new->iommu = obj;
328 new->da_start = start;
329 new->da_end = start + bytes;
330 new->flags = flags;
331
332 /*
333 * keep ascending order of iovmas
334 */
335 if (tmp)
336 list_add_tail(&new->list, &tmp->list);
337 else
338 list_add(&new->list, &obj->mmap);
339
340 dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
341 __func__, new->da_start, start, new->da_end, bytes, flags);
342
343 return new;
344}
345
346static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
347{
348 size_t bytes;
349
350 BUG_ON(!obj || !area);
351
352 bytes = area->da_end - area->da_start;
353
354 dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
355 __func__, area->da_start, area->da_end, bytes, area->flags);
356
357 list_del(&area->list);
358 kmem_cache_free(iovm_area_cachep, area);
359}
360
361/**
362 * da_to_va - convert (d) to (v)
363 * @obj: objective iommu
364 * @da: iommu device virtual address
366 *
367 * Returns mpu virtual addr which corresponds to a given device virtual addr
368 */
369void *da_to_va(struct iommu *obj, u32 da)
370{
371 void *va = NULL;
372 struct iovm_struct *area;
373
374 mutex_lock(&obj->mmap_lock);
375
376 area = __find_iovm_area(obj, da);
377 if (!area) {
378 dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
379 goto out;
380 }
381 va = area->va;
382out:
383 mutex_unlock(&obj->mmap_lock);
384
385 return va;
386}
387EXPORT_SYMBOL_GPL(da_to_va);
388
389static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
390{
391 unsigned int i;
392 struct scatterlist *sg;
393 void *va = _va;
394 void *va_end;
395
396 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
397 struct page *pg;
398 const size_t bytes = PAGE_SIZE;
399
400 /*
401 * iommu 'superpage' isn't supported with 'iommu_vmalloc()'
402 */
403 pg = vmalloc_to_page(va);
404 BUG_ON(!pg);
405 sg_set_page(sg, pg, bytes, 0);
406
407 va += bytes;
408 }
409
410 va_end = _va + PAGE_SIZE * i;
411}
412
413static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
414{
415 /*
416 * This is not strictly necessary; it exists only for consistency
417 * and code readability.
418 */
419 BUG_ON(!sgt);
420}
421
422static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, u32 da,
423 size_t len)
424{
425 unsigned int i;
426 struct scatterlist *sg;
427
428 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
429 unsigned bytes;
430
431 bytes = max_alignment(da | pa);
432 bytes = min_t(unsigned, bytes, iopgsz_max(len));
433
434 BUG_ON(!iopgsz_ok(bytes));
435
436 sg_set_buf(sg, phys_to_virt(pa), bytes);
437 /*
438 * 'pa' is contiguous (linear).
439 */
440 pa += bytes;
441 da += bytes;
442 len -= bytes;
443 }
444 BUG_ON(len);
445}
446
447static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
448{
449 /*
450 * This is not strictly necessary; it exists only for consistency
451 * and code readability.
452 */
453 BUG_ON(!sgt);
454}
455
456/* create 'da' <-> 'pa' mapping from 'sgt' */
457static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
458 const struct sg_table *sgt, u32 flags)
459{
460 int err;
461 unsigned int i, j;
462 struct scatterlist *sg;
463 u32 da = new->da_start;
464 int order;
465
466 if (!domain || !sgt)
467 return -EINVAL;
468
469 BUG_ON(!sgtable_ok(sgt));
470
471 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
472 u32 pa;
473 size_t bytes;
474
475 pa = sg_phys(sg);
476 bytes = sg->length;
477
478 flags &= ~IOVMF_PGSZ_MASK;
479
480 if (bytes_to_iopgsz(bytes) < 0)
481 goto err_out;
482
483 order = get_order(bytes);
484
485 pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
486 i, da, pa, bytes);
487
488 err = iommu_map(domain, da, pa, order, flags);
489 if (err)
490 goto err_out;
491
492 da += bytes;
493 }
494 return 0;
495
496err_out:
497 da = new->da_start;
498
499 for_each_sg(sgt->sgl, sg, i, j) {
500 size_t bytes;
501
502 bytes = sg->length;
503 order = get_order(bytes);
504
505 /* ignore failures.. we're already handling one */
506 iommu_unmap(domain, da, order);
507
508 da += bytes;
509 }
510 return err;
511}
512
513/* release 'da' <-> 'pa' mapping */
514static void unmap_iovm_area(struct iommu_domain *domain, struct iommu *obj,
515 struct iovm_struct *area)
516{
517 u32 start;
518 size_t total = area->da_end - area->da_start;
519 const struct sg_table *sgt = area->sgt;
520 struct scatterlist *sg;
521 int i, err;
522
523 BUG_ON(!sgtable_ok(sgt));
524 BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));
525
526 start = area->da_start;
527 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
528 size_t bytes;
529 int order;
530
531 bytes = sg->length;
532 order = get_order(bytes);
533
534 err = iommu_unmap(domain, start, order);
535 if (err)
536 break;
537
538 dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
539 __func__, start, bytes, area->flags);
540
541 BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
542
543 total -= bytes;
544 start += bytes;
545 }
546 BUG_ON(total);
547}
548
549/* template function for all unmapping */
550static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
551 struct iommu *obj, const u32 da,
552 void (*fn)(const void *), u32 flags)
553{
554 struct sg_table *sgt = NULL;
555 struct iovm_struct *area;
556
557 if (!IS_ALIGNED(da, PAGE_SIZE)) {
558 dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
559 return NULL;
560 }
561
562 mutex_lock(&obj->mmap_lock);
563
564 area = __find_iovm_area(obj, da);
565 if (!area) {
566 dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
567 goto out;
568 }
569
570 if ((area->flags & flags) != flags) {
571 dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
572 area->flags);
573 goto out;
574 }
575 sgt = (struct sg_table *)area->sgt;
576
577 unmap_iovm_area(domain, obj, area);
578
579 fn(area->va);
580
581 dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
582 area->da_start, da, area->da_end,
583 area->da_end - area->da_start, area->flags);
584
585 free_iovm_area(obj, area);
586out:
587 mutex_unlock(&obj->mmap_lock);
588
589 return sgt;
590}
591
592static u32 map_iommu_region(struct iommu_domain *domain, struct iommu *obj,
593 u32 da, const struct sg_table *sgt, void *va,
594 size_t bytes, u32 flags)
595{
596 int err = -ENOMEM;
597 struct iovm_struct *new;
598
599 mutex_lock(&obj->mmap_lock);
600
601 new = alloc_iovm_area(obj, da, bytes, flags);
602 if (IS_ERR(new)) {
603 err = PTR_ERR(new);
604 goto err_alloc_iovma;
605 }
606 new->va = va;
607 new->sgt = sgt;
608
609 if (map_iovm_area(domain, new, sgt, new->flags))
610 goto err_map;
611
612 mutex_unlock(&obj->mmap_lock);
613
614 dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
615 __func__, new->da_start, bytes, new->flags, va);
616
617 return new->da_start;
618
619err_map:
620 free_iovm_area(obj, new);
621err_alloc_iovma:
622 mutex_unlock(&obj->mmap_lock);
623 return err;
624}
625
626static inline u32 __iommu_vmap(struct iommu_domain *domain, struct iommu *obj,
627 u32 da, const struct sg_table *sgt,
628 void *va, size_t bytes, u32 flags)
629{
630 return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
631}
632
633/**
634 * iommu_vmap - (d)-(p)-(v) address mapper
635 * @obj: objective iommu
636 * @sgt: address of scatter gather table
637 * @flags: iovma and page property
638 *
639 * Creates 1-n-1 mapping with given @sgt and returns @da.
640 * All @sgt elements must be io page size aligned.
641 */
642u32 iommu_vmap(struct iommu_domain *domain, struct iommu *obj, u32 da,
643 const struct sg_table *sgt, u32 flags)
644{
645 size_t bytes;
646 void *va = NULL;
647
648 if (!obj || !obj->dev || !sgt)
649 return -EINVAL;
650
651 bytes = sgtable_len(sgt);
652 if (!bytes)
653 return -EINVAL;
654 bytes = PAGE_ALIGN(bytes);
655
656 if (flags & IOVMF_MMIO) {
657 va = vmap_sg(sgt);
658 if (IS_ERR(va))
659 return PTR_ERR(va);
660 }
661
662 flags |= IOVMF_DISCONT;
663 flags |= IOVMF_MMIO;
664
665 da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
666 if (IS_ERR_VALUE(da))
667 vunmap_sg(va);
668
669 return da;
670}
671EXPORT_SYMBOL_GPL(iommu_vmap);
672
673/**
674 * iommu_vunmap - release virtual mapping obtained by 'iommu_vmap()'
675 * @obj: objective iommu
676 * @da: iommu device virtual address
677 *
678 * Frees the iommu virtually contiguous memory area starting at
679 * @da, which was returned by 'iommu_vmap()'.
680 */
681struct sg_table *
682iommu_vunmap(struct iommu_domain *domain, struct iommu *obj, u32 da)
683{
684 struct sg_table *sgt;
685 /*
686 * 'sgt' was allocated by the caller before 'iommu_vmap()' was called.
687 * Just return 'sgt' to the caller to free.
688 */
689 sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
690 IOVMF_DISCONT | IOVMF_MMIO);
691 if (!sgt)
692 dev_dbg(obj->dev, "%s: No sgt\n", __func__);
693 return sgt;
694}
695EXPORT_SYMBOL_GPL(iommu_vunmap);
696
697/**
698 * iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
699 * @obj: objective iommu
700 * @da: contiguous iommu virtual memory
701 * @bytes: allocation size
702 * @flags: iovma and page property
703 *
704 * Allocates @bytes linearly, creates a 1-n-1 mapping and returns
705 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
706 */
707u32 iommu_vmalloc(struct iommu_domain *domain, struct iommu *obj, u32 da,
708 size_t bytes, u32 flags)
709{
710 void *va;
711 struct sg_table *sgt;
712
713 if (!obj || !obj->dev || !bytes)
714 return -EINVAL;
715
716 bytes = PAGE_ALIGN(bytes);
717
718 va = vmalloc(bytes);
719 if (!va)
720 return -ENOMEM;
721
722 flags |= IOVMF_DISCONT;
723 flags |= IOVMF_ALLOC;
724
725 sgt = sgtable_alloc(bytes, flags, da, 0);
726 if (IS_ERR(sgt)) {
727 da = PTR_ERR(sgt);
728 goto err_sgt_alloc;
729 }
730 sgtable_fill_vmalloc(sgt, va);
731
732 da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
733 if (IS_ERR_VALUE(da))
734 goto err_iommu_vmap;
735
736 return da;
737
738err_iommu_vmap:
739 sgtable_drain_vmalloc(sgt);
740 sgtable_free(sgt);
741err_sgt_alloc:
742 vfree(va);
743 return da;
744}
745EXPORT_SYMBOL_GPL(iommu_vmalloc);
746
747/**
748 * iommu_vfree - release memory allocated by 'iommu_vmalloc()'
749 * @obj: objective iommu
750 * @da: iommu device virtual address
751 *
752 * Frees the iommu virtually contiguous memory area starting at
753 * @da, as obtained from 'iommu_vmalloc()'.
754 */
755void iommu_vfree(struct iommu_domain *domain, struct iommu *obj, const u32 da)
756{
757 struct sg_table *sgt;
758
759 sgt = unmap_vm_area(domain, obj, da, vfree,
760 IOVMF_DISCONT | IOVMF_ALLOC);
761 if (!sgt)
762 dev_dbg(obj->dev, "%s: No sgt\n", __func__);
763 sgtable_free(sgt);
764}
765EXPORT_SYMBOL_GPL(iommu_vfree);
766
767static u32 __iommu_kmap(struct iommu_domain *domain, struct iommu *obj,
768 u32 da, u32 pa, void *va, size_t bytes, u32 flags)
769{
770 struct sg_table *sgt;
771
772 sgt = sgtable_alloc(bytes, flags, da, pa);
773 if (IS_ERR(sgt))
774 return PTR_ERR(sgt);
775
776 sgtable_fill_kmalloc(sgt, pa, da, bytes);
777
778 da = map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
779 if (IS_ERR_VALUE(da)) {
780 sgtable_drain_kmalloc(sgt);
781 sgtable_free(sgt);
782 }
783
784 return da;
785}
786
787/**
788 * iommu_kmap - (d)-(p)-(v) address mapper
789 * @obj: objective iommu
790 * @da: contiguous iommu virtual memory
791 * @pa: contiguous physical memory
792 * @flags: iovma and page property
793 *
794 * Creates 1-1-1 mapping and returns @da again, which can be
795 * adjusted if 'IOVMF_DA_FIXED' is not set.
796 */
797u32 iommu_kmap(struct iommu_domain *domain, struct iommu *obj, u32 da, u32 pa,
798 size_t bytes, u32 flags)
799{
800 void *va;
801
802 if (!obj || !obj->dev || !bytes)
803 return -EINVAL;
804
805 bytes = PAGE_ALIGN(bytes);
806
807 va = ioremap(pa, bytes);
808 if (!va)
809 return -ENOMEM;
810
811 flags |= IOVMF_LINEAR;
812 flags |= IOVMF_MMIO;
813
814 da = __iommu_kmap(domain, obj, da, pa, va, bytes, flags);
815 if (IS_ERR_VALUE(da))
816 iounmap(va);
817
818 return da;
819}
820EXPORT_SYMBOL_GPL(iommu_kmap);
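/*
 * Illustrative sketch (not in the original driver): a minimal example of
 * the 1-1-1 pattern, assuming the caller knows the physical address of a
 * device-visible, physically contiguous region.  The physical base value
 * and helper name are hypothetical.
 */
#if 0	/* usage sketch only, kept out of the build */
static int example_kmap_pattern(struct iommu_domain *domain, struct iommu *obj)
{
	const u32 pa = 0x48000000;	/* hypothetical physical base */
	u32 da;

	/* map 64KB of MMIO space 1-1-1; iovmm picks 'da' */
	da = iommu_kmap(domain, obj, 0, pa, SZ_64K, 0);
	if (IS_ERR_VALUE(da))
		return (int)da;

	/* ... use 'da' ... */

	iommu_kunmap(domain, obj, da);
	return 0;
}
#endif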
821
822/**
823 * iommu_kunmap - release virtual mapping obtained by 'iommu_kmap()'
824 * @obj: objective iommu
825 * @da: iommu device virtual address
826 *
827 * Frees the iommu virtually contiguous memory area starting at
828 * @da, which was passed to and was returned by 'iommu_kmap()'.
829 */
830void iommu_kunmap(struct iommu_domain *domain, struct iommu *obj, u32 da)
831{
832 struct sg_table *sgt;
833 typedef void (*func_t)(const void *);
834
835 sgt = unmap_vm_area(domain, obj, da, (func_t)iounmap,
836 IOVMF_LINEAR | IOVMF_MMIO);
837 if (!sgt)
838 dev_dbg(obj->dev, "%s: No sgt\n", __func__);
839 sgtable_free(sgt);
840}
841EXPORT_SYMBOL_GPL(iommu_kunmap);
842
843/**
844 * iommu_kmalloc - (d)-(p)-(v) address allocator and mapper
845 * @obj: objective iommu
846 * @da: contiguous iommu virtual memory
847 * @bytes: bytes for allocation
848 * @flags: iovma and page property
849 *
850 * Allocates @bytes linearly, creates a 1-1-1 mapping and returns
851 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
852 */
853u32 iommu_kmalloc(struct iommu_domain *domain, struct iommu *obj, u32 da,
854 size_t bytes, u32 flags)
855{
856 void *va;
857 u32 pa;
858
859 if (!obj || !obj->dev || !bytes)
860 return -EINVAL;
861
862 bytes = PAGE_ALIGN(bytes);
863
864 va = kmalloc(bytes, GFP_KERNEL | GFP_DMA);
865 if (!va)
866 return -ENOMEM;
867 pa = virt_to_phys(va);
868
869 flags |= IOVMF_LINEAR;
870 flags |= IOVMF_ALLOC;
871
872 da = __iommu_kmap(domain, obj, da, pa, va, bytes, flags);
873 if (IS_ERR_VALUE(da))
874 kfree(va);
875
876 return da;
877}
878EXPORT_SYMBOL_GPL(iommu_kmalloc);
879
880/**
881 * iommu_kfree - release virtual mapping obtained by 'iommu_kmalloc()'
882 * @obj: objective iommu
883 * @da: iommu device virtual address
884 *
885 * Frees the iommu virtually contiguous memory area starting at
886 * @da, which was passed to and was returned by 'iommu_kmalloc()'.
887 */
888void iommu_kfree(struct iommu_domain *domain, struct iommu *obj, u32 da)
889{
890 struct sg_table *sgt;
891
892 sgt = unmap_vm_area(domain, obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
893 if (!sgt)
894 dev_dbg(obj->dev, "%s: No sgt\n", __func__);
895 sgtable_free(sgt);
896}
897EXPORT_SYMBOL_GPL(iommu_kfree);
898
899
900static int __init iovmm_init(void)
901{
902 const unsigned long flags = SLAB_HWCACHE_ALIGN;
903 struct kmem_cache *p;
904
905 p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
906 flags, NULL);
907 if (!p)
908 return -ENOMEM;
909 iovm_area_cachep = p;
910
911 return 0;
912}
913module_init(iovmm_init);
914
915static void __exit iovmm_exit(void)
916{
917 kmem_cache_destroy(iovm_area_cachep);
918}
919module_exit(iovmm_exit);
920
921MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
922MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
923MODULE_LICENSE("GPL v2");