Diffstat (limited to 'drivers/iommu/omap-iovmm.c')
-rw-r--r-- | drivers/iommu/omap-iovmm.c | 743
1 file changed, 743 insertions, 0 deletions
diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c
new file mode 100644
index 000000000000..46be456fcc00
--- /dev/null
+++ b/drivers/iommu/omap-iovmm.c
@@ -0,0 +1,743 @@
1 | /* | ||
2 | * omap iommu: simple virtual address space management | ||
3 | * | ||
4 | * Copyright (C) 2008-2009 Nokia Corporation | ||
5 | * | ||
6 | * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include <linux/module.h> | ||
14 | #include <linux/err.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/vmalloc.h> | ||
17 | #include <linux/device.h> | ||
18 | #include <linux/scatterlist.h> | ||
19 | #include <linux/iommu.h> | ||
20 | |||
21 | #include <asm/cacheflush.h> | ||
22 | #include <asm/mach/map.h> | ||
23 | |||
24 | #include <plat/iommu.h> | ||
25 | #include <plat/iovmm.h> | ||
26 | |||
27 | #include <plat/iopgtable.h> | ||
28 | |||
29 | static struct kmem_cache *iovm_area_cachep; | ||
30 | |||
31 | /* return the offset of the first scatterlist entry in a sg table */ | ||
32 | static unsigned int sgtable_offset(const struct sg_table *sgt) | ||
33 | { | ||
34 | if (!sgt || !sgt->nents) | ||
35 | return 0; | ||
36 | |||
37 | return sgt->sgl->offset; | ||
38 | } | ||
39 | |||
40 | /* return total bytes of sg buffers */ | ||
41 | static size_t sgtable_len(const struct sg_table *sgt) | ||
42 | { | ||
43 | unsigned int i, total = 0; | ||
44 | struct scatterlist *sg; | ||
45 | |||
46 | if (!sgt) | ||
47 | return 0; | ||
48 | |||
49 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | ||
50 | size_t bytes; | ||
51 | |||
52 | bytes = sg->length + sg->offset; | ||
53 | |||
54 | if (!iopgsz_ok(bytes)) { | ||
55 | pr_err("%s: sg[%d] not iommu pagesize(%u %u)\n", | ||
56 | __func__, i, bytes, sg->offset); | ||
57 | return 0; | ||
58 | } | ||
59 | |||
60 | if (i && sg->offset) { | ||
61 | pr_err("%s: sg[%d] offset not allowed in internal " | ||
62 | "entries\n", __func__, i); | ||
63 | return 0; | ||
64 | } | ||
65 | |||
66 | total += bytes; | ||
67 | } | ||
68 | |||
69 | return total; | ||
70 | } | ||
71 | #define sgtable_ok(x) (!!sgtable_len(x)) | ||
72 | |||
73 | static unsigned max_alignment(u32 addr) | ||
74 | { | ||
75 | int i; | ||
76 | unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, }; | ||
77 | for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++) | ||
78 | ; | ||
79 | return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0; | ||
80 | } | ||
81 | |||
82 | /* | ||
83 | * calculate the optimal number of sg elements from total bytes based on | ||
84 | * iommu superpages | ||
85 | */ | ||
86 | static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa) | ||
87 | { | ||
88 | unsigned nr_entries = 0, ent_sz; | ||
89 | |||
90 | if (!IS_ALIGNED(bytes, PAGE_SIZE)) { | ||
91 | pr_err("%s: wrong size %08x\n", __func__, bytes); | ||
92 | return 0; | ||
93 | } | ||
94 | |||
95 | while (bytes) { | ||
96 | ent_sz = max_alignment(da | pa); | ||
97 | ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes)); | ||
98 | nr_entries++; | ||
99 | da += ent_sz; | ||
100 | pa += ent_sz; | ||
101 | bytes -= ent_sz; | ||
102 | } | ||
103 | |||
104 | return nr_entries; | ||
105 | } | ||
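A brief worked example of the splitting logic above may help (editorial sketch; the addresses and sizes are invented for illustration and are not part of the patch):

	/*
	 * Example: bytes = 0x140000 (1.25 MiB), da = 0, pa = 0x80100000.
	 * da | pa is 1 MiB aligned, so max_alignment() returns SZ_1M and
	 * the first entry covers one 1 MiB superpage.  0x40000 bytes then
	 * remain at da = 0x100000, pa = 0x80200000: still 1 MiB aligned,
	 * but iopgsz_max(0x40000) caps the entry size at SZ_64K, so four
	 * 64 KiB entries follow and sgtable_nents() returns 5.
	 */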
106 | |||
107 | /* allocate and initialize sg_table header(a kind of 'superblock') */ | ||
108 | static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags, | ||
109 | u32 da, u32 pa) | ||
110 | { | ||
111 | unsigned int nr_entries; | ||
112 | int err; | ||
113 | struct sg_table *sgt; | ||
114 | |||
115 | if (!bytes) | ||
116 | return ERR_PTR(-EINVAL); | ||
117 | |||
118 | if (!IS_ALIGNED(bytes, PAGE_SIZE)) | ||
119 | return ERR_PTR(-EINVAL); | ||
120 | |||
121 | if (flags & IOVMF_LINEAR) { | ||
122 | nr_entries = sgtable_nents(bytes, da, pa); | ||
123 | if (!nr_entries) | ||
124 | return ERR_PTR(-EINVAL); | ||
125 | } else | ||
126 | nr_entries = bytes / PAGE_SIZE; | ||
127 | |||
128 | sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); | ||
129 | if (!sgt) | ||
130 | return ERR_PTR(-ENOMEM); | ||
131 | |||
132 | err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL); | ||
133 | if (err) { | ||
134 | kfree(sgt); | ||
135 | return ERR_PTR(err); | ||
136 | } | ||
137 | |||
138 | pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries); | ||
139 | |||
140 | return sgt; | ||
141 | } | ||
142 | |||
143 | /* free sg_table header(a kind of superblock) */ | ||
144 | static void sgtable_free(struct sg_table *sgt) | ||
145 | { | ||
146 | if (!sgt) | ||
147 | return; | ||
148 | |||
149 | sg_free_table(sgt); | ||
150 | kfree(sgt); | ||
151 | |||
152 | pr_debug("%s: sgt:%p\n", __func__, sgt); | ||
153 | } | ||
154 | |||
155 | /* map 'sglist' to a contiguous mpu virtual area and return 'va' */ | ||
156 | static void *vmap_sg(const struct sg_table *sgt) | ||
157 | { | ||
158 | u32 va; | ||
159 | size_t total; | ||
160 | unsigned int i; | ||
161 | struct scatterlist *sg; | ||
162 | struct vm_struct *new; | ||
163 | const struct mem_type *mtype; | ||
164 | |||
165 | mtype = get_mem_type(MT_DEVICE); | ||
166 | if (!mtype) | ||
167 | return ERR_PTR(-EINVAL); | ||
168 | |||
169 | total = sgtable_len(sgt); | ||
170 | if (!total) | ||
171 | return ERR_PTR(-EINVAL); | ||
172 | |||
173 | new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END); | ||
174 | if (!new) | ||
175 | return ERR_PTR(-ENOMEM); | ||
176 | va = (u32)new->addr; | ||
177 | |||
178 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | ||
179 | size_t bytes; | ||
180 | u32 pa; | ||
181 | int err; | ||
182 | |||
183 | pa = sg_phys(sg) - sg->offset; | ||
184 | bytes = sg->length + sg->offset; | ||
185 | |||
186 | BUG_ON(bytes != PAGE_SIZE); | ||
187 | |||
188 | err = ioremap_page(va, pa, mtype); | ||
189 | if (err) | ||
190 | goto err_out; | ||
191 | |||
192 | va += bytes; | ||
193 | } | ||
194 | |||
195 | flush_cache_vmap((unsigned long)new->addr, | ||
196 | (unsigned long)(new->addr + total)); | ||
197 | return new->addr; | ||
198 | |||
199 | err_out: | ||
200 | WARN_ON(1); /* FIXME: cleanup some mpu mappings */ | ||
201 | vunmap(new->addr); | ||
202 | return ERR_PTR(-EAGAIN); | ||
203 | } | ||
204 | |||
205 | static inline void vunmap_sg(const void *va) | ||
206 | { | ||
207 | vunmap(va); | ||
208 | } | ||
209 | |||
210 | static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj, | ||
211 | const u32 da) | ||
212 | { | ||
213 | struct iovm_struct *tmp; | ||
214 | |||
215 | list_for_each_entry(tmp, &obj->mmap, list) { | ||
216 | if ((da >= tmp->da_start) && (da < tmp->da_end)) { | ||
217 | size_t len; | ||
218 | |||
219 | len = tmp->da_end - tmp->da_start; | ||
220 | |||
221 | dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", | ||
222 | __func__, tmp->da_start, da, tmp->da_end, len, | ||
223 | tmp->flags); | ||
224 | |||
225 | return tmp; | ||
226 | } | ||
227 | } | ||
228 | |||
229 | return NULL; | ||
230 | } | ||
231 | |||
232 | /** | ||
233 | * omap_find_iovm_area - find iovma which includes @da | ||
234 | * @da: iommu device virtual address | ||
235 | * | ||
236 | * Find the existing iovma which includes @da | ||
237 | */ | ||
238 | struct iovm_struct *omap_find_iovm_area(struct omap_iommu *obj, u32 da) | ||
239 | { | ||
240 | struct iovm_struct *area; | ||
241 | |||
242 | mutex_lock(&obj->mmap_lock); | ||
243 | area = __find_iovm_area(obj, da); | ||
244 | mutex_unlock(&obj->mmap_lock); | ||
245 | |||
246 | return area; | ||
247 | } | ||
248 | EXPORT_SYMBOL_GPL(omap_find_iovm_area); | ||
249 | |||
250 | /* | ||
251 | * This finds the hole (area) which fits the requested address and length | ||
252 | * in the iovma list (obj->mmap), and returns the newly allocated iovma. | ||
253 | */ | ||
254 | static struct iovm_struct *alloc_iovm_area(struct omap_iommu *obj, u32 da, | ||
255 | size_t bytes, u32 flags) | ||
256 | { | ||
257 | struct iovm_struct *new, *tmp; | ||
258 | u32 start, prev_end, alignment; | ||
259 | |||
260 | if (!obj || !bytes) | ||
261 | return ERR_PTR(-EINVAL); | ||
262 | |||
263 | start = da; | ||
264 | alignment = PAGE_SIZE; | ||
265 | |||
266 | if (~flags & IOVMF_DA_FIXED) { | ||
267 | /* Don't map address 0 */ | ||
268 | start = obj->da_start ? obj->da_start : alignment; | ||
269 | |||
270 | if (flags & IOVMF_LINEAR) | ||
271 | alignment = iopgsz_max(bytes); | ||
272 | start = roundup(start, alignment); | ||
273 | } else if (start < obj->da_start || start > obj->da_end || | ||
274 | obj->da_end - start < bytes) { | ||
275 | return ERR_PTR(-EINVAL); | ||
276 | } | ||
277 | |||
278 | tmp = NULL; | ||
279 | if (list_empty(&obj->mmap)) | ||
280 | goto found; | ||
281 | |||
282 | prev_end = 0; | ||
283 | list_for_each_entry(tmp, &obj->mmap, list) { | ||
284 | |||
285 | if (prev_end > start) | ||
286 | break; | ||
287 | |||
288 | if (tmp->da_start > start && (tmp->da_start - start) >= bytes) | ||
289 | goto found; | ||
290 | |||
291 | if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED) | ||
292 | start = roundup(tmp->da_end + 1, alignment); | ||
293 | |||
294 | prev_end = tmp->da_end; | ||
295 | } | ||
296 | |||
297 | if ((start >= prev_end) && (obj->da_end - start >= bytes)) | ||
298 | goto found; | ||
299 | |||
300 | dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n", | ||
301 | __func__, da, bytes, flags); | ||
302 | |||
303 | return ERR_PTR(-EINVAL); | ||
304 | |||
305 | found: | ||
306 | new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL); | ||
307 | if (!new) | ||
308 | return ERR_PTR(-ENOMEM); | ||
309 | |||
310 | new->iommu = obj; | ||
311 | new->da_start = start; | ||
312 | new->da_end = start + bytes; | ||
313 | new->flags = flags; | ||
314 | |||
315 | /* | ||
316 | * keep ascending order of iovmas | ||
317 | */ | ||
318 | if (tmp) | ||
319 | list_add_tail(&new->list, &tmp->list); | ||
320 | else | ||
321 | list_add(&new->list, &obj->mmap); | ||
322 | |||
323 | dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n", | ||
324 | __func__, new->da_start, start, new->da_end, bytes, flags); | ||
325 | |||
326 | return new; | ||
327 | } | ||
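To illustrate the hole search above (editorial sketch; the addresses are invented):

	/*
	 * Example: with existing iovmas [0x1000-0x5000) and [0x9000-0xa000)
	 * and a 0x3000-byte request without IOVMF_DA_FIXED, the walk skips
	 * the first area, rounds 'start' up past its da_end to 0x6000, sees
	 * that the next area only begins at 0x9000 (0x3000 bytes of room),
	 * and places the new iovma at [0x6000-0x9000).
	 */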
328 | |||
329 | static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area) | ||
330 | { | ||
331 | size_t bytes; | ||
332 | |||
333 | BUG_ON(!obj || !area); | ||
334 | |||
335 | bytes = area->da_end - area->da_start; | ||
336 | |||
337 | dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n", | ||
338 | __func__, area->da_start, area->da_end, bytes, area->flags); | ||
339 | |||
340 | list_del(&area->list); | ||
341 | kmem_cache_free(iovm_area_cachep, area); | ||
342 | } | ||
343 | |||
344 | /** | ||
345 | * omap_da_to_va - convert (d) to (v) | ||
346 | * @obj: objective iommu | ||
347 | * @da: iommu device virtual address | ||
349 | * | ||
350 | * Returns mpu virtual addr which corresponds to a given device virtual addr | ||
351 | */ | ||
352 | void *omap_da_to_va(struct omap_iommu *obj, u32 da) | ||
353 | { | ||
354 | void *va = NULL; | ||
355 | struct iovm_struct *area; | ||
356 | |||
357 | mutex_lock(&obj->mmap_lock); | ||
358 | |||
359 | area = __find_iovm_area(obj, da); | ||
360 | if (!area) { | ||
361 | dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da); | ||
362 | goto out; | ||
363 | } | ||
364 | va = area->va; | ||
365 | out: | ||
366 | mutex_unlock(&obj->mmap_lock); | ||
367 | |||
368 | return va; | ||
369 | } | ||
370 | EXPORT_SYMBOL_GPL(omap_da_to_va); | ||
371 | |||
372 | static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va) | ||
373 | { | ||
374 | unsigned int i; | ||
375 | struct scatterlist *sg; | ||
376 | void *va = _va; | ||
378 | |||
379 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | ||
380 | struct page *pg; | ||
381 | const size_t bytes = PAGE_SIZE; | ||
382 | |||
383 | /* | ||
384 | * iommu 'superpage' isn't supported with 'omap_iommu_vmalloc()' | ||
385 | */ | ||
386 | pg = vmalloc_to_page(va); | ||
387 | BUG_ON(!pg); | ||
388 | sg_set_page(sg, pg, bytes, 0); | ||
389 | |||
390 | va += bytes; | ||
391 | } | ||
394 | } | ||
395 | |||
396 | static inline void sgtable_drain_vmalloc(struct sg_table *sgt) | ||
397 | { | ||
398 | /* | ||
399 | * Actually this is not necessary at all; it just exists for | ||
400 | * consistency and code readability. | ||
401 | */ | ||
402 | BUG_ON(!sgt); | ||
403 | } | ||
404 | |||
405 | /* create 'da' <-> 'pa' mapping from 'sgt' */ | ||
406 | static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new, | ||
407 | const struct sg_table *sgt, u32 flags) | ||
408 | { | ||
409 | int err; | ||
410 | unsigned int i, j; | ||
411 | struct scatterlist *sg; | ||
412 | u32 da = new->da_start; | ||
413 | int order; | ||
414 | |||
415 | if (!domain || !sgt) | ||
416 | return -EINVAL; | ||
417 | |||
418 | BUG_ON(!sgtable_ok(sgt)); | ||
419 | |||
420 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | ||
421 | u32 pa; | ||
422 | size_t bytes; | ||
423 | |||
424 | pa = sg_phys(sg) - sg->offset; | ||
425 | bytes = sg->length + sg->offset; | ||
426 | |||
427 | flags &= ~IOVMF_PGSZ_MASK; | ||
428 | |||
429 | if (bytes_to_iopgsz(bytes) < 0) | ||
430 | goto err_out; | ||
431 | |||
432 | order = get_order(bytes); | ||
433 | |||
434 | pr_debug("%s: [%d] %08x %08x(%x)\n", __func__, | ||
435 | i, da, pa, bytes); | ||
436 | |||
437 | err = iommu_map(domain, da, pa, order, flags); | ||
438 | if (err) | ||
439 | goto err_out; | ||
440 | |||
441 | da += bytes; | ||
442 | } | ||
443 | return 0; | ||
444 | |||
445 | err_out: | ||
446 | da = new->da_start; | ||
447 | |||
448 | for_each_sg(sgt->sgl, sg, i, j) { | ||
449 | size_t bytes; | ||
450 | |||
451 | bytes = sg->length + sg->offset; | ||
452 | order = get_order(bytes); | ||
453 | |||
454 | /* ignore failures.. we're already handling one */ | ||
455 | iommu_unmap(domain, da, order); | ||
456 | |||
457 | da += bytes; | ||
458 | } | ||
459 | return err; | ||
460 | } | ||
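For orientation (editorial note): this code is written against the older iommu_map()/iommu_unmap() prototypes that take a page 'order' rather than a byte size, so a superpage-sized entry is passed down as follows (values illustrative):

	/*
	 * Example: bytes = SZ_1M gives get_order(SZ_1M) == 8 (with 4 KiB
	 * pages), so the call is iommu_map(domain, da, pa, 8, flags) and
	 * the underlying OMAP iommu driver derives the 1 MiB section
	 * mapping from the size implied by that order.
	 */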
461 | |||
462 | /* release 'da' <-> 'pa' mapping */ | ||
463 | static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj, | ||
464 | struct iovm_struct *area) | ||
465 | { | ||
466 | u32 start; | ||
467 | size_t total = area->da_end - area->da_start; | ||
468 | const struct sg_table *sgt = area->sgt; | ||
469 | struct scatterlist *sg; | ||
470 | int i, err; | ||
471 | |||
472 | BUG_ON(!sgtable_ok(sgt)); | ||
473 | BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE)); | ||
474 | |||
475 | start = area->da_start; | ||
476 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | ||
477 | size_t bytes; | ||
478 | int order; | ||
479 | |||
480 | bytes = sg->length + sg->offset; | ||
481 | order = get_order(bytes); | ||
482 | |||
483 | err = iommu_unmap(domain, start, order); | ||
484 | if (err < 0) | ||
485 | break; | ||
486 | |||
487 | dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n", | ||
488 | __func__, start, bytes, area->flags); | ||
489 | |||
490 | BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE)); | ||
491 | |||
492 | total -= bytes; | ||
493 | start += bytes; | ||
494 | } | ||
495 | BUG_ON(total); | ||
496 | } | ||
497 | |||
498 | /* common helper for all the unmapping paths */ | ||
499 | static struct sg_table *unmap_vm_area(struct iommu_domain *domain, | ||
500 | struct omap_iommu *obj, const u32 da, | ||
501 | void (*fn)(const void *), u32 flags) | ||
502 | { | ||
503 | struct sg_table *sgt = NULL; | ||
504 | struct iovm_struct *area; | ||
505 | |||
506 | if (!IS_ALIGNED(da, PAGE_SIZE)) { | ||
507 | dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da); | ||
508 | return NULL; | ||
509 | } | ||
510 | |||
511 | mutex_lock(&obj->mmap_lock); | ||
512 | |||
513 | area = __find_iovm_area(obj, da); | ||
514 | if (!area) { | ||
515 | dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da); | ||
516 | goto out; | ||
517 | } | ||
518 | |||
519 | if ((area->flags & flags) != flags) { | ||
520 | dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__, | ||
521 | area->flags); | ||
522 | goto out; | ||
523 | } | ||
524 | sgt = (struct sg_table *)area->sgt; | ||
525 | |||
526 | unmap_iovm_area(domain, obj, area); | ||
527 | |||
528 | fn(area->va); | ||
529 | |||
530 | dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__, | ||
531 | area->da_start, da, area->da_end, | ||
532 | area->da_end - area->da_start, area->flags); | ||
533 | |||
534 | free_iovm_area(obj, area); | ||
535 | out: | ||
536 | mutex_unlock(&obj->mmap_lock); | ||
537 | |||
538 | return sgt; | ||
539 | } | ||
540 | |||
541 | static u32 map_iommu_region(struct iommu_domain *domain, struct omap_iommu *obj, | ||
542 | u32 da, const struct sg_table *sgt, void *va, | ||
543 | size_t bytes, u32 flags) | ||
544 | { | ||
545 | int err = -ENOMEM; | ||
546 | struct iovm_struct *new; | ||
547 | |||
548 | mutex_lock(&obj->mmap_lock); | ||
549 | |||
550 | new = alloc_iovm_area(obj, da, bytes, flags); | ||
551 | if (IS_ERR(new)) { | ||
552 | err = PTR_ERR(new); | ||
553 | goto err_alloc_iovma; | ||
554 | } | ||
555 | new->va = va; | ||
556 | new->sgt = sgt; | ||
557 | |||
558 | if (map_iovm_area(domain, new, sgt, new->flags)) | ||
559 | goto err_map; | ||
560 | |||
561 | mutex_unlock(&obj->mmap_lock); | ||
562 | |||
563 | dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n", | ||
564 | __func__, new->da_start, bytes, new->flags, va); | ||
565 | |||
566 | return new->da_start; | ||
567 | |||
568 | err_map: | ||
569 | free_iovm_area(obj, new); | ||
570 | err_alloc_iovma: | ||
571 | mutex_unlock(&obj->mmap_lock); | ||
572 | return err; | ||
573 | } | ||
574 | |||
575 | static inline u32 | ||
576 | __iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, | ||
577 | u32 da, const struct sg_table *sgt, | ||
578 | void *va, size_t bytes, u32 flags) | ||
579 | { | ||
580 | return map_iommu_region(domain, obj, da, sgt, va, bytes, flags); | ||
581 | } | ||
582 | |||
583 | /** | ||
584 | * omap_iommu_vmap - (d)-(p)-(v) address mapper | ||
585 | * @obj: objective iommu | ||
586 | * @sgt: address of scatter gather table | ||
587 | * @flags: iovma and page property | ||
588 | * | ||
589 | * Creates a 1-n-1 mapping with the given @sgt and returns @da. | ||
590 | * All @sgt elements must be io page size aligned. | ||
591 | */ | ||
592 | u32 omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da, | ||
593 | const struct sg_table *sgt, u32 flags) | ||
594 | { | ||
595 | size_t bytes; | ||
596 | void *va = NULL; | ||
597 | |||
598 | if (!obj || !obj->dev || !sgt) | ||
599 | return -EINVAL; | ||
600 | |||
601 | bytes = sgtable_len(sgt); | ||
602 | if (!bytes) | ||
603 | return -EINVAL; | ||
604 | bytes = PAGE_ALIGN(bytes); | ||
605 | |||
606 | if (flags & IOVMF_MMIO) { | ||
607 | va = vmap_sg(sgt); | ||
608 | if (IS_ERR(va)) | ||
609 | return PTR_ERR(va); | ||
610 | } | ||
611 | |||
612 | flags |= IOVMF_DISCONT; | ||
613 | flags |= IOVMF_MMIO; | ||
614 | |||
615 | da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags); | ||
616 | if (IS_ERR_VALUE(da)) | ||
617 | vunmap_sg(va); | ||
618 | |||
619 | return da + sgtable_offset(sgt); | ||
620 | } | ||
621 | EXPORT_SYMBOL_GPL(omap_iommu_vmap); | ||
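As a usage illustration only (editorial sketch, not part of the patch): a client driver that already holds an attached 'domain', the matching 'obj', and an sg_table whose entries are each exactly one page might map and later unmap it roughly like this; the helper name and error handling are invented for the example.

	static int example_vmap(struct iommu_domain *domain, struct omap_iommu *obj,
				struct sg_table *sgt)
	{
		u32 da;

		/* da == 0 and no IOVMF_DA_FIXED: let alloc_iovm_area() pick a slot */
		da = omap_iommu_vmap(domain, obj, 0, sgt, IOVMF_MMIO);
		if (IS_ERR_VALUE(da))
			return (int)da;		/* negative errno encoded in the u32 */

		/* ... hand 'da' to the remote processor ... */

		omap_iommu_vunmap(domain, obj, da);	/* hands the sgt back to the caller */
		return 0;
	}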
622 | |||
623 | /** | ||
624 | * omap_iommu_vunmap - release virtual mapping obtained by 'omap_iommu_vmap()' | ||
625 | * @obj: objective iommu | ||
626 | * @da: iommu device virtual address | ||
627 | * | ||
628 | * Free the iommu virtually contiguous memory area starting at | ||
629 | * @da, which was returned by 'omap_iommu_vmap()'. | ||
630 | */ | ||
631 | struct sg_table * | ||
632 | omap_iommu_vunmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da) | ||
633 | { | ||
634 | struct sg_table *sgt; | ||
635 | /* | ||
636 | * 'sgt' was allocated by the caller before 'omap_iommu_vmap()' was called. | ||
637 | * Just return 'sgt' to the caller to free. | ||
638 | */ | ||
639 | da &= PAGE_MASK; | ||
640 | sgt = unmap_vm_area(domain, obj, da, vunmap_sg, | ||
641 | IOVMF_DISCONT | IOVMF_MMIO); | ||
642 | if (!sgt) | ||
643 | dev_dbg(obj->dev, "%s: No sgt\n", __func__); | ||
644 | return sgt; | ||
645 | } | ||
646 | EXPORT_SYMBOL_GPL(omap_iommu_vunmap); | ||
647 | |||
648 | /** | ||
649 | * omap_iommu_vmalloc - (d)-(p)-(v) address allocator and mapper | ||
650 | * @obj: objective iommu | ||
651 | * @da: requested iommu device virtual address for the contiguous area | ||
652 | * @bytes: allocation size | ||
653 | * @flags: iovma and page property | ||
654 | * | ||
655 | * Allocates @bytes linearly, creates a 1-n-1 mapping and returns | ||
656 | * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set. | ||
657 | */ | ||
658 | u32 | ||
659 | omap_iommu_vmalloc(struct iommu_domain *domain, struct omap_iommu *obj, u32 da, | ||
660 | size_t bytes, u32 flags) | ||
661 | { | ||
662 | void *va; | ||
663 | struct sg_table *sgt; | ||
664 | |||
665 | if (!obj || !obj->dev || !bytes) | ||
666 | return -EINVAL; | ||
667 | |||
668 | bytes = PAGE_ALIGN(bytes); | ||
669 | |||
670 | va = vmalloc(bytes); | ||
671 | if (!va) | ||
672 | return -ENOMEM; | ||
673 | |||
674 | flags |= IOVMF_DISCONT; | ||
675 | flags |= IOVMF_ALLOC; | ||
676 | |||
677 | sgt = sgtable_alloc(bytes, flags, da, 0); | ||
678 | if (IS_ERR(sgt)) { | ||
679 | da = PTR_ERR(sgt); | ||
680 | goto err_sgt_alloc; | ||
681 | } | ||
682 | sgtable_fill_vmalloc(sgt, va); | ||
683 | |||
684 | da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags); | ||
685 | if (IS_ERR_VALUE(da)) | ||
686 | goto err_iommu_vmap; | ||
687 | |||
688 | return da; | ||
689 | |||
690 | err_iommu_vmap: | ||
691 | sgtable_drain_vmalloc(sgt); | ||
692 | sgtable_free(sgt); | ||
693 | err_sgt_alloc: | ||
694 | vfree(va); | ||
695 | return da; | ||
696 | } | ||
697 | EXPORT_SYMBOL_GPL(omap_iommu_vmalloc); | ||
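Similarly, a minimal allocate-and-release sketch (editorial; 'domain' and 'obj' are assumed to come from the usual attach path, and the size and helper name are made up):

	static int example_vmalloc(struct iommu_domain *domain, struct omap_iommu *obj)
	{
		u32 da;

		da = omap_iommu_vmalloc(domain, obj, 0, SZ_1M, 0);
		if (IS_ERR_VALUE(da))
			return (int)da;

		/* CPU access via omap_da_to_va(obj, da); device access via 'da' */

		omap_iommu_vfree(domain, obj, da);
		return 0;
	}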
698 | |||
699 | /** | ||
700 | * omap_iommu_vfree - release memory allocated by 'omap_iommu_vmalloc()' | ||
701 | * @obj: objective iommu | ||
702 | * @da: iommu device virtual address | ||
703 | * | ||
704 | * Frees the iommu virtually contiguous memory area starting at | ||
705 | * @da, as obtained from 'omap_iommu_vmalloc()'. | ||
706 | */ | ||
707 | void omap_iommu_vfree(struct iommu_domain *domain, struct omap_iommu *obj, | ||
708 | const u32 da) | ||
709 | { | ||
710 | struct sg_table *sgt; | ||
711 | |||
712 | sgt = unmap_vm_area(domain, obj, da, vfree, | ||
713 | IOVMF_DISCONT | IOVMF_ALLOC); | ||
714 | if (!sgt) | ||
715 | dev_dbg(obj->dev, "%s: No sgt\n", __func__); | ||
716 | sgtable_free(sgt); | ||
717 | } | ||
718 | EXPORT_SYMBOL_GPL(omap_iommu_vfree); | ||
719 | |||
720 | static int __init iovmm_init(void) | ||
721 | { | ||
722 | const unsigned long flags = SLAB_HWCACHE_ALIGN; | ||
723 | struct kmem_cache *p; | ||
724 | |||
725 | p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0, | ||
726 | flags, NULL); | ||
727 | if (!p) | ||
728 | return -ENOMEM; | ||
729 | iovm_area_cachep = p; | ||
730 | |||
731 | return 0; | ||
732 | } | ||
733 | module_init(iovmm_init); | ||
734 | |||
735 | static void __exit iovmm_exit(void) | ||
736 | { | ||
737 | kmem_cache_destroy(iovm_area_cachep); | ||
738 | } | ||
739 | module_exit(iovmm_exit); | ||
740 | |||
741 | MODULE_DESCRIPTION("omap iommu: simple virtual address space management"); | ||
742 | MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>"); | ||
743 | MODULE_LICENSE("GPL v2"); | ||