path: root/drivers/iommu/omap-iovmm.c
author		Ohad Ben-Cohen <ohad@wizery.com>	2011-08-17 15:57:56 -0400
committer	Joerg Roedel <joerg.roedel@amd.com>	2011-08-26 05:46:01 -0400
commit		6c32df437c7c5b1fc29d3ca29b0ff44f8dfafc56 (patch)
tree		939d751cbae86291f6b5152cee4615284165fece /drivers/iommu/omap-iovmm.c
parent		384fa675795ae3796dbc263e5d0f35b9a27d6462 (diff)
omap: iommu: omapify 'struct iommu' and exposed API
Prepend 'omap_' to OMAP's 'struct iommu' and exposed API, to prevent
namespace pollution and generally to improve readability of the code
that still uses the driver directly. Update the users as needed as well.

Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Acked-by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
Acked-by: Tony Lindgren <tony@atomide.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
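To make the effect of the rename concrete, here is a minimal, hypothetical caller sketch (not part of this patch): only the omap_iommu_vmap()/omap_iommu_vunmap() names, their signatures and 'struct omap_iommu' are taken from the hunks below; the wrapper function, its name and the <plat/iovmm.h> header path are assumptions for illustration.

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/iommu.h>
#include <plat/iovmm.h>	/* assumed location of the OMAP iovmm declarations */

/*
 * Hypothetical caller: map a scatter-gather table through the renamed
 * API and tear the mapping down again.  Before this patch the same
 * sequence used iommu_vmap()/iommu_vunmap() on a plain 'struct iommu *'.
 */
static int example_map_once(struct iommu_domain *domain,
			    struct omap_iommu *obj,
			    const struct sg_table *sgt, u32 flags)
{
	u32 da = omap_iommu_vmap(domain, obj, 0, sgt, flags);

	if (IS_ERR_VALUE(da))
		return (int)da;

	/* omap_iommu_vunmap() hands the caller's sg_table back to be freed. */
	omap_iommu_vunmap(domain, obj, da);
	return 0;
}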
Diffstat (limited to 'drivers/iommu/omap-iovmm.c')
-rw-r--r--	drivers/iommu/omap-iovmm.c	62
1 file changed, 33 insertions(+), 29 deletions(-)
diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c
index 996bec0b4a2b..5e7f97dc76ef 100644
--- a/drivers/iommu/omap-iovmm.c
+++ b/drivers/iommu/omap-iovmm.c
@@ -191,7 +191,8 @@ static inline void vunmap_sg(const void *va)
 	vunmap(va);
 }
 
-static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
+static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj,
+							const u32 da)
 {
 	struct iovm_struct *tmp;
 
@@ -213,12 +214,12 @@ static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
 }
 
 /**
- * find_iovm_area - find iovma which includes @da
+ * omap_find_iovm_area - find iovma which includes @da
  * @da: iommu device virtual address
  *
  * Find the existing iovma starting at @da
  */
-struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
+struct iovm_struct *omap_find_iovm_area(struct omap_iommu *obj, u32 da)
 {
 	struct iovm_struct *area;
 
@@ -228,13 +229,13 @@ struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
 
 	return area;
 }
-EXPORT_SYMBOL_GPL(find_iovm_area);
+EXPORT_SYMBOL_GPL(omap_find_iovm_area);
 
 /*
  * This finds the hole(area) which fits the requested address and len
  * in iovmas mmap, and returns the new allocated iovma.
  */
-static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
+static struct iovm_struct *alloc_iovm_area(struct omap_iommu *obj, u32 da,
 						size_t bytes, u32 flags)
 {
 	struct iovm_struct *new, *tmp;
@@ -309,7 +310,7 @@ found:
 	return new;
 }
 
-static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
+static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area)
 {
 	size_t bytes;
 
@@ -325,14 +326,14 @@ static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
 }
 
 /**
- * da_to_va - convert (d) to (v)
+ * omap_da_to_va - convert (d) to (v)
  * @obj: objective iommu
  * @da: iommu device virtual address
  * @va: mpu virtual address
  *
  * Returns mpu virtual addr which corresponds to a given device virtual addr
  */
-void *da_to_va(struct iommu *obj, u32 da)
+void *omap_da_to_va(struct omap_iommu *obj, u32 da)
 {
 	void *va = NULL;
 	struct iovm_struct *area;
@@ -350,7 +351,7 @@ out:
 
 	return va;
 }
-EXPORT_SYMBOL_GPL(da_to_va);
+EXPORT_SYMBOL_GPL(omap_da_to_va);
 
 static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
 {
@@ -364,7 +365,7 @@ static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
 	const size_t bytes = PAGE_SIZE;
 
 	/*
-	 * iommu 'superpage' isn't supported with 'iommu_vmalloc()'
+	 * iommu 'superpage' isn't supported with 'omap_iommu_vmalloc()'
 	 */
 	pg = vmalloc_to_page(va);
 	BUG_ON(!pg);
@@ -443,7 +444,7 @@ err_out:
 }
 
 /* release 'da' <-> 'pa' mapping */
-static void unmap_iovm_area(struct iommu_domain *domain, struct iommu *obj,
+static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
 			struct iovm_struct *area)
 {
 	u32 start;
@@ -480,7 +481,7 @@ static void unmap_iovm_area(struct iommu_domain *domain, struct iommu *obj,
 
 /* template function for all unmapping */
 static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
-			struct iommu *obj, const u32 da,
+			struct omap_iommu *obj, const u32 da,
 			void (*fn)(const void *), u32 flags)
 {
 	struct sg_table *sgt = NULL;
@@ -521,7 +522,7 @@ out:
 	return sgt;
 }
 
-static u32 map_iommu_region(struct iommu_domain *domain, struct iommu *obj,
+static u32 map_iommu_region(struct iommu_domain *domain, struct omap_iommu *obj,
 		u32 da, const struct sg_table *sgt, void *va,
 		size_t bytes, u32 flags)
 {
@@ -555,7 +556,8 @@ err_alloc_iovma:
 	return err;
 }
 
-static inline u32 __iommu_vmap(struct iommu_domain *domain, struct iommu *obj,
+static inline u32
+__iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj,
 			u32 da, const struct sg_table *sgt,
 			void *va, size_t bytes, u32 flags)
 {
@@ -563,7 +565,7 @@ static inline u32 __iommu_vmap(struct iommu_domain *domain, struct iommu *obj,
 }
 
 /**
- * iommu_vmap - (d)-(p)-(v) address mapper
+ * omap_iommu_vmap - (d)-(p)-(v) address mapper
  * @obj: objective iommu
  * @sgt: address of scatter gather table
  * @flags: iovma and page property
@@ -571,7 +573,7 @@ static inline u32 __iommu_vmap(struct iommu_domain *domain, struct iommu *obj,
  * Creates 1-n-1 mapping with given @sgt and returns @da.
  * All @sgt element must be io page size aligned.
  */
-u32 iommu_vmap(struct iommu_domain *domain, struct iommu *obj, u32 da,
+u32 omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
 		const struct sg_table *sgt, u32 flags)
 {
 	size_t bytes;
@@ -600,22 +602,22 @@ u32 iommu_vmap(struct iommu_domain *domain, struct iommu *obj, u32 da,
 
 	return da;
 }
-EXPORT_SYMBOL_GPL(iommu_vmap);
+EXPORT_SYMBOL_GPL(omap_iommu_vmap);
 
 /**
- * iommu_vunmap - release virtual mapping obtained by 'iommu_vmap()'
+ * omap_iommu_vunmap - release virtual mapping obtained by 'omap_iommu_vmap()'
  * @obj: objective iommu
  * @da: iommu device virtual address
  *
  * Free the iommu virtually contiguous memory area starting at
- * @da, which was returned by 'iommu_vmap()'.
+ * @da, which was returned by 'omap_iommu_vmap()'.
  */
 struct sg_table *
-iommu_vunmap(struct iommu_domain *domain, struct iommu *obj, u32 da)
+omap_iommu_vunmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da)
 {
 	struct sg_table *sgt;
 	/*
-	 * 'sgt' is allocated before 'iommu_vmalloc()' is called.
+	 * 'sgt' is allocated before 'omap_iommu_vmalloc()' is called.
 	 * Just returns 'sgt' to the caller to free
 	 */
 	sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
@@ -624,10 +626,10 @@ iommu_vunmap(struct iommu_domain *domain, struct iommu *obj, u32 da)
 		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
 	return sgt;
 }
-EXPORT_SYMBOL_GPL(iommu_vunmap);
+EXPORT_SYMBOL_GPL(omap_iommu_vunmap);
 
 /**
- * iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
+ * omap_iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
  * @obj: objective iommu
  * @da: contiguous iommu virtual memory
  * @bytes: allocation size
@@ -636,7 +638,8 @@ EXPORT_SYMBOL_GPL(iommu_vunmap);
  * Allocate @bytes linearly and creates 1-n-1 mapping and returns
  * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
  */
-u32 iommu_vmalloc(struct iommu_domain *domain, struct iommu *obj, u32 da,
+u32
+omap_iommu_vmalloc(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
 		size_t bytes, u32 flags)
 {
 	void *va;
@@ -674,17 +677,18 @@ err_sgt_alloc:
 	vfree(va);
 	return da;
 }
-EXPORT_SYMBOL_GPL(iommu_vmalloc);
+EXPORT_SYMBOL_GPL(omap_iommu_vmalloc);
 
 /**
- * iommu_vfree - release memory allocated by 'iommu_vmalloc()'
+ * omap_iommu_vfree - release memory allocated by 'omap_iommu_vmalloc()'
  * @obj: objective iommu
  * @da: iommu device virtual address
  *
  * Frees the iommu virtually continuous memory area starting at
- * @da, as obtained from 'iommu_vmalloc()'.
+ * @da, as obtained from 'omap_iommu_vmalloc()'.
  */
-void iommu_vfree(struct iommu_domain *domain, struct iommu *obj, const u32 da)
+void omap_iommu_vfree(struct iommu_domain *domain, struct omap_iommu *obj,
+				const u32 da)
 {
 	struct sg_table *sgt;
 
@@ -694,7 +698,7 @@ void iommu_vfree(struct iommu_domain *domain, struct iommu *obj, const u32 da)
 		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
 	sgtable_free(sgt);
 }
-EXPORT_SYMBOL_GPL(iommu_vfree);
+EXPORT_SYMBOL_GPL(omap_iommu_vfree);
 
 static int __init iovmm_init(void)
 {
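The kernel-doc above also describes the allocator pair. A hedged usage sketch, again hypothetical and not part of the patch: only the omap_iommu_vmalloc()/omap_da_to_va()/omap_iommu_vfree() names, their signatures and the IOVMF_DA_FIXED flag come from the hunks above; EXAMPLE_DA, the buffer size, the helper function and the <plat/iovmm.h> header path are made up for illustration.

#include <linux/err.h>
#include <linux/string.h>
#include <linux/iommu.h>
#include <plat/iovmm.h>	/* assumed location of the OMAP iovmm declarations */

#define EXAMPLE_DA	0x11000000	/* made-up device virtual address */
#define EXAMPLE_BYTES	(64 * 1024)	/* made-up allocation size */

/*
 * Hypothetical helper: allocate a region at a fixed device virtual
 * address, touch it through its MPU-side virtual address, then release it.
 */
static int example_alloc_once(struct iommu_domain *domain, struct omap_iommu *obj)
{
	void *va;
	u32 da = omap_iommu_vmalloc(domain, obj, EXAMPLE_DA, EXAMPLE_BYTES,
				    IOVMF_DA_FIXED);

	if (IS_ERR_VALUE(da))
		return (int)da;

	va = omap_da_to_va(obj, da);	/* MPU virtual address of the region */
	if (va)
		memset(va, 0, EXAMPLE_BYTES);

	omap_iommu_vfree(domain, obj, da);
	return 0;
}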