author    Ohad Ben-Cohen <ohad@wizery.com>       2011-08-16 08:31:16 -0400
committer Joerg Roedel <joerg.roedel@amd.com>    2011-08-26 05:46:01 -0400
commit    5a6a5b1bcca3247e9161ccada488965c94012c48
tree      1f9239bdafb2de2b4882c9fdf2cbc168c71b784a /drivers/iommu/omap-iovmm.c
parent    5da14a471455bd725534d18604b4d89ffbe158df
omap: iovmm: remove unused functionality
Remove unused functionality from OMAP's iovmm module. The intention is
to eventually replace iovmm entirely with the generic DMA-API, so new
code that would need this iovmm functionality will have to extend the
DMA-API instead.

Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
Acked-by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
Acked-by: Tony Lindgren <tony@atomide.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Diffstat (limited to 'drivers/iommu/omap-iovmm.c')
-rw-r--r--  drivers/iommu/omap-iovmm.c | 201 -------------------------------
1 file changed, 0 insertions(+), 201 deletions(-)
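The commit message names the generic DMA-API as the eventual replacement for iovmm. For orientation, here is a minimal sketch of how a comparable contiguous mapping is expressed through the generic IOMMU API that the DMA-API layers over. This is not part of the patch; iommu_map()'s exact signature has varied across kernel versions, and the size-based form below is from kernels newer than this commit.

#include <linux/iommu.h>

/*
 * Sketch only, not part of this patch: map and later unmap a
 * physically contiguous region with the generic IOMMU API. The
 * size-based iommu_map() signature used here is from kernels newer
 * than this commit (older ones took a page order instead).
 */
static int contig_map_example(struct iommu_domain *domain,
			      unsigned long iova, phys_addr_t pa,
			      size_t bytes)
{
	int ret;

	ret = iommu_map(domain, iova, pa, bytes, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	/* ... the device may now access [iova, iova + bytes) ... */

	iommu_unmap(domain, iova, bytes);
	return 0;
}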
diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c
index 809ca124196e..996bec0b4a2b 100644
--- a/drivers/iommu/omap-iovmm.c
+++ b/drivers/iommu/omap-iovmm.c
@@ -25,40 +25,6 @@
 
 #include <plat/iopgtable.h>
 
-/*
- * A device driver needs to create address mappings between:
- *
- * - iommu/device address
- * - physical address
- * - mpu virtual address
- *
- * There are 4 possible patterns for them:
- *
- *    | iova/                 mapping         iommu_                 page
- *    | da     pa      va     (d)-(p)-(v)     function               type
- * ---------------------------------------------------------------------------
- *  1 | c      c       c       1 - 1 - 1      _kmap() / _kunmap()     s
- *  2 | c      c,a     c       1 - 1 - 1      _kmalloc()/ _kfree()    s
- *  3 | c      d       c       1 - n - 1      _vmap() / _vunmap()     s
- *  4 | c      d,a     c       1 - n - 1      _vmalloc()/ _vfree()    n*
- *
- *
- * 'iova': device iommu virtual address
- * 'da':   alias of 'iova'
- * 'pa':   physical address
- * 'va':   mpu virtual address
- *
- * 'c':    contiguous memory area
- * 'd':    discontiguous memory area
- * 'a':    anonymous memory allocation
- * '()':   optional feature
- *
- * 'n':    a normal page (4KB) size is used.
- * 's':    multiple iommu superpage sizes (16MB, 1MB, 64KB, 4KB) are used.
- *
- * '*':    not yet, but feasible.
- */
-
 static struct kmem_cache *iovm_area_cachep;
 
 /* return total bytes of sg buffers */
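Pattern 2 in the table just removed corresponds to the iommu_kmalloc()/iommu_kfree() pair deleted further down in this patch. A minimal usage sketch, assuming a hypothetical, already-initialized domain and omap iommu object:

#include <linux/err.h>
#include <linux/sizes.h>

/*
 * Pattern 2 (1-1-1, kmalloc'd backing): the memory is physically
 * contiguous, so the IOMMU can map it with superpages. 'domain' and
 * 'obj' are assumed to be set up elsewhere; passing da = 0 without
 * IOVMF_DA_FIXED lets iovmm choose the device address.
 */
static int pattern2_example(struct iommu_domain *domain, struct iommu *obj)
{
	u32 da;

	da = iommu_kmalloc(domain, obj, 0, SZ_64K, 0);
	if (IS_ERR_VALUE(da))
		return (int)da;

	/* ... hand 'da' to the device ... */

	iommu_kfree(domain, obj, da);
	return 0;
}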
@@ -419,40 +385,6 @@ static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
 	BUG_ON(!sgt);
 }
 
-static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, u32 da,
-						size_t len)
-{
-	unsigned int i;
-	struct scatterlist *sg;
-
-	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
-		unsigned bytes;
-
-		bytes = max_alignment(da | pa);
-		bytes = min_t(unsigned, bytes, iopgsz_max(len));
-
-		BUG_ON(!iopgsz_ok(bytes));
-
-		sg_set_buf(sg, phys_to_virt(pa), bytes);
-		/*
-		 * 'pa' is contiguous (linear).
-		 */
-		pa += bytes;
-		da += bytes;
-		len -= bytes;
-	}
-	BUG_ON(len);
-}
-
-static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
-{
-	/*
-	 * This is not actually necessary; it exists only for
-	 * consistency and code readability.
-	 */
-	BUG_ON(!sgt);
-}
-
 /* create 'da' <-> 'pa' mapping from 'sgt' */
 static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
 			const struct sg_table *sgt, u32 flags)
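The deleted sgtable_fill_kmalloc() above always picks the largest OMAP page size that both the current address alignment (max_alignment(da | pa)) and the remaining length (iopgsz_max(len)) allow. A worked example with hypothetical addresses:

/*
 * Chunking walk-through for da = pa = 0x110000, len = 0x200000 (2MB),
 * with OMAP's 16MB/1MB/64KB/4KB page sizes:
 *
 *   0x110000..0x1fffff: only 64KB-aligned   -> 15 x 64KB entries
 *   0x200000..0x2fffff: now 1MB-aligned,
 *                       0x110000 remaining  -> 1 x 1MB entry
 *   0x300000..0x30ffff: 1MB-aligned but
 *                       only 64KB left      -> 1 x 64KB entry
 *
 * 15 * 64KB + 1MB + 64KB == 2MB, so the final BUG_ON(len) cannot
 * trigger for a page-aligned, page-multiple region.
 */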
@@ -764,139 +696,6 @@ void iommu_vfree(struct iommu_domain *domain, struct iommu *obj, const u32 da)
 }
 EXPORT_SYMBOL_GPL(iommu_vfree);
 
-static u32 __iommu_kmap(struct iommu_domain *domain, struct iommu *obj,
-		u32 da, u32 pa, void *va, size_t bytes, u32 flags)
-{
-	struct sg_table *sgt;
-
-	sgt = sgtable_alloc(bytes, flags, da, pa);
-	if (IS_ERR(sgt))
-		return PTR_ERR(sgt);
-
-	sgtable_fill_kmalloc(sgt, pa, da, bytes);
-
-	da = map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
-	if (IS_ERR_VALUE(da)) {
-		sgtable_drain_kmalloc(sgt);
-		sgtable_free(sgt);
-	}
-
-	return da;
-}
-
-/**
- * iommu_kmap - (d)-(p)-(v) address mapper
- * @obj:	objective iommu
- * @da:	contiguous iommu virtual memory
- * @pa:	contiguous physical memory
- * @flags:	iovma and page property
- *
- * Creates a 1-1-1 mapping and returns @da again, which can be
- * adjusted if 'IOVMF_DA_FIXED' is not set.
- */
-u32 iommu_kmap(struct iommu_domain *domain, struct iommu *obj, u32 da, u32 pa,
-		size_t bytes, u32 flags)
-{
-	void *va;
-
-	if (!obj || !obj->dev || !bytes)
-		return -EINVAL;
-
-	bytes = PAGE_ALIGN(bytes);
-
-	va = ioremap(pa, bytes);
-	if (!va)
-		return -ENOMEM;
-
-	flags |= IOVMF_LINEAR;
-	flags |= IOVMF_MMIO;
-
-	da = __iommu_kmap(domain, obj, da, pa, va, bytes, flags);
-	if (IS_ERR_VALUE(da))
-		iounmap(va);
-
-	return da;
-}
-EXPORT_SYMBOL_GPL(iommu_kmap);
-
-/**
- * iommu_kunmap - release virtual mapping obtained by 'iommu_kmap()'
- * @obj:	objective iommu
- * @da:	iommu device virtual address
- *
- * Frees the iommu virtually contiguous memory area starting at
- * @da, which was passed to and returned by 'iommu_kmap()'.
- */
-void iommu_kunmap(struct iommu_domain *domain, struct iommu *obj, u32 da)
-{
-	struct sg_table *sgt;
-	typedef void (*func_t)(const void *);
-
-	sgt = unmap_vm_area(domain, obj, da, (func_t)iounmap,
-			    IOVMF_LINEAR | IOVMF_MMIO);
-	if (!sgt)
-		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
-	sgtable_free(sgt);
-}
-EXPORT_SYMBOL_GPL(iommu_kunmap);
-
-/**
- * iommu_kmalloc - (d)-(p)-(v) address allocator and mapper
- * @obj:	objective iommu
- * @da:	contiguous iommu virtual memory
- * @bytes:	bytes for allocation
- * @flags:	iovma and page property
- *
- * Allocates @bytes linearly, creates a 1-1-1 mapping and returns
- * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
- */
-u32 iommu_kmalloc(struct iommu_domain *domain, struct iommu *obj, u32 da,
-		size_t bytes, u32 flags)
-{
-	void *va;
-	u32 pa;
-
-	if (!obj || !obj->dev || !bytes)
-		return -EINVAL;
-
-	bytes = PAGE_ALIGN(bytes);
-
-	va = kmalloc(bytes, GFP_KERNEL | GFP_DMA);
-	if (!va)
-		return -ENOMEM;
-	pa = virt_to_phys(va);
-
-	flags |= IOVMF_LINEAR;
-	flags |= IOVMF_ALLOC;
-
-	da = __iommu_kmap(domain, obj, da, pa, va, bytes, flags);
-	if (IS_ERR_VALUE(da))
-		kfree(va);
-
-	return da;
-}
-EXPORT_SYMBOL_GPL(iommu_kmalloc);
-
-/**
- * iommu_kfree - release virtual mapping obtained by 'iommu_kmalloc()'
- * @obj:	objective iommu
- * @da:	iommu device virtual address
- *
- * Frees the iommu virtually contiguous memory area starting at
- * @da, which was passed to and returned by 'iommu_kmalloc()'.
- */
-void iommu_kfree(struct iommu_domain *domain, struct iommu *obj, u32 da)
-{
-	struct sg_table *sgt;
-
-	sgt = unmap_vm_area(domain, obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
-	if (!sgt)
-		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
-	sgtable_free(sgt);
-}
-EXPORT_SYMBOL_GPL(iommu_kfree);
-
-
 static int __init iovmm_init(void)
 {
 	const unsigned long flags = SLAB_HWCACHE_ALIGN;
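Finally, a usage sketch of the removed iommu_kmap()/iommu_kunmap() pair (pattern 1 in the table): mapping an already-contiguous physical region, such as a carveout, at a fixed device address. The addresses and the domain/obj setup below are hypothetical.

#include <linux/err.h>
#include <linux/sizes.h>

/*
 * Pattern 1 (1-1-1, caller-provided backing): the physical region
 * already exists; iommu_kmap() ioremaps it and installs the IOMMU
 * mapping. IOVMF_DA_FIXED keeps 'da' exactly as requested. All
 * values here are hypothetical.
 */
static int pattern1_example(struct iommu_domain *domain, struct iommu *obj)
{
	const u32 da = 0x80000000;	/* fixed device-visible address */
	const u32 pa = 0x9c000000;	/* contiguous physical region */
	u32 mapped;

	mapped = iommu_kmap(domain, obj, da, pa, SZ_1M, IOVMF_DA_FIXED);
	if (IS_ERR_VALUE(mapped))
		return (int)mapped;

	/* ... the device may now access [da, da + SZ_1M) ... */

	iommu_kunmap(domain, obj, mapped);
	return 0;
}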