path: root/arch/arm/plat-omap/iovmm.c
author	Guzman Lugo, Fernando <x0095840@ti.com>	2010-12-14 19:54:01 -0500
committer	Hari Kanigeri <h-kanigeri2@ti.com>	2010-12-15 12:28:40 -0500
commit	ad1081210f3c91874f9fe9b48c3934c7db9714b7 (patch)
tree	19072b4a24cfd16840aa31fdbb2c2d698214a2bd /arch/arm/plat-omap/iovmm.c
parent	ba6e1f4ff41314906d81e6d96e646cdeafe42827 (diff)
OMAP: iovmm: add superpages support to fixed da address
This patch adds superpages support to fixed da addresses inside the iommu_kmap function.

Signed-off-by: Fernando Guzman Lugo <x0095840@ti.com>
Acked-by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
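The new max_alignment() helper introduced below returns the largest OMAP IOMMU page size (16MB, 1MB, 64KB or 4KB) whose alignment a given address already satisfies, or 0 if the address is not even 4KB-aligned. A minimal userspace sketch of that idea, not part of this patch; the SZ_* constants and ARRAY_SIZE() (normally provided by kernel headers) are redefined here so the demo is self-contained:

#include <stdio.h>

#define SZ_4K	0x00001000u
#define SZ_64K	0x00010000u
#define SZ_1M	0x00100000u
#define SZ_16M	0x01000000u
#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

/* largest supported page size whose alignment 'addr' satisfies, 0 if none */
static unsigned max_alignment(unsigned addr)
{
	unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K };
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(pagesize) && (addr & (pagesize[i] - 1)); i++)
		;
	return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
}

int main(void)
{
	/* 16MB-, 1MB-, 64KB-aligned and unaligned sample addresses */
	unsigned addrs[] = { 0x01000000, 0x00900000, 0x00010000, 0x00001234 };
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(addrs); i++)
		printf("max_alignment(0x%08x) = 0x%08x\n",
		       addrs[i], max_alignment(addrs[i]));
	return 0;
}

Because the patch applies the helper to da | pa, a superpage-sized entry is only chosen when the device address and the physical address share that alignment, which is what makes superpages safe for caller-chosen (fixed) da values.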
Diffstat (limited to 'arch/arm/plat-omap/iovmm.c')
-rw-r--r--  arch/arm/plat-omap/iovmm.c  62
1 file changed, 36 insertions(+), 26 deletions(-)
diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c
index 34f0012b5676..93a34d92b3a2 100644
--- a/arch/arm/plat-omap/iovmm.c
+++ b/arch/arm/plat-omap/iovmm.c
@@ -87,35 +87,43 @@ static size_t sgtable_len(const struct sg_table *sgt)
 }
 #define sgtable_ok(x)	(!!sgtable_len(x))
 
+static unsigned max_alignment(u32 addr)
+{
+	int i;
+	unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
+	for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
+		;
+	return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
+}
+
 /*
  * calculate the optimal number sg elements from total bytes based on
  * iommu superpages
  */
-static unsigned int sgtable_nents(size_t bytes)
+static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
 {
-	int i;
-	unsigned int nr_entries;
-	const unsigned long pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
+	unsigned nr_entries = 0, ent_sz;
 
 	if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
 		pr_err("%s: wrong size %08x\n", __func__, bytes);
 		return 0;
 	}
 
-	nr_entries = 0;
-	for (i = 0; i < ARRAY_SIZE(pagesize); i++) {
-		if (bytes >= pagesize[i]) {
-			nr_entries += (bytes / pagesize[i]);
-			bytes %= pagesize[i];
-		}
+	while (bytes) {
+		ent_sz = max_alignment(da | pa);
+		ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
+		nr_entries++;
+		da += ent_sz;
+		pa += ent_sz;
+		bytes -= ent_sz;
 	}
-	BUG_ON(bytes);
 
 	return nr_entries;
 }
 
 /* allocate and initialize sg_table header(a kind of 'superblock') */
-static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags)
+static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
+							u32 da, u32 pa)
 {
 	unsigned int nr_entries;
 	int err;
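To see how the rewritten sgtable_nents() chunks a region, consider an invented mapping of 1MB + 64KB at da = 0x00100000, pa = 0x00800000 (both 1MB-aligned). A self-contained sketch of the loop, with iopgsz_max() reduced to a stand-in that returns the largest IOMMU page size not exceeding the remaining length (the kernel's real macro lives in the OMAP iommu headers):

#include <stdio.h>

#define SZ_4K	0x00001000u
#define SZ_64K	0x00010000u
#define SZ_1M	0x00100000u
#define SZ_16M	0x01000000u
#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

/* same helper as in the patch, redefined for the demo */
static unsigned max_alignment(unsigned addr)
{
	unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K };
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(pagesize) && (addr & (pagesize[i] - 1)); i++)
		;
	return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
}

/* stand-in for the kernel's iopgsz_max(): biggest IOMMU page size <= bytes */
static unsigned iopgsz_max(unsigned bytes)
{
	return bytes >= SZ_16M ? SZ_16M :
	       bytes >= SZ_1M  ? SZ_1M  :
	       bytes >= SZ_64K ? SZ_64K : SZ_4K;
}

int main(void)
{
	unsigned da = 0x00100000, pa = 0x00800000;	/* invented addresses */
	unsigned bytes = SZ_1M + SZ_64K;
	unsigned nr_entries = 0;

	/* same walk as the patched sgtable_nents()/sgtable_fill_kmalloc() */
	while (bytes) {
		unsigned ent_sz = max_alignment(da | pa);

		if (ent_sz > iopgsz_max(bytes))
			ent_sz = iopgsz_max(bytes);
		printf("entry %u: da=0x%08x pa=0x%08x size=0x%08x\n",
		       ++nr_entries, da, pa, ent_sz);
		da += ent_sz;
		pa += ent_sz;
		bytes -= ent_sz;
	}
	return 0;
}

This prints a 1MB entry followed by a 64KB entry: 2 scatterlist entries where the old fixed-da path, which bypassed sgtable_nents() entirely (see the next hunk), would have fallen back to one entry per 4KB page (272 entries here).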
@@ -127,9 +135,8 @@ static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags)
 	if (!IS_ALIGNED(bytes, PAGE_SIZE))
 		return ERR_PTR(-EINVAL);
 
-	/* FIXME: IOVMF_DA_FIXED should support 'superpages' */
-	if ((flags & IOVMF_LINEAR) && (flags & IOVMF_DA_ANON)) {
-		nr_entries = sgtable_nents(bytes);
+	if (flags & IOVMF_LINEAR) {
+		nr_entries = sgtable_nents(bytes, da, pa);
 		if (!nr_entries)
 			return ERR_PTR(-EINVAL);
 	} else
@@ -409,7 +416,8 @@ static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
 	BUG_ON(!sgt);
 }
 
-static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, size_t len)
+static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, u32 da,
+								size_t len)
 {
 	unsigned int i;
 	struct scatterlist *sg;
@@ -418,9 +426,10 @@ static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, size_t len)
 	va = phys_to_virt(pa);
 
 	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
-		size_t bytes;
+		unsigned bytes;
 
-		bytes = iopgsz_max(len);
+		bytes = max_alignment(da | pa);
+		bytes = min_t(unsigned, bytes, iopgsz_max(len));
 
 		BUG_ON(!iopgsz_ok(bytes));
 
@@ -429,6 +438,7 @@ static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, size_t len)
 		 * 'pa' is cotinuous(linear).
 		 */
 		pa += bytes;
+		da += bytes;
 		len -= bytes;
 	}
 	BUG_ON(len);
@@ -695,18 +705,18 @@ u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
 	if (!va)
 		return -ENOMEM;
 
-	sgt = sgtable_alloc(bytes, flags);
+	flags &= IOVMF_HW_MASK;
+	flags |= IOVMF_DISCONT;
+	flags |= IOVMF_ALLOC;
+	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);
+
+	sgt = sgtable_alloc(bytes, flags, da, 0);
 	if (IS_ERR(sgt)) {
 		da = PTR_ERR(sgt);
 		goto err_sgt_alloc;
 	}
 	sgtable_fill_vmalloc(sgt, va);
 
-	flags &= IOVMF_HW_MASK;
-	flags |= IOVMF_DISCONT;
-	flags |= IOVMF_ALLOC;
-	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);
-
 	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
 	if (IS_ERR_VALUE(da))
 		goto err_iommu_vmap;
@@ -746,11 +756,11 @@ static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
 {
 	struct sg_table *sgt;
 
-	sgt = sgtable_alloc(bytes, flags);
+	sgt = sgtable_alloc(bytes, flags, da, pa);
 	if (IS_ERR(sgt))
 		return PTR_ERR(sgt);
 
-	sgtable_fill_kmalloc(sgt, pa, bytes);
+	sgtable_fill_kmalloc(sgt, pa, da, bytes);
 
 	da = map_iommu_region(obj, da, sgt, va, bytes, flags);
 	if (IS_ERR_VALUE(da)) {