Diffstat (limited to 'arch/arm/plat-omap/iovmm.c')
 -rw-r--r--  arch/arm/plat-omap/iovmm.c | 81
 1 file changed, 46 insertions(+), 35 deletions(-)
diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c
index 8ce0de247c71..6dc1296c8c77 100644
--- a/arch/arm/plat-omap/iovmm.c
+++ b/arch/arm/plat-omap/iovmm.c
@@ -87,35 +87,43 @@ static size_t sgtable_len(const struct sg_table *sgt)
 }
 #define sgtable_ok(x)	(!!sgtable_len(x))
 
+static unsigned max_alignment(u32 addr)
+{
+	int i;
+	unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
+	for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
+		;
+	return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
+}
+
 /*
  * calculate the optimal number sg elements from total bytes based on
  * iommu superpages
  */
-static unsigned int sgtable_nents(size_t bytes)
+static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
 {
-	int i;
-	unsigned int nr_entries;
-	const unsigned long pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
+	unsigned nr_entries = 0, ent_sz;
 
 	if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
 		pr_err("%s: wrong size %08x\n", __func__, bytes);
 		return 0;
 	}
 
-	nr_entries = 0;
-	for (i = 0; i < ARRAY_SIZE(pagesize); i++) {
-		if (bytes >= pagesize[i]) {
-			nr_entries += (bytes / pagesize[i]);
-			bytes %= pagesize[i];
-		}
+	while (bytes) {
+		ent_sz = max_alignment(da | pa);
+		ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
+		nr_entries++;
+		da += ent_sz;
+		pa += ent_sz;
+		bytes -= ent_sz;
 	}
-	BUG_ON(bytes);
 
 	return nr_entries;
 }
 
 /* allocate and initialize sg_table header(a kind of 'superblock') */
-static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags)
+static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
+							u32 da, u32 pa)
 {
 	unsigned int nr_entries;
 	int err;
@@ -127,9 +135,8 @@ static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags)
 	if (!IS_ALIGNED(bytes, PAGE_SIZE))
 		return ERR_PTR(-EINVAL);
 
-	/* FIXME: IOVMF_DA_FIXED should support 'superpages' */
-	if ((flags & IOVMF_LINEAR) && (flags & IOVMF_DA_ANON)) {
-		nr_entries = sgtable_nents(bytes);
+	if (flags & IOVMF_LINEAR) {
+		nr_entries = sgtable_nents(bytes, da, pa);
 		if (!nr_entries)
 			return ERR_PTR(-EINVAL);
 	} else
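[Editor's note: the two hunks above replace the old greedy packing in sgtable_nents() with a policy where each scatterlist entry takes the largest OMAP superpage that both the device address and the physical address are aligned to, clamped to the remaining length. Below is a standalone sketch of that policy; the SZ_* values mirror the kernel's, but the iopgsz_max() stand-in and the main() harness are illustrative assumptions, not kernel code.]

/*
 * Standalone sketch (not kernel code) of the new sizing policy in
 * sgtable_nents(): every entry uses the largest superpage that both
 * da and pa are aligned to, clamped to the remaining length.
 */
#include <stdio.h>

#define SZ_4K	0x00001000u
#define SZ_64K	0x00010000u
#define SZ_1M	0x00100000u
#define SZ_16M	0x01000000u
#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

static unsigned max_alignment(unsigned addr)
{
	unsigned i;
	static const unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };

	for (i = 0; i < ARRAY_SIZE(pagesize) && (addr & (pagesize[i] - 1)); i++)
		;
	return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
}

/* stand-in for the kernel's iopgsz_max(): largest page size <= bytes */
static unsigned iopgsz_max(unsigned bytes)
{
	return bytes >= SZ_16M ? SZ_16M :
	       bytes >= SZ_1M  ? SZ_1M  :
	       bytes >= SZ_64K ? SZ_64K : SZ_4K;
}

int main(void)
{
	/* 16M + 64K starting at a 16M-aligned da/pa pair */
	unsigned da = 0, pa = SZ_16M, bytes = SZ_16M + SZ_64K;
	unsigned n = 0;

	while (bytes) {
		unsigned ent_sz = max_alignment(da | pa);

		if (ent_sz > iopgsz_max(bytes))
			ent_sz = iopgsz_max(bytes);
		/* prints one 16M entry, then one 64K entry */
		printf("entry %u: da=%#010x pa=%#010x size=%#x\n",
		       n++, da, pa, ent_sz);
		da += ent_sz;
		pa += ent_sz;
		bytes -= ent_sz;
	}
	return 0;
}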
@@ -273,13 +280,14 @@ static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
 	alignement = PAGE_SIZE;
 
 	if (flags & IOVMF_DA_ANON) {
-		/*
-		 * Reserve the first page for NULL
-		 */
-		start = PAGE_SIZE;
+		start = obj->da_start;
+
 		if (flags & IOVMF_LINEAR)
 			alignement = iopgsz_max(bytes);
 		start = roundup(start, alignement);
+	} else if (start < obj->da_start || start > obj->da_end ||
+					obj->da_end - start < bytes) {
+		return ERR_PTR(-EINVAL);
 	}
 
 	tmp = NULL;
@@ -289,19 +297,19 @@ static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
 	prev_end = 0;
 	list_for_each_entry(tmp, &obj->mmap, list) {
 
-		if (prev_end >= start)
+		if (prev_end > start)
 			break;
 
-		if (start + bytes < tmp->da_start)
+		if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
 			goto found;
 
-		if (flags & IOVMF_DA_ANON)
+		if (tmp->da_end >= start && flags & IOVMF_DA_ANON)
 			start = roundup(tmp->da_end + 1, alignement);
 
 		prev_end = tmp->da_end;
 	}
 
-	if ((start > prev_end) && (ULONG_MAX - start >= bytes))
+	if ((start >= prev_end) && (obj->da_end - start >= bytes))
 		goto found;
 
 	dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
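[Editor's note: the two hunks above rework alloc_iovm_area() so that fixed addresses are validated against the per-IOMMU aperture (obj->da_start..obj->da_end) and the first-fit scan accepts a hole only when the next area leaves at least 'bytes' of room. A minimal userspace sketch of the scan follows; the struct and find_hole() harness are hypothetical, and the IOVMF_DA_ANON alignment roundup is omitted for brevity.]

/*
 * Userspace sketch (hypothetical harness) of the reworked first-fit
 * scan.  Areas are kept sorted by address, as obj->mmap is.
 */
#include <stdio.h>

struct area {
	unsigned start, end;	/* inclusive device-address range */
};

static unsigned find_hole(const struct area *areas, unsigned n,
			  unsigned start, unsigned bytes, unsigned da_end)
{
	unsigned prev_end = 0;
	unsigned i;

	for (i = 0; i < n; i++) {
		/* candidate already overlaps an earlier area: give up */
		if (prev_end > start)
			break;
		/* does the hole before this area fit the request? */
		if (areas[i].start > start && areas[i].start - start >= bytes)
			return start;
		/* otherwise retry just past this area */
		if (areas[i].end >= start)
			start = areas[i].end + 1;
		prev_end = areas[i].end;
	}
	/* tail of the aperture, bounded by da_end rather than ULONG_MAX */
	if (start >= prev_end && da_end - start >= bytes)
		return start;
	return 0;	/* no space */
}

int main(void)
{
	const struct area areas[] = {
		{ 0x10000, 0x1ffff },
		{ 0x40000, 0x4ffff },
	};

	/* finds the 128K hole at 0x20000 between the two areas */
	printf("hole at %#x\n",
	       find_hole(areas, 2, 0x10000, 0x20000, 0x100000));
	return 0;
}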
@@ -409,7 +417,8 @@ static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
 	BUG_ON(!sgt);
 }
 
-static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, size_t len)
+static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, u32 da,
+								size_t len)
 {
 	unsigned int i;
 	struct scatterlist *sg;
@@ -418,9 +427,10 @@ static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, size_t len)
 	va = phys_to_virt(pa);
 
 	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
-		size_t bytes;
+		unsigned bytes;
 
-		bytes = iopgsz_max(len);
+		bytes = max_alignment(da | pa);
+		bytes = min_t(unsigned, bytes, iopgsz_max(len));
 
 		BUG_ON(!iopgsz_ok(bytes));
 
@@ -429,6 +439,7 @@ static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, size_t len)
 		 * 'pa' is cotinuous(linear).
 		 */
 		pa += bytes;
+		da += bytes;
 		len -= bytes;
 	}
 	BUG_ON(len);
@@ -695,18 +706,18 @@ u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
 	if (!va)
 		return -ENOMEM;
 
-	sgt = sgtable_alloc(bytes, flags);
+	flags &= IOVMF_HW_MASK;
+	flags |= IOVMF_DISCONT;
+	flags |= IOVMF_ALLOC;
+	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);
+
+	sgt = sgtable_alloc(bytes, flags, da, 0);
 	if (IS_ERR(sgt)) {
 		da = PTR_ERR(sgt);
 		goto err_sgt_alloc;
 	}
 	sgtable_fill_vmalloc(sgt, va);
 
-	flags &= IOVMF_HW_MASK;
-	flags |= IOVMF_DISCONT;
-	flags |= IOVMF_ALLOC;
-	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);
-
 	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
 	if (IS_ERR_VALUE(da))
 		goto err_iommu_vmap;
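[Editor's note: the reordering above matters because sgtable_alloc() now branches on the finished flags (and on 'da'), so IOVMF_DISCONT/IOVMF_DA_FIXED must be resolved before the allocation rather than after it. For context, a hedged sketch of how a driver consumes this API; it assumes the iommu_get()/iommu_vmalloc()/iommu_vfree() declarations from plat/iovmm.h of this era and is not taken from the patch.]

#include <linux/err.h>
#include <asm/sizes.h>
#include <plat/iommu.h>
#include <plat/iovmm.h>

/* hypothetical caller: grab 1MB of IOMMU-mapped memory on the "isp" MMU */
static int example_map(void)
{
	struct iommu *obj;
	u32 da;

	obj = iommu_get("isp");
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* da == 0 asks for an anonymous address (IOVMF_DA_ANON path) */
	da = iommu_vmalloc(obj, 0, SZ_1M, 0);
	if (IS_ERR_VALUE(da)) {
		iommu_put(obj);
		return (int)da;
	}

	/* ... program the device with 'da' and run ... */

	iommu_vfree(obj, da);
	iommu_put(obj);
	return 0;
}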
@@ -746,11 +757,11 @@ static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
 {
 	struct sg_table *sgt;
 
-	sgt = sgtable_alloc(bytes, flags);
+	sgt = sgtable_alloc(bytes, flags, da, pa);
 	if (IS_ERR(sgt))
 		return PTR_ERR(sgt);
 
-	sgtable_fill_kmalloc(sgt, pa, bytes);
+	sgtable_fill_kmalloc(sgt, pa, da, bytes);
 
 	da = map_iommu_region(obj, da, sgt, va, bytes, flags);
 	if (IS_ERR_VALUE(da)) {
@@ -811,7 +822,7 @@ void iommu_kunmap(struct iommu *obj, u32 da)
 	struct sg_table *sgt;
 	typedef void (*func_t)(const void *);
 
-	sgt = unmap_vm_area(obj, da, (func_t)__iounmap,
+	sgt = unmap_vm_area(obj, da, (func_t)iounmap,
 			    IOVMF_LINEAR | IOVMF_MMIO);
 	if (!sgt)
 		dev_dbg(obj->dev, "%s: No sgt\n", __func__);