author		Laurent Pinchart <laurent.pinchart@ideasonboard.com>	2011-09-02 13:32:30 -0400
committer	Joerg Roedel <joerg.roedel@amd.com>	2011-09-05 09:14:37 -0400
commit		329d8d3b474923087f6988737ff12137b58e55cc (patch)
tree		9db30cae05fb15b6dfede6976aae1dfbd9dfd80d /drivers/iommu/omap-iovmm.c
parent		024ae884a657f8ddeeff6b472c1fe538f277980e (diff)
iommu/omap-iovmm: support non page-aligned buffers in iommu_vmap
omap_iovmm requires page-aligned buffers, and that sometimes causes
omap3isp failures (i.e. whenever the buffer passed from userspace is
not page-aligned).

Remove this limitation by rounding the address of the first page entry
down, and adding the offset back to the device address.

Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Acked-by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
[ohad@wizery.com: rebased, but tested only with aligned buffers]
[ohad@wizery.com: slightly edited the commit log]
Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
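For context, a minimal standalone C sketch of the arithmetic described above (illustration only, not part of the patch; the physical address, length, and device address below are made-up values): the first entry is rounded down to a page boundary, the mapped length grows by the same offset, the offset is added back to the device address returned to the caller, and omap_iommu_vunmap() masks it off again.

/*
 * Illustration only -- not kernel code. Models the round-down/add-back
 * arithmetic of this patch with made-up numbers: the mapping starts at
 * the page-aligned address (sg_phys(sg) - sg->offset), the mapped size
 * grows by the same offset (sg->length + sg->offset), and the offset is
 * added back to the device address handed to the caller.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	uint32_t buf_pa  = 0x8123f100;	/* unaligned buffer address, like sg_phys(sg) */
	uint32_t buf_len = 0x2000;	/* buffer length, like sg->length */

	uint32_t offset  = buf_pa & ~PAGE_MASK;	/* like sg->offset (0x100 here) */
	uint32_t map_pa  = buf_pa - offset;	/* page-aligned mapping start */
	uint32_t map_len = buf_len + offset;	/* covers the leading partial page */

	/* Assume the IOMMU maps [map_pa, map_pa + map_len) at device address da. */
	uint32_t da = 0x20000000;

	/* omap_iommu_vmap() now returns da + offset, pointing at the buffer start. */
	printf("map pa %#x len %#x -> caller da %#x\n", map_pa, map_len, da + offset);

	/* omap_iommu_vunmap() masks the offset off again before unmapping. */
	printf("unmap da %#x\n", (uint32_t)((da + offset) & PAGE_MASK));
	return 0;
}

This mirrors what the diff below does in sgtable_len(), vmap_sg(), map_iovm_area() and the omap_iommu_vmap()/omap_iommu_vunmap() entry points.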
Diffstat (limited to 'drivers/iommu/omap-iovmm.c')
-rw-r--r--	drivers/iommu/omap-iovmm.c	36
1 file changed, 26 insertions, 10 deletions
diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c
index 5e7f97dc76ef..39bdb92aa96f 100644
--- a/drivers/iommu/omap-iovmm.c
+++ b/drivers/iommu/omap-iovmm.c
@@ -27,6 +27,15 @@
 
 static struct kmem_cache *iovm_area_cachep;
 
+/* return the offset of the first scatterlist entry in a sg table */
+static unsigned int sgtable_offset(const struct sg_table *sgt)
+{
+	if (!sgt || !sgt->nents)
+		return 0;
+
+	return sgt->sgl->offset;
+}
+
 /* return total bytes of sg buffers */
 static size_t sgtable_len(const struct sg_table *sgt)
 {
@@ -39,11 +48,17 @@ static size_t sgtable_len(const struct sg_table *sgt)
 	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
 		size_t bytes;
 
-		bytes = sg->length;
+		bytes = sg->length + sg->offset;
 
 		if (!iopgsz_ok(bytes)) {
-			pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
-			       __func__, i, bytes);
+			pr_err("%s: sg[%d] not iommu pagesize(%u %u)\n",
+			       __func__, i, bytes, sg->offset);
+			return 0;
+		}
+
+		if (i && sg->offset) {
+			pr_err("%s: sg[%d] offset not allowed in internal "
+				"entries\n", __func__, i);
 			return 0;
 		}
 
@@ -164,8 +179,8 @@ static void *vmap_sg(const struct sg_table *sgt)
 		u32 pa;
 		int err;
 
-		pa = sg_phys(sg);
-		bytes = sg->length;
+		pa = sg_phys(sg) - sg->offset;
+		bytes = sg->length + sg->offset;
 
 		BUG_ON(bytes != PAGE_SIZE);
 
@@ -405,8 +420,8 @@ static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
 		u32 pa;
 		size_t bytes;
 
-		pa = sg_phys(sg);
-		bytes = sg->length;
+		pa = sg_phys(sg) - sg->offset;
+		bytes = sg->length + sg->offset;
 
 		flags &= ~IOVMF_PGSZ_MASK;
 
@@ -432,7 +447,7 @@ err_out:
 	for_each_sg(sgt->sgl, sg, i, j) {
 		size_t bytes;
 
-		bytes = sg->length;
+		bytes = sg->length + sg->offset;
 		order = get_order(bytes);
 
 		/* ignore failures.. we're already handling one */
@@ -461,7 +476,7 @@ static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
 		size_t bytes;
 		int order;
 
-		bytes = sg->length;
+		bytes = sg->length + sg->offset;
 		order = get_order(bytes);
 
 		err = iommu_unmap(domain, start, order);
@@ -600,7 +615,7 @@ u32 omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
 	if (IS_ERR_VALUE(da))
 		vunmap_sg(va);
 
-	return da;
+	return da + sgtable_offset(sgt);
 }
 EXPORT_SYMBOL_GPL(omap_iommu_vmap);
 
@@ -620,6 +635,7 @@ omap_iommu_vunmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da)
 	 * 'sgt' is allocated before 'omap_iommu_vmalloc()' is called.
 	 * Just returns 'sgt' to the caller to free
 	 */
+	da &= PAGE_MASK;
 	sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
 			    IOVMF_DISCONT | IOVMF_MMIO);
 	if (!sgt)