aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRahul Sharma <rahul.sharma@samsung.com>2012-11-05 10:34:29 -0500
committerInki Dae <inki.dae@samsung.com>2012-12-04 00:46:00 -0500
commit4ddc404bc0b3750b015b021653a88943591f40f6 (patch)
tree25a80c8528dde1b8147f6d1e26a3425933baf439
parentea6d66c3a797376d21b23dc8261733ce35970014 (diff)
drm: exynos: fix for mapping of dma buffers
This patch fixes the problem of mapping contiguous and non-contiguous dma buffers. Currently the page struct is calculated from buf->dma_addr, which is not the physical address. It is replaced by buf->pages, which points to the page struct of the first page of the contiguous memory chunk. This gives the correct page frame number for mapping. Non-contiguous dma buffers are described using an SG table and SG lists. Each valid SG list points to a single page or a group of pages which are physically contiguous. The current implementation just maps the first page of each SG list and leaves the other pages unmapped, leading to a crash. The given solution finds the page struct for the faulting page by parsing the SG table and maps it. Signed-off-by: Rahul Sharma <rahul.sharma@samsung.com> Signed-off-by: Inki Dae <inki.dae@samsung.com> Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c28
1 file changed, 25 insertions, 3 deletions
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index b130e6d1a529..5fdfb8f51a41 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -95,15 +95,37 @@ static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
95{ 95{
96 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); 96 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
97 struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer; 97 struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
98 struct scatterlist *sgl;
98 unsigned long pfn; 99 unsigned long pfn;
100 int i;
99 101
100 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) { 102 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
103 if (!buf->sgt)
104 return -EINTR;
105
106 sgl = buf->sgt->sgl;
107 for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
108 if (!sgl) {
109 DRM_ERROR("invalid SG table\n");
110 return -EINTR;
111 }
112 if (page_offset < (sgl->length >> PAGE_SHIFT))
113 break;
114 page_offset -= (sgl->length >> PAGE_SHIFT);
115 }
116
117 if (i >= buf->sgt->nents) {
118 DRM_ERROR("invalid page offset\n");
119 return -EINVAL;
120 }
121
122 pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
123 } else {
101 if (!buf->pages) 124 if (!buf->pages)
102 return -EINTR; 125 return -EINTR;
103 126
104 pfn = page_to_pfn(buf->pages[page_offset++]); 127 pfn = page_to_pfn(buf->pages[0]) + page_offset;
105 } else 128 }
106 pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
107 129
108 return vm_insert_mixed(vma, f_vaddr, pfn); 130 return vm_insert_mixed(vma, f_vaddr, pfn);
109} 131}