about | summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/drm/omapdrm/omap_gem.c
diff options
context:
space:
mode:
author: Laurent Pinchart <laurent.pinchart@ideasonboard.com> 2017-04-20 17:33:58 -0400
committer: Tomi Valkeinen <tomi.valkeinen@ti.com> 2017-06-02 03:57:08 -0400
commit 97817fd46518aa2f457ce41f6a9aecd36b461426 (patch)
tree 5c5daa64ada96ac4be65b2e190168935d61eb973 /drivers/gpu/drm/omapdrm/omap_gem.c
parent 930dc19c0b7278c26c85f05f92cb417f2bd28aa3 (diff)
drm: omapdrm: Map pages for DMA in DMA_TO_DEVICE direction
The display engine only reads from memory, there's no need to use bidirectional DMA mappings. Use DMA_TO_DEVICE instead. Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com> Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
Diffstat (limited to 'drivers/gpu/drm/omapdrm/omap_gem.c')
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_gem.c | 11
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 94aef52c36ad..461fbb5c0075 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -254,7 +254,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
254 254
255 for (i = 0; i < npages; i++) { 255 for (i = 0; i < npages; i++) {
256 addrs[i] = dma_map_page(dev->dev, pages[i], 256 addrs[i] = dma_map_page(dev->dev, pages[i],
257 0, PAGE_SIZE, DMA_BIDIRECTIONAL); 257 0, PAGE_SIZE, DMA_TO_DEVICE);
258 258
259 if (dma_mapping_error(dev->dev, addrs[i])) { 259 if (dma_mapping_error(dev->dev, addrs[i])) {
260 dev_warn(dev->dev, 260 dev_warn(dev->dev,
@@ -262,7 +262,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
262 262
263 for (i = i - 1; i >= 0; --i) { 263 for (i = i - 1; i >= 0; --i) {
264 dma_unmap_page(dev->dev, addrs[i], 264 dma_unmap_page(dev->dev, addrs[i],
265 PAGE_SIZE, DMA_BIDIRECTIONAL); 265 PAGE_SIZE, DMA_TO_DEVICE);
266 } 266 }
267 267
268 ret = -ENOMEM; 268 ret = -ENOMEM;
@@ -322,7 +322,7 @@ static void omap_gem_detach_pages(struct drm_gem_object *obj)
322 for (i = 0; i < npages; i++) { 322 for (i = 0; i < npages; i++) {
323 if (omap_obj->dma_addrs[i]) 323 if (omap_obj->dma_addrs[i])
324 dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i], 324 dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
325 PAGE_SIZE, DMA_BIDIRECTIONAL); 325 PAGE_SIZE, DMA_TO_DEVICE);
326 } 326 }
327 327
328 kfree(omap_obj->dma_addrs); 328 kfree(omap_obj->dma_addrs);
@@ -744,7 +744,7 @@ void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
744 744
745 if (omap_obj->dma_addrs[pgoff]) { 745 if (omap_obj->dma_addrs[pgoff]) {
746 dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff], 746 dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
747 PAGE_SIZE, DMA_BIDIRECTIONAL); 747 PAGE_SIZE, DMA_TO_DEVICE);
748 omap_obj->dma_addrs[pgoff] = 0; 748 omap_obj->dma_addrs[pgoff] = 0;
749 } 749 }
750} 750}
@@ -767,8 +767,7 @@ void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
767 dma_addr_t addr; 767 dma_addr_t addr;
768 768
769 addr = dma_map_page(dev->dev, pages[i], 0, 769 addr = dma_map_page(dev->dev, pages[i], 0,
770 PAGE_SIZE, DMA_BIDIRECTIONAL); 770 PAGE_SIZE, dir);
771
772 if (dma_mapping_error(dev->dev, addr)) { 771 if (dma_mapping_error(dev->dev, addr)) {
773 dev_warn(dev->dev, "%s: failed to map page\n", 772 dev_warn(dev->dev, "%s: failed to map page\n",
774 __func__); 773 __func__);