Diffstat (limited to 'drivers')
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_buf.c | 55
1 file changed, 48 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 13a8489b39ff..b7937a616e46 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -15,6 +15,7 @@
15#include "exynos_drm_drv.h" 15#include "exynos_drm_drv.h"
16#include "exynos_drm_gem.h" 16#include "exynos_drm_gem.h"
17#include "exynos_drm_buf.h" 17#include "exynos_drm_buf.h"
18#include "exynos_drm_iommu.h"
18 19
19static int lowlevel_buffer_allocate(struct drm_device *dev, 20static int lowlevel_buffer_allocate(struct drm_device *dev,
20 unsigned int flags, struct exynos_drm_gem_buf *buf) 21 unsigned int flags, struct exynos_drm_gem_buf *buf)
@@ -52,14 +53,45 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
 	dma_set_attr(attr, &buf->dma_attrs);
 	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);
 
-	buf->pages = dma_alloc_attrs(dev->dev, buf->size,
-			&buf->dma_addr, GFP_KERNEL, &buf->dma_attrs);
-	if (!buf->pages) {
-		DRM_ERROR("failed to allocate buffer.\n");
-		return -ENOMEM;
+	nr_pages = buf->size >> PAGE_SHIFT;
+
+	if (!is_drm_iommu_supported(dev)) {
+		dma_addr_t start_addr;
+		unsigned int i = 0;
+
+		buf->pages = kzalloc(sizeof(struct page) * nr_pages,
+					GFP_KERNEL);
+		if (!buf->pages) {
+			DRM_ERROR("failed to allocate pages.\n");
+			return -ENOMEM;
+		}
+
+		buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
+					&buf->dma_addr, GFP_KERNEL,
+					&buf->dma_attrs);
+		if (!buf->kvaddr) {
+			DRM_ERROR("failed to allocate buffer.\n");
+			kfree(buf->pages);
+			return -ENOMEM;
+		}
+
+		start_addr = buf->dma_addr;
+		while (i < nr_pages) {
+			buf->pages[i] = phys_to_page(start_addr);
+			start_addr += PAGE_SIZE;
+			i++;
+		}
+	} else {
+
+		buf->pages = dma_alloc_attrs(dev->dev, buf->size,
+					&buf->dma_addr, GFP_KERNEL,
+					&buf->dma_attrs);
+		if (!buf->pages) {
+			DRM_ERROR("failed to allocate buffer.\n");
+			return -ENOMEM;
+		}
 	}
 
-	nr_pages = buf->size >> PAGE_SHIFT;
 	buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
 	if (!buf->sgt) {
 		DRM_ERROR("failed to get sg table.\n");
@@ -78,6 +110,9 @@ err_free_attrs:
 			(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
 	buf->dma_addr = (dma_addr_t)NULL;
 
+	if (!is_drm_iommu_supported(dev))
+		kfree(buf->pages);
+
 	return ret;
 }
 
@@ -100,8 +135,14 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
 	kfree(buf->sgt);
 	buf->sgt = NULL;
 
-	dma_free_attrs(dev->dev, buf->size, buf->pages,
+	if (!is_drm_iommu_supported(dev)) {
+		dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
 			(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
+		kfree(buf->pages);
+	} else
+		dma_free_attrs(dev->dev, buf->size, buf->pages,
+			(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
+
 	buf->dma_addr = (dma_addr_t)NULL;
 }
 
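The deallocation hunk mirrors that split: without an IOMMU the buffer is freed through its kernel mapping (buf->kvaddr) and the separately allocated page array is kfree'd, while the IOMMU path keeps passing buf->pages to dma_free_attrs() as before. A matching teardown for the hypothetical helper sketched earlier, under the same assumptions:

/*
 * Sketch only: release the contiguous DMA buffer via its kernel mapping,
 * then drop the page array built by demo_contig_to_pages().
 */
static void demo_contig_free(struct device *dev, size_t size,
			     void *kvaddr, dma_addr_t dma_addr,
			     struct page **pages)
{
	dma_free_coherent(dev, size, kvaddr, dma_addr);
	kfree(pages);
}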