author		Inki Dae <inki.dae@samsung.com>		2012-10-20 10:53:42 -0400
committer	Inki Dae <daeinki@gmail.com>		2012-11-29 06:30:35 -0500
commit		0519f9a12d0113caab78980c48a7902d2bd40c2c (patch)
tree		77d49f8f1e637edf253b6688a2366d8c9e933bfd /drivers
parent		549a17e447d72bab648c783abbba98f3bd5b4dd5 (diff)
drm/exynos: add iommu support for exynos drm framework
Changelog v4:
- fix condition to drm_iommu_detach_device function.

Changelog v3:
- add dma_parms->max_segment_size setting of drm_device->dev.
- use devm_kzalloc instead of kzalloc.

Changelog v2:
- fix iommu attach condition.
  . check archdata.dma_ops of drm device instead of subdrv device's one.
- code clean to exynos_drm_iommu.c file.
  . remove '#ifdef CONFIG_ARM_DMA_USE_IOMMU' from exynos_drm_iommu.c
    and add it to driver/gpu/drm/exynos/Kconfig.

Changelog v1:
This patch adds iommu support for the exynos drm framework with the dma
mapping api. The dma mapping api is used to allocate physical memory and
map it through the iommu table; some existing code is removed and new
code added for iommu support.

GEM allocation requires one device object to use the dma mapping api, so
this patch uses one iommu mapping for all sub drivers. In other words,
all sub drivers share the same iommu mapping.

Signed-off-by: Inki Dae <inki.dae@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
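The design described above can be pictured in a few lines: one ARM IOMMU mapping is created for the DRM device, and every sub-driver device is attached to that same mapping, so dma mapping api allocations made through the DRM device are visible to all sub drivers. The sketch below is illustrative only, written against the 3.x-era ARM DMA-IOMMU interface (arm_iommu_create_mapping()/arm_iommu_attach_device()); the example_* names are hypothetical and not part of the patch.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#include <asm/dma-iommu.h>

/* hypothetical: the single mapping every exynos sub driver shares */
static struct dma_iommu_mapping *example_mapping;

static int example_create_shared_mapping(struct device *drm_dev)
{
	/* 1GiB of device address space starting at 0x20000000, order 4 */
	example_mapping = arm_iommu_create_mapping(&platform_bus_type,
						   0x20000000, SZ_1G, 4);
	if (IS_ERR(example_mapping))
		return PTR_ERR(example_mapping);

	/* dma_parms must exist before the max segment size can be raised */
	drm_dev->dma_parms = devm_kzalloc(drm_dev,
					  sizeof(*drm_dev->dma_parms),
					  GFP_KERNEL);
	dma_set_max_seg_size(drm_dev, 0xffffffffu);
	return 0;
}

static int example_attach_subdrv(struct device *subdrv_dev)
{
	/*
	 * attaching to the shared mapping gives this device the same
	 * iommu address space (and dma_ops) as every other sub driver.
	 */
	subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
					     sizeof(*subdrv_dev->dma_parms),
					     GFP_KERNEL);
	dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
	return arm_iommu_attach_device(subdrv_dev, example_mapping);
}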
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/gpu/drm/exynos/Kconfig			|   6
-rw-r--r--	drivers/gpu/drm/exynos/Makefile			|   1
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_buf.c		|  88
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_dmabuf.c	|  87
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_drv.c		|  23
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_drv.h		|  11
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_fb.c		|  52
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_gem.c		| 210
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_gem.h		|   1
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_iommu.c	| 150
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_iommu.h	|  85
11 files changed, 409 insertions, 305 deletions
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index fc345d4ebb03..86fb75d3fcad 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -10,6 +10,12 @@ config DRM_EXYNOS
 	  Choose this option if you have a Samsung SoC EXYNOS chipset.
 	  If M is selected the module will be called exynosdrm.
 
+config DRM_EXYNOS_IOMMU
+	bool "EXYNOS DRM IOMMU Support"
+	depends on DRM_EXYNOS && EXYNOS_IOMMU && ARM_DMA_USE_IOMMU
+	help
+	  Choose this option if you want to use IOMMU feature for DRM.
+
 config DRM_EXYNOS_DMABUF
 	bool "EXYNOS DRM DMABUF"
 	depends on DRM_EXYNOS
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index eb651ca8e2a8..26813b8a5056 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -8,6 +8,7 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
 		exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
 		exynos_drm_plane.o
 
+exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU)	+= exynos_drm_iommu.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF)	+= exynos_drm_dmabuf.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD)	+= exynos_drm_fimd.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI)	+= exynos_hdmi.o exynos_mixer.o \
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 118c117b3226..48c589661cbe 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -33,71 +33,58 @@
 static int lowlevel_buffer_allocate(struct drm_device *dev,
 		unsigned int flags, struct exynos_drm_gem_buf *buf)
 {
-	dma_addr_t start_addr;
+	int ret = 0;
 	unsigned int npages, i = 0;
 	struct scatterlist *sgl;
-	int ret = 0;
+	enum dma_attr attr = DMA_ATTR_FORCE_CONTIGUOUS;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (IS_NONCONTIG_BUFFER(flags)) {
-		DRM_DEBUG_KMS("not support allocation type.\n");
-		return -EINVAL;
-	}
-
 	if (buf->dma_addr) {
 		DRM_DEBUG_KMS("already allocated.\n");
 		return 0;
 	}
 
-	if (buf->size >= SZ_1M) {
-		npages = buf->size >> SECTION_SHIFT;
-		buf->page_size = SECTION_SIZE;
-	} else if (buf->size >= SZ_64K) {
-		npages = buf->size >> 16;
-		buf->page_size = SZ_64K;
-	} else {
-		npages = buf->size >> PAGE_SHIFT;
-		buf->page_size = PAGE_SIZE;
+	init_dma_attrs(&buf->dma_attrs);
+
+	if (flags & EXYNOS_BO_NONCONTIG)
+		attr = DMA_ATTR_WRITE_COMBINE;
+
+	dma_set_attr(attr, &buf->dma_attrs);
+
+	buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
+			&buf->dma_addr, GFP_KERNEL, &buf->dma_attrs);
+	if (!buf->kvaddr) {
+		DRM_ERROR("failed to allocate buffer.\n");
+		return -ENOMEM;
 	}
 
 	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
 	if (!buf->sgt) {
 		DRM_ERROR("failed to allocate sg table.\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto err_free_attrs;
 	}
 
-	ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
+	ret = dma_get_sgtable(dev->dev, buf->sgt, buf->kvaddr, buf->dma_addr,
+			buf->size);
 	if (ret < 0) {
-		DRM_ERROR("failed to initialize sg table.\n");
-		kfree(buf->sgt);
-		buf->sgt = NULL;
-		return -ENOMEM;
+		DRM_ERROR("failed to get sgtable.\n");
+		goto err_free_sgt;
 	}
 
-	buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
-			&buf->dma_addr, GFP_KERNEL);
-	if (!buf->kvaddr) {
-		DRM_ERROR("failed to allocate buffer.\n");
-		ret = -ENOMEM;
-		goto err1;
-	}
+	npages = buf->sgt->nents;
 
 	buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
 	if (!buf->pages) {
 		DRM_ERROR("failed to allocate pages.\n");
 		ret = -ENOMEM;
-		goto err2;
+		goto err_free_table;
 	}
 
 	sgl = buf->sgt->sgl;
-	start_addr = buf->dma_addr;
-
 	while (i < npages) {
-		buf->pages[i] = phys_to_page(start_addr);
-		sg_set_page(sgl, buf->pages[i], buf->page_size, 0);
-		sg_dma_address(sgl) = start_addr;
-		start_addr += buf->page_size;
+		buf->pages[i] = sg_page(sgl);
 		sgl = sg_next(sgl);
 		i++;
 	}
@@ -108,14 +95,16 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
 			buf->size);
 
 	return ret;
-err2:
-	dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
-			(dma_addr_t)buf->dma_addr);
-	buf->dma_addr = (dma_addr_t)NULL;
-err1:
+
+err_free_table:
 	sg_free_table(buf->sgt);
+err_free_sgt:
 	kfree(buf->sgt);
 	buf->sgt = NULL;
+err_free_attrs:
+	dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
+			(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
+	buf->dma_addr = (dma_addr_t)NULL;
 
 	return ret;
 }
@@ -125,16 +114,6 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
 {
 	DRM_DEBUG_KMS("%s.\n", __FILE__);
 
-	/*
-	 * release only physically continuous memory and
-	 * non-continuous memory would be released by exynos
-	 * gem framework.
-	 */
-	if (IS_NONCONTIG_BUFFER(flags)) {
-		DRM_DEBUG_KMS("not support allocation type.\n");
-		return;
-	}
-
 	if (!buf->dma_addr) {
 		DRM_DEBUG_KMS("dma_addr is invalid.\n");
 		return;
@@ -150,11 +129,8 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
 	kfree(buf->sgt);
 	buf->sgt = NULL;
 
-	kfree(buf->pages);
-	buf->pages = NULL;
-
-	dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
-			(dma_addr_t)buf->dma_addr);
+	dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
+			(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
 	buf->dma_addr = (dma_addr_t)NULL;
 }
 
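The rewritten lowlevel_buffer_allocate() above reduces to the following flow: pick an attribute, allocate and map in one dma_alloc_attrs() call, then recover a scatter-gather view with dma_get_sgtable(). This is a hedged stand-alone sketch against the 3.x-era struct dma_attrs interface; exynos_alloc_example() is a hypothetical helper, not code from the patch.

#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/*
 * Hypothetical helper mirroring the flow above. The caller provides an
 * already-allocated (zeroed) sg_table and releases the buffer later with
 * sg_free_table() plus dma_free_attrs().
 */
static int exynos_alloc_example(struct device *dev, size_t size, bool contig,
				void **kvaddr, dma_addr_t *dma_addr,
				struct dma_attrs *attrs, struct sg_table *sgt)
{
	int ret;

	init_dma_attrs(attrs);
	dma_set_attr(contig ? DMA_ATTR_FORCE_CONTIGUOUS :
			DMA_ATTR_WRITE_COMBINE, attrs);

	/* one call both allocates the pages and maps them into the iommu */
	*kvaddr = dma_alloc_attrs(dev, size, dma_addr, GFP_KERNEL, attrs);
	if (!*kvaddr)
		return -ENOMEM;

	/* recover a scatter-gather view of what was just allocated */
	ret = dma_get_sgtable(dev, sgt, *kvaddr, *dma_addr, size);
	if (ret < 0)
		dma_free_attrs(dev, size, *kvaddr, *dma_addr, attrs);

	return ret;
}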
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index fae1f2ec886c..b98da307faec 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -30,29 +30,31 @@
 
 #include <linux/dma-buf.h>
 
-static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages,
-		unsigned int page_size)
+static struct sg_table *exynos_get_sgt(struct drm_device *drm_dev,
+		struct exynos_drm_gem_buf *buf)
 {
 	struct sg_table *sgt = NULL;
-	struct scatterlist *sgl;
-	int i, ret;
+	int ret;
 
 	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
 	if (!sgt)
 		goto out;
 
-	ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL);
+	ret = sg_alloc_table(sgt, buf->sgt->nents, GFP_KERNEL);
 	if (ret)
 		goto err_free_sgt;
 
-	if (page_size < PAGE_SIZE)
-		page_size = PAGE_SIZE;
-
-	for_each_sg(sgt->sgl, sgl, nr_pages, i)
-		sg_set_page(sgl, pages[i], page_size, 0);
+	ret = dma_get_sgtable(drm_dev->dev, sgt, buf->kvaddr,
+			buf->dma_addr, buf->size);
+	if (ret < 0) {
+		DRM_ERROR("failed to get sgtable.\n");
+		goto err_free_table;
+	}
 
 	return sgt;
 
+err_free_table:
+	sg_free_table(sgt);
 err_free_sgt:
 	kfree(sgt);
 	sgt = NULL;
@@ -68,32 +70,31 @@ static struct sg_table *
 	struct drm_device *dev = gem_obj->base.dev;
 	struct exynos_drm_gem_buf *buf;
 	struct sg_table *sgt = NULL;
-	unsigned int npages;
 	int nents;
 
 	DRM_DEBUG_PRIME("%s\n", __FILE__);
 
-	mutex_lock(&dev->struct_mutex);
-
 	buf = gem_obj->buffer;
-
-	/* there should always be pages allocated. */
-	if (!buf->pages) {
-		DRM_ERROR("pages is null.\n");
-		goto err_unlock;
+	if (!buf) {
+		DRM_ERROR("buffer is null.\n");
+		return sgt;
 	}
 
-	npages = buf->size / buf->page_size;
+	mutex_lock(&dev->struct_mutex);
 
-	sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size);
-	if (!sgt) {
-		DRM_DEBUG_PRIME("exynos_pages_to_sg returned NULL!\n");
+	sgt = exynos_get_sgt(dev, buf);
+	if (!sgt)
 		goto err_unlock;
-	}
+
 	nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+	if (!nents) {
+		DRM_ERROR("failed to map sgl with iommu.\n");
+		sgt = NULL;
+		goto err_unlock;
+	}
 
-	DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n",
-			npages, buf->size, buf->page_size);
+	DRM_DEBUG_PRIME("buffer size = 0x%lx page_size = 0x%lx\n",
+			buf->size, buf->page_size);
 
 err_unlock:
 	mutex_unlock(&dev->struct_mutex);
@@ -105,6 +106,7 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
 					enum dma_data_direction dir)
 {
 	dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+
 	sg_free_table(sgt);
 	kfree(sgt);
 	sgt = NULL;
@@ -196,7 +198,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
 	struct scatterlist *sgl;
 	struct exynos_drm_gem_obj *exynos_gem_obj;
 	struct exynos_drm_gem_buf *buffer;
-	struct page *page;
 	int ret;
 
 	DRM_DEBUG_PRIME("%s\n", __FILE__);
@@ -233,38 +234,27 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
 		goto err_unmap_attach;
 	}
 
-	buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
-	if (!buffer->pages) {
-		DRM_ERROR("failed to allocate pages.\n");
-		ret = -ENOMEM;
-		goto err_free_buffer;
-	}
-
 	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
 	if (!exynos_gem_obj) {
 		ret = -ENOMEM;
-		goto err_free_pages;
+		goto err_free_buffer;
 	}
 
 	sgl = sgt->sgl;
 
-	if (sgt->nents == 1) {
-		buffer->dma_addr = sg_dma_address(sgt->sgl);
-		buffer->size = sg_dma_len(sgt->sgl);
+	buffer->size = dma_buf->size;
+	buffer->dma_addr = sg_dma_address(sgl);
 
+	if (sgt->nents == 1) {
 		/* always physically continuous memory if sgt->nents is 1. */
 		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
 	} else {
-		unsigned int i = 0;
-
-		buffer->dma_addr = sg_dma_address(sgl);
-		while (i < sgt->nents) {
-			buffer->pages[i] = sg_page(sgl);
-			buffer->size += sg_dma_len(sgl);
-			sgl = sg_next(sgl);
-			i++;
-		}
-
+		/*
+		 * this case could be CONTIG or NONCONTIG type but for now
+		 * sets NONCONTIG.
+		 * TODO. we have to find a way that exporter can notify
+		 * the type of its own buffer to importer.
+		 */
 		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
 	}
 
@@ -277,9 +267,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
 
 	return &exynos_gem_obj->base;
 
-err_free_pages:
-	kfree(buffer->pages);
-	buffer->pages = NULL;
 err_free_buffer:
 	kfree(buffer);
 	buffer = NULL;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 1de7baafddd0..2b287d2fc92e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -40,6 +40,7 @@
 #include "exynos_drm_vidi.h"
 #include "exynos_drm_dmabuf.h"
 #include "exynos_drm_g2d.h"
+#include "exynos_drm_iommu.h"
 
 #define DRIVER_NAME	"exynos"
 #define DRIVER_DESC	"Samsung SoC DRM"
@@ -66,6 +67,18 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 	INIT_LIST_HEAD(&private->pageflip_event_list);
 	dev->dev_private = (void *)private;
 
+	/*
+	 * create mapping to manage iommu table and set a pointer to iommu
+	 * mapping structure to iommu_mapping of private data.
+	 * also this iommu_mapping can be used to check if iommu is supported
+	 * or not.
+	 */
+	ret = drm_create_iommu_mapping(dev);
+	if (ret < 0) {
+		DRM_ERROR("failed to create iommu mapping.\n");
+		goto err_crtc;
+	}
+
 	drm_mode_config_init(dev);
 
 	/* init kms poll for handling hpd */
@@ -80,7 +93,7 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 	for (nr = 0; nr < MAX_CRTC; nr++) {
 		ret = exynos_drm_crtc_create(dev, nr);
 		if (ret)
-			goto err_crtc;
+			goto err_release_iommu_mapping;
 	}
 
 	for (nr = 0; nr < MAX_PLANE; nr++) {
@@ -89,12 +102,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 
 		plane = exynos_plane_init(dev, possible_crtcs, false);
 		if (!plane)
-			goto err_crtc;
+			goto err_release_iommu_mapping;
 	}
 
 	ret = drm_vblank_init(dev, MAX_CRTC);
 	if (ret)
-		goto err_crtc;
+		goto err_release_iommu_mapping;
 
 	/*
 	 * probe sub drivers such as display controller and hdmi driver,
@@ -126,6 +139,8 @@ err_drm_device:
 	exynos_drm_device_unregister(dev);
 err_vblank:
 	drm_vblank_cleanup(dev);
+err_release_iommu_mapping:
+	drm_release_iommu_mapping(dev);
 err_crtc:
 	drm_mode_config_cleanup(dev);
 	kfree(private);
@@ -142,6 +157,8 @@ static int exynos_drm_unload(struct drm_device *dev)
 	drm_vblank_cleanup(dev);
 	drm_kms_helper_poll_fini(dev);
 	drm_mode_config_cleanup(dev);
+
+	drm_release_iommu_mapping(dev);
 	kfree(dev->dev_private);
 
 	dev->dev_private = NULL;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index a34231036496..8c9f4b05fc17 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -241,6 +241,13 @@ struct drm_exynos_file_private {
 
 /*
  * Exynos drm private structure.
+ *
+ * @da_start: start address to device address space.
+ *	with iommu, device address space starts from this address
+ *	otherwise default one.
+ * @da_space_size: size of device address space.
+ *	if 0 then default value is used for it.
+ * @da_space_order: order to device address space.
  */
 struct exynos_drm_private {
 	struct drm_fb_helper *fb_helper;
@@ -255,6 +262,10 @@ struct exynos_drm_private {
 	struct drm_crtc *crtc[MAX_CRTC];
 	struct drm_property *plane_zpos_property;
 	struct drm_property *crtc_mode_property;
+
+	unsigned long da_start;
+	unsigned long da_space_size;
+	unsigned long da_space_order;
 };
 
 /*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 4ef4cd3f9936..7190b64a368b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -30,10 +30,12 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <uapi/drm/exynos_drm.h>
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
 
 #define to_exynos_fb(x)	container_of(x, struct exynos_drm_fb, fb)
 
@@ -50,6 +52,32 @@ struct exynos_drm_fb {
 	struct exynos_drm_gem_obj	*exynos_gem_obj[MAX_FB_BUFFER];
 };
 
+static int check_fb_gem_memory_type(struct drm_device *drm_dev,
+				struct exynos_drm_gem_obj *exynos_gem_obj)
+{
+	unsigned int flags;
+
+	/*
+	 * if exynos drm driver supports iommu then framebuffer can use
+	 * all the buffer types.
+	 */
+	if (is_drm_iommu_supported(drm_dev))
+		return 0;
+
+	flags = exynos_gem_obj->flags;
+
+	/*
+	 * without iommu support, not support physically non-continuous memory
+	 * for framebuffer.
+	 */
+	if (IS_NONCONTIG_BUFFER(flags)) {
+		DRM_ERROR("cannot use this gem memory type for fb.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
 {
 	struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
@@ -128,14 +156,25 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
 			    struct drm_gem_object *obj)
 {
 	struct exynos_drm_fb *exynos_fb;
+	struct exynos_drm_gem_obj *exynos_gem_obj;
 	int ret;
 
+	exynos_gem_obj = to_exynos_gem_obj(obj);
+
+	ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+	if (ret < 0) {
+		DRM_ERROR("cannot use this gem memory type for fb.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
 	exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
 	if (!exynos_fb) {
 		DRM_ERROR("failed to allocate exynos drm framebuffer\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
+	exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
+
 	ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
 	if (ret) {
 		DRM_ERROR("failed to initialize framebuffer\n");
@@ -143,7 +182,6 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
 	}
 
 	drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
-	exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
 
 	return &exynos_fb->fb;
 }
@@ -214,6 +252,9 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 	DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
 
 	for (i = 1; i < exynos_fb->buf_cnt; i++) {
+		struct exynos_drm_gem_obj *exynos_gem_obj;
+		int ret;
+
 		obj = drm_gem_object_lookup(dev, file_priv,
 				mode_cmd->handles[i]);
 		if (!obj) {
@@ -222,6 +263,15 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 			return ERR_PTR(-ENOENT);
 		}
 
+		exynos_gem_obj = to_exynos_gem_obj(obj);
+
+		ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+		if (ret < 0) {
+			DRM_ERROR("cannot use this gem memory type for fb.\n");
+			exynos_drm_fb_destroy(fb);
+			return ERR_PTR(ret);
+		}
+
 		exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
 	}
 
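The check_fb_gem_memory_type() logic added above boils down to a small predicate, restated here with hypothetical names: with IOMMU support any GEM buffer type may back a framebuffer, since the device sees a contiguous view through the IOMMU; without it, only physically contiguous buffers are acceptable for scanout.

#include <linux/types.h>
#include <uapi/drm/exynos_drm.h>	/* EXYNOS_BO_NONCONTIG, as in the patch */

/* hypothetical predicate mirroring check_fb_gem_memory_type() */
static bool example_fb_buffer_ok(bool iommu_supported, unsigned int flags)
{
	/* the iommu presents any buffer as contiguous to the device */
	if (iommu_supported)
		return true;

	/* without an iommu, scanout hardware needs contiguous memory */
	return !(flags & EXYNOS_BO_NONCONTIG);
}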
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index d2545560664f..08d0218d5ba6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -83,61 +83,12 @@ static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
 
 static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
 {
-	if (!IS_NONCONTIG_BUFFER(flags)) {
-		if (size >= SZ_1M)
-			return roundup(size, SECTION_SIZE);
-		else if (size >= SZ_64K)
-			return roundup(size, SZ_64K);
-		else
-			goto out;
-	}
-out:
-	return roundup(size, PAGE_SIZE);
-}
-
-struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
-						gfp_t gfpmask)
-{
-	struct page *p, **pages;
-	int i, npages;
-
-	npages = obj->size >> PAGE_SHIFT;
-
-	pages = drm_malloc_ab(npages, sizeof(struct page *));
-	if (pages == NULL)
-		return ERR_PTR(-ENOMEM);
-
-	for (i = 0; i < npages; i++) {
-		p = alloc_page(gfpmask);
-		if (IS_ERR(p))
-			goto fail;
-		pages[i] = p;
-	}
-
-	return pages;
-
-fail:
-	while (--i)
-		__free_page(pages[i]);
-
-	drm_free_large(pages);
-	return ERR_CAST(p);
-}
-
-static void exynos_gem_put_pages(struct drm_gem_object *obj,
-					struct page **pages)
-{
-	int npages;
+	/* TODO */
 
-	npages = obj->size >> PAGE_SHIFT;
-
-	while (--npages >= 0)
-		__free_page(pages[npages]);
-
-	drm_free_large(pages);
+	return roundup(size, PAGE_SIZE);
 }
 
-static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
+static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
 					struct vm_area_struct *vma,
 					unsigned long f_vaddr,
 					pgoff_t page_offset)
@@ -157,85 +108,6 @@ static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
 	return vm_insert_mixed(vma, f_vaddr, pfn);
 }
 
-static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
-{
-	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
-	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
-	struct scatterlist *sgl;
-	struct page **pages;
-	unsigned int npages, i = 0;
-	int ret;
-
-	if (buf->pages) {
-		DRM_DEBUG_KMS("already allocated.\n");
-		return -EINVAL;
-	}
-
-	pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE);
-	if (IS_ERR(pages)) {
-		DRM_ERROR("failed to get pages.\n");
-		return PTR_ERR(pages);
-	}
-
-	npages = obj->size >> PAGE_SHIFT;
-	buf->page_size = PAGE_SIZE;
-
-	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
-	if (!buf->sgt) {
-		DRM_ERROR("failed to allocate sg table.\n");
-		ret = -ENOMEM;
-		goto err;
-	}
-
-	ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
-	if (ret < 0) {
-		DRM_ERROR("failed to initialize sg table.\n");
-		ret = -EFAULT;
-		goto err1;
-	}
-
-	sgl = buf->sgt->sgl;
-
-	/* set all pages to sg list. */
-	while (i < npages) {
-		sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
-		sg_dma_address(sgl) = page_to_phys(pages[i]);
-		i++;
-		sgl = sg_next(sgl);
-	}
-
-	/* add some codes for UNCACHED type here. TODO */
-
-	buf->pages = pages;
-	return ret;
-err1:
-	kfree(buf->sgt);
-	buf->sgt = NULL;
-err:
-	exynos_gem_put_pages(obj, pages);
-	return ret;
-
-}
-
-static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
-{
-	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
-	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
-
-	/*
-	 * if buffer typs is EXYNOS_BO_NONCONTIG then release all pages
-	 * allocated at gem fault handler.
-	 */
-	sg_free_table(buf->sgt);
-	kfree(buf->sgt);
-	buf->sgt = NULL;
-
-	exynos_gem_put_pages(obj, buf->pages);
-	buf->pages = NULL;
-
-	/* add some codes for UNCACHED type here. TODO */
-}
-
 static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
 					struct drm_file *file_priv,
 					unsigned int *handle)
@@ -270,9 +142,6 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
 
 	DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
 
-	if (!buf->pages)
-		return;
-
 	/*
 	 * do not release memory region from exporter.
 	 *
@@ -282,10 +151,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
 	if (obj->import_attach)
 		goto out;
 
-	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
-		exynos_drm_gem_put_pages(obj);
-	else
-		exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
+	exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
 
 out:
 	exynos_drm_fini_buf(obj->dev, buf);
@@ -364,22 +230,10 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
 	/* set memory type and cache attribute from user side. */
 	exynos_gem_obj->flags = flags;
 
-	/*
-	 * allocate all pages as desired size if user wants to allocate
-	 * physically non-continuous memory.
-	 */
-	if (flags & EXYNOS_BO_NONCONTIG) {
-		ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
-		if (ret < 0) {
-			drm_gem_object_release(&exynos_gem_obj->base);
-			goto err_fini_buf;
-		}
-	} else {
-		ret = exynos_drm_alloc_buf(dev, buf, flags);
-		if (ret < 0) {
-			drm_gem_object_release(&exynos_gem_obj->base);
-			goto err_fini_buf;
-		}
+	ret = exynos_drm_alloc_buf(dev, buf, flags);
+	if (ret < 0) {
+		drm_gem_object_release(&exynos_gem_obj->base);
+		goto err_fini_buf;
 	}
 
 	return exynos_gem_obj;
@@ -495,8 +349,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
 	struct drm_gem_object *obj = filp->private_data;
 	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
 	struct exynos_drm_gem_buf *buffer;
-	unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
-	int ret;
+	unsigned long vm_size;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -504,7 +357,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
 
 	update_vm_cache_attr(exynos_gem_obj, vma);
 
-	vm_size = usize = vma->vm_end - vma->vm_start;
+	vm_size = vma->vm_end - vma->vm_start;
 
 	/*
 	 * a buffer contains information to physically continuous memory
@@ -516,42 +369,9 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
 	if (vm_size > buffer->size)
 		return -EINVAL;
 
-	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
-		int i = 0;
-
-		if (!buffer->pages)
-			return -EINVAL;
-
-		vma->vm_flags |= VM_MIXEDMAP;
-
-		do {
-			ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
-			if (ret) {
-				DRM_ERROR("failed to remap user space.\n");
-				return ret;
-			}
-
-			uaddr += PAGE_SIZE;
-			usize -= PAGE_SIZE;
-		} while (usize > 0);
-	} else {
-		/*
-		 * get page frame number to physical memory to be mapped
-		 * to user space.
-		 */
-		pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
-								PAGE_SHIFT;
-
-		DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
-
-		if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
-					vma->vm_page_prot)) {
-			DRM_ERROR("failed to remap pfn range.\n");
-			return -EAGAIN;
-		}
-	}
-
-	return 0;
+	return dma_mmap_attrs(obj->dev->dev, vma, buffer->kvaddr,
+				buffer->dma_addr, buffer->size,
+				&buffer->dma_attrs);
 }
 
 static const struct file_operations exynos_drm_gem_fops = {
@@ -753,9 +573,9 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
+	ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
 	if (ret < 0)
-		DRM_ERROR("failed to map pages.\n");
+		DRM_ERROR("failed to map a buffer with user.\n");
 
 	mutex_unlock(&dev->struct_mutex);
 
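With allocation moved to the dma mapping api, the mmap path above collapses into one call: instead of hand-walking pages (vm_insert_page) or remapping a physical range (remap_pfn_range), dma_mmap_attrs() maps the buffer to user space with the same attributes it was allocated with. A minimal sketch, assuming a buffer allocated with dma_alloc_attrs() and using a hypothetical helper name:

#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/mm_types.h>

/* hypothetical helper mirroring the new exynos_drm_gem_mmap_buffer() body */
static int exynos_mmap_example(struct device *dev, struct vm_area_struct *vma,
			       void *kvaddr, dma_addr_t dma_addr, size_t size,
			       struct dma_attrs *attrs)
{
	unsigned long vm_size = vma->vm_end - vma->vm_start;

	/* user space may not map more than was allocated */
	if (vm_size > size)
		return -EINVAL;

	/* maps with the same attributes the buffer was allocated with */
	return dma_mmap_attrs(dev, vma, kvaddr, dma_addr, size, attrs);
}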
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 085b2a5d5f70..0236321521a1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -46,6 +46,7 @@
 struct exynos_drm_gem_buf {
 	void __iomem		*kvaddr;
 	dma_addr_t		dma_addr;
+	struct dma_attrs	dma_attrs;
 	struct sg_table		*sgt;
 	struct page		**pages;
 	unsigned long		page_size;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
new file mode 100644
index 000000000000..09db1983eb1a
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -0,0 +1,150 @@
+/* exynos_drm_iommu.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drmP.h>
+#include <drm/exynos_drm.h>
+
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include <linux/kref.h>
+
+#include <asm/dma-iommu.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_iommu.h"
+
+/*
+ * drm_create_iommu_mapping - create a mapping structure
+ *
+ * @drm_dev: DRM device
+ */
+int drm_create_iommu_mapping(struct drm_device *drm_dev)
+{
+	struct dma_iommu_mapping *mapping = NULL;
+	struct exynos_drm_private *priv = drm_dev->dev_private;
+	struct device *dev = drm_dev->dev;
+
+	if (!priv->da_start)
+		priv->da_start = EXYNOS_DEV_ADDR_START;
+	if (!priv->da_space_size)
+		priv->da_space_size = EXYNOS_DEV_ADDR_SIZE;
+	if (!priv->da_space_order)
+		priv->da_space_order = EXYNOS_DEV_ADDR_ORDER;
+
+	mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start,
+						priv->da_space_size,
+						priv->da_space_order);
+	if (!mapping)
+		return -ENOMEM;
+
+	dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
+					GFP_KERNEL);
+	dma_set_max_seg_size(dev, 0xffffffffu);
+	dev->archdata.mapping = mapping;
+
+	return 0;
+}
+
+/*
+ * drm_release_iommu_mapping - release iommu mapping structure
+ *
+ * @drm_dev: DRM device
+ *
+ * if mapping->kref becomes 0 then all things related to iommu mapping
+ * will be released
+ */
+void drm_release_iommu_mapping(struct drm_device *drm_dev)
+{
+	struct device *dev = drm_dev->dev;
+
+	arm_iommu_release_mapping(dev->archdata.mapping);
+}
+
+/*
+ * drm_iommu_attach_device - attach device to iommu mapping
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be attached
+ *
+ * This function should be called by sub drivers to attach themselves to
+ * the iommu mapping.
+ */
+int drm_iommu_attach_device(struct drm_device *drm_dev,
+				struct device *subdrv_dev)
+{
+	struct device *dev = drm_dev->dev;
+	int ret;
+
+	if (!dev->archdata.mapping) {
+		DRM_ERROR("iommu_mapping is null.\n");
+		return -EFAULT;
+	}
+
+	subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
+					sizeof(*subdrv_dev->dma_parms),
+					GFP_KERNEL);
+	dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
+
+	ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping);
+	if (ret < 0) {
+		DRM_DEBUG_KMS("failed iommu attach.\n");
+		return ret;
+	}
+
+	/*
+	 * Set dma_ops to drm_device just one time.
+	 *
+	 * The dma mapping api needs device object and the api is used
+	 * to allocate physical memory and map it with iommu table.
+	 * If iommu attach succeeded, the sub driver would have dma_ops
+	 * for iommu and also all sub drivers have same dma_ops.
+	 */
+	if (!dev->archdata.dma_ops)
+		dev->archdata.dma_ops = subdrv_dev->archdata.dma_ops;
+
+	return 0;
+}
+
+/*
+ * drm_iommu_detach_device - detach device address space mapping from device
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be detached
+ *
+ * This function should be called by sub drivers to detach themselves from
+ * the iommu mapping.
+ */
+void drm_iommu_detach_device(struct drm_device *drm_dev,
+				struct device *subdrv_dev)
+{
+	struct device *dev = drm_dev->dev;
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+	if (!mapping || !mapping->domain)
+		return;
+
+	iommu_detach_device(mapping->domain, subdrv_dev);
+	drm_release_iommu_mapping(drm_dev);
+}
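A sub driver would consume these helpers roughly as follows; this probe/remove pairing is hypothetical and only shows the intended calling convention, not code from the patch:

#include <drm/drmP.h>

#include "exynos_drm_iommu.h"

/* hypothetical probe/remove pair for a sub driver using the helpers above */
static int example_subdrv_probe(struct drm_device *drm_dev,
				struct device *subdrv_dev)
{
	int ret;

	/* join the DRM device's shared iommu mapping */
	ret = drm_iommu_attach_device(drm_dev, subdrv_dev);
	if (ret < 0)
		DRM_DEBUG_KMS("failed to attach %s to iommu mapping.\n",
				dev_name(subdrv_dev));

	return ret;
}

static void example_subdrv_remove(struct drm_device *drm_dev,
				  struct device *subdrv_dev)
{
	/* leave the mapping again when the sub driver goes away */
	drm_iommu_detach_device(drm_dev, subdrv_dev);
}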
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
new file mode 100644
index 000000000000..18a0ca190b98
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -0,0 +1,85 @@
+/* exynos_drm_iommu.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_IOMMU_H_
+#define _EXYNOS_DRM_IOMMU_H_
+
+#define EXYNOS_DEV_ADDR_START	0x20000000
+#define EXYNOS_DEV_ADDR_SIZE	0x40000000
+#define EXYNOS_DEV_ADDR_ORDER	0x4
+
+#ifdef CONFIG_DRM_EXYNOS_IOMMU
+
+int drm_create_iommu_mapping(struct drm_device *drm_dev);
+
+void drm_release_iommu_mapping(struct drm_device *drm_dev);
+
+int drm_iommu_attach_device(struct drm_device *drm_dev,
+				struct device *subdrv_dev);
+
+void drm_iommu_detach_device(struct drm_device *drm_dev,
+				struct device *subdrv_dev);
+
+static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
+{
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+	struct device *dev = drm_dev->dev;
+
+	return dev->archdata.mapping ? true : false;
+#else
+	return false;
+#endif
+}
+
+#else
+
+struct dma_iommu_mapping;
+static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
+{
+	return 0;
+}
+
+static inline void drm_release_iommu_mapping(struct drm_device *drm_dev)
+{
+}
+
+static inline int drm_iommu_attach_device(struct drm_device *drm_dev,
+						struct device *subdrv_dev)
+{
+	return 0;
+}
+
+static inline void drm_iommu_detach_device(struct drm_device *drm_dev,
+						struct device *subdrv_dev)
+{
+}
+
+static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
+{
+	return false;
+}
+
+#endif
+#endif