author    Dave Airlie <airlied@redhat.com>  2012-12-05 14:53:10 -0500
committer Dave Airlie <airlied@redhat.com>  2012-12-05 14:53:10 -0500
commit    00f09afd1740c3b2a1434bf48a124b316aab19f2
tree      8e7d0c97f1ebecf326f19de32770f868c2994522
parent    7136470d4b37b46565b29b8b9425a8361421483b
parent    a144c2e9f17b738ac47716f1fb033cbfcfcde934
Merge branch 'exynos-drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos into drm-next
This patch set adds iommu support, a userptr feature for g2d, minor fixups
and code cleanups. The iommu feature depends on the dma-mapping framework
patches below; the DMA_ATTR_FORCE_CONTIGUOUS patch is used to allocate a
fully physically contiguous memory region.

- add sending of AVI and AUI info frames.
  . this adds some code for composing AVI and AUI info frames and sending
    them every VSYNC, for HDMI certification.
- bug fix to previous pull request.
- some code cleanup.

* 'exynos-drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos: (32 commits)
  drm/exynos: sending AVI and AUI info frames
  drm/exynos: Use devm_clk_get in exynos_drm_fimd.c
  drm/exynos: Use devm_* APIs in exynos_hdmi.c
  drm/exynos: Use devm_clk_get in exynos_mixer.c
  drm/exynos: Fix potential NULL pointer dereference
  drm/exynos: Use devm_clk_get in exynos_drm_g2d.c
  drm/exynos: use sgt instead of pages for framebuffer address
  drm: exynos: fix for loosing display mode header during mode adjustment
  drm/exynos: fix memory leak to EDID block
  drm/exynos: remove 'pages' and 'page_size' elements in exynos gem buffer
  drm/exynos: add exynos drm specific fb_mmap function
  drm/exynos: make sure that overlay data are updated
  drm/exynos: add vm_ops to specific gem mmaper
  drm/exynos: add userptr feature for g2d module
  drm/exynos: remove unnecessary sg_alloc_table call
  drm: exynos: fix for mapping of dma buffers
  drm/exynos: remove EXYNOS_BO_NONCONTIG type checking.
  drm/exynos: add iommu support for g2d
  drm/exynos: add iommu support for hdmi driver
  drm/exynos: add iommu support to fimd driver
  ...
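For orientation, the userptr path lets userspace hand the g2d module a plain
malloc'd buffer instead of a GEM handle; the driver pins the pages behind the
address and maps them for DMA. Below is a hypothetical reconstruction of the
descriptor involved, with field names taken from the copy_from_user() call in
the exynos_drm_g2d.c hunks further down; the authoritative layout lives in
include/uapi/drm/exynos_drm.h, which this page only lists in the diffstat:

	/* hypothetical reconstruction, not the verbatim uapi header */
	struct drm_exynos_g2d_userptr {
		unsigned long userptr;	/* user virtual address */
		unsigned long size;	/* buffer size in bytes */
	};

A cmdlist entry tagged BUF_TYPE_USERPTR carries a pointer to such a descriptor
where a GEM handle would otherwise go.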
-rw-r--r--  Documentation/DMA-attributes.txt            |   9
-rw-r--r--  arch/arm/mm/dma-mapping.c                   |  41
-rw-r--r--  drivers/gpu/drm/drm_irq.c                   |   3
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig              |   6
-rw-r--r--  drivers/gpu/drm/exynos/Makefile             |   1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_buf.c     |  94
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_buf.h     |   4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c    |   5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dmabuf.c  |  84
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c     |  23
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h     |  14
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_encoder.c |  49
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_encoder.h |   1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c      |  56
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c   |  33
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c    |  64
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c     | 495
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c     | 435
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h     |  56
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.c    |  15
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.h    |   1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_iommu.c   | 150
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_iommu.h   |  85
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.c   |   1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c    |  20
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c        | 237
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c       |  93
-rw-r--r--  drivers/gpu/drm/exynos/regs-hdmi.h          |  17
-rw-r--r--  include/linux/dma-attrs.h                   |   1
-rw-r--r--  include/uapi/drm/exynos_drm.h               |  13
30 files changed, 1469 insertions, 637 deletions
diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt
index f50309081ac7..e59480db9ee0 100644
--- a/Documentation/DMA-attributes.txt
+++ b/Documentation/DMA-attributes.txt
@@ -91,3 +91,12 @@ transferred to 'device' domain. This attribute can be also used for
 dma_unmap_{single,page,sg} functions family to force buffer to stay in
 device domain after releasing a mapping for it. Use this attribute with
 care!
+
+DMA_ATTR_FORCE_CONTIGUOUS
+-------------------------
+
+By default the DMA-mapping subsystem is allowed to assemble the buffer
+allocated by the dma_alloc_attrs() function from individual pages, if it
+can be mapped as a contiguous chunk into the device's DMA address space.
+By specifying this attribute the allocated buffer is forced to be
+contiguous also in physical memory.
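As a usage note (not part of the patch): a driver that needs a physically
contiguous region sets the attribute on a struct dma_attrs before calling
dma_alloc_attrs(). A minimal sketch against the 3.7-era dma-mapping API,
assuming dev and size are already in scope:

	#include <linux/dma-mapping.h>
	#include <linux/dma-attrs.h>

	struct dma_attrs attrs;
	dma_addr_t dma_addr;
	void *vaddr;

	init_dma_attrs(&attrs);
	dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);

	/* one physically contiguous chunk, or NULL on failure */
	vaddr = dma_alloc_attrs(dev, size, &dma_addr, GFP_KERNEL, &attrs);
	if (vaddr)
		dma_free_attrs(dev, size, vaddr, dma_addr, &attrs);

This is the same pattern exynos_drm_buf.c adopts below for contiguous GEM
buffers.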
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 58bc3e4d3bd0..f076f209c7a4 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1036,7 +1036,8 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
 	spin_unlock_irqrestore(&mapping->lock, flags);
 }
 
-static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
+static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
+					  gfp_t gfp, struct dma_attrs *attrs)
 {
 	struct page **pages;
 	int count = size >> PAGE_SHIFT;
@@ -1050,6 +1051,23 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t
 	if (!pages)
 		return NULL;
 
+	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs))
+	{
+		unsigned long order = get_order(size);
+		struct page *page;
+
+		page = dma_alloc_from_contiguous(dev, count, order);
+		if (!page)
+			goto error;
+
+		__dma_clear_buffer(page, size);
+
+		for (i = 0; i < count; i++)
+			pages[i] = page + i;
+
+		return pages;
+	}
+
 	while (count) {
 		int j, order = __fls(count);
 
@@ -1083,14 +1101,21 @@ error:
 	return NULL;
 }
 
-static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size)
+static int __iommu_free_buffer(struct device *dev, struct page **pages,
+			       size_t size, struct dma_attrs *attrs)
 {
 	int count = size >> PAGE_SHIFT;
 	int array_size = count * sizeof(struct page *);
 	int i;
-	for (i = 0; i < count; i++)
-		if (pages[i])
-			__free_pages(pages[i], 0);
+
+	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
+		dma_release_from_contiguous(dev, pages[0], count);
+	} else {
+		for (i = 0; i < count; i++)
+			if (pages[i])
+				__free_pages(pages[i], 0);
+	}
+
 	if (array_size <= PAGE_SIZE)
 		kfree(pages);
 	else
@@ -1252,7 +1277,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	if (gfp & GFP_ATOMIC)
 		return __iommu_alloc_atomic(dev, size, handle);
 
-	pages = __iommu_alloc_buffer(dev, size, gfp);
+	pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
 	if (!pages)
 		return NULL;
 
@@ -1273,7 +1298,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
err_mapping:
 	__iommu_remove_mapping(dev, *handle, size);
err_buffer:
-	__iommu_free_buffer(dev, pages, size);
+	__iommu_free_buffer(dev, pages, size, attrs);
 	return NULL;
 }
 
@@ -1329,7 +1354,7 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	}
 
 	__iommu_remove_mapping(dev, handle, size);
-	__iommu_free_buffer(dev, pages, size);
+	__iommu_free_buffer(dev, pages, size, attrs);
 }
 
 static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 2ba9d7fac345..19c01ca3cc76 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1021,6 +1021,8 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
 
 	/* Send any queued vblank events, lest the natives grow disquiet */
 	seq = drm_vblank_count_and_time(dev, crtc, &now);
+
+	spin_lock(&dev->event_lock);
 	list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
 		if (e->pipe != crtc)
 			continue;
@@ -1031,6 +1033,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
 		drm_vblank_put(dev, e->pipe);
 		send_vblank_event(dev, e, seq, &now);
 	}
+	spin_unlock(&dev->event_lock);
 
 	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 }
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index fc345d4ebb03..86fb75d3fcad 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -10,6 +10,12 @@ config DRM_EXYNOS
 	  Choose this option if you have a Samsung SoC EXYNOS chipset.
 	  If M is selected the module will be called exynosdrm.
 
+config DRM_EXYNOS_IOMMU
+	bool "EXYNOS DRM IOMMU Support"
+	depends on DRM_EXYNOS && EXYNOS_IOMMU && ARM_DMA_USE_IOMMU
+	help
+	  Choose this option if you want to use IOMMU feature for DRM.
+
 config DRM_EXYNOS_DMABUF
 	bool "EXYNOS DRM DMABUF"
 	depends on DRM_EXYNOS
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index eb651ca8e2a8..26813b8a5056 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -8,6 +8,7 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
 		exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
 		exynos_drm_plane.o
 
+exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU)	+= exynos_drm_iommu.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF)	+= exynos_drm_dmabuf.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD)	+= exynos_drm_fimd.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI)	+= exynos_hdmi.o exynos_mixer.o \
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 118c117b3226..72bf97b96ba0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -33,73 +33,42 @@
 static int lowlevel_buffer_allocate(struct drm_device *dev,
 		unsigned int flags, struct exynos_drm_gem_buf *buf)
 {
-	dma_addr_t start_addr;
-	unsigned int npages, i = 0;
-	struct scatterlist *sgl;
 	int ret = 0;
+	enum dma_attr attr = DMA_ATTR_FORCE_CONTIGUOUS;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (IS_NONCONTIG_BUFFER(flags)) {
-		DRM_DEBUG_KMS("not support allocation type.\n");
-		return -EINVAL;
-	}
-
 	if (buf->dma_addr) {
 		DRM_DEBUG_KMS("already allocated.\n");
 		return 0;
 	}
 
-	if (buf->size >= SZ_1M) {
-		npages = buf->size >> SECTION_SHIFT;
-		buf->page_size = SECTION_SIZE;
-	} else if (buf->size >= SZ_64K) {
-		npages = buf->size >> 16;
-		buf->page_size = SZ_64K;
-	} else {
-		npages = buf->size >> PAGE_SHIFT;
-		buf->page_size = PAGE_SIZE;
-	}
+	init_dma_attrs(&buf->dma_attrs);
 
-	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
-	if (!buf->sgt) {
-		DRM_ERROR("failed to allocate sg table.\n");
-		return -ENOMEM;
-	}
+	if (flags & EXYNOS_BO_NONCONTIG)
+		attr = DMA_ATTR_WRITE_COMBINE;
 
-	ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
-	if (ret < 0) {
-		DRM_ERROR("failed to initialize sg table.\n");
-		kfree(buf->sgt);
-		buf->sgt = NULL;
-		return -ENOMEM;
-	}
+	dma_set_attr(attr, &buf->dma_attrs);
 
-	buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
-			&buf->dma_addr, GFP_KERNEL);
+	buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
+			&buf->dma_addr, GFP_KERNEL, &buf->dma_attrs);
 	if (!buf->kvaddr) {
 		DRM_ERROR("failed to allocate buffer.\n");
-		ret = -ENOMEM;
-		goto err1;
+		return -ENOMEM;
 	}
 
-	buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
-	if (!buf->pages) {
-		DRM_ERROR("failed to allocate pages.\n");
+	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!buf->sgt) {
+		DRM_ERROR("failed to allocate sg table.\n");
 		ret = -ENOMEM;
-		goto err2;
+		goto err_free_attrs;
 	}
 
-	sgl = buf->sgt->sgl;
-	start_addr = buf->dma_addr;
-
-	while (i < npages) {
-		buf->pages[i] = phys_to_page(start_addr);
-		sg_set_page(sgl, buf->pages[i], buf->page_size, 0);
-		sg_dma_address(sgl) = start_addr;
-		start_addr += buf->page_size;
-		sgl = sg_next(sgl);
-		i++;
+	ret = dma_get_sgtable(dev->dev, buf->sgt, buf->kvaddr, buf->dma_addr,
+			buf->size);
+	if (ret < 0) {
+		DRM_ERROR("failed to get sgtable.\n");
+		goto err_free_sgt;
 	}
 
 	DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
@@ -108,14 +77,14 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
 			buf->size);
 
 	return ret;
-err2:
-	dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
-			(dma_addr_t)buf->dma_addr);
-	buf->dma_addr = (dma_addr_t)NULL;
-err1:
-	sg_free_table(buf->sgt);
+
+err_free_sgt:
 	kfree(buf->sgt);
 	buf->sgt = NULL;
+err_free_attrs:
+	dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
+			(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
+	buf->dma_addr = (dma_addr_t)NULL;
 
 	return ret;
 }
@@ -125,16 +94,6 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
 {
 	DRM_DEBUG_KMS("%s.\n", __FILE__);
 
-	/*
-	 * release only physically continuous memory and
-	 * non-continuous memory would be released by exynos
-	 * gem framework.
-	 */
-	if (IS_NONCONTIG_BUFFER(flags)) {
-		DRM_DEBUG_KMS("not support allocation type.\n");
-		return;
-	}
-
 	if (!buf->dma_addr) {
 		DRM_DEBUG_KMS("dma_addr is invalid.\n");
 		return;
@@ -150,11 +109,8 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
 	kfree(buf->sgt);
 	buf->sgt = NULL;
 
-	kfree(buf->pages);
-	buf->pages = NULL;
-
-	dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
-			(dma_addr_t)buf->dma_addr);
+	dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
+			(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
 	buf->dma_addr = (dma_addr_t)NULL;
 }
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
index 3388e4eb4ba2..25cf16285033 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -34,12 +34,12 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
 void exynos_drm_fini_buf(struct drm_device *dev,
 				struct exynos_drm_gem_buf *buffer);
 
-/* allocate physical memory region and setup sgt and pages. */
+/* allocate physical memory region and setup sgt. */
 int exynos_drm_alloc_buf(struct drm_device *dev,
 				struct exynos_drm_gem_buf *buf,
 				unsigned int flags);
 
-/* release physical memory region, sgt and pages. */
+/* release physical memory region, and sgt. */
 void exynos_drm_free_buf(struct drm_device *dev,
 				unsigned int flags,
 				struct exynos_drm_gem_buf *buffer);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index fce245f64c4f..2efa4b031d73 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -236,16 +236,21 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
 			goto out;
 		}
 
+		spin_lock_irq(&dev->event_lock);
 		list_add_tail(&event->base.link,
 				&dev_priv->pageflip_event_list);
+		spin_unlock_irq(&dev->event_lock);
 
 		crtc->fb = fb;
 		ret = exynos_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y,
 						    NULL);
 		if (ret) {
 			crtc->fb = old_fb;
+
+			spin_lock_irq(&dev->event_lock);
 			drm_vblank_put(dev, exynos_crtc->pipe);
 			list_del(&event->base.link);
+			spin_unlock_irq(&dev->event_lock);
 
 			goto out;
 		}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index fae1f2ec886c..539da9f4eb97 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -30,26 +30,22 @@
 
 #include <linux/dma-buf.h>
 
-static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages,
-		unsigned int page_size)
+static struct sg_table *exynos_get_sgt(struct drm_device *drm_dev,
+		struct exynos_drm_gem_buf *buf)
 {
 	struct sg_table *sgt = NULL;
-	struct scatterlist *sgl;
-	int i, ret;
+	int ret;
 
 	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
 	if (!sgt)
 		goto out;
 
-	ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL);
-	if (ret)
+	ret = dma_get_sgtable(drm_dev->dev, sgt, buf->kvaddr,
+			buf->dma_addr, buf->size);
+	if (ret < 0) {
+		DRM_ERROR("failed to get sgtable.\n");
 		goto err_free_sgt;
-
-	if (page_size < PAGE_SIZE)
-		page_size = PAGE_SIZE;
-
-	for_each_sg(sgt->sgl, sgl, nr_pages, i)
-		sg_set_page(sgl, pages[i], page_size, 0);
+	}
 
 	return sgt;
 
@@ -68,32 +64,30 @@ static struct sg_table *
 	struct drm_device *dev = gem_obj->base.dev;
 	struct exynos_drm_gem_buf *buf;
 	struct sg_table *sgt = NULL;
-	unsigned int npages;
 	int nents;
 
 	DRM_DEBUG_PRIME("%s\n", __FILE__);
 
-	mutex_lock(&dev->struct_mutex);
-
 	buf = gem_obj->buffer;
-
-	/* there should always be pages allocated. */
-	if (!buf->pages) {
-		DRM_ERROR("pages is null.\n");
-		goto err_unlock;
+	if (!buf) {
+		DRM_ERROR("buffer is null.\n");
+		return sgt;
 	}
 
-	npages = buf->size / buf->page_size;
+	mutex_lock(&dev->struct_mutex);
 
-	sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size);
-	if (!sgt) {
-		DRM_DEBUG_PRIME("exynos_pages_to_sg returned NULL!\n");
+	sgt = exynos_get_sgt(dev, buf);
+	if (!sgt)
 		goto err_unlock;
-	}
+
 	nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+	if (!nents) {
+		DRM_ERROR("failed to map sgl with iommu.\n");
+		sgt = NULL;
+		goto err_unlock;
+	}
 
-	DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n",
-			npages, buf->size, buf->page_size);
+	DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
 
err_unlock:
 	mutex_unlock(&dev->struct_mutex);
@@ -105,6 +99,7 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
 				enum dma_data_direction dir)
 {
 	dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+
 	sg_free_table(sgt);
 	kfree(sgt);
 	sgt = NULL;
@@ -196,7 +191,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
 	struct scatterlist *sgl;
 	struct exynos_drm_gem_obj *exynos_gem_obj;
 	struct exynos_drm_gem_buf *buffer;
-	struct page *page;
 	int ret;
 
 	DRM_DEBUG_PRIME("%s\n", __FILE__);
@@ -233,38 +227,27 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
 		goto err_unmap_attach;
 	}
 
-	buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
-	if (!buffer->pages) {
-		DRM_ERROR("failed to allocate pages.\n");
-		ret = -ENOMEM;
-		goto err_free_buffer;
-	}
-
 	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
 	if (!exynos_gem_obj) {
 		ret = -ENOMEM;
-		goto err_free_pages;
+		goto err_free_buffer;
 	}
 
 	sgl = sgt->sgl;
 
-	if (sgt->nents == 1) {
-		buffer->dma_addr = sg_dma_address(sgt->sgl);
-		buffer->size = sg_dma_len(sgt->sgl);
+	buffer->size = dma_buf->size;
+	buffer->dma_addr = sg_dma_address(sgl);
 
+	if (sgt->nents == 1) {
 		/* always physically continuous memory if sgt->nents is 1. */
 		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
 	} else {
-		unsigned int i = 0;
-
-		buffer->dma_addr = sg_dma_address(sgl);
-		while (i < sgt->nents) {
-			buffer->pages[i] = sg_page(sgl);
-			buffer->size += sg_dma_len(sgl);
-			sgl = sg_next(sgl);
-			i++;
-		}
-
+		/*
+		 * this case could be CONTIG or NONCONTIG type but for now
+		 * sets NONCONTIG.
+		 * TODO. we have to find a way that exporter can notify
+		 * the type of its own buffer to importer.
+		 */
 		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
 	}
 
@@ -277,9 +260,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
 
 	return &exynos_gem_obj->base;
 
-err_free_pages:
-	kfree(buffer->pages);
-	buffer->pages = NULL;
err_free_buffer:
 	kfree(buffer);
 	buffer = NULL;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 1de7baafddd0..2b287d2fc92e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -40,6 +40,7 @@
 #include "exynos_drm_vidi.h"
 #include "exynos_drm_dmabuf.h"
 #include "exynos_drm_g2d.h"
+#include "exynos_drm_iommu.h"
 
 #define DRIVER_NAME	"exynos"
 #define DRIVER_DESC	"Samsung SoC DRM"
@@ -66,6 +67,18 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 	INIT_LIST_HEAD(&private->pageflip_event_list);
 	dev->dev_private = (void *)private;
 
+	/*
+	 * create mapping to manage iommu table and set a pointer to iommu
+	 * mapping structure to iommu_mapping of private data.
+	 * also this iommu_mapping can be used to check if iommu is supported
+	 * or not.
+	 */
+	ret = drm_create_iommu_mapping(dev);
+	if (ret < 0) {
+		DRM_ERROR("failed to create iommu mapping.\n");
+		goto err_crtc;
+	}
+
 	drm_mode_config_init(dev);
 
 	/* init kms poll for handling hpd */
@@ -80,7 +93,7 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 	for (nr = 0; nr < MAX_CRTC; nr++) {
 		ret = exynos_drm_crtc_create(dev, nr);
 		if (ret)
-			goto err_crtc;
+			goto err_release_iommu_mapping;
 	}
 
 	for (nr = 0; nr < MAX_PLANE; nr++) {
@@ -89,12 +102,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 
 		plane = exynos_plane_init(dev, possible_crtcs, false);
 		if (!plane)
-			goto err_crtc;
+			goto err_release_iommu_mapping;
 	}
 
 	ret = drm_vblank_init(dev, MAX_CRTC);
 	if (ret)
-		goto err_crtc;
+		goto err_release_iommu_mapping;
 
 	/*
 	 * probe sub drivers such as display controller and hdmi driver,
@@ -126,6 +139,8 @@ err_drm_device:
 	exynos_drm_device_unregister(dev);
err_vblank:
 	drm_vblank_cleanup(dev);
+err_release_iommu_mapping:
+	drm_release_iommu_mapping(dev);
err_crtc:
 	drm_mode_config_cleanup(dev);
 	kfree(private);
@@ -142,6 +157,8 @@ static int exynos_drm_unload(struct drm_device *dev)
 	drm_vblank_cleanup(dev);
 	drm_kms_helper_poll_fini(dev);
 	drm_mode_config_cleanup(dev);
+
+	drm_release_iommu_mapping(dev);
 	kfree(dev->dev_private);
 
 	dev->dev_private = NULL;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index a34231036496..9c9c2dc75828 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -231,8 +231,7 @@ struct exynos_drm_g2d_private {
 	struct device		*dev;
 	struct list_head	inuse_cmdlist;
 	struct list_head	event_list;
-	struct list_head	gem_list;
-	unsigned int		gem_nr;
+	struct list_head	userptr_list;
 };
 
 struct drm_exynos_file_private {
@@ -241,6 +240,13 @@ struct drm_exynos_file_private {
 
 /*
  * Exynos drm private structure.
+ *
+ * @da_start: start address to device address space.
+ *	with iommu, device address space starts from this address
+ *	otherwise default one.
+ * @da_space_size: size of device address space.
+ *	if 0 then default value is used for it.
+ * @da_space_order: order to device address space.
  */
 struct exynos_drm_private {
 	struct drm_fb_helper *fb_helper;
@@ -255,6 +261,10 @@ struct exynos_drm_private {
 	struct drm_crtc *crtc[MAX_CRTC];
 	struct drm_property *plane_zpos_property;
 	struct drm_property *crtc_mode_property;
+
+	unsigned long da_start;
+	unsigned long da_space_size;
+	unsigned long da_space_order;
 };
 
 /*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index 241ad1eeec64..d9afb11aac76 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -226,8 +226,47 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
 	 * already updated or not by exynos_drm_encoder_dpms function.
 	 */
 	exynos_encoder->updated = true;
+
+	/*
+	 * In case of setcrtc, there is no way to update encoder's dpms
+	 * so update it here.
+	 */
+	exynos_encoder->dpms = DRM_MODE_DPMS_ON;
+}
+
+void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb)
+{
+	struct exynos_drm_encoder *exynos_encoder;
+	struct exynos_drm_overlay_ops *overlay_ops;
+	struct exynos_drm_manager *manager;
+	struct drm_device *dev = fb->dev;
+	struct drm_encoder *encoder;
+
+	/*
+	 * make sure that overlay data are updated to real hardware
+	 * for all encoders.
+	 */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		exynos_encoder = to_exynos_encoder(encoder);
+
+		/* if exynos was disabled, just ignore it. */
+		if (exynos_encoder->dpms > DRM_MODE_DPMS_ON)
+			continue;
+
+		manager = exynos_encoder->manager;
+		overlay_ops = manager->overlay_ops;
+
+		/*
+		 * wait for vblank interrupt
+		 * - this makes sure that overlay data are updated to
+		 *   real hardware.
+		 */
+		if (overlay_ops->wait_for_vblank)
+			overlay_ops->wait_for_vblank(manager->dev);
+	}
 }
 
+
 static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
 {
 	struct drm_plane *plane;
@@ -499,14 +538,4 @@ void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data)
 
 	if (overlay_ops && overlay_ops->disable)
 		overlay_ops->disable(manager->dev, zpos);
-
-	/*
-	 * wait for vblank interrupt
-	 * - this makes sure that hardware overlay is disabled to avoid
-	 * for the dma accesses to memory after gem buffer was released
-	 * because the setting for disabling the overlay will be updated
-	 * at vsync.
-	 */
-	if (overlay_ops->wait_for_vblank)
-		overlay_ops->wait_for_vblank(manager->dev);
 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index 6470d9ddf5a1..88bb25a2a917 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -46,5 +46,6 @@ void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb);
 
 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 4ef4cd3f9936..7413f4b729b0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -30,10 +30,13 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <uapi/drm/exynos_drm.h>
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
+#include "exynos_drm_encoder.h"
 
 #define to_exynos_fb(x)	container_of(x, struct exynos_drm_fb, fb)
 
@@ -50,6 +53,32 @@ struct exynos_drm_fb {
 	struct exynos_drm_gem_obj	*exynos_gem_obj[MAX_FB_BUFFER];
 };
 
+static int check_fb_gem_memory_type(struct drm_device *drm_dev,
+				struct exynos_drm_gem_obj *exynos_gem_obj)
+{
+	unsigned int flags;
+
+	/*
+	 * if exynos drm driver supports iommu then framebuffer can use
+	 * all the buffer types.
+	 */
+	if (is_drm_iommu_supported(drm_dev))
+		return 0;
+
+	/*
+	 * without iommu support, physically non-continuous memory
+	 * is not supported for framebuffer.
+	 */
+	flags = exynos_gem_obj->flags;
+
+	if (IS_NONCONTIG_BUFFER(flags)) {
+		DRM_ERROR("cannot use this gem memory type for fb.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
 {
 	struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
@@ -57,6 +86,9 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
+	/* make sure that overlay data are updated before releasing fb. */
+	exynos_drm_encoder_complete_scanout(fb);
+
 	drm_framebuffer_cleanup(fb);
 
 	for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) {
@@ -128,14 +160,25 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
 			    struct drm_gem_object *obj)
 {
 	struct exynos_drm_fb *exynos_fb;
+	struct exynos_drm_gem_obj *exynos_gem_obj;
 	int ret;
 
+	exynos_gem_obj = to_exynos_gem_obj(obj);
+
+	ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+	if (ret < 0) {
+		DRM_ERROR("cannot use this gem memory type for fb.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
 	exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
 	if (!exynos_fb) {
 		DRM_ERROR("failed to allocate exynos drm framebuffer\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
+	exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
+
 	ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
 	if (ret) {
 		DRM_ERROR("failed to initialize framebuffer\n");
@@ -143,7 +186,6 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
 	}
 
 	drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
-	exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
 
 	return &exynos_fb->fb;
 }
@@ -214,6 +256,9 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 	DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
 
 	for (i = 1; i < exynos_fb->buf_cnt; i++) {
+		struct exynos_drm_gem_obj *exynos_gem_obj;
+		int ret;
+
 		obj = drm_gem_object_lookup(dev, file_priv,
 				mode_cmd->handles[i]);
 		if (!obj) {
@@ -222,6 +267,15 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 			return ERR_PTR(-ENOENT);
 		}
 
+		exynos_gem_obj = to_exynos_gem_obj(obj);
+
+		ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+		if (ret < 0) {
+			DRM_ERROR("cannot use this gem memory type for fb.\n");
+			exynos_drm_fb_destroy(fb);
+			return ERR_PTR(ret);
+		}
+
 		exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
 	}
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 67eb6ba56edf..a2232792e0c0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -46,8 +46,38 @@ struct exynos_drm_fbdev {
 	struct exynos_drm_gem_obj	*exynos_gem_obj;
 };
 
+static int exynos_drm_fb_mmap(struct fb_info *info,
+			struct vm_area_struct *vma)
+{
+	struct drm_fb_helper *helper = info->par;
+	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
+	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
+	struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
+	unsigned long vm_size;
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+
+	vm_size = vma->vm_end - vma->vm_start;
+
+	if (vm_size > buffer->size)
+		return -EINVAL;
+
+	ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->kvaddr,
+		buffer->dma_addr, buffer->size, &buffer->dma_attrs);
+	if (ret < 0) {
+		DRM_ERROR("failed to mmap.\n");
+		return ret;
+	}
+
+	return 0;
+}
+
 static struct fb_ops exynos_drm_fb_ops = {
 	.owner		= THIS_MODULE,
+	.fb_mmap	= exynos_drm_fb_mmap,
 	.fb_fillrect	= cfb_fillrect,
 	.fb_copyarea	= cfb_copyarea,
 	.fb_imageblit	= cfb_imageblit,
@@ -87,7 +117,8 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
 
 	dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
 	fbi->screen_base = buffer->kvaddr + offset;
-	fbi->fix.smem_start = (unsigned long)(buffer->dma_addr + offset);
+	fbi->fix.smem_start = (unsigned long)
+			(page_to_phys(sg_page(buffer->sgt->sgl)) + offset);
 	fbi->screen_size = size;
 	fbi->fix.smem_len = size;
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 130a2b510d4a..00bd266a31bb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -25,6 +25,7 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fbdev.h"
 #include "exynos_drm_crtc.h"
+#include "exynos_drm_iommu.h"
 
 /*
  * FIMD is stand for Fully Interactive Mobile Display and
@@ -61,11 +62,11 @@ struct fimd_driver_data {
 	unsigned int timing_base;
 };
 
-struct fimd_driver_data exynos4_fimd_driver_data = {
+static struct fimd_driver_data exynos4_fimd_driver_data = {
 	.timing_base = 0x0,
 };
 
-struct fimd_driver_data exynos5_fimd_driver_data = {
+static struct fimd_driver_data exynos5_fimd_driver_data = {
 	.timing_base = 0x20000,
 };
 
@@ -623,7 +624,6 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
 	struct drm_pending_vblank_event *e, *t;
 	struct timeval now;
 	unsigned long flags;
-	bool is_checked = false;
 
 	spin_lock_irqsave(&drm_dev->event_lock, flags);
 
@@ -633,8 +633,6 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
 		if (crtc != e->pipe)
 			continue;
 
-		is_checked = true;
-
 		do_gettimeofday(&now);
 		e->event.sequence = 0;
 		e->event.tv_sec = now.tv_sec;
@@ -642,22 +640,7 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
 
 		list_move_tail(&e->base.link, &e->base.file_priv->event_list);
 		wake_up_interruptible(&e->base.file_priv->event_wait);
-	}
-
-	if (is_checked) {
-		/*
-		 * call drm_vblank_put only in case that drm_vblank_get was
-		 * called.
-		 */
-		if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
-			drm_vblank_put(drm_dev, crtc);
-
-		/*
-		 * don't off vblank if vblank_disable_allowed is 1,
-		 * because vblank would be off by timer handler.
-		 */
-		if (!drm_dev->vblank_disable_allowed)
-			drm_vblank_off(drm_dev, crtc);
+		drm_vblank_put(drm_dev, crtc);
 	}
 
 	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
@@ -709,6 +692,10 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
 	 */
 	drm_dev->vblank_disable_allowed = 1;
 
+	/* attach this sub driver to iommu mapping if supported. */
+	if (is_drm_iommu_supported(drm_dev))
+		drm_iommu_attach_device(drm_dev, dev);
+
 	return 0;
 }
 
@@ -716,7 +703,9 @@ static void fimd_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
 {
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	/* TODO. */
+	/* detach this sub driver from iommu mapping if supported. */
+	if (is_drm_iommu_supported(drm_dev))
+		drm_iommu_detach_device(drm_dev, dev);
 }
 
 static int fimd_calc_clkdiv(struct fimd_context *ctx,
@@ -857,18 +846,16 @@ static int __devinit fimd_probe(struct platform_device *pdev)
 	if (!ctx)
 		return -ENOMEM;
 
-	ctx->bus_clk = clk_get(dev, "fimd");
+	ctx->bus_clk = devm_clk_get(dev, "fimd");
 	if (IS_ERR(ctx->bus_clk)) {
 		dev_err(dev, "failed to get bus clock\n");
-		ret = PTR_ERR(ctx->bus_clk);
-		goto err_clk_get;
+		return PTR_ERR(ctx->bus_clk);
 	}
 
-	ctx->lcd_clk = clk_get(dev, "sclk_fimd");
+	ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
 	if (IS_ERR(ctx->lcd_clk)) {
 		dev_err(dev, "failed to get lcd clock\n");
-		ret = PTR_ERR(ctx->lcd_clk);
-		goto err_bus_clk;
+		return PTR_ERR(ctx->lcd_clk);
 	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -876,14 +863,13 @@ static int __devinit fimd_probe(struct platform_device *pdev)
 	ctx->regs = devm_request_and_ioremap(&pdev->dev, res);
 	if (!ctx->regs) {
 		dev_err(dev, "failed to map registers\n");
-		ret = -ENXIO;
-		goto err_clk;
+		return -ENXIO;
 	}
 
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	if (!res) {
 		dev_err(dev, "irq request failed.\n");
-		goto err_clk;
+		return -ENXIO;
 	}
 
 	ctx->irq = res->start;
@@ -892,7 +878,7 @@ static int __devinit fimd_probe(struct platform_device *pdev)
 			0, "drm_fimd", ctx);
 	if (ret) {
 		dev_err(dev, "irq request failed.\n");
-		goto err_clk;
+		return ret;
 	}
 
 	ctx->vidcon0 = pdata->vidcon0;
@@ -926,17 +912,6 @@ static int __devinit fimd_probe(struct platform_device *pdev)
 	exynos_drm_subdrv_register(subdrv);
 
 	return 0;
-
-err_clk:
-	clk_disable(ctx->lcd_clk);
-	clk_put(ctx->lcd_clk);
-
-err_bus_clk:
-	clk_disable(ctx->bus_clk);
-	clk_put(ctx->bus_clk);
-
-err_clk_get:
-	return ret;
 }
 
 static int __devexit fimd_remove(struct platform_device *pdev)
@@ -960,9 +935,6 @@ static int __devexit fimd_remove(struct platform_device *pdev)
out:
 	pm_runtime_disable(dev);
 
-	clk_put(ctx->lcd_clk);
-	clk_put(ctx->bus_clk);
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index f7aab24ea46c..6ffa0763c078 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -17,11 +17,14 @@
17#include <linux/pm_runtime.h> 17#include <linux/pm_runtime.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/workqueue.h> 19#include <linux/workqueue.h>
20#include <linux/dma-mapping.h>
21#include <linux/dma-attrs.h>
20 22
21#include <drm/drmP.h> 23#include <drm/drmP.h>
22#include <drm/exynos_drm.h> 24#include <drm/exynos_drm.h>
23#include "exynos_drm_drv.h" 25#include "exynos_drm_drv.h"
24#include "exynos_drm_gem.h" 26#include "exynos_drm_gem.h"
27#include "exynos_drm_iommu.h"
25 28
26#define G2D_HW_MAJOR_VER 4 29#define G2D_HW_MAJOR_VER 4
27#define G2D_HW_MINOR_VER 1 30#define G2D_HW_MINOR_VER 1
@@ -92,11 +95,21 @@
92#define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM) 95#define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
93#define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2) 96#define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
94 97
98#define MAX_BUF_ADDR_NR 6
99
100/* maximum buffer pool size of userptr is 64MB as default */
101#define MAX_POOL (64 * 1024 * 1024)
102
103enum {
104 BUF_TYPE_GEM = 1,
105 BUF_TYPE_USERPTR,
106};
107
95/* cmdlist data structure */ 108/* cmdlist data structure */
96struct g2d_cmdlist { 109struct g2d_cmdlist {
97 u32 head; 110 u32 head;
98 u32 data[G2D_CMDLIST_DATA_NUM]; 111 unsigned long data[G2D_CMDLIST_DATA_NUM];
99 u32 last; /* last data offset */ 112 u32 last; /* last data offset */
100}; 113};
101 114
102struct drm_exynos_pending_g2d_event { 115struct drm_exynos_pending_g2d_event {
@@ -104,15 +117,26 @@ struct drm_exynos_pending_g2d_event {
104 struct drm_exynos_g2d_event event; 117 struct drm_exynos_g2d_event event;
105}; 118};
106 119
107struct g2d_gem_node { 120struct g2d_cmdlist_userptr {
108 struct list_head list; 121 struct list_head list;
109 unsigned int handle; 122 dma_addr_t dma_addr;
123 unsigned long userptr;
124 unsigned long size;
125 struct page **pages;
126 unsigned int npages;
127 struct sg_table *sgt;
128 struct vm_area_struct *vma;
129 atomic_t refcount;
130 bool in_pool;
131 bool out_of_list;
110}; 132};
111 133
112struct g2d_cmdlist_node { 134struct g2d_cmdlist_node {
113 struct list_head list; 135 struct list_head list;
114 struct g2d_cmdlist *cmdlist; 136 struct g2d_cmdlist *cmdlist;
115 unsigned int gem_nr; 137 unsigned int map_nr;
138 unsigned long handles[MAX_BUF_ADDR_NR];
139 unsigned int obj_type[MAX_BUF_ADDR_NR];
116 dma_addr_t dma_addr; 140 dma_addr_t dma_addr;
117 141
118 struct drm_exynos_pending_g2d_event *event; 142 struct drm_exynos_pending_g2d_event *event;
@@ -122,6 +146,7 @@ struct g2d_runqueue_node {
122 struct list_head list; 146 struct list_head list;
123 struct list_head run_cmdlist; 147 struct list_head run_cmdlist;
124 struct list_head event_list; 148 struct list_head event_list;
149 struct drm_file *filp;
125 pid_t pid; 150 pid_t pid;
126 struct completion complete; 151 struct completion complete;
127 int async; 152 int async;
@@ -143,23 +168,33 @@ struct g2d_data {
143 struct mutex cmdlist_mutex; 168 struct mutex cmdlist_mutex;
144 dma_addr_t cmdlist_pool; 169 dma_addr_t cmdlist_pool;
145 void *cmdlist_pool_virt; 170 void *cmdlist_pool_virt;
171 struct dma_attrs cmdlist_dma_attrs;
146 172
147 /* runqueue*/ 173 /* runqueue*/
148 struct g2d_runqueue_node *runqueue_node; 174 struct g2d_runqueue_node *runqueue_node;
149 struct list_head runqueue; 175 struct list_head runqueue;
150 struct mutex runqueue_mutex; 176 struct mutex runqueue_mutex;
151 struct kmem_cache *runqueue_slab; 177 struct kmem_cache *runqueue_slab;
178
179 unsigned long current_pool;
180 unsigned long max_pool;
152}; 181};
153 182
154static int g2d_init_cmdlist(struct g2d_data *g2d) 183static int g2d_init_cmdlist(struct g2d_data *g2d)
155{ 184{
156 struct device *dev = g2d->dev; 185 struct device *dev = g2d->dev;
157 struct g2d_cmdlist_node *node = g2d->cmdlist_node; 186 struct g2d_cmdlist_node *node = g2d->cmdlist_node;
187 struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
158 int nr; 188 int nr;
159 int ret; 189 int ret;
160 190
161 g2d->cmdlist_pool_virt = dma_alloc_coherent(dev, G2D_CMDLIST_POOL_SIZE, 191 init_dma_attrs(&g2d->cmdlist_dma_attrs);
162 &g2d->cmdlist_pool, GFP_KERNEL); 192 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);
193
194 g2d->cmdlist_pool_virt = dma_alloc_attrs(subdrv->drm_dev->dev,
195 G2D_CMDLIST_POOL_SIZE,
196 &g2d->cmdlist_pool, GFP_KERNEL,
197 &g2d->cmdlist_dma_attrs);
163 if (!g2d->cmdlist_pool_virt) { 198 if (!g2d->cmdlist_pool_virt) {
164 dev_err(dev, "failed to allocate dma memory\n"); 199 dev_err(dev, "failed to allocate dma memory\n");
165 return -ENOMEM; 200 return -ENOMEM;
@@ -184,18 +219,20 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
184 return 0; 219 return 0;
185 220
186err: 221err:
187 dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt, 222 dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
188 g2d->cmdlist_pool); 223 g2d->cmdlist_pool_virt,
224 g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
189 return ret; 225 return ret;
190} 226}
191 227
192static void g2d_fini_cmdlist(struct g2d_data *g2d) 228static void g2d_fini_cmdlist(struct g2d_data *g2d)
193{ 229{
194 struct device *dev = g2d->dev; 230 struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
195 231
196 kfree(g2d->cmdlist_node); 232 kfree(g2d->cmdlist_node);
197 dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt, 233 dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
198 g2d->cmdlist_pool); 234 g2d->cmdlist_pool_virt,
235 g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
199} 236}
200 237
201static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d) 238static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
@@ -245,62 +282,300 @@ add_to_list:
245 list_add_tail(&node->event->base.link, &g2d_priv->event_list); 282 list_add_tail(&node->event->base.link, &g2d_priv->event_list);
246} 283}
247 284
248static int g2d_get_cmdlist_gem(struct drm_device *drm_dev, 285static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
249 struct drm_file *file, 286 unsigned long obj,
250 struct g2d_cmdlist_node *node) 287 bool force)
251{ 288{
252 struct drm_exynos_file_private *file_priv = file->driver_priv; 289 struct g2d_cmdlist_userptr *g2d_userptr =
290 (struct g2d_cmdlist_userptr *)obj;
291
292 if (!obj)
293 return;
294
295 if (force)
296 goto out;
297
298 atomic_dec(&g2d_userptr->refcount);
299
300 if (atomic_read(&g2d_userptr->refcount) > 0)
301 return;
302
303 if (g2d_userptr->in_pool)
304 return;
305
306out:
307 exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
308 DMA_BIDIRECTIONAL);
309
310 exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
311 g2d_userptr->npages,
312 g2d_userptr->vma);
313
314 if (!g2d_userptr->out_of_list)
315 list_del_init(&g2d_userptr->list);
316
317 sg_free_table(g2d_userptr->sgt);
318 kfree(g2d_userptr->sgt);
319 g2d_userptr->sgt = NULL;
320
321 kfree(g2d_userptr->pages);
322 g2d_userptr->pages = NULL;
323 kfree(g2d_userptr);
324 g2d_userptr = NULL;
325}
326
327dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
328 unsigned long userptr,
329 unsigned long size,
330 struct drm_file *filp,
331 unsigned long *obj)
332{
333 struct drm_exynos_file_private *file_priv = filp->driver_priv;
334 struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
335 struct g2d_cmdlist_userptr *g2d_userptr;
336 struct g2d_data *g2d;
337 struct page **pages;
338 struct sg_table *sgt;
339 struct vm_area_struct *vma;
340 unsigned long start, end;
341 unsigned int npages, offset;
342 int ret;
343
344 if (!size) {
345 DRM_ERROR("invalid userptr size.\n");
346 return ERR_PTR(-EINVAL);
347 }
348
349 g2d = dev_get_drvdata(g2d_priv->dev);
350
351 /* check if userptr already exists in userptr_list. */
352 list_for_each_entry(g2d_userptr, &g2d_priv->userptr_list, list) {
353 if (g2d_userptr->userptr == userptr) {
354 /*
355 * also check size because there could be same address
356 * and different size.
357 */
358 if (g2d_userptr->size == size) {
359 atomic_inc(&g2d_userptr->refcount);
360 *obj = (unsigned long)g2d_userptr;
361
362 return &g2d_userptr->dma_addr;
363 }
364
365 /*
366 * at this moment, maybe g2d dma is accessing this
367 * g2d_userptr memory region so just remove this
368 * g2d_userptr object from userptr_list not to be
369 * referred again and also except it the userptr
370 * pool to be released after the dma access completion.
371 */
372 g2d_userptr->out_of_list = true;
373 g2d_userptr->in_pool = false;
374 list_del_init(&g2d_userptr->list);
375
376 break;
377 }
378 }
379
380 g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
381 if (!g2d_userptr) {
382 DRM_ERROR("failed to allocate g2d_userptr.\n");
383 return ERR_PTR(-ENOMEM);
384 }
385
386 atomic_set(&g2d_userptr->refcount, 1);
387
388 start = userptr & PAGE_MASK;
389 offset = userptr & ~PAGE_MASK;
390 end = PAGE_ALIGN(userptr + size);
391 npages = (end - start) >> PAGE_SHIFT;
392 g2d_userptr->npages = npages;
393
394 pages = kzalloc(npages * sizeof(struct page *), GFP_KERNEL);
395 if (!pages) {
396 DRM_ERROR("failed to allocate pages.\n");
397 kfree(g2d_userptr);
398 return ERR_PTR(-ENOMEM);
399 }
400
401 vma = find_vma(current->mm, userptr);
402 if (!vma) {
403 DRM_ERROR("failed to get vm region.\n");
404 ret = -EFAULT;
405 goto err_free_pages;
406 }
407
408 if (vma->vm_end < userptr + size) {
409 DRM_ERROR("vma is too small.\n");
410 ret = -EFAULT;
411 goto err_free_pages;
412 }
413
414 g2d_userptr->vma = exynos_gem_get_vma(vma);
415 if (!g2d_userptr->vma) {
416 DRM_ERROR("failed to copy vma.\n");
417 ret = -ENOMEM;
418 goto err_free_pages;
419 }
420
421 g2d_userptr->size = size;
422
423 ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK,
424 npages, pages, vma);
425 if (ret < 0) {
426 DRM_ERROR("failed to get user pages from userptr.\n");
427 goto err_put_vma;
428 }
429
430 g2d_userptr->pages = pages;
431
432 sgt = kzalloc(sizeof *sgt, GFP_KERNEL);
433 if (!sgt) {
434 DRM_ERROR("failed to allocate sg table.\n");
435 ret = -ENOMEM;
436 goto err_free_userptr;
437 }
438
439 ret = sg_alloc_table_from_pages(sgt, pages, npages, offset,
440 size, GFP_KERNEL);
441 if (ret < 0) {
442 DRM_ERROR("failed to get sgt from pages.\n");
443 goto err_free_sgt;
444 }
445
446 g2d_userptr->sgt = sgt;
447
448 ret = exynos_gem_map_sgt_with_dma(drm_dev, g2d_userptr->sgt,
449 DMA_BIDIRECTIONAL);
450 if (ret < 0) {
451 DRM_ERROR("failed to map sgt with dma region.\n");
452 goto err_free_sgt;
453 }
454
455 g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
456 g2d_userptr->userptr = userptr;
457
458 list_add_tail(&g2d_userptr->list, &g2d_priv->userptr_list);
459
460 if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
461 g2d->current_pool += npages << PAGE_SHIFT;
462 g2d_userptr->in_pool = true;
463 }
464
465 *obj = (unsigned long)g2d_userptr;
466
467 return &g2d_userptr->dma_addr;
468
469err_free_sgt:
470 sg_free_table(sgt);
471 kfree(sgt);
472 sgt = NULL;
473
474err_free_userptr:
475 exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
476 g2d_userptr->npages,
477 g2d_userptr->vma);
478
479err_put_vma:
480 exynos_gem_put_vma(g2d_userptr->vma);
481
482err_free_pages:
483 kfree(pages);
484 kfree(g2d_userptr);
485 pages = NULL;
486 g2d_userptr = NULL;
487
488 return ERR_PTR(ret);
489}
490
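The start/offset/npages arithmetic above converts an arbitrary user pointer into a page-aligned window plus an in-page offset. A minimal standalone sketch of the same computation (the PAGE_* macros are re-defined here for a 4 KiB page so the snippet builds outside the kernel):

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PAGE_MASK	(~(PAGE_SIZE - 1))
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

	int main(void)
	{
		unsigned long userptr = 0x10002345;	/* arbitrary user address */
		unsigned long size = 0x3000;		/* 12 KiB request */

		unsigned long start = userptr & PAGE_MASK;	    /* 0x10002000 */
		unsigned long offset = userptr & ~PAGE_MASK;	    /* 0x345 */
		unsigned long end = PAGE_ALIGN(userptr + size);	    /* 0x10006000 */
		unsigned long npages = (end - start) >> PAGE_SHIFT; /* 4 pages */

		printf("start=%#lx offset=%#lx npages=%lu\n", start, offset, npages);
		return 0;
	}
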
491static void g2d_userptr_free_all(struct drm_device *drm_dev,
492 struct g2d_data *g2d,
493 struct drm_file *filp)
494{
495 struct drm_exynos_file_private *file_priv = filp->driver_priv;
253 496	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
497 struct g2d_cmdlist_userptr *g2d_userptr, *n;
498
499 list_for_each_entry_safe(g2d_userptr, n, &g2d_priv->userptr_list, list)
500 if (g2d_userptr->in_pool)
501 g2d_userptr_put_dma_addr(drm_dev,
502 (unsigned long)g2d_userptr,
503 true);
504
505 g2d->current_pool = 0;
506}
507
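userptr_list above behaves as a small refcounted cache keyed by (address, size): a hit bumps the refcount and shares the existing mapping, while a size mismatch evicts the stale entry. The lookup idiom in isolation (a hypothetical fixed-size standalone cache, not the driver's types):

	#include <stdio.h>
	#include <string.h>

	struct entry {
		unsigned long userptr, size;
		int refcount;
		int valid;
	};

	static struct entry cache[8];

	/* return a cached entry for (userptr, size), or NULL on miss */
	static struct entry *cache_lookup(unsigned long userptr, unsigned long size)
	{
		unsigned int i;

		for (i = 0; i < 8; i++) {
			if (!cache[i].valid || cache[i].userptr != userptr)
				continue;
			if (cache[i].size == size) {
				cache[i].refcount++;	/* hit: share the mapping */
				return &cache[i];
			}
			cache[i].valid = 0;	/* same address, new size: evict */
		}
		return NULL;
	}

	int main(void)
	{
		memset(cache, 0, sizeof(cache));
		cache[0] = (struct entry){ 0x1000, 0x2000, 1, 1 };

		printf("hit: %p\n", (void *)cache_lookup(0x1000, 0x2000));
		printf("miss after size change: %p\n", (void *)cache_lookup(0x1000, 0x3000));
		return 0;
	}
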
508static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
509 struct g2d_cmdlist_node *node,
510 struct drm_device *drm_dev,
511 struct drm_file *file)
512{
254 513		struct g2d_cmdlist *cmdlist = node->cmdlist;
-255		dma_addr_t *addr;
256 514		int offset;
257 515		int i;
258 516
-259		for (i = 0; i < node->gem_nr; i++) {
-260			struct g2d_gem_node *gem_node;
-261
-262			gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL);
-263			if (!gem_node) {
-264				dev_err(g2d_priv->dev, "failed to allocate gem node\n");
-265				return -ENOMEM;
-266			}
+517		for (i = 0; i < node->map_nr; i++) {
+518			unsigned long handle;
+519			dma_addr_t *addr;
267 520
268 521			offset = cmdlist->last - (i * 2 + 1);
-269			gem_node->handle = cmdlist->data[offset];
+522			handle = cmdlist->data[offset];
270 523
-271			addr = exynos_drm_gem_get_dma_addr(drm_dev, gem_node->handle,
-272							   file);
-273			if (IS_ERR(addr)) {
-274				node->gem_nr = i;
-275				kfree(gem_node);
-276				return PTR_ERR(addr);
+524			if (node->obj_type[i] == BUF_TYPE_GEM) {
+525				addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
+526								   file);
+527				if (IS_ERR(addr)) {
+528					node->map_nr = i;
+529					return -EFAULT;
+530				}
+531			} else {
+532				struct drm_exynos_g2d_userptr g2d_userptr;
+533
+534				if (copy_from_user(&g2d_userptr, (void __user *)handle,
+535					sizeof(struct drm_exynos_g2d_userptr))) {
+536					node->map_nr = i;
+537					return -EFAULT;
+538				}
+539
+540				addr = g2d_userptr_get_dma_addr(drm_dev,
+541							g2d_userptr.userptr,
+542							g2d_userptr.size,
+543							file,
+544							&handle);
+545				if (IS_ERR(addr)) {
+546					node->map_nr = i;
+547					return -EFAULT;
+548				}
277 549			}
278 550
279 551			cmdlist->data[offset] = *addr;
-280			list_add_tail(&gem_node->list, &g2d_priv->gem_list);
-281			g2d_priv->gem_nr++;
+552			node->handles[i] = handle;
282 553		}
283 554
284 555		return 0;
285 556	}
286 557
-287	static void g2d_put_cmdlist_gem(struct drm_device *drm_dev,
-288					struct drm_file *file,
-289					unsigned int nr)
+558	static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
+559					  struct g2d_cmdlist_node *node,
+560					  struct drm_file *filp)
290 561	{
-291		struct drm_exynos_file_private *file_priv = file->driver_priv;
-292		struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
-293		struct g2d_gem_node *node, *n;
+562		struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
+563		int i;
294 564
-295		list_for_each_entry_safe_reverse(node, n, &g2d_priv->gem_list, list) {
-296			if (!nr)
-297				break;
-298
-299			exynos_drm_gem_put_dma_addr(drm_dev, node->handle, file);
-300			list_del_init(&node->list);
-301			kfree(node);
-302			nr--;
+565		for (i = 0; i < node->map_nr; i++) {
+566			unsigned long handle = node->handles[i];
+567
+568			if (node->obj_type[i] == BUF_TYPE_GEM)
+569				exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
+570							    filp);
+571			else
+572				g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
+573							 false);
+574
+575			node->handles[i] = 0;
303 576		}
+577
+578		node->map_nr = 0;
304 579	}
305 580
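offset = cmdlist->last - (i * 2 + 1) works because buffer commands are appended to the command list as (register, value) pairs just ahead of G2D_BITBLT_START, so the i-th buffer value is found by walking back from the end. A standalone illustration (the register offsets are made-up placeholders):

	#include <stdio.h>

	int main(void)
	{
		/* flat (register, value) pairs; buffer commands appended last */
		unsigned long data[] = { 0x100, 0x1, 0x304, 0xaaaa, 0x308, 0xbbbb };
		int last = 6;	/* one past the final entry, as in cmdlist->last */
		int i;

		for (i = 0; i < 2; i++) {
			int offset = last - (i * 2 + 1);
			printf("buffer %d: reg %#lx value %#lx\n",
			       i, data[offset - 1], data[offset]);
		}
		return 0;
	}
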
306static void g2d_dma_start(struct g2d_data *g2d, 581static void g2d_dma_start(struct g2d_data *g2d,
@@ -337,10 +612,18 @@ static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
337static void g2d_free_runqueue_node(struct g2d_data *g2d, 612static void g2d_free_runqueue_node(struct g2d_data *g2d,
338 struct g2d_runqueue_node *runqueue_node) 613 struct g2d_runqueue_node *runqueue_node)
339{ 614{
615 struct g2d_cmdlist_node *node;
616
340 if (!runqueue_node) 617 if (!runqueue_node)
341 return; 618 return;
342 619
343 mutex_lock(&g2d->cmdlist_mutex); 620 mutex_lock(&g2d->cmdlist_mutex);
621 /*
622	 * the commands in run_cmdlist have completed, so unmap all gem
623	 * objects in each command node to drop their references.
624 */
625 list_for_each_entry(node, &runqueue_node->run_cmdlist, list)
626 g2d_unmap_cmdlist_gem(g2d, node, runqueue_node->filp);
344 list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist); 627 list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
345 mutex_unlock(&g2d->cmdlist_mutex); 628 mutex_unlock(&g2d->cmdlist_mutex);
346 629
@@ -430,15 +713,28 @@ static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
430 return IRQ_HANDLED; 713 return IRQ_HANDLED;
431} 714}
432 715
-433	static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
+716	static int g2d_check_reg_offset(struct device *dev,
+717					struct g2d_cmdlist_node *node,
434 718					int nr, bool for_addr)
435{ 719{
720 struct g2d_cmdlist *cmdlist = node->cmdlist;
436 int reg_offset; 721 int reg_offset;
437 int index; 722 int index;
438 int i; 723 int i;
439 724
440 for (i = 0; i < nr; i++) { 725 for (i = 0; i < nr; i++) {
441 index = cmdlist->last - 2 * (i + 1); 726 index = cmdlist->last - 2 * (i + 1);
727
728 if (for_addr) {
729 /* check userptr buffer type. */
730 reg_offset = (cmdlist->data[index] &
731 ~0x7fffffff) >> 31;
732 if (reg_offset) {
733 node->obj_type[i] = BUF_TYPE_USERPTR;
734 cmdlist->data[index] &= ~G2D_BUF_USERPTR;
735 }
736 }
737
442 reg_offset = cmdlist->data[index] & ~0xfffff000; 738 reg_offset = cmdlist->data[index] & ~0xfffff000;
443 739
444 if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END) 740 if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
@@ -455,6 +751,9 @@ static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
455 case G2D_MSK_BASE_ADDR: 751 case G2D_MSK_BASE_ADDR:
456 if (!for_addr) 752 if (!for_addr)
457 goto err; 753 goto err;
754
755 if (node->obj_type[i] != BUF_TYPE_USERPTR)
756 node->obj_type[i] = BUF_TYPE_GEM;
458 break; 757 break;
459 default: 758 default:
460 if (for_addr) 759 if (for_addr)
@@ -466,7 +765,7 @@ static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
466 return 0; 765 return 0;
467 766
468err: 767err:
-469		dev_err(dev, "Bad register offset: 0x%x\n", cmdlist->data[index]);
+768		dev_err(dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
470 return -EINVAL; 769 return -EINVAL;
471} 770}
472 771
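The (cmdlist->data[index] & ~0x7fffffff) >> 31 expression above simply isolates bit 31, which this patch uses as the userptr tag on address commands. The same test written with an explicit mask (assuming G2D_BUF_USERPTR is bit 31, as the clearing line above implies):

	#include <stdio.h>

	#define G2D_BUF_USERPTR		(1U << 31)	/* assumed tag bit */

	int main(void)
	{
		unsigned int word = G2D_BUF_USERPTR | 0x304;	/* tagged register */

		if (word & G2D_BUF_USERPTR) {
			word &= ~G2D_BUF_USERPTR;	/* strip the tag bit */
			printf("userptr command, reg offset %#x\n", word);
		}
		return 0;
	}
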
@@ -566,7 +865,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
566 865	}
567 866
568 867	/* Check size of cmdlist: last 2 is about G2D_BITBLT_START */
-569		size = cmdlist->last + req->cmd_nr * 2 + req->cmd_gem_nr * 2 + 2;
+868		size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
570 869	if (size > G2D_CMDLIST_DATA_NUM) {
571 870		dev_err(dev, "cmdlist size is too big\n");
572 871		ret = -EINVAL;
@@ -583,29 +882,29 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
583 882	}
584 883	cmdlist->last += req->cmd_nr * 2;
585 884
-586		ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_nr, false);
+885		ret = g2d_check_reg_offset(dev, node, req->cmd_nr, false);
587 886	if (ret < 0)
588 887		goto err_free_event;
589 888
-590		node->gem_nr = req->cmd_gem_nr;
-591		if (req->cmd_gem_nr) {
-592			struct drm_exynos_g2d_cmd *cmd_gem;
+889		node->map_nr = req->cmd_buf_nr;
+890		if (req->cmd_buf_nr) {
+891			struct drm_exynos_g2d_cmd *cmd_buf;
593 892
-594			cmd_gem = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_gem;
+893			cmd_buf = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_buf;
595 894
596 895		if (copy_from_user(cmdlist->data + cmdlist->last,
-597					(void __user *)cmd_gem,
-598					sizeof(*cmd_gem) * req->cmd_gem_nr)) {
+896					(void __user *)cmd_buf,
+897					sizeof(*cmd_buf) * req->cmd_buf_nr)) {
599 898			ret = -EFAULT;
600 899			goto err_free_event;
601 900		}
-602			cmdlist->last += req->cmd_gem_nr * 2;
+901			cmdlist->last += req->cmd_buf_nr * 2;
603 902
-604			ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_gem_nr, true);
+903			ret = g2d_check_reg_offset(dev, node, req->cmd_buf_nr, true);
605 904		if (ret < 0)
606 905			goto err_free_event;
607 906
-608			ret = g2d_get_cmdlist_gem(drm_dev, file, node);
+907			ret = g2d_map_cmdlist_gem(g2d, node, drm_dev, file);
609 908		if (ret < 0)
610 909			goto err_unmap;
611 910	}
@@ -624,7 +923,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
624 return 0; 923 return 0;
625 924
626err_unmap: 925err_unmap:
-627		g2d_put_cmdlist_gem(drm_dev, file, node->gem_nr);
+926		g2d_unmap_cmdlist_gem(g2d, node, file);
628err_free_event: 927err_free_event:
629 if (node->event) { 928 if (node->event) {
630 spin_lock_irqsave(&drm_dev->event_lock, flags); 929 spin_lock_irqsave(&drm_dev->event_lock, flags);
@@ -680,6 +979,7 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
680 979
681 mutex_lock(&g2d->runqueue_mutex); 980 mutex_lock(&g2d->runqueue_mutex);
682 runqueue_node->pid = current->pid; 981 runqueue_node->pid = current->pid;
982 runqueue_node->filp = file;
683 list_add_tail(&runqueue_node->list, &g2d->runqueue); 983 list_add_tail(&runqueue_node->list, &g2d->runqueue);
684 if (!g2d->runqueue_node) 984 if (!g2d->runqueue_node)
685 g2d_exec_runqueue(g2d); 985 g2d_exec_runqueue(g2d);
@@ -696,6 +996,43 @@ out:
696} 996}
697EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl); 997EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
698 998
999static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1000{
1001 struct g2d_data *g2d;
1002 int ret;
1003
1004 g2d = dev_get_drvdata(dev);
1005 if (!g2d)
1006 return -EFAULT;
1007
1008 /* allocate dma-aware cmdlist buffer. */
1009 ret = g2d_init_cmdlist(g2d);
1010 if (ret < 0) {
1011 dev_err(dev, "cmdlist init failed\n");
1012 return ret;
1013 }
1014
1015 if (!is_drm_iommu_supported(drm_dev))
1016 return 0;
1017
1018 ret = drm_iommu_attach_device(drm_dev, dev);
1019 if (ret < 0) {
1020 dev_err(dev, "failed to enable iommu.\n");
1021 g2d_fini_cmdlist(g2d);
1022 }
1023
1024 return ret;
1025
1026}
1027
1028static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
1029{
1030 if (!is_drm_iommu_supported(drm_dev))
1031 return;
1032
1033 drm_iommu_detach_device(drm_dev, dev);
1034}
1035
699static int g2d_open(struct drm_device *drm_dev, struct device *dev, 1036static int g2d_open(struct drm_device *drm_dev, struct device *dev,
700 struct drm_file *file) 1037 struct drm_file *file)
701{ 1038{
@@ -713,7 +1050,7 @@ static int g2d_open(struct drm_device *drm_dev, struct device *dev,
713 1050
714 INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist); 1051 INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
715 INIT_LIST_HEAD(&g2d_priv->event_list); 1052 INIT_LIST_HEAD(&g2d_priv->event_list);
716 INIT_LIST_HEAD(&g2d_priv->gem_list); 1053 INIT_LIST_HEAD(&g2d_priv->userptr_list);
717 1054
718 return 0; 1055 return 0;
719} 1056}
@@ -734,11 +1071,21 @@ static void g2d_close(struct drm_device *drm_dev, struct device *dev,
734 1071		return;
735 1072
736 1073	mutex_lock(&g2d->cmdlist_mutex);
-737		list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list)
+1074	list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) {
+1075		/*
+1076		 * Unmap all gem objects that have not yet completed.
+1077		 *
+1078		 * P.S. if the current process was terminated forcibly then
+1079		 * there may still be commands in inuse_cmdlist, so unmap
+1080		 * them here.
+1081		 */
+1082		g2d_unmap_cmdlist_gem(g2d, node, file);
738 1083		list_move_tail(&node->list, &g2d->free_cmdlist);
+1084	}
739 1085	mutex_unlock(&g2d->cmdlist_mutex);
740 1086
-741		g2d_put_cmdlist_gem(drm_dev, file, g2d_priv->gem_nr);
+1087	/* release all g2d_userptr objects in the pool. */
+1088	g2d_userptr_free_all(drm_dev, g2d, file);
742 1089
743 1090	kfree(file_priv->g2d_priv);
744 1091	}
@@ -778,15 +1125,11 @@ static int __devinit g2d_probe(struct platform_device *pdev)
778 1125	mutex_init(&g2d->cmdlist_mutex);
779 1126	mutex_init(&g2d->runqueue_mutex);
780 1127
-781		ret = g2d_init_cmdlist(g2d);
-782		if (ret < 0)
-783			goto err_destroy_workqueue;
-784
-785		g2d->gate_clk = clk_get(dev, "fimg2d");
+1128	g2d->gate_clk = devm_clk_get(dev, "fimg2d");
786 1129	if (IS_ERR(g2d->gate_clk)) {
787 1130		dev_err(dev, "failed to get gate clock\n");
788 1131		ret = PTR_ERR(g2d->gate_clk);
-789			goto err_fini_cmdlist;
+1132		goto err_destroy_workqueue;
790 1133	}
791 1134
792 1135	pm_runtime_enable(dev);
@@ -814,10 +1157,14 @@ static int __devinit g2d_probe(struct platform_device *pdev)
814 goto err_put_clk; 1157 goto err_put_clk;
815 } 1158 }
816 1159
1160 g2d->max_pool = MAX_POOL;
1161
817 platform_set_drvdata(pdev, g2d); 1162 platform_set_drvdata(pdev, g2d);
818 1163
819 subdrv = &g2d->subdrv; 1164 subdrv = &g2d->subdrv;
820 subdrv->dev = dev; 1165 subdrv->dev = dev;
1166 subdrv->probe = g2d_subdrv_probe;
1167 subdrv->remove = g2d_subdrv_remove;
821 subdrv->open = g2d_open; 1168 subdrv->open = g2d_open;
822 subdrv->close = g2d_close; 1169 subdrv->close = g2d_close;
823 1170
@@ -834,9 +1181,6 @@ static int __devinit g2d_probe(struct platform_device *pdev)
834 1181
835err_put_clk: 1182err_put_clk:
836 pm_runtime_disable(dev); 1183 pm_runtime_disable(dev);
837 clk_put(g2d->gate_clk);
838err_fini_cmdlist:
839 g2d_fini_cmdlist(g2d);
840err_destroy_workqueue: 1184err_destroy_workqueue:
841 destroy_workqueue(g2d->g2d_workq); 1185 destroy_workqueue(g2d->g2d_workq);
842err_destroy_slab: 1186err_destroy_slab:
@@ -857,7 +1201,6 @@ static int __devexit g2d_remove(struct platform_device *pdev)
857 } 1201 }
858 1202
859 pm_runtime_disable(&pdev->dev); 1203 pm_runtime_disable(&pdev->dev);
860 clk_put(g2d->gate_clk);
861 1204
862 g2d_fini_cmdlist(g2d); 1205 g2d_fini_cmdlist(g2d);
863 destroy_workqueue(g2d->g2d_workq); 1206 destroy_workqueue(g2d->g2d_workq);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index d2545560664f..99227246ce82 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -83,157 +83,40 @@ static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
83 83
84 84	static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
85 85	{
-86		if (!IS_NONCONTIG_BUFFER(flags)) {
+86		/* TODO */
87 if (size >= SZ_1M)
88 return roundup(size, SECTION_SIZE);
89 else if (size >= SZ_64K)
90 return roundup(size, SZ_64K);
91 else
92 goto out;
93 }
94out:
95 return roundup(size, PAGE_SIZE);
96}
97
98struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
99 gfp_t gfpmask)
100{
101 struct page *p, **pages;
102 int i, npages;
103
104 npages = obj->size >> PAGE_SHIFT;
105
106 pages = drm_malloc_ab(npages, sizeof(struct page *));
107 if (pages == NULL)
108 return ERR_PTR(-ENOMEM);
109
110 for (i = 0; i < npages; i++) {
111 p = alloc_page(gfpmask);
112 if (IS_ERR(p))
113 goto fail;
114 pages[i] = p;
115 }
116
117 return pages;
118
119fail:
120 while (--i)
121 __free_page(pages[i]);
122
123 drm_free_large(pages);
124 return ERR_CAST(p);
125}
126
127static void exynos_gem_put_pages(struct drm_gem_object *obj,
128 struct page **pages)
129{
130 int npages;
131
132 npages = obj->size >> PAGE_SHIFT;
133
134 while (--npages >= 0)
135 __free_page(pages[npages]);
136 87
137 drm_free_large(pages); 88 return roundup(size, PAGE_SIZE);
138} 89}
139 90
-140	static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
+91	static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
141 92					struct vm_area_struct *vma,
142 93					unsigned long f_vaddr,
143 94					pgoff_t page_offset)
144 95	{
145 96		struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
146 97		struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
+98		struct scatterlist *sgl;
147 99		unsigned long pfn;
+100		int i;
148 101
-149		if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
-150			if (!buf->pages)
+102		if (!buf->sgt)
+103			return -EINTR;
151 return -EINTR;
152
153 pfn = page_to_pfn(buf->pages[page_offset++]);
154 } else
155 pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
156
157 return vm_insert_mixed(vma, f_vaddr, pfn);
158}
159 104
-160	static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
-161	{
+105		if (page_offset >= (buf->size >> PAGE_SHIFT)) {
+106			DRM_ERROR("invalid page offset\n");
162 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
163 struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
164 struct scatterlist *sgl;
165 struct page **pages;
166 unsigned int npages, i = 0;
167 int ret;
168
169 if (buf->pages) {
170 DRM_DEBUG_KMS("already allocated.\n");
171 107			return -EINVAL;
172 108		}
173 109
174 pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE);
175 if (IS_ERR(pages)) {
176 DRM_ERROR("failed to get pages.\n");
177 return PTR_ERR(pages);
178 }
179
180 npages = obj->size >> PAGE_SHIFT;
181 buf->page_size = PAGE_SIZE;
182
183 buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
184 if (!buf->sgt) {
185 DRM_ERROR("failed to allocate sg table.\n");
186 ret = -ENOMEM;
187 goto err;
188 }
189
190 ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
191 if (ret < 0) {
192 DRM_ERROR("failed to initialize sg table.\n");
193 ret = -EFAULT;
194 goto err1;
195 }
196
197 110		sgl = buf->sgt->sgl;
-198
-199		/* set all pages to sg list. */
-200		while (i < npages) {
-201			sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
-202			sg_dma_address(sgl) = page_to_phys(pages[i]);
-203			i++;
-204			sgl = sg_next(sgl);
+111		for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
+112			if (page_offset < (sgl->length >> PAGE_SHIFT))
+113				break;
+114			page_offset -= (sgl->length >> PAGE_SHIFT);
205 115		}
206 116
-207		/* add some codes for UNCACHED type here. TODO */
+117		pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
208
209 buf->pages = pages;
210 return ret;
211err1:
212 kfree(buf->sgt);
213 buf->sgt = NULL;
214err:
215 exynos_gem_put_pages(obj, pages);
216 return ret;
217
218}
219
220static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
221{
222 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
223 struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
224
225 /*
226 * if buffer typs is EXYNOS_BO_NONCONTIG then release all pages
227 * allocated at gem fault handler.
228 */
229 sg_free_table(buf->sgt);
230 kfree(buf->sgt);
231 buf->sgt = NULL;
232
233 exynos_gem_put_pages(obj, buf->pages);
234 buf->pages = NULL;
235 118
-236		/* add some codes for UNCACHED type here. TODO */
+119		return vm_insert_mixed(vma, f_vaddr, pfn);
237 120	}
238 121
239static int exynos_drm_gem_handle_create(struct drm_gem_object *obj, 122static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -270,9 +153,6 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
270 153
271 DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count)); 154 DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
272 155
273 if (!buf->pages)
274 return;
275
276 /* 156 /*
277 * do not release memory region from exporter. 157 * do not release memory region from exporter.
278 * 158 *
@@ -282,10 +162,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
282 if (obj->import_attach) 162 if (obj->import_attach)
283 goto out; 163 goto out;
284 164
-285		if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
-286			exynos_drm_gem_put_pages(obj);
-287		else
-288			exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
+165		exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
289 166
290out: 167out:
291 exynos_drm_fini_buf(obj->dev, buf); 168 exynos_drm_fini_buf(obj->dev, buf);
@@ -364,22 +241,10 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
364 /* set memory type and cache attribute from user side. */ 241 /* set memory type and cache attribute from user side. */
365 exynos_gem_obj->flags = flags; 242 exynos_gem_obj->flags = flags;
366 243
-367		/*
-368		 * allocate all pages as desired size if user wants to allocate
-369		 * physically non-continuous memory.
-370		 */
-371		if (flags & EXYNOS_BO_NONCONTIG) {
-372			ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
-373			if (ret < 0) {
-374				drm_gem_object_release(&exynos_gem_obj->base);
-375				goto err_fini_buf;
-376			}
-377		} else {
-378			ret = exynos_drm_alloc_buf(dev, buf, flags);
-379			if (ret < 0) {
-380				drm_gem_object_release(&exynos_gem_obj->base);
-381				goto err_fini_buf;
-382			}
-383		}
+244		ret = exynos_drm_alloc_buf(dev, buf, flags);
+245		if (ret < 0) {
+246			drm_gem_object_release(&exynos_gem_obj->base);
+247			goto err_fini_buf;
+248		}
384 249
385 return exynos_gem_obj; 250 return exynos_gem_obj;
@@ -412,14 +277,14 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
412 return 0; 277 return 0;
413} 278}
414 279
-415	void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
+280	dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
416 281					unsigned int gem_handle,
-417					struct drm_file *file_priv)
+282					struct drm_file *filp)
418 283	{
419 284		struct exynos_drm_gem_obj *exynos_gem_obj;
420 285		struct drm_gem_object *obj;
421 286
-422		obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+287		obj = drm_gem_object_lookup(dev, filp, gem_handle);
423 288		if (!obj) {
424 289			DRM_ERROR("failed to lookup gem object.\n");
425 return ERR_PTR(-EINVAL); 290 return ERR_PTR(-EINVAL);
@@ -427,25 +292,17 @@ void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
427 292
428 exynos_gem_obj = to_exynos_gem_obj(obj); 293 exynos_gem_obj = to_exynos_gem_obj(obj);
429 294
430 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
431 DRM_DEBUG_KMS("not support NONCONTIG type.\n");
432 drm_gem_object_unreference_unlocked(obj);
433
434 /* TODO */
435 return ERR_PTR(-EINVAL);
436 }
437
438 return &exynos_gem_obj->buffer->dma_addr; 295 return &exynos_gem_obj->buffer->dma_addr;
439} 296}
440 297
441 298	void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
442 299					unsigned int gem_handle,
-443					struct drm_file *file_priv)
+300					struct drm_file *filp)
444 301	{
445 302		struct exynos_drm_gem_obj *exynos_gem_obj;
446 303		struct drm_gem_object *obj;
447 304
-448		obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+305		obj = drm_gem_object_lookup(dev, filp, gem_handle);
449 if (!obj) { 306 if (!obj) {
450 DRM_ERROR("failed to lookup gem object.\n"); 307 DRM_ERROR("failed to lookup gem object.\n");
451 return; 308 return;
@@ -453,14 +310,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
453 310
454 exynos_gem_obj = to_exynos_gem_obj(obj); 311 exynos_gem_obj = to_exynos_gem_obj(obj);
455 312
456 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
457 DRM_DEBUG_KMS("not support NONCONTIG type.\n");
458 drm_gem_object_unreference_unlocked(obj);
459
460 /* TODO */
461 return;
462 }
463
464 drm_gem_object_unreference_unlocked(obj); 313 drm_gem_object_unreference_unlocked(obj);
465 314
466 /* 315 /*
@@ -489,22 +338,57 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
489 &args->offset); 338 &args->offset);
490} 339}
491 340
341static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
342 struct file *filp)
343{
344 struct drm_file *file_priv;
345
346 mutex_lock(&drm_dev->struct_mutex);
347
348 /* find current process's drm_file from filelist. */
349 list_for_each_entry(file_priv, &drm_dev->filelist, lhead) {
350 if (file_priv->filp == filp) {
351 mutex_unlock(&drm_dev->struct_mutex);
352 return file_priv;
353 }
354 }
355
356 mutex_unlock(&drm_dev->struct_mutex);
357 WARN_ON(1);
358
359 return ERR_PTR(-EFAULT);
360}
361
492static int exynos_drm_gem_mmap_buffer(struct file *filp, 362static int exynos_drm_gem_mmap_buffer(struct file *filp,
493 struct vm_area_struct *vma) 363 struct vm_area_struct *vma)
494{ 364{
495 struct drm_gem_object *obj = filp->private_data; 365 struct drm_gem_object *obj = filp->private_data;
496 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); 366 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
367 struct drm_device *drm_dev = obj->dev;
497 struct exynos_drm_gem_buf *buffer; 368 struct exynos_drm_gem_buf *buffer;
498 unsigned long pfn, vm_size, usize, uaddr = vma->vm_start; 369 struct drm_file *file_priv;
370 unsigned long vm_size;
499 int ret; 371 int ret;
500 372
501 DRM_DEBUG_KMS("%s\n", __FILE__); 373 DRM_DEBUG_KMS("%s\n", __FILE__);
502 374
503 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; 375 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
376 vma->vm_private_data = obj;
377 vma->vm_ops = drm_dev->driver->gem_vm_ops;
378
379 /* restore it to driver's fops. */
380 filp->f_op = fops_get(drm_dev->driver->fops);
381
382 file_priv = exynos_drm_find_drm_file(drm_dev, filp);
383 if (IS_ERR(file_priv))
384 return PTR_ERR(file_priv);
385
386 /* restore it to drm_file. */
387 filp->private_data = file_priv;
504 388
505 update_vm_cache_attr(exynos_gem_obj, vma); 389 update_vm_cache_attr(exynos_gem_obj, vma);
506 390
507 vm_size = usize = vma->vm_end - vma->vm_start; 391 vm_size = vma->vm_end - vma->vm_start;
508 392
509 /* 393 /*
510 * a buffer contains information to physically continuous memory 394 * a buffer contains information to physically continuous memory
@@ -516,40 +400,23 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
516 if (vm_size > buffer->size) 400 if (vm_size > buffer->size)
517 return -EINVAL; 401 return -EINVAL;
518 402
519 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) { 403 ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->kvaddr,
520 int i = 0; 404 buffer->dma_addr, buffer->size,
521 405 &buffer->dma_attrs);
522 if (!buffer->pages) 406 if (ret < 0) {
523 return -EINVAL; 407 DRM_ERROR("failed to mmap.\n");
408 return ret;
409 }
524 410
525 vma->vm_flags |= VM_MIXEDMAP; 411 /*
412 * take a reference to this mapping of the object. And this reference
413 * is unreferenced by the corresponding vm_close call.
414 */
415 drm_gem_object_reference(obj);
526 416
527 do { 417 mutex_lock(&drm_dev->struct_mutex);
528 ret = vm_insert_page(vma, uaddr, buffer->pages[i++]); 418 drm_vm_open_locked(drm_dev, vma);
529 if (ret) { 419 mutex_unlock(&drm_dev->struct_mutex);
530 DRM_ERROR("failed to remap user space.\n");
531 return ret;
532 }
533
534 uaddr += PAGE_SIZE;
535 usize -= PAGE_SIZE;
536 } while (usize > 0);
537 } else {
538 /*
539 * get page frame number to physical memory to be mapped
540 * to user space.
541 */
542 pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
543 PAGE_SHIFT;
544
545 DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
546
547 if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
548 vma->vm_page_prot)) {
549 DRM_ERROR("failed to remap pfn range.\n");
550 return -EAGAIN;
551 }
552 }
553 420
554 return 0; 421 return 0;
555} 422}
@@ -578,16 +445,29 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
578 return -EINVAL; 445 return -EINVAL;
579 } 446 }
580 447
-581		obj->filp->f_op = &exynos_drm_gem_fops;
-582		obj->filp->private_data = obj;
+448	/*
+449	 * Set the specific mapper's fops; it will be restored by
+450	 * exynos_drm_gem_mmap_buffer to dev->driver->fops.
+451	 * This is used to call the specific mapper temporarily.
+452	 */
+453	file_priv->filp->f_op = &exynos_drm_gem_fops;
583 454
-584		addr = vm_mmap(obj->filp, 0, args->size,
+455	/*
+456	 * Set the gem object to private_data so that the specific mapper
+457	 * can get the gem object; it will be restored by
+458	 * exynos_drm_gem_mmap_buffer to the drm_file.
+459	 */
+460	file_priv->filp->private_data = obj;
+461
+462	addr = vm_mmap(file_priv->filp, 0, args->size,
585 463			PROT_READ | PROT_WRITE, MAP_SHARED, 0);
586 464
587 465	drm_gem_object_unreference_unlocked(obj);
588 466
-589		if (IS_ERR((void *)addr))
+467	if (IS_ERR((void *)addr)) {
+468		file_priv->filp->private_data = file_priv;
590 469		return PTR_ERR((void *)addr);
+470	}
591 471
592 args->mapped = addr; 472 args->mapped = addr;
593 473
@@ -622,6 +502,129 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
622 return 0; 502 return 0;
623} 503}
624 504
505struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
506{
507 struct vm_area_struct *vma_copy;
508
509 vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
510 if (!vma_copy)
511 return NULL;
512
513 if (vma->vm_ops && vma->vm_ops->open)
514 vma->vm_ops->open(vma);
515
516 if (vma->vm_file)
517 get_file(vma->vm_file);
518
519 memcpy(vma_copy, vma, sizeof(*vma));
520
521 vma_copy->vm_mm = NULL;
522 vma_copy->vm_next = NULL;
523 vma_copy->vm_prev = NULL;
524
525 return vma_copy;
526}
527
528void exynos_gem_put_vma(struct vm_area_struct *vma)
529{
530 if (!vma)
531 return;
532
533 if (vma->vm_ops && vma->vm_ops->close)
534 vma->vm_ops->close(vma);
535
536 if (vma->vm_file)
537 fput(vma->vm_file);
538
539 kfree(vma);
540}
541
542int exynos_gem_get_pages_from_userptr(unsigned long start,
543 unsigned int npages,
544 struct page **pages,
545 struct vm_area_struct *vma)
546{
547 int get_npages;
548
549 /* the memory region mmaped with VM_PFNMAP. */
550 if (vma_is_io(vma)) {
551 unsigned int i;
552
553 for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
554 unsigned long pfn;
555 int ret = follow_pfn(vma, start, &pfn);
556 if (ret)
557 return ret;
558
559 pages[i] = pfn_to_page(pfn);
560 }
561
562 if (i != npages) {
563 DRM_ERROR("failed to get user_pages.\n");
564 return -EINVAL;
565 }
566
567 return 0;
568 }
569
570 get_npages = get_user_pages(current, current->mm, start,
571 npages, 1, 1, pages, NULL);
572 get_npages = max(get_npages, 0);
573 if (get_npages != npages) {
574 DRM_ERROR("failed to get user_pages.\n");
575 while (get_npages)
576 put_page(pages[--get_npages]);
577 return -EFAULT;
578 }
579
580 return 0;
581}
582
583void exynos_gem_put_pages_to_userptr(struct page **pages,
584 unsigned int npages,
585 struct vm_area_struct *vma)
586{
587 if (!vma_is_io(vma)) {
588 unsigned int i;
589
590 for (i = 0; i < npages; i++) {
591 set_page_dirty_lock(pages[i]);
592
593 /*
594 * undo the reference we took when populating
595 * the table.
596 */
597 put_page(pages[i]);
598 }
599 }
600}
601
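Together the two helpers above implement the usual pin / DMA / dirty-and-unpin contract for anonymous memory. A condensed sketch of that contract (a kernel-style fragment following the 3.7-era get_user_pages() signature used above; the unwind label itself is not shown):

	/* pin: take a reference on each user page */
	got = get_user_pages(current, current->mm, start, npages,
			     1 /* write */, 1 /* force */, pages, NULL);
	if (got != npages)
		goto unwind;		/* put_page() whatever was pinned */

	/* ... device DMA to or from the pinned pages ... */

	/* unpin: mark dirty first so written data survives reclaim */
	for (i = 0; i < npages; i++) {
		set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
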
602int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
603 struct sg_table *sgt,
604 enum dma_data_direction dir)
605{
606 int nents;
607
608 mutex_lock(&drm_dev->struct_mutex);
609
610 nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
611 if (!nents) {
612 DRM_ERROR("failed to map sgl with dma.\n");
613 mutex_unlock(&drm_dev->struct_mutex);
614 return nents;
615 }
616
617 mutex_unlock(&drm_dev->struct_mutex);
618 return 0;
619}
620
621void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
622 struct sg_table *sgt,
623 enum dma_data_direction dir)
624{
625 dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
626}
627
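Callers are expected to bracket device access with the two helpers; roughly (names as defined above, error handling elided):

	ret = exynos_gem_map_sgt_with_dma(drm_dev, sgt, DMA_BIDIRECTIONAL);
	if (ret < 0)
		return ret;

	/* ... program the engine with sgt->sgl[0].dma_address ... */

	exynos_gem_unmap_sgt_from_dma(drm_dev, sgt, DMA_BIDIRECTIONAL);
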
625int exynos_drm_gem_init_object(struct drm_gem_object *obj) 628int exynos_drm_gem_init_object(struct drm_gem_object *obj)
626{ 629{
627 DRM_DEBUG_KMS("%s\n", __FILE__); 630 DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -753,9 +756,9 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
753 756
754 mutex_lock(&dev->struct_mutex); 757 mutex_lock(&dev->struct_mutex);
755 758
756 ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset); 759 ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
757 if (ret < 0) 760 if (ret < 0)
758 DRM_ERROR("failed to map pages.\n"); 761 DRM_ERROR("failed to map a buffer with user.\n");
759 762
760 mutex_unlock(&dev->struct_mutex); 763 mutex_unlock(&dev->struct_mutex);
761 764
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 085b2a5d5f70..d3ea106a9a77 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -35,21 +35,25 @@
35 * exynos drm gem buffer structure. 35 * exynos drm gem buffer structure.
36 * 36 *
37 * @kvaddr: kernel virtual address to allocated memory region. 37 * @kvaddr: kernel virtual address to allocated memory region.
38 * @userptr: user space address.
38 * @dma_addr: bus address(accessed by dma) to allocated memory region. 39 * @dma_addr: bus address(accessed by dma) to allocated memory region.
39 * - this address could be physical address without IOMMU and 40 * - this address could be physical address without IOMMU and
40 * device address with IOMMU. 41 * device address with IOMMU.
42 * @write: whether pages will be written to by the caller.
41 * @sgt: sg table to transfer page data. 43 * @sgt: sg table to transfer page data.
42 * @pages: contain all pages to allocated memory region.
43 * @page_size: could be 4K, 64K or 1MB.
44 * @size: size of allocated memory region. 44 * @size: size of allocated memory region.
45 * @pfnmap: indicate whether memory region from userptr is mmaped with
46 * VM_PFNMAP or not.
45 */ 47 */
46struct exynos_drm_gem_buf { 48struct exynos_drm_gem_buf {
47 void __iomem *kvaddr; 49 void __iomem *kvaddr;
50 unsigned long userptr;
48 dma_addr_t dma_addr; 51 dma_addr_t dma_addr;
52 struct dma_attrs dma_attrs;
53 unsigned int write;
49 struct sg_table *sgt; 54 struct sg_table *sgt;
50 struct page **pages;
51 unsigned long page_size;
52 unsigned long size; 55 unsigned long size;
56 bool pfnmap;
53}; 57};
54 58
55/* 59/*
@@ -65,6 +69,7 @@ struct exynos_drm_gem_buf {
65 * or at framebuffer creation. 69 * or at framebuffer creation.
66 * @size: size requested from user, in bytes and this size is aligned 70 * @size: size requested from user, in bytes and this size is aligned
67 * in page unit. 71 * in page unit.
72 * @vma: a pointer to vm_area.
68 * @flags: indicate memory type to allocated buffer and cache attruibute. 73 * @flags: indicate memory type to allocated buffer and cache attruibute.
69 * 74 *
70 * P.S. this object would be transfered to user as kms_bo.handle so 75 * P.S. this object would be transfered to user as kms_bo.handle so
@@ -74,6 +79,7 @@ struct exynos_drm_gem_obj {
74 struct drm_gem_object base; 79 struct drm_gem_object base;
75 struct exynos_drm_gem_buf *buffer; 80 struct exynos_drm_gem_buf *buffer;
76 unsigned long size; 81 unsigned long size;
82 struct vm_area_struct *vma;
77 unsigned int flags; 83 unsigned int flags;
78}; 84};
79 85
@@ -104,9 +110,9 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
104 * other drivers such as 2d/3d acceleration drivers. 110 * other drivers such as 2d/3d acceleration drivers.
105 * with this function call, gem object reference count would be increased. 111 * with this function call, gem object reference count would be increased.
106 */ 112 */
107void *exynos_drm_gem_get_dma_addr(struct drm_device *dev, 113dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
108 unsigned int gem_handle, 114 unsigned int gem_handle,
109 struct drm_file *file_priv); 115 struct drm_file *filp);
110 116
111/* 117/*
112 * put dma address from gem handle and this function could be used for 118 * put dma address from gem handle and this function could be used for
@@ -115,7 +121,7 @@ void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
115 */ 121 */
116void exynos_drm_gem_put_dma_addr(struct drm_device *dev, 122void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
117 unsigned int gem_handle, 123 unsigned int gem_handle,
118 struct drm_file *file_priv); 124 struct drm_file *filp);
119 125
120/* get buffer offset to map to user space. */ 126/* get buffer offset to map to user space. */
121int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data, 127int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
@@ -128,6 +134,10 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
128int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data, 134int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
129 struct drm_file *file_priv); 135 struct drm_file *file_priv);
130 136
137/* map user space allocated by malloc to pages. */
138int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
139 struct drm_file *file_priv);
140
131/* get buffer information to memory region allocated by gem. */ 141/* get buffer information to memory region allocated by gem. */
132int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, 142int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
133 struct drm_file *file_priv); 143 struct drm_file *file_priv);
@@ -163,4 +173,36 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
163/* set vm_flags and we can change the vm attribute to other one at here. */ 173/* set vm_flags and we can change the vm attribute to other one at here. */
164int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); 174int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
165 175
176static inline int vma_is_io(struct vm_area_struct *vma)
177{
178 return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
179}
180
181/* get a copy of a virtual memory region. */
182struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);
183
184/* release a userspace virtual memory area. */
185void exynos_gem_put_vma(struct vm_area_struct *vma);
186
187/* get pages from user space. */
188int exynos_gem_get_pages_from_userptr(unsigned long start,
189 unsigned int npages,
190 struct page **pages,
191 struct vm_area_struct *vma);
192
193/* drop the reference to pages. */
194void exynos_gem_put_pages_to_userptr(struct page **pages,
195 unsigned int npages,
196 struct vm_area_struct *vma);
197
198/* map sgt with dma region. */
199int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
200 struct sg_table *sgt,
201 enum dma_data_direction dir);
202
203/* unmap sgt from dma region. */
204void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
205 struct sg_table *sgt,
206 enum dma_data_direction dir);
207
166#endif 208#endif
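vma_is_io() above is what routes a userptr region to follow_pfn() instead of get_user_pages(): VM_IO/VM_PFNMAP mappings have no struct page backing that can be pinned. The flag test in isolation (flag values as in the 3.7-era <linux/mm.h>, re-defined here so the snippet builds standalone):

	#include <stdio.h>

	#define VM_PFNMAP	0x00000400	/* raw PFN mapping, no struct page */
	#define VM_IO		0x00004000	/* device I/O mapping */

	static int vma_is_io(unsigned long vm_flags)
	{
		return !!(vm_flags & (VM_IO | VM_PFNMAP));
	}

	int main(void)
	{
		printf("pfnmap region: %d\n", vma_is_io(VM_PFNMAP));	/* 1 */
		printf("anonymous region: %d\n", vma_is_io(0));		/* 0 */
		return 0;
	}
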
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index c3b9e2b45185..2d11e70b601a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -346,9 +346,23 @@ static int hdmi_subdrv_probe(struct drm_device *drm_dev,
346 ctx->hdmi_ctx->drm_dev = drm_dev; 346 ctx->hdmi_ctx->drm_dev = drm_dev;
347 ctx->mixer_ctx->drm_dev = drm_dev; 347 ctx->mixer_ctx->drm_dev = drm_dev;
348 348
349 if (mixer_ops->iommu_on)
350 mixer_ops->iommu_on(ctx->mixer_ctx->ctx, true);
351
349 return 0; 352 return 0;
350} 353}
351 354
355static void hdmi_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
356{
357 struct drm_hdmi_context *ctx;
358 struct exynos_drm_subdrv *subdrv = to_subdrv(dev);
359
360 ctx = get_ctx_from_subdrv(subdrv);
361
362 if (mixer_ops->iommu_on)
363 mixer_ops->iommu_on(ctx->mixer_ctx->ctx, false);
364}
365
352static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev) 366static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
353{ 367{
354 struct device *dev = &pdev->dev; 368 struct device *dev = &pdev->dev;
@@ -368,6 +382,7 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
368 subdrv->dev = dev; 382 subdrv->dev = dev;
369 subdrv->manager = &hdmi_manager; 383 subdrv->manager = &hdmi_manager;
370 subdrv->probe = hdmi_subdrv_probe; 384 subdrv->probe = hdmi_subdrv_probe;
385 subdrv->remove = hdmi_subdrv_remove;
371 386
372 platform_set_drvdata(pdev, subdrv); 387 platform_set_drvdata(pdev, subdrv);
373 388
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index 2da5ffd3a059..54b522353e48 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -62,6 +62,7 @@ struct exynos_hdmi_ops {
62 62
63struct exynos_mixer_ops { 63struct exynos_mixer_ops {
64 /* manager */ 64 /* manager */
65 int (*iommu_on)(void *ctx, bool enable);
65 int (*enable_vblank)(void *ctx, int pipe); 66 int (*enable_vblank)(void *ctx, int pipe);
66 void (*disable_vblank)(void *ctx); 67 void (*disable_vblank)(void *ctx);
67 void (*dpms)(void *ctx, int mode); 68 void (*dpms)(void *ctx, int mode);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
new file mode 100644
index 000000000000..09db1983eb1a
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -0,0 +1,150 @@
1/* exynos_drm_iommu.c
2 *
3 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
4 * Author: Inki Dae <inki.dae@samsung.com>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26#include <drmP.h>
27#include <drm/exynos_drm.h>
28
29#include <linux/dma-mapping.h>
30#include <linux/iommu.h>
31#include <linux/kref.h>
32
33#include <asm/dma-iommu.h>
34
35#include "exynos_drm_drv.h"
36#include "exynos_drm_iommu.h"
37
38/*
39 * drm_create_iommu_mapping - create a mapping structure
40 *
41 * @drm_dev: DRM device
42 */
43int drm_create_iommu_mapping(struct drm_device *drm_dev)
44{
45 struct dma_iommu_mapping *mapping = NULL;
46 struct exynos_drm_private *priv = drm_dev->dev_private;
47 struct device *dev = drm_dev->dev;
48
49 if (!priv->da_start)
50 priv->da_start = EXYNOS_DEV_ADDR_START;
51 if (!priv->da_space_size)
52 priv->da_space_size = EXYNOS_DEV_ADDR_SIZE;
53 if (!priv->da_space_order)
54 priv->da_space_order = EXYNOS_DEV_ADDR_ORDER;
55
56 mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start,
57 priv->da_space_size,
58 priv->da_space_order);
59 if (!mapping)
60 return -ENOMEM;
61
62 dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
63 GFP_KERNEL);
64 dma_set_max_seg_size(dev, 0xffffffffu);
65 dev->archdata.mapping = mapping;
66
67 return 0;
68}
69
70/*
71 * drm_release_iommu_mapping - release iommu mapping structure
72 *
73 * @drm_dev: DRM device
74 *
75 * if mapping->kref becomes 0 then all things related to iommu mapping
76 * will be released
77 */
78void drm_release_iommu_mapping(struct drm_device *drm_dev)
79{
80 struct device *dev = drm_dev->dev;
81
82 arm_iommu_release_mapping(dev->archdata.mapping);
83}
84
85/*
86 * drm_iommu_attach_device - attach device to iommu mapping
87 *
88 * @drm_dev: DRM device
89 * @subdrv_dev: device to be attached
90 *
91 * This function should be called by sub drivers to attach it to iommu
92 * mapping.
93 */
94int drm_iommu_attach_device(struct drm_device *drm_dev,
95 struct device *subdrv_dev)
96{
97 struct device *dev = drm_dev->dev;
98 int ret;
99
100 if (!dev->archdata.mapping) {
101 DRM_ERROR("iommu_mapping is null.\n");
102 return -EFAULT;
103 }
104
105 subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
106 sizeof(*subdrv_dev->dma_parms),
107 GFP_KERNEL);
108 dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
109
110 ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping);
111 if (ret < 0) {
112 DRM_DEBUG_KMS("failed iommu attach.\n");
113 return ret;
114 }
115
116 /*
117 * Set dma_ops to drm_device just one time.
118 *
119	 * The dma mapping api needs a device object, and the api is used
120	 * to allocate physical memory and map it with the iommu table.
121 * If iommu attach succeeded, the sub driver would have dma_ops
122 * for iommu and also all sub drivers have same dma_ops.
123 */
124 if (!dev->archdata.dma_ops)
125 dev->archdata.dma_ops = subdrv_dev->archdata.dma_ops;
126
127 return 0;
128}
129
130/*
131 * drm_iommu_detach_device -detach device address space mapping from device
132 *
133 * @drm_dev: DRM device
134 * @subdrv_dev: device to be detached
135 *
136 * This function should be called by sub drivers to detach it from iommu
137 * mapping
138 */
139void drm_iommu_detach_device(struct drm_device *drm_dev,
140 struct device *subdrv_dev)
141{
142 struct device *dev = drm_dev->dev;
143 struct dma_iommu_mapping *mapping = dev->archdata.mapping;
144
145 if (!mapping || !mapping->domain)
146 return;
147
148 iommu_detach_device(mapping->domain, subdrv_dev);
149 drm_release_iommu_mapping(drm_dev);
150}
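Sub-drivers consume this API with the guard-then-attach pattern already visible in g2d_subdrv_probe() earlier in this patch; the minimal shape (foo_subdrv_* are illustrative names, not part of the patch):

	static int foo_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
	{
		if (!is_drm_iommu_supported(drm_dev))
			return 0;	/* no IOMMU: contiguous allocations only */

		return drm_iommu_attach_device(drm_dev, dev);
	}

	static void foo_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
	{
		if (!is_drm_iommu_supported(drm_dev))
			return;

		drm_iommu_detach_device(drm_dev, dev);
	}
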
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
new file mode 100644
index 000000000000..18a0ca190b98
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -0,0 +1,85 @@
1/* exynos_drm_iommu.h
2 *
3 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
4 * Author: Inki Dae <inki.dae@samsung.com>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26#ifndef _EXYNOS_DRM_IOMMU_H_
27#define _EXYNOS_DRM_IOMMU_H_
28
29#define EXYNOS_DEV_ADDR_START 0x20000000
30#define EXYNOS_DEV_ADDR_SIZE 0x40000000
31#define EXYNOS_DEV_ADDR_ORDER 0x4
32
33#ifdef CONFIG_DRM_EXYNOS_IOMMU
34
35int drm_create_iommu_mapping(struct drm_device *drm_dev);
36
37void drm_release_iommu_mapping(struct drm_device *drm_dev);
38
39int drm_iommu_attach_device(struct drm_device *drm_dev,
40 struct device *subdrv_dev);
41
42void drm_iommu_detach_device(struct drm_device *dev_dev,
43 struct device *subdrv_dev);
44
45static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
46{
47#ifdef CONFIG_ARM_DMA_USE_IOMMU
48 struct device *dev = drm_dev->dev;
49
50 return dev->archdata.mapping ? true : false;
51#else
52 return false;
53#endif
54}
55
56#else
57
58struct dma_iommu_mapping;
59static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
60{
61 return 0;
62}
63
64static inline void drm_release_iommu_mapping(struct drm_device *drm_dev)
65{
66}
67
68static inline int drm_iommu_attach_device(struct drm_device *drm_dev,
69 struct device *subdrv_dev)
70{
71 return 0;
72}
73
74static inline void drm_iommu_detach_device(struct drm_device *drm_dev,
75 struct device *subdrv_dev)
76{
77}
78
79static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
80{
81 return false;
82}
83
84#endif
85#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 60b877a388c2..862ca1eb2102 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -204,7 +204,6 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
204 return ret; 204 return ret;
205 205
206 plane->crtc = crtc; 206 plane->crtc = crtc;
207 plane->fb = crtc->fb;
208 207
209 exynos_plane_commit(plane); 208 exynos_plane_commit(plane);
210 exynos_plane_dpms(plane, DRM_MODE_DPMS_ON); 209 exynos_plane_dpms(plane, DRM_MODE_DPMS_ON);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index e4b8a8f741f7..4b0c16bfd1da 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -382,7 +382,6 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
382 struct drm_pending_vblank_event *e, *t; 382 struct drm_pending_vblank_event *e, *t;
383 struct timeval now; 383 struct timeval now;
384 unsigned long flags; 384 unsigned long flags;
385 bool is_checked = false;
386 385
387 spin_lock_irqsave(&drm_dev->event_lock, flags); 386 spin_lock_irqsave(&drm_dev->event_lock, flags);
388 387
@@ -392,8 +391,6 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
392 if (crtc != e->pipe) 391 if (crtc != e->pipe)
393 continue; 392 continue;
394 393
395 is_checked = true;
396
397 do_gettimeofday(&now); 394 do_gettimeofday(&now);
398 e->event.sequence = 0; 395 e->event.sequence = 0;
399 e->event.tv_sec = now.tv_sec; 396 e->event.tv_sec = now.tv_sec;
@@ -401,22 +398,7 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
401 398
402 list_move_tail(&e->base.link, &e->base.file_priv->event_list); 399 list_move_tail(&e->base.link, &e->base.file_priv->event_list);
403 wake_up_interruptible(&e->base.file_priv->event_wait); 400 wake_up_interruptible(&e->base.file_priv->event_wait);
404 } 401 drm_vblank_put(drm_dev, crtc);
405
406 if (is_checked) {
407 /*
408 * call drm_vblank_put only in case that drm_vblank_get was
409 * called.
410 */
411 if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
412 drm_vblank_put(drm_dev, crtc);
413
414 /*
415 * don't off vblank if vblank_disable_allowed is 1,
416 * because vblank would be off by timer handler.
417 */
418 if (!drm_dev->vblank_disable_allowed)
419 drm_vblank_off(drm_dev, crtc);
420 } 402 }
421 403
422 spin_unlock_irqrestore(&drm_dev->event_lock, flags); 404 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 2c115f8a62a3..bafb65389562 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -50,6 +50,29 @@
50#define MAX_HEIGHT 1080 50#define MAX_HEIGHT 1080
51#define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev)) 51#define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev))
52 52
53/* AVI header and aspect ratio */
54#define HDMI_AVI_VERSION 0x02
55#define HDMI_AVI_LENGTH 0x0D
56#define AVI_PIC_ASPECT_RATIO_16_9 (2 << 4)
57#define AVI_SAME_AS_PIC_ASPECT_RATIO 8
58
59/* AUI header info */
60#define HDMI_AUI_VERSION 0x01
61#define HDMI_AUI_LENGTH 0x0A
62
63/* HDMI infoframe to configure HDMI out packet header, AUI and AVI */
64enum HDMI_PACKET_TYPE {
65 /* refer to Table 5-8 Packet Type in HDMI specification v1.4a */
66 /* InfoFrame packet type */
67 HDMI_PACKET_TYPE_INFOFRAME = 0x80,
68 /* Vendor-Specific InfoFrame */
69 HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1,
70 /* Auxiliary Video information InfoFrame */
71 HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2,
72 /* Audio information InfoFrame */
73 HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4
74};
75
53enum hdmi_type { 76enum hdmi_type {
54 HDMI_TYPE13, 77 HDMI_TYPE13,
55 HDMI_TYPE14, 78 HDMI_TYPE14,
@@ -74,6 +97,7 @@ struct hdmi_context {
74 struct mutex hdmi_mutex; 97 struct mutex hdmi_mutex;
75 98
76 void __iomem *regs; 99 void __iomem *regs;
100 void *parent_ctx;
77 int external_irq; 101 int external_irq;
78 int internal_irq; 102 int internal_irq;
79 103
@@ -84,7 +108,6 @@ struct hdmi_context {
84 int cur_conf; 108 int cur_conf;
85 109
86 struct hdmi_resources res; 110 struct hdmi_resources res;
87 void *parent_ctx;
88 111
89 int hpd_gpio; 112 int hpd_gpio;
90 113
@@ -182,6 +205,7 @@ struct hdmi_v13_conf {
182 int height; 205 int height;
183 int vrefresh; 206 int vrefresh;
184 bool interlace; 207 bool interlace;
208 int cea_video_id;
185 const u8 *hdmiphy_data; 209 const u8 *hdmiphy_data;
186 const struct hdmi_v13_preset_conf *conf; 210 const struct hdmi_v13_preset_conf *conf;
187}; 211};
@@ -353,15 +377,20 @@ static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p60 = {
353}; 377};
354 378
355static const struct hdmi_v13_conf hdmi_v13_confs[] = { 379static const struct hdmi_v13_conf hdmi_v13_confs[] = {
-356	{ 1280, 720, 60, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 },
-357	{ 1280, 720, 50, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 },
-358	{ 720, 480, 60, false, hdmiphy_v13_conf27_027, &hdmi_v13_conf_480p },
-359	{ 1920, 1080, 50, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i50 },
-360	{ 1920, 1080, 50, false, hdmiphy_v13_conf148_5,
-361				&hdmi_v13_conf_1080p50 },
-362	{ 1920, 1080, 60, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i60 },
-363	{ 1920, 1080, 60, false, hdmiphy_v13_conf148_5,
-364				&hdmi_v13_conf_1080p60 },
+380	{ 1280, 720, 60, false, 4, hdmiphy_v13_conf74_25,
+381				&hdmi_v13_conf_720p60 },
+382	{ 1280, 720, 50, false, 19, hdmiphy_v13_conf74_25,
+383				&hdmi_v13_conf_720p60 },
+384	{ 720, 480, 60, false, 3, hdmiphy_v13_conf27_027,
+385				&hdmi_v13_conf_480p },
+386	{ 1920, 1080, 50, true, 20, hdmiphy_v13_conf74_25,
+387				&hdmi_v13_conf_1080i50 },
+388	{ 1920, 1080, 50, false, 31, hdmiphy_v13_conf148_5,
+389				&hdmi_v13_conf_1080p50 },
+390	{ 1920, 1080, 60, true, 5, hdmiphy_v13_conf74_25,
+391				&hdmi_v13_conf_1080i60 },
+392	{ 1920, 1080, 60, false, 16, hdmiphy_v13_conf148_5,
+393				&hdmi_v13_conf_1080p60 },
365 394	};
366 395
367/* HDMI Version 1.4 */ 396/* HDMI Version 1.4 */
@@ -479,6 +508,7 @@ struct hdmi_conf {
479 int height; 508 int height;
480 int vrefresh; 509 int vrefresh;
481 bool interlace; 510 bool interlace;
511 int cea_video_id;
482 const u8 *hdmiphy_data; 512 const u8 *hdmiphy_data;
483 const struct hdmi_preset_conf *conf; 513 const struct hdmi_preset_conf *conf;
484}; 514};
@@ -934,16 +964,21 @@ static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
934}; 964};
935 965
936static const struct hdmi_conf hdmi_confs[] = { 966static const struct hdmi_conf hdmi_confs[] = {
937 { 720, 480, 60, false, hdmiphy_conf27_027, &hdmi_conf_480p60 }, 967 { 720, 480, 60, false, 3, hdmiphy_conf27_027, &hdmi_conf_480p60 },
938 { 1280, 720, 50, false, hdmiphy_conf74_25, &hdmi_conf_720p50 }, 968 { 1280, 720, 50, false, 19, hdmiphy_conf74_25, &hdmi_conf_720p50 },
939 { 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 }, 969 { 1280, 720, 60, false, 4, hdmiphy_conf74_25, &hdmi_conf_720p60 },
940 { 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 }, 970 { 1920, 1080, 50, true, 20, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
941 { 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 }, 971 { 1920, 1080, 60, true, 5, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
942 { 1920, 1080, 30, false, hdmiphy_conf74_176, &hdmi_conf_1080p30 }, 972 { 1920, 1080, 30, false, 34, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
943 { 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 }, 973 { 1920, 1080, 50, false, 31, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
944 { 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 }, 974 { 1920, 1080, 60, false, 16, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
945}; 975};
946 976
977struct hdmi_infoframe {
978 enum HDMI_PACKET_TYPE type;
979 u8 ver;
980 u8 len;
981};
947 982
948static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id) 983static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
949{ 984{
@@ -1267,6 +1302,88 @@ static int hdmi_conf_index(struct hdmi_context *hdata,
1267 return hdmi_v14_conf_index(mode); 1302 return hdmi_v14_conf_index(mode);
1268} 1303}
1269 1304
1305static u8 hdmi_chksum(struct hdmi_context *hdata,
1306 u32 start, u8 len, u32 hdr_sum)
1307{
1308 int i;
1309
1310 /* hdr_sum : header0 + header1 + header2
1311 * start : start address of packet byte1
1312 * len : packet bytes - 1 */
1313 for (i = 0; i < len; ++i)
1314 hdr_sum += 0xff & hdmi_reg_read(hdata, start + i * 4);
1315
1316 /* return 2's complement of 8 bit hdr_sum */
1317 return (u8)(~(hdr_sum & 0xff) + 1);
1318}
1319
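
hdmi_chksum() above implements the standard InfoFrame rule: the checksum byte is chosen so that the 8-bit sum of header, payload, and checksum is zero. A standalone sketch of the same arithmetic in plain userspace C, using the AUI values from this patch:

	#include <stdint.h>
	#include <stdio.h>

	/* Same 2's-complement rule as hdmi_chksum(): hdr_sum is the sum
	 * of the three header bytes, payload holds the data bytes. */
	static uint8_t infoframe_checksum(uint32_t hdr_sum,
					  const uint8_t *payload, int len)
	{
		int i;

		for (i = 0; i < len; i++)
			hdr_sum += payload[i];
		return (uint8_t)(~(hdr_sum & 0xff) + 1);
	}

	int main(void)
	{
		uint8_t payload[10] = { 0x01, 0x02 };	/* example AUI payload */
		uint32_t hdr = 0x84 + 0x01 + 0x0a;	/* type + ver + len */
		uint8_t sum = infoframe_checksum(hdr, payload, 10);

		/* the 8-bit total over header + payload + checksum is 0 */
		printf("checksum=0x%02x total=0x%02x\n", sum,
		       (uint8_t)(hdr + 0x01 + 0x02 + sum));
		return 0;
	}
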
1320static void hdmi_reg_infoframe(struct hdmi_context *hdata,
1321 struct hdmi_infoframe *infoframe)
1322{
1323 u32 hdr_sum;
1324 u8 chksum;
1325 u32 aspect_ratio;
1326 u32 mod;
1327 u32 vic;
1328
1329 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1330
1331 mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
1332 if (hdata->dvi_mode) {
1333 hdmi_reg_writeb(hdata, HDMI_VSI_CON,
1334 HDMI_VSI_CON_DO_NOT_TRANSMIT);
1335 hdmi_reg_writeb(hdata, HDMI_AVI_CON,
1336 HDMI_AVI_CON_DO_NOT_TRANSMIT);
1337 hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_NO_TRAN);
1338 return;
1339 }
1340
1341 switch (infoframe->type) {
1342 case HDMI_PACKET_TYPE_AVI:
1343 hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC);
1344 hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type);
1345 hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver);
1346 hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len);
1347 hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
1348
1349 /* Output format zero hardcoded, RGB/YCbCr selection */
1350 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 |
1351 AVI_ACTIVE_FORMAT_VALID |
1352 AVI_UNDERSCANNED_DISPLAY_VALID);
1353
1354 aspect_ratio = AVI_PIC_ASPECT_RATIO_16_9;
1355
1356 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), aspect_ratio |
1357 AVI_SAME_AS_PIC_ASPECT_RATIO);
1358
1359 if (hdata->type == HDMI_TYPE13)
1360 vic = hdmi_v13_confs[hdata->cur_conf].cea_video_id;
1361 else
1362 vic = hdmi_confs[hdata->cur_conf].cea_video_id;
1363
1364 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
1365
1366 chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
1367 infoframe->len, hdr_sum);
1368 DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum);
1369 hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum);
1370 break;
1371 case HDMI_PACKET_TYPE_AUI:
1372 hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
1373 hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type);
1374 hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver);
1375 hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->len);
1376 hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
1377 chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1),
1378 infoframe->len, hdr_sum);
1379 DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum);
1380 hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum);
1381 break;
1382 default:
1383 break;
1384 }
1385}
1386
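
For reference, AVI data byte 2 written above follows the CEA-861 layout: picture aspect ratio in bits 5:4 and active-format aspect in bits 3:0, so the two constants simply OR together:

	u8 avi_byte2 = AVI_PIC_ASPECT_RATIO_16_9	/* 2 << 4 */
		     | AVI_SAME_AS_PIC_ASPECT_RATIO;	/* 8: same as picture */
	/* avi_byte2 == 0x28, the value programmed via HDMI_AVI_BYTE(2) */
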
1270static bool hdmi_is_connected(void *ctx) 1387static bool hdmi_is_connected(void *ctx)
1271{ 1388{
1272 struct hdmi_context *hdata = ctx; 1389 struct hdmi_context *hdata = ctx;
@@ -1293,6 +1410,7 @@ static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
1293 DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n", 1410 DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
1294 (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"), 1411 (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
1295 raw_edid->width_cm, raw_edid->height_cm); 1412 raw_edid->width_cm, raw_edid->height_cm);
1413 kfree(raw_edid);
1296 } else { 1414 } else {
1297 return -ENODEV; 1415 return -ENODEV;
1298 } 1416 }
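
The one-line kfree() above is the whole leak fix: the raw EDID block is a heap copy owned by the caller. A hedged sketch of the surrounding flow (the full function is not in this hunk, so names other than raw_edid are assumptions):

	/* Assumed shape of the EDID query: the DDC read returns a
	 * kmalloc'd EDID; once its contents are copied out, free it. */
	struct edid *raw_edid = drm_get_edid(connector,
					     hdata->ddc_port->adapter);
	if (!raw_edid)
		return -ENODEV;
	memcpy(edid, raw_edid,
	       min_t(int, (1 + raw_edid->extensions) * EDID_LENGTH, len));
	kfree(raw_edid);	/* previously leaked on every EDID query */
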
@@ -1541,6 +1659,8 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
1541 1659
1542static void hdmi_conf_init(struct hdmi_context *hdata) 1660static void hdmi_conf_init(struct hdmi_context *hdata)
1543{ 1661{
1662 struct hdmi_infoframe infoframe;
1663
1544 /* disable HPD interrupts */ 1664 /* disable HPD interrupts */
1545 hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL | 1665 hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
1546 HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG); 1666 HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
@@ -1575,9 +1695,17 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
1575 hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02); 1695 hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
1576 hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04); 1696 hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
1577 } else { 1697 } else {
1698 infoframe.type = HDMI_PACKET_TYPE_AVI;
1699 infoframe.ver = HDMI_AVI_VERSION;
1700 infoframe.len = HDMI_AVI_LENGTH;
1701 hdmi_reg_infoframe(hdata, &infoframe);
1702
1703 infoframe.type = HDMI_PACKET_TYPE_AUI;
1704 infoframe.ver = HDMI_AUI_VERSION;
1705 infoframe.len = HDMI_AUI_LENGTH;
1706 hdmi_reg_infoframe(hdata, &infoframe);
1707
1578 /* enable AVI packet every vsync, fixes purple line problem */ 1708 /* enable AVI packet every vsync, fixes purple line problem */
1579 hdmi_reg_writeb(hdata, HDMI_AVI_CON, 0x02);
1580 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 2 << 5);
1581 hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5); 1709 hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5);
1582 } 1710 }
1583} 1711}
@@ -1978,9 +2106,18 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
1978 index = hdmi_v14_conf_index(m); 2106 index = hdmi_v14_conf_index(m);
1979 2107
1980 if (index >= 0) { 2108 if (index >= 0) {
2109 struct drm_mode_object base;
2110 struct list_head head;
2111
1981 DRM_INFO("desired mode doesn't exist, so\n"); 2112 DRM_INFO("desired mode doesn't exist, so\n");
1982 DRM_INFO("use the most suitable mode among supported modes.\n"); 2113 DRM_INFO("use the most suitable mode among supported modes.\n");
2114
2115 /* preserve display mode header while copying. */
2116 head = adjusted_mode->head;
2117 base = adjusted_mode->base;
1983 memcpy(adjusted_mode, m, sizeof(*m)); 2118 memcpy(adjusted_mode, m, sizeof(*m));
2119 adjusted_mode->head = head;
2120 adjusted_mode->base = base;
1984 break; 2121 break;
1985 } 2122 }
1986 } 2123 }
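
The save/restore around memcpy() above exists because drm_display_mode embeds its object identity (base) and list linkage (head); copying a whole mode over adjusted_mode would otherwise corrupt both. A hypothetical helper capturing the pattern:

	/* Hypothetical helper, mirroring the hunk above: copying head by
	 * value is safe because the node's address does not change, so
	 * its prev/next pointers remain valid. */
	static void copy_mode_keep_links(struct drm_display_mode *dst,
					 const struct drm_display_mode *src)
	{
		struct drm_mode_object base = dst->base;
		struct list_head head = dst->head;

		memcpy(dst, src, sizeof(*src));
		dst->base = base;
		dst->head = head;
	}
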
@@ -2166,27 +2303,27 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
2166 memset(res, 0, sizeof(*res)); 2303 memset(res, 0, sizeof(*res));
2167 2304
2168 /* get clocks, power */ 2305 /* get clocks, power */
2169 res->hdmi = clk_get(dev, "hdmi"); 2306 res->hdmi = devm_clk_get(dev, "hdmi");
2170 if (IS_ERR_OR_NULL(res->hdmi)) { 2307 if (IS_ERR_OR_NULL(res->hdmi)) {
2171 DRM_ERROR("failed to get clock 'hdmi'\n"); 2308 DRM_ERROR("failed to get clock 'hdmi'\n");
2172 goto fail; 2309 goto fail;
2173 } 2310 }
2174 res->sclk_hdmi = clk_get(dev, "sclk_hdmi"); 2311 res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
2175 if (IS_ERR_OR_NULL(res->sclk_hdmi)) { 2312 if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
2176 DRM_ERROR("failed to get clock 'sclk_hdmi'\n"); 2313 DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
2177 goto fail; 2314 goto fail;
2178 } 2315 }
2179 res->sclk_pixel = clk_get(dev, "sclk_pixel"); 2316 res->sclk_pixel = devm_clk_get(dev, "sclk_pixel");
2180 if (IS_ERR_OR_NULL(res->sclk_pixel)) { 2317 if (IS_ERR_OR_NULL(res->sclk_pixel)) {
2181 DRM_ERROR("failed to get clock 'sclk_pixel'\n"); 2318 DRM_ERROR("failed to get clock 'sclk_pixel'\n");
2182 goto fail; 2319 goto fail;
2183 } 2320 }
2184 res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy"); 2321 res->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy");
2185 if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) { 2322 if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) {
2186 DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n"); 2323 DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
2187 goto fail; 2324 goto fail;
2188 } 2325 }
2189 res->hdmiphy = clk_get(dev, "hdmiphy"); 2326 res->hdmiphy = devm_clk_get(dev, "hdmiphy");
2190 if (IS_ERR_OR_NULL(res->hdmiphy)) { 2327 if (IS_ERR_OR_NULL(res->hdmiphy)) {
2191 DRM_ERROR("failed to get clock 'hdmiphy'\n"); 2328 DRM_ERROR("failed to get clock 'hdmiphy'\n");
2192 goto fail; 2329 goto fail;
@@ -2194,7 +2331,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
2194 2331
2195 clk_set_parent(res->sclk_hdmi, res->sclk_pixel); 2332 clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
2196 2333
2197 res->regul_bulk = kzalloc(ARRAY_SIZE(supply) * 2334 res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) *
2198 sizeof(res->regul_bulk[0]), GFP_KERNEL); 2335 sizeof(res->regul_bulk[0]), GFP_KERNEL);
2199 if (!res->regul_bulk) { 2336 if (!res->regul_bulk) {
2200 DRM_ERROR("failed to get memory for regulators\n"); 2337 DRM_ERROR("failed to get memory for regulators\n");
@@ -2204,7 +2341,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
2204 res->regul_bulk[i].supply = supply[i]; 2341 res->regul_bulk[i].supply = supply[i];
2205 res->regul_bulk[i].consumer = NULL; 2342 res->regul_bulk[i].consumer = NULL;
2206 } 2343 }
2207 ret = regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk); 2344 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
2208 if (ret) { 2345 if (ret) {
2209 DRM_ERROR("failed to get regulators\n"); 2346 DRM_ERROR("failed to get regulators\n");
2210 goto fail; 2347 goto fail;
@@ -2217,28 +2354,6 @@ fail:
2217 return -ENODEV; 2354 return -ENODEV;
2218} 2355}
2219 2356
2220static int hdmi_resources_cleanup(struct hdmi_context *hdata)
2221{
2222 struct hdmi_resources *res = &hdata->res;
2223
2224 regulator_bulk_free(res->regul_count, res->regul_bulk);
2225 /* kfree is NULL-safe */
2226 kfree(res->regul_bulk);
2227 if (!IS_ERR_OR_NULL(res->hdmiphy))
2228 clk_put(res->hdmiphy);
2229 if (!IS_ERR_OR_NULL(res->sclk_hdmiphy))
2230 clk_put(res->sclk_hdmiphy);
2231 if (!IS_ERR_OR_NULL(res->sclk_pixel))
2232 clk_put(res->sclk_pixel);
2233 if (!IS_ERR_OR_NULL(res->sclk_hdmi))
2234 clk_put(res->sclk_hdmi);
2235 if (!IS_ERR_OR_NULL(res->hdmi))
2236 clk_put(res->hdmi);
2237 memset(res, 0, sizeof(*res));
2238
2239 return 0;
2240}
2241
2242static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy; 2357static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy;
2243 2358
2244void hdmi_attach_ddc_client(struct i2c_client *ddc) 2359void hdmi_attach_ddc_client(struct i2c_client *ddc)
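
Deleting hdmi_resources_cleanup() entirely is the payoff of the devm_* conversion above: managed resources are released automatically when the device is unbound, so neither error paths nor remove() need explicit puts. A minimal sketch of the pattern, with a hypothetical probe:

	static int example_probe(struct platform_device *pdev)
	{
		/* devm_clk_get(): the clock reference is dropped for us
		 * on driver detach - no clk_put() on any path. */
		struct clk *clk = devm_clk_get(&pdev->dev, "hdmi");

		if (IS_ERR(clk))
			return PTR_ERR(clk);

		return 0;
	}
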
@@ -2378,36 +2493,32 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
2378 ret = hdmi_resources_init(hdata); 2493 ret = hdmi_resources_init(hdata);
2379 2494
2380 if (ret) { 2495 if (ret) {
2381 ret = -EINVAL;
2382 DRM_ERROR("hdmi_resources_init failed\n"); 2496 DRM_ERROR("hdmi_resources_init failed\n");
2383 goto err_data; 2497 return -EINVAL;
2384 } 2498 }
2385 2499
2386 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2500 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2387 if (!res) { 2501 if (!res) {
2388 DRM_ERROR("failed to find registers\n"); 2502 DRM_ERROR("failed to find registers\n");
2389 ret = -ENOENT; 2503 return -ENOENT;
2390 goto err_resource;
2391 } 2504 }
2392 2505
2393 hdata->regs = devm_request_and_ioremap(&pdev->dev, res); 2506 hdata->regs = devm_request_and_ioremap(&pdev->dev, res);
2394 if (!hdata->regs) { 2507 if (!hdata->regs) {
2395 DRM_ERROR("failed to map registers\n"); 2508 DRM_ERROR("failed to map registers\n");
2396 ret = -ENXIO; 2509 return -ENXIO;
2397 goto err_resource;
2398 } 2510 }
2399 2511
2400 ret = gpio_request(hdata->hpd_gpio, "HPD"); 2512 ret = devm_gpio_request(&pdev->dev, hdata->hpd_gpio, "HPD");
2401 if (ret) { 2513 if (ret) {
2402 DRM_ERROR("failed to request HPD gpio\n"); 2514 DRM_ERROR("failed to request HPD gpio\n");
2403 goto err_resource; 2515 return ret;
2404 } 2516 }
2405 2517
2406 /* DDC i2c driver */ 2518 /* DDC i2c driver */
2407 if (i2c_add_driver(&ddc_driver)) { 2519 if (i2c_add_driver(&ddc_driver)) {
2408 DRM_ERROR("failed to register ddc i2c driver\n"); 2520 DRM_ERROR("failed to register ddc i2c driver\n");
2409 ret = -ENOENT; 2521 return -ENOENT;
2410 goto err_gpio;
2411 } 2522 }
2412 2523
2413 hdata->ddc_port = hdmi_ddc; 2524 hdata->ddc_port = hdmi_ddc;
@@ -2470,11 +2581,6 @@ err_hdmiphy:
2470 i2c_del_driver(&hdmiphy_driver); 2581 i2c_del_driver(&hdmiphy_driver);
2471err_ddc: 2582err_ddc:
2472 i2c_del_driver(&ddc_driver); 2583 i2c_del_driver(&ddc_driver);
2473err_gpio:
2474 gpio_free(hdata->hpd_gpio);
2475err_resource:
2476 hdmi_resources_cleanup(hdata);
2477err_data:
2478 return ret; 2584 return ret;
2479} 2585}
2480 2586
@@ -2491,9 +2597,6 @@ static int __devexit hdmi_remove(struct platform_device *pdev)
2491 free_irq(hdata->internal_irq, hdata); 2597 free_irq(hdata->internal_irq, hdata);
2492 free_irq(hdata->external_irq, hdata); 2598 free_irq(hdata->external_irq, hdata);
2493 2599
2494 gpio_free(hdata->hpd_gpio);
2495
2496 hdmi_resources_cleanup(hdata);
2497 2600
2498 /* hdmiphy i2c driver */ 2601 /* hdmiphy i2c driver */
2499 i2c_del_driver(&hdmiphy_driver); 2602 i2c_del_driver(&hdmiphy_driver);
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index e7fbb823fd8e..40a6e1906fbb 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -36,6 +36,7 @@
36 36
37#include "exynos_drm_drv.h" 37#include "exynos_drm_drv.h"
38#include "exynos_drm_hdmi.h" 38#include "exynos_drm_hdmi.h"
39#include "exynos_drm_iommu.h"
39 40
40#define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev)) 41#define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev))
41 42
@@ -80,6 +81,7 @@ enum mixer_version_id {
80 81
81struct mixer_context { 82struct mixer_context {
82 struct device *dev; 83 struct device *dev;
84 struct drm_device *drm_dev;
83 int pipe; 85 int pipe;
84 bool interlace; 86 bool interlace;
85 bool powered; 87 bool powered;
@@ -90,6 +92,7 @@ struct mixer_context {
90 struct mixer_resources mixer_res; 92 struct mixer_resources mixer_res;
91 struct hdmi_win_data win_data[MIXER_WIN_NR]; 93 struct hdmi_win_data win_data[MIXER_WIN_NR];
92 enum mixer_version_id mxr_ver; 94 enum mixer_version_id mxr_ver;
95 void *parent_ctx;
93}; 96};
94 97
95struct mixer_drv_data { 98struct mixer_drv_data {
@@ -665,6 +668,24 @@ static void mixer_win_reset(struct mixer_context *ctx)
665 spin_unlock_irqrestore(&res->reg_slock, flags); 668 spin_unlock_irqrestore(&res->reg_slock, flags);
666} 669}
667 670
671static int mixer_iommu_on(void *ctx, bool enable)
672{
673 struct exynos_drm_hdmi_context *drm_hdmi_ctx;
674 struct mixer_context *mdata = ctx;
675 struct drm_device *drm_dev;
676
677 drm_hdmi_ctx = mdata->parent_ctx;
678 drm_dev = drm_hdmi_ctx->drm_dev;
679
680 if (is_drm_iommu_supported(drm_dev)) {
681 if (enable)
682 return drm_iommu_attach_device(drm_dev, mdata->dev);
683
684 drm_iommu_detach_device(drm_dev, mdata->dev);
685 }
686 return 0;
687}
688
668static void mixer_poweron(struct mixer_context *ctx) 689static void mixer_poweron(struct mixer_context *ctx)
669{ 690{
670 struct mixer_resources *res = &ctx->mixer_res; 691 struct mixer_resources *res = &ctx->mixer_res;
@@ -866,6 +887,7 @@ static void mixer_win_disable(void *ctx, int win)
866 887
867static struct exynos_mixer_ops mixer_ops = { 888static struct exynos_mixer_ops mixer_ops = {
868 /* manager */ 889 /* manager */
890 .iommu_on = mixer_iommu_on,
869 .enable_vblank = mixer_enable_vblank, 891 .enable_vblank = mixer_enable_vblank,
870 .disable_vblank = mixer_disable_vblank, 892 .disable_vblank = mixer_disable_vblank,
871 .dpms = mixer_dpms, 893 .dpms = mixer_dpms,
@@ -884,7 +906,6 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
884 struct drm_pending_vblank_event *e, *t; 906 struct drm_pending_vblank_event *e, *t;
885 struct timeval now; 907 struct timeval now;
886 unsigned long flags; 908 unsigned long flags;
887 bool is_checked = false;
888 909
889 spin_lock_irqsave(&drm_dev->event_lock, flags); 910 spin_lock_irqsave(&drm_dev->event_lock, flags);
890 911
@@ -894,7 +915,6 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
894 if (crtc != e->pipe) 915 if (crtc != e->pipe)
895 continue; 916 continue;
896 917
897 is_checked = true;
898 do_gettimeofday(&now); 918 do_gettimeofday(&now);
899 e->event.sequence = 0; 919 e->event.sequence = 0;
900 e->event.tv_sec = now.tv_sec; 920 e->event.tv_sec = now.tv_sec;
@@ -902,16 +922,9 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
902 922
903 list_move_tail(&e->base.link, &e->base.file_priv->event_list); 923 list_move_tail(&e->base.link, &e->base.file_priv->event_list);
904 wake_up_interruptible(&e->base.file_priv->event_wait); 924 wake_up_interruptible(&e->base.file_priv->event_wait);
925 drm_vblank_put(drm_dev, crtc);
905 } 926 }
906 927
907 if (is_checked)
908 /*
909 * call drm_vblank_put only in case that drm_vblank_get was
910 * called.
911 */
912 if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
913 drm_vblank_put(drm_dev, crtc);
914
915 spin_unlock_irqrestore(&drm_dev->event_lock, flags); 928 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
916} 929}
917 930
@@ -971,57 +984,45 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
971 984
972 spin_lock_init(&mixer_res->reg_slock); 985 spin_lock_init(&mixer_res->reg_slock);
973 986
974 mixer_res->mixer = clk_get(dev, "mixer"); 987 mixer_res->mixer = devm_clk_get(dev, "mixer");
975 if (IS_ERR_OR_NULL(mixer_res->mixer)) { 988 if (IS_ERR_OR_NULL(mixer_res->mixer)) {
976 dev_err(dev, "failed to get clock 'mixer'\n"); 989 dev_err(dev, "failed to get clock 'mixer'\n");
977 ret = -ENODEV; 990 return -ENODEV;
978 goto fail;
979 } 991 }
980 992
981 mixer_res->sclk_hdmi = clk_get(dev, "sclk_hdmi"); 993 mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
982 if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) { 994 if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) {
983 dev_err(dev, "failed to get clock 'sclk_hdmi'\n"); 995 dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
984 ret = -ENODEV; 996 return -ENODEV;
985 goto fail;
986 } 997 }
987 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 998 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
988 if (res == NULL) { 999 if (res == NULL) {
989 dev_err(dev, "get memory resource failed.\n"); 1000 dev_err(dev, "get memory resource failed.\n");
990 ret = -ENXIO; 1001 return -ENXIO;
991 goto fail;
992 } 1002 }
993 1003
994 mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start, 1004 mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start,
995 resource_size(res)); 1005 resource_size(res));
996 if (mixer_res->mixer_regs == NULL) { 1006 if (mixer_res->mixer_regs == NULL) {
997 dev_err(dev, "register mapping failed.\n"); 1007 dev_err(dev, "register mapping failed.\n");
998 ret = -ENXIO; 1008 return -ENXIO;
999 goto fail;
1000 } 1009 }
1001 1010
1002 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1011 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1003 if (res == NULL) { 1012 if (res == NULL) {
1004 dev_err(dev, "get interrupt resource failed.\n"); 1013 dev_err(dev, "get interrupt resource failed.\n");
1005 ret = -ENXIO; 1014 return -ENXIO;
1006 goto fail;
1007 } 1015 }
1008 1016
1009 ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler, 1017 ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler,
1010 0, "drm_mixer", ctx); 1018 0, "drm_mixer", ctx);
1011 if (ret) { 1019 if (ret) {
1012 dev_err(dev, "request interrupt failed.\n"); 1020 dev_err(dev, "request interrupt failed.\n");
1013 goto fail; 1021 return ret;
1014 } 1022 }
1015 mixer_res->irq = res->start; 1023 mixer_res->irq = res->start;
1016 1024
1017 return 0; 1025 return 0;
1018
1019fail:
1020 if (!IS_ERR_OR_NULL(mixer_res->sclk_hdmi))
1021 clk_put(mixer_res->sclk_hdmi);
1022 if (!IS_ERR_OR_NULL(mixer_res->mixer))
1023 clk_put(mixer_res->mixer);
1024 return ret;
1025} 1026}
1026 1027
1027static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx, 1028static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
@@ -1031,25 +1032,21 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
1031 struct device *dev = &pdev->dev; 1032 struct device *dev = &pdev->dev;
1032 struct mixer_resources *mixer_res = &mixer_ctx->mixer_res; 1033 struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
1033 struct resource *res; 1034 struct resource *res;
1034 int ret;
1035 1035
1036 mixer_res->vp = clk_get(dev, "vp"); 1036 mixer_res->vp = devm_clk_get(dev, "vp");
1037 if (IS_ERR_OR_NULL(mixer_res->vp)) { 1037 if (IS_ERR_OR_NULL(mixer_res->vp)) {
1038 dev_err(dev, "failed to get clock 'vp'\n"); 1038 dev_err(dev, "failed to get clock 'vp'\n");
1039 ret = -ENODEV; 1039 return -ENODEV;
1040 goto fail;
1041 } 1040 }
1042 mixer_res->sclk_mixer = clk_get(dev, "sclk_mixer"); 1041 mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
1043 if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) { 1042 if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) {
1044 dev_err(dev, "failed to get clock 'sclk_mixer'\n"); 1043 dev_err(dev, "failed to get clock 'sclk_mixer'\n");
1045 ret = -ENODEV; 1044 return -ENODEV;
1046 goto fail;
1047 } 1045 }
1048 mixer_res->sclk_dac = clk_get(dev, "sclk_dac"); 1046 mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac");
1049 if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) { 1047 if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) {
1050 dev_err(dev, "failed to get clock 'sclk_dac'\n"); 1048 dev_err(dev, "failed to get clock 'sclk_dac'\n");
1051 ret = -ENODEV; 1049 return -ENODEV;
1052 goto fail;
1053 } 1050 }
1054 1051
1055 if (mixer_res->sclk_hdmi) 1052 if (mixer_res->sclk_hdmi)
@@ -1058,28 +1055,17 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
1058 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1055 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1059 if (res == NULL) { 1056 if (res == NULL) {
1060 dev_err(dev, "get memory resource failed.\n"); 1057 dev_err(dev, "get memory resource failed.\n");
1061 ret = -ENXIO; 1058 return -ENXIO;
1062 goto fail;
1063 } 1059 }
1064 1060
1065 mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start, 1061 mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start,
1066 resource_size(res)); 1062 resource_size(res));
1067 if (mixer_res->vp_regs == NULL) { 1063 if (mixer_res->vp_regs == NULL) {
1068 dev_err(dev, "register mapping failed.\n"); 1064 dev_err(dev, "register mapping failed.\n");
1069 ret = -ENXIO; 1065 return -ENXIO;
1070 goto fail;
1071 } 1066 }
1072 1067
1073 return 0; 1068 return 0;
1074
1075fail:
1076 if (!IS_ERR_OR_NULL(mixer_res->sclk_dac))
1077 clk_put(mixer_res->sclk_dac);
1078 if (!IS_ERR_OR_NULL(mixer_res->sclk_mixer))
1079 clk_put(mixer_res->sclk_mixer);
1080 if (!IS_ERR_OR_NULL(mixer_res->vp))
1081 clk_put(mixer_res->vp);
1082 return ret;
1083} 1069}
1084 1070
1085static struct mixer_drv_data exynos5_mxr_drv_data = { 1071static struct mixer_drv_data exynos5_mxr_drv_data = {
@@ -1149,6 +1135,7 @@ static int __devinit mixer_probe(struct platform_device *pdev)
1149 } 1135 }
1150 1136
1151 ctx->dev = &pdev->dev; 1137 ctx->dev = &pdev->dev;
1138 ctx->parent_ctx = (void *)drm_hdmi_ctx;
1152 drm_hdmi_ctx->ctx = (void *)ctx; 1139 drm_hdmi_ctx->ctx = (void *)ctx;
1153 ctx->vp_enabled = drv->is_vp_enabled; 1140 ctx->vp_enabled = drv->is_vp_enabled;
1154 ctx->mxr_ver = drv->version; 1141 ctx->mxr_ver = drv->version;
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
index 9cc7c5e9718c..970cdb518eb1 100644
--- a/drivers/gpu/drm/exynos/regs-hdmi.h
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -298,14 +298,14 @@
298#define HDMI_AVI_HEADER1 HDMI_CORE_BASE(0x0714) 298#define HDMI_AVI_HEADER1 HDMI_CORE_BASE(0x0714)
299#define HDMI_AVI_HEADER2 HDMI_CORE_BASE(0x0718) 299#define HDMI_AVI_HEADER2 HDMI_CORE_BASE(0x0718)
300#define HDMI_AVI_CHECK_SUM HDMI_CORE_BASE(0x071C) 300#define HDMI_AVI_CHECK_SUM HDMI_CORE_BASE(0x071C)
301#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0720 + 4 * (n)) 301#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0720 + 4 * (n-1))
302 302
303#define HDMI_AUI_CON HDMI_CORE_BASE(0x0800) 303#define HDMI_AUI_CON HDMI_CORE_BASE(0x0800)
304#define HDMI_AUI_HEADER0 HDMI_CORE_BASE(0x0810) 304#define HDMI_AUI_HEADER0 HDMI_CORE_BASE(0x0810)
305#define HDMI_AUI_HEADER1 HDMI_CORE_BASE(0x0814) 305#define HDMI_AUI_HEADER1 HDMI_CORE_BASE(0x0814)
306#define HDMI_AUI_HEADER2 HDMI_CORE_BASE(0x0818) 306#define HDMI_AUI_HEADER2 HDMI_CORE_BASE(0x0818)
307#define HDMI_AUI_CHECK_SUM HDMI_CORE_BASE(0x081C) 307#define HDMI_AUI_CHECK_SUM HDMI_CORE_BASE(0x081C)
308#define HDMI_AUI_BYTE(n) HDMI_CORE_BASE(0x0820 + 4 * (n)) 308#define HDMI_AUI_BYTE(n) HDMI_CORE_BASE(0x0820 + 4 * (n-1))
309 309
310#define HDMI_MPG_CON HDMI_CORE_BASE(0x0900) 310#define HDMI_MPG_CON HDMI_CORE_BASE(0x0900)
311#define HDMI_MPG_CHECK_SUM HDMI_CORE_BASE(0x091C) 311#define HDMI_MPG_CHECK_SUM HDMI_CORE_BASE(0x091C)
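
The (n) to (n-1) change above fixes an off-by-one in the packet-byte register map: data byte 1 now lands in the first data register at offset 0x0720 instead of skipping it. Concretely, assuming HDMI_CORE_BASE() is a plain offset into the core register block:

	HDMI_AVI_BYTE(1)	/* old: core_base + 0x0720 + 4*1 (wrong)  */
				/* new: core_base + 0x0720 + 4*0          */
	HDMI_AVI_BYTE(13)	/* new: +0x0750, the last AVI data byte   */
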
@@ -338,6 +338,19 @@
338#define HDMI_AN_SEED_2 HDMI_CORE_BASE(0x0E60) 338#define HDMI_AN_SEED_2 HDMI_CORE_BASE(0x0E60)
339#define HDMI_AN_SEED_3 HDMI_CORE_BASE(0x0E64) 339#define HDMI_AN_SEED_3 HDMI_CORE_BASE(0x0E64)
340 340
341/* AVI bit definition */
342#define HDMI_AVI_CON_DO_NOT_TRANSMIT (0 << 1)
343#define HDMI_AVI_CON_EVERY_VSYNC (1 << 1)
344
345#define AVI_ACTIVE_FORMAT_VALID (1 << 4)
346#define AVI_UNDERSCANNED_DISPLAY_VALID (1 << 1)
347
348/* AUI bit definition */
349#define HDMI_AUI_CON_NO_TRAN (0 << 0)
350
351/* VSI bit definition */
352#define HDMI_VSI_CON_DO_NOT_TRANSMIT (0 << 0)
353
341/* HDCP related registers */ 354/* HDCP related registers */
342#define HDMI_HDCP_SHA1(n) HDMI_CORE_BASE(0x7000 + 4 * (n)) 355#define HDMI_HDCP_SHA1(n) HDMI_CORE_BASE(0x7000 + 4 * (n))
343#define HDMI_HDCP_KSV_LIST(n) HDMI_CORE_BASE(0x7050 + 4 * (n)) 356#define HDMI_HDCP_KSV_LIST(n) HDMI_CORE_BASE(0x7050 + 4 * (n))
diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h
index f83f793223ff..c8e1831d7572 100644
--- a/include/linux/dma-attrs.h
+++ b/include/linux/dma-attrs.h
@@ -17,6 +17,7 @@ enum dma_attr {
17 DMA_ATTR_NON_CONSISTENT, 17 DMA_ATTR_NON_CONSISTENT,
18 DMA_ATTR_NO_KERNEL_MAPPING, 18 DMA_ATTR_NO_KERNEL_MAPPING,
19 DMA_ATTR_SKIP_CPU_SYNC, 19 DMA_ATTR_SKIP_CPU_SYNC,
20 DMA_ATTR_FORCE_CONTIGUOUS,
20 DMA_ATTR_MAX, 21 DMA_ATTR_MAX,
21}; 22};
22 23
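
A hedged sketch of how an allocator would request fully contiguous memory with the new attribute, using the struct dma_attrs API of this kernel generation (the call site is illustrative and not from this patch):

	DEFINE_DMA_ATTRS(attrs);
	dma_addr_t dma_handle;
	void *vaddr;

	dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);

	/* With the attribute set, dma_alloc_attrs() must return one
	 * physically contiguous region or fail. */
	vaddr = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL, &attrs);
	if (!vaddr)
		return -ENOMEM;
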
diff --git a/include/uapi/drm/exynos_drm.h b/include/uapi/drm/exynos_drm.h
index c0494d586e23..49f010f2b27f 100644
--- a/include/uapi/drm/exynos_drm.h
+++ b/include/uapi/drm/exynos_drm.h
@@ -133,17 +133,26 @@ struct drm_exynos_g2d_cmd {
133 __u32 data; 133 __u32 data;
134}; 134};
135 135
136enum drm_exynos_g2d_buf_type {
137 G2D_BUF_USERPTR = 1 << 31,
138};
139
136enum drm_exynos_g2d_event_type { 140enum drm_exynos_g2d_event_type {
137 G2D_EVENT_NOT, 141 G2D_EVENT_NOT,
138 G2D_EVENT_NONSTOP, 142 G2D_EVENT_NONSTOP,
139 G2D_EVENT_STOP, /* not yet */ 143 G2D_EVENT_STOP, /* not yet */
140}; 144};
141 145
146struct drm_exynos_g2d_userptr {
147 unsigned long userptr;
148 unsigned long size;
149};
150
142struct drm_exynos_g2d_set_cmdlist { 151struct drm_exynos_g2d_set_cmdlist {
143 __u64 cmd; 152 __u64 cmd;
144 __u64 cmd_gem; 153 __u64 cmd_buf;
145 __u32 cmd_nr; 154 __u32 cmd_nr;
146 __u32 cmd_gem_nr; 155 __u32 cmd_buf_nr;
147 156
148 /* for g2d event */ 157 /* for g2d event */
149 __u64 event_type; 158 __u64 event_type;
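
Userspace sketch of the new userptr path (assumed usage: the register offset and exact command encoding are hypothetical, and casting the descriptor pointer into the 32-bit data field assumes a 32-bit platform, as on these SoCs):

	struct drm_exynos_g2d_userptr desc = {
		.userptr = (unsigned long)user_buffer,	/* user memory */
		.size    = user_buffer_size,
	};
	struct drm_exynos_g2d_cmd cmd = {
		.offset = 0x0404,	/* hypothetical G2D base-address reg */
		.data   = (__u32)(unsigned long)&desc | G2D_BUF_USERPTR,
	};
	/* cmd goes into the cmd_buf list of drm_exynos_g2d_set_cmdlist;
	 * the G2D_BUF_USERPTR bit (1 << 31) tells the driver that data
	 * points at a drm_exynos_g2d_userptr instead of a GEM handle. */
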