Diffstat (limited to 'drivers/gpu/drm/exynos')
 drivers/gpu/drm/exynos/Kconfig               |   30
 drivers/gpu/drm/exynos/Makefile              |    5
 drivers/gpu/drm/exynos/exynos_ddc.c          |    4
 drivers/gpu/drm/exynos/exynos_drm_buf.c      |  115
 drivers/gpu/drm/exynos/exynos_drm_buf.h      |    4
 drivers/gpu/drm/exynos/exynos_drm_crtc.c     |    5
 drivers/gpu/drm/exynos/exynos_drm_dmabuf.c   |  150
 drivers/gpu/drm/exynos/exynos_drm_drv.c      |  117
 drivers/gpu/drm/exynos/exynos_drm_drv.h      |   43
 drivers/gpu/drm/exynos/exynos_drm_encoder.c  |   36
 drivers/gpu/drm/exynos/exynos_drm_encoder.h  |    1
 drivers/gpu/drm/exynos/exynos_drm_fb.c       |   94
 drivers/gpu/drm/exynos/exynos_drm_fbdev.c    |   74
 drivers/gpu/drm/exynos/exynos_drm_fimc.c     | 2001
 drivers/gpu/drm/exynos/exynos_drm_fimc.h     |   37
 drivers/gpu/drm/exynos/exynos_drm_fimd.c     |  200
 drivers/gpu/drm/exynos/exynos_drm_g2d.c      |  495
 drivers/gpu/drm/exynos/exynos_drm_gem.c      |  435
 drivers/gpu/drm/exynos/exynos_drm_gem.h      |   58
 drivers/gpu/drm/exynos/exynos_drm_gsc.c      | 1870
 drivers/gpu/drm/exynos/exynos_drm_gsc.h      |   38
 drivers/gpu/drm/exynos/exynos_drm_hdmi.c     |   59
 drivers/gpu/drm/exynos/exynos_drm_hdmi.h     |    3
 drivers/gpu/drm/exynos/exynos_drm_iommu.c    |  150
 drivers/gpu/drm/exynos/exynos_drm_iommu.h    |   85
 drivers/gpu/drm/exynos/exynos_drm_ipp.c      | 2060
 drivers/gpu/drm/exynos/exynos_drm_ipp.h      |  266
 drivers/gpu/drm/exynos/exynos_drm_plane.c    |   12
 drivers/gpu/drm/exynos/exynos_drm_rotator.c  |  855
 drivers/gpu/drm/exynos/exynos_drm_rotator.h  |   33
 drivers/gpu/drm/exynos/exynos_drm_vidi.c     |   26
 drivers/gpu/drm/exynos/exynos_hdmi.c         |  324
 drivers/gpu/drm/exynos/exynos_hdmiphy.c      |    4
 drivers/gpu/drm/exynos/exynos_mixer.c        |  376
 drivers/gpu/drm/exynos/regs-fimc.h           |  669
 drivers/gpu/drm/exynos/regs-gsc.h            |  284
 drivers/gpu/drm/exynos/regs-hdmi.h           |   22
 drivers/gpu/drm/exynos/regs-rotator.h        |   73
 38 files changed, 10290 insertions(+), 823 deletions(-)
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index fc345d4ebb03..1d1f1e5e33f0 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -10,6 +10,12 @@ config DRM_EXYNOS
 	  Choose this option if you have a Samsung SoC EXYNOS chipset.
 	  If M is selected the module will be called exynosdrm.
 
+config DRM_EXYNOS_IOMMU
+	bool "EXYNOS DRM IOMMU Support"
+	depends on DRM_EXYNOS && EXYNOS_IOMMU && ARM_DMA_USE_IOMMU
+	help
+	  Choose this option if you want to use IOMMU feature for DRM.
+
 config DRM_EXYNOS_DMABUF
 	bool "EXYNOS DRM DMABUF"
 	depends on DRM_EXYNOS
@@ -39,3 +45,27 @@ config DRM_EXYNOS_G2D
 	depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D
 	help
 	  Choose this option if you want to use Exynos G2D for DRM.
+
+config DRM_EXYNOS_IPP
+	bool "Exynos DRM IPP"
+	depends on DRM_EXYNOS
+	help
+	  Choose this option if you want to use IPP feature for DRM.
+
+config DRM_EXYNOS_FIMC
+	bool "Exynos DRM FIMC"
+	depends on DRM_EXYNOS_IPP
+	help
+	  Choose this option if you want to use Exynos FIMC for DRM.
+
+config DRM_EXYNOS_ROTATOR
+	bool "Exynos DRM Rotator"
+	depends on DRM_EXYNOS_IPP
+	help
+	  Choose this option if you want to use Exynos Rotator for DRM.
+
+config DRM_EXYNOS_GSC
+	bool "Exynos DRM GSC"
+	depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5
+	help
+	  Choose this option if you want to use Exynos GSC for DRM.
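
The new options form a small dependency chain: FIMC, Rotator and GSC only become visible once DRM_EXYNOS_IPP is set, and GSC additionally requires ARCH_EXYNOS5. As a quick reference, a .config fragment enabling the whole post-processing stack could look like the following (illustrative only, not part of the patch):

	CONFIG_DRM_EXYNOS=y
	CONFIG_DRM_EXYNOS_IOMMU=y
	CONFIG_DRM_EXYNOS_IPP=y
	CONFIG_DRM_EXYNOS_FIMC=y
	CONFIG_DRM_EXYNOS_ROTATOR=y
	CONFIG_DRM_EXYNOS_GSC=y
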
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index eb651ca8e2a8..639b49e1ec05 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -8,6 +8,7 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
 		exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
 		exynos_drm_plane.o
 
+exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \
@@ -15,5 +16,9 @@ exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \
 		exynos_drm_hdmi.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_G2D) += exynos_drm_g2d.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_IPP) += exynos_drm_ipp.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_FIMC) += exynos_drm_fimc.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_ROTATOR) += exynos_drm_rotator.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_GSC) += exynos_drm_gsc.o
 
 obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
index 37e6ec704e1d..bef43e0342a6 100644
--- a/drivers/gpu/drm/exynos/exynos_ddc.c
+++ b/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -48,6 +48,7 @@ static struct i2c_device_id ddc_idtable[] = {
 	{ },
 };
 
+#ifdef CONFIG_OF
 static struct of_device_id hdmiddc_match_types[] = {
 	{
 		.compatible = "samsung,exynos5-hdmiddc",
@@ -55,12 +56,13 @@ static struct of_device_id hdmiddc_match_types[] = {
 		/* end node */
 	}
 };
+#endif
 
 struct i2c_driver ddc_driver = {
 	.driver = {
 		.name = "exynos-hdmiddc",
 		.owner = THIS_MODULE,
-		.of_match_table = hdmiddc_match_types,
+		.of_match_table = of_match_ptr(hdmiddc_match_types),
 	},
 	.id_table	= ddc_idtable,
 	.probe		= s5p_ddc_probe,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 118c117b3226..9601bad47a2e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -33,89 +33,64 @@
 static int lowlevel_buffer_allocate(struct drm_device *dev,
 		unsigned int flags, struct exynos_drm_gem_buf *buf)
 {
-	dma_addr_t start_addr;
-	unsigned int npages, i = 0;
-	struct scatterlist *sgl;
 	int ret = 0;
+	enum dma_attr attr;
+	unsigned int nr_pages;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (IS_NONCONTIG_BUFFER(flags)) {
-		DRM_DEBUG_KMS("not support allocation type.\n");
-		return -EINVAL;
-	}
-
 	if (buf->dma_addr) {
 		DRM_DEBUG_KMS("already allocated.\n");
 		return 0;
 	}
 
-	if (buf->size >= SZ_1M) {
-		npages = buf->size >> SECTION_SHIFT;
-		buf->page_size = SECTION_SIZE;
-	} else if (buf->size >= SZ_64K) {
-		npages = buf->size >> 16;
-		buf->page_size = SZ_64K;
-	} else {
-		npages = buf->size >> PAGE_SHIFT;
-		buf->page_size = PAGE_SIZE;
-	}
+	init_dma_attrs(&buf->dma_attrs);
 
-	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
-	if (!buf->sgt) {
-		DRM_ERROR("failed to allocate sg table.\n");
-		return -ENOMEM;
-	}
+	/*
+	 * if EXYNOS_BO_CONTIG, fully physically contiguous memory
+	 * region will be allocated else physically contiguous
+	 * as possible.
+	 */
+	if (flags & EXYNOS_BO_CONTIG)
+		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);
 
-	ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
-	if (ret < 0) {
-		DRM_ERROR("failed to initialize sg table.\n");
-		kfree(buf->sgt);
-		buf->sgt = NULL;
-		return -ENOMEM;
-	}
+	/*
+	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
+	 * else cachable mapping.
+	 */
+	if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
+		attr = DMA_ATTR_WRITE_COMBINE;
+	else
+		attr = DMA_ATTR_NON_CONSISTENT;
 
-	buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
-			&buf->dma_addr, GFP_KERNEL);
-	if (!buf->kvaddr) {
-		DRM_ERROR("failed to allocate buffer.\n");
-		ret = -ENOMEM;
-		goto err1;
-	}
+	dma_set_attr(attr, &buf->dma_attrs);
+	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);
 
-	buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
+	buf->pages = dma_alloc_attrs(dev->dev, buf->size,
+			&buf->dma_addr, GFP_KERNEL, &buf->dma_attrs);
 	if (!buf->pages) {
-		DRM_ERROR("failed to allocate pages.\n");
-		ret = -ENOMEM;
-		goto err2;
+		DRM_ERROR("failed to allocate buffer.\n");
+		return -ENOMEM;
 	}
 
-	sgl = buf->sgt->sgl;
-	start_addr = buf->dma_addr;
-
-	while (i < npages) {
-		buf->pages[i] = phys_to_page(start_addr);
-		sg_set_page(sgl, buf->pages[i], buf->page_size, 0);
-		sg_dma_address(sgl) = start_addr;
-		start_addr += buf->page_size;
-		sgl = sg_next(sgl);
-		i++;
+	nr_pages = buf->size >> PAGE_SHIFT;
+	buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
+	if (!buf->sgt) {
+		DRM_ERROR("failed to get sg table.\n");
+		ret = -ENOMEM;
+		goto err_free_attrs;
 	}
 
-	DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
-			(unsigned long)buf->kvaddr,
+	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
 			(unsigned long)buf->dma_addr,
 			buf->size);
 
 	return ret;
-err2:
-	dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
-			(dma_addr_t)buf->dma_addr);
+
+err_free_attrs:
+	dma_free_attrs(dev->dev, buf->size, buf->pages,
+			(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
 	buf->dma_addr = (dma_addr_t)NULL;
-err1:
-	sg_free_table(buf->sgt);
-	kfree(buf->sgt);
-	buf->sgt = NULL;
 
 	return ret;
 }
@@ -125,23 +100,12 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
 {
 	DRM_DEBUG_KMS("%s.\n", __FILE__);
 
-	/*
-	 * release only physically continuous memory and
-	 * non-continuous memory would be released by exynos
-	 * gem framework.
-	 */
-	if (IS_NONCONTIG_BUFFER(flags)) {
-		DRM_DEBUG_KMS("not support allocation type.\n");
-		return;
-	}
-
 	if (!buf->dma_addr) {
 		DRM_DEBUG_KMS("dma_addr is invalid.\n");
 		return;
 	}
 
-	DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
-			(unsigned long)buf->kvaddr,
+	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
 			(unsigned long)buf->dma_addr,
 			buf->size);
 
@@ -150,11 +114,8 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
 	kfree(buf->sgt);
 	buf->sgt = NULL;
 
-	kfree(buf->pages);
-	buf->pages = NULL;
-
-	dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
-			(dma_addr_t)buf->dma_addr);
+	dma_free_attrs(dev->dev, buf->size, buf->pages,
+			(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
 	buf->dma_addr = (dma_addr_t)NULL;
 }
 
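
For readers unfamiliar with the dma-attrs interface this file moves to, here is a minimal sketch of the allocation pattern used above, against the 3.7-era DMA-mapping API (init_dma_attrs()/dma_set_attr() were later replaced by a plain unsigned long attrs argument). The device pointer and size are assumptions, not values from the patch:

	#include <linux/dma-attrs.h>
	#include <linux/dma-mapping.h>

	static struct page **alloc_wc_pages(struct device *dev, size_t size,
					    dma_addr_t *dma_addr,
					    struct dma_attrs *attrs)
	{
		init_dma_attrs(attrs);
		/* write-combined mapping, and no kernel linear mapping */
		dma_set_attr(DMA_ATTR_WRITE_COMBINE, attrs);
		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);

		/*
		 * with DMA_ATTR_NO_KERNEL_MAPPING the "cpu address" returned
		 * by dma_alloc_attrs() is an opaque cookie - on ARM it is the
		 * pages array - to be passed back to dma_mmap_attrs() and
		 * dma_free_attrs(), which is exactly how buf->pages is used
		 * in the hunks above.
		 */
		return dma_alloc_attrs(dev, size, dma_addr, GFP_KERNEL, attrs);
	}
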
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
index 3388e4eb4ba2..25cf16285033 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -34,12 +34,12 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
 void exynos_drm_fini_buf(struct drm_device *dev,
 				struct exynos_drm_gem_buf *buffer);
 
-/* allocate physical memory region and setup sgt and pages. */
+/* allocate physical memory region and setup sgt. */
 int exynos_drm_alloc_buf(struct drm_device *dev,
 				struct exynos_drm_gem_buf *buf,
 				unsigned int flags);
 
-/* release physical memory region, sgt and pages. */
+/* release physical memory region, and sgt. */
 void exynos_drm_free_buf(struct drm_device *dev,
 				unsigned int flags,
 				struct exynos_drm_gem_buf *buffer);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index fce245f64c4f..2efa4b031d73 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -236,16 +236,21 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
 		goto out;
 	}
 
+	spin_lock_irq(&dev->event_lock);
 	list_add_tail(&event->base.link,
 			&dev_priv->pageflip_event_list);
+	spin_unlock_irq(&dev->event_lock);
 
 	crtc->fb = fb;
 	ret = exynos_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y,
 					    NULL);
 	if (ret) {
 		crtc->fb = old_fb;
+
+		spin_lock_irq(&dev->event_lock);
 		drm_vblank_put(dev, exynos_crtc->pipe);
 		list_del(&event->base.link);
+		spin_unlock_irq(&dev->event_lock);
 
 		goto out;
 	}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index fae1f2ec886c..61d5a8402eb8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -30,70 +30,108 @@
 
 #include <linux/dma-buf.h>
 
-static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages,
-		unsigned int page_size)
+struct exynos_drm_dmabuf_attachment {
+	struct sg_table sgt;
+	enum dma_data_direction dir;
+};
+
+static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
+					struct device *dev,
+					struct dma_buf_attachment *attach)
 {
-	struct sg_table *sgt = NULL;
-	struct scatterlist *sgl;
-	int i, ret;
+	struct exynos_drm_dmabuf_attachment *exynos_attach;
 
-	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
-	if (!sgt)
-		goto out;
+	exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
+	if (!exynos_attach)
+		return -ENOMEM;
 
-	ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL);
-	if (ret)
-		goto err_free_sgt;
+	exynos_attach->dir = DMA_NONE;
+	attach->priv = exynos_attach;
 
-	if (page_size < PAGE_SIZE)
-		page_size = PAGE_SIZE;
+	return 0;
+}
 
-	for_each_sg(sgt->sgl, sgl, nr_pages, i)
-		sg_set_page(sgl, pages[i], page_size, 0);
+static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
+					struct dma_buf_attachment *attach)
+{
+	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
+	struct sg_table *sgt;
 
-	return sgt;
+	if (!exynos_attach)
+		return;
 
-err_free_sgt:
-	kfree(sgt);
-	sgt = NULL;
-out:
-	return NULL;
+	sgt = &exynos_attach->sgt;
+
+	if (exynos_attach->dir != DMA_NONE)
+		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
+				exynos_attach->dir);
+
+	sg_free_table(sgt);
+	kfree(exynos_attach);
+	attach->priv = NULL;
 }
 
 static struct sg_table *
 		exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
 					enum dma_data_direction dir)
 {
+	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
 	struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
 	struct drm_device *dev = gem_obj->base.dev;
 	struct exynos_drm_gem_buf *buf;
+	struct scatterlist *rd, *wr;
 	struct sg_table *sgt = NULL;
-	unsigned int npages;
-	int nents;
+	unsigned int i;
+	int nents, ret;
 
 	DRM_DEBUG_PRIME("%s\n", __FILE__);
 
-	mutex_lock(&dev->struct_mutex);
+	if (WARN_ON(dir == DMA_NONE))
+		return ERR_PTR(-EINVAL);
+
+	/* just return current sgt if already requested. */
+	if (exynos_attach->dir == dir)
+		return &exynos_attach->sgt;
+
+	/* reattaching is not allowed. */
+	if (WARN_ON(exynos_attach->dir != DMA_NONE))
+		return ERR_PTR(-EBUSY);
 
 	buf = gem_obj->buffer;
+	if (!buf) {
+		DRM_ERROR("buffer is null.\n");
+		return ERR_PTR(-ENOMEM);
+	}
 
-	/* there should always be pages allocated. */
-	if (!buf->pages) {
-		DRM_ERROR("pages is null.\n");
-		goto err_unlock;
+	sgt = &exynos_attach->sgt;
+
+	ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
+	if (ret) {
+		DRM_ERROR("failed to alloc sgt.\n");
+		return ERR_PTR(-ENOMEM);
 	}
 
-	npages = buf->size / buf->page_size;
+	mutex_lock(&dev->struct_mutex);
 
-	sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size);
-	if (!sgt) {
-		DRM_DEBUG_PRIME("exynos_pages_to_sg returned NULL!\n");
+	rd = buf->sgt->sgl;
+	wr = sgt->sgl;
+	for (i = 0; i < sgt->orig_nents; ++i) {
+		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+		rd = sg_next(rd);
+		wr = sg_next(wr);
+	}
+
+	nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
+	if (!nents) {
+		DRM_ERROR("failed to map sgl with iommu.\n");
+		sgt = ERR_PTR(-EIO);
 		goto err_unlock;
 	}
-	nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
 
-	DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n",
-			npages, buf->size, buf->page_size);
+	exynos_attach->dir = dir;
+	attach->priv = exynos_attach;
+
+	DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
 
 err_unlock:
 	mutex_unlock(&dev->struct_mutex);
@@ -104,10 +142,7 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
 					struct sg_table *sgt,
 					enum dma_data_direction dir)
 {
-	dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
-	sg_free_table(sgt);
-	kfree(sgt);
-	sgt = NULL;
+	/* Nothing to do. */
 }
 
 static void exynos_dmabuf_release(struct dma_buf *dmabuf)
@@ -169,6 +204,8 @@ static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
 }
 
 static struct dma_buf_ops exynos_dmabuf_ops = {
+	.attach			= exynos_gem_attach_dma_buf,
+	.detach			= exynos_gem_detach_dma_buf,
 	.map_dma_buf		= exynos_gem_map_dma_buf,
 	.unmap_dma_buf		= exynos_gem_unmap_dma_buf,
 	.kmap			= exynos_gem_dmabuf_kmap,
@@ -196,7 +233,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
 	struct scatterlist *sgl;
 	struct exynos_drm_gem_obj *exynos_gem_obj;
 	struct exynos_drm_gem_buf *buffer;
-	struct page *page;
 	int ret;
 
 	DRM_DEBUG_PRIME("%s\n", __FILE__);
@@ -233,38 +269,27 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
 		goto err_unmap_attach;
 	}
 
-	buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
-	if (!buffer->pages) {
-		DRM_ERROR("failed to allocate pages.\n");
-		ret = -ENOMEM;
-		goto err_free_buffer;
-	}
-
 	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
 	if (!exynos_gem_obj) {
 		ret = -ENOMEM;
-		goto err_free_pages;
+		goto err_free_buffer;
 	}
 
 	sgl = sgt->sgl;
 
-	if (sgt->nents == 1) {
-		buffer->dma_addr = sg_dma_address(sgt->sgl);
-		buffer->size = sg_dma_len(sgt->sgl);
+	buffer->size = dma_buf->size;
+	buffer->dma_addr = sg_dma_address(sgl);
 
+	if (sgt->nents == 1) {
 		/* always physically continuous memory if sgt->nents is 1. */
 		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
 	} else {
-		unsigned int i = 0;
-
-		buffer->dma_addr = sg_dma_address(sgl);
-		while (i < sgt->nents) {
-			buffer->pages[i] = sg_page(sgl);
-			buffer->size += sg_dma_len(sgl);
-			sgl = sg_next(sgl);
-			i++;
-		}
-
+		/*
+		 * this case could be CONTIG or NONCONTIG type but for now
+		 * sets NONCONTIG.
+		 * TODO. we have to find a way that exporter can notify
+		 * the type of its own buffer to importer.
+		 */
 		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
 	}
 
@@ -277,9 +302,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
 
 	return &exynos_gem_obj->base;
 
-err_free_pages:
-	kfree(buffer->pages);
-	buffer->pages = NULL;
 err_free_buffer:
 	kfree(buffer);
 	buffer = NULL;
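
The attach/detach hooks above implement a common dma-buf idiom: the exporter's sg_table is cloned and mapped once per attachment, then cached so repeated map_dma_buf calls in the same direction simply return the existing table. A condensed sketch of just the cloning step, with illustrative names:

	static int clone_sgt(struct sg_table *dst, struct sg_table *src)
	{
		struct scatterlist *rd, *wr;
		unsigned int i;
		int ret;

		ret = sg_alloc_table(dst, src->orig_nents, GFP_KERNEL);
		if (ret)
			return ret;

		rd = src->sgl;
		wr = dst->sgl;
		for (i = 0; i < dst->orig_nents; ++i) {
			/*
			 * copy page/length/offset only; dma addresses are
			 * filled in later by dma_map_sg() for the importer.
			 */
			sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
			rd = sg_next(rd);
			wr = sg_next(wr);
		}

		return 0;
	}
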
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 1de7baafddd0..e0a8e8024b01 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -40,6 +40,8 @@
 #include "exynos_drm_vidi.h"
 #include "exynos_drm_dmabuf.h"
 #include "exynos_drm_g2d.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_iommu.h"
 
 #define DRIVER_NAME	"exynos"
 #define DRIVER_DESC	"Samsung SoC DRM"
@@ -49,6 +51,9 @@
 
 #define VBLANK_OFF_DELAY	50000
 
+/* platform device pointer for exynos drm device. */
+static struct platform_device *exynos_drm_pdev;
+
 static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 {
 	struct exynos_drm_private *private;
@@ -66,6 +71,18 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 	INIT_LIST_HEAD(&private->pageflip_event_list);
 	dev->dev_private = (void *)private;
 
+	/*
+	 * create mapping to manage iommu table and set a pointer to iommu
+	 * mapping structure to iommu_mapping of private data.
+	 * also this iommu_mapping can be used to check if iommu is supported
+	 * or not.
+	 */
+	ret = drm_create_iommu_mapping(dev);
+	if (ret < 0) {
+		DRM_ERROR("failed to create iommu mapping.\n");
+		goto err_crtc;
+	}
+
 	drm_mode_config_init(dev);
 
 	/* init kms poll for handling hpd */
@@ -80,7 +97,7 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 	for (nr = 0; nr < MAX_CRTC; nr++) {
 		ret = exynos_drm_crtc_create(dev, nr);
 		if (ret)
-			goto err_crtc;
+			goto err_release_iommu_mapping;
 	}
 
 	for (nr = 0; nr < MAX_PLANE; nr++) {
@@ -89,12 +106,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 
 		plane = exynos_plane_init(dev, possible_crtcs, false);
 		if (!plane)
-			goto err_crtc;
+			goto err_release_iommu_mapping;
 	}
 
 	ret = drm_vblank_init(dev, MAX_CRTC);
 	if (ret)
-		goto err_crtc;
+		goto err_release_iommu_mapping;
 
 	/*
 	 * probe sub drivers such as display controller and hdmi driver,
@@ -126,6 +143,8 @@ err_drm_device:
 	exynos_drm_device_unregister(dev);
 err_vblank:
 	drm_vblank_cleanup(dev);
+err_release_iommu_mapping:
+	drm_release_iommu_mapping(dev);
 err_crtc:
 	drm_mode_config_cleanup(dev);
 	kfree(private);
@@ -142,6 +161,8 @@ static int exynos_drm_unload(struct drm_device *dev)
 	drm_vblank_cleanup(dev);
 	drm_kms_helper_poll_fini(dev);
 	drm_mode_config_cleanup(dev);
+
+	drm_release_iommu_mapping(dev);
 	kfree(dev->dev_private);
 
 	dev->dev_private = NULL;
@@ -229,6 +250,14 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
 			exynos_g2d_set_cmdlist_ioctl, DRM_UNLOCKED | DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC,
 			exynos_g2d_exec_ioctl, DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY,
+			exynos_drm_ipp_get_property, DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY,
+			exynos_drm_ipp_set_property, DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF,
+			exynos_drm_ipp_queue_buf, DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL,
+			exynos_drm_ipp_cmd_ctrl, DRM_UNLOCKED | DRM_AUTH),
 };
 
 static const struct file_operations exynos_drm_driver_fops = {
@@ -279,6 +308,7 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
 {
 	DRM_DEBUG_DRIVER("%s\n", __FILE__);
 
+	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 	exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls);
 
 	return drm_platform_init(&exynos_drm_driver, pdev);
@@ -324,6 +354,10 @@ static int __init exynos_drm_init(void)
 	ret = platform_driver_register(&exynos_drm_common_hdmi_driver);
 	if (ret < 0)
 		goto out_common_hdmi;
+
+	ret = exynos_platform_device_hdmi_register();
+	if (ret < 0)
+		goto out_common_hdmi_dev;
 #endif
 
 #ifdef CONFIG_DRM_EXYNOS_VIDI
@@ -338,24 +372,80 @@ static int __init exynos_drm_init(void)
 		goto out_g2d;
 #endif
 
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+	ret = platform_driver_register(&fimc_driver);
+	if (ret < 0)
+		goto out_fimc;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+	ret = platform_driver_register(&rotator_driver);
+	if (ret < 0)
+		goto out_rotator;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+	ret = platform_driver_register(&gsc_driver);
+	if (ret < 0)
+		goto out_gsc;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+	ret = platform_driver_register(&ipp_driver);
+	if (ret < 0)
+		goto out_ipp;
+#endif
+
 	ret = platform_driver_register(&exynos_drm_platform_driver);
 	if (ret < 0)
+		goto out_drm;
+
+	exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
+				NULL, 0);
+	if (IS_ERR_OR_NULL(exynos_drm_pdev)) {
+		ret = PTR_ERR(exynos_drm_pdev);
 		goto out;
+	}
 
 	return 0;
 
 out:
+	platform_driver_unregister(&exynos_drm_platform_driver);
+
+out_drm:
+#ifdef CONFIG_DRM_EXYNOS_IPP
+	platform_driver_unregister(&ipp_driver);
out_ipp:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+	platform_driver_unregister(&gsc_driver);
out_gsc:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+	platform_driver_unregister(&rotator_driver);
out_rotator:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+	platform_driver_unregister(&fimc_driver);
out_fimc:
+#endif
+
 #ifdef CONFIG_DRM_EXYNOS_G2D
 	platform_driver_unregister(&g2d_driver);
 out_g2d:
 #endif
 
 #ifdef CONFIG_DRM_EXYNOS_VIDI
-out_vidi:
 	platform_driver_unregister(&vidi_driver);
+out_vidi:
 #endif
 
 #ifdef CONFIG_DRM_EXYNOS_HDMI
+	exynos_platform_device_hdmi_unregister();
+out_common_hdmi_dev:
 	platform_driver_unregister(&exynos_drm_common_hdmi_driver);
 out_common_hdmi:
 	platform_driver_unregister(&mixer_driver);
@@ -375,13 +465,32 @@ static void __exit exynos_drm_exit(void)
 {
 	DRM_DEBUG_DRIVER("%s\n", __FILE__);
 
+	platform_device_unregister(exynos_drm_pdev);
+
 	platform_driver_unregister(&exynos_drm_platform_driver);
 
+#ifdef CONFIG_DRM_EXYNOS_IPP
+	platform_driver_unregister(&ipp_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+	platform_driver_unregister(&gsc_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+	platform_driver_unregister(&rotator_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+	platform_driver_unregister(&fimc_driver);
+#endif
+
 #ifdef CONFIG_DRM_EXYNOS_G2D
 	platform_driver_unregister(&g2d_driver);
 #endif
 
 #ifdef CONFIG_DRM_EXYNOS_HDMI
+	exynos_platform_device_hdmi_unregister();
 	platform_driver_unregister(&exynos_drm_common_hdmi_driver);
 	platform_driver_unregister(&mixer_driver);
 	platform_driver_unregister(&hdmi_driver);
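
The init path above follows the usual register-then-unwind-in-reverse pattern, with one goto label per registered driver so a failure midway unregisters everything that came before it. Condensed to two drivers, the shape is (illustrative names only):

	static int __init example_init(void)
	{
		int ret;

		ret = platform_driver_register(&first_driver);
		if (ret < 0)
			return ret;

		ret = platform_driver_register(&second_driver);
		if (ret < 0)
			goto out_first;

		return 0;

	out_first:
		/* undo earlier registrations in reverse order */
		platform_driver_unregister(&first_driver);
		return ret;
	}
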
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index a34231036496..f5a97745bf93 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -74,8 +74,6 @@ enum exynos_drm_output_type {
  * @commit: apply hardware specific overlay data to registers.
  * @enable: enable hardware specific overlay.
  * @disable: disable hardware specific overlay.
- * @wait_for_vblank: wait for vblank interrupt to make sure that
- *	hardware overlay is disabled.
  */
 struct exynos_drm_overlay_ops {
 	void (*mode_set)(struct device *subdrv_dev,
@@ -83,7 +81,6 @@ struct exynos_drm_overlay_ops {
 	void (*commit)(struct device *subdrv_dev, int zpos);
 	void (*enable)(struct device *subdrv_dev, int zpos);
 	void (*disable)(struct device *subdrv_dev, int zpos);
-	void (*wait_for_vblank)(struct device *subdrv_dev);
 };
 
 /*
@@ -110,7 +107,6 @@ struct exynos_drm_overlay_ops {
  * @pixel_format: fourcc pixel format of this overlay
  * @dma_addr: array of bus(accessed by dma) address to the memory region
  *	allocated for a overlay.
- * @vaddr: array of virtual memory addresss to this overlay.
  * @zpos: order of overlay layer(z position).
  * @default_win: a window to be enabled.
  * @color_key: color key on or off.
@@ -142,7 +138,6 @@ struct exynos_drm_overlay {
 	unsigned int pitch;
 	uint32_t pixel_format;
 	dma_addr_t dma_addr[MAX_FB_BUFFER];
-	void __iomem *vaddr[MAX_FB_BUFFER];
 	int zpos;
 
 	bool default_win;
@@ -186,6 +181,8 @@ struct exynos_drm_display_ops {
  * @commit: set current hw specific display mode to hw.
  * @enable_vblank: specific driver callback for enabling vblank interrupt.
  * @disable_vblank: specific driver callback for disabling vblank interrupt.
+ * @wait_for_vblank: wait for vblank interrupt to make sure that
+ *	hardware overlay is updated.
  */
 struct exynos_drm_manager_ops {
 	void (*dpms)(struct device *subdrv_dev, int mode);
@@ -200,6 +197,7 @@ struct exynos_drm_manager_ops {
 	void (*commit)(struct device *subdrv_dev);
 	int (*enable_vblank)(struct device *subdrv_dev);
 	void (*disable_vblank)(struct device *subdrv_dev);
+	void (*wait_for_vblank)(struct device *subdrv_dev);
 };
 
 /*
@@ -231,16 +229,28 @@ struct exynos_drm_g2d_private {
 	struct device		*dev;
 	struct list_head	inuse_cmdlist;
 	struct list_head	event_list;
-	struct list_head	gem_list;
-	unsigned int		gem_nr;
+	struct list_head	userptr_list;
+};
+
+struct exynos_drm_ipp_private {
+	struct device	*dev;
+	struct list_head	event_list;
 };
 
 struct drm_exynos_file_private {
 	struct exynos_drm_g2d_private	*g2d_priv;
+	struct exynos_drm_ipp_private	*ipp_priv;
 };
 
 /*
  * Exynos drm private structure.
+ *
+ * @da_start: start address to device address space.
+ *	with iommu, device address space starts from this address
+ *	otherwise default one.
+ * @da_space_size: size of device address space.
+ *	if 0 then default value is used for it.
+ * @da_space_order: order to device address space.
  */
 struct exynos_drm_private {
 	struct drm_fb_helper *fb_helper;
@@ -255,6 +265,10 @@ struct exynos_drm_private {
 	struct drm_crtc *crtc[MAX_CRTC];
 	struct drm_property *plane_zpos_property;
 	struct drm_property *crtc_mode_property;
+
+	unsigned long da_start;
+	unsigned long da_space_size;
+	unsigned long da_space_order;
 };
 
 /*
@@ -318,10 +332,25 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv);
 int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
 void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
 
+/*
+ * this function registers exynos drm hdmi platform device. It ensures only one
+ * instance of the device is created.
+ */
+extern int exynos_platform_device_hdmi_register(void);
+
+/*
+ * this function unregisters exynos drm hdmi platform device if it exists.
+ */
+void exynos_platform_device_hdmi_unregister(void);
+
 extern struct platform_driver fimd_driver;
 extern struct platform_driver hdmi_driver;
 extern struct platform_driver mixer_driver;
 extern struct platform_driver exynos_drm_common_hdmi_driver;
 extern struct platform_driver vidi_driver;
 extern struct platform_driver g2d_driver;
+extern struct platform_driver fimc_driver;
+extern struct platform_driver rotator_driver;
+extern struct platform_driver gsc_driver;
+extern struct platform_driver ipp_driver;
 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index f2df06c603f7..301485215a70 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -234,6 +234,32 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
 	exynos_encoder->dpms = DRM_MODE_DPMS_ON;
 }
 
+void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb)
+{
+	struct exynos_drm_encoder *exynos_encoder;
+	struct exynos_drm_manager_ops *ops;
+	struct drm_device *dev = fb->dev;
+	struct drm_encoder *encoder;
+
+	/*
+	 * make sure that overlay data are updated to real hardware
+	 * for all encoders.
+	 */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		exynos_encoder = to_exynos_encoder(encoder);
+		ops = exynos_encoder->manager->ops;
+
+		/*
+		 * wait for vblank interrupt
+		 * - this makes sure that overlay data are updated to
+		 *	real hardware.
+		 */
+		if (ops->wait_for_vblank)
+			ops->wait_for_vblank(exynos_encoder->manager->dev);
+	}
+}
+
+
 static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
 {
 	struct drm_plane *plane;
@@ -505,14 +531,4 @@ void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data)
 
 	if (overlay_ops && overlay_ops->disable)
 		overlay_ops->disable(manager->dev, zpos);
-
-	/*
-	 * wait for vblank interrupt
-	 * - this makes sure that hardware overlay is disabled to avoid
-	 * for the dma accesses to memory after gem buffer was released
-	 * because the setting for disabling the overlay will be updated
-	 * at vsync.
-	 */
-	if (overlay_ops && overlay_ops->wait_for_vblank)
-		overlay_ops->wait_for_vblank(manager->dev);
 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index 6470d9ddf5a1..88bb25a2a917 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -46,5 +46,6 @@ void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb);
 
 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 4ef4cd3f9936..5426cc5a5e8d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -30,10 +30,13 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <uapi/drm/exynos_drm.h>
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
+#include "exynos_drm_encoder.h"
 
 #define to_exynos_fb(x)	container_of(x, struct exynos_drm_fb, fb)
 
@@ -50,6 +53,32 @@ struct exynos_drm_fb {
 	struct exynos_drm_gem_obj	*exynos_gem_obj[MAX_FB_BUFFER];
 };
 
+static int check_fb_gem_memory_type(struct drm_device *drm_dev,
+				struct exynos_drm_gem_obj *exynos_gem_obj)
+{
+	unsigned int flags;
+
+	/*
+	 * if exynos drm driver supports iommu then framebuffer can use
+	 * all the buffer types.
+	 */
+	if (is_drm_iommu_supported(drm_dev))
+		return 0;
+
+	flags = exynos_gem_obj->flags;
+
+	/*
+	 * without iommu support, not support physically non-continuous memory
+	 * for framebuffer.
+	 */
+	if (IS_NONCONTIG_BUFFER(flags)) {
+		DRM_ERROR("cannot use this gem memory type for fb.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
 {
 	struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
@@ -57,6 +86,9 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
+	/* make sure that overlay data are updated before releasing fb. */
+	exynos_drm_encoder_complete_scanout(fb);
+
 	drm_framebuffer_cleanup(fb);
 
 	for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) {
@@ -128,23 +160,32 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
 			    struct drm_gem_object *obj)
 {
 	struct exynos_drm_fb *exynos_fb;
+	struct exynos_drm_gem_obj *exynos_gem_obj;
 	int ret;
 
+	exynos_gem_obj = to_exynos_gem_obj(obj);
+
+	ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+	if (ret < 0) {
+		DRM_ERROR("cannot use this gem memory type for fb.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
 	exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
 	if (!exynos_fb) {
 		DRM_ERROR("failed to allocate exynos drm framebuffer\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
+	drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+	exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
+
 	ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
 	if (ret) {
 		DRM_ERROR("failed to initialize framebuffer\n");
 		return ERR_PTR(ret);
 	}
 
-	drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
-	exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
-
 	return &exynos_fb->fb;
 }
 
@@ -190,9 +231,8 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 		      struct drm_mode_fb_cmd2 *mode_cmd)
 {
 	struct drm_gem_object *obj;
-	struct drm_framebuffer *fb;
 	struct exynos_drm_fb *exynos_fb;
-	int i;
+	int i, ret;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -202,30 +242,56 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 		return ERR_PTR(-ENOENT);
 	}
 
-	fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj);
-	if (IS_ERR(fb)) {
-		drm_gem_object_unreference_unlocked(obj);
-		return fb;
+	exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
+	if (!exynos_fb) {
+		DRM_ERROR("failed to allocate exynos drm framebuffer\n");
+		return ERR_PTR(-ENOMEM);
 	}
 
-	exynos_fb = to_exynos_fb(fb);
+	drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+	exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
 	exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd);
 
 	DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
 
 	for (i = 1; i < exynos_fb->buf_cnt; i++) {
+		struct exynos_drm_gem_obj *exynos_gem_obj;
+		int ret;
+
 		obj = drm_gem_object_lookup(dev, file_priv,
 				mode_cmd->handles[i]);
 		if (!obj) {
 			DRM_ERROR("failed to lookup gem object\n");
-			exynos_drm_fb_destroy(fb);
+			kfree(exynos_fb);
 			return ERR_PTR(-ENOENT);
 		}
 
+		exynos_gem_obj = to_exynos_gem_obj(obj);
+
+		ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+		if (ret < 0) {
+			DRM_ERROR("cannot use this gem memory type for fb.\n");
+			kfree(exynos_fb);
+			return ERR_PTR(ret);
+		}
+
 		exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
 	}
 
-	return fb;
+	ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
+	if (ret) {
+		for (i = 0; i < exynos_fb->buf_cnt; i++) {
+			struct exynos_drm_gem_obj *gem_obj;
+
+			gem_obj = exynos_fb->exynos_gem_obj[i];
+			drm_gem_object_unreference_unlocked(&gem_obj->base);
+		}
+
+		kfree(exynos_fb);
+		return ERR_PTR(ret);
+	}
+
+	return &exynos_fb->fb;
 }
 
 struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
@@ -243,9 +309,7 @@ struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
 	if (!buffer)
 		return NULL;
 
-	DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
-			(unsigned long)buffer->kvaddr,
-			(unsigned long)buffer->dma_addr);
+	DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)buffer->dma_addr);
 
 	return buffer;
 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index e7466c4414cb..f433eb7533a9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -46,8 +46,38 @@ struct exynos_drm_fbdev {
46 struct exynos_drm_gem_obj *exynos_gem_obj; 46 struct exynos_drm_gem_obj *exynos_gem_obj;
47}; 47};
48 48
49static int exynos_drm_fb_mmap(struct fb_info *info,
50 struct vm_area_struct *vma)
51{
52 struct drm_fb_helper *helper = info->par;
53 struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
54 struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
55 struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
56 unsigned long vm_size;
57 int ret;
58
59 DRM_DEBUG_KMS("%s\n", __func__);
60
61 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
62
63 vm_size = vma->vm_end - vma->vm_start;
64
65 if (vm_size > buffer->size)
66 return -EINVAL;
67
68 ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
69 buffer->dma_addr, buffer->size, &buffer->dma_attrs);
70 if (ret < 0) {
71 DRM_ERROR("failed to mmap.\n");
72 return ret;
73 }
74
75 return 0;
76}
77
49static struct fb_ops exynos_drm_fb_ops = { 78static struct fb_ops exynos_drm_fb_ops = {
50 .owner = THIS_MODULE, 79 .owner = THIS_MODULE,
80 .fb_mmap = exynos_drm_fb_mmap,
51 .fb_fillrect = cfb_fillrect, 81 .fb_fillrect = cfb_fillrect,
52 .fb_copyarea = cfb_copyarea, 82 .fb_copyarea = cfb_copyarea,
53 .fb_imageblit = cfb_imageblit, 83 .fb_imageblit = cfb_imageblit,
@@ -79,6 +109,17 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
79 return -EFAULT; 109 return -EFAULT;
80 } 110 }
81 111
112 /* map pages with kernel virtual space. */
113 if (!buffer->kvaddr) {
114 unsigned int nr_pages = buffer->size >> PAGE_SHIFT;
115 buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP,
116 pgprot_writecombine(PAGE_KERNEL));
117 if (!buffer->kvaddr) {
118 DRM_ERROR("failed to map pages to kernel space.\n");
119 return -EIO;
120 }
121 }
122
82 /* buffer count to framebuffer always is 1 at booting time. */ 123 /* buffer count to framebuffer always is 1 at booting time. */
83 exynos_drm_fb_set_buf_cnt(fb, 1); 124 exynos_drm_fb_set_buf_cnt(fb, 1);
84 125
@@ -87,8 +128,8 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
87 128
88 dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr; 129 dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
89 fbi->screen_base = buffer->kvaddr + offset; 130 fbi->screen_base = buffer->kvaddr + offset;
90 fbi->fix.smem_start = (unsigned long)(page_to_phys(buffer->pages[0]) + 131 fbi->fix.smem_start = (unsigned long)
91 offset); 132 (page_to_phys(sg_page(buffer->sgt->sgl)) + offset);
92 fbi->screen_size = size; 133 fbi->screen_size = size;
93 fbi->fix.smem_len = size; 134 fbi->fix.smem_len = size;
94 135
@@ -134,7 +175,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
134 exynos_gem_obj = exynos_drm_gem_create(dev, 0, size); 175 exynos_gem_obj = exynos_drm_gem_create(dev, 0, size);
135 if (IS_ERR(exynos_gem_obj)) { 176 if (IS_ERR(exynos_gem_obj)) {
136 ret = PTR_ERR(exynos_gem_obj); 177 ret = PTR_ERR(exynos_gem_obj);
137 goto out; 178 goto err_release_framebuffer;
138 } 179 }
139 180
140 exynos_fbdev->exynos_gem_obj = exynos_gem_obj; 181 exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
@@ -144,7 +185,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
144 if (IS_ERR_OR_NULL(helper->fb)) { 185 if (IS_ERR_OR_NULL(helper->fb)) {
145 DRM_ERROR("failed to create drm framebuffer.\n"); 186 DRM_ERROR("failed to create drm framebuffer.\n");
146 ret = PTR_ERR(helper->fb); 187 ret = PTR_ERR(helper->fb);
147 goto out; 188 goto err_destroy_gem;
148 } 189 }
149 190
150 helper->fbdev = fbi; 191 helper->fbdev = fbi;
@@ -156,14 +197,24 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
156 ret = fb_alloc_cmap(&fbi->cmap, 256, 0); 197 ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
157 if (ret) { 198 if (ret) {
158 DRM_ERROR("failed to allocate cmap.\n"); 199 DRM_ERROR("failed to allocate cmap.\n");
159 goto out; 200 goto err_destroy_framebuffer;
160 } 201 }
161 202
162 ret = exynos_drm_fbdev_update(helper, helper->fb); 203 ret = exynos_drm_fbdev_update(helper, helper->fb);
163 if (ret < 0) { 204 if (ret < 0)
164 fb_dealloc_cmap(&fbi->cmap); 205 goto err_dealloc_cmap;
165 goto out; 206
166 } 207 mutex_unlock(&dev->struct_mutex);
208 return ret;
209
210err_dealloc_cmap:
211 fb_dealloc_cmap(&fbi->cmap);
212err_destroy_framebuffer:
213 drm_framebuffer_cleanup(helper->fb);
214err_destroy_gem:
215 exynos_drm_gem_destroy(exynos_gem_obj);
216err_release_framebuffer:
217 framebuffer_release(fbi);
167 218
168/* 219/*
169 * if failed, all resources allocated above would be released by 220 * if failed, all resources allocated above would be released by
@@ -265,8 +316,13 @@ err_init:
265static void exynos_drm_fbdev_destroy(struct drm_device *dev, 316static void exynos_drm_fbdev_destroy(struct drm_device *dev,
266 struct drm_fb_helper *fb_helper) 317 struct drm_fb_helper *fb_helper)
267{ 318{
319 struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
320 struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
268 struct drm_framebuffer *fb; 321 struct drm_framebuffer *fb;
269 322
323 if (exynos_gem_obj->buffer->kvaddr)
324 vunmap(exynos_gem_obj->buffer->kvaddr);
325
270 /* release drm framebuffer and real buffer */ 326 /* release drm framebuffer and real buffer */
271 if (fb_helper->fb && fb_helper->fb->funcs) { 327 if (fb_helper->fb && fb_helper->fb->funcs) {
272 fb = fb_helper->fb; 328 fb = fb_helper->fb;
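
A note on the vmap() call added to exynos_drm_fbdev_update(): since the GEM buffer is now allocated with DMA_ATTR_NO_KERNEL_MAPPING, no kernel virtual address exists until fbdev creates one itself for the console. A minimal sketch of that deferred mapping, assuming a pages array like the one returned by the allocator earlier in this series:

	#include <linux/vmalloc.h>

	static void *map_fbdev_buffer(struct page **pages, size_t size)
	{
		/* write-combined, matching the underlying DMA allocation */
		return vmap(pages, size >> PAGE_SHIFT, VM_MAP,
				pgprot_writecombine(PAGE_KERNEL));
	}
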
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
new file mode 100644
index 000000000000..61ea24296b52
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -0,0 +1,2001 @@
1/*
2 * Copyright (C) 2012 Samsung Electronics Co.Ltd
3 * Authors:
4 * Eunchul Kim <chulspro.kim@samsung.com>
5 * Jinyoung Jeon <jy0.jeon@samsung.com>
6 * Sangmin Lee <lsmin.lee@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 */
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/clk.h>
18#include <linux/pm_runtime.h>
19#include <plat/map-base.h>
20
21#include <drm/drmP.h>
22#include <drm/exynos_drm.h>
23#include "regs-fimc.h"
24#include "exynos_drm_ipp.h"
25#include "exynos_drm_fimc.h"
26
27/*
28 * FIMC is stand for Fully Interactive Mobile Camera and
29 * supports image scaler/rotator and input/output DMA operations.
30 * input DMA reads image data from the memory.
31 * output DMA writes image data to memory.
32 * FIMC supports image rotation and image effect functions.
33 *
34 * M2M operation : supports crop/scale/rotation/csc so on.
35 * Memory ----> FIMC H/W ----> Memory.
36 * Writeback operation : supports cloned screen with FIMD.
37 * FIMD ----> FIMC H/W ----> Memory.
38 * Output operation : supports direct display using local path.
39 * Memory ----> FIMC H/W ----> FIMD.
40 */
41
42/*
43 * TODO
44 * 1. check suspend/resume api if needed.
45 * 2. need to check use case platform_device_id.
46 * 3. check src/dst size with, height.
47 * 4. added check_prepare api for right register.
48 * 5. need to add supported list in prop_list.
49 * 6. check prescaler/scaler optimization.
50 */
51
52#define FIMC_MAX_DEVS 4
53#define FIMC_MAX_SRC 2
54#define FIMC_MAX_DST 32
55#define FIMC_SHFACTOR 10
56#define FIMC_BUF_STOP 1
57#define FIMC_BUF_START 2
58#define FIMC_REG_SZ 32
59#define FIMC_WIDTH_ITU_709 1280
60#define FIMC_REFRESH_MAX 60
61#define FIMC_REFRESH_MIN 12
62#define FIMC_CROP_MAX 8192
63#define FIMC_CROP_MIN 32
64#define FIMC_SCALE_MAX 4224
65#define FIMC_SCALE_MIN 32
66
67#define get_fimc_context(dev) platform_get_drvdata(to_platform_device(dev))
 68#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
 69 struct fimc_context, ippdrv)
70#define fimc_read(offset) readl(ctx->regs + (offset))
71#define fimc_write(cfg, offset) writel(cfg, ctx->regs + (offset))
72
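
Note that fimc_read() and fimc_write() expand to expressions that dereference a variable named ctx, so they compile only inside functions that already have a struct fimc_context *ctx in scope. A minimal sketch of the same accessors written as explicit inline helpers (fimc_reg_read/fimc_reg_write are illustrative names, not part of the driver):

	static inline u32 fimc_reg_read(struct fimc_context *ctx, u32 offset)
	{
		return readl(ctx->regs + offset);
	}

	static inline void fimc_reg_write(struct fimc_context *ctx, u32 cfg,
					  u32 offset)
	{
		writel(cfg, ctx->regs + offset);
	}
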
73enum fimc_wb {
74 FIMC_WB_NONE,
75 FIMC_WB_A,
76 FIMC_WB_B,
77};
78
79/*
 80 * A structure of scaler settings.
 81 *
 82 * @range: color range, narrow or wide.
 83 * @bypass: whether the scaler path is bypassed.
84 * @up_h: horizontal scale up.
85 * @up_v: vertical scale up.
86 * @hratio: horizontal ratio.
87 * @vratio: vertical ratio.
88 */
89struct fimc_scaler {
90 bool range;
91 bool bypass;
92 bool up_h;
93 bool up_v;
94 u32 hratio;
95 u32 vratio;
96};
97
98/*
99 * A structure of scaler capability.
100 *
101 * See user manual table 43-1.
102 * @in_hori: scaler input horizontal size.
103 * @bypass: scaler bypass mode.
104 * @dst_h_wo_rot: target horizontal size without output rotation.
105 * @dst_h_rot: target horizontal size with output rotation.
106 * @rl_w_wo_rot: real width without input rotation.
107 * @rl_h_rot: real height with input rotation.
108 */
109struct fimc_capability {
110 /* scaler */
111 u32 in_hori;
112 u32 bypass;
113 /* output rotator */
114 u32 dst_h_wo_rot;
115 u32 dst_h_rot;
116 /* input rotator */
117 u32 rl_w_wo_rot;
118 u32 rl_h_rot;
119};
120
121/*
122 * A structure of fimc driver data.
123 *
124 * @parent_clk: name of parent clock.
125 */
126struct fimc_driverdata {
127 char *parent_clk;
128};
129
130/*
131 * A structure of fimc context.
132 *
133 * @ippdrv: common ipp driver structure.
134 * @regs_res: register resources.
135 * @regs: memory mapped io registers.
136 * @lock: locking of operations.
137 * @sclk_fimc_clk: fimc source clock.
138 * @fimc_clk: fimc clock.
139 * @wb_clk: writeback A clock.
140 * @wb_b_clk: writeback B clock.
141 * @sc: scaler information.
142 * @ddata: fimc driver data.
143 * @pol: polarity of writeback.
144 * @id: fimc id.
145 * @irq: irq number.
146 * @suspended: whether the device is suspended.
148 */
149struct fimc_context {
150 struct exynos_drm_ippdrv ippdrv;
151 struct resource *regs_res;
152 void __iomem *regs;
153 struct mutex lock;
154 struct clk *sclk_fimc_clk;
155 struct clk *fimc_clk;
156 struct clk *wb_clk;
157 struct clk *wb_b_clk;
158 struct fimc_scaler sc;
159 struct fimc_driverdata *ddata;
160 struct exynos_drm_ipp_pol pol;
161 int id;
162 int irq;
163 bool suspended;
164};
165
166static void fimc_sw_reset(struct fimc_context *ctx, bool pattern)
167{
168 u32 cfg;
169
170 DRM_DEBUG_KMS("%s:pattern[%d]\n", __func__, pattern);
171
172 cfg = fimc_read(EXYNOS_CISRCFMT);
173 cfg |= EXYNOS_CISRCFMT_ITU601_8BIT;
174 if (pattern)
175 cfg |= EXYNOS_CIGCTRL_TESTPATTERN_COLOR_BAR;
176
177 fimc_write(cfg, EXYNOS_CISRCFMT);
178
179 /* s/w reset */
180 cfg = fimc_read(EXYNOS_CIGCTRL);
181 cfg |= (EXYNOS_CIGCTRL_SWRST);
182 fimc_write(cfg, EXYNOS_CIGCTRL);
183
184 /* s/w reset complete */
185 cfg = fimc_read(EXYNOS_CIGCTRL);
186 cfg &= ~EXYNOS_CIGCTRL_SWRST;
187 fimc_write(cfg, EXYNOS_CIGCTRL);
188
189 /* reset sequence */
190 fimc_write(0x0, EXYNOS_CIFCNTSEQ);
191}
192
193static void fimc_set_camblk_fimd0_wb(struct fimc_context *ctx)
194{
195 u32 camblk_cfg;
196
197 DRM_DEBUG_KMS("%s\n", __func__);
198
199 camblk_cfg = readl(SYSREG_CAMERA_BLK);
200 camblk_cfg &= ~(SYSREG_FIMD0WB_DEST_MASK);
201 camblk_cfg |= ctx->id << (SYSREG_FIMD0WB_DEST_SHIFT);
202
203 writel(camblk_cfg, SYSREG_CAMERA_BLK);
204}
205
206static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
207{
208 u32 cfg;
209
210 DRM_DEBUG_KMS("%s:wb[%d]\n", __func__, wb);
211
212 cfg = fimc_read(EXYNOS_CIGCTRL);
213 cfg &= ~(EXYNOS_CIGCTRL_TESTPATTERN_MASK |
214 EXYNOS_CIGCTRL_SELCAM_ITU_MASK |
215 EXYNOS_CIGCTRL_SELCAM_MIPI_MASK |
216 EXYNOS_CIGCTRL_SELCAM_FIMC_MASK |
217 EXYNOS_CIGCTRL_SELWB_CAMIF_MASK |
218 EXYNOS_CIGCTRL_SELWRITEBACK_MASK);
219
220 switch (wb) {
221 case FIMC_WB_A:
222 cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_A |
223 EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
224 break;
225 case FIMC_WB_B:
226 cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_B |
227 EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
228 break;
229 case FIMC_WB_NONE:
230 default:
231 cfg |= (EXYNOS_CIGCTRL_SELCAM_ITU_A |
232 EXYNOS_CIGCTRL_SELWRITEBACK_A |
233 EXYNOS_CIGCTRL_SELCAM_MIPI_A |
234 EXYNOS_CIGCTRL_SELCAM_FIMC_ITU);
235 break;
236 }
237
238 fimc_write(cfg, EXYNOS_CIGCTRL);
239}
240
241static void fimc_set_polarity(struct fimc_context *ctx,
242 struct exynos_drm_ipp_pol *pol)
243{
244 u32 cfg;
245
246 DRM_DEBUG_KMS("%s:inv_pclk[%d]inv_vsync[%d]\n",
247 __func__, pol->inv_pclk, pol->inv_vsync);
248 DRM_DEBUG_KMS("%s:inv_href[%d]inv_hsync[%d]\n",
249 __func__, pol->inv_href, pol->inv_hsync);
250
251 cfg = fimc_read(EXYNOS_CIGCTRL);
252 cfg &= ~(EXYNOS_CIGCTRL_INVPOLPCLK | EXYNOS_CIGCTRL_INVPOLVSYNC |
253 EXYNOS_CIGCTRL_INVPOLHREF | EXYNOS_CIGCTRL_INVPOLHSYNC);
254
255 if (pol->inv_pclk)
256 cfg |= EXYNOS_CIGCTRL_INVPOLPCLK;
257 if (pol->inv_vsync)
258 cfg |= EXYNOS_CIGCTRL_INVPOLVSYNC;
259 if (pol->inv_href)
260 cfg |= EXYNOS_CIGCTRL_INVPOLHREF;
261 if (pol->inv_hsync)
262 cfg |= EXYNOS_CIGCTRL_INVPOLHSYNC;
263
264 fimc_write(cfg, EXYNOS_CIGCTRL);
265}
266
267static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable)
268{
269 u32 cfg;
270
271 DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
272
273 cfg = fimc_read(EXYNOS_CIGCTRL);
274 if (enable)
275 cfg |= EXYNOS_CIGCTRL_CAM_JPEG;
276 else
277 cfg &= ~EXYNOS_CIGCTRL_CAM_JPEG;
278
279 fimc_write(cfg, EXYNOS_CIGCTRL);
280}
281
282static void fimc_handle_irq(struct fimc_context *ctx, bool enable,
283 bool overflow, bool level)
284{
285 u32 cfg;
286
287 DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]level[%d]\n", __func__,
288 enable, overflow, level);
289
290 cfg = fimc_read(EXYNOS_CIGCTRL);
291 if (enable) {
292 cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_LEVEL);
293 cfg |= EXYNOS_CIGCTRL_IRQ_ENABLE;
294 if (overflow)
295 cfg |= EXYNOS_CIGCTRL_IRQ_OVFEN;
296 if (level)
297 cfg |= EXYNOS_CIGCTRL_IRQ_LEVEL;
298 } else
299 cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_ENABLE);
300
301 fimc_write(cfg, EXYNOS_CIGCTRL);
302}
303
304static void fimc_clear_irq(struct fimc_context *ctx)
305{
306 u32 cfg;
307
308 DRM_DEBUG_KMS("%s\n", __func__);
309
310 cfg = fimc_read(EXYNOS_CIGCTRL);
311 cfg |= EXYNOS_CIGCTRL_IRQ_CLR;
312 fimc_write(cfg, EXYNOS_CIGCTRL);
313}
314
315static bool fimc_check_ovf(struct fimc_context *ctx)
316{
317 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
318 u32 cfg, status, flag;
319
320 status = fimc_read(EXYNOS_CISTATUS);
321 flag = EXYNOS_CISTATUS_OVFIY | EXYNOS_CISTATUS_OVFICB |
322 EXYNOS_CISTATUS_OVFICR;
323
324 DRM_DEBUG_KMS("%s:flag[0x%x]\n", __func__, flag);
325
326 if (status & flag) {
327 cfg = fimc_read(EXYNOS_CIWDOFST);
328 cfg |= (EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
329 EXYNOS_CIWDOFST_CLROVFICR);
330
331 fimc_write(cfg, EXYNOS_CIWDOFST);
332
333 cfg = fimc_read(EXYNOS_CIWDOFST);
334 cfg &= ~(EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
335 EXYNOS_CIWDOFST_CLROVFICR);
336
337 fimc_write(cfg, EXYNOS_CIWDOFST);
338
339 dev_err(ippdrv->dev, "occured overflow at %d, status 0x%x.\n",
340 ctx->id, status);
341 return true;
342 }
343
344 return false;
345}
346
347static bool fimc_check_frame_end(struct fimc_context *ctx)
348{
349 u32 cfg;
350
351 cfg = fimc_read(EXYNOS_CISTATUS);
352
353 DRM_DEBUG_KMS("%s:cfg[0x%x]\n", __func__, cfg);
354
355 if (!(cfg & EXYNOS_CISTATUS_FRAMEEND))
356 return false;
357
358 cfg &= ~(EXYNOS_CISTATUS_FRAMEEND);
359 fimc_write(cfg, EXYNOS_CISTATUS);
360
361 return true;
362}
363
364static int fimc_get_buf_id(struct fimc_context *ctx)
365{
366 u32 cfg;
367 int frame_cnt, buf_id;
368
369 DRM_DEBUG_KMS("%s\n", __func__);
370
371 cfg = fimc_read(EXYNOS_CISTATUS2);
372 frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg);
373
374 if (frame_cnt == 0)
375 frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg);
376
377 DRM_DEBUG_KMS("%s:present[%d]before[%d]\n", __func__,
378 EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg),
379 EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg));
380
381 if (frame_cnt == 0) {
382 DRM_ERROR("failed to get frame count.\n");
383 return -EIO;
384 }
385
386 buf_id = frame_cnt - 1;
387 DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
388
389 return buf_id;
390}
391
392static void fimc_handle_lastend(struct fimc_context *ctx, bool enable)
393{
394 u32 cfg;
395
396 DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
397
398 cfg = fimc_read(EXYNOS_CIOCTRL);
399 if (enable)
400 cfg |= EXYNOS_CIOCTRL_LASTENDEN;
401 else
402 cfg &= ~EXYNOS_CIOCTRL_LASTENDEN;
403
404 fimc_write(cfg, EXYNOS_CIOCTRL);
405}
406
407
408static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
409{
410 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
411 u32 cfg;
412
413 DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
414
415 /* RGB */
416 cfg = fimc_read(EXYNOS_CISCCTRL);
417 cfg &= ~EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK;
418
419 switch (fmt) {
420 case DRM_FORMAT_RGB565:
421 cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB565;
422 fimc_write(cfg, EXYNOS_CISCCTRL);
423 return 0;
424 case DRM_FORMAT_RGB888:
425 case DRM_FORMAT_XRGB8888:
426 cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB888;
427 fimc_write(cfg, EXYNOS_CISCCTRL);
428 return 0;
429 default:
430 /* bypass */
431 break;
432 }
433
434 /* YUV */
435 cfg = fimc_read(EXYNOS_MSCTRL);
436 cfg &= ~(EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK |
437 EXYNOS_MSCTRL_C_INT_IN_2PLANE |
438 EXYNOS_MSCTRL_ORDER422_YCBYCR);
439
440 switch (fmt) {
441 case DRM_FORMAT_YUYV:
442 cfg |= EXYNOS_MSCTRL_ORDER422_YCBYCR;
443 break;
444 case DRM_FORMAT_YVYU:
445 cfg |= EXYNOS_MSCTRL_ORDER422_YCRYCB;
446 break;
447 case DRM_FORMAT_UYVY:
448 cfg |= EXYNOS_MSCTRL_ORDER422_CBYCRY;
449 break;
450 case DRM_FORMAT_VYUY:
451 case DRM_FORMAT_YUV444:
452 cfg |= EXYNOS_MSCTRL_ORDER422_CRYCBY;
453 break;
454 case DRM_FORMAT_NV21:
455 case DRM_FORMAT_NV61:
456 cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CRCB |
457 EXYNOS_MSCTRL_C_INT_IN_2PLANE);
458 break;
459 case DRM_FORMAT_YUV422:
460 case DRM_FORMAT_YUV420:
461 case DRM_FORMAT_YVU420:
462 cfg |= EXYNOS_MSCTRL_C_INT_IN_3PLANE;
463 break;
464 case DRM_FORMAT_NV12:
465 case DRM_FORMAT_NV12MT:
466 case DRM_FORMAT_NV16:
467 cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CBCR |
468 EXYNOS_MSCTRL_C_INT_IN_2PLANE);
469 break;
470 default:
471 dev_err(ippdrv->dev, "inavlid source yuv order 0x%x.\n", fmt);
472 return -EINVAL;
473 }
474
475 fimc_write(cfg, EXYNOS_MSCTRL);
476
477 return 0;
478}
479
480static int fimc_src_set_fmt(struct device *dev, u32 fmt)
481{
482 struct fimc_context *ctx = get_fimc_context(dev);
483 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
484 u32 cfg;
485
486 DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
487
488 cfg = fimc_read(EXYNOS_MSCTRL);
489 cfg &= ~EXYNOS_MSCTRL_INFORMAT_RGB;
490
491 switch (fmt) {
492 case DRM_FORMAT_RGB565:
493 case DRM_FORMAT_RGB888:
494 case DRM_FORMAT_XRGB8888:
495 cfg |= EXYNOS_MSCTRL_INFORMAT_RGB;
496 break;
497 case DRM_FORMAT_YUV444:
498 cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
499 break;
500 case DRM_FORMAT_YUYV:
501 case DRM_FORMAT_YVYU:
502 case DRM_FORMAT_UYVY:
503 case DRM_FORMAT_VYUY:
504 cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE;
505 break;
506 case DRM_FORMAT_NV16:
507 case DRM_FORMAT_NV61:
508 case DRM_FORMAT_YUV422:
509 cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422;
510 break;
511 case DRM_FORMAT_YUV420:
512 case DRM_FORMAT_YVU420:
513 case DRM_FORMAT_NV12:
514 case DRM_FORMAT_NV21:
515 case DRM_FORMAT_NV12MT:
516 cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
517 break;
518 default:
519 dev_err(ippdrv->dev, "inavlid source format 0x%x.\n", fmt);
520 return -EINVAL;
521 }
522
523 fimc_write(cfg, EXYNOS_MSCTRL);
524
525 cfg = fimc_read(EXYNOS_CIDMAPARAM);
526 cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK;
527
528 if (fmt == DRM_FORMAT_NV12MT)
529 cfg |= EXYNOS_CIDMAPARAM_R_MODE_64X32;
530 else
531 cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR;
532
533 fimc_write(cfg, EXYNOS_CIDMAPARAM);
534
535 return fimc_src_set_fmt_order(ctx, fmt);
536}
537
538static int fimc_src_set_transf(struct device *dev,
539 enum drm_exynos_degree degree,
540 enum drm_exynos_flip flip, bool *swap)
541{
542 struct fimc_context *ctx = get_fimc_context(dev);
543 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
544 u32 cfg1, cfg2;
545
546 DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
547 degree, flip);
548
549 cfg1 = fimc_read(EXYNOS_MSCTRL);
550 cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR |
551 EXYNOS_MSCTRL_FLIP_Y_MIRROR);
552
553 cfg2 = fimc_read(EXYNOS_CITRGFMT);
554 cfg2 &= ~EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
555
556 switch (degree) {
557 case EXYNOS_DRM_DEGREE_0:
558 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
559 cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
560 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
561 cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
562 break;
563 case EXYNOS_DRM_DEGREE_90:
564 cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
565 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
566 cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
567 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
568 cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
569 break;
570 case EXYNOS_DRM_DEGREE_180:
571 cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
572 EXYNOS_MSCTRL_FLIP_Y_MIRROR);
573 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
574 cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
575 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
576 cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
577 break;
578 case EXYNOS_DRM_DEGREE_270:
579 cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
580 EXYNOS_MSCTRL_FLIP_Y_MIRROR);
581 cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
582 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
583 cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
584 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
585 cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
586 break;
587 default:
588 dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
589 return -EINVAL;
590 }
591
592 fimc_write(cfg1, EXYNOS_MSCTRL);
593 fimc_write(cfg2, EXYNOS_CITRGFMT);
594 *swap = (cfg2 & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) ? 1 : 0;
595
596 return 0;
597}
598
599static int fimc_set_window(struct fimc_context *ctx,
600 struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
601{
602 u32 cfg, h1, h2, v1, v2;
603
604 /* cropped image */
605 h1 = pos->x;
606 h2 = sz->hsize - pos->w - pos->x;
607 v1 = pos->y;
608 v2 = sz->vsize - pos->h - pos->y;
609
610 DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
611 __func__, pos->x, pos->y, pos->w, pos->h, sz->hsize, sz->vsize);
612 DRM_DEBUG_KMS("%s:h1[%d]h2[%d]v1[%d]v2[%d]\n", __func__,
613 h1, h2, v1, v2);
614
615 /*
616 * set window offset 1, 2 size
617 * check figure 43-21 in user manual
618 */
619 cfg = fimc_read(EXYNOS_CIWDOFST);
620 cfg &= ~(EXYNOS_CIWDOFST_WINHOROFST_MASK |
621 EXYNOS_CIWDOFST_WINVEROFST_MASK);
622 cfg |= (EXYNOS_CIWDOFST_WINHOROFST(h1) |
623 EXYNOS_CIWDOFST_WINVEROFST(v1));
624 cfg |= EXYNOS_CIWDOFST_WINOFSEN;
625 fimc_write(cfg, EXYNOS_CIWDOFST);
626
627 cfg = (EXYNOS_CIWDOFST2_WINHOROFST2(h2) |
628 EXYNOS_CIWDOFST2_WINVEROFST2(v2));
629 fimc_write(cfg, EXYNOS_CIWDOFST2);
630
631 return 0;
632}
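
fimc_set_window() programs the crop as margins from each edge of the source frame rather than as an x/y/w/h rectangle: h1/h2 are the left/right margins and v1/v2 the top/bottom margins. A standalone sketch of the same arithmetic, with a worked example (a 1280x720 crop at (320, 180) inside a 1920x1080 frame; all names are illustrative):

	#include <stdio.h>

	struct rect { unsigned int x, y, w, h; };
	struct size { unsigned int hsize, vsize; };

	/* distances from each frame edge to the crop rectangle */
	static void window_margins(const struct rect *pos, const struct size *sz,
				   unsigned int *h1, unsigned int *h2,
				   unsigned int *v1, unsigned int *v2)
	{
		*h1 = pos->x;
		*h2 = sz->hsize - pos->w - pos->x;
		*v1 = pos->y;
		*v2 = sz->vsize - pos->h - pos->y;
	}

	int main(void)
	{
		struct rect pos = { 320, 180, 1280, 720 };
		struct size sz = { 1920, 1080 };
		unsigned int h1, h2, v1, v2;

		window_margins(&pos, &sz, &h1, &h2, &v1, &v2);
		/* prints h1=320 h2=320 v1=180 v2=180: the crop is centered */
		printf("h1=%u h2=%u v1=%u v2=%u\n", h1, h2, v1, v2);
		return 0;
	}
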
633
634static int fimc_src_set_size(struct device *dev, int swap,
635 struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
636{
637 struct fimc_context *ctx = get_fimc_context(dev);
638 struct drm_exynos_pos img_pos = *pos;
639 struct drm_exynos_sz img_sz = *sz;
640 u32 cfg;
641
642 DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
643 __func__, swap, sz->hsize, sz->vsize);
644
645 /* original size */
646 cfg = (EXYNOS_ORGISIZE_HORIZONTAL(img_sz.hsize) |
647 EXYNOS_ORGISIZE_VERTICAL(img_sz.vsize));
648
649 fimc_write(cfg, EXYNOS_ORGISIZE);
650
651 DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n", __func__,
652 pos->x, pos->y, pos->w, pos->h);
653
654 if (swap) {
655 img_pos.w = pos->h;
656 img_pos.h = pos->w;
657 img_sz.hsize = sz->vsize;
658 img_sz.vsize = sz->hsize;
659 }
660
661 /* set input DMA image size */
662 cfg = fimc_read(EXYNOS_CIREAL_ISIZE);
663 cfg &= ~(EXYNOS_CIREAL_ISIZE_HEIGHT_MASK |
664 EXYNOS_CIREAL_ISIZE_WIDTH_MASK);
665 cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(img_pos.w) |
666 EXYNOS_CIREAL_ISIZE_HEIGHT(img_pos.h));
667 fimc_write(cfg, EXYNOS_CIREAL_ISIZE);
668
669 /*
670 * set input FIFO image size
671 * for now, we support only ITU601 8 bit mode
672 */
673 cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
674 EXYNOS_CISRCFMT_SOURCEHSIZE(img_sz.hsize) |
675 EXYNOS_CISRCFMT_SOURCEVSIZE(img_sz.vsize));
676 fimc_write(cfg, EXYNOS_CISRCFMT);
677
678 /* offset Y(RGB), Cb, Cr */
679 cfg = (EXYNOS_CIIYOFF_HORIZONTAL(img_pos.x) |
680 EXYNOS_CIIYOFF_VERTICAL(img_pos.y));
681 fimc_write(cfg, EXYNOS_CIIYOFF);
682 cfg = (EXYNOS_CIICBOFF_HORIZONTAL(img_pos.x) |
683 EXYNOS_CIICBOFF_VERTICAL(img_pos.y));
684 fimc_write(cfg, EXYNOS_CIICBOFF);
685 cfg = (EXYNOS_CIICROFF_HORIZONTAL(img_pos.x) |
686 EXYNOS_CIICROFF_VERTICAL(img_pos.y));
687 fimc_write(cfg, EXYNOS_CIICROFF);
688
689 return fimc_set_window(ctx, &img_pos, &img_sz);
690}
691
692static int fimc_src_set_addr(struct device *dev,
693 struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
694 enum drm_exynos_ipp_buf_type buf_type)
695{
696 struct fimc_context *ctx = get_fimc_context(dev);
697 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
698 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
699 struct drm_exynos_ipp_property *property;
700 struct drm_exynos_ipp_config *config;
701
702 if (!c_node) {
703 DRM_ERROR("failed to get c_node.\n");
704 return -EINVAL;
705 }
706
707 property = &c_node->property;
708 if (!property) {
709 DRM_ERROR("failed to get property.\n");
710 return -EINVAL;
711 }
712
713 DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
714 property->prop_id, buf_id, buf_type);
715
716 if (buf_id > FIMC_MAX_SRC) {
717 dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id);
718 return -ENOMEM;
719 }
720
721 /* address register set */
722 switch (buf_type) {
723 case IPP_BUF_ENQUEUE:
724 config = &property->config[EXYNOS_DRM_OPS_SRC];
725 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
726 EXYNOS_CIIYSA(buf_id));
727
728 if (config->fmt == DRM_FORMAT_YVU420) {
729 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
730 EXYNOS_CIICBSA(buf_id));
731 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
732 EXYNOS_CIICRSA(buf_id));
733 } else {
734 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
735 EXYNOS_CIICBSA(buf_id));
736 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
737 EXYNOS_CIICRSA(buf_id));
738 }
739 break;
740 case IPP_BUF_DEQUEUE:
741 fimc_write(0x0, EXYNOS_CIIYSA(buf_id));
742 fimc_write(0x0, EXYNOS_CIICBSA(buf_id));
743 fimc_write(0x0, EXYNOS_CIICRSA(buf_id));
744 break;
745 default:
746 /* bypass */
747 break;
748 }
749
750 return 0;
751}
752
753static struct exynos_drm_ipp_ops fimc_src_ops = {
754 .set_fmt = fimc_src_set_fmt,
755 .set_transf = fimc_src_set_transf,
756 .set_size = fimc_src_set_size,
757 .set_addr = fimc_src_set_addr,
758};
759
760static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
761{
762 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
763 u32 cfg;
764
765 DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
766
767 /* RGB */
768 cfg = fimc_read(EXYNOS_CISCCTRL);
769 cfg &= ~EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK;
770
771 switch (fmt) {
772 case DRM_FORMAT_RGB565:
773 cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565;
774 fimc_write(cfg, EXYNOS_CISCCTRL);
775 return 0;
776 case DRM_FORMAT_RGB888:
777 cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888;
778 fimc_write(cfg, EXYNOS_CISCCTRL);
779 return 0;
780 case DRM_FORMAT_XRGB8888:
781 cfg |= (EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 |
782 EXYNOS_CISCCTRL_EXTRGB_EXTENSION);
783 fimc_write(cfg, EXYNOS_CISCCTRL);
784 break;
785 default:
786 /* bypass */
787 break;
788 }
789
790 /* YUV */
791 cfg = fimc_read(EXYNOS_CIOCTRL);
792 cfg &= ~(EXYNOS_CIOCTRL_ORDER2P_MASK |
793 EXYNOS_CIOCTRL_ORDER422_MASK |
794 EXYNOS_CIOCTRL_YCBCR_PLANE_MASK);
795
796 switch (fmt) {
797 case DRM_FORMAT_XRGB8888:
798 cfg |= EXYNOS_CIOCTRL_ALPHA_OUT;
799 break;
800 case DRM_FORMAT_YUYV:
801 cfg |= EXYNOS_CIOCTRL_ORDER422_YCBYCR;
802 break;
803 case DRM_FORMAT_YVYU:
804 cfg |= EXYNOS_CIOCTRL_ORDER422_YCRYCB;
805 break;
806 case DRM_FORMAT_UYVY:
807 cfg |= EXYNOS_CIOCTRL_ORDER422_CBYCRY;
808 break;
809 case DRM_FORMAT_VYUY:
810 cfg |= EXYNOS_CIOCTRL_ORDER422_CRYCBY;
811 break;
812 case DRM_FORMAT_NV21:
813 case DRM_FORMAT_NV61:
814 cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB;
815 cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
816 break;
817 case DRM_FORMAT_YUV422:
818 case DRM_FORMAT_YUV420:
819 case DRM_FORMAT_YVU420:
820 cfg |= EXYNOS_CIOCTRL_YCBCR_3PLANE;
821 break;
822 case DRM_FORMAT_NV12:
823 case DRM_FORMAT_NV12MT:
824 case DRM_FORMAT_NV16:
825 cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR;
826 cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
827 break;
828 default:
829 dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt);
830 return -EINVAL;
831 }
832
833 fimc_write(cfg, EXYNOS_CIOCTRL);
834
835 return 0;
836}
837
838static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
839{
840 struct fimc_context *ctx = get_fimc_context(dev);
841 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
842 u32 cfg;
843
844 DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
845
846 cfg = fimc_read(EXYNOS_CIEXTEN);
847
848 if (fmt == DRM_FORMAT_AYUV) {
849 cfg |= EXYNOS_CIEXTEN_YUV444_OUT;
850 fimc_write(cfg, EXYNOS_CIEXTEN);
851 } else {
852 cfg &= ~EXYNOS_CIEXTEN_YUV444_OUT;
853 fimc_write(cfg, EXYNOS_CIEXTEN);
854
855 cfg = fimc_read(EXYNOS_CITRGFMT);
856 cfg &= ~EXYNOS_CITRGFMT_OUTFORMAT_MASK;
857
858 switch (fmt) {
859 case DRM_FORMAT_RGB565:
860 case DRM_FORMAT_RGB888:
861 case DRM_FORMAT_XRGB8888:
862 cfg |= EXYNOS_CITRGFMT_OUTFORMAT_RGB;
863 break;
864 case DRM_FORMAT_YUYV:
865 case DRM_FORMAT_YVYU:
866 case DRM_FORMAT_UYVY:
867 case DRM_FORMAT_VYUY:
868 cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE;
869 break;
870 case DRM_FORMAT_NV16:
871 case DRM_FORMAT_NV61:
872 case DRM_FORMAT_YUV422:
873 cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422;
874 break;
875 case DRM_FORMAT_YUV420:
876 case DRM_FORMAT_YVU420:
877 case DRM_FORMAT_NV12:
878 case DRM_FORMAT_NV12MT:
879 case DRM_FORMAT_NV21:
880 cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420;
881 break;
882 default:
883 dev_err(ippdrv->dev, "inavlid target format 0x%x.\n",
884 fmt);
885 return -EINVAL;
886 }
887
888 fimc_write(cfg, EXYNOS_CITRGFMT);
889 }
890
891 cfg = fimc_read(EXYNOS_CIDMAPARAM);
892 cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK;
893
894 if (fmt == DRM_FORMAT_NV12MT)
895 cfg |= EXYNOS_CIDMAPARAM_W_MODE_64X32;
896 else
897 cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR;
898
899 fimc_write(cfg, EXYNOS_CIDMAPARAM);
900
901 return fimc_dst_set_fmt_order(ctx, fmt);
902}
903
904static int fimc_dst_set_transf(struct device *dev,
905 enum drm_exynos_degree degree,
906 enum drm_exynos_flip flip, bool *swap)
907{
908 struct fimc_context *ctx = get_fimc_context(dev);
909 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
910 u32 cfg;
911
912 DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
913 degree, flip);
914
915 cfg = fimc_read(EXYNOS_CITRGFMT);
916 cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK;
917 cfg &= ~EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
918
919 switch (degree) {
920 case EXYNOS_DRM_DEGREE_0:
921 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
922 cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
923 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
924 cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
925 break;
926 case EXYNOS_DRM_DEGREE_90:
927 cfg |= EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
928 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
929 cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
930 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
931 cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
932 break;
933 case EXYNOS_DRM_DEGREE_180:
934 cfg |= (EXYNOS_CITRGFMT_FLIP_X_MIRROR |
935 EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
936 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
937 cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
938 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
939 cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
940 break;
941 case EXYNOS_DRM_DEGREE_270:
942 cfg |= (EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE |
943 EXYNOS_CITRGFMT_FLIP_X_MIRROR |
944 EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
945 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
946 cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
947 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
948 cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
949 break;
950 default:
951 dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
952 return -EINVAL;
953 }
954
955 fimc_write(cfg, EXYNOS_CITRGFMT);
956 *swap = (cfg & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) ? 1 : 0;
957
958 return 0;
959}
960
961static int fimc_get_ratio_shift(u32 src, u32 dst, u32 *ratio, u32 *shift)
962{
963 DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);
964
965 if (src >= dst * 64) {
966 DRM_ERROR("failed to make ratio and shift.\n");
967 return -EINVAL;
968 } else if (src >= dst * 32) {
969 *ratio = 32;
970 *shift = 5;
971 } else if (src >= dst * 16) {
972 *ratio = 16;
973 *shift = 4;
974 } else if (src >= dst * 8) {
975 *ratio = 8;
976 *shift = 3;
977 } else if (src >= dst * 4) {
978 *ratio = 4;
979 *shift = 2;
980 } else if (src >= dst * 2) {
981 *ratio = 2;
982 *shift = 1;
983 } else {
984 *ratio = 1;
985 *shift = 0;
986 }
987
988 return 0;
989}
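
The pre-scaler can only shrink by powers of two, so fimc_get_ratio_shift() picks the largest ratio 2^shift that still satisfies src >= dst * ratio, and rejects anything at or beyond 64:1 since the hardware pre-scale tops out at 32:1. For example, 1920 -> 200 selects ratio 8 and shift 3 (1920 >= 200 * 8 but 1920 < 200 * 16), leaving a 240 -> 200 step for the main scaler. A standalone sketch of the same selection:

	#include <stdio.h>

	/* mirrors the ratio/shift selection in fimc_get_ratio_shift() */
	static int ratio_shift(unsigned int src, unsigned int dst,
			       unsigned int *ratio, unsigned int *shift)
	{
		unsigned int s;

		if (src >= dst * 64)
			return -1;	/* beyond the 32:1 hardware limit */

		for (s = 5; s > 0; s--)
			if (src >= dst * (1u << s))
				break;

		*shift = s;
		*ratio = 1u << s;
		return 0;
	}

	int main(void)
	{
		unsigned int ratio, shift;

		if (!ratio_shift(1920, 200, &ratio, &shift))
			printf("ratio=%u shift=%u\n", ratio, shift); /* 8, 3 */
		return 0;
	}
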
990
991static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
992 struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
993{
994 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
995 u32 cfg, cfg_ext, shfactor;
996 u32 pre_dst_width, pre_dst_height;
997 u32 pre_hratio, hfactor, pre_vratio, vfactor;
998 int ret = 0;
999 u32 src_w, src_h, dst_w, dst_h;
1000
1001 cfg_ext = fimc_read(EXYNOS_CITRGFMT);
1002 if (cfg_ext & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) {
1003 src_w = src->h;
1004 src_h = src->w;
1005 } else {
1006 src_w = src->w;
1007 src_h = src->h;
1008 }
1009
1010 if (cfg_ext & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) {
1011 dst_w = dst->h;
1012 dst_h = dst->w;
1013 } else {
1014 dst_w = dst->w;
1015 dst_h = dst->h;
1016 }
1017
1018 ret = fimc_get_ratio_shift(src_w, dst_w, &pre_hratio, &hfactor);
1019 if (ret) {
1020 dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
1021 return ret;
1022 }
1023
1024 ret = fimc_get_ratio_shift(src_h, dst_h, &pre_vratio, &vfactor);
1025 if (ret) {
1026 dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
1027 return ret;
1028 }
1029
1030 pre_dst_width = src_w / pre_hratio;
1031 pre_dst_height = src_h / pre_vratio;
1032 DRM_DEBUG_KMS("%s:pre_dst_width[%d]pre_dst_height[%d]\n", __func__,
1033 pre_dst_width, pre_dst_height);
1034 DRM_DEBUG_KMS("%s:pre_hratio[%d]hfactor[%d]pre_vratio[%d]vfactor[%d]\n",
1035 __func__, pre_hratio, hfactor, pre_vratio, vfactor);
1036
1037 sc->hratio = (src_w << 14) / (dst_w << hfactor);
1038 sc->vratio = (src_h << 14) / (dst_h << vfactor);
1039 sc->up_h = (dst_w >= src_w);
1040 sc->up_v = (dst_h >= src_h);
1041 DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n",
1042 __func__, sc->hratio, sc->vratio, sc->up_h, sc->up_v);
1043
1044 shfactor = FIMC_SHFACTOR - (hfactor + vfactor);
1045 DRM_DEBUG_KMS("%s:shfactor[%d]\n", __func__, shfactor);
1046
1047 cfg = (EXYNOS_CISCPRERATIO_SHFACTOR(shfactor) |
1048 EXYNOS_CISCPRERATIO_PREHORRATIO(pre_hratio) |
1049 EXYNOS_CISCPRERATIO_PREVERRATIO(pre_vratio));
1050 fimc_write(cfg, EXYNOS_CISCPRERATIO);
1051
1052 cfg = (EXYNOS_CISCPREDST_PREDSTWIDTH(pre_dst_width) |
1053 EXYNOS_CISCPREDST_PREDSTHEIGHT(pre_dst_height));
1054 fimc_write(cfg, EXYNOS_CISCPREDST);
1055
1056 return ret;
1057}
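
The main scaler then refines what the pre-scaler left, using a Q14 fixed-point src/dst ratio: hratio = (src_w << 14) / (dst_w << hfactor), so 16384 means 1:1 and 24576 means a 1.5:1 shrink. fimc_set_scaler() below writes hratio >> 6 into the CISCCTRL main-ratio field and, judging by the _EXT macro names, the low six bits into the CIEXTEN extension fields. A standalone sketch of the computation and the split, using 1920 -> 1280 with hfactor 0 as a worked example:

	#include <stdio.h>

	#define Q14_ONE (1u << 14)

	int main(void)
	{
		unsigned int src_w = 1920, dst_w = 1280, hfactor = 0;
		unsigned int hratio = (src_w << 14) / (dst_w << hfactor);

		/* 24576 == 1.5 * Q14_ONE: a 1.5:1 horizontal shrink */
		printf("hratio=%u (%.3f:1)\n", hratio, (double)hratio / Q14_ONE);
		/* main part for CISCCTRL, low six bits for CIEXTEN */
		printf("main=%u ext=%u\n", hratio >> 6, hratio & 0x3f);
		return 0;
	}
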
1058
1059static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
1060{
1061 u32 cfg, cfg_ext;
1062
1063 DRM_DEBUG_KMS("%s:range[%d]bypass[%d]up_h[%d]up_v[%d]\n",
1064 __func__, sc->range, sc->bypass, sc->up_h, sc->up_v);
1065 DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]\n",
1066 __func__, sc->hratio, sc->vratio);
1067
1068 cfg = fimc_read(EXYNOS_CISCCTRL);
1069 cfg &= ~(EXYNOS_CISCCTRL_SCALERBYPASS |
1070 EXYNOS_CISCCTRL_SCALEUP_H | EXYNOS_CISCCTRL_SCALEUP_V |
1071 EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK |
1072 EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK |
1073 EXYNOS_CISCCTRL_CSCR2Y_WIDE |
1074 EXYNOS_CISCCTRL_CSCY2R_WIDE);
1075
1076 if (sc->range)
1077 cfg |= (EXYNOS_CISCCTRL_CSCR2Y_WIDE |
1078 EXYNOS_CISCCTRL_CSCY2R_WIDE);
1079 if (sc->bypass)
1080 cfg |= EXYNOS_CISCCTRL_SCALERBYPASS;
1081 if (sc->up_h)
1082 cfg |= EXYNOS_CISCCTRL_SCALEUP_H;
1083 if (sc->up_v)
1084 cfg |= EXYNOS_CISCCTRL_SCALEUP_V;
1085
1086 cfg |= (EXYNOS_CISCCTRL_MAINHORRATIO((sc->hratio >> 6)) |
1087 EXYNOS_CISCCTRL_MAINVERRATIO((sc->vratio >> 6)));
1088 fimc_write(cfg, EXYNOS_CISCCTRL);
1089
1090 cfg_ext = fimc_read(EXYNOS_CIEXTEN);
1091 cfg_ext &= ~EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK;
1092 cfg_ext &= ~EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK;
1093 cfg_ext |= (EXYNOS_CIEXTEN_MAINHORRATIO_EXT(sc->hratio) |
1094 EXYNOS_CIEXTEN_MAINVERRATIO_EXT(sc->vratio));
1095 fimc_write(cfg_ext, EXYNOS_CIEXTEN);
1096}
1097
1098static int fimc_dst_set_size(struct device *dev, int swap,
1099 struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
1100{
1101 struct fimc_context *ctx = get_fimc_context(dev);
1102 struct drm_exynos_pos img_pos = *pos;
1103 struct drm_exynos_sz img_sz = *sz;
1104 u32 cfg;
1105
1106 DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
1107 __func__, swap, sz->hsize, sz->vsize);
1108
1109 /* original size */
1110 cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(img_sz.hsize) |
1111 EXYNOS_ORGOSIZE_VERTICAL(img_sz.vsize));
1112
1113 fimc_write(cfg, EXYNOS_ORGOSIZE);
1114
1115 DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n",
1116 __func__, pos->x, pos->y, pos->w, pos->h);
1117
1118 /* CSC ITU */
1119 cfg = fimc_read(EXYNOS_CIGCTRL);
1120 cfg &= ~EXYNOS_CIGCTRL_CSC_MASK;
1121
1122 if (sz->hsize >= FIMC_WIDTH_ITU_709)
1123 cfg |= EXYNOS_CIGCTRL_CSC_ITU709;
1124 else
1125 cfg |= EXYNOS_CIGCTRL_CSC_ITU601;
1126
1127 fimc_write(cfg, EXYNOS_CIGCTRL);
1128
1129 if (swap) {
1130 img_pos.w = pos->h;
1131 img_pos.h = pos->w;
1132 img_sz.hsize = sz->vsize;
1133 img_sz.vsize = sz->hsize;
1134 }
1135
1136 /* target image size */
1137 cfg = fimc_read(EXYNOS_CITRGFMT);
1138 cfg &= ~(EXYNOS_CITRGFMT_TARGETH_MASK |
1139 EXYNOS_CITRGFMT_TARGETV_MASK);
1140 cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(img_pos.w) |
1141 EXYNOS_CITRGFMT_TARGETVSIZE(img_pos.h));
1142 fimc_write(cfg, EXYNOS_CITRGFMT);
1143
1144 /* target area */
1145 cfg = EXYNOS_CITAREA_TARGET_AREA(img_pos.w * img_pos.h);
1146 fimc_write(cfg, EXYNOS_CITAREA);
1147
1148 /* offset Y(RGB), Cb, Cr */
1149 cfg = (EXYNOS_CIOYOFF_HORIZONTAL(img_pos.x) |
1150 EXYNOS_CIOYOFF_VERTICAL(img_pos.y));
1151 fimc_write(cfg, EXYNOS_CIOYOFF);
1152 cfg = (EXYNOS_CIOCBOFF_HORIZONTAL(img_pos.x) |
1153 EXYNOS_CIOCBOFF_VERTICAL(img_pos.y));
1154 fimc_write(cfg, EXYNOS_CIOCBOFF);
1155 cfg = (EXYNOS_CIOCROFF_HORIZONTAL(img_pos.x) |
1156 EXYNOS_CIOCROFF_VERTICAL(img_pos.y));
1157 fimc_write(cfg, EXYNOS_CIOCROFF);
1158
1159 return 0;
1160}
1161
1162static int fimc_dst_get_buf_seq(struct fimc_context *ctx)
1163{
1164 u32 cfg, i, buf_num = 0;
1165 u32 mask = 0x00000001;
1166
1167 cfg = fimc_read(EXYNOS_CIFCNTSEQ);
1168
1169 for (i = 0; i < FIMC_REG_SZ; i++)
1170 if (cfg & (mask << i))
1171 buf_num++;
1172
1173 DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);
1174
1175 return buf_num;
1176}
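
The loop above is a manual population count of the 32 buffer-sequence bits. Assuming bits above FIMC_REG_SZ are never set, an equivalent sketch using the kernel's hweight32() helper from <linux/bitops.h>:

	/* each set bit in CIFCNTSEQ is one enqueued destination buffer */
	static int fimc_dst_get_buf_seq(struct fimc_context *ctx)
	{
		return hweight32(fimc_read(EXYNOS_CIFCNTSEQ));
	}
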
1177
1178static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
1179 enum drm_exynos_ipp_buf_type buf_type)
1180{
1181 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1182 bool enable;
1183 u32 cfg;
1184 u32 mask = 0x00000001 << buf_id;
1185 int ret = 0;
1186
1187 DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
1188 buf_id, buf_type);
1189
1190 mutex_lock(&ctx->lock);
1191
1192 /* mask register set */
1193 cfg = fimc_read(EXYNOS_CIFCNTSEQ);
1194
1195 switch (buf_type) {
1196 case IPP_BUF_ENQUEUE:
1197 enable = true;
1198 break;
1199 case IPP_BUF_DEQUEUE:
1200 enable = false;
1201 break;
1202 default:
1203 dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
1204 ret = -EINVAL;
1205 goto err_unlock;
1206 }
1207
1208 /* sequence id */
1209 cfg &= (~mask);
1210 cfg |= (enable << buf_id);
1211 fimc_write(cfg, EXYNOS_CIFCNTSEQ);
1212
1213 /* interrupt enable */
1214 if (buf_type == IPP_BUF_ENQUEUE &&
1215 fimc_dst_get_buf_seq(ctx) >= FIMC_BUF_START)
1216 fimc_handle_irq(ctx, true, false, true);
1217
1218 /* interrupt disable */
1219 if (buf_type == IPP_BUF_DEQUEUE &&
1220 fimc_dst_get_buf_seq(ctx) <= FIMC_BUF_STOP)
1221 fimc_handle_irq(ctx, false, false, true);
1222
1223err_unlock:
1224 mutex_unlock(&ctx->lock);
1225 return ret;
1226}
1227
1228static int fimc_dst_set_addr(struct device *dev,
1229 struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
1230 enum drm_exynos_ipp_buf_type buf_type)
1231{
1232 struct fimc_context *ctx = get_fimc_context(dev);
1233 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1234 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
1235 struct drm_exynos_ipp_property *property;
1236 struct drm_exynos_ipp_config *config;
1237
1238 if (!c_node) {
1239 DRM_ERROR("failed to get c_node.\n");
1240 return -EINVAL;
1241 }
1242
1243 property = &c_node->property;
1244 if (!property) {
1245 DRM_ERROR("failed to get property.\n");
1246 return -EINVAL;
1247 }
1248
1249 DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
1250 property->prop_id, buf_id, buf_type);
1251
1252 if (buf_id > FIMC_MAX_DST) {
1253 dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id);
1254 return -ENOMEM;
1255 }
1256
1257 /* address register set */
1258 switch (buf_type) {
1259 case IPP_BUF_ENQUEUE:
1260 config = &property->config[EXYNOS_DRM_OPS_DST];
1261
1262 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
1263 EXYNOS_CIOYSA(buf_id));
1264
1265 if (config->fmt == DRM_FORMAT_YVU420) {
1266 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
1267 EXYNOS_CIOCBSA(buf_id));
1268 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
1269 EXYNOS_CIOCRSA(buf_id));
1270 } else {
1271 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
1272 EXYNOS_CIOCBSA(buf_id));
1273 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
1274 EXYNOS_CIOCRSA(buf_id));
1275 }
1276 break;
1277 case IPP_BUF_DEQUEUE:
1278 fimc_write(0x0, EXYNOS_CIOYSA(buf_id));
1279 fimc_write(0x0, EXYNOS_CIOCBSA(buf_id));
1280 fimc_write(0x0, EXYNOS_CIOCRSA(buf_id));
1281 break;
1282 default:
1283 /* bypass */
1284 break;
1285 }
1286
1287 return fimc_dst_set_buf_seq(ctx, buf_id, buf_type);
1288}
1289
1290static struct exynos_drm_ipp_ops fimc_dst_ops = {
1291 .set_fmt = fimc_dst_set_fmt,
1292 .set_transf = fimc_dst_set_transf,
1293 .set_size = fimc_dst_set_size,
1294 .set_addr = fimc_dst_set_addr,
1295};
1296
1297static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
1298{
1299 DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
1300
1301 if (enable) {
1302 clk_enable(ctx->sclk_fimc_clk);
1303 clk_enable(ctx->fimc_clk);
1304 clk_enable(ctx->wb_clk);
1305 ctx->suspended = false;
1306 } else {
1307 clk_disable(ctx->sclk_fimc_clk);
1308 clk_disable(ctx->fimc_clk);
1309 clk_disable(ctx->wb_clk);
1310 ctx->suspended = true;
1311 }
1312
1313 return 0;
1314}
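
clk_enable() and clk_disable() here predate the common clock framework's mandatory prepare step. On kernels where these clocks must also be prepared, an equivalent sketch pairs clk_prepare_enable() with clk_disable_unprepare() (return values ignored for brevity, as in the original):

	static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
	{
		if (enable) {
			clk_prepare_enable(ctx->sclk_fimc_clk);
			clk_prepare_enable(ctx->fimc_clk);
			clk_prepare_enable(ctx->wb_clk);
			ctx->suspended = false;
		} else {
			clk_disable_unprepare(ctx->wb_clk);
			clk_disable_unprepare(ctx->fimc_clk);
			clk_disable_unprepare(ctx->sclk_fimc_clk);
			ctx->suspended = true;
		}

		return 0;
	}
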
1315
1316static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
1317{
1318 struct fimc_context *ctx = dev_id;
1319 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1320 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
1321 struct drm_exynos_ipp_event_work *event_work =
1322 c_node->event_work;
1323 int buf_id;
1324
1325 DRM_DEBUG_KMS("%s:fimc id[%d]\n", __func__, ctx->id);
1326
1327 fimc_clear_irq(ctx);
1328 if (fimc_check_ovf(ctx))
1329 return IRQ_NONE;
1330
1331 if (!fimc_check_frame_end(ctx))
1332 return IRQ_NONE;
1333
1334 buf_id = fimc_get_buf_id(ctx);
1335 if (buf_id < 0)
1336 return IRQ_HANDLED;
1337
1338 DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
1339
1340 if (fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE) < 0) {
1341 DRM_ERROR("failed to dequeue.\n");
1342 return IRQ_HANDLED;
1343 }
1344
1345 event_work->ippdrv = ippdrv;
1346 event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
1347 queue_work(ippdrv->event_workq, (struct work_struct *)event_work);
1348
1349 return IRQ_HANDLED;
1350}
1351
1352static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
1353{
1354 struct drm_exynos_ipp_prop_list *prop_list;
1355
1356 DRM_DEBUG_KMS("%s\n", __func__);
1357
1358 prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
1359 if (!prop_list) {
1360 DRM_ERROR("failed to alloc property list.\n");
1361 return -ENOMEM;
1362 }
1363
1364 prop_list->version = 1;
1365 prop_list->writeback = 1;
1366 prop_list->refresh_min = FIMC_REFRESH_MIN;
1367 prop_list->refresh_max = FIMC_REFRESH_MAX;
1368 prop_list->flip = (1 << EXYNOS_DRM_FLIP_NONE) |
1369 (1 << EXYNOS_DRM_FLIP_VERTICAL) |
1370 (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
1371 prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
1372 (1 << EXYNOS_DRM_DEGREE_90) |
1373 (1 << EXYNOS_DRM_DEGREE_180) |
1374 (1 << EXYNOS_DRM_DEGREE_270);
1375 prop_list->csc = 1;
1376 prop_list->crop = 1;
1377 prop_list->crop_max.hsize = FIMC_CROP_MAX;
1378 prop_list->crop_max.vsize = FIMC_CROP_MAX;
1379 prop_list->crop_min.hsize = FIMC_CROP_MIN;
1380 prop_list->crop_min.vsize = FIMC_CROP_MIN;
1381 prop_list->scale = 1;
1382 prop_list->scale_max.hsize = FIMC_SCALE_MAX;
1383 prop_list->scale_max.vsize = FIMC_SCALE_MAX;
1384 prop_list->scale_min.hsize = FIMC_SCALE_MIN;
1385 prop_list->scale_min.vsize = FIMC_SCALE_MIN;
1386
1387 ippdrv->prop_list = prop_list;
1388
1389 return 0;
1390}
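
The flip and degree members are capability bitmasks with one bit per supported enum value, which lets the IPP core test support with a single AND. A minimal sketch of such a check (fimc_supports_degree() is an illustrative helper, not a driver function):

	static bool fimc_supports_degree(struct drm_exynos_ipp_prop_list *prop_list,
					 enum drm_exynos_degree degree)
	{
		return prop_list->degree & (1 << degree);
	}
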
1391
1392static inline bool fimc_check_drm_flip(enum drm_exynos_flip flip)
1393{
1394 switch (flip) {
1395 case EXYNOS_DRM_FLIP_NONE:
1396 case EXYNOS_DRM_FLIP_VERTICAL:
1397 case EXYNOS_DRM_FLIP_HORIZONTAL:
1398 return true;
1399 default:
1400 DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
1401 return false;
1402 }
1403}
1404
1405static int fimc_ippdrv_check_property(struct device *dev,
1406 struct drm_exynos_ipp_property *property)
1407{
1408 struct fimc_context *ctx = get_fimc_context(dev);
1409 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1410 struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
1411 struct drm_exynos_ipp_config *config;
1412 struct drm_exynos_pos *pos;
1413 struct drm_exynos_sz *sz;
1414 bool swap;
1415 int i;
1416
1417 DRM_DEBUG_KMS("%s\n", __func__);
1418
1419 for_each_ipp_ops(i) {
1420 if ((i == EXYNOS_DRM_OPS_SRC) &&
1421 (property->cmd == IPP_CMD_WB))
1422 continue;
1423
1424 config = &property->config[i];
1425 pos = &config->pos;
1426 sz = &config->sz;
1427
1428 /* check for flip */
1429 if (!fimc_check_drm_flip(config->flip)) {
1430 DRM_ERROR("invalid flip.\n");
1431 goto err_property;
1432 }
1433
1434 /* check for degree */
1435 switch (config->degree) {
1436 case EXYNOS_DRM_DEGREE_90:
1437 case EXYNOS_DRM_DEGREE_270:
1438 swap = true;
1439 break;
1440 case EXYNOS_DRM_DEGREE_0:
1441 case EXYNOS_DRM_DEGREE_180:
1442 swap = false;
1443 break;
1444 default:
1445 DRM_ERROR("invalid degree.\n");
1446 goto err_property;
1447 }
1448
1449 /* check for buffer bound */
1450 if ((pos->x + pos->w > sz->hsize) ||
1451 (pos->y + pos->h > sz->vsize)) {
1452 DRM_ERROR("out of buf bound.\n");
1453 goto err_property;
1454 }
1455
1456 /* check for crop */
1457 if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
1458 if (swap) {
1459 if ((pos->h < pp->crop_min.hsize) ||
1460 (sz->vsize > pp->crop_max.hsize) ||
1461 (pos->w < pp->crop_min.vsize) ||
1462 (sz->hsize > pp->crop_max.vsize)) {
1463 DRM_ERROR("out of crop size.\n");
1464 goto err_property;
1465 }
1466 } else {
1467 if ((pos->w < pp->crop_min.hsize) ||
1468 (sz->hsize > pp->crop_max.hsize) ||
1469 (pos->h < pp->crop_min.vsize) ||
1470 (sz->vsize > pp->crop_max.vsize)) {
1471 DRM_ERROR("out of crop size.\n");
1472 goto err_property;
1473 }
1474 }
1475 }
1476
1477 /* check for scale */
1478 if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
1479 if (swap) {
1480 if ((pos->h < pp->scale_min.hsize) ||
1481 (sz->vsize > pp->scale_max.hsize) ||
1482 (pos->w < pp->scale_min.vsize) ||
1483 (sz->hsize > pp->scale_max.vsize)) {
1484 DRM_ERROR("out of scale size.\n");
1485 goto err_property;
1486 }
1487 } else {
1488 if ((pos->w < pp->scale_min.hsize) ||
1489 (sz->hsize > pp->scale_max.hsize) ||
1490 (pos->h < pp->scale_min.vsize) ||
1491 (sz->vsize > pp->scale_max.vsize)) {
1492 DRM_ERROR("out of scale size.\n");
1493 goto err_property;
1494 }
1495 }
1496 }
1497 }
1498
1499 return 0;
1500
1501err_property:
1502 for_each_ipp_ops(i) {
1503 if ((i == EXYNOS_DRM_OPS_SRC) &&
1504 (property->cmd == IPP_CMD_WB))
1505 continue;
1506
1507 config = &property->config[i];
1508 pos = &config->pos;
1509 sz = &config->sz;
1510
1511 DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
1512 i ? "dst" : "src", config->flip, config->degree,
1513 pos->x, pos->y, pos->w, pos->h,
1514 sz->hsize, sz->vsize);
1515 }
1516
1517 return -EINVAL;
1518}
1519
1520static void fimc_clear_addr(struct fimc_context *ctx)
1521{
1522 int i;
1523
1524 DRM_DEBUG_KMS("%s:\n", __func__);
1525
1526 for (i = 0; i < FIMC_MAX_SRC; i++) {
1527 fimc_write(0, EXYNOS_CIIYSA(i));
1528 fimc_write(0, EXYNOS_CIICBSA(i));
1529 fimc_write(0, EXYNOS_CIICRSA(i));
1530 }
1531
1532 for (i = 0; i < FIMC_MAX_DST; i++) {
1533 fimc_write(0, EXYNOS_CIOYSA(i));
1534 fimc_write(0, EXYNOS_CIOCBSA(i));
1535 fimc_write(0, EXYNOS_CIOCRSA(i));
1536 }
1537}
1538
1539static int fimc_ippdrv_reset(struct device *dev)
1540{
1541 struct fimc_context *ctx = get_fimc_context(dev);
1542
1543 DRM_DEBUG_KMS("%s\n", __func__);
1544
1545 /* reset h/w block */
1546 fimc_sw_reset(ctx, false);
1547
1548 /* reset scaler capability */
1549 memset(&ctx->sc, 0x0, sizeof(ctx->sc));
1550
1551 fimc_clear_addr(ctx);
1552
1553 return 0;
1554}
1555
1556static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1557{
1558 struct fimc_context *ctx = get_fimc_context(dev);
1559 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1560 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
1561 struct drm_exynos_ipp_property *property;
1562 struct drm_exynos_ipp_config *config;
1563 struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX];
1564 struct drm_exynos_ipp_set_wb set_wb;
1565 int ret, i;
1566 u32 cfg0, cfg1;
1567
1568 DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
1569
1570 if (!c_node) {
1571 DRM_ERROR("failed to get c_node.\n");
1572 return -EINVAL;
1573 }
1574
1575 property = &c_node->property;
1576 if (!property) {
1577 DRM_ERROR("failed to get property.\n");
1578 return -EINVAL;
1579 }
1580
1581 fimc_handle_irq(ctx, true, false, true);
1582
1583 for_each_ipp_ops(i) {
1584 config = &property->config[i];
1585 img_pos[i] = config->pos;
1586 }
1587
1588 ret = fimc_set_prescaler(ctx, &ctx->sc,
1589 &img_pos[EXYNOS_DRM_OPS_SRC],
1590 &img_pos[EXYNOS_DRM_OPS_DST]);
1591 if (ret) {
1592 dev_err(dev, "failed to set precalser.\n");
1593 return ret;
1594 }
1595
1596 /* if set to true, a jpeg of the screen can be captured */
1597 fimc_handle_jpeg(ctx, false);
1598 fimc_set_scaler(ctx, &ctx->sc);
1599 fimc_set_polarity(ctx, &ctx->pol);
1600
1601 switch (cmd) {
1602 case IPP_CMD_M2M:
1603 fimc_set_type_ctrl(ctx, FIMC_WB_NONE);
1604 fimc_handle_lastend(ctx, false);
1605
1606 /* setup dma */
1607 cfg0 = fimc_read(EXYNOS_MSCTRL);
1608 cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK;
1609 cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY;
1610 fimc_write(cfg0, EXYNOS_MSCTRL);
1611 break;
1612 case IPP_CMD_WB:
1613 fimc_set_type_ctrl(ctx, FIMC_WB_A);
1614 fimc_handle_lastend(ctx, true);
1615
1616 /* setup FIMD */
1617 fimc_set_camblk_fimd0_wb(ctx);
1618
1619 set_wb.enable = 1;
1620 set_wb.refresh = property->refresh_rate;
1621 exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
1622 break;
1623 case IPP_CMD_OUTPUT:
1624 default:
1625 ret = -EINVAL;
1626 dev_err(dev, "invalid operations.\n");
1627 return ret;
1628 }
1629
1630 /* Reset status */
1631 fimc_write(0x0, EXYNOS_CISTATUS);
1632
1633 cfg0 = fimc_read(EXYNOS_CIIMGCPT);
1634 cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN_SC;
1636
1637 /* Scaler */
1638 cfg1 = fimc_read(EXYNOS_CISCCTRL);
1639 cfg1 &= ~EXYNOS_CISCCTRL_SCAN_MASK;
1640 cfg1 |= (EXYNOS_CISCCTRL_PROGRESSIVE |
1641 EXYNOS_CISCCTRL_SCALERSTART);
1642
1643 fimc_write(cfg1, EXYNOS_CISCCTRL);
1644
1645 /* Enable image capture*/
1646 cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN;
1647 fimc_write(cfg0, EXYNOS_CIIMGCPT);
1648
1649 /* Enable frame end irq by clearing the disable bit */
1650 cfg0 = fimc_read(EXYNOS_CIGCTRL);
1651 cfg0 &= ~EXYNOS_CIGCTRL_IRQ_END_DISABLE;
1652 fimc_write(cfg0, EXYNOS_CIGCTRL);
1653
1654 cfg0 = fimc_read(EXYNOS_CIOCTRL);
1655 cfg0 &= ~EXYNOS_CIOCTRL_WEAVE_MASK;
1656 fimc_write(cfg0, EXYNOS_CIOCTRL);
1657
1658 if (cmd == IPP_CMD_M2M) {
1659 cfg0 = fimc_read(EXYNOS_MSCTRL);
1660 cfg0 |= EXYNOS_MSCTRL_ENVID;
1661 fimc_write(cfg0, EXYNOS_MSCTRL);
1662 }
1667
1668 return 0;
1669}
1670
1671static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1672{
1673 struct fimc_context *ctx = get_fimc_context(dev);
1674 struct drm_exynos_ipp_set_wb set_wb = {0, 0};
1675 u32 cfg;
1676
1677 DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
1678
1679 switch (cmd) {
1680 case IPP_CMD_M2M:
1681 /* Source clear */
1682 cfg = fimc_read(EXYNOS_MSCTRL);
1683 cfg &= ~EXYNOS_MSCTRL_INPUT_MASK;
1684 cfg &= ~EXYNOS_MSCTRL_ENVID;
1685 fimc_write(cfg, EXYNOS_MSCTRL);
1686 break;
1687 case IPP_CMD_WB:
1688 exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
1689 break;
1690 case IPP_CMD_OUTPUT:
1691 default:
1692 dev_err(dev, "invalid operations.\n");
1693 break;
1694 }
1695
1696 fimc_handle_irq(ctx, false, false, true);
1697
1698 /* reset sequence */
1699 fimc_write(0x0, EXYNOS_CIFCNTSEQ);
1700
1701 /* Scaler disable */
1702 cfg = fimc_read(EXYNOS_CISCCTRL);
1703 cfg &= ~EXYNOS_CISCCTRL_SCALERSTART;
1704 fimc_write(cfg, EXYNOS_CISCCTRL);
1705
1706 /* Disable image capture */
1707 cfg = fimc_read(EXYNOS_CIIMGCPT);
1708 cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
1709 fimc_write(cfg, EXYNOS_CIIMGCPT);
1710
1711 /* Disable frame end irq by setting the disable bit */
1712 cfg = fimc_read(EXYNOS_CIGCTRL);
1713 cfg |= EXYNOS_CIGCTRL_IRQ_END_DISABLE;
1714 fimc_write(cfg, EXYNOS_CIGCTRL);
1715}
1716
1717static int __devinit fimc_probe(struct platform_device *pdev)
1718{
1719 struct device *dev = &pdev->dev;
1720 struct fimc_context *ctx;
1721 struct clk *parent_clk;
1722 struct resource *res;
1723 struct exynos_drm_ippdrv *ippdrv;
1724 struct exynos_drm_fimc_pdata *pdata;
1725 struct fimc_driverdata *ddata;
1726 int ret;
1727
1728 pdata = pdev->dev.platform_data;
1729 if (!pdata) {
1730 dev_err(dev, "no platform data specified.\n");
1731 return -EINVAL;
1732 }
1733
1734 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
1735 if (!ctx)
1736 return -ENOMEM;
1737
1738 ddata = (struct fimc_driverdata *)
1739 platform_get_device_id(pdev)->driver_data;
1740
1741 /* clock control */
1742 ctx->sclk_fimc_clk = clk_get(dev, "sclk_fimc");
1743 if (IS_ERR(ctx->sclk_fimc_clk)) {
1744 dev_err(dev, "failed to get src fimc clock.\n");
1745 ret = PTR_ERR(ctx->sclk_fimc_clk);
1746 goto err_ctx;
1747 }
1748 clk_enable(ctx->sclk_fimc_clk);
1749
1750 ctx->fimc_clk = clk_get(dev, "fimc");
1751 if (IS_ERR(ctx->fimc_clk)) {
1752 dev_err(dev, "failed to get fimc clock.\n");
1753 ret = PTR_ERR(ctx->fimc_clk);
1754 clk_disable(ctx->sclk_fimc_clk);
1755 clk_put(ctx->sclk_fimc_clk);
1756 goto err_ctx;
1757 }
1758
1759 ctx->wb_clk = clk_get(dev, "pxl_async0");
1760 if (IS_ERR(ctx->wb_clk)) {
1761 dev_err(dev, "failed to get writeback a clock.\n");
1762 ret = PTR_ERR(ctx->wb_clk);
1763 clk_disable(ctx->sclk_fimc_clk);
1764 clk_put(ctx->sclk_fimc_clk);
1765 clk_put(ctx->fimc_clk);
1766 goto err_ctx;
1767 }
1768
1769 ctx->wb_b_clk = clk_get(dev, "pxl_async1");
1770 if (IS_ERR(ctx->wb_b_clk)) {
1771 dev_err(dev, "failed to get writeback b clock.\n");
1772 ret = PTR_ERR(ctx->wb_b_clk);
1773 clk_disable(ctx->sclk_fimc_clk);
1774 clk_put(ctx->sclk_fimc_clk);
1775 clk_put(ctx->fimc_clk);
1776 clk_put(ctx->wb_clk);
1777 goto err_ctx;
1778 }
1779
1780 parent_clk = clk_get(dev, ddata->parent_clk);
1781
1782 if (IS_ERR(parent_clk)) {
1783 dev_err(dev, "failed to get parent clock.\n");
1784 ret = PTR_ERR(parent_clk);
1785 clk_disable(ctx->sclk_fimc_clk);
1786 clk_put(ctx->sclk_fimc_clk);
1787 clk_put(ctx->fimc_clk);
1788 clk_put(ctx->wb_clk);
1789 clk_put(ctx->wb_b_clk);
1790 goto err_ctx;
1791 }
1792
1793 if (clk_set_parent(ctx->sclk_fimc_clk, parent_clk)) {
1794 dev_err(dev, "failed to set parent.\n");
1795 ret = -EINVAL;
1796 clk_put(parent_clk);
1797 clk_disable(ctx->sclk_fimc_clk);
1798 clk_put(ctx->sclk_fimc_clk);
1799 clk_put(ctx->fimc_clk);
1800 clk_put(ctx->wb_clk);
1801 clk_put(ctx->wb_b_clk);
1802 goto err_ctx;
1803 }
1804
1805 clk_put(parent_clk);
1806 clk_set_rate(ctx->sclk_fimc_clk, pdata->clk_rate);
1807
1808 /* resource memory */
1809 ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1810 if (!ctx->regs_res) {
1811 dev_err(dev, "failed to find registers.\n");
1812 ret = -ENOENT;
1813 goto err_clk;
1814 }
1815
1816 ctx->regs = devm_request_and_ioremap(dev, ctx->regs_res);
1817 if (!ctx->regs) {
1818 dev_err(dev, "failed to map registers.\n");
1819 ret = -ENXIO;
1820 goto err_clk;
1821 }
1822
1823 /* resource irq */
1824 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1825 if (!res) {
1826 dev_err(dev, "failed to request irq resource.\n");
1827 ret = -ENOENT;
1828 goto err_get_regs;
1829 }
1830
1831 ctx->irq = res->start;
1832 ret = request_threaded_irq(ctx->irq, NULL, fimc_irq_handler,
1833 IRQF_ONESHOT, "drm_fimc", ctx);
1834 if (ret < 0) {
1835 dev_err(dev, "failed to request irq.\n");
1836 goto err_get_regs;
1837 }
1838
1839 /* context initialization */
1840 ctx->id = pdev->id;
1841 ctx->pol = pdata->pol;
1842 ctx->ddata = ddata;
1843
1844 ippdrv = &ctx->ippdrv;
1845 ippdrv->dev = dev;
1846 ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops;
1847 ippdrv->ops[EXYNOS_DRM_OPS_DST] = &fimc_dst_ops;
1848 ippdrv->check_property = fimc_ippdrv_check_property;
1849 ippdrv->reset = fimc_ippdrv_reset;
1850 ippdrv->start = fimc_ippdrv_start;
1851 ippdrv->stop = fimc_ippdrv_stop;
1852 ret = fimc_init_prop_list(ippdrv);
1853 if (ret < 0) {
1854 dev_err(dev, "failed to init property list.\n");
1855 goto err_get_irq;
1856 }
1857
1858 DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
1859 (int)ippdrv);
1860
1861 mutex_init(&ctx->lock);
1862 platform_set_drvdata(pdev, ctx);
1863
1864 pm_runtime_set_active(dev);
1865 pm_runtime_enable(dev);
1866
1867 ret = exynos_drm_ippdrv_register(ippdrv);
1868 if (ret < 0) {
1869 dev_err(dev, "failed to register drm fimc device.\n");
1870 goto err_ippdrv_register;
1871 }
1872
1873 dev_info(&pdev->dev, "drm fimc registered successfully.\n");
1874
1875 return 0;
1876
1877err_ippdrv_register:
1878 devm_kfree(dev, ippdrv->prop_list);
1879 pm_runtime_disable(dev);
1880err_get_irq:
1881 free_irq(ctx->irq, ctx);
1882err_get_regs:
1883 devm_iounmap(dev, ctx->regs);
1884err_clk:
1885 clk_put(ctx->sclk_fimc_clk);
1886 clk_put(ctx->fimc_clk);
1887 clk_put(ctx->wb_clk);
1888 clk_put(ctx->wb_b_clk);
1889err_ctx:
1890 devm_kfree(dev, ctx);
1891 return ret;
1892}
1893
1894static int __devexit fimc_remove(struct platform_device *pdev)
1895{
1896 struct device *dev = &pdev->dev;
1897 struct fimc_context *ctx = get_fimc_context(dev);
1898 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1899
1900 devm_kfree(dev, ippdrv->prop_list);
1901 exynos_drm_ippdrv_unregister(ippdrv);
1902 mutex_destroy(&ctx->lock);
1903
1904 pm_runtime_set_suspended(dev);
1905 pm_runtime_disable(dev);
1906
1907 free_irq(ctx->irq, ctx);
1908 devm_iounmap(dev, ctx->regs);
1909
1910 clk_put(ctx->sclk_fimc_clk);
1911 clk_put(ctx->fimc_clk);
1912 clk_put(ctx->wb_clk);
1913 clk_put(ctx->wb_b_clk);
1914
1915 devm_kfree(dev, ctx);
1916
1917 return 0;
1918}
1919
1920#ifdef CONFIG_PM_SLEEP
1921static int fimc_suspend(struct device *dev)
1922{
1923 struct fimc_context *ctx = get_fimc_context(dev);
1924
1925 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
1926
1927 if (pm_runtime_suspended(dev))
1928 return 0;
1929
1930 return fimc_clk_ctrl(ctx, false);
1931}
1932
1933static int fimc_resume(struct device *dev)
1934{
1935 struct fimc_context *ctx = get_fimc_context(dev);
1936
1937 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
1938
1939 if (!pm_runtime_suspended(dev))
1940 return fimc_clk_ctrl(ctx, true);
1941
1942 return 0;
1943}
1944#endif
1945
1946#ifdef CONFIG_PM_RUNTIME
1947static int fimc_runtime_suspend(struct device *dev)
1948{
1949 struct fimc_context *ctx = get_fimc_context(dev);
1950
1951 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
1952
1953 return fimc_clk_ctrl(ctx, false);
1954}
1955
1956static int fimc_runtime_resume(struct device *dev)
1957{
1958 struct fimc_context *ctx = get_fimc_context(dev);
1959
1960 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
1961
1962 return fimc_clk_ctrl(ctx, true);
1963}
1964#endif
1965
1966static struct fimc_driverdata exynos4210_fimc_data = {
1967 .parent_clk = "mout_mpll",
1968};
1969
1970static struct fimc_driverdata exynos4410_fimc_data = {
1971 .parent_clk = "mout_mpll_user",
1972};
1973
1974static struct platform_device_id fimc_driver_ids[] = {
1975 {
1976 .name = "exynos4210-fimc",
1977 .driver_data = (unsigned long)&exynos4210_fimc_data,
1978 }, {
1979 .name = "exynos4412-fimc",
1980 .driver_data = (unsigned long)&exynos4410_fimc_data,
1981 },
1982 {},
1983};
1984MODULE_DEVICE_TABLE(platform, fimc_driver_ids);
1985
1986static const struct dev_pm_ops fimc_pm_ops = {
1987 SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
1988 SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
1989};
1990
1991struct platform_driver fimc_driver = {
1992 .probe = fimc_probe,
1993 .remove = __devexit_p(fimc_remove),
1994 .id_table = fimc_driver_ids,
1995 .driver = {
1996 .name = "exynos-drm-fimc",
1997 .owner = THIS_MODULE,
1998 .pm = &fimc_pm_ops,
1999 },
2000};
2001
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.h b/drivers/gpu/drm/exynos/exynos_drm_fimc.h
new file mode 100644
index 000000000000..dc970fa0d888
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.h
@@ -0,0 +1,37 @@
1/*
2 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
3 *
4 * Authors:
5 * Eunchul Kim <chulspro.kim@samsung.com>
6 * Jinyoung Jeon <jy0.jeon@samsung.com>
7 * Sangmin Lee <lsmin.lee@samsung.com>
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */
28
29#ifndef _EXYNOS_DRM_FIMC_H_
30#define _EXYNOS_DRM_FIMC_H_
31
32/*
33 * TODO
34 * FIMD output interface notifier callback.
35 */
36
37#endif /* _EXYNOS_DRM_FIMC_H_ */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index e08478f19f1a..bf0d9baca2bc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -17,6 +17,7 @@
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/clk.h> 19#include <linux/clk.h>
20#include <linux/of_device.h>
20#include <linux/pm_runtime.h> 21#include <linux/pm_runtime.h>
21 22
22#include <video/samsung_fimd.h> 23#include <video/samsung_fimd.h>
@@ -25,6 +26,7 @@
25#include "exynos_drm_drv.h" 26#include "exynos_drm_drv.h"
26#include "exynos_drm_fbdev.h" 27#include "exynos_drm_fbdev.h"
27#include "exynos_drm_crtc.h" 28#include "exynos_drm_crtc.h"
29#include "exynos_drm_iommu.h"
28 30
29/* 31/*
30 * FIMD is stand for Fully Interactive Mobile Display and 32 * FIMD is stand for Fully Interactive Mobile Display and
@@ -78,10 +80,10 @@ struct fimd_win_data {
78 unsigned int fb_height; 80 unsigned int fb_height;
79 unsigned int bpp; 81 unsigned int bpp;
80 dma_addr_t dma_addr; 82 dma_addr_t dma_addr;
81 void __iomem *vaddr;
82 unsigned int buf_offsize; 83 unsigned int buf_offsize;
83 unsigned int line_size; /* bytes */ 84 unsigned int line_size; /* bytes */
84 bool enabled; 85 bool enabled;
86 bool resume;
85}; 87};
86 88
87struct fimd_context { 89struct fimd_context {
@@ -99,13 +101,34 @@ struct fimd_context {
99 u32 vidcon1; 101 u32 vidcon1;
100 bool suspended; 102 bool suspended;
101 struct mutex lock; 103 struct mutex lock;
104 wait_queue_head_t wait_vsync_queue;
105 atomic_t wait_vsync_event;
102 106
103 struct exynos_drm_panel_info *panel; 107 struct exynos_drm_panel_info *panel;
104}; 108};
105 109
110#ifdef CONFIG_OF
111static const struct of_device_id fimd_driver_dt_match[] = {
112 { .compatible = "samsung,exynos4-fimd",
113 .data = &exynos4_fimd_driver_data },
114 { .compatible = "samsung,exynos5-fimd",
115 .data = &exynos5_fimd_driver_data },
116 {},
117};
118MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
119#endif
120
106static inline struct fimd_driver_data *drm_fimd_get_driver_data( 121static inline struct fimd_driver_data *drm_fimd_get_driver_data(
107 struct platform_device *pdev) 122 struct platform_device *pdev)
108{ 123{
124#ifdef CONFIG_OF
125 const struct of_device_id *of_id =
126 of_match_device(fimd_driver_dt_match, &pdev->dev);
127
128 if (of_id)
129 return (struct fimd_driver_data *)of_id->data;
130#endif
131
109 return (struct fimd_driver_data *) 132 return (struct fimd_driver_data *)
110 platform_get_device_id(pdev)->driver_data; 133 platform_get_device_id(pdev)->driver_data;
111} 134}
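drm_fimd_get_driver_data above tries the device-tree match table first and only falls back to the platform id_table, so one probe path serves both DT and legacy board-file devices. A condensed sketch of that lookup order (foo_dt_match is a hypothetical of_device_id table assumed to carry driver-data pointers, as fimd_driver_dt_match does):

#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Sketch: prefer OF match data, fall back to the legacy id table. */
static void *foo_get_driver_data(struct platform_device *pdev)
{
#ifdef CONFIG_OF
	const struct of_device_id *of_id =
		of_match_device(foo_dt_match, &pdev->dev);

	if (of_id)
		return (void *)of_id->data;
#endif
	return (void *)platform_get_device_id(pdev)->driver_data;
}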
@@ -240,7 +263,9 @@ static void fimd_commit(struct device *dev)
240 263
241 /* setup horizontal and vertical display size. */ 264 /* setup horizontal and vertical display size. */
242 val = VIDTCON2_LINEVAL(timing->yres - 1) | 265 val = VIDTCON2_LINEVAL(timing->yres - 1) |
243 VIDTCON2_HOZVAL(timing->xres - 1); 266 VIDTCON2_HOZVAL(timing->xres - 1) |
267 VIDTCON2_LINEVAL_E(timing->yres - 1) |
268 VIDTCON2_HOZVAL_E(timing->xres - 1);
244 writel(val, ctx->regs + driver_data->timing_base + VIDTCON2); 269 writel(val, ctx->regs + driver_data->timing_base + VIDTCON2);
245 270
246 /* setup clock source, clock divider, enable dma. */ 271 /* setup clock source, clock divider, enable dma. */
@@ -307,12 +332,32 @@ static void fimd_disable_vblank(struct device *dev)
307 } 332 }
308} 333}
309 334
335static void fimd_wait_for_vblank(struct device *dev)
336{
337 struct fimd_context *ctx = get_fimd_context(dev);
338
339 if (ctx->suspended)
340 return;
341
342 atomic_set(&ctx->wait_vsync_event, 1);
343
344 /*
345 * wait for FIMD to signal the VSYNC interrupt, or give up after
346 * a 50ms timeout (one frame period at a 20Hz refresh rate).
347 */
348 if (!wait_event_timeout(ctx->wait_vsync_queue,
349 !atomic_read(&ctx->wait_vsync_event),
350 DRM_HZ/20))
351 DRM_DEBUG_KMS("vblank wait timed out.\n");
352}
353
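The new fimd_wait_for_vblank pairs an atomic flag with a wait queue: the caller arms wait_vsync_event and sleeps, the IRQ handler (see the hunk further below) clears the flag and wakes the queue, and the 50ms timeout bounds the sleep if no VSYNC ever arrives. A stripped-down sketch of that handshake, outside the driver:

#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(vsync_wq);
static atomic_t vsync_pending = ATOMIC_INIT(0);

/* Waiter: arm the flag, then sleep for at most 50ms (HZ / 20 jiffies). */
static void wait_one_vblank(void)
{
	atomic_set(&vsync_pending, 1);
	if (!wait_event_timeout(vsync_wq,
				!atomic_read(&vsync_pending), HZ / 20))
		pr_debug("vblank wait timed out\n");
}

/* Interrupt side: clear the flag and wake any waiter. */
static void vsync_arrived(void)
{
	if (atomic_read(&vsync_pending)) {
		atomic_set(&vsync_pending, 0);
		wake_up(&vsync_wq);
	}
}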
310static struct exynos_drm_manager_ops fimd_manager_ops = { 354static struct exynos_drm_manager_ops fimd_manager_ops = {
311 .dpms = fimd_dpms, 355 .dpms = fimd_dpms,
312 .apply = fimd_apply, 356 .apply = fimd_apply,
313 .commit = fimd_commit, 357 .commit = fimd_commit,
314 .enable_vblank = fimd_enable_vblank, 358 .enable_vblank = fimd_enable_vblank,
315 .disable_vblank = fimd_disable_vblank, 359 .disable_vblank = fimd_disable_vblank,
360 .wait_for_vblank = fimd_wait_for_vblank,
316}; 361};
317 362
318static void fimd_win_mode_set(struct device *dev, 363static void fimd_win_mode_set(struct device *dev,
@@ -351,7 +396,6 @@ static void fimd_win_mode_set(struct device *dev,
351 win_data->fb_width = overlay->fb_width; 396 win_data->fb_width = overlay->fb_width;
352 win_data->fb_height = overlay->fb_height; 397 win_data->fb_height = overlay->fb_height;
353 win_data->dma_addr = overlay->dma_addr[0] + offset; 398 win_data->dma_addr = overlay->dma_addr[0] + offset;
354 win_data->vaddr = overlay->vaddr[0] + offset;
355 win_data->bpp = overlay->bpp; 399 win_data->bpp = overlay->bpp;
356 win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) * 400 win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
357 (overlay->bpp >> 3); 401 (overlay->bpp >> 3);
@@ -361,9 +405,7 @@ static void fimd_win_mode_set(struct device *dev,
361 win_data->offset_x, win_data->offset_y); 405 win_data->offset_x, win_data->offset_y);
362 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", 406 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
363 win_data->ovl_width, win_data->ovl_height); 407 win_data->ovl_width, win_data->ovl_height);
364 DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n", 408 DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
365 (unsigned long)win_data->dma_addr,
366 (unsigned long)win_data->vaddr);
367 DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n", 409 DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
368 overlay->fb_width, overlay->crtc_width); 410 overlay->fb_width, overlay->crtc_width);
369} 411}
@@ -451,6 +493,8 @@ static void fimd_win_commit(struct device *dev, int zpos)
451 struct fimd_win_data *win_data; 493 struct fimd_win_data *win_data;
452 int win = zpos; 494 int win = zpos;
453 unsigned long val, alpha, size; 495 unsigned long val, alpha, size;
496 unsigned int last_x;
497 unsigned int last_y;
454 498
455 DRM_DEBUG_KMS("%s\n", __FILE__); 499 DRM_DEBUG_KMS("%s\n", __FILE__);
456 500
@@ -496,24 +540,32 @@ static void fimd_win_commit(struct device *dev, int zpos)
496 540
497 /* buffer size */ 541 /* buffer size */
498 val = VIDW_BUF_SIZE_OFFSET(win_data->buf_offsize) | 542 val = VIDW_BUF_SIZE_OFFSET(win_data->buf_offsize) |
499 VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size); 543 VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size) |
544 VIDW_BUF_SIZE_OFFSET_E(win_data->buf_offsize) |
545 VIDW_BUF_SIZE_PAGEWIDTH_E(win_data->line_size);
500 writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0)); 546 writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0));
501 547
502 /* OSD position */ 548 /* OSD position */
503 val = VIDOSDxA_TOPLEFT_X(win_data->offset_x) | 549 val = VIDOSDxA_TOPLEFT_X(win_data->offset_x) |
504 VIDOSDxA_TOPLEFT_Y(win_data->offset_y); 550 VIDOSDxA_TOPLEFT_Y(win_data->offset_y) |
551 VIDOSDxA_TOPLEFT_X_E(win_data->offset_x) |
552 VIDOSDxA_TOPLEFT_Y_E(win_data->offset_y);
505 writel(val, ctx->regs + VIDOSD_A(win)); 553 writel(val, ctx->regs + VIDOSD_A(win));
506 554
507 val = VIDOSDxB_BOTRIGHT_X(win_data->offset_x + 555 last_x = win_data->offset_x + win_data->ovl_width;
508 win_data->ovl_width - 1) | 556 if (last_x)
509 VIDOSDxB_BOTRIGHT_Y(win_data->offset_y + 557 last_x--;
510 win_data->ovl_height - 1); 558 last_y = win_data->offset_y + win_data->ovl_height;
559 if (last_y)
560 last_y--;
561
562 val = VIDOSDxB_BOTRIGHT_X(last_x) | VIDOSDxB_BOTRIGHT_Y(last_y) |
563 VIDOSDxB_BOTRIGHT_X_E(last_x) | VIDOSDxB_BOTRIGHT_Y_E(last_y);
564
511 writel(val, ctx->regs + VIDOSD_B(win)); 565 writel(val, ctx->regs + VIDOSD_B(win));
512 566
513 DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n", 567 DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
514 win_data->offset_x, win_data->offset_y, 568 win_data->offset_x, win_data->offset_y, last_x, last_y);
515 win_data->offset_x + win_data->ovl_width - 1,
516 win_data->offset_y + win_data->ovl_height - 1);
517 569
518 /* hardware window 0 doesn't support alpha channel. */ 570 /* hardware window 0 doesn't support alpha channel. */
519 if (win != 0) { 571 if (win != 0) {
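The last_x/last_y computation above replaces a bare "offset + size - 1" because these values are unsigned: for a degenerate zero-width or zero-height window the subtraction would wrap to UINT_MAX and program an enormous bottom-right corner into VIDOSD_B. The guard in isolation (a hypothetical helper, not from the patch):

/* Clamp "offset + size - 1" without wrapping when size == 0. */
static inline unsigned int last_coord(unsigned int offset, unsigned int size)
{
	unsigned int last = offset + size;

	return last ? last - 1 : 0;
}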
@@ -573,6 +625,12 @@ static void fimd_win_disable(struct device *dev, int zpos)
573 625
574 win_data = &ctx->win_data[win]; 626 win_data = &ctx->win_data[win];
575 627
628 if (ctx->suspended) {
629 /* do not resume this window */
630 win_data->resume = false;
631 return;
632 }
633
576 /* protect windows */ 634 /* protect windows */
577 val = readl(ctx->regs + SHADOWCON); 635 val = readl(ctx->regs + SHADOWCON);
578 val |= SHADOWCON_WINx_PROTECT(win); 636 val |= SHADOWCON_WINx_PROTECT(win);
@@ -592,22 +650,10 @@ static void fimd_win_disable(struct device *dev, int zpos)
592 win_data->enabled = false; 650 win_data->enabled = false;
593} 651}
594 652
595static void fimd_wait_for_vblank(struct device *dev)
596{
597 struct fimd_context *ctx = get_fimd_context(dev);
598 int ret;
599
600 ret = wait_for((__raw_readl(ctx->regs + VIDCON1) &
601 VIDCON1_VSTATUS_VSYNC), 50);
602 if (ret < 0)
603 DRM_DEBUG_KMS("vblank wait timed out.\n");
604}
605
606static struct exynos_drm_overlay_ops fimd_overlay_ops = { 653static struct exynos_drm_overlay_ops fimd_overlay_ops = {
607 .mode_set = fimd_win_mode_set, 654 .mode_set = fimd_win_mode_set,
608 .commit = fimd_win_commit, 655 .commit = fimd_win_commit,
609 .disable = fimd_win_disable, 656 .disable = fimd_win_disable,
610 .wait_for_vblank = fimd_wait_for_vblank,
611}; 657};
612 658
613static struct exynos_drm_manager fimd_manager = { 659static struct exynos_drm_manager fimd_manager = {
@@ -623,7 +669,6 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
623 struct drm_pending_vblank_event *e, *t; 669 struct drm_pending_vblank_event *e, *t;
624 struct timeval now; 670 struct timeval now;
625 unsigned long flags; 671 unsigned long flags;
626 bool is_checked = false;
627 672
628 spin_lock_irqsave(&drm_dev->event_lock, flags); 673 spin_lock_irqsave(&drm_dev->event_lock, flags);
629 674
@@ -633,8 +678,6 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
633 if (crtc != e->pipe) 678 if (crtc != e->pipe)
634 continue; 679 continue;
635 680
636 is_checked = true;
637
638 do_gettimeofday(&now); 681 do_gettimeofday(&now);
639 e->event.sequence = 0; 682 e->event.sequence = 0;
640 e->event.tv_sec = now.tv_sec; 683 e->event.tv_sec = now.tv_sec;
@@ -642,22 +685,7 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
642 685
643 list_move_tail(&e->base.link, &e->base.file_priv->event_list); 686 list_move_tail(&e->base.link, &e->base.file_priv->event_list);
644 wake_up_interruptible(&e->base.file_priv->event_wait); 687 wake_up_interruptible(&e->base.file_priv->event_wait);
645 } 688 drm_vblank_put(drm_dev, crtc);
646
647 if (is_checked) {
648 /*
649 * call drm_vblank_put only in case that drm_vblank_get was
650 * called.
651 */
652 if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
653 drm_vblank_put(drm_dev, crtc);
654
655 /*
656 * don't off vblank if vblank_disable_allowed is 1,
657 * because vblank would be off by timer handler.
658 */
659 if (!drm_dev->vblank_disable_allowed)
660 drm_vblank_off(drm_dev, crtc);
661 } 689 }
662 690
663 spin_unlock_irqrestore(&drm_dev->event_lock, flags); 691 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
@@ -684,6 +712,11 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
684 drm_handle_vblank(drm_dev, manager->pipe); 712 drm_handle_vblank(drm_dev, manager->pipe);
685 fimd_finish_pageflip(drm_dev, manager->pipe); 713 fimd_finish_pageflip(drm_dev, manager->pipe);
686 714
715 /* clear the wait_vsync_event flag and wake up the wait queue. */
716 if (atomic_read(&ctx->wait_vsync_event)) {
717 atomic_set(&ctx->wait_vsync_event, 0);
718 DRM_WAKEUP(&ctx->wait_vsync_queue);
719 }
687out: 720out:
688 return IRQ_HANDLED; 721 return IRQ_HANDLED;
689} 722}
@@ -709,6 +742,10 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
709 */ 742 */
710 drm_dev->vblank_disable_allowed = 1; 743 drm_dev->vblank_disable_allowed = 1;
711 744
745 /* attach this sub driver to iommu mapping if supported. */
746 if (is_drm_iommu_supported(drm_dev))
747 drm_iommu_attach_device(drm_dev, dev);
748
712 return 0; 749 return 0;
713} 750}
714 751
@@ -716,7 +753,9 @@ static void fimd_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
716{ 753{
717 DRM_DEBUG_KMS("%s\n", __FILE__); 754 DRM_DEBUG_KMS("%s\n", __FILE__);
718 755
719 /* TODO. */ 756 /* detach this sub driver from iommu mapping if supported. */
757 if (is_drm_iommu_supported(drm_dev))
758 drm_iommu_detach_device(drm_dev, dev);
720} 759}
721 760
722static int fimd_calc_clkdiv(struct fimd_context *ctx, 761static int fimd_calc_clkdiv(struct fimd_context *ctx,
@@ -805,11 +844,38 @@ static int fimd_clock(struct fimd_context *ctx, bool enable)
805 return 0; 844 return 0;
806} 845}
807 846
847static void fimd_window_suspend(struct device *dev)
848{
849 struct fimd_context *ctx = get_fimd_context(dev);
850 struct fimd_win_data *win_data;
851 int i;
852
853 for (i = 0; i < WINDOWS_NR; i++) {
854 win_data = &ctx->win_data[i];
855 win_data->resume = win_data->enabled;
856 fimd_win_disable(dev, i);
857 }
858 fimd_wait_for_vblank(dev);
859}
860
861static void fimd_window_resume(struct device *dev)
862{
863 struct fimd_context *ctx = get_fimd_context(dev);
864 struct fimd_win_data *win_data;
865 int i;
866
867 for (i = 0; i < WINDOWS_NR; i++) {
868 win_data = &ctx->win_data[i];
869 win_data->enabled = win_data->resume;
870 win_data->resume = false;
871 }
872}
873
808static int fimd_activate(struct fimd_context *ctx, bool enable) 874static int fimd_activate(struct fimd_context *ctx, bool enable)
809{ 875{
876 struct device *dev = ctx->subdrv.dev;
810 if (enable) { 877 if (enable) {
811 int ret; 878 int ret;
812 struct device *dev = ctx->subdrv.dev;
813 879
814 ret = fimd_clock(ctx, true); 880 ret = fimd_clock(ctx, true);
815 if (ret < 0) 881 if (ret < 0)
@@ -820,7 +886,11 @@ static int fimd_activate(struct fimd_context *ctx, bool enable)
820 /* if vblank was enabled status, enable it again. */ 886 /* if vblank was enabled status, enable it again. */
821 if (test_and_clear_bit(0, &ctx->irq_flags)) 887 if (test_and_clear_bit(0, &ctx->irq_flags))
822 fimd_enable_vblank(dev); 888 fimd_enable_vblank(dev);
889
890 fimd_window_resume(dev);
823 } else { 891 } else {
892 fimd_window_suspend(dev);
893
824 fimd_clock(ctx, false); 894 fimd_clock(ctx, false);
825 ctx->suspended = true; 895 ctx->suspended = true;
826 } 896 }
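fimd_window_suspend snapshots each window's enabled state into the new resume flag before disabling it, and fimd_window_resume restores that snapshot; the early return added to fimd_win_disable keeps a disable request that arrives while suspended from clobbering the saved state. The state handling, reduced to a sketch over a simplified window struct:

#include <linux/types.h>

struct win_state {
	bool enabled;	/* window currently showing */
	bool resume;	/* re-enable after resume */
};

/* Suspend: remember which windows were on, then turn them off. */
static void windows_suspend(struct win_state *win, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		win[i].resume = win[i].enabled;
		win[i].enabled = false;		/* hardware disable elided */
	}
}

/* Resume: restore the remembered state and clear the snapshot. */
static void windows_resume(struct win_state *win, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		win[i].enabled = win[i].resume;
		win[i].resume = false;
	}
}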
@@ -857,18 +927,16 @@ static int __devinit fimd_probe(struct platform_device *pdev)
857 if (!ctx) 927 if (!ctx)
858 return -ENOMEM; 928 return -ENOMEM;
859 929
860 ctx->bus_clk = clk_get(dev, "fimd"); 930 ctx->bus_clk = devm_clk_get(dev, "fimd");
861 if (IS_ERR(ctx->bus_clk)) { 931 if (IS_ERR(ctx->bus_clk)) {
862 dev_err(dev, "failed to get bus clock\n"); 932 dev_err(dev, "failed to get bus clock\n");
863 ret = PTR_ERR(ctx->bus_clk); 933 return PTR_ERR(ctx->bus_clk);
864 goto err_clk_get;
865 } 934 }
866 935
867 ctx->lcd_clk = clk_get(dev, "sclk_fimd"); 936 ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
868 if (IS_ERR(ctx->lcd_clk)) { 937 if (IS_ERR(ctx->lcd_clk)) {
869 dev_err(dev, "failed to get lcd clock\n"); 938 dev_err(dev, "failed to get lcd clock\n");
870 ret = PTR_ERR(ctx->lcd_clk); 939 return PTR_ERR(ctx->lcd_clk);
871 goto err_bus_clk;
872 } 940 }
873 941
874 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 942 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -876,14 +944,13 @@ static int __devinit fimd_probe(struct platform_device *pdev)
876 ctx->regs = devm_request_and_ioremap(&pdev->dev, res); 944 ctx->regs = devm_request_and_ioremap(&pdev->dev, res);
877 if (!ctx->regs) { 945 if (!ctx->regs) {
878 dev_err(dev, "failed to map registers\n"); 946 dev_err(dev, "failed to map registers\n");
879 ret = -ENXIO; 947 return -ENXIO;
880 goto err_clk;
881 } 948 }
882 949
883 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 950 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
884 if (!res) { 951 if (!res) {
885 dev_err(dev, "irq request failed.\n"); 952 dev_err(dev, "irq request failed.\n");
886 goto err_clk; 953 return -ENXIO;
887 } 954 }
888 955
889 ctx->irq = res->start; 956 ctx->irq = res->start;
@@ -892,13 +959,15 @@ static int __devinit fimd_probe(struct platform_device *pdev)
892 0, "drm_fimd", ctx); 959 0, "drm_fimd", ctx);
893 if (ret) { 960 if (ret) {
894 dev_err(dev, "irq request failed.\n"); 961 dev_err(dev, "irq request failed.\n");
895 goto err_clk; 962 return ret;
896 } 963 }
897 964
898 ctx->vidcon0 = pdata->vidcon0; 965 ctx->vidcon0 = pdata->vidcon0;
899 ctx->vidcon1 = pdata->vidcon1; 966 ctx->vidcon1 = pdata->vidcon1;
900 ctx->default_win = pdata->default_win; 967 ctx->default_win = pdata->default_win;
901 ctx->panel = panel; 968 ctx->panel = panel;
969 DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
970 atomic_set(&ctx->wait_vsync_event, 0);
902 971
903 subdrv = &ctx->subdrv; 972 subdrv = &ctx->subdrv;
904 973
@@ -926,17 +995,6 @@ static int __devinit fimd_probe(struct platform_device *pdev)
926 exynos_drm_subdrv_register(subdrv); 995 exynos_drm_subdrv_register(subdrv);
927 996
928 return 0; 997 return 0;
929
930err_clk:
931 clk_disable(ctx->lcd_clk);
932 clk_put(ctx->lcd_clk);
933
934err_bus_clk:
935 clk_disable(ctx->bus_clk);
936 clk_put(ctx->bus_clk);
937
938err_clk_get:
939 return ret;
940} 998}
941 999
942static int __devexit fimd_remove(struct platform_device *pdev) 1000static int __devexit fimd_remove(struct platform_device *pdev)
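The probe cleanup above collapses because devm_clk_get ties the clock references to the device's lifetime: when probe fails, or when the device is unbound, the managed resources are released automatically, so the err_clk/err_bus_clk/err_clk_get unwind labels and the clk_put calls in fimd_remove all become dead code. The resulting shape, sketched as a hypothetical helper using the same clock names:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Sketch: managed clock lookup needs no goto-based unwinding. */
static int foo_get_clocks(struct device *dev)
{
	struct clk *bus_clk, *lcd_clk;

	bus_clk = devm_clk_get(dev, "fimd");
	if (IS_ERR(bus_clk))
		return PTR_ERR(bus_clk);	/* nothing to undo */

	lcd_clk = devm_clk_get(dev, "sclk_fimd");
	if (IS_ERR(lcd_clk))
		return PTR_ERR(lcd_clk);	/* bus_clk released by devres */

	return 0;
}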
@@ -960,9 +1018,6 @@ static int __devexit fimd_remove(struct platform_device *pdev)
960out: 1018out:
961 pm_runtime_disable(dev); 1019 pm_runtime_disable(dev);
962 1020
963 clk_put(ctx->lcd_clk);
964 clk_put(ctx->bus_clk);
965
966 return 0; 1021 return 0;
967} 1022}
968 1023
@@ -1056,5 +1111,6 @@ struct platform_driver fimd_driver = {
1056 .name = "exynos4-fb", 1111 .name = "exynos4-fb",
1057 .owner = THIS_MODULE, 1112 .owner = THIS_MODULE,
1058 .pm = &fimd_pm_ops, 1113 .pm = &fimd_pm_ops,
1114 .of_match_table = of_match_ptr(fimd_driver_dt_match),
1059 }, 1115 },
1060}; 1116};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index f7aab24ea46c..6ffa0763c078 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -17,11 +17,14 @@
17#include <linux/pm_runtime.h> 17#include <linux/pm_runtime.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/workqueue.h> 19#include <linux/workqueue.h>
20#include <linux/dma-mapping.h>
21#include <linux/dma-attrs.h>
20 22
21#include <drm/drmP.h> 23#include <drm/drmP.h>
22#include <drm/exynos_drm.h> 24#include <drm/exynos_drm.h>
23#include "exynos_drm_drv.h" 25#include "exynos_drm_drv.h"
24#include "exynos_drm_gem.h" 26#include "exynos_drm_gem.h"
27#include "exynos_drm_iommu.h"
25 28
26#define G2D_HW_MAJOR_VER 4 29#define G2D_HW_MAJOR_VER 4
27#define G2D_HW_MINOR_VER 1 30#define G2D_HW_MINOR_VER 1
@@ -92,11 +95,21 @@
92#define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM) 95#define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
93#define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2) 96#define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
94 97
98#define MAX_BUF_ADDR_NR 6
99
100/* maximum buffer pool size of userptr is 64MB as default */
101#define MAX_POOL (64 * 1024 * 1024)
102
103enum {
104 BUF_TYPE_GEM = 1,
105 BUF_TYPE_USERPTR,
106};
107
95/* cmdlist data structure */ 108/* cmdlist data structure */
96struct g2d_cmdlist { 109struct g2d_cmdlist {
97 u32 head; 110 u32 head;
98 u32 data[G2D_CMDLIST_DATA_NUM]; 111 unsigned long data[G2D_CMDLIST_DATA_NUM];
99 u32 last; /* last data offset */ 112 u32 last; /* last data offset */
100}; 113};
101 114
102struct drm_exynos_pending_g2d_event { 115struct drm_exynos_pending_g2d_event {
@@ -104,15 +117,26 @@ struct drm_exynos_pending_g2d_event {
104 struct drm_exynos_g2d_event event; 117 struct drm_exynos_g2d_event event;
105}; 118};
106 119
107struct g2d_gem_node { 120struct g2d_cmdlist_userptr {
108 struct list_head list; 121 struct list_head list;
109 unsigned int handle; 122 dma_addr_t dma_addr;
123 unsigned long userptr;
124 unsigned long size;
125 struct page **pages;
126 unsigned int npages;
127 struct sg_table *sgt;
128 struct vm_area_struct *vma;
129 atomic_t refcount;
130 bool in_pool;
131 bool out_of_list;
110}; 132};
111 133
112struct g2d_cmdlist_node { 134struct g2d_cmdlist_node {
113 struct list_head list; 135 struct list_head list;
114 struct g2d_cmdlist *cmdlist; 136 struct g2d_cmdlist *cmdlist;
115 unsigned int gem_nr; 137 unsigned int map_nr;
138 unsigned long handles[MAX_BUF_ADDR_NR];
139 unsigned int obj_type[MAX_BUF_ADDR_NR];
116 dma_addr_t dma_addr; 140 dma_addr_t dma_addr;
117 141
118 struct drm_exynos_pending_g2d_event *event; 142 struct drm_exynos_pending_g2d_event *event;
@@ -122,6 +146,7 @@ struct g2d_runqueue_node {
122 struct list_head list; 146 struct list_head list;
123 struct list_head run_cmdlist; 147 struct list_head run_cmdlist;
124 struct list_head event_list; 148 struct list_head event_list;
149 struct drm_file *filp;
125 pid_t pid; 150 pid_t pid;
126 struct completion complete; 151 struct completion complete;
127 int async; 152 int async;
@@ -143,23 +168,33 @@ struct g2d_data {
143 struct mutex cmdlist_mutex; 168 struct mutex cmdlist_mutex;
144 dma_addr_t cmdlist_pool; 169 dma_addr_t cmdlist_pool;
145 void *cmdlist_pool_virt; 170 void *cmdlist_pool_virt;
171 struct dma_attrs cmdlist_dma_attrs;
146 172
147 /* runqueue*/ 173 /* runqueue*/
148 struct g2d_runqueue_node *runqueue_node; 174 struct g2d_runqueue_node *runqueue_node;
149 struct list_head runqueue; 175 struct list_head runqueue;
150 struct mutex runqueue_mutex; 176 struct mutex runqueue_mutex;
151 struct kmem_cache *runqueue_slab; 177 struct kmem_cache *runqueue_slab;
178
179 unsigned long current_pool;
180 unsigned long max_pool;
152}; 181};
153 182
154static int g2d_init_cmdlist(struct g2d_data *g2d) 183static int g2d_init_cmdlist(struct g2d_data *g2d)
155{ 184{
156 struct device *dev = g2d->dev; 185 struct device *dev = g2d->dev;
157 struct g2d_cmdlist_node *node = g2d->cmdlist_node; 186 struct g2d_cmdlist_node *node = g2d->cmdlist_node;
187 struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
158 int nr; 188 int nr;
159 int ret; 189 int ret;
160 190
161 g2d->cmdlist_pool_virt = dma_alloc_coherent(dev, G2D_CMDLIST_POOL_SIZE, 191 init_dma_attrs(&g2d->cmdlist_dma_attrs);
162 &g2d->cmdlist_pool, GFP_KERNEL); 192 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);
193
194 g2d->cmdlist_pool_virt = dma_alloc_attrs(subdrv->drm_dev->dev,
195 G2D_CMDLIST_POOL_SIZE,
196 &g2d->cmdlist_pool, GFP_KERNEL,
197 &g2d->cmdlist_dma_attrs);
163 if (!g2d->cmdlist_pool_virt) { 198 if (!g2d->cmdlist_pool_virt) {
164 dev_err(dev, "failed to allocate dma memory\n"); 199 dev_err(dev, "failed to allocate dma memory\n");
165 return -ENOMEM; 200 return -ENOMEM;
@@ -184,18 +219,20 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
184 return 0; 219 return 0;
185 220
186err: 221err:
187 dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt, 222 dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
188 g2d->cmdlist_pool); 223 g2d->cmdlist_pool_virt,
224 g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
189 return ret; 225 return ret;
190} 226}
191 227
192static void g2d_fini_cmdlist(struct g2d_data *g2d) 228static void g2d_fini_cmdlist(struct g2d_data *g2d)
193{ 229{
194 struct device *dev = g2d->dev; 230 struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
195 231
196 kfree(g2d->cmdlist_node); 232 kfree(g2d->cmdlist_node);
197 dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt, 233 dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
198 g2d->cmdlist_pool); 234 g2d->cmdlist_pool_virt,
235 g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
199} 236}
200 237
201static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d) 238static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
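The cmdlist pool switches from dma_alloc_coherent on the platform device to dma_alloc_attrs on the DRM device, asking for DMA_ATTR_WRITE_COMBINE so CPU writes to the command buffers are buffered rather than fully uncached, and so the allocation lands in the IOMMU mapping shared by the subdrivers. A sketch of the attrs-based alloc/free pairing, against the dma-attrs API of this kernel generation:

#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>

/* Sketch: allocate and free a write-combined DMA pool. */
static void *alloc_wc_pool(struct device *dev, size_t size,
			   dma_addr_t *dma, struct dma_attrs *attrs)
{
	init_dma_attrs(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, attrs);

	return dma_alloc_attrs(dev, size, dma, GFP_KERNEL, attrs);
}

static void free_wc_pool(struct device *dev, size_t size, void *cpu,
			 dma_addr_t dma, struct dma_attrs *attrs)
{
	dma_free_attrs(dev, size, cpu, dma, attrs);
}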
@@ -245,62 +282,300 @@ add_to_list:
245 list_add_tail(&node->event->base.link, &g2d_priv->event_list); 282 list_add_tail(&node->event->base.link, &g2d_priv->event_list);
246} 283}
247 284
248static int g2d_get_cmdlist_gem(struct drm_device *drm_dev, 285static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
249 struct drm_file *file, 286 unsigned long obj,
250 struct g2d_cmdlist_node *node) 287 bool force)
251{ 288{
252 struct drm_exynos_file_private *file_priv = file->driver_priv; 289 struct g2d_cmdlist_userptr *g2d_userptr =
290 (struct g2d_cmdlist_userptr *)obj;
291
292 if (!obj)
293 return;
294
295 if (force)
296 goto out;
297
298 atomic_dec(&g2d_userptr->refcount);
299
300 if (atomic_read(&g2d_userptr->refcount) > 0)
301 return;
302
303 if (g2d_userptr->in_pool)
304 return;
305
306out:
307 exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
308 DMA_BIDIRECTIONAL);
309
310 exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
311 g2d_userptr->npages,
312 g2d_userptr->vma);
313
314 if (!g2d_userptr->out_of_list)
315 list_del_init(&g2d_userptr->list);
316
317 sg_free_table(g2d_userptr->sgt);
318 kfree(g2d_userptr->sgt);
319 g2d_userptr->sgt = NULL;
320
321 kfree(g2d_userptr->pages);
322 g2d_userptr->pages = NULL;
323 kfree(g2d_userptr);
324 g2d_userptr = NULL;
325}
326
327dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
328 unsigned long userptr,
329 unsigned long size,
330 struct drm_file *filp,
331 unsigned long *obj)
332{
333 struct drm_exynos_file_private *file_priv = filp->driver_priv;
334 struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
335 struct g2d_cmdlist_userptr *g2d_userptr;
336 struct g2d_data *g2d;
337 struct page **pages;
338 struct sg_table *sgt;
339 struct vm_area_struct *vma;
340 unsigned long start, end;
341 unsigned int npages, offset;
342 int ret;
343
344 if (!size) {
345 DRM_ERROR("invalid userptr size.\n");
346 return ERR_PTR(-EINVAL);
347 }
348
349 g2d = dev_get_drvdata(g2d_priv->dev);
350
351 /* check if userptr already exists in userptr_list. */
352 list_for_each_entry(g2d_userptr, &g2d_priv->userptr_list, list) {
353 if (g2d_userptr->userptr == userptr) {
354 /*
355 * also check the size, because the same address could
356 * be registered again with a different size.
357 */
358 if (g2d_userptr->size == size) {
359 atomic_inc(&g2d_userptr->refcount);
360 *obj = (unsigned long)g2d_userptr;
361
362 return &g2d_userptr->dma_addr;
363 }
364
365 /*
366 * the g2d dma engine may still be accessing this
367 * g2d_userptr memory region, so remove the object from
368 * userptr_list so that it cannot be looked up again, and
369 * exclude it from the userptr pool so that it is released
370 * only after the dma access has completed.
371 */
372 g2d_userptr->out_of_list = true;
373 g2d_userptr->in_pool = false;
374 list_del_init(&g2d_userptr->list);
375
376 break;
377 }
378 }
379
380 g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
381 if (!g2d_userptr) {
382 DRM_ERROR("failed to allocate g2d_userptr.\n");
383 return ERR_PTR(-ENOMEM);
384 }
385
386 atomic_set(&g2d_userptr->refcount, 1);
387
388 start = userptr & PAGE_MASK;
389 offset = userptr & ~PAGE_MASK;
390 end = PAGE_ALIGN(userptr + size);
391 npages = (end - start) >> PAGE_SHIFT;
392 g2d_userptr->npages = npages;
393
394 pages = kzalloc(npages * sizeof(struct page *), GFP_KERNEL);
395 if (!pages) {
396 DRM_ERROR("failed to allocate pages.\n");
397 kfree(g2d_userptr);
398 return ERR_PTR(-ENOMEM);
399 }
400
401 vma = find_vma(current->mm, userptr);
402 if (!vma) {
403 DRM_ERROR("failed to get vm region.\n");
404 ret = -EFAULT;
405 goto err_free_pages;
406 }
407
408 if (vma->vm_end < userptr + size) {
409 DRM_ERROR("vma is too small.\n");
410 ret = -EFAULT;
411 goto err_free_pages;
412 }
413
414 g2d_userptr->vma = exynos_gem_get_vma(vma);
415 if (!g2d_userptr->vma) {
416 DRM_ERROR("failed to copy vma.\n");
417 ret = -ENOMEM;
418 goto err_free_pages;
419 }
420
421 g2d_userptr->size = size;
422
423 ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK,
424 npages, pages, vma);
425 if (ret < 0) {
426 DRM_ERROR("failed to get user pages from userptr.\n");
427 goto err_put_vma;
428 }
429
430 g2d_userptr->pages = pages;
431
432 sgt = kzalloc(sizeof *sgt, GFP_KERNEL);
433 if (!sgt) {
434 DRM_ERROR("failed to allocate sg table.\n");
435 ret = -ENOMEM;
436 goto err_free_userptr;
437 }
438
439 ret = sg_alloc_table_from_pages(sgt, pages, npages, offset,
440 size, GFP_KERNEL);
441 if (ret < 0) {
442 DRM_ERROR("failed to get sgt from pages.\n");
443 goto err_free_sgt;
444 }
445
446 g2d_userptr->sgt = sgt;
447
448 ret = exynos_gem_map_sgt_with_dma(drm_dev, g2d_userptr->sgt,
449 DMA_BIDIRECTIONAL);
450 if (ret < 0) {
451 DRM_ERROR("failed to map sgt with dma region.\n");
452 goto err_free_sgt;
453 }
454
455 g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
456 g2d_userptr->userptr = userptr;
457
458 list_add_tail(&g2d_userptr->list, &g2d_priv->userptr_list);
459
460 if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
461 g2d->current_pool += npages << PAGE_SHIFT;
462 g2d_userptr->in_pool = true;
463 }
464
465 *obj = (unsigned long)g2d_userptr;
466
467 return &g2d_userptr->dma_addr;
468
469err_free_sgt:
470 sg_free_table(sgt);
471 kfree(sgt);
472 sgt = NULL;
473
474err_free_userptr:
475 exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
476 g2d_userptr->npages,
477 g2d_userptr->vma);
478
479err_put_vma:
480 exynos_gem_put_vma(g2d_userptr->vma);
481
482err_free_pages:
483 kfree(pages);
484 kfree(g2d_userptr);
485 pages = NULL;
486 g2d_userptr = NULL;
487
488 return ERR_PTR(ret);
489}
490
491static void g2d_userptr_free_all(struct drm_device *drm_dev,
492 struct g2d_data *g2d,
493 struct drm_file *filp)
494{
495 struct drm_exynos_file_private *file_priv = filp->driver_priv;
253 struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv; 496 struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
497 struct g2d_cmdlist_userptr *g2d_userptr, *n;
498
499 list_for_each_entry_safe(g2d_userptr, n, &g2d_priv->userptr_list, list)
500 if (g2d_userptr->in_pool)
501 g2d_userptr_put_dma_addr(drm_dev,
502 (unsigned long)g2d_userptr,
503 true);
504
505 g2d->current_pool = 0;
506}
507
508static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
509 struct g2d_cmdlist_node *node,
510 struct drm_device *drm_dev,
511 struct drm_file *file)
512{
254 struct g2d_cmdlist *cmdlist = node->cmdlist; 513 struct g2d_cmdlist *cmdlist = node->cmdlist;
255 dma_addr_t *addr;
256 int offset; 514 int offset;
257 int i; 515 int i;
258 516
259 for (i = 0; i < node->gem_nr; i++) { 517 for (i = 0; i < node->map_nr; i++) {
260 struct g2d_gem_node *gem_node; 518 unsigned long handle;
261 519 dma_addr_t *addr;
262 gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL);
263 if (!gem_node) {
264 dev_err(g2d_priv->dev, "failed to allocate gem node\n");
265 return -ENOMEM;
266 }
267 520
268 offset = cmdlist->last - (i * 2 + 1); 521 offset = cmdlist->last - (i * 2 + 1);
269 gem_node->handle = cmdlist->data[offset]; 522 handle = cmdlist->data[offset];
270 523
271 addr = exynos_drm_gem_get_dma_addr(drm_dev, gem_node->handle, 524 if (node->obj_type[i] == BUF_TYPE_GEM) {
272 file); 525 addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
273 if (IS_ERR(addr)) { 526 file);
274 node->gem_nr = i; 527 if (IS_ERR(addr)) {
275 kfree(gem_node); 528 node->map_nr = i;
276 return PTR_ERR(addr); 529 return -EFAULT;
530 }
531 } else {
532 struct drm_exynos_g2d_userptr g2d_userptr;
533
534 if (copy_from_user(&g2d_userptr, (void __user *)handle,
535 sizeof(struct drm_exynos_g2d_userptr))) {
536 node->map_nr = i;
537 return -EFAULT;
538 }
539
540 addr = g2d_userptr_get_dma_addr(drm_dev,
541 g2d_userptr.userptr,
542 g2d_userptr.size,
543 file,
544 &handle);
545 if (IS_ERR(addr)) {
546 node->map_nr = i;
547 return -EFAULT;
548 }
277 } 549 }
278 550
279 cmdlist->data[offset] = *addr; 551 cmdlist->data[offset] = *addr;
280 list_add_tail(&gem_node->list, &g2d_priv->gem_list); 552 node->handles[i] = handle;
281 g2d_priv->gem_nr++;
282 } 553 }
283 554
284 return 0; 555 return 0;
285} 556}
286 557
287static void g2d_put_cmdlist_gem(struct drm_device *drm_dev, 558static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
288 struct drm_file *file, 559 struct g2d_cmdlist_node *node,
289 unsigned int nr) 560 struct drm_file *filp)
290{ 561{
291 struct drm_exynos_file_private *file_priv = file->driver_priv; 562 struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
292 struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv; 563 int i;
293 struct g2d_gem_node *node, *n;
294 564
295 list_for_each_entry_safe_reverse(node, n, &g2d_priv->gem_list, list) { 565 for (i = 0; i < node->map_nr; i++) {
296 if (!nr) 566 unsigned long handle = node->handles[i];
297 break;
298 567
299 exynos_drm_gem_put_dma_addr(drm_dev, node->handle, file); 568 if (node->obj_type[i] == BUF_TYPE_GEM)
300 list_del_init(&node->list); 569 exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
301 kfree(node); 570 filp);
302 nr--; 571 else
572 g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
573 false);
574
575 node->handles[i] = 0;
303 } 576 }
577
578 node->map_nr = 0;
304} 579}
305 580
306static void g2d_dma_start(struct g2d_data *g2d, 581static void g2d_dma_start(struct g2d_data *g2d,
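g2d_userptr_get_dma_addr above is the heart of the userptr support: validate the vma, pin the user pages, build an sg_table over them, and DMA-map the table; g2d_userptr_put_dma_addr undoes those steps in reverse once the refcount drops and the region is not kept in the pool. The happy path, condensed into a sketch (locking and error unwinding elided; the get_user_pages signature is the one from this kernel generation):

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>

/* Sketch: pin a user buffer and DMA-map it; sgt->sgl then holds the addrs. */
static int pin_and_map_userptr(struct device *dev, unsigned long userptr,
			       unsigned long size, struct page **pages,
			       struct sg_table *sgt)
{
	unsigned long start = userptr & PAGE_MASK;
	unsigned int offset = userptr & ~PAGE_MASK;
	unsigned int npages = (PAGE_ALIGN(userptr + size) - start) >> PAGE_SHIFT;
	int ret;

	/* caller is expected to hold mmap_sem across the pin */
	ret = get_user_pages(current, current->mm, start, npages,
			     1, 1, pages, NULL);
	if (ret != npages)
		return -EFAULT;

	ret = sg_alloc_table_from_pages(sgt, pages, npages, offset,
					size, GFP_KERNEL);
	if (ret < 0)
		return ret;

	if (!dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL))
		return -EIO;

	return 0;
}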
@@ -337,10 +612,18 @@ static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
337static void g2d_free_runqueue_node(struct g2d_data *g2d, 612static void g2d_free_runqueue_node(struct g2d_data *g2d,
338 struct g2d_runqueue_node *runqueue_node) 613 struct g2d_runqueue_node *runqueue_node)
339{ 614{
615 struct g2d_cmdlist_node *node;
616
340 if (!runqueue_node) 617 if (!runqueue_node)
341 return; 618 return;
342 619
343 mutex_lock(&g2d->cmdlist_mutex); 620 mutex_lock(&g2d->cmdlist_mutex);
621 /*
622 * the commands in run_cmdlist have completed, so unmap the gem
623 * objects in each command node to drop their references.
624 */
625 list_for_each_entry(node, &runqueue_node->run_cmdlist, list)
626 g2d_unmap_cmdlist_gem(g2d, node, runqueue_node->filp);
344 list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist); 627 list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
345 mutex_unlock(&g2d->cmdlist_mutex); 628 mutex_unlock(&g2d->cmdlist_mutex);
346 629
@@ -430,15 +713,28 @@ static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
430 return IRQ_HANDLED; 713 return IRQ_HANDLED;
431} 714}
432 715
433static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist, 716static int g2d_check_reg_offset(struct device *dev,
717 struct g2d_cmdlist_node *node,
434 int nr, bool for_addr) 718 int nr, bool for_addr)
435{ 719{
720 struct g2d_cmdlist *cmdlist = node->cmdlist;
436 int reg_offset; 721 int reg_offset;
437 int index; 722 int index;
438 int i; 723 int i;
439 724
440 for (i = 0; i < nr; i++) { 725 for (i = 0; i < nr; i++) {
441 index = cmdlist->last - 2 * (i + 1); 726 index = cmdlist->last - 2 * (i + 1);
727
728 if (for_addr) {
729 /* check userptr buffer type. */
730 reg_offset = (cmdlist->data[index] &
731 ~0x7fffffff) >> 31;
732 if (reg_offset) {
733 node->obj_type[i] = BUF_TYPE_USERPTR;
734 cmdlist->data[index] &= ~G2D_BUF_USERPTR;
735 }
736 }
737
442 reg_offset = cmdlist->data[index] & ~0xfffff000; 738 reg_offset = cmdlist->data[index] & ~0xfffff000;
443 739
444 if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END) 740 if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
@@ -455,6 +751,9 @@ static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
455 case G2D_MSK_BASE_ADDR: 751 case G2D_MSK_BASE_ADDR:
456 if (!for_addr) 752 if (!for_addr)
457 goto err; 753 goto err;
754
755 if (node->obj_type[i] != BUF_TYPE_USERPTR)
756 node->obj_type[i] = BUF_TYPE_GEM;
458 break; 757 break;
459 default: 758 default:
460 if (for_addr) 759 if (for_addr)
@@ -466,7 +765,7 @@ static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
466 return 0; 765 return 0;
467 766
468err: 767err:
469 dev_err(dev, "Bad register offset: 0x%x\n", cmdlist->data[index]); 768 dev_err(dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
470 return -EINVAL; 769 return -EINVAL;
471} 770}
472 771
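The userptr detection above piggybacks on bit 31 of the command value: userspace sets G2D_BUF_USERPTR on a buffer slot to mark it as a user pointer rather than a GEM handle, and the kernel records the type and strips the bit before validating the register offset. The decode in isolation (a hypothetical helper mirroring that flag):

/* Mirrors G2D_BUF_USERPTR: the top bit marks a userptr slot. */
#define BUF_FLAG_USERPTR	(1UL << 31)

/* Sketch: classify a cmdlist slot, clearing the flag bit in place. */
static int slot_is_userptr(unsigned long *slot)
{
	if (*slot & BUF_FLAG_USERPTR) {
		*slot &= ~BUF_FLAG_USERPTR;
		return 1;
	}

	return 0;
}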
@@ -566,7 +865,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
566 } 865 }
567 866
568 /* Check size of cmdlist: last 2 is about G2D_BITBLT_START */ 867 /* Check size of cmdlist: last 2 is about G2D_BITBLT_START */
569 size = cmdlist->last + req->cmd_nr * 2 + req->cmd_gem_nr * 2 + 2; 868 size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
570 if (size > G2D_CMDLIST_DATA_NUM) { 869 if (size > G2D_CMDLIST_DATA_NUM) {
571 dev_err(dev, "cmdlist size is too big\n"); 870 dev_err(dev, "cmdlist size is too big\n");
572 ret = -EINVAL; 871 ret = -EINVAL;
@@ -583,29 +882,29 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
583 } 882 }
584 cmdlist->last += req->cmd_nr * 2; 883 cmdlist->last += req->cmd_nr * 2;
585 884
586 ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_nr, false); 885 ret = g2d_check_reg_offset(dev, node, req->cmd_nr, false);
587 if (ret < 0) 886 if (ret < 0)
588 goto err_free_event; 887 goto err_free_event;
589 888
590 node->gem_nr = req->cmd_gem_nr; 889 node->map_nr = req->cmd_buf_nr;
591 if (req->cmd_gem_nr) { 890 if (req->cmd_buf_nr) {
592 struct drm_exynos_g2d_cmd *cmd_gem; 891 struct drm_exynos_g2d_cmd *cmd_buf;
593 892
594 cmd_gem = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_gem; 893 cmd_buf = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_buf;
595 894
596 if (copy_from_user(cmdlist->data + cmdlist->last, 895 if (copy_from_user(cmdlist->data + cmdlist->last,
597 (void __user *)cmd_gem, 896 (void __user *)cmd_buf,
598 sizeof(*cmd_gem) * req->cmd_gem_nr)) { 897 sizeof(*cmd_buf) * req->cmd_buf_nr)) {
599 ret = -EFAULT; 898 ret = -EFAULT;
600 goto err_free_event; 899 goto err_free_event;
601 } 900 }
602 cmdlist->last += req->cmd_gem_nr * 2; 901 cmdlist->last += req->cmd_buf_nr * 2;
603 902
604 ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_gem_nr, true); 903 ret = g2d_check_reg_offset(dev, node, req->cmd_buf_nr, true);
605 if (ret < 0) 904 if (ret < 0)
606 goto err_free_event; 905 goto err_free_event;
607 906
608 ret = g2d_get_cmdlist_gem(drm_dev, file, node); 907 ret = g2d_map_cmdlist_gem(g2d, node, drm_dev, file);
609 if (ret < 0) 908 if (ret < 0)
610 goto err_unmap; 909 goto err_unmap;
611 } 910 }
@@ -624,7 +923,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
624 return 0; 923 return 0;
625 924
626err_unmap: 925err_unmap:
627 g2d_put_cmdlist_gem(drm_dev, file, node->gem_nr); 926 g2d_unmap_cmdlist_gem(g2d, node, file);
628err_free_event: 927err_free_event:
629 if (node->event) { 928 if (node->event) {
630 spin_lock_irqsave(&drm_dev->event_lock, flags); 929 spin_lock_irqsave(&drm_dev->event_lock, flags);
@@ -680,6 +979,7 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
680 979
681 mutex_lock(&g2d->runqueue_mutex); 980 mutex_lock(&g2d->runqueue_mutex);
682 runqueue_node->pid = current->pid; 981 runqueue_node->pid = current->pid;
982 runqueue_node->filp = file;
683 list_add_tail(&runqueue_node->list, &g2d->runqueue); 983 list_add_tail(&runqueue_node->list, &g2d->runqueue);
684 if (!g2d->runqueue_node) 984 if (!g2d->runqueue_node)
685 g2d_exec_runqueue(g2d); 985 g2d_exec_runqueue(g2d);
@@ -696,6 +996,43 @@ out:
696} 996}
697EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl); 997EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
698 998
999static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1000{
1001 struct g2d_data *g2d;
1002 int ret;
1003
1004 g2d = dev_get_drvdata(dev);
1005 if (!g2d)
1006 return -EFAULT;
1007
1008 /* allocate dma-aware cmdlist buffer. */
1009 ret = g2d_init_cmdlist(g2d);
1010 if (ret < 0) {
1011 dev_err(dev, "cmdlist init failed\n");
1012 return ret;
1013 }
1014
1015 if (!is_drm_iommu_supported(drm_dev))
1016 return 0;
1017
1018 ret = drm_iommu_attach_device(drm_dev, dev);
1019 if (ret < 0) {
1020 dev_err(dev, "failed to enable iommu.\n");
1021 g2d_fini_cmdlist(g2d);
1022 }
1023
1024 return ret;
1025
1026}
1027
1028static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
1029{
1030 if (!is_drm_iommu_supported(drm_dev))
1031 return;
1032
1033 drm_iommu_detach_device(drm_dev, dev);
1034}
1035
699static int g2d_open(struct drm_device *drm_dev, struct device *dev, 1036static int g2d_open(struct drm_device *drm_dev, struct device *dev,
700 struct drm_file *file) 1037 struct drm_file *file)
701{ 1038{
@@ -713,7 +1050,7 @@ static int g2d_open(struct drm_device *drm_dev, struct device *dev,
713 1050
714 INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist); 1051 INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
715 INIT_LIST_HEAD(&g2d_priv->event_list); 1052 INIT_LIST_HEAD(&g2d_priv->event_list);
716 INIT_LIST_HEAD(&g2d_priv->gem_list); 1053 INIT_LIST_HEAD(&g2d_priv->userptr_list);
717 1054
718 return 0; 1055 return 0;
719} 1056}
@@ -734,11 +1071,21 @@ static void g2d_close(struct drm_device *drm_dev, struct device *dev,
734 return; 1071 return;
735 1072
736 mutex_lock(&g2d->cmdlist_mutex); 1073 mutex_lock(&g2d->cmdlist_mutex);
737 list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) 1074 list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) {
1075 /*
1076 * unmap all gem objects that have not completed.
1077 *
1078 * if the current process was terminated forcibly, there
1079 * may still be commands left in inuse_cmdlist, so unmap
1080 * them here.
1081 */
1082 g2d_unmap_cmdlist_gem(g2d, node, file);
738 list_move_tail(&node->list, &g2d->free_cmdlist); 1083 list_move_tail(&node->list, &g2d->free_cmdlist);
1084 }
739 mutex_unlock(&g2d->cmdlist_mutex); 1085 mutex_unlock(&g2d->cmdlist_mutex);
740 1086
741 g2d_put_cmdlist_gem(drm_dev, file, g2d_priv->gem_nr); 1087 /* release all g2d_userptr in pool. */
1088 g2d_userptr_free_all(drm_dev, g2d, file);
742 1089
743 kfree(file_priv->g2d_priv); 1090 kfree(file_priv->g2d_priv);
744} 1091}
@@ -778,15 +1125,11 @@ static int __devinit g2d_probe(struct platform_device *pdev)
778 mutex_init(&g2d->cmdlist_mutex); 1125 mutex_init(&g2d->cmdlist_mutex);
779 mutex_init(&g2d->runqueue_mutex); 1126 mutex_init(&g2d->runqueue_mutex);
780 1127
781 ret = g2d_init_cmdlist(g2d); 1128 g2d->gate_clk = devm_clk_get(dev, "fimg2d");
782 if (ret < 0)
783 goto err_destroy_workqueue;
784
785 g2d->gate_clk = clk_get(dev, "fimg2d");
786 if (IS_ERR(g2d->gate_clk)) { 1129 if (IS_ERR(g2d->gate_clk)) {
787 dev_err(dev, "failed to get gate clock\n"); 1130 dev_err(dev, "failed to get gate clock\n");
788 ret = PTR_ERR(g2d->gate_clk); 1131 ret = PTR_ERR(g2d->gate_clk);
789 goto err_fini_cmdlist; 1132 goto err_destroy_workqueue;
790 } 1133 }
791 1134
792 pm_runtime_enable(dev); 1135 pm_runtime_enable(dev);
@@ -814,10 +1157,14 @@ static int __devinit g2d_probe(struct platform_device *pdev)
814 goto err_put_clk; 1157 goto err_put_clk;
815 } 1158 }
816 1159
1160 g2d->max_pool = MAX_POOL;
1161
817 platform_set_drvdata(pdev, g2d); 1162 platform_set_drvdata(pdev, g2d);
818 1163
819 subdrv = &g2d->subdrv; 1164 subdrv = &g2d->subdrv;
820 subdrv->dev = dev; 1165 subdrv->dev = dev;
1166 subdrv->probe = g2d_subdrv_probe;
1167 subdrv->remove = g2d_subdrv_remove;
821 subdrv->open = g2d_open; 1168 subdrv->open = g2d_open;
822 subdrv->close = g2d_close; 1169 subdrv->close = g2d_close;
823 1170
@@ -834,9 +1181,6 @@ static int __devinit g2d_probe(struct platform_device *pdev)
834 1181
835err_put_clk: 1182err_put_clk:
836 pm_runtime_disable(dev); 1183 pm_runtime_disable(dev);
837 clk_put(g2d->gate_clk);
838err_fini_cmdlist:
839 g2d_fini_cmdlist(g2d);
840err_destroy_workqueue: 1184err_destroy_workqueue:
841 destroy_workqueue(g2d->g2d_workq); 1185 destroy_workqueue(g2d->g2d_workq);
842err_destroy_slab: 1186err_destroy_slab:
@@ -857,7 +1201,6 @@ static int __devexit g2d_remove(struct platform_device *pdev)
857 } 1201 }
858 1202
859 pm_runtime_disable(&pdev->dev); 1203 pm_runtime_disable(&pdev->dev);
860 clk_put(g2d->gate_clk);
861 1204
862 g2d_fini_cmdlist(g2d); 1205 g2d_fini_cmdlist(g2d);
863 destroy_workqueue(g2d->g2d_workq); 1206 destroy_workqueue(g2d->g2d_workq);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index d2545560664f..d48183e7e056 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -83,157 +83,40 @@ static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
83 83
84static unsigned long roundup_gem_size(unsigned long size, unsigned int flags) 84static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
85{ 85{
86 if (!IS_NONCONTIG_BUFFER(flags)) { 86 /* TODO */
87 if (size >= SZ_1M)
88 return roundup(size, SECTION_SIZE);
89 else if (size >= SZ_64K)
90 return roundup(size, SZ_64K);
91 else
92 goto out;
93 }
94out:
95 return roundup(size, PAGE_SIZE);
96}
97
98struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
99 gfp_t gfpmask)
100{
101 struct page *p, **pages;
102 int i, npages;
103
104 npages = obj->size >> PAGE_SHIFT;
105
106 pages = drm_malloc_ab(npages, sizeof(struct page *));
107 if (pages == NULL)
108 return ERR_PTR(-ENOMEM);
109
110 for (i = 0; i < npages; i++) {
111 p = alloc_page(gfpmask);
112 if (IS_ERR(p))
113 goto fail;
114 pages[i] = p;
115 }
116
117 return pages;
118
119fail:
120 while (--i)
121 __free_page(pages[i]);
122
123 drm_free_large(pages);
124 return ERR_CAST(p);
125}
126
127static void exynos_gem_put_pages(struct drm_gem_object *obj,
128 struct page **pages)
129{
130 int npages;
131
132 npages = obj->size >> PAGE_SHIFT;
133
134 while (--npages >= 0)
135 __free_page(pages[npages]);
136 87
137 drm_free_large(pages); 88 return roundup(size, PAGE_SIZE);
138} 89}
139 90
140static int exynos_drm_gem_map_pages(struct drm_gem_object *obj, 91static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
141 struct vm_area_struct *vma, 92 struct vm_area_struct *vma,
142 unsigned long f_vaddr, 93 unsigned long f_vaddr,
143 pgoff_t page_offset) 94 pgoff_t page_offset)
144{ 95{
145 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); 96 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
146 struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer; 97 struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
98 struct scatterlist *sgl;
147 unsigned long pfn; 99 unsigned long pfn;
100 int i;
148 101
149 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) { 102 if (!buf->sgt)
150 if (!buf->pages) 103 return -EINTR;
151 return -EINTR;
152
153 pfn = page_to_pfn(buf->pages[page_offset++]);
154 } else
155 pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
156
157 return vm_insert_mixed(vma, f_vaddr, pfn);
158}
159 104
160static int exynos_drm_gem_get_pages(struct drm_gem_object *obj) 105 if (page_offset >= (buf->size >> PAGE_SHIFT)) {
161{ 106 DRM_ERROR("invalid page offset\n");
162 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
163 struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
164 struct scatterlist *sgl;
165 struct page **pages;
166 unsigned int npages, i = 0;
167 int ret;
168
169 if (buf->pages) {
170 DRM_DEBUG_KMS("already allocated.\n");
171 return -EINVAL; 107 return -EINVAL;
172 } 108 }
173 109
174 pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE);
175 if (IS_ERR(pages)) {
176 DRM_ERROR("failed to get pages.\n");
177 return PTR_ERR(pages);
178 }
179
180 npages = obj->size >> PAGE_SHIFT;
181 buf->page_size = PAGE_SIZE;
182
183 buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
184 if (!buf->sgt) {
185 DRM_ERROR("failed to allocate sg table.\n");
186 ret = -ENOMEM;
187 goto err;
188 }
189
190 ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
191 if (ret < 0) {
192 DRM_ERROR("failed to initialize sg table.\n");
193 ret = -EFAULT;
194 goto err1;
195 }
196
197 sgl = buf->sgt->sgl; 110 sgl = buf->sgt->sgl;
198 111 for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
199 /* set all pages to sg list. */ 112 if (page_offset < (sgl->length >> PAGE_SHIFT))
200 while (i < npages) { 113 break;
201 sg_set_page(sgl, pages[i], PAGE_SIZE, 0); 114 page_offset -= (sgl->length >> PAGE_SHIFT);
202 sg_dma_address(sgl) = page_to_phys(pages[i]);
203 i++;
204 sgl = sg_next(sgl);
205 } 115 }
206 116
207 /* add some codes for UNCACHED type here. TODO */ 117 pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
208
209 buf->pages = pages;
210 return ret;
211err1:
212 kfree(buf->sgt);
213 buf->sgt = NULL;
214err:
215 exynos_gem_put_pages(obj, pages);
216 return ret;
217
218}
219
220static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
221{
222 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
223 struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
224
225 /*
226 * if buffer typs is EXYNOS_BO_NONCONTIG then release all pages
227 * allocated at gem fault handler.
228 */
229 sg_free_table(buf->sgt);
230 kfree(buf->sgt);
231 buf->sgt = NULL;
232
233 exynos_gem_put_pages(obj, buf->pages);
234 buf->pages = NULL;
235 118
236 /* add some codes for UNCACHED type here. TODO */ 119 return vm_insert_mixed(vma, f_vaddr, pfn);
237} 120}
238 121
239static int exynos_drm_gem_handle_create(struct drm_gem_object *obj, 122static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
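exynos_drm_gem_map_buf now serves faults for contiguous and non-contiguous buffers the same way: walk the sg_table, skipping whole entries until the faulting page offset falls inside one, then derive the pfn from that entry's physical address. The walk in isolation (the caller must have validated the offset against the buffer size first, as the driver does; __phys_to_pfn is the same arch helper the driver uses):

#include <linux/scatterlist.h>

/* Sketch: translate a page offset inside a buffer into a pfn. */
static unsigned long sgt_offset_to_pfn(struct sg_table *sgt, pgoff_t off)
{
	struct scatterlist *sgl;
	int i;

	for_each_sg(sgt->sgl, sgl, sgt->nents, i) {
		if (off < (sgl->length >> PAGE_SHIFT))
			break;
		off -= sgl->length >> PAGE_SHIFT;
	}

	return __phys_to_pfn(sg_phys(sgl)) + off;
}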
@@ -270,9 +153,6 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
270 153
271 DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count)); 154 DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
272 155
273 if (!buf->pages)
274 return;
275
276 /* 156 /*
277 * do not release memory region from exporter. 157 * do not release memory region from exporter.
278 * 158 *
@@ -282,10 +162,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
282 if (obj->import_attach) 162 if (obj->import_attach)
283 goto out; 163 goto out;
284 164
285 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) 165 exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
286 exynos_drm_gem_put_pages(obj);
287 else
288 exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
289 166
290out: 167out:
291 exynos_drm_fini_buf(obj->dev, buf); 168 exynos_drm_fini_buf(obj->dev, buf);
@@ -364,22 +241,10 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
364 /* set memory type and cache attribute from user side. */ 241 /* set memory type and cache attribute from user side. */
365 exynos_gem_obj->flags = flags; 242 exynos_gem_obj->flags = flags;
366 243
367 /* 244 ret = exynos_drm_alloc_buf(dev, buf, flags);
368 * allocate all pages as desired size if user wants to allocate 245 if (ret < 0) {
369 * physically non-continuous memory. 246 drm_gem_object_release(&exynos_gem_obj->base);
370 */ 247 goto err_fini_buf;
371 if (flags & EXYNOS_BO_NONCONTIG) {
372 ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
373 if (ret < 0) {
374 drm_gem_object_release(&exynos_gem_obj->base);
375 goto err_fini_buf;
376 }
377 } else {
378 ret = exynos_drm_alloc_buf(dev, buf, flags);
379 if (ret < 0) {
380 drm_gem_object_release(&exynos_gem_obj->base);
381 goto err_fini_buf;
382 }
383 } 248 }
384 249
385 return exynos_gem_obj; 250 return exynos_gem_obj;
@@ -412,14 +277,14 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
412 return 0; 277 return 0;
413} 278}
414 279
415void *exynos_drm_gem_get_dma_addr(struct drm_device *dev, 280dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
416 unsigned int gem_handle, 281 unsigned int gem_handle,
417 struct drm_file *file_priv) 282 struct drm_file *filp)
418{ 283{
419 struct exynos_drm_gem_obj *exynos_gem_obj; 284 struct exynos_drm_gem_obj *exynos_gem_obj;
420 struct drm_gem_object *obj; 285 struct drm_gem_object *obj;
421 286
422 obj = drm_gem_object_lookup(dev, file_priv, gem_handle); 287 obj = drm_gem_object_lookup(dev, filp, gem_handle);
423 if (!obj) { 288 if (!obj) {
424 DRM_ERROR("failed to lookup gem object.\n"); 289 DRM_ERROR("failed to lookup gem object.\n");
425 return ERR_PTR(-EINVAL); 290 return ERR_PTR(-EINVAL);
@@ -427,25 +292,17 @@ void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
427 292
428 exynos_gem_obj = to_exynos_gem_obj(obj); 293 exynos_gem_obj = to_exynos_gem_obj(obj);
429 294
430 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
431 DRM_DEBUG_KMS("not support NONCONTIG type.\n");
432 drm_gem_object_unreference_unlocked(obj);
433
434 /* TODO */
435 return ERR_PTR(-EINVAL);
436 }
437
438 return &exynos_gem_obj->buffer->dma_addr; 295 return &exynos_gem_obj->buffer->dma_addr;
439} 296}
440 297
441void exynos_drm_gem_put_dma_addr(struct drm_device *dev, 298void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
442 unsigned int gem_handle, 299 unsigned int gem_handle,
443 struct drm_file *file_priv) 300 struct drm_file *filp)
444{ 301{
445 struct exynos_drm_gem_obj *exynos_gem_obj; 302 struct exynos_drm_gem_obj *exynos_gem_obj;
446 struct drm_gem_object *obj; 303 struct drm_gem_object *obj;
447 304
448 obj = drm_gem_object_lookup(dev, file_priv, gem_handle); 305 obj = drm_gem_object_lookup(dev, filp, gem_handle);
449 if (!obj) { 306 if (!obj) {
450 DRM_ERROR("failed to lookup gem object.\n"); 307 DRM_ERROR("failed to lookup gem object.\n");
451 return; 308 return;
@@ -453,14 +310,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
453 310
454 exynos_gem_obj = to_exynos_gem_obj(obj); 311 exynos_gem_obj = to_exynos_gem_obj(obj);
455 312
456 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
457 DRM_DEBUG_KMS("not support NONCONTIG type.\n");
458 drm_gem_object_unreference_unlocked(obj);
459
460 /* TODO */
461 return;
462 }
463
464 drm_gem_object_unreference_unlocked(obj); 313 drm_gem_object_unreference_unlocked(obj);
465 314
466 /* 315 /*
@@ -489,22 +338,57 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
489 &args->offset); 338 &args->offset);
490} 339}
491 340
341static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
342 struct file *filp)
343{
344 struct drm_file *file_priv;
345
346 mutex_lock(&drm_dev->struct_mutex);
347
348 /* find current process's drm_file from filelist. */
349 list_for_each_entry(file_priv, &drm_dev->filelist, lhead) {
350 if (file_priv->filp == filp) {
351 mutex_unlock(&drm_dev->struct_mutex);
352 return file_priv;
353 }
354 }
355
356 mutex_unlock(&drm_dev->struct_mutex);
357 WARN_ON(1);
358
359 return ERR_PTR(-EFAULT);
360}
361
492static int exynos_drm_gem_mmap_buffer(struct file *filp, 362static int exynos_drm_gem_mmap_buffer(struct file *filp,
493 struct vm_area_struct *vma) 363 struct vm_area_struct *vma)
494{ 364{
495 struct drm_gem_object *obj = filp->private_data; 365 struct drm_gem_object *obj = filp->private_data;
496 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); 366 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
367 struct drm_device *drm_dev = obj->dev;
497 struct exynos_drm_gem_buf *buffer; 368 struct exynos_drm_gem_buf *buffer;
498 unsigned long pfn, vm_size, usize, uaddr = vma->vm_start; 369 struct drm_file *file_priv;
370 unsigned long vm_size;
499 int ret; 371 int ret;
500 372
501 DRM_DEBUG_KMS("%s\n", __FILE__); 373 DRM_DEBUG_KMS("%s\n", __FILE__);
502 374
503 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; 375 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
376 vma->vm_private_data = obj;
377 vma->vm_ops = drm_dev->driver->gem_vm_ops;
378
379 /* restore it to driver's fops. */
380 filp->f_op = fops_get(drm_dev->driver->fops);
381
382 file_priv = exynos_drm_find_drm_file(drm_dev, filp);
383 if (IS_ERR(file_priv))
384 return PTR_ERR(file_priv);
385
386 /* restore it to drm_file. */
387 filp->private_data = file_priv;
504 388
505 update_vm_cache_attr(exynos_gem_obj, vma); 389 update_vm_cache_attr(exynos_gem_obj, vma);
506 390
507 vm_size = usize = vma->vm_end - vma->vm_start; 391 vm_size = vma->vm_end - vma->vm_start;
508 392
509 /* 393 /*
510 * a buffer contains information about physically contiguous memory 394
@@ -516,40 +400,23 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
516 if (vm_size > buffer->size) 400 if (vm_size > buffer->size)
517 return -EINVAL; 401 return -EINVAL;
518 402
519 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) { 403 ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
520 int i = 0; 404 buffer->dma_addr, buffer->size,
521 405 &buffer->dma_attrs);
522 if (!buffer->pages) 406 if (ret < 0) {
523 return -EINVAL; 407 DRM_ERROR("failed to mmap.\n");
408 return ret;
409 }
524 410
525 vma->vm_flags |= VM_MIXEDMAP; 411 /*
412 * take a reference to this mapping of the object. the reference
413 * is dropped by the corresponding vm_close call.
414 */
415 drm_gem_object_reference(obj);
526 416
527 do { 417 mutex_lock(&drm_dev->struct_mutex);
528 ret = vm_insert_page(vma, uaddr, buffer->pages[i++]); 418 drm_vm_open_locked(drm_dev, vma);
529 if (ret) { 419 mutex_unlock(&drm_dev->struct_mutex);
530 DRM_ERROR("failed to remap user space.\n");
531 return ret;
532 }
533
534 uaddr += PAGE_SIZE;
535 usize -= PAGE_SIZE;
536 } while (usize > 0);
537 } else {
538 /*
539 * get page frame number to physical memory to be mapped
540 * to user space.
541 */
542 pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
543 PAGE_SHIFT;
544
545 DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
546
547 if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
548 vma->vm_page_prot)) {
549 DRM_ERROR("failed to remap pfn range.\n");
550 return -EAGAIN;
551 }
552 }
553 420
554 return 0; 421 return 0;
555} 422}
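The userspace mapping path is rewritten to match the allocator: instead of hand-rolling a vm_insert_page loop for non-contiguous buffers and remap_pfn_range for contiguous ones, the buffer is handed to dma_mmap_attrs with the same attrs it was allocated with, and the DMA layer picks the right mapping strategy. A sketch of the call (cpu_addr stands for whatever cookie dma_alloc_attrs returned for the buffer):

#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Sketch: map an attrs-allocated DMA buffer into a userspace vma. */
static int mmap_dma_buffer(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size,
			   struct dma_attrs *attrs)
{
	if (vma->vm_end - vma->vm_start > size)
		return -EINVAL;		/* never map beyond the buffer */

	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
}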
@@ -578,16 +445,29 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
578 return -EINVAL; 445 return -EINVAL;
579 } 446 }
580 447
581 obj->filp->f_op = &exynos_drm_gem_fops; 448 /*
582 obj->filp->private_data = obj; 449 * Set the specific mapper's fops; it will be restored by
 450 * exynos_drm_gem_mmap_buffer to dev->driver->fops.
 451 * This is used to call the specific mapper temporarily.
452 */
453 file_priv->filp->f_op = &exynos_drm_gem_fops;
583 454
584 addr = vm_mmap(obj->filp, 0, args->size, 455 /*
 456 * Set the gem object to private_data so that the specific mapper
 457 * can get the gem object. It will be restored by
 458 * exynos_drm_gem_mmap_buffer to the drm_file.
459 */
460 file_priv->filp->private_data = obj;
461
462 addr = vm_mmap(file_priv->filp, 0, args->size,
585 PROT_READ | PROT_WRITE, MAP_SHARED, 0); 463 PROT_READ | PROT_WRITE, MAP_SHARED, 0);
586 464
587 drm_gem_object_unreference_unlocked(obj); 465 drm_gem_object_unreference_unlocked(obj);
588 466
589 if (IS_ERR((void *)addr)) 467 if (IS_ERR((void *)addr)) {
468 file_priv->filp->private_data = file_priv;
590 return PTR_ERR((void *)addr); 469 return PTR_ERR((void *)addr);
470 }
591 471
592 args->mapped = addr; 472 args->mapped = addr;
593 473
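From userspace the f_op juggling above is invisible: the caller issues the mmap ioctl and simply receives a mapped address back. A minimal sketch of that side, as a hypothetical helper; DRM_IOCTL_EXYNOS_GEM_MMAP and struct drm_exynos_gem_mmap come from this patchset's exynos_drm.h uapi header, so verify the field layout against your installed headers.

	/* Hypothetical userspace helper; error handling trimmed. */
	#include <stdint.h>
	#include <xf86drm.h>
	#include <exynos_drm.h>	/* uapi header shipped with this patchset */

	static void *exynos_bo_map(int fd, unsigned int handle, size_t size)
	{
		struct drm_exynos_gem_mmap req = {
			.handle = handle,	/* GEM handle from GEM_CREATE */
			.size	= size,		/* must not exceed the bo size */
		};

		if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_MMAP, &req) < 0)
			return NULL;

		/* the kernel already called vm_mmap() on our behalf */
		return (void *)(unsigned long)req.mapped;
	}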
@@ -622,6 +502,129 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
622 return 0; 502 return 0;
623} 503}
624 504
505struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
506{
507 struct vm_area_struct *vma_copy;
508
509 vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
510 if (!vma_copy)
511 return NULL;
512
513 if (vma->vm_ops && vma->vm_ops->open)
514 vma->vm_ops->open(vma);
515
516 if (vma->vm_file)
517 get_file(vma->vm_file);
518
519 memcpy(vma_copy, vma, sizeof(*vma));
520
521 vma_copy->vm_mm = NULL;
522 vma_copy->vm_next = NULL;
523 vma_copy->vm_prev = NULL;
524
525 return vma_copy;
526}
527
528void exynos_gem_put_vma(struct vm_area_struct *vma)
529{
530 if (!vma)
531 return;
532
533 if (vma->vm_ops && vma->vm_ops->close)
534 vma->vm_ops->close(vma);
535
536 if (vma->vm_file)
537 fput(vma->vm_file);
538
539 kfree(vma);
540}
541
542int exynos_gem_get_pages_from_userptr(unsigned long start,
543 unsigned int npages,
544 struct page **pages,
545 struct vm_area_struct *vma)
546{
547 int get_npages;
548
 549 /* the memory region mmapped with VM_PFNMAP. */
550 if (vma_is_io(vma)) {
551 unsigned int i;
552
553 for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
554 unsigned long pfn;
555 int ret = follow_pfn(vma, start, &pfn);
556 if (ret)
557 return ret;
558
559 pages[i] = pfn_to_page(pfn);
560 }
561
562 if (i != npages) {
563 DRM_ERROR("failed to get user_pages.\n");
564 return -EINVAL;
565 }
566
567 return 0;
568 }
569
570 get_npages = get_user_pages(current, current->mm, start,
571 npages, 1, 1, pages, NULL);
572 get_npages = max(get_npages, 0);
573 if (get_npages != npages) {
574 DRM_ERROR("failed to get user_pages.\n");
575 while (get_npages)
576 put_page(pages[--get_npages]);
577 return -EFAULT;
578 }
579
580 return 0;
581}
582
583void exynos_gem_put_pages_to_userptr(struct page **pages,
584 unsigned int npages,
585 struct vm_area_struct *vma)
586{
587 if (!vma_is_io(vma)) {
588 unsigned int i;
589
590 for (i = 0; i < npages; i++) {
591 set_page_dirty_lock(pages[i]);
592
593 /*
594 * undo the reference we took when populating
595 * the table.
596 */
597 put_page(pages[i]);
598 }
599 }
600}
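Taken together, these helpers pair up around a userptr registration: copy and pin the vma, pin the pages, and undo both on release. A condensed sketch of a hypothetical caller follows (the real flow lives in exynos_drm_gem_userptr_ioctl(); needs <linux/mm.h> and <linux/sched.h>, error paths trimmed):

	static int example_pin_userptr(struct exynos_drm_gem_obj *obj,
				       unsigned long userptr, unsigned int npages,
				       struct page **pages)
	{
		struct vm_area_struct *vma;
		int ret;

		down_read(&current->mm->mmap_sem);

		vma = find_vma(current->mm, userptr);
		if (!vma) {
			up_read(&current->mm->mmap_sem);
			return -EFAULT;
		}

		/* private copy so the mapping info outlives this syscall. */
		obj->vma = exynos_gem_get_vma(vma);

		ret = exynos_gem_get_pages_from_userptr(userptr, npages,
							pages, vma);

		up_read(&current->mm->mmap_sem);
		return ret;
	}

	static void example_unpin_userptr(struct exynos_drm_gem_obj *obj,
					  unsigned int npages, struct page **pages)
	{
		/* reverse order: drop the page references, then the vma copy. */
		exynos_gem_put_pages_to_userptr(pages, npages, obj->vma);
		exynos_gem_put_vma(obj->vma);
		obj->vma = NULL;
	}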
601
602int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
603 struct sg_table *sgt,
604 enum dma_data_direction dir)
605{
606 int nents;
607
608 mutex_lock(&drm_dev->struct_mutex);
609
610 nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
611 if (!nents) {
612 DRM_ERROR("failed to map sgl with dma.\n");
613 mutex_unlock(&drm_dev->struct_mutex);
614 return nents;
615 }
616
617 mutex_unlock(&drm_dev->struct_mutex);
618 return 0;
619}
620
621void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
622 struct sg_table *sgt,
623 enum dma_data_direction dir)
624{
625 dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
626}
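Usage of the pair is symmetric around the DMA job; a hypothetical caller (DMA_BIDIRECTIONAL chosen only as an example direction):

	int ret = exynos_gem_map_sgt_with_dma(drm_dev, sgt, DMA_BIDIRECTIONAL);
	if (ret < 0)
		return ret;

	/* program the device with sg_dma_address(sgt->sgl) and run the job */

	exynos_gem_unmap_sgt_from_dma(drm_dev, sgt, DMA_BIDIRECTIONAL);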
627
625int exynos_drm_gem_init_object(struct drm_gem_object *obj) 628int exynos_drm_gem_init_object(struct drm_gem_object *obj)
626{ 629{
627 DRM_DEBUG_KMS("%s\n", __FILE__); 630 DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -753,9 +756,9 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
753 756
754 mutex_lock(&dev->struct_mutex); 757 mutex_lock(&dev->struct_mutex);
755 758
756 ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset); 759 ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
757 if (ret < 0) 760 if (ret < 0)
758 DRM_ERROR("failed to map pages.\n"); 761 DRM_ERROR("failed to map a buffer with user.\n");
759 762
760 mutex_unlock(&dev->struct_mutex); 763 mutex_unlock(&dev->struct_mutex);
761 764
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 085b2a5d5f70..f11f2afd5bfc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -35,21 +35,27 @@
35 * exynos drm gem buffer structure. 35 * exynos drm gem buffer structure.
36 * 36 *
37 * @kvaddr: kernel virtual address to allocated memory region. 37 * @kvaddr: kernel virtual address to allocated memory region.
 38 * @userptr: user space address.
38 * @dma_addr: bus address(accessed by dma) to allocated memory region. 39 * @dma_addr: bus address(accessed by dma) to allocated memory region.
39 * - this address could be physical address without IOMMU and 40 * - this address could be physical address without IOMMU and
40 * device address with IOMMU. 41 * device address with IOMMU.
42 * @write: whether pages will be written to by the caller.
43 * @pages: Array of backing pages.
41 * @sgt: sg table to transfer page data. 44 * @sgt: sg table to transfer page data.
42 * @pages: contain all pages to allocated memory region.
43 * @page_size: could be 4K, 64K or 1MB.
44 * @size: size of allocated memory region. 45 * @size: size of allocated memory region.
 46 * @pfnmap: indicate whether memory region from userptr is mmapped with
47 * VM_PFNMAP or not.
45 */ 48 */
46struct exynos_drm_gem_buf { 49struct exynos_drm_gem_buf {
47 void __iomem *kvaddr; 50 void __iomem *kvaddr;
51 unsigned long userptr;
48 dma_addr_t dma_addr; 52 dma_addr_t dma_addr;
49 struct sg_table *sgt; 53 struct dma_attrs dma_attrs;
54 unsigned int write;
50 struct page **pages; 55 struct page **pages;
51 unsigned long page_size; 56 struct sg_table *sgt;
52 unsigned long size; 57 unsigned long size;
58 bool pfnmap;
53}; 59};
54 60
55/* 61/*
@@ -65,6 +71,7 @@ struct exynos_drm_gem_buf {
65 * or at framebuffer creation. 71 * or at framebuffer creation.
66 * @size: size requested from user, in bytes and this size is aligned 72 * @size: size requested from user, in bytes and this size is aligned
67 * in page unit. 73 * in page unit.
74 * @vma: a pointer to vm_area.
68 * @flags: indicate memory type of allocated buffer and cache attribute. 75 * @flags: indicate memory type of allocated buffer and cache attribute.
69 * 76 *
70 * P.S. this object would be transferred to user as kms_bo.handle so 77 * P.S. this object would be transferred to user as kms_bo.handle so
@@ -74,6 +81,7 @@ struct exynos_drm_gem_obj {
74 struct drm_gem_object base; 81 struct drm_gem_object base;
75 struct exynos_drm_gem_buf *buffer; 82 struct exynos_drm_gem_buf *buffer;
76 unsigned long size; 83 unsigned long size;
84 struct vm_area_struct *vma;
77 unsigned int flags; 85 unsigned int flags;
78}; 86};
79 87
@@ -104,9 +112,9 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
104 * other drivers such as 2d/3d acceleration drivers. 112 * other drivers such as 2d/3d acceleration drivers.
105 * with this function call, gem object reference count would be increased. 113 * with this function call, gem object reference count would be increased.
106 */ 114 */
107void *exynos_drm_gem_get_dma_addr(struct drm_device *dev, 115dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
108 unsigned int gem_handle, 116 unsigned int gem_handle,
109 struct drm_file *file_priv); 117 struct drm_file *filp);
110 118
111/* 119/*
112 * put dma address from gem handle and this function could be used for 120 * put dma address from gem handle and this function could be used for
@@ -115,7 +123,7 @@ void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
115 */ 123 */
116void exynos_drm_gem_put_dma_addr(struct drm_device *dev, 124void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
117 unsigned int gem_handle, 125 unsigned int gem_handle,
118 struct drm_file *file_priv); 126 struct drm_file *filp);
119 127
120/* get buffer offset to map to user space. */ 128/* get buffer offset to map to user space. */
121int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data, 129int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
@@ -128,6 +136,10 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
128int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data, 136int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
129 struct drm_file *file_priv); 137 struct drm_file *file_priv);
130 138
139/* map user space allocated by malloc to pages. */
140int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
141 struct drm_file *file_priv);
142
131/* get buffer information to memory region allocated by gem. */ 143/* get buffer information to memory region allocated by gem. */
132int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, 144int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
133 struct drm_file *file_priv); 145 struct drm_file *file_priv);
@@ -163,4 +175,36 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
163/* set vm_flags and we can change the vm attribute to other one at here. */ 175/* set vm_flags and we can change the vm attribute to other one at here. */
164int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); 176int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
165 177
178static inline int vma_is_io(struct vm_area_struct *vma)
179{
180 return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
181}
182
183/* get a copy of a virtual memory region. */
184struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);
185
186/* release a userspace virtual memory area. */
187void exynos_gem_put_vma(struct vm_area_struct *vma);
188
189/* get pages from user space. */
190int exynos_gem_get_pages_from_userptr(unsigned long start,
191 unsigned int npages,
192 struct page **pages,
193 struct vm_area_struct *vma);
194
195/* drop the reference to pages. */
196void exynos_gem_put_pages_to_userptr(struct page **pages,
197 unsigned int npages,
198 struct vm_area_struct *vma);
199
200/* map sgt with dma region. */
201int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
202 struct sg_table *sgt,
203 enum dma_data_direction dir);
204
205/* unmap sgt from dma region. */
206void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
207 struct sg_table *sgt,
208 enum dma_data_direction dir);
209
166#endif 210#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
new file mode 100644
index 000000000000..5639353d47b9
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -0,0 +1,1870 @@
1/*
2 * Copyright (C) 2012 Samsung Electronics Co.Ltd
3 * Authors:
4 * Eunchul Kim <chulspro.kim@samsung.com>
5 * Jinyoung Jeon <jy0.jeon@samsung.com>
6 * Sangmin Lee <lsmin.lee@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 */
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/clk.h>
18#include <linux/pm_runtime.h>
19#include <plat/map-base.h>
20
21#include <drm/drmP.h>
22#include <drm/exynos_drm.h>
23#include "regs-gsc.h"
24#include "exynos_drm_ipp.h"
25#include "exynos_drm_gsc.h"
26
27/*
 28 * GSC stands for General SCaler and
 29 * supports image scaler/rotator and input/output DMA operations.
 30 * input DMA reads image data from memory.
 31 * output DMA writes image data to memory.
32 * GSC supports image rotation and image effect functions.
33 *
34 * M2M operation : supports crop/scale/rotation/csc so on.
35 * Memory ----> GSC H/W ----> Memory.
36 * Writeback operation : supports cloned screen with FIMD.
37 * FIMD ----> GSC H/W ----> Memory.
38 * Output operation : supports direct display using local path.
39 * Memory ----> GSC H/W ----> FIMD, Mixer.
40 */
41
42/*
43 * TODO
44 * 1. check suspend/resume api if needed.
45 * 2. need to check use case platform_device_id.
 46 * 3. check src/dst size width, height.
 47 * 4. add a check_prepare api for correct register setup.
48 * 5. need to add supported list in prop_list.
49 * 6. check prescaler/scaler optimization.
50 */
51
52#define GSC_MAX_DEVS 4
53#define GSC_MAX_SRC 4
54#define GSC_MAX_DST 16
55#define GSC_RESET_TIMEOUT 50
56#define GSC_BUF_STOP 1
57#define GSC_BUF_START 2
58#define GSC_REG_SZ 16
59#define GSC_WIDTH_ITU_709 1280
60#define GSC_SC_UP_MAX_RATIO 65536
61#define GSC_SC_DOWN_RATIO_7_8 74898
62#define GSC_SC_DOWN_RATIO_6_8 87381
63#define GSC_SC_DOWN_RATIO_5_8 104857
64#define GSC_SC_DOWN_RATIO_4_8 131072
65#define GSC_SC_DOWN_RATIO_3_8 174762
66#define GSC_SC_DOWN_RATIO_2_8 262144
67#define GSC_REFRESH_MIN 12
68#define GSC_REFRESH_MAX 60
69#define GSC_CROP_MAX 8192
70#define GSC_CROP_MIN 32
71#define GSC_SCALE_MAX 4224
72#define GSC_SCALE_MIN 32
73#define GSC_COEF_RATIO 7
74#define GSC_COEF_PHASE 9
75#define GSC_COEF_ATTR 16
76#define GSC_COEF_H_8T 8
77#define GSC_COEF_V_4T 4
78#define GSC_COEF_DEPTH 3
79
80#define get_gsc_context(dev) platform_get_drvdata(to_platform_device(dev))
81#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
82 struct gsc_context, ippdrv);
83#define gsc_read(offset) readl(ctx->regs + (offset))
84#define gsc_write(cfg, offset) writel(cfg, ctx->regs + (offset))
85
86/*
87 * A structure of scaler.
88 *
89 * @range: narrow, wide.
 90 * @pre_shfactor: pre scaler shift factor.
91 * @pre_hratio: horizontal ratio of the prescaler.
92 * @pre_vratio: vertical ratio of the prescaler.
93 * @main_hratio: the main scaler's horizontal ratio.
94 * @main_vratio: the main scaler's vertical ratio.
95 */
96struct gsc_scaler {
97 bool range;
98 u32 pre_shfactor;
99 u32 pre_hratio;
100 u32 pre_vratio;
101 unsigned long main_hratio;
102 unsigned long main_vratio;
103};
104
105/*
106 * A structure of scaler capability.
107 *
 108 * see the user manual, section 49.2 (features).
109 * @tile_w: tile mode or rotation width.
110 * @tile_h: tile mode or rotation height.
111 * @w: other cases width.
112 * @h: other cases height.
113 */
114struct gsc_capability {
115 /* tile or rotation */
116 u32 tile_w;
117 u32 tile_h;
118 /* other cases */
119 u32 w;
120 u32 h;
121};
122
123/*
124 * A structure of gsc context.
125 *
126 * @ippdrv: prepare initialization using ippdrv.
127 * @regs_res: register resources.
128 * @regs: memory mapped io registers.
129 * @lock: locking of operations.
130 * @gsc_clk: gsc gate clock.
 131 * @sc: scaler information.
 132 * @id: gsc id.
 133 * @irq: irq number.
 134 * @rotation: whether the src image is rotated by 90 or 270 degrees.
 135 * @suspended: whether the gate clock is disabled (device suspended).
136 */
137struct gsc_context {
138 struct exynos_drm_ippdrv ippdrv;
139 struct resource *regs_res;
140 void __iomem *regs;
141 struct mutex lock;
142 struct clk *gsc_clk;
143 struct gsc_scaler sc;
144 int id;
145 int irq;
146 bool rotation;
147 bool suspended;
148};
149
150/* 8-tap Filter Coefficient */
151static const int h_coef_8t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_H_8T] = {
152 { /* Ratio <= 65536 (~8:8) */
153 { 0, 0, 0, 128, 0, 0, 0, 0 },
154 { -1, 2, -6, 127, 7, -2, 1, 0 },
155 { -1, 4, -12, 125, 16, -5, 1, 0 },
156 { -1, 5, -15, 120, 25, -8, 2, 0 },
157 { -1, 6, -18, 114, 35, -10, 3, -1 },
158 { -1, 6, -20, 107, 46, -13, 4, -1 },
159 { -2, 7, -21, 99, 57, -16, 5, -1 },
160 { -1, 6, -20, 89, 68, -18, 5, -1 },
161 { -1, 6, -20, 79, 79, -20, 6, -1 },
162 { -1, 5, -18, 68, 89, -20, 6, -1 },
163 { -1, 5, -16, 57, 99, -21, 7, -2 },
164 { -1, 4, -13, 46, 107, -20, 6, -1 },
165 { -1, 3, -10, 35, 114, -18, 6, -1 },
166 { 0, 2, -8, 25, 120, -15, 5, -1 },
167 { 0, 1, -5, 16, 125, -12, 4, -1 },
168 { 0, 1, -2, 7, 127, -6, 2, -1 }
169 }, { /* 65536 < Ratio <= 74898 (~8:7) */
170 { 3, -8, 14, 111, 13, -8, 3, 0 },
171 { 2, -6, 7, 112, 21, -10, 3, -1 },
172 { 2, -4, 1, 110, 28, -12, 4, -1 },
173 { 1, -2, -3, 106, 36, -13, 4, -1 },
174 { 1, -1, -7, 103, 44, -15, 4, -1 },
175 { 1, 1, -11, 97, 53, -16, 4, -1 },
176 { 0, 2, -13, 91, 61, -16, 4, -1 },
177 { 0, 3, -15, 85, 69, -17, 4, -1 },
178 { 0, 3, -16, 77, 77, -16, 3, 0 },
179 { -1, 4, -17, 69, 85, -15, 3, 0 },
180 { -1, 4, -16, 61, 91, -13, 2, 0 },
181 { -1, 4, -16, 53, 97, -11, 1, 1 },
182 { -1, 4, -15, 44, 103, -7, -1, 1 },
183 { -1, 4, -13, 36, 106, -3, -2, 1 },
184 { -1, 4, -12, 28, 110, 1, -4, 2 },
185 { -1, 3, -10, 21, 112, 7, -6, 2 }
186 }, { /* 74898 < Ratio <= 87381 (~8:6) */
187 { 2, -11, 25, 96, 25, -11, 2, 0 },
188 { 2, -10, 19, 96, 31, -12, 2, 0 },
189 { 2, -9, 14, 94, 37, -12, 2, 0 },
190 { 2, -8, 10, 92, 43, -12, 1, 0 },
191 { 2, -7, 5, 90, 49, -12, 1, 0 },
192 { 2, -5, 1, 86, 55, -12, 0, 1 },
193 { 2, -4, -2, 82, 61, -11, -1, 1 },
194 { 1, -3, -5, 77, 67, -9, -1, 1 },
195 { 1, -2, -7, 72, 72, -7, -2, 1 },
196 { 1, -1, -9, 67, 77, -5, -3, 1 },
197 { 1, -1, -11, 61, 82, -2, -4, 2 },
198 { 1, 0, -12, 55, 86, 1, -5, 2 },
199 { 0, 1, -12, 49, 90, 5, -7, 2 },
200 { 0, 1, -12, 43, 92, 10, -8, 2 },
201 { 0, 2, -12, 37, 94, 14, -9, 2 },
202 { 0, 2, -12, 31, 96, 19, -10, 2 }
203 }, { /* 87381 < Ratio <= 104857 (~8:5) */
204 { -1, -8, 33, 80, 33, -8, -1, 0 },
205 { -1, -8, 28, 80, 37, -7, -2, 1 },
206 { 0, -8, 24, 79, 41, -7, -2, 1 },
207 { 0, -8, 20, 78, 46, -6, -3, 1 },
208 { 0, -8, 16, 76, 50, -4, -3, 1 },
209 { 0, -7, 13, 74, 54, -3, -4, 1 },
210 { 1, -7, 10, 71, 58, -1, -5, 1 },
211 { 1, -6, 6, 68, 62, 1, -5, 1 },
212 { 1, -6, 4, 65, 65, 4, -6, 1 },
213 { 1, -5, 1, 62, 68, 6, -6, 1 },
214 { 1, -5, -1, 58, 71, 10, -7, 1 },
215 { 1, -4, -3, 54, 74, 13, -7, 0 },
216 { 1, -3, -4, 50, 76, 16, -8, 0 },
217 { 1, -3, -6, 46, 78, 20, -8, 0 },
218 { 1, -2, -7, 41, 79, 24, -8, 0 },
219 { 1, -2, -7, 37, 80, 28, -8, -1 }
220 }, { /* 104857 < Ratio <= 131072 (~8:4) */
221 { -3, 0, 35, 64, 35, 0, -3, 0 },
222 { -3, -1, 32, 64, 38, 1, -3, 0 },
223 { -2, -2, 29, 63, 41, 2, -3, 0 },
224 { -2, -3, 27, 63, 43, 4, -4, 0 },
225 { -2, -3, 24, 61, 46, 6, -4, 0 },
226 { -2, -3, 21, 60, 49, 7, -4, 0 },
227 { -1, -4, 19, 59, 51, 9, -4, -1 },
228 { -1, -4, 16, 57, 53, 12, -4, -1 },
229 { -1, -4, 14, 55, 55, 14, -4, -1 },
230 { -1, -4, 12, 53, 57, 16, -4, -1 },
231 { -1, -4, 9, 51, 59, 19, -4, -1 },
232 { 0, -4, 7, 49, 60, 21, -3, -2 },
233 { 0, -4, 6, 46, 61, 24, -3, -2 },
234 { 0, -4, 4, 43, 63, 27, -3, -2 },
235 { 0, -3, 2, 41, 63, 29, -2, -2 },
236 { 0, -3, 1, 38, 64, 32, -1, -3 }
237 }, { /* 131072 < Ratio <= 174762 (~8:3) */
238 { -1, 8, 33, 48, 33, 8, -1, 0 },
239 { -1, 7, 31, 49, 35, 9, -1, -1 },
240 { -1, 6, 30, 49, 36, 10, -1, -1 },
241 { -1, 5, 28, 48, 38, 12, -1, -1 },
242 { -1, 4, 26, 48, 39, 13, 0, -1 },
243 { -1, 3, 24, 47, 41, 15, 0, -1 },
244 { -1, 2, 23, 47, 42, 16, 0, -1 },
245 { -1, 2, 21, 45, 43, 18, 1, -1 },
246 { -1, 1, 19, 45, 45, 19, 1, -1 },
247 { -1, 1, 18, 43, 45, 21, 2, -1 },
248 { -1, 0, 16, 42, 47, 23, 2, -1 },
249 { -1, 0, 15, 41, 47, 24, 3, -1 },
250 { -1, 0, 13, 39, 48, 26, 4, -1 },
251 { -1, -1, 12, 38, 48, 28, 5, -1 },
252 { -1, -1, 10, 36, 49, 30, 6, -1 },
253 { -1, -1, 9, 35, 49, 31, 7, -1 }
254 }, { /* 174762 < Ratio <= 262144 (~8:2) */
255 { 2, 13, 30, 38, 30, 13, 2, 0 },
256 { 2, 12, 29, 38, 30, 14, 3, 0 },
257 { 2, 11, 28, 38, 31, 15, 3, 0 },
258 { 2, 10, 26, 38, 32, 16, 4, 0 },
259 { 1, 10, 26, 37, 33, 17, 4, 0 },
260 { 1, 9, 24, 37, 34, 18, 5, 0 },
261 { 1, 8, 24, 37, 34, 19, 5, 0 },
262 { 1, 7, 22, 36, 35, 20, 6, 1 },
263 { 1, 6, 21, 36, 36, 21, 6, 1 },
264 { 1, 6, 20, 35, 36, 22, 7, 1 },
265 { 0, 5, 19, 34, 37, 24, 8, 1 },
266 { 0, 5, 18, 34, 37, 24, 9, 1 },
267 { 0, 4, 17, 33, 37, 26, 10, 1 },
268 { 0, 4, 16, 32, 38, 26, 10, 2 },
269 { 0, 3, 15, 31, 38, 28, 11, 2 },
270 { 0, 3, 14, 30, 38, 29, 12, 2 }
271 }
272};
273
274/* 4-tap Filter Coefficient */
275static const int v_coef_4t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_V_4T] = {
276 { /* Ratio <= 65536 (~8:8) */
277 { 0, 128, 0, 0 },
278 { -4, 127, 5, 0 },
279 { -6, 124, 11, -1 },
280 { -8, 118, 19, -1 },
281 { -8, 111, 27, -2 },
282 { -8, 102, 37, -3 },
283 { -8, 92, 48, -4 },
284 { -7, 81, 59, -5 },
285 { -6, 70, 70, -6 },
286 { -5, 59, 81, -7 },
287 { -4, 48, 92, -8 },
288 { -3, 37, 102, -8 },
289 { -2, 27, 111, -8 },
290 { -1, 19, 118, -8 },
291 { -1, 11, 124, -6 },
292 { 0, 5, 127, -4 }
293 }, { /* 65536 < Ratio <= 74898 (~8:7) */
294 { 8, 112, 8, 0 },
295 { 4, 111, 14, -1 },
296 { 1, 109, 20, -2 },
297 { -2, 105, 27, -2 },
298 { -3, 100, 34, -3 },
299 { -5, 93, 43, -3 },
300 { -5, 86, 51, -4 },
301 { -5, 77, 60, -4 },
302 { -5, 69, 69, -5 },
303 { -4, 60, 77, -5 },
304 { -4, 51, 86, -5 },
305 { -3, 43, 93, -5 },
306 { -3, 34, 100, -3 },
307 { -2, 27, 105, -2 },
308 { -2, 20, 109, 1 },
309 { -1, 14, 111, 4 }
310 }, { /* 74898 < Ratio <= 87381 (~8:6) */
311 { 16, 96, 16, 0 },
312 { 12, 97, 21, -2 },
313 { 8, 96, 26, -2 },
314 { 5, 93, 32, -2 },
315 { 2, 89, 39, -2 },
316 { 0, 84, 46, -2 },
317 { -1, 79, 53, -3 },
318 { -2, 73, 59, -2 },
319 { -2, 66, 66, -2 },
320 { -2, 59, 73, -2 },
321 { -3, 53, 79, -1 },
322 { -2, 46, 84, 0 },
323 { -2, 39, 89, 2 },
324 { -2, 32, 93, 5 },
325 { -2, 26, 96, 8 },
326 { -2, 21, 97, 12 }
327 }, { /* 87381 < Ratio <= 104857 (~8:5) */
328 { 22, 84, 22, 0 },
329 { 18, 85, 26, -1 },
330 { 14, 84, 31, -1 },
331 { 11, 82, 36, -1 },
332 { 8, 79, 42, -1 },
333 { 6, 76, 47, -1 },
334 { 4, 72, 52, 0 },
335 { 2, 68, 58, 0 },
336 { 1, 63, 63, 1 },
337 { 0, 58, 68, 2 },
338 { 0, 52, 72, 4 },
339 { -1, 47, 76, 6 },
340 { -1, 42, 79, 8 },
341 { -1, 36, 82, 11 },
342 { -1, 31, 84, 14 },
343 { -1, 26, 85, 18 }
344 }, { /* 104857 < Ratio <= 131072 (~8:4) */
345 { 26, 76, 26, 0 },
346 { 22, 76, 30, 0 },
347 { 19, 75, 34, 0 },
348 { 16, 73, 38, 1 },
349 { 13, 71, 43, 1 },
350 { 10, 69, 47, 2 },
351 { 8, 66, 51, 3 },
352 { 6, 63, 55, 4 },
353 { 5, 59, 59, 5 },
354 { 4, 55, 63, 6 },
355 { 3, 51, 66, 8 },
356 { 2, 47, 69, 10 },
357 { 1, 43, 71, 13 },
358 { 1, 38, 73, 16 },
359 { 0, 34, 75, 19 },
360 { 0, 30, 76, 22 }
361 }, { /* 131072 < Ratio <= 174762 (~8:3) */
362 { 29, 70, 29, 0 },
363 { 26, 68, 32, 2 },
364 { 23, 67, 36, 2 },
365 { 20, 66, 39, 3 },
366 { 17, 65, 43, 3 },
367 { 15, 63, 46, 4 },
368 { 12, 61, 50, 5 },
369 { 10, 58, 53, 7 },
370 { 8, 56, 56, 8 },
371 { 7, 53, 58, 10 },
372 { 5, 50, 61, 12 },
373 { 4, 46, 63, 15 },
374 { 3, 43, 65, 17 },
375 { 3, 39, 66, 20 },
376 { 2, 36, 67, 23 },
377 { 2, 32, 68, 26 }
378 }, { /* 174762 < Ratio <= 262144 (~8:2) */
379 { 32, 64, 32, 0 },
380 { 28, 63, 34, 3 },
381 { 25, 62, 37, 4 },
382 { 22, 62, 40, 4 },
383 { 19, 61, 43, 5 },
384 { 17, 59, 46, 6 },
385 { 15, 58, 48, 7 },
386 { 13, 55, 51, 9 },
387 { 11, 53, 53, 11 },
388 { 9, 51, 55, 13 },
389 { 7, 48, 58, 15 },
390 { 6, 46, 59, 17 },
391 { 5, 43, 61, 19 },
392 { 4, 40, 62, 22 },
393 { 4, 37, 62, 25 },
394 { 3, 34, 63, 28 }
395 }
396};
397
398static int gsc_sw_reset(struct gsc_context *ctx)
399{
400 u32 cfg;
401 int count = GSC_RESET_TIMEOUT;
402
403 DRM_DEBUG_KMS("%s\n", __func__);
404
405 /* s/w reset */
406 cfg = (GSC_SW_RESET_SRESET);
407 gsc_write(cfg, GSC_SW_RESET);
408
409 /* wait s/w reset complete */
410 while (count--) {
411 cfg = gsc_read(GSC_SW_RESET);
412 if (!cfg)
413 break;
414 usleep_range(1000, 2000);
415 }
416
417 if (cfg) {
418 DRM_ERROR("failed to reset gsc h/w.\n");
419 return -EBUSY;
420 }
421
422 /* reset sequence */
423 cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
424 cfg |= (GSC_IN_BASE_ADDR_MASK |
425 GSC_IN_BASE_ADDR_PINGPONG(0));
426 gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
427 gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
428 gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
429
430 cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
431 cfg |= (GSC_OUT_BASE_ADDR_MASK |
432 GSC_OUT_BASE_ADDR_PINGPONG(0));
433 gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
434 gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
435 gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
436
437 return 0;
438}
439
440static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable)
441{
442 u32 gscblk_cfg;
443
444 DRM_DEBUG_KMS("%s\n", __func__);
445
446 gscblk_cfg = readl(SYSREG_GSCBLK_CFG1);
447
448 if (enable)
449 gscblk_cfg |= GSC_BLK_DISP1WB_DEST(ctx->id) |
450 GSC_BLK_GSCL_WB_IN_SRC_SEL(ctx->id) |
451 GSC_BLK_SW_RESET_WB_DEST(ctx->id);
452 else
453 gscblk_cfg |= GSC_BLK_PXLASYNC_LO_MASK_WB(ctx->id);
454
455 writel(gscblk_cfg, SYSREG_GSCBLK_CFG1);
456}
457
458static void gsc_handle_irq(struct gsc_context *ctx, bool enable,
459 bool overflow, bool done)
460{
461 u32 cfg;
462
463 DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]level[%d]\n", __func__,
464 enable, overflow, done);
465
466 cfg = gsc_read(GSC_IRQ);
467 cfg |= (GSC_IRQ_OR_MASK | GSC_IRQ_FRMDONE_MASK);
468
469 if (enable)
470 cfg |= GSC_IRQ_ENABLE;
471 else
472 cfg &= ~GSC_IRQ_ENABLE;
473
474 if (overflow)
475 cfg &= ~GSC_IRQ_OR_MASK;
476 else
477 cfg |= GSC_IRQ_OR_MASK;
478
479 if (done)
480 cfg &= ~GSC_IRQ_FRMDONE_MASK;
481 else
482 cfg |= GSC_IRQ_FRMDONE_MASK;
483
484 gsc_write(cfg, GSC_IRQ);
485}
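Note the inverted sense of the two mask bits: a set GSC_IRQ_OR_MASK or GSC_IRQ_FRMDONE_MASK bit disables that interrupt source. Reading straight from the code above, the call later issued by gsc_ippdrv_start() is:

	/* IRQ line on, overflow interrupt masked, frame-done unmasked */
	gsc_handle_irq(ctx, true, false, true);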
486
487
488static int gsc_src_set_fmt(struct device *dev, u32 fmt)
489{
490 struct gsc_context *ctx = get_gsc_context(dev);
491 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
492 u32 cfg;
493
494 DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
495
496 cfg = gsc_read(GSC_IN_CON);
497 cfg &= ~(GSC_IN_RGB_TYPE_MASK | GSC_IN_YUV422_1P_ORDER_MASK |
498 GSC_IN_CHROMA_ORDER_MASK | GSC_IN_FORMAT_MASK |
499 GSC_IN_TILE_TYPE_MASK | GSC_IN_TILE_MODE |
500 GSC_IN_CHROM_STRIDE_SEL_MASK | GSC_IN_RB_SWAP_MASK);
501
502 switch (fmt) {
503 case DRM_FORMAT_RGB565:
504 cfg |= GSC_IN_RGB565;
505 break;
506 case DRM_FORMAT_XRGB8888:
507 cfg |= GSC_IN_XRGB8888;
508 break;
509 case DRM_FORMAT_BGRX8888:
510 cfg |= (GSC_IN_XRGB8888 | GSC_IN_RB_SWAP);
511 break;
512 case DRM_FORMAT_YUYV:
513 cfg |= (GSC_IN_YUV422_1P |
514 GSC_IN_YUV422_1P_ORDER_LSB_Y |
515 GSC_IN_CHROMA_ORDER_CBCR);
516 break;
517 case DRM_FORMAT_YVYU:
518 cfg |= (GSC_IN_YUV422_1P |
519 GSC_IN_YUV422_1P_ORDER_LSB_Y |
520 GSC_IN_CHROMA_ORDER_CRCB);
521 break;
522 case DRM_FORMAT_UYVY:
523 cfg |= (GSC_IN_YUV422_1P |
524 GSC_IN_YUV422_1P_OEDER_LSB_C |
525 GSC_IN_CHROMA_ORDER_CBCR);
526 break;
527 case DRM_FORMAT_VYUY:
528 cfg |= (GSC_IN_YUV422_1P |
529 GSC_IN_YUV422_1P_OEDER_LSB_C |
530 GSC_IN_CHROMA_ORDER_CRCB);
531 break;
532 case DRM_FORMAT_NV21:
533 case DRM_FORMAT_NV61:
534 cfg |= (GSC_IN_CHROMA_ORDER_CRCB |
535 GSC_IN_YUV420_2P);
536 break;
537 case DRM_FORMAT_YUV422:
538 cfg |= GSC_IN_YUV422_3P;
539 break;
540 case DRM_FORMAT_YUV420:
541 case DRM_FORMAT_YVU420:
542 cfg |= GSC_IN_YUV420_3P;
543 break;
544 case DRM_FORMAT_NV12:
545 case DRM_FORMAT_NV16:
546 cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
547 GSC_IN_YUV420_2P);
548 break;
549 case DRM_FORMAT_NV12MT:
550 cfg |= (GSC_IN_TILE_C_16x8 | GSC_IN_TILE_MODE);
551 break;
552 default:
553 dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt);
554 return -EINVAL;
555 }
556
557 gsc_write(cfg, GSC_IN_CON);
558
559 return 0;
560}
561
562static int gsc_src_set_transf(struct device *dev,
563 enum drm_exynos_degree degree,
564 enum drm_exynos_flip flip, bool *swap)
565{
566 struct gsc_context *ctx = get_gsc_context(dev);
567 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
568 u32 cfg;
569
570 DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
571 degree, flip);
572
573 cfg = gsc_read(GSC_IN_CON);
574 cfg &= ~GSC_IN_ROT_MASK;
575
576 switch (degree) {
577 case EXYNOS_DRM_DEGREE_0:
578 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
579 cfg |= GSC_IN_ROT_XFLIP;
580 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
581 cfg |= GSC_IN_ROT_YFLIP;
582 break;
583 case EXYNOS_DRM_DEGREE_90:
584 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
585 cfg |= GSC_IN_ROT_90_XFLIP;
586 else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
587 cfg |= GSC_IN_ROT_90_YFLIP;
588 else
589 cfg |= GSC_IN_ROT_90;
590 break;
591 case EXYNOS_DRM_DEGREE_180:
592 cfg |= GSC_IN_ROT_180;
593 break;
594 case EXYNOS_DRM_DEGREE_270:
595 cfg |= GSC_IN_ROT_270;
596 break;
597 default:
598 dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
599 return -EINVAL;
600 }
601
602 gsc_write(cfg, GSC_IN_CON);
603
604 ctx->rotation = cfg &
605 (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
606 *swap = ctx->rotation;
607
608 return 0;
609}
610
611static int gsc_src_set_size(struct device *dev, int swap,
612 struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
613{
614 struct gsc_context *ctx = get_gsc_context(dev);
615 struct drm_exynos_pos img_pos = *pos;
616 struct gsc_scaler *sc = &ctx->sc;
617 u32 cfg;
618
619 DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
620 __func__, swap, pos->x, pos->y, pos->w, pos->h);
621
622 if (swap) {
623 img_pos.w = pos->h;
624 img_pos.h = pos->w;
625 }
626
627 /* pixel offset */
628 cfg = (GSC_SRCIMG_OFFSET_X(img_pos.x) |
629 GSC_SRCIMG_OFFSET_Y(img_pos.y));
630 gsc_write(cfg, GSC_SRCIMG_OFFSET);
631
632 /* cropped size */
633 cfg = (GSC_CROPPED_WIDTH(img_pos.w) |
634 GSC_CROPPED_HEIGHT(img_pos.h));
635 gsc_write(cfg, GSC_CROPPED_SIZE);
636
637 DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n",
638 __func__, sz->hsize, sz->vsize);
639
640 /* original size */
641 cfg = gsc_read(GSC_SRCIMG_SIZE);
642 cfg &= ~(GSC_SRCIMG_HEIGHT_MASK |
643 GSC_SRCIMG_WIDTH_MASK);
644
645 cfg |= (GSC_SRCIMG_WIDTH(sz->hsize) |
646 GSC_SRCIMG_HEIGHT(sz->vsize));
647
648 gsc_write(cfg, GSC_SRCIMG_SIZE);
649
650 cfg = gsc_read(GSC_IN_CON);
651 cfg &= ~GSC_IN_RGB_TYPE_MASK;
652
653 DRM_DEBUG_KMS("%s:width[%d]range[%d]\n",
654 __func__, pos->w, sc->range);
655
656 if (pos->w >= GSC_WIDTH_ITU_709)
657 if (sc->range)
658 cfg |= GSC_IN_RGB_HD_WIDE;
659 else
660 cfg |= GSC_IN_RGB_HD_NARROW;
661 else
662 if (sc->range)
663 cfg |= GSC_IN_RGB_SD_WIDE;
664 else
665 cfg |= GSC_IN_RGB_SD_NARROW;
666
667 gsc_write(cfg, GSC_IN_CON);
668
669 return 0;
670}
671
672static int gsc_src_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
673 enum drm_exynos_ipp_buf_type buf_type)
674{
675 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
676 bool masked;
677 u32 cfg;
678 u32 mask = 0x00000001 << buf_id;
679
680 DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
681 buf_id, buf_type);
682
683 /* mask register set */
684 cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
685
686 switch (buf_type) {
687 case IPP_BUF_ENQUEUE:
688 masked = false;
689 break;
690 case IPP_BUF_DEQUEUE:
691 masked = true;
692 break;
693 default:
694 dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
695 return -EINVAL;
696 }
697
698 /* sequence id */
699 cfg &= ~mask;
700 cfg |= masked << buf_id;
701 gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
702 gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
703 gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
704
705 return 0;
706}
707
708static int gsc_src_set_addr(struct device *dev,
709 struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
710 enum drm_exynos_ipp_buf_type buf_type)
711{
712 struct gsc_context *ctx = get_gsc_context(dev);
713 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
714 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
715 struct drm_exynos_ipp_property *property;
716
717 if (!c_node) {
718 DRM_ERROR("failed to get c_node.\n");
719 return -EFAULT;
720 }
721
722 property = &c_node->property;
723 if (!property) {
724 DRM_ERROR("failed to get property.\n");
725 return -EFAULT;
726 }
727
728 DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
729 property->prop_id, buf_id, buf_type);
730
731 if (buf_id > GSC_MAX_SRC) {
732 dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id);
733 return -EINVAL;
734 }
735
736 /* address register set */
737 switch (buf_type) {
738 case IPP_BUF_ENQUEUE:
739 gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
740 GSC_IN_BASE_ADDR_Y(buf_id));
741 gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
742 GSC_IN_BASE_ADDR_CB(buf_id));
743 gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
744 GSC_IN_BASE_ADDR_CR(buf_id));
745 break;
746 case IPP_BUF_DEQUEUE:
747 gsc_write(0x0, GSC_IN_BASE_ADDR_Y(buf_id));
748 gsc_write(0x0, GSC_IN_BASE_ADDR_CB(buf_id));
749 gsc_write(0x0, GSC_IN_BASE_ADDR_CR(buf_id));
750 break;
751 default:
752 /* bypass */
753 break;
754 }
755
756 return gsc_src_set_buf_seq(ctx, buf_id, buf_type);
757}
758
759static struct exynos_drm_ipp_ops gsc_src_ops = {
760 .set_fmt = gsc_src_set_fmt,
761 .set_transf = gsc_src_set_transf,
762 .set_size = gsc_src_set_size,
763 .set_addr = gsc_src_set_addr,
764};
765
766static int gsc_dst_set_fmt(struct device *dev, u32 fmt)
767{
768 struct gsc_context *ctx = get_gsc_context(dev);
769 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
770 u32 cfg;
771
772 DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
773
774 cfg = gsc_read(GSC_OUT_CON);
775 cfg &= ~(GSC_OUT_RGB_TYPE_MASK | GSC_OUT_YUV422_1P_ORDER_MASK |
776 GSC_OUT_CHROMA_ORDER_MASK | GSC_OUT_FORMAT_MASK |
777 GSC_OUT_CHROM_STRIDE_SEL_MASK | GSC_OUT_RB_SWAP_MASK |
778 GSC_OUT_GLOBAL_ALPHA_MASK);
779
780 switch (fmt) {
781 case DRM_FORMAT_RGB565:
782 cfg |= GSC_OUT_RGB565;
783 break;
784 case DRM_FORMAT_XRGB8888:
785 cfg |= GSC_OUT_XRGB8888;
786 break;
787 case DRM_FORMAT_BGRX8888:
788 cfg |= (GSC_OUT_XRGB8888 | GSC_OUT_RB_SWAP);
789 break;
790 case DRM_FORMAT_YUYV:
791 cfg |= (GSC_OUT_YUV422_1P |
792 GSC_OUT_YUV422_1P_ORDER_LSB_Y |
793 GSC_OUT_CHROMA_ORDER_CBCR);
794 break;
795 case DRM_FORMAT_YVYU:
796 cfg |= (GSC_OUT_YUV422_1P |
797 GSC_OUT_YUV422_1P_ORDER_LSB_Y |
798 GSC_OUT_CHROMA_ORDER_CRCB);
799 break;
800 case DRM_FORMAT_UYVY:
801 cfg |= (GSC_OUT_YUV422_1P |
802 GSC_OUT_YUV422_1P_OEDER_LSB_C |
803 GSC_OUT_CHROMA_ORDER_CBCR);
804 break;
805 case DRM_FORMAT_VYUY:
806 cfg |= (GSC_OUT_YUV422_1P |
807 GSC_OUT_YUV422_1P_OEDER_LSB_C |
808 GSC_OUT_CHROMA_ORDER_CRCB);
809 break;
810 case DRM_FORMAT_NV21:
811 case DRM_FORMAT_NV61:
812 cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P);
813 break;
814 case DRM_FORMAT_YUV422:
815 case DRM_FORMAT_YUV420:
816 case DRM_FORMAT_YVU420:
817 cfg |= GSC_OUT_YUV420_3P;
818 break;
819 case DRM_FORMAT_NV12:
820 case DRM_FORMAT_NV16:
821 cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
822 GSC_OUT_YUV420_2P);
823 break;
824 case DRM_FORMAT_NV12MT:
825 cfg |= (GSC_OUT_TILE_C_16x8 | GSC_OUT_TILE_MODE);
826 break;
827 default:
828 dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt);
829 return -EINVAL;
830 }
831
832 gsc_write(cfg, GSC_OUT_CON);
833
834 return 0;
835}
836
837static int gsc_dst_set_transf(struct device *dev,
838 enum drm_exynos_degree degree,
839 enum drm_exynos_flip flip, bool *swap)
840{
841 struct gsc_context *ctx = get_gsc_context(dev);
842 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
843 u32 cfg;
844
845 DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
846 degree, flip);
847
848 cfg = gsc_read(GSC_IN_CON);
849 cfg &= ~GSC_IN_ROT_MASK;
850
851 switch (degree) {
852 case EXYNOS_DRM_DEGREE_0:
853 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
854 cfg |= GSC_IN_ROT_XFLIP;
855 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
856 cfg |= GSC_IN_ROT_YFLIP;
857 break;
858 case EXYNOS_DRM_DEGREE_90:
859 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
860 cfg |= GSC_IN_ROT_90_XFLIP;
861 else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
862 cfg |= GSC_IN_ROT_90_YFLIP;
863 else
864 cfg |= GSC_IN_ROT_90;
865 break;
866 case EXYNOS_DRM_DEGREE_180:
867 cfg |= GSC_IN_ROT_180;
868 break;
869 case EXYNOS_DRM_DEGREE_270:
870 cfg |= GSC_IN_ROT_270;
871 break;
872 default:
873 dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
874 return -EINVAL;
875 }
876
877 gsc_write(cfg, GSC_IN_CON);
878
879 ctx->rotation = cfg &
880 (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
881 *swap = ctx->rotation;
882
883 return 0;
884}
885
886static int gsc_get_ratio_shift(u32 src, u32 dst, u32 *ratio)
887{
888 DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);
889
890 if (src >= dst * 8) {
891 DRM_ERROR("failed to make ratio and shift.\n");
892 return -EINVAL;
893 } else if (src >= dst * 4)
894 *ratio = 4;
895 else if (src >= dst * 2)
896 *ratio = 2;
897 else
898 *ratio = 1;
899
900 return 0;
901}
902
903static void gsc_get_prescaler_shfactor(u32 hratio, u32 vratio, u32 *shfactor)
904{
905 if (hratio == 4 && vratio == 4)
906 *shfactor = 4;
907 else if ((hratio == 4 && vratio == 2) ||
908 (hratio == 2 && vratio == 4))
909 *shfactor = 3;
910 else if ((hratio == 4 && vratio == 1) ||
911 (hratio == 1 && vratio == 4) ||
912 (hratio == 2 && vratio == 2))
913 *shfactor = 2;
914 else if (hratio == 1 && vratio == 1)
915 *shfactor = 0;
916 else
917 *shfactor = 1;
918}
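Since gsc_get_ratio_shift() only ever yields ratios of 1, 2 or 4, the branch ladder above is exactly the base-2 log of the product of the two ratios. An equivalent closed form, shown only as an observation for clarity (not part of the driver):

	#include <linux/log2.h>

	static void gsc_get_prescaler_shfactor_alt(u32 hratio, u32 vratio,
						   u32 *shfactor)
	{
		/* for hratio, vratio in {1, 2, 4}: log2(4*4)=4, log2(4*2)=3, ... */
		*shfactor = ilog2(hratio * vratio);
	}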
919
920static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc,
921 struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
922{
923 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
924 u32 cfg;
925 u32 src_w, src_h, dst_w, dst_h;
926 int ret = 0;
927
928 src_w = src->w;
929 src_h = src->h;
930
931 if (ctx->rotation) {
932 dst_w = dst->h;
933 dst_h = dst->w;
934 } else {
935 dst_w = dst->w;
936 dst_h = dst->h;
937 }
938
939 ret = gsc_get_ratio_shift(src_w, dst_w, &sc->pre_hratio);
940 if (ret) {
941 dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
942 return ret;
943 }
944
945 ret = gsc_get_ratio_shift(src_h, dst_h, &sc->pre_vratio);
946 if (ret) {
947 dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
948 return ret;
949 }
950
951 DRM_DEBUG_KMS("%s:pre_hratio[%d]pre_vratio[%d]\n",
952 __func__, sc->pre_hratio, sc->pre_vratio);
953
954 sc->main_hratio = (src_w << 16) / dst_w;
955 sc->main_vratio = (src_h << 16) / dst_h;
956
957 DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n",
958 __func__, sc->main_hratio, sc->main_vratio);
959
960 gsc_get_prescaler_shfactor(sc->pre_hratio, sc->pre_vratio,
961 &sc->pre_shfactor);
962
963 DRM_DEBUG_KMS("%s:pre_shfactor[%d]\n", __func__,
964 sc->pre_shfactor);
965
966 cfg = (GSC_PRESC_SHFACTOR(sc->pre_shfactor) |
967 GSC_PRESC_H_RATIO(sc->pre_hratio) |
968 GSC_PRESC_V_RATIO(sc->pre_vratio));
969 gsc_write(cfg, GSC_PRE_SCALE_RATIO);
970
971 return ret;
972}
973
974static void gsc_set_h_coef(struct gsc_context *ctx, unsigned long main_hratio)
975{
976 int i, j, k, sc_ratio;
977
978 if (main_hratio <= GSC_SC_UP_MAX_RATIO)
979 sc_ratio = 0;
980 else if (main_hratio <= GSC_SC_DOWN_RATIO_7_8)
981 sc_ratio = 1;
982 else if (main_hratio <= GSC_SC_DOWN_RATIO_6_8)
983 sc_ratio = 2;
984 else if (main_hratio <= GSC_SC_DOWN_RATIO_5_8)
985 sc_ratio = 3;
986 else if (main_hratio <= GSC_SC_DOWN_RATIO_4_8)
987 sc_ratio = 4;
988 else if (main_hratio <= GSC_SC_DOWN_RATIO_3_8)
989 sc_ratio = 5;
990 else
991 sc_ratio = 6;
992
993 for (i = 0; i < GSC_COEF_PHASE; i++)
994 for (j = 0; j < GSC_COEF_H_8T; j++)
995 for (k = 0; k < GSC_COEF_DEPTH; k++)
996 gsc_write(h_coef_8t[sc_ratio][i][j],
997 GSC_HCOEF(i, j, k));
998}
999
1000static void gsc_set_v_coef(struct gsc_context *ctx, unsigned long main_vratio)
1001{
1002 int i, j, k, sc_ratio;
1003
1004 if (main_vratio <= GSC_SC_UP_MAX_RATIO)
1005 sc_ratio = 0;
1006 else if (main_vratio <= GSC_SC_DOWN_RATIO_7_8)
1007 sc_ratio = 1;
1008 else if (main_vratio <= GSC_SC_DOWN_RATIO_6_8)
1009 sc_ratio = 2;
1010 else if (main_vratio <= GSC_SC_DOWN_RATIO_5_8)
1011 sc_ratio = 3;
1012 else if (main_vratio <= GSC_SC_DOWN_RATIO_4_8)
1013 sc_ratio = 4;
1014 else if (main_vratio <= GSC_SC_DOWN_RATIO_3_8)
1015 sc_ratio = 5;
1016 else
1017 sc_ratio = 6;
1018
1019 for (i = 0; i < GSC_COEF_PHASE; i++)
1020 for (j = 0; j < GSC_COEF_V_4T; j++)
1021 for (k = 0; k < GSC_COEF_DEPTH; k++)
1022 gsc_write(v_coef_4t[sc_ratio][i][j],
1023 GSC_VCOEF(i, j, k));
1024}
1025
1026static void gsc_set_scaler(struct gsc_context *ctx, struct gsc_scaler *sc)
1027{
1028 u32 cfg;
1029
1030 DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n",
1031 __func__, sc->main_hratio, sc->main_vratio);
1032
1033 gsc_set_h_coef(ctx, sc->main_hratio);
1034 cfg = GSC_MAIN_H_RATIO_VALUE(sc->main_hratio);
1035 gsc_write(cfg, GSC_MAIN_H_RATIO);
1036
1037 gsc_set_v_coef(ctx, sc->main_vratio);
1038 cfg = GSC_MAIN_V_RATIO_VALUE(sc->main_vratio);
1039 gsc_write(cfg, GSC_MAIN_V_RATIO);
1040}
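To make the 16.16 fixed-point arithmetic concrete: scaling a 1920-pixel-wide source down to 1280 pixels gives pre_hratio = 1 (1920 < 1280 * 2, so no prescaling), main_hratio = (1920 << 16) / 1280 = 98304, i.e. 1.5 in 16.16 format, and gsc_set_h_coef() then selects sc_ratio = 3, the "87381 < Ratio <= 104857 (~8:5)" coefficient table.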
1041
1042static int gsc_dst_set_size(struct device *dev, int swap,
1043 struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
1044{
1045 struct gsc_context *ctx = get_gsc_context(dev);
1046 struct drm_exynos_pos img_pos = *pos;
1047 struct gsc_scaler *sc = &ctx->sc;
1048 u32 cfg;
1049
1050 DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
1051 __func__, swap, pos->x, pos->y, pos->w, pos->h);
1052
1053 if (swap) {
1054 img_pos.w = pos->h;
1055 img_pos.h = pos->w;
1056 }
1057
1058 /* pixel offset */
1059 cfg = (GSC_DSTIMG_OFFSET_X(pos->x) |
1060 GSC_DSTIMG_OFFSET_Y(pos->y));
1061 gsc_write(cfg, GSC_DSTIMG_OFFSET);
1062
1063 /* scaled size */
1064 cfg = (GSC_SCALED_WIDTH(img_pos.w) | GSC_SCALED_HEIGHT(img_pos.h));
1065 gsc_write(cfg, GSC_SCALED_SIZE);
1066
1067 DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n",
1068 __func__, sz->hsize, sz->vsize);
1069
1070 /* original size */
1071 cfg = gsc_read(GSC_DSTIMG_SIZE);
1072 cfg &= ~(GSC_DSTIMG_HEIGHT_MASK |
1073 GSC_DSTIMG_WIDTH_MASK);
1074 cfg |= (GSC_DSTIMG_WIDTH(sz->hsize) |
1075 GSC_DSTIMG_HEIGHT(sz->vsize));
1076 gsc_write(cfg, GSC_DSTIMG_SIZE);
1077
1078 cfg = gsc_read(GSC_OUT_CON);
1079 cfg &= ~GSC_OUT_RGB_TYPE_MASK;
1080
1081 DRM_DEBUG_KMS("%s:width[%d]range[%d]\n",
1082 __func__, pos->w, sc->range);
1083
1084 if (pos->w >= GSC_WIDTH_ITU_709)
1085 if (sc->range)
1086 cfg |= GSC_OUT_RGB_HD_WIDE;
1087 else
1088 cfg |= GSC_OUT_RGB_HD_NARROW;
1089 else
1090 if (sc->range)
1091 cfg |= GSC_OUT_RGB_SD_WIDE;
1092 else
1093 cfg |= GSC_OUT_RGB_SD_NARROW;
1094
1095 gsc_write(cfg, GSC_OUT_CON);
1096
1097 return 0;
1098}
1099
1100static int gsc_dst_get_buf_seq(struct gsc_context *ctx)
1101{
1102 u32 cfg, i, buf_num = GSC_REG_SZ;
1103 u32 mask = 0x00000001;
1104
1105 cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
1106
1107 for (i = 0; i < GSC_REG_SZ; i++)
1108 if (cfg & (mask << i))
1109 buf_num--;
1110
1111 DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);
1112
1113 return buf_num;
1114}
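For example, if the low 16 bits of GSC_OUT_BASE_ADDR_Y_MASK read 0xfffc, fourteen buffer slots are masked (dequeued), bits 0 and 1 are still enqueued, and gsc_dst_get_buf_seq() returns 2, which is just enough to keep the frame-done interrupt enabled (GSC_BUF_START).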
1115
1116static int gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
1117 enum drm_exynos_ipp_buf_type buf_type)
1118{
1119 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1120 bool masked;
1121 u32 cfg;
1122 u32 mask = 0x00000001 << buf_id;
1123 int ret = 0;
1124
1125 DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
1126 buf_id, buf_type);
1127
1128 mutex_lock(&ctx->lock);
1129
1130 /* mask register set */
1131 cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
1132
1133 switch (buf_type) {
1134 case IPP_BUF_ENQUEUE:
1135 masked = false;
1136 break;
1137 case IPP_BUF_DEQUEUE:
1138 masked = true;
1139 break;
1140 default:
1141 dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
1142 ret = -EINVAL;
1143 goto err_unlock;
1144 }
1145
1146 /* sequence id */
1147 cfg &= ~mask;
1148 cfg |= masked << buf_id;
1149 gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
1150 gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
1151 gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
1152
1153 /* interrupt enable */
1154 if (buf_type == IPP_BUF_ENQUEUE &&
1155 gsc_dst_get_buf_seq(ctx) >= GSC_BUF_START)
1156 gsc_handle_irq(ctx, true, false, true);
1157
1158 /* interrupt disable */
1159 if (buf_type == IPP_BUF_DEQUEUE &&
1160 gsc_dst_get_buf_seq(ctx) <= GSC_BUF_STOP)
1161 gsc_handle_irq(ctx, false, false, true);
1162
1163err_unlock:
1164 mutex_unlock(&ctx->lock);
1165 return ret;
1166}
1167
1168static int gsc_dst_set_addr(struct device *dev,
1169 struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
1170 enum drm_exynos_ipp_buf_type buf_type)
1171{
1172 struct gsc_context *ctx = get_gsc_context(dev);
1173 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1174 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
1175 struct drm_exynos_ipp_property *property;
1176
1177 if (!c_node) {
1178 DRM_ERROR("failed to get c_node.\n");
1179 return -EFAULT;
1180 }
1181
1182 property = &c_node->property;
1183 if (!property) {
1184 DRM_ERROR("failed to get property.\n");
1185 return -EFAULT;
1186 }
1187
1188 DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
1189 property->prop_id, buf_id, buf_type);
1190
1191 if (buf_id > GSC_MAX_DST) {
1192 dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id);
1193 return -EINVAL;
1194 }
1195
1196 /* address register set */
1197 switch (buf_type) {
1198 case IPP_BUF_ENQUEUE:
1199 gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
1200 GSC_OUT_BASE_ADDR_Y(buf_id));
1201 gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
1202 GSC_OUT_BASE_ADDR_CB(buf_id));
1203 gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
1204 GSC_OUT_BASE_ADDR_CR(buf_id));
1205 break;
1206 case IPP_BUF_DEQUEUE:
1207 gsc_write(0x0, GSC_OUT_BASE_ADDR_Y(buf_id));
1208 gsc_write(0x0, GSC_OUT_BASE_ADDR_CB(buf_id));
1209 gsc_write(0x0, GSC_OUT_BASE_ADDR_CR(buf_id));
1210 break;
1211 default:
1212 /* bypass */
1213 break;
1214 }
1215
1216 return gsc_dst_set_buf_seq(ctx, buf_id, buf_type);
1217}
1218
1219static struct exynos_drm_ipp_ops gsc_dst_ops = {
1220 .set_fmt = gsc_dst_set_fmt,
1221 .set_transf = gsc_dst_set_transf,
1222 .set_size = gsc_dst_set_size,
1223 .set_addr = gsc_dst_set_addr,
1224};
1225
1226static int gsc_clk_ctrl(struct gsc_context *ctx, bool enable)
1227{
1228 DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
1229
1230 if (enable) {
1231 clk_enable(ctx->gsc_clk);
1232 ctx->suspended = false;
1233 } else {
1234 clk_disable(ctx->gsc_clk);
1235 ctx->suspended = true;
1236 }
1237
1238 return 0;
1239}
1240
1241static int gsc_get_src_buf_index(struct gsc_context *ctx)
1242{
1243 u32 cfg, curr_index, i;
1244 u32 buf_id = GSC_MAX_SRC;
1245 int ret;
1246
1247 DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
1248
1249 cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
1250 curr_index = GSC_IN_CURR_GET_INDEX(cfg);
1251
1252 for (i = curr_index; i < GSC_MAX_SRC; i++) {
1253 if (!((cfg >> i) & 0x1)) {
1254 buf_id = i;
1255 break;
1256 }
1257 }
1258
1259 if (buf_id == GSC_MAX_SRC) {
1260 DRM_ERROR("failed to get in buffer index.\n");
1261 return -EINVAL;
1262 }
1263
1264 ret = gsc_src_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
1265 if (ret < 0) {
1266 DRM_ERROR("failed to dequeue.\n");
1267 return ret;
1268 }
1269
1270 DRM_DEBUG_KMS("%s:cfg[0x%x]curr_index[%d]buf_id[%d]\n", __func__, cfg,
1271 curr_index, buf_id);
1272
1273 return buf_id;
1274}
1275
1276static int gsc_get_dst_buf_index(struct gsc_context *ctx)
1277{
1278 u32 cfg, curr_index, i;
1279 u32 buf_id = GSC_MAX_DST;
1280 int ret;
1281
1282 DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
1283
1284 cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
1285 curr_index = GSC_OUT_CURR_GET_INDEX(cfg);
1286
1287 for (i = curr_index; i < GSC_MAX_DST; i++) {
1288 if (!((cfg >> i) & 0x1)) {
1289 buf_id = i;
1290 break;
1291 }
1292 }
1293
1294 if (buf_id == GSC_MAX_DST) {
1295 DRM_ERROR("failed to get out buffer index.\n");
1296 return -EINVAL;
1297 }
1298
1299 ret = gsc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
1300 if (ret < 0) {
1301 DRM_ERROR("failed to dequeue.\n");
1302 return ret;
1303 }
1304
1305 DRM_DEBUG_KMS("%s:cfg[0x%x]curr_index[%d]buf_id[%d]\n", __func__, cfg,
1306 curr_index, buf_id);
1307
1308 return buf_id;
1309}
1310
1311static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
1312{
1313 struct gsc_context *ctx = dev_id;
1314 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1315 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
1316 struct drm_exynos_ipp_event_work *event_work =
1317 c_node->event_work;
1318 u32 status;
1319 int buf_id[EXYNOS_DRM_OPS_MAX];
1320
1321 DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
1322
1323 status = gsc_read(GSC_IRQ);
1324 if (status & GSC_IRQ_STATUS_OR_IRQ) {
1325 dev_err(ippdrv->dev, "occured overflow at %d, status 0x%x.\n",
1326 ctx->id, status);
1327 return IRQ_NONE;
1328 }
1329
1330 if (status & GSC_IRQ_STATUS_OR_FRM_DONE) {
1331 dev_dbg(ippdrv->dev, "occured frame done at %d, status 0x%x.\n",
1332 ctx->id, status);
1333
1334 buf_id[EXYNOS_DRM_OPS_SRC] = gsc_get_src_buf_index(ctx);
1335 if (buf_id[EXYNOS_DRM_OPS_SRC] < 0)
1336 return IRQ_HANDLED;
1337
1338 buf_id[EXYNOS_DRM_OPS_DST] = gsc_get_dst_buf_index(ctx);
1339 if (buf_id[EXYNOS_DRM_OPS_DST] < 0)
1340 return IRQ_HANDLED;
1341
1342 DRM_DEBUG_KMS("%s:buf_id_src[%d]buf_id_dst[%d]\n", __func__,
1343 buf_id[EXYNOS_DRM_OPS_SRC], buf_id[EXYNOS_DRM_OPS_DST]);
1344
1345 event_work->ippdrv = ippdrv;
1346 event_work->buf_id[EXYNOS_DRM_OPS_SRC] =
1347 buf_id[EXYNOS_DRM_OPS_SRC];
1348 event_work->buf_id[EXYNOS_DRM_OPS_DST] =
1349 buf_id[EXYNOS_DRM_OPS_DST];
1350 queue_work(ippdrv->event_workq,
1351 (struct work_struct *)event_work);
1352 }
1353
1354 return IRQ_HANDLED;
1355}
1356
1357static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
1358{
1359 struct drm_exynos_ipp_prop_list *prop_list;
1360
1361 DRM_DEBUG_KMS("%s\n", __func__);
1362
1363 prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
1364 if (!prop_list) {
1365 DRM_ERROR("failed to alloc property list.\n");
1366 return -ENOMEM;
1367 }
1368
1369 prop_list->version = 1;
1370 prop_list->writeback = 1;
1371 prop_list->refresh_min = GSC_REFRESH_MIN;
1372 prop_list->refresh_max = GSC_REFRESH_MAX;
1373 prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
1374 (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
1375 prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
1376 (1 << EXYNOS_DRM_DEGREE_90) |
1377 (1 << EXYNOS_DRM_DEGREE_180) |
1378 (1 << EXYNOS_DRM_DEGREE_270);
1379 prop_list->csc = 1;
1380 prop_list->crop = 1;
1381 prop_list->crop_max.hsize = GSC_CROP_MAX;
1382 prop_list->crop_max.vsize = GSC_CROP_MAX;
1383 prop_list->crop_min.hsize = GSC_CROP_MIN;
1384 prop_list->crop_min.vsize = GSC_CROP_MIN;
1385 prop_list->scale = 1;
1386 prop_list->scale_max.hsize = GSC_SCALE_MAX;
1387 prop_list->scale_max.vsize = GSC_SCALE_MAX;
1388 prop_list->scale_min.hsize = GSC_SCALE_MIN;
1389 prop_list->scale_min.vsize = GSC_SCALE_MIN;
1390
1391 ippdrv->prop_list = prop_list;
1392
1393 return 0;
1394}
1395
1396static inline bool gsc_check_drm_flip(enum drm_exynos_flip flip)
1397{
1398 switch (flip) {
1399 case EXYNOS_DRM_FLIP_NONE:
1400 case EXYNOS_DRM_FLIP_VERTICAL:
1401 case EXYNOS_DRM_FLIP_HORIZONTAL:
1402 case EXYNOS_DRM_FLIP_VERTICAL | EXYNOS_DRM_FLIP_HORIZONTAL:
1403 return true;
1404 default:
1405 DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
1406 return false;
1407 }
1408}
1409
1410static int gsc_ippdrv_check_property(struct device *dev,
1411 struct drm_exynos_ipp_property *property)
1412{
1413 struct gsc_context *ctx = get_gsc_context(dev);
1414 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1415 struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
1416 struct drm_exynos_ipp_config *config;
1417 struct drm_exynos_pos *pos;
1418 struct drm_exynos_sz *sz;
1419 bool swap;
1420 int i;
1421
1422 DRM_DEBUG_KMS("%s\n", __func__);
1423
1424 for_each_ipp_ops(i) {
1425 if ((i == EXYNOS_DRM_OPS_SRC) &&
1426 (property->cmd == IPP_CMD_WB))
1427 continue;
1428
1429 config = &property->config[i];
1430 pos = &config->pos;
1431 sz = &config->sz;
1432
1433 /* check for flip */
1434 if (!gsc_check_drm_flip(config->flip)) {
1435 DRM_ERROR("invalid flip.\n");
1436 goto err_property;
1437 }
1438
1439 /* check for degree */
1440 switch (config->degree) {
1441 case EXYNOS_DRM_DEGREE_90:
1442 case EXYNOS_DRM_DEGREE_270:
1443 swap = true;
1444 break;
1445 case EXYNOS_DRM_DEGREE_0:
1446 case EXYNOS_DRM_DEGREE_180:
1447 swap = false;
1448 break;
1449 default:
1450 DRM_ERROR("invalid degree.\n");
1451 goto err_property;
1452 }
1453
1454 /* check for buffer bound */
1455 if ((pos->x + pos->w > sz->hsize) ||
1456 (pos->y + pos->h > sz->vsize)) {
1457 DRM_ERROR("out of buf bound.\n");
1458 goto err_property;
1459 }
1460
1461 /* check for crop */
1462 if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
1463 if (swap) {
1464 if ((pos->h < pp->crop_min.hsize) ||
1465 (sz->vsize > pp->crop_max.hsize) ||
1466 (pos->w < pp->crop_min.vsize) ||
1467 (sz->hsize > pp->crop_max.vsize)) {
1468 DRM_ERROR("out of crop size.\n");
1469 goto err_property;
1470 }
1471 } else {
1472 if ((pos->w < pp->crop_min.hsize) ||
1473 (sz->hsize > pp->crop_max.hsize) ||
1474 (pos->h < pp->crop_min.vsize) ||
1475 (sz->vsize > pp->crop_max.vsize)) {
1476 DRM_ERROR("out of crop size.\n");
1477 goto err_property;
1478 }
1479 }
1480 }
1481
1482 /* check for scale */
1483 if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
1484 if (swap) {
1485 if ((pos->h < pp->scale_min.hsize) ||
1486 (sz->vsize > pp->scale_max.hsize) ||
1487 (pos->w < pp->scale_min.vsize) ||
1488 (sz->hsize > pp->scale_max.vsize)) {
1489 DRM_ERROR("out of scale size.\n");
1490 goto err_property;
1491 }
1492 } else {
1493 if ((pos->w < pp->scale_min.hsize) ||
1494 (sz->hsize > pp->scale_max.hsize) ||
1495 (pos->h < pp->scale_min.vsize) ||
1496 (sz->vsize > pp->scale_max.vsize)) {
1497 DRM_ERROR("out of scale size.\n");
1498 goto err_property;
1499 }
1500 }
1501 }
1502 }
1503
1504 return 0;
1505
1506err_property:
1507 for_each_ipp_ops(i) {
1508 if ((i == EXYNOS_DRM_OPS_SRC) &&
1509 (property->cmd == IPP_CMD_WB))
1510 continue;
1511
1512 config = &property->config[i];
1513 pos = &config->pos;
1514 sz = &config->sz;
1515
1516 DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
1517 i ? "dst" : "src", config->flip, config->degree,
1518 pos->x, pos->y, pos->w, pos->h,
1519 sz->hsize, sz->vsize);
1520 }
1521
1522 return -EINVAL;
1523}
1524
1525
1526static int gsc_ippdrv_reset(struct device *dev)
1527{
1528 struct gsc_context *ctx = get_gsc_context(dev);
1529 struct gsc_scaler *sc = &ctx->sc;
1530 int ret;
1531
1532 DRM_DEBUG_KMS("%s\n", __func__);
1533
1534 /* reset h/w block */
1535 ret = gsc_sw_reset(ctx);
1536 if (ret < 0) {
1537 dev_err(dev, "failed to reset hardware.\n");
1538 return ret;
1539 }
1540
1541 /* scaler setting */
1542 memset(&ctx->sc, 0x0, sizeof(ctx->sc));
1543 sc->range = true;
1544
1545 return 0;
1546}
1547
1548static int gsc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1549{
1550 struct gsc_context *ctx = get_gsc_context(dev);
1551 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1552 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
1553 struct drm_exynos_ipp_property *property;
1554 struct drm_exynos_ipp_config *config;
1555 struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX];
1556 struct drm_exynos_ipp_set_wb set_wb;
1557 u32 cfg;
1558 int ret, i;
1559
1560 DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
1561
1562 if (!c_node) {
1563 DRM_ERROR("failed to get c_node.\n");
1564 return -EINVAL;
1565 }
1566
1567 property = &c_node->property;
1568 if (!property) {
1569 DRM_ERROR("failed to get property.\n");
1570 return -EINVAL;
1571 }
1572
1573 gsc_handle_irq(ctx, true, false, true);
1574
1575 for_each_ipp_ops(i) {
1576 config = &property->config[i];
1577 img_pos[i] = config->pos;
1578 }
1579
1580 switch (cmd) {
1581 case IPP_CMD_M2M:
1582 /* enable one shot */
1583 cfg = gsc_read(GSC_ENABLE);
1584 cfg &= ~(GSC_ENABLE_ON_CLEAR_MASK |
1585 GSC_ENABLE_CLK_GATE_MODE_MASK);
1586 cfg |= GSC_ENABLE_ON_CLEAR_ONESHOT;
1587 gsc_write(cfg, GSC_ENABLE);
1588
1589 /* src dma memory */
1590 cfg = gsc_read(GSC_IN_CON);
1591 cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
1592 cfg |= GSC_IN_PATH_MEMORY;
1593 gsc_write(cfg, GSC_IN_CON);
1594
1595 /* dst dma memory */
1596 cfg = gsc_read(GSC_OUT_CON);
1597 cfg |= GSC_OUT_PATH_MEMORY;
1598 gsc_write(cfg, GSC_OUT_CON);
1599 break;
1600 case IPP_CMD_WB:
1601 set_wb.enable = 1;
1602 set_wb.refresh = property->refresh_rate;
1603 gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
1604 exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
1605
1606 /* src local path */
1607 cfg = readl(GSC_IN_CON);
1608 cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
1609 cfg |= (GSC_IN_PATH_LOCAL | GSC_IN_LOCAL_FIMD_WB);
1610 gsc_write(cfg, GSC_IN_CON);
1611
1612 /* dst dma memory */
1613 cfg = gsc_read(GSC_OUT_CON);
1614 cfg |= GSC_OUT_PATH_MEMORY;
1615 gsc_write(cfg, GSC_OUT_CON);
1616 break;
1617 case IPP_CMD_OUTPUT:
1618 /* src dma memory */
1619 cfg = gsc_read(GSC_IN_CON);
1620 cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
1621 cfg |= GSC_IN_PATH_MEMORY;
1622 gsc_write(cfg, GSC_IN_CON);
1623
1624 /* dst local path */
1625 cfg = gsc_read(GSC_OUT_CON);
1626 cfg |= GSC_OUT_PATH_MEMORY;
1627 gsc_write(cfg, GSC_OUT_CON);
1628 break;
1629 default:
1630 ret = -EINVAL;
1631 dev_err(dev, "invalid operations.\n");
1632 return ret;
1633 }
1634
1635 ret = gsc_set_prescaler(ctx, &ctx->sc,
1636 &img_pos[EXYNOS_DRM_OPS_SRC],
1637 &img_pos[EXYNOS_DRM_OPS_DST]);
1638 if (ret) {
1639 dev_err(dev, "failed to set precalser.\n");
1640 return ret;
1641 }
1642
1643 gsc_set_scaler(ctx, &ctx->sc);
1644
1645 cfg = gsc_read(GSC_ENABLE);
1646 cfg |= GSC_ENABLE_ON;
1647 gsc_write(cfg, GSC_ENABLE);
1648
1649 return 0;
1650}
1651
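A minimal sketch, not part of this patch: the GSC_ENABLE, GSC_IN_CON and
GSC_OUT_CON updates above all follow the same read-modify-write pattern,
which a hypothetical helper could factor out. gsc_read()/gsc_write() are
assumed to wrap readl()/writel() on ctx->regs, as elsewhere in this driver.

static void gsc_update_bits(struct gsc_context *ctx, u32 reg,
		u32 mask, u32 val)
{
	u32 cfg = gsc_read(reg);

	cfg &= ~mask;
	cfg |= val;
	gsc_write(cfg, reg);
}

/* e.g. the one-shot enable in the M2M path would become: */
/* gsc_update_bits(ctx, GSC_ENABLE, GSC_ENABLE_ON_CLEAR_MASK |
 *		GSC_ENABLE_CLK_GATE_MODE_MASK, GSC_ENABLE_ON_CLEAR_ONESHOT); */
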
1652static void gsc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1653{
1654 struct gsc_context *ctx = get_gsc_context(dev);
1655 struct drm_exynos_ipp_set_wb set_wb = {0, 0};
1656 u32 cfg;
1657
1658 DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
1659
1660 switch (cmd) {
1661 case IPP_CMD_M2M:
1662 /* bypass */
1663 break;
1664 case IPP_CMD_WB:
1665 gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
1666 exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
1667 break;
1668 case IPP_CMD_OUTPUT:
1669 default:
1670 dev_err(dev, "invalid operations.\n");
1671 break;
1672 }
1673
1674 gsc_handle_irq(ctx, false, false, true);
1675
1676 /* reset sequence */
1677 gsc_write(0xff, GSC_OUT_BASE_ADDR_Y_MASK);
1678 gsc_write(0xff, GSC_OUT_BASE_ADDR_CB_MASK);
1679 gsc_write(0xff, GSC_OUT_BASE_ADDR_CR_MASK);
1680
1681 cfg = gsc_read(GSC_ENABLE);
1682 cfg &= ~GSC_ENABLE_ON;
1683 gsc_write(cfg, GSC_ENABLE);
1684}
1685
1686static int __devinit gsc_probe(struct platform_device *pdev)
1687{
1688 struct device *dev = &pdev->dev;
1689 struct gsc_context *ctx;
1690 struct resource *res;
1691 struct exynos_drm_ippdrv *ippdrv;
1692 int ret;
1693
1694 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
1695 if (!ctx)
1696 return -ENOMEM;
1697
1698 /* clock control */
1699 ctx->gsc_clk = clk_get(dev, "gscl");
1700 if (IS_ERR(ctx->gsc_clk)) {
1701 dev_err(dev, "failed to get gsc clock.\n");
1702 ret = PTR_ERR(ctx->gsc_clk);
1703 goto err_ctx;
1704 }
1705
1706 /* resource memory */
1707 ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1708 if (!ctx->regs_res) {
1709 dev_err(dev, "failed to find registers.\n");
1710 ret = -ENOENT;
1711 goto err_clk;
1712 }
1713
1714 ctx->regs = devm_request_and_ioremap(dev, ctx->regs_res);
1715 if (!ctx->regs) {
1716 dev_err(dev, "failed to map registers.\n");
1717 ret = -ENXIO;
1718 goto err_clk;
1719 }
1720
1721 /* resource irq */
1722 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1723 if (!res) {
1724 dev_err(dev, "failed to request irq resource.\n");
1725 ret = -ENOENT;
1726 goto err_get_regs;
1727 }
1728
1729 ctx->irq = res->start;
1730 ret = request_threaded_irq(ctx->irq, NULL, gsc_irq_handler,
1731 IRQF_ONESHOT, "drm_gsc", ctx);
1732 if (ret < 0) {
1733 dev_err(dev, "failed to request irq.\n");
1734 goto err_get_regs;
1735 }
1736
1737	/* context initialization */
1738 ctx->id = pdev->id;
1739
1740 ippdrv = &ctx->ippdrv;
1741 ippdrv->dev = dev;
1742 ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &gsc_src_ops;
1743 ippdrv->ops[EXYNOS_DRM_OPS_DST] = &gsc_dst_ops;
1744 ippdrv->check_property = gsc_ippdrv_check_property;
1745 ippdrv->reset = gsc_ippdrv_reset;
1746 ippdrv->start = gsc_ippdrv_start;
1747 ippdrv->stop = gsc_ippdrv_stop;
1748 ret = gsc_init_prop_list(ippdrv);
1749 if (ret < 0) {
1750 dev_err(dev, "failed to init property list.\n");
1751 goto err_get_irq;
1752 }
1753
1754 DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
1755 (int)ippdrv);
1756
1757 mutex_init(&ctx->lock);
1758 platform_set_drvdata(pdev, ctx);
1759
1760 pm_runtime_set_active(dev);
1761 pm_runtime_enable(dev);
1762
1763 ret = exynos_drm_ippdrv_register(ippdrv);
1764 if (ret < 0) {
1765 dev_err(dev, "failed to register drm gsc device.\n");
1766 goto err_ippdrv_register;
1767 }
1768
1769 dev_info(&pdev->dev, "drm gsc registered successfully.\n");
1770
1771 return 0;
1772
1773err_ippdrv_register:
1774 devm_kfree(dev, ippdrv->prop_list);
1775 pm_runtime_disable(dev);
1776err_get_irq:
1777 free_irq(ctx->irq, ctx);
1778err_get_regs:
1779 devm_iounmap(dev, ctx->regs);
1780err_clk:
1781 clk_put(ctx->gsc_clk);
1782err_ctx:
1783 devm_kfree(dev, ctx);
1784 return ret;
1785}
1786
1787static int __devexit gsc_remove(struct platform_device *pdev)
1788{
1789 struct device *dev = &pdev->dev;
1790 struct gsc_context *ctx = get_gsc_context(dev);
1791 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1792
1793 devm_kfree(dev, ippdrv->prop_list);
1794 exynos_drm_ippdrv_unregister(ippdrv);
1795 mutex_destroy(&ctx->lock);
1796
1797 pm_runtime_set_suspended(dev);
1798 pm_runtime_disable(dev);
1799
1800 free_irq(ctx->irq, ctx);
1801 devm_iounmap(dev, ctx->regs);
1802
1803 clk_put(ctx->gsc_clk);
1804
1805 devm_kfree(dev, ctx);
1806
1807 return 0;
1808}
1809
1810#ifdef CONFIG_PM_SLEEP
1811static int gsc_suspend(struct device *dev)
1812{
1813 struct gsc_context *ctx = get_gsc_context(dev);
1814
1815 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
1816
1817 if (pm_runtime_suspended(dev))
1818 return 0;
1819
1820 return gsc_clk_ctrl(ctx, false);
1821}
1822
1823static int gsc_resume(struct device *dev)
1824{
1825 struct gsc_context *ctx = get_gsc_context(dev);
1826
1827 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
1828
1829 if (!pm_runtime_suspended(dev))
1830 return gsc_clk_ctrl(ctx, true);
1831
1832 return 0;
1833}
1834#endif
1835
1836#ifdef CONFIG_PM_RUNTIME
1837static int gsc_runtime_suspend(struct device *dev)
1838{
1839 struct gsc_context *ctx = get_gsc_context(dev);
1840
1841 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
1842
1843 return gsc_clk_ctrl(ctx, false);
1844}
1845
1846static int gsc_runtime_resume(struct device *dev)
1847{
1848 struct gsc_context *ctx = get_gsc_context(dev);
1849
1850	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
1851
1852 return gsc_clk_ctrl(ctx, true);
1853}
1854#endif
1855
1856static const struct dev_pm_ops gsc_pm_ops = {
1857 SET_SYSTEM_SLEEP_PM_OPS(gsc_suspend, gsc_resume)
1858 SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
1859};
1860
1861struct platform_driver gsc_driver = {
1862 .probe = gsc_probe,
1863 .remove = __devexit_p(gsc_remove),
1864 .driver = {
1865 .name = "exynos-drm-gsc",
1866 .owner = THIS_MODULE,
1867 .pm = &gsc_pm_ops,
1868 },
1869};
1870
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.h b/drivers/gpu/drm/exynos/exynos_drm_gsc.h
new file mode 100644
index 000000000000..b3c3bc618c0f
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.h
@@ -0,0 +1,38 @@
1/*
2 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
3 *
4 * Authors:
5 * Eunchul Kim <chulspro.kim@samsung.com>
6 * Jinyoung Jeon <jy0.jeon@samsung.com>
7 * Sangmin Lee <lsmin.lee@samsung.com>
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */
28
29#ifndef _EXYNOS_DRM_GSC_H_
30#define _EXYNOS_DRM_GSC_H_
31
32/*
33 * TODO
34 * FIMD output interface notifier callback.
35 * Mixer output interface notifier callback.
36 */
37
38#endif /* _EXYNOS_DRM_GSC_H_ */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index c3b9e2b45185..55793c46e3c2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -29,6 +29,9 @@
29#define get_ctx_from_subdrv(subdrv) container_of(subdrv,\ 29#define get_ctx_from_subdrv(subdrv) container_of(subdrv,\
30 struct drm_hdmi_context, subdrv); 30 struct drm_hdmi_context, subdrv);
31 31
32/* platform device pointer for common drm hdmi device. */
33static struct platform_device *exynos_drm_hdmi_pdev;
34
32/* Common hdmi subdrv needs to access the hdmi and mixer through context. 35/* Common hdmi subdrv needs to access the hdmi and mixer through context.
33* These should be initialized by the respective drivers */ 36* These should be initialized by the respective drivers */
34static struct exynos_drm_hdmi_context *hdmi_ctx; 37static struct exynos_drm_hdmi_context *hdmi_ctx;
@@ -46,6 +49,25 @@ struct drm_hdmi_context {
46 bool enabled[MIXER_WIN_NR]; 49 bool enabled[MIXER_WIN_NR];
47}; 50};
48 51
52int exynos_platform_device_hdmi_register(void)
53{
54 if (exynos_drm_hdmi_pdev)
55 return -EEXIST;
56
57 exynos_drm_hdmi_pdev = platform_device_register_simple(
58 "exynos-drm-hdmi", -1, NULL, 0);
59 if (IS_ERR_OR_NULL(exynos_drm_hdmi_pdev))
60 return PTR_ERR(exynos_drm_hdmi_pdev);
61
62 return 0;
63}
64
65void exynos_platform_device_hdmi_unregister(void)
66{
67 if (exynos_drm_hdmi_pdev)
68 platform_device_unregister(exynos_drm_hdmi_pdev);
69}
70
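A minimal sketch, not part of this patch, of how the register/unregister
pair above is expected to be used from the top-level driver init/exit;
the exynos_drm_init/exynos_drm_exit names here are illustrative:

static int __init exynos_drm_init(void)
{
	int ret;

	/* create the common hdmi platform device before the drivers */
	ret = exynos_platform_device_hdmi_register();
	if (ret < 0)
		return ret;

	/* ... register hdmi/mixer/common platform drivers ... */
	return 0;
}

static void __exit exynos_drm_exit(void)
{
	/* ... unregister the platform drivers ... */
	exynos_platform_device_hdmi_unregister();
}
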
49void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx) 71void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx)
50{ 72{
51 if (ctx) 73 if (ctx)
@@ -157,6 +179,16 @@ static void drm_hdmi_disable_vblank(struct device *subdrv_dev)
157 return mixer_ops->disable_vblank(ctx->mixer_ctx->ctx); 179 return mixer_ops->disable_vblank(ctx->mixer_ctx->ctx);
158} 180}
159 181
182static void drm_hdmi_wait_for_vblank(struct device *subdrv_dev)
183{
184 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
185
186 DRM_DEBUG_KMS("%s\n", __FILE__);
187
188 if (mixer_ops && mixer_ops->wait_for_vblank)
189 mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
190}
191
160static void drm_hdmi_mode_fixup(struct device *subdrv_dev, 192static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
161 struct drm_connector *connector, 193 struct drm_connector *connector,
162 const struct drm_display_mode *mode, 194 const struct drm_display_mode *mode,
@@ -238,6 +270,7 @@ static struct exynos_drm_manager_ops drm_hdmi_manager_ops = {
238 .apply = drm_hdmi_apply, 270 .apply = drm_hdmi_apply,
239 .enable_vblank = drm_hdmi_enable_vblank, 271 .enable_vblank = drm_hdmi_enable_vblank,
240 .disable_vblank = drm_hdmi_disable_vblank, 272 .disable_vblank = drm_hdmi_disable_vblank,
273 .wait_for_vblank = drm_hdmi_wait_for_vblank,
241 .mode_fixup = drm_hdmi_mode_fixup, 274 .mode_fixup = drm_hdmi_mode_fixup,
242 .mode_set = drm_hdmi_mode_set, 275 .mode_set = drm_hdmi_mode_set,
243 .get_max_resol = drm_hdmi_get_max_resol, 276 .get_max_resol = drm_hdmi_get_max_resol,
@@ -291,21 +324,10 @@ static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
291 ctx->enabled[win] = false; 324 ctx->enabled[win] = false;
292} 325}
293 326
294static void drm_mixer_wait_for_vblank(struct device *subdrv_dev)
295{
296 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
297
298 DRM_DEBUG_KMS("%s\n", __FILE__);
299
300 if (mixer_ops && mixer_ops->wait_for_vblank)
301 mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
302}
303
304static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = { 327static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
305 .mode_set = drm_mixer_mode_set, 328 .mode_set = drm_mixer_mode_set,
306 .commit = drm_mixer_commit, 329 .commit = drm_mixer_commit,
307 .disable = drm_mixer_disable, 330 .disable = drm_mixer_disable,
308 .wait_for_vblank = drm_mixer_wait_for_vblank,
309}; 331};
310 332
311static struct exynos_drm_manager hdmi_manager = { 333static struct exynos_drm_manager hdmi_manager = {
@@ -346,9 +368,23 @@ static int hdmi_subdrv_probe(struct drm_device *drm_dev,
346 ctx->hdmi_ctx->drm_dev = drm_dev; 368 ctx->hdmi_ctx->drm_dev = drm_dev;
347 ctx->mixer_ctx->drm_dev = drm_dev; 369 ctx->mixer_ctx->drm_dev = drm_dev;
348 370
371 if (mixer_ops->iommu_on)
372 mixer_ops->iommu_on(ctx->mixer_ctx->ctx, true);
373
349 return 0; 374 return 0;
350} 375}
351 376
377static void hdmi_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
378{
379 struct drm_hdmi_context *ctx;
380 struct exynos_drm_subdrv *subdrv = to_subdrv(dev);
381
382 ctx = get_ctx_from_subdrv(subdrv);
383
384 if (mixer_ops->iommu_on)
385 mixer_ops->iommu_on(ctx->mixer_ctx->ctx, false);
386}
387
352static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev) 388static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
353{ 389{
354 struct device *dev = &pdev->dev; 390 struct device *dev = &pdev->dev;
@@ -368,6 +404,7 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
368 subdrv->dev = dev; 404 subdrv->dev = dev;
369 subdrv->manager = &hdmi_manager; 405 subdrv->manager = &hdmi_manager;
370 subdrv->probe = hdmi_subdrv_probe; 406 subdrv->probe = hdmi_subdrv_probe;
407 subdrv->remove = hdmi_subdrv_remove;
371 408
372 platform_set_drvdata(pdev, subdrv); 409 platform_set_drvdata(pdev, subdrv);
373 410
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index 2da5ffd3a059..fcc3093ec8fe 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -62,12 +62,13 @@ struct exynos_hdmi_ops {
62 62
63struct exynos_mixer_ops { 63struct exynos_mixer_ops {
64 /* manager */ 64 /* manager */
65 int (*iommu_on)(void *ctx, bool enable);
65 int (*enable_vblank)(void *ctx, int pipe); 66 int (*enable_vblank)(void *ctx, int pipe);
66 void (*disable_vblank)(void *ctx); 67 void (*disable_vblank)(void *ctx);
68 void (*wait_for_vblank)(void *ctx);
67 void (*dpms)(void *ctx, int mode); 69 void (*dpms)(void *ctx, int mode);
68 70
69 /* overlay */ 71 /* overlay */
70 void (*wait_for_vblank)(void *ctx);
71 void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay); 72 void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
72 void (*win_commit)(void *ctx, int zpos); 73 void (*win_commit)(void *ctx, int zpos);
73 void (*win_disable)(void *ctx, int zpos); 74 void (*win_disable)(void *ctx, int zpos);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
new file mode 100644
index 000000000000..2482b7f96341
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -0,0 +1,150 @@
1/* exynos_drm_iommu.c
2 *
3 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
4 * Author: Inki Dae <inki.dae@samsung.com>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26#include <drmP.h>
27#include <drm/exynos_drm.h>
28
29#include <linux/dma-mapping.h>
30#include <linux/iommu.h>
31#include <linux/kref.h>
32
33#include <asm/dma-iommu.h>
34
35#include "exynos_drm_drv.h"
36#include "exynos_drm_iommu.h"
37
38/*
39 * drm_create_iommu_mapping - create a mapping structure
40 *
41 * @drm_dev: DRM device
42 */
43int drm_create_iommu_mapping(struct drm_device *drm_dev)
44{
45 struct dma_iommu_mapping *mapping = NULL;
46 struct exynos_drm_private *priv = drm_dev->dev_private;
47 struct device *dev = drm_dev->dev;
48
49 if (!priv->da_start)
50 priv->da_start = EXYNOS_DEV_ADDR_START;
51 if (!priv->da_space_size)
52 priv->da_space_size = EXYNOS_DEV_ADDR_SIZE;
53 if (!priv->da_space_order)
54 priv->da_space_order = EXYNOS_DEV_ADDR_ORDER;
55
56 mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start,
57 priv->da_space_size,
58 priv->da_space_order);
59 if (IS_ERR(mapping))
60 return PTR_ERR(mapping);
61
62 dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
63 GFP_KERNEL);
64 dma_set_max_seg_size(dev, 0xffffffffu);
65 dev->archdata.mapping = mapping;
66
67 return 0;
68}
69
70/*
71 * drm_release_iommu_mapping - release iommu mapping structure
72 *
73 * @drm_dev: DRM device
74 *
75 * If mapping->kref drops to 0, everything related to the iommu
76 * mapping is released.
77 */
78void drm_release_iommu_mapping(struct drm_device *drm_dev)
79{
80 struct device *dev = drm_dev->dev;
81
82 arm_iommu_release_mapping(dev->archdata.mapping);
83}
84
85/*
86 * drm_iommu_attach_device- attach device to iommu mapping
87 *
88 * @drm_dev: DRM device
89 * @subdrv_dev: device to be attached
90 *
91 * This function should be called by sub drivers to attach themselves
92 * to the iommu mapping.
93 */
94int drm_iommu_attach_device(struct drm_device *drm_dev,
95 struct device *subdrv_dev)
96{
97 struct device *dev = drm_dev->dev;
98 int ret;
99
100 if (!dev->archdata.mapping) {
101 DRM_ERROR("iommu_mapping is null.\n");
102 return -EFAULT;
103 }
104
105 subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
106 sizeof(*subdrv_dev->dma_parms),
107 GFP_KERNEL);
108 dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
109
110 ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping);
111 if (ret < 0) {
112 DRM_DEBUG_KMS("failed iommu attach.\n");
113 return ret;
114 }
115
116 /*
117 * Set dma_ops to drm_device just one time.
118 *
119	 * The dma mapping api needs a device object; the api is used to
120	 * allocate physical memory and map it through the iommu table.
121	 * If the iommu attach succeeds, the sub driver gets dma_ops for
122	 * the iommu, and all sub drivers share the same dma_ops.
123 */
124 if (!dev->archdata.dma_ops)
125 dev->archdata.dma_ops = subdrv_dev->archdata.dma_ops;
126
127 return 0;
128}
129
130/*
131 * drm_iommu_detach_device - detach device address space mapping from device
132 *
133 * @drm_dev: DRM device
134 * @subdrv_dev: device to be detached
135 *
136 * This function should be called by sub drivers to detach themselves
137 * from the iommu mapping.
138 */
139void drm_iommu_detach_device(struct drm_device *drm_dev,
140 struct device *subdrv_dev)
141{
142 struct device *dev = drm_dev->dev;
143 struct dma_iommu_mapping *mapping = dev->archdata.mapping;
144
145 if (!mapping || !mapping->domain)
146 return;
147
148 iommu_detach_device(mapping->domain, subdrv_dev);
149 drm_release_iommu_mapping(drm_dev);
150}
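
A minimal usage sketch, not part of this patch: sub drivers are expected
to call the attach/detach helpers from their exynos_drm_subdrv callbacks.
The example_* names below are illustrative:

static int example_subdrv_probe(struct drm_device *drm_dev,
		struct device *dev)
{
	/* attach this sub device to the common iommu mapping */
	return drm_iommu_attach_device(drm_dev, dev);
}

static void example_subdrv_remove(struct drm_device *drm_dev,
		struct device *dev)
{
	/* undo the attach on sub driver removal */
	drm_iommu_detach_device(drm_dev, dev);
}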
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
new file mode 100644
index 000000000000..18a0ca190b98
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -0,0 +1,85 @@
1/* exynos_drm_iommu.h
2 *
3 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
4 * Author: Inki Dae <inki.dae@samsung.com>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26#ifndef _EXYNOS_DRM_IOMMU_H_
27#define _EXYNOS_DRM_IOMMU_H_
28
29#define EXYNOS_DEV_ADDR_START 0x20000000
30#define EXYNOS_DEV_ADDR_SIZE 0x40000000
31#define EXYNOS_DEV_ADDR_ORDER 0x4
32
33#ifdef CONFIG_DRM_EXYNOS_IOMMU
34
35int drm_create_iommu_mapping(struct drm_device *drm_dev);
36
37void drm_release_iommu_mapping(struct drm_device *drm_dev);
38
39int drm_iommu_attach_device(struct drm_device *drm_dev,
40 struct device *subdrv_dev);
41
42void drm_iommu_detach_device(struct drm_device *drm_dev,
43 struct device *subdrv_dev);
44
45static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
46{
47#ifdef CONFIG_ARM_DMA_USE_IOMMU
48 struct device *dev = drm_dev->dev;
49
50 return dev->archdata.mapping ? true : false;
51#else
52 return false;
53#endif
54}
55
56#else
57
58struct dma_iommu_mapping;
59static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
60{
61 return 0;
62}
63
64static inline void drm_release_iommu_mapping(struct drm_device *drm_dev)
65{
66}
67
68static inline int drm_iommu_attach_device(struct drm_device *drm_dev,
69 struct device *subdrv_dev)
70{
71 return 0;
72}
73
74static inline void drm_iommu_detach_device(struct drm_device *drm_dev,
75 struct device *subdrv_dev)
76{
77}
78
79static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
80{
81 return false;
82}
83
84#endif
85#endif
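
A hedged sketch of the guard is_drm_iommu_supported() enables: with an
iommu the GEM layer may hand out scattered pages, without one the
hardware needs contiguous memory. EXYNOS_BO_NONCONTIG/EXYNOS_BO_CONTIG
are the existing exynos GEM flags; the surrounding code is illustrative:

	unsigned int flags;

	if (is_drm_iommu_supported(drm_dev))
		/* device addresses go through the iommu table */
		flags = EXYNOS_BO_NONCONTIG;
	else
		/* no iommu: allocate physically contiguous memory */
		flags = EXYNOS_BO_CONTIG;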
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
new file mode 100644
index 000000000000..49eebe948ed2
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -0,0 +1,2060 @@
1/*
2 * Copyright (C) 2012 Samsung Electronics Co.Ltd
3 * Authors:
4 * Eunchul Kim <chulspro.kim@samsung.com>
5 * Jinyoung Jeon <jy0.jeon@samsung.com>
6 * Sangmin Lee <lsmin.lee@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 */
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/types.h>
18#include <linux/clk.h>
19#include <linux/pm_runtime.h>
20#include <plat/map-base.h>
21
22#include <drm/drmP.h>
23#include <drm/exynos_drm.h>
24#include "exynos_drm_drv.h"
25#include "exynos_drm_gem.h"
26#include "exynos_drm_ipp.h"
27#include "exynos_drm_iommu.h"
28
29/*
30 * IPP stands for Image Post Processing; it supports image
31 * scaler/rotator and input/output DMA operations,
32 * using FIMC, GSC, Rotator and so on.
33 * IPP is an integration driver for hardware blocks of the same kind.
34 */
35
36/*
37 * TODO
38 * 1. expand command control id.
39 * 2. integrate property and config.
40 * 3. remove send_event id check routine.
41 * 4. compare send_event id if needed.
42 * 5. free subdrv_remove notifier callback list if needed.
43 * 6. need to check subdrv_open about multi-open.
44 * 7. implement power_on for power and sysmmu control.
45 */
46
47#define get_ipp_context(dev) platform_get_drvdata(to_platform_device(dev))
48#define ipp_is_m2m_cmd(c) (c == IPP_CMD_M2M)
49
50/*
51 * A structure of event.
52 *
53 * @base: base of event.
54 * @event: ipp event.
55 */
56struct drm_exynos_ipp_send_event {
57 struct drm_pending_event base;
58 struct drm_exynos_ipp_event event;
59};
60
61/*
62 * A structure of memory node.
63 *
64 * @list: list head to memory queue information.
65 * @ops_id: id of operations.
66 * @prop_id: id of property.
67 * @buf_id: id of buffer.
68 * @buf_info: gem objects and dma address, size.
69 * @filp: a pointer to drm_file.
70 */
71struct drm_exynos_ipp_mem_node {
72 struct list_head list;
73 enum drm_exynos_ops_id ops_id;
74 u32 prop_id;
75 u32 buf_id;
76 struct drm_exynos_ipp_buf_info buf_info;
77 struct drm_file *filp;
78};
79
80/*
81 * A structure of ipp context.
82 *
83 * @subdrv: prepare initialization using subdrv.
84 * @ipp_lock: lock for synchronization of access to ipp_idr.
85 * @prop_lock: lock for synchronization of access to prop_idr.
86 * @ipp_idr: ipp driver idr.
87 * @prop_idr: property idr.
88 * @event_workq: event work queue.
89 * @cmd_workq: command work queue.
90 */
91struct ipp_context {
92 struct exynos_drm_subdrv subdrv;
93 struct mutex ipp_lock;
94 struct mutex prop_lock;
95 struct idr ipp_idr;
96 struct idr prop_idr;
97 struct workqueue_struct *event_workq;
98 struct workqueue_struct *cmd_workq;
99};
100
101static LIST_HEAD(exynos_drm_ippdrv_list);
102static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
103static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
104
105int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
106{
107 DRM_DEBUG_KMS("%s\n", __func__);
108
109 if (!ippdrv)
110 return -EINVAL;
111
112 mutex_lock(&exynos_drm_ippdrv_lock);
113 list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
114 mutex_unlock(&exynos_drm_ippdrv_lock);
115
116 return 0;
117}
118
119int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
120{
121 DRM_DEBUG_KMS("%s\n", __func__);
122
123 if (!ippdrv)
124 return -EINVAL;
125
126 mutex_lock(&exynos_drm_ippdrv_lock);
127 list_del(&ippdrv->drv_list);
128 mutex_unlock(&exynos_drm_ippdrv_lock);
129
130 return 0;
131}
132
133static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
134 u32 *idp)
135{
136 int ret;
137
138 DRM_DEBUG_KMS("%s\n", __func__);
139
140again:
141 /* ensure there is space available to allocate a handle */
142 if (idr_pre_get(id_idr, GFP_KERNEL) == 0) {
143 DRM_ERROR("failed to get idr.\n");
144 return -ENOMEM;
145 }
146
147 /* do the allocation under our mutexlock */
148 mutex_lock(lock);
149 ret = idr_get_new_above(id_idr, obj, 1, (int *)idp);
150 mutex_unlock(lock);
151 if (ret == -EAGAIN)
152 goto again;
153
154 return ret;
155}
156
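A side note on the allocation pattern above: idr_pre_get() plus
idr_get_new_above() with an -EAGAIN retry is the pre-3.9 idr idiom. On
kernels that provide idr_alloc(), a sketch of the same helper reduces to:

static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
		u32 *idp)
{
	int ret;

	mutex_lock(lock);
	ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
	mutex_unlock(lock);
	if (ret < 0)
		return ret;

	*idp = ret;
	return 0;
}
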
157static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
158{
159 void *obj;
160
161 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, id);
162
163 mutex_lock(lock);
164
165 /* find object using handle */
166 obj = idr_find(id_idr, id);
167 if (!obj) {
168 DRM_ERROR("failed to find object.\n");
169 mutex_unlock(lock);
170 return ERR_PTR(-ENODEV);
171 }
172
173 mutex_unlock(lock);
174
175 return obj;
176}
177
178static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
179 enum drm_exynos_ipp_cmd cmd)
180{
181 /*
182 * check dedicated flag and WB, OUTPUT operation with
183 * power on state.
184 */
185 if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
186 !pm_runtime_suspended(ippdrv->dev)))
187 return true;
188
189 return false;
190}
191
192static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
193 struct drm_exynos_ipp_property *property)
194{
195 struct exynos_drm_ippdrv *ippdrv;
196 u32 ipp_id = property->ipp_id;
197
198 DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, ipp_id);
199
200 if (ipp_id) {
201 /* find ipp driver using idr */
202 ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
203 ipp_id);
204 if (IS_ERR_OR_NULL(ippdrv)) {
205 DRM_ERROR("not found ipp%d driver.\n", ipp_id);
206 return ippdrv;
207 }
208
209 /*
210	 * WB and OUTPUT operations do not support multi-operation,
211	 * so mark the driver dedicated in the set property ioctl.
212	 * When the ipp driver finishes, the dedicated flag is cleared.
213 */
214 if (ipp_check_dedicated(ippdrv, property->cmd)) {
215			DRM_ERROR("chosen device is already in use.\n");
216 return ERR_PTR(-EBUSY);
217 }
218
219 /*
220		 * This is necessary to find the correct device among the
221		 * ipp drivers: they have different abilities,
222		 * so the property needs to be checked.
223 */
224 if (ippdrv->check_property &&
225 ippdrv->check_property(ippdrv->dev, property)) {
226 DRM_ERROR("not support property.\n");
227 return ERR_PTR(-EINVAL);
228 }
229
230 return ippdrv;
231 } else {
232 /*
233		 * This case searches all ipp drivers.
234		 * The user application did not set ipp_id here,
235		 * so the ipp subsystem picks a suitable driver from the list.
236 */
237 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
238 if (ipp_check_dedicated(ippdrv, property->cmd)) {
239 DRM_DEBUG_KMS("%s:used device.\n", __func__);
240 continue;
241 }
242
243 if (ippdrv->check_property &&
244 ippdrv->check_property(ippdrv->dev, property)) {
245 DRM_DEBUG_KMS("%s:not support property.\n",
246 __func__);
247 continue;
248 }
249
250 return ippdrv;
251 }
252
253		DRM_ERROR("no ipp driver supports the operations.\n");
254 }
255
256 return ERR_PTR(-ENODEV);
257}
258
259static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
260{
261 struct exynos_drm_ippdrv *ippdrv;
262 struct drm_exynos_ipp_cmd_node *c_node;
263 int count = 0;
264
265 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
266
267 if (list_empty(&exynos_drm_ippdrv_list)) {
268 DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
269 return ERR_PTR(-ENODEV);
270 }
271
272 /*
273	 * This case searches for the ipp driver by prop_id handle.
274	 * Sometimes the ipp subsystem finds the driver by prop_id,
275	 * e.g. the PAUSE state, queue buf, command control.
276 */
277 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
278 DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n", __func__,
279 count++, (int)ippdrv);
280
281 if (!list_empty(&ippdrv->cmd_list)) {
282 list_for_each_entry(c_node, &ippdrv->cmd_list, list)
283 if (c_node->property.prop_id == prop_id)
284 return ippdrv;
285 }
286 }
287
288 return ERR_PTR(-ENODEV);
289}
290
291int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
292 struct drm_file *file)
293{
294 struct drm_exynos_file_private *file_priv = file->driver_priv;
295 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
296 struct device *dev = priv->dev;
297 struct ipp_context *ctx = get_ipp_context(dev);
298 struct drm_exynos_ipp_prop_list *prop_list = data;
299 struct exynos_drm_ippdrv *ippdrv;
300 int count = 0;
301
302 DRM_DEBUG_KMS("%s\n", __func__);
303
304 if (!ctx) {
305 DRM_ERROR("invalid context.\n");
306 return -EINVAL;
307 }
308
309 if (!prop_list) {
310 DRM_ERROR("invalid property parameter.\n");
311 return -EINVAL;
312 }
313
314 DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, prop_list->ipp_id);
315
316 if (!prop_list->ipp_id) {
317 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
318 count++;
319 /*
320		 * Report the ippdrv list count to the user application:
321		 * in the first step the application gets the ippdrv count,
322		 * in the second step it gets each capability using ipp_id.
323 */
324 prop_list->count = count;
325 } else {
326 /*
327		 * Get the ippdrv capability by ipp_id.
328		 * Some devices do not support the wb or output interface,
329		 * so the user application detects the correct ipp driver
330		 * using this ioctl.
331 */
332 ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
333 prop_list->ipp_id);
334		if (IS_ERR(ippdrv)) {
335 DRM_ERROR("not found ipp%d driver.\n",
336 prop_list->ipp_id);
337 return -EINVAL;
338 }
339
340		*prop_list = *ippdrv->prop_list;
341 }
342
343 return 0;
344}
345
346static void ipp_print_property(struct drm_exynos_ipp_property *property,
347 int idx)
348{
349 struct drm_exynos_ipp_config *config = &property->config[idx];
350 struct drm_exynos_pos *pos = &config->pos;
351 struct drm_exynos_sz *sz = &config->sz;
352
353 DRM_DEBUG_KMS("%s:prop_id[%d]ops[%s]fmt[0x%x]\n",
354 __func__, property->prop_id, idx ? "dst" : "src", config->fmt);
355
356 DRM_DEBUG_KMS("%s:pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
357 __func__, pos->x, pos->y, pos->w, pos->h,
358 sz->hsize, sz->vsize, config->flip, config->degree);
359}
360
361static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
362{
363 struct exynos_drm_ippdrv *ippdrv;
364 struct drm_exynos_ipp_cmd_node *c_node;
365 u32 prop_id = property->prop_id;
366
367 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
368
369 ippdrv = ipp_find_drv_by_handle(prop_id);
370 if (IS_ERR_OR_NULL(ippdrv)) {
371 DRM_ERROR("failed to get ipp driver.\n");
372 return -EINVAL;
373 }
374
375 /*
376	 * Find the command node in the ippdrv command list.
377	 * When the node matching prop_id is found,
378	 * store the given property information in that command node.
379 */
380 list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
381 if ((c_node->property.prop_id == prop_id) &&
382 (c_node->state == IPP_STATE_STOP)) {
383 DRM_DEBUG_KMS("%s:found cmd[%d]ippdrv[0x%x]\n",
384 __func__, property->cmd, (int)ippdrv);
385
386 c_node->property = *property;
387 return 0;
388 }
389 }
390
391 DRM_ERROR("failed to search property.\n");
392
393 return -EINVAL;
394}
395
396static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
397{
398 struct drm_exynos_ipp_cmd_work *cmd_work;
399
400 DRM_DEBUG_KMS("%s\n", __func__);
401
402 cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
403 if (!cmd_work) {
404 DRM_ERROR("failed to alloc cmd_work.\n");
405 return ERR_PTR(-ENOMEM);
406 }
407
408 INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);
409
410 return cmd_work;
411}
412
413static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
414{
415 struct drm_exynos_ipp_event_work *event_work;
416
417 DRM_DEBUG_KMS("%s\n", __func__);
418
419 event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
420 if (!event_work) {
421 DRM_ERROR("failed to alloc event_work.\n");
422 return ERR_PTR(-ENOMEM);
423 }
424
425 INIT_WORK((struct work_struct *)event_work, ipp_sched_event);
426
427 return event_work;
428}
429
430int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
431 struct drm_file *file)
432{
433 struct drm_exynos_file_private *file_priv = file->driver_priv;
434 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
435 struct device *dev = priv->dev;
436 struct ipp_context *ctx = get_ipp_context(dev);
437 struct drm_exynos_ipp_property *property = data;
438 struct exynos_drm_ippdrv *ippdrv;
439 struct drm_exynos_ipp_cmd_node *c_node;
440 int ret, i;
441
442 DRM_DEBUG_KMS("%s\n", __func__);
443
444 if (!ctx) {
445 DRM_ERROR("invalid context.\n");
446 return -EINVAL;
447 }
448
449 if (!property) {
450 DRM_ERROR("invalid property parameter.\n");
451 return -EINVAL;
452 }
453
454 /*
455	 * Log the property requested by the user application;
456	 * applications may set various properties.
457 */
458 for_each_ipp_ops(i)
459 ipp_print_property(property, i);
460
461 /*
462	 * The set property ioctl normally generates a new prop_id,
463	 * but here a prop_id was already assigned by an earlier set property,
464	 * e.g. the PAUSE state. This case finds the current prop_id and uses
465	 * it instead of allocating a new one.
466 */
467 if (property->prop_id) {
468 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
469 return ipp_find_and_set_property(property);
470 }
471
472 /* find ipp driver using ipp id */
473 ippdrv = ipp_find_driver(ctx, property);
474 if (IS_ERR_OR_NULL(ippdrv)) {
475 DRM_ERROR("failed to get ipp driver.\n");
476 return -EINVAL;
477 }
478
479 /* allocate command node */
480 c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
481 if (!c_node) {
482 DRM_ERROR("failed to allocate map node.\n");
483 return -ENOMEM;
484 }
485
486 /* create property id */
487 ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
488 &property->prop_id);
489 if (ret) {
490 DRM_ERROR("failed to create id.\n");
491 goto err_clear;
492 }
493
494 DRM_DEBUG_KMS("%s:created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
495 __func__, property->prop_id, property->cmd, (int)ippdrv);
496
497 /* stored property information and ippdrv in private data */
498 c_node->priv = priv;
499 c_node->property = *property;
500 c_node->state = IPP_STATE_IDLE;
501
502 c_node->start_work = ipp_create_cmd_work();
503 if (IS_ERR_OR_NULL(c_node->start_work)) {
504 DRM_ERROR("failed to create start work.\n");
505 goto err_clear;
506 }
507
508 c_node->stop_work = ipp_create_cmd_work();
509 if (IS_ERR_OR_NULL(c_node->stop_work)) {
510 DRM_ERROR("failed to create stop work.\n");
511 goto err_free_start;
512 }
513
514 c_node->event_work = ipp_create_event_work();
515 if (IS_ERR_OR_NULL(c_node->event_work)) {
516 DRM_ERROR("failed to create event work.\n");
517 goto err_free_stop;
518 }
519
520 mutex_init(&c_node->cmd_lock);
521 mutex_init(&c_node->mem_lock);
522 mutex_init(&c_node->event_lock);
523
524 init_completion(&c_node->start_complete);
525 init_completion(&c_node->stop_complete);
526
527 for_each_ipp_ops(i)
528 INIT_LIST_HEAD(&c_node->mem_list[i]);
529
530 INIT_LIST_HEAD(&c_node->event_list);
531 list_splice_init(&priv->event_list, &c_node->event_list);
532 list_add_tail(&c_node->list, &ippdrv->cmd_list);
533
534 /* make dedicated state without m2m */
535 if (!ipp_is_m2m_cmd(property->cmd))
536 ippdrv->dedicated = true;
537
538 return 0;
539
540err_free_stop:
541 kfree(c_node->stop_work);
542err_free_start:
543 kfree(c_node->start_work);
544err_clear:
545 kfree(c_node);
546 return ret;
547}
548
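A hedged user-space sketch of driving the ioctl above; the
DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY request and DRM_FORMAT_* codes are
assumed from the matching exynos_drm.h/drm_fourcc.h uapi headers:

#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>

static int example_setup_m2m(int fd, struct drm_exynos_ipp_property *prop)
{
	memset(prop, 0, sizeof(*prop));
	prop->cmd = IPP_CMD_M2M;
	prop->ipp_id = 0;	/* 0: let the subsystem pick a driver */
	prop->config[EXYNOS_DRM_OPS_SRC].fmt = DRM_FORMAT_XRGB8888;
	prop->config[EXYNOS_DRM_OPS_DST].fmt = DRM_FORMAT_NV12;

	if (ioctl(fd, DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY, prop) < 0)
		return -errno;

	/* on success, prop->prop_id identifies the new command node */
	return 0;
}
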
549static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
550{
551 DRM_DEBUG_KMS("%s\n", __func__);
552
553 /* delete list */
554 list_del(&c_node->list);
555
556 /* destroy mutex */
557 mutex_destroy(&c_node->cmd_lock);
558 mutex_destroy(&c_node->mem_lock);
559 mutex_destroy(&c_node->event_lock);
560
561 /* free command node */
562 kfree(c_node->start_work);
563 kfree(c_node->stop_work);
564 kfree(c_node->event_work);
565 kfree(c_node);
566}
567
568static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
569{
570 struct drm_exynos_ipp_property *property = &c_node->property;
571 struct drm_exynos_ipp_mem_node *m_node;
572 struct list_head *head;
573 int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };
574
575 DRM_DEBUG_KMS("%s\n", __func__);
576
577 mutex_lock(&c_node->mem_lock);
578
579 for_each_ipp_ops(i) {
580 /* source/destination memory list */
581 head = &c_node->mem_list[i];
582
583 if (list_empty(head)) {
584 DRM_DEBUG_KMS("%s:%s memory empty.\n", __func__,
585 i ? "dst" : "src");
586 continue;
587 }
588
589 /* find memory node entry */
590 list_for_each_entry(m_node, head, list) {
591 DRM_DEBUG_KMS("%s:%s,count[%d]m_node[0x%x]\n", __func__,
592 i ? "dst" : "src", count[i], (int)m_node);
593 count[i]++;
594 }
595 }
596
597 DRM_DEBUG_KMS("%s:min[%d]max[%d]\n", __func__,
598 min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
599 max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));
600
601 /*
602	 * M2M operations need paired src/dst memory addresses, so use the
603	 * minimum of the src/dst counts (e.g. 2 src + 3 dst allow 2 runs).
604	 * Other cases do not use paired memory, so use the maximum count.
605 */
606 if (ipp_is_m2m_cmd(property->cmd))
607 ret = min(count[EXYNOS_DRM_OPS_SRC],
608 count[EXYNOS_DRM_OPS_DST]);
609 else
610 ret = max(count[EXYNOS_DRM_OPS_SRC],
611 count[EXYNOS_DRM_OPS_DST]);
612
613 mutex_unlock(&c_node->mem_lock);
614
615 return ret;
616}
617
618static struct drm_exynos_ipp_mem_node
619 *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
620 struct drm_exynos_ipp_queue_buf *qbuf)
621{
622 struct drm_exynos_ipp_mem_node *m_node;
623 struct list_head *head;
624 int count = 0;
625
626 DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, qbuf->buf_id);
627
628 /* source/destination memory list */
629 head = &c_node->mem_list[qbuf->ops_id];
630
631 /* find memory node from memory list */
632 list_for_each_entry(m_node, head, list) {
633 DRM_DEBUG_KMS("%s:count[%d]m_node[0x%x]\n",
634 __func__, count++, (int)m_node);
635
636 /* compare buffer id */
637 if (m_node->buf_id == qbuf->buf_id)
638 return m_node;
639 }
640
641 return NULL;
642}
643
644static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
645 struct drm_exynos_ipp_cmd_node *c_node,
646 struct drm_exynos_ipp_mem_node *m_node)
647{
648 struct exynos_drm_ipp_ops *ops = NULL;
649 int ret = 0;
650
651 DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
652
653 if (!m_node) {
654 DRM_ERROR("invalid queue node.\n");
655 return -EFAULT;
656 }
657
658 mutex_lock(&c_node->mem_lock);
659
660 DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
661
662 /* get operations callback */
663 ops = ippdrv->ops[m_node->ops_id];
664 if (!ops) {
665 DRM_ERROR("not support ops.\n");
666 ret = -EFAULT;
667 goto err_unlock;
668 }
669
670 /* set address and enable irq */
671 if (ops->set_addr) {
672 ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
673 m_node->buf_id, IPP_BUF_ENQUEUE);
674 if (ret) {
675 DRM_ERROR("failed to set addr.\n");
676 goto err_unlock;
677 }
678 }
679
680err_unlock:
681 mutex_unlock(&c_node->mem_lock);
682 return ret;
683}
684
685static struct drm_exynos_ipp_mem_node
686 *ipp_get_mem_node(struct drm_device *drm_dev,
687 struct drm_file *file,
688 struct drm_exynos_ipp_cmd_node *c_node,
689 struct drm_exynos_ipp_queue_buf *qbuf)
690{
691 struct drm_exynos_ipp_mem_node *m_node;
692 struct drm_exynos_ipp_buf_info buf_info;
693 void *addr;
694 int i;
695
696 DRM_DEBUG_KMS("%s\n", __func__);
697
698 mutex_lock(&c_node->mem_lock);
699
700 m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
701 if (!m_node) {
702 DRM_ERROR("failed to allocate queue node.\n");
703 goto err_unlock;
704 }
705
706 /* clear base address for error handling */
707 memset(&buf_info, 0x0, sizeof(buf_info));
708
709 /* operations, buffer id */
710 m_node->ops_id = qbuf->ops_id;
711 m_node->prop_id = qbuf->prop_id;
712 m_node->buf_id = qbuf->buf_id;
713
714 DRM_DEBUG_KMS("%s:m_node[0x%x]ops_id[%d]\n", __func__,
715 (int)m_node, qbuf->ops_id);
716 DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]\n", __func__,
717 qbuf->prop_id, m_node->buf_id);
718
719 for_each_ipp_planar(i) {
720 DRM_DEBUG_KMS("%s:i[%d]handle[0x%x]\n", __func__,
721 i, qbuf->handle[i]);
722
723 /* get dma address by handle */
724 if (qbuf->handle[i]) {
725 addr = exynos_drm_gem_get_dma_addr(drm_dev,
726 qbuf->handle[i], file);
727 if (IS_ERR(addr)) {
728 DRM_ERROR("failed to get addr.\n");
729 goto err_clear;
730 }
731
732 buf_info.handles[i] = qbuf->handle[i];
733 buf_info.base[i] = *(dma_addr_t *) addr;
734 DRM_DEBUG_KMS("%s:i[%d]base[0x%x]hd[0x%x]\n",
735 __func__, i, buf_info.base[i],
736 (int)buf_info.handles[i]);
737 }
738 }
739
740 m_node->filp = file;
741 m_node->buf_info = buf_info;
742 list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
743
744 mutex_unlock(&c_node->mem_lock);
745 return m_node;
746
747err_clear:
748 kfree(m_node);
749err_unlock:
750 mutex_unlock(&c_node->mem_lock);
751 return ERR_PTR(-EFAULT);
752}
753
754static int ipp_put_mem_node(struct drm_device *drm_dev,
755 struct drm_exynos_ipp_cmd_node *c_node,
756 struct drm_exynos_ipp_mem_node *m_node)
757{
758 int i;
759
760 DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
761
762 if (!m_node) {
763 DRM_ERROR("invalid dequeue node.\n");
764 return -EFAULT;
765 }
766
767 if (list_empty(&m_node->list)) {
768 DRM_ERROR("empty memory node.\n");
769 return -ENOMEM;
770 }
771
772 mutex_lock(&c_node->mem_lock);
773
774 DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
775
776 /* put gem buffer */
777 for_each_ipp_planar(i) {
778 unsigned long handle = m_node->buf_info.handles[i];
779 if (handle)
780 exynos_drm_gem_put_dma_addr(drm_dev, handle,
781 m_node->filp);
782 }
783
784 /* delete list in queue */
785 list_del(&m_node->list);
786 kfree(m_node);
787
788 mutex_unlock(&c_node->mem_lock);
789
790 return 0;
791}
792
793static void ipp_free_event(struct drm_pending_event *event)
794{
795 kfree(event);
796}
797
798static int ipp_get_event(struct drm_device *drm_dev,
799 struct drm_file *file,
800 struct drm_exynos_ipp_cmd_node *c_node,
801 struct drm_exynos_ipp_queue_buf *qbuf)
802{
803 struct drm_exynos_ipp_send_event *e;
804 unsigned long flags;
805
806 DRM_DEBUG_KMS("%s:ops_id[%d]buf_id[%d]\n", __func__,
807 qbuf->ops_id, qbuf->buf_id);
808
809 e = kzalloc(sizeof(*e), GFP_KERNEL);
810
811 if (!e) {
812 DRM_ERROR("failed to allocate event.\n");
813 spin_lock_irqsave(&drm_dev->event_lock, flags);
814 file->event_space += sizeof(e->event);
815 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
816 return -ENOMEM;
817 }
818
819 /* make event */
820 e->event.base.type = DRM_EXYNOS_IPP_EVENT;
821 e->event.base.length = sizeof(e->event);
822 e->event.user_data = qbuf->user_data;
823 e->event.prop_id = qbuf->prop_id;
824 e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
825 e->base.event = &e->event.base;
826 e->base.file_priv = file;
827 e->base.destroy = ipp_free_event;
828 list_add_tail(&e->base.link, &c_node->event_list);
829
830 return 0;
831}
832
833static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
834 struct drm_exynos_ipp_queue_buf *qbuf)
835{
836 struct drm_exynos_ipp_send_event *e, *te;
837 int count = 0;
838
839 DRM_DEBUG_KMS("%s\n", __func__);
840
841 if (list_empty(&c_node->event_list)) {
842 DRM_DEBUG_KMS("%s:event_list is empty.\n", __func__);
843 return;
844 }
845
846 list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
847 DRM_DEBUG_KMS("%s:count[%d]e[0x%x]\n",
848 __func__, count++, (int)e);
849
850 /*
851	 * qbuf == NULL means delete all events:
852	 * stop operations want the whole event list removed.
853	 * Otherwise delete only the event with the same buf id.
854 */
855 if (!qbuf) {
856 /* delete list */
857 list_del(&e->base.link);
858 kfree(e);
859 }
860
861 /* compare buffer id */
862 if (qbuf && (qbuf->buf_id ==
863 e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
864 /* delete list */
865 list_del(&e->base.link);
866 kfree(e);
867 return;
868 }
869 }
870}
871
872void ipp_handle_cmd_work(struct device *dev,
873 struct exynos_drm_ippdrv *ippdrv,
874 struct drm_exynos_ipp_cmd_work *cmd_work,
875 struct drm_exynos_ipp_cmd_node *c_node)
876{
877 struct ipp_context *ctx = get_ipp_context(dev);
878
879 cmd_work->ippdrv = ippdrv;
880 cmd_work->c_node = c_node;
881 queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
882}
883
884static int ipp_queue_buf_with_run(struct device *dev,
885 struct drm_exynos_ipp_cmd_node *c_node,
886 struct drm_exynos_ipp_mem_node *m_node,
887 struct drm_exynos_ipp_queue_buf *qbuf)
888{
889 struct exynos_drm_ippdrv *ippdrv;
890 struct drm_exynos_ipp_property *property;
891 struct exynos_drm_ipp_ops *ops;
892 int ret;
893
894 DRM_DEBUG_KMS("%s\n", __func__);
895
896 ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
897 if (IS_ERR_OR_NULL(ippdrv)) {
898 DRM_ERROR("failed to get ipp driver.\n");
899 return -EFAULT;
900 }
901
902 ops = ippdrv->ops[qbuf->ops_id];
903 if (!ops) {
904 DRM_ERROR("failed to get ops.\n");
905 return -EFAULT;
906 }
907
908 property = &c_node->property;
909
910 if (c_node->state != IPP_STATE_START) {
911		DRM_DEBUG_KMS("%s:bypass for invalid state.\n", __func__);
912 return 0;
913 }
914
915 if (!ipp_check_mem_list(c_node)) {
916 DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
917 return 0;
918 }
919
920 /*
921	 * If the destination buffer is set and the clock enabled,
922	 * m2m operations need to start from queue_buf.
923 */
924 if (ipp_is_m2m_cmd(property->cmd)) {
925 struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;
926
927 cmd_work->ctrl = IPP_CTRL_PLAY;
928 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
929 } else {
930 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
931 if (ret) {
932 DRM_ERROR("failed to set m node.\n");
933 return ret;
934 }
935 }
936
937 return 0;
938}
939
940static void ipp_clean_queue_buf(struct drm_device *drm_dev,
941 struct drm_exynos_ipp_cmd_node *c_node,
942 struct drm_exynos_ipp_queue_buf *qbuf)
943{
944 struct drm_exynos_ipp_mem_node *m_node, *tm_node;
945
946 DRM_DEBUG_KMS("%s\n", __func__);
947
948 if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
949 /* delete list */
950 list_for_each_entry_safe(m_node, tm_node,
951 &c_node->mem_list[qbuf->ops_id], list) {
952 if (m_node->buf_id == qbuf->buf_id &&
953 m_node->ops_id == qbuf->ops_id)
954 ipp_put_mem_node(drm_dev, c_node, m_node);
955 }
956 }
957}
958
959int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
960 struct drm_file *file)
961{
962 struct drm_exynos_file_private *file_priv = file->driver_priv;
963 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
964 struct device *dev = priv->dev;
965 struct ipp_context *ctx = get_ipp_context(dev);
966 struct drm_exynos_ipp_queue_buf *qbuf = data;
967 struct drm_exynos_ipp_cmd_node *c_node;
968 struct drm_exynos_ipp_mem_node *m_node;
969 int ret;
970
971 DRM_DEBUG_KMS("%s\n", __func__);
972
973 if (!qbuf) {
974 DRM_ERROR("invalid buf parameter.\n");
975 return -EINVAL;
976 }
977
978 if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
979 DRM_ERROR("invalid ops parameter.\n");
980 return -EINVAL;
981 }
982
983 DRM_DEBUG_KMS("%s:prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
984 __func__, qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
985 qbuf->buf_id, qbuf->buf_type);
986
987 /* find command node */
988 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
989 qbuf->prop_id);
990	if (IS_ERR(c_node)) {
991 DRM_ERROR("failed to get command node.\n");
992 return -EFAULT;
993 }
994
995 /* buffer control */
996 switch (qbuf->buf_type) {
997 case IPP_BUF_ENQUEUE:
998 /* get memory node */
999 m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
1000 if (IS_ERR(m_node)) {
1001 DRM_ERROR("failed to get m_node.\n");
1002 return PTR_ERR(m_node);
1003 }
1004
1005 /*
1006		 * The first step gets an event for the destination buffer;
1007		 * the second step, in the M2M case, runs with the destination
1008		 * buffer if needed.
1009 */
1010 if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
1011 /* get event for destination buffer */
1012 ret = ipp_get_event(drm_dev, file, c_node, qbuf);
1013 if (ret) {
1014 DRM_ERROR("failed to get event.\n");
1015 goto err_clean_node;
1016 }
1017
1018 /*
1019			 * The M2M case runs play control for the streaming feature;
1020			 * other cases just set the address and wait.
1021 */
1022 ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
1023 if (ret) {
1024 DRM_ERROR("failed to run command.\n");
1025 goto err_clean_node;
1026 }
1027 }
1028 break;
1029 case IPP_BUF_DEQUEUE:
1030 mutex_lock(&c_node->cmd_lock);
1031
1032 /* put event for destination buffer */
1033 if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
1034 ipp_put_event(c_node, qbuf);
1035
1036 ipp_clean_queue_buf(drm_dev, c_node, qbuf);
1037
1038 mutex_unlock(&c_node->cmd_lock);
1039 break;
1040 default:
1041 DRM_ERROR("invalid buffer control.\n");
1042 return -EINVAL;
1043 }
1044
1045 return 0;
1046
1047err_clean_node:
1048 DRM_ERROR("clean memory nodes.\n");
1049
1050 ipp_clean_queue_buf(drm_dev, c_node, qbuf);
1051 return ret;
1052}
1053
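Continuing the hedged user-space sketch: enqueueing one buffer per side.
gem_handle is assumed to come from an earlier GEM creation ioctl, and
only the first plane is filled in:

static int example_enqueue(int fd, __u32 prop_id, __u32 ops_id,
		__u32 gem_handle)
{
	struct drm_exynos_ipp_queue_buf qbuf;

	memset(&qbuf, 0, sizeof(qbuf));
	qbuf.prop_id = prop_id;
	qbuf.ops_id = ops_id;		/* EXYNOS_DRM_OPS_SRC or _DST */
	qbuf.buf_type = IPP_BUF_ENQUEUE;
	qbuf.buf_id = 0;
	qbuf.handle[0] = gem_handle;	/* first plane only */

	if (ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf) < 0)
		return -errno;

	/* the EXYNOS_DRM_OPS_DST enqueue also arms the completion event */
	return 0;
}
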
1054static bool exynos_drm_ipp_check_valid(struct device *dev,
1055 enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
1056{
1057 DRM_DEBUG_KMS("%s\n", __func__);
1058
1059 if (ctrl != IPP_CTRL_PLAY) {
1060 if (pm_runtime_suspended(dev)) {
1061 DRM_ERROR("pm:runtime_suspended.\n");
1062 goto err_status;
1063 }
1064 }
1065
1066 switch (ctrl) {
1067 case IPP_CTRL_PLAY:
1068 if (state != IPP_STATE_IDLE)
1069 goto err_status;
1070 break;
1071 case IPP_CTRL_STOP:
1072 if (state == IPP_STATE_STOP)
1073 goto err_status;
1074 break;
1075 case IPP_CTRL_PAUSE:
1076 if (state != IPP_STATE_START)
1077 goto err_status;
1078 break;
1079 case IPP_CTRL_RESUME:
1080 if (state != IPP_STATE_STOP)
1081 goto err_status;
1082 break;
1083 default:
1084 DRM_ERROR("invalid state.\n");
1085 goto err_status;
1086 break;
1087 }
1088
1089 return true;
1090
1091err_status:
1092 DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
1093 return false;
1094}
1095
1096int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
1097 struct drm_file *file)
1098{
1099 struct drm_exynos_file_private *file_priv = file->driver_priv;
1100 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
1101 struct exynos_drm_ippdrv *ippdrv = NULL;
1102 struct device *dev = priv->dev;
1103 struct ipp_context *ctx = get_ipp_context(dev);
1104 struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
1105 struct drm_exynos_ipp_cmd_work *cmd_work;
1106 struct drm_exynos_ipp_cmd_node *c_node;
1107
1108 DRM_DEBUG_KMS("%s\n", __func__);
1109
1110 if (!ctx) {
1111 DRM_ERROR("invalid context.\n");
1112 return -EINVAL;
1113 }
1114
1115 if (!cmd_ctrl) {
1116 DRM_ERROR("invalid control parameter.\n");
1117 return -EINVAL;
1118 }
1119
1120 DRM_DEBUG_KMS("%s:ctrl[%d]prop_id[%d]\n", __func__,
1121 cmd_ctrl->ctrl, cmd_ctrl->prop_id);
1122
1123 ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
1124 if (IS_ERR(ippdrv)) {
1125 DRM_ERROR("failed to get ipp driver.\n");
1126 return PTR_ERR(ippdrv);
1127 }
1128
1129 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
1130 cmd_ctrl->prop_id);
1131	if (IS_ERR(c_node)) {
1132 DRM_ERROR("invalid command node list.\n");
1133 return -EINVAL;
1134 }
1135
1136 if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
1137 c_node->state)) {
1138 DRM_ERROR("invalid state.\n");
1139 return -EINVAL;
1140 }
1141
1142 switch (cmd_ctrl->ctrl) {
1143 case IPP_CTRL_PLAY:
1144 if (pm_runtime_suspended(ippdrv->dev))
1145 pm_runtime_get_sync(ippdrv->dev);
1146 c_node->state = IPP_STATE_START;
1147
1148 cmd_work = c_node->start_work;
1149 cmd_work->ctrl = cmd_ctrl->ctrl;
1150 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1152 break;
1153 case IPP_CTRL_STOP:
1154 cmd_work = c_node->stop_work;
1155 cmd_work->ctrl = cmd_ctrl->ctrl;
1156 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1157
1158 if (!wait_for_completion_timeout(&c_node->stop_complete,
1159 msecs_to_jiffies(300))) {
1160 DRM_ERROR("timeout stop:prop_id[%d]\n",
1161 c_node->property.prop_id);
1162 }
1163
1164 c_node->state = IPP_STATE_STOP;
1165 ippdrv->dedicated = false;
1166 ipp_clean_cmd_node(c_node);
1167
1168 if (list_empty(&ippdrv->cmd_list))
1169 pm_runtime_put_sync(ippdrv->dev);
1170 break;
1171 case IPP_CTRL_PAUSE:
1172 cmd_work = c_node->stop_work;
1173 cmd_work->ctrl = cmd_ctrl->ctrl;
1174 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1175
1176 if (!wait_for_completion_timeout(&c_node->stop_complete,
1177 msecs_to_jiffies(200))) {
1178 DRM_ERROR("timeout stop:prop_id[%d]\n",
1179 c_node->property.prop_id);
1180 }
1181
1182 c_node->state = IPP_STATE_STOP;
1183 break;
1184 case IPP_CTRL_RESUME:
1185 c_node->state = IPP_STATE_START;
1186 cmd_work = c_node->start_work;
1187 cmd_work->ctrl = cmd_ctrl->ctrl;
1188 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1189 break;
1190 default:
1191 DRM_ERROR("could not support this state currently.\n");
1192 return -EINVAL;
1193 }
1194
1195 DRM_DEBUG_KMS("%s:done ctrl[%d]prop_id[%d]\n", __func__,
1196 cmd_ctrl->ctrl, cmd_ctrl->prop_id);
1197
1198 return 0;
1199}
1200
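And the last step of the user-space sketch: starting and stopping the
command node. The ioctl name is again assumed from the uapi header:

static int example_ctrl(int fd, __u32 prop_id, __u32 op)
{
	struct drm_exynos_ipp_cmd_ctrl ctrl = {
		.prop_id = prop_id,
		.ctrl = op,	/* IPP_CTRL_PLAY/_STOP/_PAUSE/_RESUME */
	};

	if (ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl) < 0)
		return -errno;
	return 0;
}

A caller would issue IPP_CTRL_PLAY, wait for the DRM_EXYNOS_IPP_EVENT
on the drm fd, then issue IPP_CTRL_STOP.
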
1201int exynos_drm_ippnb_register(struct notifier_block *nb)
1202{
1203 return blocking_notifier_chain_register(
1204 &exynos_drm_ippnb_list, nb);
1205}
1206
1207int exynos_drm_ippnb_unregister(struct notifier_block *nb)
1208{
1209 return blocking_notifier_chain_unregister(
1210 &exynos_drm_ippnb_list, nb);
1211}
1212
1213int exynos_drm_ippnb_send_event(unsigned long val, void *v)
1214{
1215 return blocking_notifier_call_chain(
1216 &exynos_drm_ippnb_list, val, v);
1217}
1218
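A minimal consumer sketch for the notifier chain above: a display driver
interested in writeback registers a notifier_block. example_set_writeback()
is a hypothetical local helper, not part of this patch:

static int example_ippnb_event(struct notifier_block *nb,
		unsigned long val, void *data)
{
	struct drm_exynos_ipp_set_wb *set_wb = data;

	if (val != IPP_SET_WRITEBACK)
		return NOTIFY_DONE;

	/* route or restore the local display path for writeback */
	example_set_writeback(set_wb->enable, set_wb->refresh);
	return NOTIFY_OK;
}

static struct notifier_block example_ippnb = {
	.notifier_call = example_ippnb_event,
};

/* typically from the consumer's probe: */
/* exynos_drm_ippnb_register(&example_ippnb); */
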
1219static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
1220 struct drm_exynos_ipp_property *property)
1221{
1222 struct exynos_drm_ipp_ops *ops = NULL;
1223 bool swap = false;
1224 int ret, i;
1225
1226 if (!property) {
1227 DRM_ERROR("invalid property parameter.\n");
1228 return -EINVAL;
1229 }
1230
1231 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
1232
1233 /* reset h/w block */
1234 if (ippdrv->reset &&
1235 ippdrv->reset(ippdrv->dev)) {
1236 DRM_ERROR("failed to reset.\n");
1237 return -EINVAL;
1238 }
1239
1240 /* set source,destination operations */
1241 for_each_ipp_ops(i) {
1242 struct drm_exynos_ipp_config *config =
1243 &property->config[i];
1244
1245 ops = ippdrv->ops[i];
1246 if (!ops || !config) {
1247 DRM_ERROR("not support ops and config.\n");
1248 return -EINVAL;
1249 }
1250
1251 /* set format */
1252 if (ops->set_fmt) {
1253 ret = ops->set_fmt(ippdrv->dev, config->fmt);
1254 if (ret) {
1255 DRM_ERROR("not support format.\n");
1256 return ret;
1257 }
1258 }
1259
1260 /* set transform for rotation, flip */
1261 if (ops->set_transf) {
1262 ret = ops->set_transf(ippdrv->dev, config->degree,
1263 config->flip, &swap);
1264 if (ret) {
1265				DRM_ERROR("not support transform.\n");
1266 return -EINVAL;
1267 }
1268 }
1269
1270 /* set size */
1271 if (ops->set_size) {
1272 ret = ops->set_size(ippdrv->dev, swap, &config->pos,
1273 &config->sz);
1274 if (ret) {
1275 DRM_ERROR("not support size.\n");
1276 return ret;
1277 }
1278 }
1279 }
1280
1281 return 0;
1282}
1283
1284static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
1285 struct drm_exynos_ipp_cmd_node *c_node)
1286{
1287 struct drm_exynos_ipp_mem_node *m_node;
1288 struct drm_exynos_ipp_property *property = &c_node->property;
1289 struct list_head *head;
1290 int ret, i;
1291
1292 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
1293
1294 /* store command info in ippdrv */
1295 ippdrv->cmd = c_node;
1296
1297 if (!ipp_check_mem_list(c_node)) {
1298 DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
1299 return -ENOMEM;
1300 }
1301
1302 /* set current property in ippdrv */
1303 ret = ipp_set_property(ippdrv, property);
1304 if (ret) {
1305 DRM_ERROR("failed to set property.\n");
1306 ippdrv->cmd = NULL;
1307 return ret;
1308 }
1309
1310 /* check command */
1311 switch (property->cmd) {
1312 case IPP_CMD_M2M:
1313 for_each_ipp_ops(i) {
1314 /* source/destination memory list */
1315 head = &c_node->mem_list[i];
1316
1317 m_node = list_first_entry(head,
1318 struct drm_exynos_ipp_mem_node, list);
1319 if (!m_node) {
1320 DRM_ERROR("failed to get node.\n");
1321 ret = -EFAULT;
1322 return ret;
1323 }
1324
1325 DRM_DEBUG_KMS("%s:m_node[0x%x]\n",
1326 __func__, (int)m_node);
1327
1328 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1329 if (ret) {
1330 DRM_ERROR("failed to set m node.\n");
1331 return ret;
1332 }
1333 }
1334 break;
1335 case IPP_CMD_WB:
1336 /* destination memory list */
1337 head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
1338
1339 list_for_each_entry(m_node, head, list) {
1340 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1341 if (ret) {
1342 DRM_ERROR("failed to set m node.\n");
1343 return ret;
1344 }
1345 }
1346 break;
1347 case IPP_CMD_OUTPUT:
1348 /* source memory list */
1349 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
1350
1351 list_for_each_entry(m_node, head, list) {
1352 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1353 if (ret) {
1354 DRM_ERROR("failed to set m node.\n");
1355 return ret;
1356 }
1357 }
1358 break;
1359 default:
1360 DRM_ERROR("invalid operations.\n");
1361 return -EINVAL;
1362 }
1363
1364 DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, property->cmd);
1365
1366 /* start operations */
1367 if (ippdrv->start) {
1368 ret = ippdrv->start(ippdrv->dev, property->cmd);
1369 if (ret) {
1370 DRM_ERROR("failed to start ops.\n");
1371 return ret;
1372 }
1373 }
1374
1375 return 0;
1376}
1377
1378static int ipp_stop_property(struct drm_device *drm_dev,
1379 struct exynos_drm_ippdrv *ippdrv,
1380 struct drm_exynos_ipp_cmd_node *c_node)
1381{
1382 struct drm_exynos_ipp_mem_node *m_node, *tm_node;
1383 struct drm_exynos_ipp_property *property = &c_node->property;
1384 struct list_head *head;
1385 int ret = 0, i;
1386
1387 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
1388
1389 /* put event */
1390 ipp_put_event(c_node, NULL);
1391
1392 /* check command */
1393 switch (property->cmd) {
1394 case IPP_CMD_M2M:
1395 for_each_ipp_ops(i) {
1396 /* source/destination memory list */
1397 head = &c_node->mem_list[i];
1398
1399 if (list_empty(head)) {
1400 DRM_DEBUG_KMS("%s:mem_list is empty.\n",
1401 __func__);
1402 break;
1403 }
1404
1405 list_for_each_entry_safe(m_node, tm_node,
1406 head, list) {
1407 ret = ipp_put_mem_node(drm_dev, c_node,
1408 m_node);
1409 if (ret) {
1410 DRM_ERROR("failed to put m_node.\n");
1411 goto err_clear;
1412 }
1413 }
1414 }
1415 break;
1416 case IPP_CMD_WB:
1417 /* destination memory list */
1418 head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
1419
1420 if (list_empty(head)) {
1421 DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
1422 break;
1423 }
1424
1425 list_for_each_entry_safe(m_node, tm_node, head, list) {
1426 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1427 if (ret) {
1428 DRM_ERROR("failed to put m_node.\n");
1429 goto err_clear;
1430 }
1431 }
1432 break;
1433 case IPP_CMD_OUTPUT:
1434 /* source memory list */
1435 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
1436
1437 if (list_empty(head)) {
1438 DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
1439 break;
1440 }
1441
1442 list_for_each_entry_safe(m_node, tm_node, head, list) {
1443 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1444 if (ret) {
1445 DRM_ERROR("failed to put m_node.\n");
1446 goto err_clear;
1447 }
1448 }
1449 break;
1450 default:
1451 DRM_ERROR("invalid operations.\n");
1452 ret = -EINVAL;
1453 goto err_clear;
1454 }
1455
1456err_clear:
1457 /* stop operations */
1458 if (ippdrv->stop)
1459 ippdrv->stop(ippdrv->dev, property->cmd);
1460
1461 return ret;
1462}
1463
1464void ipp_sched_cmd(struct work_struct *work)
1465{
1466 struct drm_exynos_ipp_cmd_work *cmd_work =
1467 (struct drm_exynos_ipp_cmd_work *)work;
1468 struct exynos_drm_ippdrv *ippdrv;
1469 struct drm_exynos_ipp_cmd_node *c_node;
1470 struct drm_exynos_ipp_property *property;
1471 int ret;
1472
1473 DRM_DEBUG_KMS("%s\n", __func__);
1474
1475 ippdrv = cmd_work->ippdrv;
1476 if (!ippdrv) {
1477 DRM_ERROR("invalid ippdrv.\n");
1478 return;
1479 }
1480
1481 c_node = cmd_work->c_node;
1482 if (!c_node) {
1483 DRM_ERROR("invalid command node.\n");
1484 return;
1485 }
1486
1487 mutex_lock(&c_node->cmd_lock);
1488
1489 property = &c_node->property;
1490 if (!property) {
1491 DRM_ERROR("failed to get property:prop_id[%d]\n",
1492 c_node->property.prop_id);
1493 goto err_unlock;
1494 }
1495
1496 switch (cmd_work->ctrl) {
1497 case IPP_CTRL_PLAY:
1498 case IPP_CTRL_RESUME:
1499 ret = ipp_start_property(ippdrv, c_node);
1500 if (ret) {
1501 DRM_ERROR("failed to start property:prop_id[%d]\n",
1502 c_node->property.prop_id);
1503 goto err_unlock;
1504 }
1505
1506 /*
1507 * The M2M case performs one unit of work at a time even
1508 * when multiple buffers are queued, so wait here for the
1509 * current data transfer to complete before moving on to
1510 * the next buffer.
1511 */
1512 if (ipp_is_m2m_cmd(property->cmd)) {
1513 if (!wait_for_completion_timeout
1514 (&c_node->start_complete, msecs_to_jiffies(200))) {
1515 DRM_ERROR("timeout event:prop_id[%d]\n",
1516 c_node->property.prop_id);
1517 goto err_unlock;
1518 }
1519 }
1520 break;
1521 case IPP_CTRL_STOP:
1522 case IPP_CTRL_PAUSE:
1523 ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
1524 c_node);
1525 if (ret) {
1526 DRM_ERROR("failed to stop property.\n");
1527 goto err_unlock;
1528 }
1529
1530 complete(&c_node->stop_complete);
1531 break;
1532 default:
1533 DRM_ERROR("unknown control type\n");
1534 break;
1535 }
1536
1537 DRM_DEBUG_KMS("%s:ctrl[%d] done.\n", __func__, cmd_work->ctrl);
1538
1539err_unlock:
1540 mutex_unlock(&c_node->cmd_lock);
1541}
1542
1543static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
1544 struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
1545{
1546 struct drm_device *drm_dev = ippdrv->drm_dev;
1547 struct drm_exynos_ipp_property *property = &c_node->property;
1548 struct drm_exynos_ipp_mem_node *m_node;
1549 struct drm_exynos_ipp_queue_buf qbuf;
1550 struct drm_exynos_ipp_send_event *e;
1551 struct list_head *head;
1552 struct timeval now;
1553 unsigned long flags;
1554 u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
1555 int ret, i;
1556
1557 for_each_ipp_ops(i)
1558 DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
1559 i ? "dst" : "src", buf_id[i]);
1560
1561 if (!drm_dev) {
1562 DRM_ERROR("failed to get drm_dev.\n");
1563 return -EINVAL;
1564 }
1565
1566 if (!property) {
1567 DRM_ERROR("failed to get property.\n");
1568 return -EINVAL;
1569 }
1570
1571 if (list_empty(&c_node->event_list)) {
1572 DRM_DEBUG_KMS("%s:event list is empty.\n", __func__);
1573 return 0;
1574 }
1575
1576 if (!ipp_check_mem_list(c_node)) {
1577 DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
1578 return 0;
1579 }
1580
1581 /* check command */
1582 switch (property->cmd) {
1583 case IPP_CMD_M2M:
1584 for_each_ipp_ops(i) {
1585 /* source/destination memory list */
1586 head = &c_node->mem_list[i];
1587
1588 m_node = list_first_entry(head,
1589 struct drm_exynos_ipp_mem_node, list);
1590 if (!m_node) {
1591 DRM_ERROR("empty memory node.\n");
1592 return -ENOMEM;
1593 }
1594
1595 tbuf_id[i] = m_node->buf_id;
1596 DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
1597 i ? "dst" : "src", tbuf_id[i]);
1598
1599 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1600 if (ret)
1601 DRM_ERROR("failed to put m_node.\n");
1602 }
1603 break;
1604 case IPP_CMD_WB:
1605 /* clear buf for finding */
1606 memset(&qbuf, 0x0, sizeof(qbuf));
1607 qbuf.ops_id = EXYNOS_DRM_OPS_DST;
1608 qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];
1609
1610 /* get memory node entry */
1611 m_node = ipp_find_mem_node(c_node, &qbuf);
1612 if (!m_node) {
1613 DRM_ERROR("empty memory node.\n");
1614 return -ENOMEM;
1615 }
1616
1617 tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;
1618
1619 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1620 if (ret)
1621 DRM_ERROR("failed to put m_node.\n");
1622 break;
1623 case IPP_CMD_OUTPUT:
1624 /* source memory list */
1625 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
1626
1627 m_node = list_first_entry(head,
1628 struct drm_exynos_ipp_mem_node, list);
1629 if (!m_node) {
1630 DRM_ERROR("empty memory node.\n");
1631 return -ENOMEM;
1632 }
1633
1634 tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
1635
1636 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1637 if (ret)
1638 DRM_ERROR("failed to put m_node.\n");
1639 break;
1640 default:
1641 DRM_ERROR("invalid operations.\n");
1642 return -EINVAL;
1643 }
1644
1645 if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
1646 DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
1647 tbuf_id[1], buf_id[1], property->prop_id);
1648
1649 /*
1650 * The command node keeps an event list for destination buffers.
1651 * When a destination buffer is enqueued on the memory list, an
1652 * event is created and linked to the tail of the event list, so
1653 * the first event always corresponds to the first enqueued buffer.
1654 */
1655 e = list_first_entry(&c_node->event_list,
1656 struct drm_exynos_ipp_send_event, base.link);
1657
1658 if (!e) {
1659 DRM_ERROR("empty event.\n");
1660 return -EINVAL;
1661 }
1662
1663 do_gettimeofday(&now);
1664 DRM_DEBUG_KMS("%s:tv_sec[%ld]tv_usec[%ld]\n"
1665 , __func__, now.tv_sec, now.tv_usec);
1666 e->event.tv_sec = now.tv_sec;
1667 e->event.tv_usec = now.tv_usec;
1668 e->event.prop_id = property->prop_id;
1669
1670 /* set buffer id about source destination */
1671 for_each_ipp_ops(i)
1672 e->event.buf_id[i] = tbuf_id[i];
1673
1674 spin_lock_irqsave(&drm_dev->event_lock, flags);
1675 list_move_tail(&e->base.link, &e->base.file_priv->event_list);
1676 wake_up_interruptible(&e->base.file_priv->event_wait);
1677 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
1678
1679 DRM_DEBUG_KMS("%s:done cmd[%d]prop_id[%d]buf_id[%d]\n", __func__,
1680 property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);
1681
1682 return 0;
1683}
1684
1685void ipp_sched_event(struct work_struct *work)
1686{
1687 struct drm_exynos_ipp_event_work *event_work =
1688 (struct drm_exynos_ipp_event_work *)work;
1689 struct exynos_drm_ippdrv *ippdrv;
1690 struct drm_exynos_ipp_cmd_node *c_node;
1691 int ret;
1692
1693 if (!event_work) {
1694 DRM_ERROR("failed to get event_work.\n");
1695 return;
1696 }
1697
1698 DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__,
1699 event_work->buf_id[EXYNOS_DRM_OPS_DST]);
1700
1701 ippdrv = event_work->ippdrv;
1702 if (!ippdrv) {
1703 DRM_ERROR("failed to get ipp driver.\n");
1704 return;
1705 }
1706
1707 c_node = ippdrv->cmd;
1708 if (!c_node) {
1709 DRM_ERROR("failed to get command node.\n");
1710 return;
1711 }
1712
1713 /*
1714 * IPP synchronizes the command thread with the event thread.
1715 * If userland closes the device abruptly, complete the pending
1716 * command here so the command thread is not left waiting;
1717 * otherwise carry on with normal event handling.
1718 */
1719 if (c_node->state != IPP_STATE_START) {
1720 DRM_DEBUG_KMS("%s:bypass state[%d]prop_id[%d]\n",
1721 __func__, c_node->state, c_node->property.prop_id);
1722 goto err_completion;
1723 }
1724
1725 mutex_lock(&c_node->event_lock);
1726
1727 ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
1728 if (ret) {
1729 DRM_ERROR("failed to send event.\n");
1730 goto err_completion;
1731 }
1732
1733err_completion:
1734 if (ipp_is_m2m_cmd(c_node->property.cmd))
1735 complete(&c_node->start_complete);
1736
1737 mutex_unlock(&c_node->event_lock);
1738}
1739
1740static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1741{
1742 struct ipp_context *ctx = get_ipp_context(dev);
1743 struct exynos_drm_ippdrv *ippdrv;
1744 int ret, count = 0;
1745
1746 DRM_DEBUG_KMS("%s\n", __func__);
1747
1748 /* get ipp driver entry */
1749 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1750 ippdrv->drm_dev = drm_dev;
1751
1752 ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
1753 &ippdrv->ipp_id);
1754 if (ret) {
1755 DRM_ERROR("failed to create id.\n");
1756 goto err_idr;
1757 }
1758
1759 DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]ipp_id[%d]\n", __func__,
1760 count++, (int)ippdrv, ippdrv->ipp_id);
1761
1762 if (ippdrv->ipp_id == 0) {
1763 DRM_ERROR("failed to get ipp_id[%d]\n", ippdrv->ipp_id);
1764 ret = -EINVAL;
1765 goto err_idr;
1766 }
1767
1768 /* store parent device for node */
1769 ippdrv->parent_dev = dev;
1770
1771 /* store event work queue and handler */
1772 ippdrv->event_workq = ctx->event_workq;
1773 ippdrv->sched_event = ipp_sched_event;
1774 INIT_LIST_HEAD(&ippdrv->cmd_list);
1775
1776 if (is_drm_iommu_supported(drm_dev)) {
1777 ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
1778 if (ret) {
1779 DRM_ERROR("failed to activate iommu\n");
1780 goto err_iommu;
1781 }
1782 }
1783 }
1784
1785 return 0;
1786
1787err_iommu:
1788 /* roll back iommu attachments on error */
1789 list_for_each_entry_reverse(ippdrv, &exynos_drm_ippdrv_list, drv_list)
1790 if (is_drm_iommu_supported(drm_dev))
1791 drm_iommu_detach_device(drm_dev, ippdrv->dev);
1792
1793err_idr:
1794 idr_remove_all(&ctx->ipp_idr);
1795 idr_remove_all(&ctx->prop_idr);
1796 idr_destroy(&ctx->ipp_idr);
1797 idr_destroy(&ctx->prop_idr);
1798 return ret;
1799}
1800
1801static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
1802{
1803 struct exynos_drm_ippdrv *ippdrv;
1804
1805 DRM_DEBUG_KMS("%s\n", __func__);
1806
1807 /* get ipp driver entry */
1808 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1809 if (is_drm_iommu_supported(drm_dev))
1810 drm_iommu_detach_device(drm_dev, ippdrv->dev);
1811
1812 ippdrv->drm_dev = NULL;
1813 exynos_drm_ippdrv_unregister(ippdrv);
1814 }
1815}
1816
1817static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
1818 struct drm_file *file)
1819{
1820 struct drm_exynos_file_private *file_priv = file->driver_priv;
1821 struct exynos_drm_ipp_private *priv;
1822
1823 DRM_DEBUG_KMS("%s\n", __func__);
1824
1825 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1826 if (!priv) {
1827 DRM_ERROR("failed to allocate priv.\n");
1828 return -ENOMEM;
1829 }
1830 priv->dev = dev;
1831 file_priv->ipp_priv = priv;
1832
1833 INIT_LIST_HEAD(&priv->event_list);
1834
1835 DRM_DEBUG_KMS("%s:done priv[0x%x]\n", __func__, (int)priv);
1836
1837 return 0;
1838}
1839
1840static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
1841 struct drm_file *file)
1842{
1843 struct drm_exynos_file_private *file_priv = file->driver_priv;
1844 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
1845 struct exynos_drm_ippdrv *ippdrv = NULL;
1846 struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
1847 int count = 0;
1848
1849 DRM_DEBUG_KMS("%s:for priv[0x%x]\n", __func__, (int)priv);
1850
1851 if (list_empty(&exynos_drm_ippdrv_list)) {
1852 DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
1853 goto err_clear;
1854 }
1855
1856 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1857 if (list_empty(&ippdrv->cmd_list))
1858 continue;
1859
1860 list_for_each_entry_safe(c_node, tc_node,
1861 &ippdrv->cmd_list, list) {
1862 DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n",
1863 __func__, count++, (int)ippdrv);
1864
1865 if (c_node->priv == priv) {
1866 /*
1867 * Userland went away abnormally (e.g. the process was
1868 * killed and the file closed), so the stop command
1869 * control was never issued. Perform the stop
1870 * operation here on its behalf.
1871 */
1872 if (c_node->state == IPP_STATE_START) {
1873 ipp_stop_property(drm_dev, ippdrv,
1874 c_node);
1875 c_node->state = IPP_STATE_STOP;
1876 }
1877
1878 ippdrv->dedicated = false;
1879 ipp_clean_cmd_node(c_node);
1880 if (list_empty(&ippdrv->cmd_list))
1881 pm_runtime_put_sync(ippdrv->dev);
1882 }
1883 }
1884 }
1885
1886err_clear:
1887 kfree(priv);
1888 return;
1889}
1890
1891static int __devinit ipp_probe(struct platform_device *pdev)
1892{
1893 struct device *dev = &pdev->dev;
1894 struct ipp_context *ctx;
1895 struct exynos_drm_subdrv *subdrv;
1896 int ret;
1897
1898 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1899 if (!ctx)
1900 return -ENOMEM;
1901
1902 DRM_DEBUG_KMS("%s\n", __func__);
1903
1904 mutex_init(&ctx->ipp_lock);
1905 mutex_init(&ctx->prop_lock);
1906
1907 idr_init(&ctx->ipp_idr);
1908 idr_init(&ctx->prop_idr);
1909
1910 /*
1911 * Create a single-threaded workqueue for IPP events.
1912 * IPP drivers queue their event_work here, and this
1913 * event thread then delivers the corresponding event
1914 * to the user process.
1915 */
1916 ctx->event_workq = create_singlethread_workqueue("ipp_event");
1917 if (!ctx->event_workq) {
1918 dev_err(dev, "failed to create event workqueue\n");
1919 ret = -EINVAL;
1920 goto err_clear;
1921 }
1922
1923 /*
1924 * Create a single-threaded workqueue for IPP commands.
1925 * A user process creates a command node through the set
1926 * property ioctl, which builds a start_work and sends it
1927 * to this thread; the command thread then starts the
1928 * property.
1929 */
1930 ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
1931 if (!ctx->cmd_workq) {
1932 dev_err(dev, "failed to create cmd workqueue\n");
1933 ret = -EINVAL;
1934 goto err_event_workq;
1935 }
1936
1937 /* set sub driver information */
1938 subdrv = &ctx->subdrv;
1939 subdrv->dev = dev;
1940 subdrv->probe = ipp_subdrv_probe;
1941 subdrv->remove = ipp_subdrv_remove;
1942 subdrv->open = ipp_subdrv_open;
1943 subdrv->close = ipp_subdrv_close;
1944
1945 platform_set_drvdata(pdev, ctx);
1946
1947 ret = exynos_drm_subdrv_register(subdrv);
1948 if (ret < 0) {
1949 DRM_ERROR("failed to register drm ipp device.\n");
1950 goto err_cmd_workq;
1951 }
1952
1953 dev_info(&pdev->dev, "drm ipp registered successfully.\n");
1954
1955 return 0;
1956
1957err_cmd_workq:
1958 destroy_workqueue(ctx->cmd_workq);
1959err_event_workq:
1960 destroy_workqueue(ctx->event_workq);
1961err_clear:
1962 kfree(ctx);
1963 return ret;
1964}
1965
1966static int __devexit ipp_remove(struct platform_device *pdev)
1967{
1968 struct ipp_context *ctx = platform_get_drvdata(pdev);
1969
1970 DRM_DEBUG_KMS("%s\n", __func__);
1971
1972 /* unregister sub driver */
1973 exynos_drm_subdrv_unregister(&ctx->subdrv);
1974
1975 /* remove,destroy ipp idr */
1976 idr_remove_all(&ctx->ipp_idr);
1977 idr_remove_all(&ctx->prop_idr);
1978 idr_destroy(&ctx->ipp_idr);
1979 idr_destroy(&ctx->prop_idr);
1980
1981 mutex_destroy(&ctx->ipp_lock);
1982 mutex_destroy(&ctx->prop_lock);
1983
1984 /* destroy command, event work queue */
1985 destroy_workqueue(ctx->cmd_workq);
1986 destroy_workqueue(ctx->event_workq);
1987
1988 kfree(ctx);
1989
1990 return 0;
1991}
1992
1993static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
1994{
1995 DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
1996
1997 return 0;
1998}
1999
2000#ifdef CONFIG_PM_SLEEP
2001static int ipp_suspend(struct device *dev)
2002{
2003 struct ipp_context *ctx = get_ipp_context(dev);
2004
2005 DRM_DEBUG_KMS("%s\n", __func__);
2006
2007 if (pm_runtime_suspended(dev))
2008 return 0;
2009
2010 return ipp_power_ctrl(ctx, false);
2011}
2012
2013static int ipp_resume(struct device *dev)
2014{
2015 struct ipp_context *ctx = get_ipp_context(dev);
2016
2017 DRM_DEBUG_KMS("%s\n", __func__);
2018
2019 if (!pm_runtime_suspended(dev))
2020 return ipp_power_ctrl(ctx, true);
2021
2022 return 0;
2023}
2024#endif
2025
2026#ifdef CONFIG_PM_RUNTIME
2027static int ipp_runtime_suspend(struct device *dev)
2028{
2029 struct ipp_context *ctx = get_ipp_context(dev);
2030
2031 DRM_DEBUG_KMS("%s\n", __func__);
2032
2033 return ipp_power_ctrl(ctx, false);
2034}
2035
2036static int ipp_runtime_resume(struct device *dev)
2037{
2038 struct ipp_context *ctx = get_ipp_context(dev);
2039
2040 DRM_DEBUG_KMS("%s\n", __func__);
2041
2042 return ipp_power_ctrl(ctx, true);
2043}
2044#endif
2045
2046static const struct dev_pm_ops ipp_pm_ops = {
2047 SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
2048 SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
2049};
2050
2051struct platform_driver ipp_driver = {
2052 .probe = ipp_probe,
2053 .remove = __devexit_p(ipp_remove),
2054 .driver = {
2055 .name = "exynos-drm-ipp",
2056 .owner = THIS_MODULE,
2057 .pm = &ipp_pm_ops,
2058 },
2059};
2060
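/*
 * For context, a sketch (not an excerpt from this patch): the core
 * exynos_drm_drv.c is expected to register this platform driver when
 * CONFIG_DRM_EXYNOS_IPP is enabled, roughly:
 *
 *	#ifdef CONFIG_DRM_EXYNOS_IPP
 *	ret = platform_driver_register(&ipp_driver);
 *	#endif
 *
 * with a matching platform_driver_unregister() on the exit path.
 */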
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
new file mode 100644
index 000000000000..28ffac95386c
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -0,0 +1,266 @@
1/*
2 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
3 *
4 * Authors:
5 * Eunchul Kim <chulspro.kim@samsung.com>
6 * Jinyoung Jeon <jy0.jeon@samsung.com>
7 * Sangmin Lee <lsmin.lee@samsung.com>
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */
28
29#ifndef _EXYNOS_DRM_IPP_H_
30#define _EXYNOS_DRM_IPP_H_
31
32#define for_each_ipp_ops(pos) \
33 for (pos = 0; pos < EXYNOS_DRM_OPS_MAX; pos++)
34#define for_each_ipp_planar(pos) \
35 for (pos = 0; pos < EXYNOS_DRM_PLANAR_MAX; pos++)
36
37#define IPP_GET_LCD_WIDTH _IOR('F', 302, int)
38#define IPP_GET_LCD_HEIGHT _IOR('F', 303, int)
39#define IPP_SET_WRITEBACK _IOW('F', 304, u32)
40
41/* definition of state */
42enum drm_exynos_ipp_state {
43 IPP_STATE_IDLE,
44 IPP_STATE_START,
45 IPP_STATE_STOP,
46};
47
48/*
49 * A structure of command work information.
50 * @work: work structure.
51 * @ippdrv: current work ippdrv.
52 * @c_node: command node information.
53 * @ctrl: command control.
54 */
55struct drm_exynos_ipp_cmd_work {
56 struct work_struct work;
57 struct exynos_drm_ippdrv *ippdrv;
58 struct drm_exynos_ipp_cmd_node *c_node;
59 enum drm_exynos_ipp_ctrl ctrl;
60};
61
62/*
63 * A structure of command node.
64 *
65 * @priv: IPP private information.
66 * @list: list head to command queue information.
67 * @event_list: list head of event.
68 * @mem_list: list head to source,destination memory queue information.
69 * @cmd_lock: lock for synchronization of access to ioctl.
70 * @mem_lock: lock for synchronization of access to memory nodes.
71 * @event_lock: lock for synchronization of access to scheduled event.
72 * @start_complete: completion of start of command.
73 * @stop_complete: completion of stop of command.
74 * @property: property information.
75 * @start_work: start command work structure.
76 * @stop_work: stop command work structure.
77 * @event_work: event work structure.
78 * @state: state of command node.
79 */
80struct drm_exynos_ipp_cmd_node {
81 struct exynos_drm_ipp_private *priv;
82 struct list_head list;
83 struct list_head event_list;
84 struct list_head mem_list[EXYNOS_DRM_OPS_MAX];
85 struct mutex cmd_lock;
86 struct mutex mem_lock;
87 struct mutex event_lock;
88 struct completion start_complete;
89 struct completion stop_complete;
90 struct drm_exynos_ipp_property property;
91 struct drm_exynos_ipp_cmd_work *start_work;
92 struct drm_exynos_ipp_cmd_work *stop_work;
93 struct drm_exynos_ipp_event_work *event_work;
94 enum drm_exynos_ipp_state state;
95};
96
97/*
98 * A structure of buffer information.
99 *
100 * @handles: Y, Cb, Cr each gem object handle.
101 * @base: Y, Cb, Cr each planar address.
102 */
103struct drm_exynos_ipp_buf_info {
104 unsigned long handles[EXYNOS_DRM_PLANAR_MAX];
105 dma_addr_t base[EXYNOS_DRM_PLANAR_MAX];
106};
107
108/*
109 * A structure of writeback setting information.
110 *
111 * @enable: enable flag for wb.
112 * @refresh: HZ of the refresh rate.
113 */
114struct drm_exynos_ipp_set_wb {
115 __u32 enable;
116 __u32 refresh;
117};
118
119/*
120 * A structure of event work information.
121 *
122 * @work: work structure.
123 * @ippdrv: current work ippdrv.
124 * @buf_id: id of src, dst buffer.
125 */
126struct drm_exynos_ipp_event_work {
127 struct work_struct work;
128 struct exynos_drm_ippdrv *ippdrv;
129 u32 buf_id[EXYNOS_DRM_OPS_MAX];
130};
131
132/*
133 * A structure of source,destination operations.
134 *
135 * @set_fmt: set format of image.
136 * @set_transf: set transform(rotations, flip).
137 * @set_size: set size of region.
138 * @set_addr: set address for dma.
139 */
140struct exynos_drm_ipp_ops {
141 int (*set_fmt)(struct device *dev, u32 fmt);
142 int (*set_transf)(struct device *dev,
143 enum drm_exynos_degree degree,
144 enum drm_exynos_flip flip, bool *swap);
145 int (*set_size)(struct device *dev, int swap,
146 struct drm_exynos_pos *pos, struct drm_exynos_sz *sz);
147 int (*set_addr)(struct device *dev,
148 struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
149 enum drm_exynos_ipp_buf_type buf_type);
150};
151
152/*
153 * A structure of ipp driver.
154 *
155 * @drv_list: list head for registered sub driver information.
156 * @parent_dev: parent device information.
157 * @dev: platform device.
158 * @drm_dev: drm device.
159 * @ipp_id: id of ipp driver.
160 * @dedicated: dedicated ipp device.
161 * @ops: source, destination operations.
162 * @event_workq: event work queue.
163 * @cmd: current command information.
164 * @cmd_list: list head for command information.
165 * @prop_list: property information of current ipp driver.
166 * @check_property: check property about format, size, buffer.
167 * @reset: reset ipp block.
168 * @start: ipp each device start.
169 * @stop: ipp each device stop.
170 * @sched_event: work schedule handler.
171 */
172struct exynos_drm_ippdrv {
173 struct list_head drv_list;
174 struct device *parent_dev;
175 struct device *dev;
176 struct drm_device *drm_dev;
177 u32 ipp_id;
178 bool dedicated;
179 struct exynos_drm_ipp_ops *ops[EXYNOS_DRM_OPS_MAX];
180 struct workqueue_struct *event_workq;
181 struct drm_exynos_ipp_cmd_node *cmd;
182 struct list_head cmd_list;
183 struct drm_exynos_ipp_prop_list *prop_list;
184
185 int (*check_property)(struct device *dev,
186 struct drm_exynos_ipp_property *property);
187 int (*reset)(struct device *dev);
188 int (*start)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
189 void (*stop)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
190 void (*sched_event)(struct work_struct *work);
191};
192
193#ifdef CONFIG_DRM_EXYNOS_IPP
194extern int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv);
195extern int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv);
196extern int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
197 struct drm_file *file);
198extern int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
199 struct drm_file *file);
200extern int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
201 struct drm_file *file);
202extern int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
203 struct drm_file *file);
204extern int exynos_drm_ippnb_register(struct notifier_block *nb);
205extern int exynos_drm_ippnb_unregister(struct notifier_block *nb);
206extern int exynos_drm_ippnb_send_event(unsigned long val, void *v);
207extern void ipp_sched_cmd(struct work_struct *work);
208extern void ipp_sched_event(struct work_struct *work);
209
210#else
211static inline int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
212{
213 return -ENODEV;
214}
215
216static inline int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
217{
218 return -ENODEV;
219}
220
221static inline int exynos_drm_ipp_get_property(struct drm_device *drm_dev,
222 void *data,
223 struct drm_file *file_priv)
224{
225 return -ENOTTY;
226}
227
228static inline int exynos_drm_ipp_set_property(struct drm_device *drm_dev,
229 void *data,
230 struct drm_file *file_priv)
231{
232 return -ENOTTY;
233}
234
235static inline int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev,
236 void *data,
237 struct drm_file *file)
238{
239 return -ENOTTY;
240}
241
242static inline int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev,
243 void *data,
244 struct drm_file *file)
245{
246 return -ENOTTY;
247}
248
249static inline int exynos_drm_ippnb_register(struct notifier_block *nb)
250{
251 return -ENODEV;
252}
253
254static inline int exynos_drm_ippnb_unregister(struct notifier_block *nb)
255{
256 return -ENODEV;
257}
258
259static inline int exynos_drm_ippnb_send_event(unsigned long val, void *v)
260{
261 return -ENOTTY;
262}
263#endif
264
265#endif /* _EXYNOS_DRM_IPP_H_ */
266
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 862ca1eb2102..83efc662d65a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -40,7 +40,7 @@ static const uint32_t formats[] = {
40 * CRTC ---------------- 40 * CRTC ----------------
41 * ^ start ^ end 41 * ^ start ^ end
42 * 42 *
43 * There are six cases from a to b. 43 * There are six cases from a to f.
44 * 44 *
45 * <----- SCREEN -----> 45 * <----- SCREEN ----->
46 * 0 last 46 * 0 last
@@ -93,11 +93,9 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
93 } 93 }
94 94
95 overlay->dma_addr[i] = buffer->dma_addr; 95 overlay->dma_addr[i] = buffer->dma_addr;
96 overlay->vaddr[i] = buffer->kvaddr;
97 96
98 DRM_DEBUG_KMS("buffer: %d, vaddr = 0x%lx, dma_addr = 0x%lx\n", 97 DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",
99 i, (unsigned long)overlay->vaddr[i], 98 i, (unsigned long)overlay->dma_addr[i]);
100 (unsigned long)overlay->dma_addr[i]);
101 } 99 }
102 100
103 actual_w = exynos_plane_get_size(crtc_x, crtc_w, crtc->mode.hdisplay); 101 actual_w = exynos_plane_get_size(crtc_x, crtc_w, crtc->mode.hdisplay);
@@ -106,16 +104,12 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
106 if (crtc_x < 0) { 104 if (crtc_x < 0) {
107 if (actual_w) 105 if (actual_w)
108 src_x -= crtc_x; 106 src_x -= crtc_x;
109 else
110 src_x += crtc_w;
111 crtc_x = 0; 107 crtc_x = 0;
112 } 108 }
113 109
114 if (crtc_y < 0) { 110 if (crtc_y < 0) {
115 if (actual_h) 111 if (actual_h)
116 src_y -= crtc_y; 112 src_y -= crtc_y;
117 else
118 src_y += crtc_h;
119 crtc_y = 0; 113 crtc_y = 0;
120 } 114 }
121 115
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
new file mode 100644
index 000000000000..1c2366083c70
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -0,0 +1,855 @@
1/*
2 * Copyright (C) 2012 Samsung Electronics Co.Ltd
3 * Authors:
4 * YoungJun Cho <yj44.cho@samsung.com>
5 * Eunchul Kim <chulspro.kim@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/err.h>
15#include <linux/interrupt.h>
16#include <linux/io.h>
17#include <linux/platform_device.h>
18#include <linux/clk.h>
19#include <linux/pm_runtime.h>
20
21#include <drm/drmP.h>
22#include <drm/exynos_drm.h>
23#include "regs-rotator.h"
24#include "exynos_drm.h"
25#include "exynos_drm_ipp.h"
26
27/*
28 * Rotator supports image crop/rotation and input/output DMA operations.
29 * Input DMA reads image data from memory and output DMA writes image
30 * data back to memory.
31 *
32 * M2M operation : crop and rotation (no scaling or CSC).
33 * Memory ----> Rotator H/W ----> Memory.
34 */
35
36/*
37 * TODO
38 * 1. check suspend/resume api if needed.
39 * 2. need to check use case platform_device_id.
40 * 3. check src/dst size (width, height).
41 * 4. need to add supported list in prop_list.
42 */
43
44#define get_rot_context(dev) platform_get_drvdata(to_platform_device(dev))
45#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
46 struct rot_context, ippdrv)
47#define rot_read(offset) readl(rot->regs + (offset))
48#define rot_write(cfg, offset) writel(cfg, rot->regs + (offset))
49
50enum rot_irq_status {
51 ROT_IRQ_STATUS_COMPLETE = 8,
52 ROT_IRQ_STATUS_ILLEGAL = 9,
53};
54
55/*
56 * A structure of limitation.
57 *
58 * @min_w: minimum width.
59 * @min_h: minimum height.
60 * @max_w: maximum width.
61 * @max_h: maximum height.
62 * @align: align size.
63 */
64struct rot_limit {
65 u32 min_w;
66 u32 min_h;
67 u32 max_w;
68 u32 max_h;
69 u32 align;
70};
71
72/*
73 * A structure of limitation table.
74 *
75 * @ycbcr420_2p: case of YUV.
76 * @rgb888: case of RGB.
77 */
78struct rot_limit_table {
79 struct rot_limit ycbcr420_2p;
80 struct rot_limit rgb888;
81};
82
83/*
84 * A structure of rotator context.
85 * @ippdrv: ipp driver structure for this device.
86 * @regs_res: register resources.
87 * @regs: memory mapped io registers.
88 * @clock: rotator gate clock.
89 * @limit_tbl: limitation of rotator.
90 * @irq: irq number.
91 * @cur_buf_id: current operation buffer id.
92 * @suspended: suspended state.
93 */
94struct rot_context {
95 struct exynos_drm_ippdrv ippdrv;
96 struct resource *regs_res;
97 void __iomem *regs;
98 struct clk *clock;
99 struct rot_limit_table *limit_tbl;
100 int irq;
101 int cur_buf_id[EXYNOS_DRM_OPS_MAX];
102 bool suspended;
103};
104
105static void rotator_reg_set_irq(struct rot_context *rot, bool enable)
106{
107 u32 val = rot_read(ROT_CONFIG);
108
109 if (enable)
110 val |= ROT_CONFIG_IRQ;
111 else
112 val &= ~ROT_CONFIG_IRQ;
113
114 rot_write(val, ROT_CONFIG);
115}
116
117static u32 rotator_reg_get_fmt(struct rot_context *rot)
118{
119 u32 val = rot_read(ROT_CONTROL);
120
121 val &= ROT_CONTROL_FMT_MASK;
122
123 return val;
124}
125
126static enum rot_irq_status rotator_reg_get_irq_status(struct rot_context *rot)
127{
128 u32 val = rot_read(ROT_STATUS);
129
130 val = ROT_STATUS_IRQ(val);
131
132 if (val == ROT_STATUS_IRQ_VAL_COMPLETE)
133 return ROT_IRQ_STATUS_COMPLETE;
134
135 return ROT_IRQ_STATUS_ILLEGAL;
136}
137
138static irqreturn_t rotator_irq_handler(int irq, void *arg)
139{
140 struct rot_context *rot = arg;
141 struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
142 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
143 struct drm_exynos_ipp_event_work *event_work = c_node->event_work;
144 enum rot_irq_status irq_status;
145 u32 val;
146
147 /* Get execution result */
148 irq_status = rotator_reg_get_irq_status(rot);
149
150 /* clear status */
151 val = rot_read(ROT_STATUS);
152 val |= ROT_STATUS_IRQ_PENDING((u32)irq_status);
153 rot_write(val, ROT_STATUS);
154
155 if (irq_status == ROT_IRQ_STATUS_COMPLETE) {
156 event_work->ippdrv = ippdrv;
157 event_work->buf_id[EXYNOS_DRM_OPS_DST] =
158 rot->cur_buf_id[EXYNOS_DRM_OPS_DST];
159 queue_work(ippdrv->event_workq,
160 (struct work_struct *)event_work);
161 } else
162 DRM_ERROR("the SFR is set illegally\n");
163
164 return IRQ_HANDLED;
165}
166
167static void rotator_align_size(struct rot_context *rot, u32 fmt, u32 *hsize,
168 u32 *vsize)
169{
170 struct rot_limit_table *limit_tbl = rot->limit_tbl;
171 struct rot_limit *limit;
172 u32 mask, val;
173
174 /* Get size limit */
175 if (fmt == ROT_CONTROL_FMT_RGB888)
176 limit = &limit_tbl->rgb888;
177 else
178 limit = &limit_tbl->ycbcr420_2p;
179
180 /* Get mask for rounding to nearest aligned val */
181 mask = ~((1 << limit->align) - 1);
182
183 /* Set aligned width */
184 val = ROT_ALIGN(*hsize, limit->align, mask);
185 if (val < limit->min_w)
186 *hsize = ROT_MIN(limit->min_w, mask);
187 else if (val > limit->max_w)
188 *hsize = ROT_MAX(limit->max_w, mask);
189 else
190 *hsize = val;
191
192 /* Set aligned height */
193 val = ROT_ALIGN(*vsize, limit->align, mask);
194 if (val < limit->min_h)
195 *vsize = ROT_MIN(limit->min_h, mask);
196 else if (val > limit->max_h)
197 *vsize = ROT_MAX(limit->max_h, mask);
198 else
199 *vsize = val;
200}
201
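/*
 * Worked example (illustrative; assumes ROT_ALIGN in regs-rotator.h rounds
 * to the nearest multiple of (1 << align) and that ROT_MIN/ROT_MAX keep the
 * limits on the same grid): for RGB888, align = 2, so sizes snap to a
 * 4-pixel grid within [8, SZ_8K]. A requested hsize of 1022 becomes 1024,
 * while a requested hsize of 5 aligns to 4, falls below min_w = 8, and is
 * clamped up to 8.
 */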
202static int rotator_src_set_fmt(struct device *dev, u32 fmt)
203{
204 struct rot_context *rot = dev_get_drvdata(dev);
205 u32 val;
206
207 val = rot_read(ROT_CONTROL);
208 val &= ~ROT_CONTROL_FMT_MASK;
209
210 switch (fmt) {
211 case DRM_FORMAT_NV12:
212 val |= ROT_CONTROL_FMT_YCBCR420_2P;
213 break;
214 case DRM_FORMAT_XRGB8888:
215 val |= ROT_CONTROL_FMT_RGB888;
216 break;
217 default:
218 DRM_ERROR("invalid image format\n");
219 return -EINVAL;
220 }
221
222 rot_write(val, ROT_CONTROL);
223
224 return 0;
225}
226
227static inline bool rotator_check_reg_fmt(u32 fmt)
228{
229 if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) ||
230 (fmt == ROT_CONTROL_FMT_RGB888))
231 return true;
232
233 return false;
234}
235
236static int rotator_src_set_size(struct device *dev, int swap,
237 struct drm_exynos_pos *pos,
238 struct drm_exynos_sz *sz)
239{
240 struct rot_context *rot = dev_get_drvdata(dev);
241 u32 fmt, hsize, vsize;
242 u32 val;
243
244 /* Get format */
245 fmt = rotator_reg_get_fmt(rot);
246 if (!rotator_check_reg_fmt(fmt)) {
247 DRM_ERROR("%s:invalid format.\n", __func__);
248 return -EINVAL;
249 }
250
251 /* Align buffer size */
252 hsize = sz->hsize;
253 vsize = sz->vsize;
254 rotator_align_size(rot, fmt, &hsize, &vsize);
255
256 /* Set buffer size configuration */
257 val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
258 rot_write(val, ROT_SRC_BUF_SIZE);
259
260 /* Set crop image position configuration */
261 val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
262 rot_write(val, ROT_SRC_CROP_POS);
263 val = ROT_SRC_CROP_SIZE_H(pos->h) | ROT_SRC_CROP_SIZE_W(pos->w);
264 rot_write(val, ROT_SRC_CROP_SIZE);
265
266 return 0;
267}
268
269static int rotator_src_set_addr(struct device *dev,
270 struct drm_exynos_ipp_buf_info *buf_info,
271 u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
272{
273 struct rot_context *rot = dev_get_drvdata(dev);
274 dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
275 u32 val, fmt, hsize, vsize;
276 int i;
277
278 /* Set current buf_id */
279 rot->cur_buf_id[EXYNOS_DRM_OPS_SRC] = buf_id;
280
281 switch (buf_type) {
282 case IPP_BUF_ENQUEUE:
283 /* Set address configuration */
284 for_each_ipp_planar(i)
285 addr[i] = buf_info->base[i];
286
287 /* Get format */
288 fmt = rotator_reg_get_fmt(rot);
289 if (!rotator_check_reg_fmt(fmt)) {
290 DRM_ERROR("%s:invalid format.\n", __func__);
291 return -EINVAL;
292 }
293
294 /* Re-set cb planar for NV12 format */
295 if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
296 !addr[EXYNOS_DRM_PLANAR_CB]) {
297
298 val = rot_read(ROT_SRC_BUF_SIZE);
299 hsize = ROT_GET_BUF_SIZE_W(val);
300 vsize = ROT_GET_BUF_SIZE_H(val);
301
302 /* Set cb planar */
303 addr[EXYNOS_DRM_PLANAR_CB] =
304 addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
305 }
306
307 for_each_ipp_planar(i)
308 rot_write(addr[i], ROT_SRC_BUF_ADDR(i));
309 break;
310 case IPP_BUF_DEQUEUE:
311 for_each_ipp_planar(i)
312 rot_write(0x0, ROT_SRC_BUF_ADDR(i));
313 break;
314 default:
315 /* Nothing to do */
316 break;
317 }
318
319 return 0;
320}
321
322static int rotator_dst_set_transf(struct device *dev,
323 enum drm_exynos_degree degree,
324 enum drm_exynos_flip flip, bool *swap)
325{
326 struct rot_context *rot = dev_get_drvdata(dev);
327 u32 val;
328
329 /* Set transform configuration */
330 val = rot_read(ROT_CONTROL);
331 val &= ~ROT_CONTROL_FLIP_MASK;
332
333 switch (flip) {
334 case EXYNOS_DRM_FLIP_VERTICAL:
335 val |= ROT_CONTROL_FLIP_VERTICAL;
336 break;
337 case EXYNOS_DRM_FLIP_HORIZONTAL:
338 val |= ROT_CONTROL_FLIP_HORIZONTAL;
339 break;
340 default:
341 /* Flip None */
342 break;
343 }
344
345 val &= ~ROT_CONTROL_ROT_MASK;
346
347 switch (degree) {
348 case EXYNOS_DRM_DEGREE_90:
349 val |= ROT_CONTROL_ROT_90;
350 break;
351 case EXYNOS_DRM_DEGREE_180:
352 val |= ROT_CONTROL_ROT_180;
353 break;
354 case EXYNOS_DRM_DEGREE_270:
355 val |= ROT_CONTROL_ROT_270;
356 break;
357 default:
358 /* Rotation 0 Degree */
359 break;
360 }
361
362 rot_write(val, ROT_CONTROL);
363
364 /* Check degree for setting buffer size swap */
365 if ((degree == EXYNOS_DRM_DEGREE_90) ||
366 (degree == EXYNOS_DRM_DEGREE_270))
367 *swap = true;
368 else
369 *swap = false;
370
371 return 0;
372}
373
374static int rotator_dst_set_size(struct device *dev, int swap,
375 struct drm_exynos_pos *pos,
376 struct drm_exynos_sz *sz)
377{
378 struct rot_context *rot = dev_get_drvdata(dev);
379 u32 val, fmt, hsize, vsize;
380
381 /* Get format */
382 fmt = rotator_reg_get_fmt(rot);
383 if (!rotator_check_reg_fmt(fmt)) {
384 DRM_ERROR("%s:invalid format.\n", __func__);
385 return -EINVAL;
386 }
387
388 /* Align buffer size */
389 hsize = sz->hsize;
390 vsize = sz->vsize;
391 rotator_align_size(rot, fmt, &hsize, &vsize);
392
393 /* Set buffer size configuration */
394 val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
395 rot_write(val, ROT_DST_BUF_SIZE);
396
397 /* Set crop image position configuration */
398 val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
399 rot_write(val, ROT_DST_CROP_POS);
400
401 return 0;
402}
403
404static int rotator_dst_set_addr(struct device *dev,
405 struct drm_exynos_ipp_buf_info *buf_info,
406 u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
407{
408 struct rot_context *rot = dev_get_drvdata(dev);
409 dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
410 u32 val, fmt, hsize, vsize;
411 int i;
412
413 /* Set current buf_id */
414 rot->cur_buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
415
416 switch (buf_type) {
417 case IPP_BUF_ENQUEUE:
418 /* Set address configuration */
419 for_each_ipp_planar(i)
420 addr[i] = buf_info->base[i];
421
422 /* Get format */
423 fmt = rotator_reg_get_fmt(rot);
424 if (!rotator_check_reg_fmt(fmt)) {
425 DRM_ERROR("%s:invalid format.\n", __func__);
426 return -EINVAL;
427 }
428
429 /* Re-set cb planar for NV12 format */
430 if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
431 !addr[EXYNOS_DRM_PLANAR_CB]) {
432 /* Get buf size */
433 val = rot_read(ROT_DST_BUF_SIZE);
434
435 hsize = ROT_GET_BUF_SIZE_W(val);
436 vsize = ROT_GET_BUF_SIZE_H(val);
437
438 /* Set cb planar */
439 addr[EXYNOS_DRM_PLANAR_CB] =
440 addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
441 }
442
443 for_each_ipp_planar(i)
444 rot_write(addr[i], ROT_DST_BUF_ADDR(i));
445 break;
446 case IPP_BUF_DEQUEUE:
447 for_each_ipp_planar(i)
448 rot_write(0x0, ROT_DST_BUF_ADDR(i));
449 break;
450 default:
451 /* Nothing to do */
452 break;
453 }
454
455 return 0;
456}
457
458static struct exynos_drm_ipp_ops rot_src_ops = {
459 .set_fmt = rotator_src_set_fmt,
460 .set_size = rotator_src_set_size,
461 .set_addr = rotator_src_set_addr,
462};
463
464static struct exynos_drm_ipp_ops rot_dst_ops = {
465 .set_transf = rotator_dst_set_transf,
466 .set_size = rotator_dst_set_size,
467 .set_addr = rotator_dst_set_addr,
468};
469
470static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
471{
472 struct drm_exynos_ipp_prop_list *prop_list;
473
474 DRM_DEBUG_KMS("%s\n", __func__);
475
476 prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
477 if (!prop_list) {
478 DRM_ERROR("failed to alloc property list.\n");
479 return -ENOMEM;
480 }
481
482 prop_list->version = 1;
483 prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
484 (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
485 prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
486 (1 << EXYNOS_DRM_DEGREE_90) |
487 (1 << EXYNOS_DRM_DEGREE_180) |
488 (1 << EXYNOS_DRM_DEGREE_270);
489 prop_list->csc = 0;
490 prop_list->crop = 0;
491 prop_list->scale = 0;
492
493 ippdrv->prop_list = prop_list;
494
495 return 0;
496}
497
498static inline bool rotator_check_drm_fmt(u32 fmt)
499{
500 switch (fmt) {
501 case DRM_FORMAT_XRGB8888:
502 case DRM_FORMAT_NV12:
503 return true;
504 default:
505 DRM_DEBUG_KMS("%s:not support format\n", __func__);
506 return false;
507 }
508}
509
510static inline bool rotator_check_drm_flip(enum drm_exynos_flip flip)
511{
512 switch (flip) {
513 case EXYNOS_DRM_FLIP_NONE:
514 case EXYNOS_DRM_FLIP_VERTICAL:
515 case EXYNOS_DRM_FLIP_HORIZONTAL:
516 return true;
517 default:
518 DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
519 return false;
520 }
521}
522
523static int rotator_ippdrv_check_property(struct device *dev,
524 struct drm_exynos_ipp_property *property)
525{
526 struct drm_exynos_ipp_config *src_config =
527 &property->config[EXYNOS_DRM_OPS_SRC];
528 struct drm_exynos_ipp_config *dst_config =
529 &property->config[EXYNOS_DRM_OPS_DST];
530 struct drm_exynos_pos *src_pos = &src_config->pos;
531 struct drm_exynos_pos *dst_pos = &dst_config->pos;
532 struct drm_exynos_sz *src_sz = &src_config->sz;
533 struct drm_exynos_sz *dst_sz = &dst_config->sz;
534 bool swap = false;
535
536 /* Check format configuration */
537 if (src_config->fmt != dst_config->fmt) {
538 DRM_DEBUG_KMS("%s:not support csc feature\n", __func__);
539 return -EINVAL;
540 }
541
542 if (!rotator_check_drm_fmt(dst_config->fmt)) {
543 DRM_DEBUG_KMS("%s:invalid format\n", __func__);
544 return -EINVAL;
545 }
546
547 /* Check transform configuration */
548 if (src_config->degree != EXYNOS_DRM_DEGREE_0) {
549 DRM_DEBUG_KMS("%s:not support source-side rotation\n",
550 __func__);
551 return -EINVAL;
552 }
553
554 switch (dst_config->degree) {
555 case EXYNOS_DRM_DEGREE_90:
556 case EXYNOS_DRM_DEGREE_270:
557 swap = true; /* fall through */
558 case EXYNOS_DRM_DEGREE_0:
559 case EXYNOS_DRM_DEGREE_180:
560 /* No problem */
561 break;
562 default:
563 DRM_DEBUG_KMS("%s:invalid degree\n", __func__);
564 return -EINVAL;
565 }
566
567 if (src_config->flip != EXYNOS_DRM_FLIP_NONE) {
568 DRM_DEBUG_KMS("%s:not support source-side flip\n", __func__);
569 return -EINVAL;
570 }
571
572 if (!rotator_check_drm_flip(dst_config->flip)) {
573 DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
574 return -EINVAL;
575 }
576
577 /* Check size configuration */
578 if ((src_pos->x + src_pos->w > src_sz->hsize) ||
579 (src_pos->y + src_pos->h > src_sz->vsize)) {
580 DRM_DEBUG_KMS("%s:out of source buffer bound\n", __func__);
581 return -EINVAL;
582 }
583
584 if (swap) {
585 if ((dst_pos->x + dst_pos->h > dst_sz->vsize) ||
586 (dst_pos->y + dst_pos->w > dst_sz->hsize)) {
587 DRM_DEBUG_KMS("%s:out of destination buffer bound\n",
588 __func__);
589 return -EINVAL;
590 }
591
592 if ((src_pos->w != dst_pos->h) || (src_pos->h != dst_pos->w)) {
593 DRM_DEBUG_KMS("%s:not support scale feature\n",
594 __func__);
595 return -EINVAL;
596 }
597 } else {
598 if ((dst_pos->x + dst_pos->w > dst_sz->hsize) ||
599 (dst_pos->y + dst_pos->h > dst_sz->vsize)) {
600 DRM_DEBUG_KMS("%s:out of destination buffer bound\n",
601 __func__);
602 return -EINVAL;
603 }
604
605 if ((src_pos->w != dst_pos->w) || (src_pos->h != dst_pos->h)) {
606 DRM_DEBUG_KMS("%s:not support scale feature\n",
607 __func__);
608 return -EINVAL;
609 }
610 }
611
612 return 0;
613}
614
615static int rotator_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
616{
617 struct rot_context *rot = dev_get_drvdata(dev);
618 u32 val;
619
620 if (rot->suspended) {
621 DRM_ERROR("suspended state\n");
622 return -EPERM;
623 }
624
625 if (cmd != IPP_CMD_M2M) {
626 DRM_ERROR("not support cmd: %d\n", cmd);
627 return -EINVAL;
628 }
629
630 /* Set interrupt enable */
631 rotator_reg_set_irq(rot, true);
632
633 val = rot_read(ROT_CONTROL);
634 val |= ROT_CONTROL_START;
635
636 rot_write(val, ROT_CONTROL);
637
638 return 0;
639}
640
641static int __devinit rotator_probe(struct platform_device *pdev)
642{
643 struct device *dev = &pdev->dev;
644 struct rot_context *rot;
645 struct exynos_drm_ippdrv *ippdrv;
646 int ret;
647
648 rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL);
649 if (!rot) {
650 dev_err(dev, "failed to allocate rot\n");
651 return -ENOMEM;
652 }
653
654 rot->limit_tbl = (struct rot_limit_table *)
655 platform_get_device_id(pdev)->driver_data;
656
657 rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
658 if (!rot->regs_res) {
659 dev_err(dev, "failed to find registers\n");
660 ret = -ENOENT;
661 goto err_get_resource;
662 }
663
664 rot->regs = devm_request_and_ioremap(dev, rot->regs_res);
665 if (!rot->regs) {
666 dev_err(dev, "failed to map register\n");
667 ret = -ENXIO;
668 goto err_get_resource;
669 }
670
671 rot->irq = platform_get_irq(pdev, 0);
672 if (rot->irq < 0) {
673 dev_err(dev, "failed to get irq\n");
674 ret = rot->irq;
675 goto err_get_irq;
676 }
677
678 ret = request_threaded_irq(rot->irq, NULL, rotator_irq_handler,
679 IRQF_ONESHOT, "drm_rotator", rot);
680 if (ret < 0) {
681 dev_err(dev, "failed to request irq\n");
682 goto err_get_irq;
683 }
684
685 rot->clock = clk_get(dev, "rotator");
686 if (IS_ERR_OR_NULL(rot->clock)) {
687 dev_err(dev, "failed to get clock\n");
688 ret = PTR_ERR(rot->clock);
689 goto err_clk_get;
690 }
691
692 pm_runtime_enable(dev);
693
694 ippdrv = &rot->ippdrv;
695 ippdrv->dev = dev;
696 ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &rot_src_ops;
697 ippdrv->ops[EXYNOS_DRM_OPS_DST] = &rot_dst_ops;
698 ippdrv->check_property = rotator_ippdrv_check_property;
699 ippdrv->start = rotator_ippdrv_start;
700 ret = rotator_init_prop_list(ippdrv);
701 if (ret < 0) {
702 dev_err(dev, "failed to init property list.\n");
703 goto err_ippdrv_register;
704 }
705
706 DRM_DEBUG_KMS("%s:ippdrv[0x%x]\n", __func__, (int)ippdrv);
707
708 platform_set_drvdata(pdev, rot);
709
710 ret = exynos_drm_ippdrv_register(ippdrv);
711 if (ret < 0) {
712 dev_err(dev, "failed to register drm rotator device\n");
713 goto err_ippdrv_register;
714 }
715
716 dev_info(dev, "exynos rotator probed successfully\n");
717
718 return 0;
719
720err_ippdrv_register:
721 devm_kfree(dev, ippdrv->prop_list);
722 pm_runtime_disable(dev);
723 clk_put(rot->clock);
724err_clk_get:
725 free_irq(rot->irq, rot);
726err_get_irq:
727 devm_iounmap(dev, rot->regs);
728err_get_resource:
729 devm_kfree(dev, rot);
730 return ret;
731}
732
733static int __devexit rotator_remove(struct platform_device *pdev)
734{
735 struct device *dev = &pdev->dev;
736 struct rot_context *rot = dev_get_drvdata(dev);
737 struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
738
739 devm_kfree(dev, ippdrv->prop_list);
740 exynos_drm_ippdrv_unregister(ippdrv);
741
742 pm_runtime_disable(dev);
743 clk_put(rot->clock);
744
745 free_irq(rot->irq, rot);
746 devm_iounmap(dev, rot->regs);
747
748 devm_kfree(dev, rot);
749
750 return 0;
751}
752
753struct rot_limit_table rot_limit_tbl = {
754 .ycbcr420_2p = {
755 .min_w = 32,
756 .min_h = 32,
757 .max_w = SZ_32K,
758 .max_h = SZ_32K,
759 .align = 3,
760 },
761 .rgb888 = {
762 .min_w = 8,
763 .min_h = 8,
764 .max_w = SZ_8K,
765 .max_h = SZ_8K,
766 .align = 2,
767 },
768};
769
770struct platform_device_id rotator_driver_ids[] = {
771 {
772 .name = "exynos-rot",
773 .driver_data = (unsigned long)&rot_limit_tbl,
774 },
775 {},
776};
777
778static int rotator_clk_ctrl(struct rot_context *rot, bool enable)
779{
780 DRM_DEBUG_KMS("%s\n", __func__);
781
782 if (enable) {
783 clk_enable(rot->clock);
784 rot->suspended = false;
785 } else {
786 clk_disable(rot->clock);
787 rot->suspended = true;
788 }
789
790 return 0;
791}
792
793
794#ifdef CONFIG_PM_SLEEP
795static int rotator_suspend(struct device *dev)
796{
797 struct rot_context *rot = dev_get_drvdata(dev);
798
799 DRM_DEBUG_KMS("%s\n", __func__);
800
801 if (pm_runtime_suspended(dev))
802 return 0;
803
804 return rotator_clk_ctrl(rot, false);
805}
806
807static int rotator_resume(struct device *dev)
808{
809 struct rot_context *rot = dev_get_drvdata(dev);
810
811 DRM_DEBUG_KMS("%s\n", __func__);
812
813 if (!pm_runtime_suspended(dev))
814 return rotator_clk_ctrl(rot, true);
815
816 return 0;
817}
818#endif
819
820#ifdef CONFIG_PM_RUNTIME
821static int rotator_runtime_suspend(struct device *dev)
822{
823 struct rot_context *rot = dev_get_drvdata(dev);
824
825 DRM_DEBUG_KMS("%s\n", __func__);
826
827 return rotator_clk_ctrl(rot, false);
828}
829
830static int rotator_runtime_resume(struct device *dev)
831{
832 struct rot_context *rot = dev_get_drvdata(dev);
833
834 DRM_DEBUG_KMS("%s\n", __func__);
835
836 return rotator_clk_ctrl(rot, true);
837}
838#endif
839
840static const struct dev_pm_ops rotator_pm_ops = {
841 SET_SYSTEM_SLEEP_PM_OPS(rotator_suspend, rotator_resume)
842 SET_RUNTIME_PM_OPS(rotator_runtime_suspend, rotator_runtime_resume,
843 NULL)
844};
845
846struct platform_driver rotator_driver = {
847 .probe = rotator_probe,
848 .remove = __devexit_p(rotator_remove),
849 .id_table = rotator_driver_ids,
850 .driver = {
851 .name = "exynos-rot",
852 .owner = THIS_MODULE,
853 .pm = &rotator_pm_ops,
854 },
855};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.h b/drivers/gpu/drm/exynos/exynos_drm_rotator.h
new file mode 100644
index 000000000000..a2d7a14a52b6
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.h
@@ -0,0 +1,33 @@
1/*
2 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
3 *
4 * Authors:
5 * YoungJun Cho <yj44.cho@samsung.com>
6 * Eunchul Kim <chulspro.kim@samsung.com>
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 */
27
28#ifndef _EXYNOS_DRM_ROTATOR_H_
29#define _EXYNOS_DRM_ROTATOR_H_
30
31/* TODO */
32
33#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index e4b8a8f741f7..99bfc38dfaa2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -39,7 +39,6 @@ struct vidi_win_data {
39 unsigned int fb_height; 39 unsigned int fb_height;
40 unsigned int bpp; 40 unsigned int bpp;
41 dma_addr_t dma_addr; 41 dma_addr_t dma_addr;
42 void __iomem *vaddr;
43 unsigned int buf_offsize; 42 unsigned int buf_offsize;
44 unsigned int line_size; /* bytes */ 43 unsigned int line_size; /* bytes */
45 bool enabled; 44 bool enabled;
@@ -294,7 +293,6 @@ static void vidi_win_mode_set(struct device *dev,
294 win_data->fb_width = overlay->fb_width; 293 win_data->fb_width = overlay->fb_width;
295 win_data->fb_height = overlay->fb_height; 294 win_data->fb_height = overlay->fb_height;
296 win_data->dma_addr = overlay->dma_addr[0] + offset; 295 win_data->dma_addr = overlay->dma_addr[0] + offset;
297 win_data->vaddr = overlay->vaddr[0] + offset;
298 win_data->bpp = overlay->bpp; 296 win_data->bpp = overlay->bpp;
299 win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) * 297 win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
300 (overlay->bpp >> 3); 298 (overlay->bpp >> 3);
@@ -309,9 +307,7 @@ static void vidi_win_mode_set(struct device *dev,
309 win_data->offset_x, win_data->offset_y); 307 win_data->offset_x, win_data->offset_y);
310 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", 308 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
311 win_data->ovl_width, win_data->ovl_height); 309 win_data->ovl_width, win_data->ovl_height);
312 DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n", 310 DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
313 (unsigned long)win_data->dma_addr,
314 (unsigned long)win_data->vaddr);
315 DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n", 311 DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
316 overlay->fb_width, overlay->crtc_width); 312 overlay->fb_width, overlay->crtc_width);
317} 313}
@@ -382,7 +378,6 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
382 struct drm_pending_vblank_event *e, *t; 378 struct drm_pending_vblank_event *e, *t;
383 struct timeval now; 379 struct timeval now;
384 unsigned long flags; 380 unsigned long flags;
385 bool is_checked = false;
386 381
387 spin_lock_irqsave(&drm_dev->event_lock, flags); 382 spin_lock_irqsave(&drm_dev->event_lock, flags);
388 383
@@ -392,8 +387,6 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
392 if (crtc != e->pipe) 387 if (crtc != e->pipe)
393 continue; 388 continue;
394 389
395 is_checked = true;
396
397 do_gettimeofday(&now); 390 do_gettimeofday(&now);
398 e->event.sequence = 0; 391 e->event.sequence = 0;
399 e->event.tv_sec = now.tv_sec; 392 e->event.tv_sec = now.tv_sec;
@@ -401,22 +394,7 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
401 394
402 list_move_tail(&e->base.link, &e->base.file_priv->event_list); 395 list_move_tail(&e->base.link, &e->base.file_priv->event_list);
403 wake_up_interruptible(&e->base.file_priv->event_wait); 396 wake_up_interruptible(&e->base.file_priv->event_wait);
404 } 397 drm_vblank_put(drm_dev, crtc);
405
406 if (is_checked) {
407 /*
408 * call drm_vblank_put only in case that drm_vblank_get was
409 * called.
410 */
411 if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
412 drm_vblank_put(drm_dev, crtc);
413
414 /*
415 * don't off vblank if vblank_disable_allowed is 1,
416 * because vblank would be off by timer handler.
417 */
418 if (!drm_dev->vblank_disable_allowed)
419 drm_vblank_off(drm_dev, crtc);
420 } 398 }
421 399
422 spin_unlock_irqrestore(&drm_dev->event_lock, flags); 400 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
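The rewrite above drops the is_checked flag and the vblank_refcount peek: every queued flip took a drm_vblank_get(), so releasing one reference per completed event keeps the count balanced without list-wide bookkeeping (and vblank disabling is left to the core's disable timer rather than an explicit drm_vblank_off()). A condensed sketch of the per-event completion step; the helper name and factoring are illustrative, not part of the patch:

static void complete_flip_event(struct drm_device *dev, int crtc,
				struct drm_pending_vblank_event *e)
{
	struct timeval now;

	do_gettimeofday(&now);
	e->event.sequence = 0;
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_usec;

	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
	wake_up_interruptible(&e->base.file_priv->event_wait);

	/* balances the drm_vblank_get() taken when the flip was queued */
	drm_vblank_put(dev, crtc);
}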
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 2c115f8a62a3..2c46b6c0b82c 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -50,6 +50,29 @@
50#define MAX_HEIGHT 1080 50#define MAX_HEIGHT 1080
51#define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev)) 51#define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev))
52 52
53/* AVI header and aspect ratio */
54#define HDMI_AVI_VERSION 0x02
55#define HDMI_AVI_LENGTH 0x0D
56#define AVI_PIC_ASPECT_RATIO_16_9 (2 << 4)
57#define AVI_SAME_AS_PIC_ASPECT_RATIO 8
58
59/* AUI header info */
60#define HDMI_AUI_VERSION 0x01
61#define HDMI_AUI_LENGTH 0x0A
62
63/* HDMI InfoFrame packet types used to configure the HDMI output packets (AVI and AUI) */
64enum HDMI_PACKET_TYPE {
65 /* refer to Table 5-8 Packet Type in HDMI specification v1.4a */
66 /* InfoFrame packet type */
67 HDMI_PACKET_TYPE_INFOFRAME = 0x80,
68 /* Vendor-Specific InfoFrame */
69 HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1,
70 /* Auxiliary Video information InfoFrame */
71 HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2,
72 /* Audio information InfoFrame */
73 HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4
74};
75
53enum hdmi_type { 76enum hdmi_type {
54 HDMI_TYPE13, 77 HDMI_TYPE13,
55 HDMI_TYPE14, 78 HDMI_TYPE14,
@@ -74,6 +97,7 @@ struct hdmi_context {
74 struct mutex hdmi_mutex; 97 struct mutex hdmi_mutex;
75 98
76 void __iomem *regs; 99 void __iomem *regs;
100 void *parent_ctx;
77 int external_irq; 101 int external_irq;
78 int internal_irq; 102 int internal_irq;
79 103
@@ -84,7 +108,6 @@ struct hdmi_context {
84 int cur_conf; 108 int cur_conf;
85 109
86 struct hdmi_resources res; 110 struct hdmi_resources res;
87 void *parent_ctx;
88 111
89 int hpd_gpio; 112 int hpd_gpio;
90 113
@@ -182,6 +205,7 @@ struct hdmi_v13_conf {
182 int height; 205 int height;
183 int vrefresh; 206 int vrefresh;
184 bool interlace; 207 bool interlace;
208 int cea_video_id;
185 const u8 *hdmiphy_data; 209 const u8 *hdmiphy_data;
186 const struct hdmi_v13_preset_conf *conf; 210 const struct hdmi_v13_preset_conf *conf;
187}; 211};
@@ -353,15 +377,20 @@ static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p60 = {
353}; 377};
354 378
355static const struct hdmi_v13_conf hdmi_v13_confs[] = { 379static const struct hdmi_v13_conf hdmi_v13_confs[] = {
356 { 1280, 720, 60, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 }, 380 { 1280, 720, 60, false, 4, hdmiphy_v13_conf74_25,
357 { 1280, 720, 50, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 }, 381 &hdmi_v13_conf_720p60 },
358 { 720, 480, 60, false, hdmiphy_v13_conf27_027, &hdmi_v13_conf_480p }, 382 { 1280, 720, 50, false, 19, hdmiphy_v13_conf74_25,
359 { 1920, 1080, 50, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i50 }, 383 &hdmi_v13_conf_720p60 },
360 { 1920, 1080, 50, false, hdmiphy_v13_conf148_5, 384 { 720, 480, 60, false, 3, hdmiphy_v13_conf27_027,
361 &hdmi_v13_conf_1080p50 }, 385 &hdmi_v13_conf_480p },
362 { 1920, 1080, 60, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i60 }, 386 { 1920, 1080, 50, true, 20, hdmiphy_v13_conf74_25,
363 { 1920, 1080, 60, false, hdmiphy_v13_conf148_5, 387 &hdmi_v13_conf_1080i50 },
364 &hdmi_v13_conf_1080p60 }, 388 { 1920, 1080, 50, false, 31, hdmiphy_v13_conf148_5,
389 &hdmi_v13_conf_1080p50 },
390 { 1920, 1080, 60, true, 5, hdmiphy_v13_conf74_25,
391 &hdmi_v13_conf_1080i60 },
392 { 1920, 1080, 60, false, 16, hdmiphy_v13_conf148_5,
393 &hdmi_v13_conf_1080p60 },
365}; 394};
366 395
367/* HDMI Version 1.4 */ 396/* HDMI Version 1.4 */
@@ -479,6 +508,7 @@ struct hdmi_conf {
479 int height; 508 int height;
480 int vrefresh; 509 int vrefresh;
481 bool interlace; 510 bool interlace;
511 int cea_video_id;
482 const u8 *hdmiphy_data; 512 const u8 *hdmiphy_data;
483 const struct hdmi_preset_conf *conf; 513 const struct hdmi_preset_conf *conf;
484}; 514};
@@ -934,16 +964,21 @@ static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
934}; 964};
935 965
936static const struct hdmi_conf hdmi_confs[] = { 966static const struct hdmi_conf hdmi_confs[] = {
937 { 720, 480, 60, false, hdmiphy_conf27_027, &hdmi_conf_480p60 }, 967 { 720, 480, 60, false, 3, hdmiphy_conf27_027, &hdmi_conf_480p60 },
938 { 1280, 720, 50, false, hdmiphy_conf74_25, &hdmi_conf_720p50 }, 968 { 1280, 720, 50, false, 19, hdmiphy_conf74_25, &hdmi_conf_720p50 },
939 { 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 }, 969 { 1280, 720, 60, false, 4, hdmiphy_conf74_25, &hdmi_conf_720p60 },
940 { 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 }, 970 { 1920, 1080, 50, true, 20, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
941 { 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 }, 971 { 1920, 1080, 60, true, 5, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
942 { 1920, 1080, 30, false, hdmiphy_conf74_176, &hdmi_conf_1080p30 }, 972 { 1920, 1080, 30, false, 34, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
943 { 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 }, 973 { 1920, 1080, 50, false, 31, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
944 { 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 }, 974 { 1920, 1080, 60, false, 16, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
945}; 975};
946 976
977struct hdmi_infoframe {
978 enum HDMI_PACKET_TYPE type;
979 u8 ver;
980 u8 len;
981};
947 982
948static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id) 983static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
949{ 984{
@@ -1267,6 +1302,88 @@ static int hdmi_conf_index(struct hdmi_context *hdata,
1267 return hdmi_v14_conf_index(mode); 1302 return hdmi_v14_conf_index(mode);
1268} 1303}
1269 1304
1305static u8 hdmi_chksum(struct hdmi_context *hdata,
1306 u32 start, u8 len, u32 hdr_sum)
1307{
1308 int i;
1309
1310 /* hdr_sum: header0 + header1 + header2
1311 * start: start address of packet byte 1
1312 * len: packet bytes - 1 */
1313 for (i = 0; i < len; ++i)
1314 hdr_sum += 0xff & hdmi_reg_read(hdata, start + i * 4);
1315
1316 /* return 2's complement of 8 bit hdr_sum */
1317 return (u8)(~(hdr_sum & 0xff) + 1);
1318}
1319
1320static void hdmi_reg_infoframe(struct hdmi_context *hdata,
1321 struct hdmi_infoframe *infoframe)
1322{
1323 u32 hdr_sum;
1324 u8 chksum;
1325 u32 aspect_ratio;
1326 u32 mod;
1327 u32 vic;
1328
1329 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1330
1331 mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
1332 if (hdata->dvi_mode) {
1333 hdmi_reg_writeb(hdata, HDMI_VSI_CON,
1334 HDMI_VSI_CON_DO_NOT_TRANSMIT);
1335 hdmi_reg_writeb(hdata, HDMI_AVI_CON,
1336 HDMI_AVI_CON_DO_NOT_TRANSMIT);
1337 hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_NO_TRAN);
1338 return;
1339 }
1340
1341 switch (infoframe->type) {
1342 case HDMI_PACKET_TYPE_AVI:
1343 hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC);
1344 hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type);
1345 hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver);
1346 hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len);
1347 hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
1348
1349 /* output color format (RGB/YCbCr selection) hardcoded to 0 (RGB) */
1350 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 |
1351 AVI_ACTIVE_FORMAT_VALID |
1352 AVI_UNDERSCANNED_DISPLAY_VALID);
1353
1354 aspect_ratio = AVI_PIC_ASPECT_RATIO_16_9;
1355
1356 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), aspect_ratio |
1357 AVI_SAME_AS_PIC_ASPECT_RATIO);
1358
1359 if (hdata->type == HDMI_TYPE13)
1360 vic = hdmi_v13_confs[hdata->cur_conf].cea_video_id;
1361 else
1362 vic = hdmi_confs[hdata->cur_conf].cea_video_id;
1363
1364 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
1365
1366 chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
1367 infoframe->len, hdr_sum);
1368 DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum);
1369 hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum);
1370 break;
1371 case HDMI_PACKET_TYPE_AUI:
1372 hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
1373 hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type);
1374 hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver);
1375 hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->len);
1376 hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
1377 chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1),
1378 infoframe->len, hdr_sum);
1379 DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum);
1380 hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum);
1381 break;
1382 default:
1383 break;
1384 }
1385}
1386
1270static bool hdmi_is_connected(void *ctx) 1387static bool hdmi_is_connected(void *ctx)
1271{ 1388{
1272 struct hdmi_context *hdata = ctx; 1389 struct hdmi_context *hdata = ctx;
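hdmi_chksum() implements the standard HDMI/CEA InfoFrame checksum rule: the checksum byte is chosen so that the 8-bit sum of the three header bytes and all payload bytes wraps to zero. A register-free sketch of the same arithmetic over a plain payload buffer (names are illustrative):

static u8 infoframe_checksum(u8 type, u8 ver, u8 len, const u8 *payload)
{
	u32 sum = type + ver + len;	/* the three header bytes */
	int i;

	for (i = 0; i < len; i++)
		sum += payload[i];

	/* two's complement of the low byte, as in hdmi_chksum() */
	return (u8)(~sum + 1);
}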
@@ -1293,6 +1410,7 @@ static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
1293 DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n", 1410 DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
1294 (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"), 1411 (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
1295 raw_edid->width_cm, raw_edid->height_cm); 1412 raw_edid->width_cm, raw_edid->height_cm);
1413 kfree(raw_edid);
1296 } else { 1414 } else {
1297 return -ENODEV; 1415 return -ENODEV;
1298 } 1416 }
@@ -1541,6 +1659,8 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
1541 1659
1542static void hdmi_conf_init(struct hdmi_context *hdata) 1660static void hdmi_conf_init(struct hdmi_context *hdata)
1543{ 1661{
1662 struct hdmi_infoframe infoframe;
1663
1544 /* disable HPD interrupts */ 1664 /* disable HPD interrupts */
1545 hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL | 1665 hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
1546 HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG); 1666 HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
@@ -1575,9 +1695,17 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
1575 hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02); 1695 hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
1576 hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04); 1696 hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
1577 } else { 1697 } else {
1698 infoframe.type = HDMI_PACKET_TYPE_AVI;
1699 infoframe.ver = HDMI_AVI_VERSION;
1700 infoframe.len = HDMI_AVI_LENGTH;
1701 hdmi_reg_infoframe(hdata, &infoframe);
1702
1703 infoframe.type = HDMI_PACKET_TYPE_AUI;
1704 infoframe.ver = HDMI_AUI_VERSION;
1705 infoframe.len = HDMI_AUI_LENGTH;
1706 hdmi_reg_infoframe(hdata, &infoframe);
1707
1578 /* enable AVI packet every vsync, fixes purple line problem */ 1708 /* enable AVI packet every vsync, fixes purple line problem */
1579 hdmi_reg_writeb(hdata, HDMI_AVI_CON, 0x02);
1580 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 2 << 5);
1581 hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5); 1709 hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5);
1582 } 1710 }
1583} 1711}
@@ -1875,6 +2003,24 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
1875 mdelay(10); 2003 mdelay(10);
1876} 2004}
1877 2005
2006static void hdmiphy_poweron(struct hdmi_context *hdata)
2007{
2008 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
2009
2010 if (hdata->type == HDMI_TYPE14)
2011 hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, 0,
2012 HDMI_PHY_POWER_OFF_EN);
2013}
2014
2015static void hdmiphy_poweroff(struct hdmi_context *hdata)
2016{
2017 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
2018
2019 if (hdata->type == HDMI_TYPE14)
2020 hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, ~0,
2021 HDMI_PHY_POWER_OFF_EN);
2022}
2023
1878static void hdmiphy_conf_apply(struct hdmi_context *hdata) 2024static void hdmiphy_conf_apply(struct hdmi_context *hdata)
1879{ 2025{
1880 const u8 *hdmiphy_data; 2026 const u8 *hdmiphy_data;
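hdmiphy_poweron()/hdmiphy_poweroff() flip only HDMI_PHY_POWER_OFF_EN inside HDMI_PHY_CON_0, relying on hdmi_reg_writemask() to read-modify-write just the masked bits. A generic sketch of that helper's contract (assumed shape, not quoted from this driver):

static inline void reg_writemask(void __iomem *base, u32 reg,
				 u32 value, u32 mask)
{
	u32 old = readl(base + reg);

	/* update only the bits covered by mask */
	writel((value & mask) | (old & ~mask), base + reg);
}

Passing value = 0 clears the masked bit and value = ~0 sets it, which is exactly how the two PHY power helpers call it.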
@@ -1978,9 +2124,18 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
1978 index = hdmi_v14_conf_index(m); 2124 index = hdmi_v14_conf_index(m);
1979 2125
1980 if (index >= 0) { 2126 if (index >= 0) {
2127 struct drm_mode_object base;
2128 struct list_head head;
2129
1981 DRM_INFO("desired mode doesn't exist so\n"); 2130 DRM_INFO("desired mode doesn't exist so\n");
1982 DRM_INFO("use the most suitable mode among modes.\n"); 2131 DRM_INFO("use the most suitable mode among modes.\n");
2132
 2133 /* preserve the mode's base object and list head while copying. */
2134 head = adjusted_mode->head;
2135 base = adjusted_mode->base;
1983 memcpy(adjusted_mode, m, sizeof(*m)); 2136 memcpy(adjusted_mode, m, sizeof(*m));
2137 adjusted_mode->head = head;
2138 adjusted_mode->base = base;
1984 break; 2139 break;
1985 } 2140 }
1986 } 2141 }
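The memcpy() in hdmi_mode_fixup() would otherwise clobber the two fields that tie adjusted_mode into the DRM core: base (its mode-object identity) and head (its place on the mode list). Saving and restoring them around the copy is the whole fix; a standalone sketch of the pattern:

static void copy_mode_keep_links(struct drm_display_mode *dst,
				 const struct drm_display_mode *src)
{
	struct drm_mode_object base = dst->base;	/* object identity */
	struct list_head head = dst->head;		/* mode-list linkage */

	memcpy(dst, src, sizeof(*dst));
	dst->base = base;
	dst->head = head;
}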
@@ -2034,12 +2189,12 @@ static void hdmi_poweron(struct hdmi_context *hdata)
2034 2189
2035 mutex_unlock(&hdata->hdmi_mutex); 2190 mutex_unlock(&hdata->hdmi_mutex);
2036 2191
2037 pm_runtime_get_sync(hdata->dev);
2038
2039 regulator_bulk_enable(res->regul_count, res->regul_bulk); 2192 regulator_bulk_enable(res->regul_count, res->regul_bulk);
2040 clk_enable(res->hdmiphy); 2193 clk_enable(res->hdmiphy);
2041 clk_enable(res->hdmi); 2194 clk_enable(res->hdmi);
2042 clk_enable(res->sclk_hdmi); 2195 clk_enable(res->sclk_hdmi);
2196
2197 hdmiphy_poweron(hdata);
2043} 2198}
2044 2199
2045static void hdmi_poweroff(struct hdmi_context *hdata) 2200static void hdmi_poweroff(struct hdmi_context *hdata)
@@ -2058,14 +2213,13 @@ static void hdmi_poweroff(struct hdmi_context *hdata)
2058 * its reset state seems to meet the condition. 2213 * its reset state seems to meet the condition.
2059 */ 2214 */
2060 hdmiphy_conf_reset(hdata); 2215 hdmiphy_conf_reset(hdata);
2216 hdmiphy_poweroff(hdata);
2061 2217
2062 clk_disable(res->sclk_hdmi); 2218 clk_disable(res->sclk_hdmi);
2063 clk_disable(res->hdmi); 2219 clk_disable(res->hdmi);
2064 clk_disable(res->hdmiphy); 2220 clk_disable(res->hdmiphy);
2065 regulator_bulk_disable(res->regul_count, res->regul_bulk); 2221 regulator_bulk_disable(res->regul_count, res->regul_bulk);
2066 2222
2067 pm_runtime_put_sync(hdata->dev);
2068
2069 mutex_lock(&hdata->hdmi_mutex); 2223 mutex_lock(&hdata->hdmi_mutex);
2070 2224
2071 hdata->powered = false; 2225 hdata->powered = false;
@@ -2078,16 +2232,18 @@ static void hdmi_dpms(void *ctx, int mode)
2078{ 2232{
2079 struct hdmi_context *hdata = ctx; 2233 struct hdmi_context *hdata = ctx;
2080 2234
2081 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 2235 DRM_DEBUG_KMS("[%d] %s mode %d\n", __LINE__, __func__, mode);
2082 2236
2083 switch (mode) { 2237 switch (mode) {
2084 case DRM_MODE_DPMS_ON: 2238 case DRM_MODE_DPMS_ON:
2085 hdmi_poweron(hdata); 2239 if (pm_runtime_suspended(hdata->dev))
2240 pm_runtime_get_sync(hdata->dev);
2086 break; 2241 break;
2087 case DRM_MODE_DPMS_STANDBY: 2242 case DRM_MODE_DPMS_STANDBY:
2088 case DRM_MODE_DPMS_SUSPEND: 2243 case DRM_MODE_DPMS_SUSPEND:
2089 case DRM_MODE_DPMS_OFF: 2244 case DRM_MODE_DPMS_OFF:
2090 hdmi_poweroff(hdata); 2245 if (!pm_runtime_suspended(hdata->dev))
2246 pm_runtime_put_sync(hdata->dev);
2091 break; 2247 break;
2092 default: 2248 default:
2093 DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode); 2249 DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
@@ -2166,27 +2322,27 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
2166 memset(res, 0, sizeof(*res)); 2322 memset(res, 0, sizeof(*res));
2167 2323
2168 /* get clocks, power */ 2324 /* get clocks, power */
2169 res->hdmi = clk_get(dev, "hdmi"); 2325 res->hdmi = devm_clk_get(dev, "hdmi");
2170 if (IS_ERR_OR_NULL(res->hdmi)) { 2326 if (IS_ERR_OR_NULL(res->hdmi)) {
2171 DRM_ERROR("failed to get clock 'hdmi'\n"); 2327 DRM_ERROR("failed to get clock 'hdmi'\n");
2172 goto fail; 2328 goto fail;
2173 } 2329 }
2174 res->sclk_hdmi = clk_get(dev, "sclk_hdmi"); 2330 res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
2175 if (IS_ERR_OR_NULL(res->sclk_hdmi)) { 2331 if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
2176 DRM_ERROR("failed to get clock 'sclk_hdmi'\n"); 2332 DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
2177 goto fail; 2333 goto fail;
2178 } 2334 }
2179 res->sclk_pixel = clk_get(dev, "sclk_pixel"); 2335 res->sclk_pixel = devm_clk_get(dev, "sclk_pixel");
2180 if (IS_ERR_OR_NULL(res->sclk_pixel)) { 2336 if (IS_ERR_OR_NULL(res->sclk_pixel)) {
2181 DRM_ERROR("failed to get clock 'sclk_pixel'\n"); 2337 DRM_ERROR("failed to get clock 'sclk_pixel'\n");
2182 goto fail; 2338 goto fail;
2183 } 2339 }
2184 res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy"); 2340 res->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy");
2185 if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) { 2341 if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) {
2186 DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n"); 2342 DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
2187 goto fail; 2343 goto fail;
2188 } 2344 }
2189 res->hdmiphy = clk_get(dev, "hdmiphy"); 2345 res->hdmiphy = devm_clk_get(dev, "hdmiphy");
2190 if (IS_ERR_OR_NULL(res->hdmiphy)) { 2346 if (IS_ERR_OR_NULL(res->hdmiphy)) {
2191 DRM_ERROR("failed to get clock 'hdmiphy'\n"); 2347 DRM_ERROR("failed to get clock 'hdmiphy'\n");
2192 goto fail; 2348 goto fail;
@@ -2194,7 +2350,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
2194 2350
2195 clk_set_parent(res->sclk_hdmi, res->sclk_pixel); 2351 clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
2196 2352
2197 res->regul_bulk = kzalloc(ARRAY_SIZE(supply) * 2353 res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) *
2198 sizeof(res->regul_bulk[0]), GFP_KERNEL); 2354 sizeof(res->regul_bulk[0]), GFP_KERNEL);
2199 if (!res->regul_bulk) { 2355 if (!res->regul_bulk) {
2200 DRM_ERROR("failed to get memory for regulators\n"); 2356 DRM_ERROR("failed to get memory for regulators\n");
@@ -2204,7 +2360,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
2204 res->regul_bulk[i].supply = supply[i]; 2360 res->regul_bulk[i].supply = supply[i];
2205 res->regul_bulk[i].consumer = NULL; 2361 res->regul_bulk[i].consumer = NULL;
2206 } 2362 }
2207 ret = regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk); 2363 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
2208 if (ret) { 2364 if (ret) {
2209 DRM_ERROR("failed to get regulators\n"); 2365 DRM_ERROR("failed to get regulators\n");
2210 goto fail; 2366 goto fail;
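Switching to devm_clk_get(), devm_kzalloc() and devm_regulator_bulk_get() hands these resources to the driver core, which releases them automatically on probe failure or unbind; that is why hdmi_resources_cleanup() and most of the probe error labels disappear below. A minimal sketch of the effect (clock names are placeholders):

static int example_probe(struct platform_device *pdev)
{
	struct clk *foo, *bar;

	foo = devm_clk_get(&pdev->dev, "foo");
	if (IS_ERR(foo))
		return PTR_ERR(foo);

	bar = devm_clk_get(&pdev->dev, "bar");
	if (IS_ERR(bar))
		return PTR_ERR(bar);	/* no clk_put(foo) needed */

	return 0;
}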
@@ -2217,28 +2373,6 @@ fail:
2217 return -ENODEV; 2373 return -ENODEV;
2218} 2374}
2219 2375
2220static int hdmi_resources_cleanup(struct hdmi_context *hdata)
2221{
2222 struct hdmi_resources *res = &hdata->res;
2223
2224 regulator_bulk_free(res->regul_count, res->regul_bulk);
2225 /* kfree is NULL-safe */
2226 kfree(res->regul_bulk);
2227 if (!IS_ERR_OR_NULL(res->hdmiphy))
2228 clk_put(res->hdmiphy);
2229 if (!IS_ERR_OR_NULL(res->sclk_hdmiphy))
2230 clk_put(res->sclk_hdmiphy);
2231 if (!IS_ERR_OR_NULL(res->sclk_pixel))
2232 clk_put(res->sclk_pixel);
2233 if (!IS_ERR_OR_NULL(res->sclk_hdmi))
2234 clk_put(res->sclk_hdmi);
2235 if (!IS_ERR_OR_NULL(res->hdmi))
2236 clk_put(res->hdmi);
2237 memset(res, 0, sizeof(*res));
2238
2239 return 0;
2240}
2241
2242static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy; 2376static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy;
2243 2377
2244void hdmi_attach_ddc_client(struct i2c_client *ddc) 2378void hdmi_attach_ddc_client(struct i2c_client *ddc)
@@ -2306,6 +2440,7 @@ static struct platform_device_id hdmi_driver_types[] = {
2306 } 2440 }
2307}; 2441};
2308 2442
2443#ifdef CONFIG_OF
2309static struct of_device_id hdmi_match_types[] = { 2444static struct of_device_id hdmi_match_types[] = {
2310 { 2445 {
2311 .compatible = "samsung,exynos5-hdmi", 2446 .compatible = "samsung,exynos5-hdmi",
@@ -2314,6 +2449,7 @@ static struct of_device_id hdmi_match_types[] = {
2314 /* end node */ 2449 /* end node */
2315 } 2450 }
2316}; 2451};
2452#endif
2317 2453
2318static int __devinit hdmi_probe(struct platform_device *pdev) 2454static int __devinit hdmi_probe(struct platform_device *pdev)
2319{ 2455{
@@ -2366,6 +2502,8 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
2366 const struct of_device_id *match; 2502 const struct of_device_id *match;
2367 match = of_match_node(of_match_ptr(hdmi_match_types), 2503 match = of_match_node(of_match_ptr(hdmi_match_types),
2368 pdev->dev.of_node); 2504 pdev->dev.of_node);
2505 if (match == NULL)
2506 return -ENODEV;
2369 hdata->type = (enum hdmi_type)match->data; 2507 hdata->type = (enum hdmi_type)match->data;
2370 } else { 2508 } else {
2371 hdata->type = (enum hdmi_type)platform_get_device_id 2509 hdata->type = (enum hdmi_type)platform_get_device_id
@@ -2378,36 +2516,32 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
2378 ret = hdmi_resources_init(hdata); 2516 ret = hdmi_resources_init(hdata);
2379 2517
2380 if (ret) { 2518 if (ret) {
2381 ret = -EINVAL;
2382 DRM_ERROR("hdmi_resources_init failed\n"); 2519 DRM_ERROR("hdmi_resources_init failed\n");
2383 goto err_data; 2520 return -EINVAL;
2384 } 2521 }
2385 2522
2386 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2523 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2387 if (!res) { 2524 if (!res) {
2388 DRM_ERROR("failed to find registers\n"); 2525 DRM_ERROR("failed to find registers\n");
2389 ret = -ENOENT; 2526 return -ENOENT;
2390 goto err_resource;
2391 } 2527 }
2392 2528
2393 hdata->regs = devm_request_and_ioremap(&pdev->dev, res); 2529 hdata->regs = devm_request_and_ioremap(&pdev->dev, res);
2394 if (!hdata->regs) { 2530 if (!hdata->regs) {
2395 DRM_ERROR("failed to map registers\n"); 2531 DRM_ERROR("failed to map registers\n");
2396 ret = -ENXIO; 2532 return -ENXIO;
2397 goto err_resource;
2398 } 2533 }
2399 2534
2400 ret = gpio_request(hdata->hpd_gpio, "HPD"); 2535 ret = devm_gpio_request(&pdev->dev, hdata->hpd_gpio, "HPD");
2401 if (ret) { 2536 if (ret) {
2402 DRM_ERROR("failed to request HPD gpio\n"); 2537 DRM_ERROR("failed to request HPD gpio\n");
2403 goto err_resource; 2538 return ret;
2404 } 2539 }
2405 2540
2406 /* DDC i2c driver */ 2541 /* DDC i2c driver */
2407 if (i2c_add_driver(&ddc_driver)) { 2542 if (i2c_add_driver(&ddc_driver)) {
2408 DRM_ERROR("failed to register ddc i2c driver\n"); 2543 DRM_ERROR("failed to register ddc i2c driver\n");
2409 ret = -ENOENT; 2544 return -ENOENT;
2410 goto err_gpio;
2411 } 2545 }
2412 2546
2413 hdata->ddc_port = hdmi_ddc; 2547 hdata->ddc_port = hdmi_ddc;
@@ -2470,11 +2604,6 @@ err_hdmiphy:
2470 i2c_del_driver(&hdmiphy_driver); 2604 i2c_del_driver(&hdmiphy_driver);
2471err_ddc: 2605err_ddc:
2472 i2c_del_driver(&ddc_driver); 2606 i2c_del_driver(&ddc_driver);
2473err_gpio:
2474 gpio_free(hdata->hpd_gpio);
2475err_resource:
2476 hdmi_resources_cleanup(hdata);
2477err_data:
2478 return ret; 2607 return ret;
2479} 2608}
2480 2609
@@ -2491,9 +2620,6 @@ static int __devexit hdmi_remove(struct platform_device *pdev)
2491 free_irq(hdata->internal_irq, hdata); 2620 free_irq(hdata->internal_irq, hdata);
2492 free_irq(hdata->external_irq, hdata); 2621 free_irq(hdata->external_irq, hdata);
2493 2622
2494 gpio_free(hdata->hpd_gpio);
2495
2496 hdmi_resources_cleanup(hdata);
2497 2623
2498 /* hdmiphy i2c driver */ 2624 /* hdmiphy i2c driver */
2499 i2c_del_driver(&hdmiphy_driver); 2625 i2c_del_driver(&hdmiphy_driver);
@@ -2509,6 +2635,8 @@ static int hdmi_suspend(struct device *dev)
2509 struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev); 2635 struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
2510 struct hdmi_context *hdata = ctx->ctx; 2636 struct hdmi_context *hdata = ctx->ctx;
2511 2637
2638 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
2639
2512 disable_irq(hdata->internal_irq); 2640 disable_irq(hdata->internal_irq);
2513 disable_irq(hdata->external_irq); 2641 disable_irq(hdata->external_irq);
2514 2642
@@ -2516,6 +2644,11 @@ static int hdmi_suspend(struct device *dev)
2516 if (ctx->drm_dev) 2644 if (ctx->drm_dev)
2517 drm_helper_hpd_irq_event(ctx->drm_dev); 2645 drm_helper_hpd_irq_event(ctx->drm_dev);
2518 2646
2647 if (pm_runtime_suspended(dev)) {
2648 DRM_DEBUG_KMS("%s : Already suspended\n", __func__);
2649 return 0;
2650 }
2651
2519 hdmi_poweroff(hdata); 2652 hdmi_poweroff(hdata);
2520 2653
2521 return 0; 2654 return 0;
@@ -2526,13 +2659,52 @@ static int hdmi_resume(struct device *dev)
2526 struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev); 2659 struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
2527 struct hdmi_context *hdata = ctx->ctx; 2660 struct hdmi_context *hdata = ctx->ctx;
2528 2661
2662 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
2663
2664 hdata->hpd = gpio_get_value(hdata->hpd_gpio);
2665
2529 enable_irq(hdata->external_irq); 2666 enable_irq(hdata->external_irq);
2530 enable_irq(hdata->internal_irq); 2667 enable_irq(hdata->internal_irq);
2668
2669 if (!pm_runtime_suspended(dev)) {
2670 DRM_DEBUG_KMS("%s : Already resumed\n", __func__);
2671 return 0;
2672 }
2673
2674 hdmi_poweron(hdata);
2675
2531 return 0; 2676 return 0;
2532} 2677}
2533#endif 2678#endif
2534 2679
2535static SIMPLE_DEV_PM_OPS(hdmi_pm_ops, hdmi_suspend, hdmi_resume); 2680#ifdef CONFIG_PM_RUNTIME
2681static int hdmi_runtime_suspend(struct device *dev)
2682{
2683 struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
2684 struct hdmi_context *hdata = ctx->ctx;
2685 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
2686
2687 hdmi_poweroff(hdata);
2688
2689 return 0;
2690}
2691
2692static int hdmi_runtime_resume(struct device *dev)
2693{
2694 struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
2695 struct hdmi_context *hdata = ctx->ctx;
2696 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
2697
2698 hdmi_poweron(hdata);
2699
2700 return 0;
2701}
2702#endif
2703
2704static const struct dev_pm_ops hdmi_pm_ops = {
2705 SET_SYSTEM_SLEEP_PM_OPS(hdmi_suspend, hdmi_resume)
2706 SET_RUNTIME_PM_OPS(hdmi_runtime_suspend, hdmi_runtime_resume, NULL)
2707};
2536 2708
2537struct platform_driver hdmi_driver = { 2709struct platform_driver hdmi_driver = {
2538 .probe = hdmi_probe, 2710 .probe = hdmi_probe,
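SIMPLE_DEV_PM_OPS only covers system sleep; the open-coded dev_pm_ops above adds runtime-PM callbacks next to it. Each SET_* macro expands to nothing when its CONFIG_PM_* option is off, so the guarded handlers compile out cleanly. A skeleton of the pattern with stubbed handlers:

#ifdef CONFIG_PM_SLEEP
static int example_suspend(struct device *dev) { return 0; }
static int example_resume(struct device *dev) { return 0; }
#endif

#ifdef CONFIG_PM_RUNTIME
static int example_runtime_suspend(struct device *dev) { return 0; }
static int example_runtime_resume(struct device *dev) { return 0; }
#endif

static const struct dev_pm_ops example_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
	SET_RUNTIME_PM_OPS(example_runtime_suspend,
			   example_runtime_resume, NULL)
};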
@@ -2542,6 +2714,6 @@ struct platform_driver hdmi_driver = {
2542 .name = "exynos-hdmi", 2714 .name = "exynos-hdmi",
2543 .owner = THIS_MODULE, 2715 .owner = THIS_MODULE,
2544 .pm = &hdmi_pm_ops, 2716 .pm = &hdmi_pm_ops,
2545 .of_match_table = hdmi_match_types, 2717 .of_match_table = of_match_ptr(hdmi_match_types),
2546 }, 2718 },
2547}; 2719};
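of_match_ptr() evaluates to its argument when CONFIG_OF is set and to NULL otherwise, which is what lets the match table above (and the hdmiphy one below) sit behind #ifdef CONFIG_OF without breaking non-DT builds. A sketch of the idiom with placeholder names:

#ifdef CONFIG_OF
static const struct of_device_id example_match[] = {
	{ .compatible = "vendor,example" },
	{ /* sentinel */ }
};
#endif

static struct platform_driver example_driver = {
	.driver = {
		.name = "example",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(example_match),
	},
};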
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
index 27d1720f1bbd..6206056f4a33 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmiphy.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
@@ -46,6 +46,7 @@ static const struct i2c_device_id hdmiphy_id[] = {
46 { }, 46 { },
47}; 47};
48 48
49#ifdef CONFIG_OF
49static struct of_device_id hdmiphy_match_types[] = { 50static struct of_device_id hdmiphy_match_types[] = {
50 { 51 {
51 .compatible = "samsung,exynos5-hdmiphy", 52 .compatible = "samsung,exynos5-hdmiphy",
@@ -53,12 +54,13 @@ static struct of_device_id hdmiphy_match_types[] = {
53 /* end node */ 54 /* end node */
54 } 55 }
55}; 56};
57#endif
56 58
57struct i2c_driver hdmiphy_driver = { 59struct i2c_driver hdmiphy_driver = {
58 .driver = { 60 .driver = {
59 .name = "exynos-hdmiphy", 61 .name = "exynos-hdmiphy",
60 .owner = THIS_MODULE, 62 .owner = THIS_MODULE,
61 .of_match_table = hdmiphy_match_types, 63 .of_match_table = of_match_ptr(hdmiphy_match_types),
62 }, 64 },
63 .id_table = hdmiphy_id, 65 .id_table = hdmiphy_id,
64 .probe = hdmiphy_probe, 66 .probe = hdmiphy_probe,
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index e7fbb823fd8e..21db89530fc7 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -36,14 +36,13 @@
36 36
37#include "exynos_drm_drv.h" 37#include "exynos_drm_drv.h"
38#include "exynos_drm_hdmi.h" 38#include "exynos_drm_hdmi.h"
39#include "exynos_drm_iommu.h"
39 40
40#define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev)) 41#define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev))
41 42
42struct hdmi_win_data { 43struct hdmi_win_data {
43 dma_addr_t dma_addr; 44 dma_addr_t dma_addr;
44 void __iomem *vaddr;
45 dma_addr_t chroma_dma_addr; 45 dma_addr_t chroma_dma_addr;
46 void __iomem *chroma_vaddr;
47 uint32_t pixel_format; 46 uint32_t pixel_format;
48 unsigned int bpp; 47 unsigned int bpp;
49 unsigned int crtc_x; 48 unsigned int crtc_x;
@@ -59,6 +58,8 @@ struct hdmi_win_data {
59 unsigned int mode_width; 58 unsigned int mode_width;
60 unsigned int mode_height; 59 unsigned int mode_height;
61 unsigned int scan_flags; 60 unsigned int scan_flags;
61 bool enabled;
62 bool resume;
62}; 63};
63 64
64struct mixer_resources { 65struct mixer_resources {
@@ -80,6 +81,7 @@ enum mixer_version_id {
80 81
81struct mixer_context { 82struct mixer_context {
82 struct device *dev; 83 struct device *dev;
84 struct drm_device *drm_dev;
83 int pipe; 85 int pipe;
84 bool interlace; 86 bool interlace;
85 bool powered; 87 bool powered;
@@ -90,6 +92,9 @@ struct mixer_context {
90 struct mixer_resources mixer_res; 92 struct mixer_resources mixer_res;
91 struct hdmi_win_data win_data[MIXER_WIN_NR]; 93 struct hdmi_win_data win_data[MIXER_WIN_NR];
92 enum mixer_version_id mxr_ver; 94 enum mixer_version_id mxr_ver;
95 void *parent_ctx;
96 wait_queue_head_t wait_vsync_queue;
97 atomic_t wait_vsync_event;
93}; 98};
94 99
95struct mixer_drv_data { 100struct mixer_drv_data {
@@ -665,58 +670,22 @@ static void mixer_win_reset(struct mixer_context *ctx)
665 spin_unlock_irqrestore(&res->reg_slock, flags); 670 spin_unlock_irqrestore(&res->reg_slock, flags);
666} 671}
667 672
668static void mixer_poweron(struct mixer_context *ctx) 673static int mixer_iommu_on(void *ctx, bool enable)
669{
670 struct mixer_resources *res = &ctx->mixer_res;
671
672 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
673
674 mutex_lock(&ctx->mixer_mutex);
675 if (ctx->powered) {
676 mutex_unlock(&ctx->mixer_mutex);
677 return;
678 }
679 ctx->powered = true;
680 mutex_unlock(&ctx->mixer_mutex);
681
682 pm_runtime_get_sync(ctx->dev);
683
684 clk_enable(res->mixer);
685 if (ctx->vp_enabled) {
686 clk_enable(res->vp);
687 clk_enable(res->sclk_mixer);
688 }
689
690 mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
691 mixer_win_reset(ctx);
692}
693
694static void mixer_poweroff(struct mixer_context *ctx)
695{ 674{
696 struct mixer_resources *res = &ctx->mixer_res; 675 struct exynos_drm_hdmi_context *drm_hdmi_ctx;
697 676 struct mixer_context *mdata = ctx;
698 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 677 struct drm_device *drm_dev;
699 678
700 mutex_lock(&ctx->mixer_mutex); 679 drm_hdmi_ctx = mdata->parent_ctx;
701 if (!ctx->powered) 680 drm_dev = drm_hdmi_ctx->drm_dev;
702 goto out;
703 mutex_unlock(&ctx->mixer_mutex);
704 681
705 ctx->int_en = mixer_reg_read(res, MXR_INT_EN); 682 if (is_drm_iommu_supported(drm_dev)) {
683 if (enable)
684 return drm_iommu_attach_device(drm_dev, mdata->dev);
706 685
707 clk_disable(res->mixer); 686 drm_iommu_detach_device(drm_dev, mdata->dev);
708 if (ctx->vp_enabled) {
709 clk_disable(res->vp);
710 clk_disable(res->sclk_mixer);
711 } 687 }
712 688 return 0;
713 pm_runtime_put_sync(ctx->dev);
714
715 mutex_lock(&ctx->mixer_mutex);
716 ctx->powered = false;
717
718out:
719 mutex_unlock(&ctx->mixer_mutex);
720} 689}
721 690
722static int mixer_enable_vblank(void *ctx, int pipe) 691static int mixer_enable_vblank(void *ctx, int pipe)
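mixer_iommu_on() attaches or detaches the mixer device from the DRM-wide IOMMU mapping set up by exynos_drm_iommu. The same shape, factored as a standalone sketch over the helpers used above:

static int iommu_toggle(struct drm_device *drm_dev, struct device *dev,
			bool enable)
{
	if (!is_drm_iommu_supported(drm_dev))
		return 0;

	if (enable)
		return drm_iommu_attach_device(drm_dev, dev);

	drm_iommu_detach_device(drm_dev, dev);
	return 0;
}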
@@ -746,39 +715,6 @@ static void mixer_disable_vblank(void *ctx)
746 mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC); 715 mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
747} 716}
748 717
749static void mixer_dpms(void *ctx, int mode)
750{
751 struct mixer_context *mixer_ctx = ctx;
752
753 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
754
755 switch (mode) {
756 case DRM_MODE_DPMS_ON:
757 mixer_poweron(mixer_ctx);
758 break;
759 case DRM_MODE_DPMS_STANDBY:
760 case DRM_MODE_DPMS_SUSPEND:
761 case DRM_MODE_DPMS_OFF:
762 mixer_poweroff(mixer_ctx);
763 break;
764 default:
765 DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
766 break;
767 }
768}
769
770static void mixer_wait_for_vblank(void *ctx)
771{
772 struct mixer_context *mixer_ctx = ctx;
773 struct mixer_resources *res = &mixer_ctx->mixer_res;
774 int ret;
775
776 ret = wait_for((mixer_reg_read(res, MXR_INT_STATUS) &
777 MXR_INT_STATUS_VSYNC), 50);
778 if (ret < 0)
779 DRM_DEBUG_KMS("vblank wait timed out.\n");
780}
781
782static void mixer_win_mode_set(void *ctx, 718static void mixer_win_mode_set(void *ctx,
783 struct exynos_drm_overlay *overlay) 719 struct exynos_drm_overlay *overlay)
784{ 720{
@@ -811,9 +747,7 @@ static void mixer_win_mode_set(void *ctx,
811 win_data = &mixer_ctx->win_data[win]; 747 win_data = &mixer_ctx->win_data[win];
812 748
813 win_data->dma_addr = overlay->dma_addr[0]; 749 win_data->dma_addr = overlay->dma_addr[0];
814 win_data->vaddr = overlay->vaddr[0];
815 win_data->chroma_dma_addr = overlay->dma_addr[1]; 750 win_data->chroma_dma_addr = overlay->dma_addr[1];
816 win_data->chroma_vaddr = overlay->vaddr[1];
817 win_data->pixel_format = overlay->pixel_format; 751 win_data->pixel_format = overlay->pixel_format;
818 win_data->bpp = overlay->bpp; 752 win_data->bpp = overlay->bpp;
819 753
@@ -845,6 +779,8 @@ static void mixer_win_commit(void *ctx, int win)
845 vp_video_buffer(mixer_ctx, win); 779 vp_video_buffer(mixer_ctx, win);
846 else 780 else
847 mixer_graph_buffer(mixer_ctx, win); 781 mixer_graph_buffer(mixer_ctx, win);
782
783 mixer_ctx->win_data[win].enabled = true;
848} 784}
849 785
850static void mixer_win_disable(void *ctx, int win) 786static void mixer_win_disable(void *ctx, int win)
@@ -855,6 +791,14 @@ static void mixer_win_disable(void *ctx, int win)
855 791
856 DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win); 792 DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
857 793
794 mutex_lock(&mixer_ctx->mixer_mutex);
795 if (!mixer_ctx->powered) {
796 mutex_unlock(&mixer_ctx->mixer_mutex);
797 mixer_ctx->win_data[win].resume = false;
798 return;
799 }
800 mutex_unlock(&mixer_ctx->mixer_mutex);
801
858 spin_lock_irqsave(&res->reg_slock, flags); 802 spin_lock_irqsave(&res->reg_slock, flags);
859 mixer_vsync_set_update(mixer_ctx, false); 803 mixer_vsync_set_update(mixer_ctx, false);
860 804
@@ -862,16 +806,144 @@ static void mixer_win_disable(void *ctx, int win)
862 806
863 mixer_vsync_set_update(mixer_ctx, true); 807 mixer_vsync_set_update(mixer_ctx, true);
864 spin_unlock_irqrestore(&res->reg_slock, flags); 808 spin_unlock_irqrestore(&res->reg_slock, flags);
809
810 mixer_ctx->win_data[win].enabled = false;
811}
812
813static void mixer_wait_for_vblank(void *ctx)
814{
815 struct mixer_context *mixer_ctx = ctx;
816
817 mutex_lock(&mixer_ctx->mixer_mutex);
818 if (!mixer_ctx->powered) {
819 mutex_unlock(&mixer_ctx->mixer_mutex);
820 return;
821 }
822 mutex_unlock(&mixer_ctx->mixer_mutex);
823
824 atomic_set(&mixer_ctx->wait_vsync_event, 1);
825
826 /*
827 * wait for MIXER to signal VSYNC interrupt or return after
 828 * timeout, which is set to 50ms (one frame at a 20Hz refresh rate).
829 */
830 if (!wait_event_timeout(mixer_ctx->wait_vsync_queue,
831 !atomic_read(&mixer_ctx->wait_vsync_event),
832 DRM_HZ/20))
833 DRM_DEBUG_KMS("vblank wait timed out.\n");
834}
835
836static void mixer_window_suspend(struct mixer_context *ctx)
837{
838 struct hdmi_win_data *win_data;
839 int i;
840
841 for (i = 0; i < MIXER_WIN_NR; i++) {
842 win_data = &ctx->win_data[i];
843 win_data->resume = win_data->enabled;
844 mixer_win_disable(ctx, i);
845 }
846 mixer_wait_for_vblank(ctx);
847}
848
849static void mixer_window_resume(struct mixer_context *ctx)
850{
851 struct hdmi_win_data *win_data;
852 int i;
853
854 for (i = 0; i < MIXER_WIN_NR; i++) {
855 win_data = &ctx->win_data[i];
856 win_data->enabled = win_data->resume;
857 win_data->resume = false;
858 }
859}
860
861static void mixer_poweron(struct mixer_context *ctx)
862{
863 struct mixer_resources *res = &ctx->mixer_res;
864
865 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
866
867 mutex_lock(&ctx->mixer_mutex);
868 if (ctx->powered) {
869 mutex_unlock(&ctx->mixer_mutex);
870 return;
871 }
872 ctx->powered = true;
873 mutex_unlock(&ctx->mixer_mutex);
874
875 clk_enable(res->mixer);
876 if (ctx->vp_enabled) {
877 clk_enable(res->vp);
878 clk_enable(res->sclk_mixer);
879 }
880
881 mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
882 mixer_win_reset(ctx);
883
884 mixer_window_resume(ctx);
885}
886
887static void mixer_poweroff(struct mixer_context *ctx)
888{
889 struct mixer_resources *res = &ctx->mixer_res;
890
891 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
892
893 mutex_lock(&ctx->mixer_mutex);
894 if (!ctx->powered)
895 goto out;
896 mutex_unlock(&ctx->mixer_mutex);
897
898 mixer_window_suspend(ctx);
899
900 ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
901
902 clk_disable(res->mixer);
903 if (ctx->vp_enabled) {
904 clk_disable(res->vp);
905 clk_disable(res->sclk_mixer);
906 }
907
908 mutex_lock(&ctx->mixer_mutex);
909 ctx->powered = false;
910
911out:
912 mutex_unlock(&ctx->mixer_mutex);
913}
914
915static void mixer_dpms(void *ctx, int mode)
916{
917 struct mixer_context *mixer_ctx = ctx;
918
919 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
920
921 switch (mode) {
922 case DRM_MODE_DPMS_ON:
923 if (pm_runtime_suspended(mixer_ctx->dev))
924 pm_runtime_get_sync(mixer_ctx->dev);
925 break;
926 case DRM_MODE_DPMS_STANDBY:
927 case DRM_MODE_DPMS_SUSPEND:
928 case DRM_MODE_DPMS_OFF:
929 if (!pm_runtime_suspended(mixer_ctx->dev))
930 pm_runtime_put_sync(mixer_ctx->dev);
931 break;
932 default:
933 DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
934 break;
935 }
865} 936}
866 937
867static struct exynos_mixer_ops mixer_ops = { 938static struct exynos_mixer_ops mixer_ops = {
868 /* manager */ 939 /* manager */
940 .iommu_on = mixer_iommu_on,
869 .enable_vblank = mixer_enable_vblank, 941 .enable_vblank = mixer_enable_vblank,
870 .disable_vblank = mixer_disable_vblank, 942 .disable_vblank = mixer_disable_vblank,
943 .wait_for_vblank = mixer_wait_for_vblank,
871 .dpms = mixer_dpms, 944 .dpms = mixer_dpms,
872 945
873 /* overlay */ 946 /* overlay */
874 .wait_for_vblank = mixer_wait_for_vblank,
875 .win_mode_set = mixer_win_mode_set, 947 .win_mode_set = mixer_win_mode_set,
876 .win_commit = mixer_win_commit, 948 .win_commit = mixer_win_commit,
877 .win_disable = mixer_win_disable, 949 .win_disable = mixer_win_disable,
@@ -884,7 +956,6 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
884 struct drm_pending_vblank_event *e, *t; 956 struct drm_pending_vblank_event *e, *t;
885 struct timeval now; 957 struct timeval now;
886 unsigned long flags; 958 unsigned long flags;
887 bool is_checked = false;
888 959
889 spin_lock_irqsave(&drm_dev->event_lock, flags); 960 spin_lock_irqsave(&drm_dev->event_lock, flags);
890 961
@@ -894,7 +965,6 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
894 if (crtc != e->pipe) 965 if (crtc != e->pipe)
895 continue; 966 continue;
896 967
897 is_checked = true;
898 do_gettimeofday(&now); 968 do_gettimeofday(&now);
899 e->event.sequence = 0; 969 e->event.sequence = 0;
900 e->event.tv_sec = now.tv_sec; 970 e->event.tv_sec = now.tv_sec;
@@ -902,16 +972,9 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
902 972
903 list_move_tail(&e->base.link, &e->base.file_priv->event_list); 973 list_move_tail(&e->base.link, &e->base.file_priv->event_list);
904 wake_up_interruptible(&e->base.file_priv->event_wait); 974 wake_up_interruptible(&e->base.file_priv->event_wait);
975 drm_vblank_put(drm_dev, crtc);
905 } 976 }
906 977
907 if (is_checked)
908 /*
909 * call drm_vblank_put only in case that drm_vblank_get was
910 * called.
911 */
912 if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
913 drm_vblank_put(drm_dev, crtc);
914
915 spin_unlock_irqrestore(&drm_dev->event_lock, flags); 978 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
916} 979}
917 980
@@ -944,6 +1007,12 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
944 1007
945 drm_handle_vblank(drm_hdmi_ctx->drm_dev, ctx->pipe); 1008 drm_handle_vblank(drm_hdmi_ctx->drm_dev, ctx->pipe);
946 mixer_finish_pageflip(drm_hdmi_ctx->drm_dev, ctx->pipe); 1009 mixer_finish_pageflip(drm_hdmi_ctx->drm_dev, ctx->pipe);
1010
1011 /* set wait vsync event to zero and wake up queue. */
1012 if (atomic_read(&ctx->wait_vsync_event)) {
1013 atomic_set(&ctx->wait_vsync_event, 0);
1014 DRM_WAKEUP(&ctx->wait_vsync_queue);
1015 }
947 } 1016 }
948 1017
949out: 1018out:
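mixer_wait_for_vblank() and the interrupt handler above form a flag-plus-waitqueue handshake: the waiter arms wait_vsync_event and sleeps for at most DRM_HZ/20 (50ms, one frame at 20Hz); the vsync interrupt clears the flag and wakes the queue. A condensed sketch using the plain kernel primitives behind the DRM_* wrappers (the context struct is illustrative):

struct vsync_waiter {
	wait_queue_head_t wq;		/* init_waitqueue_head() at probe */
	atomic_t pending;		/* atomic_set(..., 0) at probe */
};

static void vsync_wait(struct vsync_waiter *w)
{
	atomic_set(&w->pending, 1);
	if (!wait_event_timeout(w->wq, !atomic_read(&w->pending), HZ / 20))
		pr_debug("vblank wait timed out\n");
}

static void vsync_irq(struct vsync_waiter *w)	/* from the IRQ handler */
{
	if (atomic_read(&w->pending)) {
		atomic_set(&w->pending, 0);
		wake_up(&w->wq);
	}
}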
@@ -971,57 +1040,45 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
971 1040
972 spin_lock_init(&mixer_res->reg_slock); 1041 spin_lock_init(&mixer_res->reg_slock);
973 1042
974 mixer_res->mixer = clk_get(dev, "mixer"); 1043 mixer_res->mixer = devm_clk_get(dev, "mixer");
975 if (IS_ERR_OR_NULL(mixer_res->mixer)) { 1044 if (IS_ERR_OR_NULL(mixer_res->mixer)) {
976 dev_err(dev, "failed to get clock 'mixer'\n"); 1045 dev_err(dev, "failed to get clock 'mixer'\n");
977 ret = -ENODEV; 1046 return -ENODEV;
978 goto fail;
979 } 1047 }
980 1048
981 mixer_res->sclk_hdmi = clk_get(dev, "sclk_hdmi"); 1049 mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
982 if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) { 1050 if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) {
983 dev_err(dev, "failed to get clock 'sclk_hdmi'\n"); 1051 dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
984 ret = -ENODEV; 1052 return -ENODEV;
985 goto fail;
986 } 1053 }
987 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1054 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
988 if (res == NULL) { 1055 if (res == NULL) {
989 dev_err(dev, "get memory resource failed.\n"); 1056 dev_err(dev, "get memory resource failed.\n");
990 ret = -ENXIO; 1057 return -ENXIO;
991 goto fail;
992 } 1058 }
993 1059
994 mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start, 1060 mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start,
995 resource_size(res)); 1061 resource_size(res));
996 if (mixer_res->mixer_regs == NULL) { 1062 if (mixer_res->mixer_regs == NULL) {
997 dev_err(dev, "register mapping failed.\n"); 1063 dev_err(dev, "register mapping failed.\n");
998 ret = -ENXIO; 1064 return -ENXIO;
999 goto fail;
1000 } 1065 }
1001 1066
1002 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1067 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1003 if (res == NULL) { 1068 if (res == NULL) {
1004 dev_err(dev, "get interrupt resource failed.\n"); 1069 dev_err(dev, "get interrupt resource failed.\n");
1005 ret = -ENXIO; 1070 return -ENXIO;
1006 goto fail;
1007 } 1071 }
1008 1072
1009 ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler, 1073 ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler,
1010 0, "drm_mixer", ctx); 1074 0, "drm_mixer", ctx);
1011 if (ret) { 1075 if (ret) {
1012 dev_err(dev, "request interrupt failed.\n"); 1076 dev_err(dev, "request interrupt failed.\n");
1013 goto fail; 1077 return ret;
1014 } 1078 }
1015 mixer_res->irq = res->start; 1079 mixer_res->irq = res->start;
1016 1080
1017 return 0; 1081 return 0;
1018
1019fail:
1020 if (!IS_ERR_OR_NULL(mixer_res->sclk_hdmi))
1021 clk_put(mixer_res->sclk_hdmi);
1022 if (!IS_ERR_OR_NULL(mixer_res->mixer))
1023 clk_put(mixer_res->mixer);
1024 return ret;
1025} 1082}
1026 1083
1027static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx, 1084static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
@@ -1031,25 +1088,21 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
1031 struct device *dev = &pdev->dev; 1088 struct device *dev = &pdev->dev;
1032 struct mixer_resources *mixer_res = &mixer_ctx->mixer_res; 1089 struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
1033 struct resource *res; 1090 struct resource *res;
1034 int ret;
1035 1091
1036 mixer_res->vp = clk_get(dev, "vp"); 1092 mixer_res->vp = devm_clk_get(dev, "vp");
1037 if (IS_ERR_OR_NULL(mixer_res->vp)) { 1093 if (IS_ERR_OR_NULL(mixer_res->vp)) {
1038 dev_err(dev, "failed to get clock 'vp'\n"); 1094 dev_err(dev, "failed to get clock 'vp'\n");
1039 ret = -ENODEV; 1095 return -ENODEV;
1040 goto fail;
1041 } 1096 }
1042 mixer_res->sclk_mixer = clk_get(dev, "sclk_mixer"); 1097 mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
1043 if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) { 1098 if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) {
1044 dev_err(dev, "failed to get clock 'sclk_mixer'\n"); 1099 dev_err(dev, "failed to get clock 'sclk_mixer'\n");
1045 ret = -ENODEV; 1100 return -ENODEV;
1046 goto fail;
1047 } 1101 }
1048 mixer_res->sclk_dac = clk_get(dev, "sclk_dac"); 1102 mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac");
1049 if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) { 1103 if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) {
1050 dev_err(dev, "failed to get clock 'sclk_dac'\n"); 1104 dev_err(dev, "failed to get clock 'sclk_dac'\n");
1051 ret = -ENODEV; 1105 return -ENODEV;
1052 goto fail;
1053 } 1106 }
1054 1107
1055 if (mixer_res->sclk_hdmi) 1108 if (mixer_res->sclk_hdmi)
@@ -1058,28 +1111,17 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
1058 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1111 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1059 if (res == NULL) { 1112 if (res == NULL) {
1060 dev_err(dev, "get memory resource failed.\n"); 1113 dev_err(dev, "get memory resource failed.\n");
1061 ret = -ENXIO; 1114 return -ENXIO;
1062 goto fail;
1063 } 1115 }
1064 1116
1065 mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start, 1117 mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start,
1066 resource_size(res)); 1118 resource_size(res));
1067 if (mixer_res->vp_regs == NULL) { 1119 if (mixer_res->vp_regs == NULL) {
1068 dev_err(dev, "register mapping failed.\n"); 1120 dev_err(dev, "register mapping failed.\n");
1069 ret = -ENXIO; 1121 return -ENXIO;
1070 goto fail;
1071 } 1122 }
1072 1123
1073 return 0; 1124 return 0;
1074
1075fail:
1076 if (!IS_ERR_OR_NULL(mixer_res->sclk_dac))
1077 clk_put(mixer_res->sclk_dac);
1078 if (!IS_ERR_OR_NULL(mixer_res->sclk_mixer))
1079 clk_put(mixer_res->sclk_mixer);
1080 if (!IS_ERR_OR_NULL(mixer_res->vp))
1081 clk_put(mixer_res->vp);
1082 return ret;
1083} 1125}
1084 1126
1085static struct mixer_drv_data exynos5_mxr_drv_data = { 1127static struct mixer_drv_data exynos5_mxr_drv_data = {
@@ -1149,9 +1191,12 @@ static int __devinit mixer_probe(struct platform_device *pdev)
1149 } 1191 }
1150 1192
1151 ctx->dev = &pdev->dev; 1193 ctx->dev = &pdev->dev;
1194 ctx->parent_ctx = (void *)drm_hdmi_ctx;
1152 drm_hdmi_ctx->ctx = (void *)ctx; 1195 drm_hdmi_ctx->ctx = (void *)ctx;
1153 ctx->vp_enabled = drv->is_vp_enabled; 1196 ctx->vp_enabled = drv->is_vp_enabled;
1154 ctx->mxr_ver = drv->version; 1197 ctx->mxr_ver = drv->version;
1198 DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
1199 atomic_set(&ctx->wait_vsync_event, 0);
1155 1200
1156 platform_set_drvdata(pdev, drm_hdmi_ctx); 1201 platform_set_drvdata(pdev, drm_hdmi_ctx);
1157 1202
@@ -1202,13 +1247,66 @@ static int mixer_suspend(struct device *dev)
1202 struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev); 1247 struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
1203 struct mixer_context *ctx = drm_hdmi_ctx->ctx; 1248 struct mixer_context *ctx = drm_hdmi_ctx->ctx;
1204 1249
1250 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1251
1252 if (pm_runtime_suspended(dev)) {
1253 DRM_DEBUG_KMS("%s : Already suspended\n", __func__);
1254 return 0;
1255 }
1256
1205 mixer_poweroff(ctx); 1257 mixer_poweroff(ctx);
1206 1258
1207 return 0; 1259 return 0;
1208} 1260}
1261
1262static int mixer_resume(struct device *dev)
1263{
1264 struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
1265 struct mixer_context *ctx = drm_hdmi_ctx->ctx;
1266
1267 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1268
1269 if (!pm_runtime_suspended(dev)) {
1270 DRM_DEBUG_KMS("%s : Already resumed\n", __func__);
1271 return 0;
1272 }
1273
1274 mixer_poweron(ctx);
1275
1276 return 0;
1277}
1209#endif 1278#endif
1210 1279
1211static SIMPLE_DEV_PM_OPS(mixer_pm_ops, mixer_suspend, NULL); 1280#ifdef CONFIG_PM_RUNTIME
1281static int mixer_runtime_suspend(struct device *dev)
1282{
1283 struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
1284 struct mixer_context *ctx = drm_hdmi_ctx->ctx;
1285
1286 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1287
1288 mixer_poweroff(ctx);
1289
1290 return 0;
1291}
1292
1293static int mixer_runtime_resume(struct device *dev)
1294{
1295 struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
1296 struct mixer_context *ctx = drm_hdmi_ctx->ctx;
1297
1298 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1299
1300 mixer_poweron(ctx);
1301
1302 return 0;
1303}
1304#endif
1305
1306static const struct dev_pm_ops mixer_pm_ops = {
1307 SET_SYSTEM_SLEEP_PM_OPS(mixer_suspend, mixer_resume)
1308 SET_RUNTIME_PM_OPS(mixer_runtime_suspend, mixer_runtime_resume, NULL)
1309};
1212 1310
1213struct platform_driver mixer_driver = { 1311struct platform_driver mixer_driver = {
1214 .driver = { 1312 .driver = {
diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h
new file mode 100644
index 000000000000..b4f9ca1fd851
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-fimc.h
@@ -0,0 +1,669 @@
1/* drivers/gpu/drm/exynos/regs-fimc.h
2 *
3 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 *
6 * Register definition file for Samsung Camera Interface (FIMC) driver
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#ifndef EXYNOS_REGS_FIMC_H
14#define EXYNOS_REGS_FIMC_H
15
16/*
17 * Register part
18*/
19/* Input source format */
20#define EXYNOS_CISRCFMT (0x00)
21/* Window offset */
22#define EXYNOS_CIWDOFST (0x04)
23/* Global control */
24#define EXYNOS_CIGCTRL (0x08)
25/* Window offset 2 */
26#define EXYNOS_CIWDOFST2 (0x14)
27/* Y 1st frame start address for output DMA */
28#define EXYNOS_CIOYSA1 (0x18)
29/* Y 2nd frame start address for output DMA */
30#define EXYNOS_CIOYSA2 (0x1c)
31/* Y 3rd frame start address for output DMA */
32#define EXYNOS_CIOYSA3 (0x20)
33/* Y 4th frame start address for output DMA */
34#define EXYNOS_CIOYSA4 (0x24)
35/* Cb 1st frame start address for output DMA */
36#define EXYNOS_CIOCBSA1 (0x28)
37/* Cb 2nd frame start address for output DMA */
38#define EXYNOS_CIOCBSA2 (0x2c)
39/* Cb 3rd frame start address for output DMA */
40#define EXYNOS_CIOCBSA3 (0x30)
41/* Cb 4th frame start address for output DMA */
42#define EXYNOS_CIOCBSA4 (0x34)
43/* Cr 1st frame start address for output DMA */
44#define EXYNOS_CIOCRSA1 (0x38)
45/* Cr 2nd frame start address for output DMA */
46#define EXYNOS_CIOCRSA2 (0x3c)
47/* Cr 3rd frame start address for output DMA */
48#define EXYNOS_CIOCRSA3 (0x40)
49/* Cr 4th frame start address for output DMA */
50#define EXYNOS_CIOCRSA4 (0x44)
51/* Target image format */
52#define EXYNOS_CITRGFMT (0x48)
53/* Output DMA control */
54#define EXYNOS_CIOCTRL (0x4c)
55/* Pre-scaler control 1 */
56#define EXYNOS_CISCPRERATIO (0x50)
57/* Pre-scaler control 2 */
58#define EXYNOS_CISCPREDST (0x54)
59/* Main scaler control */
60#define EXYNOS_CISCCTRL (0x58)
61/* Target area */
62#define EXYNOS_CITAREA (0x5c)
63/* Status */
64#define EXYNOS_CISTATUS (0x64)
65/* Status2 */
66#define EXYNOS_CISTATUS2 (0x68)
67/* Image capture enable command */
68#define EXYNOS_CIIMGCPT (0xc0)
69/* Capture sequence */
70#define EXYNOS_CICPTSEQ (0xc4)
71/* Image effects */
72#define EXYNOS_CIIMGEFF (0xd0)
73/* Y frame start address for input DMA */
74#define EXYNOS_CIIYSA0 (0xd4)
75/* Cb frame start address for input DMA */
76#define EXYNOS_CIICBSA0 (0xd8)
77/* Cr frame start address for input DMA */
78#define EXYNOS_CIICRSA0 (0xdc)
79/* Input DMA Y Line Skip */
80#define EXYNOS_CIILINESKIP_Y (0xec)
81/* Input DMA Cb Line Skip */
82#define EXYNOS_CIILINESKIP_CB (0xf0)
83/* Input DMA Cr Line Skip */
84#define EXYNOS_CIILINESKIP_CR (0xf4)
85/* Real input DMA image size */
86#define EXYNOS_CIREAL_ISIZE (0xf8)
87/* Input DMA control */
88#define EXYNOS_MSCTRL (0xfc)
89/* Y frame start address for input DMA */
90#define EXYNOS_CIIYSA1 (0x144)
91/* Cb frame start address for input DMA */
92#define EXYNOS_CIICBSA1 (0x148)
93/* Cr frame start address for input DMA */
94#define EXYNOS_CIICRSA1 (0x14c)
95/* Output DMA Y offset */
96#define EXYNOS_CIOYOFF (0x168)
97/* Output DMA CB offset */
98#define EXYNOS_CIOCBOFF (0x16c)
99/* Output DMA CR offset */
100#define EXYNOS_CIOCROFF (0x170)
101/* Input DMA Y offset */
102#define EXYNOS_CIIYOFF (0x174)
103/* Input DMA CB offset */
104#define EXYNOS_CIICBOFF (0x178)
105/* Input DMA CR offset */
106#define EXYNOS_CIICROFF (0x17c)
107/* Input DMA original image size */
108#define EXYNOS_ORGISIZE (0x180)
109/* Output DMA original image size */
110#define EXYNOS_ORGOSIZE (0x184)
111/* Real output DMA image size */
112#define EXYNOS_CIEXTEN (0x188)
113/* DMA parameter */
114#define EXYNOS_CIDMAPARAM (0x18c)
115/* MIPI CSI image format */
116#define EXYNOS_CSIIMGFMT (0x194)
117/* FIMC Clock Source Select */
118#define EXYNOS_MISC_FIMC (0x198)
119
120/* Added in FIMC v5.1 */
121/* Output Frame Buffer Sequence */
122#define EXYNOS_CIFCNTSEQ (0x1fc)
123/* Y 5th frame start address for output DMA */
124#define EXYNOS_CIOYSA5 (0x200)
125/* Y 6th frame start address for output DMA */
126#define EXYNOS_CIOYSA6 (0x204)
127/* Y 7th frame start address for output DMA */
128#define EXYNOS_CIOYSA7 (0x208)
129/* Y 8th frame start address for output DMA */
130#define EXYNOS_CIOYSA8 (0x20c)
131/* Y 9th frame start address for output DMA */
132#define EXYNOS_CIOYSA9 (0x210)
133/* Y 10th frame start address for output DMA */
134#define EXYNOS_CIOYSA10 (0x214)
135/* Y 11th frame start address for output DMA */
136#define EXYNOS_CIOYSA11 (0x218)
137/* Y 12th frame start address for output DMA */
138#define EXYNOS_CIOYSA12 (0x21c)
139/* Y 13th frame start address for output DMA */
140#define EXYNOS_CIOYSA13 (0x220)
141/* Y 14th frame start address for output DMA */
142#define EXYNOS_CIOYSA14 (0x224)
143/* Y 15th frame start address for output DMA */
144#define EXYNOS_CIOYSA15 (0x228)
145/* Y 16th frame start address for output DMA */
146#define EXYNOS_CIOYSA16 (0x22c)
147/* Y 17th frame start address for output DMA */
148#define EXYNOS_CIOYSA17 (0x230)
149/* Y 18th frame start address for output DMA */
150#define EXYNOS_CIOYSA18 (0x234)
151/* Y 19th frame start address for output DMA */
152#define EXYNOS_CIOYSA19 (0x238)
153/* Y 20th frame start address for output DMA */
154#define EXYNOS_CIOYSA20 (0x23c)
155/* Y 21st frame start address for output DMA */
156#define EXYNOS_CIOYSA21 (0x240)
157/* Y 22nd frame start address for output DMA */
158#define EXYNOS_CIOYSA22 (0x244)
159/* Y 23rd frame start address for output DMA */
160#define EXYNOS_CIOYSA23 (0x248)
161/* Y 24th frame start address for output DMA */
162#define EXYNOS_CIOYSA24 (0x24c)
163/* Y 25th frame start address for output DMA */
164#define EXYNOS_CIOYSA25 (0x250)
165/* Y 26th frame start address for output DMA */
166#define EXYNOS_CIOYSA26 (0x254)
167/* Y 27th frame start address for output DMA */
168#define EXYNOS_CIOYSA27 (0x258)
169/* Y 28th frame start address for output DMA */
170#define EXYNOS_CIOYSA28 (0x25c)
171/* Y 29th frame start address for output DMA */
172#define EXYNOS_CIOYSA29 (0x260)
173/* Y 30th frame start address for output DMA */
174#define EXYNOS_CIOYSA30 (0x264)
175/* Y 31st frame start address for output DMA */
176#define EXYNOS_CIOYSA31 (0x268)
177/* Y 32nd frame start address for output DMA */
178#define EXYNOS_CIOYSA32 (0x26c)
179
180/* CB 5th frame start address for output DMA */
181#define EXYNOS_CIOCBSA5 (0x270)
182/* CB 6th frame start address for output DMA */
183#define EXYNOS_CIOCBSA6 (0x274)
184/* CB 7th frame start address for output DMA */
185#define EXYNOS_CIOCBSA7 (0x278)
186/* CB 8th frame start address for output DMA */
187#define EXYNOS_CIOCBSA8 (0x27c)
188/* CB 9th frame start address for output DMA */
189#define EXYNOS_CIOCBSA9 (0x280)
190/* CB 10th frame start address for output DMA */
191#define EXYNOS_CIOCBSA10 (0x284)
192/* CB 11th frame start address for output DMA */
193#define EXYNOS_CIOCBSA11 (0x288)
194/* CB 12th frame start address for output DMA */
195#define EXYNOS_CIOCBSA12 (0x28c)
196/* CB 13th frame start address for output DMA */
197#define EXYNOS_CIOCBSA13 (0x290)
198/* CB 14th frame start address for output DMA */
199#define EXYNOS_CIOCBSA14 (0x294)
200/* CB 15th frame start address for output DMA */
201#define EXYNOS_CIOCBSA15 (0x298)
202/* CB 16th frame start address for output DMA */
203#define EXYNOS_CIOCBSA16 (0x29c)
204/* CB 17th frame start address for output DMA */
205#define EXYNOS_CIOCBSA17 (0x2a0)
206/* CB 18th frame start address for output DMA */
207#define EXYNOS_CIOCBSA18 (0x2a4)
208/* CB 19th frame start address for output DMA */
209#define EXYNOS_CIOCBSA19 (0x2a8)
210/* CB 20th frame start address for output DMA */
211#define EXYNOS_CIOCBSA20 (0x2ac)
212/* CB 21st frame start address for output DMA */
213#define EXYNOS_CIOCBSA21 (0x2b0)
214/* CB 22nd frame start address for output DMA */
215#define EXYNOS_CIOCBSA22 (0x2b4)
216/* CB 23rd frame start address for output DMA */
217#define EXYNOS_CIOCBSA23 (0x2b8)
218/* CB 24th frame start address for output DMA */
219#define EXYNOS_CIOCBSA24 (0x2bc)
220/* CB 25th frame start address for output DMA */
221#define EXYNOS_CIOCBSA25 (0x2c0)
222/* CB 26th frame start address for output DMA */
223#define EXYNOS_CIOCBSA26 (0x2c4)
224/* CB 27th frame start address for output DMA */
225#define EXYNOS_CIOCBSA27 (0x2c8)
226/* CB 28th frame start address for output DMA */
227#define EXYNOS_CIOCBSA28 (0x2cc)
228/* CB 29th frame start address for output DMA */
229#define EXYNOS_CIOCBSA29 (0x2d0)
230/* CB 30th frame start address for output DMA */
231#define EXYNOS_CIOCBSA30 (0x2d4)
232/* CB 31st frame start address for output DMA */
233#define EXYNOS_CIOCBSA31 (0x2d8)
234/* CB 32nd frame start address for output DMA */
235#define EXYNOS_CIOCBSA32 (0x2dc)
236
237/* CR 5th frame start address for output DMA */
238#define EXYNOS_CIOCRSA5 (0x2e0)
239/* CR 6th frame start address for output DMA */
240#define EXYNOS_CIOCRSA6 (0x2e4)
241/* CR 7th frame start address for output DMA */
242#define EXYNOS_CIOCRSA7 (0x2e8)
243/* CR 8th frame start address for output DMA */
244#define EXYNOS_CIOCRSA8 (0x2ec)
245/* CR 9th frame start address for output DMA */
246#define EXYNOS_CIOCRSA9 (0x2f0)
247/* CR 10th frame start address for output DMA */
248#define EXYNOS_CIOCRSA10 (0x2f4)
249/* CR 11th frame start address for output DMA */
250#define EXYNOS_CIOCRSA11 (0x2f8)
251/* CR 12th frame start address for output DMA */
252#define EXYNOS_CIOCRSA12 (0x2fc)
253/* CR 13th frame start address for output DMA */
254#define EXYNOS_CIOCRSA13 (0x300)
255/* CR 14th frame start address for output DMA */
256#define EXYNOS_CIOCRSA14 (0x304)
257/* CR 15th frame start address for output DMA */
258#define EXYNOS_CIOCRSA15 (0x308)
259/* CR 16th frame start address for output DMA */
260#define EXYNOS_CIOCRSA16 (0x30c)
261/* CR 17th frame start address for output DMA */
262#define EXYNOS_CIOCRSA17 (0x310)
263/* CR 18th frame start address for output DMA */
264#define EXYNOS_CIOCRSA18 (0x314)
265/* CR 19th frame start address for output DMA */
266#define EXYNOS_CIOCRSA19 (0x318)
267/* CR 20th frame start address for output DMA */
268#define EXYNOS_CIOCRSA20 (0x31c)
269/* CR 21st frame start address for output DMA */
270#define EXYNOS_CIOCRSA21 (0x320)
271/* CR 22nd frame start address for output DMA */
272#define EXYNOS_CIOCRSA22 (0x324)
273/* CR 23rd frame start address for output DMA */
274#define EXYNOS_CIOCRSA23 (0x328)
275/* CR 24th frame start address for output DMA */
276#define EXYNOS_CIOCRSA24 (0x32c)
277/* CR 25th frame start address for output DMA */
278#define EXYNOS_CIOCRSA25 (0x330)
279/* CR 26th frame start address for output DMA */
280#define EXYNOS_CIOCRSA26 (0x334)
281/* CR 27th frame start address for output DMA */
282#define EXYNOS_CIOCRSA27 (0x338)
283/* CR 28th frame start address for output DMA */
284#define EXYNOS_CIOCRSA28 (0x33c)
285/* CR 29th frame start address for output DMA */
286#define EXYNOS_CIOCRSA29 (0x340)
287/* CR 30th frame start address for output DMA */
288#define EXYNOS_CIOCRSA30 (0x344)
289/* CR 31st frame start address for output DMA */
290#define EXYNOS_CIOCRSA31 (0x348)
291/* CR 32nd frame start address for output DMA */
292#define EXYNOS_CIOCRSA32 (0x34c)
293
294/*
295 * Macro part
296 */
297/* frame start address 1 ~ 4, 5 ~ 32 */
298/* Number of Default PingPong Memory for output DMA */
299#define DEF_PP 4
300#define EXYNOS_CIOYSA(__x) \
301 (((__x) < DEF_PP) ? \
302 (EXYNOS_CIOYSA1 + (__x) * 4) : \
303 (EXYNOS_CIOYSA5 + ((__x) - DEF_PP) * 4))
304#define EXYNOS_CIOCBSA(__x) \
305 (((__x) < DEF_PP) ? \
306 (EXYNOS_CIOCBSA1 + (__x) * 4) : \
307 (EXYNOS_CIOCBSA5 + ((__x) - DEF_PP) * 4))
308#define EXYNOS_CIOCRSA(__x) \
309 (((__x) < DEF_PP) ? \
310 (EXYNOS_CIOCRSA1 + (__x) * 4) : \
311 (EXYNOS_CIOCRSA5 + ((__x) - DEF_PP) * 4))
312/* Number of Default PingPong Memory for input DMA */
313#define DEF_IPP 1
314#define EXYNOS_CIIYSA(__x) \
315 (((__x) < DEF_IPP) ? \
316 (EXYNOS_CIIYSA0) : (EXYNOS_CIIYSA1))
317#define EXYNOS_CIICBSA(__x) \
318 (((__x) < DEF_IPP) ? \
319 (EXYNOS_CIICBSA0) : (EXYNOS_CIICBSA1))
320#define EXYNOS_CIICRSA(__x) \
321 (((__x) < DEF_IPP) ? \
322 (EXYNOS_CIICRSA0) : (EXYNOS_CIICRSA1))
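
/*
 * Editor's sketch (not part of this header): how the ping-pong selector
 * macros above are meant to be used. Indices 0..DEF_PP-1 pick the original
 * CIOYSA1..CIOYSA4 bank defined earlier in this file; indices DEF_PP..31
 * pick the FIMC v5.1 CIOYSA5..CIOYSA32 bank added above. The helper name
 * is hypothetical; writel() comes from <linux/io.h>.
 */
static inline void fimc_sketch_set_dst_addr(void __iomem *regs,
					    u32 y, u32 cb, u32 cr,
					    unsigned int buf_id)
{
	writel(y, regs + EXYNOS_CIOYSA(buf_id));
	writel(cb, regs + EXYNOS_CIOCBSA(buf_id));
	writel(cr, regs + EXYNOS_CIOCRSA(buf_id));
}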
323
324#define EXYNOS_CISRCFMT_SOURCEHSIZE(x) ((x) << 16)
325#define EXYNOS_CISRCFMT_SOURCEVSIZE(x) ((x) << 0)
326
327#define EXYNOS_CIWDOFST_WINHOROFST(x) ((x) << 16)
328#define EXYNOS_CIWDOFST_WINVEROFST(x) ((x) << 0)
329
330#define EXYNOS_CIWDOFST2_WINHOROFST2(x) ((x) << 16)
331#define EXYNOS_CIWDOFST2_WINVEROFST2(x) ((x) << 0)
332
333#define EXYNOS_CITRGFMT_TARGETHSIZE(x) (((x) & 0x1fff) << 16)
334#define EXYNOS_CITRGFMT_TARGETVSIZE(x) (((x) & 0x1fff) << 0)
335
336#define EXYNOS_CISCPRERATIO_SHFACTOR(x) ((x) << 28)
337#define EXYNOS_CISCPRERATIO_PREHORRATIO(x) ((x) << 16)
338#define EXYNOS_CISCPRERATIO_PREVERRATIO(x) ((x) << 0)
339
340#define EXYNOS_CISCPREDST_PREDSTWIDTH(x) ((x) << 16)
341#define EXYNOS_CISCPREDST_PREDSTHEIGHT(x) ((x) << 0)
342
343#define EXYNOS_CISCCTRL_MAINHORRATIO(x) ((x) << 16)
344#define EXYNOS_CISCCTRL_MAINVERRATIO(x) ((x) << 0)
345
346#define EXYNOS_CITAREA_TARGET_AREA(x) ((x) << 0)
347
348#define EXYNOS_CISTATUS_GET_FRAME_COUNT(x) (((x) >> 26) & 0x3)
349#define EXYNOS_CISTATUS_GET_FRAME_END(x) (((x) >> 17) & 0x1)
350#define EXYNOS_CISTATUS_GET_LAST_CAPTURE_END(x) (((x) >> 16) & 0x1)
351#define EXYNOS_CISTATUS_GET_LCD_STATUS(x) (((x) >> 9) & 0x1)
352#define EXYNOS_CISTATUS_GET_ENVID_STATUS(x) (((x) >> 8) & 0x1)
353
354#define EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(x) (((x) >> 7) & 0x3f)
355#define EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(x) ((x) & 0x3f)
356
357#define EXYNOS_CIIMGEFF_FIN(x)			(((x) & 0x7) << 26)
358#define EXYNOS_CIIMGEFF_PAT_CB(x) ((x) << 13)
359#define EXYNOS_CIIMGEFF_PAT_CR(x) ((x) << 0)
360
361#define EXYNOS_CIILINESKIP(x) (((x) & 0xf) << 24)
362
363#define EXYNOS_CIREAL_ISIZE_HEIGHT(x) ((x) << 16)
364#define EXYNOS_CIREAL_ISIZE_WIDTH(x) ((x) << 0)
365
366#define EXYNOS_MSCTRL_SUCCESSIVE_COUNT(x) ((x) << 24)
367#define EXYNOS_MSCTRL_GET_INDMA_STATUS(x) ((x) & 0x1)
368
369#define EXYNOS_CIOYOFF_VERTICAL(x) ((x) << 16)
370#define EXYNOS_CIOYOFF_HORIZONTAL(x) ((x) << 0)
371
372#define EXYNOS_CIOCBOFF_VERTICAL(x) ((x) << 16)
373#define EXYNOS_CIOCBOFF_HORIZONTAL(x) ((x) << 0)
374
375#define EXYNOS_CIOCROFF_VERTICAL(x) ((x) << 16)
376#define EXYNOS_CIOCROFF_HORIZONTAL(x) ((x) << 0)
377
378#define EXYNOS_CIIYOFF_VERTICAL(x) ((x) << 16)
379#define EXYNOS_CIIYOFF_HORIZONTAL(x) ((x) << 0)
380
381#define EXYNOS_CIICBOFF_VERTICAL(x) ((x) << 16)
382#define EXYNOS_CIICBOFF_HORIZONTAL(x) ((x) << 0)
383
384#define EXYNOS_CIICROFF_VERTICAL(x) ((x) << 16)
385#define EXYNOS_CIICROFF_HORIZONTAL(x) ((x) << 0)
386
387#define EXYNOS_ORGISIZE_VERTICAL(x) ((x) << 16)
388#define EXYNOS_ORGISIZE_HORIZONTAL(x) ((x) << 0)
389
390#define EXYNOS_ORGOSIZE_VERTICAL(x) ((x) << 16)
391#define EXYNOS_ORGOSIZE_HORIZONTAL(x) ((x) << 0)
392
393#define EXYNOS_CIEXTEN_TARGETH_EXT(x) ((((x) & 0x2000) >> 13) << 26)
394#define EXYNOS_CIEXTEN_TARGETV_EXT(x) ((((x) & 0x2000) >> 13) << 24)
395#define EXYNOS_CIEXTEN_MAINHORRATIO_EXT(x) (((x) & 0x3F) << 10)
396#define EXYNOS_CIEXTEN_MAINVERRATIO_EXT(x) ((x) & 0x3F)
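
/*
 * Editor's note (worked example, not part of this header): the *_EXT
 * macros above carry bit 13 of the 14-bit target sizes, because the
 * CITRGFMT fields hold only the low 13 bits (mask 0x1fff). For a width
 * of 0x2500 (9472 pixels):
 *
 *   EXYNOS_CITRGFMT_TARGETHSIZE(0x2500) = (0x2500 & 0x1fff) << 16
 *                                       = 0x0500 << 16   (low 13 bits)
 *   EXYNOS_CIEXTEN_TARGETH_EXT(0x2500)  = ((0x2500 & 0x2000) >> 13) << 26
 *                                       = 1 << 26        (the 14th bit)
 */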
397
398/*
399 * Bit definition part
400 */
401/* Source format register */
402#define EXYNOS_CISRCFMT_ITU601_8BIT (1 << 31)
403#define EXYNOS_CISRCFMT_ITU656_8BIT (0 << 31)
404#define EXYNOS_CISRCFMT_ITU601_16BIT (1 << 29)
405#define EXYNOS_CISRCFMT_ORDER422_YCBYCR (0 << 14)
406#define EXYNOS_CISRCFMT_ORDER422_YCRYCB (1 << 14)
407#define EXYNOS_CISRCFMT_ORDER422_CBYCRY (2 << 14)
408#define EXYNOS_CISRCFMT_ORDER422_CRYCBY (3 << 14)
409/* ITU601 16bit only */
410#define EXYNOS_CISRCFMT_ORDER422_Y4CBCRCBCR (0 << 14)
411/* ITU601 16bit only */
412#define EXYNOS_CISRCFMT_ORDER422_Y4CRCBCRCB (1 << 14)
413
414/* Window offset register */
415#define EXYNOS_CIWDOFST_WINOFSEN (1 << 31)
416#define EXYNOS_CIWDOFST_CLROVFIY (1 << 30)
417#define EXYNOS_CIWDOFST_CLROVRLB (1 << 29)
418#define EXYNOS_CIWDOFST_WINHOROFST_MASK (0x7ff << 16)
419#define EXYNOS_CIWDOFST_CLROVFICB (1 << 15)
420#define EXYNOS_CIWDOFST_CLROVFICR (1 << 14)
421#define EXYNOS_CIWDOFST_WINVEROFST_MASK (0xfff << 0)
422
423/* Global control register */
424#define EXYNOS_CIGCTRL_SWRST (1 << 31)
425#define EXYNOS_CIGCTRL_CAMRST_A (1 << 30)
426#define EXYNOS_CIGCTRL_SELCAM_ITU_B (0 << 29)
427#define EXYNOS_CIGCTRL_SELCAM_ITU_A (1 << 29)
428#define EXYNOS_CIGCTRL_SELCAM_ITU_MASK (1 << 29)
429#define EXYNOS_CIGCTRL_TESTPATTERN_NORMAL (0 << 27)
430#define EXYNOS_CIGCTRL_TESTPATTERN_COLOR_BAR (1 << 27)
431#define EXYNOS_CIGCTRL_TESTPATTERN_HOR_INC (2 << 27)
432#define EXYNOS_CIGCTRL_TESTPATTERN_VER_INC (3 << 27)
433#define EXYNOS_CIGCTRL_TESTPATTERN_MASK (3 << 27)
434#define EXYNOS_CIGCTRL_TESTPATTERN_SHIFT (27)
435#define EXYNOS_CIGCTRL_INVPOLPCLK (1 << 26)
436#define EXYNOS_CIGCTRL_INVPOLVSYNC (1 << 25)
437#define EXYNOS_CIGCTRL_INVPOLHREF (1 << 24)
438#define EXYNOS_CIGCTRL_IRQ_OVFEN (1 << 22)
439#define EXYNOS_CIGCTRL_HREF_MASK (1 << 21)
440#define EXYNOS_CIGCTRL_IRQ_EDGE (0 << 20)
441#define EXYNOS_CIGCTRL_IRQ_LEVEL (1 << 20)
442#define EXYNOS_CIGCTRL_IRQ_CLR (1 << 19)
443#define EXYNOS_CIGCTRL_IRQ_END_DISABLE (1 << 18)
444#define EXYNOS_CIGCTRL_IRQ_DISABLE (0 << 16)
445#define EXYNOS_CIGCTRL_IRQ_ENABLE (1 << 16)
446#define EXYNOS_CIGCTRL_SHADOW_DISABLE (1 << 12)
447#define EXYNOS_CIGCTRL_CAM_JPEG (1 << 8)
448#define EXYNOS_CIGCTRL_SELCAM_MIPI_B (0 << 7)
449#define EXYNOS_CIGCTRL_SELCAM_MIPI_A (1 << 7)
450#define EXYNOS_CIGCTRL_SELCAM_MIPI_MASK (1 << 7)
451#define EXYNOS_CIGCTRL_SELWB_CAMIF_CAMERA (0 << 6)
452#define EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK (1 << 6)
453#define EXYNOS_CIGCTRL_SELWRITEBACK_MASK (1 << 10)
454#define EXYNOS_CIGCTRL_SELWRITEBACK_A (1 << 10)
455#define EXYNOS_CIGCTRL_SELWRITEBACK_B (0 << 10)
456#define EXYNOS_CIGCTRL_SELWB_CAMIF_MASK (1 << 6)
457#define EXYNOS_CIGCTRL_CSC_ITU601 (0 << 5)
458#define EXYNOS_CIGCTRL_CSC_ITU709 (1 << 5)
459#define EXYNOS_CIGCTRL_CSC_MASK (1 << 5)
460#define EXYNOS_CIGCTRL_INVPOLHSYNC (1 << 4)
461#define EXYNOS_CIGCTRL_SELCAM_FIMC_ITU (0 << 3)
462#define EXYNOS_CIGCTRL_SELCAM_FIMC_MIPI (1 << 3)
463#define EXYNOS_CIGCTRL_SELCAM_FIMC_MASK (1 << 3)
464#define EXYNOS_CIGCTRL_PROGRESSIVE (0 << 0)
465#define EXYNOS_CIGCTRL_INTERLACE (1 << 0)
466
467/* Window offset2 register */
468#define EXYNOS_CIWDOFST_WINHOROFST2_MASK (0xfff << 16)
469#define EXYNOS_CIWDOFST_WINVEROFST2_MASK	(0xfff << 0)
470
471/* Target format register */
472#define EXYNOS_CITRGFMT_INROT90_CLOCKWISE (1 << 31)
473#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420 (0 << 29)
474#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422 (1 << 29)
475#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE (2 << 29)
476#define EXYNOS_CITRGFMT_OUTFORMAT_RGB (3 << 29)
477#define EXYNOS_CITRGFMT_OUTFORMAT_MASK (3 << 29)
478#define EXYNOS_CITRGFMT_FLIP_SHIFT (14)
479#define EXYNOS_CITRGFMT_FLIP_NORMAL (0 << 14)
480#define EXYNOS_CITRGFMT_FLIP_X_MIRROR (1 << 14)
481#define EXYNOS_CITRGFMT_FLIP_Y_MIRROR (2 << 14)
482#define EXYNOS_CITRGFMT_FLIP_180 (3 << 14)
483#define EXYNOS_CITRGFMT_FLIP_MASK (3 << 14)
484#define EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE (1 << 13)
485#define EXYNOS_CITRGFMT_TARGETV_MASK (0x1fff << 0)
486#define EXYNOS_CITRGFMT_TARGETH_MASK (0x1fff << 16)
487
488/* Output DMA control register */
489#define EXYNOS_CIOCTRL_WEAVE_OUT (1 << 31)
490#define EXYNOS_CIOCTRL_WEAVE_MASK (1 << 31)
491#define EXYNOS_CIOCTRL_LASTENDEN (1 << 30)
492#define EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR (0 << 24)
493#define EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB (1 << 24)
494#define EXYNOS_CIOCTRL_ORDER2P_MSB_CRCB (2 << 24)
495#define EXYNOS_CIOCTRL_ORDER2P_MSB_CBCR (3 << 24)
496#define EXYNOS_CIOCTRL_ORDER2P_SHIFT (24)
497#define EXYNOS_CIOCTRL_ORDER2P_MASK (3 << 24)
498#define EXYNOS_CIOCTRL_YCBCR_3PLANE (0 << 3)
499#define EXYNOS_CIOCTRL_YCBCR_2PLANE (1 << 3)
500#define EXYNOS_CIOCTRL_YCBCR_PLANE_MASK (1 << 3)
501#define EXYNOS_CIOCTRL_LASTIRQ_ENABLE (1 << 2)
502#define EXYNOS_CIOCTRL_ALPHA_OUT (0xff << 4)
503#define EXYNOS_CIOCTRL_ORDER422_YCBYCR (0 << 0)
504#define EXYNOS_CIOCTRL_ORDER422_YCRYCB (1 << 0)
505#define EXYNOS_CIOCTRL_ORDER422_CBYCRY (2 << 0)
506#define EXYNOS_CIOCTRL_ORDER422_CRYCBY (3 << 0)
507#define EXYNOS_CIOCTRL_ORDER422_MASK (3 << 0)
508
509/* Main scaler control register */
510#define EXYNOS_CISCCTRL_SCALERBYPASS (1 << 31)
511#define EXYNOS_CISCCTRL_SCALEUP_H (1 << 30)
512#define EXYNOS_CISCCTRL_SCALEUP_V (1 << 29)
513#define EXYNOS_CISCCTRL_CSCR2Y_NARROW (0 << 28)
514#define EXYNOS_CISCCTRL_CSCR2Y_WIDE (1 << 28)
515#define EXYNOS_CISCCTRL_CSCY2R_NARROW (0 << 27)
516#define EXYNOS_CISCCTRL_CSCY2R_WIDE (1 << 27)
517#define EXYNOS_CISCCTRL_LCDPATHEN_FIFO (1 << 26)
518#define EXYNOS_CISCCTRL_PROGRESSIVE (0 << 25)
519#define EXYNOS_CISCCTRL_INTERLACE (1 << 25)
520#define EXYNOS_CISCCTRL_SCAN_MASK (1 << 25)
521#define EXYNOS_CISCCTRL_SCALERSTART (1 << 15)
522#define EXYNOS_CISCCTRL_INRGB_FMT_RGB565 (0 << 13)
523#define EXYNOS_CISCCTRL_INRGB_FMT_RGB666 (1 << 13)
524#define EXYNOS_CISCCTRL_INRGB_FMT_RGB888 (2 << 13)
525#define EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK (3 << 13)
526#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565 (0 << 11)
527#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB666 (1 << 11)
528#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 (2 << 11)
529#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK (3 << 11)
530#define EXYNOS_CISCCTRL_EXTRGB_NORMAL (0 << 10)
531#define EXYNOS_CISCCTRL_EXTRGB_EXTENSION (1 << 10)
532#define EXYNOS_CISCCTRL_ONE2ONE (1 << 9)
533#define EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK (0x1ff << 0)
534#define EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK (0x1ff << 16)
535
536/* Status register */
537#define EXYNOS_CISTATUS_OVFIY (1 << 31)
538#define EXYNOS_CISTATUS_OVFICB (1 << 30)
539#define EXYNOS_CISTATUS_OVFICR (1 << 29)
540#define EXYNOS_CISTATUS_VSYNC (1 << 28)
541#define EXYNOS_CISTATUS_SCALERSTART (1 << 26)
542#define EXYNOS_CISTATUS_WINOFSTEN (1 << 25)
543#define EXYNOS_CISTATUS_IMGCPTEN (1 << 22)
544#define EXYNOS_CISTATUS_IMGCPTENSC (1 << 21)
545#define EXYNOS_CISTATUS_VSYNC_A (1 << 20)
546#define EXYNOS_CISTATUS_VSYNC_B (1 << 19)
547#define EXYNOS_CISTATUS_OVRLB (1 << 18)
548#define EXYNOS_CISTATUS_FRAMEEND (1 << 17)
549#define EXYNOS_CISTATUS_LASTCAPTUREEND (1 << 16)
550#define EXYNOS_CISTATUS_VVALID_A (1 << 15)
551#define EXYNOS_CISTATUS_VVALID_B (1 << 14)
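
/*
 * Editor's sketch (not part of this header): a typical frame-end test
 * built from the status bits above, acknowledged by writing the bit back
 * as zero. EXYNOS_CISTATUS is the status register offset defined earlier
 * in this file; the helper name is hypothetical.
 */
static inline bool fimc_sketch_frame_end(void __iomem *regs)
{
	u32 cfg = readl(regs + EXYNOS_CISTATUS);

	if (!(cfg & EXYNOS_CISTATUS_FRAMEEND))
		return false;

	cfg &= ~EXYNOS_CISTATUS_FRAMEEND;	/* ack the frame-end bit */
	writel(cfg, regs + EXYNOS_CISTATUS);
	return true;
}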
552
553/* Image capture enable register */
554#define EXYNOS_CIIMGCPT_IMGCPTEN (1 << 31)
555#define EXYNOS_CIIMGCPT_IMGCPTEN_SC (1 << 30)
556#define EXYNOS_CIIMGCPT_CPT_FREN_ENABLE (1 << 25)
557#define EXYNOS_CIIMGCPT_CPT_FRMOD_EN (0 << 18)
558#define EXYNOS_CIIMGCPT_CPT_FRMOD_CNT (1 << 18)
559
560/* Image effects register */
561#define EXYNOS_CIIMGEFF_IE_DISABLE (0 << 30)
562#define EXYNOS_CIIMGEFF_IE_ENABLE (1 << 30)
563#define EXYNOS_CIIMGEFF_IE_SC_BEFORE (0 << 29)
564#define EXYNOS_CIIMGEFF_IE_SC_AFTER (1 << 29)
565#define EXYNOS_CIIMGEFF_FIN_BYPASS (0 << 26)
566#define EXYNOS_CIIMGEFF_FIN_ARBITRARY (1 << 26)
567#define EXYNOS_CIIMGEFF_FIN_NEGATIVE (2 << 26)
568#define EXYNOS_CIIMGEFF_FIN_ARTFREEZE (3 << 26)
569#define EXYNOS_CIIMGEFF_FIN_EMBOSSING (4 << 26)
570#define EXYNOS_CIIMGEFF_FIN_SILHOUETTE (5 << 26)
571#define EXYNOS_CIIMGEFF_FIN_MASK (7 << 26)
572#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK	((0xff << 13) | (0xff << 0))
573
574/* Real input DMA size register */
575#define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE (1 << 31)
576#define EXYNOS_CIREAL_ISIZE_ADDR_CH_DISABLE (1 << 30)
577#define EXYNOS_CIREAL_ISIZE_HEIGHT_MASK (0x3FFF << 16)
578#define EXYNOS_CIREAL_ISIZE_WIDTH_MASK (0x3FFF << 0)
579
580/* Input DMA control register */
581#define EXYNOS_MSCTRL_FIELD_MASK (1 << 31)
582#define EXYNOS_MSCTRL_FIELD_WEAVE (1 << 31)
583#define EXYNOS_MSCTRL_FIELD_NORMAL (0 << 31)
584#define EXYNOS_MSCTRL_BURST_CNT (24)
585#define EXYNOS_MSCTRL_BURST_CNT_MASK (0xf << 24)
586#define EXYNOS_MSCTRL_ORDER2P_LSB_CBCR (0 << 16)
587#define EXYNOS_MSCTRL_ORDER2P_LSB_CRCB (1 << 16)
588#define EXYNOS_MSCTRL_ORDER2P_MSB_CRCB (2 << 16)
589#define EXYNOS_MSCTRL_ORDER2P_MSB_CBCR (3 << 16)
590#define EXYNOS_MSCTRL_ORDER2P_SHIFT (16)
591#define EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK (0x3 << 16)
592#define EXYNOS_MSCTRL_C_INT_IN_3PLANE (0 << 15)
593#define EXYNOS_MSCTRL_C_INT_IN_2PLANE (1 << 15)
594#define EXYNOS_MSCTRL_FLIP_SHIFT (13)
595#define EXYNOS_MSCTRL_FLIP_NORMAL (0 << 13)
596#define EXYNOS_MSCTRL_FLIP_X_MIRROR (1 << 13)
597#define EXYNOS_MSCTRL_FLIP_Y_MIRROR (2 << 13)
598#define EXYNOS_MSCTRL_FLIP_180 (3 << 13)
599#define EXYNOS_MSCTRL_FLIP_MASK (3 << 13)
600#define EXYNOS_MSCTRL_ORDER422_CRYCBY (0 << 4)
601#define EXYNOS_MSCTRL_ORDER422_YCRYCB (1 << 4)
602#define EXYNOS_MSCTRL_ORDER422_CBYCRY (2 << 4)
603#define EXYNOS_MSCTRL_ORDER422_YCBYCR (3 << 4)
604#define EXYNOS_MSCTRL_INPUT_EXTCAM (0 << 3)
605#define EXYNOS_MSCTRL_INPUT_MEMORY (1 << 3)
606#define EXYNOS_MSCTRL_INPUT_MASK (1 << 3)
607#define EXYNOS_MSCTRL_INFORMAT_YCBCR420 (0 << 1)
608#define EXYNOS_MSCTRL_INFORMAT_YCBCR422 (1 << 1)
609#define EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE (2 << 1)
610#define EXYNOS_MSCTRL_INFORMAT_RGB (3 << 1)
611#define EXYNOS_MSCTRL_ENVID (1 << 0)
612
613/* DMA parameter register */
614#define EXYNOS_CIDMAPARAM_R_MODE_LINEAR (0 << 29)
615#define EXYNOS_CIDMAPARAM_R_MODE_CONFTILE (1 << 29)
616#define EXYNOS_CIDMAPARAM_R_MODE_16X16 (2 << 29)
617#define EXYNOS_CIDMAPARAM_R_MODE_64X32 (3 << 29)
618#define EXYNOS_CIDMAPARAM_R_MODE_MASK (3 << 29)
619#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_64 (0 << 24)
620#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_128 (1 << 24)
621#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_256 (2 << 24)
622#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_512 (3 << 24)
623#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_1024 (4 << 24)
624#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_2048 (5 << 24)
625#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_4096 (6 << 24)
626#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_1 (0 << 20)
627#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_2 (1 << 20)
628#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_4 (2 << 20)
629#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_8 (3 << 20)
630#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_16 (4 << 20)
631#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_32 (5 << 20)
632#define EXYNOS_CIDMAPARAM_W_MODE_LINEAR (0 << 13)
633#define EXYNOS_CIDMAPARAM_W_MODE_CONFTILE (1 << 13)
634#define EXYNOS_CIDMAPARAM_W_MODE_16X16 (2 << 13)
635#define EXYNOS_CIDMAPARAM_W_MODE_64X32 (3 << 13)
636#define EXYNOS_CIDMAPARAM_W_MODE_MASK (3 << 13)
637#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_64 (0 << 8)
638#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_128 (1 << 8)
639#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_256 (2 << 8)
640#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_512 (3 << 8)
641#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_1024 (4 << 8)
642#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_2048 (5 << 8)
643#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_4096 (6 << 8)
644#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_1 (0 << 4)
645#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_2 (1 << 4)
646#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_4 (2 << 4)
647#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_8 (3 << 4)
648#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_16 (4 << 4)
649#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_32 (5 << 4)
650
651/* Gathering Extension register */
652#define EXYNOS_CIEXTEN_TARGETH_EXT_MASK (1 << 26)
653#define EXYNOS_CIEXTEN_TARGETV_EXT_MASK (1 << 24)
654#define EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK (0x3F << 10)
655#define EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK (0x3F)
656#define EXYNOS_CIEXTEN_YUV444_OUT (1 << 22)
657
658/* FIMC Clock Source Select register */
659#define EXYNOS_CLKSRC_HCLK (0 << 1)
660#define EXYNOS_CLKSRC_HCLK_MASK (1 << 1)
661#define EXYNOS_CLKSRC_SCLK (1 << 1)
662
663/* SYSREG for FIMC writeback */
664#define SYSREG_CAMERA_BLK (S3C_VA_SYS + 0x0218)
665#define SYSREG_ISP_BLK (S3C_VA_SYS + 0x020c)
666#define SYSREG_FIMD0WB_DEST_MASK (0x3 << 23)
667#define SYSREG_FIMD0WB_DEST_SHIFT 23
668
669#endif /* EXYNOS_REGS_FIMC_H */
diff --git a/drivers/gpu/drm/exynos/regs-gsc.h b/drivers/gpu/drm/exynos/regs-gsc.h
new file mode 100644
index 000000000000..9ad592707aaf
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-gsc.h
@@ -0,0 +1,284 @@
1/* linux/drivers/gpu/drm/exynos/regs-gsc.h
2 *
3 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Register definition file for Samsung G-Scaler driver
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#ifndef EXYNOS_REGS_GSC_H_
14#define EXYNOS_REGS_GSC_H_
15
16/* G-Scaler enable */
17#define GSC_ENABLE 0x00
18#define GSC_ENABLE_PP_UPDATE_TIME_MASK (1 << 9)
19#define GSC_ENABLE_PP_UPDATE_TIME_CURR (0 << 9)
20#define GSC_ENABLE_PP_UPDATE_TIME_EOPAS (1 << 9)
21#define GSC_ENABLE_CLK_GATE_MODE_MASK (1 << 8)
22#define GSC_ENABLE_CLK_GATE_MODE_FREE (1 << 8)
23#define GSC_ENABLE_IPC_MODE_MASK (1 << 7)
24#define GSC_ENABLE_NORM_MODE (0 << 7)
25#define GSC_ENABLE_IPC_MODE (1 << 7)
26#define GSC_ENABLE_PP_UPDATE_MODE_MASK (1 << 6)
27#define GSC_ENABLE_PP_UPDATE_FIRE_MODE (1 << 6)
28#define GSC_ENABLE_IN_PP_UPDATE (1 << 5)
29#define GSC_ENABLE_ON_CLEAR_MASK (1 << 4)
30#define GSC_ENABLE_ON_CLEAR_ONESHOT (1 << 4)
31#define GSC_ENABLE_QOS_ENABLE (1 << 3)
32#define GSC_ENABLE_OP_STATUS (1 << 2)
33#define GSC_ENABLE_SFR_UPDATE (1 << 1)
34#define GSC_ENABLE_ON (1 << 0)
35
36/* G-Scaler S/W reset */
37#define GSC_SW_RESET 0x04
38#define GSC_SW_RESET_SRESET (1 << 0)
39
40/* G-Scaler IRQ */
41#define GSC_IRQ 0x08
42#define GSC_IRQ_STATUS_OR_IRQ (1 << 17)
43#define GSC_IRQ_STATUS_OR_FRM_DONE (1 << 16)
44#define GSC_IRQ_OR_MASK (1 << 2)
45#define GSC_IRQ_FRMDONE_MASK (1 << 1)
46#define GSC_IRQ_ENABLE (1 << 0)
47
48/* G-Scaler input control */
49#define GSC_IN_CON 0x10
50#define GSC_IN_CHROM_STRIDE_SEL_MASK (1 << 20)
51#define GSC_IN_CHROM_STRIDE_SEPAR (1 << 20)
52#define GSC_IN_RB_SWAP_MASK (1 << 19)
53#define GSC_IN_RB_SWAP (1 << 19)
54#define GSC_IN_ROT_MASK (7 << 16)
55#define GSC_IN_ROT_270 (7 << 16)
56#define GSC_IN_ROT_90_YFLIP (6 << 16)
57#define GSC_IN_ROT_90_XFLIP (5 << 16)
58#define GSC_IN_ROT_90 (4 << 16)
59#define GSC_IN_ROT_180 (3 << 16)
60#define GSC_IN_ROT_YFLIP (2 << 16)
61#define GSC_IN_ROT_XFLIP (1 << 16)
62#define GSC_IN_RGB_TYPE_MASK (3 << 14)
63#define GSC_IN_RGB_HD_WIDE (3 << 14)
64#define GSC_IN_RGB_HD_NARROW (2 << 14)
65#define GSC_IN_RGB_SD_WIDE (1 << 14)
66#define GSC_IN_RGB_SD_NARROW (0 << 14)
67#define GSC_IN_YUV422_1P_ORDER_MASK (1 << 13)
68#define GSC_IN_YUV422_1P_ORDER_LSB_Y (0 << 13)
69#define GSC_IN_YUV422_1P_OEDER_LSB_C (1 << 13)
70#define GSC_IN_CHROMA_ORDER_MASK (1 << 12)
71#define GSC_IN_CHROMA_ORDER_CBCR (0 << 12)
72#define GSC_IN_CHROMA_ORDER_CRCB (1 << 12)
73#define GSC_IN_FORMAT_MASK (7 << 8)
74#define GSC_IN_XRGB8888 (0 << 8)
75#define GSC_IN_RGB565 (1 << 8)
76#define GSC_IN_YUV420_2P (2 << 8)
77#define GSC_IN_YUV420_3P (3 << 8)
78#define GSC_IN_YUV422_1P (4 << 8)
79#define GSC_IN_YUV422_2P (5 << 8)
80#define GSC_IN_YUV422_3P (6 << 8)
81#define GSC_IN_TILE_TYPE_MASK (1 << 4)
82#define GSC_IN_TILE_C_16x8 (0 << 4)
83#define GSC_IN_TILE_C_16x16 (1 << 4)
84#define GSC_IN_TILE_MODE (1 << 3)
85#define GSC_IN_LOCAL_SEL_MASK (3 << 1)
86#define GSC_IN_LOCAL_CAM3 (3 << 1)
87#define GSC_IN_LOCAL_FIMD_WB (2 << 1)
88#define GSC_IN_LOCAL_CAM1 (1 << 1)
89#define GSC_IN_LOCAL_CAM0 (0 << 1)
90#define GSC_IN_PATH_MASK (1 << 0)
91#define GSC_IN_PATH_LOCAL (1 << 0)
92#define GSC_IN_PATH_MEMORY (0 << 0)
93
94/* G-Scaler source image size */
95#define GSC_SRCIMG_SIZE 0x14
96#define GSC_SRCIMG_HEIGHT_MASK (0x1fff << 16)
97#define GSC_SRCIMG_HEIGHT(x) ((x) << 16)
98#define GSC_SRCIMG_WIDTH_MASK (0x3fff << 0)
99#define GSC_SRCIMG_WIDTH(x) ((x) << 0)
100
101/* G-Scaler source image offset */
102#define GSC_SRCIMG_OFFSET 0x18
103#define GSC_SRCIMG_OFFSET_Y_MASK (0x1fff << 16)
104#define GSC_SRCIMG_OFFSET_Y(x) ((x) << 16)
105#define GSC_SRCIMG_OFFSET_X_MASK (0x1fff << 0)
106#define GSC_SRCIMG_OFFSET_X(x) ((x) << 0)
107
108/* G-Scaler cropped source image size */
109#define GSC_CROPPED_SIZE 0x1C
110#define GSC_CROPPED_HEIGHT_MASK (0x1fff << 16)
111#define GSC_CROPPED_HEIGHT(x) ((x) << 16)
112#define GSC_CROPPED_WIDTH_MASK (0x1fff << 0)
113#define GSC_CROPPED_WIDTH(x) ((x) << 0)
114
115/* G-Scaler output control */
116#define GSC_OUT_CON 0x20
117#define GSC_OUT_GLOBAL_ALPHA_MASK (0xff << 24)
118#define GSC_OUT_GLOBAL_ALPHA(x) ((x) << 24)
119#define GSC_OUT_CHROM_STRIDE_SEL_MASK (1 << 13)
120#define GSC_OUT_CHROM_STRIDE_SEPAR (1 << 13)
121#define GSC_OUT_RB_SWAP_MASK (1 << 12)
122#define GSC_OUT_RB_SWAP (1 << 12)
123#define GSC_OUT_RGB_TYPE_MASK (3 << 10)
124#define GSC_OUT_RGB_HD_NARROW (3 << 10)
125#define GSC_OUT_RGB_HD_WIDE (2 << 10)
126#define GSC_OUT_RGB_SD_NARROW (1 << 10)
127#define GSC_OUT_RGB_SD_WIDE (0 << 10)
128#define GSC_OUT_YUV422_1P_ORDER_MASK (1 << 9)
129#define GSC_OUT_YUV422_1P_ORDER_LSB_Y (0 << 9)
130#define GSC_OUT_YUV422_1P_OEDER_LSB_C (1 << 9)
131#define GSC_OUT_CHROMA_ORDER_MASK (1 << 8)
132#define GSC_OUT_CHROMA_ORDER_CBCR (0 << 8)
133#define GSC_OUT_CHROMA_ORDER_CRCB (1 << 8)
134#define GSC_OUT_FORMAT_MASK (7 << 4)
135#define GSC_OUT_XRGB8888 (0 << 4)
136#define GSC_OUT_RGB565 (1 << 4)
137#define GSC_OUT_YUV420_2P (2 << 4)
138#define GSC_OUT_YUV420_3P (3 << 4)
139#define GSC_OUT_YUV422_1P (4 << 4)
140#define GSC_OUT_YUV422_2P (5 << 4)
141#define GSC_OUT_YUV444 (7 << 4)
142#define GSC_OUT_TILE_TYPE_MASK (1 << 2)
143#define GSC_OUT_TILE_C_16x8 (0 << 2)
144#define GSC_OUT_TILE_C_16x16 (1 << 2)
145#define GSC_OUT_TILE_MODE (1 << 1)
146#define GSC_OUT_PATH_MASK (1 << 0)
147#define GSC_OUT_PATH_LOCAL (1 << 0)
148#define GSC_OUT_PATH_MEMORY (0 << 0)
149
150/* G-Scaler scaled destination image size */
151#define GSC_SCALED_SIZE 0x24
152#define GSC_SCALED_HEIGHT_MASK (0x1fff << 16)
153#define GSC_SCALED_HEIGHT(x) ((x) << 16)
154#define GSC_SCALED_WIDTH_MASK (0x1fff << 0)
155#define GSC_SCALED_WIDTH(x) ((x) << 0)
156
157/* G-Scaler pre scale ratio */
158#define GSC_PRE_SCALE_RATIO 0x28
159#define GSC_PRESC_SHFACTOR_MASK (7 << 28)
160#define GSC_PRESC_SHFACTOR(x) ((x) << 28)
161#define GSC_PRESC_V_RATIO_MASK (7 << 16)
162#define GSC_PRESC_V_RATIO(x) ((x) << 16)
163#define GSC_PRESC_H_RATIO_MASK (7 << 0)
164#define GSC_PRESC_H_RATIO(x) ((x) << 0)
165
166/* G-Scaler main scale horizontal ratio */
167#define GSC_MAIN_H_RATIO 0x2C
168#define GSC_MAIN_H_RATIO_MASK (0xfffff << 0)
169#define GSC_MAIN_H_RATIO_VALUE(x) ((x) << 0)
170
171/* G-Scaler main scale vertical ratio */
172#define GSC_MAIN_V_RATIO 0x30
173#define GSC_MAIN_V_RATIO_MASK (0xfffff << 0)
174#define GSC_MAIN_V_RATIO_VALUE(x) ((x) << 0)
175
176/* G-Scaler input chrominance stride */
177#define GSC_IN_CHROM_STRIDE 0x3C
178#define GSC_IN_CHROM_STRIDE_MASK (0x3fff << 0)
179#define GSC_IN_CHROM_STRIDE_VALUE(x) ((x) << 0)
180
181/* G-Scaler destination image size */
182#define GSC_DSTIMG_SIZE 0x40
183#define GSC_DSTIMG_HEIGHT_MASK (0x1fff << 16)
184#define GSC_DSTIMG_HEIGHT(x) ((x) << 16)
185#define GSC_DSTIMG_WIDTH_MASK (0x1fff << 0)
186#define GSC_DSTIMG_WIDTH(x) ((x) << 0)
187
188/* G-Scaler destination image offset */
189#define GSC_DSTIMG_OFFSET 0x44
190#define GSC_DSTIMG_OFFSET_Y_MASK (0x1fff << 16)
191#define GSC_DSTIMG_OFFSET_Y(x) ((x) << 16)
192#define GSC_DSTIMG_OFFSET_X_MASK (0x1fff << 0)
193#define GSC_DSTIMG_OFFSET_X(x) ((x) << 0)
194
195/* G-Scaler output chrominance stride */
196#define GSC_OUT_CHROM_STRIDE 0x48
197#define GSC_OUT_CHROM_STRIDE_MASK (0x3fff << 0)
198#define GSC_OUT_CHROM_STRIDE_VALUE(x) ((x) << 0)
199
200/* G-Scaler input y address mask */
201#define GSC_IN_BASE_ADDR_Y_MASK 0x4C
202/* G-Scaler input y base address */
203#define GSC_IN_BASE_ADDR_Y(n) (0x50 + (n) * 0x4)
204/* G-Scaler input y base current address */
205#define GSC_IN_BASE_ADDR_Y_CUR(n) (0x60 + (n) * 0x4)
206
207/* G-Scaler input cb address mask */
208#define GSC_IN_BASE_ADDR_CB_MASK 0x7C
209/* G-Scaler input cb base address */
210#define GSC_IN_BASE_ADDR_CB(n) (0x80 + (n) * 0x4)
211/* G-Scaler input cb base current address */
212#define GSC_IN_BASE_ADDR_CB_CUR(n) (0x90 + (n) * 0x4)
213
214/* G-Scaler input cr address mask */
215#define GSC_IN_BASE_ADDR_CR_MASK 0xAC
216/* G-Scaler input cr base address */
217#define GSC_IN_BASE_ADDR_CR(n) (0xB0 + (n) * 0x4)
218/* G-Scaler input cr base current address */
219#define GSC_IN_BASE_ADDR_CR_CUR(n) (0xC0 + (n) * 0x4)
220
221/* G-Scaler input address mask */
222#define GSC_IN_CURR_ADDR_INDEX (0xf << 24)
223#define GSC_IN_CURR_GET_INDEX(x) ((x) >> 24)
224#define GSC_IN_BASE_ADDR_PINGPONG(x) ((x) << 16)
225#define GSC_IN_BASE_ADDR_MASK (0xff << 0)
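
/*
 * Editor's sketch (not part of this header): one Y/Cb/Cr triple is
 * written per buffer slot through the (n)-indexed bank macros above.
 * The helper name is hypothetical; writel() comes from <linux/io.h>.
 */
static inline void gsc_sketch_set_in_addr(void __iomem *regs,
					  u32 y, u32 cb, u32 cr, int n)
{
	writel(y, regs + GSC_IN_BASE_ADDR_Y(n));
	writel(cb, regs + GSC_IN_BASE_ADDR_CB(n));
	writel(cr, regs + GSC_IN_BASE_ADDR_CR(n));
}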
226
227/* G-Scaler output y address mask */
228#define GSC_OUT_BASE_ADDR_Y_MASK 0x10C
229/* G-Scaler output y base address */
230#define GSC_OUT_BASE_ADDR_Y(n) (0x110 + (n) * 0x4)
231
232/* G-Scaler output cb address mask */
233#define GSC_OUT_BASE_ADDR_CB_MASK 0x15C
234/* G-Scaler output cb base address */
235#define GSC_OUT_BASE_ADDR_CB(n) (0x160 + (n) * 0x4)
236
237/* G-Scaler output cr address mask */
238#define GSC_OUT_BASE_ADDR_CR_MASK 0x1AC
239/* G-Scaler output cr base address */
240#define GSC_OUT_BASE_ADDR_CR(n) (0x1B0 + (n) * 0x4)
241
242/* G-Scaler output address mask */
243#define GSC_OUT_CURR_ADDR_INDEX (0xf << 24)
244#define GSC_OUT_CURR_GET_INDEX(x) ((x) >> 24)
245#define GSC_OUT_BASE_ADDR_PINGPONG(x) ((x) << 16)
246#define GSC_OUT_BASE_ADDR_MASK (0xffff << 0)
247
248/* G-Scaler horizontal scaling filter */
249#define GSC_HCOEF(n, s, x) (0x300 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300)
250
251/* G-Scaler vertical scaling filter */
252#define GSC_VCOEF(n, s, x) (0x200 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300)
253
254/* G-Scaler BUS control */
255#define GSC_BUSCON 0xA78
256#define GSC_BUSCON_INT_TIME_MASK (1 << 8)
257#define GSC_BUSCON_INT_DATA_TRANS (0 << 8)
258#define GSC_BUSCON_INT_AXI_RESPONSE (1 << 8)
259#define GSC_BUSCON_AWCACHE(x) ((x) << 4)
260#define GSC_BUSCON_ARCACHE(x) ((x) << 0)
261
262/* G-Scaler V position */
263#define GSC_VPOSITION 0xA7C
264#define GSC_VPOS_F(x) ((x) << 0)
265
266
267/* G-Scaler clock initial count */
268#define GSC_CLK_INIT_COUNT 0xC00
269#define GSC_CLK_GATE_MODE_INIT_CNT(x) ((x) << 0)
270
271/* G-Scaler clock snoop count */
272#define GSC_CLK_SNOOP_COUNT 0xC04
273#define GSC_CLK_GATE_MODE_SNOOP_CNT(x) ((x) << 0)
274
275/* SYSCON. GSCBLK_CFG */
276#define SYSREG_GSCBLK_CFG1 (S3C_VA_SYS + 0x0224)
277#define GSC_BLK_DISP1WB_DEST(x)		((x) << 10)
278#define GSC_BLK_SW_RESET_WB_DEST(x)	(1 << (18 + (x)))
279#define GSC_BLK_PXLASYNC_LO_MASK_WB(x)	(0 << (14 + (x)))
280#define GSC_BLK_GSCL_WB_IN_SRC_SEL(x)	(1 << (2 * (x)))
281#define SYSREG_GSCBLK_CFG2 (S3C_VA_SYS + 0x2000)
282#define PXLASYNC_LO_MASK_CAMIF_GSCL(x) (1 << (x))
283
284#endif /* EXYNOS_REGS_GSC_H_ */
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
index 9cc7c5e9718c..ef1b3eb3ba6e 100644
--- a/drivers/gpu/drm/exynos/regs-hdmi.h
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -176,6 +176,11 @@
176#define HDMI_PHY_CMU HDMI_CTRL_BASE(0x007C) 176#define HDMI_PHY_CMU HDMI_CTRL_BASE(0x007C)
177#define HDMI_CORE_RSTOUT HDMI_CTRL_BASE(0x0080) 177#define HDMI_CORE_RSTOUT HDMI_CTRL_BASE(0x0080)
178 178
179/* PHY Control bit definition */
180
181/* HDMI_PHY_CON_0 */
182#define HDMI_PHY_POWER_OFF_EN (1 << 0)
183
179/* Video related registers */ 184/* Video related registers */
180#define HDMI_YMAX HDMI_CORE_BASE(0x0060) 185#define HDMI_YMAX HDMI_CORE_BASE(0x0060)
181#define HDMI_YMIN HDMI_CORE_BASE(0x0064) 186#define HDMI_YMIN HDMI_CORE_BASE(0x0064)
@@ -298,14 +303,14 @@
298#define HDMI_AVI_HEADER1 HDMI_CORE_BASE(0x0714) 303#define HDMI_AVI_HEADER1 HDMI_CORE_BASE(0x0714)
299#define HDMI_AVI_HEADER2 HDMI_CORE_BASE(0x0718) 304#define HDMI_AVI_HEADER2 HDMI_CORE_BASE(0x0718)
300#define HDMI_AVI_CHECK_SUM HDMI_CORE_BASE(0x071C) 305#define HDMI_AVI_CHECK_SUM HDMI_CORE_BASE(0x071C)
301#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0720 + 4 * (n)) 306#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0720 + 4 * (n-1))
302 307
303#define HDMI_AUI_CON HDMI_CORE_BASE(0x0800) 308#define HDMI_AUI_CON HDMI_CORE_BASE(0x0800)
304#define HDMI_AUI_HEADER0 HDMI_CORE_BASE(0x0810) 309#define HDMI_AUI_HEADER0 HDMI_CORE_BASE(0x0810)
305#define HDMI_AUI_HEADER1 HDMI_CORE_BASE(0x0814) 310#define HDMI_AUI_HEADER1 HDMI_CORE_BASE(0x0814)
306#define HDMI_AUI_HEADER2 HDMI_CORE_BASE(0x0818) 311#define HDMI_AUI_HEADER2 HDMI_CORE_BASE(0x0818)
307#define HDMI_AUI_CHECK_SUM HDMI_CORE_BASE(0x081C) 312#define HDMI_AUI_CHECK_SUM HDMI_CORE_BASE(0x081C)
308#define HDMI_AUI_BYTE(n) HDMI_CORE_BASE(0x0820 + 4 * (n)) 313#define HDMI_AUI_BYTE(n) HDMI_CORE_BASE(0x0820 + 4 * (n-1))
309 314
310#define HDMI_MPG_CON HDMI_CORE_BASE(0x0900) 315#define HDMI_MPG_CON HDMI_CORE_BASE(0x0900)
311#define HDMI_MPG_CHECK_SUM HDMI_CORE_BASE(0x091C) 316#define HDMI_MPG_CHECK_SUM HDMI_CORE_BASE(0x091C)
@@ -338,6 +343,19 @@
338#define HDMI_AN_SEED_2 HDMI_CORE_BASE(0x0E60) 343#define HDMI_AN_SEED_2 HDMI_CORE_BASE(0x0E60)
339#define HDMI_AN_SEED_3 HDMI_CORE_BASE(0x0E64) 344#define HDMI_AN_SEED_3 HDMI_CORE_BASE(0x0E64)
340 345
346/* AVI bit definition */
347#define HDMI_AVI_CON_DO_NOT_TRANSMIT (0 << 1)
348#define HDMI_AVI_CON_EVERY_VSYNC (1 << 1)
349
350#define AVI_ACTIVE_FORMAT_VALID (1 << 4)
351#define AVI_UNDERSCANNED_DISPLAY_VALID (1 << 1)
352
353/* AUI bit definition */
354#define HDMI_AUI_CON_NO_TRAN (0 << 0)
355
356/* VSI bit definition */
357#define HDMI_VSI_CON_DO_NOT_TRANSMIT (0 << 0)
358
341/* HDCP related registers */ 359/* HDCP related registers */
342#define HDMI_HDCP_SHA1(n) HDMI_CORE_BASE(0x7000 + 4 * (n)) 360#define HDMI_HDCP_SHA1(n) HDMI_CORE_BASE(0x7000 + 4 * (n))
343#define HDMI_HDCP_KSV_LIST(n) HDMI_CORE_BASE(0x7050 + 4 * (n)) 361#define HDMI_HDCP_KSV_LIST(n) HDMI_CORE_BASE(0x7050 + 4 * (n))
diff --git a/drivers/gpu/drm/exynos/regs-rotator.h b/drivers/gpu/drm/exynos/regs-rotator.h
new file mode 100644
index 000000000000..a09ac6e180da
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-rotator.h
@@ -0,0 +1,73 @@
1/* drivers/gpu/drm/exynos/regs-rotator.h
2 *
3 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 *
6 * Register definition file for Samsung Rotator Interface (Rotator) driver
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#ifndef EXYNOS_REGS_ROTATOR_H
14#define EXYNOS_REGS_ROTATOR_H
15
16/* Configuration */
17#define ROT_CONFIG 0x00
18#define ROT_CONFIG_IRQ (3 << 8)
19
20/* Image Control */
21#define ROT_CONTROL 0x10
22#define ROT_CONTROL_PATTERN_WRITE (1 << 16)
23#define ROT_CONTROL_FMT_YCBCR420_2P (1 << 8)
24#define ROT_CONTROL_FMT_RGB888 (6 << 8)
25#define ROT_CONTROL_FMT_MASK (7 << 8)
26#define ROT_CONTROL_FLIP_VERTICAL (2 << 6)
27#define ROT_CONTROL_FLIP_HORIZONTAL (3 << 6)
28#define ROT_CONTROL_FLIP_MASK (3 << 6)
29#define ROT_CONTROL_ROT_90 (1 << 4)
30#define ROT_CONTROL_ROT_180 (2 << 4)
31#define ROT_CONTROL_ROT_270 (3 << 4)
32#define ROT_CONTROL_ROT_MASK (3 << 4)
33#define ROT_CONTROL_START (1 << 0)
34
35/* Status */
36#define ROT_STATUS 0x20
37#define ROT_STATUS_IRQ_PENDING(x) (1 << (x))
38#define ROT_STATUS_IRQ(x) (((x) >> 8) & 0x3)
39#define ROT_STATUS_IRQ_VAL_COMPLETE 1
40#define ROT_STATUS_IRQ_VAL_ILLEGAL 2
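
/*
 * Editor's sketch (not part of this header): decoding the result field
 * above in an interrupt handler. Bits [9:8] of ROT_STATUS report 1 on
 * normal completion and 2 on an illegal access; the exact acknowledge
 * sequence is hardware-specific and not shown here.
 */
static inline bool rot_sketch_completed(u32 status)
{
	return ROT_STATUS_IRQ(status) == ROT_STATUS_IRQ_VAL_COMPLETE;
}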
41
42/* Buffer Address */
43#define ROT_SRC_BUF_ADDR(n) (0x30 + ((n) << 2))
44#define ROT_DST_BUF_ADDR(n) (0x50 + ((n) << 2))
45
46/* Buffer Size */
47#define ROT_SRC_BUF_SIZE 0x3c
48#define ROT_DST_BUF_SIZE 0x5c
49#define ROT_SET_BUF_SIZE_H(x) ((x) << 16)
50#define ROT_SET_BUF_SIZE_W(x) ((x) << 0)
51#define ROT_GET_BUF_SIZE_H(x) ((x) >> 16)
52#define ROT_GET_BUF_SIZE_W(x) ((x) & 0xffff)
53
54/* Crop Position */
55#define ROT_SRC_CROP_POS 0x40
56#define ROT_DST_CROP_POS 0x60
57#define ROT_CROP_POS_Y(x) ((x) << 16)
58#define ROT_CROP_POS_X(x) ((x) << 0)
59
60/* Source Crop Size */
61#define ROT_SRC_CROP_SIZE 0x44
62#define ROT_SRC_CROP_SIZE_H(x) ((x) << 16)
63#define ROT_SRC_CROP_SIZE_W(x) ((x) << 0)
64
65/* Round to nearest aligned value */
66#define ROT_ALIGN(x, align, mask) (((x) + (1 << ((align) - 1))) & (mask))
67/* Minimum limit value */
68#define ROT_MIN(min, mask) (((min) + ~(mask)) & (mask))
69/* Maximum limit value */
70#define ROT_MAX(max, mask) ((max) & (mask))
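
/*
 * Editor's note (worked example, not part of this header): with
 * align = 2 and mask = ~0x3 (4-pixel granularity),
 *
 *   ROT_ALIGN(13, 2, ~0x3) = (13 + (1 << 1)) & ~0x3 = 15 & ~0x3 = 12
 *   ROT_ALIGN(15, 2, ~0x3) = (15 + 2) & ~0x3        = 17 & ~0x3 = 16
 *   ROT_MIN(13, ~0x3)      = (13 + 0x3) & ~0x3      = 16   (round up)
 *   ROT_MAX(13, ~0x3)      = 13 & ~0x3              = 12   (round down)
 */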
71
72#endif /* EXYNOS_REGS_ROTATOR_H */
73