author      Dave Airlie <airlied@redhat.com>    2014-09-21 20:07:25 -0400
committer   Dave Airlie <airlied@redhat.com>    2014-09-21 20:07:25 -0400
commit      42532512ee0eff90076fc7c49391e17b1d51d91b (patch)
tree        872b1c8a6fcd81d732a7b4977bad261f4137980e /drivers/gpu/drm
parent      6b654af5e694754672894c134eb50cfa2b79dc2e (diff)
parent      72ed6ccd086f679aa61c79cd3af733756b72429e (diff)
Merge branch 'exynos-drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos into drm-next
Sorry for being late. This pull request includes some enhancements for Exynos DRM, new feature support, cleanups and fixups as below:

- Consider low-power transmission for the drm_mipi_dsi module, and add non-continuous clock mode support to the Exynos MIPI DSI driver.
- Add Exynos3250 SoC support.
- Enhance and clean up the IPP framework and the FIMC driver.
- Update to use component match support and fix the de-initialization order.
- Remove the direct mmap interface and the related Exynos-specific code, and use the generic DRM mmap interface instead. The Exynos-specific interface will also be removed from the userspace library, libdrm, soon.
- Use universal planes, which allows replacing the fake primary plane with the real one.
- Some code cleanups and fixups.

* 'exynos-drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos: (40 commits)
  drm/exynos: switch to universal plane API
  drm/exynos: use drm generic mmap interface
  drm/exynos: remove DRM_EXYNOS_GEM_MAP_OFFSET ioctl
  drm/exynos: factor out initial setting of each driver
  drm/exynos/hdmi: unregister connector on removal
  drm/exynos/dp: unregister connector on removal
  drm/exynos/dpi: unregister connector and panel on removal
  drm/exynos/dsi: unregister connector on removal
  drm/exynos/fb: free exynos framebuffer on error
  drm/exynos/fbdev: fix fbdev gem object cleanup
  drm/exynos: fix drm driver de-initialization order
  drm/exynos/ipp: traverse ipp drivers list safely
  drm/exynos: update to use component match support
  drm/exynos/ipp: add file checks for ioctls
  drm/exynos/ipp: remove file argument from node related functions
  drm/exynos/fimc: fix source buffer registers
  drm/exynos/fimc: simplify buffer queuing
  drm/exynos/fimc: do not enable fimc twice
  drm/exynos/fimc: avoid clearing overflow bits
  drm/exynos/ipp: remove events during command cleaning
  ...
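As background for the low-power transmission and non-continuous clock items above: a DSI peripheral opts into both through the mode_flags of its mipi_dsi_device. A minimal sketch of a hypothetical panel driver's probe (the panel itself and its lane/format choices are made up; only the MIPI_DSI_* flags and mipi_dsi_attach() are the real kernel API):

    /* hypothetical panel driver: request LPM command transfers and a
     * non-continuous HS clock from the DSI host via mode_flags
     */
    static int example_panel_probe(struct mipi_dsi_device *dsi)
    {
    	dsi->lanes = 4;
    	dsi->format = MIPI_DSI_FMT_RGB888;
    	dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
    			  MIPI_DSI_MODE_LPM |              /* DCS reads/writes use low-power mode */
    			  MIPI_DSI_CLOCK_NON_CONTINUOUS;   /* host may gate the HS clock between transfers */

    	return mipi_dsi_attach(dsi);
    }

With MIPI_DSI_MODE_LPM set, the mipi_dsi_dcs_write()/mipi_dsi_dcs_read() helpers patched below mark each message with MIPI_DSI_MSG_USE_LPM, and an Exynos3250-class host (has_clklane_stop) honors MIPI_DSI_CLOCK_NON_CONTINUOUS by setting DSIM_CLKLANE_STOP.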
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/drm_mipi_dsi.c                   6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp_core.c          4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c        62
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dpi.c          6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c        103
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h          1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c         40
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c           1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c        4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c        90
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c        55
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c        106
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h         14
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c          3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.c        453
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.h          4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.c       19
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.h        3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.c      3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c        19
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c             4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c            3
22 files changed, 396 insertions(+), 607 deletions(-)
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 6aa6a9e95570..eb6dfe52cab2 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -231,6 +231,9 @@ ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, const void *data,
231 break; 231 break;
232 } 232 }
233 233
234 if (dsi->mode_flags & MIPI_DSI_MODE_LPM)
235 msg.flags = MIPI_DSI_MSG_USE_LPM;
236
234 return ops->transfer(dsi->host, &msg); 237 return ops->transfer(dsi->host, &msg);
235} 238}
236EXPORT_SYMBOL(mipi_dsi_dcs_write); 239EXPORT_SYMBOL(mipi_dsi_dcs_write);
@@ -260,6 +263,9 @@ ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
260 if (!ops || !ops->transfer) 263 if (!ops || !ops->transfer)
261 return -ENOSYS; 264 return -ENOSYS;
262 265
266 if (dsi->mode_flags & MIPI_DSI_MODE_LPM)
267 msg.flags = MIPI_DSI_MSG_USE_LPM;
268
263 return ops->transfer(dsi->host, &msg); 269 return ops->transfer(dsi->host, &msg);
264} 270}
265EXPORT_SYMBOL(mipi_dsi_dcs_read); 271EXPORT_SYMBOL(mipi_dsi_dcs_read);
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index 02602a8254c4..cd50ece31601 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -937,6 +937,8 @@ static enum drm_connector_status exynos_dp_detect(
937 937
938static void exynos_dp_connector_destroy(struct drm_connector *connector) 938static void exynos_dp_connector_destroy(struct drm_connector *connector)
939{ 939{
940 drm_connector_unregister(connector);
941 drm_connector_cleanup(connector);
940} 942}
941 943
942static struct drm_connector_funcs exynos_dp_connector_funcs = { 944static struct drm_connector_funcs exynos_dp_connector_funcs = {
@@ -1358,8 +1360,8 @@ static void exynos_dp_unbind(struct device *dev, struct device *master,
1358 1360
1359 exynos_dp_dpms(display, DRM_MODE_DPMS_OFF); 1361 exynos_dp_dpms(display, DRM_MODE_DPMS_OFF);
1360 1362
1363 exynos_dp_connector_destroy(&dp->connector);
1361 encoder->funcs->destroy(encoder); 1364 encoder->funcs->destroy(encoder);
1362 drm_connector_cleanup(&dp->connector);
1363} 1365}
1364 1366
1365static const struct component_ops exynos_dp_ops = { 1367static const struct component_ops exynos_dp_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index b68e58f78cd1..8e38e9f8e542 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -32,7 +32,6 @@ enum exynos_crtc_mode {
32 * Exynos specific crtc structure. 32 * Exynos specific crtc structure.
33 * 33 *
34 * @drm_crtc: crtc object. 34 * @drm_crtc: crtc object.
35 * @drm_plane: pointer of private plane object for this crtc
36 * @manager: the manager associated with this crtc 35 * @manager: the manager associated with this crtc
37 * @pipe: a crtc index created at load() with a new crtc object creation 36 * @pipe: a crtc index created at load() with a new crtc object creation
38 * and the crtc object would be set to private->crtc array 37 * and the crtc object would be set to private->crtc array
@@ -46,7 +45,6 @@ enum exynos_crtc_mode {
46 */ 45 */
47struct exynos_drm_crtc { 46struct exynos_drm_crtc {
48 struct drm_crtc drm_crtc; 47 struct drm_crtc drm_crtc;
49 struct drm_plane *plane;
50 struct exynos_drm_manager *manager; 48 struct exynos_drm_manager *manager;
51 unsigned int pipe; 49 unsigned int pipe;
52 unsigned int dpms; 50 unsigned int dpms;
@@ -94,12 +92,12 @@ static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
94 92
95 exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON); 93 exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
96 94
97 exynos_plane_commit(exynos_crtc->plane); 95 exynos_plane_commit(crtc->primary);
98 96
99 if (manager->ops->commit) 97 if (manager->ops->commit)
100 manager->ops->commit(manager); 98 manager->ops->commit(manager);
101 99
102 exynos_plane_dpms(exynos_crtc->plane, DRM_MODE_DPMS_ON); 100 exynos_plane_dpms(crtc->primary, DRM_MODE_DPMS_ON);
103} 101}
104 102
105static bool 103static bool
@@ -123,10 +121,9 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
123{ 121{
124 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); 122 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
125 struct exynos_drm_manager *manager = exynos_crtc->manager; 123 struct exynos_drm_manager *manager = exynos_crtc->manager;
126 struct drm_plane *plane = exynos_crtc->plane; 124 struct drm_framebuffer *fb = crtc->primary->fb;
127 unsigned int crtc_w; 125 unsigned int crtc_w;
128 unsigned int crtc_h; 126 unsigned int crtc_h;
129 int ret;
130 127
131 /* 128 /*
132 * copy the mode data adjusted by mode_fixup() into crtc->mode 129 * copy the mode data adjusted by mode_fixup() into crtc->mode
@@ -134,29 +131,21 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
134 */ 131 */
135 memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode)); 132 memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode));
136 133
137 crtc_w = crtc->primary->fb->width - x; 134 crtc_w = fb->width - x;
138 crtc_h = crtc->primary->fb->height - y; 135 crtc_h = fb->height - y;
139 136
140 if (manager->ops->mode_set) 137 if (manager->ops->mode_set)
141 manager->ops->mode_set(manager, &crtc->mode); 138 manager->ops->mode_set(manager, &crtc->mode);
142 139
143 ret = exynos_plane_mode_set(plane, crtc, crtc->primary->fb, 0, 0, crtc_w, crtc_h, 140 return exynos_plane_mode_set(crtc->primary, crtc, fb, 0, 0,
144 x, y, crtc_w, crtc_h); 141 crtc_w, crtc_h, x, y, crtc_w, crtc_h);
145 if (ret)
146 return ret;
147
148 plane->crtc = crtc;
149 plane->fb = crtc->primary->fb;
150 drm_framebuffer_reference(plane->fb);
151
152 return 0;
153} 142}
154 143
155static int exynos_drm_crtc_mode_set_commit(struct drm_crtc *crtc, int x, int y, 144static int exynos_drm_crtc_mode_set_commit(struct drm_crtc *crtc, int x, int y,
156 struct drm_framebuffer *old_fb) 145 struct drm_framebuffer *old_fb)
157{ 146{
158 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); 147 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
159 struct drm_plane *plane = exynos_crtc->plane; 148 struct drm_framebuffer *fb = crtc->primary->fb;
160 unsigned int crtc_w; 149 unsigned int crtc_w;
161 unsigned int crtc_h; 150 unsigned int crtc_h;
162 int ret; 151 int ret;
@@ -167,11 +156,11 @@ static int exynos_drm_crtc_mode_set_commit(struct drm_crtc *crtc, int x, int y,
167 return -EPERM; 156 return -EPERM;
168 } 157 }
169 158
170 crtc_w = crtc->primary->fb->width - x; 159 crtc_w = fb->width - x;
171 crtc_h = crtc->primary->fb->height - y; 160 crtc_h = fb->height - y;
172 161
173 ret = exynos_plane_mode_set(plane, crtc, crtc->primary->fb, 0, 0, crtc_w, crtc_h, 162 ret = exynos_plane_mode_set(crtc->primary, crtc, fb, 0, 0,
174 x, y, crtc_w, crtc_h); 163 crtc_w, crtc_h, x, y, crtc_w, crtc_h);
175 if (ret) 164 if (ret)
176 return ret; 165 return ret;
177 166
@@ -304,8 +293,7 @@ static int exynos_drm_crtc_set_property(struct drm_crtc *crtc,
304 exynos_drm_crtc_commit(crtc); 293 exynos_drm_crtc_commit(crtc);
305 break; 294 break;
306 case CRTC_MODE_BLANK: 295 case CRTC_MODE_BLANK:
307 exynos_plane_dpms(exynos_crtc->plane, 296 exynos_plane_dpms(crtc->primary, DRM_MODE_DPMS_OFF);
308 DRM_MODE_DPMS_OFF);
309 break; 297 break;
310 default: 298 default:
311 break; 299 break;
@@ -351,8 +339,10 @@ static void exynos_drm_crtc_attach_mode_property(struct drm_crtc *crtc)
351int exynos_drm_crtc_create(struct exynos_drm_manager *manager) 339int exynos_drm_crtc_create(struct exynos_drm_manager *manager)
352{ 340{
353 struct exynos_drm_crtc *exynos_crtc; 341 struct exynos_drm_crtc *exynos_crtc;
342 struct drm_plane *plane;
354 struct exynos_drm_private *private = manager->drm_dev->dev_private; 343 struct exynos_drm_private *private = manager->drm_dev->dev_private;
355 struct drm_crtc *crtc; 344 struct drm_crtc *crtc;
345 int ret;
356 346
357 exynos_crtc = kzalloc(sizeof(*exynos_crtc), GFP_KERNEL); 347 exynos_crtc = kzalloc(sizeof(*exynos_crtc), GFP_KERNEL);
358 if (!exynos_crtc) 348 if (!exynos_crtc)
@@ -364,11 +354,11 @@ int exynos_drm_crtc_create(struct exynos_drm_manager *manager)
364 exynos_crtc->dpms = DRM_MODE_DPMS_OFF; 354 exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
365 exynos_crtc->manager = manager; 355 exynos_crtc->manager = manager;
366 exynos_crtc->pipe = manager->pipe; 356 exynos_crtc->pipe = manager->pipe;
367 exynos_crtc->plane = exynos_plane_init(manager->drm_dev, 357 plane = exynos_plane_init(manager->drm_dev, 1 << manager->pipe,
368 1 << manager->pipe, true); 358 DRM_PLANE_TYPE_PRIMARY);
369 if (!exynos_crtc->plane) { 359 if (IS_ERR(plane)) {
370 kfree(exynos_crtc); 360 ret = PTR_ERR(plane);
371 return -ENOMEM; 361 goto err_plane;
372 } 362 }
373 363
374 manager->crtc = &exynos_crtc->drm_crtc; 364 manager->crtc = &exynos_crtc->drm_crtc;
@@ -376,12 +366,22 @@ int exynos_drm_crtc_create(struct exynos_drm_manager *manager)
376 366
377 private->crtc[manager->pipe] = crtc; 367 private->crtc[manager->pipe] = crtc;
378 368
379 drm_crtc_init(manager->drm_dev, crtc, &exynos_crtc_funcs); 369 ret = drm_crtc_init_with_planes(manager->drm_dev, crtc, plane, NULL,
370 &exynos_crtc_funcs);
371 if (ret < 0)
372 goto err_crtc;
373
380 drm_crtc_helper_add(crtc, &exynos_crtc_helper_funcs); 374 drm_crtc_helper_add(crtc, &exynos_crtc_helper_funcs);
381 375
382 exynos_drm_crtc_attach_mode_property(crtc); 376 exynos_drm_crtc_attach_mode_property(crtc);
383 377
384 return 0; 378 return 0;
379
380err_crtc:
381 plane->funcs->destroy(plane);
382err_plane:
383 kfree(exynos_crtc);
384 return ret;
385} 385}
386 386
387int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe) 387int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe)
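For readers less familiar with the universal plane conversion in the hunks above: instead of relying on the fake primary plane the DRM core creates for legacy drivers, the driver now registers a real plane and hands it to the CRTC. A generic, hypothetical sketch of that pattern (the example_* names and format list are placeholders, not the Exynos code; drm_universal_plane_init() and drm_crtc_init_with_planes() are the DRM core entry points, error unwinding omitted):

    static const uint32_t example_formats[] = { DRM_FORMAT_XRGB8888 };

    static int example_crtc_create(struct drm_device *dev, unsigned int pipe)
    {
    	struct drm_plane *primary;
    	struct drm_crtc *crtc;
    	int ret;

    	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
    	crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
    	if (!primary || !crtc)
    		return -ENOMEM;

    	/* register a real primary plane for this pipe */
    	ret = drm_universal_plane_init(dev, primary, 1 << pipe,
    				       &example_plane_funcs, example_formats,
    				       ARRAY_SIZE(example_formats),
    				       DRM_PLANE_TYPE_PRIMARY);
    	if (ret)
    		return ret;

    	/* attach it as the CRTC's primary plane; no cursor plane, hence NULL */
    	return drm_crtc_init_with_planes(dev, crtc, primary, NULL,
    					 &example_crtc_funcs);
    }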
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index fa08f05e3e34..96c87db388fb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -342,8 +342,12 @@ int exynos_dpi_remove(struct device *dev)
342 struct exynos_dpi *ctx = exynos_dpi_display.ctx; 342 struct exynos_dpi *ctx = exynos_dpi_display.ctx;
343 343
344 exynos_dpi_dpms(&exynos_dpi_display, DRM_MODE_DPMS_OFF); 344 exynos_dpi_dpms(&exynos_dpi_display, DRM_MODE_DPMS_OFF);
345
346 exynos_dpi_connector_destroy(&ctx->connector);
345 encoder->funcs->destroy(encoder); 347 encoder->funcs->destroy(encoder);
346 drm_connector_cleanup(&ctx->connector); 348
349 if (ctx->panel)
350 drm_panel_detach(ctx->panel);
347 351
348 exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CONNECTOR); 352 exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
349 353
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 5aae95cf5b23..443a2069858a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -15,7 +15,6 @@
15#include <drm/drmP.h> 15#include <drm/drmP.h>
16#include <drm/drm_crtc_helper.h> 16#include <drm/drm_crtc_helper.h>
17 17
18#include <linux/anon_inodes.h>
19#include <linux/component.h> 18#include <linux/component.h>
20 19
21#include <drm/exynos_drm.h> 20#include <drm/exynos_drm.h>
@@ -86,8 +85,9 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
86 struct drm_plane *plane; 85 struct drm_plane *plane;
87 unsigned long possible_crtcs = (1 << MAX_CRTC) - 1; 86 unsigned long possible_crtcs = (1 << MAX_CRTC) - 1;
88 87
89 plane = exynos_plane_init(dev, possible_crtcs, false); 88 plane = exynos_plane_init(dev, possible_crtcs,
90 if (!plane) 89 DRM_PLANE_TYPE_OVERLAY);
90 if (IS_ERR(plane))
91 goto err_mode_config_cleanup; 91 goto err_mode_config_cleanup;
92 } 92 }
93 93
@@ -116,6 +116,23 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
116 /* force connectors detection */ 116 /* force connectors detection */
117 drm_helper_hpd_irq_event(dev); 117 drm_helper_hpd_irq_event(dev);
118 118
119 /*
120 * enable drm irq mode.
121 * - with irq_enabled = true, we can use the vblank feature.
122 *
123 * P.S. note that we wouldn't use drm irq handler but
124 * just specific driver own one instead because
125 * drm framework supports only one irq handler.
126 */
127 dev->irq_enabled = true;
128
129 /*
130 * with vblank_disable_allowed = true, vblank interrupt will be disabled
131 * by drm timer once a current process gives up ownership of
132 * vblank event.(after drm_vblank_put function is called)
133 */
134 dev->vblank_disable_allowed = true;
135
119 return 0; 136 return 0;
120 137
121err_unbind_all: 138err_unbind_all:
@@ -136,23 +153,19 @@ static int exynos_drm_unload(struct drm_device *dev)
136 exynos_drm_device_subdrv_remove(dev); 153 exynos_drm_device_subdrv_remove(dev);
137 154
138 exynos_drm_fbdev_fini(dev); 155 exynos_drm_fbdev_fini(dev);
139 drm_vblank_cleanup(dev);
140 drm_kms_helper_poll_fini(dev); 156 drm_kms_helper_poll_fini(dev);
141 drm_mode_config_cleanup(dev);
142 157
158 component_unbind_all(dev->dev, dev);
159 drm_vblank_cleanup(dev);
160 drm_mode_config_cleanup(dev);
143 drm_release_iommu_mapping(dev); 161 drm_release_iommu_mapping(dev);
144 kfree(dev->dev_private);
145 162
146 component_unbind_all(dev->dev, dev); 163 kfree(dev->dev_private);
147 dev->dev_private = NULL; 164 dev->dev_private = NULL;
148 165
149 return 0; 166 return 0;
150} 167}
151 168
152static const struct file_operations exynos_drm_gem_fops = {
153 .mmap = exynos_drm_gem_mmap_buffer,
154};
155
156static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state) 169static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state)
157{ 170{
158 struct drm_connector *connector; 171 struct drm_connector *connector;
@@ -191,7 +204,6 @@ static int exynos_drm_resume(struct drm_device *dev)
191static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) 204static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
192{ 205{
193 struct drm_exynos_file_private *file_priv; 206 struct drm_exynos_file_private *file_priv;
194 struct file *anon_filp;
195 int ret; 207 int ret;
196 208
197 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); 209 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
@@ -204,21 +216,8 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
204 if (ret) 216 if (ret)
205 goto err_file_priv_free; 217 goto err_file_priv_free;
206 218
207 anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops,
208 NULL, 0);
209 if (IS_ERR(anon_filp)) {
210 ret = PTR_ERR(anon_filp);
211 goto err_subdrv_close;
212 }
213
214 anon_filp->f_mode = FMODE_READ | FMODE_WRITE;
215 file_priv->anon_filp = anon_filp;
216
217 return ret; 219 return ret;
218 220
219err_subdrv_close:
220 exynos_drm_subdrv_close(dev, file);
221
222err_file_priv_free: 221err_file_priv_free:
223 kfree(file_priv); 222 kfree(file_priv);
224 file->driver_priv = NULL; 223 file->driver_priv = NULL;
@@ -234,7 +233,6 @@ static void exynos_drm_preclose(struct drm_device *dev,
234static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file) 233static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
235{ 234{
236 struct exynos_drm_private *private = dev->dev_private; 235 struct exynos_drm_private *private = dev->dev_private;
237 struct drm_exynos_file_private *file_priv;
238 struct drm_pending_vblank_event *v, *vt; 236 struct drm_pending_vblank_event *v, *vt;
239 struct drm_pending_event *e, *et; 237 struct drm_pending_event *e, *et;
240 unsigned long flags; 238 unsigned long flags;
@@ -260,10 +258,6 @@ static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
260 } 258 }
261 spin_unlock_irqrestore(&dev->event_lock, flags); 259 spin_unlock_irqrestore(&dev->event_lock, flags);
262 260
263 file_priv = file->driver_priv;
264 if (file_priv->anon_filp)
265 fput(file_priv->anon_filp);
266
267 kfree(file->driver_priv); 261 kfree(file->driver_priv);
268 file->driver_priv = NULL; 262 file->driver_priv = NULL;
269} 263}
@@ -282,11 +276,6 @@ static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
282static const struct drm_ioctl_desc exynos_ioctls[] = { 276static const struct drm_ioctl_desc exynos_ioctls[] = {
283 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl, 277 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
284 DRM_UNLOCKED | DRM_AUTH), 278 DRM_UNLOCKED | DRM_AUTH),
285 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP_OFFSET,
286 exynos_drm_gem_map_offset_ioctl, DRM_UNLOCKED |
287 DRM_AUTH),
288 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MMAP,
289 exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH),
290 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET, 279 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET,
291 exynos_drm_gem_get_ioctl, DRM_UNLOCKED), 280 exynos_drm_gem_get_ioctl, DRM_UNLOCKED),
292 DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION, 281 DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION,
@@ -486,21 +475,20 @@ void exynos_drm_component_del(struct device *dev,
486 mutex_unlock(&drm_component_lock); 475 mutex_unlock(&drm_component_lock);
487} 476}
488 477
489static int compare_of(struct device *dev, void *data) 478static int compare_dev(struct device *dev, void *data)
490{ 479{
491 return dev == (struct device *)data; 480 return dev == (struct device *)data;
492} 481}
493 482
494static int exynos_drm_add_components(struct device *dev, struct master *m) 483static struct component_match *exynos_drm_match_add(struct device *dev)
495{ 484{
485 struct component_match *match = NULL;
496 struct component_dev *cdev; 486 struct component_dev *cdev;
497 unsigned int attach_cnt = 0; 487 unsigned int attach_cnt = 0;
498 488
499 mutex_lock(&drm_component_lock); 489 mutex_lock(&drm_component_lock);
500 490
501 list_for_each_entry(cdev, &drm_component_list, list) { 491 list_for_each_entry(cdev, &drm_component_list, list) {
502 int ret;
503
504 /* 492 /*
505 * Add components to master only in case that crtc and 493 * Add components to master only in case that crtc and
506 * encoder/connector device objects exist. 494 * encoder/connector device objects exist.
@@ -515,16 +503,10 @@ static int exynos_drm_add_components(struct device *dev, struct master *m)
515 /* 503 /*
516 * fimd and dpi modules have same device object so add 504 * fimd and dpi modules have same device object so add
517 * only crtc device object in this case. 505 * only crtc device object in this case.
518 *
519 * TODO. if dpi module follows driver-model driver then
520 * below codes can be removed.
521 */ 506 */
522 if (cdev->crtc_dev == cdev->conn_dev) { 507 if (cdev->crtc_dev == cdev->conn_dev) {
523 ret = component_master_add_child(m, compare_of, 508 component_match_add(dev, &match, compare_dev,
524 cdev->crtc_dev); 509 cdev->crtc_dev);
525 if (ret < 0)
526 return ret;
527
528 goto out_lock; 510 goto out_lock;
529 } 511 }
530 512
@@ -534,11 +516,8 @@ static int exynos_drm_add_components(struct device *dev, struct master *m)
534 * connector/encoder need pipe number of crtc when they 516 * connector/encoder need pipe number of crtc when they
535 * are created. 517 * are created.
536 */ 518 */
537 ret = component_master_add_child(m, compare_of, cdev->crtc_dev); 519 component_match_add(dev, &match, compare_dev, cdev->crtc_dev);
538 ret |= component_master_add_child(m, compare_of, 520 component_match_add(dev, &match, compare_dev, cdev->conn_dev);
539 cdev->conn_dev);
540 if (ret < 0)
541 return ret;
542 521
543out_lock: 522out_lock:
544 mutex_lock(&drm_component_lock); 523 mutex_lock(&drm_component_lock);
@@ -546,7 +525,7 @@ out_lock:
546 525
547 mutex_unlock(&drm_component_lock); 526 mutex_unlock(&drm_component_lock);
548 527
549 return attach_cnt ? 0 : -ENODEV; 528 return attach_cnt ? match : ERR_PTR(-EPROBE_DEFER);
550} 529}
551 530
552static int exynos_drm_bind(struct device *dev) 531static int exynos_drm_bind(struct device *dev)
@@ -560,13 +539,13 @@ static void exynos_drm_unbind(struct device *dev)
560} 539}
561 540
562static const struct component_master_ops exynos_drm_ops = { 541static const struct component_master_ops exynos_drm_ops = {
563 .add_components = exynos_drm_add_components,
564 .bind = exynos_drm_bind, 542 .bind = exynos_drm_bind,
565 .unbind = exynos_drm_unbind, 543 .unbind = exynos_drm_unbind,
566}; 544};
567 545
568static int exynos_drm_platform_probe(struct platform_device *pdev) 546static int exynos_drm_platform_probe(struct platform_device *pdev)
569{ 547{
548 struct component_match *match;
570 int ret; 549 int ret;
571 550
572 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 551 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
@@ -633,13 +612,23 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
633 goto err_unregister_ipp_drv; 612 goto err_unregister_ipp_drv;
634#endif 613#endif
635 614
636 ret = component_master_add(&pdev->dev, &exynos_drm_ops); 615 match = exynos_drm_match_add(&pdev->dev);
616 if (IS_ERR(match)) {
617 ret = PTR_ERR(match);
618 goto err_unregister_resources;
619 }
620
621 ret = component_master_add_with_match(&pdev->dev, &exynos_drm_ops,
622 match);
637 if (ret < 0) 623 if (ret < 0)
638 DRM_DEBUG_KMS("re-tried by last sub driver probed later.\n"); 624 goto err_unregister_resources;
639 625
640 return 0; 626 return ret;
627
628err_unregister_resources:
641 629
642#ifdef CONFIG_DRM_EXYNOS_IPP 630#ifdef CONFIG_DRM_EXYNOS_IPP
631 exynos_platform_device_ipp_unregister();
643err_unregister_ipp_drv: 632err_unregister_ipp_drv:
644 platform_driver_unregister(&ipp_driver); 633 platform_driver_unregister(&ipp_driver);
645err_unregister_gsc_drv: 634err_unregister_gsc_drv:
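The component-match conversion above follows the now-standard pattern for DRM master drivers: collect the expected sub-devices into a match list at probe time and let the component framework invoke bind() once all of them have probed. A condensed sketch (crtc_dev and conn_dev stand in for whatever devices were collected on drm_component_list):

    static int compare_dev(struct device *dev, void *data)
    {
    	return dev == data;
    }

    static int example_master_probe(struct platform_device *pdev)
    {
    	struct component_match *match = NULL;

    	/* one entry per expected sub-device */
    	component_match_add(&pdev->dev, &match, compare_dev, crtc_dev);
    	component_match_add(&pdev->dev, &match, compare_dev, conn_dev);

    	/* exynos_drm_ops.bind() runs once every matched component has probed */
    	return component_master_add_with_match(&pdev->dev, &exynos_drm_ops,
    					       match);
    }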
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 69a6fa397d75..d22e640f59a0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -240,7 +240,6 @@ struct exynos_drm_g2d_private {
240struct drm_exynos_file_private { 240struct drm_exynos_file_private {
241 struct exynos_drm_g2d_private *g2d_priv; 241 struct exynos_drm_g2d_private *g2d_priv;
242 struct device *ipp_dev; 242 struct device *ipp_dev;
243 struct file *anon_filp;
244}; 243};
245 244
246/* 245/*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 442aa2d00132..24741d8758e8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -114,6 +114,8 @@
114#define DSIM_SYNC_INFORM (1 << 27) 114#define DSIM_SYNC_INFORM (1 << 27)
115#define DSIM_EOT_DISABLE (1 << 28) 115#define DSIM_EOT_DISABLE (1 << 28)
116#define DSIM_MFLUSH_VS (1 << 29) 116#define DSIM_MFLUSH_VS (1 << 29)
117/* This flag is valid only for exynos3250/3472/4415/5260/5430 */
118#define DSIM_CLKLANE_STOP (1 << 30)
117 119
118/* DSIM_ESCMODE */ 120/* DSIM_ESCMODE */
119#define DSIM_TX_TRIGGER_RST (1 << 4) 121#define DSIM_TX_TRIGGER_RST (1 << 4)
@@ -262,6 +264,7 @@ struct exynos_dsi_driver_data {
262 unsigned int plltmr_reg; 264 unsigned int plltmr_reg;
263 265
264 unsigned int has_freqband:1; 266 unsigned int has_freqband:1;
267 unsigned int has_clklane_stop:1;
265}; 268};
266 269
267struct exynos_dsi { 270struct exynos_dsi {
@@ -301,9 +304,16 @@ struct exynos_dsi {
301#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host) 304#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host)
302#define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector) 305#define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector)
303 306
307static struct exynos_dsi_driver_data exynos3_dsi_driver_data = {
308 .plltmr_reg = 0x50,
309 .has_freqband = 1,
310 .has_clklane_stop = 1,
311};
312
304static struct exynos_dsi_driver_data exynos4_dsi_driver_data = { 313static struct exynos_dsi_driver_data exynos4_dsi_driver_data = {
305 .plltmr_reg = 0x50, 314 .plltmr_reg = 0x50,
306 .has_freqband = 1, 315 .has_freqband = 1,
316 .has_clklane_stop = 1,
307}; 317};
308 318
309static struct exynos_dsi_driver_data exynos5_dsi_driver_data = { 319static struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
@@ -311,6 +321,8 @@ static struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
311}; 321};
312 322
313static struct of_device_id exynos_dsi_of_match[] = { 323static struct of_device_id exynos_dsi_of_match[] = {
324 { .compatible = "samsung,exynos3250-mipi-dsi",
325 .data = &exynos3_dsi_driver_data },
314 { .compatible = "samsung,exynos4210-mipi-dsi", 326 { .compatible = "samsung,exynos4210-mipi-dsi",
315 .data = &exynos4_dsi_driver_data }, 327 .data = &exynos4_dsi_driver_data },
316 { .compatible = "samsung,exynos5410-mipi-dsi", 328 { .compatible = "samsung,exynos5410-mipi-dsi",
@@ -421,7 +433,7 @@ static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
421 if (!fout) { 433 if (!fout) {
422 dev_err(dsi->dev, 434 dev_err(dsi->dev,
423 "failed to find PLL PMS for requested frequency\n"); 435 "failed to find PLL PMS for requested frequency\n");
424 return -EFAULT; 436 return 0;
425 } 437 }
426 dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d)\n", fout, p, m, s); 438 dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d)\n", fout, p, m, s);
427 439
@@ -453,7 +465,7 @@ static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
453 do { 465 do {
454 if (timeout-- == 0) { 466 if (timeout-- == 0) {
455 dev_err(dsi->dev, "PLL failed to stabilize\n"); 467 dev_err(dsi->dev, "PLL failed to stabilize\n");
456 return -EFAULT; 468 return 0;
457 } 469 }
458 reg = readl(dsi->reg_base + DSIM_STATUS_REG); 470 reg = readl(dsi->reg_base + DSIM_STATUS_REG);
459 } while ((reg & DSIM_PLL_STABLE) == 0); 471 } while ((reg & DSIM_PLL_STABLE) == 0);
@@ -569,6 +581,7 @@ static void exynos_dsi_disable_clock(struct exynos_dsi *dsi)
569 581
570static int exynos_dsi_init_link(struct exynos_dsi *dsi) 582static int exynos_dsi_init_link(struct exynos_dsi *dsi)
571{ 583{
584 struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
572 int timeout; 585 int timeout;
573 u32 reg; 586 u32 reg;
574 u32 lanes_mask; 587 u32 lanes_mask;
@@ -650,6 +663,20 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi)
650 reg |= DSIM_LANE_EN(lanes_mask); 663 reg |= DSIM_LANE_EN(lanes_mask);
651 writel(reg, dsi->reg_base + DSIM_CONFIG_REG); 664 writel(reg, dsi->reg_base + DSIM_CONFIG_REG);
652 665
666 /*
 667 * Use non-continuous clock mode if the peripheral requests it
 668 * and the host controller supports it.
 669 *
 670 * In non-continuous clock mode, the host controller will turn off
671 * the HS clock between high-speed transmissions to reduce
672 * power consumption.
673 */
674 if (driver_data->has_clklane_stop &&
675 dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
676 reg |= DSIM_CLKLANE_STOP;
677 writel(reg, dsi->reg_base + DSIM_CONFIG_REG);
678 }
679
653 /* Check clock and data lane state are stop state */ 680 /* Check clock and data lane state are stop state */
654 timeout = 100; 681 timeout = 100;
655 do { 682 do {
@@ -1414,6 +1441,9 @@ exynos_dsi_detect(struct drm_connector *connector, bool force)
1414 1441
1415static void exynos_dsi_connector_destroy(struct drm_connector *connector) 1442static void exynos_dsi_connector_destroy(struct drm_connector *connector)
1416{ 1443{
1444 drm_connector_unregister(connector);
1445 drm_connector_cleanup(connector);
1446 connector->dev = NULL;
1417} 1447}
1418 1448
1419static struct drm_connector_funcs exynos_dsi_connector_funcs = { 1449static struct drm_connector_funcs exynos_dsi_connector_funcs = {
@@ -1634,10 +1664,10 @@ static void exynos_dsi_unbind(struct device *dev, struct device *master,
1634 1664
1635 exynos_dsi_dpms(&exynos_dsi_display, DRM_MODE_DPMS_OFF); 1665 exynos_dsi_dpms(&exynos_dsi_display, DRM_MODE_DPMS_OFF);
1636 1666
1637 mipi_dsi_host_unregister(&dsi->dsi_host); 1667 exynos_dsi_connector_destroy(&dsi->connector);
1638
1639 encoder->funcs->destroy(encoder); 1668 encoder->funcs->destroy(encoder);
1640 drm_connector_cleanup(&dsi->connector); 1669
1670 mipi_dsi_host_unregister(&dsi->dsi_host);
1641} 1671}
1642 1672
1643static const struct component_ops exynos_dsi_component_ops = { 1673static const struct component_ops exynos_dsi_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 65a22cad7b36..d346d1e6eda0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -165,6 +165,7 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
165 165
166 ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs); 166 ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
167 if (ret) { 167 if (ret) {
168 kfree(exynos_fb);
168 DRM_ERROR("failed to initialize framebuffer\n"); 169 DRM_ERROR("failed to initialize framebuffer\n");
169 return ERR_PTR(ret); 170 return ERR_PTR(ret);
170 } 171 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 32e63f60e1d1..e12ea90c6237 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -123,6 +123,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
123 123
124 fbi->screen_base = buffer->kvaddr + offset; 124 fbi->screen_base = buffer->kvaddr + offset;
125 fbi->screen_size = size; 125 fbi->screen_size = size;
126 fbi->fix.smem_len = size;
126 127
127 return 0; 128 return 0;
128} 129}
@@ -353,9 +354,6 @@ void exynos_drm_fbdev_fini(struct drm_device *dev)
353 354
354 fbdev = to_exynos_fbdev(private->fb_helper); 355 fbdev = to_exynos_fbdev(private->fb_helper);
355 356
356 if (fbdev->exynos_gem_obj)
357 exynos_drm_gem_destroy(fbdev->exynos_gem_obj);
358
359 exynos_drm_fbdev_destroy(dev, private->fb_helper); 357 exynos_drm_fbdev_destroy(dev, private->fb_helper);
360 kfree(fbdev); 358 kfree(fbdev);
361 private->fb_helper = NULL; 359 private->fb_helper = NULL;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index ec7cc9ea50df..68d38eb6774d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -336,9 +336,6 @@ static bool fimc_check_ovf(struct fimc_context *ctx)
336 fimc_set_bits(ctx, EXYNOS_CIWDOFST, 336 fimc_set_bits(ctx, EXYNOS_CIWDOFST,
337 EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB | 337 EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
338 EXYNOS_CIWDOFST_CLROVFICR); 338 EXYNOS_CIWDOFST_CLROVFICR);
339 fimc_clear_bits(ctx, EXYNOS_CIWDOFST,
340 EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
341 EXYNOS_CIWDOFST_CLROVFICR);
342 339
343 dev_err(ippdrv->dev, "occurred overflow at %d, status 0x%x.\n", 340 dev_err(ippdrv->dev, "occurred overflow at %d, status 0x%x.\n",
344 ctx->id, status); 341 ctx->id, status);
@@ -718,24 +715,24 @@ static int fimc_src_set_addr(struct device *dev,
718 case IPP_BUF_ENQUEUE: 715 case IPP_BUF_ENQUEUE:
719 config = &property->config[EXYNOS_DRM_OPS_SRC]; 716 config = &property->config[EXYNOS_DRM_OPS_SRC];
720 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_Y], 717 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_Y],
721 EXYNOS_CIIYSA(buf_id)); 718 EXYNOS_CIIYSA0);
722 719
723 if (config->fmt == DRM_FORMAT_YVU420) { 720 if (config->fmt == DRM_FORMAT_YVU420) {
724 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR], 721 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
725 EXYNOS_CIICBSA(buf_id)); 722 EXYNOS_CIICBSA0);
726 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB], 723 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
727 EXYNOS_CIICRSA(buf_id)); 724 EXYNOS_CIICRSA0);
728 } else { 725 } else {
729 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB], 726 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
730 EXYNOS_CIICBSA(buf_id)); 727 EXYNOS_CIICBSA0);
731 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR], 728 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
732 EXYNOS_CIICRSA(buf_id)); 729 EXYNOS_CIICRSA0);
733 } 730 }
734 break; 731 break;
735 case IPP_BUF_DEQUEUE: 732 case IPP_BUF_DEQUEUE:
736 fimc_write(ctx, 0x0, EXYNOS_CIIYSA(buf_id)); 733 fimc_write(ctx, 0x0, EXYNOS_CIIYSA0);
737 fimc_write(ctx, 0x0, EXYNOS_CIICBSA(buf_id)); 734 fimc_write(ctx, 0x0, EXYNOS_CIICBSA0);
738 fimc_write(ctx, 0x0, EXYNOS_CIICRSA(buf_id)); 735 fimc_write(ctx, 0x0, EXYNOS_CIICRSA0);
739 break; 736 break;
740 default: 737 default:
741 /* bypass */ 738 /* bypass */
@@ -1122,67 +1119,34 @@ static int fimc_dst_set_size(struct device *dev, int swap,
1122 return 0; 1119 return 0;
1123} 1120}
1124 1121
1125static int fimc_dst_get_buf_count(struct fimc_context *ctx) 1122static void fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
1126{
1127 u32 cfg, buf_num;
1128
1129 cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ);
1130
1131 buf_num = hweight32(cfg);
1132
1133 DRM_DEBUG_KMS("buf_num[%d]\n", buf_num);
1134
1135 return buf_num;
1136}
1137
1138static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
1139 enum drm_exynos_ipp_buf_type buf_type) 1123 enum drm_exynos_ipp_buf_type buf_type)
1140{ 1124{
1141 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1142 bool enable;
1143 u32 cfg;
1144 u32 mask = 0x00000001 << buf_id;
1145 int ret = 0;
1146 unsigned long flags; 1125 unsigned long flags;
1126 u32 buf_num;
1127 u32 cfg;
1147 1128
1148 DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type); 1129 DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type);
1149 1130
1150 spin_lock_irqsave(&ctx->lock, flags); 1131 spin_lock_irqsave(&ctx->lock, flags);
1151 1132
1152 /* mask register set */
1153 cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ); 1133 cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ);
1154 1134
1155 switch (buf_type) { 1135 if (buf_type == IPP_BUF_ENQUEUE)
1156 case IPP_BUF_ENQUEUE: 1136 cfg |= (1 << buf_id);
1157 enable = true; 1137 else
1158 break; 1138 cfg &= ~(1 << buf_id);
1159 case IPP_BUF_DEQUEUE:
1160 enable = false;
1161 break;
1162 default:
1163 dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
1164 ret = -EINVAL;
1165 goto err_unlock;
1166 }
1167 1139
1168 /* sequence id */
1169 cfg &= ~mask;
1170 cfg |= (enable << buf_id);
1171 fimc_write(ctx, cfg, EXYNOS_CIFCNTSEQ); 1140 fimc_write(ctx, cfg, EXYNOS_CIFCNTSEQ);
1172 1141
1173 /* interrupt enable */ 1142 buf_num = hweight32(cfg);
1174 if (buf_type == IPP_BUF_ENQUEUE &&
1175 fimc_dst_get_buf_count(ctx) >= FIMC_BUF_START)
1176 fimc_mask_irq(ctx, true);
1177 1143
1178 /* interrupt disable */ 1144 if (buf_type == IPP_BUF_ENQUEUE && buf_num >= FIMC_BUF_START)
1179 if (buf_type == IPP_BUF_DEQUEUE && 1145 fimc_mask_irq(ctx, true);
1180 fimc_dst_get_buf_count(ctx) <= FIMC_BUF_STOP) 1146 else if (buf_type == IPP_BUF_DEQUEUE && buf_num <= FIMC_BUF_STOP)
1181 fimc_mask_irq(ctx, false); 1147 fimc_mask_irq(ctx, false);
1182 1148
1183err_unlock:
1184 spin_unlock_irqrestore(&ctx->lock, flags); 1149 spin_unlock_irqrestore(&ctx->lock, flags);
1185 return ret;
1186} 1150}
1187 1151
1188static int fimc_dst_set_addr(struct device *dev, 1152static int fimc_dst_set_addr(struct device *dev,
@@ -1240,7 +1204,9 @@ static int fimc_dst_set_addr(struct device *dev,
1240 break; 1204 break;
1241 } 1205 }
1242 1206
1243 return fimc_dst_set_buf_seq(ctx, buf_id, buf_type); 1207 fimc_dst_set_buf_seq(ctx, buf_id, buf_type);
1208
1209 return 0;
1244} 1210}
1245 1211
1246static struct exynos_drm_ipp_ops fimc_dst_ops = { 1212static struct exynos_drm_ipp_ops fimc_dst_ops = {
@@ -1291,14 +1257,11 @@ static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
1291 1257
1292 DRM_DEBUG_KMS("buf_id[%d]\n", buf_id); 1258 DRM_DEBUG_KMS("buf_id[%d]\n", buf_id);
1293 1259
1294 if (fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE) < 0) { 1260 fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
1295 DRM_ERROR("failed to dequeue.\n");
1296 return IRQ_HANDLED;
1297 }
1298 1261
1299 event_work->ippdrv = ippdrv; 1262 event_work->ippdrv = ippdrv;
1300 event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id; 1263 event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
1301 queue_work(ippdrv->event_workq, (struct work_struct *)event_work); 1264 queue_work(ippdrv->event_workq, &event_work->work);
1302 1265
1303 return IRQ_HANDLED; 1266 return IRQ_HANDLED;
1304} 1267}
@@ -1590,11 +1553,8 @@ static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1590 1553
1591 fimc_clear_bits(ctx, EXYNOS_CIOCTRL, EXYNOS_CIOCTRL_WEAVE_MASK); 1554 fimc_clear_bits(ctx, EXYNOS_CIOCTRL, EXYNOS_CIOCTRL_WEAVE_MASK);
1592 1555
1593 if (cmd == IPP_CMD_M2M) { 1556 if (cmd == IPP_CMD_M2M)
1594 fimc_set_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID);
1595
1596 fimc_set_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID); 1557 fimc_set_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID);
1597 }
1598 1558
1599 return 0; 1559 return 0;
1600} 1560}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 5d09e33fef87..085b066a9993 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -104,6 +104,14 @@ static struct fimd_driver_data s3c64xx_fimd_driver_data = {
104 .has_limited_fmt = 1, 104 .has_limited_fmt = 1,
105}; 105};
106 106
107static struct fimd_driver_data exynos3_fimd_driver_data = {
108 .timing_base = 0x20000,
109 .lcdblk_offset = 0x210,
110 .lcdblk_bypass_shift = 1,
111 .has_shadowcon = 1,
112 .has_vidoutcon = 1,
113};
114
107static struct fimd_driver_data exynos4_fimd_driver_data = { 115static struct fimd_driver_data exynos4_fimd_driver_data = {
108 .timing_base = 0x0, 116 .timing_base = 0x0,
109 .lcdblk_offset = 0x210, 117 .lcdblk_offset = 0x210,
@@ -168,6 +176,8 @@ struct fimd_context {
168static const struct of_device_id fimd_driver_dt_match[] = { 176static const struct of_device_id fimd_driver_dt_match[] = {
169 { .compatible = "samsung,s3c6400-fimd", 177 { .compatible = "samsung,s3c6400-fimd",
170 .data = &s3c64xx_fimd_driver_data }, 178 .data = &s3c64xx_fimd_driver_data },
179 { .compatible = "samsung,exynos3250-fimd",
180 .data = &exynos3_fimd_driver_data },
171 { .compatible = "samsung,exynos4210-fimd", 181 { .compatible = "samsung,exynos4210-fimd",
172 .data = &exynos4_fimd_driver_data }, 182 .data = &exynos4_fimd_driver_data },
173 { .compatible = "samsung,exynos5250-fimd", 183 { .compatible = "samsung,exynos5250-fimd",
@@ -204,7 +214,6 @@ static void fimd_wait_for_vblank(struct exynos_drm_manager *mgr)
204 DRM_DEBUG_KMS("vblank wait timed out.\n"); 214 DRM_DEBUG_KMS("vblank wait timed out.\n");
205} 215}
206 216
207
208static void fimd_clear_channel(struct exynos_drm_manager *mgr) 217static void fimd_clear_channel(struct exynos_drm_manager *mgr)
209{ 218{
210 struct fimd_context *ctx = mgr->ctx; 219 struct fimd_context *ctx = mgr->ctx;
@@ -214,17 +223,31 @@ static void fimd_clear_channel(struct exynos_drm_manager *mgr)
214 223
215 /* Check if any channel is enabled. */ 224 /* Check if any channel is enabled. */
216 for (win = 0; win < WINDOWS_NR; win++) { 225 for (win = 0; win < WINDOWS_NR; win++) {
217 u32 val = readl(ctx->regs + SHADOWCON); 226 u32 val = readl(ctx->regs + WINCON(win));
218 if (val & SHADOWCON_CHx_ENABLE(win)) { 227
219 val &= ~SHADOWCON_CHx_ENABLE(win); 228 if (val & WINCONx_ENWIN) {
220 writel(val, ctx->regs + SHADOWCON); 229 /* wincon */
230 val &= ~WINCONx_ENWIN;
231 writel(val, ctx->regs + WINCON(win));
232
233 /* unprotect windows */
234 if (ctx->driver_data->has_shadowcon) {
235 val = readl(ctx->regs + SHADOWCON);
236 val &= ~SHADOWCON_CHx_ENABLE(win);
237 writel(val, ctx->regs + SHADOWCON);
238 }
221 ch_enabled = 1; 239 ch_enabled = 1;
222 } 240 }
223 } 241 }
224 242
225 /* Wait for vsync, as disable channel takes effect at next vsync */ 243 /* Wait for vsync, as disable channel takes effect at next vsync */
226 if (ch_enabled) 244 if (ch_enabled) {
245 unsigned int state = ctx->suspended;
246
247 ctx->suspended = 0;
227 fimd_wait_for_vblank(mgr); 248 fimd_wait_for_vblank(mgr);
249 ctx->suspended = state;
250 }
228} 251}
229 252
230static int fimd_mgr_initialize(struct exynos_drm_manager *mgr, 253static int fimd_mgr_initialize(struct exynos_drm_manager *mgr,
@@ -237,23 +260,6 @@ static int fimd_mgr_initialize(struct exynos_drm_manager *mgr,
237 mgr->drm_dev = ctx->drm_dev = drm_dev; 260 mgr->drm_dev = ctx->drm_dev = drm_dev;
238 mgr->pipe = ctx->pipe = priv->pipe++; 261 mgr->pipe = ctx->pipe = priv->pipe++;
239 262
240 /*
241 * enable drm irq mode.
242 * - with irq_enabled = true, we can use the vblank feature.
243 *
244 * P.S. note that we wouldn't use drm irq handler but
245 * just specific driver own one instead because
246 * drm framework supports only one irq handler.
247 */
248 drm_dev->irq_enabled = true;
249
250 /*
251 * with vblank_disable_allowed = true, vblank interrupt will be disabled
252 * by drm timer once a current process gives up ownership of
253 * vblank event.(after drm_vblank_put function is called)
254 */
255 drm_dev->vblank_disable_allowed = true;
256
257 /* attach this sub driver to iommu mapping if supported. */ 263 /* attach this sub driver to iommu mapping if supported. */
258 if (is_drm_iommu_supported(ctx->drm_dev)) { 264 if (is_drm_iommu_supported(ctx->drm_dev)) {
259 /* 265 /*
@@ -1051,7 +1057,6 @@ static void fimd_unbind(struct device *dev, struct device *master,
1051{ 1057{
1052 struct exynos_drm_manager *mgr = dev_get_drvdata(dev); 1058 struct exynos_drm_manager *mgr = dev_get_drvdata(dev);
1053 struct fimd_context *ctx = fimd_manager.ctx; 1059 struct fimd_context *ctx = fimd_manager.ctx;
1054 struct drm_crtc *crtc = mgr->crtc;
1055 1060
1056 fimd_dpms(mgr, DRM_MODE_DPMS_OFF); 1061 fimd_dpms(mgr, DRM_MODE_DPMS_OFF);
1057 1062
@@ -1059,8 +1064,6 @@ static void fimd_unbind(struct device *dev, struct device *master,
1059 exynos_dpi_remove(dev); 1064 exynos_dpi_remove(dev);
1060 1065
1061 fimd_mgr_remove(mgr); 1066 fimd_mgr_remove(mgr);
1062
1063 crtc->funcs->destroy(crtc);
1064} 1067}
1065 1068
1066static const struct component_ops fimd_component_ops = { 1069static const struct component_ops fimd_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 15db80138382..0d5b9698d384 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -318,40 +318,16 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
318 drm_gem_object_unreference_unlocked(obj); 318 drm_gem_object_unreference_unlocked(obj);
319} 319}
320 320
321int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data, 321int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
322 struct drm_file *file_priv)
323{
324 struct drm_exynos_gem_map_off *args = data;
325
326 DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
327 args->handle, (unsigned long)args->offset);
328
329 if (!(dev->driver->driver_features & DRIVER_GEM)) {
330 DRM_ERROR("does not support GEM.\n");
331 return -ENODEV;
332 }
333
334 return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
335 &args->offset);
336}
337
338int exynos_drm_gem_mmap_buffer(struct file *filp,
339 struct vm_area_struct *vma) 322 struct vm_area_struct *vma)
340{ 323{
341 struct drm_gem_object *obj = filp->private_data; 324 struct drm_device *drm_dev = exynos_gem_obj->base.dev;
342 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
343 struct drm_device *drm_dev = obj->dev;
344 struct exynos_drm_gem_buf *buffer; 325 struct exynos_drm_gem_buf *buffer;
345 unsigned long vm_size; 326 unsigned long vm_size;
346 int ret; 327 int ret;
347 328
348 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); 329 vma->vm_flags &= ~VM_PFNMAP;
349 330 vma->vm_pgoff = 0;
350 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
351 vma->vm_private_data = obj;
352 vma->vm_ops = drm_dev->driver->gem_vm_ops;
353
354 update_vm_cache_attr(exynos_gem_obj, vma);
355 331
356 vm_size = vma->vm_end - vma->vm_start; 332 vm_size = vma->vm_end - vma->vm_start;
357 333
@@ -373,60 +349,6 @@ int exynos_drm_gem_mmap_buffer(struct file *filp,
373 return ret; 349 return ret;
374 } 350 }
375 351
376 /*
377 * take a reference to this mapping of the object. And this reference
378 * is unreferenced by the corresponding vm_close call.
379 */
380 drm_gem_object_reference(obj);
381
382 drm_vm_open_locked(drm_dev, vma);
383
384 return 0;
385}
386
387int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
388 struct drm_file *file_priv)
389{
390 struct drm_exynos_file_private *exynos_file_priv;
391 struct drm_exynos_gem_mmap *args = data;
392 struct drm_gem_object *obj;
393 struct file *anon_filp;
394 unsigned long addr;
395
396 if (!(dev->driver->driver_features & DRIVER_GEM)) {
397 DRM_ERROR("does not support GEM.\n");
398 return -ENODEV;
399 }
400
401 mutex_lock(&dev->struct_mutex);
402
403 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
404 if (!obj) {
405 DRM_ERROR("failed to lookup gem object.\n");
406 mutex_unlock(&dev->struct_mutex);
407 return -EINVAL;
408 }
409
410 exynos_file_priv = file_priv->driver_priv;
411 anon_filp = exynos_file_priv->anon_filp;
412 anon_filp->private_data = obj;
413
414 addr = vm_mmap(anon_filp, 0, args->size, PROT_READ | PROT_WRITE,
415 MAP_SHARED, 0);
416
417 drm_gem_object_unreference(obj);
418
419 if (IS_ERR_VALUE(addr)) {
420 mutex_unlock(&dev->struct_mutex);
421 return (int)addr;
422 }
423
424 mutex_unlock(&dev->struct_mutex);
425
426 args->mapped = addr;
427
428 DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);
429
430 return 0; 352 return 0;
431} 353}
432 354
@@ -710,16 +632,20 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
710 exynos_gem_obj = to_exynos_gem_obj(obj); 632 exynos_gem_obj = to_exynos_gem_obj(obj);
711 633
712 ret = check_gem_flags(exynos_gem_obj->flags); 634 ret = check_gem_flags(exynos_gem_obj->flags);
713 if (ret) { 635 if (ret)
714 drm_gem_vm_close(vma); 636 goto err_close_vm;
715 drm_gem_free_mmap_offset(obj);
716 return ret;
717 }
718
719 vma->vm_flags &= ~VM_PFNMAP;
720 vma->vm_flags |= VM_MIXEDMAP;
721 637
722 update_vm_cache_attr(exynos_gem_obj, vma); 638 update_vm_cache_attr(exynos_gem_obj, vma);
723 639
640 ret = exynos_drm_gem_mmap_buffer(exynos_gem_obj, vma);
641 if (ret)
642 goto err_close_vm;
643
644 return ret;
645
646err_close_vm:
647 drm_gem_vm_close(vma);
648 drm_gem_free_mmap_offset(obj);
649
724 return ret; 650 return ret;
725} 651}
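With the EXYNOS_GEM_MAP_OFFSET and EXYNOS_GEM_MMAP ioctls gone, userspace maps a buffer through the generic DRM route: obtain the fake mmap offset for the GEM handle and mmap() the DRM fd at that offset. A hedged userspace sketch using the dumb-buffer map-offset ioctl, which the driver's dumb_map_offset callback services (handle and size come from an earlier buffer allocation; error handling trimmed):

    #include <stdint.h>
    #include <sys/mman.h>
    #include <xf86drm.h>

    static void *example_map_gem(int drm_fd, uint32_t handle, size_t size)
    {
    	struct drm_mode_map_dumb map = { .handle = handle };

    	/* kernel fills map.offset with the fake offset for this handle */
    	if (drmIoctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
    		return MAP_FAILED;

    	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
    		    drm_fd, map.offset);
    }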
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 1592c0ba7de8..09d021bbccf5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -111,20 +111,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
111 unsigned int gem_handle, 111 unsigned int gem_handle,
112 struct drm_file *filp); 112 struct drm_file *filp);
113 113
114/* get buffer offset to map to user space. */
115int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
116 struct drm_file *file_priv);
117
118/*
119 * mmap the physically continuous memory that a gem object contains
120 * to user space.
121 */
122int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
123 struct drm_file *file_priv);
124
125int exynos_drm_gem_mmap_buffer(struct file *filp,
126 struct vm_area_struct *vma);
127
128/* map user space allocated by malloc to pages. */ 114/* map user space allocated by malloc to pages. */
129int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data, 115int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
130 struct drm_file *file_priv); 116 struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 9e3ff1672965..c6a013fc321c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1326,8 +1326,7 @@ static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
1326 buf_id[EXYNOS_DRM_OPS_SRC]; 1326 buf_id[EXYNOS_DRM_OPS_SRC];
1327 event_work->buf_id[EXYNOS_DRM_OPS_DST] = 1327 event_work->buf_id[EXYNOS_DRM_OPS_DST] =
1328 buf_id[EXYNOS_DRM_OPS_DST]; 1328 buf_id[EXYNOS_DRM_OPS_DST];
1329 queue_work(ippdrv->event_workq, 1329 queue_work(ippdrv->event_workq, &event_work->work);
1330 (struct work_struct *)event_work);
1331 } 1330 }
1332 1331
1333 return IRQ_HANDLED; 1332 return IRQ_HANDLED;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index c411399070d6..00d74b18f7cb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -75,7 +75,6 @@ struct drm_exynos_ipp_mem_node {
75 u32 prop_id; 75 u32 prop_id;
76 u32 buf_id; 76 u32 buf_id;
77 struct drm_exynos_ipp_buf_info buf_info; 77 struct drm_exynos_ipp_buf_info buf_info;
78 struct drm_file *filp;
79}; 78};
80 79
81/* 80/*
@@ -319,44 +318,6 @@ static void ipp_print_property(struct drm_exynos_ipp_property *property,
319 sz->hsize, sz->vsize, config->flip, config->degree); 318 sz->hsize, sz->vsize, config->flip, config->degree);
320} 319}
321 320
322static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
323{
324 struct exynos_drm_ippdrv *ippdrv;
325 struct drm_exynos_ipp_cmd_node *c_node;
326 u32 prop_id = property->prop_id;
327
328 DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);
329
330 ippdrv = ipp_find_drv_by_handle(prop_id);
331 if (IS_ERR(ippdrv)) {
332 DRM_ERROR("failed to get ipp driver.\n");
333 return -EINVAL;
334 }
335
336 /*
337 * Find command node using command list in ippdrv.
338 * when we find this command no using prop_id.
339 * return property information set in this command node.
340 */
341 mutex_lock(&ippdrv->cmd_lock);
342 list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
343 if ((c_node->property.prop_id == prop_id) &&
344 (c_node->state == IPP_STATE_STOP)) {
345 mutex_unlock(&ippdrv->cmd_lock);
346 DRM_DEBUG_KMS("found cmd[%d]ippdrv[0x%x]\n",
347 property->cmd, (int)ippdrv);
348
349 c_node->property = *property;
350 return 0;
351 }
352 }
353 mutex_unlock(&ippdrv->cmd_lock);
354
355 DRM_ERROR("failed to search property.\n");
356
357 return -EINVAL;
358}
359
360static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void) 321static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
361{ 322{
362 struct drm_exynos_ipp_cmd_work *cmd_work; 323 struct drm_exynos_ipp_cmd_work *cmd_work;
@@ -392,6 +353,7 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
392 struct drm_exynos_ipp_property *property = data; 353 struct drm_exynos_ipp_property *property = data;
393 struct exynos_drm_ippdrv *ippdrv; 354 struct exynos_drm_ippdrv *ippdrv;
394 struct drm_exynos_ipp_cmd_node *c_node; 355 struct drm_exynos_ipp_cmd_node *c_node;
356 u32 prop_id;
395 int ret, i; 357 int ret, i;
396 358
397 if (!ctx) { 359 if (!ctx) {
@@ -404,6 +366,8 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
404 return -EINVAL; 366 return -EINVAL;
405 } 367 }
406 368
369 prop_id = property->prop_id;
370
407 /* 371 /*
408 * This is log print for user application property. 372 * This is log print for user application property.
409 * user application set various property. 373 * user application set various property.
@@ -412,14 +376,24 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
412 ipp_print_property(property, i); 376 ipp_print_property(property, i);
413 377
414 /* 378 /*
415 * set property ioctl generated new prop_id. 379 * In case prop_id is not zero try to set existing property.
416 * but in this case already asigned prop_id using old set property.
417 * e.g PAUSE state. this case supports find current prop_id and use it
418 * instead of allocation.
419 */ 380 */
420 if (property->prop_id) { 381 if (prop_id) {
421 DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id); 382 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, prop_id);
422 return ipp_find_and_set_property(property); 383
384 if (!c_node || c_node->filp != file) {
385 DRM_DEBUG_KMS("prop_id[%d] not found\n", prop_id);
386 return -EINVAL;
387 }
388
389 if (c_node->state != IPP_STATE_STOP) {
390 DRM_DEBUG_KMS("prop_id[%d] not stopped\n", prop_id);
391 return -EINVAL;
392 }
393
394 c_node->property = *property;
395
396 return 0;
423 } 397 }
424 398
425 /* find ipp driver using ipp id */ 399 /* find ipp driver using ipp id */
@@ -445,9 +419,9 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
445 property->prop_id, property->cmd, (int)ippdrv); 419 property->prop_id, property->cmd, (int)ippdrv);
446 420
447 /* stored property information and ippdrv in private data */ 421 /* stored property information and ippdrv in private data */
448 c_node->dev = dev;
449 c_node->property = *property; 422 c_node->property = *property;
450 c_node->state = IPP_STATE_IDLE; 423 c_node->state = IPP_STATE_IDLE;
424 c_node->filp = file;
451 425
452 c_node->start_work = ipp_create_cmd_work(); 426 c_node->start_work = ipp_create_cmd_work();
453 if (IS_ERR(c_node->start_work)) { 427 if (IS_ERR(c_node->start_work)) {
@@ -499,105 +473,37 @@ err_clear:
499 return ret; 473 return ret;
500} 474}
501 475
502static void ipp_clean_cmd_node(struct ipp_context *ctx, 476static int ipp_put_mem_node(struct drm_device *drm_dev,
503 struct drm_exynos_ipp_cmd_node *c_node)
504{
505 /* delete list */
506 list_del(&c_node->list);
507
508 ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock,
509 c_node->property.prop_id);
510
511 /* destroy mutex */
512 mutex_destroy(&c_node->lock);
513 mutex_destroy(&c_node->mem_lock);
514 mutex_destroy(&c_node->event_lock);
515
516 /* free command node */
517 kfree(c_node->start_work);
518 kfree(c_node->stop_work);
519 kfree(c_node->event_work);
520 kfree(c_node);
521}
522
523static bool ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
524{
525 switch (c_node->property.cmd) {
526 case IPP_CMD_WB:
527 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
528 case IPP_CMD_OUTPUT:
529 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]);
530 case IPP_CMD_M2M:
531 default:
532 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]) &&
533 !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
534 }
535}
536
537static struct drm_exynos_ipp_mem_node
538 *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
539 struct drm_exynos_ipp_queue_buf *qbuf)
540{
541 struct drm_exynos_ipp_mem_node *m_node;
542 struct list_head *head;
543 int count = 0;
544
545 DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id);
546
547 /* source/destination memory list */
548 head = &c_node->mem_list[qbuf->ops_id];
549
550 /* find memory node from memory list */
551 list_for_each_entry(m_node, head, list) {
552 DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node);
553
554 /* compare buffer id */
555 if (m_node->buf_id == qbuf->buf_id)
556 return m_node;
557 }
558
559 return NULL;
560}
561
562static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
563 struct drm_exynos_ipp_cmd_node *c_node, 477 struct drm_exynos_ipp_cmd_node *c_node,
564 struct drm_exynos_ipp_mem_node *m_node) 478 struct drm_exynos_ipp_mem_node *m_node)
565{ 479{
566 struct exynos_drm_ipp_ops *ops = NULL; 480 int i;
567 int ret = 0;
568 481
569 DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node); 482 DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
570 483
571 if (!m_node) { 484 if (!m_node) {
572 DRM_ERROR("invalid queue node.\n"); 485 DRM_ERROR("invalid dequeue node.\n");
573 return -EFAULT; 486 return -EFAULT;
574 } 487 }
575 488
576 DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id); 489 DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
577 490
578 /* get operations callback */ 491 /* put gem buffer */
579 ops = ippdrv->ops[m_node->ops_id]; 492 for_each_ipp_planar(i) {
580 if (!ops) { 493 unsigned long handle = m_node->buf_info.handles[i];
581 DRM_ERROR("not support ops.\n"); 494 if (handle)
582 return -EFAULT; 495 exynos_drm_gem_put_dma_addr(drm_dev, handle,
496 c_node->filp);
583 } 497 }
584 498
585 /* set address and enable irq */ 499 list_del(&m_node->list);
586 if (ops->set_addr) { 500 kfree(m_node);
587 ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
588 m_node->buf_id, IPP_BUF_ENQUEUE);
589 if (ret) {
590 DRM_ERROR("failed to set addr.\n");
591 return ret;
592 }
593 }
594 501
595 return ret; 502 return 0;
596} 503}
597 504
598static struct drm_exynos_ipp_mem_node 505static struct drm_exynos_ipp_mem_node
599 *ipp_get_mem_node(struct drm_device *drm_dev, 506 *ipp_get_mem_node(struct drm_device *drm_dev,
600 struct drm_file *file,
601 struct drm_exynos_ipp_cmd_node *c_node, 507 struct drm_exynos_ipp_cmd_node *c_node,
602 struct drm_exynos_ipp_queue_buf *qbuf) 508 struct drm_exynos_ipp_queue_buf *qbuf)
603{ 509{
@@ -615,6 +521,7 @@ static struct drm_exynos_ipp_mem_node
615 m_node->ops_id = qbuf->ops_id; 521 m_node->ops_id = qbuf->ops_id;
616 m_node->prop_id = qbuf->prop_id; 522 m_node->prop_id = qbuf->prop_id;
617 m_node->buf_id = qbuf->buf_id; 523 m_node->buf_id = qbuf->buf_id;
524 INIT_LIST_HEAD(&m_node->list);
618 525
619 DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id); 526 DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id);
620 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id); 527 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
@@ -627,10 +534,11 @@ static struct drm_exynos_ipp_mem_node
627 dma_addr_t *addr; 534 dma_addr_t *addr;
628 535
629 addr = exynos_drm_gem_get_dma_addr(drm_dev, 536 addr = exynos_drm_gem_get_dma_addr(drm_dev,
630 qbuf->handle[i], file); 537 qbuf->handle[i], c_node->filp);
631 if (IS_ERR(addr)) { 538 if (IS_ERR(addr)) {
632 DRM_ERROR("failed to get addr.\n"); 539 DRM_ERROR("failed to get addr.\n");
633 goto err_clear; 540 ipp_put_mem_node(drm_dev, c_node, m_node);
541 return ERR_PTR(-EFAULT);
634 } 542 }
635 543
636 buf_info->handles[i] = qbuf->handle[i]; 544 buf_info->handles[i] = qbuf->handle[i];
@@ -640,46 +548,30 @@ static struct drm_exynos_ipp_mem_node
640 } 548 }
641 } 549 }
642 550
643 m_node->filp = file;
644 mutex_lock(&c_node->mem_lock); 551 mutex_lock(&c_node->mem_lock);
645 list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]); 552 list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
646 mutex_unlock(&c_node->mem_lock); 553 mutex_unlock(&c_node->mem_lock);
647 554
648 return m_node; 555 return m_node;
649
650err_clear:
651 kfree(m_node);
652 return ERR_PTR(-EFAULT);
653} 556}
654 557
655static int ipp_put_mem_node(struct drm_device *drm_dev, 558static void ipp_clean_mem_nodes(struct drm_device *drm_dev,
656 struct drm_exynos_ipp_cmd_node *c_node, 559 struct drm_exynos_ipp_cmd_node *c_node, int ops)
657 struct drm_exynos_ipp_mem_node *m_node)
658{ 560{
659 int i; 561 struct drm_exynos_ipp_mem_node *m_node, *tm_node;
660 562 struct list_head *head = &c_node->mem_list[ops];
661 DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
662 563
663 if (!m_node) { 564 mutex_lock(&c_node->mem_lock);
664 DRM_ERROR("invalid dequeue node.\n");
665 return -EFAULT;
666 }
667 565
668 DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id); 566 list_for_each_entry_safe(m_node, tm_node, head, list) {
567 int ret;
669 568
670 /* put gem buffer */ 569 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
671 for_each_ipp_planar(i) { 570 if (ret)
672 unsigned long handle = m_node->buf_info.handles[i]; 571 DRM_ERROR("failed to put m_node.\n");
673 if (handle)
674 exynos_drm_gem_put_dma_addr(drm_dev, handle,
675 m_node->filp);
676 } 572 }
677 573
678 /* delete list in queue */ 574 mutex_unlock(&c_node->mem_lock);
679 list_del(&m_node->list);
680 kfree(m_node);
681
682 return 0;
683} 575}
684 576
685static void ipp_free_event(struct drm_pending_event *event) 577static void ipp_free_event(struct drm_pending_event *event)
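Note on the hunk above: the open-coded ipp_put_mem_node() loops are folded into ipp_clean_mem_nodes(), which walks a memory list with list_for_each_entry_safe() so that each node can be freed from inside the loop body. Below is a minimal sketch of why the "safe" form is needed, using a hand-rolled singly linked list instead of the kernel's list.h; all names are illustrative only.

    #include <stdio.h>
    #include <stdlib.h>

    struct mem_node {
        int buf_id;
        struct mem_node *next;
    };

    static void clean_mem_nodes(struct mem_node **head)
    {
        struct mem_node *m, *tmp;

        /* analogous to list_for_each_entry_safe(): remember the next
         * element before the current one is freed inside the loop */
        for (m = *head; m; m = tmp) {
            tmp = m->next;
            printf("put buf_id %d\n", m->buf_id);
            free(m);
        }
        *head = NULL;
    }

    int main(void)
    {
        struct mem_node *head = NULL;
        int i;

        for (i = 0; i < 3; i++) {
            struct mem_node *m = malloc(sizeof(*m));
            if (!m)
                break;
            m->buf_id = i;
            m->next = head;
            head = m;
        }
        clean_mem_nodes(&head);
        return 0;
    }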
@@ -688,7 +580,6 @@ static void ipp_free_event(struct drm_pending_event *event)
688} 580}
689 581
690static int ipp_get_event(struct drm_device *drm_dev, 582static int ipp_get_event(struct drm_device *drm_dev,
691 struct drm_file *file,
692 struct drm_exynos_ipp_cmd_node *c_node, 583 struct drm_exynos_ipp_cmd_node *c_node,
693 struct drm_exynos_ipp_queue_buf *qbuf) 584 struct drm_exynos_ipp_queue_buf *qbuf)
694{ 585{
@@ -700,7 +591,7 @@ static int ipp_get_event(struct drm_device *drm_dev,
700 e = kzalloc(sizeof(*e), GFP_KERNEL); 591 e = kzalloc(sizeof(*e), GFP_KERNEL);
701 if (!e) { 592 if (!e) {
702 spin_lock_irqsave(&drm_dev->event_lock, flags); 593 spin_lock_irqsave(&drm_dev->event_lock, flags);
703 file->event_space += sizeof(e->event); 594 c_node->filp->event_space += sizeof(e->event);
704 spin_unlock_irqrestore(&drm_dev->event_lock, flags); 595 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
705 return -ENOMEM; 596 return -ENOMEM;
706 } 597 }
@@ -712,7 +603,7 @@ static int ipp_get_event(struct drm_device *drm_dev,
712 e->event.prop_id = qbuf->prop_id; 603 e->event.prop_id = qbuf->prop_id;
713 e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id; 604 e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
714 e->base.event = &e->event.base; 605 e->base.event = &e->event.base;
715 e->base.file_priv = file; 606 e->base.file_priv = c_node->filp;
716 e->base.destroy = ipp_free_event; 607 e->base.destroy = ipp_free_event;
717 mutex_lock(&c_node->event_lock); 608 mutex_lock(&c_node->event_lock);
718 list_add_tail(&e->base.link, &c_node->event_list); 609 list_add_tail(&e->base.link, &c_node->event_list);
@@ -757,6 +648,115 @@ out_unlock:
757 return; 648 return;
758} 649}
759 650
651static void ipp_clean_cmd_node(struct ipp_context *ctx,
652 struct drm_exynos_ipp_cmd_node *c_node)
653{
654 int i;
655
656 /* cancel works */
657 cancel_work_sync(&c_node->start_work->work);
658 cancel_work_sync(&c_node->stop_work->work);
659 cancel_work_sync(&c_node->event_work->work);
660
661 /* put event */
662 ipp_put_event(c_node, NULL);
663
664 for_each_ipp_ops(i)
665 ipp_clean_mem_nodes(ctx->subdrv.drm_dev, c_node, i);
666
667 /* delete list */
668 list_del(&c_node->list);
669
670 ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock,
671 c_node->property.prop_id);
672
673 /* destroy mutex */
674 mutex_destroy(&c_node->lock);
675 mutex_destroy(&c_node->mem_lock);
676 mutex_destroy(&c_node->event_lock);
677
678 /* free command node */
679 kfree(c_node->start_work);
680 kfree(c_node->stop_work);
681 kfree(c_node->event_work);
682 kfree(c_node);
683}
684
685static bool ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
686{
687 switch (c_node->property.cmd) {
688 case IPP_CMD_WB:
689 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
690 case IPP_CMD_OUTPUT:
691 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]);
692 case IPP_CMD_M2M:
693 default:
694 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]) &&
695 !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
696 }
697}
698
699static struct drm_exynos_ipp_mem_node
700 *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
701 struct drm_exynos_ipp_queue_buf *qbuf)
702{
703 struct drm_exynos_ipp_mem_node *m_node;
704 struct list_head *head;
705 int count = 0;
706
707 DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id);
708
709 /* source/destination memory list */
710 head = &c_node->mem_list[qbuf->ops_id];
711
712 /* find memory node from memory list */
713 list_for_each_entry(m_node, head, list) {
714 DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node);
715
716 /* compare buffer id */
717 if (m_node->buf_id == qbuf->buf_id)
718 return m_node;
719 }
720
721 return NULL;
722}
723
724static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
725 struct drm_exynos_ipp_cmd_node *c_node,
726 struct drm_exynos_ipp_mem_node *m_node)
727{
728 struct exynos_drm_ipp_ops *ops = NULL;
729 int ret = 0;
730
731 DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
732
733 if (!m_node) {
734 DRM_ERROR("invalid queue node.\n");
735 return -EFAULT;
736 }
737
738 DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
739
740 /* get operations callback */
741 ops = ippdrv->ops[m_node->ops_id];
742 if (!ops) {
743 DRM_ERROR("not support ops.\n");
744 return -EFAULT;
745 }
746
747 /* set address and enable irq */
748 if (ops->set_addr) {
749 ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
750 m_node->buf_id, IPP_BUF_ENQUEUE);
751 if (ret) {
752 DRM_ERROR("failed to set addr.\n");
753 return ret;
754 }
755 }
756
757 return ret;
758}
759
760static void ipp_handle_cmd_work(struct device *dev, 760static void ipp_handle_cmd_work(struct device *dev,
761 struct exynos_drm_ippdrv *ippdrv, 761 struct exynos_drm_ippdrv *ippdrv,
762 struct drm_exynos_ipp_cmd_work *cmd_work, 762 struct drm_exynos_ipp_cmd_work *cmd_work,
@@ -766,7 +766,7 @@ static void ipp_handle_cmd_work(struct device *dev,
766 766
767 cmd_work->ippdrv = ippdrv; 767 cmd_work->ippdrv = ippdrv;
768 cmd_work->c_node = c_node; 768 cmd_work->c_node = c_node;
769 queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work); 769 queue_work(ctx->cmd_workq, &cmd_work->work);
770} 770}
771 771
772static int ipp_queue_buf_with_run(struct device *dev, 772static int ipp_queue_buf_with_run(struct device *dev,
@@ -872,7 +872,7 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
872 /* find command node */ 872 /* find command node */
873 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, 873 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
874 qbuf->prop_id); 874 qbuf->prop_id);
875 if (!c_node) { 875 if (!c_node || c_node->filp != file) {
876 DRM_ERROR("failed to get command node.\n"); 876 DRM_ERROR("failed to get command node.\n");
877 return -ENODEV; 877 return -ENODEV;
878 } 878 }
@@ -881,7 +881,7 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
881 switch (qbuf->buf_type) { 881 switch (qbuf->buf_type) {
882 case IPP_BUF_ENQUEUE: 882 case IPP_BUF_ENQUEUE:
883 /* get memory node */ 883 /* get memory node */
884 m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf); 884 m_node = ipp_get_mem_node(drm_dev, c_node, qbuf);
885 if (IS_ERR(m_node)) { 885 if (IS_ERR(m_node)) {
886 DRM_ERROR("failed to get m_node.\n"); 886 DRM_ERROR("failed to get m_node.\n");
887 return PTR_ERR(m_node); 887 return PTR_ERR(m_node);
@@ -894,7 +894,7 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
894 */ 894 */
895 if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) { 895 if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
896 /* get event for destination buffer */ 896 /* get event for destination buffer */
897 ret = ipp_get_event(drm_dev, file, c_node, qbuf); 897 ret = ipp_get_event(drm_dev, c_node, qbuf);
898 if (ret) { 898 if (ret) {
899 DRM_ERROR("failed to get event.\n"); 899 DRM_ERROR("failed to get event.\n");
900 goto err_clean_node; 900 goto err_clean_node;
@@ -1007,7 +1007,7 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
1007 1007
1008 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, 1008 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
1009 cmd_ctrl->prop_id); 1009 cmd_ctrl->prop_id);
1010 if (!c_node) { 1010 if (!c_node || c_node->filp != file) {
1011 DRM_ERROR("invalid command node list.\n"); 1011 DRM_ERROR("invalid command node list.\n");
1012 return -ENODEV; 1012 return -ENODEV;
1013 } 1013 }
@@ -1257,80 +1257,39 @@ static int ipp_stop_property(struct drm_device *drm_dev,
1257 struct exynos_drm_ippdrv *ippdrv, 1257 struct exynos_drm_ippdrv *ippdrv,
1258 struct drm_exynos_ipp_cmd_node *c_node) 1258 struct drm_exynos_ipp_cmd_node *c_node)
1259{ 1259{
1260 struct drm_exynos_ipp_mem_node *m_node, *tm_node;
1261 struct drm_exynos_ipp_property *property = &c_node->property; 1260 struct drm_exynos_ipp_property *property = &c_node->property;
1262 struct list_head *head; 1261 int i;
1263 int ret = 0, i;
1264 1262
1265 DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id); 1263 DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
1266 1264
1267 /* put event */ 1265 /* stop operations */
1268 ipp_put_event(c_node, NULL); 1266 if (ippdrv->stop)
1269 1267 ippdrv->stop(ippdrv->dev, property->cmd);
1270 mutex_lock(&c_node->mem_lock);
1271 1268
1272 /* check command */ 1269 /* check command */
1273 switch (property->cmd) { 1270 switch (property->cmd) {
1274 case IPP_CMD_M2M: 1271 case IPP_CMD_M2M:
1275 for_each_ipp_ops(i) { 1272 for_each_ipp_ops(i)
1276 /* source/destination memory list */ 1273 ipp_clean_mem_nodes(drm_dev, c_node, i);
1277 head = &c_node->mem_list[i];
1278
1279 list_for_each_entry_safe(m_node, tm_node,
1280 head, list) {
1281 ret = ipp_put_mem_node(drm_dev, c_node,
1282 m_node);
1283 if (ret) {
1284 DRM_ERROR("failed to put m_node.\n");
1285 goto err_clear;
1286 }
1287 }
1288 }
1289 break; 1274 break;
1290 case IPP_CMD_WB: 1275 case IPP_CMD_WB:
1291 /* destination memory list */ 1276 ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_DST);
1292 head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
1293
1294 list_for_each_entry_safe(m_node, tm_node, head, list) {
1295 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1296 if (ret) {
1297 DRM_ERROR("failed to put m_node.\n");
1298 goto err_clear;
1299 }
1300 }
1301 break; 1277 break;
1302 case IPP_CMD_OUTPUT: 1278 case IPP_CMD_OUTPUT:
1303 /* source memory list */ 1279 ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_SRC);
1304 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
1305
1306 list_for_each_entry_safe(m_node, tm_node, head, list) {
1307 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1308 if (ret) {
1309 DRM_ERROR("failed to put m_node.\n");
1310 goto err_clear;
1311 }
1312 }
1313 break; 1280 break;
1314 default: 1281 default:
1315 DRM_ERROR("invalid operations.\n"); 1282 DRM_ERROR("invalid operations.\n");
1316 ret = -EINVAL; 1283 return -EINVAL;
1317 goto err_clear;
1318 } 1284 }
1319 1285
1320err_clear: 1286 return 0;
1321 mutex_unlock(&c_node->mem_lock);
1322
1323 /* stop operations */
1324 if (ippdrv->stop)
1325 ippdrv->stop(ippdrv->dev, property->cmd);
1326
1327 return ret;
1328} 1287}
1329 1288
1330void ipp_sched_cmd(struct work_struct *work) 1289void ipp_sched_cmd(struct work_struct *work)
1331{ 1290{
1332 struct drm_exynos_ipp_cmd_work *cmd_work = 1291 struct drm_exynos_ipp_cmd_work *cmd_work =
1333 (struct drm_exynos_ipp_cmd_work *)work; 1292 container_of(work, struct drm_exynos_ipp_cmd_work, work);
1334 struct exynos_drm_ippdrv *ippdrv; 1293 struct exynos_drm_ippdrv *ippdrv;
1335 struct drm_exynos_ipp_cmd_node *c_node; 1294 struct drm_exynos_ipp_cmd_node *c_node;
1336 struct drm_exynos_ipp_property *property; 1295 struct drm_exynos_ipp_property *property;
@@ -1543,7 +1502,7 @@ err_event_unlock:
1543void ipp_sched_event(struct work_struct *work) 1502void ipp_sched_event(struct work_struct *work)
1544{ 1503{
1545 struct drm_exynos_ipp_event_work *event_work = 1504 struct drm_exynos_ipp_event_work *event_work =
1546 (struct drm_exynos_ipp_event_work *)work; 1505 container_of(work, struct drm_exynos_ipp_event_work, work);
1547 struct exynos_drm_ippdrv *ippdrv; 1506 struct exynos_drm_ippdrv *ippdrv;
1548 struct drm_exynos_ipp_cmd_node *c_node; 1507 struct drm_exynos_ipp_cmd_node *c_node;
1549 int ret; 1508 int ret;
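Note on the two hunks above: the work handlers stop casting the work_struct pointer back to its wrapper type and use container_of() instead, matching the earlier change that queues &cmd_work->work rather than a casted pointer. The cast only happens to work while the embedded work_struct is the first member of the wrapper; container_of() recovers the wrapper for any layout. A small userspace sketch follows, with a simplified container_of() definition for illustration (the kernel's version adds type checking):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work_struct { int pending; };

    struct cmd_work {
        int some_state;                 /* the embedded work is NOT the first member */
        struct work_struct work;
    };

    static void handler(struct work_struct *w)
    {
        struct cmd_work *cw = container_of(w, struct cmd_work, work);
        /* a plain (struct cmd_work *)w cast would point offsetof(work) bytes too early */
        printf("state = %d\n", cw->some_state);
    }

    int main(void)
    {
        struct cmd_work cw = { .some_state = 42 };
        handler(&cw.work);
        return 0;
    }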
@@ -1646,11 +1605,11 @@ err:
1646 1605
1647static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev) 1606static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
1648{ 1607{
1649 struct exynos_drm_ippdrv *ippdrv; 1608 struct exynos_drm_ippdrv *ippdrv, *t;
1650 struct ipp_context *ctx = get_ipp_context(dev); 1609 struct ipp_context *ctx = get_ipp_context(dev);
1651 1610
1652 /* get ipp driver entry */ 1611 /* get ipp driver entry */
1653 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { 1612 list_for_each_entry_safe(ippdrv, t, &exynos_drm_ippdrv_list, drv_list) {
1654 if (is_drm_iommu_supported(drm_dev)) 1613 if (is_drm_iommu_supported(drm_dev))
1655 drm_iommu_detach_device(drm_dev, ippdrv->dev); 1614 drm_iommu_detach_device(drm_dev, ippdrv->dev);
1656 1615
@@ -1677,14 +1636,11 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
1677static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev, 1636static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
1678 struct drm_file *file) 1637 struct drm_file *file)
1679{ 1638{
1680 struct drm_exynos_file_private *file_priv = file->driver_priv;
1681 struct exynos_drm_ippdrv *ippdrv = NULL; 1639 struct exynos_drm_ippdrv *ippdrv = NULL;
1682 struct ipp_context *ctx = get_ipp_context(dev); 1640 struct ipp_context *ctx = get_ipp_context(dev);
1683 struct drm_exynos_ipp_cmd_node *c_node, *tc_node; 1641 struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
1684 int count = 0; 1642 int count = 0;
1685 1643
1686 DRM_DEBUG_KMS("for priv[0x%x]\n", (int)file_priv->ipp_dev);
1687
1688 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { 1644 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1689 mutex_lock(&ippdrv->cmd_lock); 1645 mutex_lock(&ippdrv->cmd_lock);
1690 list_for_each_entry_safe(c_node, tc_node, 1646 list_for_each_entry_safe(c_node, tc_node,
@@ -1692,7 +1648,7 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
1692 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", 1648 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
1693 count++, (int)ippdrv); 1649 count++, (int)ippdrv);
1694 1650
1695 if (c_node->dev == file_priv->ipp_dev) { 1651 if (c_node->filp == file) {
1696 /* 1652 /*
1697 * userland went into an abnormal state: the process was 1653 * userland went into an abnormal state: the process was
1698 * killed and the file closed. 1654 * killed and the file closed.
@@ -1808,63 +1764,12 @@ static int ipp_remove(struct platform_device *pdev)
1808 return 0; 1764 return 0;
1809} 1765}
1810 1766
1811static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
1812{
1813 DRM_DEBUG_KMS("enable[%d]\n", enable);
1814
1815 return 0;
1816}
1817
1818#ifdef CONFIG_PM_SLEEP
1819static int ipp_suspend(struct device *dev)
1820{
1821 struct ipp_context *ctx = get_ipp_context(dev);
1822
1823 if (pm_runtime_suspended(dev))
1824 return 0;
1825
1826 return ipp_power_ctrl(ctx, false);
1827}
1828
1829static int ipp_resume(struct device *dev)
1830{
1831 struct ipp_context *ctx = get_ipp_context(dev);
1832
1833 if (!pm_runtime_suspended(dev))
1834 return ipp_power_ctrl(ctx, true);
1835
1836 return 0;
1837}
1838#endif
1839
1840#ifdef CONFIG_PM_RUNTIME
1841static int ipp_runtime_suspend(struct device *dev)
1842{
1843 struct ipp_context *ctx = get_ipp_context(dev);
1844
1845 return ipp_power_ctrl(ctx, false);
1846}
1847
1848static int ipp_runtime_resume(struct device *dev)
1849{
1850 struct ipp_context *ctx = get_ipp_context(dev);
1851
1852 return ipp_power_ctrl(ctx, true);
1853}
1854#endif
1855
1856static const struct dev_pm_ops ipp_pm_ops = {
1857 SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
1858 SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
1859};
1860
1861struct platform_driver ipp_driver = { 1767struct platform_driver ipp_driver = {
1862 .probe = ipp_probe, 1768 .probe = ipp_probe,
1863 .remove = ipp_remove, 1769 .remove = ipp_remove,
1864 .driver = { 1770 .driver = {
1865 .name = "exynos-drm-ipp", 1771 .name = "exynos-drm-ipp",
1866 .owner = THIS_MODULE, 1772 .owner = THIS_MODULE,
1867 .pm = &ipp_pm_ops,
1868 }, 1773 },
1869}; 1774};
1870 1775
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
index 6f48d62aeb30..2a61547a39d0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -48,7 +48,6 @@ struct drm_exynos_ipp_cmd_work {
48/* 48/*
49 * A structure of command node. 49 * A structure of command node.
50 * 50 *
51 * @dev: IPP device.
52 * @list: list head to command queue information. 51 * @list: list head to command queue information.
53 * @event_list: list head of event. 52 * @event_list: list head of event.
54 * @mem_list: list head to source,destination memory queue information. 53 * @mem_list: list head to source,destination memory queue information.
@@ -62,9 +61,9 @@ struct drm_exynos_ipp_cmd_work {
62 * @stop_work: stop command work structure. 61 * @stop_work: stop command work structure.
63 * @event_work: event work structure. 62 * @event_work: event work structure.
64 * @state: state of command node. 63 * @state: state of command node.
64 * @filp: associated file pointer.
65 */ 65 */
66struct drm_exynos_ipp_cmd_node { 66struct drm_exynos_ipp_cmd_node {
67 struct device *dev;
68 struct list_head list; 67 struct list_head list;
69 struct list_head event_list; 68 struct list_head event_list;
70 struct list_head mem_list[EXYNOS_DRM_OPS_MAX]; 69 struct list_head mem_list[EXYNOS_DRM_OPS_MAX];
@@ -78,6 +77,7 @@ struct drm_exynos_ipp_cmd_node {
78 struct drm_exynos_ipp_cmd_work *stop_work; 77 struct drm_exynos_ipp_cmd_work *stop_work;
79 struct drm_exynos_ipp_event_work *event_work; 78 struct drm_exynos_ipp_event_work *event_work;
80 enum drm_exynos_ipp_state state; 79 enum drm_exynos_ipp_state state;
80 struct drm_file *filp;
81}; 81};
82 82
83/* 83/*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 8371cbd7631d..c7045a663763 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -139,6 +139,8 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
139 overlay->crtc_x, overlay->crtc_y, 139 overlay->crtc_x, overlay->crtc_y,
140 overlay->crtc_width, overlay->crtc_height); 140 overlay->crtc_width, overlay->crtc_height);
141 141
142 plane->crtc = crtc;
143
142 exynos_drm_crtc_plane_mode_set(crtc, overlay); 144 exynos_drm_crtc_plane_mode_set(crtc, overlay);
143 145
144 return 0; 146 return 0;
@@ -187,8 +189,6 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
187 if (ret < 0) 189 if (ret < 0)
188 return ret; 190 return ret;
189 191
190 plane->crtc = crtc;
191
192 exynos_plane_commit(plane); 192 exynos_plane_commit(plane);
193 exynos_plane_dpms(plane, DRM_MODE_DPMS_ON); 193 exynos_plane_dpms(plane, DRM_MODE_DPMS_ON);
194 194
@@ -254,25 +254,26 @@ static void exynos_plane_attach_zpos_property(struct drm_plane *plane)
254} 254}
255 255
256struct drm_plane *exynos_plane_init(struct drm_device *dev, 256struct drm_plane *exynos_plane_init(struct drm_device *dev,
257 unsigned long possible_crtcs, bool priv) 257 unsigned long possible_crtcs,
258 enum drm_plane_type type)
258{ 259{
259 struct exynos_plane *exynos_plane; 260 struct exynos_plane *exynos_plane;
260 int err; 261 int err;
261 262
262 exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL); 263 exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL);
263 if (!exynos_plane) 264 if (!exynos_plane)
264 return NULL; 265 return ERR_PTR(-ENOMEM);
265 266
266 err = drm_plane_init(dev, &exynos_plane->base, possible_crtcs, 267 err = drm_universal_plane_init(dev, &exynos_plane->base, possible_crtcs,
267 &exynos_plane_funcs, formats, ARRAY_SIZE(formats), 268 &exynos_plane_funcs, formats,
268 priv); 269 ARRAY_SIZE(formats), type);
269 if (err) { 270 if (err) {
270 DRM_ERROR("failed to initialize plane\n"); 271 DRM_ERROR("failed to initialize plane\n");
271 kfree(exynos_plane); 272 kfree(exynos_plane);
272 return NULL; 273 return ERR_PTR(err);
273 } 274 }
274 275
275 if (priv) 276 if (type == DRM_PLANE_TYPE_PRIMARY)
276 exynos_plane->overlay.zpos = DEFAULT_ZPOS; 277 exynos_plane->overlay.zpos = DEFAULT_ZPOS;
277 else 278 else
278 exynos_plane_attach_zpos_property(&exynos_plane->base); 279 exynos_plane_attach_zpos_property(&exynos_plane->base);
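Note on the hunk above: exynos_plane_init() now takes an explicit drm_plane_type for drm_universal_plane_init() and reports failure as an ERR_PTR()-encoded errno instead of NULL, so callers can use IS_ERR()/PTR_ERR() to learn why it failed. The sketch below shows that convention in plain userspace C with simplified macro definitions for illustration; it is not the kernel's err.h, and plane_init/simulate_failure are made-up names.

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* simplified re-definitions for illustration only */
    #define MAX_ERRNO   4095
    #define ERR_PTR(err)  ((void *)(long)(err))
    #define PTR_ERR(ptr)  ((long)(ptr))
    #define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    static void *plane_init(int simulate_failure)
    {
        void *plane;

        if (simulate_failure)
            return ERR_PTR(-ENOMEM);    /* instead of the old "return NULL" */

        plane = malloc(16);
        if (!plane)
            return ERR_PTR(-ENOMEM);
        return plane;
    }

    int main(void)
    {
        void *plane = plane_init(1);

        if (IS_ERR(plane))
            printf("plane_init failed: %ld\n", PTR_ERR(plane));
        else
            free(plane);
        return 0;
    }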
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h
index 84d464c90d3d..0d1986b115f8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.h
@@ -17,4 +17,5 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
17void exynos_plane_commit(struct drm_plane *plane); 17void exynos_plane_commit(struct drm_plane *plane);
18void exynos_plane_dpms(struct drm_plane *plane, int mode); 18void exynos_plane_dpms(struct drm_plane *plane, int mode);
19struct drm_plane *exynos_plane_init(struct drm_device *dev, 19struct drm_plane *exynos_plane_init(struct drm_device *dev,
20 unsigned long possible_crtcs, bool priv); 20 unsigned long possible_crtcs,
21 enum drm_plane_type type);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 55af6b41c1df..b6a37d4f5b13 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -156,8 +156,7 @@ static irqreturn_t rotator_irq_handler(int irq, void *arg)
156 event_work->ippdrv = ippdrv; 156 event_work->ippdrv = ippdrv;
157 event_work->buf_id[EXYNOS_DRM_OPS_DST] = 157 event_work->buf_id[EXYNOS_DRM_OPS_DST] =
158 rot->cur_buf_id[EXYNOS_DRM_OPS_DST]; 158 rot->cur_buf_id[EXYNOS_DRM_OPS_DST];
159 queue_work(ippdrv->event_workq, 159 queue_work(ippdrv->event_workq, &event_work->work);
160 (struct work_struct *)event_work);
161 } else { 160 } else {
162 DRM_ERROR("the SFR is set illegally\n"); 161 DRM_ERROR("the SFR is set illegally\n");
163 } 162 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 9528d81d8004..d565207040a2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -303,23 +303,6 @@ static int vidi_mgr_initialize(struct exynos_drm_manager *mgr,
303 mgr->drm_dev = ctx->drm_dev = drm_dev; 303 mgr->drm_dev = ctx->drm_dev = drm_dev;
304 mgr->pipe = ctx->pipe = priv->pipe++; 304 mgr->pipe = ctx->pipe = priv->pipe++;
305 305
306 /*
307 * enable drm irq mode.
308 * - with irq_enabled = 1, we can use the vblank feature.
309 *
310 * P.S. note that we wouldn't use drm irq handler but
311 * just specific driver own one instead because
312 * drm framework supports only one irq handler.
313 */
314 drm_dev->irq_enabled = 1;
315
316 /*
317 * with vblank_disable_allowed = 1, vblank interrupt will be disabled
318 * by drm timer once a current process gives up ownership of
319 * vblank event.(after drm_vblank_put function is called)
320 */
321 drm_dev->vblank_disable_allowed = 1;
322
323 return 0; 306 return 0;
324} 307}
325 308
@@ -648,7 +631,6 @@ static int vidi_remove(struct platform_device *pdev)
648 struct exynos_drm_manager *mgr = platform_get_drvdata(pdev); 631 struct exynos_drm_manager *mgr = platform_get_drvdata(pdev);
649 struct vidi_context *ctx = mgr->ctx; 632 struct vidi_context *ctx = mgr->ctx;
650 struct drm_encoder *encoder = ctx->encoder; 633 struct drm_encoder *encoder = ctx->encoder;
651 struct drm_crtc *crtc = mgr->crtc;
652 634
653 if (ctx->raw_edid != (struct edid *)fake_edid_info) { 635 if (ctx->raw_edid != (struct edid *)fake_edid_info) {
654 kfree(ctx->raw_edid); 636 kfree(ctx->raw_edid);
@@ -657,7 +639,6 @@ static int vidi_remove(struct platform_device *pdev)
657 return -EINVAL; 639 return -EINVAL;
658 } 640 }
659 641
660 crtc->funcs->destroy(crtc);
661 encoder->funcs->destroy(encoder); 642 encoder->funcs->destroy(encoder);
662 drm_connector_cleanup(&ctx->connector); 643 drm_connector_cleanup(&ctx->connector);
663 644
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 562966db2aa1..7910fb37d9bb 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1040,6 +1040,8 @@ static enum drm_connector_status hdmi_detect(struct drm_connector *connector,
1040 1040
1041static void hdmi_connector_destroy(struct drm_connector *connector) 1041static void hdmi_connector_destroy(struct drm_connector *connector)
1042{ 1042{
1043 drm_connector_unregister(connector);
1044 drm_connector_cleanup(connector);
1043} 1045}
1044 1046
1045static struct drm_connector_funcs hdmi_connector_funcs = { 1047static struct drm_connector_funcs hdmi_connector_funcs = {
@@ -2314,8 +2316,8 @@ static void hdmi_unbind(struct device *dev, struct device *master, void *data)
2314 struct drm_encoder *encoder = display->encoder; 2316 struct drm_encoder *encoder = display->encoder;
2315 struct hdmi_context *hdata = display->ctx; 2317 struct hdmi_context *hdata = display->ctx;
2316 2318
2319 hdmi_connector_destroy(&hdata->connector);
2317 encoder->funcs->destroy(encoder); 2320 encoder->funcs->destroy(encoder);
2318 drm_connector_cleanup(&hdata->connector);
2319} 2321}
2320 2322
2321static const struct component_ops hdmi_component_ops = { 2323static const struct component_ops hdmi_component_ops = {
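Note on the HDMI hunks above: hdmi_connector_destroy() now both unregisters and cleans up the connector, and hdmi_unbind() calls it before destroying the encoder instead of only running drm_connector_cleanup() afterwards. A toy sketch of that ordering, with purely illustrative names and state flags:

    #include <stdbool.h>
    #include <stdio.h>

    struct connector { bool registered; bool initialized; };

    static void connector_destroy(struct connector *c)
    {
        if (c->registered) {            /* unregister was previously skipped on removal */
            c->registered = false;
            printf("connector unregistered\n");
        }
        c->initialized = false;
        printf("connector cleaned up\n");
    }

    static void encoder_destroy(void)
    {
        printf("encoder destroyed\n");
    }

    static void hdmi_unbind(struct connector *c)
    {
        connector_destroy(c);   /* connector torn down first */
        encoder_destroy();
    }

    int main(void)
    {
        struct connector c = { .registered = true, .initialized = true };
        hdmi_unbind(&c);
        return 0;
    }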
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index e8b4ec84b312..a41c84ee3a2d 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1302,15 +1302,12 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
1302static void mixer_unbind(struct device *dev, struct device *master, void *data) 1302static void mixer_unbind(struct device *dev, struct device *master, void *data)
1303{ 1303{
1304 struct exynos_drm_manager *mgr = dev_get_drvdata(dev); 1304 struct exynos_drm_manager *mgr = dev_get_drvdata(dev);
1305 struct drm_crtc *crtc = mgr->crtc;
1306 1305
1307 dev_info(dev, "remove successful\n"); 1306 dev_info(dev, "remove successful\n");
1308 1307
1309 mixer_mgr_remove(mgr); 1308 mixer_mgr_remove(mgr);
1310 1309
1311 pm_runtime_disable(dev); 1310 pm_runtime_disable(dev);
1312
1313 crtc->funcs->destroy(crtc);
1314} 1311}
1315 1312
1316static const struct component_ops mixer_component_ops = { 1313static const struct component_ops mixer_component_ops = {