author		Eunchul Kim <chulspro.kim@samsung.com>	2012-12-14 04:10:31 -0500
committer	Inki Dae <daeinki@gmail.com>	2012-12-14 12:29:08 -0500
commit		cb471f14b5eebfed22bb9f2d0f06601f171c574a (patch)
tree		955334335ef7d041553dc6d6b1351b9a8f3fa593 /drivers/gpu
parent		d636ead86fb806085de4ce98693e8d91c419d8f3 (diff)
drm/exynos: add ipp subsystem
This patch adds Image Post Processing (IPP) support to the exynos drm
driver. IPP supports image scaler/rotator and input/output DMA operations,
using the IPP subsystem framework to control the FIMC, Rotator and GSC
hardware, and provides user interfaces to user space. Each IPP-based driver
supports memory-to-memory operations with various conversions. The FIMC
hardware additionally supports writeback and display output operations
through its local path.

Features:
- Memory to memory operation support.
- Various pixel format support.
- Image scaling support.
- Color space conversion support.
- Image crop operation support.
- Rotate operation support for 90, 180 or 270 degrees.
- Flip operation support for vertical, horizontal or both.
- Writeback operation support to display the blended image of the FIMD
  fifo on screen.

A summary of the IPP subsystem operations: first, the user gets the
property capabilities from the IPP subsystem and sets these properties to
the hardware registers for the desired operations. The properties can be
pixel format, position, rotation degree and flip operation. Next, the user
sets the source and destination buffer data using the
DRM_EXYNOS_IPP_QUEUE_BUF ioctl command with gem handles to the source and
destination buffers. The user can then control the chosen hardware with
the desired operations, such as play, stop, pause and resume. Finally, the
user is notified of DMA operation completion and can retrieve the
destination buffer containing the desired result through the dequeue
command.

IOCTL commands:
- DRM_EXYNOS_IPP_GET_PROPERTY
  . get ipp driver capabilities and id.
- DRM_EXYNOS_IPP_SET_PROPERTY
  . set format, position, rotation, flip to source and destination buffers.
- DRM_EXYNOS_IPP_QUEUE_BUF
  . enqueue/dequeue buffer and make event list.
- DRM_EXYNOS_IPP_CMD_CTRL
  . play/stop/pause/resume control.

Event:
- DRM_EXYNOS_IPP_EVENT
  . an event to notify DMA operation completion to user space.

Basic control flow:
Open -> Get properties -> User chooses the desired IPP sub driver (FIMC,
Rotator or GSCALER) -> Set property -> Create gem handle -> Enqueue source
and destination buffers -> Command control (play) -> Event is notified to
user -> User gets the completed destination buffer -> (Enqueue source and
destination buffers -> Event is notified to user) * N -> Queue/dequeue
source and destination buffers -> Command control (stop) -> Free gem
handle -> Close

Changelog v1 ~ v5:
- added comments, code fixups and cleanups.

Signed-off-by: Eunchul Kim <chulspro.kim@samsung.com>
Signed-off-by: Jinyoung Jeon <jy0.jeon@samsung.com>
Signed-off-by: Inki Dae <inki.dae@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
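For orientation, the control flow above reduces to a short user-space
skeleton. The ioctl numbers and structure layouts below come from the
exynos_drm.h uapi additions elsewhere in this series; the format and
geometry values are placeholder assumptions and error handling is elided,
so this is a sketch rather than definitive usage:

/* Sketch: one memory-to-memory pass through the IPP subsystem. */
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm_fourcc.h>
#include <drm/exynos_drm.h>

static int ipp_m2m_start(int fd, unsigned int src_handle,
                         unsigned int dst_handle)
{
        struct drm_exynos_ipp_property prop;
        struct drm_exynos_ipp_queue_buf qbuf;
        struct drm_exynos_ipp_cmd_ctrl ctrl;

        /* Describe the operation. ipp_id == 0 lets the subsystem pick
         * any driver whose check_property() accepts the request. */
        memset(&prop, 0, sizeof(prop));
        prop.cmd = IPP_CMD_M2M;
        prop.config[EXYNOS_DRM_OPS_SRC].fmt = DRM_FORMAT_XRGB8888;
        prop.config[EXYNOS_DRM_OPS_SRC].sz.hsize = 640;
        prop.config[EXYNOS_DRM_OPS_SRC].sz.vsize = 480;
        prop.config[EXYNOS_DRM_OPS_SRC].pos.w = 640;
        prop.config[EXYNOS_DRM_OPS_SRC].pos.h = 480;
        prop.config[EXYNOS_DRM_OPS_DST] = prop.config[EXYNOS_DRM_OPS_SRC];
        if (ioctl(fd, DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY, &prop))
                return -1;

        /* Enqueue one gem buffer per direction; the dst enqueue also
         * creates the completion event. */
        memset(&qbuf, 0, sizeof(qbuf));
        qbuf.prop_id = prop.prop_id;
        qbuf.buf_type = IPP_BUF_ENQUEUE;
        qbuf.ops_id = EXYNOS_DRM_OPS_SRC;
        qbuf.handle[0] = src_handle;
        if (ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf))
                return -1;

        qbuf.ops_id = EXYNOS_DRM_OPS_DST;
        qbuf.handle[0] = dst_handle;
        if (ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf))
                return -1;

        /* Start; completion arrives as a DRM_EXYNOS_IPP_EVENT. */
        memset(&ctrl, 0, sizeof(ctrl));
        ctrl.prop_id = prop.prop_id;
        ctrl.ctrl = IPP_CTRL_PLAY;
        return ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl);
}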
Diffstat (limited to 'drivers/gpu')
-rw-r--r--	drivers/gpu/drm/exynos/Kconfig		|    6
-rw-r--r--	drivers/gpu/drm/exynos/Makefile		|    1
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_drv.c	|   24
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_drv.h	|    7
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_ipp.c	| 2042
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_ipp.h	|  266
6 files changed, 2346 insertions, 0 deletions
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 86fb75d3fcad..80ab242e2739 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -45,3 +45,9 @@ config DRM_EXYNOS_G2D
 	depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D
 	help
 	  Choose this option if you want to use Exynos G2D for DRM.
+
+config DRM_EXYNOS_IPP
+	bool "Exynos DRM IPP"
+	depends on DRM_EXYNOS
+	help
+	  Choose this option if you want to use IPP feature for DRM.
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 26813b8a5056..6c536ce4d95b 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -16,5 +16,6 @@ exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \
 		exynos_drm_hdmi.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI)	+= exynos_drm_vidi.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_G2D)	+= exynos_drm_g2d.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_IPP)	+= exynos_drm_ipp.o
 
 obj-$(CONFIG_DRM_EXYNOS)		+= exynosdrm.o
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 4a1168d3e907..0eb8a972e21c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -40,6 +40,7 @@
 #include "exynos_drm_vidi.h"
 #include "exynos_drm_dmabuf.h"
 #include "exynos_drm_g2d.h"
+#include "exynos_drm_ipp.h"
 #include "exynos_drm_iommu.h"
 
 #define DRIVER_NAME	"exynos"
@@ -249,6 +250,14 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
 			exynos_g2d_set_cmdlist_ioctl, DRM_UNLOCKED | DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC,
 			exynos_g2d_exec_ioctl, DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY,
+			exynos_drm_ipp_get_property, DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY,
+			exynos_drm_ipp_set_property, DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF,
+			exynos_drm_ipp_queue_buf, DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL,
+			exynos_drm_ipp_cmd_ctrl, DRM_UNLOCKED | DRM_AUTH),
 };
 
 static const struct file_operations exynos_drm_driver_fops = {
@@ -363,6 +372,12 @@ static int __init exynos_drm_init(void)
 		goto out_g2d;
 #endif
 
+#ifdef CONFIG_DRM_EXYNOS_IPP
+	ret = platform_driver_register(&ipp_driver);
+	if (ret < 0)
+		goto out_ipp;
+#endif
+
 	ret = platform_driver_register(&exynos_drm_platform_driver);
 	if (ret < 0)
 		goto out_drm;
@@ -380,6 +395,11 @@ out:
 	platform_driver_unregister(&exynos_drm_platform_driver);
 
 out_drm:
+#ifdef CONFIG_DRM_EXYNOS_IPP
+	platform_driver_unregister(&ipp_driver);
+out_ipp:
+#endif
+
 #ifdef CONFIG_DRM_EXYNOS_G2D
 	platform_driver_unregister(&g2d_driver);
 out_g2d:
@@ -416,6 +436,10 @@ static void __exit exynos_drm_exit(void)
 
 	platform_driver_unregister(&exynos_drm_platform_driver);
 
+#ifdef CONFIG_DRM_EXYNOS_IPP
+	platform_driver_unregister(&ipp_driver);
+#endif
+
 #ifdef CONFIG_DRM_EXYNOS_G2D
 	platform_driver_unregister(&g2d_driver);
 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index e4ea74df4fc2..44ab3c7b6a90 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -232,8 +232,14 @@ struct exynos_drm_g2d_private {
 	struct list_head	userptr_list;
 };
 
+struct exynos_drm_ipp_private {
+	struct device	*dev;
+	struct list_head	event_list;
+};
+
 struct drm_exynos_file_private {
 	struct exynos_drm_g2d_private	*g2d_priv;
+	struct exynos_drm_ipp_private	*ipp_priv;
 };
 
 /*
@@ -343,4 +349,5 @@ extern struct platform_driver mixer_driver;
 extern struct platform_driver exynos_drm_common_hdmi_driver;
 extern struct platform_driver vidi_driver;
 extern struct platform_driver g2d_driver;
+extern struct platform_driver ipp_driver;
 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
new file mode 100644
index 000000000000..c640935ab7d7
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -0,0 +1,2042 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ *	Eunchul Kim <chulspro.kim@samsung.com>
+ *	Jinyoung Jeon <jy0.jeon@samsung.com>
+ *	Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <plat/map-base.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
+#include "exynos_drm_ipp.h"
+
+/*
+ * IPP stands for Image Post Processing; it supports image
+ * scaler/rotator and input/output DMA operations using FIMC, GSC,
+ * Rotator and so on. IPP is an integrated device driver for hardware
+ * blocks with these shared attributes.
+ */
+
+/*
+ * TODO
+ * 1. expand command control id.
+ * 2. integrate property and config.
+ * 3. remove the send_event id check routine.
+ * 4. compare send_event id if needed.
+ * 5. free subdrv_remove notifier callback list if needed.
+ * 6. check subdrv_open about multi-open.
+ * 7. implement power and sysmmu control in power_on.
+ */
+
+#define get_ipp_context(dev)	platform_get_drvdata(to_platform_device(dev))
+#define ipp_is_m2m_cmd(c)	(c == IPP_CMD_M2M)
+
+/*
+ * A structure of event.
+ *
+ * @base: base of event.
+ * @event: ipp event.
+ */
+struct drm_exynos_ipp_send_event {
+	struct drm_pending_event	base;
+	struct drm_exynos_ipp_event	event;
+};
+
+/*
+ * A structure of memory node.
+ *
+ * @list: list head to memory queue information.
+ * @ops_id: id of operations.
+ * @prop_id: id of property.
+ * @buf_id: id of buffer.
+ * @buf_info: gem objects and dma address, size.
+ * @filp: a pointer to drm_file.
+ */
+struct drm_exynos_ipp_mem_node {
+	struct list_head	list;
+	enum drm_exynos_ops_id	ops_id;
+	u32	prop_id;
+	u32	buf_id;
+	struct drm_exynos_ipp_buf_info	buf_info;
+	struct drm_file		*filp;
+};
+
+/*
+ * A structure of ipp context.
+ *
+ * @subdrv: prepare initialization using subdrv.
+ * @ipp_lock: lock for synchronization of access to ipp_idr.
+ * @prop_lock: lock for synchronization of access to prop_idr.
+ * @ipp_idr: ipp driver idr.
+ * @prop_idr: property idr.
+ * @event_workq: event work queue.
+ * @cmd_workq: command work queue.
+ */
+struct ipp_context {
+	struct exynos_drm_subdrv	subdrv;
+	struct mutex	ipp_lock;
+	struct mutex	prop_lock;
+	struct idr	ipp_idr;
+	struct idr	prop_idr;
+	struct workqueue_struct	*event_workq;
+	struct workqueue_struct	*cmd_workq;
+};
+
+static LIST_HEAD(exynos_drm_ippdrv_list);
+static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
+static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
+
+int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
+{
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (!ippdrv)
+		return -EINVAL;
+
+	mutex_lock(&exynos_drm_ippdrv_lock);
+	list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
+	mutex_unlock(&exynos_drm_ippdrv_lock);
+
+	return 0;
+}
+
+int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
+{
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (!ippdrv)
+		return -EINVAL;
+
+	mutex_lock(&exynos_drm_ippdrv_lock);
+	list_del(&ippdrv->drv_list);
+	mutex_unlock(&exynos_drm_ippdrv_lock);
+
+	return 0;
+}
+
+static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
+		u32 *idp)
+{
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+again:
+	/* ensure there is space available to allocate a handle */
+	if (idr_pre_get(id_idr, GFP_KERNEL) == 0) {
+		DRM_ERROR("failed to get idr.\n");
+		return -ENOMEM;
+	}
+
+	/* do the allocation under our mutex lock */
+	mutex_lock(lock);
+	ret = idr_get_new_above(id_idr, obj, 1, (int *)idp);
+	mutex_unlock(lock);
+	if (ret == -EAGAIN)
+		goto again;
+
+	return ret;
+}
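A side note on the allocation above: the idr_pre_get()/idr_get_new_above()
retry loop was the idr idiom of this era. From v3.9 onward the same
allocation collapses into a single idr_alloc() call; a minimal sketch of
the later equivalent, shown for comparison only and not part of this patch:

/* Post-3.9 equivalent: idr_alloc() returns the new id or a negative
 * errno, so the -EAGAIN retry loop disappears. */
static int ipp_create_id_modern(struct idr *id_idr, struct mutex *lock,
		void *obj, u32 *idp)
{
	int ret;

	mutex_lock(lock);
	ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
	mutex_unlock(lock);
	if (ret < 0)
		return ret;

	*idp = ret;
	return 0;
}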
+
+static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
+{
+	void *obj;
+
+	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, id);
+
+	mutex_lock(lock);
+
+	/* find object using handle */
+	obj = idr_find(id_idr, id);
+	if (!obj) {
+		DRM_ERROR("failed to find object.\n");
+		mutex_unlock(lock);
+		return ERR_PTR(-ENODEV);
+	}
+
+	mutex_unlock(lock);
+
+	return obj;
+}
+
+static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
+		enum drm_exynos_ipp_cmd cmd)
+{
+	/*
+	 * check the dedicated flag, and WB/OUTPUT operation with
+	 * power on state.
+	 */
+	if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
+	    !pm_runtime_suspended(ippdrv->dev)))
+		return true;
+
+	return false;
+}
+
+static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
+		struct drm_exynos_ipp_property *property)
+{
+	struct exynos_drm_ippdrv *ippdrv;
+	u32 ipp_id = property->ipp_id;
+
+	DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, ipp_id);
+
+	if (ipp_id) {
+		/* find ipp driver using idr */
+		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
+			ipp_id);
+		if (IS_ERR_OR_NULL(ippdrv)) {
+			DRM_ERROR("not found ipp%d driver.\n", ipp_id);
+			return ippdrv;
+		}
+
+		/*
+		 * WB and OUTPUT operations do not support multi-operation,
+		 * so the dedicated state is set at the set property ioctl.
+		 * When the ipp driver has finished its operations, the
+		 * dedicated flag is cleared.
+		 */
+		if (ipp_check_dedicated(ippdrv, property->cmd)) {
+			DRM_ERROR("already used choose device.\n");
+			return ERR_PTR(-EBUSY);
+		}
+
+		/*
+		 * This is necessary to find the correct device among the
+		 * ipp drivers. The ipp drivers have different abilities,
+		 * so the property needs to be checked.
+		 */
+		if (ippdrv->check_property &&
+		    ippdrv->check_property(ippdrv->dev, property)) {
+			DRM_ERROR("not support property.\n");
+			return ERR_PTR(-EINVAL);
+		}
+
+		return ippdrv;
+	} else {
+		/*
+		 * In this case the user application did not set ipp_id,
+		 * so the ipp subsystem searches the whole driver list
+		 * for a suitable driver.
+		 */
+		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+			if (ipp_check_dedicated(ippdrv, property->cmd)) {
+				DRM_DEBUG_KMS("%s:used device.\n", __func__);
+				continue;
+			}
+
+			if (ippdrv->check_property &&
+			    ippdrv->check_property(ippdrv->dev, property)) {
+				DRM_DEBUG_KMS("%s:not support property.\n",
+					__func__);
+				continue;
+			}
+
+			return ippdrv;
+		}
+
+		DRM_ERROR("not support ipp driver operations.\n");
+	}
+
+	return ERR_PTR(-ENODEV);
+}
+
+static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
+{
+	struct exynos_drm_ippdrv *ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node;
+	int count = 0;
+
+	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
+
+	if (list_empty(&exynos_drm_ippdrv_list)) {
+		DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
+		return ERR_PTR(-ENODEV);
+	}
+
+	/*
+	 * Search the ipp driver by prop_id handle.
+	 * Sometimes the ipp subsystem finds the driver by prop_id,
+	 * e.g. PAUSE state, queue buf, command control.
+	 */
+	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+		DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n", __func__,
+			count++, (int)ippdrv);
+
+		if (!list_empty(&ippdrv->cmd_list)) {
+			list_for_each_entry(c_node, &ippdrv->cmd_list, list)
+				if (c_node->property.prop_id == prop_id)
+					return ippdrv;
+		}
+	}
+
+	return ERR_PTR(-ENODEV);
+}
+
+int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
+		struct drm_file *file)
+{
+	struct drm_exynos_file_private *file_priv = file->driver_priv;
+	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+	struct device *dev = priv->dev;
+	struct ipp_context *ctx = get_ipp_context(dev);
+	struct drm_exynos_ipp_prop_list *prop_list = data;
+	struct exynos_drm_ippdrv *ippdrv;
+	int count = 0;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (!ctx) {
+		DRM_ERROR("invalid context.\n");
+		return -EINVAL;
+	}
+
+	if (!prop_list) {
+		DRM_ERROR("invalid property parameter.\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, prop_list->ipp_id);
+
+	if (!prop_list->ipp_id) {
+		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
+			count++;
+		/*
+		 * Supply the ippdrv list count to the user application.
+		 * In the first step the user application gets the ippdrv
+		 * count, and in the second step it gets the ippdrv
+		 * capability using ipp_id.
+		 */
+		prop_list->count = count;
+	} else {
+		/*
+		 * Get the ippdrv capability by ipp_id.
+		 * Some devices do not support the wb and output
+		 * interfaces, so the user application detects the
+		 * correct ipp driver using this ioctl.
+		 */
+		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
+			prop_list->ipp_id);
+		if (IS_ERR_OR_NULL(ippdrv)) {
+			DRM_ERROR("not found ipp%d driver.\n",
+				prop_list->ipp_id);
+			return -EINVAL;
+		}
+
+		*prop_list = *ippdrv->prop_list;
+	}
+
+	return 0;
+}
+
+static void ipp_print_property(struct drm_exynos_ipp_property *property,
+		int idx)
+{
+	struct drm_exynos_ipp_config *config = &property->config[idx];
+	struct drm_exynos_pos *pos = &config->pos;
+	struct drm_exynos_sz *sz = &config->sz;
+
+	DRM_DEBUG_KMS("%s:prop_id[%d]ops[%s]fmt[0x%x]\n",
+		__func__, property->prop_id, idx ? "dst" : "src", config->fmt);
+
+	DRM_DEBUG_KMS("%s:pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
+		__func__, pos->x, pos->y, pos->w, pos->h,
+		sz->hsize, sz->vsize, config->flip, config->degree);
+}
+
+static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
+{
+	struct exynos_drm_ippdrv *ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node;
+	u32 prop_id = property->prop_id;
+
+	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
+
+	ippdrv = ipp_find_drv_by_handle(prop_id);
+	if (IS_ERR_OR_NULL(ippdrv)) {
+		DRM_ERROR("failed to get ipp driver.\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Find the command node using the command list in ippdrv.
+	 * When the command node matching prop_id is found, store the
+	 * property information in that command node.
+	 */
+	list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
+		if ((c_node->property.prop_id == prop_id) &&
+		    (c_node->state == IPP_STATE_STOP)) {
+			DRM_DEBUG_KMS("%s:found cmd[%d]ippdrv[0x%x]\n",
+				__func__, property->cmd, (int)ippdrv);
+
+			c_node->property = *property;
+			return 0;
+		}
+	}
+
+	DRM_ERROR("failed to search property.\n");
+
+	return -EINVAL;
+}
+
+static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
+{
+	struct drm_exynos_ipp_cmd_work *cmd_work;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
+	if (!cmd_work) {
+		DRM_ERROR("failed to alloc cmd_work.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);
+
+	return cmd_work;
+}
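Note that the INIT_WORK() cast above (and the matching one in
ipp_create_event_work() below) is only safe because the struct work_struct
is the first member of the work structure. A container_of() lookup in the
handler would not depend on member ordering; a minimal sketch, assuming the
embedded member is named "work" (the actual name lives in exynos_drm_ipp.h,
so this is an illustrative assumption, not part of the patch):

/* Sketch: recover the containing cmd_work without assuming the work
 * member sits at offset zero. */
static void ipp_sched_cmd_alt(struct work_struct *work)
{
	struct drm_exynos_ipp_cmd_work *cmd_work =
		container_of(work, struct drm_exynos_ipp_cmd_work, work);

	/* ... continue as ipp_sched_cmd() does ... */
	(void)cmd_work;
}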
+
+static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
+{
+	struct drm_exynos_ipp_event_work *event_work;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
+	if (!event_work) {
+		DRM_ERROR("failed to alloc event_work.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	INIT_WORK((struct work_struct *)event_work, ipp_sched_event);
+
+	return event_work;
+}
+
+int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
+		struct drm_file *file)
+{
+	struct drm_exynos_file_private *file_priv = file->driver_priv;
+	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+	struct device *dev = priv->dev;
+	struct ipp_context *ctx = get_ipp_context(dev);
+	struct drm_exynos_ipp_property *property = data;
+	struct exynos_drm_ippdrv *ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node;
+	int ret, i;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (!ctx) {
+		DRM_ERROR("invalid context.\n");
+		return -EINVAL;
+	}
+
+	if (!property) {
+		DRM_ERROR("invalid property parameter.\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Log the various properties set by the user application.
+	 */
+	for_each_ipp_ops(i)
+		ipp_print_property(property, i);
+
+	/*
+	 * The set property ioctl generates a new prop_id, but in this
+	 * case a prop_id was already assigned by an earlier set property
+	 * call, e.g. in the PAUSE state. Find the current prop_id and
+	 * use it instead of allocating a new one.
+	 */
+	if (property->prop_id) {
+		DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+		return ipp_find_and_set_property(property);
+	}
+
+	/* find ipp driver using ipp id */
+	ippdrv = ipp_find_driver(ctx, property);
+	if (IS_ERR_OR_NULL(ippdrv)) {
+		DRM_ERROR("failed to get ipp driver.\n");
+		return -EINVAL;
+	}
+
+	/* allocate command node */
+	c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
+	if (!c_node) {
+		DRM_ERROR("failed to allocate map node.\n");
+		return -ENOMEM;
+	}
+
+	/* create property id */
+	ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
+		&property->prop_id);
+	if (ret) {
+		DRM_ERROR("failed to create id.\n");
+		goto err_clear;
+	}
+
+	DRM_DEBUG_KMS("%s:created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
+		__func__, property->prop_id, property->cmd, (int)ippdrv);
+
+	/* store property information and ippdrv in private data */
+	c_node->priv = priv;
+	c_node->property = *property;
+	c_node->state = IPP_STATE_IDLE;
+
+	c_node->start_work = ipp_create_cmd_work();
+	if (IS_ERR_OR_NULL(c_node->start_work)) {
+		DRM_ERROR("failed to create start work.\n");
+		ret = PTR_ERR(c_node->start_work);
+		goto err_clear;
+	}
+
+	c_node->stop_work = ipp_create_cmd_work();
+	if (IS_ERR_OR_NULL(c_node->stop_work)) {
+		DRM_ERROR("failed to create stop work.\n");
+		ret = PTR_ERR(c_node->stop_work);
+		goto err_free_start;
+	}
+
+	c_node->event_work = ipp_create_event_work();
+	if (IS_ERR_OR_NULL(c_node->event_work)) {
+		DRM_ERROR("failed to create event work.\n");
+		ret = PTR_ERR(c_node->event_work);
+		goto err_free_stop;
+	}
+
+	mutex_init(&c_node->cmd_lock);
+	mutex_init(&c_node->mem_lock);
+	mutex_init(&c_node->event_lock);
+
+	init_completion(&c_node->start_complete);
+	init_completion(&c_node->stop_complete);
+
+	for_each_ipp_ops(i)
+		INIT_LIST_HEAD(&c_node->mem_list[i]);
+
+	INIT_LIST_HEAD(&c_node->event_list);
+	list_splice_init(&priv->event_list, &c_node->event_list);
+	list_add_tail(&c_node->list, &ippdrv->cmd_list);
+
+	/* make dedicated state without m2m */
+	if (!ipp_is_m2m_cmd(property->cmd))
+		ippdrv->dedicated = true;
+
+	return 0;
+
+err_free_stop:
+	kfree(c_node->stop_work);
+err_free_start:
+	kfree(c_node->start_work);
+err_clear:
+	kfree(c_node);
+	return ret;
+}
+
+static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
+{
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	/* delete list */
+	list_del(&c_node->list);
+
+	/* destroy mutex */
+	mutex_destroy(&c_node->cmd_lock);
+	mutex_destroy(&c_node->mem_lock);
+	mutex_destroy(&c_node->event_lock);
+
+	/* free command node */
+	kfree(c_node->start_work);
+	kfree(c_node->stop_work);
+	kfree(c_node->event_work);
+	kfree(c_node);
+}
+
+static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
+{
+	struct drm_exynos_ipp_property *property = &c_node->property;
+	struct drm_exynos_ipp_mem_node *m_node;
+	struct list_head *head;
+	int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	mutex_lock(&c_node->mem_lock);
+
+	for_each_ipp_ops(i) {
+		/* source/destination memory list */
+		head = &c_node->mem_list[i];
+
+		if (list_empty(head)) {
+			DRM_DEBUG_KMS("%s:%s memory empty.\n", __func__,
+				i ? "dst" : "src");
+			continue;
+		}
+
+		/* find memory node entry */
+		list_for_each_entry(m_node, head, list) {
+			DRM_DEBUG_KMS("%s:%s,count[%d]m_node[0x%x]\n", __func__,
+				i ? "dst" : "src", count[i], (int)m_node);
+			count[i]++;
+		}
+	}
+
+	DRM_DEBUG_KMS("%s:min[%d]max[%d]\n", __func__,
+		min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
+		max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));
+
+	/*
+	 * M2M operations need paired src/dst memory addresses,
+	 * so check the minimum of the src and dst counts.
+	 * Other cases do not use paired memory, so use the maximum count.
+	 */
+	if (ipp_is_m2m_cmd(property->cmd))
+		ret = min(count[EXYNOS_DRM_OPS_SRC],
+			count[EXYNOS_DRM_OPS_DST]);
+	else
+		ret = max(count[EXYNOS_DRM_OPS_SRC],
+			count[EXYNOS_DRM_OPS_DST]);
+
+	mutex_unlock(&c_node->mem_lock);
+
+	return ret;
+}
+
+static struct drm_exynos_ipp_mem_node
+		*ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
+		struct drm_exynos_ipp_queue_buf *qbuf)
+{
+	struct drm_exynos_ipp_mem_node *m_node;
+	struct list_head *head;
+	int count = 0;
+
+	DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, qbuf->buf_id);
+
+	/* source/destination memory list */
+	head = &c_node->mem_list[qbuf->ops_id];
+
+	/* find memory node from memory list */
+	list_for_each_entry(m_node, head, list) {
+		DRM_DEBUG_KMS("%s:count[%d]m_node[0x%x]\n",
+			__func__, count++, (int)m_node);
+
+		/* compare buffer id */
+		if (m_node->buf_id == qbuf->buf_id)
+			return m_node;
+	}
+
+	return NULL;
+}
+
+static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
+		struct drm_exynos_ipp_cmd_node *c_node,
+		struct drm_exynos_ipp_mem_node *m_node)
+{
+	struct exynos_drm_ipp_ops *ops = NULL;
+	int ret = 0;
+
+	DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
+
+	if (!m_node) {
+		DRM_ERROR("invalid queue node.\n");
+		return -EFAULT;
+	}
+
+	mutex_lock(&c_node->mem_lock);
+
+	DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
+
+	/* get operations callback */
+	ops = ippdrv->ops[m_node->ops_id];
+	if (!ops) {
+		DRM_ERROR("not support ops.\n");
+		ret = -EFAULT;
+		goto err_unlock;
+	}
+
+	/* set address and enable irq */
+	if (ops->set_addr) {
+		ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
+			m_node->buf_id, IPP_BUF_ENQUEUE);
+		if (ret) {
+			DRM_ERROR("failed to set addr.\n");
+			goto err_unlock;
+		}
+	}
+
+err_unlock:
+	mutex_unlock(&c_node->mem_lock);
+	return ret;
+}
+
+static struct drm_exynos_ipp_mem_node
+		*ipp_get_mem_node(struct drm_device *drm_dev,
+		struct drm_file *file,
+		struct drm_exynos_ipp_cmd_node *c_node,
+		struct drm_exynos_ipp_queue_buf *qbuf)
+{
+	struct drm_exynos_ipp_mem_node *m_node;
+	struct drm_exynos_ipp_buf_info buf_info;
+	void *addr;
+	int i;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	mutex_lock(&c_node->mem_lock);
+
+	m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
+	if (!m_node) {
+		DRM_ERROR("failed to allocate queue node.\n");
+		goto err_unlock;
+	}
+
+	/* clear base address for error handling */
+	memset(&buf_info, 0x0, sizeof(buf_info));
+
+	/* operations, buffer id */
+	m_node->ops_id = qbuf->ops_id;
+	m_node->prop_id = qbuf->prop_id;
+	m_node->buf_id = qbuf->buf_id;
+
+	DRM_DEBUG_KMS("%s:m_node[0x%x]ops_id[%d]\n", __func__,
+		(int)m_node, qbuf->ops_id);
+	DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]\n", __func__,
+		qbuf->prop_id, m_node->buf_id);
+
+	for_each_ipp_planar(i) {
+		DRM_DEBUG_KMS("%s:i[%d]handle[0x%x]\n", __func__,
+			i, qbuf->handle[i]);
+
+		/* get dma address by handle */
+		if (qbuf->handle[i]) {
+			addr = exynos_drm_gem_get_dma_addr(drm_dev,
+				qbuf->handle[i], file);
+			if (IS_ERR(addr)) {
+				DRM_ERROR("failed to get addr.\n");
+				goto err_clear;
+			}
+
+			buf_info.handles[i] = qbuf->handle[i];
+			buf_info.base[i] = *(dma_addr_t *) addr;
+			DRM_DEBUG_KMS("%s:i[%d]base[0x%x]hd[0x%x]\n",
+				__func__, i, buf_info.base[i],
+				(int)buf_info.handles[i]);
+		}
+	}
+
+	m_node->filp = file;
+	m_node->buf_info = buf_info;
+	list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
+
+	mutex_unlock(&c_node->mem_lock);
+	return m_node;
+
+err_clear:
+	kfree(m_node);
+err_unlock:
+	mutex_unlock(&c_node->mem_lock);
+	return ERR_PTR(-EFAULT);
+}
+
+static int ipp_put_mem_node(struct drm_device *drm_dev,
+		struct drm_exynos_ipp_cmd_node *c_node,
+		struct drm_exynos_ipp_mem_node *m_node)
+{
+	int i;
+
+	DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
+
+	if (!m_node) {
+		DRM_ERROR("invalid dequeue node.\n");
+		return -EFAULT;
+	}
+
+	if (list_empty(&m_node->list)) {
+		DRM_ERROR("empty memory node.\n");
+		return -ENOMEM;
+	}
+
+	mutex_lock(&c_node->mem_lock);
+
+	DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
+
+	/* put gem buffer */
+	for_each_ipp_planar(i) {
+		unsigned long handle = m_node->buf_info.handles[i];
+		if (handle)
+			exynos_drm_gem_put_dma_addr(drm_dev, handle,
+				m_node->filp);
+	}
+
+	/* delete list in queue */
+	list_del(&m_node->list);
+	kfree(m_node);
+
+	mutex_unlock(&c_node->mem_lock);
+
+	return 0;
+}
+
+static void ipp_free_event(struct drm_pending_event *event)
+{
+	kfree(event);
+}
+
+static int ipp_get_event(struct drm_device *drm_dev,
+		struct drm_file *file,
+		struct drm_exynos_ipp_cmd_node *c_node,
+		struct drm_exynos_ipp_queue_buf *qbuf)
+{
+	struct drm_exynos_ipp_send_event *e;
+	unsigned long flags;
+
+	DRM_DEBUG_KMS("%s:ops_id[%d]buf_id[%d]\n", __func__,
+		qbuf->ops_id, qbuf->buf_id);
+
+	e = kzalloc(sizeof(*e), GFP_KERNEL);
+
+	if (!e) {
+		DRM_ERROR("failed to allocate event.\n");
+		spin_lock_irqsave(&drm_dev->event_lock, flags);
+		file->event_space += sizeof(e->event);
+		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+		return -ENOMEM;
+	}
+
+	/* make event */
+	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
+	e->event.base.length = sizeof(e->event);
+	e->event.user_data = qbuf->user_data;
+	e->event.prop_id = qbuf->prop_id;
+	e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
+	e->base.event = &e->event.base;
+	e->base.file_priv = file;
+	e->base.destroy = ipp_free_event;
+	list_add_tail(&e->base.link, &c_node->event_list);
+
+	return 0;
+}
+
+static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
+		struct drm_exynos_ipp_queue_buf *qbuf)
+{
+	struct drm_exynos_ipp_send_event *e, *te;
+	int count = 0;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (list_empty(&c_node->event_list)) {
+		DRM_DEBUG_KMS("%s:event_list is empty.\n", __func__);
+		return;
+	}
+
+	list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
+		DRM_DEBUG_KMS("%s:count[%d]e[0x%x]\n",
+			__func__, count++, (int)e);
+
+		/*
+		 * A NULL qbuf means delete all events.
+		 * The stop operation deletes the whole event list;
+		 * otherwise delete only the event with the same buf id.
+		 */
+		if (!qbuf) {
+			/* delete list */
+			list_del(&e->base.link);
+			kfree(e);
+		}
+
+		/* compare buffer id */
+		if (qbuf && (qbuf->buf_id ==
+		    e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
+			/* delete list */
+			list_del(&e->base.link);
+			kfree(e);
+			return;
+		}
+	}
+}
+
+void ipp_handle_cmd_work(struct device *dev,
+		struct exynos_drm_ippdrv *ippdrv,
+		struct drm_exynos_ipp_cmd_work *cmd_work,
+		struct drm_exynos_ipp_cmd_node *c_node)
+{
+	struct ipp_context *ctx = get_ipp_context(dev);
+
+	cmd_work->ippdrv = ippdrv;
+	cmd_work->c_node = c_node;
+	queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
+}
+
+static int ipp_queue_buf_with_run(struct device *dev,
+		struct drm_exynos_ipp_cmd_node *c_node,
+		struct drm_exynos_ipp_mem_node *m_node,
+		struct drm_exynos_ipp_queue_buf *qbuf)
+{
+	struct exynos_drm_ippdrv *ippdrv;
+	struct drm_exynos_ipp_property *property;
+	struct exynos_drm_ipp_ops *ops;
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
+	if (IS_ERR_OR_NULL(ippdrv)) {
+		DRM_ERROR("failed to get ipp driver.\n");
+		return -EFAULT;
+	}
+
+	ops = ippdrv->ops[qbuf->ops_id];
+	if (!ops) {
+		DRM_ERROR("failed to get ops.\n");
+		return -EFAULT;
+	}
+
+	property = &c_node->property;
+
+	if (c_node->state != IPP_STATE_START) {
+		DRM_DEBUG_KMS("%s:bypass for invalid state.\n", __func__);
+		return 0;
+	}
+
+	if (!ipp_check_mem_list(c_node)) {
+		DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
+		return 0;
+	}
+
+	/*
+	 * If the destination buffer is set and the clock is enabled,
+	 * m2m operations need to start at queue_buf.
+	 */
+	if (ipp_is_m2m_cmd(property->cmd)) {
+		struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;
+
+		cmd_work->ctrl = IPP_CTRL_PLAY;
+		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+	} else {
+		ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+		if (ret) {
+			DRM_ERROR("failed to set m node.\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void ipp_clean_queue_buf(struct drm_device *drm_dev,
+		struct drm_exynos_ipp_cmd_node *c_node,
+		struct drm_exynos_ipp_queue_buf *qbuf)
+{
+	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
+		/* delete list */
+		list_for_each_entry_safe(m_node, tm_node,
+			&c_node->mem_list[qbuf->ops_id], list) {
+			if (m_node->buf_id == qbuf->buf_id &&
+			    m_node->ops_id == qbuf->ops_id)
+				ipp_put_mem_node(drm_dev, c_node, m_node);
+		}
+	}
+}
+
+int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
+		struct drm_file *file)
+{
+	struct drm_exynos_file_private *file_priv = file->driver_priv;
+	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+	struct device *dev = priv->dev;
+	struct ipp_context *ctx = get_ipp_context(dev);
+	struct drm_exynos_ipp_queue_buf *qbuf = data;
+	struct drm_exynos_ipp_cmd_node *c_node;
+	struct drm_exynos_ipp_mem_node *m_node;
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (!qbuf) {
+		DRM_ERROR("invalid buf parameter.\n");
+		return -EINVAL;
+	}
+
+	if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
+		DRM_ERROR("invalid ops parameter.\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG_KMS("%s:prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
+		__func__, qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
+		qbuf->buf_id, qbuf->buf_type);
+
+	/* find command node */
+	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
+		qbuf->prop_id);
+	if (IS_ERR_OR_NULL(c_node)) {
+		DRM_ERROR("failed to get command node.\n");
+		return -EFAULT;
+	}
+
+	/* buffer control */
+	switch (qbuf->buf_type) {
+	case IPP_BUF_ENQUEUE:
+		/* get memory node */
+		m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
+		if (IS_ERR(m_node)) {
+			DRM_ERROR("failed to get m_node.\n");
+			return PTR_ERR(m_node);
+		}
+
+		/*
+		 * The first step is to get an event for the destination
+		 * buffer; the second step, in the M2M case, is to run
+		 * with the destination buffer if needed.
+		 */
+		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
+			/* get event for destination buffer */
+			ret = ipp_get_event(drm_dev, file, c_node, qbuf);
+			if (ret) {
+				DRM_ERROR("failed to get event.\n");
+				goto err_clean_node;
+			}
+
+			/*
+			 * The M2M case runs the play control for the
+			 * streaming feature; other cases set the
+			 * address and wait.
+			 */
+			ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
+			if (ret) {
+				DRM_ERROR("failed to run command.\n");
+				goto err_clean_node;
+			}
+		}
+		break;
+	case IPP_BUF_DEQUEUE:
+		mutex_lock(&c_node->cmd_lock);
+
+		/* put event for destination buffer */
+		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
+			ipp_put_event(c_node, qbuf);
+
+		ipp_clean_queue_buf(drm_dev, c_node, qbuf);
+
+		mutex_unlock(&c_node->cmd_lock);
+		break;
+	default:
+		DRM_ERROR("invalid buffer control.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+
+err_clean_node:
+	DRM_ERROR("clean memory nodes.\n");
+
+	ipp_clean_queue_buf(drm_dev, c_node, qbuf);
+	return ret;
+}
+
+static bool exynos_drm_ipp_check_valid(struct device *dev,
+		enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
+{
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (ctrl != IPP_CTRL_PLAY) {
+		if (pm_runtime_suspended(dev)) {
+			DRM_ERROR("pm:runtime_suspended.\n");
+			goto err_status;
+		}
+	}
+
+	switch (ctrl) {
+	case IPP_CTRL_PLAY:
+		if (state != IPP_STATE_IDLE)
+			goto err_status;
+		break;
+	case IPP_CTRL_STOP:
+		if (state == IPP_STATE_STOP)
+			goto err_status;
+		break;
+	case IPP_CTRL_PAUSE:
+		if (state != IPP_STATE_START)
+			goto err_status;
+		break;
+	case IPP_CTRL_RESUME:
+		if (state != IPP_STATE_STOP)
+			goto err_status;
+		break;
+	default:
+		DRM_ERROR("invalid state.\n");
+		goto err_status;
+	}
+
+	return true;
+
+err_status:
+	DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
+	return false;
+}
+
+int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
+		struct drm_file *file)
+{
+	struct drm_exynos_file_private *file_priv = file->driver_priv;
+	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+	struct exynos_drm_ippdrv *ippdrv = NULL;
+	struct device *dev = priv->dev;
+	struct ipp_context *ctx = get_ipp_context(dev);
+	struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
+	struct drm_exynos_ipp_cmd_work *cmd_work;
+	struct drm_exynos_ipp_cmd_node *c_node;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (!ctx) {
+		DRM_ERROR("invalid context.\n");
+		return -EINVAL;
+	}
+
+	if (!cmd_ctrl) {
+		DRM_ERROR("invalid control parameter.\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG_KMS("%s:ctrl[%d]prop_id[%d]\n", __func__,
+		cmd_ctrl->ctrl, cmd_ctrl->prop_id);
+
+	ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
+	if (IS_ERR(ippdrv)) {
+		DRM_ERROR("failed to get ipp driver.\n");
+		return PTR_ERR(ippdrv);
+	}
+
+	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
+		cmd_ctrl->prop_id);
+	if (IS_ERR_OR_NULL(c_node)) {
+		DRM_ERROR("invalid command node list.\n");
+		return -EINVAL;
+	}
+
+	if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
+	    c_node->state)) {
+		DRM_ERROR("invalid state.\n");
+		return -EINVAL;
+	}
+
+	switch (cmd_ctrl->ctrl) {
+	case IPP_CTRL_PLAY:
+		if (pm_runtime_suspended(ippdrv->dev))
+			pm_runtime_get_sync(ippdrv->dev);
+		c_node->state = IPP_STATE_START;
+
+		cmd_work = c_node->start_work;
+		cmd_work->ctrl = cmd_ctrl->ctrl;
+		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+		break;
+	case IPP_CTRL_STOP:
+		cmd_work = c_node->stop_work;
+		cmd_work->ctrl = cmd_ctrl->ctrl;
+		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+
+		if (!wait_for_completion_timeout(&c_node->stop_complete,
+		    msecs_to_jiffies(300))) {
+			DRM_ERROR("timeout stop:prop_id[%d]\n",
+				c_node->property.prop_id);
+		}
+
+		c_node->state = IPP_STATE_STOP;
+		ippdrv->dedicated = false;
+		ipp_clean_cmd_node(c_node);
+
+		if (list_empty(&ippdrv->cmd_list))
+			pm_runtime_put_sync(ippdrv->dev);
+		break;
+	case IPP_CTRL_PAUSE:
+		cmd_work = c_node->stop_work;
+		cmd_work->ctrl = cmd_ctrl->ctrl;
+		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+
+		if (!wait_for_completion_timeout(&c_node->stop_complete,
+		    msecs_to_jiffies(200))) {
+			DRM_ERROR("timeout stop:prop_id[%d]\n",
+				c_node->property.prop_id);
+		}
+
+		c_node->state = IPP_STATE_STOP;
+		break;
+	case IPP_CTRL_RESUME:
+		c_node->state = IPP_STATE_START;
+		cmd_work = c_node->start_work;
+		cmd_work->ctrl = cmd_ctrl->ctrl;
+		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+		break;
+	default:
+		DRM_ERROR("could not support this state currently.\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG_KMS("%s:done ctrl[%d]prop_id[%d]\n", __func__,
+		cmd_ctrl->ctrl, cmd_ctrl->prop_id);
+
+	return 0;
+}
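From user space, all four controls go through this one ioctl. A minimal
pause/resume round trip, a hedged sketch in the same style as the skeleton
under the commit message:

/* Sketch: pause and later resume a running property. */
static int ipp_pause_resume(int fd, unsigned int prop_id)
{
	struct drm_exynos_ipp_cmd_ctrl ctrl = {
		.prop_id = prop_id,
		.ctrl = IPP_CTRL_PAUSE,
	};

	if (ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl))
		return -1;

	ctrl.ctrl = IPP_CTRL_RESUME;
	return ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl);
}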
+
+int exynos_drm_ippnb_register(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_register(
+		&exynos_drm_ippnb_list, nb);
+}
+
+int exynos_drm_ippnb_unregister(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_unregister(
+		&exynos_drm_ippnb_list, nb);
+}
+
+int exynos_drm_ippnb_send_event(unsigned long val, void *v)
+{
+	return blocking_notifier_call_chain(
+		&exynos_drm_ippnb_list, val, v);
+}
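The notifier chain above is how other blocks in the driver can subscribe
to IPP state changes; the event codes passed through
exynos_drm_ippnb_send_event() are defined by the individual ipp drivers.
A minimal in-kernel subscriber sketch, with the callback body left as an
assumption:

#include <linux/notifier.h>

static int example_ipp_notifier(struct notifier_block *nb,
		unsigned long val, void *data)
{
	/* react to the ipp event encoded in val/data */
	return NOTIFY_DONE;
}

static struct notifier_block example_ipp_nb = {
	.notifier_call = example_ipp_notifier,
};

static int example_subscribe(void)
{
	return exynos_drm_ippnb_register(&example_ipp_nb);
}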
+
+static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
+		struct drm_exynos_ipp_property *property)
+{
+	struct exynos_drm_ipp_ops *ops = NULL;
+	bool swap = false;
+	int ret, i;
+
+	if (!property) {
+		DRM_ERROR("invalid property parameter.\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+	/* reset h/w block */
+	if (ippdrv->reset &&
+	    ippdrv->reset(ippdrv->dev)) {
+		DRM_ERROR("failed to reset.\n");
+		return -EINVAL;
+	}
+
+	/* set source, destination operations */
+	for_each_ipp_ops(i) {
+		struct drm_exynos_ipp_config *config =
+			&property->config[i];
+
+		ops = ippdrv->ops[i];
+		if (!ops || !config) {
+			DRM_ERROR("not support ops and config.\n");
+			return -EINVAL;
+		}
+
+		/* set format */
+		if (ops->set_fmt) {
+			ret = ops->set_fmt(ippdrv->dev, config->fmt);
+			if (ret) {
+				DRM_ERROR("not support format.\n");
+				return ret;
+			}
+		}
+
+		/* set transform for rotation, flip */
+		if (ops->set_transf) {
+			ret = ops->set_transf(ippdrv->dev, config->degree,
+				config->flip, &swap);
+			if (ret) {
+				DRM_ERROR("not support transf.\n");
+				return -EINVAL;
+			}
+		}
+
+		/* set size */
+		if (ops->set_size) {
+			ret = ops->set_size(ippdrv->dev, swap, &config->pos,
+				&config->sz);
+			if (ret) {
+				DRM_ERROR("not support size.\n");
+				return ret;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
+		struct drm_exynos_ipp_cmd_node *c_node)
+{
+	struct drm_exynos_ipp_mem_node *m_node;
+	struct drm_exynos_ipp_property *property = &c_node->property;
+	struct list_head *head;
+	int ret, i;
+
+	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+	/* store command info in ippdrv */
+	ippdrv->cmd = c_node;
+
+	if (!ipp_check_mem_list(c_node)) {
+		DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* set current property in ippdrv */
+	ret = ipp_set_property(ippdrv, property);
+	if (ret) {
+		DRM_ERROR("failed to set property.\n");
+		ippdrv->cmd = NULL;
+		return ret;
+	}
+
+	/* check command */
+	switch (property->cmd) {
+	case IPP_CMD_M2M:
+		for_each_ipp_ops(i) {
+			/* source/destination memory list */
+			head = &c_node->mem_list[i];
+
+			m_node = list_first_entry(head,
+				struct drm_exynos_ipp_mem_node, list);
+			if (!m_node) {
+				DRM_ERROR("failed to get node.\n");
+				ret = -EFAULT;
+				return ret;
+			}
+
+			DRM_DEBUG_KMS("%s:m_node[0x%x]\n",
+				__func__, (int)m_node);
+
+			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+			if (ret) {
+				DRM_ERROR("failed to set m node.\n");
+				return ret;
+			}
+		}
+		break;
+	case IPP_CMD_WB:
+		/* destination memory list */
+		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
+
+		list_for_each_entry(m_node, head, list) {
+			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+			if (ret) {
+				DRM_ERROR("failed to set m node.\n");
+				return ret;
+			}
+		}
+		break;
+	case IPP_CMD_OUTPUT:
+		/* source memory list */
+		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+		list_for_each_entry(m_node, head, list) {
+			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+			if (ret) {
+				DRM_ERROR("failed to set m node.\n");
+				return ret;
+			}
+		}
+		break;
+	default:
+		DRM_ERROR("invalid operations.\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, property->cmd);
+
+	/* start operations */
+	if (ippdrv->start) {
+		ret = ippdrv->start(ippdrv->dev, property->cmd);
+		if (ret) {
+			DRM_ERROR("failed to start ops.\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int ipp_stop_property(struct drm_device *drm_dev,
+		struct exynos_drm_ippdrv *ippdrv,
+		struct drm_exynos_ipp_cmd_node *c_node)
+{
+	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
+	struct drm_exynos_ipp_property *property = &c_node->property;
+	struct list_head *head;
+	int ret = 0, i;
+
+	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+	/* put event */
+	ipp_put_event(c_node, NULL);
+
+	/* check command */
+	switch (property->cmd) {
+	case IPP_CMD_M2M:
+		for_each_ipp_ops(i) {
+			/* source/destination memory list */
+			head = &c_node->mem_list[i];
+
+			if (list_empty(head)) {
+				DRM_DEBUG_KMS("%s:mem_list is empty.\n",
+					__func__);
+				break;
+			}
+
+			list_for_each_entry_safe(m_node, tm_node,
+				head, list) {
+				ret = ipp_put_mem_node(drm_dev, c_node,
+					m_node);
+				if (ret) {
+					DRM_ERROR("failed to put m_node.\n");
+					goto err_clear;
+				}
+			}
+		}
+		break;
+	case IPP_CMD_WB:
+		/* destination memory list */
+		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
+
+		if (list_empty(head)) {
+			DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
+			break;
+		}
+
+		list_for_each_entry_safe(m_node, tm_node, head, list) {
+			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+			if (ret) {
+				DRM_ERROR("failed to put m_node.\n");
+				goto err_clear;
+			}
+		}
+		break;
+	case IPP_CMD_OUTPUT:
+		/* source memory list */
+		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+		if (list_empty(head)) {
+			DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
+			break;
+		}
+
+		list_for_each_entry_safe(m_node, tm_node, head, list) {
+			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+			if (ret) {
+				DRM_ERROR("failed to put m_node.\n");
+				goto err_clear;
+			}
+		}
+		break;
+	default:
+		DRM_ERROR("invalid operations.\n");
+		ret = -EINVAL;
+		goto err_clear;
+	}
+
+err_clear:
+	/* stop operations */
+	if (ippdrv->stop)
+		ippdrv->stop(ippdrv->dev, property->cmd);
+
+	return ret;
+}
+
+void ipp_sched_cmd(struct work_struct *work)
+{
+	struct drm_exynos_ipp_cmd_work *cmd_work =
+		(struct drm_exynos_ipp_cmd_work *)work;
+	struct exynos_drm_ippdrv *ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node;
+	struct drm_exynos_ipp_property *property;
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	ippdrv = cmd_work->ippdrv;
+	if (!ippdrv) {
+		DRM_ERROR("invalid ippdrv list.\n");
+		return;
+	}
+
+	c_node = cmd_work->c_node;
+	if (!c_node) {
+		DRM_ERROR("invalid command node list.\n");
+		return;
+	}
+
+	mutex_lock(&c_node->cmd_lock);
+
+	property = &c_node->property;
+	if (!property) {
+		DRM_ERROR("failed to get property:prop_id[%d]\n",
+			c_node->property.prop_id);
+		goto err_unlock;
+	}
+
+	switch (cmd_work->ctrl) {
+	case IPP_CTRL_PLAY:
+	case IPP_CTRL_RESUME:
+		ret = ipp_start_property(ippdrv, c_node);
+		if (ret) {
+			DRM_ERROR("failed to start property:prop_id[%d]\n",
+				c_node->property.prop_id);
+			goto err_unlock;
+		}
+
+		/*
+		 * The M2M case supports waiting for completion of the
+		 * transfer, because M2M operates as a single unit
+		 * operation with multiple queues, so it needs to wait
+		 * for the data transfer to complete.
+		 */
+		if (ipp_is_m2m_cmd(property->cmd)) {
+			if (!wait_for_completion_timeout
+			    (&c_node->start_complete, msecs_to_jiffies(200))) {
+				DRM_ERROR("timeout event:prop_id[%d]\n",
+					c_node->property.prop_id);
+				goto err_unlock;
+			}
+		}
+		break;
+	case IPP_CTRL_STOP:
+	case IPP_CTRL_PAUSE:
+		ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
+			c_node);
+		if (ret) {
+			DRM_ERROR("failed to stop property.\n");
+			goto err_unlock;
+		}
+
+		complete(&c_node->stop_complete);
+		break;
+	default:
+		DRM_ERROR("unknown control type\n");
+		break;
+	}
+
+	DRM_DEBUG_KMS("%s:ctrl[%d] done.\n", __func__, cmd_work->ctrl);
+
+err_unlock:
+	mutex_unlock(&c_node->cmd_lock);
+}
+
+static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
+		struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
+{
+	struct drm_device *drm_dev = ippdrv->drm_dev;
+	struct drm_exynos_ipp_property *property = &c_node->property;
+	struct drm_exynos_ipp_mem_node *m_node;
+	struct drm_exynos_ipp_queue_buf qbuf;
+	struct drm_exynos_ipp_send_event *e;
+	struct list_head *head;
+	struct timeval now;
+	unsigned long flags;
+	u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
+	int ret, i;
+
+	for_each_ipp_ops(i)
+		DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
+			i ? "dst" : "src", buf_id[i]);
+
+	if (!drm_dev) {
+		DRM_ERROR("failed to get drm_dev.\n");
+		return -EINVAL;
+	}
+
+	if (!property) {
+		DRM_ERROR("failed to get property.\n");
+		return -EINVAL;
+	}
+
+	if (list_empty(&c_node->event_list)) {
+		DRM_DEBUG_KMS("%s:event list is empty.\n", __func__);
+		return 0;
+	}
+
+	if (!ipp_check_mem_list(c_node)) {
+		DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
+		return 0;
+	}
+
+	/* check command */
+	switch (property->cmd) {
+	case IPP_CMD_M2M:
+		for_each_ipp_ops(i) {
+			/* source/destination memory list */
+			head = &c_node->mem_list[i];
+
+			m_node = list_first_entry(head,
+				struct drm_exynos_ipp_mem_node, list);
+			if (!m_node) {
+				DRM_ERROR("empty memory node.\n");
+				return -ENOMEM;
+			}
+
+			tbuf_id[i] = m_node->buf_id;
+			DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
+				i ? "dst" : "src", tbuf_id[i]);
+
+			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+			if (ret)
+				DRM_ERROR("failed to put m_node.\n");
+		}
+		break;
+	case IPP_CMD_WB:
+		/* clear buf for finding */
+		memset(&qbuf, 0x0, sizeof(qbuf));
+		qbuf.ops_id = EXYNOS_DRM_OPS_DST;
+		qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];
+
+		/* get memory node entry */
+		m_node = ipp_find_mem_node(c_node, &qbuf);
+		if (!m_node) {
+			DRM_ERROR("empty memory node.\n");
+			return -ENOMEM;
+		}
+
+		tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;
+
+		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+		if (ret)
+			DRM_ERROR("failed to put m_node.\n");
+		break;
+	case IPP_CMD_OUTPUT:
+		/* source memory list */
+		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+		m_node = list_first_entry(head,
+			struct drm_exynos_ipp_mem_node, list);
+		if (!m_node) {
+			DRM_ERROR("empty memory node.\n");
+			return -ENOMEM;
+		}
+
+		tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
+
+		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+		if (ret)
+			DRM_ERROR("failed to put m_node.\n");
+		break;
+	default:
+		DRM_ERROR("invalid operations.\n");
+		return -EINVAL;
+	}
+
+	if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
+		DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
+			tbuf_id[1], buf_id[1], property->prop_id);
+
+	/*
+	 * The command node has an event list of destination buffers.
+	 * When a destination buffer is enqueued to the mem list, an
+	 * event is created and linked to the event list tail, so the
+	 * first event corresponds to the first enqueued buffer.
+	 */
+	e = list_first_entry(&c_node->event_list,
+		struct drm_exynos_ipp_send_event, base.link);
+
+	if (!e) {
+		DRM_ERROR("empty event.\n");
+		return -EINVAL;
+	}
+
+	do_gettimeofday(&now);
+	DRM_DEBUG_KMS("%s:tv_sec[%ld]tv_usec[%ld]\n",
+		__func__, now.tv_sec, now.tv_usec);
+	e->event.tv_sec = now.tv_sec;
+	e->event.tv_usec = now.tv_usec;
+	e->event.prop_id = property->prop_id;
+
+	/* set buffer id for source and destination */
+	for_each_ipp_ops(i)
+		e->event.buf_id[i] = tbuf_id[i];
+
+	spin_lock_irqsave(&drm_dev->event_lock, flags);
+	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+	wake_up_interruptible(&e->base.file_priv->event_wait);
+	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+
+	DRM_DEBUG_KMS("%s:done cmd[%d]prop_id[%d]buf_id[%d]\n", __func__,
+		property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);
+
+	return 0;
+}
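User space receives the completion queued above by read()ing the drm fd,
exactly like vblank events. A minimal sketch, assuming the
drm_exynos_ipp_event layout from the uapi header in this series:

/* Sketch: drain DRM_EXYNOS_IPP_EVENT completions from the drm fd.
 * Every DRM event starts with struct drm_event { type, length }. */
#include <string.h>
#include <unistd.h>
#include <drm/drm.h>
#include <drm/exynos_drm.h>

static void ipp_read_events(int fd)
{
	char buf[1024];
	ssize_t len = read(fd, buf, sizeof(buf));
	ssize_t i = 0;

	while (len > 0 && i < len) {
		struct drm_event *e = (struct drm_event *)&buf[i];

		if (e->type == DRM_EXYNOS_IPP_EVENT) {
			struct drm_exynos_ipp_event ev;

			memcpy(&ev, e, sizeof(ev));
			/* ev.prop_id and ev.buf_id[] identify the
			 * finished operation and buffer. */
		}
		i += e->length;
	}
}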
1683
1684void ipp_sched_event(struct work_struct *work)
1685{
1686 struct drm_exynos_ipp_event_work *event_work =
1687 (struct drm_exynos_ipp_event_work *)work;
1688 struct exynos_drm_ippdrv *ippdrv;
1689 struct drm_exynos_ipp_cmd_node *c_node;
1690 int ret;
1691
1692 if (!event_work) {
1693 DRM_ERROR("failed to get event_work.\n");
1694 return;
1695 }
1696
1697 DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__,
1698 event_work->buf_id[EXYNOS_DRM_OPS_DST]);
1699
1700 ippdrv = event_work->ippdrv;
1701 if (!ippdrv) {
1702 DRM_ERROR("failed to get ipp driver.\n");
1703 return;
1704 }
1705
1706 c_node = ippdrv->cmd;
1707 if (!c_node) {
1708 DRM_ERROR("failed to get command node.\n");
1709 return;
1710 }
1711
1712 /*
1713 * IPP supports command thread, event thread synchronization.
1714 * If IPP close immediately from user land, then IPP make
1715 * synchronization with command thread, so make complete event.
1716 * or going out operations.
1717 */
1718 if (c_node->state != IPP_STATE_START) {
1719 DRM_DEBUG_KMS("%s:bypass state[%d]prop_id[%d]\n",
1720 __func__, c_node->state, c_node->property.prop_id);
1721 goto err_completion;
1722 }
1723
1724 mutex_lock(&c_node->event_lock);
1725
1726 ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
1727 if (ret) {
1728 DRM_ERROR("failed to send event.\n");
1729 goto err_completion;
1730 }
1731
1732err_completion:
1733 if (ipp_is_m2m_cmd(c_node->property.cmd))
1734 complete(&c_node->start_complete);
1735
1736 mutex_unlock(&c_node->event_lock);
1737}
1738
1739static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1740{
1741 struct ipp_context *ctx = get_ipp_context(dev);
1742 struct exynos_drm_ippdrv *ippdrv;
1743 int ret, count = 0;
1744
1745 DRM_DEBUG_KMS("%s\n", __func__);
1746
1747 /* get ipp driver entry */
1748 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1749 ippdrv->drm_dev = drm_dev;
1750
1751 ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
1752 &ippdrv->ipp_id);
1753 if (ret) {
1754 DRM_ERROR("failed to create id.\n");
1755 goto err_idr;
1756 }
1757
1758 DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]ipp_id[%d]\n", __func__,
1759 count++, (int)ippdrv, ippdrv->ipp_id);
1760
1761 if (ippdrv->ipp_id == 0) {
1762 DRM_ERROR("failed to get ipp_id[%d]\n",
1763 ippdrv->ipp_id);
1764 goto err_idr;
1765 }
1766
1767 /* store parent device for node */
1768 ippdrv->parent_dev = dev;
1769
1770 /* store event work queue and handler */
1771 ippdrv->event_workq = ctx->event_workq;
1772 ippdrv->sched_event = ipp_sched_event;
1773 INIT_LIST_HEAD(&ippdrv->cmd_list);
1774 }
1775
1776 return 0;
1777
1778err_idr:
1779 idr_remove_all(&ctx->ipp_idr);
1780 idr_remove_all(&ctx->prop_idr);
1781 idr_destroy(&ctx->ipp_idr);
1782 idr_destroy(&ctx->prop_idr);
1783 return ret;
1784}
1785
1786static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
1787{
1788	struct exynos_drm_ippdrv *ippdrv, *t;
1789
1790 DRM_DEBUG_KMS("%s\n", __func__);
1791
1792	/* unregister removes each entry from the list, so iterate safely */
1793	list_for_each_entry_safe(ippdrv, t, &exynos_drm_ippdrv_list, drv_list) {
1794 ippdrv->drm_dev = NULL;
1795 exynos_drm_ippdrv_unregister(ippdrv);
1796 }
1797}
1798
1799static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
1800 struct drm_file *file)
1801{
1802 struct drm_exynos_file_private *file_priv = file->driver_priv;
1803 struct exynos_drm_ipp_private *priv;
1804
1805 DRM_DEBUG_KMS("%s\n", __func__);
1806
1807 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1808 if (!priv) {
1809 DRM_ERROR("failed to allocate priv.\n");
1810 return -ENOMEM;
1811 }
1812 priv->dev = dev;
1813 file_priv->ipp_priv = priv;
1814
1815 INIT_LIST_HEAD(&priv->event_list);
1816
1817 DRM_DEBUG_KMS("%s:done priv[0x%x]\n", __func__, (int)priv);
1818
1819 return 0;
1820}
1821
1822static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
1823 struct drm_file *file)
1824{
1825 struct drm_exynos_file_private *file_priv = file->driver_priv;
1826 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
1827 struct exynos_drm_ippdrv *ippdrv = NULL;
1828 struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
1829 int count = 0;
1830
1831 DRM_DEBUG_KMS("%s:for priv[0x%x]\n", __func__, (int)priv);
1832
1833 if (list_empty(&exynos_drm_ippdrv_list)) {
1834 DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
1835 goto err_clear;
1836 }
1837
1838 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1839 if (list_empty(&ippdrv->cmd_list))
1840 continue;
1841
1842 list_for_each_entry_safe(c_node, tc_node,
1843 &ippdrv->cmd_list, list) {
1844 DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n",
1845 __func__, count++, (int)ippdrv);
1846
1847 if (c_node->priv == priv) {
1848				/*
1849				 * Userland went into an abnormal state: the
1850				 * process was killed and the file closed
1851				 * without stop ever being issued through cmd
1852				 * ctrl, so perform the stop operation here.
1853				 */
1854 if (c_node->state == IPP_STATE_START) {
1855 ipp_stop_property(drm_dev, ippdrv,
1856 c_node);
1857 c_node->state = IPP_STATE_STOP;
1858 }
1859
1860 ippdrv->dedicated = false;
1861 ipp_clean_cmd_node(c_node);
1862 if (list_empty(&ippdrv->cmd_list))
1863 pm_runtime_put_sync(ippdrv->dev);
1864 }
1865 }
1866 }
1867
1868err_clear:
1869 kfree(priv);
1870 return;
1871}
1872
1873static int __devinit ipp_probe(struct platform_device *pdev)
1874{
1875 struct device *dev = &pdev->dev;
1876 struct ipp_context *ctx;
1877 struct exynos_drm_subdrv *subdrv;
1878 int ret;
1879
1880 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1881 if (!ctx)
1882 return -ENOMEM;
1883
1884 DRM_DEBUG_KMS("%s\n", __func__);
1885
1886 mutex_init(&ctx->ipp_lock);
1887 mutex_init(&ctx->prop_lock);
1888
1889 idr_init(&ctx->ipp_idr);
1890 idr_init(&ctx->prop_idr);
1891
1892	/*
1893	 * Create a single-threaded workqueue for ipp events.
1894	 * Each IPP driver queues its event_work to this thread,
1895	 * and the event thread then delivers the completion
1896	 * event to the user process.
1897	 */
1898 ctx->event_workq = create_singlethread_workqueue("ipp_event");
1899 if (!ctx->event_workq) {
1900 dev_err(dev, "failed to create event workqueue\n");
1901		ret = -ENOMEM;
1902 goto err_clear;
1903 }
1904
1905	/*
1906	 * Create a single-threaded workqueue for ipp commands.
1907	 * A user process creates a command node with the set
1908	 * property ioctl and builds a start_work, which is
1909	 * queued to this thread; the command thread then
1910	 * starts the property on the hardware.
1911	 */
1912 ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
1913 if (!ctx->cmd_workq) {
1914 dev_err(dev, "failed to create cmd workqueue\n");
1915		ret = -ENOMEM;
1916 goto err_event_workq;
1917 }
1918
1919	/* set sub driver information */
1920 subdrv = &ctx->subdrv;
1921 subdrv->dev = dev;
1922 subdrv->probe = ipp_subdrv_probe;
1923 subdrv->remove = ipp_subdrv_remove;
1924 subdrv->open = ipp_subdrv_open;
1925 subdrv->close = ipp_subdrv_close;
1926
1927 platform_set_drvdata(pdev, ctx);
1928
1929 ret = exynos_drm_subdrv_register(subdrv);
1930 if (ret < 0) {
1931 DRM_ERROR("failed to register drm ipp device.\n");
1932 goto err_cmd_workq;
1933 }
1934
1935 dev_info(&pdev->dev, "drm ipp registered successfully.\n");
1936
1937 return 0;
1938
1939err_cmd_workq:
1940 destroy_workqueue(ctx->cmd_workq);
1941err_event_workq:
1942 destroy_workqueue(ctx->event_workq);
1943err_clear:
1944 kfree(ctx);
1945 return ret;
1946}
1947
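To make the command-thread comment above concrete, a hedged sketch of how a play request might be queued to the "ipp_cmd" thread. hypo_queue_start is hypothetical; the actual queuing is done by the cmd ctrl ioctl handler elsewhere in this patch:

    static void hypo_queue_start(struct ipp_context *ctx,
    		struct exynos_drm_ippdrv *ippdrv,
    		struct drm_exynos_ipp_cmd_node *c_node)
    {
    	struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;

    	cmd_work->ippdrv = ippdrv;
    	cmd_work->c_node = c_node;
    	cmd_work->ctrl = IPP_CTRL_PLAY;	/* from the uapi ctrl enum */
    	queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
    }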
1948static int __devexit ipp_remove(struct platform_device *pdev)
1949{
1950 struct ipp_context *ctx = platform_get_drvdata(pdev);
1951
1952 DRM_DEBUG_KMS("%s\n", __func__);
1953
1954 /* unregister sub driver */
1955 exynos_drm_subdrv_unregister(&ctx->subdrv);
1956
1957	/* remove and destroy the ipp idrs */
1958 idr_remove_all(&ctx->ipp_idr);
1959 idr_remove_all(&ctx->prop_idr);
1960 idr_destroy(&ctx->ipp_idr);
1961 idr_destroy(&ctx->prop_idr);
1962
1963 mutex_destroy(&ctx->ipp_lock);
1964 mutex_destroy(&ctx->prop_lock);
1965
1966 /* destroy command, event work queue */
1967 destroy_workqueue(ctx->cmd_workq);
1968 destroy_workqueue(ctx->event_workq);
1969
1970 kfree(ctx);
1971
1972 return 0;
1973}
1974
1975static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
1976{
1977 DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
1978
1979 return 0;
1980}
1981
1982#ifdef CONFIG_PM_SLEEP
1983static int ipp_suspend(struct device *dev)
1984{
1985 struct ipp_context *ctx = get_ipp_context(dev);
1986
1987 DRM_DEBUG_KMS("%s\n", __func__);
1988
1989 if (pm_runtime_suspended(dev))
1990 return 0;
1991
1992 return ipp_power_ctrl(ctx, false);
1993}
1994
1995static int ipp_resume(struct device *dev)
1996{
1997 struct ipp_context *ctx = get_ipp_context(dev);
1998
1999 DRM_DEBUG_KMS("%s\n", __func__);
2000
2001 if (!pm_runtime_suspended(dev))
2002 return ipp_power_ctrl(ctx, true);
2003
2004 return 0;
2005}
2006#endif
2007
2008#ifdef CONFIG_PM_RUNTIME
2009static int ipp_runtime_suspend(struct device *dev)
2010{
2011 struct ipp_context *ctx = get_ipp_context(dev);
2012
2013 DRM_DEBUG_KMS("%s\n", __func__);
2014
2015 return ipp_power_ctrl(ctx, false);
2016}
2017
2018static int ipp_runtime_resume(struct device *dev)
2019{
2020 struct ipp_context *ctx = get_ipp_context(dev);
2021
2022 DRM_DEBUG_KMS("%s\n", __func__);
2023
2024 return ipp_power_ctrl(ctx, true);
2025}
2026#endif
2027
2028static const struct dev_pm_ops ipp_pm_ops = {
2029 SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
2030 SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
2031};
2032
2033struct platform_driver ipp_driver = {
2034 .probe = ipp_probe,
2035 .remove = __devexit_p(ipp_remove),
2036 .driver = {
2037 .name = "exynos-drm-ipp",
2038 .owner = THIS_MODULE,
2039 .pm = &ipp_pm_ops,
2040 },
2041};
2042
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
new file mode 100644
index 000000000000..28ffac95386c
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -0,0 +1,266 @@
1/*
2 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
3 *
4 * Authors:
5 * Eunchul Kim <chulspro.kim@samsung.com>
6 * Jinyoung Jeon <jy0.jeon@samsung.com>
7 * Sangmin Lee <lsmin.lee@samsung.com>
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */
28
29#ifndef _EXYNOS_DRM_IPP_H_
30#define _EXYNOS_DRM_IPP_H_
31
32#define for_each_ipp_ops(pos) \
33 for (pos = 0; pos < EXYNOS_DRM_OPS_MAX; pos++)
34#define for_each_ipp_planar(pos) \
35 for (pos = 0; pos < EXYNOS_DRM_PLANAR_MAX; pos++)
36
37#define IPP_GET_LCD_WIDTH _IOR('F', 302, int)
38#define IPP_GET_LCD_HEIGHT _IOR('F', 303, int)
39#define IPP_SET_WRITEBACK _IOW('F', 304, u32)
40
41/* definition of state */
42enum drm_exynos_ipp_state {
43 IPP_STATE_IDLE,
44 IPP_STATE_START,
45 IPP_STATE_STOP,
46};
47
48/*
49 * A structure of command work information.
50 * @work: work structure.
51 * @ippdrv: current work ippdrv.
52 * @c_node: command node information.
53 * @ctrl: command control.
54 */
55struct drm_exynos_ipp_cmd_work {
56 struct work_struct work;
57 struct exynos_drm_ippdrv *ippdrv;
58 struct drm_exynos_ipp_cmd_node *c_node;
59 enum drm_exynos_ipp_ctrl ctrl;
60};
61
62/*
63 * A structure of command node.
64 *
65 * @priv: IPP private information.
66 * @list: list head to command queue information.
67 * @event_list: list head of event.
68 * @mem_list: list head to source/destination memory queue information.
69 * @cmd_lock: lock for synchronization of access to ioctl.
70 * @mem_lock: lock for synchronization of access to memory nodes.
71 * @event_lock: lock for synchronization of access to scheduled event.
72 * @start_complete: completion of start of command.
73 * @stop_complete: completion of stop of command.
74 * @property: property information.
75 * @start_work: start command work structure.
76 * @stop_work: stop command work structure.
77 * @event_work: event work structure.
78 * @state: state of command node.
79 */
80struct drm_exynos_ipp_cmd_node {
81 struct exynos_drm_ipp_private *priv;
82 struct list_head list;
83 struct list_head event_list;
84 struct list_head mem_list[EXYNOS_DRM_OPS_MAX];
85 struct mutex cmd_lock;
86 struct mutex mem_lock;
87 struct mutex event_lock;
88 struct completion start_complete;
89 struct completion stop_complete;
90 struct drm_exynos_ipp_property property;
91 struct drm_exynos_ipp_cmd_work *start_work;
92 struct drm_exynos_ipp_cmd_work *stop_work;
93 struct drm_exynos_ipp_event_work *event_work;
94 enum drm_exynos_ipp_state state;
95};
96
97/*
98 * A structure of buffer information.
99 *
100 * @handles: Y, Cb, Cr gem handles, one per plane.
101 * @base: Y, Cb, Cr dma address, one per plane.
102 */
103struct drm_exynos_ipp_buf_info {
104 unsigned long handles[EXYNOS_DRM_PLANAR_MAX];
105 dma_addr_t base[EXYNOS_DRM_PLANAR_MAX];
106};
107
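A hedged userland sketch of filling the matching uapi queue-buffer request; it assumes the drm_exynos_ipp_queue_buf struct and DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF number added by this patch, and a single-plane buffer:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/exynos_drm.h>

    static int hypo_queue_dst_buf(int drm_fd, __u32 prop_id,
    		__u32 buf_id, __u32 gem_handle)
    {
    	struct drm_exynos_ipp_queue_buf qbuf;

    	memset(&qbuf, 0, sizeof(qbuf));
    	qbuf.ops_id = EXYNOS_DRM_OPS_DST;
    	qbuf.buf_type = IPP_BUF_ENQUEUE;
    	qbuf.prop_id = prop_id;
    	qbuf.buf_id = buf_id;
    	qbuf.handle[0] = gem_handle;	/* Y / single plane */

    	return ioctl(drm_fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);
    }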
108/*
109 * A structure of writeback (wb) setting information.
110 *
111 * @enable: enable flag for wb.
112 * @refresh: HZ of the refresh rate.
113 */
114struct drm_exynos_ipp_set_wb {
115 __u32 enable;
116 __u32 refresh;
117};
118
119/*
120 * A structure of event work information.
121 *
122 * @work: work structure.
123 * @ippdrv: current work ippdrv.
124 * @buf_id: id of src, dst buffer.
125 */
126struct drm_exynos_ipp_event_work {
127 struct work_struct work;
128 struct exynos_drm_ippdrv *ippdrv;
129 u32 buf_id[EXYNOS_DRM_OPS_MAX];
130};
131
132/*
133 * A structure of source/destination operations.
134 *
135 * @set_fmt: set format of image.
136 * @set_transf: set transform (rotation, flip).
137 * @set_size: set size of region.
138 * @set_addr: set address for dma.
139 */
140struct exynos_drm_ipp_ops {
141 int (*set_fmt)(struct device *dev, u32 fmt);
142 int (*set_transf)(struct device *dev,
143 enum drm_exynos_degree degree,
144 enum drm_exynos_flip flip, bool *swap);
145 int (*set_size)(struct device *dev, int swap,
146 struct drm_exynos_pos *pos, struct drm_exynos_sz *sz);
147 int (*set_addr)(struct device *dev,
148 struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
149 enum drm_exynos_ipp_buf_type buf_type);
150};
151
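A hedged sketch of what a sub-driver's destination ops might look like; the hypo_* names are hypothetical, and the real implementations live in the FIMC, Rotator and GSC drivers:

    static int hypo_dst_set_fmt(struct device *dev, u32 fmt)
    {
    	/* program the hardware's output format register here */
    	return 0;
    }

    static int hypo_dst_set_addr(struct device *dev,
    		struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
    		enum drm_exynos_ipp_buf_type buf_type)
    {
    	/* latch the per-plane dma addresses for this buffer */
    	return 0;
    }

    static struct exynos_drm_ipp_ops hypo_dst_ops = {
    	.set_fmt	= hypo_dst_set_fmt,
    	.set_addr	= hypo_dst_set_addr,
    };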
152/*
153 * A structure of ipp driver.
154 *
155 * @drv_list: list head for registered sub driver information.
156 * @parent_dev: parent device information.
157 * @dev: platform device.
158 * @drm_dev: drm device.
159 * @ipp_id: id of ipp driver.
160 * @dedicated: dedicated ipp device.
161 * @ops: source, destination operations.
162 * @event_workq: event work queue.
163 * @cmd: current command information.
164 * @cmd_list: list head for command information.
165 * @prop_list: property information of current ipp driver.
166 * @check_property: check property about format, size, buffer.
167 * @reset: reset ipp block.
168 * @start: start processing on the given ipp device.
169 * @stop: stop processing on the given ipp device.
170 * @sched_event: work schedule handler.
171 */
172struct exynos_drm_ippdrv {
173 struct list_head drv_list;
174 struct device *parent_dev;
175 struct device *dev;
176 struct drm_device *drm_dev;
177 u32 ipp_id;
178 bool dedicated;
179 struct exynos_drm_ipp_ops *ops[EXYNOS_DRM_OPS_MAX];
180 struct workqueue_struct *event_workq;
181 struct drm_exynos_ipp_cmd_node *cmd;
182 struct list_head cmd_list;
183 struct drm_exynos_ipp_prop_list *prop_list;
184
185 int (*check_property)(struct device *dev,
186 struct drm_exynos_ipp_property *property);
187 int (*reset)(struct device *dev);
188 int (*start)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
189 void (*stop)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
190 void (*sched_event)(struct work_struct *work);
191};
192
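And a hedged sketch of registering such a driver with the IPP core, reusing hypo_dst_ops from the sketch above; hypo_probe is hypothetical and omits the error handling a real platform driver would need:

    static struct exynos_drm_ippdrv hypo_ippdrv; /* callbacks filled elsewhere */

    static int hypo_probe(struct platform_device *pdev)
    {
    	hypo_ippdrv.dev = &pdev->dev;
    	hypo_ippdrv.ops[EXYNOS_DRM_OPS_DST] = &hypo_dst_ops;

    	/* hand the driver to the IPP core; it gets an ipp_id at subdrv probe */
    	return exynos_drm_ippdrv_register(&hypo_ippdrv);
    }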
193#ifdef CONFIG_DRM_EXYNOS_IPP
194extern int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv);
195extern int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv);
196extern int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
197 struct drm_file *file);
198extern int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
199 struct drm_file *file);
200extern int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
201 struct drm_file *file);
202extern int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
203 struct drm_file *file);
204extern int exynos_drm_ippnb_register(struct notifier_block *nb);
205extern int exynos_drm_ippnb_unregister(struct notifier_block *nb);
206extern int exynos_drm_ippnb_send_event(unsigned long val, void *v);
207extern void ipp_sched_cmd(struct work_struct *work);
208extern void ipp_sched_event(struct work_struct *work);
209
210#else
211static inline int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
212{
213 return -ENODEV;
214}
215
216static inline int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
217{
218 return -ENODEV;
219}
220
221static inline int exynos_drm_ipp_get_property(struct drm_device *drm_dev,
222 void *data,
223 struct drm_file *file_priv)
224{
225 return -ENOTTY;
226}
227
228static inline int exynos_drm_ipp_set_property(struct drm_device *drm_dev,
229 void *data,
230 struct drm_file *file_priv)
231{
232 return -ENOTTY;
233}
234
235static inline int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev,
236 void *data,
237 struct drm_file *file)
238{
239 return -ENOTTY;
240}
241
242static inline int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev,
243 void *data,
244 struct drm_file *file)
245{
246 return -ENOTTY;
247}
248
249static inline int exynos_drm_ippnb_register(struct notifier_block *nb)
250{
251 return -ENODEV;
252}
253
254static inline int exynos_drm_ippnb_unregister(struct notifier_block *nb)
255{
256 return -ENODEV;
257}
258
259static inline int exynos_drm_ippnb_send_event(unsigned long val, void *v)
260{
261 return -ENOTTY;
262}
263#endif
264
265#endif /* _EXYNOS_DRM_IPP_H_ */
266
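For completeness, a hedged sketch of how these ioctl entry points are typically wired into the driver's ioctl table; the real hookup is in exynos_drm_drv.c in this patch, and the table name here is hypothetical:

    static const struct drm_ioctl_desc hypo_exynos_ioctls[] = {
    	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY,
    		exynos_drm_ipp_get_property, DRM_UNLOCKED | DRM_AUTH),
    	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY,
    		exynos_drm_ipp_set_property, DRM_UNLOCKED | DRM_AUTH),
    	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF,
    		exynos_drm_ipp_queue_buf, DRM_UNLOCKED | DRM_AUTH),
    	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL,
    		exynos_drm_ipp_cmd_ctrl, DRM_UNLOCKED | DRM_AUTH),
    };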