author		Dave Airlie <airlied@redhat.com>	2016-09-19 16:24:26 -0400
committer	Dave Airlie <airlied@redhat.com>	2016-09-19 16:24:26 -0400
commit		26e34d2d8ba7aca0fe86e5362ce5811749df77b2 (patch)
tree		51fba9f283f18d86de24bf5a21d150c57c14121d
parent		b81a6179b6035a77d9d56d08ba1c0f81d6d4c2c5 (diff)
parent		cd98e85a6b786da83e0b120b53a182d100c19c9b (diff)
Merge tag 'imx-drm-next-2016-09-19' of git://git.pengutronix.de/git/pza/linux into drm-next
imx-drm active plane reconfiguration, cleanup, FSU/IC/IRT/VDIC support

- add active plane reconfiguration support (v4), use the atomic_disable callback
- stop calling disable_plane manually in the plane destroy path
- let mode cleanup destroy mode objects on driver unbind
- drop deprecated load/unload drm_driver ops
- add exclusive fence to plane state, so the atomic helper can wait on them,
  remove the open-coded fence wait from imx-drm
- add low level deinterlacer (VDIC) support
- add support for channel linking via the frame synchronisation unit (FSU)
- add queued image conversion support for memory-to-memory scaling, rotation,
  and color space conversion, using IC and IRT

* tag 'imx-drm-next-2016-09-19' of git://git.pengutronix.de/git/pza/linux:
  gpu: ipu-v3: Add queued image conversion support
  gpu: ipu-v3: Add ipu_rot_mode_is_irt()
  gpu: ipu-v3: fix a possible NULL dereference
  drm/imx: parallel-display: detach bridge or panel on unbind
  drm/imx: imx-ldb: detach bridge on unbind
  drm/imx: imx-ldb: detach panel on unbind
  gpu: ipu-v3: Add FSU channel linking support
  gpu: ipu-v3: Add Video Deinterlacer unit
  drm/imx: add exclusive fence to plane state
  drm/imx: fold ipu_plane_disable into ipu_disable_plane
  drm/imx: don't destroy mode objects manually on driver unbind
  drm/imx: drop deprecated load/unload drm_driver ops
  drm/imx: don't call disable_plane in plane destroy path
  drm/imx: Add active plane reconfiguration support
  drm/imx: Use DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET flag
  drm/imx: ipuv3-crtc: Use the callback ->atomic_disable instead of ->disable
  gpu: ipu-v3: Do not wait for DMFC FIFO to clear when disabling DMFC channel
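For orientation, the queued image conversion API added here is consumed roughly
as follows. This is a minimal sketch, assuming the ipu_image_convert_prepare(),
ipu_image_convert_queue() and ipu_image_convert_unprepare() entry points
declared in the new include/video/imx-ipu-image-convert.h (the header is in
the diffstat but its body is not shown in this diff); error handling and image
setup are elided, and my_convert_done()/my_scale_and_rotate() are hypothetical
names:

	static void my_convert_done(struct ipu_image_convert_run *run, void *data)
	{
		struct completion *done = data;

		complete(done);	/* run->status holds the conversion result */
	}

	static int my_scale_and_rotate(struct ipu_soc *ipu,
				       struct ipu_image *in, struct ipu_image *out)
	{
		DECLARE_COMPLETION_ONSTACK(done);
		struct ipu_image_convert_ctx *ctx;
		struct ipu_image_convert_run run;
		int ret;

		/* analyse the conversion once: tiling, rotation, CSC setup */
		ctx = ipu_image_convert_prepare(ipu, IC_TASK_POST_PROCESSOR,
						in, out, IPU_ROTATE_90_RIGHT,
						my_convert_done, &done);
		if (IS_ERR(ctx))
			return PTR_ERR(ctx);

		/* queue one run; many runs may share the same prepared context */
		memset(&run, 0, sizeof(run));
		run.ctx = ctx;
		run.in_phys = in->phys0;
		run.out_phys = out->phys0;
		ret = ipu_image_convert_queue(&run);
		if (!ret)
			wait_for_completion(&done);

		ipu_image_convert_unprepare(ctx);
		return ret ? ret : run.status;
	}

The conversion runs asynchronously on the IC/IRT hardware and completes via
the callback from the converter's IRQ bottom half.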
-rw-r--r--  drivers/gpu/drm/bridge/dw-hdmi.c        |    3
-rw-r--r--  drivers/gpu/drm/imx/imx-drm-core.c      |  332
-rw-r--r--  drivers/gpu/drm/imx/imx-ldb.c           |    9
-rw-r--r--  drivers/gpu/drm/imx/imx-tve.c           |    3
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-crtc.c        |   17
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-plane.c       |   37
-rw-r--r--  drivers/gpu/drm/imx/parallel-display.c  |    6
-rw-r--r--  drivers/gpu/ipu-v3/Makefile             |    3
-rw-r--r--  drivers/gpu/ipu-v3/ipu-common.c         |  157
-rw-r--r--  drivers/gpu/ipu-v3/ipu-dmfc.c           |   18
-rw-r--r--  drivers/gpu/ipu-v3/ipu-ic.c             |    2
-rw-r--r--  drivers/gpu/ipu-v3/ipu-image-convert.c  | 1709
-rw-r--r--  drivers/gpu/ipu-v3/ipu-prv.h            |   38
-rw-r--r--  drivers/gpu/ipu-v3/ipu-vdi.c            |  243
-rw-r--r--  include/video/imx-ipu-image-convert.h   |  207
-rw-r--r--  include/video/imx-ipu-v3.h              |   58
16 files changed, 2612 insertions, 230 deletions
diff --git a/drivers/gpu/drm/bridge/dw-hdmi.c b/drivers/gpu/drm/bridge/dw-hdmi.c
index cdf39aa3943c..66ad8e6fb11e 100644
--- a/drivers/gpu/drm/bridge/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/dw-hdmi.c
@@ -1813,9 +1813,6 @@ void dw_hdmi_unbind(struct device *dev, struct device *master, void *data)
 	/* Disable all interrupts */
 	hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0);
 
-	hdmi->connector.funcs->destroy(&hdmi->connector);
-	hdmi->encoder->funcs->destroy(hdmi->encoder);
-
 	clk_disable_unprepare(hdmi->iahb_clk);
 	clk_disable_unprepare(hdmi->isfr_clk);
 	i2c_put_adapter(hdmi->ddc);
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 56dfc4cd50c6..98df09c2b388 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -64,25 +64,6 @@ static void imx_drm_driver_lastclose(struct drm_device *drm)
 	drm_fbdev_cma_restore_mode(imxdrm->fbhelper);
 }
 
-static int imx_drm_driver_unload(struct drm_device *drm)
-{
-	struct imx_drm_device *imxdrm = drm->dev_private;
-
-	drm_kms_helper_poll_fini(drm);
-
-	if (imxdrm->fbhelper)
-		drm_fbdev_cma_fini(imxdrm->fbhelper);
-
-	component_unbind_all(drm->dev, drm);
-
-	drm_vblank_cleanup(drm);
-	drm_mode_config_cleanup(drm);
-
-	platform_set_drvdata(drm->platformdev, NULL);
-
-	return 0;
-}
-
 static int imx_drm_enable_vblank(struct drm_device *drm, unsigned int pipe)
 {
 	struct imx_drm_device *imxdrm = drm->dev_private;
@@ -146,55 +127,73 @@ static void imx_drm_output_poll_changed(struct drm_device *drm)
 	drm_fbdev_cma_hotplug_event(imxdrm->fbhelper);
 }
 
+static int imx_drm_atomic_check(struct drm_device *dev,
+				struct drm_atomic_state *state)
+{
+	int ret;
+
+	ret = drm_atomic_helper_check_modeset(dev, state);
+	if (ret)
+		return ret;
+
+	ret = drm_atomic_helper_check_planes(dev, state);
+	if (ret)
+		return ret;
+
+	/*
+	 * Check modeset again in case crtc_state->mode_changed is
+	 * updated in plane's ->atomic_check callback.
+	 */
+	ret = drm_atomic_helper_check_modeset(dev, state);
+	if (ret)
+		return ret;
+
+	return ret;
+}
+
+static int imx_drm_atomic_commit(struct drm_device *dev,
+				 struct drm_atomic_state *state,
+				 bool nonblock)
+{
+	struct drm_plane_state *plane_state;
+	struct drm_plane *plane;
+	struct dma_buf *dma_buf;
+	int i;
+
+	/*
+	 * If the plane fb has a dma-buf attached, fish out the exclusive
+	 * fence for the atomic helper to wait on.
+	 */
+	for_each_plane_in_state(state, plane, plane_state, i) {
+		if ((plane->state->fb != plane_state->fb) && plane_state->fb) {
+			dma_buf = drm_fb_cma_get_gem_obj(plane_state->fb,
+							 0)->base.dma_buf;
+			if (!dma_buf)
+				continue;
+			plane_state->fence =
+				reservation_object_get_excl_rcu(dma_buf->resv);
+		}
+	}
+
+	return drm_atomic_helper_commit(dev, state, nonblock);
+}
+
 static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
 	.fb_create = drm_fb_cma_create,
 	.output_poll_changed = imx_drm_output_poll_changed,
-	.atomic_check = drm_atomic_helper_check,
-	.atomic_commit = drm_atomic_helper_commit,
+	.atomic_check = imx_drm_atomic_check,
+	.atomic_commit = imx_drm_atomic_commit,
 };
 
 static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
-	struct drm_crtc *crtc;
-	struct drm_crtc_state *crtc_state;
-	struct drm_plane_state *plane_state;
-	struct drm_gem_cma_object *cma_obj;
-	struct fence *excl;
-	unsigned shared_count;
-	struct fence **shared;
-	unsigned int i, j;
-	int ret;
-
-	/* Wait for fences. */
-	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		plane_state = crtc->primary->state;
-		if (plane_state->fb) {
-			cma_obj = drm_fb_cma_get_gem_obj(plane_state->fb, 0);
-			if (cma_obj->base.dma_buf) {
-				ret = reservation_object_get_fences_rcu(
-					cma_obj->base.dma_buf->resv, &excl,
-					&shared_count, &shared);
-				if (unlikely(ret))
-					DRM_ERROR("failed to get fences "
-						  "for buffer\n");
-
-				if (excl) {
-					fence_wait(excl, false);
-					fence_put(excl);
-				}
-				for (j = 0; j < shared_count; i++) {
-					fence_wait(shared[j], false);
-					fence_put(shared[j]);
-				}
-			}
-		}
-	}
 
 	drm_atomic_helper_commit_modeset_disables(dev, state);
 
 	drm_atomic_helper_commit_planes(dev, state,
-				DRM_PLANE_COMMIT_ACTIVE_ONLY);
+				DRM_PLANE_COMMIT_ACTIVE_ONLY |
+				DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET);
 
 	drm_atomic_helper_commit_modeset_enables(dev, state);
 
@@ -210,111 +209,6 @@ static struct drm_mode_config_helper_funcs imx_drm_mode_config_helpers = {
 };
 
 /*
- * Main DRM initialisation. This binds, initialises and registers
- * with DRM the subcomponents of the driver.
- */
-static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
-{
-	struct imx_drm_device *imxdrm;
-	struct drm_connector *connector;
-	int ret;
-
-	imxdrm = devm_kzalloc(drm->dev, sizeof(*imxdrm), GFP_KERNEL);
-	if (!imxdrm)
-		return -ENOMEM;
-
-	imxdrm->drm = drm;
-
-	drm->dev_private = imxdrm;
-
-	/*
-	 * enable drm irq mode.
-	 * - with irq_enabled = true, we can use the vblank feature.
-	 *
-	 * P.S. note that we wouldn't use drm irq handler but
-	 * just specific driver own one instead because
-	 * drm framework supports only one irq handler and
-	 * drivers can well take care of their interrupts
-	 */
-	drm->irq_enabled = true;
-
-	/*
-	 * set max width and height as default value(4096x4096).
-	 * this value would be used to check framebuffer size limitation
-	 * at drm_mode_addfb().
-	 */
-	drm->mode_config.min_width = 64;
-	drm->mode_config.min_height = 64;
-	drm->mode_config.max_width = 4096;
-	drm->mode_config.max_height = 4096;
-	drm->mode_config.funcs = &imx_drm_mode_config_funcs;
-	drm->mode_config.helper_private = &imx_drm_mode_config_helpers;
-
-	drm_mode_config_init(drm);
-
-	ret = drm_vblank_init(drm, MAX_CRTC);
-	if (ret)
-		goto err_kms;
-
-	platform_set_drvdata(drm->platformdev, drm);
-
-	/* Now try and bind all our sub-components */
-	ret = component_bind_all(drm->dev, drm);
-	if (ret)
-		goto err_vblank;
-
-	/*
-	 * All components are now added, we can publish the connector sysfs
-	 * entries to userspace. This will generate hotplug events and so
-	 * userspace will expect to be able to access DRM at this point.
-	 */
-	list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
-		ret = drm_connector_register(connector);
-		if (ret) {
-			dev_err(drm->dev,
-				"[CONNECTOR:%d:%s] drm_connector_register failed: %d\n",
-				connector->base.id,
-				connector->name, ret);
-			goto err_unbind;
-		}
-	}
-
-	drm_mode_config_reset(drm);
-
-	/*
-	 * All components are now initialised, so setup the fb helper.
-	 * The fb helper takes copies of key hardware information, so the
-	 * crtcs/connectors/encoders must not change after this point.
-	 */
-#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
-	if (legacyfb_depth != 16 && legacyfb_depth != 32) {
-		dev_warn(drm->dev, "Invalid legacyfb_depth. Defaulting to 16bpp\n");
-		legacyfb_depth = 16;
-	}
-	imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth,
-				drm->mode_config.num_crtc, MAX_CRTC);
-	if (IS_ERR(imxdrm->fbhelper)) {
-		ret = PTR_ERR(imxdrm->fbhelper);
-		imxdrm->fbhelper = NULL;
-		goto err_unbind;
-	}
-#endif
-
-	drm_kms_helper_poll_init(drm);
-
-	return 0;
-
-err_unbind:
-	component_unbind_all(drm->dev, drm);
-err_vblank:
-	drm_vblank_cleanup(drm);
-err_kms:
-	drm_mode_config_cleanup(drm);
-
-	return ret;
-}
-
-/*
  * imx_drm_add_crtc - add a new crtc
  */
 int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
@@ -406,8 +300,6 @@ static const struct drm_ioctl_desc imx_drm_ioctls[] = {
 static struct drm_driver imx_drm_driver = {
 	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
 			   DRIVER_ATOMIC,
-	.load = imx_drm_driver_load,
-	.unload = imx_drm_driver_unload,
 	.lastclose = imx_drm_driver_lastclose,
 	.gem_free_object_unlocked = drm_gem_cma_free_object,
 	.gem_vm_ops = &drm_gem_cma_vm_ops,
@@ -460,12 +352,122 @@ static int compare_of(struct device *dev, void *data)
 
 static int imx_drm_bind(struct device *dev)
 {
-	return drm_platform_init(&imx_drm_driver, to_platform_device(dev));
+	struct drm_device *drm;
+	struct imx_drm_device *imxdrm;
+	int ret;
+
+	drm = drm_dev_alloc(&imx_drm_driver, dev);
+	if (!drm)
+		return -ENOMEM;
+
+	imxdrm = devm_kzalloc(dev, sizeof(*imxdrm), GFP_KERNEL);
+	if (!imxdrm) {
+		ret = -ENOMEM;
+		goto err_unref;
+	}
+
+	imxdrm->drm = drm;
+	drm->dev_private = imxdrm;
+
+	/*
+	 * enable drm irq mode.
+	 * - with irq_enabled = true, we can use the vblank feature.
+	 *
+	 * P.S. note that we wouldn't use drm irq handler but
+	 * just specific driver own one instead because
+	 * drm framework supports only one irq handler and
+	 * drivers can well take care of their interrupts
+	 */
+	drm->irq_enabled = true;
+
+	/*
+	 * set max width and height as default value(4096x4096).
+	 * this value would be used to check framebuffer size limitation
+	 * at drm_mode_addfb().
+	 */
+	drm->mode_config.min_width = 64;
+	drm->mode_config.min_height = 64;
+	drm->mode_config.max_width = 4096;
+	drm->mode_config.max_height = 4096;
+	drm->mode_config.funcs = &imx_drm_mode_config_funcs;
+	drm->mode_config.helper_private = &imx_drm_mode_config_helpers;
+
+	drm_mode_config_init(drm);
+
+	ret = drm_vblank_init(drm, MAX_CRTC);
+	if (ret)
+		goto err_kms;
+
+	dev_set_drvdata(dev, drm);
+
+	/* Now try and bind all our sub-components */
+	ret = component_bind_all(dev, drm);
+	if (ret)
+		goto err_vblank;
+
+	drm_mode_config_reset(drm);
+
+	/*
+	 * All components are now initialised, so setup the fb helper.
+	 * The fb helper takes copies of key hardware information, so the
+	 * crtcs/connectors/encoders must not change after this point.
+	 */
+#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
+	if (legacyfb_depth != 16 && legacyfb_depth != 32) {
+		dev_warn(dev, "Invalid legacyfb_depth. Defaulting to 16bpp\n");
+		legacyfb_depth = 16;
+	}
+	imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth,
+				drm->mode_config.num_crtc, MAX_CRTC);
+	if (IS_ERR(imxdrm->fbhelper)) {
+		ret = PTR_ERR(imxdrm->fbhelper);
+		imxdrm->fbhelper = NULL;
+		goto err_unbind;
+	}
+#endif
+
+	drm_kms_helper_poll_init(drm);
+
+	ret = drm_dev_register(drm, 0);
+	if (ret)
+		goto err_fbhelper;
+
+	return 0;
+
+err_fbhelper:
+	drm_kms_helper_poll_fini(drm);
+	if (imxdrm->fbhelper)
+		drm_fbdev_cma_fini(imxdrm->fbhelper);
+err_unbind:
+	component_unbind_all(drm->dev, drm);
+err_vblank:
+	drm_vblank_cleanup(drm);
+err_kms:
+	drm_mode_config_cleanup(drm);
+err_unref:
+	drm_dev_unref(drm);
+
+	return ret;
 }
 
 static void imx_drm_unbind(struct device *dev)
 {
-	drm_put_dev(dev_get_drvdata(dev));
+	struct drm_device *drm = dev_get_drvdata(dev);
+	struct imx_drm_device *imxdrm = drm->dev_private;
+
+	drm_dev_unregister(drm);
+
+	drm_kms_helper_poll_fini(drm);
+
+	if (imxdrm->fbhelper)
+		drm_fbdev_cma_fini(imxdrm->fbhelper);
+
+	drm_mode_config_cleanup(drm);
+
+	component_unbind_all(drm->dev, drm);
+	dev_set_drvdata(dev, NULL);
+
+	drm_dev_unref(drm);
 }
 
 static const struct component_master_ops imx_drm_ops = {
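The exclusive fence that imx_drm_atomic_commit() stashes in plane_state->fence
above is consumed inside drm_atomic_helper_commit(), which is why the
open-coded wait in imx_drm_atomic_commit_tail() can be dropped. A rough sketch
of what the helper's drm_atomic_helper_wait_for_fences() does with it,
paraphrased from the helpers of this era rather than taken from this patch:

	static void wait_for_fences_sketch(struct drm_device *dev,
					   struct drm_atomic_state *state)
	{
		struct drm_plane *plane;
		struct drm_plane_state *plane_state;
		int i;

		for_each_plane_in_state(state, plane, plane_state, i) {
			/* after swap_state, plane->state is the incoming state */
			if (!plane->state->fence)
				continue;

			WARN_ON(!plane->state->fb);

			/* block until the dma-buf's exclusive fence signals */
			fence_wait(plane->state->fence, false);
			fence_put(plane->state->fence);
			plane->state->fence = NULL;
		}
	}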
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 4eed3a6addad..3ce391c239b0 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -757,11 +757,10 @@ static void imx_ldb_unbind(struct device *dev, struct device *master,
 	for (i = 0; i < 2; i++) {
 		struct imx_ldb_channel *channel = &imx_ldb->channel[i];
 
-		if (!channel->connector.funcs)
-			continue;
-
-		channel->connector.funcs->destroy(&channel->connector);
-		channel->encoder.funcs->destroy(&channel->encoder);
+		if (channel->bridge)
+			drm_bridge_detach(channel->bridge);
+		if (channel->panel)
+			drm_panel_detach(channel->panel);
 
 		kfree(channel->edid);
 		i2c_put_adapter(channel->ddc);
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 5e875944ffa2..8fc088843e55 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -685,9 +685,6 @@ static void imx_tve_unbind(struct device *dev, struct device *master,
 {
 	struct imx_tve *tve = dev_get_drvdata(dev);
 
-	tve->connector.funcs->destroy(&tve->connector);
-	tve->encoder.funcs->destroy(&tve->encoder);
-
 	if (!IS_ERR(tve->dac_reg))
 		regulator_disable(tve->dac_reg);
 }
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 6e1dc902522c..9df29f1cb16a 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -60,7 +60,8 @@ static void ipu_crtc_enable(struct drm_crtc *crtc)
 	ipu_di_enable(ipu_crtc->di);
 }
 
-static void ipu_crtc_disable(struct drm_crtc *crtc)
+static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
+				    struct drm_crtc_state *old_crtc_state)
 {
 	struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
 	struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
@@ -75,6 +76,9 @@ static void ipu_crtc_disable(struct drm_crtc *crtc)
 		crtc->state->event = NULL;
 	}
 	spin_unlock_irq(&crtc->dev->event_lock);
+
+	/* always disable planes on the CRTC */
+	drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, true);
 }
 
 static void imx_drm_crtc_reset(struct drm_crtc *crtc)
@@ -120,9 +124,14 @@ static void imx_drm_crtc_destroy_state(struct drm_crtc *crtc,
 	kfree(to_imx_crtc_state(state));
 }
 
+static void imx_drm_crtc_destroy(struct drm_crtc *crtc)
+{
+	imx_drm_remove_crtc(to_ipu_crtc(crtc)->imx_crtc);
+}
+
 static const struct drm_crtc_funcs ipu_crtc_funcs = {
 	.set_config = drm_atomic_helper_set_config,
-	.destroy = drm_crtc_cleanup,
+	.destroy = imx_drm_crtc_destroy,
 	.page_flip = drm_atomic_helper_page_flip,
 	.reset = imx_drm_crtc_reset,
 	.atomic_duplicate_state = imx_drm_crtc_duplicate_state,
@@ -241,7 +250,7 @@ static const struct drm_crtc_helper_funcs ipu_helper_funcs = {
 	.mode_set_nofb = ipu_crtc_mode_set_nofb,
 	.atomic_check = ipu_crtc_atomic_check,
 	.atomic_begin = ipu_crtc_atomic_begin,
-	.disable = ipu_crtc_disable,
+	.atomic_disable = ipu_crtc_atomic_disable,
 	.enable = ipu_crtc_enable,
 };
 
@@ -409,8 +418,6 @@ static void ipu_drm_unbind(struct device *dev, struct device *master,
 {
 	struct ipu_crtc *ipu_crtc = dev_get_drvdata(dev);
 
-	imx_drm_remove_crtc(ipu_crtc->imx_crtc);
-
 	ipu_put_resources(ipu_crtc);
 	if (ipu_crtc->plane[1])
 		ipu_plane_put_resources(ipu_crtc->plane[1]);
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 4ad67d015ec7..ce22d0a0ddc8 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -213,8 +213,12 @@ static void ipu_plane_enable(struct ipu_plane *ipu_plane)
 	ipu_dp_enable_channel(ipu_plane->dp);
 }
 
-static void ipu_plane_disable(struct ipu_plane *ipu_plane)
+static int ipu_disable_plane(struct drm_plane *plane)
 {
+	struct ipu_plane *ipu_plane = to_ipu_plane(plane);
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
 	ipu_idmac_wait_busy(ipu_plane->ipu_ch, 50);
 
 	if (ipu_plane->dp)
@@ -223,15 +227,6 @@ static void ipu_plane_disable(struct ipu_plane *ipu_plane)
 	ipu_dmfc_disable_channel(ipu_plane->dmfc);
 	if (ipu_plane->dp)
 		ipu_dp_disable(ipu_plane->ipu);
-}
-
-static int ipu_disable_plane(struct drm_plane *plane)
-{
-	struct ipu_plane *ipu_plane = to_ipu_plane(plane);
-
-	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
-	ipu_plane_disable(ipu_plane);
 
 	return 0;
 }
@@ -242,7 +237,6 @@ static void ipu_plane_destroy(struct drm_plane *plane)
 
 	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
 
-	ipu_disable_plane(plane);
 	drm_plane_cleanup(plane);
 	kfree(ipu_plane);
 }
@@ -319,13 +313,16 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
 		return -EINVAL;
 
 	/*
-	 * since we cannot touch active IDMAC channels, we do not support
-	 * resizing the enabled plane or changing its format
+	 * We support resizing an active plane or changing its format by
+	 * forcing a CRTC mode change in the plane's ->atomic_check callback
+	 * and disabling all affected active planes in the CRTC's
+	 * ->atomic_disable callback.  The planes will be re-enabled in the
+	 * plane's ->atomic_update callback.
 	 */
 	if (old_fb && (state->src_w != old_state->src_w ||
 		       state->src_h != old_state->src_h ||
 		       fb->pixel_format != old_fb->pixel_format))
-		return -EINVAL;
+		crtc_state->mode_changed = true;
 
 	eba = drm_plane_state_to_eba(state);
 
@@ -336,7 +333,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
 		return -EINVAL;
 
 	if (old_fb && fb->pitches[0] != old_fb->pitches[0])
-		return -EINVAL;
+		crtc_state->mode_changed = true;
 
 	switch (fb->pixel_format) {
 	case DRM_FORMAT_YUV420:
@@ -372,7 +369,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
 			return -EINVAL;
 
 		if (old_fb && old_fb->pitches[1] != fb->pitches[1])
-			return -EINVAL;
+			crtc_state->mode_changed = true;
 	}
 
 	return 0;
@@ -392,8 +389,12 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
 	enum ipu_color_space ics;
 
 	if (old_state->fb) {
-		ipu_plane_atomic_set_base(ipu_plane, old_state);
-		return;
+		struct drm_crtc_state *crtc_state = state->crtc->state;
+
+		if (!drm_atomic_crtc_needs_modeset(crtc_state)) {
+			ipu_plane_atomic_set_base(ipu_plane, old_state);
+			return;
+		}
 	}
 
 	switch (ipu_plane->dp_flow) {
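Taken together with the ipuv3-crtc.c change above, this is the active plane
reconfiguration flow: the plane's ->atomic_check forces a full modeset for any
change that cannot be applied to a live IDMAC channel, the CRTC's
->atomic_disable then disables all planes, and the plane's ->atomic_update
re-enables them with the new configuration. A condensed sketch of the check
side, with needs_reconfiguration() standing in as a hypothetical helper for
the size/format/pitch comparisons above:

	static int my_plane_atomic_check(struct drm_plane *plane,
					 struct drm_plane_state *state)
	{
		struct drm_crtc_state *crtc_state;

		if (!state->crtc)
			return 0;

		crtc_state = drm_atomic_get_existing_crtc_state(state->state,
								state->crtc);
		if (WARN_ON(!crtc_state))
			return -EINVAL;

		/*
		 * Instead of returning -EINVAL, any change we cannot apply
		 * to an active IDMAC channel now forces a disable/re-enable
		 * cycle by flagging a full modeset.
		 */
		if (plane->state->fb &&
		    needs_reconfiguration(plane->state, state))
			crtc_state->mode_changed = true;

		return 0;
	}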
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 74b0ac06fdab..d796ada2a47a 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -293,8 +293,10 @@ static void imx_pd_unbind(struct device *dev, struct device *master,
 {
 	struct imx_parallel_display *imxpd = dev_get_drvdata(dev);
 
-	imxpd->encoder.funcs->destroy(&imxpd->encoder);
-	imxpd->connector.funcs->destroy(&imxpd->connector);
+	if (imxpd->bridge)
+		drm_bridge_detach(imxpd->bridge);
+	if (imxpd->panel)
+		drm_panel_detach(imxpd->panel);
 
 	kfree(imxpd->edid);
 }
diff --git a/drivers/gpu/ipu-v3/Makefile b/drivers/gpu/ipu-v3/Makefile
index 107ec236a4a6..5f961416c4ee 100644
--- a/drivers/gpu/ipu-v3/Makefile
+++ b/drivers/gpu/ipu-v3/Makefile
@@ -1,4 +1,5 @@
 obj-$(CONFIG_IMX_IPUV3_CORE) += imx-ipu-v3.o
 
 imx-ipu-v3-objs := ipu-common.o ipu-cpmem.o ipu-csi.o ipu-dc.o ipu-di.o \
-		ipu-dp.o ipu-dmfc.o ipu-ic.o ipu-smfc.o
+		ipu-dp.o ipu-dmfc.o ipu-ic.o ipu-image-convert.o \
+		ipu-smfc.o ipu-vdi.o
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index d230988ddb8f..b9539f7c5e9a 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -730,6 +730,137 @@ void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi)
 }
 EXPORT_SYMBOL_GPL(ipu_set_ic_src_mux);
 
+
+/* Frame Synchronization Unit Channel Linking */
+
+struct fsu_link_reg_info {
+	int chno;
+	u32 reg;
+	u32 mask;
+	u32 val;
+};
+
+struct fsu_link_info {
+	struct fsu_link_reg_info src;
+	struct fsu_link_reg_info sink;
+};
+
+static const struct fsu_link_info fsu_link_info[] = {
+	{
+		.src  = { IPUV3_CHANNEL_IC_PRP_ENC_MEM, IPU_FS_PROC_FLOW2,
+			  FS_PRP_ENC_DEST_SEL_MASK, FS_PRP_ENC_DEST_SEL_IRT_ENC },
+		.sink = { IPUV3_CHANNEL_MEM_ROT_ENC, IPU_FS_PROC_FLOW1,
+			  FS_PRPENC_ROT_SRC_SEL_MASK, FS_PRPENC_ROT_SRC_SEL_ENC },
+	}, {
+		.src  = { IPUV3_CHANNEL_IC_PRP_VF_MEM, IPU_FS_PROC_FLOW2,
+			  FS_PRPVF_DEST_SEL_MASK, FS_PRPVF_DEST_SEL_IRT_VF },
+		.sink = { IPUV3_CHANNEL_MEM_ROT_VF, IPU_FS_PROC_FLOW1,
+			  FS_PRPVF_ROT_SRC_SEL_MASK, FS_PRPVF_ROT_SRC_SEL_VF },
+	}, {
+		.src  = { IPUV3_CHANNEL_IC_PP_MEM, IPU_FS_PROC_FLOW2,
+			  FS_PP_DEST_SEL_MASK, FS_PP_DEST_SEL_IRT_PP },
+		.sink = { IPUV3_CHANNEL_MEM_ROT_PP, IPU_FS_PROC_FLOW1,
+			  FS_PP_ROT_SRC_SEL_MASK, FS_PP_ROT_SRC_SEL_PP },
+	}, {
+		.src  = { IPUV3_CHANNEL_CSI_DIRECT, 0 },
+		.sink = { IPUV3_CHANNEL_CSI_VDI_PREV, IPU_FS_PROC_FLOW1,
+			  FS_VDI_SRC_SEL_MASK, FS_VDI_SRC_SEL_CSI_DIRECT },
+	},
+};
+
+static const struct fsu_link_info *find_fsu_link_info(int src, int sink)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(fsu_link_info); i++) {
+		if (src == fsu_link_info[i].src.chno &&
+		    sink == fsu_link_info[i].sink.chno)
+			return &fsu_link_info[i];
+	}
+
+	return NULL;
+}
+
+/*
+ * Links a source channel to a sink channel in the FSU.
+ */
+int ipu_fsu_link(struct ipu_soc *ipu, int src_ch, int sink_ch)
+{
+	const struct fsu_link_info *link;
+	u32 src_reg, sink_reg;
+	unsigned long flags;
+
+	link = find_fsu_link_info(src_ch, sink_ch);
+	if (!link)
+		return -EINVAL;
+
+	spin_lock_irqsave(&ipu->lock, flags);
+
+	if (link->src.mask) {
+		src_reg = ipu_cm_read(ipu, link->src.reg);
+		src_reg &= ~link->src.mask;
+		src_reg |= link->src.val;
+		ipu_cm_write(ipu, src_reg, link->src.reg);
+	}
+
+	if (link->sink.mask) {
+		sink_reg = ipu_cm_read(ipu, link->sink.reg);
+		sink_reg &= ~link->sink.mask;
+		sink_reg |= link->sink.val;
+		ipu_cm_write(ipu, sink_reg, link->sink.reg);
+	}
+
+	spin_unlock_irqrestore(&ipu->lock, flags);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_fsu_link);
+
+/*
+ * Unlinks source and sink channels in the FSU.
+ */
+int ipu_fsu_unlink(struct ipu_soc *ipu, int src_ch, int sink_ch)
+{
+	const struct fsu_link_info *link;
+	u32 src_reg, sink_reg;
+	unsigned long flags;
+
+	link = find_fsu_link_info(src_ch, sink_ch);
+	if (!link)
+		return -EINVAL;
+
+	spin_lock_irqsave(&ipu->lock, flags);
+
+	if (link->src.mask) {
+		src_reg = ipu_cm_read(ipu, link->src.reg);
+		src_reg &= ~link->src.mask;
+		ipu_cm_write(ipu, src_reg, link->src.reg);
+	}
+
+	if (link->sink.mask) {
+		sink_reg = ipu_cm_read(ipu, link->sink.reg);
+		sink_reg &= ~link->sink.mask;
+		ipu_cm_write(ipu, sink_reg, link->sink.reg);
+	}
+
+	spin_unlock_irqrestore(&ipu->lock, flags);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_fsu_unlink);
+
+/* Link IDMAC channels in the FSU */
+int ipu_idmac_link(struct ipuv3_channel *src, struct ipuv3_channel *sink)
+{
+	return ipu_fsu_link(src->ipu, src->num, sink->num);
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_link);
+
+/* Unlink IDMAC channels in the FSU */
+int ipu_idmac_unlink(struct ipuv3_channel *src, struct ipuv3_channel *sink)
+{
+	return ipu_fsu_unlink(src->ipu, src->num, sink->num);
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_unlink);
+
 struct ipu_devtype {
 	const char *name;
 	unsigned long cm_ofs;
@@ -839,6 +970,20 @@ static int ipu_submodules_init(struct ipu_soc *ipu,
 		goto err_ic;
 	}
 
+	ret = ipu_vdi_init(ipu, dev, ipu_base + devtype->vdi_ofs,
+			   IPU_CONF_VDI_EN | IPU_CONF_ISP_EN |
+			   IPU_CONF_IC_INPUT);
+	if (ret) {
+		unit = "vdi";
+		goto err_vdi;
+	}
+
+	ret = ipu_image_convert_init(ipu, dev);
+	if (ret) {
+		unit = "image_convert";
+		goto err_image_convert;
+	}
+
 	ret = ipu_di_init(ipu, dev, 0, ipu_base + devtype->disp0_ofs,
 			  IPU_CONF_DI0_EN, ipu_clk);
 	if (ret) {
@@ -893,6 +1038,10 @@ err_dc:
 err_di_1:
 	ipu_di_exit(ipu, 0);
 err_di_0:
+	ipu_image_convert_exit(ipu);
+err_image_convert:
+	ipu_vdi_exit(ipu);
+err_vdi:
 	ipu_ic_exit(ipu);
 err_ic:
 	ipu_csi_exit(ipu, 1);
@@ -977,6 +1126,8 @@ static void ipu_submodules_exit(struct ipu_soc *ipu)
 	ipu_dc_exit(ipu);
 	ipu_di_exit(ipu, 1);
 	ipu_di_exit(ipu, 0);
+	ipu_image_convert_exit(ipu);
+	ipu_vdi_exit(ipu);
 	ipu_ic_exit(ipu);
 	ipu_csi_exit(ipu, 1);
 	ipu_csi_exit(ipu, 0);
@@ -1213,8 +1364,6 @@ EXPORT_SYMBOL_GPL(ipu_dump);
 
 static int ipu_probe(struct platform_device *pdev)
 {
-	const struct of_device_id *of_id =
-			of_match_device(imx_ipu_dt_ids, &pdev->dev);
 	struct device_node *np = pdev->dev.of_node;
 	struct ipu_soc *ipu;
 	struct resource *res;
@@ -1222,7 +1371,9 @@ static int ipu_probe(struct platform_device *pdev)
 	int i, ret, irq_sync, irq_err;
 	const struct ipu_devtype *devtype;
 
-	devtype = of_id->data;
+	devtype = of_device_get_match_data(&pdev->dev);
+	if (!devtype)
+		return -EINVAL;
 
 	irq_sync = platform_get_irq(pdev, 0);
 	irq_err = platform_get_irq(pdev, 1);
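A minimal usage sketch of the new linking entry points: route the IC
post-processor output into the rotator and back out again, which is the same
pairing the image converter sets up below via ipu_idmac_link(chan->out_chan,
chan->rotation_in_chan). Only the channel pairs listed in fsu_link_info[] are
accepted; route_pp_through_irt() is a hypothetical wrapper:

	static int route_pp_through_irt(struct ipu_soc *ipu, bool enable)
	{
		/* link (or unlink) IC post-processor output -> IRT input */
		if (enable)
			return ipu_fsu_link(ipu, IPUV3_CHANNEL_IC_PP_MEM,
					    IPUV3_CHANNEL_MEM_ROT_PP);

		return ipu_fsu_unlink(ipu, IPUV3_CHANNEL_IC_PP_MEM,
				      IPUV3_CHANNEL_MEM_ROT_PP);
	}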
diff --git a/drivers/gpu/ipu-v3/ipu-dmfc.c b/drivers/gpu/ipu-v3/ipu-dmfc.c
index 42705bb5aaa3..a40f211f382f 100644
--- a/drivers/gpu/ipu-v3/ipu-dmfc.c
+++ b/drivers/gpu/ipu-v3/ipu-dmfc.c
@@ -123,20 +123,6 @@ int ipu_dmfc_enable_channel(struct dmfc_channel *dmfc)
 }
 EXPORT_SYMBOL_GPL(ipu_dmfc_enable_channel);
 
-static void ipu_dmfc_wait_fifos(struct ipu_dmfc_priv *priv)
-{
-	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
-
-	while ((readl(priv->base + DMFC_STAT) & 0x02fff000) != 0x02fff000) {
-		if (time_after(jiffies, timeout)) {
-			dev_warn(priv->dev,
-				 "Timeout waiting for DMFC FIFOs to clear\n");
-			break;
-		}
-		cpu_relax();
-	}
-}
-
 void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc)
 {
 	struct ipu_dmfc_priv *priv = dmfc->priv;
@@ -145,10 +131,8 @@ void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc)
 
 	priv->use_count--;
 
-	if (!priv->use_count) {
-		ipu_dmfc_wait_fifos(priv);
+	if (!priv->use_count)
 		ipu_module_disable(priv->ipu, IPU_CONF_DMFC_EN);
-	}
 
 	if (priv->use_count < 0)
 		priv->use_count = 0;
diff --git a/drivers/gpu/ipu-v3/ipu-ic.c b/drivers/gpu/ipu-v3/ipu-ic.c
index 1a37afcd85bd..321eb983c2f5 100644
--- a/drivers/gpu/ipu-v3/ipu-ic.c
+++ b/drivers/gpu/ipu-v3/ipu-ic.c
@@ -619,7 +619,7 @@ int ipu_ic_task_idma_init(struct ipu_ic *ic, struct ipuv3_channel *channel,
 	ipu_ic_write(ic, ic_idmac_2, IC_IDMAC_2);
 	ipu_ic_write(ic, ic_idmac_3, IC_IDMAC_3);
 
-	if (rot >= IPU_ROTATE_90_RIGHT)
+	if (ipu_rot_mode_is_irt(rot))
 		ic->rotation = true;
 
 unlock:
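The ipu_rot_mode_is_irt() helper used here is added to
include/video/imx-ipu-v3.h (changed in this merge, body not shown).
Presumably it is just the comparison it replaces, along the lines of:

	/* true when the rotation mode needs the Image Rotator (IRT):
	 * any 90 degree rotation, with or without flips */
	static inline bool ipu_rot_mode_is_irt(enum ipu_rotate_mode rot_mode)
	{
		return rot_mode >= IPU_ROTATE_90_RIGHT;
	}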
diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c
new file mode 100644
index 000000000000..2ba7d437a2af
--- /dev/null
+++ b/drivers/gpu/ipu-v3/ipu-image-convert.c
@@ -0,0 +1,1709 @@
+/*
+ * Copyright (C) 2012-2016 Mentor Graphics Inc.
+ *
+ * Queued image conversion support, with tiling and rotation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <video/imx-ipu-image-convert.h>
+#include "ipu-prv.h"
+
+/*
+ * The IC Resizer has a restriction that the output frame from the
+ * resizer must be 1024 or less in both width (pixels) and height
+ * (lines).
+ *
+ * The image converter attempts to split up a conversion when
+ * the desired output (converted) frame resolution exceeds the
+ * IC resizer limit of 1024 in either dimension.
+ *
+ * If either dimension of the output frame exceeds the limit, the
+ * dimension is split into 1, 2, or 4 equal stripes, for a maximum
+ * of 4*4 or 16 tiles. A conversion is then carried out for each
+ * tile (but taking care to pass the full frame stride length to
+ * the DMA channel's parameter memory!). IDMA double-buffering is used
+ * to convert each tile back-to-back when possible (see note below
+ * when double_buffering boolean is set).
+ *
+ * Note that the input frame must be split up into the same number
+ * of tiles as the output frame.
+ *
+ * FIXME: at this point there is no attempt to deal with visible seams
+ * at the tile boundaries when upscaling. The seams are caused by a reset
+ * of the bilinear upscale interpolation when starting a new tile. The
+ * seams are barely visible for small upscale factors, but become
+ * increasingly visible as the upscale factor gets larger, since more
+ * interpolated pixels get thrown out at the tile boundaries. A possible
+ * fix might be to overlap tiles of different sizes, but this must be done
+ * while also maintaining the IDMAC dma buffer address alignment and 8x8 IRT
+ * alignment restrictions of each tile.
+ */
+
+#define MAX_STRIPES_W 4
+#define MAX_STRIPES_H 4
+#define MAX_TILES (MAX_STRIPES_W * MAX_STRIPES_H)
+
+#define MIN_W 16
+#define MIN_H 8
+#define MAX_W 4096
+#define MAX_H 4096
+
+enum ipu_image_convert_type {
+	IMAGE_CONVERT_IN = 0,
+	IMAGE_CONVERT_OUT,
+};
+
+struct ipu_image_convert_dma_buf {
+	void *virt;
+	dma_addr_t phys;
+	unsigned long len;
+};
+
+struct ipu_image_convert_dma_chan {
+	int in;
+	int out;
+	int rot_in;
+	int rot_out;
+	int vdi_in_p;
+	int vdi_in;
+	int vdi_in_n;
+};
+
+/* dimensions of one tile */
+struct ipu_image_tile {
+	u32 width;
+	u32 height;
+	/* size and strides are in bytes */
+	u32 size;
+	u32 stride;
+	u32 rot_stride;
+	/* start Y or packed offset of this tile */
+	u32 offset;
+	/* offset from start to tile in U plane, for planar formats */
+	u32 u_off;
+	/* offset from start to tile in V plane, for planar formats */
+	u32 v_off;
+};
+
+struct ipu_image_convert_image {
+	struct ipu_image base;
+	enum ipu_image_convert_type type;
+
+	const struct ipu_image_pixfmt *fmt;
+	unsigned int stride;
+
+	/* # of rows (horizontal stripes) if dest height is > 1024 */
+	unsigned int num_rows;
+	/* # of columns (vertical stripes) if dest width is > 1024 */
+	unsigned int num_cols;
+
+	struct ipu_image_tile tile[MAX_TILES];
+};
+
+struct ipu_image_pixfmt {
+	u32 fourcc;        /* V4L2 fourcc */
+	int bpp;           /* total bpp */
+	int uv_width_dec;  /* decimation in width for U/V planes */
+	int uv_height_dec; /* decimation in height for U/V planes */
+	bool planar;       /* planar format */
+	bool uv_swapped;   /* U and V planes are swapped */
+	bool uv_packed;    /* partial planar (U and V in same plane) */
+};
+
+struct ipu_image_convert_ctx;
+struct ipu_image_convert_chan;
+struct ipu_image_convert_priv;
+
+struct ipu_image_convert_ctx {
+	struct ipu_image_convert_chan *chan;
+
+	ipu_image_convert_cb_t complete;
+	void *complete_context;
+
+	/* Source/destination image data and rotation mode */
+	struct ipu_image_convert_image in;
+	struct ipu_image_convert_image out;
+	enum ipu_rotate_mode rot_mode;
+
+	/* intermediate buffer for rotation */
+	struct ipu_image_convert_dma_buf rot_intermediate[2];
+
+	/* current buffer number for double buffering */
+	int cur_buf_num;
+
+	bool aborting;
+	struct completion aborted;
+
+	/* can we use double-buffering for this conversion operation? */
+	bool double_buffering;
+	/* num_rows * num_cols */
+	unsigned int num_tiles;
+	/* next tile to process */
+	unsigned int next_tile;
+	/* where to place converted tile in dest image */
+	unsigned int out_tile_map[MAX_TILES];
+
+	struct list_head list;
+};
+
+struct ipu_image_convert_chan {
+	struct ipu_image_convert_priv *priv;
+
+	enum ipu_ic_task ic_task;
+	const struct ipu_image_convert_dma_chan *dma_ch;
+
+	struct ipu_ic *ic;
+	struct ipuv3_channel *in_chan;
+	struct ipuv3_channel *out_chan;
+	struct ipuv3_channel *rotation_in_chan;
+	struct ipuv3_channel *rotation_out_chan;
+
+	/* the IPU end-of-frame irqs */
+	int out_eof_irq;
+	int rot_out_eof_irq;
+
+	spinlock_t irqlock;
+
+	/* list of convert contexts */
+	struct list_head ctx_list;
+	/* queue of conversion runs */
+	struct list_head pending_q;
+	/* queue of completed runs */
+	struct list_head done_q;
+
+	/* the current conversion run */
+	struct ipu_image_convert_run *current_run;
+};
+
+struct ipu_image_convert_priv {
+	struct ipu_image_convert_chan chan[IC_NUM_TASKS];
+	struct ipu_soc *ipu;
+};
+
+static const struct ipu_image_convert_dma_chan
+image_convert_dma_chan[IC_NUM_TASKS] = {
+	[IC_TASK_VIEWFINDER] = {
+		.in = IPUV3_CHANNEL_MEM_IC_PRP_VF,
+		.out = IPUV3_CHANNEL_IC_PRP_VF_MEM,
+		.rot_in = IPUV3_CHANNEL_MEM_ROT_VF,
+		.rot_out = IPUV3_CHANNEL_ROT_VF_MEM,
+		.vdi_in_p = IPUV3_CHANNEL_MEM_VDI_PREV,
+		.vdi_in = IPUV3_CHANNEL_MEM_VDI_CUR,
+		.vdi_in_n = IPUV3_CHANNEL_MEM_VDI_NEXT,
+	},
+	[IC_TASK_POST_PROCESSOR] = {
+		.in = IPUV3_CHANNEL_MEM_IC_PP,
+		.out = IPUV3_CHANNEL_IC_PP_MEM,
+		.rot_in = IPUV3_CHANNEL_MEM_ROT_PP,
+		.rot_out = IPUV3_CHANNEL_ROT_PP_MEM,
+	},
+};
+
+static const struct ipu_image_pixfmt image_convert_formats[] = {
+	{
+		.fourcc = V4L2_PIX_FMT_RGB565,
+		.bpp = 16,
+	}, {
+		.fourcc = V4L2_PIX_FMT_RGB24,
+		.bpp = 24,
+	}, {
+		.fourcc = V4L2_PIX_FMT_BGR24,
+		.bpp = 24,
+	}, {
+		.fourcc = V4L2_PIX_FMT_RGB32,
+		.bpp = 32,
+	}, {
+		.fourcc = V4L2_PIX_FMT_BGR32,
+		.bpp = 32,
+	}, {
+		.fourcc = V4L2_PIX_FMT_YUYV,
+		.bpp = 16,
+		.uv_width_dec = 2,
+		.uv_height_dec = 1,
+	}, {
+		.fourcc = V4L2_PIX_FMT_UYVY,
+		.bpp = 16,
+		.uv_width_dec = 2,
+		.uv_height_dec = 1,
+	}, {
+		.fourcc = V4L2_PIX_FMT_YUV420,
+		.bpp = 12,
+		.planar = true,
+		.uv_width_dec = 2,
+		.uv_height_dec = 2,
+	}, {
+		.fourcc = V4L2_PIX_FMT_YVU420,
+		.bpp = 12,
+		.planar = true,
+		.uv_width_dec = 2,
+		.uv_height_dec = 2,
+		.uv_swapped = true,
+	}, {
+		.fourcc = V4L2_PIX_FMT_NV12,
+		.bpp = 12,
+		.planar = true,
+		.uv_width_dec = 2,
+		.uv_height_dec = 2,
+		.uv_packed = true,
+	}, {
+		.fourcc = V4L2_PIX_FMT_YUV422P,
+		.bpp = 16,
+		.planar = true,
+		.uv_width_dec = 2,
+		.uv_height_dec = 1,
+	}, {
+		.fourcc = V4L2_PIX_FMT_NV16,
+		.bpp = 16,
+		.planar = true,
+		.uv_width_dec = 2,
+		.uv_height_dec = 1,
+		.uv_packed = true,
+	},
+};
+
+static const struct ipu_image_pixfmt *get_format(u32 fourcc)
+{
+	const struct ipu_image_pixfmt *ret = NULL;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(image_convert_formats); i++) {
+		if (image_convert_formats[i].fourcc == fourcc) {
+			ret = &image_convert_formats[i];
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static void dump_format(struct ipu_image_convert_ctx *ctx,
+			struct ipu_image_convert_image *ic_image)
+{
+	struct ipu_image_convert_chan *chan = ctx->chan;
+	struct ipu_image_convert_priv *priv = chan->priv;
+
+	dev_dbg(priv->ipu->dev,
+		"task %u: ctx %p: %s format: %dx%d (%dx%d tiles of size %dx%d), %c%c%c%c\n",
+		chan->ic_task, ctx,
+		ic_image->type == IMAGE_CONVERT_OUT ? "Output" : "Input",
+		ic_image->base.pix.width, ic_image->base.pix.height,
+		ic_image->num_cols, ic_image->num_rows,
+		ic_image->tile[0].width, ic_image->tile[0].height,
+		ic_image->fmt->fourcc & 0xff,
+		(ic_image->fmt->fourcc >> 8) & 0xff,
+		(ic_image->fmt->fourcc >> 16) & 0xff,
+		(ic_image->fmt->fourcc >> 24) & 0xff);
+}
+
+int ipu_image_convert_enum_format(int index, u32 *fourcc)
+{
+	const struct ipu_image_pixfmt *fmt;
+
+	if (index >= (int)ARRAY_SIZE(image_convert_formats))
+		return -EINVAL;
+
+	/* Format found */
+	fmt = &image_convert_formats[index];
+	*fourcc = fmt->fourcc;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_enum_format);
+
+static void free_dma_buf(struct ipu_image_convert_priv *priv,
+			 struct ipu_image_convert_dma_buf *buf)
+{
+	if (buf->virt)
+		dma_free_coherent(priv->ipu->dev,
+				  buf->len, buf->virt, buf->phys);
+	buf->virt = NULL;
+	buf->phys = 0;
+}
+
+static int alloc_dma_buf(struct ipu_image_convert_priv *priv,
+			 struct ipu_image_convert_dma_buf *buf,
+			 int size)
+{
+	buf->len = PAGE_ALIGN(size);
+	buf->virt = dma_alloc_coherent(priv->ipu->dev, buf->len, &buf->phys,
+				       GFP_DMA | GFP_KERNEL);
+	if (!buf->virt) {
+		dev_err(priv->ipu->dev, "failed to alloc dma buffer\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static inline int num_stripes(int dim)
+{
+	if (dim <= 1024)
+		return 1;
+	else if (dim <= 2048)
+		return 2;
+	else
+		return 4;
+}
+
+static void calc_tile_dimensions(struct ipu_image_convert_ctx *ctx,
+				 struct ipu_image_convert_image *image)
+{
+	int i;
+
+	for (i = 0; i < ctx->num_tiles; i++) {
+		struct ipu_image_tile *tile = &image->tile[i];
+
+		tile->height = image->base.pix.height / image->num_rows;
+		tile->width = image->base.pix.width / image->num_cols;
+		tile->size = ((tile->height * image->fmt->bpp) >> 3) *
+			tile->width;
+
+		if (image->fmt->planar) {
+			tile->stride = tile->width;
+			tile->rot_stride = tile->height;
+		} else {
+			tile->stride =
+				(image->fmt->bpp * tile->width) >> 3;
+			tile->rot_stride =
+				(image->fmt->bpp * tile->height) >> 3;
+		}
+	}
+}
+
+/*
+ * Use the rotation transformation to find the tile coordinates
+ * (row, col) of a tile in the destination frame that corresponds
+ * to the given tile coordinates of a source frame. The destination
+ * coordinate is then converted to a tile index.
+ */
+static int transform_tile_index(struct ipu_image_convert_ctx *ctx,
+				int src_row, int src_col)
+{
+	struct ipu_image_convert_chan *chan = ctx->chan;
+	struct ipu_image_convert_priv *priv = chan->priv;
+	struct ipu_image_convert_image *s_image = &ctx->in;
+	struct ipu_image_convert_image *d_image = &ctx->out;
+	int dst_row, dst_col;
+
+	/* with no rotation it's a 1:1 mapping */
+	if (ctx->rot_mode == IPU_ROTATE_NONE)
+		return src_row * s_image->num_cols + src_col;
+
+	/*
+	 * before doing the transform, first we have to translate
+	 * source row,col for an origin in the center of s_image
+	 */
+	src_row = src_row * 2 - (s_image->num_rows - 1);
+	src_col = src_col * 2 - (s_image->num_cols - 1);
+
+	/* do the rotation transform */
+	if (ctx->rot_mode & IPU_ROT_BIT_90) {
+		dst_col = -src_row;
+		dst_row = src_col;
+	} else {
+		dst_col = src_col;
+		dst_row = src_row;
+	}
+
+	/* apply flip */
+	if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
+		dst_col = -dst_col;
+	if (ctx->rot_mode & IPU_ROT_BIT_VFLIP)
+		dst_row = -dst_row;
+
+	dev_dbg(priv->ipu->dev, "task %u: ctx %p: [%d,%d] --> [%d,%d]\n",
+		chan->ic_task, ctx, src_col, src_row, dst_col, dst_row);
+
+	/*
+	 * finally translate dest row,col using an origin in upper
+	 * left of d_image
+	 */
+	dst_row += d_image->num_rows - 1;
+	dst_col += d_image->num_cols - 1;
+	dst_row /= 2;
+	dst_col /= 2;
+
+	return dst_row * d_image->num_cols + dst_col;
+}
+
+/*
+ * Fill the out_tile_map[] with transformed destination tile indices.
+ */
442static void calc_out_tile_map(struct ipu_image_convert_ctx *ctx)
443{
444 struct ipu_image_convert_image *s_image = &ctx->in;
445 unsigned int row, col, tile = 0;
446
447 for (row = 0; row < s_image->num_rows; row++) {
448 for (col = 0; col < s_image->num_cols; col++) {
449 ctx->out_tile_map[tile] =
450 transform_tile_index(ctx, row, col);
451 tile++;
452 }
453 }
454}
455
456static void calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
457 struct ipu_image_convert_image *image)
458{
459 struct ipu_image_convert_chan *chan = ctx->chan;
460 struct ipu_image_convert_priv *priv = chan->priv;
461 const struct ipu_image_pixfmt *fmt = image->fmt;
462 unsigned int row, col, tile = 0;
463 u32 H, w, h, y_stride, uv_stride;
464 u32 uv_row_off, uv_col_off, uv_off, u_off, v_off, tmp;
465 u32 y_row_off, y_col_off, y_off;
466 u32 y_size, uv_size;
467
468 /* setup some convenience vars */
469 H = image->base.pix.height;
470
471 y_stride = image->stride;
472 uv_stride = y_stride / fmt->uv_width_dec;
473 if (fmt->uv_packed)
474 uv_stride *= 2;
475
476 y_size = H * y_stride;
477 uv_size = y_size / (fmt->uv_width_dec * fmt->uv_height_dec);
478
479 for (row = 0; row < image->num_rows; row++) {
480 w = image->tile[tile].width;
481 h = image->tile[tile].height;
482 y_row_off = row * h * y_stride;
483 uv_row_off = (row * h * uv_stride) / fmt->uv_height_dec;
484
485 for (col = 0; col < image->num_cols; col++) {
486 y_col_off = col * w;
487 uv_col_off = y_col_off / fmt->uv_width_dec;
488 if (fmt->uv_packed)
489 uv_col_off *= 2;
490
491 y_off = y_row_off + y_col_off;
492 uv_off = uv_row_off + uv_col_off;
493
494 u_off = y_size - y_off + uv_off;
495 v_off = (fmt->uv_packed) ? 0 : u_off + uv_size;
496 if (fmt->uv_swapped) {
497 tmp = u_off;
498 u_off = v_off;
499 v_off = tmp;
500 }
501
502 image->tile[tile].offset = y_off;
503 image->tile[tile].u_off = u_off;
504 image->tile[tile++].v_off = v_off;
505
506 dev_dbg(priv->ipu->dev,
507 "task %u: ctx %p: %s@[%d,%d]: y_off %08x, u_off %08x, v_off %08x\n",
508 chan->ic_task, ctx,
509 image->type == IMAGE_CONVERT_IN ?
510 "Input" : "Output", row, col,
511 y_off, u_off, v_off);
512 }
513 }
514}
515
516static void calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx,
517 struct ipu_image_convert_image *image)
518{
519 struct ipu_image_convert_chan *chan = ctx->chan;
520 struct ipu_image_convert_priv *priv = chan->priv;
521 const struct ipu_image_pixfmt *fmt = image->fmt;
522 unsigned int row, col, tile = 0;
523 u32 w, h, bpp, stride;
524 u32 row_off, col_off;
525
526 /* setup some convenience vars */
527 stride = image->stride;
528 bpp = fmt->bpp;
529
530 for (row = 0; row < image->num_rows; row++) {
531 w = image->tile[tile].width;
532 h = image->tile[tile].height;
533 row_off = row * h * stride;
534
535 for (col = 0; col < image->num_cols; col++) {
536 col_off = (col * w * bpp) >> 3;
537
538 image->tile[tile].offset = row_off + col_off;
539 image->tile[tile].u_off = 0;
540 image->tile[tile++].v_off = 0;
541
542 dev_dbg(priv->ipu->dev,
543 "task %u: ctx %p: %s@[%d,%d]: phys %08x\n",
544 chan->ic_task, ctx,
545 image->type == IMAGE_CONVERT_IN ?
546 "Input" : "Output", row, col,
547 row_off + col_off);
548 }
549 }
550}
551
552static void calc_tile_offsets(struct ipu_image_convert_ctx *ctx,
553 struct ipu_image_convert_image *image)
554{
555 if (image->fmt->planar)
556 calc_tile_offsets_planar(ctx, image);
557 else
558 calc_tile_offsets_packed(ctx, image);
559}
560
561/*
562 * return the number of runs in given queue (pending_q or done_q)
563 * for this context. hold irqlock when calling.
564 */
565static int get_run_count(struct ipu_image_convert_ctx *ctx,
566 struct list_head *q)
567{
568 struct ipu_image_convert_run *run;
569 int count = 0;
570
571 lockdep_assert_held(&ctx->chan->irqlock);
572
573 list_for_each_entry(run, q, list) {
574 if (run->ctx == ctx)
575 count++;
576 }
577
578 return count;
579}
580
581static void convert_stop(struct ipu_image_convert_run *run)
582{
583 struct ipu_image_convert_ctx *ctx = run->ctx;
584 struct ipu_image_convert_chan *chan = ctx->chan;
585 struct ipu_image_convert_priv *priv = chan->priv;
586
587 dev_dbg(priv->ipu->dev, "%s: task %u: stopping ctx %p run %p\n",
588 __func__, chan->ic_task, ctx, run);
589
590 /* disable IC tasks and the channels */
591 ipu_ic_task_disable(chan->ic);
592 ipu_idmac_disable_channel(chan->in_chan);
593 ipu_idmac_disable_channel(chan->out_chan);
594
595 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
596 ipu_idmac_disable_channel(chan->rotation_in_chan);
597 ipu_idmac_disable_channel(chan->rotation_out_chan);
598 ipu_idmac_unlink(chan->out_chan, chan->rotation_in_chan);
599 }
600
601 ipu_ic_disable(chan->ic);
602}
603
604static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
605 struct ipuv3_channel *channel,
606 struct ipu_image_convert_image *image,
607 enum ipu_rotate_mode rot_mode,
608 bool rot_swap_width_height)
609{
610 struct ipu_image_convert_chan *chan = ctx->chan;
611 unsigned int burst_size;
612 u32 width, height, stride;
613 dma_addr_t addr0, addr1 = 0;
614 struct ipu_image tile_image;
615 unsigned int tile_idx[2];
616
617 if (image->type == IMAGE_CONVERT_OUT) {
618 tile_idx[0] = ctx->out_tile_map[0];
619 tile_idx[1] = ctx->out_tile_map[1];
620 } else {
621 tile_idx[0] = 0;
622 tile_idx[1] = 1;
623 }
624
625 if (rot_swap_width_height) {
626 width = image->tile[0].height;
627 height = image->tile[0].width;
628 stride = image->tile[0].rot_stride;
629 addr0 = ctx->rot_intermediate[0].phys;
630 if (ctx->double_buffering)
631 addr1 = ctx->rot_intermediate[1].phys;
632 } else {
633 width = image->tile[0].width;
634 height = image->tile[0].height;
635 stride = image->stride;
636 addr0 = image->base.phys0 +
637 image->tile[tile_idx[0]].offset;
638 if (ctx->double_buffering)
639 addr1 = image->base.phys0 +
640 image->tile[tile_idx[1]].offset;
641 }
642
643 ipu_cpmem_zero(channel);
644
645 memset(&tile_image, 0, sizeof(tile_image));
646 tile_image.pix.width = tile_image.rect.width = width;
647 tile_image.pix.height = tile_image.rect.height = height;
648 tile_image.pix.bytesperline = stride;
649 tile_image.pix.pixelformat = image->fmt->fourcc;
650 tile_image.phys0 = addr0;
651 tile_image.phys1 = addr1;
652 ipu_cpmem_set_image(channel, &tile_image);
653
654 if (image->fmt->planar && !rot_swap_width_height)
655 ipu_cpmem_set_uv_offset(channel,
656 image->tile[tile_idx[0]].u_off,
657 image->tile[tile_idx[0]].v_off);
658
659 if (rot_mode)
660 ipu_cpmem_set_rotation(channel, rot_mode);
661
662 if (channel == chan->rotation_in_chan ||
663 channel == chan->rotation_out_chan) {
664 burst_size = 8;
665 ipu_cpmem_set_block_mode(channel);
666 } else
667 burst_size = (width % 16) ? 8 : 16;
668
669 ipu_cpmem_set_burstsize(channel, burst_size);
670
671 ipu_ic_task_idma_init(chan->ic, channel, width, height,
672 burst_size, rot_mode);
673
674 ipu_cpmem_set_axi_id(channel, 1);
675
676 ipu_idmac_set_double_buffer(channel, ctx->double_buffering);
677}
678
679static int convert_start(struct ipu_image_convert_run *run)
680{
681 struct ipu_image_convert_ctx *ctx = run->ctx;
682 struct ipu_image_convert_chan *chan = ctx->chan;
683 struct ipu_image_convert_priv *priv = chan->priv;
684 struct ipu_image_convert_image *s_image = &ctx->in;
685 struct ipu_image_convert_image *d_image = &ctx->out;
686 enum ipu_color_space src_cs, dest_cs;
687 unsigned int dest_width, dest_height;
688 int ret;
689
690 dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p\n",
691 __func__, chan->ic_task, ctx, run);
692
693 src_cs = ipu_pixelformat_to_colorspace(s_image->fmt->fourcc);
694 dest_cs = ipu_pixelformat_to_colorspace(d_image->fmt->fourcc);
695
696 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
697 /* swap width/height for resizer */
698 dest_width = d_image->tile[0].height;
699 dest_height = d_image->tile[0].width;
700 } else {
701 dest_width = d_image->tile[0].width;
702 dest_height = d_image->tile[0].height;
703 }
704
705 /* setup the IC resizer and CSC */
706 ret = ipu_ic_task_init(chan->ic,
707 s_image->tile[0].width,
708 s_image->tile[0].height,
709 dest_width,
710 dest_height,
711 src_cs, dest_cs);
712 if (ret) {
713 dev_err(priv->ipu->dev, "ipu_ic_task_init failed, %d\n", ret);
714 return ret;
715 }
716
717 /* init the source MEM-->IC PP IDMAC channel */
718 init_idmac_channel(ctx, chan->in_chan, s_image,
719 IPU_ROTATE_NONE, false);
720
721 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
722 /* init the IC PP-->MEM IDMAC channel */
723 init_idmac_channel(ctx, chan->out_chan, d_image,
724 IPU_ROTATE_NONE, true);
725
726 /* init the MEM-->IC PP ROT IDMAC channel */
727 init_idmac_channel(ctx, chan->rotation_in_chan, d_image,
728 ctx->rot_mode, true);
729
730 /* init the destination IC PP ROT-->MEM IDMAC channel */
731 init_idmac_channel(ctx, chan->rotation_out_chan, d_image,
732 IPU_ROTATE_NONE, false);
733
734 /* now link IC PP-->MEM to MEM-->IC PP ROT */
735 ipu_idmac_link(chan->out_chan, chan->rotation_in_chan);
736 } else {
737 /* init the destination IC PP-->MEM IDMAC channel */
738 init_idmac_channel(ctx, chan->out_chan, d_image,
739 ctx->rot_mode, false);
740 }
741
742 /* enable the IC */
743 ipu_ic_enable(chan->ic);
744
745 /* set buffers ready */
746 ipu_idmac_select_buffer(chan->in_chan, 0);
747 ipu_idmac_select_buffer(chan->out_chan, 0);
748 if (ipu_rot_mode_is_irt(ctx->rot_mode))
749 ipu_idmac_select_buffer(chan->rotation_out_chan, 0);
750 if (ctx->double_buffering) {
751 ipu_idmac_select_buffer(chan->in_chan, 1);
752 ipu_idmac_select_buffer(chan->out_chan, 1);
753 if (ipu_rot_mode_is_irt(ctx->rot_mode))
754 ipu_idmac_select_buffer(chan->rotation_out_chan, 1);
755 }
756
757 /* enable the channels! */
758 ipu_idmac_enable_channel(chan->in_chan);
759 ipu_idmac_enable_channel(chan->out_chan);
760 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
761 ipu_idmac_enable_channel(chan->rotation_in_chan);
762 ipu_idmac_enable_channel(chan->rotation_out_chan);
763 }
764
765 ipu_ic_task_enable(chan->ic);
766
767 ipu_cpmem_dump(chan->in_chan);
768 ipu_cpmem_dump(chan->out_chan);
769 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
770 ipu_cpmem_dump(chan->rotation_in_chan);
771 ipu_cpmem_dump(chan->rotation_out_chan);
772 }
773
774 ipu_dump(priv->ipu);
775
776 return 0;
777}
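[A sketch of the IDMAC topology that convert_start() programs above, using the chan->* channel names from this file (no behavior beyond the code shown is implied):

    no rotation:  MEM --in_chan--> IC --out_chan--> MEM

    with IRT:     MEM --in_chan--> IC --out_chan--> MEM
                  MEM --rotation_in_chan--> IRT --rotation_out_chan--> MEM
                  (out_chan and rotation_in_chan are linked via
                   ipu_idmac_link(), so each resized tile is handed to
                   the rotator without CPU intervention)]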
778
779/* hold irqlock when calling */
780static int do_run(struct ipu_image_convert_run *run)
781{
782 struct ipu_image_convert_ctx *ctx = run->ctx;
783 struct ipu_image_convert_chan *chan = ctx->chan;
784
785 lockdep_assert_held(&chan->irqlock);
786
787 ctx->in.base.phys0 = run->in_phys;
788 ctx->out.base.phys0 = run->out_phys;
789
790 ctx->cur_buf_num = 0;
791 ctx->next_tile = 1;
792
793 /* remove run from pending_q and set as current */
794 list_del(&run->list);
795 chan->current_run = run;
796
797 return convert_start(run);
798}
799
800/* hold irqlock when calling */
801static void run_next(struct ipu_image_convert_chan *chan)
802{
803 struct ipu_image_convert_priv *priv = chan->priv;
804 struct ipu_image_convert_run *run, *tmp;
805 int ret;
806
807 lockdep_assert_held(&chan->irqlock);
808
809 list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
810 /* skip contexts that are aborting */
811 if (run->ctx->aborting) {
812 dev_dbg(priv->ipu->dev,
813 "%s: task %u: skipping aborting ctx %p run %p\n",
814 __func__, chan->ic_task, run->ctx, run);
815 continue;
816 }
817
818 ret = do_run(run);
819 if (!ret)
820 break;
821
822 /*
823 * something went wrong with start, add the run
 824	 * to the done_q and continue to the next run in the
 825	 * pending_q.
826 */
827 run->status = ret;
828 list_add_tail(&run->list, &chan->done_q);
829 chan->current_run = NULL;
830 }
831}
832
833static void empty_done_q(struct ipu_image_convert_chan *chan)
834{
835 struct ipu_image_convert_priv *priv = chan->priv;
836 struct ipu_image_convert_run *run;
837 unsigned long flags;
838
839 spin_lock_irqsave(&chan->irqlock, flags);
840
841 while (!list_empty(&chan->done_q)) {
842 run = list_entry(chan->done_q.next,
843 struct ipu_image_convert_run,
844 list);
845
846 list_del(&run->list);
847
848 dev_dbg(priv->ipu->dev,
849 "%s: task %u: completing ctx %p run %p with %d\n",
850 __func__, chan->ic_task, run->ctx, run, run->status);
851
852 /* call the completion callback and free the run */
853 spin_unlock_irqrestore(&chan->irqlock, flags);
854 run->ctx->complete(run, run->ctx->complete_context);
855 spin_lock_irqsave(&chan->irqlock, flags);
856 }
857
858 spin_unlock_irqrestore(&chan->irqlock, flags);
859}
860
861/*
862 * the bottom half thread clears out the done_q, calling the
 863 * completion handler for each run.
864 */
865static irqreturn_t do_bh(int irq, void *dev_id)
866{
867 struct ipu_image_convert_chan *chan = dev_id;
868 struct ipu_image_convert_priv *priv = chan->priv;
869 struct ipu_image_convert_ctx *ctx;
870 unsigned long flags;
871
872 dev_dbg(priv->ipu->dev, "%s: task %u: enter\n", __func__,
873 chan->ic_task);
874
875 empty_done_q(chan);
876
877 spin_lock_irqsave(&chan->irqlock, flags);
878
879 /*
880 * the done_q is cleared out, signal any contexts
881 * that are aborting that abort can complete.
882 */
883 list_for_each_entry(ctx, &chan->ctx_list, list) {
884 if (ctx->aborting) {
885 dev_dbg(priv->ipu->dev,
886 "%s: task %u: signaling abort for ctx %p\n",
887 __func__, chan->ic_task, ctx);
888 complete(&ctx->aborted);
889 }
890 }
891
892 spin_unlock_irqrestore(&chan->irqlock, flags);
893
894 dev_dbg(priv->ipu->dev, "%s: task %u: exit\n", __func__,
895 chan->ic_task);
896
897 return IRQ_HANDLED;
898}
899
900/* hold irqlock when calling */
901static irqreturn_t do_irq(struct ipu_image_convert_run *run)
902{
903 struct ipu_image_convert_ctx *ctx = run->ctx;
904 struct ipu_image_convert_chan *chan = ctx->chan;
905 struct ipu_image_tile *src_tile, *dst_tile;
906 struct ipu_image_convert_image *s_image = &ctx->in;
907 struct ipu_image_convert_image *d_image = &ctx->out;
908 struct ipuv3_channel *outch;
909 unsigned int dst_idx;
910
911 lockdep_assert_held(&chan->irqlock);
912
913 outch = ipu_rot_mode_is_irt(ctx->rot_mode) ?
914 chan->rotation_out_chan : chan->out_chan;
915
916 /*
917 * It is difficult to stop the channel DMA before the channels
918 * enter the paused state. Without double-buffering the channels
919 * are always in a paused state when the EOF irq occurs, so it
920 * is safe to stop the channels now. For double-buffering we
921 * just ignore the abort until the operation completes, when it
922 * is safe to shut down.
923 */
924 if (ctx->aborting && !ctx->double_buffering) {
925 convert_stop(run);
926 run->status = -EIO;
927 goto done;
928 }
929
930 if (ctx->next_tile == ctx->num_tiles) {
931 /*
932 * the conversion is complete
933 */
934 convert_stop(run);
935 run->status = 0;
936 goto done;
937 }
938
939 /*
940 * not done, place the next tile buffers.
941 */
942 if (!ctx->double_buffering) {
943
944 src_tile = &s_image->tile[ctx->next_tile];
945 dst_idx = ctx->out_tile_map[ctx->next_tile];
946 dst_tile = &d_image->tile[dst_idx];
947
948 ipu_cpmem_set_buffer(chan->in_chan, 0,
949 s_image->base.phys0 + src_tile->offset);
950 ipu_cpmem_set_buffer(outch, 0,
951 d_image->base.phys0 + dst_tile->offset);
952 if (s_image->fmt->planar)
953 ipu_cpmem_set_uv_offset(chan->in_chan,
954 src_tile->u_off,
955 src_tile->v_off);
956 if (d_image->fmt->planar)
957 ipu_cpmem_set_uv_offset(outch,
958 dst_tile->u_off,
959 dst_tile->v_off);
960
961 ipu_idmac_select_buffer(chan->in_chan, 0);
962 ipu_idmac_select_buffer(outch, 0);
963
964 } else if (ctx->next_tile < ctx->num_tiles - 1) {
965
966 src_tile = &s_image->tile[ctx->next_tile + 1];
967 dst_idx = ctx->out_tile_map[ctx->next_tile + 1];
968 dst_tile = &d_image->tile[dst_idx];
969
970 ipu_cpmem_set_buffer(chan->in_chan, ctx->cur_buf_num,
971 s_image->base.phys0 + src_tile->offset);
972 ipu_cpmem_set_buffer(outch, ctx->cur_buf_num,
973 d_image->base.phys0 + dst_tile->offset);
974
975 ipu_idmac_select_buffer(chan->in_chan, ctx->cur_buf_num);
976 ipu_idmac_select_buffer(outch, ctx->cur_buf_num);
977
978 ctx->cur_buf_num ^= 1;
979 }
980
981 ctx->next_tile++;
982 return IRQ_HANDLED;
983done:
984 list_add_tail(&run->list, &chan->done_q);
985 chan->current_run = NULL;
986 run_next(chan);
987 return IRQ_WAKE_THREAD;
988}
989
990static irqreturn_t norotate_irq(int irq, void *data)
991{
992 struct ipu_image_convert_chan *chan = data;
993 struct ipu_image_convert_ctx *ctx;
994 struct ipu_image_convert_run *run;
995 unsigned long flags;
996 irqreturn_t ret;
997
998 spin_lock_irqsave(&chan->irqlock, flags);
999
1000 /* get current run and its context */
1001 run = chan->current_run;
1002 if (!run) {
1003 ret = IRQ_NONE;
1004 goto out;
1005 }
1006
1007 ctx = run->ctx;
1008
1009 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
1010 /* this is a rotation operation, just ignore */
1011 spin_unlock_irqrestore(&chan->irqlock, flags);
1012 return IRQ_HANDLED;
1013 }
1014
1015 ret = do_irq(run);
1016out:
1017 spin_unlock_irqrestore(&chan->irqlock, flags);
1018 return ret;
1019}
1020
1021static irqreturn_t rotate_irq(int irq, void *data)
1022{
1023 struct ipu_image_convert_chan *chan = data;
1024 struct ipu_image_convert_priv *priv = chan->priv;
1025 struct ipu_image_convert_ctx *ctx;
1026 struct ipu_image_convert_run *run;
1027 unsigned long flags;
1028 irqreturn_t ret;
1029
1030 spin_lock_irqsave(&chan->irqlock, flags);
1031
1032 /* get current run and its context */
1033 run = chan->current_run;
1034 if (!run) {
1035 ret = IRQ_NONE;
1036 goto out;
1037 }
1038
1039 ctx = run->ctx;
1040
1041 if (!ipu_rot_mode_is_irt(ctx->rot_mode)) {
1042 /* this was NOT a rotation operation, shouldn't happen */
1043 dev_err(priv->ipu->dev, "Unexpected rotation interrupt\n");
1044 spin_unlock_irqrestore(&chan->irqlock, flags);
1045 return IRQ_HANDLED;
1046 }
1047
1048 ret = do_irq(run);
1049out:
1050 spin_unlock_irqrestore(&chan->irqlock, flags);
1051 return ret;
1052}
1053
1054/*
1055 * try to force the completion of runs for this ctx. Called when
1056 * abort wait times out in ipu_image_convert_abort().
1057 */
1058static void force_abort(struct ipu_image_convert_ctx *ctx)
1059{
1060 struct ipu_image_convert_chan *chan = ctx->chan;
1061 struct ipu_image_convert_run *run;
1062 unsigned long flags;
1063
1064 spin_lock_irqsave(&chan->irqlock, flags);
1065
1066 run = chan->current_run;
1067 if (run && run->ctx == ctx) {
1068 convert_stop(run);
1069 run->status = -EIO;
1070 list_add_tail(&run->list, &chan->done_q);
1071 chan->current_run = NULL;
1072 run_next(chan);
1073 }
1074
1075 spin_unlock_irqrestore(&chan->irqlock, flags);
1076
1077 empty_done_q(chan);
1078}
1079
1080static void release_ipu_resources(struct ipu_image_convert_chan *chan)
1081{
1082 if (chan->out_eof_irq >= 0)
1083 free_irq(chan->out_eof_irq, chan);
1084 if (chan->rot_out_eof_irq >= 0)
1085 free_irq(chan->rot_out_eof_irq, chan);
1086
1087 if (!IS_ERR_OR_NULL(chan->in_chan))
1088 ipu_idmac_put(chan->in_chan);
1089 if (!IS_ERR_OR_NULL(chan->out_chan))
1090 ipu_idmac_put(chan->out_chan);
1091 if (!IS_ERR_OR_NULL(chan->rotation_in_chan))
1092 ipu_idmac_put(chan->rotation_in_chan);
1093 if (!IS_ERR_OR_NULL(chan->rotation_out_chan))
1094 ipu_idmac_put(chan->rotation_out_chan);
1095 if (!IS_ERR_OR_NULL(chan->ic))
1096 ipu_ic_put(chan->ic);
1097
1098 chan->in_chan = chan->out_chan = chan->rotation_in_chan =
1099 chan->rotation_out_chan = NULL;
1100 chan->out_eof_irq = chan->rot_out_eof_irq = -1;
1101}
1102
1103static int get_ipu_resources(struct ipu_image_convert_chan *chan)
1104{
1105 const struct ipu_image_convert_dma_chan *dma = chan->dma_ch;
1106 struct ipu_image_convert_priv *priv = chan->priv;
1107 int ret;
1108
1109 /* get IC */
1110 chan->ic = ipu_ic_get(priv->ipu, chan->ic_task);
1111 if (IS_ERR(chan->ic)) {
1112 dev_err(priv->ipu->dev, "could not acquire IC\n");
1113 ret = PTR_ERR(chan->ic);
1114 goto err;
1115 }
1116
1117 /* get IDMAC channels */
1118 chan->in_chan = ipu_idmac_get(priv->ipu, dma->in);
1119 chan->out_chan = ipu_idmac_get(priv->ipu, dma->out);
1120 if (IS_ERR(chan->in_chan) || IS_ERR(chan->out_chan)) {
1121 dev_err(priv->ipu->dev, "could not acquire idmac channels\n");
1122 ret = -EBUSY;
1123 goto err;
1124 }
1125
1126 chan->rotation_in_chan = ipu_idmac_get(priv->ipu, dma->rot_in);
1127 chan->rotation_out_chan = ipu_idmac_get(priv->ipu, dma->rot_out);
1128 if (IS_ERR(chan->rotation_in_chan) || IS_ERR(chan->rotation_out_chan)) {
1129 dev_err(priv->ipu->dev,
1130 "could not acquire idmac rotation channels\n");
1131 ret = -EBUSY;
1132 goto err;
1133 }
1134
1135 /* acquire the EOF interrupts */
1136 chan->out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
1137 chan->out_chan,
1138 IPU_IRQ_EOF);
1139
1140 ret = request_threaded_irq(chan->out_eof_irq, norotate_irq, do_bh,
1141 0, "ipu-ic", chan);
1142 if (ret < 0) {
1143 dev_err(priv->ipu->dev, "could not acquire irq %d\n",
1144 chan->out_eof_irq);
1145 chan->out_eof_irq = -1;
1146 goto err;
1147 }
1148
1149 chan->rot_out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
1150 chan->rotation_out_chan,
1151 IPU_IRQ_EOF);
1152
1153 ret = request_threaded_irq(chan->rot_out_eof_irq, rotate_irq, do_bh,
1154 0, "ipu-ic", chan);
1155 if (ret < 0) {
1156 dev_err(priv->ipu->dev, "could not acquire irq %d\n",
1157 chan->rot_out_eof_irq);
1158 chan->rot_out_eof_irq = -1;
1159 goto err;
1160 }
1161
1162 return 0;
1163err:
1164 release_ipu_resources(chan);
1165 return ret;
1166}
1167
1168static int fill_image(struct ipu_image_convert_ctx *ctx,
1169 struct ipu_image_convert_image *ic_image,
1170 struct ipu_image *image,
1171 enum ipu_image_convert_type type)
1172{
1173 struct ipu_image_convert_priv *priv = ctx->chan->priv;
1174
1175 ic_image->base = *image;
1176 ic_image->type = type;
1177
1178 ic_image->fmt = get_format(image->pix.pixelformat);
1179 if (!ic_image->fmt) {
1180 dev_err(priv->ipu->dev, "pixelformat not supported for %s\n",
1181 type == IMAGE_CONVERT_OUT ? "Output" : "Input");
1182 return -EINVAL;
1183 }
1184
1185 if (ic_image->fmt->planar)
1186 ic_image->stride = ic_image->base.pix.width;
1187 else
1188 ic_image->stride = ic_image->base.pix.bytesperline;
1189
1190 calc_tile_dimensions(ctx, ic_image);
1191 calc_tile_offsets(ctx, ic_image);
1192
1193 return 0;
1194}
1195
1196/* borrowed from drivers/media/v4l2-core/v4l2-common.c */
1197static unsigned int clamp_align(unsigned int x, unsigned int min,
1198 unsigned int max, unsigned int align)
1199{
1200 /* Bits that must be zero to be aligned */
1201 unsigned int mask = ~((1 << align) - 1);
1202
1203 /* Clamp to aligned min and max */
1204 x = clamp(x, (min + ~mask) & mask, max & mask);
1205
1206 /* Round to nearest aligned value */
1207 if (align)
1208 x = (x + (1 << (align - 1))) & mask;
1209
1210 return x;
1211}
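[A worked instance of clamp_align(), with arguments chosen purely for illustration:

    clamp_align(1283, 32, 4096, 6):
        mask = ~((1 << 6) - 1) = ~63
        x    = clamp(1283, (32 + 63) & ~63, 4096 & ~63) = 1283
        x    = (1283 + (1 << 5)) & ~63 = 1315 & ~63 = 1280

so the value is rounded to the nearest multiple of 2^align (here 64) within the aligned [min, max] range.]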
1212
1213/*
1214 * We have to adjust the tile width such that the tile physaddrs and
1215 * U and V plane offsets are multiples of 8 bytes as required by
1216 * the IPU DMA Controller. For the planar formats, this corresponds
1217 * to a pixel alignment of 16 (but use a more formal equation since
1218 * the variables are available). For all the packed formats, 8 is
1219 * good enough.
1220 */
1221static inline u32 tile_width_align(const struct ipu_image_pixfmt *fmt)
1222{
1223 return fmt->planar ? 8 * fmt->uv_width_dec : 8;
1224}
1225
1226/*
1227 * For tile height alignment, we have to ensure that the output tile
1228 * heights are multiples of 8 lines if the IRT is required by the
1229 * given rotation mode (the IRT performs rotations on 8x8 blocks
1230 * at a time). If the IRT is not used, or for input image tiles,
1231 * 2 lines are good enough.
1232 */
1233static inline u32 tile_height_align(enum ipu_image_convert_type type,
1234 enum ipu_rotate_mode rot_mode)
1235{
1236 return (type == IMAGE_CONVERT_OUT &&
1237 ipu_rot_mode_is_irt(rot_mode)) ? 8 : 2;
1238}
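[A few concrete values for the two alignment helpers, assuming the format table earlier in this file gives 4:2:0 planar formats uv_width_dec = 2:

    tile_width_align(YUV420)  = 8 * 2 = 16 pixels
    tile_width_align(RGB565)  = 8 pixels (packed)
    tile_height_align(IMAGE_CONVERT_OUT, IPU_ROTATE_90_RIGHT) = 8 lines
    tile_height_align(IMAGE_CONVERT_IN,  IPU_ROTATE_90_RIGHT) = 2 lines]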
1239
1240/* Adjusts input/output images to IPU restrictions */
1241void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
1242 enum ipu_rotate_mode rot_mode)
1243{
1244 const struct ipu_image_pixfmt *infmt, *outfmt;
1245 unsigned int num_in_rows, num_in_cols;
1246 unsigned int num_out_rows, num_out_cols;
1247 u32 w_align, h_align;
1248
1249 infmt = get_format(in->pix.pixelformat);
1250 outfmt = get_format(out->pix.pixelformat);
1251
1252 /* set some default pixel formats if needed */
1253 if (!infmt) {
1254 in->pix.pixelformat = V4L2_PIX_FMT_RGB24;
1255 infmt = get_format(V4L2_PIX_FMT_RGB24);
1256 }
1257 if (!outfmt) {
1258 out->pix.pixelformat = V4L2_PIX_FMT_RGB24;
1259 outfmt = get_format(V4L2_PIX_FMT_RGB24);
1260 }
1261
1262 /* image converter does not handle fields */
1263 in->pix.field = out->pix.field = V4L2_FIELD_NONE;
1264
1265 /* resizer cannot downsize more than 4:1 */
1266 if (ipu_rot_mode_is_irt(rot_mode)) {
1267 out->pix.height = max_t(__u32, out->pix.height,
1268 in->pix.width / 4);
1269 out->pix.width = max_t(__u32, out->pix.width,
1270 in->pix.height / 4);
1271 } else {
1272 out->pix.width = max_t(__u32, out->pix.width,
1273 in->pix.width / 4);
1274 out->pix.height = max_t(__u32, out->pix.height,
1275 in->pix.height / 4);
1276 }
1277
1278 /* get tiling rows/cols from output format */
1279 num_out_rows = num_stripes(out->pix.height);
1280 num_out_cols = num_stripes(out->pix.width);
1281 if (ipu_rot_mode_is_irt(rot_mode)) {
1282 num_in_rows = num_out_cols;
1283 num_in_cols = num_out_rows;
1284 } else {
1285 num_in_rows = num_out_rows;
1286 num_in_cols = num_out_cols;
1287 }
1288
1289 /* align input width/height */
1290 w_align = ilog2(tile_width_align(infmt) * num_in_cols);
1291 h_align = ilog2(tile_height_align(IMAGE_CONVERT_IN, rot_mode) *
1292 num_in_rows);
1293 in->pix.width = clamp_align(in->pix.width, MIN_W, MAX_W, w_align);
1294 in->pix.height = clamp_align(in->pix.height, MIN_H, MAX_H, h_align);
1295
1296 /* align output width/height */
1297 w_align = ilog2(tile_width_align(outfmt) * num_out_cols);
1298 h_align = ilog2(tile_height_align(IMAGE_CONVERT_OUT, rot_mode) *
1299 num_out_rows);
1300 out->pix.width = clamp_align(out->pix.width, MIN_W, MAX_W, w_align);
1301 out->pix.height = clamp_align(out->pix.height, MIN_H, MAX_H, h_align);
1302
1303 /* set input/output strides and image sizes */
1304 in->pix.bytesperline = (in->pix.width * infmt->bpp) >> 3;
1305 in->pix.sizeimage = in->pix.height * in->pix.bytesperline;
1306 out->pix.bytesperline = (out->pix.width * outfmt->bpp) >> 3;
1307 out->pix.sizeimage = out->pix.height * out->pix.bytesperline;
1308}
1309EXPORT_SYMBOL_GPL(ipu_image_convert_adjust);
1310
1311/*
1313 * this is used by ipu_image_convert_prepare() to verify the given input and
1313 * output images are valid before starting the conversion. Clients can
1314 * also call it before calling ipu_image_convert_prepare().
1315 */
1316int ipu_image_convert_verify(struct ipu_image *in, struct ipu_image *out,
1317 enum ipu_rotate_mode rot_mode)
1318{
1319 struct ipu_image testin, testout;
1320
1321 testin = *in;
1322 testout = *out;
1323
1324 ipu_image_convert_adjust(&testin, &testout, rot_mode);
1325
1326 if (testin.pix.width != in->pix.width ||
1327 testin.pix.height != in->pix.height ||
1328 testout.pix.width != out->pix.width ||
1329 testout.pix.height != out->pix.height)
1330 return -EINVAL;
1331
1332 return 0;
1333}
1334EXPORT_SYMBOL_GPL(ipu_image_convert_verify);
1335
1336/*
1337 * Call ipu_image_convert_prepare() to prepare for the conversion of
1338 * the given images and rotation mode. Returns a new conversion context.
1339 */
1340struct ipu_image_convert_ctx *
1341ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
1342 struct ipu_image *in, struct ipu_image *out,
1343 enum ipu_rotate_mode rot_mode,
1344 ipu_image_convert_cb_t complete,
1345 void *complete_context)
1346{
1347 struct ipu_image_convert_priv *priv = ipu->image_convert_priv;
1348 struct ipu_image_convert_image *s_image, *d_image;
1349 struct ipu_image_convert_chan *chan;
1350 struct ipu_image_convert_ctx *ctx;
1351 unsigned long flags;
1352 bool get_res;
1353 int ret;
1354
1355 if (!in || !out || !complete ||
1356 (ic_task != IC_TASK_VIEWFINDER &&
1357 ic_task != IC_TASK_POST_PROCESSOR))
1358 return ERR_PTR(-EINVAL);
1359
1360 /* verify the in/out images before continuing */
1361 ret = ipu_image_convert_verify(in, out, rot_mode);
1362 if (ret) {
1363 dev_err(priv->ipu->dev, "%s: in/out formats invalid\n",
1364 __func__);
1365 return ERR_PTR(ret);
1366 }
1367
1368 chan = &priv->chan[ic_task];
1369
1370 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1371 if (!ctx)
1372 return ERR_PTR(-ENOMEM);
1373
1374 dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p\n", __func__,
1375 chan->ic_task, ctx);
1376
1377 ctx->chan = chan;
1378 init_completion(&ctx->aborted);
1379
1380 s_image = &ctx->in;
1381 d_image = &ctx->out;
1382
1383 /* set tiling and rotation */
1384 d_image->num_rows = num_stripes(out->pix.height);
1385 d_image->num_cols = num_stripes(out->pix.width);
1386 if (ipu_rot_mode_is_irt(rot_mode)) {
1387 s_image->num_rows = d_image->num_cols;
1388 s_image->num_cols = d_image->num_rows;
1389 } else {
1390 s_image->num_rows = d_image->num_rows;
1391 s_image->num_cols = d_image->num_cols;
1392 }
1393
1394 ctx->num_tiles = d_image->num_cols * d_image->num_rows;
1395 ctx->rot_mode = rot_mode;
1396
1397 ret = fill_image(ctx, s_image, in, IMAGE_CONVERT_IN);
1398 if (ret)
1399 goto out_free;
1400 ret = fill_image(ctx, d_image, out, IMAGE_CONVERT_OUT);
1401 if (ret)
1402 goto out_free;
1403
1404 calc_out_tile_map(ctx);
1405
1406 dump_format(ctx, s_image);
1407 dump_format(ctx, d_image);
1408
1409 ctx->complete = complete;
1410 ctx->complete_context = complete_context;
1411
1412 /*
1413 * Can we use double-buffering for this operation? If there is
1414 * only one tile (the whole image can be converted in a single
1415 * operation) there's no point in using double-buffering. Also,
1416 * the IPU's IDMAC channels allow only a single U and V plane
1417 * offset shared between both buffers, but these offsets change
1418 * for every tile, and therefore would have to be updated for
1419 * each buffer, which is not possible. So double-buffering is
1420 * impossible when either the source or destination image is
1421 * a planar format (YUV420, YUV422P, etc.).
1422 */
1423 ctx->double_buffering = (ctx->num_tiles > 1 &&
1424 !s_image->fmt->planar &&
1425 !d_image->fmt->planar);
1426
1427 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
1428 ret = alloc_dma_buf(priv, &ctx->rot_intermediate[0],
1429 d_image->tile[0].size);
1430 if (ret)
1431 goto out_free;
1432 if (ctx->double_buffering) {
1433 ret = alloc_dma_buf(priv,
1434 &ctx->rot_intermediate[1],
1435 d_image->tile[0].size);
1436 if (ret)
1437 goto out_free_dmabuf0;
1438 }
1439 }
1440
1441 spin_lock_irqsave(&chan->irqlock, flags);
1442
1443 get_res = list_empty(&chan->ctx_list);
1444
1445 list_add_tail(&ctx->list, &chan->ctx_list);
1446
1447 spin_unlock_irqrestore(&chan->irqlock, flags);
1448
1449 if (get_res) {
1450 ret = get_ipu_resources(chan);
1451 if (ret)
1452 goto out_free_dmabuf1;
1453 }
1454
1455 return ctx;
1456
1457out_free_dmabuf1:
1458 free_dma_buf(priv, &ctx->rot_intermediate[1]);
1459 spin_lock_irqsave(&chan->irqlock, flags);
1460 list_del(&ctx->list);
1461 spin_unlock_irqrestore(&chan->irqlock, flags);
1462out_free_dmabuf0:
1463 free_dma_buf(priv, &ctx->rot_intermediate[0]);
1464out_free:
1465 kfree(ctx);
1466 return ERR_PTR(ret);
1467}
1468EXPORT_SYMBOL_GPL(ipu_image_convert_prepare);
1469
1470/*
1471 * Carry out a single image conversion run. Only the physaddrs of the input
1472 * and output image buffers are needed. The conversion context must have
1473 * been created previously with ipu_image_convert_prepare().
1474 */
1475int ipu_image_convert_queue(struct ipu_image_convert_run *run)
1476{
1477 struct ipu_image_convert_chan *chan;
1478 struct ipu_image_convert_priv *priv;
1479 struct ipu_image_convert_ctx *ctx;
1480 unsigned long flags;
1481 int ret = 0;
1482
1483 if (!run || !run->ctx || !run->in_phys || !run->out_phys)
1484 return -EINVAL;
1485
1486 ctx = run->ctx;
1487 chan = ctx->chan;
1488 priv = chan->priv;
1489
1490 dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p run %p\n", __func__,
1491 chan->ic_task, ctx, run);
1492
1493 INIT_LIST_HEAD(&run->list);
1494
1495 spin_lock_irqsave(&chan->irqlock, flags);
1496
1497 if (ctx->aborting) {
1498 ret = -EIO;
1499 goto unlock;
1500 }
1501
1502 list_add_tail(&run->list, &chan->pending_q);
1503
1504 if (!chan->current_run) {
1505 ret = do_run(run);
1506 if (ret)
1507 chan->current_run = NULL;
1508 }
1509unlock:
1510 spin_unlock_irqrestore(&chan->irqlock, flags);
1511 return ret;
1512}
1513EXPORT_SYMBOL_GPL(ipu_image_convert_queue);
1514
1515/* Abort any active or pending conversions for this context */
1516void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
1517{
1518 struct ipu_image_convert_chan *chan = ctx->chan;
1519 struct ipu_image_convert_priv *priv = chan->priv;
1520 struct ipu_image_convert_run *run, *active_run, *tmp;
1521 unsigned long flags;
1522 int run_count, ret;
1523 bool need_abort;
1524
1525 reinit_completion(&ctx->aborted);
1526
1527 spin_lock_irqsave(&chan->irqlock, flags);
1528
1529 /* move all remaining pending runs in this context to done_q */
1530 list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
1531 if (run->ctx != ctx)
1532 continue;
1533 run->status = -EIO;
1534 list_move_tail(&run->list, &chan->done_q);
1535 }
1536
1537 run_count = get_run_count(ctx, &chan->done_q);
1538 active_run = (chan->current_run && chan->current_run->ctx == ctx) ?
1539 chan->current_run : NULL;
1540
1541 need_abort = (run_count || active_run);
1542
1543 ctx->aborting = need_abort;
1544
1545 spin_unlock_irqrestore(&chan->irqlock, flags);
1546
1547 if (!need_abort) {
1548 dev_dbg(priv->ipu->dev,
1549 "%s: task %u: no abort needed for ctx %p\n",
1550 __func__, chan->ic_task, ctx);
1551 return;
1552 }
1553
1554 dev_dbg(priv->ipu->dev,
1555 "%s: task %u: wait for completion: %d runs, active run %p\n",
1556 __func__, chan->ic_task, run_count, active_run);
1557
1558 ret = wait_for_completion_timeout(&ctx->aborted,
1559 msecs_to_jiffies(10000));
1560 if (ret == 0) {
1561 dev_warn(priv->ipu->dev, "%s: timeout\n", __func__);
1562 force_abort(ctx);
1563 }
1564
1565 ctx->aborting = false;
1566}
1567EXPORT_SYMBOL_GPL(ipu_image_convert_abort);
1568
1569/* Unprepare image conversion context */
1570void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx)
1571{
1572 struct ipu_image_convert_chan *chan = ctx->chan;
1573 struct ipu_image_convert_priv *priv = chan->priv;
1574 unsigned long flags;
1575 bool put_res;
1576
1577 /* make sure no runs are hanging around */
1578 ipu_image_convert_abort(ctx);
1579
1580 dev_dbg(priv->ipu->dev, "%s: task %u: removing ctx %p\n", __func__,
1581 chan->ic_task, ctx);
1582
1583 spin_lock_irqsave(&chan->irqlock, flags);
1584
1585 list_del(&ctx->list);
1586
1587 put_res = list_empty(&chan->ctx_list);
1588
1589 spin_unlock_irqrestore(&chan->irqlock, flags);
1590
1591 if (put_res)
1592 release_ipu_resources(chan);
1593
1594 free_dma_buf(priv, &ctx->rot_intermediate[1]);
1595 free_dma_buf(priv, &ctx->rot_intermediate[0]);
1596
1597 kfree(ctx);
1598}
1599EXPORT_SYMBOL_GPL(ipu_image_convert_unprepare);
1600
1601/*
1602 * "Canned" asynchronous single image conversion. Allocates and returns
1603 * a new conversion run. On successful return the caller must free the
1604 * run and call ipu_image_convert_unprepare() after conversion completes.
1605 */
1606struct ipu_image_convert_run *
1607ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
1608 struct ipu_image *in, struct ipu_image *out,
1609 enum ipu_rotate_mode rot_mode,
1610 ipu_image_convert_cb_t complete,
1611 void *complete_context)
1612{
1613 struct ipu_image_convert_ctx *ctx;
1614 struct ipu_image_convert_run *run;
1615 int ret;
1616
1617 ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode,
1618 complete, complete_context);
1619 if (IS_ERR(ctx))
1620		return ERR_CAST(ctx);
1621
1622 run = kzalloc(sizeof(*run), GFP_KERNEL);
1623 if (!run) {
1624 ipu_image_convert_unprepare(ctx);
1625 return ERR_PTR(-ENOMEM);
1626 }
1627
1628 run->ctx = ctx;
1629 run->in_phys = in->phys0;
1630 run->out_phys = out->phys0;
1631
1632 ret = ipu_image_convert_queue(run);
1633 if (ret) {
1634 ipu_image_convert_unprepare(ctx);
1635 kfree(run);
1636 return ERR_PTR(ret);
1637 }
1638
1639 return run;
1640}
1641EXPORT_SYMBOL_GPL(ipu_image_convert);
1642
1643/* "Canned" synchronous single image conversion */
1644static void image_convert_sync_complete(struct ipu_image_convert_run *run,
1645 void *data)
1646{
1647 struct completion *comp = data;
1648
1649 complete(comp);
1650}
1651
1652int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
1653 struct ipu_image *in, struct ipu_image *out,
1654 enum ipu_rotate_mode rot_mode)
1655{
1656 struct ipu_image_convert_run *run;
1657 struct completion comp;
1658 int ret;
1659
1660 init_completion(&comp);
1661
1662 run = ipu_image_convert(ipu, ic_task, in, out, rot_mode,
1663 image_convert_sync_complete, &comp);
1664 if (IS_ERR(run))
1665 return PTR_ERR(run);
1666
1667 ret = wait_for_completion_timeout(&comp, msecs_to_jiffies(10000));
1668 ret = (ret == 0) ? -ETIMEDOUT : 0;
1669
1670 ipu_image_convert_unprepare(run->ctx);
1671 kfree(run);
1672
1673 return ret;
1674}
1675EXPORT_SYMBOL_GPL(ipu_image_convert_sync);
1676
1677int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev)
1678{
1679 struct ipu_image_convert_priv *priv;
1680 int i;
1681
1682 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
1683 if (!priv)
1684 return -ENOMEM;
1685
1686 ipu->image_convert_priv = priv;
1687 priv->ipu = ipu;
1688
1689 for (i = 0; i < IC_NUM_TASKS; i++) {
1690 struct ipu_image_convert_chan *chan = &priv->chan[i];
1691
1692 chan->ic_task = i;
1693 chan->priv = priv;
1694 chan->dma_ch = &image_convert_dma_chan[i];
1695 chan->out_eof_irq = -1;
1696 chan->rot_out_eof_irq = -1;
1697
1698 spin_lock_init(&chan->irqlock);
1699 INIT_LIST_HEAD(&chan->ctx_list);
1700 INIT_LIST_HEAD(&chan->pending_q);
1701 INIT_LIST_HEAD(&chan->done_q);
1702 }
1703
1704 return 0;
1705}
1706
1707void ipu_image_convert_exit(struct ipu_soc *ipu)
1708{
1709}
diff --git a/drivers/gpu/ipu-v3/ipu-prv.h b/drivers/gpu/ipu-v3/ipu-prv.h
index fd47f8f555cd..22e47b68b14a 100644
--- a/drivers/gpu/ipu-v3/ipu-prv.h
+++ b/drivers/gpu/ipu-v3/ipu-prv.h
@@ -75,6 +75,33 @@ struct ipu_soc;
75#define IPU_INT_CTRL(n) IPU_CM_REG(0x003C + 4 * (n)) 75#define IPU_INT_CTRL(n) IPU_CM_REG(0x003C + 4 * (n))
76#define IPU_INT_STAT(n) IPU_CM_REG(0x0200 + 4 * (n)) 76#define IPU_INT_STAT(n) IPU_CM_REG(0x0200 + 4 * (n))
77 77
78/* FS_PROC_FLOW1 */
79#define FS_PRPENC_ROT_SRC_SEL_MASK (0xf << 0)
80#define FS_PRPENC_ROT_SRC_SEL_ENC (0x7 << 0)
81#define FS_PRPVF_ROT_SRC_SEL_MASK (0xf << 8)
82#define FS_PRPVF_ROT_SRC_SEL_VF (0x8 << 8)
83#define FS_PP_SRC_SEL_MASK (0xf << 12)
84#define FS_PP_ROT_SRC_SEL_MASK (0xf << 16)
85#define FS_PP_ROT_SRC_SEL_PP (0x5 << 16)
86#define FS_VDI1_SRC_SEL_MASK (0x3 << 20)
87#define FS_VDI3_SRC_SEL_MASK (0x3 << 20)
88#define FS_PRP_SRC_SEL_MASK (0xf << 24)
89#define FS_VDI_SRC_SEL_MASK (0x3 << 28)
90#define FS_VDI_SRC_SEL_CSI_DIRECT (0x1 << 28)
91#define FS_VDI_SRC_SEL_VDOA (0x2 << 28)
92
93/* FS_PROC_FLOW2 */
94#define FS_PRP_ENC_DEST_SEL_MASK (0xf << 0)
95#define FS_PRP_ENC_DEST_SEL_IRT_ENC (0x1 << 0)
96#define FS_PRPVF_DEST_SEL_MASK (0xf << 4)
97#define FS_PRPVF_DEST_SEL_IRT_VF (0x1 << 4)
98#define FS_PRPVF_ROT_DEST_SEL_MASK (0xf << 8)
99#define FS_PP_DEST_SEL_MASK (0xf << 12)
100#define FS_PP_DEST_SEL_IRT_PP (0x3 << 12)
101#define FS_PP_ROT_DEST_SEL_MASK (0xf << 16)
102#define FS_PRPENC_ROT_DEST_SEL_MASK (0xf << 20)
103#define FS_PRP_DEST_SEL_MASK (0xf << 24)
104
78#define IPU_DI0_COUNTER_RELEASE (1 << 24) 105#define IPU_DI0_COUNTER_RELEASE (1 << 24)
79#define IPU_DI1_COUNTER_RELEASE (1 << 25) 106#define IPU_DI1_COUNTER_RELEASE (1 << 25)
80 107
@@ -138,6 +165,8 @@ struct ipu_dc_priv;
138struct ipu_dmfc_priv; 165struct ipu_dmfc_priv;
139struct ipu_di; 166struct ipu_di;
140struct ipu_ic_priv; 167struct ipu_ic_priv;
168struct ipu_vdi;
169struct ipu_image_convert_priv;
141struct ipu_smfc_priv; 170struct ipu_smfc_priv;
142 171
143struct ipu_devtype; 172struct ipu_devtype;
@@ -170,6 +199,8 @@ struct ipu_soc {
170 struct ipu_di *di_priv[2]; 199 struct ipu_di *di_priv[2];
171 struct ipu_csi *csi_priv[2]; 200 struct ipu_csi *csi_priv[2];
172 struct ipu_ic_priv *ic_priv; 201 struct ipu_ic_priv *ic_priv;
202 struct ipu_vdi *vdi_priv;
203 struct ipu_image_convert_priv *image_convert_priv;
173 struct ipu_smfc_priv *smfc_priv; 204 struct ipu_smfc_priv *smfc_priv;
174}; 205};
175 206
@@ -200,6 +231,13 @@ int ipu_ic_init(struct ipu_soc *ipu, struct device *dev,
200 unsigned long base, unsigned long tpmem_base); 231 unsigned long base, unsigned long tpmem_base);
201void ipu_ic_exit(struct ipu_soc *ipu); 232void ipu_ic_exit(struct ipu_soc *ipu);
202 233
234int ipu_vdi_init(struct ipu_soc *ipu, struct device *dev,
235 unsigned long base, u32 module);
236void ipu_vdi_exit(struct ipu_soc *ipu);
237
238int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev);
239void ipu_image_convert_exit(struct ipu_soc *ipu);
240
203int ipu_di_init(struct ipu_soc *ipu, struct device *dev, int id, 241int ipu_di_init(struct ipu_soc *ipu, struct device *dev, int id,
204 unsigned long base, u32 module, struct clk *ipu_clk); 242 unsigned long base, u32 module, struct clk *ipu_clk);
205void ipu_di_exit(struct ipu_soc *ipu, int id); 243void ipu_di_exit(struct ipu_soc *ipu, int id);
diff --git a/drivers/gpu/ipu-v3/ipu-vdi.c b/drivers/gpu/ipu-v3/ipu-vdi.c
new file mode 100644
index 000000000000..f27bf5a12ebc
--- /dev/null
+++ b/drivers/gpu/ipu-v3/ipu-vdi.c
@@ -0,0 +1,243 @@
1/*
2 * Copyright (C) 2012-2016 Mentor Graphics Inc.
3 * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details.
14 */
15#include <linux/io.h>
16#include "ipu-prv.h"
17
18struct ipu_vdi {
19 void __iomem *base;
20 u32 module;
21 spinlock_t lock;
22 int use_count;
23 struct ipu_soc *ipu;
24};
25
26
27/* VDI Register Offsets */
28#define VDI_FSIZE 0x0000
29#define VDI_C 0x0004
30
31/* VDI Register Fields */
32#define VDI_C_CH_420 (0 << 1)
33#define VDI_C_CH_422 (1 << 1)
34#define VDI_C_MOT_SEL_MASK (0x3 << 2)
35#define VDI_C_MOT_SEL_FULL (2 << 2)
36#define VDI_C_MOT_SEL_LOW (1 << 2)
37#define VDI_C_MOT_SEL_MED (0 << 2)
38#define VDI_C_BURST_SIZE1_4 (3 << 4)
39#define VDI_C_BURST_SIZE2_4 (3 << 8)
40#define VDI_C_BURST_SIZE3_4 (3 << 12)
41#define VDI_C_BURST_SIZE_MASK 0xF
42#define VDI_C_BURST_SIZE1_OFFSET 4
43#define VDI_C_BURST_SIZE2_OFFSET 8
44#define VDI_C_BURST_SIZE3_OFFSET 12
45#define VDI_C_VWM1_SET_1 (0 << 16)
46#define VDI_C_VWM1_SET_2 (1 << 16)
47#define VDI_C_VWM1_CLR_2 (1 << 19)
48#define VDI_C_VWM3_SET_1 (0 << 22)
49#define VDI_C_VWM3_SET_2 (1 << 22)
50#define VDI_C_VWM3_CLR_2 (1 << 25)
51#define VDI_C_TOP_FIELD_MAN_1 (1 << 30)
52#define VDI_C_TOP_FIELD_AUTO_1 (1 << 31)
53
54static inline u32 ipu_vdi_read(struct ipu_vdi *vdi, unsigned int offset)
55{
56 return readl(vdi->base + offset);
57}
58
59static inline void ipu_vdi_write(struct ipu_vdi *vdi, u32 value,
60 unsigned int offset)
61{
62 writel(value, vdi->base + offset);
63}
64
65void ipu_vdi_set_field_order(struct ipu_vdi *vdi, v4l2_std_id std, u32 field)
66{
67 bool top_field_0 = false;
68 unsigned long flags;
69 u32 reg;
70
71 switch (field) {
72 case V4L2_FIELD_INTERLACED_TB:
73 case V4L2_FIELD_SEQ_TB:
74 case V4L2_FIELD_TOP:
75 top_field_0 = true;
76 break;
77 case V4L2_FIELD_INTERLACED_BT:
78 case V4L2_FIELD_SEQ_BT:
79 case V4L2_FIELD_BOTTOM:
80 top_field_0 = false;
81 break;
82 default:
83 top_field_0 = (std & V4L2_STD_525_60) ? true : false;
84 break;
85 }
86
87 spin_lock_irqsave(&vdi->lock, flags);
88
89 reg = ipu_vdi_read(vdi, VDI_C);
90 if (top_field_0)
91 reg &= ~VDI_C_TOP_FIELD_MAN_1;
92 else
93 reg |= VDI_C_TOP_FIELD_MAN_1;
94 ipu_vdi_write(vdi, reg, VDI_C);
95
96 spin_unlock_irqrestore(&vdi->lock, flags);
97}
98EXPORT_SYMBOL_GPL(ipu_vdi_set_field_order);
99
100void ipu_vdi_set_motion(struct ipu_vdi *vdi, enum ipu_motion_sel motion_sel)
101{
102 unsigned long flags;
103 u32 reg;
104
105 spin_lock_irqsave(&vdi->lock, flags);
106
107 reg = ipu_vdi_read(vdi, VDI_C);
108
109 reg &= ~VDI_C_MOT_SEL_MASK;
110
111 switch (motion_sel) {
112 case MED_MOTION:
113 reg |= VDI_C_MOT_SEL_MED;
114 break;
115 case HIGH_MOTION:
116 reg |= VDI_C_MOT_SEL_FULL;
117 break;
118 default:
119 reg |= VDI_C_MOT_SEL_LOW;
120 break;
121 }
122
123 ipu_vdi_write(vdi, reg, VDI_C);
124
125 spin_unlock_irqrestore(&vdi->lock, flags);
126}
127EXPORT_SYMBOL_GPL(ipu_vdi_set_motion);
128
129void ipu_vdi_setup(struct ipu_vdi *vdi, u32 code, int xres, int yres)
130{
131 unsigned long flags;
132 u32 pixel_fmt, reg;
133
134 spin_lock_irqsave(&vdi->lock, flags);
135
136 reg = ((yres - 1) << 16) | (xres - 1);
137 ipu_vdi_write(vdi, reg, VDI_FSIZE);
138
139 /*
140 * Full motion, only vertical filter is used.
141 * Burst size is 4 accesses
142 */
143 if (code == MEDIA_BUS_FMT_UYVY8_2X8 ||
144 code == MEDIA_BUS_FMT_UYVY8_1X16 ||
145 code == MEDIA_BUS_FMT_YUYV8_2X8 ||
146 code == MEDIA_BUS_FMT_YUYV8_1X16)
147 pixel_fmt = VDI_C_CH_422;
148 else
149 pixel_fmt = VDI_C_CH_420;
150
151 reg = ipu_vdi_read(vdi, VDI_C);
152 reg |= pixel_fmt;
153 reg |= VDI_C_BURST_SIZE2_4;
154 reg |= VDI_C_BURST_SIZE1_4 | VDI_C_VWM1_CLR_2;
155 reg |= VDI_C_BURST_SIZE3_4 | VDI_C_VWM3_CLR_2;
156 ipu_vdi_write(vdi, reg, VDI_C);
157
158 spin_unlock_irqrestore(&vdi->lock, flags);
159}
160EXPORT_SYMBOL_GPL(ipu_vdi_setup);
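[For reference, a worked VDI_FSIZE value (frame size chosen for illustration): a 720x576 frame yields reg = ((576 - 1) << 16) | (720 - 1) = (0x23f << 16) | 0x2cf = 0x023f02cf.]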
161
162void ipu_vdi_unsetup(struct ipu_vdi *vdi)
163{
164 unsigned long flags;
165
166 spin_lock_irqsave(&vdi->lock, flags);
167 ipu_vdi_write(vdi, 0, VDI_FSIZE);
168 ipu_vdi_write(vdi, 0, VDI_C);
169 spin_unlock_irqrestore(&vdi->lock, flags);
170}
171EXPORT_SYMBOL_GPL(ipu_vdi_unsetup);
172
173int ipu_vdi_enable(struct ipu_vdi *vdi)
174{
175 unsigned long flags;
176
177 spin_lock_irqsave(&vdi->lock, flags);
178
179 if (!vdi->use_count)
180 ipu_module_enable(vdi->ipu, vdi->module);
181
182 vdi->use_count++;
183
184 spin_unlock_irqrestore(&vdi->lock, flags);
185
186 return 0;
187}
188EXPORT_SYMBOL_GPL(ipu_vdi_enable);
189
190int ipu_vdi_disable(struct ipu_vdi *vdi)
191{
192 unsigned long flags;
193
194 spin_lock_irqsave(&vdi->lock, flags);
195
196 if (vdi->use_count) {
197 if (!--vdi->use_count)
198 ipu_module_disable(vdi->ipu, vdi->module);
199 }
200
201 spin_unlock_irqrestore(&vdi->lock, flags);
202
203 return 0;
204}
205EXPORT_SYMBOL_GPL(ipu_vdi_disable);
206
207struct ipu_vdi *ipu_vdi_get(struct ipu_soc *ipu)
208{
209 return ipu->vdi_priv;
210}
211EXPORT_SYMBOL_GPL(ipu_vdi_get);
212
213void ipu_vdi_put(struct ipu_vdi *vdi)
214{
215}
216EXPORT_SYMBOL_GPL(ipu_vdi_put);
217
218int ipu_vdi_init(struct ipu_soc *ipu, struct device *dev,
219 unsigned long base, u32 module)
220{
221 struct ipu_vdi *vdi;
222
223 vdi = devm_kzalloc(dev, sizeof(*vdi), GFP_KERNEL);
224 if (!vdi)
225 return -ENOMEM;
226
227 ipu->vdi_priv = vdi;
228
229 spin_lock_init(&vdi->lock);
230 vdi->module = module;
231 vdi->base = devm_ioremap(dev, base, PAGE_SIZE);
232 if (!vdi->base)
233 return -ENOMEM;
234
235 dev_dbg(dev, "VDI base: 0x%08lx remapped to %p\n", base, vdi->base);
236 vdi->ipu = ipu;
237
238 return 0;
239}
240
241void ipu_vdi_exit(struct ipu_soc *ipu)
242{
243}
diff --git a/include/video/imx-ipu-image-convert.h b/include/video/imx-ipu-image-convert.h
new file mode 100644
index 000000000000..7b87efc6d77a
--- /dev/null
+++ b/include/video/imx-ipu-image-convert.h
@@ -0,0 +1,207 @@
1/*
2 * Copyright (C) 2012-2016 Mentor Graphics Inc.
3 *
4 * i.MX Queued image conversion support, with tiling and rotation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
13 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * for more details.
15 */
16#ifndef __IMX_IPU_IMAGE_CONVERT_H__
17#define __IMX_IPU_IMAGE_CONVERT_H__
18
19#include <video/imx-ipu-v3.h>
20
21struct ipu_image_convert_ctx;
22
23/**
24 * struct ipu_image_convert_run - image conversion run request struct
25 *
26 * @ctx: the conversion context
27 * @in_phys: dma addr of input image buffer for this run
28 * @out_phys: dma addr of output image buffer for this run
29 * @status: completion status of this run
30 */
31struct ipu_image_convert_run {
32 struct ipu_image_convert_ctx *ctx;
33
34 dma_addr_t in_phys;
35 dma_addr_t out_phys;
36
37 int status;
38
39 /* internal to image converter, callers don't touch */
40 struct list_head list;
41};
42
43/**
44 * ipu_image_convert_cb_t - conversion callback function prototype
45 *
46 * @run: the completed conversion run pointer
47 * @ctx: a private context pointer for the callback
48 */
49typedef void (*ipu_image_convert_cb_t)(struct ipu_image_convert_run *run,
50 void *ctx);
51
52/**
53 * ipu_image_convert_enum_format() - enumerate the image converter's
54 * supported input and output pixel formats.
55 *
56 * @index: pixel format index
57 * @fourcc: v4l2 fourcc for this index
58 *
59 * Returns 0 with a valid index and fills in v4l2 fourcc, -EINVAL otherwise.
60 *
 61 * In V4L2, drivers can call ipu_image_convert_enum_format() in .enum_fmt.
62 */
63int ipu_image_convert_enum_format(int index, u32 *fourcc);
64
65/**
66 * ipu_image_convert_adjust() - adjust input/output images to IPU restrictions.
67 *
68 * @in: input image format, adjusted on return
69 * @out: output image format, adjusted on return
70 * @rot_mode: rotation mode
71 *
72 * In V4L2, drivers can call ipu_image_convert_adjust() in .try_fmt.
73 */
74void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
75 enum ipu_rotate_mode rot_mode);
76
77/**
78 * ipu_image_convert_verify() - verify that input/output image formats
79 * and rotation mode meet IPU restrictions.
80 *
81 * @in: input image format
82 * @out: output image format
83 * @rot_mode: rotation mode
84 *
85 * Returns 0 if the formats and rotation mode meet IPU restrictions,
86 * -EINVAL otherwise.
87 */
88int ipu_image_convert_verify(struct ipu_image *in, struct ipu_image *out,
89 enum ipu_rotate_mode rot_mode);
90
91/**
92 * ipu_image_convert_prepare() - prepare a conversion context.
93 *
94 * @ipu: the IPU handle to use for the conversions
95 * @ic_task: the IC task to use for the conversions
96 * @in: input image format
97 * @out: output image format
98 * @rot_mode: rotation mode
99 * @complete: run completion callback
100 * @complete_context: a context pointer for the completion callback
101 *
102 * Returns an opaque conversion context pointer on success, error pointer
103 * on failure. The input/output formats and rotation mode must already meet
104 * IPU restrictions.
105 *
106 * In V4L2, drivers should call ipu_image_convert_prepare() at streamon.
107 */
108struct ipu_image_convert_ctx *
109ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
110 struct ipu_image *in, struct ipu_image *out,
111 enum ipu_rotate_mode rot_mode,
112 ipu_image_convert_cb_t complete,
113 void *complete_context);
114
115/**
116 * ipu_image_convert_unprepare() - unprepare a conversion context.
117 *
118 * @ctx: the conversion context pointer to unprepare
119 *
120 * Aborts any active or pending conversions for this context and
121 * frees the context. Any currently active or pending runs belonging
122 * to this context are returned via the completion callback with an
123 * error run status.
124 *
125 * In V4L2, drivers should call ipu_image_convert_unprepare() at
126 * streamoff.
127 */
128void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx);
129
130/**
131 * ipu_image_convert_queue() - queue a conversion run
132 *
133 * @run: the run request pointer
134 *
135 * ipu_image_convert_run must be dynamically allocated (_not_ as a local
136 * var) by callers and filled in with a previously prepared conversion
137 * context handle and the dma addrs of the input and output image buffers
138 * for this conversion run.
139 *
140 * When this conversion completes, the run pointer is returned via the
141 * completion callback. The caller is responsible for freeing the run
142 * object after it completes.
143 *
144 * In V4L2, drivers should call ipu_image_convert_queue() while
145 * streaming to queue the conversion of a received input buffer.
146 * For example, for mem2mem devices this would be called in .device_run.
147 */
148int ipu_image_convert_queue(struct ipu_image_convert_run *run);
149
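[A minimal usage sketch of the queued API (error handling trimmed; my_complete, in_dma and out_dma are illustrative placeholders, not part of this header):

	static void my_complete(struct ipu_image_convert_run *run, void *data)
	{
		/* runs in the converter's threaded (bottom half) context */
		if (run->status)
			pr_err("conversion failed: %d\n", run->status);
		kfree(run);
	}

	/* in/out: struct ipu_image already accepted by ipu_image_convert_verify() */
	ctx = ipu_image_convert_prepare(ipu, IC_TASK_POST_PROCESSOR,
					&in, &out, IPU_ROTATE_NONE,
					my_complete, NULL);
	run = kzalloc(sizeof(*run), GFP_KERNEL);
	run->ctx = ctx;
	run->in_phys = in_dma;		/* dma_addr_t of input buffer */
	run->out_phys = out_dma;	/* dma_addr_t of output buffer */
	ret = ipu_image_convert_queue(run);
	/* ... on completion, my_complete() frees the run ... */
	ipu_image_convert_unprepare(ctx);	/* at streamoff/teardown */]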
150/**
151 * ipu_image_convert_abort() - abort conversions
152 *
153 * @ctx: the conversion context pointer
154 *
155 * This will abort any active or pending conversions for this context.
156 * Any currently active or pending runs belonging to this context are
157 * returned via the completion callback with an error run status.
158 */
159void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx);
160
161/**
162 * ipu_image_convert() - asynchronous image conversion request
163 *
164 * @ipu: the IPU handle to use for the conversion
165 * @ic_task: the IC task to use for the conversion
166 * @in: input image format
167 * @out: output image format
168 * @rot_mode: rotation mode
169 * @complete: run completion callback
170 * @complete_context: a context pointer for the completion callback
171 *
172 * Request a single image conversion. Returns the run that has been queued.
173 * A conversion context is automatically created and is available in run->ctx.
174 * As with ipu_image_convert_prepare(), the input/output formats and rotation
175 * mode must already meet IPU restrictions.
176 *
177 * On successful return the caller can queue more run requests if needed, using
178 * the prepared context in run->ctx. The caller is responsible for unpreparing
179 * the context when no more conversion requests are needed.
180 */
181struct ipu_image_convert_run *
182ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
183 struct ipu_image *in, struct ipu_image *out,
184 enum ipu_rotate_mode rot_mode,
185 ipu_image_convert_cb_t complete,
186 void *complete_context);
187
188/**
189 * ipu_image_convert_sync() - synchronous single image conversion request
190 *
191 * @ipu: the IPU handle to use for the conversion
192 * @ic_task: the IC task to use for the conversion
193 * @in: input image format
194 * @out: output image format
195 * @rot_mode: rotation mode
196 *
197 * Carry out a single image conversion. Returns when the conversion
198 * completes. The input/output formats and rotation mode must already
199 * meet IPU restrictions. The created context is automatically unprepared
200 * and the run freed on return.
201 */
202int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
203 struct ipu_image *in, struct ipu_image *out,
204 enum ipu_rotate_mode rot_mode);
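[A usage sketch (arguments illustrative): ret = ipu_image_convert_sync(ipu, IC_TASK_POST_PROCESSOR, &in, &out, IPU_ROTATE_NONE); blocks until the conversion finishes and returns 0, or returns -ETIMEDOUT after an internal 10 second timeout.]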
205
206
207#endif /* __IMX_IPU_IMAGE_CONVERT_H__ */
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index c3de7406474b..173073eb6aaf 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -63,23 +63,41 @@ enum ipu_csi_dest {
63/* 63/*
64 * Enumeration of IPU rotation modes 64 * Enumeration of IPU rotation modes
65 */ 65 */
66#define IPU_ROT_BIT_VFLIP (1 << 0)
67#define IPU_ROT_BIT_HFLIP (1 << 1)
68#define IPU_ROT_BIT_90 (1 << 2)
69
66enum ipu_rotate_mode { 70enum ipu_rotate_mode {
67 IPU_ROTATE_NONE = 0, 71 IPU_ROTATE_NONE = 0,
68 IPU_ROTATE_VERT_FLIP, 72 IPU_ROTATE_VERT_FLIP = IPU_ROT_BIT_VFLIP,
69 IPU_ROTATE_HORIZ_FLIP, 73 IPU_ROTATE_HORIZ_FLIP = IPU_ROT_BIT_HFLIP,
70 IPU_ROTATE_180, 74 IPU_ROTATE_180 = (IPU_ROT_BIT_VFLIP | IPU_ROT_BIT_HFLIP),
71 IPU_ROTATE_90_RIGHT, 75 IPU_ROTATE_90_RIGHT = IPU_ROT_BIT_90,
72 IPU_ROTATE_90_RIGHT_VFLIP, 76 IPU_ROTATE_90_RIGHT_VFLIP = (IPU_ROT_BIT_90 | IPU_ROT_BIT_VFLIP),
73 IPU_ROTATE_90_RIGHT_HFLIP, 77 IPU_ROTATE_90_RIGHT_HFLIP = (IPU_ROT_BIT_90 | IPU_ROT_BIT_HFLIP),
74 IPU_ROTATE_90_LEFT, 78 IPU_ROTATE_90_LEFT = (IPU_ROT_BIT_90 |
79 IPU_ROT_BIT_VFLIP | IPU_ROT_BIT_HFLIP),
75}; 80};
76 81
82/* 90-degree rotations require the IRT unit */
83#define ipu_rot_mode_is_irt(m) (((m) & IPU_ROT_BIT_90) != 0)
84
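[Worked values following directly from the bit definitions above: IPU_ROTATE_180 is VFLIP|HFLIP = 0x3, so ipu_rot_mode_is_irt() is false and the flips are programmed into channel parameter memory instead (see the ipu_cpmem_set_rotation() call in ipu-image-convert.c); IPU_ROTATE_90_LEFT is 0x7, which includes IPU_ROT_BIT_90, so the IRT is required and the image converter swaps tile width and height.]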
77enum ipu_color_space { 85enum ipu_color_space {
78 IPUV3_COLORSPACE_RGB, 86 IPUV3_COLORSPACE_RGB,
79 IPUV3_COLORSPACE_YUV, 87 IPUV3_COLORSPACE_YUV,
80 IPUV3_COLORSPACE_UNKNOWN, 88 IPUV3_COLORSPACE_UNKNOWN,
81}; 89};
82 90
91/*
92 * Enumeration of VDI MOTION select
93 */
94enum ipu_motion_sel {
95 MOTION_NONE = 0,
96 LOW_MOTION,
97 MED_MOTION,
98 HIGH_MOTION,
99};
100
83struct ipuv3_channel; 101struct ipuv3_channel;
84 102
85enum ipu_channel_irq { 103enum ipu_channel_irq {
@@ -97,6 +115,14 @@ enum ipu_channel_irq {
97#define IPUV3_CHANNEL_CSI2 2 115#define IPUV3_CHANNEL_CSI2 2
98#define IPUV3_CHANNEL_CSI3 3 116#define IPUV3_CHANNEL_CSI3 3
99#define IPUV3_CHANNEL_VDI_MEM_IC_VF 5 117#define IPUV3_CHANNEL_VDI_MEM_IC_VF 5
118/*
119 * NOTE: channels 6,7 are unused in the IPU and are not IDMAC channels,
120 * but the direct CSI->VDI linking is handled the same way as IDMAC
121 * channel linking in the FSU via the IPU_FS_PROC_FLOW registers, so
122 * these channel names are used to support the direct CSI->VDI link.
123 */
124#define IPUV3_CHANNEL_CSI_DIRECT 6
125#define IPUV3_CHANNEL_CSI_VDI_PREV 7
100#define IPUV3_CHANNEL_MEM_VDI_PREV 8 126#define IPUV3_CHANNEL_MEM_VDI_PREV 8
101#define IPUV3_CHANNEL_MEM_VDI_CUR 9 127#define IPUV3_CHANNEL_MEM_VDI_CUR 9
102#define IPUV3_CHANNEL_MEM_VDI_NEXT 10 128#define IPUV3_CHANNEL_MEM_VDI_NEXT 10
@@ -133,6 +159,7 @@ enum ipu_channel_irq {
133#define IPUV3_CHANNEL_ROT_PP_MEM 50 159#define IPUV3_CHANNEL_ROT_PP_MEM 50
134#define IPUV3_CHANNEL_MEM_BG_SYNC_ALPHA 51 160#define IPUV3_CHANNEL_MEM_BG_SYNC_ALPHA 51
135#define IPUV3_CHANNEL_MEM_BG_ASYNC_ALPHA 52 161#define IPUV3_CHANNEL_MEM_BG_ASYNC_ALPHA 52
162#define IPUV3_NUM_CHANNELS 64
136 163
137int ipu_map_irq(struct ipu_soc *ipu, int irq); 164int ipu_map_irq(struct ipu_soc *ipu, int irq);
138int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel, 165int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
@@ -176,6 +203,10 @@ int ipu_idmac_get_current_buffer(struct ipuv3_channel *channel);
176bool ipu_idmac_buffer_is_ready(struct ipuv3_channel *channel, u32 buf_num); 203bool ipu_idmac_buffer_is_ready(struct ipuv3_channel *channel, u32 buf_num);
177void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num); 204void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num);
178void ipu_idmac_clear_buffer(struct ipuv3_channel *channel, u32 buf_num); 205void ipu_idmac_clear_buffer(struct ipuv3_channel *channel, u32 buf_num);
206int ipu_fsu_link(struct ipu_soc *ipu, int src_ch, int sink_ch);
207int ipu_fsu_unlink(struct ipu_soc *ipu, int src_ch, int sink_ch);
208int ipu_idmac_link(struct ipuv3_channel *src, struct ipuv3_channel *sink);
209int ipu_idmac_unlink(struct ipuv3_channel *src, struct ipuv3_channel *sink);
179 210
180/* 211/*
181 * IPU Channel Parameter Memory (cpmem) functions 212 * IPU Channel Parameter Memory (cpmem) functions
@@ -335,6 +366,19 @@ void ipu_ic_put(struct ipu_ic *ic);
335void ipu_ic_dump(struct ipu_ic *ic); 366void ipu_ic_dump(struct ipu_ic *ic);
336 367
337/* 368/*
369 * IPU Video De-Interlacer (vdi) functions
370 */
371struct ipu_vdi;
372void ipu_vdi_set_field_order(struct ipu_vdi *vdi, v4l2_std_id std, u32 field);
373void ipu_vdi_set_motion(struct ipu_vdi *vdi, enum ipu_motion_sel motion_sel);
374void ipu_vdi_setup(struct ipu_vdi *vdi, u32 code, int xres, int yres);
375void ipu_vdi_unsetup(struct ipu_vdi *vdi);
376int ipu_vdi_enable(struct ipu_vdi *vdi);
377int ipu_vdi_disable(struct ipu_vdi *vdi);
378struct ipu_vdi *ipu_vdi_get(struct ipu_soc *ipu);
379void ipu_vdi_put(struct ipu_vdi *vdi);
380
381/*
338 * IPU Sensor Multiple FIFO Controller (SMFC) functions 382 * IPU Sensor Multiple FIFO Controller (SMFC) functions
339 */ 383 */
340struct ipu_smfc *ipu_smfc_get(struct ipu_soc *ipu, unsigned int chno); 384struct ipu_smfc *ipu_smfc_get(struct ipu_soc *ipu, unsigned int chno);