aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2018-05-15 01:37:07 -0400
committerDave Airlie <airlied@redhat.com>2018-05-15 01:37:07 -0400
commit444ac87becd8a2ff76f9e4194dd98da4f5d5586d (patch)
tree2ddcf9ae5622a2f69e1cf575772cf9a3db6c34ba /drivers/gpu/drm
parentba72385b3319752967fdec96e19d45b71d217586 (diff)
parente8929999fa718da5758ff877592f33fea368ca8a (diff)
Merge tag 'exynos-drm-next-for-v4.18' of git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos into drm-next
- Add S5PV210 FIMD variant support. - Add IPP v2 framework. . it is a rewritten version of the Exynos mem-to-mem image processing framework which supprts color space conversion, image up/down-scaling and rotation. This new version replaces existing userspace API with new easy-to-use and simple ones so we have already applied the use of these API to real user, Tizen Platform[1], and also makes existing Scaler, FIMC, GScaler and Rotator drivers to use IPP v2 core API. And below are patch lists we have applied to a real user, https://git.tizen.org/cgit/platform/adaptation/samsung_exynos/libtdm-exynos/log/?h=tizen&qt=grep&q=ipp https://git.tizen.org/cgit/platform/adaptation/samsung_exynos/libtdm-exynos/commit/?h=tizen&id=b59be207365d10efd489e6f71c8a045b558c44fe https://git.tizen.org/cgit/platform/kernel/linux-exynos/log/?h=tizen&qt=grep&q=ipp TDM(Tizen Display Manager) is a Display HAL for Tizen platform. Ps. Only real user using IPP API is Tizen. [1] https://www.tizen.org/ - Two cleanups . One is to just remove mode_set callback from MIPI-DSI driver because drm_display_mode data is already available from crtc atomic state. . And other is to just use new return type, vm_fault_t for page fault handler. Signed-off-by: Dave Airlie <airlied@redhat.com> # gpg: Signature made Mon 14 May 2018 14:23:53 AEST # gpg: using RSA key 573834890C4312B8 # gpg: Can't check signature: public key not found Link: https://patchwork.freedesktop.org/patch/msgid/1526276453-29879-1-git-send-email-inki.dae@samsung.com
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--drivers/gpu/drm/bridge/dumb-vga-dac.c4
-rw-r--r--drivers/gpu/drm/exynos/Kconfig18
-rw-r--r--drivers/gpu/drm/exynos/Makefile2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c35
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h10
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c40
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c1080
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.h23
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c21
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c1075
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.h24
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c916
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.h175
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c758
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_scaler.c694
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c22
-rw-r--r--drivers/gpu/drm/exynos/regs-mixer.h1
-rw-r--r--drivers/gpu/drm/exynos/regs-scaler.h426
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c46
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c31
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c14
25 files changed, 3253 insertions, 2176 deletions
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
index 498d5948d1a8..9837c8d69e69 100644
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c
@@ -56,7 +56,9 @@ static int dumb_vga_get_modes(struct drm_connector *connector)
56 } 56 }
57 57
58 drm_mode_connector_update_edid_property(connector, edid); 58 drm_mode_connector_update_edid_property(connector, edid);
59 return drm_add_edid_modes(connector, edid); 59 ret = drm_add_edid_modes(connector, edid);
60 kfree(edid);
61 return ret;
60 62
61fallback: 63fallback:
62 /* 64 /*
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 735ce47688f9..208bc27be3cc 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -1,6 +1,6 @@
1config DRM_EXYNOS 1config DRM_EXYNOS
2 tristate "DRM Support for Samsung SoC EXYNOS Series" 2 tristate "DRM Support for Samsung SoC EXYNOS Series"
3 depends on OF && DRM && (ARCH_S3C64XX || ARCH_EXYNOS || ARCH_MULTIPLATFORM) 3 depends on OF && DRM && (ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM)
4 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
5 select VIDEOMODE_HELPERS 5 select VIDEOMODE_HELPERS
6 select SND_SOC_HDMI_CODEC if SND_SOC 6 select SND_SOC_HDMI_CODEC if SND_SOC
@@ -95,21 +95,31 @@ config DRM_EXYNOS_G2D
95 help 95 help
96 Choose this option if you want to use Exynos G2D for DRM. 96 Choose this option if you want to use Exynos G2D for DRM.
97 97
98config DRM_EXYNOS_IPP
99 bool
100
98config DRM_EXYNOS_FIMC 101config DRM_EXYNOS_FIMC
99 bool "FIMC" 102 bool "FIMC"
100 depends on BROKEN && MFD_SYSCON 103 select DRM_EXYNOS_IPP
101 help 104 help
102 Choose this option if you want to use Exynos FIMC for DRM. 105 Choose this option if you want to use Exynos FIMC for DRM.
103 106
104config DRM_EXYNOS_ROTATOR 107config DRM_EXYNOS_ROTATOR
105 bool "Rotator" 108 bool "Rotator"
106 depends on BROKEN 109 select DRM_EXYNOS_IPP
107 help 110 help
108 Choose this option if you want to use Exynos Rotator for DRM. 111 Choose this option if you want to use Exynos Rotator for DRM.
109 112
113config DRM_EXYNOS_SCALER
114 bool "Scaler"
115 select DRM_EXYNOS_IPP
116 help
117 Choose this option if you want to use Exynos Scaler for DRM.
118
110config DRM_EXYNOS_GSC 119config DRM_EXYNOS_GSC
111 bool "GScaler" 120 bool "GScaler"
112 depends on BROKEN && ARCH_EXYNOS5 && VIDEO_SAMSUNG_EXYNOS_GSC=n 121 depends on VIDEO_SAMSUNG_EXYNOS_GSC=n
122 select DRM_EXYNOS_IPP
113 help 123 help
114 Choose this option if you want to use Exynos GSC for DRM. 124 Choose this option if you want to use Exynos GSC for DRM.
115 125
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index a51c5459bb13..3b323f1e0475 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -18,8 +18,10 @@ exynosdrm-$(CONFIG_DRM_EXYNOS_MIXER) += exynos_mixer.o
18exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o 18exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o
19exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o 19exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o
20exynosdrm-$(CONFIG_DRM_EXYNOS_G2D) += exynos_drm_g2d.o 20exynosdrm-$(CONFIG_DRM_EXYNOS_G2D) += exynos_drm_g2d.o
21exynosdrm-$(CONFIG_DRM_EXYNOS_IPP) += exynos_drm_ipp.o
21exynosdrm-$(CONFIG_DRM_EXYNOS_FIMC) += exynos_drm_fimc.o 22exynosdrm-$(CONFIG_DRM_EXYNOS_FIMC) += exynos_drm_fimc.o
22exynosdrm-$(CONFIG_DRM_EXYNOS_ROTATOR) += exynos_drm_rotator.o 23exynosdrm-$(CONFIG_DRM_EXYNOS_ROTATOR) += exynos_drm_rotator.o
24exynosdrm-$(CONFIG_DRM_EXYNOS_SCALER) += exynos_drm_scaler.o
23exynosdrm-$(CONFIG_DRM_EXYNOS_GSC) += exynos_drm_gsc.o 25exynosdrm-$(CONFIG_DRM_EXYNOS_GSC) += exynos_drm_gsc.o
24exynosdrm-$(CONFIG_DRM_EXYNOS_MIC) += exynos_drm_mic.o 26exynosdrm-$(CONFIG_DRM_EXYNOS_MIC) += exynos_drm_mic.o
25 27
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 39284bb7c2c2..a81b4a5e24a7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -27,15 +27,23 @@
27#include "exynos_drm_fb.h" 27#include "exynos_drm_fb.h"
28#include "exynos_drm_gem.h" 28#include "exynos_drm_gem.h"
29#include "exynos_drm_plane.h" 29#include "exynos_drm_plane.h"
30#include "exynos_drm_ipp.h"
30#include "exynos_drm_vidi.h" 31#include "exynos_drm_vidi.h"
31#include "exynos_drm_g2d.h" 32#include "exynos_drm_g2d.h"
32#include "exynos_drm_iommu.h" 33#include "exynos_drm_iommu.h"
33 34
34#define DRIVER_NAME "exynos" 35#define DRIVER_NAME "exynos"
35#define DRIVER_DESC "Samsung SoC DRM" 36#define DRIVER_DESC "Samsung SoC DRM"
36#define DRIVER_DATE "20110530" 37#define DRIVER_DATE "20180330"
38
39/*
40 * Interface history:
41 *
42 * 1.0 - Original version
43 * 1.1 - Upgrade IPP driver to version 2.0
44 */
37#define DRIVER_MAJOR 1 45#define DRIVER_MAJOR 1
38#define DRIVER_MINOR 0 46#define DRIVER_MINOR 1
39 47
40static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) 48static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
41{ 49{
@@ -88,6 +96,16 @@ static const struct drm_ioctl_desc exynos_ioctls[] = {
88 DRM_AUTH | DRM_RENDER_ALLOW), 96 DRM_AUTH | DRM_RENDER_ALLOW),
89 DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC, exynos_g2d_exec_ioctl, 97 DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC, exynos_g2d_exec_ioctl,
90 DRM_AUTH | DRM_RENDER_ALLOW), 98 DRM_AUTH | DRM_RENDER_ALLOW),
99 DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_RESOURCES,
100 exynos_drm_ipp_get_res_ioctl,
101 DRM_AUTH | DRM_RENDER_ALLOW),
102 DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_CAPS, exynos_drm_ipp_get_caps_ioctl,
103 DRM_AUTH | DRM_RENDER_ALLOW),
104 DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_LIMITS,
105 exynos_drm_ipp_get_limits_ioctl,
106 DRM_AUTH | DRM_RENDER_ALLOW),
107 DRM_IOCTL_DEF_DRV(EXYNOS_IPP_COMMIT, exynos_drm_ipp_commit_ioctl,
108 DRM_AUTH | DRM_RENDER_ALLOW),
91}; 109};
92 110
93static const struct file_operations exynos_drm_driver_fops = { 111static const struct file_operations exynos_drm_driver_fops = {
@@ -184,6 +202,7 @@ struct exynos_drm_driver_info {
184#define DRM_COMPONENT_DRIVER BIT(0) /* supports component framework */ 202#define DRM_COMPONENT_DRIVER BIT(0) /* supports component framework */
185#define DRM_VIRTUAL_DEVICE BIT(1) /* create virtual platform device */ 203#define DRM_VIRTUAL_DEVICE BIT(1) /* create virtual platform device */
186#define DRM_DMA_DEVICE BIT(2) /* can be used for dma allocations */ 204#define DRM_DMA_DEVICE BIT(2) /* can be used for dma allocations */
205#define DRM_FIMC_DEVICE BIT(3) /* devices shared with V4L2 subsystem */
187 206
188#define DRV_PTR(drv, cond) (IS_ENABLED(cond) ? &drv : NULL) 207#define DRV_PTR(drv, cond) (IS_ENABLED(cond) ? &drv : NULL)
189 208
@@ -223,10 +242,16 @@ static struct exynos_drm_driver_info exynos_drm_drivers[] = {
223 DRV_PTR(g2d_driver, CONFIG_DRM_EXYNOS_G2D), 242 DRV_PTR(g2d_driver, CONFIG_DRM_EXYNOS_G2D),
224 }, { 243 }, {
225 DRV_PTR(fimc_driver, CONFIG_DRM_EXYNOS_FIMC), 244 DRV_PTR(fimc_driver, CONFIG_DRM_EXYNOS_FIMC),
245 DRM_COMPONENT_DRIVER | DRM_FIMC_DEVICE,
226 }, { 246 }, {
227 DRV_PTR(rotator_driver, CONFIG_DRM_EXYNOS_ROTATOR), 247 DRV_PTR(rotator_driver, CONFIG_DRM_EXYNOS_ROTATOR),
248 DRM_COMPONENT_DRIVER
249 }, {
250 DRV_PTR(scaler_driver, CONFIG_DRM_EXYNOS_SCALER),
251 DRM_COMPONENT_DRIVER
228 }, { 252 }, {
229 DRV_PTR(gsc_driver, CONFIG_DRM_EXYNOS_GSC), 253 DRV_PTR(gsc_driver, CONFIG_DRM_EXYNOS_GSC),
254 DRM_COMPONENT_DRIVER
230 }, { 255 }, {
231 &exynos_drm_platform_driver, 256 &exynos_drm_platform_driver,
232 DRM_VIRTUAL_DEVICE 257 DRM_VIRTUAL_DEVICE
@@ -254,7 +279,11 @@ static struct component_match *exynos_drm_match_add(struct device *dev)
254 &info->driver->driver, 279 &info->driver->driver,
255 (void *)platform_bus_type.match))) { 280 (void *)platform_bus_type.match))) {
256 put_device(p); 281 put_device(p);
257 component_match_add(dev, &match, compare_dev, d); 282
283 if (!(info->flags & DRM_FIMC_DEVICE) ||
284 exynos_drm_check_fimc_device(d) == 0)
285 component_match_add(dev, &match,
286 compare_dev, d);
258 p = d; 287 p = d;
259 } 288 }
260 put_device(p); 289 put_device(p);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 075957cb6ba1..0f6d079a55c9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -273,6 +273,15 @@ static inline int exynos_dpi_bind(struct drm_device *dev,
273} 273}
274#endif 274#endif
275 275
276#ifdef CONFIG_DRM_EXYNOS_FIMC
277int exynos_drm_check_fimc_device(struct device *dev);
278#else
279static inline int exynos_drm_check_fimc_device(struct device *dev)
280{
281 return 0;
282}
283#endif
284
276int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, 285int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
277 bool nonblock); 286 bool nonblock);
278 287
@@ -288,6 +297,7 @@ extern struct platform_driver vidi_driver;
288extern struct platform_driver g2d_driver; 297extern struct platform_driver g2d_driver;
289extern struct platform_driver fimc_driver; 298extern struct platform_driver fimc_driver;
290extern struct platform_driver rotator_driver; 299extern struct platform_driver rotator_driver;
300extern struct platform_driver scaler_driver;
291extern struct platform_driver gsc_driver; 301extern struct platform_driver gsc_driver;
292extern struct platform_driver mic_driver; 302extern struct platform_driver mic_driver;
293#endif 303#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 7904ffa9abfb..eae44fd714f0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -270,7 +270,6 @@ struct exynos_dsi {
270 u32 lanes; 270 u32 lanes;
271 u32 mode_flags; 271 u32 mode_flags;
272 u32 format; 272 u32 format;
273 struct videomode vm;
274 273
275 int state; 274 int state;
276 struct drm_property *brightness; 275 struct drm_property *brightness;
@@ -881,30 +880,30 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi)
881 880
882static void exynos_dsi_set_display_mode(struct exynos_dsi *dsi) 881static void exynos_dsi_set_display_mode(struct exynos_dsi *dsi)
883{ 882{
884 struct videomode *vm = &dsi->vm; 883 struct drm_display_mode *m = &dsi->encoder.crtc->state->adjusted_mode;
885 unsigned int num_bits_resol = dsi->driver_data->num_bits_resol; 884 unsigned int num_bits_resol = dsi->driver_data->num_bits_resol;
886 u32 reg; 885 u32 reg;
887 886
888 if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) { 887 if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
889 reg = DSIM_CMD_ALLOW(0xf) 888 reg = DSIM_CMD_ALLOW(0xf)
890 | DSIM_STABLE_VFP(vm->vfront_porch) 889 | DSIM_STABLE_VFP(m->vsync_start - m->vdisplay)
891 | DSIM_MAIN_VBP(vm->vback_porch); 890 | DSIM_MAIN_VBP(m->vtotal - m->vsync_end);
892 exynos_dsi_write(dsi, DSIM_MVPORCH_REG, reg); 891 exynos_dsi_write(dsi, DSIM_MVPORCH_REG, reg);
893 892
894 reg = DSIM_MAIN_HFP(vm->hfront_porch) 893 reg = DSIM_MAIN_HFP(m->hsync_start - m->hdisplay)
895 | DSIM_MAIN_HBP(vm->hback_porch); 894 | DSIM_MAIN_HBP(m->htotal - m->hsync_end);
896 exynos_dsi_write(dsi, DSIM_MHPORCH_REG, reg); 895 exynos_dsi_write(dsi, DSIM_MHPORCH_REG, reg);
897 896
898 reg = DSIM_MAIN_VSA(vm->vsync_len) 897 reg = DSIM_MAIN_VSA(m->vsync_end - m->vsync_start)
899 | DSIM_MAIN_HSA(vm->hsync_len); 898 | DSIM_MAIN_HSA(m->hsync_end - m->hsync_start);
900 exynos_dsi_write(dsi, DSIM_MSYNC_REG, reg); 899 exynos_dsi_write(dsi, DSIM_MSYNC_REG, reg);
901 } 900 }
902 reg = DSIM_MAIN_HRESOL(vm->hactive, num_bits_resol) | 901 reg = DSIM_MAIN_HRESOL(m->hdisplay, num_bits_resol) |
903 DSIM_MAIN_VRESOL(vm->vactive, num_bits_resol); 902 DSIM_MAIN_VRESOL(m->vdisplay, num_bits_resol);
904 903
905 exynos_dsi_write(dsi, DSIM_MDRESOL_REG, reg); 904 exynos_dsi_write(dsi, DSIM_MDRESOL_REG, reg);
906 905
907 dev_dbg(dsi->dev, "LCD size = %dx%d\n", vm->hactive, vm->vactive); 906 dev_dbg(dsi->dev, "LCD size = %dx%d\n", m->hdisplay, m->vdisplay);
908} 907}
909 908
910static void exynos_dsi_set_display_enable(struct exynos_dsi *dsi, bool enable) 909static void exynos_dsi_set_display_enable(struct exynos_dsi *dsi, bool enable)
@@ -1485,26 +1484,7 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
1485 return 0; 1484 return 0;
1486} 1485}
1487 1486
1488static void exynos_dsi_mode_set(struct drm_encoder *encoder,
1489 struct drm_display_mode *mode,
1490 struct drm_display_mode *adjusted_mode)
1491{
1492 struct exynos_dsi *dsi = encoder_to_dsi(encoder);
1493 struct videomode *vm = &dsi->vm;
1494 struct drm_display_mode *m = adjusted_mode;
1495
1496 vm->hactive = m->hdisplay;
1497 vm->vactive = m->vdisplay;
1498 vm->vfront_porch = m->vsync_start - m->vdisplay;
1499 vm->vback_porch = m->vtotal - m->vsync_end;
1500 vm->vsync_len = m->vsync_end - m->vsync_start;
1501 vm->hfront_porch = m->hsync_start - m->hdisplay;
1502 vm->hback_porch = m->htotal - m->hsync_end;
1503 vm->hsync_len = m->hsync_end - m->hsync_start;
1504}
1505
1506static const struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = { 1487static const struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = {
1507 .mode_set = exynos_dsi_mode_set,
1508 .enable = exynos_dsi_enable, 1488 .enable = exynos_dsi_enable,
1509 .disable = exynos_dsi_disable, 1489 .disable = exynos_dsi_disable,
1510}; 1490};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 5b18b5c5fdf2..4dfbfc7f3b84 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -12,6 +12,7 @@
12 * 12 *
13 */ 13 */
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/component.h>
15#include <linux/platform_device.h> 16#include <linux/platform_device.h>
16#include <linux/mfd/syscon.h> 17#include <linux/mfd/syscon.h>
17#include <linux/regmap.h> 18#include <linux/regmap.h>
@@ -24,8 +25,8 @@
24#include <drm/exynos_drm.h> 25#include <drm/exynos_drm.h>
25#include "regs-fimc.h" 26#include "regs-fimc.h"
26#include "exynos_drm_drv.h" 27#include "exynos_drm_drv.h"
28#include "exynos_drm_iommu.h"
27#include "exynos_drm_ipp.h" 29#include "exynos_drm_ipp.h"
28#include "exynos_drm_fimc.h"
29 30
30/* 31/*
31 * FIMC stands for Fully Interactive Mobile Camera and 32 * FIMC stands for Fully Interactive Mobile Camera and
@@ -33,23 +34,6 @@
33 * input DMA reads image data from the memory. 34 * input DMA reads image data from the memory.
34 * output DMA writes image data to memory. 35 * output DMA writes image data to memory.
35 * FIMC supports image rotation and image effect functions. 36 * FIMC supports image rotation and image effect functions.
36 *
37 * M2M operation : supports crop/scale/rotation/csc so on.
38 * Memory ----> FIMC H/W ----> Memory.
39 * Writeback operation : supports cloned screen with FIMD.
40 * FIMD ----> FIMC H/W ----> Memory.
41 * Output operation : supports direct display using local path.
42 * Memory ----> FIMC H/W ----> FIMD.
43 */
44
45/*
46 * TODO
47 * 1. check suspend/resume api if needed.
48 * 2. need to check use case platform_device_id.
49 * 3. check src/dst size with, height.
50 * 4. added check_prepare api for right register.
51 * 5. need to add supported list in prop_list.
52 * 6. check prescaler/scaler optimization.
53 */ 37 */
54 38
55#define FIMC_MAX_DEVS 4 39#define FIMC_MAX_DEVS 4
@@ -59,29 +43,19 @@
59#define FIMC_BUF_STOP 1 43#define FIMC_BUF_STOP 1
60#define FIMC_BUF_START 2 44#define FIMC_BUF_START 2
61#define FIMC_WIDTH_ITU_709 1280 45#define FIMC_WIDTH_ITU_709 1280
62#define FIMC_REFRESH_MAX 60 46#define FIMC_AUTOSUSPEND_DELAY 2000
63#define FIMC_REFRESH_MIN 12 47
64#define FIMC_CROP_MAX 8192 48static unsigned int fimc_mask = 0xc;
65#define FIMC_CROP_MIN 32 49module_param_named(fimc_devs, fimc_mask, uint, 0644);
66#define FIMC_SCALE_MAX 4224 50MODULE_PARM_DESC(fimc_devs, "Alias mask for assigning FIMC devices to Exynos DRM");
67#define FIMC_SCALE_MIN 32
68 51
69#define get_fimc_context(dev) platform_get_drvdata(to_platform_device(dev)) 52#define get_fimc_context(dev) platform_get_drvdata(to_platform_device(dev))
70#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
71 struct fimc_context, ippdrv);
72enum fimc_wb {
73 FIMC_WB_NONE,
74 FIMC_WB_A,
75 FIMC_WB_B,
76};
77 53
78enum { 54enum {
79 FIMC_CLK_LCLK, 55 FIMC_CLK_LCLK,
80 FIMC_CLK_GATE, 56 FIMC_CLK_GATE,
81 FIMC_CLK_WB_A, 57 FIMC_CLK_WB_A,
82 FIMC_CLK_WB_B, 58 FIMC_CLK_WB_B,
83 FIMC_CLK_MUX,
84 FIMC_CLK_PARENT,
85 FIMC_CLKS_MAX 59 FIMC_CLKS_MAX
86}; 60};
87 61
@@ -90,12 +64,8 @@ static const char * const fimc_clock_names[] = {
90 [FIMC_CLK_GATE] = "fimc", 64 [FIMC_CLK_GATE] = "fimc",
91 [FIMC_CLK_WB_A] = "pxl_async0", 65 [FIMC_CLK_WB_A] = "pxl_async0",
92 [FIMC_CLK_WB_B] = "pxl_async1", 66 [FIMC_CLK_WB_B] = "pxl_async1",
93 [FIMC_CLK_MUX] = "mux",
94 [FIMC_CLK_PARENT] = "parent",
95}; 67};
96 68
97#define FIMC_DEFAULT_LCLK_FREQUENCY 133000000UL
98
99/* 69/*
100 * A structure of scaler. 70 * A structure of scaler.
101 * 71 *
@@ -107,7 +77,7 @@ static const char * const fimc_clock_names[] = {
107 * @vratio: vertical ratio. 77 * @vratio: vertical ratio.
108 */ 78 */
109struct fimc_scaler { 79struct fimc_scaler {
110 bool range; 80 bool range;
111 bool bypass; 81 bool bypass;
112 bool up_h; 82 bool up_h;
113 bool up_v; 83 bool up_v;
@@ -116,56 +86,32 @@ struct fimc_scaler {
116}; 86};
117 87
118/* 88/*
119 * A structure of scaler capability.
120 *
121 * find user manual table 43-1.
122 * @in_hori: scaler input horizontal size.
123 * @bypass: scaler bypass mode.
124 * @dst_h_wo_rot: target horizontal size without output rotation.
125 * @dst_h_rot: target horizontal size with output rotation.
126 * @rl_w_wo_rot: real width without input rotation.
127 * @rl_h_rot: real height without output rotation.
128 */
129struct fimc_capability {
130 /* scaler */
131 u32 in_hori;
132 u32 bypass;
133 /* output rotator */
134 u32 dst_h_wo_rot;
135 u32 dst_h_rot;
136 /* input rotator */
137 u32 rl_w_wo_rot;
138 u32 rl_h_rot;
139};
140
141/*
142 * A structure of fimc context. 89 * A structure of fimc context.
143 * 90 *
144 * @ippdrv: prepare initialization using ippdrv.
145 * @regs_res: register resources. 91 * @regs_res: register resources.
146 * @regs: memory mapped io registers. 92 * @regs: memory mapped io registers.
147 * @lock: locking of operations. 93 * @lock: locking of operations.
148 * @clocks: fimc clocks. 94 * @clocks: fimc clocks.
149 * @clk_frequency: LCLK clock frequency.
150 * @sysreg: handle to SYSREG block regmap.
151 * @sc: scaler infomations. 95 * @sc: scaler infomations.
152 * @pol: porarity of writeback. 96 * @pol: porarity of writeback.
153 * @id: fimc id. 97 * @id: fimc id.
154 * @irq: irq number. 98 * @irq: irq number.
155 * @suspended: qos operations.
156 */ 99 */
157struct fimc_context { 100struct fimc_context {
158 struct exynos_drm_ippdrv ippdrv; 101 struct exynos_drm_ipp ipp;
102 struct drm_device *drm_dev;
103 struct device *dev;
104 struct exynos_drm_ipp_task *task;
105 struct exynos_drm_ipp_formats *formats;
106 unsigned int num_formats;
107
159 struct resource *regs_res; 108 struct resource *regs_res;
160 void __iomem *regs; 109 void __iomem *regs;
161 spinlock_t lock; 110 spinlock_t lock;
162 struct clk *clocks[FIMC_CLKS_MAX]; 111 struct clk *clocks[FIMC_CLKS_MAX];
163 u32 clk_frequency;
164 struct regmap *sysreg;
165 struct fimc_scaler sc; 112 struct fimc_scaler sc;
166 int id; 113 int id;
167 int irq; 114 int irq;
168 bool suspended;
169}; 115};
170 116
171static u32 fimc_read(struct fimc_context *ctx, u32 reg) 117static u32 fimc_read(struct fimc_context *ctx, u32 reg)
@@ -217,19 +163,10 @@ static void fimc_sw_reset(struct fimc_context *ctx)
217 fimc_write(ctx, 0x0, EXYNOS_CIFCNTSEQ); 163 fimc_write(ctx, 0x0, EXYNOS_CIFCNTSEQ);
218} 164}
219 165
220static int fimc_set_camblk_fimd0_wb(struct fimc_context *ctx) 166static void fimc_set_type_ctrl(struct fimc_context *ctx)
221{
222 return regmap_update_bits(ctx->sysreg, SYSREG_CAMERA_BLK,
223 SYSREG_FIMD0WB_DEST_MASK,
224 ctx->id << SYSREG_FIMD0WB_DEST_SHIFT);
225}
226
227static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
228{ 167{
229 u32 cfg; 168 u32 cfg;
230 169
231 DRM_DEBUG_KMS("wb[%d]\n", wb);
232
233 cfg = fimc_read(ctx, EXYNOS_CIGCTRL); 170 cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
234 cfg &= ~(EXYNOS_CIGCTRL_TESTPATTERN_MASK | 171 cfg &= ~(EXYNOS_CIGCTRL_TESTPATTERN_MASK |
235 EXYNOS_CIGCTRL_SELCAM_ITU_MASK | 172 EXYNOS_CIGCTRL_SELCAM_ITU_MASK |
@@ -238,23 +175,10 @@ static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
238 EXYNOS_CIGCTRL_SELWB_CAMIF_MASK | 175 EXYNOS_CIGCTRL_SELWB_CAMIF_MASK |
239 EXYNOS_CIGCTRL_SELWRITEBACK_MASK); 176 EXYNOS_CIGCTRL_SELWRITEBACK_MASK);
240 177
241 switch (wb) { 178 cfg |= (EXYNOS_CIGCTRL_SELCAM_ITU_A |
242 case FIMC_WB_A: 179 EXYNOS_CIGCTRL_SELWRITEBACK_A |
243 cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_A | 180 EXYNOS_CIGCTRL_SELCAM_MIPI_A |
244 EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK); 181 EXYNOS_CIGCTRL_SELCAM_FIMC_ITU);
245 break;
246 case FIMC_WB_B:
247 cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_B |
248 EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
249 break;
250 case FIMC_WB_NONE:
251 default:
252 cfg |= (EXYNOS_CIGCTRL_SELCAM_ITU_A |
253 EXYNOS_CIGCTRL_SELWRITEBACK_A |
254 EXYNOS_CIGCTRL_SELCAM_MIPI_A |
255 EXYNOS_CIGCTRL_SELCAM_FIMC_ITU);
256 break;
257 }
258 182
259 fimc_write(ctx, cfg, EXYNOS_CIGCTRL); 183 fimc_write(ctx, cfg, EXYNOS_CIGCTRL);
260} 184}
@@ -296,7 +220,6 @@ static void fimc_clear_irq(struct fimc_context *ctx)
296 220
297static bool fimc_check_ovf(struct fimc_context *ctx) 221static bool fimc_check_ovf(struct fimc_context *ctx)
298{ 222{
299 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
300 u32 status, flag; 223 u32 status, flag;
301 224
302 status = fimc_read(ctx, EXYNOS_CISTATUS); 225 status = fimc_read(ctx, EXYNOS_CISTATUS);
@@ -310,7 +233,7 @@ static bool fimc_check_ovf(struct fimc_context *ctx)
310 EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB | 233 EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
311 EXYNOS_CIWDOFST_CLROVFICR); 234 EXYNOS_CIWDOFST_CLROVFICR);
312 235
313 dev_err(ippdrv->dev, "occurred overflow at %d, status 0x%x.\n", 236 dev_err(ctx->dev, "occurred overflow at %d, status 0x%x.\n",
314 ctx->id, status); 237 ctx->id, status);
315 return true; 238 return true;
316 } 239 }
@@ -376,10 +299,8 @@ static void fimc_handle_lastend(struct fimc_context *ctx, bool enable)
376 fimc_write(ctx, cfg, EXYNOS_CIOCTRL); 299 fimc_write(ctx, cfg, EXYNOS_CIOCTRL);
377} 300}
378 301
379 302static void fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
380static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
381{ 303{
382 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
383 u32 cfg; 304 u32 cfg;
384 305
385 DRM_DEBUG_KMS("fmt[0x%x]\n", fmt); 306 DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
@@ -392,12 +313,12 @@ static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
392 case DRM_FORMAT_RGB565: 313 case DRM_FORMAT_RGB565:
393 cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB565; 314 cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB565;
394 fimc_write(ctx, cfg, EXYNOS_CISCCTRL); 315 fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
395 return 0; 316 return;
396 case DRM_FORMAT_RGB888: 317 case DRM_FORMAT_RGB888:
397 case DRM_FORMAT_XRGB8888: 318 case DRM_FORMAT_XRGB8888:
398 cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB888; 319 cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB888;
399 fimc_write(ctx, cfg, EXYNOS_CISCCTRL); 320 fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
400 return 0; 321 return;
401 default: 322 default:
402 /* bypass */ 323 /* bypass */
403 break; 324 break;
@@ -438,20 +359,13 @@ static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
438 cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CBCR | 359 cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CBCR |
439 EXYNOS_MSCTRL_C_INT_IN_2PLANE); 360 EXYNOS_MSCTRL_C_INT_IN_2PLANE);
440 break; 361 break;
441 default:
442 dev_err(ippdrv->dev, "invalid source yuv order 0x%x.\n", fmt);
443 return -EINVAL;
444 } 362 }
445 363
446 fimc_write(ctx, cfg, EXYNOS_MSCTRL); 364 fimc_write(ctx, cfg, EXYNOS_MSCTRL);
447
448 return 0;
449} 365}
450 366
451static int fimc_src_set_fmt(struct device *dev, u32 fmt) 367static void fimc_src_set_fmt(struct fimc_context *ctx, u32 fmt, bool tiled)
452{ 368{
453 struct fimc_context *ctx = get_fimc_context(dev);
454 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
455 u32 cfg; 369 u32 cfg;
456 370
457 DRM_DEBUG_KMS("fmt[0x%x]\n", fmt); 371 DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
@@ -485,9 +399,6 @@ static int fimc_src_set_fmt(struct device *dev, u32 fmt)
485 case DRM_FORMAT_NV21: 399 case DRM_FORMAT_NV21:
486 cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420; 400 cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
487 break; 401 break;
488 default:
489 dev_err(ippdrv->dev, "invalid source format 0x%x.\n", fmt);
490 return -EINVAL;
491 } 402 }
492 403
493 fimc_write(ctx, cfg, EXYNOS_MSCTRL); 404 fimc_write(ctx, cfg, EXYNOS_MSCTRL);
@@ -495,22 +406,22 @@ static int fimc_src_set_fmt(struct device *dev, u32 fmt)
495 cfg = fimc_read(ctx, EXYNOS_CIDMAPARAM); 406 cfg = fimc_read(ctx, EXYNOS_CIDMAPARAM);
496 cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK; 407 cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK;
497 408
498 cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR; 409 if (tiled)
410 cfg |= EXYNOS_CIDMAPARAM_R_MODE_64X32;
411 else
412 cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR;
499 413
500 fimc_write(ctx, cfg, EXYNOS_CIDMAPARAM); 414 fimc_write(ctx, cfg, EXYNOS_CIDMAPARAM);
501 415
502 return fimc_src_set_fmt_order(ctx, fmt); 416 fimc_src_set_fmt_order(ctx, fmt);
503} 417}
504 418
505static int fimc_src_set_transf(struct device *dev, 419static void fimc_src_set_transf(struct fimc_context *ctx, unsigned int rotation)
506 enum drm_exynos_degree degree,
507 enum drm_exynos_flip flip, bool *swap)
508{ 420{
509 struct fimc_context *ctx = get_fimc_context(dev); 421 unsigned int degree = rotation & DRM_MODE_ROTATE_MASK;
510 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
511 u32 cfg1, cfg2; 422 u32 cfg1, cfg2;
512 423
513 DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip); 424 DRM_DEBUG_KMS("rotation[%x]\n", rotation);
514 425
515 cfg1 = fimc_read(ctx, EXYNOS_MSCTRL); 426 cfg1 = fimc_read(ctx, EXYNOS_MSCTRL);
516 cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR | 427 cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR |
@@ -520,61 +431,56 @@ static int fimc_src_set_transf(struct device *dev,
520 cfg2 &= ~EXYNOS_CITRGFMT_INROT90_CLOCKWISE; 431 cfg2 &= ~EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
521 432
522 switch (degree) { 433 switch (degree) {
523 case EXYNOS_DRM_DEGREE_0: 434 case DRM_MODE_ROTATE_0:
524 if (flip & EXYNOS_DRM_FLIP_VERTICAL) 435 if (rotation & DRM_MODE_REFLECT_X)
525 cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR; 436 cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
526 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) 437 if (rotation & DRM_MODE_REFLECT_Y)
527 cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR; 438 cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
528 break; 439 break;
529 case EXYNOS_DRM_DEGREE_90: 440 case DRM_MODE_ROTATE_90:
530 cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE; 441 cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
531 if (flip & EXYNOS_DRM_FLIP_VERTICAL) 442 if (rotation & DRM_MODE_REFLECT_X)
532 cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR; 443 cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
533 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) 444 if (rotation & DRM_MODE_REFLECT_Y)
534 cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR; 445 cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
535 break; 446 break;
536 case EXYNOS_DRM_DEGREE_180: 447 case DRM_MODE_ROTATE_180:
537 cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR | 448 cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
538 EXYNOS_MSCTRL_FLIP_Y_MIRROR); 449 EXYNOS_MSCTRL_FLIP_Y_MIRROR);
539 if (flip & EXYNOS_DRM_FLIP_VERTICAL) 450 if (rotation & DRM_MODE_REFLECT_X)
540 cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR; 451 cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
541 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) 452 if (rotation & DRM_MODE_REFLECT_Y)
542 cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR; 453 cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
543 break; 454 break;
544 case EXYNOS_DRM_DEGREE_270: 455 case DRM_MODE_ROTATE_270:
545 cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR | 456 cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
546 EXYNOS_MSCTRL_FLIP_Y_MIRROR); 457 EXYNOS_MSCTRL_FLIP_Y_MIRROR);
547 cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE; 458 cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
548 if (flip & EXYNOS_DRM_FLIP_VERTICAL) 459 if (rotation & DRM_MODE_REFLECT_X)
549 cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR; 460 cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
550 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) 461 if (rotation & DRM_MODE_REFLECT_Y)
551 cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR; 462 cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
552 break; 463 break;
553 default:
554 dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
555 return -EINVAL;
556 } 464 }
557 465
558 fimc_write(ctx, cfg1, EXYNOS_MSCTRL); 466 fimc_write(ctx, cfg1, EXYNOS_MSCTRL);
559 fimc_write(ctx, cfg2, EXYNOS_CITRGFMT); 467 fimc_write(ctx, cfg2, EXYNOS_CITRGFMT);
560 *swap = (cfg2 & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) ? 1 : 0;
561
562 return 0;
563} 468}
564 469
565static int fimc_set_window(struct fimc_context *ctx, 470static void fimc_set_window(struct fimc_context *ctx,
566 struct drm_exynos_pos *pos, struct drm_exynos_sz *sz) 471 struct exynos_drm_ipp_buffer *buf)
567{ 472{
568 u32 cfg, h1, h2, v1, v2; 473 u32 cfg, h1, h2, v1, v2;
569 474
570 /* cropped image */ 475 /* cropped image */
571 h1 = pos->x; 476 h1 = buf->rect.x;
572 h2 = sz->hsize - pos->w - pos->x; 477 h2 = buf->buf.width - buf->rect.w - buf->rect.x;
573 v1 = pos->y; 478 v1 = buf->rect.y;
574 v2 = sz->vsize - pos->h - pos->y; 479 v2 = buf->buf.height - buf->rect.h - buf->rect.y;
575 480
576 DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n", 481 DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
577 pos->x, pos->y, pos->w, pos->h, sz->hsize, sz->vsize); 482 buf->rect.x, buf->rect.y, buf->rect.w, buf->rect.h,
483 buf->buf.width, buf->buf.height);
578 DRM_DEBUG_KMS("h1[%d]h2[%d]v1[%d]v2[%d]\n", h1, h2, v1, v2); 484 DRM_DEBUG_KMS("h1[%d]h2[%d]v1[%d]v2[%d]\n", h1, h2, v1, v2);
579 485
580 /* 486 /*
@@ -592,42 +498,30 @@ static int fimc_set_window(struct fimc_context *ctx,
592 cfg = (EXYNOS_CIWDOFST2_WINHOROFST2(h2) | 498 cfg = (EXYNOS_CIWDOFST2_WINHOROFST2(h2) |
593 EXYNOS_CIWDOFST2_WINVEROFST2(v2)); 499 EXYNOS_CIWDOFST2_WINVEROFST2(v2));
594 fimc_write(ctx, cfg, EXYNOS_CIWDOFST2); 500 fimc_write(ctx, cfg, EXYNOS_CIWDOFST2);
595
596 return 0;
597} 501}
598 502
599static int fimc_src_set_size(struct device *dev, int swap, 503static void fimc_src_set_size(struct fimc_context *ctx,
600 struct drm_exynos_pos *pos, struct drm_exynos_sz *sz) 504 struct exynos_drm_ipp_buffer *buf)
601{ 505{
602 struct fimc_context *ctx = get_fimc_context(dev);
603 struct drm_exynos_pos img_pos = *pos;
604 struct drm_exynos_sz img_sz = *sz;
605 u32 cfg; 506 u32 cfg;
606 507
607 DRM_DEBUG_KMS("swap[%d]hsize[%d]vsize[%d]\n", 508 DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height);
608 swap, sz->hsize, sz->vsize);
609 509
610 /* original size */ 510 /* original size */
611 cfg = (EXYNOS_ORGISIZE_HORIZONTAL(img_sz.hsize) | 511 cfg = (EXYNOS_ORGISIZE_HORIZONTAL(buf->buf.width) |
612 EXYNOS_ORGISIZE_VERTICAL(img_sz.vsize)); 512 EXYNOS_ORGISIZE_VERTICAL(buf->buf.height));
613 513
614 fimc_write(ctx, cfg, EXYNOS_ORGISIZE); 514 fimc_write(ctx, cfg, EXYNOS_ORGISIZE);
615 515
616 DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", pos->x, pos->y, pos->w, pos->h); 516 DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", buf->rect.x, buf->rect.y,
617 517 buf->rect.w, buf->rect.h);
618 if (swap) {
619 img_pos.w = pos->h;
620 img_pos.h = pos->w;
621 img_sz.hsize = sz->vsize;
622 img_sz.vsize = sz->hsize;
623 }
624 518
625 /* set input DMA image size */ 519 /* set input DMA image size */
626 cfg = fimc_read(ctx, EXYNOS_CIREAL_ISIZE); 520 cfg = fimc_read(ctx, EXYNOS_CIREAL_ISIZE);
627 cfg &= ~(EXYNOS_CIREAL_ISIZE_HEIGHT_MASK | 521 cfg &= ~(EXYNOS_CIREAL_ISIZE_HEIGHT_MASK |
628 EXYNOS_CIREAL_ISIZE_WIDTH_MASK); 522 EXYNOS_CIREAL_ISIZE_WIDTH_MASK);
629 cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(img_pos.w) | 523 cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(buf->rect.w) |
630 EXYNOS_CIREAL_ISIZE_HEIGHT(img_pos.h)); 524 EXYNOS_CIREAL_ISIZE_HEIGHT(buf->rect.h));
631 fimc_write(ctx, cfg, EXYNOS_CIREAL_ISIZE); 525 fimc_write(ctx, cfg, EXYNOS_CIREAL_ISIZE);
632 526
633 /* 527 /*
@@ -635,91 +529,34 @@ static int fimc_src_set_size(struct device *dev, int swap,
635 * for now, we support only ITU601 8 bit mode 529 * for now, we support only ITU601 8 bit mode
636 */ 530 */
637 cfg = (EXYNOS_CISRCFMT_ITU601_8BIT | 531 cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
638 EXYNOS_CISRCFMT_SOURCEHSIZE(img_sz.hsize) | 532 EXYNOS_CISRCFMT_SOURCEHSIZE(buf->buf.width) |
639 EXYNOS_CISRCFMT_SOURCEVSIZE(img_sz.vsize)); 533 EXYNOS_CISRCFMT_SOURCEVSIZE(buf->buf.height));
640 fimc_write(ctx, cfg, EXYNOS_CISRCFMT); 534 fimc_write(ctx, cfg, EXYNOS_CISRCFMT);
641 535
642 /* offset Y(RGB), Cb, Cr */ 536 /* offset Y(RGB), Cb, Cr */
643 cfg = (EXYNOS_CIIYOFF_HORIZONTAL(img_pos.x) | 537 cfg = (EXYNOS_CIIYOFF_HORIZONTAL(buf->rect.x) |
644 EXYNOS_CIIYOFF_VERTICAL(img_pos.y)); 538 EXYNOS_CIIYOFF_VERTICAL(buf->rect.y));
645 fimc_write(ctx, cfg, EXYNOS_CIIYOFF); 539 fimc_write(ctx, cfg, EXYNOS_CIIYOFF);
646 cfg = (EXYNOS_CIICBOFF_HORIZONTAL(img_pos.x) | 540 cfg = (EXYNOS_CIICBOFF_HORIZONTAL(buf->rect.x) |
647 EXYNOS_CIICBOFF_VERTICAL(img_pos.y)); 541 EXYNOS_CIICBOFF_VERTICAL(buf->rect.y));
648 fimc_write(ctx, cfg, EXYNOS_CIICBOFF); 542 fimc_write(ctx, cfg, EXYNOS_CIICBOFF);
649 cfg = (EXYNOS_CIICROFF_HORIZONTAL(img_pos.x) | 543 cfg = (EXYNOS_CIICROFF_HORIZONTAL(buf->rect.x) |
650 EXYNOS_CIICROFF_VERTICAL(img_pos.y)); 544 EXYNOS_CIICROFF_VERTICAL(buf->rect.y));
651 fimc_write(ctx, cfg, EXYNOS_CIICROFF); 545 fimc_write(ctx, cfg, EXYNOS_CIICROFF);
652 546
653 return fimc_set_window(ctx, &img_pos, &img_sz); 547 fimc_set_window(ctx, buf);
654} 548}
655 549
656static int fimc_src_set_addr(struct device *dev, 550static void fimc_src_set_addr(struct fimc_context *ctx,
657 struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id, 551 struct exynos_drm_ipp_buffer *buf)
658 enum drm_exynos_ipp_buf_type buf_type)
659{ 552{
660 struct fimc_context *ctx = get_fimc_context(dev); 553 fimc_write(ctx, buf->dma_addr[0], EXYNOS_CIIYSA(0));
661 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 554 fimc_write(ctx, buf->dma_addr[1], EXYNOS_CIICBSA(0));
662 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node; 555 fimc_write(ctx, buf->dma_addr[2], EXYNOS_CIICRSA(0));
663 struct drm_exynos_ipp_property *property;
664 struct drm_exynos_ipp_config *config;
665
666 if (!c_node) {
667 DRM_ERROR("failed to get c_node.\n");
668 return -EINVAL;
669 }
670
671 property = &c_node->property;
672
673 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]buf_type[%d]\n",
674 property->prop_id, buf_id, buf_type);
675
676 if (buf_id > FIMC_MAX_SRC) {
677 dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
678 return -ENOMEM;
679 }
680
681 /* address register set */
682 switch (buf_type) {
683 case IPP_BUF_ENQUEUE:
684 config = &property->config[EXYNOS_DRM_OPS_SRC];
685 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_Y],
686 EXYNOS_CIIYSA0);
687
688 if (config->fmt == DRM_FORMAT_YVU420) {
689 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
690 EXYNOS_CIICBSA0);
691 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
692 EXYNOS_CIICRSA0);
693 } else {
694 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
695 EXYNOS_CIICBSA0);
696 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
697 EXYNOS_CIICRSA0);
698 }
699 break;
700 case IPP_BUF_DEQUEUE:
701 fimc_write(ctx, 0x0, EXYNOS_CIIYSA0);
702 fimc_write(ctx, 0x0, EXYNOS_CIICBSA0);
703 fimc_write(ctx, 0x0, EXYNOS_CIICRSA0);
704 break;
705 default:
706 /* bypass */
707 break;
708 }
709
710 return 0;
711} 556}
712 557
713static struct exynos_drm_ipp_ops fimc_src_ops = { 558static void fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
714 .set_fmt = fimc_src_set_fmt,
715 .set_transf = fimc_src_set_transf,
716 .set_size = fimc_src_set_size,
717 .set_addr = fimc_src_set_addr,
718};
719
720static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
721{ 559{
722 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
723 u32 cfg; 560 u32 cfg;
724 561
725 DRM_DEBUG_KMS("fmt[0x%x]\n", fmt); 562 DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
@@ -732,11 +569,11 @@ static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
732 case DRM_FORMAT_RGB565: 569 case DRM_FORMAT_RGB565:
733 cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565; 570 cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565;
734 fimc_write(ctx, cfg, EXYNOS_CISCCTRL); 571 fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
735 return 0; 572 return;
736 case DRM_FORMAT_RGB888: 573 case DRM_FORMAT_RGB888:
737 cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888; 574 cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888;
738 fimc_write(ctx, cfg, EXYNOS_CISCCTRL); 575 fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
739 return 0; 576 return;
740 case DRM_FORMAT_XRGB8888: 577 case DRM_FORMAT_XRGB8888:
741 cfg |= (EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 | 578 cfg |= (EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 |
742 EXYNOS_CISCCTRL_EXTRGB_EXTENSION); 579 EXYNOS_CISCCTRL_EXTRGB_EXTENSION);
@@ -784,20 +621,13 @@ static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
784 cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR; 621 cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR;
785 cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE; 622 cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
786 break; 623 break;
787 default:
788 dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
789 return -EINVAL;
790 } 624 }
791 625
792 fimc_write(ctx, cfg, EXYNOS_CIOCTRL); 626 fimc_write(ctx, cfg, EXYNOS_CIOCTRL);
793
794 return 0;
795} 627}
796 628
797static int fimc_dst_set_fmt(struct device *dev, u32 fmt) 629static void fimc_dst_set_fmt(struct fimc_context *ctx, u32 fmt, bool tiled)
798{ 630{
799 struct fimc_context *ctx = get_fimc_context(dev);
800 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
801 u32 cfg; 631 u32 cfg;
802 632
803 DRM_DEBUG_KMS("fmt[0x%x]\n", fmt); 633 DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
@@ -837,10 +667,6 @@ static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
837 case DRM_FORMAT_NV21: 667 case DRM_FORMAT_NV21:
838 cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420; 668 cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420;
839 break; 669 break;
840 default:
841 dev_err(ippdrv->dev, "invalid target format 0x%x.\n",
842 fmt);
843 return -EINVAL;
844 } 670 }
845 671
846 fimc_write(ctx, cfg, EXYNOS_CITRGFMT); 672 fimc_write(ctx, cfg, EXYNOS_CITRGFMT);
@@ -849,73 +675,67 @@ static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
849 cfg = fimc_read(ctx, EXYNOS_CIDMAPARAM); 675 cfg = fimc_read(ctx, EXYNOS_CIDMAPARAM);
850 cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK; 676 cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK;
851 677
852 cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR; 678 if (tiled)
679 cfg |= EXYNOS_CIDMAPARAM_W_MODE_64X32;
680 else
681 cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR;
853 682
854 fimc_write(ctx, cfg, EXYNOS_CIDMAPARAM); 683 fimc_write(ctx, cfg, EXYNOS_CIDMAPARAM);
855 684
856 return fimc_dst_set_fmt_order(ctx, fmt); 685 fimc_dst_set_fmt_order(ctx, fmt);
857} 686}
858 687
859static int fimc_dst_set_transf(struct device *dev, 688static void fimc_dst_set_transf(struct fimc_context *ctx, unsigned int rotation)
860 enum drm_exynos_degree degree,
861 enum drm_exynos_flip flip, bool *swap)
862{ 689{
863 struct fimc_context *ctx = get_fimc_context(dev); 690 unsigned int degree = rotation & DRM_MODE_ROTATE_MASK;
864 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
865 u32 cfg; 691 u32 cfg;
866 692
867 DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip); 693 DRM_DEBUG_KMS("rotation[0x%x]\n", rotation);
868 694
869 cfg = fimc_read(ctx, EXYNOS_CITRGFMT); 695 cfg = fimc_read(ctx, EXYNOS_CITRGFMT);
870 cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK; 696 cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK;
871 cfg &= ~EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE; 697 cfg &= ~EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
872 698
873 switch (degree) { 699 switch (degree) {
874 case EXYNOS_DRM_DEGREE_0: 700 case DRM_MODE_ROTATE_0:
875 if (flip & EXYNOS_DRM_FLIP_VERTICAL) 701 if (rotation & DRM_MODE_REFLECT_X)
876 cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR; 702 cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
877 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) 703 if (rotation & DRM_MODE_REFLECT_Y)
878 cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR; 704 cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
879 break; 705 break;
880 case EXYNOS_DRM_DEGREE_90: 706 case DRM_MODE_ROTATE_90:
881 cfg |= EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE; 707 cfg |= EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
882 if (flip & EXYNOS_DRM_FLIP_VERTICAL) 708 if (rotation & DRM_MODE_REFLECT_X)
883 cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR; 709 cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
884 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) 710 if (rotation & DRM_MODE_REFLECT_Y)
885 cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR; 711 cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
886 break; 712 break;
887 case EXYNOS_DRM_DEGREE_180: 713 case DRM_MODE_ROTATE_180:
888 cfg |= (EXYNOS_CITRGFMT_FLIP_X_MIRROR | 714 cfg |= (EXYNOS_CITRGFMT_FLIP_X_MIRROR |
889 EXYNOS_CITRGFMT_FLIP_Y_MIRROR); 715 EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
890 if (flip & EXYNOS_DRM_FLIP_VERTICAL) 716 if (rotation & DRM_MODE_REFLECT_X)
891 cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR; 717 cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
892 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) 718 if (rotation & DRM_MODE_REFLECT_Y)
893 cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR; 719 cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
894 break; 720 break;
895 case EXYNOS_DRM_DEGREE_270: 721 case DRM_MODE_ROTATE_270:
896 cfg |= (EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE | 722 cfg |= (EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE |
897 EXYNOS_CITRGFMT_FLIP_X_MIRROR | 723 EXYNOS_CITRGFMT_FLIP_X_MIRROR |
898 EXYNOS_CITRGFMT_FLIP_Y_MIRROR); 724 EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
899 if (flip & EXYNOS_DRM_FLIP_VERTICAL) 725 if (rotation & DRM_MODE_REFLECT_X)
900 cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR; 726 cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
901 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) 727 if (rotation & DRM_MODE_REFLECT_Y)
902 cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR; 728 cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
903 break; 729 break;
904 default:
905 dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
906 return -EINVAL;
907 } 730 }
908 731
909 fimc_write(ctx, cfg, EXYNOS_CITRGFMT); 732 fimc_write(ctx, cfg, EXYNOS_CITRGFMT);
910 *swap = (cfg & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) ? 1 : 0;
911
912 return 0;
913} 733}
914 734
915static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc, 735static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
916 struct drm_exynos_pos *src, struct drm_exynos_pos *dst) 736 struct drm_exynos_ipp_task_rect *src,
737 struct drm_exynos_ipp_task_rect *dst)
917{ 738{
918 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
919 u32 cfg, cfg_ext, shfactor; 739 u32 cfg, cfg_ext, shfactor;
920 u32 pre_dst_width, pre_dst_height; 740 u32 pre_dst_width, pre_dst_height;
921 u32 hfactor, vfactor; 741 u32 hfactor, vfactor;
@@ -942,13 +762,13 @@ static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
942 /* fimc_ippdrv_check_property assures that dividers are not null */ 762 /* fimc_ippdrv_check_property assures that dividers are not null */
943 hfactor = fls(src_w / dst_w / 2); 763 hfactor = fls(src_w / dst_w / 2);
944 if (hfactor > FIMC_SHFACTOR / 2) { 764 if (hfactor > FIMC_SHFACTOR / 2) {
945 dev_err(ippdrv->dev, "failed to get ratio horizontal.\n"); 765 dev_err(ctx->dev, "failed to get ratio horizontal.\n");
946 return -EINVAL; 766 return -EINVAL;
947 } 767 }
948 768
949 vfactor = fls(src_h / dst_h / 2); 769 vfactor = fls(src_h / dst_h / 2);
950 if (vfactor > FIMC_SHFACTOR / 2) { 770 if (vfactor > FIMC_SHFACTOR / 2) {
951 dev_err(ippdrv->dev, "failed to get ratio vertical.\n"); 771 dev_err(ctx->dev, "failed to get ratio vertical.\n");
952 return -EINVAL; 772 return -EINVAL;
953 } 773 }
954 774
@@ -1019,83 +839,77 @@ static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
1019 fimc_write(ctx, cfg_ext, EXYNOS_CIEXTEN); 839 fimc_write(ctx, cfg_ext, EXYNOS_CIEXTEN);
1020} 840}
1021 841
1022static int fimc_dst_set_size(struct device *dev, int swap, 842static void fimc_dst_set_size(struct fimc_context *ctx,
1023 struct drm_exynos_pos *pos, struct drm_exynos_sz *sz) 843 struct exynos_drm_ipp_buffer *buf)
1024{ 844{
1025 struct fimc_context *ctx = get_fimc_context(dev); 845 u32 cfg, cfg_ext;
1026 struct drm_exynos_pos img_pos = *pos;
1027 struct drm_exynos_sz img_sz = *sz;
1028 u32 cfg;
1029 846
1030 DRM_DEBUG_KMS("swap[%d]hsize[%d]vsize[%d]\n", 847 DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height);
1031 swap, sz->hsize, sz->vsize);
1032 848
1033 /* original size */ 849 /* original size */
1034 cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(img_sz.hsize) | 850 cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(buf->buf.width) |
1035 EXYNOS_ORGOSIZE_VERTICAL(img_sz.vsize)); 851 EXYNOS_ORGOSIZE_VERTICAL(buf->buf.height));
1036 852
1037 fimc_write(ctx, cfg, EXYNOS_ORGOSIZE); 853 fimc_write(ctx, cfg, EXYNOS_ORGOSIZE);
1038 854
1039 DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", pos->x, pos->y, pos->w, pos->h); 855 DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", buf->rect.x, buf->rect.y,
856 buf->rect.w, buf->rect.h);
1040 857
1041 /* CSC ITU */ 858 /* CSC ITU */
1042 cfg = fimc_read(ctx, EXYNOS_CIGCTRL); 859 cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
1043 cfg &= ~EXYNOS_CIGCTRL_CSC_MASK; 860 cfg &= ~EXYNOS_CIGCTRL_CSC_MASK;
1044 861
1045 if (sz->hsize >= FIMC_WIDTH_ITU_709) 862 if (buf->buf.width >= FIMC_WIDTH_ITU_709)
1046 cfg |= EXYNOS_CIGCTRL_CSC_ITU709; 863 cfg |= EXYNOS_CIGCTRL_CSC_ITU709;
1047 else 864 else
1048 cfg |= EXYNOS_CIGCTRL_CSC_ITU601; 865 cfg |= EXYNOS_CIGCTRL_CSC_ITU601;
1049 866
1050 fimc_write(ctx, cfg, EXYNOS_CIGCTRL); 867 fimc_write(ctx, cfg, EXYNOS_CIGCTRL);
1051 868
1052 if (swap) { 869 cfg_ext = fimc_read(ctx, EXYNOS_CITRGFMT);
1053 img_pos.w = pos->h;
1054 img_pos.h = pos->w;
1055 img_sz.hsize = sz->vsize;
1056 img_sz.vsize = sz->hsize;
1057 }
1058 870
1059 /* target image size */ 871 /* target image size */
1060 cfg = fimc_read(ctx, EXYNOS_CITRGFMT); 872 cfg = fimc_read(ctx, EXYNOS_CITRGFMT);
1061 cfg &= ~(EXYNOS_CITRGFMT_TARGETH_MASK | 873 cfg &= ~(EXYNOS_CITRGFMT_TARGETH_MASK |
1062 EXYNOS_CITRGFMT_TARGETV_MASK); 874 EXYNOS_CITRGFMT_TARGETV_MASK);
1063 cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(img_pos.w) | 875 if (cfg_ext & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE)
1064 EXYNOS_CITRGFMT_TARGETVSIZE(img_pos.h)); 876 cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(buf->rect.h) |
877 EXYNOS_CITRGFMT_TARGETVSIZE(buf->rect.w));
878 else
879 cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(buf->rect.w) |
880 EXYNOS_CITRGFMT_TARGETVSIZE(buf->rect.h));
1065 fimc_write(ctx, cfg, EXYNOS_CITRGFMT); 881 fimc_write(ctx, cfg, EXYNOS_CITRGFMT);
1066 882
1067 /* target area */ 883 /* target area */
1068 cfg = EXYNOS_CITAREA_TARGET_AREA(img_pos.w * img_pos.h); 884 cfg = EXYNOS_CITAREA_TARGET_AREA(buf->rect.w * buf->rect.h);
1069 fimc_write(ctx, cfg, EXYNOS_CITAREA); 885 fimc_write(ctx, cfg, EXYNOS_CITAREA);
1070 886
1071 /* offset Y(RGB), Cb, Cr */ 887 /* offset Y(RGB), Cb, Cr */
1072 cfg = (EXYNOS_CIOYOFF_HORIZONTAL(img_pos.x) | 888 cfg = (EXYNOS_CIOYOFF_HORIZONTAL(buf->rect.x) |
1073 EXYNOS_CIOYOFF_VERTICAL(img_pos.y)); 889 EXYNOS_CIOYOFF_VERTICAL(buf->rect.y));
1074 fimc_write(ctx, cfg, EXYNOS_CIOYOFF); 890 fimc_write(ctx, cfg, EXYNOS_CIOYOFF);
1075 cfg = (EXYNOS_CIOCBOFF_HORIZONTAL(img_pos.x) | 891 cfg = (EXYNOS_CIOCBOFF_HORIZONTAL(buf->rect.x) |
1076 EXYNOS_CIOCBOFF_VERTICAL(img_pos.y)); 892 EXYNOS_CIOCBOFF_VERTICAL(buf->rect.y));
1077 fimc_write(ctx, cfg, EXYNOS_CIOCBOFF); 893 fimc_write(ctx, cfg, EXYNOS_CIOCBOFF);
1078 cfg = (EXYNOS_CIOCROFF_HORIZONTAL(img_pos.x) | 894 cfg = (EXYNOS_CIOCROFF_HORIZONTAL(buf->rect.x) |
1079 EXYNOS_CIOCROFF_VERTICAL(img_pos.y)); 895 EXYNOS_CIOCROFF_VERTICAL(buf->rect.y));
1080 fimc_write(ctx, cfg, EXYNOS_CIOCROFF); 896 fimc_write(ctx, cfg, EXYNOS_CIOCROFF);
1081
1082 return 0;
1083} 897}
1084 898
1085static void fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id, 899static void fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
1086 enum drm_exynos_ipp_buf_type buf_type) 900 bool enqueue)
1087{ 901{
1088 unsigned long flags; 902 unsigned long flags;
1089 u32 buf_num; 903 u32 buf_num;
1090 u32 cfg; 904 u32 cfg;
1091 905
1092 DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type); 906 DRM_DEBUG_KMS("buf_id[%d]enqueu[%d]\n", buf_id, enqueue);
1093 907
1094 spin_lock_irqsave(&ctx->lock, flags); 908 spin_lock_irqsave(&ctx->lock, flags);
1095 909
1096 cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ); 910 cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ);
1097 911
1098 if (buf_type == IPP_BUF_ENQUEUE) 912 if (enqueue)
1099 cfg |= (1 << buf_id); 913 cfg |= (1 << buf_id);
1100 else 914 else
1101 cfg &= ~(1 << buf_id); 915 cfg &= ~(1 << buf_id);
@@ -1104,88 +918,29 @@ static void fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
1104 918
1105 buf_num = hweight32(cfg); 919 buf_num = hweight32(cfg);
1106 920
1107 if (buf_type == IPP_BUF_ENQUEUE && buf_num >= FIMC_BUF_START) 921 if (enqueue && buf_num >= FIMC_BUF_START)
1108 fimc_mask_irq(ctx, true); 922 fimc_mask_irq(ctx, true);
1109 else if (buf_type == IPP_BUF_DEQUEUE && buf_num <= FIMC_BUF_STOP) 923 else if (!enqueue && buf_num <= FIMC_BUF_STOP)
1110 fimc_mask_irq(ctx, false); 924 fimc_mask_irq(ctx, false);
1111 925
1112 spin_unlock_irqrestore(&ctx->lock, flags); 926 spin_unlock_irqrestore(&ctx->lock, flags);
1113} 927}
1114 928
1115static int fimc_dst_set_addr(struct device *dev, 929static void fimc_dst_set_addr(struct fimc_context *ctx,
1116 struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id, 930 struct exynos_drm_ipp_buffer *buf)
1117 enum drm_exynos_ipp_buf_type buf_type)
1118{ 931{
1119 struct fimc_context *ctx = get_fimc_context(dev); 932 fimc_write(ctx, buf->dma_addr[0], EXYNOS_CIOYSA(0));
1120 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 933 fimc_write(ctx, buf->dma_addr[1], EXYNOS_CIOCBSA(0));
1121 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node; 934 fimc_write(ctx, buf->dma_addr[2], EXYNOS_CIOCRSA(0));
1122 struct drm_exynos_ipp_property *property;
1123 struct drm_exynos_ipp_config *config;
1124
1125 if (!c_node) {
1126 DRM_ERROR("failed to get c_node.\n");
1127 return -EINVAL;
1128 }
1129
1130 property = &c_node->property;
1131
1132 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]buf_type[%d]\n",
1133 property->prop_id, buf_id, buf_type);
1134 935
1135 if (buf_id > FIMC_MAX_DST) { 936 fimc_dst_set_buf_seq(ctx, 0, true);
1136 dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
1137 return -ENOMEM;
1138 }
1139
1140 /* address register set */
1141 switch (buf_type) {
1142 case IPP_BUF_ENQUEUE:
1143 config = &property->config[EXYNOS_DRM_OPS_DST];
1144
1145 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_Y],
1146 EXYNOS_CIOYSA(buf_id));
1147
1148 if (config->fmt == DRM_FORMAT_YVU420) {
1149 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
1150 EXYNOS_CIOCBSA(buf_id));
1151 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
1152 EXYNOS_CIOCRSA(buf_id));
1153 } else {
1154 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
1155 EXYNOS_CIOCBSA(buf_id));
1156 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
1157 EXYNOS_CIOCRSA(buf_id));
1158 }
1159 break;
1160 case IPP_BUF_DEQUEUE:
1161 fimc_write(ctx, 0x0, EXYNOS_CIOYSA(buf_id));
1162 fimc_write(ctx, 0x0, EXYNOS_CIOCBSA(buf_id));
1163 fimc_write(ctx, 0x0, EXYNOS_CIOCRSA(buf_id));
1164 break;
1165 default:
1166 /* bypass */
1167 break;
1168 }
1169
1170 fimc_dst_set_buf_seq(ctx, buf_id, buf_type);
1171
1172 return 0;
1173} 937}
1174 938
1175static struct exynos_drm_ipp_ops fimc_dst_ops = { 939static void fimc_stop(struct fimc_context *ctx);
1176 .set_fmt = fimc_dst_set_fmt,
1177 .set_transf = fimc_dst_set_transf,
1178 .set_size = fimc_dst_set_size,
1179 .set_addr = fimc_dst_set_addr,
1180};
1181 940
1182static irqreturn_t fimc_irq_handler(int irq, void *dev_id) 941static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
1183{ 942{
1184 struct fimc_context *ctx = dev_id; 943 struct fimc_context *ctx = dev_id;
1185 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1186 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
1187 struct drm_exynos_ipp_event_work *event_work =
1188 c_node->event_work;
1189 int buf_id; 944 int buf_id;
1190 945
1191 DRM_DEBUG_KMS("fimc id[%d]\n", ctx->id); 946 DRM_DEBUG_KMS("fimc id[%d]\n", ctx->id);
@@ -1203,170 +958,19 @@ static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
1203 958
1204 DRM_DEBUG_KMS("buf_id[%d]\n", buf_id); 959 DRM_DEBUG_KMS("buf_id[%d]\n", buf_id);
1205 960
1206 fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE); 961 if (ctx->task) {
1207 962 struct exynos_drm_ipp_task *task = ctx->task;
1208 event_work->ippdrv = ippdrv;
1209 event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
1210 queue_work(ippdrv->event_workq, &event_work->work);
1211
1212 return IRQ_HANDLED;
1213}
1214
1215static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
1216{
1217 struct drm_exynos_ipp_prop_list *prop_list = &ippdrv->prop_list;
1218
1219 prop_list->version = 1;
1220 prop_list->writeback = 1;
1221 prop_list->refresh_min = FIMC_REFRESH_MIN;
1222 prop_list->refresh_max = FIMC_REFRESH_MAX;
1223 prop_list->flip = (1 << EXYNOS_DRM_FLIP_NONE) |
1224 (1 << EXYNOS_DRM_FLIP_VERTICAL) |
1225 (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
1226 prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
1227 (1 << EXYNOS_DRM_DEGREE_90) |
1228 (1 << EXYNOS_DRM_DEGREE_180) |
1229 (1 << EXYNOS_DRM_DEGREE_270);
1230 prop_list->csc = 1;
1231 prop_list->crop = 1;
1232 prop_list->crop_max.hsize = FIMC_CROP_MAX;
1233 prop_list->crop_max.vsize = FIMC_CROP_MAX;
1234 prop_list->crop_min.hsize = FIMC_CROP_MIN;
1235 prop_list->crop_min.vsize = FIMC_CROP_MIN;
1236 prop_list->scale = 1;
1237 prop_list->scale_max.hsize = FIMC_SCALE_MAX;
1238 prop_list->scale_max.vsize = FIMC_SCALE_MAX;
1239 prop_list->scale_min.hsize = FIMC_SCALE_MIN;
1240 prop_list->scale_min.vsize = FIMC_SCALE_MIN;
1241
1242 return 0;
1243}
1244
1245static inline bool fimc_check_drm_flip(enum drm_exynos_flip flip)
1246{
1247 switch (flip) {
1248 case EXYNOS_DRM_FLIP_NONE:
1249 case EXYNOS_DRM_FLIP_VERTICAL:
1250 case EXYNOS_DRM_FLIP_HORIZONTAL:
1251 case EXYNOS_DRM_FLIP_BOTH:
1252 return true;
1253 default:
1254 DRM_DEBUG_KMS("invalid flip\n");
1255 return false;
1256 }
1257}
1258
1259static int fimc_ippdrv_check_property(struct device *dev,
1260 struct drm_exynos_ipp_property *property)
1261{
1262 struct fimc_context *ctx = get_fimc_context(dev);
1263 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1264 struct drm_exynos_ipp_prop_list *pp = &ippdrv->prop_list;
1265 struct drm_exynos_ipp_config *config;
1266 struct drm_exynos_pos *pos;
1267 struct drm_exynos_sz *sz;
1268 bool swap;
1269 int i;
1270
1271 for_each_ipp_ops(i) {
1272 if ((i == EXYNOS_DRM_OPS_SRC) &&
1273 (property->cmd == IPP_CMD_WB))
1274 continue;
1275
1276 config = &property->config[i];
1277 pos = &config->pos;
1278 sz = &config->sz;
1279
1280 /* check for flip */
1281 if (!fimc_check_drm_flip(config->flip)) {
1282 DRM_ERROR("invalid flip.\n");
1283 goto err_property;
1284 }
1285
1286 /* check for degree */
1287 switch (config->degree) {
1288 case EXYNOS_DRM_DEGREE_90:
1289 case EXYNOS_DRM_DEGREE_270:
1290 swap = true;
1291 break;
1292 case EXYNOS_DRM_DEGREE_0:
1293 case EXYNOS_DRM_DEGREE_180:
1294 swap = false;
1295 break;
1296 default:
1297 DRM_ERROR("invalid degree.\n");
1298 goto err_property;
1299 }
1300
1301 /* check for buffer bound */
1302 if ((pos->x + pos->w > sz->hsize) ||
1303 (pos->y + pos->h > sz->vsize)) {
1304 DRM_ERROR("out of buf bound.\n");
1305 goto err_property;
1306 }
1307 963
1308 /* check for crop */ 964 ctx->task = NULL;
1309 if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) { 965 pm_runtime_mark_last_busy(ctx->dev);
1310 if (swap) { 966 pm_runtime_put_autosuspend(ctx->dev);
1311 if ((pos->h < pp->crop_min.hsize) || 967 exynos_drm_ipp_task_done(task, 0);
1312 (sz->vsize > pp->crop_max.hsize) ||
1313 (pos->w < pp->crop_min.vsize) ||
1314 (sz->hsize > pp->crop_max.vsize)) {
1315 DRM_ERROR("out of crop size.\n");
1316 goto err_property;
1317 }
1318 } else {
1319 if ((pos->w < pp->crop_min.hsize) ||
1320 (sz->hsize > pp->crop_max.hsize) ||
1321 (pos->h < pp->crop_min.vsize) ||
1322 (sz->vsize > pp->crop_max.vsize)) {
1323 DRM_ERROR("out of crop size.\n");
1324 goto err_property;
1325 }
1326 }
1327 }
1328
1329 /* check for scale */
1330 if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
1331 if (swap) {
1332 if ((pos->h < pp->scale_min.hsize) ||
1333 (sz->vsize > pp->scale_max.hsize) ||
1334 (pos->w < pp->scale_min.vsize) ||
1335 (sz->hsize > pp->scale_max.vsize)) {
1336 DRM_ERROR("out of scale size.\n");
1337 goto err_property;
1338 }
1339 } else {
1340 if ((pos->w < pp->scale_min.hsize) ||
1341 (sz->hsize > pp->scale_max.hsize) ||
1342 (pos->h < pp->scale_min.vsize) ||
1343 (sz->vsize > pp->scale_max.vsize)) {
1344 DRM_ERROR("out of scale size.\n");
1345 goto err_property;
1346 }
1347 }
1348 }
1349 } 968 }
1350 969
1351 return 0; 970 fimc_dst_set_buf_seq(ctx, buf_id, false);
971 fimc_stop(ctx);
1352 972
1353err_property: 973 return IRQ_HANDLED;
1354 for_each_ipp_ops(i) {
1355 if ((i == EXYNOS_DRM_OPS_SRC) &&
1356 (property->cmd == IPP_CMD_WB))
1357 continue;
1358
1359 config = &property->config[i];
1360 pos = &config->pos;
1361 sz = &config->sz;
1362
1363 DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
1364 i ? "dst" : "src", config->flip, config->degree,
1365 pos->x, pos->y, pos->w, pos->h,
1366 sz->hsize, sz->vsize);
1367 }
1368
1369 return -EINVAL;
1370} 974}
1371 975
1372static void fimc_clear_addr(struct fimc_context *ctx) 976static void fimc_clear_addr(struct fimc_context *ctx)
@@ -1386,10 +990,8 @@ static void fimc_clear_addr(struct fimc_context *ctx)
1386 } 990 }
1387} 991}
1388 992
1389static int fimc_ippdrv_reset(struct device *dev) 993static void fimc_reset(struct fimc_context *ctx)
1390{ 994{
1391 struct fimc_context *ctx = get_fimc_context(dev);
1392
1393 /* reset h/w block */ 995 /* reset h/w block */
1394 fimc_sw_reset(ctx); 996 fimc_sw_reset(ctx);
1395 997
@@ -1397,82 +999,26 @@ static int fimc_ippdrv_reset(struct device *dev)
1397 memset(&ctx->sc, 0x0, sizeof(ctx->sc)); 999 memset(&ctx->sc, 0x0, sizeof(ctx->sc));
1398 1000
1399 fimc_clear_addr(ctx); 1001 fimc_clear_addr(ctx);
1400
1401 return 0;
1402} 1002}
1403 1003
1404static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd) 1004static void fimc_start(struct fimc_context *ctx)
1405{ 1005{
1406 struct fimc_context *ctx = get_fimc_context(dev);
1407 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1408 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
1409 struct drm_exynos_ipp_property *property;
1410 struct drm_exynos_ipp_config *config;
1411 struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX];
1412 struct drm_exynos_ipp_set_wb set_wb;
1413 int ret, i;
1414 u32 cfg0, cfg1; 1006 u32 cfg0, cfg1;
1415 1007
1416 DRM_DEBUG_KMS("cmd[%d]\n", cmd);
1417
1418 if (!c_node) {
1419 DRM_ERROR("failed to get c_node.\n");
1420 return -EINVAL;
1421 }
1422
1423 property = &c_node->property;
1424
1425 fimc_mask_irq(ctx, true); 1008 fimc_mask_irq(ctx, true);
1426 1009
1427 for_each_ipp_ops(i) { 1010 /* If set true, we can save jpeg about screen */
1428 config = &property->config[i];
1429 img_pos[i] = config->pos;
1430 }
1431
1432 ret = fimc_set_prescaler(ctx, &ctx->sc,
1433 &img_pos[EXYNOS_DRM_OPS_SRC],
1434 &img_pos[EXYNOS_DRM_OPS_DST]);
1435 if (ret) {
1436 dev_err(dev, "failed to set prescaler.\n");
1437 return ret;
1438 }
1439
1440 /* If set ture, we can save jpeg about screen */
1441 fimc_handle_jpeg(ctx, false); 1011 fimc_handle_jpeg(ctx, false);
1442 fimc_set_scaler(ctx, &ctx->sc); 1012 fimc_set_scaler(ctx, &ctx->sc);
1443 1013
1444 switch (cmd) { 1014 fimc_set_type_ctrl(ctx);
1445 case IPP_CMD_M2M: 1015 fimc_handle_lastend(ctx, false);
1446 fimc_set_type_ctrl(ctx, FIMC_WB_NONE);
1447 fimc_handle_lastend(ctx, false);
1448
1449 /* setup dma */
1450 cfg0 = fimc_read(ctx, EXYNOS_MSCTRL);
1451 cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK;
1452 cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY;
1453 fimc_write(ctx, cfg0, EXYNOS_MSCTRL);
1454 break;
1455 case IPP_CMD_WB:
1456 fimc_set_type_ctrl(ctx, FIMC_WB_A);
1457 fimc_handle_lastend(ctx, true);
1458
1459 /* setup FIMD */
1460 ret = fimc_set_camblk_fimd0_wb(ctx);
1461 if (ret < 0) {
1462 dev_err(dev, "camblk setup failed.\n");
1463 return ret;
1464 }
1465 1016
1466 set_wb.enable = 1; 1017 /* setup dma */
1467 set_wb.refresh = property->refresh_rate; 1018 cfg0 = fimc_read(ctx, EXYNOS_MSCTRL);
1468 exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb); 1019 cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK;
1469 break; 1020 cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY;
1470 case IPP_CMD_OUTPUT: 1021 fimc_write(ctx, cfg0, EXYNOS_MSCTRL);
1471 default:
1472 ret = -EINVAL;
1473 dev_err(dev, "invalid operations.\n");
1474 return ret;
1475 }
1476 1022
1477 /* Reset status */ 1023 /* Reset status */
1478 fimc_write(ctx, 0x0, EXYNOS_CISTATUS); 1024 fimc_write(ctx, 0x0, EXYNOS_CISTATUS);
@@ -1498,36 +1044,18 @@ static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1498 1044
1499 fimc_clear_bits(ctx, EXYNOS_CIOCTRL, EXYNOS_CIOCTRL_WEAVE_MASK); 1045 fimc_clear_bits(ctx, EXYNOS_CIOCTRL, EXYNOS_CIOCTRL_WEAVE_MASK);
1500 1046
1501 if (cmd == IPP_CMD_M2M) 1047 fimc_set_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID);
1502 fimc_set_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID);
1503
1504 return 0;
1505} 1048}
1506 1049
1507static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd) 1050static void fimc_stop(struct fimc_context *ctx)
1508{ 1051{
1509 struct fimc_context *ctx = get_fimc_context(dev);
1510 struct drm_exynos_ipp_set_wb set_wb = {0, 0};
1511 u32 cfg; 1052 u32 cfg;
1512 1053
1513 DRM_DEBUG_KMS("cmd[%d]\n", cmd); 1054 /* Source clear */
1514 1055 cfg = fimc_read(ctx, EXYNOS_MSCTRL);
1515 switch (cmd) { 1056 cfg &= ~EXYNOS_MSCTRL_INPUT_MASK;
1516 case IPP_CMD_M2M: 1057 cfg &= ~EXYNOS_MSCTRL_ENVID;
1517 /* Source clear */ 1058 fimc_write(ctx, cfg, EXYNOS_MSCTRL);
1518 cfg = fimc_read(ctx, EXYNOS_MSCTRL);
1519 cfg &= ~EXYNOS_MSCTRL_INPUT_MASK;
1520 cfg &= ~EXYNOS_MSCTRL_ENVID;
1521 fimc_write(ctx, cfg, EXYNOS_MSCTRL);
1522 break;
1523 case IPP_CMD_WB:
1524 exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
1525 break;
1526 case IPP_CMD_OUTPUT:
1527 default:
1528 dev_err(dev, "invalid operations.\n");
1529 break;
1530 }
1531 1059
1532 fimc_mask_irq(ctx, false); 1060 fimc_mask_irq(ctx, false);
1533 1061
@@ -1545,6 +1073,87 @@ static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1545 fimc_set_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_IRQ_END_DISABLE); 1073 fimc_set_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_IRQ_END_DISABLE);
1546} 1074}
1547 1075
1076static int fimc_commit(struct exynos_drm_ipp *ipp,
1077 struct exynos_drm_ipp_task *task)
1078{
1079 struct fimc_context *ctx =
1080 container_of(ipp, struct fimc_context, ipp);
1081
1082 pm_runtime_get_sync(ctx->dev);
1083 ctx->task = task;
1084
1085 fimc_src_set_fmt(ctx, task->src.buf.fourcc, task->src.buf.modifier);
1086 fimc_src_set_size(ctx, &task->src);
1087 fimc_src_set_transf(ctx, DRM_MODE_ROTATE_0);
1088 fimc_src_set_addr(ctx, &task->src);
1089 fimc_dst_set_fmt(ctx, task->dst.buf.fourcc, task->dst.buf.modifier);
1090 fimc_dst_set_transf(ctx, task->transform.rotation);
1091 fimc_dst_set_size(ctx, &task->dst);
1092 fimc_dst_set_addr(ctx, &task->dst);
1093 fimc_set_prescaler(ctx, &ctx->sc, &task->src.rect, &task->dst.rect);
1094 fimc_start(ctx);
1095
1096 return 0;
1097}
1098
1099static void fimc_abort(struct exynos_drm_ipp *ipp,
1100 struct exynos_drm_ipp_task *task)
1101{
1102 struct fimc_context *ctx =
1103 container_of(ipp, struct fimc_context, ipp);
1104
1105 fimc_reset(ctx);
1106
1107 if (ctx->task) {
1108 struct exynos_drm_ipp_task *task = ctx->task;
1109
1110 ctx->task = NULL;
1111 pm_runtime_mark_last_busy(ctx->dev);
1112 pm_runtime_put_autosuspend(ctx->dev);
1113 exynos_drm_ipp_task_done(task, -EIO);
1114 }
1115}
1116
1117static struct exynos_drm_ipp_funcs ipp_funcs = {
1118 .commit = fimc_commit,
1119 .abort = fimc_abort,
1120};
1121
1122static int fimc_bind(struct device *dev, struct device *master, void *data)
1123{
1124 struct fimc_context *ctx = dev_get_drvdata(dev);
1125 struct drm_device *drm_dev = data;
1126 struct exynos_drm_ipp *ipp = &ctx->ipp;
1127
1128 ctx->drm_dev = drm_dev;
1129 drm_iommu_attach_device(drm_dev, dev);
1130
1131 exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
1132 DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
1133 DRM_EXYNOS_IPP_CAP_SCALE | DRM_EXYNOS_IPP_CAP_CONVERT,
1134 ctx->formats, ctx->num_formats, "fimc");
1135
1136 dev_info(dev, "The exynos fimc has been probed successfully\n");
1137
1138 return 0;
1139}
1140
1141static void fimc_unbind(struct device *dev, struct device *master,
1142 void *data)
1143{
1144 struct fimc_context *ctx = dev_get_drvdata(dev);
1145 struct drm_device *drm_dev = data;
1146 struct exynos_drm_ipp *ipp = &ctx->ipp;
1147
1148 exynos_drm_ipp_unregister(drm_dev, ipp);
1149 drm_iommu_detach_device(drm_dev, dev);
1150}
1151
1152static const struct component_ops fimc_component_ops = {
1153 .bind = fimc_bind,
1154 .unbind = fimc_unbind,
1155};
1156
1548static void fimc_put_clocks(struct fimc_context *ctx) 1157static void fimc_put_clocks(struct fimc_context *ctx)
1549{ 1158{
1550 int i; 1159 int i;
@@ -1559,7 +1168,7 @@ static void fimc_put_clocks(struct fimc_context *ctx)
1559 1168
1560static int fimc_setup_clocks(struct fimc_context *ctx) 1169static int fimc_setup_clocks(struct fimc_context *ctx)
1561{ 1170{
1562 struct device *fimc_dev = ctx->ippdrv.dev; 1171 struct device *fimc_dev = ctx->dev;
1563 struct device *dev; 1172 struct device *dev;
1564 int ret, i; 1173 int ret, i;
1565 1174
@@ -1574,8 +1183,6 @@ static int fimc_setup_clocks(struct fimc_context *ctx)
1574 1183
1575 ctx->clocks[i] = clk_get(dev, fimc_clock_names[i]); 1184 ctx->clocks[i] = clk_get(dev, fimc_clock_names[i]);
1576 if (IS_ERR(ctx->clocks[i])) { 1185 if (IS_ERR(ctx->clocks[i])) {
1577 if (i >= FIMC_CLK_MUX)
1578 break;
1579 ret = PTR_ERR(ctx->clocks[i]); 1186 ret = PTR_ERR(ctx->clocks[i]);
1580 dev_err(fimc_dev, "failed to get clock: %s\n", 1187 dev_err(fimc_dev, "failed to get clock: %s\n",
1581 fimc_clock_names[i]); 1188 fimc_clock_names[i]);
@@ -1583,20 +1190,6 @@ static int fimc_setup_clocks(struct fimc_context *ctx)
1583 } 1190 }
1584 } 1191 }
1585 1192
1586 /* Optional FIMC LCLK parent clock setting */
1587 if (!IS_ERR(ctx->clocks[FIMC_CLK_PARENT])) {
1588 ret = clk_set_parent(ctx->clocks[FIMC_CLK_MUX],
1589 ctx->clocks[FIMC_CLK_PARENT]);
1590 if (ret < 0) {
1591 dev_err(fimc_dev, "failed to set parent.\n");
1592 goto e_clk_free;
1593 }
1594 }
1595
1596 ret = clk_set_rate(ctx->clocks[FIMC_CLK_LCLK], ctx->clk_frequency);
1597 if (ret < 0)
1598 goto e_clk_free;
1599
1600 ret = clk_prepare_enable(ctx->clocks[FIMC_CLK_LCLK]); 1193 ret = clk_prepare_enable(ctx->clocks[FIMC_CLK_LCLK]);
1601 if (!ret) 1194 if (!ret)
1602 return ret; 1195 return ret;
@@ -1605,57 +1198,118 @@ e_clk_free:
1605 return ret; 1198 return ret;
1606} 1199}
1607 1200
1608static int fimc_parse_dt(struct fimc_context *ctx) 1201int exynos_drm_check_fimc_device(struct device *dev)
1609{ 1202{
1610 struct device_node *node = ctx->ippdrv.dev->of_node; 1203 unsigned int id = of_alias_get_id(dev->of_node, "fimc");
1611 1204
1612 /* Handle only devices that support the LCD Writeback data path */ 1205 if (id >= 0 && (BIT(id) & fimc_mask))
1613 if (!of_property_read_bool(node, "samsung,lcd-wb")) 1206 return 0;
1614 return -ENODEV; 1207 return -ENODEV;
1208}
1615 1209
1616 if (of_property_read_u32(node, "clock-frequency", 1210static const unsigned int fimc_formats[] = {
1617 &ctx->clk_frequency)) 1211 DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB565,
1618 ctx->clk_frequency = FIMC_DEFAULT_LCLK_FREQUENCY; 1212 DRM_FORMAT_NV12, DRM_FORMAT_NV16, DRM_FORMAT_NV21, DRM_FORMAT_NV61,
1213 DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU,
1214 DRM_FORMAT_YUV420, DRM_FORMAT_YVU420, DRM_FORMAT_YUV422,
1215 DRM_FORMAT_YUV444,
1216};
1619 1217
1620 ctx->id = of_alias_get_id(node, "fimc"); 1218static const unsigned int fimc_tiled_formats[] = {
1219 DRM_FORMAT_NV12, DRM_FORMAT_NV21,
1220};
1621 1221
1622 if (ctx->id < 0) { 1222static const struct drm_exynos_ipp_limit fimc_4210_limits_v1[] = {
1623 dev_err(ctx->ippdrv.dev, "failed to get node alias id.\n"); 1223 { IPP_SIZE_LIMIT(BUFFER, .h = { 16, 8192, 8 }, .v = { 16, 8192, 2 }) },
1624 return -EINVAL; 1224 { IPP_SIZE_LIMIT(AREA, .h = { 16, 4224, 2 }, .v = { 16, 0, 2 }) },
1625 } 1225 { IPP_SIZE_LIMIT(ROTATED, .h = { 128, 1920 }, .v = { 128, 0 }) },
1226 { IPP_SCALE_LIMIT(.h = { (1 << 16) / 64, (1 << 16) * 64 },
1227 .v = { (1 << 16) / 64, (1 << 16) * 64 }) },
1228};
1626 1229
1627 return 0; 1230static const struct drm_exynos_ipp_limit fimc_4210_limits_v2[] = {
1628} 1231 { IPP_SIZE_LIMIT(BUFFER, .h = { 16, 8192, 8 }, .v = { 16, 8192, 2 }) },
1232 { IPP_SIZE_LIMIT(AREA, .h = { 16, 1920, 2 }, .v = { 16, 0, 2 }) },
1233 { IPP_SIZE_LIMIT(ROTATED, .h = { 128, 1366 }, .v = { 128, 0 }) },
1234 { IPP_SCALE_LIMIT(.h = { (1 << 16) / 64, (1 << 16) * 64 },
1235 .v = { (1 << 16) / 64, (1 << 16) * 64 }) },
1236};
1237
1238static const struct drm_exynos_ipp_limit fimc_4210_limits_tiled_v1[] = {
1239 { IPP_SIZE_LIMIT(BUFFER, .h = { 128, 1920, 128 }, .v = { 32, 1920, 32 }) },
1240 { IPP_SIZE_LIMIT(AREA, .h = { 128, 1920, 2 }, .v = { 128, 0, 2 }) },
1241 { IPP_SCALE_LIMIT(.h = { (1 << 16) / 64, (1 << 16) * 64 },
1242 .v = { (1 << 16) / 64, (1 << 16) * 64 }) },
1243};
1244
1245static const struct drm_exynos_ipp_limit fimc_4210_limits_tiled_v2[] = {
1246 { IPP_SIZE_LIMIT(BUFFER, .h = { 128, 1920, 128 }, .v = { 32, 1920, 32 }) },
1247 { IPP_SIZE_LIMIT(AREA, .h = { 128, 1366, 2 }, .v = { 128, 0, 2 }) },
1248 { IPP_SCALE_LIMIT(.h = { (1 << 16) / 64, (1 << 16) * 64 },
1249 .v = { (1 << 16) / 64, (1 << 16) * 64 }) },
1250};
1629 1251
1630static int fimc_probe(struct platform_device *pdev) 1252static int fimc_probe(struct platform_device *pdev)
1631{ 1253{
1254 const struct drm_exynos_ipp_limit *limits;
1255 struct exynos_drm_ipp_formats *formats;
1632 struct device *dev = &pdev->dev; 1256 struct device *dev = &pdev->dev;
1633 struct fimc_context *ctx; 1257 struct fimc_context *ctx;
1634 struct resource *res; 1258 struct resource *res;
1635 struct exynos_drm_ippdrv *ippdrv;
1636 int ret; 1259 int ret;
1260 int i, j, num_limits, num_formats;
1637 1261
1638 if (!dev->of_node) { 1262 if (exynos_drm_check_fimc_device(dev) != 0)
1639 dev_err(dev, "device tree node not found.\n");
1640 return -ENODEV; 1263 return -ENODEV;
1641 }
1642 1264
1643 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); 1265 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
1644 if (!ctx) 1266 if (!ctx)
1645 return -ENOMEM; 1267 return -ENOMEM;
1646 1268
1647 ctx->ippdrv.dev = dev; 1269 ctx->dev = dev;
1270 ctx->id = of_alias_get_id(dev->of_node, "fimc");
1648 1271
1649 ret = fimc_parse_dt(ctx); 1272 /* construct formats/limits array */
1650 if (ret < 0) 1273 num_formats = ARRAY_SIZE(fimc_formats) + ARRAY_SIZE(fimc_tiled_formats);
1651 return ret; 1274 formats = devm_kzalloc(dev, sizeof(*formats) * num_formats, GFP_KERNEL);
1275 if (!formats)
1276 return -ENOMEM;
1277
1278 /* linear formats */
1279 if (ctx->id < 3) {
1280 limits = fimc_4210_limits_v1;
1281 num_limits = ARRAY_SIZE(fimc_4210_limits_v1);
1282 } else {
1283 limits = fimc_4210_limits_v2;
1284 num_limits = ARRAY_SIZE(fimc_4210_limits_v2);
1285 }
1286 for (i = 0; i < ARRAY_SIZE(fimc_formats); i++) {
1287 formats[i].fourcc = fimc_formats[i];
1288 formats[i].type = DRM_EXYNOS_IPP_FORMAT_SOURCE |
1289 DRM_EXYNOS_IPP_FORMAT_DESTINATION;
1290 formats[i].limits = limits;
1291 formats[i].num_limits = num_limits;
1292 }
1652 1293
1653 ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node, 1294 /* tiled formats */
1654 "samsung,sysreg"); 1295 if (ctx->id < 3) {
1655 if (IS_ERR(ctx->sysreg)) { 1296 limits = fimc_4210_limits_tiled_v1;
1656 dev_err(dev, "syscon regmap lookup failed.\n"); 1297 num_limits = ARRAY_SIZE(fimc_4210_limits_tiled_v1);
1657 return PTR_ERR(ctx->sysreg); 1298 } else {
1299 limits = fimc_4210_limits_tiled_v2;
1300 num_limits = ARRAY_SIZE(fimc_4210_limits_tiled_v2);
1658 } 1301 }
1302 for (j = i, i = 0; i < ARRAY_SIZE(fimc_tiled_formats); j++, i++) {
1303 formats[j].fourcc = fimc_tiled_formats[i];
1304 formats[j].modifier = DRM_FORMAT_MOD_SAMSUNG_64_32_TILE;
1305 formats[j].type = DRM_EXYNOS_IPP_FORMAT_SOURCE |
1306 DRM_EXYNOS_IPP_FORMAT_DESTINATION;
1307 formats[j].limits = limits;
1308 formats[j].num_limits = num_limits;
1309 }
1310
1311 ctx->formats = formats;
1312 ctx->num_formats = num_formats;
1659 1313
1660 /* resource memory */ 1314 /* resource memory */
1661 ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1315 ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1670,9 +1324,8 @@ static int fimc_probe(struct platform_device *pdev)
1670 return -ENOENT; 1324 return -ENOENT;
1671 } 1325 }
1672 1326
1673 ctx->irq = res->start; 1327 ret = devm_request_irq(dev, res->start, fimc_irq_handler,
1674 ret = devm_request_threaded_irq(dev, ctx->irq, NULL, fimc_irq_handler, 1328 0, dev_name(dev), ctx);
1675 IRQF_ONESHOT, "drm_fimc", ctx);
1676 if (ret < 0) { 1329 if (ret < 0) {
1677 dev_err(dev, "failed to request irq.\n"); 1330 dev_err(dev, "failed to request irq.\n");
1678 return ret; 1331 return ret;
@@ -1682,39 +1335,24 @@ static int fimc_probe(struct platform_device *pdev)
1682 if (ret < 0) 1335 if (ret < 0)
1683 return ret; 1336 return ret;
1684 1337
1685 ippdrv = &ctx->ippdrv;
1686 ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops;
1687 ippdrv->ops[EXYNOS_DRM_OPS_DST] = &fimc_dst_ops;
1688 ippdrv->check_property = fimc_ippdrv_check_property;
1689 ippdrv->reset = fimc_ippdrv_reset;
1690 ippdrv->start = fimc_ippdrv_start;
1691 ippdrv->stop = fimc_ippdrv_stop;
1692 ret = fimc_init_prop_list(ippdrv);
1693 if (ret < 0) {
1694 dev_err(dev, "failed to init property list.\n");
1695 goto err_put_clk;
1696 }
1697
1698 DRM_DEBUG_KMS("id[%d]ippdrv[%pK]\n", ctx->id, ippdrv);
1699
1700 spin_lock_init(&ctx->lock); 1338 spin_lock_init(&ctx->lock);
1701 platform_set_drvdata(pdev, ctx); 1339 platform_set_drvdata(pdev, ctx);
1702 1340
1341 pm_runtime_use_autosuspend(dev);
1342 pm_runtime_set_autosuspend_delay(dev, FIMC_AUTOSUSPEND_DELAY);
1703 pm_runtime_enable(dev); 1343 pm_runtime_enable(dev);
1704 1344
1705 ret = exynos_drm_ippdrv_register(ippdrv); 1345 ret = component_add(dev, &fimc_component_ops);
1706 if (ret < 0) { 1346 if (ret)
1707 dev_err(dev, "failed to register drm fimc device.\n");
1708 goto err_pm_dis; 1347 goto err_pm_dis;
1709 }
1710 1348
1711 dev_info(dev, "drm fimc registered successfully.\n"); 1349 dev_info(dev, "drm fimc registered successfully.\n");
1712 1350
1713 return 0; 1351 return 0;
1714 1352
1715err_pm_dis: 1353err_pm_dis:
1354 pm_runtime_dont_use_autosuspend(dev);
1716 pm_runtime_disable(dev); 1355 pm_runtime_disable(dev);
1717err_put_clk:
1718 fimc_put_clocks(ctx); 1356 fimc_put_clocks(ctx);
1719 1357
1720 return ret; 1358 return ret;
@@ -1724,42 +1362,24 @@ static int fimc_remove(struct platform_device *pdev)
1724{ 1362{
1725 struct device *dev = &pdev->dev; 1363 struct device *dev = &pdev->dev;
1726 struct fimc_context *ctx = get_fimc_context(dev); 1364 struct fimc_context *ctx = get_fimc_context(dev);
1727 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1728 1365
1729 exynos_drm_ippdrv_unregister(ippdrv); 1366 component_del(dev, &fimc_component_ops);
1367 pm_runtime_dont_use_autosuspend(dev);
1368 pm_runtime_disable(dev);
1730 1369
1731 fimc_put_clocks(ctx); 1370 fimc_put_clocks(ctx);
1732 pm_runtime_set_suspended(dev);
1733 pm_runtime_disable(dev);
1734 1371
1735 return 0; 1372 return 0;
1736} 1373}
1737 1374
1738#ifdef CONFIG_PM 1375#ifdef CONFIG_PM
1739static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
1740{
1741 DRM_DEBUG_KMS("enable[%d]\n", enable);
1742
1743 if (enable) {
1744 clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
1745 clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]);
1746 ctx->suspended = false;
1747 } else {
1748 clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
1749 clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]);
1750 ctx->suspended = true;
1751 }
1752
1753 return 0;
1754}
1755
1756static int fimc_runtime_suspend(struct device *dev) 1376static int fimc_runtime_suspend(struct device *dev)
1757{ 1377{
1758 struct fimc_context *ctx = get_fimc_context(dev); 1378 struct fimc_context *ctx = get_fimc_context(dev);
1759 1379
1760 DRM_DEBUG_KMS("id[%d]\n", ctx->id); 1380 DRM_DEBUG_KMS("id[%d]\n", ctx->id);
1761 1381 clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
1762 return fimc_clk_ctrl(ctx, false); 1382 return 0;
1763} 1383}
1764 1384
1765static int fimc_runtime_resume(struct device *dev) 1385static int fimc_runtime_resume(struct device *dev)
@@ -1767,8 +1387,7 @@ static int fimc_runtime_resume(struct device *dev)
1767 struct fimc_context *ctx = get_fimc_context(dev); 1387 struct fimc_context *ctx = get_fimc_context(dev);
1768 1388
1769 DRM_DEBUG_KMS("id[%d]\n", ctx->id); 1389 DRM_DEBUG_KMS("id[%d]\n", ctx->id);
1770 1390 return clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
1771 return fimc_clk_ctrl(ctx, true);
1772} 1391}
1773#endif 1392#endif
1774 1393
@@ -1795,4 +1414,3 @@ struct platform_driver fimc_driver = {
1795 .pm = &fimc_pm_ops, 1414 .pm = &fimc_pm_ops,
1796 }, 1415 },
1797}; 1416};
1798
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.h b/drivers/gpu/drm/exynos/exynos_drm_fimc.h
deleted file mode 100644
index 127a424c5fdf..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.h
+++ /dev/null
@@ -1,23 +0,0 @@
1/*
2 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
3 *
4 * Authors:
5 * Eunchul Kim <chulspro.kim@samsung.com>
6 * Jinyoung Jeon <jy0.jeon@samsung.com>
7 * Sangmin Lee <lsmin.lee@samsung.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 */
14
15#ifndef _EXYNOS_DRM_FIMC_H_
16#define _EXYNOS_DRM_FIMC_H_
17
18/*
19 * TODO
20 * FIMD output interface notifier callback.
21 */
22
23#endif /* _EXYNOS_DRM_FIMC_H_ */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index d42ae2bc3e56..01b1570d0c3a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -121,6 +121,12 @@ static struct fimd_driver_data s3c64xx_fimd_driver_data = {
121 .has_limited_fmt = 1, 121 .has_limited_fmt = 1,
122}; 122};
123 123
124static struct fimd_driver_data s5pv210_fimd_driver_data = {
125 .timing_base = 0x0,
126 .has_shadowcon = 1,
127 .has_clksel = 1,
128};
129
124static struct fimd_driver_data exynos3_fimd_driver_data = { 130static struct fimd_driver_data exynos3_fimd_driver_data = {
125 .timing_base = 0x20000, 131 .timing_base = 0x20000,
126 .lcdblk_offset = 0x210, 132 .lcdblk_offset = 0x210,
@@ -193,6 +199,8 @@ struct fimd_context {
193static const struct of_device_id fimd_driver_dt_match[] = { 199static const struct of_device_id fimd_driver_dt_match[] = {
194 { .compatible = "samsung,s3c6400-fimd", 200 { .compatible = "samsung,s3c6400-fimd",
195 .data = &s3c64xx_fimd_driver_data }, 201 .data = &s3c64xx_fimd_driver_data },
202 { .compatible = "samsung,s5pv210-fimd",
203 .data = &s5pv210_fimd_driver_data },
196 { .compatible = "samsung,exynos3250-fimd", 204 { .compatible = "samsung,exynos3250-fimd",
197 .data = &exynos3_fimd_driver_data }, 205 .data = &exynos3_fimd_driver_data },
198 { .compatible = "samsung,exynos4210-fimd", 206 { .compatible = "samsung,exynos4210-fimd",
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 11cc01b47bc0..6e1494fa71b4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -431,37 +431,24 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
431 return 0; 431 return 0;
432} 432}
433 433
434int exynos_drm_gem_fault(struct vm_fault *vmf) 434vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf)
435{ 435{
436 struct vm_area_struct *vma = vmf->vma; 436 struct vm_area_struct *vma = vmf->vma;
437 struct drm_gem_object *obj = vma->vm_private_data; 437 struct drm_gem_object *obj = vma->vm_private_data;
438 struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj); 438 struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
439 unsigned long pfn; 439 unsigned long pfn;
440 pgoff_t page_offset; 440 pgoff_t page_offset;
441 int ret;
442 441
443 page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; 442 page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
444 443
445 if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) { 444 if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
446 DRM_ERROR("invalid page offset\n"); 445 DRM_ERROR("invalid page offset\n");
447 ret = -EINVAL; 446 return VM_FAULT_SIGBUS;
448 goto out;
449 } 447 }
450 448
451 pfn = page_to_pfn(exynos_gem->pages[page_offset]); 449 pfn = page_to_pfn(exynos_gem->pages[page_offset]);
452 ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV)); 450 return vmf_insert_mixed(vma, vmf->address,
453 451 __pfn_to_pfn_t(pfn, PFN_DEV));
454out:
455 switch (ret) {
456 case 0:
457 case -ERESTARTSYS:
458 case -EINTR:
459 return VM_FAULT_NOPAGE;
460 case -ENOMEM:
461 return VM_FAULT_OOM;
462 default:
463 return VM_FAULT_SIGBUS;
464 }
465} 452}
466 453
467static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj, 454static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 5a4c7de80f65..9057d7f1d6ed 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -13,6 +13,7 @@
13#define _EXYNOS_DRM_GEM_H_ 13#define _EXYNOS_DRM_GEM_H_
14 14
15#include <drm/drm_gem.h> 15#include <drm/drm_gem.h>
16#include <linux/mm_types.h>
16 17
17#define to_exynos_gem(x) container_of(x, struct exynos_drm_gem, base) 18#define to_exynos_gem(x) container_of(x, struct exynos_drm_gem, base)
18 19
@@ -111,7 +112,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
111 struct drm_mode_create_dumb *args); 112 struct drm_mode_create_dumb *args);
112 113
113/* page fault handler and mmap fault address(virtual) to physical memory. */ 114/* page fault handler and mmap fault address(virtual) to physical memory. */
114int exynos_drm_gem_fault(struct vm_fault *vmf); 115vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf);
115 116
116/* set vm_flags and we can change the vm attribute to other one at here. */ 117/* set vm_flags and we can change the vm attribute to other one at here. */
117int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); 118int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 0506b2b17ac1..e99dd1e4ba65 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -12,18 +12,20 @@
12 * 12 *
13 */ 13 */
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/component.h>
15#include <linux/platform_device.h> 16#include <linux/platform_device.h>
16#include <linux/clk.h> 17#include <linux/clk.h>
17#include <linux/pm_runtime.h> 18#include <linux/pm_runtime.h>
18#include <linux/mfd/syscon.h> 19#include <linux/mfd/syscon.h>
20#include <linux/of_device.h>
19#include <linux/regmap.h> 21#include <linux/regmap.h>
20 22
21#include <drm/drmP.h> 23#include <drm/drmP.h>
22#include <drm/exynos_drm.h> 24#include <drm/exynos_drm.h>
23#include "regs-gsc.h" 25#include "regs-gsc.h"
24#include "exynos_drm_drv.h" 26#include "exynos_drm_drv.h"
27#include "exynos_drm_iommu.h"
25#include "exynos_drm_ipp.h" 28#include "exynos_drm_ipp.h"
26#include "exynos_drm_gsc.h"
27 29
28/* 30/*
29 * GSC stands for General SCaler and 31 * GSC stands for General SCaler and
@@ -31,26 +33,10 @@
31 * input DMA reads image data from the memory. 33 * input DMA reads image data from the memory.
32 * output DMA writes image data to memory. 34 * output DMA writes image data to memory.
33 * GSC supports image rotation and image effect functions. 35 * GSC supports image rotation and image effect functions.
34 *
35 * M2M operation : supports crop/scale/rotation/csc so on.
36 * Memory ----> GSC H/W ----> Memory.
37 * Writeback operation : supports cloned screen with FIMD.
38 * FIMD ----> GSC H/W ----> Memory.
39 * Output operation : supports direct display using local path.
40 * Memory ----> GSC H/W ----> FIMD, Mixer.
41 */ 36 */
42 37
43/*
44 * TODO
45 * 1. check suspend/resume api if needed.
46 * 2. need to check use case platform_device_id.
47 * 3. check src/dst size with, height.
48 * 4. added check_prepare api for right register.
49 * 5. need to add supported list in prop_list.
50 * 6. check prescaler/scaler optimization.
51 */
52 38
53#define GSC_MAX_DEVS 4 39#define GSC_MAX_CLOCKS 8
54#define GSC_MAX_SRC 4 40#define GSC_MAX_SRC 4
55#define GSC_MAX_DST 16 41#define GSC_MAX_DST 16
56#define GSC_RESET_TIMEOUT 50 42#define GSC_RESET_TIMEOUT 50
@@ -65,8 +51,6 @@
65#define GSC_SC_DOWN_RATIO_4_8 131072 51#define GSC_SC_DOWN_RATIO_4_8 131072
66#define GSC_SC_DOWN_RATIO_3_8 174762 52#define GSC_SC_DOWN_RATIO_3_8 174762
67#define GSC_SC_DOWN_RATIO_2_8 262144 53#define GSC_SC_DOWN_RATIO_2_8 262144
68#define GSC_REFRESH_MIN 12
69#define GSC_REFRESH_MAX 60
70#define GSC_CROP_MAX 8192 54#define GSC_CROP_MAX 8192
71#define GSC_CROP_MIN 32 55#define GSC_CROP_MIN 32
72#define GSC_SCALE_MAX 4224 56#define GSC_SCALE_MAX 4224
@@ -77,10 +61,9 @@
77#define GSC_COEF_H_8T 8 61#define GSC_COEF_H_8T 8
78#define GSC_COEF_V_4T 4 62#define GSC_COEF_V_4T 4
79#define GSC_COEF_DEPTH 3 63#define GSC_COEF_DEPTH 3
64#define GSC_AUTOSUSPEND_DELAY 2000
80 65
81#define get_gsc_context(dev) platform_get_drvdata(to_platform_device(dev)) 66#define get_gsc_context(dev) platform_get_drvdata(to_platform_device(dev))
82#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
83 struct gsc_context, ippdrv);
84#define gsc_read(offset) readl(ctx->regs + (offset)) 67#define gsc_read(offset) readl(ctx->regs + (offset))
85#define gsc_write(cfg, offset) writel(cfg, ctx->regs + (offset)) 68#define gsc_write(cfg, offset) writel(cfg, ctx->regs + (offset))
86 69
@@ -104,50 +87,47 @@ struct gsc_scaler {
104}; 87};
105 88
106/* 89/*
107 * A structure of scaler capability.
108 *
109 * find user manual 49.2 features.
110 * @tile_w: tile mode or rotation width.
111 * @tile_h: tile mode or rotation height.
112 * @w: other cases width.
113 * @h: other cases height.
114 */
115struct gsc_capability {
116 /* tile or rotation */
117 u32 tile_w;
118 u32 tile_h;
119 /* other cases */
120 u32 w;
121 u32 h;
122};
123
124/*
125 * A structure of gsc context. 90 * A structure of gsc context.
126 * 91 *
127 * @ippdrv: prepare initialization using ippdrv.
128 * @regs_res: register resources. 92 * @regs_res: register resources.
129 * @regs: memory mapped io registers. 93 * @regs: memory mapped io registers.
130 * @sysreg: handle to SYSREG block regmap.
131 * @lock: locking of operations.
132 * @gsc_clk: gsc gate clock. 94 * @gsc_clk: gsc gate clock.
133 * @sc: scaler infomations. 95 * @sc: scaler infomations.
134 * @id: gsc id. 96 * @id: gsc id.
135 * @irq: irq number. 97 * @irq: irq number.
136 * @rotation: supports rotation of src. 98 * @rotation: supports rotation of src.
137 * @suspended: qos operations.
138 */ 99 */
139struct gsc_context { 100struct gsc_context {
140 struct exynos_drm_ippdrv ippdrv; 101 struct exynos_drm_ipp ipp;
102 struct drm_device *drm_dev;
103 struct device *dev;
104 struct exynos_drm_ipp_task *task;
105 struct exynos_drm_ipp_formats *formats;
106 unsigned int num_formats;
107
141 struct resource *regs_res; 108 struct resource *regs_res;
142 void __iomem *regs; 109 void __iomem *regs;
143 struct regmap *sysreg; 110 const char **clk_names;
144 struct mutex lock; 111 struct clk *clocks[GSC_MAX_CLOCKS];
145 struct clk *gsc_clk; 112 int num_clocks;
146 struct gsc_scaler sc; 113 struct gsc_scaler sc;
147 int id; 114 int id;
148 int irq; 115 int irq;
149 bool rotation; 116 bool rotation;
150 bool suspended; 117};
118
119/**
120 * struct gsc_driverdata - per device type driver data for init time.
121 *
122 * @limits: picture size limits array
123 * @clk_names: names of clocks needed by this variant
124 * @num_clocks: the number of clocks needed by this variant
125 */
126struct gsc_driverdata {
127 const struct drm_exynos_ipp_limit *limits;
128 int num_limits;
129 const char *clk_names[GSC_MAX_CLOCKS];
130 int num_clocks;
151}; 131};
152 132
153/* 8-tap Filter Coefficient */ 133/* 8-tap Filter Coefficient */
@@ -438,25 +418,6 @@ static int gsc_sw_reset(struct gsc_context *ctx)
438 return 0; 418 return 0;
439} 419}
440 420
441static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable)
442{
443 unsigned int gscblk_cfg;
444
445 if (!ctx->sysreg)
446 return;
447
448 regmap_read(ctx->sysreg, SYSREG_GSCBLK_CFG1, &gscblk_cfg);
449
450 if (enable)
451 gscblk_cfg |= GSC_BLK_DISP1WB_DEST(ctx->id) |
452 GSC_BLK_GSCL_WB_IN_SRC_SEL(ctx->id) |
453 GSC_BLK_SW_RESET_WB_DEST(ctx->id);
454 else
455 gscblk_cfg |= GSC_BLK_PXLASYNC_LO_MASK_WB(ctx->id);
456
457 regmap_write(ctx->sysreg, SYSREG_GSCBLK_CFG1, gscblk_cfg);
458}
459
460static void gsc_handle_irq(struct gsc_context *ctx, bool enable, 421static void gsc_handle_irq(struct gsc_context *ctx, bool enable,
461 bool overflow, bool done) 422 bool overflow, bool done)
462{ 423{
@@ -487,10 +448,8 @@ static void gsc_handle_irq(struct gsc_context *ctx, bool enable,
487} 448}
488 449
489 450
490static int gsc_src_set_fmt(struct device *dev, u32 fmt) 451static void gsc_src_set_fmt(struct gsc_context *ctx, u32 fmt)
491{ 452{
492 struct gsc_context *ctx = get_gsc_context(dev);
493 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
494 u32 cfg; 453 u32 cfg;
495 454
496 DRM_DEBUG_KMS("fmt[0x%x]\n", fmt); 455 DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
@@ -506,6 +465,7 @@ static int gsc_src_set_fmt(struct device *dev, u32 fmt)
506 cfg |= GSC_IN_RGB565; 465 cfg |= GSC_IN_RGB565;
507 break; 466 break;
508 case DRM_FORMAT_XRGB8888: 467 case DRM_FORMAT_XRGB8888:
468 case DRM_FORMAT_ARGB8888:
509 cfg |= GSC_IN_XRGB8888; 469 cfg |= GSC_IN_XRGB8888;
510 break; 470 break;
511 case DRM_FORMAT_BGRX8888: 471 case DRM_FORMAT_BGRX8888:
@@ -548,115 +508,84 @@ static int gsc_src_set_fmt(struct device *dev, u32 fmt)
548 cfg |= (GSC_IN_CHROMA_ORDER_CBCR | 508 cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
549 GSC_IN_YUV420_2P); 509 GSC_IN_YUV420_2P);
550 break; 510 break;
551 default:
552 dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
553 return -EINVAL;
554 } 511 }
555 512
556 gsc_write(cfg, GSC_IN_CON); 513 gsc_write(cfg, GSC_IN_CON);
557
558 return 0;
559} 514}
560 515
561static int gsc_src_set_transf(struct device *dev, 516static void gsc_src_set_transf(struct gsc_context *ctx, unsigned int rotation)
562 enum drm_exynos_degree degree,
563 enum drm_exynos_flip flip, bool *swap)
564{ 517{
565 struct gsc_context *ctx = get_gsc_context(dev); 518 unsigned int degree = rotation & DRM_MODE_ROTATE_MASK;
566 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
567 u32 cfg; 519 u32 cfg;
568 520
569 DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip);
570
571 cfg = gsc_read(GSC_IN_CON); 521 cfg = gsc_read(GSC_IN_CON);
572 cfg &= ~GSC_IN_ROT_MASK; 522 cfg &= ~GSC_IN_ROT_MASK;
573 523
574 switch (degree) { 524 switch (degree) {
575 case EXYNOS_DRM_DEGREE_0: 525 case DRM_MODE_ROTATE_0:
576 if (flip & EXYNOS_DRM_FLIP_VERTICAL) 526 if (rotation & DRM_MODE_REFLECT_Y)
577 cfg |= GSC_IN_ROT_XFLIP; 527 cfg |= GSC_IN_ROT_XFLIP;
578 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) 528 if (rotation & DRM_MODE_REFLECT_X)
579 cfg |= GSC_IN_ROT_YFLIP; 529 cfg |= GSC_IN_ROT_YFLIP;
580 break; 530 break;
581 case EXYNOS_DRM_DEGREE_90: 531 case DRM_MODE_ROTATE_90:
582 if (flip & EXYNOS_DRM_FLIP_VERTICAL) 532 cfg |= GSC_IN_ROT_90;
583 cfg |= GSC_IN_ROT_90_XFLIP; 533 if (rotation & DRM_MODE_REFLECT_Y)
584 else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) 534 cfg |= GSC_IN_ROT_XFLIP;
585 cfg |= GSC_IN_ROT_90_YFLIP; 535 if (rotation & DRM_MODE_REFLECT_X)
586 else 536 cfg |= GSC_IN_ROT_YFLIP;
587 cfg |= GSC_IN_ROT_90;
588 break; 537 break;
589 case EXYNOS_DRM_DEGREE_180: 538 case DRM_MODE_ROTATE_180:
590 cfg |= GSC_IN_ROT_180; 539 cfg |= GSC_IN_ROT_180;
591 if (flip & EXYNOS_DRM_FLIP_VERTICAL) 540 if (rotation & DRM_MODE_REFLECT_Y)
592 cfg &= ~GSC_IN_ROT_XFLIP; 541 cfg &= ~GSC_IN_ROT_XFLIP;
593 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) 542 if (rotation & DRM_MODE_REFLECT_X)
594 cfg &= ~GSC_IN_ROT_YFLIP; 543 cfg &= ~GSC_IN_ROT_YFLIP;
595 break; 544 break;
596 case EXYNOS_DRM_DEGREE_270: 545 case DRM_MODE_ROTATE_270:
597 cfg |= GSC_IN_ROT_270; 546 cfg |= GSC_IN_ROT_270;
598 if (flip & EXYNOS_DRM_FLIP_VERTICAL) 547 if (rotation & DRM_MODE_REFLECT_Y)
599 cfg &= ~GSC_IN_ROT_XFLIP; 548 cfg &= ~GSC_IN_ROT_XFLIP;
600 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) 549 if (rotation & DRM_MODE_REFLECT_X)
601 cfg &= ~GSC_IN_ROT_YFLIP; 550 cfg &= ~GSC_IN_ROT_YFLIP;
602 break; 551 break;
603 default:
604 dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
605 return -EINVAL;
606 } 552 }
607 553
608 gsc_write(cfg, GSC_IN_CON); 554 gsc_write(cfg, GSC_IN_CON);
609 555
610 ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0; 556 ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
611 *swap = ctx->rotation;
612
613 return 0;
614} 557}
615 558
616static int gsc_src_set_size(struct device *dev, int swap, 559static void gsc_src_set_size(struct gsc_context *ctx,
617 struct drm_exynos_pos *pos, struct drm_exynos_sz *sz) 560 struct exynos_drm_ipp_buffer *buf)
618{ 561{
619 struct gsc_context *ctx = get_gsc_context(dev);
620 struct drm_exynos_pos img_pos = *pos;
621 struct gsc_scaler *sc = &ctx->sc; 562 struct gsc_scaler *sc = &ctx->sc;
622 u32 cfg; 563 u32 cfg;
623 564
624 DRM_DEBUG_KMS("swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
625 swap, pos->x, pos->y, pos->w, pos->h);
626
627 if (swap) {
628 img_pos.w = pos->h;
629 img_pos.h = pos->w;
630 }
631
632 /* pixel offset */ 565 /* pixel offset */
633 cfg = (GSC_SRCIMG_OFFSET_X(img_pos.x) | 566 cfg = (GSC_SRCIMG_OFFSET_X(buf->rect.x) |
634 GSC_SRCIMG_OFFSET_Y(img_pos.y)); 567 GSC_SRCIMG_OFFSET_Y(buf->rect.y));
635 gsc_write(cfg, GSC_SRCIMG_OFFSET); 568 gsc_write(cfg, GSC_SRCIMG_OFFSET);
636 569
637 /* cropped size */ 570 /* cropped size */
638 cfg = (GSC_CROPPED_WIDTH(img_pos.w) | 571 cfg = (GSC_CROPPED_WIDTH(buf->rect.w) |
639 GSC_CROPPED_HEIGHT(img_pos.h)); 572 GSC_CROPPED_HEIGHT(buf->rect.h));
640 gsc_write(cfg, GSC_CROPPED_SIZE); 573 gsc_write(cfg, GSC_CROPPED_SIZE);
641 574
642 DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", sz->hsize, sz->vsize);
643
644 /* original size */ 575 /* original size */
645 cfg = gsc_read(GSC_SRCIMG_SIZE); 576 cfg = gsc_read(GSC_SRCIMG_SIZE);
646 cfg &= ~(GSC_SRCIMG_HEIGHT_MASK | 577 cfg &= ~(GSC_SRCIMG_HEIGHT_MASK |
647 GSC_SRCIMG_WIDTH_MASK); 578 GSC_SRCIMG_WIDTH_MASK);
648 579
649 cfg |= (GSC_SRCIMG_WIDTH(sz->hsize) | 580 cfg |= (GSC_SRCIMG_WIDTH(buf->buf.width) |
650 GSC_SRCIMG_HEIGHT(sz->vsize)); 581 GSC_SRCIMG_HEIGHT(buf->buf.height));
651 582
652 gsc_write(cfg, GSC_SRCIMG_SIZE); 583 gsc_write(cfg, GSC_SRCIMG_SIZE);
653 584
654 cfg = gsc_read(GSC_IN_CON); 585 cfg = gsc_read(GSC_IN_CON);
655 cfg &= ~GSC_IN_RGB_TYPE_MASK; 586 cfg &= ~GSC_IN_RGB_TYPE_MASK;
656 587
657 DRM_DEBUG_KMS("width[%d]range[%d]\n", pos->w, sc->range); 588 if (buf->rect.w >= GSC_WIDTH_ITU_709)
658
659 if (pos->w >= GSC_WIDTH_ITU_709)
660 if (sc->range) 589 if (sc->range)
661 cfg |= GSC_IN_RGB_HD_WIDE; 590 cfg |= GSC_IN_RGB_HD_WIDE;
662 else 591 else
@@ -668,103 +597,39 @@ static int gsc_src_set_size(struct device *dev, int swap,
668 cfg |= GSC_IN_RGB_SD_NARROW; 597 cfg |= GSC_IN_RGB_SD_NARROW;
669 598
670 gsc_write(cfg, GSC_IN_CON); 599 gsc_write(cfg, GSC_IN_CON);
671
672 return 0;
673} 600}
674 601
675static int gsc_src_set_buf_seq(struct gsc_context *ctx, u32 buf_id, 602static void gsc_src_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
676 enum drm_exynos_ipp_buf_type buf_type) 603 bool enqueue)
677{ 604{
678 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 605 bool masked = !enqueue;
679 bool masked;
680 u32 cfg; 606 u32 cfg;
681 u32 mask = 0x00000001 << buf_id; 607 u32 mask = 0x00000001 << buf_id;
682 608
683 DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type);
684
685 /* mask register set */ 609 /* mask register set */
686 cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK); 610 cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
687 611
688 switch (buf_type) {
689 case IPP_BUF_ENQUEUE:
690 masked = false;
691 break;
692 case IPP_BUF_DEQUEUE:
693 masked = true;
694 break;
695 default:
696 dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
697 return -EINVAL;
698 }
699
700 /* sequence id */ 612 /* sequence id */
701 cfg &= ~mask; 613 cfg &= ~mask;
702 cfg |= masked << buf_id; 614 cfg |= masked << buf_id;
703 gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK); 615 gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
704 gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK); 616 gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
705 gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK); 617 gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
706
707 return 0;
708} 618}
709 619
710static int gsc_src_set_addr(struct device *dev, 620static void gsc_src_set_addr(struct gsc_context *ctx, u32 buf_id,
711 struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id, 621 struct exynos_drm_ipp_buffer *buf)
712 enum drm_exynos_ipp_buf_type buf_type)
713{ 622{
714 struct gsc_context *ctx = get_gsc_context(dev);
715 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
716 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
717 struct drm_exynos_ipp_property *property;
718
719 if (!c_node) {
720 DRM_ERROR("failed to get c_node.\n");
721 return -EFAULT;
722 }
723
724 property = &c_node->property;
725
726 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]buf_type[%d]\n",
727 property->prop_id, buf_id, buf_type);
728
729 if (buf_id > GSC_MAX_SRC) {
730 dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
731 return -EINVAL;
732 }
733
734 /* address register set */ 623 /* address register set */
735 switch (buf_type) { 624 gsc_write(buf->dma_addr[0], GSC_IN_BASE_ADDR_Y(buf_id));
736 case IPP_BUF_ENQUEUE: 625 gsc_write(buf->dma_addr[1], GSC_IN_BASE_ADDR_CB(buf_id));
737 gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y], 626 gsc_write(buf->dma_addr[2], GSC_IN_BASE_ADDR_CR(buf_id));
738 GSC_IN_BASE_ADDR_Y(buf_id));
739 gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
740 GSC_IN_BASE_ADDR_CB(buf_id));
741 gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
742 GSC_IN_BASE_ADDR_CR(buf_id));
743 break;
744 case IPP_BUF_DEQUEUE:
745 gsc_write(0x0, GSC_IN_BASE_ADDR_Y(buf_id));
746 gsc_write(0x0, GSC_IN_BASE_ADDR_CB(buf_id));
747 gsc_write(0x0, GSC_IN_BASE_ADDR_CR(buf_id));
748 break;
749 default:
750 /* bypass */
751 break;
752 }
753 627
754 return gsc_src_set_buf_seq(ctx, buf_id, buf_type); 628 gsc_src_set_buf_seq(ctx, buf_id, true);
755} 629}
756 630
757static struct exynos_drm_ipp_ops gsc_src_ops = { 631static void gsc_dst_set_fmt(struct gsc_context *ctx, u32 fmt)
758 .set_fmt = gsc_src_set_fmt,
759 .set_transf = gsc_src_set_transf,
760 .set_size = gsc_src_set_size,
761 .set_addr = gsc_src_set_addr,
762};
763
764static int gsc_dst_set_fmt(struct device *dev, u32 fmt)
765{ 632{
766 struct gsc_context *ctx = get_gsc_context(dev);
767 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
768 u32 cfg; 633 u32 cfg;
769 634
770 DRM_DEBUG_KMS("fmt[0x%x]\n", fmt); 635 DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
@@ -779,8 +644,9 @@ static int gsc_dst_set_fmt(struct device *dev, u32 fmt)
779 case DRM_FORMAT_RGB565: 644 case DRM_FORMAT_RGB565:
780 cfg |= GSC_OUT_RGB565; 645 cfg |= GSC_OUT_RGB565;
781 break; 646 break;
647 case DRM_FORMAT_ARGB8888:
782 case DRM_FORMAT_XRGB8888: 648 case DRM_FORMAT_XRGB8888:
783 cfg |= GSC_OUT_XRGB8888; 649 cfg |= (GSC_OUT_XRGB8888 | GSC_OUT_GLOBAL_ALPHA(0xff));
784 break; 650 break;
785 case DRM_FORMAT_BGRX8888: 651 case DRM_FORMAT_BGRX8888:
786 cfg |= (GSC_OUT_XRGB8888 | GSC_OUT_RB_SWAP); 652 cfg |= (GSC_OUT_XRGB8888 | GSC_OUT_RB_SWAP);
@@ -819,69 +685,9 @@ static int gsc_dst_set_fmt(struct device *dev, u32 fmt)
819 cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | 685 cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
820 GSC_OUT_YUV420_2P); 686 GSC_OUT_YUV420_2P);
821 break; 687 break;
822 default:
823 dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
824 return -EINVAL;
825 } 688 }
826 689
827 gsc_write(cfg, GSC_OUT_CON); 690 gsc_write(cfg, GSC_OUT_CON);
828
829 return 0;
830}
831
832static int gsc_dst_set_transf(struct device *dev,
833 enum drm_exynos_degree degree,
834 enum drm_exynos_flip flip, bool *swap)
835{
836 struct gsc_context *ctx = get_gsc_context(dev);
837 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
838 u32 cfg;
839
840 DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip);
841
842 cfg = gsc_read(GSC_IN_CON);
843 cfg &= ~GSC_IN_ROT_MASK;
844
845 switch (degree) {
846 case EXYNOS_DRM_DEGREE_0:
847 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
848 cfg |= GSC_IN_ROT_XFLIP;
849 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
850 cfg |= GSC_IN_ROT_YFLIP;
851 break;
852 case EXYNOS_DRM_DEGREE_90:
853 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
854 cfg |= GSC_IN_ROT_90_XFLIP;
855 else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
856 cfg |= GSC_IN_ROT_90_YFLIP;
857 else
858 cfg |= GSC_IN_ROT_90;
859 break;
860 case EXYNOS_DRM_DEGREE_180:
861 cfg |= GSC_IN_ROT_180;
862 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
863 cfg &= ~GSC_IN_ROT_XFLIP;
864 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
865 cfg &= ~GSC_IN_ROT_YFLIP;
866 break;
867 case EXYNOS_DRM_DEGREE_270:
868 cfg |= GSC_IN_ROT_270;
869 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
870 cfg &= ~GSC_IN_ROT_XFLIP;
871 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
872 cfg &= ~GSC_IN_ROT_YFLIP;
873 break;
874 default:
875 dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
876 return -EINVAL;
877 }
878
879 gsc_write(cfg, GSC_IN_CON);
880
881 ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
882 *swap = ctx->rotation;
883
884 return 0;
885} 691}
886 692
887static int gsc_get_ratio_shift(u32 src, u32 dst, u32 *ratio) 693static int gsc_get_ratio_shift(u32 src, u32 dst, u32 *ratio)
@@ -919,9 +725,9 @@ static void gsc_get_prescaler_shfactor(u32 hratio, u32 vratio, u32 *shfactor)
919} 725}
920 726
921static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc, 727static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc,
922 struct drm_exynos_pos *src, struct drm_exynos_pos *dst) 728 struct drm_exynos_ipp_task_rect *src,
729 struct drm_exynos_ipp_task_rect *dst)
923{ 730{
924 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
925 u32 cfg; 731 u32 cfg;
926 u32 src_w, src_h, dst_w, dst_h; 732 u32 src_w, src_h, dst_w, dst_h;
927 int ret = 0; 733 int ret = 0;
@@ -939,13 +745,13 @@ static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc,
939 745
940 ret = gsc_get_ratio_shift(src_w, dst_w, &sc->pre_hratio); 746 ret = gsc_get_ratio_shift(src_w, dst_w, &sc->pre_hratio);
941 if (ret) { 747 if (ret) {
942 dev_err(ippdrv->dev, "failed to get ratio horizontal.\n"); 748 dev_err(ctx->dev, "failed to get ratio horizontal.\n");
943 return ret; 749 return ret;
944 } 750 }
945 751
946 ret = gsc_get_ratio_shift(src_h, dst_h, &sc->pre_vratio); 752 ret = gsc_get_ratio_shift(src_h, dst_h, &sc->pre_vratio);
947 if (ret) { 753 if (ret) {
948 dev_err(ippdrv->dev, "failed to get ratio vertical.\n"); 754 dev_err(ctx->dev, "failed to get ratio vertical.\n");
949 return ret; 755 return ret;
950 } 756 }
951 757
@@ -1039,47 +845,37 @@ static void gsc_set_scaler(struct gsc_context *ctx, struct gsc_scaler *sc)
1039 gsc_write(cfg, GSC_MAIN_V_RATIO); 845 gsc_write(cfg, GSC_MAIN_V_RATIO);
1040} 846}
1041 847
1042static int gsc_dst_set_size(struct device *dev, int swap, 848static void gsc_dst_set_size(struct gsc_context *ctx,
1043 struct drm_exynos_pos *pos, struct drm_exynos_sz *sz) 849 struct exynos_drm_ipp_buffer *buf)
1044{ 850{
1045 struct gsc_context *ctx = get_gsc_context(dev);
1046 struct drm_exynos_pos img_pos = *pos;
1047 struct gsc_scaler *sc = &ctx->sc; 851 struct gsc_scaler *sc = &ctx->sc;
1048 u32 cfg; 852 u32 cfg;
1049 853
1050 DRM_DEBUG_KMS("swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
1051 swap, pos->x, pos->y, pos->w, pos->h);
1052
1053 if (swap) {
1054 img_pos.w = pos->h;
1055 img_pos.h = pos->w;
1056 }
1057
1058 /* pixel offset */ 854 /* pixel offset */
1059 cfg = (GSC_DSTIMG_OFFSET_X(pos->x) | 855 cfg = (GSC_DSTIMG_OFFSET_X(buf->rect.x) |
1060 GSC_DSTIMG_OFFSET_Y(pos->y)); 856 GSC_DSTIMG_OFFSET_Y(buf->rect.y));
1061 gsc_write(cfg, GSC_DSTIMG_OFFSET); 857 gsc_write(cfg, GSC_DSTIMG_OFFSET);
1062 858
1063 /* scaled size */ 859 /* scaled size */
1064 cfg = (GSC_SCALED_WIDTH(img_pos.w) | GSC_SCALED_HEIGHT(img_pos.h)); 860 if (ctx->rotation)
861 cfg = (GSC_SCALED_WIDTH(buf->rect.h) |
862 GSC_SCALED_HEIGHT(buf->rect.w));
863 else
864 cfg = (GSC_SCALED_WIDTH(buf->rect.w) |
865 GSC_SCALED_HEIGHT(buf->rect.h));
1065 gsc_write(cfg, GSC_SCALED_SIZE); 866 gsc_write(cfg, GSC_SCALED_SIZE);
1066 867
1067 DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", sz->hsize, sz->vsize);
1068
1069 /* original size */ 868 /* original size */
1070 cfg = gsc_read(GSC_DSTIMG_SIZE); 869 cfg = gsc_read(GSC_DSTIMG_SIZE);
1071 cfg &= ~(GSC_DSTIMG_HEIGHT_MASK | 870 cfg &= ~(GSC_DSTIMG_HEIGHT_MASK | GSC_DSTIMG_WIDTH_MASK);
1072 GSC_DSTIMG_WIDTH_MASK); 871 cfg |= GSC_DSTIMG_WIDTH(buf->buf.width) |
1073 cfg |= (GSC_DSTIMG_WIDTH(sz->hsize) | 872 GSC_DSTIMG_HEIGHT(buf->buf.height);
1074 GSC_DSTIMG_HEIGHT(sz->vsize));
1075 gsc_write(cfg, GSC_DSTIMG_SIZE); 873 gsc_write(cfg, GSC_DSTIMG_SIZE);
1076 874
1077 cfg = gsc_read(GSC_OUT_CON); 875 cfg = gsc_read(GSC_OUT_CON);
1078 cfg &= ~GSC_OUT_RGB_TYPE_MASK; 876 cfg &= ~GSC_OUT_RGB_TYPE_MASK;
1079 877
1080 DRM_DEBUG_KMS("width[%d]range[%d]\n", pos->w, sc->range); 878 if (buf->rect.w >= GSC_WIDTH_ITU_709)
1081
1082 if (pos->w >= GSC_WIDTH_ITU_709)
1083 if (sc->range) 879 if (sc->range)
1084 cfg |= GSC_OUT_RGB_HD_WIDE; 880 cfg |= GSC_OUT_RGB_HD_WIDE;
1085 else 881 else
@@ -1091,8 +887,6 @@ static int gsc_dst_set_size(struct device *dev, int swap,
1091 cfg |= GSC_OUT_RGB_SD_NARROW; 887 cfg |= GSC_OUT_RGB_SD_NARROW;
1092 888
1093 gsc_write(cfg, GSC_OUT_CON); 889 gsc_write(cfg, GSC_OUT_CON);
1094
1095 return 0;
1096} 890}
1097 891
1098static int gsc_dst_get_buf_seq(struct gsc_context *ctx) 892static int gsc_dst_get_buf_seq(struct gsc_context *ctx)
@@ -1111,35 +905,16 @@ static int gsc_dst_get_buf_seq(struct gsc_context *ctx)
1111 return buf_num; 905 return buf_num;
1112} 906}
1113 907
1114static int gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id, 908static void gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
1115 enum drm_exynos_ipp_buf_type buf_type) 909 bool enqueue)
1116{ 910{
1117 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 911 bool masked = !enqueue;
1118 bool masked;
1119 u32 cfg; 912 u32 cfg;
1120 u32 mask = 0x00000001 << buf_id; 913 u32 mask = 0x00000001 << buf_id;
1121 int ret = 0;
1122
1123 DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type);
1124
1125 mutex_lock(&ctx->lock);
1126 914
1127 /* mask register set */ 915 /* mask register set */
1128 cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK); 916 cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
1129 917
1130 switch (buf_type) {
1131 case IPP_BUF_ENQUEUE:
1132 masked = false;
1133 break;
1134 case IPP_BUF_DEQUEUE:
1135 masked = true;
1136 break;
1137 default:
1138 dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
1139 ret = -EINVAL;
1140 goto err_unlock;
1141 }
1142
1143 /* sequence id */ 918 /* sequence id */
1144 cfg &= ~mask; 919 cfg &= ~mask;
1145 cfg |= masked << buf_id; 920 cfg |= masked << buf_id;
@@ -1148,94 +923,29 @@ static int gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
1148 gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK); 923 gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
1149 924
1150 /* interrupt enable */ 925 /* interrupt enable */
1151 if (buf_type == IPP_BUF_ENQUEUE && 926 if (enqueue && gsc_dst_get_buf_seq(ctx) >= GSC_BUF_START)
1152 gsc_dst_get_buf_seq(ctx) >= GSC_BUF_START)
1153 gsc_handle_irq(ctx, true, false, true); 927 gsc_handle_irq(ctx, true, false, true);
1154 928
1155 /* interrupt disable */ 929 /* interrupt disable */
1156 if (buf_type == IPP_BUF_DEQUEUE && 930 if (!enqueue && gsc_dst_get_buf_seq(ctx) <= GSC_BUF_STOP)
1157 gsc_dst_get_buf_seq(ctx) <= GSC_BUF_STOP)
1158 gsc_handle_irq(ctx, false, false, true); 931 gsc_handle_irq(ctx, false, false, true);
1159
1160err_unlock:
1161 mutex_unlock(&ctx->lock);
1162 return ret;
1163} 932}
1164 933
1165static int gsc_dst_set_addr(struct device *dev, 934static void gsc_dst_set_addr(struct gsc_context *ctx,
1166 struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id, 935 u32 buf_id, struct exynos_drm_ipp_buffer *buf)
1167 enum drm_exynos_ipp_buf_type buf_type)
1168{ 936{
1169 struct gsc_context *ctx = get_gsc_context(dev);
1170 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1171 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
1172 struct drm_exynos_ipp_property *property;
1173
1174 if (!c_node) {
1175 DRM_ERROR("failed to get c_node.\n");
1176 return -EFAULT;
1177 }
1178
1179 property = &c_node->property;
1180
1181 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]buf_type[%d]\n",
1182 property->prop_id, buf_id, buf_type);
1183
1184 if (buf_id > GSC_MAX_DST) {
1185 dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
1186 return -EINVAL;
1187 }
1188
1189 /* address register set */ 937 /* address register set */
1190 switch (buf_type) { 938 gsc_write(buf->dma_addr[0], GSC_OUT_BASE_ADDR_Y(buf_id));
1191 case IPP_BUF_ENQUEUE: 939 gsc_write(buf->dma_addr[1], GSC_OUT_BASE_ADDR_CB(buf_id));
1192 gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y], 940 gsc_write(buf->dma_addr[2], GSC_OUT_BASE_ADDR_CR(buf_id));
1193 GSC_OUT_BASE_ADDR_Y(buf_id));
1194 gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
1195 GSC_OUT_BASE_ADDR_CB(buf_id));
1196 gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
1197 GSC_OUT_BASE_ADDR_CR(buf_id));
1198 break;
1199 case IPP_BUF_DEQUEUE:
1200 gsc_write(0x0, GSC_OUT_BASE_ADDR_Y(buf_id));
1201 gsc_write(0x0, GSC_OUT_BASE_ADDR_CB(buf_id));
1202 gsc_write(0x0, GSC_OUT_BASE_ADDR_CR(buf_id));
1203 break;
1204 default:
1205 /* bypass */
1206 break;
1207 }
1208 941
1209 return gsc_dst_set_buf_seq(ctx, buf_id, buf_type); 942 gsc_dst_set_buf_seq(ctx, buf_id, true);
1210}
1211
1212static struct exynos_drm_ipp_ops gsc_dst_ops = {
1213 .set_fmt = gsc_dst_set_fmt,
1214 .set_transf = gsc_dst_set_transf,
1215 .set_size = gsc_dst_set_size,
1216 .set_addr = gsc_dst_set_addr,
1217};
1218
1219static int gsc_clk_ctrl(struct gsc_context *ctx, bool enable)
1220{
1221 DRM_DEBUG_KMS("enable[%d]\n", enable);
1222
1223 if (enable) {
1224 clk_prepare_enable(ctx->gsc_clk);
1225 ctx->suspended = false;
1226 } else {
1227 clk_disable_unprepare(ctx->gsc_clk);
1228 ctx->suspended = true;
1229 }
1230
1231 return 0;
1232} 943}
1233 944
1234static int gsc_get_src_buf_index(struct gsc_context *ctx) 945static int gsc_get_src_buf_index(struct gsc_context *ctx)
1235{ 946{
1236 u32 cfg, curr_index, i; 947 u32 cfg, curr_index, i;
1237 u32 buf_id = GSC_MAX_SRC; 948 u32 buf_id = GSC_MAX_SRC;
1238 int ret;
1239 949
1240 DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id); 950 DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id);
1241 951
@@ -1249,19 +959,15 @@ static int gsc_get_src_buf_index(struct gsc_context *ctx)
1249 } 959 }
1250 } 960 }
1251 961
962 DRM_DEBUG_KMS("cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg,
963 curr_index, buf_id);
964
1252 if (buf_id == GSC_MAX_SRC) { 965 if (buf_id == GSC_MAX_SRC) {
1253 DRM_ERROR("failed to get in buffer index.\n"); 966 DRM_ERROR("failed to get in buffer index.\n");
1254 return -EINVAL; 967 return -EINVAL;
1255 } 968 }
1256 969
1257 ret = gsc_src_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE); 970 gsc_src_set_buf_seq(ctx, buf_id, false);
1258 if (ret < 0) {
1259 DRM_ERROR("failed to dequeue.\n");
1260 return ret;
1261 }
1262
1263 DRM_DEBUG_KMS("cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg,
1264 curr_index, buf_id);
1265 971
1266 return buf_id; 972 return buf_id;
1267} 973}
@@ -1270,7 +976,6 @@ static int gsc_get_dst_buf_index(struct gsc_context *ctx)
1270{ 976{
1271 u32 cfg, curr_index, i; 977 u32 cfg, curr_index, i;
1272 u32 buf_id = GSC_MAX_DST; 978 u32 buf_id = GSC_MAX_DST;
1273 int ret;
1274 979
1275 DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id); 980 DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id);
1276 981
@@ -1289,11 +994,7 @@ static int gsc_get_dst_buf_index(struct gsc_context *ctx)
1289 return -EINVAL; 994 return -EINVAL;
1290 } 995 }
1291 996
1292 ret = gsc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE); 997 gsc_dst_set_buf_seq(ctx, buf_id, false);
1293 if (ret < 0) {
1294 DRM_ERROR("failed to dequeue.\n");
1295 return ret;
1296 }
1297 998
1298 DRM_DEBUG_KMS("cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg, 999 DRM_DEBUG_KMS("cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg,
1299 curr_index, buf_id); 1000 curr_index, buf_id);
@@ -1304,215 +1005,55 @@ static int gsc_get_dst_buf_index(struct gsc_context *ctx)
1304static irqreturn_t gsc_irq_handler(int irq, void *dev_id) 1005static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
1305{ 1006{
1306 struct gsc_context *ctx = dev_id; 1007 struct gsc_context *ctx = dev_id;
1307 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1308 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
1309 struct drm_exynos_ipp_event_work *event_work =
1310 c_node->event_work;
1311 u32 status; 1008 u32 status;
1312 int buf_id[EXYNOS_DRM_OPS_MAX]; 1009 int err = 0;
1313 1010
1314 DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id); 1011 DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id);
1315 1012
1316 status = gsc_read(GSC_IRQ); 1013 status = gsc_read(GSC_IRQ);
1317 if (status & GSC_IRQ_STATUS_OR_IRQ) { 1014 if (status & GSC_IRQ_STATUS_OR_IRQ) {
1318 dev_err(ippdrv->dev, "occurred overflow at %d, status 0x%x.\n", 1015 dev_err(ctx->dev, "occurred overflow at %d, status 0x%x.\n",
1319 ctx->id, status); 1016 ctx->id, status);
1320 return IRQ_NONE; 1017 err = -EINVAL;
1321 } 1018 }
1322 1019
1323 if (status & GSC_IRQ_STATUS_OR_FRM_DONE) { 1020 if (status & GSC_IRQ_STATUS_OR_FRM_DONE) {
1324 dev_dbg(ippdrv->dev, "occurred frame done at %d, status 0x%x.\n", 1021 int src_buf_id, dst_buf_id;
1325 ctx->id, status);
1326
1327 buf_id[EXYNOS_DRM_OPS_SRC] = gsc_get_src_buf_index(ctx);
1328 if (buf_id[EXYNOS_DRM_OPS_SRC] < 0)
1329 return IRQ_HANDLED;
1330
1331 buf_id[EXYNOS_DRM_OPS_DST] = gsc_get_dst_buf_index(ctx);
1332 if (buf_id[EXYNOS_DRM_OPS_DST] < 0)
1333 return IRQ_HANDLED;
1334
1335 DRM_DEBUG_KMS("buf_id_src[%d]buf_id_dst[%d]\n",
1336 buf_id[EXYNOS_DRM_OPS_SRC], buf_id[EXYNOS_DRM_OPS_DST]);
1337
1338 event_work->ippdrv = ippdrv;
1339 event_work->buf_id[EXYNOS_DRM_OPS_SRC] =
1340 buf_id[EXYNOS_DRM_OPS_SRC];
1341 event_work->buf_id[EXYNOS_DRM_OPS_DST] =
1342 buf_id[EXYNOS_DRM_OPS_DST];
1343 queue_work(ippdrv->event_workq, &event_work->work);
1344 }
1345
1346 return IRQ_HANDLED;
1347}
1348
1349static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
1350{
1351 struct drm_exynos_ipp_prop_list *prop_list = &ippdrv->prop_list;
1352
1353 prop_list->version = 1;
1354 prop_list->writeback = 1;
1355 prop_list->refresh_min = GSC_REFRESH_MIN;
1356 prop_list->refresh_max = GSC_REFRESH_MAX;
1357 prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
1358 (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
1359 prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
1360 (1 << EXYNOS_DRM_DEGREE_90) |
1361 (1 << EXYNOS_DRM_DEGREE_180) |
1362 (1 << EXYNOS_DRM_DEGREE_270);
1363 prop_list->csc = 1;
1364 prop_list->crop = 1;
1365 prop_list->crop_max.hsize = GSC_CROP_MAX;
1366 prop_list->crop_max.vsize = GSC_CROP_MAX;
1367 prop_list->crop_min.hsize = GSC_CROP_MIN;
1368 prop_list->crop_min.vsize = GSC_CROP_MIN;
1369 prop_list->scale = 1;
1370 prop_list->scale_max.hsize = GSC_SCALE_MAX;
1371 prop_list->scale_max.vsize = GSC_SCALE_MAX;
1372 prop_list->scale_min.hsize = GSC_SCALE_MIN;
1373 prop_list->scale_min.vsize = GSC_SCALE_MIN;
1374
1375 return 0;
1376}
1377
1378static inline bool gsc_check_drm_flip(enum drm_exynos_flip flip)
1379{
1380 switch (flip) {
1381 case EXYNOS_DRM_FLIP_NONE:
1382 case EXYNOS_DRM_FLIP_VERTICAL:
1383 case EXYNOS_DRM_FLIP_HORIZONTAL:
1384 case EXYNOS_DRM_FLIP_BOTH:
1385 return true;
1386 default:
1387 DRM_DEBUG_KMS("invalid flip\n");
1388 return false;
1389 }
1390}
1391
1392static int gsc_ippdrv_check_property(struct device *dev,
1393 struct drm_exynos_ipp_property *property)
1394{
1395 struct gsc_context *ctx = get_gsc_context(dev);
1396 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1397 struct drm_exynos_ipp_prop_list *pp = &ippdrv->prop_list;
1398 struct drm_exynos_ipp_config *config;
1399 struct drm_exynos_pos *pos;
1400 struct drm_exynos_sz *sz;
1401 bool swap;
1402 int i;
1403
1404 for_each_ipp_ops(i) {
1405 if ((i == EXYNOS_DRM_OPS_SRC) &&
1406 (property->cmd == IPP_CMD_WB))
1407 continue;
1408 1022
1409 config = &property->config[i]; 1023 dev_dbg(ctx->dev, "occurred frame done at %d, status 0x%x.\n",
1410 pos = &config->pos; 1024 ctx->id, status);
1411 sz = &config->sz;
1412
1413 /* check for flip */
1414 if (!gsc_check_drm_flip(config->flip)) {
1415 DRM_ERROR("invalid flip.\n");
1416 goto err_property;
1417 }
1418
1419 /* check for degree */
1420 switch (config->degree) {
1421 case EXYNOS_DRM_DEGREE_90:
1422 case EXYNOS_DRM_DEGREE_270:
1423 swap = true;
1424 break;
1425 case EXYNOS_DRM_DEGREE_0:
1426 case EXYNOS_DRM_DEGREE_180:
1427 swap = false;
1428 break;
1429 default:
1430 DRM_ERROR("invalid degree.\n");
1431 goto err_property;
1432 }
1433 1025
1434 /* check for buffer bound */ 1026 src_buf_id = gsc_get_src_buf_index(ctx);
1435 if ((pos->x + pos->w > sz->hsize) || 1027 dst_buf_id = gsc_get_dst_buf_index(ctx);
1436 (pos->y + pos->h > sz->vsize)) {
1437 DRM_ERROR("out of buf bound.\n");
1438 goto err_property;
1439 }
1440 1028
1441 /* check for crop */ 1029 DRM_DEBUG_KMS("buf_id_src[%d]buf_id_dst[%d]\n", src_buf_id,
1442 if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) { 1030 dst_buf_id);
1443 if (swap) {
1444 if ((pos->h < pp->crop_min.hsize) ||
1445 (sz->vsize > pp->crop_max.hsize) ||
1446 (pos->w < pp->crop_min.vsize) ||
1447 (sz->hsize > pp->crop_max.vsize)) {
1448 DRM_ERROR("out of crop size.\n");
1449 goto err_property;
1450 }
1451 } else {
1452 if ((pos->w < pp->crop_min.hsize) ||
1453 (sz->hsize > pp->crop_max.hsize) ||
1454 (pos->h < pp->crop_min.vsize) ||
1455 (sz->vsize > pp->crop_max.vsize)) {
1456 DRM_ERROR("out of crop size.\n");
1457 goto err_property;
1458 }
1459 }
1460 }
1461 1031
1462 /* check for scale */ 1032 if (src_buf_id < 0 || dst_buf_id < 0)
1463 if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) { 1033 err = -EINVAL;
1464 if (swap) {
1465 if ((pos->h < pp->scale_min.hsize) ||
1466 (sz->vsize > pp->scale_max.hsize) ||
1467 (pos->w < pp->scale_min.vsize) ||
1468 (sz->hsize > pp->scale_max.vsize)) {
1469 DRM_ERROR("out of scale size.\n");
1470 goto err_property;
1471 }
1472 } else {
1473 if ((pos->w < pp->scale_min.hsize) ||
1474 (sz->hsize > pp->scale_max.hsize) ||
1475 (pos->h < pp->scale_min.vsize) ||
1476 (sz->vsize > pp->scale_max.vsize)) {
1477 DRM_ERROR("out of scale size.\n");
1478 goto err_property;
1479 }
1480 }
1481 }
1482 } 1034 }
1483 1035
1484 return 0; 1036 if (ctx->task) {
1485 1037 struct exynos_drm_ipp_task *task = ctx->task;
1486err_property:
1487 for_each_ipp_ops(i) {
1488 if ((i == EXYNOS_DRM_OPS_SRC) &&
1489 (property->cmd == IPP_CMD_WB))
1490 continue;
1491 1038
1492 config = &property->config[i]; 1039 ctx->task = NULL;
1493 pos = &config->pos; 1040 pm_runtime_mark_last_busy(ctx->dev);
1494 sz = &config->sz; 1041 pm_runtime_put_autosuspend(ctx->dev);
1495 1042 exynos_drm_ipp_task_done(task, err);
1496 DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
1497 i ? "dst" : "src", config->flip, config->degree,
1498 pos->x, pos->y, pos->w, pos->h,
1499 sz->hsize, sz->vsize);
1500 } 1043 }
1501 1044
1502 return -EINVAL; 1045 return IRQ_HANDLED;
1503} 1046}
1504 1047
1505 1048static int gsc_reset(struct gsc_context *ctx)
1506static int gsc_ippdrv_reset(struct device *dev)
1507{ 1049{
1508 struct gsc_context *ctx = get_gsc_context(dev);
1509 struct gsc_scaler *sc = &ctx->sc; 1050 struct gsc_scaler *sc = &ctx->sc;
1510 int ret; 1051 int ret;
1511 1052
1512 /* reset h/w block */ 1053 /* reset h/w block */
1513 ret = gsc_sw_reset(ctx); 1054 ret = gsc_sw_reset(ctx);
1514 if (ret < 0) { 1055 if (ret < 0) {
1515 dev_err(dev, "failed to reset hardware.\n"); 1056 dev_err(ctx->dev, "failed to reset hardware.\n");
1516 return ret; 1057 return ret;
1517 } 1058 }
1518 1059
@@ -1523,166 +1064,172 @@ static int gsc_ippdrv_reset(struct device *dev)
1523 return 0; 1064 return 0;
1524} 1065}
1525 1066
1526static int gsc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd) 1067static void gsc_start(struct gsc_context *ctx)
1527{ 1068{
1528 struct gsc_context *ctx = get_gsc_context(dev);
1529 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1530 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
1531 struct drm_exynos_ipp_property *property;
1532 struct drm_exynos_ipp_config *config;
1533 struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX];
1534 struct drm_exynos_ipp_set_wb set_wb;
1535 u32 cfg; 1069 u32 cfg;
1536 int ret, i;
1537
1538 DRM_DEBUG_KMS("cmd[%d]\n", cmd);
1539
1540 if (!c_node) {
1541 DRM_ERROR("failed to get c_node.\n");
1542 return -EINVAL;
1543 }
1544
1545 property = &c_node->property;
1546 1070
1547 gsc_handle_irq(ctx, true, false, true); 1071 gsc_handle_irq(ctx, true, false, true);
1548 1072
1549 for_each_ipp_ops(i) { 1073 /* enable one shot */
1550 config = &property->config[i]; 1074 cfg = gsc_read(GSC_ENABLE);
1551 img_pos[i] = config->pos; 1075 cfg &= ~(GSC_ENABLE_ON_CLEAR_MASK |
1552 } 1076 GSC_ENABLE_CLK_GATE_MODE_MASK);
1077 cfg |= GSC_ENABLE_ON_CLEAR_ONESHOT;
1078 gsc_write(cfg, GSC_ENABLE);
1553 1079
1554 switch (cmd) { 1080 /* src dma memory */
1555 case IPP_CMD_M2M: 1081 cfg = gsc_read(GSC_IN_CON);
1556 /* enable one shot */ 1082 cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
1557 cfg = gsc_read(GSC_ENABLE); 1083 cfg |= GSC_IN_PATH_MEMORY;
1558 cfg &= ~(GSC_ENABLE_ON_CLEAR_MASK | 1084 gsc_write(cfg, GSC_IN_CON);
1559 GSC_ENABLE_CLK_GATE_MODE_MASK);
1560 cfg |= GSC_ENABLE_ON_CLEAR_ONESHOT;
1561 gsc_write(cfg, GSC_ENABLE);
1562
1563 /* src dma memory */
1564 cfg = gsc_read(GSC_IN_CON);
1565 cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
1566 cfg |= GSC_IN_PATH_MEMORY;
1567 gsc_write(cfg, GSC_IN_CON);
1568
1569 /* dst dma memory */
1570 cfg = gsc_read(GSC_OUT_CON);
1571 cfg |= GSC_OUT_PATH_MEMORY;
1572 gsc_write(cfg, GSC_OUT_CON);
1573 break;
1574 case IPP_CMD_WB:
1575 set_wb.enable = 1;
1576 set_wb.refresh = property->refresh_rate;
1577 gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
1578 exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
1579
1580 /* src local path */
1581 cfg = gsc_read(GSC_IN_CON);
1582 cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
1583 cfg |= (GSC_IN_PATH_LOCAL | GSC_IN_LOCAL_FIMD_WB);
1584 gsc_write(cfg, GSC_IN_CON);
1585
1586 /* dst dma memory */
1587 cfg = gsc_read(GSC_OUT_CON);
1588 cfg |= GSC_OUT_PATH_MEMORY;
1589 gsc_write(cfg, GSC_OUT_CON);
1590 break;
1591 case IPP_CMD_OUTPUT:
1592 /* src dma memory */
1593 cfg = gsc_read(GSC_IN_CON);
1594 cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
1595 cfg |= GSC_IN_PATH_MEMORY;
1596 gsc_write(cfg, GSC_IN_CON);
1597
1598 /* dst local path */
1599 cfg = gsc_read(GSC_OUT_CON);
1600 cfg |= GSC_OUT_PATH_MEMORY;
1601 gsc_write(cfg, GSC_OUT_CON);
1602 break;
1603 default:
1604 ret = -EINVAL;
1605 dev_err(dev, "invalid operations.\n");
1606 return ret;
1607 }
1608 1085
1609 ret = gsc_set_prescaler(ctx, &ctx->sc, 1086 /* dst dma memory */
1610 &img_pos[EXYNOS_DRM_OPS_SRC], 1087 cfg = gsc_read(GSC_OUT_CON);
1611 &img_pos[EXYNOS_DRM_OPS_DST]); 1088 cfg |= GSC_OUT_PATH_MEMORY;
1612 if (ret) { 1089 gsc_write(cfg, GSC_OUT_CON);
1613 dev_err(dev, "failed to set prescaler.\n");
1614 return ret;
1615 }
1616 1090
1617 gsc_set_scaler(ctx, &ctx->sc); 1091 gsc_set_scaler(ctx, &ctx->sc);
1618 1092
1619 cfg = gsc_read(GSC_ENABLE); 1093 cfg = gsc_read(GSC_ENABLE);
1620 cfg |= GSC_ENABLE_ON; 1094 cfg |= GSC_ENABLE_ON;
1621 gsc_write(cfg, GSC_ENABLE); 1095 gsc_write(cfg, GSC_ENABLE);
1096}
1097
1098static int gsc_commit(struct exynos_drm_ipp *ipp,
1099 struct exynos_drm_ipp_task *task)
1100{
1101 struct gsc_context *ctx = container_of(ipp, struct gsc_context, ipp);
1102 int ret;
1103
1104 pm_runtime_get_sync(ctx->dev);
1105 ctx->task = task;
1106
1107 ret = gsc_reset(ctx);
1108 if (ret) {
1109 pm_runtime_put_autosuspend(ctx->dev);
1110 ctx->task = NULL;
1111 return ret;
1112 }
1113
1114 gsc_src_set_fmt(ctx, task->src.buf.fourcc);
1115 gsc_src_set_transf(ctx, task->transform.rotation);
1116 gsc_src_set_size(ctx, &task->src);
1117 gsc_src_set_addr(ctx, 0, &task->src);
1118 gsc_dst_set_fmt(ctx, task->dst.buf.fourcc);
1119 gsc_dst_set_size(ctx, &task->dst);
1120 gsc_dst_set_addr(ctx, 0, &task->dst);
1121 gsc_set_prescaler(ctx, &ctx->sc, &task->src.rect, &task->dst.rect);
1122 gsc_start(ctx);
1622 1123
1623 return 0; 1124 return 0;
1624} 1125}
1625 1126
1626static void gsc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd) 1127static void gsc_abort(struct exynos_drm_ipp *ipp,
1128 struct exynos_drm_ipp_task *task)
1627{ 1129{
1628 struct gsc_context *ctx = get_gsc_context(dev); 1130 struct gsc_context *ctx =
1629 struct drm_exynos_ipp_set_wb set_wb = {0, 0}; 1131 container_of(ipp, struct gsc_context, ipp);
1630 u32 cfg;
1631 1132
1632 DRM_DEBUG_KMS("cmd[%d]\n", cmd); 1133 gsc_reset(ctx);
1134 if (ctx->task) {
1135 struct exynos_drm_ipp_task *task = ctx->task;
1633 1136
1634 switch (cmd) { 1137 ctx->task = NULL;
1635 case IPP_CMD_M2M: 1138 pm_runtime_mark_last_busy(ctx->dev);
1636 /* bypass */ 1139 pm_runtime_put_autosuspend(ctx->dev);
1637 break; 1140 exynos_drm_ipp_task_done(task, -EIO);
1638 case IPP_CMD_WB:
1639 gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
1640 exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
1641 break;
1642 case IPP_CMD_OUTPUT:
1643 default:
1644 dev_err(dev, "invalid operations.\n");
1645 break;
1646 } 1141 }
1142}
1647 1143
1648 gsc_handle_irq(ctx, false, false, true); 1144static struct exynos_drm_ipp_funcs ipp_funcs = {
1145 .commit = gsc_commit,
1146 .abort = gsc_abort,
1147};
1649 1148
1650 /* reset sequence */ 1149static int gsc_bind(struct device *dev, struct device *master, void *data)
1651 gsc_write(0xff, GSC_OUT_BASE_ADDR_Y_MASK); 1150{
1652 gsc_write(0xff, GSC_OUT_BASE_ADDR_CB_MASK); 1151 struct gsc_context *ctx = dev_get_drvdata(dev);
1653 gsc_write(0xff, GSC_OUT_BASE_ADDR_CR_MASK); 1152 struct drm_device *drm_dev = data;
1153 struct exynos_drm_ipp *ipp = &ctx->ipp;
1654 1154
1655 cfg = gsc_read(GSC_ENABLE); 1155 ctx->drm_dev = drm_dev;
1656 cfg &= ~GSC_ENABLE_ON; 1156 drm_iommu_attach_device(drm_dev, dev);
1657 gsc_write(cfg, GSC_ENABLE); 1157
1158 exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
1159 DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
1160 DRM_EXYNOS_IPP_CAP_SCALE | DRM_EXYNOS_IPP_CAP_CONVERT,
1161 ctx->formats, ctx->num_formats, "gsc");
1162
1163 dev_info(dev, "The exynos gscaler has been probed successfully\n");
1164
1165 return 0;
1166}
1167
1168static void gsc_unbind(struct device *dev, struct device *master,
1169 void *data)
1170{
1171 struct gsc_context *ctx = dev_get_drvdata(dev);
1172 struct drm_device *drm_dev = data;
1173 struct exynos_drm_ipp *ipp = &ctx->ipp;
1174
1175 exynos_drm_ipp_unregister(drm_dev, ipp);
1176 drm_iommu_detach_device(drm_dev, dev);
1658} 1177}
1659 1178
1179static const struct component_ops gsc_component_ops = {
1180 .bind = gsc_bind,
1181 .unbind = gsc_unbind,
1182};
1183
1184static const unsigned int gsc_formats[] = {
1185 DRM_FORMAT_ARGB8888,
1186 DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB565, DRM_FORMAT_BGRX8888,
1187 DRM_FORMAT_NV12, DRM_FORMAT_NV16, DRM_FORMAT_NV21, DRM_FORMAT_NV61,
1188 DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU,
1189 DRM_FORMAT_YUV420, DRM_FORMAT_YVU420, DRM_FORMAT_YUV422,
1190};
1191
1660static int gsc_probe(struct platform_device *pdev) 1192static int gsc_probe(struct platform_device *pdev)
1661{ 1193{
1662 struct device *dev = &pdev->dev; 1194 struct device *dev = &pdev->dev;
1195 struct gsc_driverdata *driver_data;
1196 struct exynos_drm_ipp_formats *formats;
1663 struct gsc_context *ctx; 1197 struct gsc_context *ctx;
1664 struct resource *res; 1198 struct resource *res;
1665 struct exynos_drm_ippdrv *ippdrv; 1199 int ret, i;
1666 int ret;
1667 1200
1668 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); 1201 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
1669 if (!ctx) 1202 if (!ctx)
1670 return -ENOMEM; 1203 return -ENOMEM;
1671 1204
1672 if (dev->of_node) { 1205 formats = devm_kzalloc(dev, sizeof(*formats) *
1673 ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node, 1206 (ARRAY_SIZE(gsc_formats)), GFP_KERNEL);
1674 "samsung,sysreg"); 1207 if (!formats)
1675 if (IS_ERR(ctx->sysreg)) { 1208 return -ENOMEM;
1676 dev_warn(dev, "failed to get system register.\n"); 1209
1677 ctx->sysreg = NULL; 1210 driver_data = (struct gsc_driverdata *)of_device_get_match_data(dev);
1678 } 1211 ctx->dev = dev;
1212 ctx->num_clocks = driver_data->num_clocks;
1213 ctx->clk_names = driver_data->clk_names;
1214
1215 for (i = 0; i < ARRAY_SIZE(gsc_formats); i++) {
1216 formats[i].fourcc = gsc_formats[i];
1217 formats[i].type = DRM_EXYNOS_IPP_FORMAT_SOURCE |
1218 DRM_EXYNOS_IPP_FORMAT_DESTINATION;
1219 formats[i].limits = driver_data->limits;
1220 formats[i].num_limits = driver_data->num_limits;
1679 } 1221 }
1222 ctx->formats = formats;
1223 ctx->num_formats = ARRAY_SIZE(gsc_formats);
1680 1224
1681 /* clock control */ 1225 /* clock control */
1682 ctx->gsc_clk = devm_clk_get(dev, "gscl"); 1226 for (i = 0; i < ctx->num_clocks; i++) {
1683 if (IS_ERR(ctx->gsc_clk)) { 1227 ctx->clocks[i] = devm_clk_get(dev, ctx->clk_names[i]);
1684 dev_err(dev, "failed to get gsc clock.\n"); 1228 if (IS_ERR(ctx->clocks[i])) {
1685 return PTR_ERR(ctx->gsc_clk); 1229 dev_err(dev, "failed to get clock: %s\n",
1230 ctx->clk_names[i]);
1231 return PTR_ERR(ctx->clocks[i]);
1232 }
1686 } 1233 }
1687 1234
1688 /* resource memory */ 1235 /* resource memory */
@@ -1699,8 +1246,8 @@ static int gsc_probe(struct platform_device *pdev)
1699 } 1246 }
1700 1247
1701 ctx->irq = res->start; 1248 ctx->irq = res->start;
1702 ret = devm_request_threaded_irq(dev, ctx->irq, NULL, gsc_irq_handler, 1249 ret = devm_request_irq(dev, ctx->irq, gsc_irq_handler, 0,
1703 IRQF_ONESHOT, "drm_gsc", ctx); 1250 dev_name(dev), ctx);
1704 if (ret < 0) { 1251 if (ret < 0) {
1705 dev_err(dev, "failed to request irq.\n"); 1252 dev_err(dev, "failed to request irq.\n");
1706 return ret; 1253 return ret;
@@ -1709,38 +1256,22 @@ static int gsc_probe(struct platform_device *pdev)
1709 /* context initailization */ 1256 /* context initailization */
1710 ctx->id = pdev->id; 1257 ctx->id = pdev->id;
1711 1258
1712 ippdrv = &ctx->ippdrv;
1713 ippdrv->dev = dev;
1714 ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &gsc_src_ops;
1715 ippdrv->ops[EXYNOS_DRM_OPS_DST] = &gsc_dst_ops;
1716 ippdrv->check_property = gsc_ippdrv_check_property;
1717 ippdrv->reset = gsc_ippdrv_reset;
1718 ippdrv->start = gsc_ippdrv_start;
1719 ippdrv->stop = gsc_ippdrv_stop;
1720 ret = gsc_init_prop_list(ippdrv);
1721 if (ret < 0) {
1722 dev_err(dev, "failed to init property list.\n");
1723 return ret;
1724 }
1725
1726 DRM_DEBUG_KMS("id[%d]ippdrv[%pK]\n", ctx->id, ippdrv);
1727
1728 mutex_init(&ctx->lock);
1729 platform_set_drvdata(pdev, ctx); 1259 platform_set_drvdata(pdev, ctx);
1730 1260
1261 pm_runtime_use_autosuspend(dev);
1262 pm_runtime_set_autosuspend_delay(dev, GSC_AUTOSUSPEND_DELAY);
1731 pm_runtime_enable(dev); 1263 pm_runtime_enable(dev);
1732 1264
1733 ret = exynos_drm_ippdrv_register(ippdrv); 1265 ret = component_add(dev, &gsc_component_ops);
1734 if (ret < 0) { 1266 if (ret)
1735 dev_err(dev, "failed to register drm gsc device.\n"); 1267 goto err_pm_dis;
1736 goto err_ippdrv_register;
1737 }
1738 1268
1739 dev_info(dev, "drm gsc registered successfully.\n"); 1269 dev_info(dev, "drm gsc registered successfully.\n");
1740 1270
1741 return 0; 1271 return 0;
1742 1272
1743err_ippdrv_register: 1273err_pm_dis:
1274 pm_runtime_dont_use_autosuspend(dev);
1744 pm_runtime_disable(dev); 1275 pm_runtime_disable(dev);
1745 return ret; 1276 return ret;
1746} 1277}
@@ -1748,13 +1279,8 @@ err_ippdrv_register:
/*
 * Driver removal: only runtime-PM teardown is needed here; the component
 * framework unbind path (gsc_unbind, not visible in this chunk) handles
 * unregistering the IPP module itself.
 */
static int gsc_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	/* Undo pm_runtime_use_autosuspend() from probe before disabling. */
	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_disable(dev);

	return 0;
}
@@ -1763,19 +1289,32 @@ static int gsc_remove(struct platform_device *pdev)
static int __maybe_unused gsc_runtime_suspend(struct device *dev)
{
	struct gsc_context *ctx = get_gsc_context(dev);
	int i;

	DRM_DEBUG_KMS("id[%d]\n", ctx->id);

	/* Disable in reverse of the enable order used in runtime_resume. */
	for (i = ctx->num_clocks - 1; i >= 0; i--)
		clk_disable_unprepare(ctx->clocks[i]);

	return 0;
}
1771 1301
1772static int __maybe_unused gsc_runtime_resume(struct device *dev) 1302static int __maybe_unused gsc_runtime_resume(struct device *dev)
1773{ 1303{
1774 struct gsc_context *ctx = get_gsc_context(dev); 1304 struct gsc_context *ctx = get_gsc_context(dev);
1305 int i, ret;
1775 1306
1776 DRM_DEBUG_KMS("id[%d]\n", ctx->id); 1307 DRM_DEBUG_KMS("id[%d]\n", ctx->id);
1777 1308
1778 return gsc_clk_ctrl(ctx, true); 1309 for (i = 0; i < ctx->num_clocks; i++) {
1310 ret = clk_prepare_enable(ctx->clocks[i]);
1311 if (ret) {
1312 while (--i > 0)
1313 clk_disable_unprepare(ctx->clocks[i]);
1314 return ret;
1315 }
1316 }
1317 return 0;
1779} 1318}
1780 1319
1781static const struct dev_pm_ops gsc_pm_ops = { 1320static const struct dev_pm_ops gsc_pm_ops = {
@@ -1784,9 +1323,66 @@ static const struct dev_pm_ops gsc_pm_ops = {
1784 SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL) 1323 SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
1785}; 1324};
1786 1325
/*
 * Per-SoC hardware constraints. Size limits are in pixels (min, max, align);
 * scale limits are ratios in 16.16 fixed point (here: 1/16x .. 8x).
 */
static const struct drm_exynos_ipp_limit gsc_5250_limits[] = {
	{ IPP_SIZE_LIMIT(BUFFER, .h = { 32, 4800, 8 }, .v = { 16, 3344, 8 }) },
	{ IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 2 }, .v = { 8, 3344, 2 }) },
	{ IPP_SIZE_LIMIT(ROTATED, .h = { 32, 2048 }, .v = { 16, 2048 }) },
	{ IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 },
			  .v = { (1 << 16) / 16, (1 << 16) * 8 }) },
};

static const struct drm_exynos_ipp_limit gsc_5420_limits[] = {
	{ IPP_SIZE_LIMIT(BUFFER, .h = { 32, 4800, 8 }, .v = { 16, 3344, 8 }) },
	{ IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 2 }, .v = { 8, 3344, 2 }) },
	{ IPP_SIZE_LIMIT(ROTATED, .h = { 16, 2016 }, .v = { 8, 2016 }) },
	{ IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 },
			  .v = { (1 << 16) / 16, (1 << 16) * 8 }) },
};

static const struct drm_exynos_ipp_limit gsc_5433_limits[] = {
	{ IPP_SIZE_LIMIT(BUFFER, .h = { 32, 8191, 2 }, .v = { 16, 8191, 2 }) },
	{ IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 1 }, .v = { 8, 3344, 1 }) },
	{ IPP_SIZE_LIMIT(ROTATED, .h = { 32, 2047 }, .v = { 8, 8191 }) },
	{ IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 },
			  .v = { (1 << 16) / 16, (1 << 16) * 8 }) },
};

/* Per-variant data: clock names consumed by probe plus the limit tables. */
static struct gsc_driverdata gsc_exynos5250_drvdata = {
	.clk_names = {"gscl"},
	.num_clocks = 1,
	.limits = gsc_5250_limits,
	.num_limits = ARRAY_SIZE(gsc_5250_limits),
};

static struct gsc_driverdata gsc_exynos5420_drvdata = {
	.clk_names = {"gscl"},
	.num_clocks = 1,
	.limits = gsc_5420_limits,
	.num_limits = ARRAY_SIZE(gsc_5420_limits),
};

static struct gsc_driverdata gsc_exynos5433_drvdata = {
	.clk_names = {"pclk", "aclk", "aclk_xiu", "aclk_gsclbend"},
	.num_clocks = 4,
	.limits = gsc_5433_limits,
	.num_limits = ARRAY_SIZE(gsc_5433_limits),
};
1370
static const struct of_device_id exynos_drm_gsc_of_match[] = {
	{
		/* Legacy generic compatible; treated as Exynos5250-class HW. */
		.compatible = "samsung,exynos5-gsc",
		.data = &gsc_exynos5250_drvdata,
	}, {
		.compatible = "samsung,exynos5250-gsc",
		.data = &gsc_exynos5250_drvdata,
	}, {
		.compatible = "samsung,exynos5420-gsc",
		.data = &gsc_exynos5420_drvdata,
	}, {
		.compatible = "samsung,exynos5433-gsc",
		.data = &gsc_exynos5433_drvdata,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, exynos_drm_gsc_of_match);
1792 1388
@@ -1800,4 +1396,3 @@ struct platform_driver gsc_driver = {
1800 .of_match_table = of_match_ptr(exynos_drm_gsc_of_match), 1396 .of_match_table = of_match_ptr(exynos_drm_gsc_of_match),
1801 }, 1397 },
1802}; 1398};
1803
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.h b/drivers/gpu/drm/exynos/exynos_drm_gsc.h
deleted file mode 100644
index 29ec1c5efcf2..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.h
+++ /dev/null
@@ -1,24 +0,0 @@
1/*
2 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
3 *
4 * Authors:
5 * Eunchul Kim <chulspro.kim@samsung.com>
6 * Jinyoung Jeon <jy0.jeon@samsung.com>
7 * Sangmin Lee <lsmin.lee@samsung.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 */
14
15#ifndef _EXYNOS_DRM_GSC_H_
16#define _EXYNOS_DRM_GSC_H_
17
18/*
19 * TODO
20 * FIMD output interface notifier callback.
21 * Mixer output interface notifier callback.
22 */
23
24#endif /* _EXYNOS_DRM_GSC_H_ */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
new file mode 100644
index 000000000000..26374e58c557
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -0,0 +1,916 @@
1/*
2 * Copyright (C) 2017 Samsung Electronics Co.Ltd
3 * Authors:
4 * Marek Szyprowski <m.szyprowski@samsung.com>
5 *
6 * Exynos DRM Image Post Processing (IPP) related functions
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 */
18
19
20#include <drm/drmP.h>
21#include <drm/drm_mode.h>
22#include <uapi/drm/exynos_drm.h>
23
24#include "exynos_drm_drv.h"
25#include "exynos_drm_gem.h"
26#include "exynos_drm_ipp.h"
27
28static int num_ipp;
29static LIST_HEAD(ipp_list);
30
/**
 * exynos_drm_ipp_register - Register a new picture processor hardware module
 * @dev: DRM device
 * @ipp: ipp module to init
 * @funcs: callbacks for the new ipp object
 * @caps: bitmask of ipp capabilities (%DRM_EXYNOS_IPP_CAP_*)
 * @formats: array of supported formats
 * @num_formats: size of the supported formats array
 * @name: name (for debugging purposes)
 *
 * Initializes a ipp module.
 *
 * Returns:
 * Zero on success, error code on failure.
 */
int exynos_drm_ipp_register(struct drm_device *dev, struct exynos_drm_ipp *ipp,
		const struct exynos_drm_ipp_funcs *funcs, unsigned int caps,
		const struct exynos_drm_ipp_formats *formats,
		unsigned int num_formats, const char *name)
{
	/* Caller programming errors; warn loudly but keep going. */
	WARN_ON(!ipp);
	WARN_ON(!funcs);
	WARN_ON(!formats);
	WARN_ON(!num_formats);

	spin_lock_init(&ipp->lock);
	INIT_LIST_HEAD(&ipp->todo_list);
	init_waitqueue_head(&ipp->done_wq);
	ipp->dev = dev;
	ipp->funcs = funcs;
	ipp->capabilities = caps;
	ipp->name = name;
	ipp->formats = formats;
	ipp->num_formats = num_formats;

	/* ipp_list modification is serialized by component framework */
	list_add_tail(&ipp->head, &ipp_list);
	/* Module-global counter doubles as the next id; same serialization. */
	ipp->id = num_ipp++;

	DRM_DEBUG_DRIVER("Registered ipp %d\n", ipp->id);

	return 0;
}
74
/**
 * exynos_drm_ipp_unregister - Unregister the picture processor module
 * @dev: DRM device
 * @ipp: ipp module
 *
 * The caller must have stopped/flushed all work first: a still-running task
 * or a non-empty todo list indicates a driver bug (hence the WARN_ONs).
 */
void exynos_drm_ipp_unregister(struct drm_device *dev,
			       struct exynos_drm_ipp *ipp)
{
	WARN_ON(ipp->task);
	WARN_ON(!list_empty(&ipp->todo_list));
	list_del(&ipp->head);
}
87
/**
 * exynos_drm_ipp_get_res_ioctl - enumerate all ipp modules
 * @dev: DRM device
 * @data: ioctl data
 * @file_priv: DRM file info
 *
 * Construct a list of ipp ids.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int exynos_drm_ipp_get_res_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_exynos_ioctl_ipp_get_res *resp = data;
	struct exynos_drm_ipp *ipp;
	uint32_t __user *ipp_ptr = (uint32_t __user *)
						(unsigned long)resp->ipp_id_ptr;
	unsigned int count = num_ipp, copied = 0;

	/*
	 * This ioctl is called twice, once to determine how much space is
	 * needed, and the 2nd time to fill it.
	 */
	if (count && resp->count_ipps >= count) {
		list_for_each_entry(ipp, &ipp_list, head) {
			if (put_user(ipp->id, ipp_ptr + copied))
				return -EFAULT;
			copied++;
		}
	}
	/* Always report the total so the first call can size its buffer. */
	resp->count_ipps = count;

	return 0;
}
125
/* Look up a registered ipp module by id; NULL if no such id exists. */
static inline struct exynos_drm_ipp *__ipp_get(uint32_t id)
{
	struct exynos_drm_ipp *ipp;

	list_for_each_entry(ipp, &ipp_list, head)
		if (ipp->id == id)
			return ipp;
	return NULL;
}
135
/**
 * exynos_drm_ipp_get_caps_ioctl - get ipp module capabilities and formats
 * @dev: DRM device
 * @data: ioctl data
 * @file_priv: DRM file info
 *
 * Construct a structure describing ipp module capabilities.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int exynos_drm_ipp_get_caps_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct drm_exynos_ioctl_ipp_get_caps *resp = data;
	void __user *ptr = (void __user *)(unsigned long)resp->formats_ptr;
	struct exynos_drm_ipp *ipp;
	int i;

	ipp = __ipp_get(resp->ipp_id);
	if (!ipp)
		return -ENOENT;

	resp->ipp_id = ipp->id;
	resp->capabilities = ipp->capabilities;

	/*
	 * This ioctl is called twice, once to determine how much space is
	 * needed, and the 2nd time to fill it.
	 */
	if (resp->formats_count >= ipp->num_formats) {
		for (i = 0; i < ipp->num_formats; i++) {
			/* Copy only the UAPI-visible fields, not limits. */
			struct drm_exynos_ipp_format tmp = {
				.fourcc = ipp->formats[i].fourcc,
				.type = ipp->formats[i].type,
				.modifier = ipp->formats[i].modifier,
			};

			if (copy_to_user(ptr, &tmp, sizeof(tmp)))
				return -EFAULT;
			/* void* arithmetic: GNU C extension used by the kernel. */
			ptr += sizeof(tmp);
		}
	}
	resp->formats_count = ipp->num_formats;

	return 0;
}
185
186static inline const struct exynos_drm_ipp_formats *__ipp_format_get(
187 struct exynos_drm_ipp *ipp, uint32_t fourcc,
188 uint64_t mod, unsigned int type)
189{
190 int i;
191
192 for (i = 0; i < ipp->num_formats; i++) {
193 if ((ipp->formats[i].type & type) &&
194 ipp->formats[i].fourcc == fourcc &&
195 ipp->formats[i].modifier == mod)
196 return &ipp->formats[i];
197 }
198 return NULL;
199}
200
/**
 * exynos_drm_ipp_get_limits_ioctl - get ipp module limits
 * @dev: DRM device
 * @data: ioctl data
 * @file_priv: DRM file info
 *
 * Construct a structure describing ipp module limitations for provided
 * picture format.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int exynos_drm_ipp_get_limits_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
{
	struct drm_exynos_ioctl_ipp_get_limits *resp = data;
	void __user *ptr = (void __user *)(unsigned long)resp->limits_ptr;
	const struct exynos_drm_ipp_formats *format;
	struct exynos_drm_ipp *ipp;

	/* Limits are queried for exactly one role at a time. */
	if (resp->type != DRM_EXYNOS_IPP_FORMAT_SOURCE &&
	    resp->type != DRM_EXYNOS_IPP_FORMAT_DESTINATION)
		return -EINVAL;

	ipp = __ipp_get(resp->ipp_id);
	if (!ipp)
		return -ENOENT;

	format = __ipp_format_get(ipp, resp->fourcc, resp->modifier,
				  resp->type);
	if (!format)
		return -EINVAL;

	/*
	 * This ioctl is called twice, once to determine how much space is
	 * needed, and the 2nd time to fill it.
	 */
	if (format->num_limits && resp->limits_count >= format->num_limits)
		if (copy_to_user((void __user *)ptr, format->limits,
				 sizeof(*format->limits) * format->num_limits))
			return -EFAULT;
	resp->limits_count = format->num_limits;

	return 0;
}
248
/* Completion event queued on the DRM file; sent when a task finishes. */
struct drm_pending_exynos_ipp_event {
	struct drm_pending_event base;
	struct drm_exynos_ipp_event event;
};
253
/*
 * Allocate a task bound to @ipp and preset defaults. Rect w/h default to
 * UINT_MAX as a "not set by userspace" sentinel; task_check() later replaces
 * them with the full buffer dimensions. Returns NULL on allocation failure.
 */
static inline struct exynos_drm_ipp_task *
	exynos_drm_ipp_task_alloc(struct exynos_drm_ipp *ipp)
{
	struct exynos_drm_ipp_task *task;

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return NULL;

	task->dev = ipp->dev;
	task->ipp = ipp;

	/* some defaults */
	task->src.rect.w = task->dst.rect.w = UINT_MAX;
	task->src.rect.h = task->dst.rect.h = UINT_MAX;
	task->transform.rotation = DRM_MODE_ROTATE_0;

	DRM_DEBUG_DRIVER("Allocated task %pK\n", task);

	return task;
}
275
/*
 * Mapping from UAPI parameter-block ids to (size, destination offset) within
 * struct exynos_drm_ipp_task; drives the copy loop in
 * exynos_drm_ipp_task_set().
 */
static const struct exynos_drm_param_map {
	unsigned int id;
	unsigned int size;
	unsigned int offset;
} exynos_drm_ipp_params_maps[] = {
	{
		DRM_EXYNOS_IPP_TASK_BUFFER | DRM_EXYNOS_IPP_TASK_TYPE_SOURCE,
		sizeof(struct drm_exynos_ipp_task_buffer),
		offsetof(struct exynos_drm_ipp_task, src.buf),
	}, {
		DRM_EXYNOS_IPP_TASK_BUFFER |
			DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION,
		sizeof(struct drm_exynos_ipp_task_buffer),
		offsetof(struct exynos_drm_ipp_task, dst.buf),
	}, {
		DRM_EXYNOS_IPP_TASK_RECTANGLE | DRM_EXYNOS_IPP_TASK_TYPE_SOURCE,
		sizeof(struct drm_exynos_ipp_task_rect),
		offsetof(struct exynos_drm_ipp_task, src.rect),
	}, {
		DRM_EXYNOS_IPP_TASK_RECTANGLE |
			DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION,
		sizeof(struct drm_exynos_ipp_task_rect),
		offsetof(struct exynos_drm_ipp_task, dst.rect),
	}, {
		DRM_EXYNOS_IPP_TASK_TRANSFORM,
		sizeof(struct drm_exynos_ipp_task_transform),
		offsetof(struct exynos_drm_ipp_task, transform),
	}, {
		DRM_EXYNOS_IPP_TASK_ALPHA,
		sizeof(struct drm_exynos_ipp_task_alpha),
		offsetof(struct exynos_drm_ipp_task, alpha),
	},
};
309
/*
 * Parse the userspace parameter stream (a packed sequence of id-tagged
 * blocks, arg->params_size bytes total) into @task using the param map.
 * Each block begins with a u32 id; unknown ids or short blocks -> -EINVAL.
 */
static int exynos_drm_ipp_task_set(struct exynos_drm_ipp_task *task,
				   struct drm_exynos_ioctl_ipp_commit *arg)
{
	const struct exynos_drm_param_map *map = exynos_drm_ipp_params_maps;
	void __user *params = (void __user *)(unsigned long)arg->params_ptr;
	unsigned int size = arg->params_size;
	uint32_t id;
	int i;

	while (size) {
		if (get_user(id, (uint32_t __user *)params))
			return -EFAULT;

		for (i = 0; i < ARRAY_SIZE(exynos_drm_ipp_params_maps); i++)
			if (map[i].id == id)
				break;
		/* Reject unknown ids and truncated trailing blocks. */
		if (i == ARRAY_SIZE(exynos_drm_ipp_params_maps) ||
		    map[i].size > size)
			return -EINVAL;

		if (copy_from_user((void *)task + map[i].offset, params,
				   map[i].size))
			return -EFAULT;

		params += map[i].size;
		size -= map[i].size;
	}

	DRM_DEBUG_DRIVER("Got task %pK configuration from userspace\n", task);
	return 0;
}
341
342static int exynos_drm_ipp_task_setup_buffer(struct exynos_drm_ipp_buffer *buf,
343 struct drm_file *filp)
344{
345 int ret = 0;
346 int i;
347
348 /* basic checks */
349 if (buf->buf.width == 0 || buf->buf.height == 0)
350 return -EINVAL;
351 buf->format = drm_format_info(buf->buf.fourcc);
352 for (i = 0; i < buf->format->num_planes; i++) {
353 unsigned int width = (i == 0) ? buf->buf.width :
354 DIV_ROUND_UP(buf->buf.width, buf->format->hsub);
355
356 if (buf->buf.pitch[i] == 0)
357 buf->buf.pitch[i] = width * buf->format->cpp[i];
358 if (buf->buf.pitch[i] < width * buf->format->cpp[i])
359 return -EINVAL;
360 if (!buf->buf.gem_id[i])
361 return -ENOENT;
362 }
363
364 /* pitch for additional planes must match */
365 if (buf->format->num_planes > 2 &&
366 buf->buf.pitch[1] != buf->buf.pitch[2])
367 return -EINVAL;
368
369 /* get GEM buffers and check their size */
370 for (i = 0; i < buf->format->num_planes; i++) {
371 unsigned int height = (i == 0) ? buf->buf.height :
372 DIV_ROUND_UP(buf->buf.height, buf->format->vsub);
373 unsigned long size = height * buf->buf.pitch[i];
374 struct drm_gem_object *obj = drm_gem_object_lookup(filp,
375 buf->buf.gem_id[i]);
376 if (!obj) {
377 ret = -ENOENT;
378 goto gem_free;
379 }
380 buf->exynos_gem[i] = to_exynos_gem(obj);
381
382 if (size + buf->buf.offset[i] > buf->exynos_gem[i]->size) {
383 i++;
384 ret = -EINVAL;
385 goto gem_free;
386 }
387 buf->dma_addr[i] = buf->exynos_gem[i]->dma_addr +
388 buf->buf.offset[i];
389 }
390
391 return 0;
392gem_free:
393 while (i--) {
394 drm_gem_object_put_unlocked(&buf->exynos_gem[i]->base);
395 buf->exynos_gem[i] = NULL;
396 }
397 return ret;
398}
399
/*
 * Drop the GEM references taken by setup_buffer(). exynos_gem[0] acts as the
 * "was this buffer ever set up?" sentinel, so the no-op path is cheap.
 */
static void exynos_drm_ipp_task_release_buf(struct exynos_drm_ipp_buffer *buf)
{
	int i;

	if (!buf->exynos_gem[0])
		return;
	for (i = 0; i < buf->format->num_planes; i++)
		drm_gem_object_put_unlocked(&buf->exynos_gem[i]->base);
}
409
/*
 * Release all task resources: buffer references, any still-pending
 * completion event, and the task itself.
 */
static void exynos_drm_ipp_task_free(struct exynos_drm_ipp *ipp,
				     struct exynos_drm_ipp_task *task)
{
	DRM_DEBUG_DRIVER("Freeing task %pK\n", task);

	exynos_drm_ipp_task_release_buf(&task->src);
	exynos_drm_ipp_task_release_buf(&task->dst);
	/* Event is non-NULL only if it has not been sent to userspace yet. */
	if (task->event)
		drm_event_cancel_free(ipp->dev, &task->event->base);
	kfree(task);
}
421
/* Merged horizontal/vertical limit pair produced by __get_size_limit(). */
struct drm_ipp_limit {
	struct drm_exynos_ipp_limit_val h;
	struct drm_exynos_ipp_limit_val v;
};

enum drm_ipp_size_id {
	IPP_LIMIT_BUFFER, IPP_LIMIT_AREA, IPP_LIMIT_ROTATED, IPP_LIMIT_MAX
};

/*
 * Fallback chain per size-limit kind: e.g. if a driver specifies no ROTATED
 * limit, AREA applies, then BUFFER. Rows are zero-terminated (each row has
 * one more slot than entries), which the lookup loop relies on.
 */
static const enum drm_ipp_size_id limit_id_fallback[IPP_LIMIT_MAX][4] = {
	[IPP_LIMIT_BUFFER]  = { DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
	[IPP_LIMIT_AREA]    = { DRM_EXYNOS_IPP_LIMIT_SIZE_AREA,
				DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
	[IPP_LIMIT_ROTATED] = { DRM_EXYNOS_IPP_LIMIT_SIZE_ROTATED,
				DRM_EXYNOS_IPP_LIMIT_SIZE_AREA,
				DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
};
439
/* Fill in @val only when the field is still unset (zero). */
static inline void __limit_set_val(unsigned int *ptr, unsigned int val)
{
	if (*ptr == 0)
		*ptr = val;
}
445
/*
 * Build a merged size limit for kind @id from the driver's @limits table,
 * walking the fallback chain so that more specific kinds (e.g. ROTATED) take
 * precedence and unset fields are filled from more generic ones (AREA,
 * BUFFER). Fields that no table entry provides stay 0 (= unconstrained).
 */
static void __get_size_limit(const struct drm_exynos_ipp_limit *limits,
			     unsigned int num_limits, enum drm_ipp_size_id id,
			     struct drm_ipp_limit *res)
{
	const struct drm_exynos_ipp_limit *l = limits;
	int i = 0;

	memset(res, 0, sizeof(*res));
	/* Terminates because limit_id_fallback rows are zero-terminated. */
	for (i = 0; limit_id_fallback[id][i]; i++)
		for (l = limits; l - limits < num_limits; l++) {
			if (((l->type & DRM_EXYNOS_IPP_LIMIT_TYPE_MASK) !=
			      DRM_EXYNOS_IPP_LIMIT_TYPE_SIZE) ||
			    ((l->type & DRM_EXYNOS_IPP_LIMIT_SIZE_MASK) !=
						     limit_id_fallback[id][i]))
				continue;
			/* Only fill fields not already set by earlier pass. */
			__limit_set_val(&res->h.min, l->h.min);
			__limit_set_val(&res->h.max, l->h.max);
			__limit_set_val(&res->h.align, l->h.align);
			__limit_set_val(&res->v.min, l->v.min);
			__limit_set_val(&res->v.max, l->v.max);
			__limit_set_val(&res->v.align, l->v.align);
		}
}
469
/*
 * Check @val against an alignment constraint. @align == 0 means no
 * constraint; otherwise @align is assumed to be a power of two.
 */
static inline bool __align_check(unsigned int val, unsigned int align)
{
	if (!align || !(val & (align - 1)))
		return true;

	DRM_DEBUG_DRIVER("Value %d exceeds HW limits (align %d)\n",
			 val, align);
	return false;
}
479
/*
 * Check @val against a min/max/alignment triple; 0 in any field disables
 * that particular constraint.
 */
static inline bool __size_limit_check(unsigned int val,
				      struct drm_exynos_ipp_limit_val *l)
{
	if ((l->min && val < l->min) || (l->max && val > l->max)) {
		DRM_DEBUG_DRIVER("Value %d exceeds HW limits (min %d, max %d)\n",
				 val, l->min, l->max);
		return false;
	}
	return __align_check(val, l->align);
}
490
/*
 * Validate buffer dimensions and the crop rectangle of @buf against the
 * format's size limits. When @swap is set (90/270 deg rotation on the other
 * side of the pipeline), horizontal and vertical constraints are exchanged
 * for the rectangle check. Returns 0 or -EINVAL.
 */
static int exynos_drm_ipp_check_size_limits(struct exynos_drm_ipp_buffer *buf,
	const struct drm_exynos_ipp_limit *limits, unsigned int num_limits,
	bool rotate, bool swap)
{
	enum drm_ipp_size_id id = rotate ? IPP_LIMIT_ROTATED : IPP_LIMIT_AREA;
	struct drm_ipp_limit l;
	struct drm_exynos_ipp_limit_val *lh = &l.h, *lv = &l.v;

	/* No limits registered means everything is acceptable. */
	if (!limits)
		return 0;

	/* Whole-buffer constraints first. */
	__get_size_limit(limits, num_limits, IPP_LIMIT_BUFFER, &l);
	if (!__size_limit_check(buf->buf.width, &l.h) ||
	    !__size_limit_check(buf->buf.height, &l.v))
		return -EINVAL;

	if (swap) {
		lv = &l.h;
		lh = &l.v;
	}
	/* Then the (possibly rotated) processing-area constraints. */
	__get_size_limit(limits, num_limits, id, &l);
	if (!__size_limit_check(buf->rect.w, lh) ||
	    !__align_check(buf->rect.x, lh->align) ||
	    !__size_limit_check(buf->rect.h, lv) ||
	    !__align_check(buf->rect.y, lv->align))
		return -EINVAL;

	return 0;
}
520
/*
 * Check the dst/src scaling ratio against @min/@max, both in 16.16 fixed
 * point; 0 disables the respective bound. Comparison is done as
 * (dst << 16) vs src * limit to avoid division.
 */
static inline bool __scale_limit_check(unsigned int src, unsigned int dst,
				       unsigned int min, unsigned int max)
{
	if ((max && (dst << 16) > src * max) ||
	    (min && (dst << 16) < src * min)) {
		DRM_DEBUG_DRIVER("Scale from %d to %d exceeds HW limits (ratio min %d.%05d, max %d.%05d)\n",
				 src, dst,
				 min >> 16, 100000 * (min & 0xffff) / (1 << 16),
				 max >> 16, 100000 * (max & 0xffff) / (1 << 16));
		return false;
	}
	return true;
}
534
/*
 * Validate src->dst scaling ratios against the first SCALE-type entry in
 * @limits. @swap exchanges axes for 90/270 deg rotation. Returns 0 when no
 * scale limit is registered or the ratios fit; -EINVAL otherwise.
 */
static int exynos_drm_ipp_check_scale_limits(
				struct drm_exynos_ipp_task_rect *src,
				struct drm_exynos_ipp_task_rect *dst,
				const struct drm_exynos_ipp_limit *limits,
				unsigned int num_limits, bool swap)
{
	const struct drm_exynos_ipp_limit_val *lh, *lv;
	int dw, dh;

	/* Find the (single) scale limit entry, if any. */
	for (; num_limits; limits++, num_limits--)
		if ((limits->type & DRM_EXYNOS_IPP_LIMIT_TYPE_MASK) ==
		    DRM_EXYNOS_IPP_LIMIT_TYPE_SCALE)
			break;
	if (!num_limits)
		return 0;

	lh = (!swap) ? &limits->h : &limits->v;
	lv = (!swap) ? &limits->v : &limits->h;
	dw = (!swap) ? dst->w : dst->h;
	dh = (!swap) ? dst->h : dst->w;

	if (!__scale_limit_check(src->w, dw, lh->min, lh->max) ||
	    !__scale_limit_check(src->h, dh, lv->min, lv->max))
		return -EINVAL;

	return 0;
}
562
/*
 * Validate a fully-parsed task against the module's capabilities and the
 * per-format size/scale limits, filling in defaulted rectangle dimensions
 * along the way. Returns 0 or -EINVAL.
 */
static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
{
	struct exynos_drm_ipp *ipp = task->ipp;
	const struct exynos_drm_ipp_formats *src_fmt, *dst_fmt;
	struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
	unsigned int rotation = task->transform.rotation;
	int ret = 0;
	bool swap = drm_rotation_90_or_270(rotation);
	bool rotate = (rotation != DRM_MODE_ROTATE_0);
	bool scale = false;

	DRM_DEBUG_DRIVER("Checking task %pK\n", task);

	/* UINT_MAX is the "unset" sentinel from task_alloc(): use full size. */
	if (src->rect.w == UINT_MAX)
		src->rect.w = src->buf.width;
	if (src->rect.h == UINT_MAX)
		src->rect.h = src->buf.height;
	if (dst->rect.w == UINT_MAX)
		dst->rect.w = dst->buf.width;
	if (dst->rect.h == UINT_MAX)
		dst->rect.h = dst->buf.height;

	/* Rectangles must lie entirely inside their buffers. */
	if (src->rect.x + src->rect.w > (src->buf.width) ||
	    src->rect.y + src->rect.h > (src->buf.height) ||
	    dst->rect.x + dst->rect.w > (dst->buf.width) ||
	    dst->rect.y + dst->rect.h > (dst->buf.height)) {
		DRM_DEBUG_DRIVER("Task %pK: defined area is outside provided buffers\n",
				 task);
		return -EINVAL;
	}

	/* With 90/270 rotation, width maps to height and vice versa. */
	if ((!swap && (src->rect.w != dst->rect.w ||
		       src->rect.h != dst->rect.h)) ||
	    (swap && (src->rect.w != dst->rect.h ||
		      src->rect.h != dst->rect.w)))
		scale = true;

	/* Each requested operation must be backed by a capability bit. */
	if ((!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_CROP) &&
	     (src->rect.x || src->rect.y || dst->rect.x || dst->rect.y)) ||
	    (!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_ROTATE) && rotate) ||
	    (!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_SCALE) && scale) ||
	    (!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_CONVERT) &&
	     src->buf.fourcc != dst->buf.fourcc)) {
		DRM_DEBUG_DRIVER("Task %pK: hw capabilities exceeded\n", task);
		return -EINVAL;
	}

	src_fmt = __ipp_format_get(ipp, src->buf.fourcc, src->buf.modifier,
				   DRM_EXYNOS_IPP_FORMAT_SOURCE);
	if (!src_fmt) {
		DRM_DEBUG_DRIVER("Task %pK: src format not supported\n", task);
		return -EINVAL;
	}
	ret = exynos_drm_ipp_check_size_limits(src, src_fmt->limits,
					       src_fmt->num_limits,
					       rotate, false);
	if (ret)
		return ret;
	ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
						src_fmt->limits,
						src_fmt->num_limits, swap);
	if (ret)
		return ret;

	dst_fmt = __ipp_format_get(ipp, dst->buf.fourcc, dst->buf.modifier,
				   DRM_EXYNOS_IPP_FORMAT_DESTINATION);
	if (!dst_fmt) {
		DRM_DEBUG_DRIVER("Task %pK: dst format not supported\n", task);
		return -EINVAL;
	}
	/* Rotation applies on the source side; dst is checked unrotated. */
	ret = exynos_drm_ipp_check_size_limits(dst, dst_fmt->limits,
					       dst_fmt->num_limits,
					       false, swap);
	if (ret)
		return ret;
	ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
						dst_fmt->limits,
						dst_fmt->num_limits, swap);
	if (ret)
		return ret;

	DRM_DEBUG_DRIVER("Task %pK: all checks done.\n", task);

	return ret;
}
648
/*
 * Resolve GEM handles for both source and destination buffers. On dst
 * failure the already-acquired src references are not dropped here; every
 * error path in the caller ends in exynos_drm_ipp_task_free(), which
 * releases both buffers.
 */
static int exynos_drm_ipp_task_setup_buffers(struct exynos_drm_ipp_task *task,
					     struct drm_file *filp)
{
	struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
	int ret = 0;

	DRM_DEBUG_DRIVER("Setting buffer for task %pK\n", task);

	ret = exynos_drm_ipp_task_setup_buffer(src, filp);
	if (ret) {
		DRM_DEBUG_DRIVER("Task %pK: src buffer setup failed\n", task);
		return ret;
	}
	ret = exynos_drm_ipp_task_setup_buffer(dst, filp);
	if (ret) {
		DRM_DEBUG_DRIVER("Task %pK: dst buffer setup failed\n", task);
		return ret;
	}

	DRM_DEBUG_DRIVER("Task %pK: buffers prepared.\n", task);

	return ret;
}
672
673
/*
 * Allocate and reserve a completion event on @file_priv for @task.
 * drm_event_reserve_init() accounts the event against the file's event
 * space; on failure nothing is attached to the task.
 */
static int exynos_drm_ipp_event_create(struct exynos_drm_ipp_task *task,
				 struct drm_file *file_priv, uint64_t user_data)
{
	struct drm_pending_exynos_ipp_event *e = NULL;
	int ret;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = user_data;

	ret = drm_event_reserve_init(task->dev, file_priv, &e->base,
				     &e->event.base);
	if (ret)
		goto free;

	task->event = e;
	return 0;
free:
	kfree(e);
	return ret;
}
699
/*
 * Timestamp the pending event and deliver it to userspace. The sequence
 * number is a per-module monotonically increasing counter.
 */
static void exynos_drm_ipp_event_send(struct exynos_drm_ipp_task *task)
{
	struct timespec64 now;

	ktime_get_ts64(&now);
	task->event->event.tv_sec = now.tv_sec;
	task->event->event.tv_usec = now.tv_nsec / NSEC_PER_USEC;
	task->event->event.sequence = atomic_inc_return(&task->ipp->sequence);

	drm_send_event(task->dev, &task->event->base);
}
711
/*
 * Final task teardown: send the completion event (only if the task
 * succeeded) and free the task. Returns the task's stored result code so
 * the synchronous ioctl path can propagate it.
 */
static int exynos_drm_ipp_task_cleanup(struct exynos_drm_ipp_task *task)
{
	int ret = task->ret;

	if (ret == 0 && task->event) {
		exynos_drm_ipp_event_send(task);
		/* ensure event won't be canceled on task free */
		task->event = NULL;
	}

	exynos_drm_ipp_task_free(task->ipp, task);
	return ret;
}
725
/* Deferred cleanup for async (nonblocking/aborted) tasks; see task_done(). */
static void exynos_drm_ipp_cleanup_work(struct work_struct *work)
{
	struct exynos_drm_ipp_task *task = container_of(work,
				      struct exynos_drm_ipp_task, cleanup_work);

	exynos_drm_ipp_task_cleanup(task);
}
733
static void exynos_drm_ipp_next_task(struct exynos_drm_ipp *ipp);

/**
 * exynos_drm_ipp_task_done - finish given task and set return code
 * @task: ipp task to finish
 * @ret: error code or 0 if operation has been performed successfully
 *
 * Called by hardware drivers (possibly from IRQ context, hence the irqsave
 * locking). Marks the task done, kicks the next queued task and wakes any
 * synchronous waiter; async tasks are cleaned up via a workqueue.
 */
void exynos_drm_ipp_task_done(struct exynos_drm_ipp_task *task, int ret)
{
	struct exynos_drm_ipp *ipp = task->ipp;
	unsigned long flags;

	DRM_DEBUG_DRIVER("ipp: %d, task %pK done: %d\n", ipp->id, task, ret);

	spin_lock_irqsave(&ipp->lock, flags);
	if (ipp->task == task)
		ipp->task = NULL;
	task->flags |= DRM_EXYNOS_IPP_TASK_DONE;
	task->ret = ret;
	spin_unlock_irqrestore(&ipp->lock, flags);

	exynos_drm_ipp_next_task(ipp);
	wake_up(&ipp->done_wq);

	/* Nobody is waiting on an async task: free it from process context. */
	if (task->flags & DRM_EXYNOS_IPP_TASK_ASYNC) {
		INIT_WORK(&task->cleanup_work, exynos_drm_ipp_cleanup_work);
		schedule_work(&task->cleanup_work);
	}
}
763
/*
 * If the module is idle, pop the next queued task and hand it to the
 * hardware driver's commit() callback. A failed commit is reported through
 * the normal task_done() path.
 */
static void exynos_drm_ipp_next_task(struct exynos_drm_ipp *ipp)
{
	struct exynos_drm_ipp_task *task;
	unsigned long flags;
	int ret;

	DRM_DEBUG_DRIVER("ipp: %d, try to run new task\n", ipp->id);

	spin_lock_irqsave(&ipp->lock, flags);

	/* Busy, or nothing to do. */
	if (ipp->task || list_empty(&ipp->todo_list)) {
		spin_unlock_irqrestore(&ipp->lock, flags);
		return;
	}

	task = list_first_entry(&ipp->todo_list, struct exynos_drm_ipp_task,
				head);
	list_del_init(&task->head);
	ipp->task = task;

	spin_unlock_irqrestore(&ipp->lock, flags);

	DRM_DEBUG_DRIVER("ipp: %d, selected task %pK to run\n", ipp->id, task);

	/* commit() must not block; it only starts the hardware. */
	ret = ipp->funcs->commit(ipp, task);
	if (ret)
		exynos_drm_ipp_task_done(task, ret);
}
792
/*
 * Queue @task and try to start it immediately if the hardware is idle.
 *
 * NOTE(review): list_add() inserts at the head while next_task() pops from
 * the head, so concurrently queued tasks run in LIFO order — confirm whether
 * list_add_tail() (FIFO) was intended.
 */
static void exynos_drm_ipp_schedule_task(struct exynos_drm_ipp *ipp,
					 struct exynos_drm_ipp_task *task)
{
	unsigned long flags;

	spin_lock_irqsave(&ipp->lock, flags);
	list_add(&task->head, &ipp->todo_list);
	spin_unlock_irqrestore(&ipp->lock, flags);

	exynos_drm_ipp_next_task(ipp);
}
804
/*
 * Cancel a task whose synchronous waiter was interrupted. Three cases:
 * already finished, still queued, or currently running (delegated to the
 * driver's abort() callback with async cleanup).
 *
 * NOTE(review): in the first two branches exynos_drm_ipp_task_cleanup() —
 * which may send a DRM event and kfree the task — runs with ipp->lock held;
 * confirm this is safe in all configurations.
 */
static void exynos_drm_ipp_task_abort(struct exynos_drm_ipp *ipp,
				      struct exynos_drm_ipp_task *task)
{
	unsigned long flags;

	spin_lock_irqsave(&ipp->lock, flags);
	if (task->flags & DRM_EXYNOS_IPP_TASK_DONE) {
		/* already completed task */
		exynos_drm_ipp_task_cleanup(task);
	} else if (ipp->task != task) {
		/* task has not been scheduled for execution yet */
		list_del_init(&task->head);
		exynos_drm_ipp_task_cleanup(task);
	} else {
		/*
		 * currently processed task, call abort() and perform
		 * cleanup with async worker
		 */
		task->flags |= DRM_EXYNOS_IPP_TASK_ASYNC;
		spin_unlock_irqrestore(&ipp->lock, flags);
		if (ipp->funcs->abort)
			ipp->funcs->abort(ipp, task);
		return;
	}
	spin_unlock_irqrestore(&ipp->lock, flags);
}
831
/**
 * exynos_drm_ipp_commit_ioctl - perform image processing operation
 * @dev: DRM device
 * @data: ioctl data
 * @file_priv: DRM file info
 *
 * Construct a ipp task from the set of properties provided from the user
 * and try to schedule it to framebuffer processor hardware.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int exynos_drm_ipp_commit_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_ioctl_ipp_commit *arg = data;
	struct exynos_drm_ipp *ipp;
	struct exynos_drm_ipp_task *task;
	int ret = 0;

	if ((arg->flags & ~DRM_EXYNOS_IPP_FLAGS) || arg->reserved)
		return -EINVAL;

	/* can't test and expect an event at the same time */
	if ((arg->flags & DRM_EXYNOS_IPP_FLAG_TEST_ONLY) &&
	    (arg->flags & DRM_EXYNOS_IPP_FLAG_EVENT))
		return -EINVAL;

	ipp = __ipp_get(arg->ipp_id);
	if (!ipp)
		return -ENOENT;

	task = exynos_drm_ipp_task_alloc(ipp);
	if (!task)
		return -ENOMEM;

	ret = exynos_drm_ipp_task_set(task, arg);
	if (ret)
		goto free;

	ret = exynos_drm_ipp_task_check(task);
	if (ret)
		goto free;

	/* TEST_ONLY stops here: validation done, nothing is executed. */
	ret = exynos_drm_ipp_task_setup_buffers(task, file_priv);
	if (ret || arg->flags & DRM_EXYNOS_IPP_FLAG_TEST_ONLY)
		goto free;

	if (arg->flags & DRM_EXYNOS_IPP_FLAG_EVENT) {
		ret = exynos_drm_ipp_event_create(task, file_priv,
						  arg->user_data);
		if (ret)
			goto free;
	}

	/*
	 * Queue task for processing on the hardware. task object will be
	 * then freed after exynos_drm_ipp_task_done()
	 */
	if (arg->flags & DRM_EXYNOS_IPP_FLAG_NONBLOCK) {
		DRM_DEBUG_DRIVER("ipp: %d, nonblocking processing task %pK\n",
				 ipp->id, task);

		task->flags |= DRM_EXYNOS_IPP_TASK_ASYNC;
		exynos_drm_ipp_schedule_task(task->ipp, task);
		ret = 0;
	} else {
		DRM_DEBUG_DRIVER("ipp: %d, processing task %pK\n", ipp->id,
				 task);
		exynos_drm_ipp_schedule_task(ipp, task);
		/* Interrupted wait (-ERESTARTSYS) aborts the task. */
		ret = wait_event_interruptible(ipp->done_wq,
					task->flags & DRM_EXYNOS_IPP_TASK_DONE);
		if (ret)
			exynos_drm_ipp_task_abort(ipp, task);
		else
			ret = exynos_drm_ipp_task_cleanup(task);
	}
	return ret;
free:
	exynos_drm_ipp_task_free(ipp, task);

	return ret;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
new file mode 100644
index 000000000000..0b27d4a9bf94
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -0,0 +1,175 @@
1/*
2 * Copyright (c) 2017 Samsung Electronics Co., Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
8 */
9
10#ifndef _EXYNOS_DRM_IPP_H_
11#define _EXYNOS_DRM_IPP_H_
12
13#include <drm/drmP.h>
14
15struct exynos_drm_ipp;
16struct exynos_drm_ipp_task;
17
18/**
19 * struct exynos_drm_ipp_funcs - exynos_drm_ipp control functions
20 */
21struct exynos_drm_ipp_funcs {
22 /**
23 * @commit:
24 *
25 * This is the main entry point to start framebuffer processing
26 * in the hardware. The exynos_drm_ipp_task has been already validated.
27 * This function must not wait until the device finishes processing.
28 * When the driver finishes processing, it has to call
 29	 * exynos_drm_ipp_task_done() function.
30 *
31 * RETURNS:
32 *
33 * 0 on success or negative error codes in case of failure.
34 */
35 int (*commit)(struct exynos_drm_ipp *ipp,
36 struct exynos_drm_ipp_task *task);
37
38 /**
39 * @abort:
40 *
41 * Informs the driver that it has to abort the currently running
42 * task as soon as possible (i.e. as soon as it can stop the device
43 * safely), even if the task would not have been finished by then.
44 * After the driver performs the necessary steps, it has to call
45 * exynos_drm_ipp_task_done() (as if the task ended normally).
46 * This function does not have to (and will usually not) wait
47 * until the device enters a state when it can be stopped.
48 */
49 void (*abort)(struct exynos_drm_ipp *ipp,
50 struct exynos_drm_ipp_task *task);
51};
52
53/**
54 * struct exynos_drm_ipp - central picture processor module structure
55 */
56struct exynos_drm_ipp {
57 struct drm_device *dev;
58 struct list_head head;
59 unsigned int id;
60
61 const char *name;
62 const struct exynos_drm_ipp_funcs *funcs;
63 unsigned int capabilities;
64 const struct exynos_drm_ipp_formats *formats;
65 unsigned int num_formats;
66 atomic_t sequence;
67
68 spinlock_t lock;
69 struct exynos_drm_ipp_task *task;
70 struct list_head todo_list;
71 wait_queue_head_t done_wq;
72};
73
74struct exynos_drm_ipp_buffer {
75 struct drm_exynos_ipp_task_buffer buf;
76 struct drm_exynos_ipp_task_rect rect;
77
78 struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
79 const struct drm_format_info *format;
80 dma_addr_t dma_addr[MAX_FB_BUFFER];
81};
82
83/**
84 * struct exynos_drm_ipp_task - a structure describing transformation that
85 * has to be performed by the picture processor hardware module
86 */
87struct exynos_drm_ipp_task {
88 struct drm_device *dev;
89 struct exynos_drm_ipp *ipp;
90 struct list_head head;
91
92 struct exynos_drm_ipp_buffer src;
93 struct exynos_drm_ipp_buffer dst;
94
95 struct drm_exynos_ipp_task_transform transform;
96 struct drm_exynos_ipp_task_alpha alpha;
97
98 struct work_struct cleanup_work;
99 unsigned int flags;
100 int ret;
101
102 struct drm_pending_exynos_ipp_event *event;
103};
104
105#define DRM_EXYNOS_IPP_TASK_DONE (1 << 0)
106#define DRM_EXYNOS_IPP_TASK_ASYNC (1 << 1)
107
108struct exynos_drm_ipp_formats {
109 uint32_t fourcc;
110 uint32_t type;
111 uint64_t modifier;
112 const struct drm_exynos_ipp_limit *limits;
113 unsigned int num_limits;
114};
115
116/* helper macros to set exynos_drm_ipp_formats structure and limits */
117#define IPP_SRCDST_MFORMAT(f, m, l) \
118 .fourcc = DRM_FORMAT_##f, .modifier = m, .limits = l, \
119 .num_limits = ARRAY_SIZE(l), \
120 .type = (DRM_EXYNOS_IPP_FORMAT_SOURCE | \
121 DRM_EXYNOS_IPP_FORMAT_DESTINATION)
122
123#define IPP_SRCDST_FORMAT(f, l) IPP_SRCDST_MFORMAT(f, 0, l)
124
125#define IPP_SIZE_LIMIT(l, val...) \
126 .type = (DRM_EXYNOS_IPP_LIMIT_TYPE_SIZE | \
127 DRM_EXYNOS_IPP_LIMIT_SIZE_##l), val
128
129#define IPP_SCALE_LIMIT(val...) \
130 .type = (DRM_EXYNOS_IPP_LIMIT_TYPE_SCALE), val
131
132int exynos_drm_ipp_register(struct drm_device *dev, struct exynos_drm_ipp *ipp,
133 const struct exynos_drm_ipp_funcs *funcs, unsigned int caps,
134 const struct exynos_drm_ipp_formats *formats,
135 unsigned int num_formats, const char *name);
136void exynos_drm_ipp_unregister(struct drm_device *dev,
137 struct exynos_drm_ipp *ipp);
138
139void exynos_drm_ipp_task_done(struct exynos_drm_ipp_task *task, int ret);
140
141#ifdef CONFIG_DRM_EXYNOS_IPP
142int exynos_drm_ipp_get_res_ioctl(struct drm_device *dev, void *data,
143 struct drm_file *file_priv);
144int exynos_drm_ipp_get_caps_ioctl(struct drm_device *dev, void *data,
145 struct drm_file *file_priv);
146int exynos_drm_ipp_get_limits_ioctl(struct drm_device *dev, void *data,
147 struct drm_file *file_priv);
148int exynos_drm_ipp_commit_ioctl(struct drm_device *dev,
149 void *data, struct drm_file *file_priv);
150#else
151static inline int exynos_drm_ipp_get_res_ioctl(struct drm_device *dev,
152 void *data, struct drm_file *file_priv)
153{
154 struct drm_exynos_ioctl_ipp_get_res *resp = data;
155
156 resp->count_ipps = 0;
157 return 0;
158}
159static inline int exynos_drm_ipp_get_caps_ioctl(struct drm_device *dev,
160 void *data, struct drm_file *file_priv)
161{
162 return -ENODEV;
163}
164static inline int exynos_drm_ipp_get_limits_ioctl(struct drm_device *dev,
165 void *data, struct drm_file *file_priv)
166{
167 return -ENODEV;
168}
169static inline int exynos_drm_ipp_commit_ioctl(struct drm_device *dev,
170 void *data, struct drm_file *file_priv)
171{
172 return -ENODEV;
173}
174#endif
175#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 79282a820ecc..1a76dd3d52e1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/component.h>
13#include <linux/err.h> 14#include <linux/err.h>
14#include <linux/interrupt.h> 15#include <linux/interrupt.h>
15#include <linux/io.h> 16#include <linux/io.h>
@@ -22,29 +23,18 @@
22#include <drm/exynos_drm.h> 23#include <drm/exynos_drm.h>
23#include "regs-rotator.h" 24#include "regs-rotator.h"
24#include "exynos_drm_drv.h" 25#include "exynos_drm_drv.h"
26#include "exynos_drm_iommu.h"
25#include "exynos_drm_ipp.h" 27#include "exynos_drm_ipp.h"
26 28
27/* 29/*
28 * Rotator supports image crop/rotator and input/output DMA operations. 30 * Rotator supports image crop/rotator and input/output DMA operations.
29 * input DMA reads image data from the memory. 31 * input DMA reads image data from the memory.
30 * output DMA writes image data to memory. 32 * output DMA writes image data to memory.
31 *
32 * M2M operation : supports crop/scale/rotation/csc so on.
33 * Memory ----> Rotator H/W ----> Memory.
34 */ 33 */
35 34
36/* 35#define ROTATOR_AUTOSUSPEND_DELAY 2000
37 * TODO
38 * 1. check suspend/resume api if needed.
39 * 2. need to check use case platform_device_id.
40 * 3. check src/dst size with, height.
41 * 4. need to add supported list in prop_list.
42 */
43 36
44#define get_rot_context(dev) platform_get_drvdata(to_platform_device(dev)) 37#define rot_read(offset) readl(rot->regs + (offset))
45#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
46 struct rot_context, ippdrv);
47#define rot_read(offset) readl(rot->regs + (offset))
48#define rot_write(cfg, offset) writel(cfg, rot->regs + (offset)) 38#define rot_write(cfg, offset) writel(cfg, rot->regs + (offset))
49 39
50enum rot_irq_status { 40enum rot_irq_status {
@@ -52,54 +42,28 @@ enum rot_irq_status {
52 ROT_IRQ_STATUS_ILLEGAL = 9, 42 ROT_IRQ_STATUS_ILLEGAL = 9,
53}; 43};
54 44
55/* 45struct rot_variant {
56 * A structure of limitation. 46 const struct exynos_drm_ipp_formats *formats;
57 * 47 unsigned int num_formats;
58 * @min_w: minimum width.
59 * @min_h: minimum height.
60 * @max_w: maximum width.
61 * @max_h: maximum height.
62 * @align: align size.
63 */
64struct rot_limit {
65 u32 min_w;
66 u32 min_h;
67 u32 max_w;
68 u32 max_h;
69 u32 align;
70};
71
72/*
73 * A structure of limitation table.
74 *
75 * @ycbcr420_2p: case of YUV.
76 * @rgb888: case of RGB.
77 */
78struct rot_limit_table {
79 struct rot_limit ycbcr420_2p;
80 struct rot_limit rgb888;
81}; 48};
82 49
83/* 50/*
84 * A structure of rotator context. 51 * A structure of rotator context.
85 * @ippdrv: prepare initialization using ippdrv. 52 * @ippdrv: prepare initialization using ippdrv.
86 * @regs_res: register resources.
87 * @regs: memory mapped io registers. 53 * @regs: memory mapped io registers.
88 * @clock: rotator gate clock. 54 * @clock: rotator gate clock.
89 * @limit_tbl: limitation of rotator. 55 * @limit_tbl: limitation of rotator.
90 * @irq: irq number. 56 * @irq: irq number.
91 * @cur_buf_id: current operation buffer id.
92 * @suspended: suspended state.
93 */ 57 */
94struct rot_context { 58struct rot_context {
95 struct exynos_drm_ippdrv ippdrv; 59 struct exynos_drm_ipp ipp;
96 struct resource *regs_res; 60 struct drm_device *drm_dev;
61 struct device *dev;
97 void __iomem *regs; 62 void __iomem *regs;
98 struct clk *clock; 63 struct clk *clock;
99 struct rot_limit_table *limit_tbl; 64 const struct exynos_drm_ipp_formats *formats;
100 int irq; 65 unsigned int num_formats;
101 int cur_buf_id[EXYNOS_DRM_OPS_MAX]; 66 struct exynos_drm_ipp_task *task;
102 bool suspended;
103}; 67};
104 68
105static void rotator_reg_set_irq(struct rot_context *rot, bool enable) 69static void rotator_reg_set_irq(struct rot_context *rot, bool enable)
@@ -114,15 +78,6 @@ static void rotator_reg_set_irq(struct rot_context *rot, bool enable)
114 rot_write(val, ROT_CONFIG); 78 rot_write(val, ROT_CONFIG);
115} 79}
116 80
117static u32 rotator_reg_get_fmt(struct rot_context *rot)
118{
119 u32 val = rot_read(ROT_CONTROL);
120
121 val &= ROT_CONTROL_FMT_MASK;
122
123 return val;
124}
125
126static enum rot_irq_status rotator_reg_get_irq_status(struct rot_context *rot) 81static enum rot_irq_status rotator_reg_get_irq_status(struct rot_context *rot)
127{ 82{
128 u32 val = rot_read(ROT_STATUS); 83 u32 val = rot_read(ROT_STATUS);
@@ -138,9 +93,6 @@ static enum rot_irq_status rotator_reg_get_irq_status(struct rot_context *rot)
138static irqreturn_t rotator_irq_handler(int irq, void *arg) 93static irqreturn_t rotator_irq_handler(int irq, void *arg)
139{ 94{
140 struct rot_context *rot = arg; 95 struct rot_context *rot = arg;
141 struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
142 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
143 struct drm_exynos_ipp_event_work *event_work = c_node->event_work;
144 enum rot_irq_status irq_status; 96 enum rot_irq_status irq_status;
145 u32 val; 97 u32 val;
146 98
@@ -152,56 +104,21 @@ static irqreturn_t rotator_irq_handler(int irq, void *arg)
152 val |= ROT_STATUS_IRQ_PENDING((u32)irq_status); 104 val |= ROT_STATUS_IRQ_PENDING((u32)irq_status);
153 rot_write(val, ROT_STATUS); 105 rot_write(val, ROT_STATUS);
154 106
155 if (irq_status == ROT_IRQ_STATUS_COMPLETE) { 107 if (rot->task) {
156 event_work->ippdrv = ippdrv; 108 struct exynos_drm_ipp_task *task = rot->task;
157 event_work->buf_id[EXYNOS_DRM_OPS_DST] = 109
158 rot->cur_buf_id[EXYNOS_DRM_OPS_DST]; 110 rot->task = NULL;
159 queue_work(ippdrv->event_workq, &event_work->work); 111 pm_runtime_mark_last_busy(rot->dev);
160 } else { 112 pm_runtime_put_autosuspend(rot->dev);
161 DRM_ERROR("the SFR is set illegally\n"); 113 exynos_drm_ipp_task_done(task,
114 irq_status == ROT_IRQ_STATUS_COMPLETE ? 0 : -EINVAL);
162 } 115 }
163 116
164 return IRQ_HANDLED; 117 return IRQ_HANDLED;
165} 118}
166 119
167static void rotator_align_size(struct rot_context *rot, u32 fmt, u32 *hsize, 120static void rotator_src_set_fmt(struct rot_context *rot, u32 fmt)
168 u32 *vsize)
169{ 121{
170 struct rot_limit_table *limit_tbl = rot->limit_tbl;
171 struct rot_limit *limit;
172 u32 mask, val;
173
174 /* Get size limit */
175 if (fmt == ROT_CONTROL_FMT_RGB888)
176 limit = &limit_tbl->rgb888;
177 else
178 limit = &limit_tbl->ycbcr420_2p;
179
180 /* Get mask for rounding to nearest aligned val */
181 mask = ~((1 << limit->align) - 1);
182
183 /* Set aligned width */
184 val = ROT_ALIGN(*hsize, limit->align, mask);
185 if (val < limit->min_w)
186 *hsize = ROT_MIN(limit->min_w, mask);
187 else if (val > limit->max_w)
188 *hsize = ROT_MAX(limit->max_w, mask);
189 else
190 *hsize = val;
191
192 /* Set aligned height */
193 val = ROT_ALIGN(*vsize, limit->align, mask);
194 if (val < limit->min_h)
195 *vsize = ROT_MIN(limit->min_h, mask);
196 else if (val > limit->max_h)
197 *vsize = ROT_MAX(limit->max_h, mask);
198 else
199 *vsize = val;
200}
201
202static int rotator_src_set_fmt(struct device *dev, u32 fmt)
203{
204 struct rot_context *rot = dev_get_drvdata(dev);
205 u32 val; 122 u32 val;
206 123
207 val = rot_read(ROT_CONTROL); 124 val = rot_read(ROT_CONTROL);
@@ -214,515 +131,176 @@ static int rotator_src_set_fmt(struct device *dev, u32 fmt)
214 case DRM_FORMAT_XRGB8888: 131 case DRM_FORMAT_XRGB8888:
215 val |= ROT_CONTROL_FMT_RGB888; 132 val |= ROT_CONTROL_FMT_RGB888;
216 break; 133 break;
217 default:
218 DRM_ERROR("invalid image format\n");
219 return -EINVAL;
220 } 134 }
221 135
222 rot_write(val, ROT_CONTROL); 136 rot_write(val, ROT_CONTROL);
223
224 return 0;
225} 137}
226 138
227static inline bool rotator_check_reg_fmt(u32 fmt) 139static void rotator_src_set_buf(struct rot_context *rot,
140 struct exynos_drm_ipp_buffer *buf)
228{ 141{
229 if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) ||
230 (fmt == ROT_CONTROL_FMT_RGB888))
231 return true;
232
233 return false;
234}
235
236static int rotator_src_set_size(struct device *dev, int swap,
237 struct drm_exynos_pos *pos,
238 struct drm_exynos_sz *sz)
239{
240 struct rot_context *rot = dev_get_drvdata(dev);
241 u32 fmt, hsize, vsize;
242 u32 val; 142 u32 val;
243 143
244 /* Get format */
245 fmt = rotator_reg_get_fmt(rot);
246 if (!rotator_check_reg_fmt(fmt)) {
247 DRM_ERROR("invalid format.\n");
248 return -EINVAL;
249 }
250
251 /* Align buffer size */
252 hsize = sz->hsize;
253 vsize = sz->vsize;
254 rotator_align_size(rot, fmt, &hsize, &vsize);
255
256 /* Set buffer size configuration */ 144 /* Set buffer size configuration */
257 val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize); 145 val = ROT_SET_BUF_SIZE_H(buf->buf.height) |
146 ROT_SET_BUF_SIZE_W(buf->buf.pitch[0] / buf->format->cpp[0]);
258 rot_write(val, ROT_SRC_BUF_SIZE); 147 rot_write(val, ROT_SRC_BUF_SIZE);
259 148
260 /* Set crop image position configuration */ 149 /* Set crop image position configuration */
261 val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x); 150 val = ROT_CROP_POS_Y(buf->rect.y) | ROT_CROP_POS_X(buf->rect.x);
262 rot_write(val, ROT_SRC_CROP_POS); 151 rot_write(val, ROT_SRC_CROP_POS);
263 val = ROT_SRC_CROP_SIZE_H(pos->h) | ROT_SRC_CROP_SIZE_W(pos->w); 152 val = ROT_SRC_CROP_SIZE_H(buf->rect.h) |
153 ROT_SRC_CROP_SIZE_W(buf->rect.w);
264 rot_write(val, ROT_SRC_CROP_SIZE); 154 rot_write(val, ROT_SRC_CROP_SIZE);
265 155
266 return 0; 156 /* Set buffer DMA address */
157 rot_write(buf->dma_addr[0], ROT_SRC_BUF_ADDR(0));
158 rot_write(buf->dma_addr[1], ROT_SRC_BUF_ADDR(1));
267} 159}
268 160
269static int rotator_src_set_addr(struct device *dev, 161static void rotator_dst_set_transf(struct rot_context *rot,
270 struct drm_exynos_ipp_buf_info *buf_info, 162 unsigned int rotation)
271 u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
272{ 163{
273 struct rot_context *rot = dev_get_drvdata(dev);
274 dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
275 u32 val, fmt, hsize, vsize;
276 int i;
277
278 /* Set current buf_id */
279 rot->cur_buf_id[EXYNOS_DRM_OPS_SRC] = buf_id;
280
281 switch (buf_type) {
282 case IPP_BUF_ENQUEUE:
283 /* Set address configuration */
284 for_each_ipp_planar(i)
285 addr[i] = buf_info->base[i];
286
287 /* Get format */
288 fmt = rotator_reg_get_fmt(rot);
289 if (!rotator_check_reg_fmt(fmt)) {
290 DRM_ERROR("invalid format.\n");
291 return -EINVAL;
292 }
293
294 /* Re-set cb planar for NV12 format */
295 if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
296 !addr[EXYNOS_DRM_PLANAR_CB]) {
297
298 val = rot_read(ROT_SRC_BUF_SIZE);
299 hsize = ROT_GET_BUF_SIZE_W(val);
300 vsize = ROT_GET_BUF_SIZE_H(val);
301
302 /* Set cb planar */
303 addr[EXYNOS_DRM_PLANAR_CB] =
304 addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
305 }
306
307 for_each_ipp_planar(i)
308 rot_write(addr[i], ROT_SRC_BUF_ADDR(i));
309 break;
310 case IPP_BUF_DEQUEUE:
311 for_each_ipp_planar(i)
312 rot_write(0x0, ROT_SRC_BUF_ADDR(i));
313 break;
314 default:
315 /* Nothing to do */
316 break;
317 }
318
319 return 0;
320}
321
322static int rotator_dst_set_transf(struct device *dev,
323 enum drm_exynos_degree degree,
324 enum drm_exynos_flip flip, bool *swap)
325{
326 struct rot_context *rot = dev_get_drvdata(dev);
327 u32 val; 164 u32 val;
328 165
329 /* Set transform configuration */ 166 /* Set transform configuration */
330 val = rot_read(ROT_CONTROL); 167 val = rot_read(ROT_CONTROL);
331 val &= ~ROT_CONTROL_FLIP_MASK; 168 val &= ~ROT_CONTROL_FLIP_MASK;
332 169
333 switch (flip) { 170 if (rotation & DRM_MODE_REFLECT_X)
334 case EXYNOS_DRM_FLIP_VERTICAL:
335 val |= ROT_CONTROL_FLIP_VERTICAL;
336 break;
337 case EXYNOS_DRM_FLIP_HORIZONTAL:
338 val |= ROT_CONTROL_FLIP_HORIZONTAL; 171 val |= ROT_CONTROL_FLIP_HORIZONTAL;
339 break; 172 if (rotation & DRM_MODE_REFLECT_Y)
340 default: 173 val |= ROT_CONTROL_FLIP_VERTICAL;
341 /* Flip None */
342 break;
343 }
344 174
345 val &= ~ROT_CONTROL_ROT_MASK; 175 val &= ~ROT_CONTROL_ROT_MASK;
346 176
347 switch (degree) { 177 if (rotation & DRM_MODE_ROTATE_90)
348 case EXYNOS_DRM_DEGREE_90:
349 val |= ROT_CONTROL_ROT_90; 178 val |= ROT_CONTROL_ROT_90;
350 break; 179 else if (rotation & DRM_MODE_ROTATE_180)
351 case EXYNOS_DRM_DEGREE_180:
352 val |= ROT_CONTROL_ROT_180; 180 val |= ROT_CONTROL_ROT_180;
353 break; 181 else if (rotation & DRM_MODE_ROTATE_270)
354 case EXYNOS_DRM_DEGREE_270:
355 val |= ROT_CONTROL_ROT_270; 182 val |= ROT_CONTROL_ROT_270;
356 break;
357 default:
358 /* Rotation 0 Degree */
359 break;
360 }
361 183
362 rot_write(val, ROT_CONTROL); 184 rot_write(val, ROT_CONTROL);
363
364 /* Check degree for setting buffer size swap */
365 if ((degree == EXYNOS_DRM_DEGREE_90) ||
366 (degree == EXYNOS_DRM_DEGREE_270))
367 *swap = true;
368 else
369 *swap = false;
370
371 return 0;
372} 185}
373 186
374static int rotator_dst_set_size(struct device *dev, int swap, 187static void rotator_dst_set_buf(struct rot_context *rot,
375 struct drm_exynos_pos *pos, 188 struct exynos_drm_ipp_buffer *buf)
376 struct drm_exynos_sz *sz)
377{ 189{
378 struct rot_context *rot = dev_get_drvdata(dev); 190 u32 val;
379 u32 val, fmt, hsize, vsize;
380
381 /* Get format */
382 fmt = rotator_reg_get_fmt(rot);
383 if (!rotator_check_reg_fmt(fmt)) {
384 DRM_ERROR("invalid format.\n");
385 return -EINVAL;
386 }
387
388 /* Align buffer size */
389 hsize = sz->hsize;
390 vsize = sz->vsize;
391 rotator_align_size(rot, fmt, &hsize, &vsize);
392 191
393 /* Set buffer size configuration */ 192 /* Set buffer size configuration */
394 val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize); 193 val = ROT_SET_BUF_SIZE_H(buf->buf.height) |
194 ROT_SET_BUF_SIZE_W(buf->buf.pitch[0] / buf->format->cpp[0]);
395 rot_write(val, ROT_DST_BUF_SIZE); 195 rot_write(val, ROT_DST_BUF_SIZE);
396 196
397 /* Set crop image position configuration */ 197 /* Set crop image position configuration */
398 val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x); 198 val = ROT_CROP_POS_Y(buf->rect.y) | ROT_CROP_POS_X(buf->rect.x);
399 rot_write(val, ROT_DST_CROP_POS); 199 rot_write(val, ROT_DST_CROP_POS);
400 200
401 return 0; 201 /* Set buffer DMA address */
202 rot_write(buf->dma_addr[0], ROT_DST_BUF_ADDR(0));
203 rot_write(buf->dma_addr[1], ROT_DST_BUF_ADDR(1));
402} 204}
403 205
404static int rotator_dst_set_addr(struct device *dev, 206static void rotator_start(struct rot_context *rot)
405 struct drm_exynos_ipp_buf_info *buf_info,
406 u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
407{ 207{
408 struct rot_context *rot = dev_get_drvdata(dev); 208 u32 val;
409 dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
410 u32 val, fmt, hsize, vsize;
411 int i;
412
413 /* Set current buf_id */
414 rot->cur_buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
415
416 switch (buf_type) {
417 case IPP_BUF_ENQUEUE:
418 /* Set address configuration */
419 for_each_ipp_planar(i)
420 addr[i] = buf_info->base[i];
421
422 /* Get format */
423 fmt = rotator_reg_get_fmt(rot);
424 if (!rotator_check_reg_fmt(fmt)) {
425 DRM_ERROR("invalid format.\n");
426 return -EINVAL;
427 }
428
429 /* Re-set cb planar for NV12 format */
430 if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
431 !addr[EXYNOS_DRM_PLANAR_CB]) {
432 /* Get buf size */
433 val = rot_read(ROT_DST_BUF_SIZE);
434
435 hsize = ROT_GET_BUF_SIZE_W(val);
436 vsize = ROT_GET_BUF_SIZE_H(val);
437
438 /* Set cb planar */
439 addr[EXYNOS_DRM_PLANAR_CB] =
440 addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
441 }
442
443 for_each_ipp_planar(i)
444 rot_write(addr[i], ROT_DST_BUF_ADDR(i));
445 break;
446 case IPP_BUF_DEQUEUE:
447 for_each_ipp_planar(i)
448 rot_write(0x0, ROT_DST_BUF_ADDR(i));
449 break;
450 default:
451 /* Nothing to do */
452 break;
453 }
454 209
455 return 0; 210 /* Set interrupt enable */
211 rotator_reg_set_irq(rot, true);
212
213 val = rot_read(ROT_CONTROL);
214 val |= ROT_CONTROL_START;
215 rot_write(val, ROT_CONTROL);
456} 216}
457 217
458static struct exynos_drm_ipp_ops rot_src_ops = { 218static int rotator_commit(struct exynos_drm_ipp *ipp,
459 .set_fmt = rotator_src_set_fmt, 219 struct exynos_drm_ipp_task *task)
460 .set_size = rotator_src_set_size, 220{
461 .set_addr = rotator_src_set_addr, 221 struct rot_context *rot =
462}; 222 container_of(ipp, struct rot_context, ipp);
463 223
464static struct exynos_drm_ipp_ops rot_dst_ops = { 224 pm_runtime_get_sync(rot->dev);
465 .set_transf = rotator_dst_set_transf, 225 rot->task = task;
466 .set_size = rotator_dst_set_size,
467 .set_addr = rotator_dst_set_addr,
468};
469 226
470static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv) 227 rotator_src_set_fmt(rot, task->src.buf.fourcc);
471{ 228 rotator_src_set_buf(rot, &task->src);
472 struct drm_exynos_ipp_prop_list *prop_list = &ippdrv->prop_list; 229 rotator_dst_set_transf(rot, task->transform.rotation);
473 230 rotator_dst_set_buf(rot, &task->dst);
474 prop_list->version = 1; 231 rotator_start(rot);
475 prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
476 (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
477 prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
478 (1 << EXYNOS_DRM_DEGREE_90) |
479 (1 << EXYNOS_DRM_DEGREE_180) |
480 (1 << EXYNOS_DRM_DEGREE_270);
481 prop_list->csc = 0;
482 prop_list->crop = 0;
483 prop_list->scale = 0;
484 232
485 return 0; 233 return 0;
486} 234}
487 235
488static inline bool rotator_check_drm_fmt(u32 fmt) 236static const struct exynos_drm_ipp_funcs ipp_funcs = {
489{ 237 .commit = rotator_commit,
490 switch (fmt) { 238};
491 case DRM_FORMAT_XRGB8888:
492 case DRM_FORMAT_NV12:
493 return true;
494 default:
495 DRM_DEBUG_KMS("not support format\n");
496 return false;
497 }
498}
499
500static inline bool rotator_check_drm_flip(enum drm_exynos_flip flip)
501{
502 switch (flip) {
503 case EXYNOS_DRM_FLIP_NONE:
504 case EXYNOS_DRM_FLIP_VERTICAL:
505 case EXYNOS_DRM_FLIP_HORIZONTAL:
506 case EXYNOS_DRM_FLIP_BOTH:
507 return true;
508 default:
509 DRM_DEBUG_KMS("invalid flip\n");
510 return false;
511 }
512}
513 239
514static int rotator_ippdrv_check_property(struct device *dev, 240static int rotator_bind(struct device *dev, struct device *master, void *data)
515 struct drm_exynos_ipp_property *property)
516{ 241{
517 struct drm_exynos_ipp_config *src_config = 242 struct rot_context *rot = dev_get_drvdata(dev);
518 &property->config[EXYNOS_DRM_OPS_SRC]; 243 struct drm_device *drm_dev = data;
519 struct drm_exynos_ipp_config *dst_config = 244 struct exynos_drm_ipp *ipp = &rot->ipp;
520 &property->config[EXYNOS_DRM_OPS_DST];
521 struct drm_exynos_pos *src_pos = &src_config->pos;
522 struct drm_exynos_pos *dst_pos = &dst_config->pos;
523 struct drm_exynos_sz *src_sz = &src_config->sz;
524 struct drm_exynos_sz *dst_sz = &dst_config->sz;
525 bool swap = false;
526
527 /* Check format configuration */
528 if (src_config->fmt != dst_config->fmt) {
529 DRM_DEBUG_KMS("not support csc feature\n");
530 return -EINVAL;
531 }
532
533 if (!rotator_check_drm_fmt(dst_config->fmt)) {
534 DRM_DEBUG_KMS("invalid format\n");
535 return -EINVAL;
536 }
537
538 /* Check transform configuration */
539 if (src_config->degree != EXYNOS_DRM_DEGREE_0) {
540 DRM_DEBUG_KMS("not support source-side rotation\n");
541 return -EINVAL;
542 }
543
544 switch (dst_config->degree) {
545 case EXYNOS_DRM_DEGREE_90:
546 case EXYNOS_DRM_DEGREE_270:
547 swap = true;
548 case EXYNOS_DRM_DEGREE_0:
549 case EXYNOS_DRM_DEGREE_180:
550 /* No problem */
551 break;
552 default:
553 DRM_DEBUG_KMS("invalid degree\n");
554 return -EINVAL;
555 }
556
557 if (src_config->flip != EXYNOS_DRM_FLIP_NONE) {
558 DRM_DEBUG_KMS("not support source-side flip\n");
559 return -EINVAL;
560 }
561 245
562 if (!rotator_check_drm_flip(dst_config->flip)) { 246 rot->drm_dev = drm_dev;
563 DRM_DEBUG_KMS("invalid flip\n"); 247 drm_iommu_attach_device(drm_dev, dev);
564 return -EINVAL;
565 }
566 248
567 /* Check size configuration */ 249 exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
568 if ((src_pos->x + src_pos->w > src_sz->hsize) || 250 DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE,
569 (src_pos->y + src_pos->h > src_sz->vsize)) { 251 rot->formats, rot->num_formats, "rotator");
570 DRM_DEBUG_KMS("out of source buffer bound\n");
571 return -EINVAL;
572 }
573 252
574 if (swap) { 253 dev_info(dev, "The exynos rotator has been probed successfully\n");
575 if ((dst_pos->x + dst_pos->h > dst_sz->vsize) ||
576 (dst_pos->y + dst_pos->w > dst_sz->hsize)) {
577 DRM_DEBUG_KMS("out of destination buffer bound\n");
578 return -EINVAL;
579 }
580
581 if ((src_pos->w != dst_pos->h) || (src_pos->h != dst_pos->w)) {
582 DRM_DEBUG_KMS("not support scale feature\n");
583 return -EINVAL;
584 }
585 } else {
586 if ((dst_pos->x + dst_pos->w > dst_sz->hsize) ||
587 (dst_pos->y + dst_pos->h > dst_sz->vsize)) {
588 DRM_DEBUG_KMS("out of destination buffer bound\n");
589 return -EINVAL;
590 }
591
592 if ((src_pos->w != dst_pos->w) || (src_pos->h != dst_pos->h)) {
593 DRM_DEBUG_KMS("not support scale feature\n");
594 return -EINVAL;
595 }
596 }
597 254
598 return 0; 255 return 0;
599} 256}
600 257
601static int rotator_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd) 258static void rotator_unbind(struct device *dev, struct device *master,
259 void *data)
602{ 260{
603 struct rot_context *rot = dev_get_drvdata(dev); 261 struct rot_context *rot = dev_get_drvdata(dev);
604 u32 val; 262 struct drm_device *drm_dev = data;
605 263 struct exynos_drm_ipp *ipp = &rot->ipp;
606 if (rot->suspended) {
607 DRM_ERROR("suspended state\n");
608 return -EPERM;
609 }
610
611 if (cmd != IPP_CMD_M2M) {
612 DRM_ERROR("not support cmd: %d\n", cmd);
613 return -EINVAL;
614 }
615
616 /* Set interrupt enable */
617 rotator_reg_set_irq(rot, true);
618
619 val = rot_read(ROT_CONTROL);
620 val |= ROT_CONTROL_START;
621
622 rot_write(val, ROT_CONTROL);
623 264
624 return 0; 265 exynos_drm_ipp_unregister(drm_dev, ipp);
266 drm_iommu_detach_device(rot->drm_dev, rot->dev);
625} 267}
626 268
627static struct rot_limit_table rot_limit_tbl_4210 = { 269static const struct component_ops rotator_component_ops = {
628 .ycbcr420_2p = { 270 .bind = rotator_bind,
629 .min_w = 32, 271 .unbind = rotator_unbind,
630 .min_h = 32,
631 .max_w = SZ_64K,
632 .max_h = SZ_64K,
633 .align = 3,
634 },
635 .rgb888 = {
636 .min_w = 8,
637 .min_h = 8,
638 .max_w = SZ_16K,
639 .max_h = SZ_16K,
640 .align = 2,
641 },
642};
643
644static struct rot_limit_table rot_limit_tbl_4x12 = {
645 .ycbcr420_2p = {
646 .min_w = 32,
647 .min_h = 32,
648 .max_w = SZ_32K,
649 .max_h = SZ_32K,
650 .align = 3,
651 },
652 .rgb888 = {
653 .min_w = 8,
654 .min_h = 8,
655 .max_w = SZ_8K,
656 .max_h = SZ_8K,
657 .align = 2,
658 },
659}; 272};
660 273
661static struct rot_limit_table rot_limit_tbl_5250 = {
662 .ycbcr420_2p = {
663 .min_w = 32,
664 .min_h = 32,
665 .max_w = SZ_32K,
666 .max_h = SZ_32K,
667 .align = 3,
668 },
669 .rgb888 = {
670 .min_w = 8,
671 .min_h = 8,
672 .max_w = SZ_8K,
673 .max_h = SZ_8K,
674 .align = 1,
675 },
676};
677
678static const struct of_device_id exynos_rotator_match[] = {
679 {
680 .compatible = "samsung,exynos4210-rotator",
681 .data = &rot_limit_tbl_4210,
682 },
683 {
684 .compatible = "samsung,exynos4212-rotator",
685 .data = &rot_limit_tbl_4x12,
686 },
687 {
688 .compatible = "samsung,exynos5250-rotator",
689 .data = &rot_limit_tbl_5250,
690 },
691 {},
692};
693MODULE_DEVICE_TABLE(of, exynos_rotator_match);
694
695static int rotator_probe(struct platform_device *pdev) 274static int rotator_probe(struct platform_device *pdev)
696{ 275{
697 struct device *dev = &pdev->dev; 276 struct device *dev = &pdev->dev;
277 struct resource *regs_res;
698 struct rot_context *rot; 278 struct rot_context *rot;
699 struct exynos_drm_ippdrv *ippdrv; 279 const struct rot_variant *variant;
280 int irq;
700 int ret; 281 int ret;
701 282
702 if (!dev->of_node) {
703 dev_err(dev, "cannot find of_node.\n");
704 return -ENODEV;
705 }
706
707 rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL); 283 rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL);
708 if (!rot) 284 if (!rot)
709 return -ENOMEM; 285 return -ENOMEM;
710 286
711 rot->limit_tbl = (struct rot_limit_table *) 287 variant = of_device_get_match_data(dev);
712 of_device_get_match_data(dev); 288 rot->formats = variant->formats;
713 rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 289 rot->num_formats = variant->num_formats;
714 rot->regs = devm_ioremap_resource(dev, rot->regs_res); 290 rot->dev = dev;
291 regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
292 rot->regs = devm_ioremap_resource(dev, regs_res);
715 if (IS_ERR(rot->regs)) 293 if (IS_ERR(rot->regs))
716 return PTR_ERR(rot->regs); 294 return PTR_ERR(rot->regs);
717 295
718 rot->irq = platform_get_irq(pdev, 0); 296 irq = platform_get_irq(pdev, 0);
719 if (rot->irq < 0) { 297 if (irq < 0) {
720 dev_err(dev, "failed to get irq\n"); 298 dev_err(dev, "failed to get irq\n");
721 return rot->irq; 299 return irq;
722 } 300 }
723 301
724 ret = devm_request_threaded_irq(dev, rot->irq, NULL, 302 ret = devm_request_irq(dev, irq, rotator_irq_handler, 0, dev_name(dev),
725 rotator_irq_handler, IRQF_ONESHOT, "drm_rotator", rot); 303 rot);
726 if (ret < 0) { 304 if (ret < 0) {
727 dev_err(dev, "failed to request irq\n"); 305 dev_err(dev, "failed to request irq\n");
728 return ret; 306 return ret;
@@ -734,35 +312,19 @@ static int rotator_probe(struct platform_device *pdev)
734 return PTR_ERR(rot->clock); 312 return PTR_ERR(rot->clock);
735 } 313 }
736 314
315 pm_runtime_use_autosuspend(dev);
316 pm_runtime_set_autosuspend_delay(dev, ROTATOR_AUTOSUSPEND_DELAY);
737 pm_runtime_enable(dev); 317 pm_runtime_enable(dev);
738
739 ippdrv = &rot->ippdrv;
740 ippdrv->dev = dev;
741 ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &rot_src_ops;
742 ippdrv->ops[EXYNOS_DRM_OPS_DST] = &rot_dst_ops;
743 ippdrv->check_property = rotator_ippdrv_check_property;
744 ippdrv->start = rotator_ippdrv_start;
745 ret = rotator_init_prop_list(ippdrv);
746 if (ret < 0) {
747 dev_err(dev, "failed to init property list.\n");
748 goto err_ippdrv_register;
749 }
750
751 DRM_DEBUG_KMS("ippdrv[%pK]\n", ippdrv);
752
753 platform_set_drvdata(pdev, rot); 318 platform_set_drvdata(pdev, rot);
754 319
755 ret = exynos_drm_ippdrv_register(ippdrv); 320 ret = component_add(dev, &rotator_component_ops);
756 if (ret < 0) { 321 if (ret)
757 dev_err(dev, "failed to register drm rotator device\n"); 322 goto err_component;
758 goto err_ippdrv_register;
759 }
760
761 dev_info(dev, "The exynos rotator is probed successfully\n");
762 323
763 return 0; 324 return 0;
764 325
765err_ippdrv_register: 326err_component:
327 pm_runtime_dont_use_autosuspend(dev);
766 pm_runtime_disable(dev); 328 pm_runtime_disable(dev);
767 return ret; 329 return ret;
768} 330}
@@ -770,45 +332,101 @@ err_ippdrv_register:
770static int rotator_remove(struct platform_device *pdev) 332static int rotator_remove(struct platform_device *pdev)
771{ 333{
772 struct device *dev = &pdev->dev; 334 struct device *dev = &pdev->dev;
773 struct rot_context *rot = dev_get_drvdata(dev);
774 struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
775
776 exynos_drm_ippdrv_unregister(ippdrv);
777 335
336 component_del(dev, &rotator_component_ops);
337 pm_runtime_dont_use_autosuspend(dev);
778 pm_runtime_disable(dev); 338 pm_runtime_disable(dev);
779 339
780 return 0; 340 return 0;
781} 341}
782 342
783#ifdef CONFIG_PM 343#ifdef CONFIG_PM
784static int rotator_clk_crtl(struct rot_context *rot, bool enable)
785{
786 if (enable) {
787 clk_prepare_enable(rot->clock);
788 rot->suspended = false;
789 } else {
790 clk_disable_unprepare(rot->clock);
791 rot->suspended = true;
792 }
793
794 return 0;
795}
796
797static int rotator_runtime_suspend(struct device *dev) 344static int rotator_runtime_suspend(struct device *dev)
798{ 345{
799 struct rot_context *rot = dev_get_drvdata(dev); 346 struct rot_context *rot = dev_get_drvdata(dev);
800 347
801 return rotator_clk_crtl(rot, false); 348 clk_disable_unprepare(rot->clock);
349 return 0;
802} 350}
803 351
804static int rotator_runtime_resume(struct device *dev) 352static int rotator_runtime_resume(struct device *dev)
805{ 353{
806 struct rot_context *rot = dev_get_drvdata(dev); 354 struct rot_context *rot = dev_get_drvdata(dev);
807 355
808 return rotator_clk_crtl(rot, true); 356 return clk_prepare_enable(rot->clock);
809} 357}
810#endif 358#endif
811 359
360static const struct drm_exynos_ipp_limit rotator_4210_rbg888_limits[] = {
361 { IPP_SIZE_LIMIT(BUFFER, .h = { 8, SZ_16K }, .v = { 8, SZ_16K }) },
362 { IPP_SIZE_LIMIT(AREA, .h.align = 4, .v.align = 4) },
363};
364
365static const struct drm_exynos_ipp_limit rotator_4412_rbg888_limits[] = {
366 { IPP_SIZE_LIMIT(BUFFER, .h = { 8, SZ_8K }, .v = { 8, SZ_8K }) },
367 { IPP_SIZE_LIMIT(AREA, .h.align = 4, .v.align = 4) },
368};
369
370static const struct drm_exynos_ipp_limit rotator_5250_rbg888_limits[] = {
371 { IPP_SIZE_LIMIT(BUFFER, .h = { 8, SZ_8K }, .v = { 8, SZ_8K }) },
372 { IPP_SIZE_LIMIT(AREA, .h.align = 2, .v.align = 2) },
373};
374
375static const struct drm_exynos_ipp_limit rotator_4210_yuv_limits[] = {
376 { IPP_SIZE_LIMIT(BUFFER, .h = { 32, SZ_64K }, .v = { 32, SZ_64K }) },
377 { IPP_SIZE_LIMIT(AREA, .h.align = 8, .v.align = 8) },
378};
379
380static const struct drm_exynos_ipp_limit rotator_4412_yuv_limits[] = {
381 { IPP_SIZE_LIMIT(BUFFER, .h = { 32, SZ_32K }, .v = { 32, SZ_32K }) },
382 { IPP_SIZE_LIMIT(AREA, .h.align = 8, .v.align = 8) },
383};
384
385static const struct exynos_drm_ipp_formats rotator_4210_formats[] = {
386 { IPP_SRCDST_FORMAT(XRGB8888, rotator_4210_rbg888_limits) },
387 { IPP_SRCDST_FORMAT(NV12, rotator_4210_yuv_limits) },
388};
389
390static const struct exynos_drm_ipp_formats rotator_4412_formats[] = {
391 { IPP_SRCDST_FORMAT(XRGB8888, rotator_4412_rbg888_limits) },
392 { IPP_SRCDST_FORMAT(NV12, rotator_4412_yuv_limits) },
393};
394
395static const struct exynos_drm_ipp_formats rotator_5250_formats[] = {
396 { IPP_SRCDST_FORMAT(XRGB8888, rotator_5250_rbg888_limits) },
397 { IPP_SRCDST_FORMAT(NV12, rotator_4412_yuv_limits) },
398};
399
400static const struct rot_variant rotator_4210_data = {
401 .formats = rotator_4210_formats,
402 .num_formats = ARRAY_SIZE(rotator_4210_formats),
403};
404
405static const struct rot_variant rotator_4412_data = {
406 .formats = rotator_4412_formats,
407 .num_formats = ARRAY_SIZE(rotator_4412_formats),
408};
409
410static const struct rot_variant rotator_5250_data = {
411 .formats = rotator_5250_formats,
412 .num_formats = ARRAY_SIZE(rotator_5250_formats),
413};
414
415static const struct of_device_id exynos_rotator_match[] = {
416 {
417 .compatible = "samsung,exynos4210-rotator",
418 .data = &rotator_4210_data,
419 }, {
420 .compatible = "samsung,exynos4212-rotator",
421 .data = &rotator_4412_data,
422 }, {
423 .compatible = "samsung,exynos5250-rotator",
424 .data = &rotator_5250_data,
425 }, {
426 },
427};
428MODULE_DEVICE_TABLE(of, exynos_rotator_match);
429
812static const struct dev_pm_ops rotator_pm_ops = { 430static const struct dev_pm_ops rotator_pm_ops = {
813 SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, 431 SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
814 pm_runtime_force_resume) 432 pm_runtime_force_resume)
@@ -820,7 +438,7 @@ struct platform_driver rotator_driver = {
820 .probe = rotator_probe, 438 .probe = rotator_probe,
821 .remove = rotator_remove, 439 .remove = rotator_remove,
822 .driver = { 440 .driver = {
823 .name = "exynos-rot", 441 .name = "exynos-rotator",
824 .owner = THIS_MODULE, 442 .owner = THIS_MODULE,
825 .pm = &rotator_pm_ops, 443 .pm = &rotator_pm_ops,
826 .of_match_table = exynos_rotator_match, 444 .of_match_table = exynos_rotator_match,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
new file mode 100644
index 000000000000..63b05b7c846a
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -0,0 +1,694 @@
1/*
2 * Copyright (C) 2017 Samsung Electronics Co.Ltd
3 * Author:
4 * Andrzej Pietrasiewicz <andrzej.p@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/kernel.h>
12#include <linux/component.h>
13#include <linux/err.h>
14#include <linux/interrupt.h>
15#include <linux/io.h>
16#include <linux/platform_device.h>
17#include <linux/clk.h>
18#include <linux/of_device.h>
19#include <linux/pm_runtime.h>
20
21#include <drm/drmP.h>
22#include <drm/exynos_drm.h>
23#include "regs-scaler.h"
24#include "exynos_drm_fb.h"
25#include "exynos_drm_drv.h"
26#include "exynos_drm_iommu.h"
27#include "exynos_drm_ipp.h"
28
29#define scaler_read(offset) readl(scaler->regs + (offset))
30#define scaler_write(cfg, offset) writel(cfg, scaler->regs + (offset))
31#define SCALER_MAX_CLK 4
32#define SCALER_AUTOSUSPEND_DELAY 2000
33
34struct scaler_data {
35 const char *clk_name[SCALER_MAX_CLK];
36 unsigned int num_clk;
37 const struct exynos_drm_ipp_formats *formats;
38 unsigned int num_formats;
39};
40
41struct scaler_context {
42 struct exynos_drm_ipp ipp;
43 struct drm_device *drm_dev;
44 struct device *dev;
45 void __iomem *regs;
46 struct clk *clock[SCALER_MAX_CLK];
47 struct exynos_drm_ipp_task *task;
48 const struct scaler_data *scaler_data;
49};
50
51static u32 scaler_get_format(u32 drm_fmt)
52{
53 switch (drm_fmt) {
54 case DRM_FORMAT_NV21:
55 return SCALER_YUV420_2P_UV;
56 case DRM_FORMAT_NV12:
57 return SCALER_YUV420_2P_VU;
58 case DRM_FORMAT_YUV420:
59 return SCALER_YUV420_3P;
60 case DRM_FORMAT_YUYV:
61 return SCALER_YUV422_1P_YUYV;
62 case DRM_FORMAT_UYVY:
63 return SCALER_YUV422_1P_UYVY;
64 case DRM_FORMAT_YVYU:
65 return SCALER_YUV422_1P_YVYU;
66 case DRM_FORMAT_NV61:
67 return SCALER_YUV422_2P_UV;
68 case DRM_FORMAT_NV16:
69 return SCALER_YUV422_2P_VU;
70 case DRM_FORMAT_YUV422:
71 return SCALER_YUV422_3P;
72 case DRM_FORMAT_NV42:
73 return SCALER_YUV444_2P_UV;
74 case DRM_FORMAT_NV24:
75 return SCALER_YUV444_2P_VU;
76 case DRM_FORMAT_YUV444:
77 return SCALER_YUV444_3P;
78 case DRM_FORMAT_RGB565:
79 return SCALER_RGB_565;
80 case DRM_FORMAT_XRGB1555:
81 return SCALER_ARGB1555;
82 case DRM_FORMAT_ARGB1555:
83 return SCALER_ARGB1555;
84 case DRM_FORMAT_XRGB4444:
85 return SCALER_ARGB4444;
86 case DRM_FORMAT_ARGB4444:
87 return SCALER_ARGB4444;
88 case DRM_FORMAT_XRGB8888:
89 return SCALER_ARGB8888;
90 case DRM_FORMAT_ARGB8888:
91 return SCALER_ARGB8888;
92 case DRM_FORMAT_RGBX8888:
93 return SCALER_RGBA8888;
94 case DRM_FORMAT_RGBA8888:
95 return SCALER_RGBA8888;
96 default:
97 break;
98 }
99
100 return 0;
101}
102
103static inline void scaler_enable_int(struct scaler_context *scaler)
104{
105 u32 val;
106
107 val = SCALER_INT_EN_TIMEOUT |
108 SCALER_INT_EN_ILLEGAL_BLEND |
109 SCALER_INT_EN_ILLEGAL_RATIO |
110 SCALER_INT_EN_ILLEGAL_DST_HEIGHT |
111 SCALER_INT_EN_ILLEGAL_DST_WIDTH |
112 SCALER_INT_EN_ILLEGAL_DST_V_POS |
113 SCALER_INT_EN_ILLEGAL_DST_H_POS |
114 SCALER_INT_EN_ILLEGAL_DST_C_SPAN |
115 SCALER_INT_EN_ILLEGAL_DST_Y_SPAN |
116 SCALER_INT_EN_ILLEGAL_DST_CR_BASE |
117 SCALER_INT_EN_ILLEGAL_DST_CB_BASE |
118 SCALER_INT_EN_ILLEGAL_DST_Y_BASE |
119 SCALER_INT_EN_ILLEGAL_DST_COLOR |
120 SCALER_INT_EN_ILLEGAL_SRC_HEIGHT |
121 SCALER_INT_EN_ILLEGAL_SRC_WIDTH |
122 SCALER_INT_EN_ILLEGAL_SRC_CV_POS |
123 SCALER_INT_EN_ILLEGAL_SRC_CH_POS |
124 SCALER_INT_EN_ILLEGAL_SRC_YV_POS |
125 SCALER_INT_EN_ILLEGAL_SRC_YH_POS |
126 SCALER_INT_EN_ILLEGAL_DST_SPAN |
127 SCALER_INT_EN_ILLEGAL_SRC_Y_SPAN |
128 SCALER_INT_EN_ILLEGAL_SRC_CR_BASE |
129 SCALER_INT_EN_ILLEGAL_SRC_CB_BASE |
130 SCALER_INT_EN_ILLEGAL_SRC_Y_BASE |
131 SCALER_INT_EN_ILLEGAL_SRC_COLOR |
132 SCALER_INT_EN_FRAME_END;
133 scaler_write(val, SCALER_INT_EN);
134}
135
136static inline void scaler_set_src_fmt(struct scaler_context *scaler,
137 u32 src_fmt)
138{
139 u32 val;
140
141 val = SCALER_SRC_CFG_SET_COLOR_FORMAT(src_fmt);
142 scaler_write(val, SCALER_SRC_CFG);
143}
144
145static inline void scaler_set_src_base(struct scaler_context *scaler,
146 struct exynos_drm_ipp_buffer *src_buf)
147{
148 static unsigned int bases[] = {
149 SCALER_SRC_Y_BASE,
150 SCALER_SRC_CB_BASE,
151 SCALER_SRC_CR_BASE,
152 };
153 int i;
154
155 for (i = 0; i < src_buf->format->num_planes; ++i)
156 scaler_write(src_buf->dma_addr[i], bases[i]);
157}
158
159static inline void scaler_set_src_span(struct scaler_context *scaler,
160 struct exynos_drm_ipp_buffer *src_buf)
161{
162 u32 val;
163
164 val = SCALER_SRC_SPAN_SET_Y_SPAN(src_buf->buf.pitch[0] /
165 src_buf->format->cpp[0]);
166
167 if (src_buf->format->num_planes > 1)
168 val |= SCALER_SRC_SPAN_SET_C_SPAN(src_buf->buf.pitch[1]);
169
170 scaler_write(val, SCALER_SRC_SPAN);
171}
172
173static inline void scaler_set_src_luma_pos(struct scaler_context *scaler,
174 struct drm_exynos_ipp_task_rect *src_pos)
175{
176 u32 val;
177
178 val = SCALER_SRC_Y_POS_SET_YH_POS(src_pos->x << 2);
179 val |= SCALER_SRC_Y_POS_SET_YV_POS(src_pos->y << 2);
180 scaler_write(val, SCALER_SRC_Y_POS);
181 scaler_write(val, SCALER_SRC_C_POS); /* ATTENTION! */
182}
183
184static inline void scaler_set_src_wh(struct scaler_context *scaler,
185 struct drm_exynos_ipp_task_rect *src_pos)
186{
187 u32 val;
188
189 val = SCALER_SRC_WH_SET_WIDTH(src_pos->w);
190 val |= SCALER_SRC_WH_SET_HEIGHT(src_pos->h);
191 scaler_write(val, SCALER_SRC_WH);
192}
193
194static inline void scaler_set_dst_fmt(struct scaler_context *scaler,
195 u32 dst_fmt)
196{
197 u32 val;
198
199 val = SCALER_DST_CFG_SET_COLOR_FORMAT(dst_fmt);
200 scaler_write(val, SCALER_DST_CFG);
201}
202
203static inline void scaler_set_dst_base(struct scaler_context *scaler,
204 struct exynos_drm_ipp_buffer *dst_buf)
205{
206 static unsigned int bases[] = {
207 SCALER_DST_Y_BASE,
208 SCALER_DST_CB_BASE,
209 SCALER_DST_CR_BASE,
210 };
211 int i;
212
213 for (i = 0; i < dst_buf->format->num_planes; ++i)
214 scaler_write(dst_buf->dma_addr[i], bases[i]);
215}
216
217static inline void scaler_set_dst_span(struct scaler_context *scaler,
218 struct exynos_drm_ipp_buffer *dst_buf)
219{
220 u32 val;
221
222 val = SCALER_DST_SPAN_SET_Y_SPAN(dst_buf->buf.pitch[0] /
223 dst_buf->format->cpp[0]);
224
225 if (dst_buf->format->num_planes > 1)
226 val |= SCALER_DST_SPAN_SET_C_SPAN(dst_buf->buf.pitch[1]);
227
228 scaler_write(val, SCALER_DST_SPAN);
229}
230
231static inline void scaler_set_dst_luma_pos(struct scaler_context *scaler,
232 struct drm_exynos_ipp_task_rect *dst_pos)
233{
234 u32 val;
235
236 val = SCALER_DST_WH_SET_WIDTH(dst_pos->w);
237 val |= SCALER_DST_WH_SET_HEIGHT(dst_pos->h);
238 scaler_write(val, SCALER_DST_WH);
239}
240
241static inline void scaler_set_dst_wh(struct scaler_context *scaler,
242 struct drm_exynos_ipp_task_rect *dst_pos)
243{
244 u32 val;
245
246 val = SCALER_DST_POS_SET_H_POS(dst_pos->x);
247 val |= SCALER_DST_POS_SET_V_POS(dst_pos->y);
248 scaler_write(val, SCALER_DST_POS);
249}
250
251static inline void scaler_set_hv_ratio(struct scaler_context *scaler,
252 unsigned int rotation,
253 struct drm_exynos_ipp_task_rect *src_pos,
254 struct drm_exynos_ipp_task_rect *dst_pos)
255{
256 u32 val, h_ratio, v_ratio;
257
258 if (drm_rotation_90_or_270(rotation)) {
259 h_ratio = (src_pos->h << 16) / dst_pos->w;
260 v_ratio = (src_pos->w << 16) / dst_pos->h;
261 } else {
262 h_ratio = (src_pos->w << 16) / dst_pos->w;
263 v_ratio = (src_pos->h << 16) / dst_pos->h;
264 }
265
266 val = SCALER_H_RATIO_SET(h_ratio);
267 scaler_write(val, SCALER_H_RATIO);
268
269 val = SCALER_V_RATIO_SET(v_ratio);
270 scaler_write(val, SCALER_V_RATIO);
271}
272
273static inline void scaler_set_rotation(struct scaler_context *scaler,
274 unsigned int rotation)
275{
276 u32 val = 0;
277
278 if (rotation & DRM_MODE_ROTATE_90)
279 val |= SCALER_ROT_CFG_SET_ROTMODE(SCALER_ROT_MODE_90);
280 else if (rotation & DRM_MODE_ROTATE_180)
281 val |= SCALER_ROT_CFG_SET_ROTMODE(SCALER_ROT_MODE_180);
282 else if (rotation & DRM_MODE_ROTATE_270)
283 val |= SCALER_ROT_CFG_SET_ROTMODE(SCALER_ROT_MODE_270);
284 if (rotation & DRM_MODE_REFLECT_X)
285 val |= SCALER_ROT_CFG_FLIP_X_EN;
286 if (rotation & DRM_MODE_REFLECT_Y)
287 val |= SCALER_ROT_CFG_FLIP_Y_EN;
288 scaler_write(val, SCALER_ROT_CFG);
289}
290
291static inline void scaler_set_csc(struct scaler_context *scaler,
292 const struct drm_format_info *fmt)
293{
294 static const u32 csc_mtx[2][3][3] = {
295 { /* YCbCr to RGB */
296 {0x254, 0x000, 0x331},
297 {0x254, 0xf38, 0xe60},
298 {0x254, 0x409, 0x000},
299 },
300 { /* RGB to YCbCr */
301 {0x084, 0x102, 0x032},
302 {0xfb4, 0xf6b, 0x0e1},
303 {0x0e1, 0xf44, 0xfdc},
304 },
305 };
306 int i, j, dir;
307
308 switch (fmt->format) {
309 case DRM_FORMAT_RGB565:
310 case DRM_FORMAT_XRGB1555:
311 case DRM_FORMAT_ARGB1555:
312 case DRM_FORMAT_XRGB4444:
313 case DRM_FORMAT_ARGB4444:
314 case DRM_FORMAT_XRGB8888:
315 case DRM_FORMAT_ARGB8888:
316 case DRM_FORMAT_RGBX8888:
317 case DRM_FORMAT_RGBA8888:
318 dir = 1;
319 break;
320 default:
321 dir = 0;
322 }
323
324 for (i = 0; i < 3; i++)
325 for (j = 0; j < 3; j++)
326 scaler_write(csc_mtx[dir][i][j], SCALER_CSC_COEF(j, i));
327}
328
329static inline void scaler_set_timer(struct scaler_context *scaler,
330 unsigned int timer, unsigned int divider)
331{
332 u32 val;
333
334 val = SCALER_TIMEOUT_CTRL_TIMER_ENABLE;
335 val |= SCALER_TIMEOUT_CTRL_SET_TIMER_VALUE(timer);
336 val |= SCALER_TIMEOUT_CTRL_SET_TIMER_DIV(divider);
337 scaler_write(val, SCALER_TIMEOUT_CTRL);
338}
339
340static inline void scaler_start_hw(struct scaler_context *scaler)
341{
342 scaler_write(SCALER_CFG_START_CMD, SCALER_CFG);
343}
344
345static int scaler_commit(struct exynos_drm_ipp *ipp,
346 struct exynos_drm_ipp_task *task)
347{
348 struct scaler_context *scaler =
349 container_of(ipp, struct scaler_context, ipp);
350
351 u32 src_fmt = scaler_get_format(task->src.buf.fourcc);
352 struct drm_exynos_ipp_task_rect *src_pos = &task->src.rect;
353
354 u32 dst_fmt = scaler_get_format(task->dst.buf.fourcc);
355 struct drm_exynos_ipp_task_rect *dst_pos = &task->dst.rect;
356
357 scaler->task = task;
358
359 pm_runtime_get_sync(scaler->dev);
360
361 scaler_set_src_fmt(scaler, src_fmt);
362 scaler_set_src_base(scaler, &task->src);
363 scaler_set_src_span(scaler, &task->src);
364 scaler_set_src_luma_pos(scaler, src_pos);
365 scaler_set_src_wh(scaler, src_pos);
366
367 scaler_set_dst_fmt(scaler, dst_fmt);
368 scaler_set_dst_base(scaler, &task->dst);
369 scaler_set_dst_span(scaler, &task->dst);
370 scaler_set_dst_luma_pos(scaler, dst_pos);
371 scaler_set_dst_wh(scaler, dst_pos);
372
373 scaler_set_hv_ratio(scaler, task->transform.rotation, src_pos, dst_pos);
374 scaler_set_rotation(scaler, task->transform.rotation);
375
376 scaler_set_csc(scaler, task->src.format);
377
378 scaler_set_timer(scaler, 0xffff, 0xf);
379
380 scaler_enable_int(scaler);
381 scaler_start_hw(scaler);
382
383 return 0;
384}
385
386static struct exynos_drm_ipp_funcs ipp_funcs = {
387 .commit = scaler_commit,
388};
389
390static inline void scaler_disable_int(struct scaler_context *scaler)
391{
392 scaler_write(0, SCALER_INT_EN);
393}
394
395static inline u32 scaler_get_int_status(struct scaler_context *scaler)
396{
397 return scaler_read(SCALER_INT_STATUS);
398}
399
400static inline bool scaler_task_done(u32 val)
401{
402 return val & SCALER_INT_STATUS_FRAME_END ? 0 : -EINVAL;
403}
404
405static irqreturn_t scaler_irq_handler(int irq, void *arg)
406{
407 struct scaler_context *scaler = arg;
408
409 u32 val = scaler_get_int_status(scaler);
410
411 scaler_disable_int(scaler);
412
413 if (scaler->task) {
414 struct exynos_drm_ipp_task *task = scaler->task;
415
416 scaler->task = NULL;
417 pm_runtime_mark_last_busy(scaler->dev);
418 pm_runtime_put_autosuspend(scaler->dev);
419 exynos_drm_ipp_task_done(task, scaler_task_done(val));
420 }
421
422 return IRQ_HANDLED;
423}
424
425static int scaler_bind(struct device *dev, struct device *master, void *data)
426{
427 struct scaler_context *scaler = dev_get_drvdata(dev);
428 struct drm_device *drm_dev = data;
429 struct exynos_drm_ipp *ipp = &scaler->ipp;
430
431 scaler->drm_dev = drm_dev;
432 drm_iommu_attach_device(drm_dev, dev);
433
434 exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
435 DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
436 DRM_EXYNOS_IPP_CAP_SCALE | DRM_EXYNOS_IPP_CAP_CONVERT,
437 scaler->scaler_data->formats,
438 scaler->scaler_data->num_formats, "scaler");
439
440 dev_info(dev, "The exynos scaler has been probed successfully\n");
441
442 return 0;
443}
444
445static void scaler_unbind(struct device *dev, struct device *master,
446 void *data)
447{
448 struct scaler_context *scaler = dev_get_drvdata(dev);
449 struct drm_device *drm_dev = data;
450 struct exynos_drm_ipp *ipp = &scaler->ipp;
451
452 exynos_drm_ipp_unregister(drm_dev, ipp);
453 drm_iommu_detach_device(scaler->drm_dev, scaler->dev);
454}
455
456static const struct component_ops scaler_component_ops = {
457 .bind = scaler_bind,
458 .unbind = scaler_unbind,
459};
460
461static int scaler_probe(struct platform_device *pdev)
462{
463 struct device *dev = &pdev->dev;
464 struct resource *regs_res;
465 struct scaler_context *scaler;
466 int irq;
467 int ret, i;
468
469 scaler = devm_kzalloc(dev, sizeof(*scaler), GFP_KERNEL);
470 if (!scaler)
471 return -ENOMEM;
472
473 scaler->scaler_data =
474 (struct scaler_data *)of_device_get_match_data(dev);
475
476 scaler->dev = dev;
477 regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
478 scaler->regs = devm_ioremap_resource(dev, regs_res);
479 if (IS_ERR(scaler->regs))
480 return PTR_ERR(scaler->regs);
481
482 irq = platform_get_irq(pdev, 0);
483 if (irq < 0) {
484 dev_err(dev, "failed to get irq\n");
485 return irq;
486 }
487
488 ret = devm_request_threaded_irq(dev, irq, NULL, scaler_irq_handler,
489 IRQF_ONESHOT, "drm_scaler", scaler);
490 if (ret < 0) {
491 dev_err(dev, "failed to request irq\n");
492 return ret;
493 }
494
495 for (i = 0; i < scaler->scaler_data->num_clk; ++i) {
496 scaler->clock[i] = devm_clk_get(dev,
497 scaler->scaler_data->clk_name[i]);
498 if (IS_ERR(scaler->clock[i])) {
499 dev_err(dev, "failed to get clock\n");
500 return PTR_ERR(scaler->clock[i]);
501 }
502 }
503
504 pm_runtime_use_autosuspend(dev);
505 pm_runtime_set_autosuspend_delay(dev, SCALER_AUTOSUSPEND_DELAY);
506 pm_runtime_enable(dev);
507 platform_set_drvdata(pdev, scaler);
508
509 ret = component_add(dev, &scaler_component_ops);
510 if (ret)
511 goto err_ippdrv_register;
512
513 return 0;
514
515err_ippdrv_register:
516 pm_runtime_dont_use_autosuspend(dev);
517 pm_runtime_disable(dev);
518 return ret;
519}
520
521static int scaler_remove(struct platform_device *pdev)
522{
523 struct device *dev = &pdev->dev;
524
525 component_del(dev, &scaler_component_ops);
526 pm_runtime_dont_use_autosuspend(dev);
527 pm_runtime_disable(dev);
528
529 return 0;
530}
531
532#ifdef CONFIG_PM
533
/*
 * clk_disable_unprepare() returns void; this wrapper gives it the same
 * int (*)(struct clk *) shape as clk_prepare_enable() so both can be
 * selected through one function pointer in scaler_clk_ctrl().
 */
static int clk_disable_unprepare_wrapper(struct clk *clk)
{
	clk_disable_unprepare(clk);
	return 0;
}
540
541static int scaler_clk_ctrl(struct scaler_context *scaler, bool enable)
542{
543 int (*clk_fun)(struct clk *clk), i;
544
545 clk_fun = enable ? clk_prepare_enable : clk_disable_unprepare_wrapper;
546
547 for (i = 0; i < scaler->scaler_data->num_clk; ++i)
548 clk_fun(scaler->clock[i]);
549
550 return 0;
551}
552
553static int scaler_runtime_suspend(struct device *dev)
554{
555 struct scaler_context *scaler = dev_get_drvdata(dev);
556
557 return scaler_clk_ctrl(scaler, false);
558}
559
560static int scaler_runtime_resume(struct device *dev)
561{
562 struct scaler_context *scaler = dev_get_drvdata(dev);
563
564 return scaler_clk_ctrl(scaler, true);
565}
566#endif
567
568static const struct dev_pm_ops scaler_pm_ops = {
569 SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
570 pm_runtime_force_resume)
571 SET_RUNTIME_PM_OPS(scaler_runtime_suspend, scaler_runtime_resume, NULL)
572};
573
574static const struct drm_exynos_ipp_limit scaler_5420_two_pixel_hv_limits[] = {
575 { IPP_SIZE_LIMIT(BUFFER, .h = { 16, SZ_8K }, .v = { 16, SZ_8K }) },
576 { IPP_SIZE_LIMIT(AREA, .h.align = 2, .v.align = 2) },
577 { IPP_SCALE_LIMIT(.h = { 65536 * 1 / 4, 65536 * 16 },
578 .v = { 65536 * 1 / 4, 65536 * 16 }) },
579};
580
581static const struct drm_exynos_ipp_limit scaler_5420_two_pixel_h_limits[] = {
582 { IPP_SIZE_LIMIT(BUFFER, .h = { 16, SZ_8K }, .v = { 16, SZ_8K }) },
583 { IPP_SIZE_LIMIT(AREA, .h.align = 2, .v.align = 1) },
584 { IPP_SCALE_LIMIT(.h = { 65536 * 1 / 4, 65536 * 16 },
585 .v = { 65536 * 1 / 4, 65536 * 16 }) },
586};
587
588static const struct drm_exynos_ipp_limit scaler_5420_one_pixel_limits[] = {
589 { IPP_SIZE_LIMIT(BUFFER, .h = { 16, SZ_8K }, .v = { 16, SZ_8K }) },
590 { IPP_SCALE_LIMIT(.h = { 65536 * 1 / 4, 65536 * 16 },
591 .v = { 65536 * 1 / 4, 65536 * 16 }) },
592};
593
594static const struct exynos_drm_ipp_formats exynos5420_formats[] = {
595 /* SCALER_YUV420_2P_UV */
596 { IPP_SRCDST_FORMAT(NV21, scaler_5420_two_pixel_hv_limits) },
597
598 /* SCALER_YUV420_2P_VU */
599 { IPP_SRCDST_FORMAT(NV12, scaler_5420_two_pixel_hv_limits) },
600
601 /* SCALER_YUV420_3P */
602 { IPP_SRCDST_FORMAT(YUV420, scaler_5420_two_pixel_hv_limits) },
603
604 /* SCALER_YUV422_1P_YUYV */
605 { IPP_SRCDST_FORMAT(YUYV, scaler_5420_two_pixel_h_limits) },
606
607 /* SCALER_YUV422_1P_UYVY */
608 { IPP_SRCDST_FORMAT(UYVY, scaler_5420_two_pixel_h_limits) },
609
610 /* SCALER_YUV422_1P_YVYU */
611 { IPP_SRCDST_FORMAT(YVYU, scaler_5420_two_pixel_h_limits) },
612
613 /* SCALER_YUV422_2P_UV */
614 { IPP_SRCDST_FORMAT(NV61, scaler_5420_two_pixel_h_limits) },
615
616 /* SCALER_YUV422_2P_VU */
617 { IPP_SRCDST_FORMAT(NV16, scaler_5420_two_pixel_h_limits) },
618
619 /* SCALER_YUV422_3P */
620 { IPP_SRCDST_FORMAT(YUV422, scaler_5420_two_pixel_h_limits) },
621
622 /* SCALER_YUV444_2P_UV */
623 { IPP_SRCDST_FORMAT(NV42, scaler_5420_one_pixel_limits) },
624
625 /* SCALER_YUV444_2P_VU */
626 { IPP_SRCDST_FORMAT(NV24, scaler_5420_one_pixel_limits) },
627
628 /* SCALER_YUV444_3P */
629 { IPP_SRCDST_FORMAT(YUV444, scaler_5420_one_pixel_limits) },
630
631 /* SCALER_RGB_565 */
632 { IPP_SRCDST_FORMAT(RGB565, scaler_5420_one_pixel_limits) },
633
634 /* SCALER_ARGB1555 */
635 { IPP_SRCDST_FORMAT(XRGB1555, scaler_5420_one_pixel_limits) },
636
637 /* SCALER_ARGB1555 */
638 { IPP_SRCDST_FORMAT(ARGB1555, scaler_5420_one_pixel_limits) },
639
640 /* SCALER_ARGB4444 */
641 { IPP_SRCDST_FORMAT(XRGB4444, scaler_5420_one_pixel_limits) },
642
643 /* SCALER_ARGB4444 */
644 { IPP_SRCDST_FORMAT(ARGB4444, scaler_5420_one_pixel_limits) },
645
646 /* SCALER_ARGB8888 */
647 { IPP_SRCDST_FORMAT(XRGB8888, scaler_5420_one_pixel_limits) },
648
649 /* SCALER_ARGB8888 */
650 { IPP_SRCDST_FORMAT(ARGB8888, scaler_5420_one_pixel_limits) },
651
652 /* SCALER_RGBA8888 */
653 { IPP_SRCDST_FORMAT(RGBX8888, scaler_5420_one_pixel_limits) },
654
655 /* SCALER_RGBA8888 */
656 { IPP_SRCDST_FORMAT(RGBA8888, scaler_5420_one_pixel_limits) },
657};
658
659static const struct scaler_data exynos5420_data = {
660 .clk_name = {"mscl"},
661 .num_clk = 1,
662 .formats = exynos5420_formats,
663 .num_formats = ARRAY_SIZE(exynos5420_formats),
664};
665
666static const struct scaler_data exynos5433_data = {
667 .clk_name = {"pclk", "aclk", "aclk_xiu"},
668 .num_clk = 3,
669 .formats = exynos5420_formats, /* intentional */
670 .num_formats = ARRAY_SIZE(exynos5420_formats),
671};
672
673static const struct of_device_id exynos_scaler_match[] = {
674 {
675 .compatible = "samsung,exynos5420-scaler",
676 .data = &exynos5420_data,
677 }, {
678 .compatible = "samsung,exynos5433-scaler",
679 .data = &exynos5433_data,
680 }, {
681 },
682};
683MODULE_DEVICE_TABLE(of, exynos_scaler_match);
684
685struct platform_driver scaler_driver = {
686 .probe = scaler_probe,
687 .remove = scaler_remove,
688 .driver = {
689 .name = "exynos-scaler",
690 .owner = THIS_MODULE,
691 .pm = &scaler_pm_ops,
692 .of_match_table = exynos_scaler_match,
693 },
694};
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index abd84cbcf1c2..09c4bc0b1859 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -954,8 +954,6 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
954 drm_mode_connector_attach_encoder(connector, encoder); 954 drm_mode_connector_attach_encoder(connector, encoder);
955 955
956 if (hdata->bridge) { 956 if (hdata->bridge) {
957 encoder->bridge = hdata->bridge;
958 hdata->bridge->encoder = encoder;
959 ret = drm_bridge_attach(encoder, hdata->bridge, NULL); 957 ret = drm_bridge_attach(encoder, hdata->bridge, NULL);
960 if (ret) 958 if (ret)
961 DRM_ERROR("Failed to attach bridge\n"); 959 DRM_ERROR("Failed to attach bridge\n");
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 257299ec95c4..272c79f5f5bf 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -473,7 +473,7 @@ static void vp_video_buffer(struct mixer_context *ctx,
473 chroma_addr[1] = chroma_addr[0] + 0x40; 473 chroma_addr[1] = chroma_addr[0] + 0x40;
474 } else { 474 } else {
475 luma_addr[1] = luma_addr[0] + fb->pitches[0]; 475 luma_addr[1] = luma_addr[0] + fb->pitches[0];
476 chroma_addr[1] = chroma_addr[0] + fb->pitches[0]; 476 chroma_addr[1] = chroma_addr[0] + fb->pitches[1];
477 } 477 }
478 } else { 478 } else {
479 luma_addr[1] = 0; 479 luma_addr[1] = 0;
@@ -482,6 +482,7 @@ static void vp_video_buffer(struct mixer_context *ctx,
482 482
483 spin_lock_irqsave(&ctx->reg_slock, flags); 483 spin_lock_irqsave(&ctx->reg_slock, flags);
484 484
485 vp_reg_write(ctx, VP_SHADOW_UPDATE, 1);
485 /* interlace or progressive scan mode */ 486 /* interlace or progressive scan mode */
486 val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? ~0 : 0); 487 val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? ~0 : 0);
487 vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP); 488 vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP);
@@ -495,21 +496,23 @@ static void vp_video_buffer(struct mixer_context *ctx,
495 vp_reg_write(ctx, VP_IMG_SIZE_Y, VP_IMG_HSIZE(fb->pitches[0]) | 496 vp_reg_write(ctx, VP_IMG_SIZE_Y, VP_IMG_HSIZE(fb->pitches[0]) |
496 VP_IMG_VSIZE(fb->height)); 497 VP_IMG_VSIZE(fb->height));
497 /* chroma plane for NV12/NV21 is half the height of the luma plane */ 498 /* chroma plane for NV12/NV21 is half the height of the luma plane */
498 vp_reg_write(ctx, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) | 499 vp_reg_write(ctx, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[1]) |
499 VP_IMG_VSIZE(fb->height / 2)); 500 VP_IMG_VSIZE(fb->height / 2));
500 501
501 vp_reg_write(ctx, VP_SRC_WIDTH, state->src.w); 502 vp_reg_write(ctx, VP_SRC_WIDTH, state->src.w);
502 vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h);
503 vp_reg_write(ctx, VP_SRC_H_POSITION, 503 vp_reg_write(ctx, VP_SRC_H_POSITION,
504 VP_SRC_H_POSITION_VAL(state->src.x)); 504 VP_SRC_H_POSITION_VAL(state->src.x));
505 vp_reg_write(ctx, VP_SRC_V_POSITION, state->src.y);
506
507 vp_reg_write(ctx, VP_DST_WIDTH, state->crtc.w); 505 vp_reg_write(ctx, VP_DST_WIDTH, state->crtc.w);
508 vp_reg_write(ctx, VP_DST_H_POSITION, state->crtc.x); 506 vp_reg_write(ctx, VP_DST_H_POSITION, state->crtc.x);
507
509 if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) { 508 if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) {
509 vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h / 2);
510 vp_reg_write(ctx, VP_SRC_V_POSITION, state->src.y / 2);
510 vp_reg_write(ctx, VP_DST_HEIGHT, state->crtc.h / 2); 511 vp_reg_write(ctx, VP_DST_HEIGHT, state->crtc.h / 2);
511 vp_reg_write(ctx, VP_DST_V_POSITION, state->crtc.y / 2); 512 vp_reg_write(ctx, VP_DST_V_POSITION, state->crtc.y / 2);
512 } else { 513 } else {
514 vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h);
515 vp_reg_write(ctx, VP_SRC_V_POSITION, state->src.y);
513 vp_reg_write(ctx, VP_DST_HEIGHT, state->crtc.h); 516 vp_reg_write(ctx, VP_DST_HEIGHT, state->crtc.h);
514 vp_reg_write(ctx, VP_DST_V_POSITION, state->crtc.y); 517 vp_reg_write(ctx, VP_DST_V_POSITION, state->crtc.y);
515 } 518 }
@@ -699,6 +702,15 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
699 702
700 /* interlace scan need to check shadow register */ 703 /* interlace scan need to check shadow register */
701 if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) { 704 if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) {
705 if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) &&
706 vp_reg_read(ctx, VP_SHADOW_UPDATE))
707 goto out;
708
709 base = mixer_reg_read(ctx, MXR_CFG);
710 shadow = mixer_reg_read(ctx, MXR_CFG_S);
711 if (base != shadow)
712 goto out;
713
702 base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0)); 714 base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0));
703 shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0)); 715 shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
704 if (base != shadow) 716 if (base != shadow)
diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h
index c311f571bdf9..189cfa2470a8 100644
--- a/drivers/gpu/drm/exynos/regs-mixer.h
+++ b/drivers/gpu/drm/exynos/regs-mixer.h
@@ -47,6 +47,7 @@
47#define MXR_MO 0x0304 47#define MXR_MO 0x0304
48#define MXR_RESOLUTION 0x0310 48#define MXR_RESOLUTION 0x0310
49 49
50#define MXR_CFG_S 0x2004
50#define MXR_GRAPHIC0_BASE_S 0x2024 51#define MXR_GRAPHIC0_BASE_S 0x2024
51#define MXR_GRAPHIC1_BASE_S 0x2044 52#define MXR_GRAPHIC1_BASE_S 0x2044
52 53
diff --git a/drivers/gpu/drm/exynos/regs-scaler.h b/drivers/gpu/drm/exynos/regs-scaler.h
new file mode 100644
index 000000000000..fc7ccad75e74
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-scaler.h
@@ -0,0 +1,426 @@
1/* drivers/gpu/drm/exynos/regs-scaler.h
2 *
3 * Copyright (c) 2017 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
6 *
7 * Register definition file for Samsung scaler driver
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#ifndef EXYNOS_REGS_SCALER_H
15#define EXYNOS_REGS_SCALER_H
16
17/* Register part */
18
19/* Global setting */
20#define SCALER_STATUS 0x0 /* no shadow */
21#define SCALER_CFG 0x4
22
23/* Interrupt */
24#define SCALER_INT_EN 0x8 /* no shadow */
25#define SCALER_INT_STATUS 0xc /* no shadow */
26
27/* SRC */
28#define SCALER_SRC_CFG 0x10
29#define SCALER_SRC_Y_BASE 0x14
30#define SCALER_SRC_CB_BASE 0x18
31#define SCALER_SRC_CR_BASE 0x294
32#define SCALER_SRC_SPAN 0x1c
33#define SCALER_SRC_Y_POS 0x20
34#define SCALER_SRC_WH 0x24
35#define SCALER_SRC_C_POS 0x28
36
37/* DST */
38#define SCALER_DST_CFG 0x30
39#define SCALER_DST_Y_BASE 0x34
40#define SCALER_DST_CB_BASE 0x38
41#define SCALER_DST_CR_BASE 0x298
42#define SCALER_DST_SPAN 0x3c
43#define SCALER_DST_WH 0x40
44#define SCALER_DST_POS 0x44
45
46/* Ratio */
47#define SCALER_H_RATIO 0x50
48#define SCALER_V_RATIO 0x54
49
50/* Rotation */
51#define SCALER_ROT_CFG 0x58
52
53/* Coefficient */
54/*
55 * YHCOEF_{x}{A|B|C|D} CHCOEF_{x}{A|B|C|D}
56 *
57 * A B C D A B C D
58 * 0 60 64 68 6c 140 144 148 14c
59 * 1 70 74 78 7c 150 154 158 15c
60 * 2 80 84 88 8c 160 164 168 16c
61 * 3 90 94 98 9c 170 174 178 17c
62 * 4 a0 a4 a8 ac 180 184 188 18c
63 * 5 b0 b4 b8 bc 190 194 198 19c
64 * 6 c0 c4 c8 cc 1a0 1a4 1a8 1ac
65 * 7 d0 d4 d8 dc 1b0 1b4 1b8 1bc
66 * 8 e0 e4 e8 ec 1c0 1c4 1c8 1cc
67 *
68 *
69 * YVCOEF_{x}{A|B} CVCOEF_{x}{A|B}
70 *
71 * A B A B
72 * 0 f0 f4 1d0 1d4
73 * 1 f8 fc 1d8 1dc
74 * 2 100 104 1e0 1e4
75 * 3 108 10c 1e8 1ec
76 * 4 110 114 1f0 1f4
77 * 5 118 11c 1f8 1fc
78 * 6 120 124 200 204
79 * 7 128 12c 208 20c
80 * 8 130 134 210 214
81 */
82#define _SCALER_HCOEF_DELTA(r, c) ((r) * 0x10 + (c) * 0x4)
83#define _SCALER_VCOEF_DELTA(r, c) ((r) * 0x8 + (c) * 0x4)
84
85#define SCALER_YHCOEF(r, c) (0x60 + _SCALER_HCOEF_DELTA((r), (c)))
86#define SCALER_YVCOEF(r, c) (0xf0 + _SCALER_VCOEF_DELTA((r), (c)))
87#define SCALER_CHCOEF(r, c) (0x140 + _SCALER_HCOEF_DELTA((r), (c)))
88#define SCALER_CVCOEF(r, c) (0x1d0 + _SCALER_VCOEF_DELTA((r), (c)))
89
90
91/* Color Space Conversion */
92#define SCALER_CSC_COEF(x, y) (0x220 + (y) * 0xc + (x) * 0x4)
93
94/* Dithering */
95#define SCALER_DITH_CFG 0x250
96
97/* Version Number */
98#define SCALER_VER 0x260 /* no shadow */
99
100/* Cycle count and Timeout */
101#define SCALER_CYCLE_COUNT 0x278 /* no shadow */
102#define SCALER_TIMEOUT_CTRL 0x2c0 /* no shadow */
103#define SCALER_TIMEOUT_CNT 0x2c4 /* no shadow */
104
105/* Blending */
106#define SCALER_SRC_BLEND_COLOR 0x280
107#define SCALER_SRC_BLEND_ALPHA 0x284
108#define SCALER_DST_BLEND_COLOR 0x288
109#define SCALER_DST_BLEND_ALPHA 0x28c
110
111/* Color Fill */
112#define SCALER_FILL_COLOR 0x290
113
114/* Multiple Command Queue */
115#define SCALER_ADDR_Q_CONFIG 0x2a0 /* no shadow */
116#define SCALER_SRC_ADDR_Q_STATUS 0x2a4 /* no shadow */
117#define SCALER_SRC_ADDR_Q 0x2a8 /* no shadow */
118
119/* CRC */
120#define SCALER_CRC_COLOR00_10 0x2b0 /* no shadow */
121#define SCALER_CRC_COLOR20_30 0x2b4 /* no shadow */
122#define SCALER_CRC_COLOR01_11 0x2b8 /* no shadow */
123#define SCALER_CRC_COLOR21_31 0x2bc /* no shadow */
124
125/* Shadow Registers */
126#define SCALER_SHADOW_OFFSET 0x1000
127
128
129/* Bit definition part */
130#define SCALER_MASK(hi_b, lo_b) ((1 << ((hi_b) - (lo_b) + 1)) - 1)
131#define SCALER_GET(reg, hi_b, lo_b) \
132 (((reg) >> (lo_b)) & SCALER_MASK(hi_b, lo_b))
133#define SCALER_SET(val, hi_b, lo_b) \
134 (((val) & SCALER_MASK(hi_b, lo_b)) << lo_b)
135
136/* SCALER_STATUS */
137#define SCALER_STATUS_SCALER_RUNNING (1 << 1)
138#define SCALER_STATUS_SCALER_READY_CLK_DOWN (1 << 0)
139
140/* SCALER_CFG */
141#define SCALER_CFG_FILL_EN (1 << 24)
142#define SCALER_CFG_BLEND_COLOR_DIVIDE_ALPHA_EN (1 << 17)
143#define SCALER_CFG_BLEND_EN (1 << 16)
144#define SCALER_CFG_CSC_Y_OFFSET_SRC_EN (1 << 10)
145#define SCALER_CFG_CSC_Y_OFFSET_DST_EN (1 << 9)
146#define SCALER_CFG_16_BURST_MODE (1 << 8)
147#define SCALER_CFG_SOFT_RESET (1 << 1)
148#define SCALER_CFG_START_CMD (1 << 0)
149
150/* SCALER_INT_EN */
151#define SCALER_INT_EN_TIMEOUT (1 << 31)
152#define SCALER_INT_EN_ILLEGAL_BLEND (1 << 24)
153#define SCALER_INT_EN_ILLEGAL_RATIO (1 << 23)
154#define SCALER_INT_EN_ILLEGAL_DST_HEIGHT (1 << 22)
155#define SCALER_INT_EN_ILLEGAL_DST_WIDTH (1 << 21)
156#define SCALER_INT_EN_ILLEGAL_DST_V_POS (1 << 20)
157#define SCALER_INT_EN_ILLEGAL_DST_H_POS (1 << 19)
158#define SCALER_INT_EN_ILLEGAL_DST_C_SPAN (1 << 18)
159#define SCALER_INT_EN_ILLEGAL_DST_Y_SPAN (1 << 17)
160#define SCALER_INT_EN_ILLEGAL_DST_CR_BASE (1 << 16)
161#define SCALER_INT_EN_ILLEGAL_DST_CB_BASE (1 << 15)
162#define SCALER_INT_EN_ILLEGAL_DST_Y_BASE (1 << 14)
163#define SCALER_INT_EN_ILLEGAL_DST_COLOR (1 << 13)
164#define SCALER_INT_EN_ILLEGAL_SRC_HEIGHT (1 << 12)
165#define SCALER_INT_EN_ILLEGAL_SRC_WIDTH (1 << 11)
166#define SCALER_INT_EN_ILLEGAL_SRC_CV_POS (1 << 10)
167#define SCALER_INT_EN_ILLEGAL_SRC_CH_POS (1 << 9)
168#define SCALER_INT_EN_ILLEGAL_SRC_YV_POS (1 << 8)
169#define SCALER_INT_EN_ILLEGAL_SRC_YH_POS (1 << 7)
170#define SCALER_INT_EN_ILLEGAL_DST_SPAN (1 << 6)
171#define SCALER_INT_EN_ILLEGAL_SRC_Y_SPAN (1 << 5)
172#define SCALER_INT_EN_ILLEGAL_SRC_CR_BASE (1 << 4)
173#define SCALER_INT_EN_ILLEGAL_SRC_CB_BASE (1 << 3)
174#define SCALER_INT_EN_ILLEGAL_SRC_Y_BASE (1 << 2)
175#define SCALER_INT_EN_ILLEGAL_SRC_COLOR (1 << 1)
176#define SCALER_INT_EN_FRAME_END (1 << 0)
177
178/* SCALER_INT_STATUS */
179#define SCALER_INT_STATUS_TIMEOUT (1 << 31)
180#define SCALER_INT_STATUS_ILLEGAL_BLEND (1 << 24)
181#define SCALER_INT_STATUS_ILLEGAL_RATIO (1 << 23)
182#define SCALER_INT_STATUS_ILLEGAL_DST_HEIGHT (1 << 22)
183#define SCALER_INT_STATUS_ILLEGAL_DST_WIDTH (1 << 21)
184#define SCALER_INT_STATUS_ILLEGAL_DST_V_POS (1 << 20)
185#define SCALER_INT_STATUS_ILLEGAL_DST_H_POS (1 << 19)
186#define SCALER_INT_STATUS_ILLEGAL_DST_C_SPAN (1 << 18)
187#define SCALER_INT_STATUS_ILLEGAL_DST_Y_SPAN (1 << 17)
188#define SCALER_INT_STATUS_ILLEGAL_DST_CR_BASE (1 << 16)
189#define SCALER_INT_STATUS_ILLEGAL_DST_CB_BASE (1 << 15)
190#define SCALER_INT_STATUS_ILLEGAL_DST_Y_BASE (1 << 14)
191#define SCALER_INT_STATUS_ILLEGAL_DST_COLOR (1 << 13)
192#define SCALER_INT_STATUS_ILLEGAL_SRC_HEIGHT (1 << 12)
193#define SCALER_INT_STATUS_ILLEGAL_SRC_WIDTH (1 << 11)
194#define SCALER_INT_STATUS_ILLEGAL_SRC_CV_POS (1 << 10)
195#define SCALER_INT_STATUS_ILLEGAL_SRC_CH_POS (1 << 9)
196#define SCALER_INT_STATUS_ILLEGAL_SRC_YV_POS (1 << 8)
197#define SCALER_INT_STATUS_ILLEGAL_SRC_YH_POS (1 << 7)
198#define SCALER_INT_STATUS_ILLEGAL_DST_SPAN (1 << 6)
199#define SCALER_INT_STATUS_ILLEGAL_SRC_Y_SPAN (1 << 5)
200#define SCALER_INT_STATUS_ILLEGAL_SRC_CR_BASE (1 << 4)
201#define SCALER_INT_STATUS_ILLEGAL_SRC_CB_BASE (1 << 3)
202#define SCALER_INT_STATUS_ILLEGAL_SRC_Y_BASE (1 << 2)
203#define SCALER_INT_STATUS_ILLEGAL_SRC_COLOR (1 << 1)
204#define SCALER_INT_STATUS_FRAME_END (1 << 0)
205
206/* SCALER_SRC_CFG */
207#define SCALER_SRC_CFG_TILE_EN (1 << 10)
208#define SCALER_SRC_CFG_GET_BYTE_SWAP(r) SCALER_GET(r, 6, 5)
209#define SCALER_SRC_CFG_SET_BYTE_SWAP(v) SCALER_SET(v, 6, 5)
210#define SCALER_SRC_CFG_GET_COLOR_FORMAT(r) SCALER_GET(r, 4, 0)
211#define SCALER_SRC_CFG_SET_COLOR_FORMAT(v) SCALER_SET(v, 4, 0)
212#define SCALER_YUV420_2P_UV 0
213#define SCALER_YUV422_2P_UV 2
214#define SCALER_YUV444_2P_UV 3
215#define SCALER_RGB_565 4
216#define SCALER_ARGB1555 5
217#define SCALER_ARGB8888 6
218#define SCALER_ARGB8888_PRE 7
219#define SCALER_YUV422_1P_YVYU 9
220#define SCALER_YUV422_1P_YUYV 10
221#define SCALER_YUV422_1P_UYVY 11
222#define SCALER_ARGB4444 12
223#define SCALER_L8A8 13
224#define SCALER_RGBA8888 14
225#define SCALER_L8 15
226#define SCALER_YUV420_2P_VU 16
227#define SCALER_YUV422_2P_VU 18
228#define SCALER_YUV444_2P_VU 19
229#define SCALER_YUV420_3P 20
230#define SCALER_YUV422_3P 22
231#define SCALER_YUV444_3P 23
232
233/* SCALER_SRC_SPAN */
234#define SCALER_SRC_SPAN_GET_C_SPAN(r) SCALER_GET(r, 29, 16)
235#define SCALER_SRC_SPAN_SET_C_SPAN(v) SCALER_SET(v, 29, 16)
236#define SCALER_SRC_SPAN_GET_Y_SPAN(r) SCALER_GET(r, 13, 0)
237#define SCALER_SRC_SPAN_SET_Y_SPAN(v) SCALER_SET(v, 13, 0)
238
239/* SCALER_SRC_Y_POS */
240#define SCALER_SRC_Y_POS_GET_YH_POS(r) SCALER_GET(r, 31, 16)
241#define SCALER_SRC_Y_POS_SET_YH_POS(v) SCALER_SET(v, 31, 16)
242#define SCALER_SRC_Y_POS_GET_YV_POS(r) SCALER_GET(r, 15, 0)
243#define SCALER_SRC_Y_POS_SET_YV_POS(v) SCALER_SET(v, 15, 0)
244
245/* SCALER_SRC_WH */
246#define SCALER_SRC_WH_GET_WIDTH(r) SCALER_GET(r, 29, 16)
247#define SCALER_SRC_WH_SET_WIDTH(v) SCALER_SET(v, 29, 16)
248#define SCALER_SRC_WH_GET_HEIGHT(r) SCALER_GET(r, 13, 0)
249#define SCALER_SRC_WH_SET_HEIGHT(v) SCALER_SET(v, 13, 0)
250
251/* SCALER_SRC_C_POS */
252#define SCALER_SRC_C_POS_GET_CH_POS(r) SCALER_GET(r, 31, 16)
253#define SCALER_SRC_C_POS_SET_CH_POS(v) SCALER_SET(v, 31, 16)
254#define SCALER_SRC_C_POS_GET_CV_POS(r) SCALER_GET(r, 15, 0)
255#define SCALER_SRC_C_POS_SET_CV_POS(v) SCALER_SET(v, 15, 0)
256
257/* SCALER_DST_CFG */
258#define SCALER_DST_CFG_GET_BYTE_SWAP(r) SCALER_GET(r, 6, 5)
259#define SCALER_DST_CFG_SET_BYTE_SWAP(v) SCALER_SET(v, 6, 5)
260#define SCALER_DST_CFG_GET_COLOR_FORMAT(r) SCALER_GET(r, 4, 0)
261#define SCALER_DST_CFG_SET_COLOR_FORMAT(v) SCALER_SET(v, 4, 0)
262
263/* SCALER_DST_SPAN */
264#define SCALER_DST_SPAN_GET_C_SPAN(r) SCALER_GET(r, 29, 16)
265#define SCALER_DST_SPAN_SET_C_SPAN(v) SCALER_SET(v, 29, 16)
266#define SCALER_DST_SPAN_GET_Y_SPAN(r) SCALER_GET(r, 13, 0)
267#define SCALER_DST_SPAN_SET_Y_SPAN(v) SCALER_SET(v, 13, 0)
268
269/* SCALER_DST_WH */
270#define SCALER_DST_WH_GET_WIDTH(r) SCALER_GET(r, 29, 16)
271#define SCALER_DST_WH_SET_WIDTH(v) SCALER_SET(v, 29, 16)
272#define SCALER_DST_WH_GET_HEIGHT(r) SCALER_GET(r, 13, 0)
273#define SCALER_DST_WH_SET_HEIGHT(v) SCALER_SET(v, 13, 0)
274
275/* SCALER_DST_POS */
276#define SCALER_DST_POS_GET_H_POS(r) SCALER_GET(r, 29, 16)
277#define SCALER_DST_POS_SET_H_POS(v) SCALER_SET(v, 29, 16)
278#define SCALER_DST_POS_GET_V_POS(r) SCALER_GET(r, 13, 0)
279#define SCALER_DST_POS_SET_V_POS(v) SCALER_SET(v, 13, 0)
280
281/* SCALER_H_RATIO */
282#define SCALER_H_RATIO_GET(r) SCALER_GET(r, 18, 0)
283#define SCALER_H_RATIO_SET(v) SCALER_SET(v, 18, 0)
284
285/* SCALER_V_RATIO */
286#define SCALER_V_RATIO_GET(r) SCALER_GET(r, 18, 0)
287#define SCALER_V_RATIO_SET(v) SCALER_SET(v, 18, 0)
288
289/* SCALER_ROT_CFG */
290#define SCALER_ROT_CFG_FLIP_X_EN (1 << 3)
291#define SCALER_ROT_CFG_FLIP_Y_EN (1 << 2)
292#define SCALER_ROT_CFG_GET_ROTMODE(r) SCALER_GET(r, 1, 0)
293#define SCALER_ROT_CFG_SET_ROTMODE(v) SCALER_SET(v, 1, 0)
294#define SCALER_ROT_MODE_90 1
295#define SCALER_ROT_MODE_180 2
296#define SCALER_ROT_MODE_270 3
297
298/* SCALER_HCOEF, SCALER_VCOEF */
299#define SCALER_COEF_SHIFT(i) (16 * (1 - (i) % 2))
300#define SCALER_COEF_GET(r, i) \
301 (((r) >> SCALER_COEF_SHIFT(i)) & 0x1ff)
302#define SCALER_COEF_SET(v, i) \
303 (((v) & 0x1ff) << SCALER_COEF_SHIFT(i))
304
305/* SCALER_CSC_COEFxy */
306#define SCALER_CSC_COEF_GET(r) SCALER_GET(r, 11, 0)
307#define SCALER_CSC_COEF_SET(v) SCALER_SET(v, 11, 0)
308
309/* SCALER_DITH_CFG */
310#define SCALER_DITH_CFG_GET_R_TYPE(r) SCALER_GET(r, 8, 6)
311#define SCALER_DITH_CFG_SET_R_TYPE(v) SCALER_SET(v, 8, 6)
312#define SCALER_DITH_CFG_GET_G_TYPE(r) SCALER_GET(r, 5, 3)
313#define SCALER_DITH_CFG_SET_G_TYPE(v) SCALER_SET(v, 5, 3)
314#define SCALER_DITH_CFG_GET_B_TYPE(r) SCALER_GET(r, 2, 0)
315#define SCALER_DITH_CFG_SET_B_TYPE(v) SCALER_SET(v, 2, 0)
316
317/* SCALER_TIMEOUT_CTRL */
318#define SCALER_TIMEOUT_CTRL_GET_TIMER_VALUE(r) SCALER_GET(r, 31, 16)
319#define SCALER_TIMEOUT_CTRL_SET_TIMER_VALUE(v) SCALER_SET(v, 31, 16)
320#define SCALER_TIMEOUT_CTRL_GET_TIMER_DIV(r) SCALER_GET(r, 7, 4)
321#define SCALER_TIMEOUT_CTRL_SET_TIMER_DIV(v) SCALER_SET(v, 7, 4)
322#define SCALER_TIMEOUT_CTRL_TIMER_ENABLE (1 << 0)
323
324/* SCALER_TIMEOUT_CNT */
325#define SCALER_TIMEOUT_CTRL_GET_TIMER_COUNT(r) SCALER_GET(r, 31, 16)
326
327/* SCALER_SRC_BLEND_COLOR */
328#define SCALER_SRC_BLEND_COLOR_SEL_INV (1 << 31)
329#define SCALER_SRC_BLEND_COLOR_GET_SEL(r) SCALER_GET(r, 30, 29)
330#define SCALER_SRC_BLEND_COLOR_SET_SEL(v) SCALER_SET(v, 30, 29)
331#define SCALER_SRC_BLEND_COLOR_OP_SEL_INV (1 << 28)
332#define SCALER_SRC_BLEND_COLOR_GET_OP_SEL(r) SCALER_GET(r, 27, 24)
333#define SCALER_SRC_BLEND_COLOR_SET_OP_SEL(v) SCALER_SET(v, 27, 24)
334#define SCALER_SRC_BLEND_COLOR_GET_COLOR0(r) SCALER_GET(r, 23, 16)
335#define SCALER_SRC_BLEND_COLOR_SET_COLOR0(v) SCALER_SET(v, 23, 16)
336#define SCALER_SRC_BLEND_COLOR_GET_COLOR1(r) SCALER_GET(r, 15, 8)
337#define SCALER_SRC_BLEND_COLOR_SET_COLOR1(v) SCALER_SET(v, 15, 8)
338#define SCALER_SRC_BLEND_COLOR_GET_COLOR2(r) SCALER_GET(r, 7, 0)
339#define SCALER_SRC_BLEND_COLOR_SET_COLOR2(v) SCALER_SET(v, 7, 0)
340
341/* SCALER_SRC_BLEND_ALPHA */
342#define SCALER_SRC_BLEND_ALPHA_SEL_INV (1 << 31)
343#define SCALER_SRC_BLEND_ALPHA_GET_SEL(r) SCALER_GET(r, 30, 29)
344#define SCALER_SRC_BLEND_ALPHA_SET_SEL(v) SCALER_SET(v, 30, 29)
345#define SCALER_SRC_BLEND_ALPHA_OP_SEL_INV (1 << 28)
346#define SCALER_SRC_BLEND_ALPHA_GET_OP_SEL(r) SCALER_GET(r, 27, 24)
347#define SCALER_SRC_BLEND_ALPHA_SET_OP_SEL(v) SCALER_SET(v, 27, 24)
348#define SCALER_SRC_BLEND_ALPHA_GET_ALPHA(r) SCALER_GET(r, 7, 0)
349#define SCALER_SRC_BLEND_ALPHA_SET_ALPHA(v) SCALER_SET(v, 7, 0)
350
351/* SCALER_DST_BLEND_COLOR */
352#define SCALER_DST_BLEND_COLOR_SEL_INV (1 << 31)
353#define SCALER_DST_BLEND_COLOR_GET_SEL(r) SCALER_GET(r, 30, 29)
354#define SCALER_DST_BLEND_COLOR_SET_SEL(v) SCALER_SET(v, 30, 29)
355#define SCALER_DST_BLEND_COLOR_OP_SEL_INV (1 << 28)
356#define SCALER_DST_BLEND_COLOR_GET_OP_SEL(r) SCALER_GET(r, 27, 24)
357#define SCALER_DST_BLEND_COLOR_SET_OP_SEL(v) SCALER_SET(v, 27, 24)
358#define SCALER_DST_BLEND_COLOR_GET_COLOR0(r) SCALER_GET(r, 23, 16)
359#define SCALER_DST_BLEND_COLOR_SET_COLOR0(v) SCALER_SET(v, 23, 16)
360#define SCALER_DST_BLEND_COLOR_GET_COLOR1(r) SCALER_GET(r, 15, 8)
361#define SCALER_DST_BLEND_COLOR_SET_COLOR1(v) SCALER_SET(v, 15, 8)
362#define SCALER_DST_BLEND_COLOR_GET_COLOR2(r) SCALER_GET(r, 7, 0)
363#define SCALER_DST_BLEND_COLOR_SET_COLOR2(v) SCALER_SET(v, 7, 0)
364
365/* SCALER_DST_BLEND_ALPHA */
366#define SCALER_DST_BLEND_ALPHA_SEL_INV (1 << 31)
367#define SCALER_DST_BLEND_ALPHA_GET_SEL(r) SCALER_GET(r, 30, 29)
368#define SCALER_DST_BLEND_ALPHA_SET_SEL(v) SCALER_SET(v, 30, 29)
369#define SCALER_DST_BLEND_ALPHA_OP_SEL_INV (1 << 28)
370#define SCALER_DST_BLEND_ALPHA_GET_OP_SEL(r) SCALER_GET(r, 27, 24)
371#define SCALER_DST_BLEND_ALPHA_SET_OP_SEL(v) SCALER_SET(v, 27, 24)
372#define SCALER_DST_BLEND_ALPHA_GET_ALPHA(r) SCALER_GET(r, 7, 0)
373#define SCALER_DST_BLEND_ALPHA_SET_ALPHA(v) SCALER_SET(v, 7, 0)
374
375/* SCALER_FILL_COLOR */
376#define SCALER_FILL_COLOR_GET_ALPHA(r) SCALER_GET(r, 31, 24)
377#define SCALER_FILL_COLOR_SET_ALPHA(v) SCALER_SET(v, 31, 24)
378#define SCALER_FILL_COLOR_GET_FILL_COLOR0(r) SCALER_GET(r, 23, 16)
379#define SCALER_FILL_COLOR_SET_FILL_COLOR0(v) SCALER_SET(v, 23, 16)
380#define SCALER_FILL_COLOR_GET_FILL_COLOR1(r) SCALER_GET(r, 15, 8)
381#define SCALER_FILL_COLOR_SET_FILL_COLOR1(v) SCALER_SET(v, 15, 8)
382#define SCALER_FILL_COLOR_GET_FILL_COLOR2(r) SCALER_GET(r, 7, 0)
383#define SCALER_FILL_COLOR_SET_FILL_COLOR2(v) SCALER_SET(v, 7, 0)
384
385/* SCALER_ADDR_Q_CONFIG */
386#define SCALER_ADDR_Q_CONFIG_RST (1 << 0)
387
388/* SCALER_SRC_ADDR_Q_STATUS */
389#define SCALER_SRC_ADDR_Q_STATUS_Y_FULL (1 << 23)
390#define SCALER_SRC_ADDR_Q_STATUS_Y_EMPTY (1 << 22)
391#define SCALER_SRC_ADDR_Q_STATUS_GET_Y_WR_IDX(r) SCALER_GET(r, 21, 16)
392#define SCALER_SRC_ADDR_Q_STATUS_CB_FULL (1 << 15)
393#define SCALER_SRC_ADDR_Q_STATUS_CB_EMPTY (1 << 14)
394#define SCALER_SRC_ADDR_Q_STATUS_GET_CB_WR_IDX(r) SCALER_GET(r, 13, 8)
395#define SCALER_SRC_ADDR_Q_STATUS_CR_FULL (1 << 7)
396#define SCALER_SRC_ADDR_Q_STATUS_CR_EMPTY (1 << 6)
397#define SCALER_SRC_ADDR_Q_STATUS_GET_CR_WR_IDX(r) SCALER_GET(r, 5, 0)
398
399/* SCALER_DST_ADDR_Q_STATUS */
400#define SCALER_DST_ADDR_Q_STATUS_Y_FULL (1 << 23)
401#define SCALER_DST_ADDR_Q_STATUS_Y_EMPTY (1 << 22)
402#define SCALER_DST_ADDR_Q_STATUS_GET_Y_WR_IDX(r) SCALER_GET(r, 21, 16)
403#define SCALER_DST_ADDR_Q_STATUS_CB_FULL (1 << 15)
404#define SCALER_DST_ADDR_Q_STATUS_CB_EMPTY (1 << 14)
405#define SCALER_DST_ADDR_Q_STATUS_GET_CB_WR_IDX(r) SCALER_GET(r, 13, 8)
406#define SCALER_DST_ADDR_Q_STATUS_CR_FULL (1 << 7)
407#define SCALER_DST_ADDR_Q_STATUS_CR_EMPTY (1 << 6)
408#define SCALER_DST_ADDR_Q_STATUS_GET_CR_WR_IDX(r) SCALER_GET(r, 5, 0)
409
410/* SCALER_CRC_COLOR00_10 */
411#define SCALER_CRC_COLOR00_10_GET_00(r) SCALER_GET(r, 31, 16)
412#define SCALER_CRC_COLOR00_10_GET_10(r) SCALER_GET(r, 15, 0)
413
414/* SCALER_CRC_COLOR20_30 */
415#define SCALER_CRC_COLOR20_30_GET_20(r) SCALER_GET(r, 31, 16)
416#define SCALER_CRC_COLOR20_30_GET_30(r) SCALER_GET(r, 15, 0)
417
418/* SCALER_CRC_COLOR01_11 */
419#define SCALER_CRC_COLOR01_11_GET_01(r) SCALER_GET(r, 31, 16)
420#define SCALER_CRC_COLOR01_11_GET_11(r) SCALER_GET(r, 15, 0)
421
422/* SCALER_CRC_COLOR21_31 */
423#define SCALER_CRC_COLOR21_31_GET_21(r) SCALER_GET(r, 31, 16)
424#define SCALER_CRC_COLOR21_31_GET_31(r) SCALER_GET(r, 15, 0)
425
426#endif /* EXYNOS_REGS_SCALER_H */
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 41e6c75a7f3c..f9550ea46c26 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -35,6 +35,7 @@
35 */ 35 */
36 36
37#define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin" 37#define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin"
38MODULE_FIRMWARE(I915_CSR_GLK);
38#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4) 39#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
39 40
40#define I915_CSR_CNL "i915/cnl_dmc_ver1_07.bin" 41#define I915_CSR_CNL "i915/cnl_dmc_ver1_07.bin"
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 83d3b7912fc2..c8650bbcbcb3 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -741,6 +741,7 @@ static irqreturn_t vc4_crtc_irq_handler(int irq, void *data)
741struct vc4_async_flip_state { 741struct vc4_async_flip_state {
742 struct drm_crtc *crtc; 742 struct drm_crtc *crtc;
743 struct drm_framebuffer *fb; 743 struct drm_framebuffer *fb;
744 struct drm_framebuffer *old_fb;
744 struct drm_pending_vblank_event *event; 745 struct drm_pending_vblank_event *event;
745 746
746 struct vc4_seqno_cb cb; 747 struct vc4_seqno_cb cb;
@@ -770,6 +771,23 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
770 771
771 drm_crtc_vblank_put(crtc); 772 drm_crtc_vblank_put(crtc);
772 drm_framebuffer_put(flip_state->fb); 773 drm_framebuffer_put(flip_state->fb);
774
775 /* Decrement the BO usecnt in order to keep the inc/dec calls balanced
776 * when the planes are updated through the async update path.
777 * FIXME: we should move to generic async-page-flip when it's
778 * available, so that we can get rid of this hand-made cleanup_fb()
779 * logic.
780 */
781 if (flip_state->old_fb) {
782 struct drm_gem_cma_object *cma_bo;
783 struct vc4_bo *bo;
784
785 cma_bo = drm_fb_cma_get_gem_obj(flip_state->old_fb, 0);
786 bo = to_vc4_bo(&cma_bo->base);
787 vc4_bo_dec_usecnt(bo);
788 drm_framebuffer_put(flip_state->old_fb);
789 }
790
773 kfree(flip_state); 791 kfree(flip_state);
774 792
775 up(&vc4->async_modeset); 793 up(&vc4->async_modeset);
@@ -794,9 +812,22 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
794 struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0); 812 struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0);
795 struct vc4_bo *bo = to_vc4_bo(&cma_bo->base); 813 struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
796 814
815 /* Increment the BO usecnt here, so that we never end up with an
816 * unbalanced number of vc4_bo_{dec,inc}_usecnt() calls when the
817 * plane is later updated through the non-async path.
818 * FIXME: we should move to generic async-page-flip when it's
819 * available, so that we can get rid of this hand-made prepare_fb()
820 * logic.
821 */
822 ret = vc4_bo_inc_usecnt(bo);
823 if (ret)
824 return ret;
825
797 flip_state = kzalloc(sizeof(*flip_state), GFP_KERNEL); 826 flip_state = kzalloc(sizeof(*flip_state), GFP_KERNEL);
798 if (!flip_state) 827 if (!flip_state) {
828 vc4_bo_dec_usecnt(bo);
799 return -ENOMEM; 829 return -ENOMEM;
830 }
800 831
801 drm_framebuffer_get(fb); 832 drm_framebuffer_get(fb);
802 flip_state->fb = fb; 833 flip_state->fb = fb;
@@ -807,10 +838,23 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
807 ret = down_interruptible(&vc4->async_modeset); 838 ret = down_interruptible(&vc4->async_modeset);
808 if (ret) { 839 if (ret) {
809 drm_framebuffer_put(fb); 840 drm_framebuffer_put(fb);
841 vc4_bo_dec_usecnt(bo);
810 kfree(flip_state); 842 kfree(flip_state);
811 return ret; 843 return ret;
812 } 844 }
813 845
846 /* Save the current FB before it's replaced by the new one in
847 * drm_atomic_set_fb_for_plane(). We'll need the old FB in
848 * vc4_async_page_flip_complete() to decrement the BO usecnt and keep
849 * it consistent.
850 * FIXME: we should move to generic async-page-flip when it's
851 * available, so that we can get rid of this hand-made cleanup_fb()
852 * logic.
853 */
854 flip_state->old_fb = plane->state->fb;
855 if (flip_state->old_fb)
856 drm_framebuffer_get(flip_state->old_fb);
857
814 WARN_ON(drm_crtc_vblank_get(crtc) != 0); 858 WARN_ON(drm_crtc_vblank_get(crtc) != 0);
815 859
816 /* Immediately update the plane's legacy fb pointer, so that later 860 /* Immediately update the plane's legacy fb pointer, so that later
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 2582ffd36bb5..ba0cdb743c3e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -441,11 +441,11 @@ static int vmwgfx_set_config_internal(struct drm_mode_set *set)
441 struct drm_crtc *crtc = set->crtc; 441 struct drm_crtc *crtc = set->crtc;
442 struct drm_framebuffer *fb; 442 struct drm_framebuffer *fb;
443 struct drm_crtc *tmp; 443 struct drm_crtc *tmp;
444 struct drm_modeset_acquire_ctx *ctx;
445 struct drm_device *dev = set->crtc->dev; 444 struct drm_device *dev = set->crtc->dev;
445 struct drm_modeset_acquire_ctx ctx;
446 int ret; 446 int ret;
447 447
448 ctx = dev->mode_config.acquire_ctx; 448 drm_modeset_acquire_init(&ctx, 0);
449 449
450restart: 450restart:
451 /* 451 /*
@@ -458,7 +458,7 @@ restart:
458 458
459 fb = set->fb; 459 fb = set->fb;
460 460
461 ret = crtc->funcs->set_config(set, ctx); 461 ret = crtc->funcs->set_config(set, &ctx);
462 if (ret == 0) { 462 if (ret == 0) {
463 crtc->primary->crtc = crtc; 463 crtc->primary->crtc = crtc;
464 crtc->primary->fb = fb; 464 crtc->primary->fb = fb;
@@ -473,20 +473,13 @@ restart:
473 } 473 }
474 474
475 if (ret == -EDEADLK) { 475 if (ret == -EDEADLK) {
476 dev->mode_config.acquire_ctx = NULL; 476 drm_modeset_backoff(&ctx);
477
478retry_locking:
479 drm_modeset_backoff(ctx);
480
481 ret = drm_modeset_lock_all_ctx(dev, ctx);
482 if (ret)
483 goto retry_locking;
484
485 dev->mode_config.acquire_ctx = ctx;
486
487 goto restart; 477 goto restart;
488 } 478 }
489 479
480 drm_modeset_drop_locks(&ctx);
481 drm_modeset_acquire_fini(&ctx);
482
490 return ret; 483 return ret;
491} 484}
492 485
@@ -624,7 +617,6 @@ static int vmw_fb_set_par(struct fb_info *info)
624 } 617 }
625 618
626 mutex_lock(&par->bo_mutex); 619 mutex_lock(&par->bo_mutex);
627 drm_modeset_lock_all(vmw_priv->dev);
628 ret = vmw_fb_kms_framebuffer(info); 620 ret = vmw_fb_kms_framebuffer(info);
629 if (ret) 621 if (ret)
630 goto out_unlock; 622 goto out_unlock;
@@ -657,7 +649,6 @@ out_unlock:
657 drm_mode_destroy(vmw_priv->dev, old_mode); 649 drm_mode_destroy(vmw_priv->dev, old_mode);
658 par->set_mode = mode; 650 par->set_mode = mode;
659 651
660 drm_modeset_unlock_all(vmw_priv->dev);
661 mutex_unlock(&par->bo_mutex); 652 mutex_unlock(&par->bo_mutex);
662 653
663 return ret; 654 return ret;
@@ -713,18 +704,14 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
713 par->max_width = fb_width; 704 par->max_width = fb_width;
714 par->max_height = fb_height; 705 par->max_height = fb_height;
715 706
716 drm_modeset_lock_all(vmw_priv->dev);
717 ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width, 707 ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
718 par->max_height, &par->con, 708 par->max_height, &par->con,
719 &par->crtc, &init_mode); 709 &par->crtc, &init_mode);
720 if (ret) { 710 if (ret)
721 drm_modeset_unlock_all(vmw_priv->dev);
722 goto err_kms; 711 goto err_kms;
723 }
724 712
725 info->var.xres = init_mode->hdisplay; 713 info->var.xres = init_mode->hdisplay;
726 info->var.yres = init_mode->vdisplay; 714 info->var.yres = init_mode->vdisplay;
727 drm_modeset_unlock_all(vmw_priv->dev);
728 715
729 /* 716 /*
730 * Create buffers and alloc memory 717 * Create buffers and alloc memory
@@ -832,7 +819,9 @@ int vmw_fb_close(struct vmw_private *vmw_priv)
832 cancel_delayed_work_sync(&par->local_work); 819 cancel_delayed_work_sync(&par->local_work);
833 unregister_framebuffer(info); 820 unregister_framebuffer(info);
834 821
822 mutex_lock(&par->bo_mutex);
835 (void) vmw_fb_kms_detach(par, true, true); 823 (void) vmw_fb_kms_detach(par, true, true);
824 mutex_unlock(&par->bo_mutex);
836 825
837 vfree(par->vmalloc); 826 vfree(par->vmalloc);
838 framebuffer_release(info); 827 framebuffer_release(info);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 6728c6247b4b..01f2dc9e6f52 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -2595,6 +2595,7 @@ void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
2595 vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf, 2595 vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
2596 out_fence, NULL); 2596 out_fence, NULL);
2597 2597
2598 vmw_dmabuf_unreference(&ctx->buf);
2598 vmw_resource_unreserve(res, false, NULL, 0); 2599 vmw_resource_unreserve(res, false, NULL, 0);
2599 mutex_unlock(&res->dev_priv->cmdbuf_mutex); 2600 mutex_unlock(&res->dev_priv->cmdbuf_mutex);
2600} 2601}
@@ -2680,7 +2681,9 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
2680 struct vmw_display_unit *du; 2681 struct vmw_display_unit *du;
2681 struct drm_display_mode *mode; 2682 struct drm_display_mode *mode;
2682 int i = 0; 2683 int i = 0;
2684 int ret = 0;
2683 2685
2686 mutex_lock(&dev_priv->dev->mode_config.mutex);
2684 list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list, 2687 list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
2685 head) { 2688 head) {
2686 if (i == unit) 2689 if (i == unit)
@@ -2691,7 +2694,8 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
2691 2694
2692 if (i != unit) { 2695 if (i != unit) {
2693 DRM_ERROR("Could not find initial display unit.\n"); 2696 DRM_ERROR("Could not find initial display unit.\n");
2694 return -EINVAL; 2697 ret = -EINVAL;
2698 goto out_unlock;
2695 } 2699 }
2696 2700
2697 if (list_empty(&con->modes)) 2701 if (list_empty(&con->modes))
@@ -2699,7 +2703,8 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
2699 2703
2700 if (list_empty(&con->modes)) { 2704 if (list_empty(&con->modes)) {
2701 DRM_ERROR("Could not find initial display mode.\n"); 2705 DRM_ERROR("Could not find initial display mode.\n");
2702 return -EINVAL; 2706 ret = -EINVAL;
2707 goto out_unlock;
2703 } 2708 }
2704 2709
2705 du = vmw_connector_to_du(con); 2710 du = vmw_connector_to_du(con);
@@ -2720,7 +2725,10 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
2720 head); 2725 head);
2721 } 2726 }
2722 2727
2723 return 0; 2728 out_unlock:
2729 mutex_unlock(&dev_priv->dev->mode_config.mutex);
2730
2731 return ret;
2724} 2732}
2725 2733
2726/** 2734/**